Example #1
    def run_tests(self, args=None, **kw):
        '''
        AWSY test should be implemented here
        '''
        dirs = self.abs_dirs
        env = {}
        error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                          'marionette_errorsummary.log')

        runtime_testvars = {
            'webRootDir': self.webroot_dir,
            'resultsDir': self.results_dir
        }
        runtime_testvars_path = os.path.join(self.awsy_path,
                                             'runtime-testvars.json')
        runtime_testvars_file = open(runtime_testvars_path, 'wb')
        runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
        runtime_testvars_file.close()

        cmd = ['marionette']
        cmd.append("--preferences=%s" %
                   os.path.join(self.awsy_path, "conf", "prefs.json"))
        cmd.append("--testvars=%s" %
                   os.path.join(self.awsy_path, "conf", "testvars.json"))
        cmd.append("--testvars=%s" % runtime_testvars_path)
        cmd.append("--log-raw=-")
        cmd.append("--log-errorsummary=%s" % error_summary_file)
        cmd.append("--binary=%s" % self.binary_path)
        cmd.append("--profile=%s" %
                   (os.path.join(dirs['abs_work_dir'], 'profile')))
        if not self.config['e10s']:
            cmd.append('--disable-e10s')
        cmd.append('--gecko-log=%s' %
                   os.path.join(dirs["abs_blob_upload_dir"], 'gecko.log'))

        test_file = os.path.join(self.awsy_libdir, 'test_memory_usage.py')
        cmd.append(test_file)

        env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
        if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
            self.mkdir_p(env['MOZ_UPLOAD_DIR'])
        env = self.query_env(partial_env=env)
        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)
        return_code = self.run_command(
            command=cmd,
            cwd=self.awsy_path,
            output_timeout=self.config.get("cmd_timeout"),
            env=env,
            output_parser=parser)

        level = INFO
        tbpl_status, log_level = parser.evaluate_parser(
            return_code=return_code)

        self.log("AWSY exited with return code %s: %s" %
                 (return_code, tbpl_status),
                 level=level)
        self.buildbot_status(tbpl_status)
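A note on the command construction above: the repeated cmd.append calls build a flat marionette argument list. The same list can be assembled in a single literal, as in the sketch below; this is purely a stylistic alternative, and the helper name and parameters are illustrative rather than part of the original script.

import os

def build_marionette_cmd(awsy_path, work_dir, blob_dir, binary_path,
                         runtime_testvars_path, error_summary_file,
                         test_file, e10s=True):
    """Assemble the same marionette argument list as above in one literal."""
    cmd = [
        'marionette',
        '--preferences=%s' % os.path.join(awsy_path, 'conf', 'prefs.json'),
        '--testvars=%s' % os.path.join(awsy_path, 'conf', 'testvars.json'),
        '--testvars=%s' % runtime_testvars_path,
        '--log-raw=-',
        '--log-errorsummary=%s' % error_summary_file,
        '--binary=%s' % binary_path,
        '--profile=%s' % os.path.join(work_dir, 'profile'),
    ]
    if not e10s:
        cmd.append('--disable-e10s')
    cmd.append('--gecko-log=%s' % os.path.join(blob_dir, 'gecko.log'))
    cmd.append(test_file)
    return cmd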
Example #2
    def run_tests(self):
        dirs = self.query_abs_dirs()
        cmd = self._query_cmd()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        log_compact=True)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env['RUST_BACKTRACE'] = '1'

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'

        env = self.query_env(partial_env=env, log_level=INFO)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=1000,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)

        self.buildbot_status(tbpl_status, level=log_level)
Example #3
    def run_tests(self):
        dirs = self.query_abs_dirs()
        cmd = self._query_cmd()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        log_compact=True)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env['RUST_BACKTRACE'] = '1'

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'

        env = self.query_env(partial_env=env, log_level=INFO)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=1000,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)

        self.buildbot_status(tbpl_status, level=log_level)
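Examples 2 and 3 above are identical and show the core pattern shared by most of the run_tests variants in this collection: build the command and a partial environment, pass a StructuredOutputParser to run_command as output_parser, then convert the return code into a Treeherder/TBPL status with evaluate_parser and report it. The standalone sketch below distills that flow; ParserStub, run_suite, and the status strings are simplified stand-ins, not the real mozharness API.

import subprocess
import sys

class ParserStub(object):
    """Stand-in for StructuredOutputParser: watches output and derives a status."""

    def __init__(self):
        self.saw_failure = False

    def parse_single_line(self, line):
        # The real parser understands mozlog's structured JSON; this stub
        # just looks for an obvious failure marker.
        if 'FAIL' in line:
            self.saw_failure = True

    def evaluate_parser(self, return_code):
        # The real evaluate_parser returns (tbpl_status, log_level) and, in
        # newer versions, a summary as well.
        if return_code != 0 or self.saw_failure:
            return 'testfailed', 'ERROR'
        return 'success', 'INFO'


def run_suite(cmd, env=None):
    """Placeholder for BaseScript.run_command(..., output_parser=parser)."""
    parser = ParserStub()
    proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, universal_newlines=True)
    for line in proc.stdout:
        parser.parse_single_line(line)
    return_code = proc.wait()

    tbpl_status, log_level = parser.evaluate_parser(return_code)
    print('suite exited %s: %s (%s)' % (return_code, tbpl_status, log_level))
    return tbpl_status


if __name__ == '__main__':
    run_suite([sys.executable, '-c', 'print("TEST-PASS | dummy | ok")'])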
Example #4
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__),
                         self.cli_script),
            '--binary', binary_path,
            '--address', 'localhost:{}'.format(marionette_port),

            # Resource files to serve via local webserver
            '--server-root', os.path.join(dirs['abs_fxui_dir'], 'resources'),

            # Use the work dir to get temporary data stored
            '--workspace', dirs['abs_work_dir'],

            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout

            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html', os.path.join(dirs["abs_reports_dir"], self.reports['html']),
            '--log-xunit', os.path.join(dirs["abs_reports_dir"], self.reports['xunit']),
        ]

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        # Set further environment settings
        env = env or self.query_env()

        if self.symbols_url:
            cmd.extend(['--symbols-path', self.symbols_url])

        if self.query_minidump_stackwalk():
            env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        # Add the default tests to run
        tests = [os.path.join(dirs['abs_fxui_dir'], 'tests', test) for test in self.default_tests]
        cmd.extend(tests)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=300,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        return return_code
Example #5
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import telemetry_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(telemetry_harness.__file__),
                         self.cli_script),
            '--binary', binary_path,
            '--address', 'localhost:{}'.format(marionette_port),

            # Resource files to serve via local webserver
            '--server-root', os.path.join(dirs['abs_telemetry_dir'], 'harness', 'www'),
            # Use the work dir to get temporary data stored
            '--workspace', dirs['abs_work_dir'],
            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout
            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html', os.path.join(dirs['abs_blob_upload_dir'], 'report.html'),
            '--log-xunit', os.path.join(dirs['abs_blob_upload_dir'], 'report.xml'),
            # Enable tracing output to log transmission protocol
            '-vv',
        ]

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        # Add the default tests to run
        tests = [os.path.join(dirs['abs_telemetry_dir'], 'tests', test)
                 for test in self.default_tests]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
        if self.query_minidump_stackwalk():
            env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})
        env['RUST_BACKTRACE'] = '1'

        # If code coverage is enabled, set GCOV_PREFIX env variable
        if self.config.get('code_coverage'):
            env['GCOV_PREFIX'] = self.gcov_dir

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=300,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.record_status(tbpl_status, level=log_level)

        return return_code
Example #6
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import telemetry_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(telemetry_harness.__file__),
                         self.cli_script),
            '--binary', binary_path,
            '--address', 'localhost:{}'.format(marionette_port),

            # Resource files to serve via local webserver
            '--server-root', os.path.join(dirs['abs_telemetry_dir'], 'harness', 'www'),
            # Use the work dir to get temporary data stored
            '--workspace', dirs['abs_work_dir'],
            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout
            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html', os.path.join(dirs['abs_blob_upload_dir'], 'report.html'),
            '--log-xunit', os.path.join(dirs['abs_blob_upload_dir'], 'report.xml'),
            # Enable tracing output to log transmission protocol
            '-vv',
        ]

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        # Add the default tests to run
        tests = [os.path.join(dirs['abs_telemetry_dir'], 'tests', test)
                 for test in self.default_tests]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
        if self.query_minidump_stackwalk():
            env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})
        env['RUST_BACKTRACE'] = '1'

        # If code coverage is enabled, set GCOV_PREFIX env variable
        if self.config.get('code_coverage'):
            env['GCOV_PREFIX'] = self.gcov_dir

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=300,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        return return_code
Example #7
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__),
                         self.cli_script),
            '--binary',
            binary_path,
            '--address',
            'localhost:{}'.format(marionette_port),

            # Use the work dir to get temporary data stored
            '--workspace',
            dirs['abs_work_dir'],

            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout

            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html',
            os.path.join(dirs["abs_reports_dir"], self.reports['html']),
            '--log-xunit',
            os.path.join(dirs["abs_reports_dir"], self.reports['xunit']),
        ]

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        # Set further environment settings
        env = env or self.query_env()

        if self.symbols_url:
            cmd.extend(['--symbols-path', self.symbols_url])

        if self.query_minidump_stackwalk():
            env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=300,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        return return_code
Example #8
    def run_test(self, installer_path, env=None, cleanup=True, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        cmd = [
            self.query_python_path(),
            os.path.join(dirs['fx_ui_dir'], 'firefox_ui_harness', self.cli_script),
            '--installer', installer_path,
            '--address', 'localhost:{}'.format(marionette_port),

            # Use the work dir to get temporary data stored
            '--workspace', dirs['abs_work_dir'],

            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout

            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html', os.path.join(dirs["abs_reports_dir"], self.reports['html']),
            '--log-xunit', os.path.join(dirs["abs_reports_dir"], self.reports['xunit']),
        ]

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_extra_cmd_args())

        # Set further environment settings
        env = env or self.query_env()

        if self.minidump_stackwalk_path:
            env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path

            if self.query_symbols_url():
                cmd += ['--symbols-path', self.symbols_url]

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=300,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        if cleanup:
            for filepath in (installer_path,):
                if os.path.exists(filepath):
                    self.debug('Removing {}'.format(filepath))
                    os.remove(filepath)

        return return_code
Example #9
def get_mozharness_status(lines, status):
    parser = StructuredOutputParser(
        config={'log_level': INFO},
        error_list=BaseErrorList + HarnessErrorList,
        strict=False,
        suite_category='mochitest',
    )

    for line in lines:
        parser.parse_single_line(json.dumps(line))
    return parser.evaluate_parser(status)
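get_mozharness_status serializes each entry of lines with json.dumps before handing it to parse_single_line, so lines is expected to be an iterable of mozlog-style structured-log records (dicts). The snippet below constructs a small illustrative sequence of such records; the field set shown (action, time, thread, pid, source, status, expected) mirrors typical mozlog output but is assumed here rather than taken from this code.

import json
import time

def make_line(action, **fields):
    """Illustrative mozlog-style structured log record (field set assumed)."""
    record = {
        'action': action,
        'time': int(time.time() * 1000),
        'thread': 'MainThread',
        'pid': 1234,
        'source': 'mochitest',
    }
    record.update(fields)
    return record

lines = [
    make_line('suite_start', tests=['test_example.html']),
    make_line('test_start', test='test_example.html'),
    make_line('test_status', test='test_example.html',
              subtest='clicking works', status='PASS', expected='PASS'),
    make_line('test_end', test='test_example.html', status='OK', expected='OK'),
    make_line('suite_end'),
]

# get_mozharness_status(lines, status) would serialize each record in turn:
for line in lines:
    print(json.dumps(line))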
Example #10
    def run_tests(self):
        dirs = self.query_abs_dirs()
        cmd = self._query_cmd()

        self._install_fonts()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        log_compact=True,
                                        error_list=BaseErrorList +
                                        HarnessErrorList)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env['RUST_BACKTRACE'] = 'full'

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'
            env['MOZ_ACCELERATED'] = '1'
        if self.config['headless']:
            env['MOZ_HEADLESS'] = '1'
            env['MOZ_HEADLESS_WIDTH'] = self.config['headless_width']
            env['MOZ_HEADLESS_HEIGHT'] = self.config['headless_height']

        if self.config['disable_stylo']:
            if self.config['single_stylo_traversal']:
                self.fatal(
                    "--disable-stylo conflicts with --single-stylo-traversal")
            if self.config['enable_stylo']:
                self.fatal("--disable-stylo conflicts with --enable-stylo")

        if self.config['single_stylo_traversal']:
            env['STYLO_THREADS'] = '1'
        else:
            env['STYLO_THREADS'] = '4'

        if self.config['enable_stylo']:
            env['STYLO_FORCE_ENABLED'] = '1'
        if self.config['disable_stylo']:
            env['STYLO_FORCE_DISABLED'] = '1'

        env = self.query_env(partial_env=env, log_level=INFO)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=1000,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)

        self.buildbot_status(tbpl_status, level=log_level)
Example #11
    def run_tests(self, args=None, **kw):
        '''
        AWSY test should be implemented here
        '''
        dirs = self.abs_dirs
        env = {}
        error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                          'marionette_errorsummary.log')

        runtime_testvars = {'webRootDir': self.webroot_dir,
                            'resultsDir': self.results_dir}
        runtime_testvars_path = os.path.join(self.awsy_path, 'runtime-testvars.json')
        runtime_testvars_file = open(runtime_testvars_path, 'wb')
        runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
        runtime_testvars_file.close()

        cmd = ['marionette']
        cmd.append("--preferences=%s" % os.path.join(self.awsy_path, "conf", "prefs.json"))
        cmd.append("--testvars=%s" % os.path.join(self.awsy_path, "conf", "testvars.json"))
        cmd.append("--testvars=%s" % runtime_testvars_path)
        cmd.append("--log-raw=-")
        cmd.append("--log-errorsummary=%s" % error_summary_file)
        cmd.append("--binary=%s" % self.binary_path)
        cmd.append("--profile=%s" % (os.path.join(dirs['abs_work_dir'], 'profile')))
        if not self.config['e10s']:
            cmd.append('--disable-e10s')
        cmd.append('--gecko-log=%s' % os.path.join(dirs["abs_blob_upload_dir"],
                                                   'gecko.log'))

        test_file = os.path.join(self.awsy_libdir, 'test_memory_usage.py')
        cmd.append(test_file)

        env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
        if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
            self.mkdir_p(env['MOZ_UPLOAD_DIR'])
        env = self.query_env(partial_env=env)
        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)
        return_code = self.run_command(command=cmd,
                                       cwd=self.awsy_path,
                                       output_timeout=self.config.get("cmd_timeout"),
                                       env=env,
                                       output_parser=parser)

        level = INFO
        tbpl_status, log_level = parser.evaluate_parser(
            return_code=return_code)

        self.log("AWSY exited with return code %s: %s" % (return_code, tbpl_status),
                 level=level)
        self.buildbot_status(tbpl_status)
Example #12
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__), self.cli_script),
            "--binary",
            binary_path,
            "--address",
            "localhost:{}".format(marionette_port),
            # Use the work dir to get temporary data stored
            "--workspace",
            dirs["abs_work_dir"],
            # logging options
            "--gecko-log=-",  # output from the gecko process redirected to stdout
            "--log-raw=-",  # structured log for output parser redirected to stdout
            # additional reports helpful for Jenkins and inspection via Treeherder
            "--log-html",
            os.path.join(dirs["abs_reports_dir"], self.reports["html"]),
            "--log-xunit",
            os.path.join(dirs["abs_reports_dir"], self.reports["xunit"]),
        ]

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        # Set further environment settings
        env = env or self.query_env()

        if self.symbols_url:
            cmd.extend(["--symbols-path", self.symbols_url])

        if self.query_minidump_stackwalk():
            env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path

        parser = StructuredOutputParser(config=self.config, log_obj=self.log_obj, strict=False)

        return_code = self.run_command(cmd, cwd=dirs["abs_work_dir"], output_timeout=300, output_parser=parser, env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        return return_code
Example #13
def get_mozharness_status(lines, status):
    parser = StructuredOutputParser(
        config={'log_level': INFO},
        error_list=BaseErrorList + HarnessErrorList,
        strict=False,
        suite_category='mochitest',
    )

    # Processing the log with mozharness will re-print all the output to stdout
    # Since this exact same output has already been printed by the actual test
    # run, temporarily redirect stdout to devnull.
    with open(os.devnull, 'w') as fh:
        orig = sys.stdout
        sys.stdout = fh
        for line in lines:
            parser.parse_single_line(json.dumps(line))
        sys.stdout = orig
    return parser.evaluate_parser(status)
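The manual sys.stdout swap in the example above is not exception-safe: if parse_single_line raises, stdout is left pointing at devnull. The sketch below shows the same suppression written with Python 3's contextlib.redirect_stdout; this is an alternative idiom, not what the original code does.

import contextlib
import json
import os

def quiet_parse(parser, lines):
    """Feed structured-log records to the parser while discarding its re-printed output."""
    with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
        for line in lines:
            parser.parse_single_line(json.dumps(line))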
Example #14
    def run_tests(self):
        dirs = self.query_abs_dirs()
        cmd = self._query_cmd()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj)

        env = {}
        env = self.query_env(partial_env=env, log_level=INFO)
        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=1000,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)

        self.buildbot_status(tbpl_status, level=log_level)
Example #15
    def run_tests(self):
        dirs = self.query_abs_dirs()
        cmd = self._query_cmd()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env = self.query_env(partial_env=env, log_level=INFO)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=1000,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)

        self.buildbot_status(tbpl_status, level=log_level)
Example #16
    def run_tests(self):
        dirs = self.query_abs_dirs()
        cmd = self._query_cmd()
        cmd = self.append_harness_extra_args(cmd)

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env = self.query_env(partial_env=env, log_level=INFO)

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=1000,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)

        self.buildbot_status(tbpl_status, level=log_level)
Example #17
 def get_test_output_parser(self,
                            suite_category,
                            flavor=None,
                            strict=False,
                            **kwargs):
     if not self.structured_output(suite_category, flavor):
         return DesktopUnittestOutputParser(suite_category=suite_category,
                                            **kwargs)
     self.info("Structured output parser in use for %s." % suite_category)
     return StructuredOutputParser(suite_category=suite_category,
                                   strict=strict,
                                   **kwargs)
Example #18
def get_mozharness_status(suite, lines, status, formatter=None, buf=None):
    """Given list of log lines, determine what the mozharness status would be."""
    parser = StructuredOutputParser(
        config={'log_level': INFO},
        error_list=BaseErrorList+HarnessErrorList,
        strict=False,
        suite_category=suite,
    )

    if formatter:
        parser.formatter = formatter

    # Processing the log with mozharness will re-print all the output to stdout
    # Since this exact same output has already been printed by the actual test
    # run, temporarily redirect stdout to devnull.
    buf = buf or open(os.devnull, 'w')
    orig = sys.stdout
    sys.stdout = buf
    for line in lines:
        parser.parse_single_line(json.dumps(line))
    sys.stdout = orig
    return parser.evaluate_parser(status)
Example #19
 def get_test_output_parser(self, suite_category, strict=False,
                            fallback_parser_class=DesktopUnittestOutputParser,
                            **kwargs):
     """Derive and return an appropriate output parser, either the structured
     output parser or a fallback based on the type of logging in use as determined by
     configuration.
     """
     if not self.structured_output(suite_category):
         if fallback_parser_class is DesktopUnittestOutputParser:
             return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
         return fallback_parser_class(**kwargs)
     self.info("Structured output parser in use for %s." % suite_category)
     return StructuredOutputParser(suite_category=suite_category, strict=strict, **kwargs)
Example #20
def get_mozharness_status(suite, lines, status, formatter=None, buf=None):
    """Given list of log lines, determine what the mozharness status would be."""
    parser = StructuredOutputParser(
        config={'log_level': INFO},
        error_list=BaseErrorList + HarnessErrorList,
        strict=False,
        suite_category=suite,
    )

    if formatter:
        parser.formatter = formatter

    # Processing the log with mozharness will re-print all the output to stdout
    # Since this exact same output has already been printed by the actual test
    # run, temporarily redirect stdout to devnull.
    buf = buf or open(os.devnull, 'w')
    orig = sys.stdout
    sys.stdout = buf
    for line in lines:
        parser.parse_single_line(json.dumps(line))
    sys.stdout = orig
    return parser.evaluate_parser(status)
Example #21
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__),
                         self.cli_script),
            '--binary',
            binary_path,
            '--address',
            'localhost:{}'.format(marionette_port),

            # Resource files to serve via local webserver
            '--server-root',
            os.path.join(dirs['abs_fxui_dir'], 'resources'),

            # Use the work dir to get temporary data stored
            '--workspace',
            dirs['abs_work_dir'],

            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout

            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html',
            os.path.join(dirs['abs_blob_upload_dir'], 'report.html'),
            '--log-xunit',
            os.path.join(dirs['abs_blob_upload_dir'], 'report.xml'),

            # Enable tracing output to log transmission protocol
            '-vv',
        ]

        if self.config['enable_webrender']:
            cmd.append('--enable-webrender')

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        if not self.config.get('e10s'):
            cmd.append('--disable-e10s')

        cmd.extend(
            ['--setpref={}'.format(p) for p in self.config.get('extra_prefs')])

        if self.symbols_url:
            cmd.extend(['--symbols-path', self.symbols_url])

        if self.config.get('tag'):
            cmd.extend(['--tag', self.config['tag']])

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        # Add the default tests to run
        tests = [
            os.path.join(dirs['abs_fxui_manifest_dir'], t)
            for t in self.default_tests
        ]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
        if self.query_minidump_stackwalk():
            env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})
        env['RUST_BACKTRACE'] = 'full'

        # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR
        # env variables
        if self.config.get('code_coverage'):
            env['GCOV_PREFIX'] = self.gcov_dir
            env['JS_CODE_COVERAGE_OUTPUT_DIR'] = self.jsvm_dir

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_fxui_dir'],
                                       output_timeout=1000,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level, summary = parser.evaluate_parser(return_code)
        self.record_status(tbpl_status, level=log_level)

        return return_code
Example #22
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__),
                         self.cli_script),
            '--binary',
            binary_path,
            '--address',
            'localhost:{}'.format(marionette_port),

            # Resource files to serve via local webserver
            '--server-root',
            os.path.join(dirs['abs_fxui_dir'], 'resources'),

            # Use the work dir to get temporary data stored
            '--workspace',
            dirs['abs_work_dir'],

            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout

            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html',
            os.path.join(dirs['abs_blob_upload_dir'], 'report.html'),
            '--log-xunit',
            os.path.join(dirs['abs_blob_upload_dir'], 'report.xml'),
        ]

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        # Translate deprecated --e10s flag
        if not self.config.get('e10s'):
            cmd.append('--disable-e10s')

        if self.symbols_url:
            cmd.extend(['--symbols-path', self.symbols_url])

        if self.config.get('tag'):
            cmd.extend(['--tag', self.config['tag']])

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        # Add the default tests to run
        tests = [
            os.path.join(dirs['abs_fxui_dir'], 'tests', test)
            for test in self.default_tests
        ]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
        if self.query_minidump_stackwalk():
            env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=300,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        return return_code
Example #23
    def run_tests(self):
        dirs = self.query_abs_dirs()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        log_compact=True,
                                        error_list=BaseErrorList +
                                        HarnessErrorList)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env['RUST_BACKTRACE'] = 'full'

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'
            env['MOZ_ACCELERATED'] = '1'
        if self.config['headless']:
            env['MOZ_HEADLESS'] = '1'
            env['MOZ_HEADLESS_WIDTH'] = self.config['headless_width']
            env['MOZ_HEADLESS_HEIGHT'] = self.config['headless_height']

        if self.config['single_stylo_traversal']:
            env['STYLO_THREADS'] = '1'
        else:
            env['STYLO_THREADS'] = '4'

        if self.is_android:
            env['ADB_PATH'] = self.adb_path

        env = self.query_env(partial_env=env, log_level=INFO)

        start_time = datetime.now()
        max_per_test_time = timedelta(minutes=60)
        max_per_test_tests = 10
        if self.per_test_coverage:
            max_per_test_tests = 30
        executed_tests = 0
        executed_too_many_tests = False

        if self.per_test_coverage or self.verify_enabled:
            suites = self.query_per_test_category_suites(None, None)
            if "wdspec" in suites:
                # geckodriver is required for wdspec, but not always available
                geckodriver_path = self._query_geckodriver()
                if not geckodriver_path or not os.path.isfile(
                        geckodriver_path):
                    suites.remove("wdspec")
                    self.info("Skipping 'wdspec' tests - no geckodriver")
        else:
            test_types = self.config.get("test_type", [])
            suites = [None]
        for suite in suites:
            if executed_too_many_tests and not self.per_test_coverage:
                continue

            if suite:
                test_types = [suite]

            summary = {}
            for per_test_args in self.query_args(suite):
                # Make sure baseline code coverage tests are never
                # skipped and that having them run has no influence
                # on the max number of actual tests that are to be run.
                is_baseline_test = 'baselinecoverage' in per_test_args[-1] \
                                   if self.per_test_coverage else False
                if executed_too_many_tests and not is_baseline_test:
                    continue

                if not is_baseline_test:
                    if (datetime.now() - start_time) > max_per_test_time:
                        # Running tests has run out of time. That is okay! Stop running
                        # them so that a task timeout is not triggered, and so that
                        # (partial) results are made available in a timely manner.
                        self.info(
                            "TinderboxPrint: Running tests took too long: Not all tests "
                            "were executed.<br/>")
                        return
                    if executed_tests >= max_per_test_tests:
                        # When changesets are merged between trees or many tests are
                        # otherwise updated at once, there probably is not enough time
                        # to run all tests, and attempting to do so may cause other
                        # problems, such as generating too much log output.
                        self.info(
                            "TinderboxPrint: Too many modified tests: Not all tests "
                            "were executed.<br/>")
                        executed_too_many_tests = True

                    executed_tests = executed_tests + 1

                cmd = self._query_cmd(test_types)
                cmd.extend(per_test_args)

                final_env = copy.copy(env)

                if self.per_test_coverage:
                    self.set_coverage_env(final_env, is_baseline_test)

                return_code = self.run_command(cmd,
                                               cwd=dirs['abs_work_dir'],
                                               output_timeout=1000,
                                               output_parser=parser,
                                               env=final_env)

                if self.per_test_coverage:
                    self.add_per_test_coverage_report(final_env, suite,
                                                      per_test_args[-1])

                tbpl_status, log_level, summary = parser.evaluate_parser(
                    return_code, previous_summary=summary)
                self.record_status(tbpl_status, level=log_level)

                if len(per_test_args) > 0:
                    self.log_per_test_status(per_test_args[-1], tbpl_status,
                                             log_level)
Example #24
    def run_tests(self, args=None, **kw):
        '''
        AWSY test should be implemented here
        '''
        dirs = self.abs_dirs
        env = {}
        error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                          'marionette_errorsummary.log')

        runtime_testvars = {'webRootDir': self.webroot_dir,
                            'resultsDir': self.results_dir,
                            'bin': self.binary_path}

        # Check if this is a DMD build and if so enable it.
        dmd_enabled = False
        dmd_py_lib_dir = os.path.dirname(self.binary_path)
        if mozinfo.os == 'mac':
            # On mac binary is in MacOS and dmd.py is in Resources, ie:
            #   Name.app/Contents/MacOS/libdmd.dylib
            #   Name.app/Contents/Resources/dmd.py
            dmd_py_lib_dir = os.path.join(dmd_py_lib_dir, "../Resources/")

        dmd_path = os.path.join(dmd_py_lib_dir, "dmd.py")
        if self.config['dmd'] and os.path.isfile(dmd_path):
            dmd_enabled = True
            runtime_testvars['dmd'] = True

            # Allow the child process to import dmd.py
            python_path = os.environ.get('PYTHONPATH')

            if python_path:
                os.environ['PYTHONPATH'] = "%s%s%s" % (python_path, os.pathsep, dmd_py_lib_dir)
            else:
                os.environ['PYTHONPATH'] = dmd_py_lib_dir

            env['DMD'] = "--mode=dark-matter --stacks=full"

        runtime_testvars['tp6'] = self.config['tp6']
        if self.config['tp6']:
            # mitmproxy needs path to mozharness when installing the cert, and tooltool
            env['SCRIPTSPATH'] = scripts_path
            env['EXTERNALTOOLSPATH'] = external_tools_path

        runtime_testvars_path = os.path.join(self.awsy_path, 'runtime-testvars.json')
        runtime_testvars_file = open(runtime_testvars_path, 'wb')
        runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
        runtime_testvars_file.close()

        cmd = ['marionette']

        test_vars_file = None
        if self.config['test_about_blank']:
            test_vars_file = "base-testvars.json"
        else:
            if self.config['tp6']:
                test_vars_file = "tp6-testvars.json"
            else:
                test_vars_file = "testvars.json"

        cmd.append("--testvars=%s" % os.path.join(self.awsy_path, "conf", test_vars_file))
        cmd.append("--testvars=%s" % runtime_testvars_path)
        cmd.append("--log-raw=-")
        cmd.append("--log-errorsummary=%s" % error_summary_file)
        cmd.append("--binary=%s" % self.binary_path)
        cmd.append("--profile=%s" % (os.path.join(dirs['abs_work_dir'], 'profile')))
        if not self.config['e10s']:
            cmd.append('--disable-e10s')
        cmd.extend(['--setpref={}'.format(p) for p in self.config['extra_prefs']])
        cmd.append('--gecko-log=%s' % os.path.join(dirs["abs_blob_upload_dir"],
                                                   'gecko.log'))
        # TestingMixin._download_and_extract_symbols() should set
        # self.symbols_path
        cmd.append('--symbols-path=%s' % self.symbols_path)

        if self.config['test_about_blank']:
            test_file = os.path.join(self.awsy_libdir, 'test_base_memory_usage.py')
            prefs_file = "base-prefs.json"
        else:
            test_file = os.path.join(self.awsy_libdir, 'test_memory_usage.py')
            if self.config['tp6']:
                prefs_file = "tp6-prefs.json"
            else:
                prefs_file = "prefs.json"

        cmd.append("--preferences=%s" % os.path.join(self.awsy_path, "conf", prefs_file))
        if dmd_enabled:
            cmd.append("--setpref=security.sandbox.content.level=0")
        cmd.append(test_file)

        if self.config['enable_webrender']:
            cmd.append('--enable-webrender')

        env['STYLO_THREADS'] = '4'

        env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
        if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
            self.mkdir_p(env['MOZ_UPLOAD_DIR'])
        if self.query_minidump_stackwalk():
            env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path
        env['MINIDUMP_SAVE_PATH'] = dirs['abs_blob_upload_dir']
        env['RUST_BACKTRACE'] = '1'
        env = self.query_env(partial_env=env)
        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        error_list=self.error_list,
                                        strict=False)
        return_code = self.run_command(command=cmd,
                                       cwd=self.awsy_path,
                                       output_timeout=self.config.get("cmd_timeout"),
                                       env=env,
                                       output_parser=parser)

        level = INFO
        tbpl_status, log_level, summary = parser.evaluate_parser(
            return_code=return_code)

        self.log("AWSY exited with return code %s: %s" % (return_code, tbpl_status),
                 level=level)
        self.record_status(tbpl_status)
Example #25
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__), self.cli_script),
            "--binary",
            binary_path,
            "--address",
            "localhost:{}".format(marionette_port),
            # Resource files to serve via local webserver
            "--server-root",
            os.path.join(dirs["abs_fxui_dir"], "resources"),
            # Use the work dir to get temporary data stored
            "--workspace",
            dirs["abs_work_dir"],
            # logging options
            "--gecko-log=-",  # output from the gecko process redirected to stdout
            "--log-raw=-",  # structured log for output parser redirected to stdout
            # Enable tracing output to log transmission protocol
            "-vv",
        ]

        if self.config["enable_webrender"]:
            cmd.append("--enable-webrender")

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        if not self.config.get("e10s"):
            cmd.append("--disable-e10s")

        cmd.extend(["--setpref={}".format(p) for p in self.config.get("extra_prefs")])

        if self.symbols_url:
            cmd.extend(["--symbols-path", self.symbols_url])

        if self.config.get("tag"):
            cmd.extend(["--tag", self.config["tag"]])

        parser = StructuredOutputParser(
            config=self.config, log_obj=self.log_obj, strict=False
        )

        # Add the default tests to run
        tests = [
            os.path.join(dirs["abs_fxui_manifest_dir"], t) for t in self.default_tests
        ]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
        if self.query_minidump_stackwalk():
            env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})
        env["RUST_BACKTRACE"] = "full"

        # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR
        # env variables
        if self.config.get("code_coverage"):
            env["GCOV_PREFIX"] = self.gcov_dir
            env["JS_CODE_COVERAGE_OUTPUT_DIR"] = self.jsvm_dir

        if self.config["allow_software_gl_layers"]:
            env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"

        return_code = self.run_command(
            cmd,
            cwd=dirs["abs_fxui_dir"],
            output_timeout=1000,
            output_parser=parser,
            env=env,
        )

        tbpl_status, log_level, summary = parser.evaluate_parser(return_code)
        self.record_status(tbpl_status, level=log_level)

        return return_code
Example #26
    def run_tests(self):
        dirs = self.query_abs_dirs()

        self._install_fonts()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        log_compact=True,
                                        error_list=BaseErrorList +
                                        HarnessErrorList)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env['RUST_BACKTRACE'] = 'full'

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'
            env['MOZ_ACCELERATED'] = '1'
        if self.config['headless']:
            env['MOZ_HEADLESS'] = '1'
            env['MOZ_HEADLESS_WIDTH'] = self.config['headless_width']
            env['MOZ_HEADLESS_HEIGHT'] = self.config['headless_height']

        if self.config['single_stylo_traversal']:
            env['STYLO_THREADS'] = '1'
        else:
            env['STYLO_THREADS'] = '4'

        env = self.query_env(partial_env=env, log_level=INFO)

        start_time = datetime.now()
        max_verify_time = timedelta(minutes=60)
        max_verify_tests = 10
        verified_tests = 0

        if self.config.get("verify") is True:
            verify_suites = self.query_verify_category_suites(None, None)
            if "wdspec" in verify_suites:
                # geckodriver is required for wdspec, but not always available
                geckodriver_path = self._query_geckodriver()
                if not geckodriver_path or not os.path.isfile(
                        geckodriver_path):
                    verify_suites.remove("wdspec")
                    self.info(
                        "Test verification skipping 'wdspec' tests - no geckodriver"
                    )
        else:
            test_types = self.config.get("test_type", [])
            verify_suites = [None]
        for verify_suite in verify_suites:
            if verify_suite:
                test_types = [verify_suite]
            for verify_args in self.query_verify_args(verify_suite):
                if (datetime.now() - start_time) > max_verify_time:
                    # Verification has run out of time. That is okay! Stop running
                    # tests so that a task timeout is not triggered, and so that
                    # (partial) results are made available in a timely manner.
                    self.info(
                        "TinderboxPrint: Verification too long: Not all tests "
                        "were verified.<br/>")
                    return
                if verified_tests >= max_verify_tests:
                    # When changesets are merged between trees or many tests are
                    # otherwise updated at once, there probably is not enough time
                    # to verify all tests, and attempting to do so may cause other
                    # problems, such as generating too much log output.
                    self.info(
                        "TinderboxPrint: Too many modified tests: Not all tests "
                        "were verified.<br/>")
                    return
                verified_tests = verified_tests + 1

                cmd = self._query_cmd(test_types)
                cmd.extend(verify_args)

                return_code = self.run_command(cmd,
                                               cwd=dirs['abs_work_dir'],
                                               output_timeout=1000,
                                               output_parser=parser,
                                               env=env)

                tbpl_status, log_level = parser.evaluate_parser(return_code)
                self.buildbot_status(tbpl_status, level=log_level)

                if len(verify_args) > 0:
                    self.log_verify_status(verify_args[-1], tbpl_status,
                                           log_level)
Example #27
    def run_tests(self, args=None, **kw):
        '''
        AWSY test should be implemented here
        '''
        dirs = self.abs_dirs
        env = {}
        error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                          'marionette_errorsummary.log')

        runtime_testvars = {
            'webRootDir': self.webroot_dir,
            'resultsDir': self.results_dir
        }
        runtime_testvars_path = os.path.join(self.awsy_path,
                                             'runtime-testvars.json')
        runtime_testvars_file = open(runtime_testvars_path, 'wb')
        runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
        runtime_testvars_file.close()

        cmd = ['marionette']

        if self.config['test_about_blank']:
            cmd.append(
                "--testvars=%s" %
                os.path.join(self.awsy_path, "conf", "base-testvars.json"))
        else:
            cmd.append("--testvars=%s" %
                       os.path.join(self.awsy_path, "conf", "testvars.json"))

        cmd.append("--testvars=%s" % runtime_testvars_path)
        cmd.append("--log-raw=-")
        cmd.append("--log-errorsummary=%s" % error_summary_file)
        cmd.append("--binary=%s" % self.binary_path)
        cmd.append("--profile=%s" %
                   (os.path.join(dirs['abs_work_dir'], 'profile')))
        if not self.config['e10s']:
            cmd.append('--disable-e10s')
        cmd.append('--gecko-log=%s' %
                   os.path.join(dirs["abs_blob_upload_dir"], 'gecko.log'))
        # TestingMixin._download_and_extract_symbols() should set
        # self.symbols_path
        cmd.append('--symbols-path=%s' % self.symbols_path)

        if self.config['test_about_blank']:
            test_file = os.path.join(self.awsy_libdir,
                                     'test_base_memory_usage.py')
            prefs_file = "base-prefs.json"
        else:
            test_file = os.path.join(self.awsy_libdir, 'test_memory_usage.py')
            prefs_file = "prefs.json"

        cmd.append("--preferences=%s" %
                   os.path.join(self.awsy_path, "conf", prefs_file))
        cmd.append(test_file)

        if self.config['single_stylo_traversal']:
            env['STYLO_THREADS'] = '1'
        else:
            env['STYLO_THREADS'] = '4'

        # TODO: consider getting rid of this as stylo is enabled by default
        env['STYLO_FORCE_ENABLED'] = '1'

        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'
            env['MOZ_ACCELERATED'] = '1'

        env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
        if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
            self.mkdir_p(env['MOZ_UPLOAD_DIR'])
        if self.query_minidump_stackwalk():
            env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path
        env['MINIDUMP_SAVE_PATH'] = dirs['abs_blob_upload_dir']
        env['RUST_BACKTRACE'] = '1'
        env = self.query_env(partial_env=env)
        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        error_list=self.error_list,
                                        strict=False)
        return_code = self.run_command(
            command=cmd,
            cwd=self.awsy_path,
            output_timeout=self.config.get("cmd_timeout"),
            env=env,
            output_parser=parser)

        level = INFO
        tbpl_status, log_level, summary = parser.evaluate_parser(
            return_code=return_code)

        self.log("AWSY exited with return code %s: %s" %
                 (return_code, tbpl_status),
                 level=level)
        self.record_status(tbpl_status)
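The choice of testvars, prefs, and test module in this example depends only on the test_about_blank flag; below is a self-contained sketch of that mapping (the helper name is invented for illustration):

import os

def awsy_conf(awsy_path, test_about_blank):
    """Return (testvars_path, prefs_path, test_module) for the requested AWSY mode."""
    conf_dir = os.path.join(awsy_path, "conf")
    if test_about_blank:
        # Baseline mode: measure memory for an essentially blank session.
        return (os.path.join(conf_dir, "base-testvars.json"),
                os.path.join(conf_dir, "base-prefs.json"),
                "test_base_memory_usage.py")
    # Full mode: run the standard AWSY page-load test.
    return (os.path.join(conf_dir, "testvars.json"),
            os.path.join(conf_dir, "prefs.json"),
            "test_memory_usage.py")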
Example No. 28
    def run_tests(self):
        dirs = self.query_abs_dirs()

        self._install_fonts()

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        log_compact=True,
                                        error_list=BaseErrorList +
                                        HarnessErrorList)

        env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
        env['RUST_BACKTRACE'] = 'full'

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'
            env['MOZ_ACCELERATED'] = '1'
        if self.config['headless']:
            env['MOZ_HEADLESS'] = '1'
            env['MOZ_HEADLESS_WIDTH'] = self.config['headless_width']
            env['MOZ_HEADLESS_HEIGHT'] = self.config['headless_height']

        if self.config['single_stylo_traversal']:
            env['STYLO_THREADS'] = '1'
        else:
            env['STYLO_THREADS'] = '4'

        env = self.query_env(partial_env=env, log_level=INFO)

        start_time = datetime.now()
        max_per_test_time = timedelta(minutes=60)
        max_per_test_tests = 10
        executed_tests = 0

        if self.per_test_coverage or self.verify_enabled:
            suites = self.query_per_test_category_suites(None, None)
            if "wdspec" in suites:
                # geckodriver is required for wdspec, but not always available
                geckodriver_path = self._query_geckodriver()
                if not geckodriver_path or not os.path.isfile(
                        geckodriver_path):
                    suites.remove("wdspec")
                    self.info("Skipping 'wdspec' tests - no geckodriver")
        else:
            test_types = self.config.get("test_type", [])
            suites = [None]
        for suite in suites:
            if suite:
                test_types = [suite]

            # Run basic startup/shutdown test to collect baseline coverage.
            # This way, after we run a test, we can generate a diff between the
            # full coverage of the test and the baseline coverage and only get
            # the coverage data specific to the test.
            if self.per_test_coverage:
                gcov_dir, jsvm_dir = self.set_coverage_env(env)
                # TODO: Run basic startup/shutdown test to collect baseline coverage.
                # grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
                # shutil.rmtree(gcov_dir)
                # shutil.rmtree(jsvm_dir)
                # TODO: Parse coverage report

            for per_test_args in self.query_args(suite):
                if (datetime.now() - start_time) > max_per_test_time:
                    # Running tests has run out of time. That is okay! Stop running
                    # them so that a task timeout is not triggered, and so that
                    # (partial) results are made available in a timely manner.
                    self.info(
                        "TinderboxPrint: Running tests took too long: Not all tests "
                        "were executed.<br/>")
                    return
                if executed_tests >= max_per_test_tests:
                    # When changesets are merged between trees or many tests are
                    # otherwise updated at once, there probably is not enough time
                    # to run all tests, and attempting to do so may cause other
                    # problems, such as generating too much log output.
                    self.info(
                        "TinderboxPrint: Too many modified tests: Not all tests "
                        "were executed.<br/>")
                    return
                executed_tests += 1

                cmd = self._query_cmd(test_types)
                cmd.extend(per_test_args)

                if self.per_test_coverage:
                    gcov_dir, jsvm_dir = self.set_coverage_env(env)

                return_code = self.run_command(cmd,
                                               cwd=dirs['abs_work_dir'],
                                               output_timeout=1000,
                                               output_parser=parser,
                                               env=env)

                if self.per_test_coverage:
                    grcov_file, jsvm_file = self.parse_coverage_artifacts(
                        gcov_dir, jsvm_dir)
                    shutil.rmtree(gcov_dir)
                    shutil.rmtree(jsvm_dir)
                    # TODO: Parse coverage report
                    # TODO: Diff this coverage report with the baseline one

                tbpl_status, log_level = parser.evaluate_parser(return_code)
                self.buildbot_status(tbpl_status, level=log_level)

                if len(per_test_args) > 0:
                    self.log_per_test_status(per_test_args[-1], tbpl_status,
                                             log_level)
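The two early returns in the loop above enforce a simple time-and-count budget; here is the same guard in isolation, assuming the 60-minute and 10-test limits set before the loop (names are illustrative):

from datetime import datetime, timedelta

def budget_exceeded(start_time, executed_tests,
                    max_time=timedelta(minutes=60), max_tests=10):
    """Return a reason string when the per-test budget is spent, else None."""
    if datetime.now() - start_time > max_time:
        # Stop before the task-level timeout fires so partial results still upload.
        return "took too long"
    if executed_tests >= max_tests:
        # Large merges can touch many tests; cap the run to keep logs bounded.
        return "too many modified tests"
    return None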
Example No. 29
    def run_tests(self, args=None, **kw):
        '''
        Run the AWSY memory test via Marionette.
        '''
        dirs = self.abs_dirs
        env = {}
        error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                          'marionette_errorsummary.log')

        runtime_testvars = {'webRootDir': self.webroot_dir,
                            'resultsDir': self.results_dir}
        runtime_testvars_path = os.path.join(self.awsy_path, 'runtime-testvars.json')
        with open(runtime_testvars_path, 'w') as runtime_testvars_file:
            runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))

        cmd = ['marionette']
        cmd.append("--preferences=%s" % os.path.join(self.awsy_path, "conf", "prefs.json"))
        cmd.append("--testvars=%s" % os.path.join(self.awsy_path, "conf", "testvars.json"))
        cmd.append("--testvars=%s" % runtime_testvars_path)
        cmd.append("--log-raw=-")
        cmd.append("--log-errorsummary=%s" % error_summary_file)
        cmd.append("--binary=%s" % self.binary_path)
        cmd.append("--profile=%s" % (os.path.join(dirs['abs_work_dir'], 'profile')))
        if not self.config['e10s']:
            cmd.append('--disable-e10s')
        cmd.append('--gecko-log=%s' % os.path.join(dirs["abs_blob_upload_dir"],
                                                   'gecko.log'))
        # TestingMixin._download_and_extract_symbols() should set
        # self.symbols_path
        cmd.append('--symbols-path=%s' % self.symbols_path)

        test_file = os.path.join(self.awsy_libdir, 'test_memory_usage.py')
        cmd.append(test_file)

        if self.config['disable_stylo']:
            if self.config['single_stylo_traversal']:
                self.fatal("--disable-stylo conflicts with --single-stylo-traversal")
            if self.config['enable_stylo']:
                self.fatal("--disable-stylo conflicts with --enable-stylo")

        if self.config['single_stylo_traversal']:
            env['STYLO_THREADS'] = '1'
        else:
            env['STYLO_THREADS'] = '4'

        if self.config['enable_stylo']:
            env['STYLO_FORCE_ENABLED'] = '1'
        if self.config['disable_stylo']:
            env['STYLO_FORCE_DISABLED'] = '1'

        env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
        if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
            self.mkdir_p(env['MOZ_UPLOAD_DIR'])
        if self.query_minidump_stackwalk():
            env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path
        env['MINIDUMP_SAVE_PATH'] = dirs['abs_blob_upload_dir']
        env['RUST_BACKTRACE'] = '1'
        env = self.query_env(partial_env=env)
        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        error_list=self.error_list,
                                        strict=False)
        return_code = self.run_command(command=cmd,
                                       cwd=self.awsy_path,
                                       output_timeout=self.config.get("cmd_timeout"),
                                       env=env,
                                       output_parser=parser)

        level = INFO
        tbpl_status, log_level = parser.evaluate_parser(
            return_code=return_code)

        self.log("AWSY exited with return code %s: %s" % (return_code, tbpl_status),
                 level=level)
        self.buildbot_status(tbpl_status)
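The Stylo handling in this example reduces to two mutually exclusive flags plus a thread-count choice; a minimal sketch of that logic, using exceptions in place of the harness's fatal() calls (the helper name is invented):

def stylo_env(config):
    """Translate Stylo config flags into the environment variables used above."""
    if config.get('disable_stylo'):
        if config.get('single_stylo_traversal'):
            raise ValueError("--disable-stylo conflicts with --single-stylo-traversal")
        if config.get('enable_stylo'):
            raise ValueError("--disable-stylo conflicts with --enable-stylo")
    env = {'STYLO_THREADS': '1' if config.get('single_stylo_traversal') else '4'}
    if config.get('enable_stylo'):
        env['STYLO_FORCE_ENABLED'] = '1'
    if config.get('disable_stylo'):
        env['STYLO_FORCE_DISABLED'] = '1'
    return env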
Example No. 30
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__), self.cli_script),
            "--binary",
            binary_path,
            "--address",
            "localhost:{}".format(marionette_port),
            # Resource files to serve via local webserver
            "--server-root",
            os.path.join(dirs["abs_fxui_dir"], "resources"),
            # Use the work dir to get temporary data stored
            "--workspace",
            dirs["abs_work_dir"],
            # logging options
            "--gecko-log=-",  # output from the gecko process redirected to stdout
            "--log-raw=-",  # structured log for output parser redirected to stdout
            # additional reports helpful for Jenkins and inspection via Treeherder
            "--log-html",
            os.path.join(dirs["abs_blob_upload_dir"], "report.html"),
            "--log-xunit",
            os.path.join(dirs["abs_blob_upload_dir"], "report.xml"),
            # Enable tracing output to log transmission protocol
            "-vv",
        ]

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        # Translate deprecated --e10s flag
        if not self.config.get("e10s"):
            cmd.append("--disable-e10s")

        if self.symbols_url:
            cmd.extend(["--symbols-path", self.symbols_url])

        if self.config.get("tag"):
            cmd.extend(["--tag", self.config["tag"]])

        parser = StructuredOutputParser(config=self.config, log_obj=self.log_obj, strict=False)

        # Add the default tests to run
        tests = [os.path.join(dirs["abs_fxui_dir"], "tests", test) for test in self.default_tests]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
        if self.query_minidump_stackwalk():
            env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})

        if self.config["allow_software_gl_layers"]:
            env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"

        return_code = self.run_command(
            cmd,
            cwd=dirs["abs_work_dir"],
            output_timeout=300,
            output_parser=parser,
            env=env,
        )

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        return return_code
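With placeholder paths, the command list assembled above resolves to roughly the following (illustrative only; the actual script name comes from self.cli_script):

# Illustrative only; every path below is a placeholder.
cmd = [
    "/path/to/python",
    "/path/to/firefox_ui_harness/<cli_script>",
    "--binary", "/path/to/firefox",
    "--address", "localhost:2828",
    "--server-root", "<abs_fxui_dir>/resources",
    "--workspace", "<abs_work_dir>",
    "--gecko-log=-",
    "--log-raw=-",
    "--log-html", "<abs_blob_upload_dir>/report.html",
    "--log-xunit", "<abs_blob_upload_dir>/report.xml",
    "-vv",
    # ...followed by harness args, optional --disable-e10s / --symbols-path /
    # --tag, and the default test paths.
]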
Example No. 31
    def run_tests(self, args=None, **kw):
        '''
        Run the AWSY memory test via Marionette.
        '''
        dirs = self.abs_dirs
        env = {}
        error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                          'marionette_errorsummary.log')

        runtime_testvars = {
            'webRootDir': self.webroot_dir,
            'resultsDir': self.results_dir
        }

        # Check if this is a DMD build and if so enable it.
        dmd_enabled = False
        dmd_py_lib_dir = os.path.dirname(self.binary_path)
        if mozinfo.os == 'mac':
            # On mac the binary is in MacOS and dmd.py is in Resources, i.e.:
            #   Name.app/Contents/MacOS/libdmd.dylib
            #   Name.app/Contents/Resources/dmd.py
            dmd_py_lib_dir = os.path.join(dmd_py_lib_dir, "../Resources/")

        dmd_path = os.path.join(dmd_py_lib_dir, "dmd.py")
        if self.config['dmd'] and os.path.isfile(dmd_path):
            dmd_enabled = True
            runtime_testvars['dmd'] = True

            # Allow the child process to import dmd.py
            python_path = os.environ.get('PYTHONPATH')

            if python_path:
                os.environ['PYTHONPATH'] = "%s%s%s" % (python_path, os.pathsep,
                                                       dmd_py_lib_dir)
            else:
                os.environ['PYTHONPATH'] = dmd_py_lib_dir

            env['DMD'] = "--mode=dark-matter --stacks=full"

        runtime_testvars_path = os.path.join(self.awsy_path,
                                             'runtime-testvars.json')
        with open(runtime_testvars_path, 'w') as runtime_testvars_file:
            runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))

        cmd = ['marionette']

        if self.config['test_about_blank']:
            cmd.append(
                "--testvars=%s" %
                os.path.join(self.awsy_path, "conf", "base-testvars.json"))
        else:
            cmd.append("--testvars=%s" %
                       os.path.join(self.awsy_path, "conf", "testvars.json"))

        cmd.append("--testvars=%s" % runtime_testvars_path)
        cmd.append("--log-raw=-")
        cmd.append("--log-errorsummary=%s" % error_summary_file)
        cmd.append("--binary=%s" % self.binary_path)
        cmd.append("--profile=%s" %
                   (os.path.join(dirs['abs_work_dir'], 'profile')))
        if not self.config['e10s']:
            cmd.append('--disable-e10s')
        cmd.append('--gecko-log=%s' %
                   os.path.join(dirs["abs_blob_upload_dir"], 'gecko.log'))
        # TestingMixin._download_and_extract_symbols() should set
        # self.symbols_path
        cmd.append('--symbols-path=%s' % self.symbols_path)

        if self.config['test_about_blank']:
            test_file = os.path.join(self.awsy_libdir,
                                     'test_base_memory_usage.py')
            prefs_file = "base-prefs.json"
        else:
            test_file = os.path.join(self.awsy_libdir, 'test_memory_usage.py')
            prefs_file = "prefs.json"

        cmd.append("--preferences=%s" %
                   os.path.join(self.awsy_path, "conf", prefs_file))
        if dmd_enabled:
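            # Lowering the content sandbox level is assumed here to let DMD
            # write its reports from content processes.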
            cmd.append("--pref=security.sandbox.content.level:0")
        cmd.append(test_file)

        if self.config['single_stylo_traversal']:
            env['STYLO_THREADS'] = '1'
        else:
            env['STYLO_THREADS'] = '4'

        # TODO: consider getting rid of this as stylo is enabled by default
        env['STYLO_FORCE_ENABLED'] = '1'

        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'
            env['MOZ_ACCELERATED'] = '1'

        # Allow explicitly disabling webrender, so that we don't run WR on non-QR
        # test platforms just because they run on qualified hardware.
        if self.config['disable_webrender']:
            env['MOZ_WEBRENDER'] = '0'

        env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
        if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
            self.mkdir_p(env['MOZ_UPLOAD_DIR'])
        if self.query_minidump_stackwalk():
            env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path
        env['MINIDUMP_SAVE_PATH'] = dirs['abs_blob_upload_dir']
        env['RUST_BACKTRACE'] = '1'
        env = self.query_env(partial_env=env)
        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        error_list=self.error_list,
                                        strict=False)
        return_code = self.run_command(
            command=cmd,
            cwd=self.awsy_path,
            output_timeout=self.config.get("cmd_timeout"),
            env=env,
            output_parser=parser)

        level = INFO
        tbpl_status, log_level, summary = parser.evaluate_parser(
            return_code=return_code)

        self.log("AWSY exited with return code %s: %s" %
                 (return_code, tbpl_status),
                 level=level)
        self.record_status(tbpl_status)
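The PYTHONPATH handling in the DMD branch above is a generic append-to-search-path step; a minimal standalone sketch (the helper name is invented):

import os

def extend_pythonpath(directory):
    """Make `directory` importable by child Python processes via PYTHONPATH."""
    existing = os.environ.get('PYTHONPATH')
    os.environ['PYTHONPATH'] = (
        "%s%s%s" % (existing, os.pathsep, directory) if existing else directory)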
Example No. 32
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import telemetry_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(telemetry_harness.__file__),
                         self.cli_script),
            "--binary",
            binary_path,
            "--address",
            "localhost:{}".format(marionette_port),
            # Resource files to serve via local webserver
            "--server-root",
            os.path.join(dirs["abs_telemetry_dir"], "harness", "www"),
            # Use the work dir to get temporary data stored
            "--workspace",
            dirs["abs_work_dir"],
            # logging options
            "--gecko-log=-",  # output from the gecko process redirected to stdout
            "--log-raw=-",  # structured log for output parser redirected to stdout
            # additional reports helpful for Jenkins and inspection via Treeherder
            "--log-html",
            os.path.join(dirs["abs_blob_upload_dir"], "report.html"),
            "--log-xunit",
            os.path.join(dirs["abs_blob_upload_dir"], "report.xml"),
            # Enable tracing output to log transmission protocol
            "-vv",
        ]

        if self.config["enable_webrender"]:
            cmd.extend(["--enable-webrender"])

        cmd.extend(
            ["--setpref={}".format(p) for p in self.config["extra_prefs"]])

        if not self.config["e10s"]:
            cmd.append("--disable-e10s")

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        # Add the default tests to run
        tests = [
            os.path.join(dirs["abs_telemetry_dir"], "tests", test)
            for test in self.default_tests
        ]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
        if self.query_minidump_stackwalk():
            env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})
        env["RUST_BACKTRACE"] = "1"
        env["MOZ_IGNORE_NSS_SHUTDOWN_LEAKS"] = "1"

        # If code coverage is enabled, set GCOV_PREFIX env variable
        if self.config.get("code_coverage"):
            env["GCOV_PREFIX"] = self.gcov_dir

        return_code = self.run_command(
            cmd,
            cwd=dirs["abs_work_dir"],
            output_timeout=300,
            output_parser=parser,
            env=env,
        )

        tbpl_status, log_level, _ = parser.evaluate_parser(return_code)
        self.record_status(tbpl_status, level=log_level)

        return return_code
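On coverage builds, GCOV_PREFIX relocates the instrumented binary's .gcda output; a one-line illustration with a placeholder path:

# gcov prepends this prefix to the paths where .gcda coverage files are written.
env = {"GCOV_PREFIX": "/path/to/gcov_dir"}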
Example No. 33
    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import firefox_ui_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(firefox_ui_harness.__file__),
                         self.cli_script),
            '--binary', binary_path,
            '--address', 'localhost:{}'.format(marionette_port),

            # Resource files to serve via local webserver
            '--server-root', os.path.join(dirs['abs_fxui_dir'], 'resources'),

            # Use the work dir to get temporary data stored
            '--workspace', dirs['abs_work_dir'],

            # logging options
            '--gecko-log=-',  # output from the gecko process redirected to stdout
            '--log-raw=-',  # structured log for output parser redirected to stdout

            # additional reports helpful for Jenkins and inspection via Treeherder
            '--log-html', os.path.join(dirs['abs_blob_upload_dir'], 'report.html'),
            '--log-xunit', os.path.join(dirs['abs_blob_upload_dir'], 'report.xml'),

            # Enable tracing output to log transmission protocol
            '-vv',
        ]

        # Collect all pass-through harness options to the script
        cmd.extend(self.query_harness_args())

        # Translate deprecated --e10s flag
        if not self.config.get('e10s'):
            cmd.append('--disable-e10s')

        if self.symbols_url:
            cmd.extend(['--symbols-path', self.symbols_url])

        if self.config.get('tag'):
            cmd.extend(['--tag', self.config['tag']])

        parser = StructuredOutputParser(config=self.config,
                                        log_obj=self.log_obj,
                                        strict=False)

        # Add the default tests to run
        tests = [os.path.join(dirs['abs_fxui_dir'], 'tests', test) for test in self.default_tests]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
        if self.query_minidump_stackwalk():
            env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})
        env['RUST_BACKTRACE'] = '1'

        # If code coverage is enabled, set GCOV_PREFIX env variable
        if self.config.get('code_coverage'):
            env['GCOV_PREFIX'] = self.gcov_dir

        if self.config['allow_software_gl_layers']:
            env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
        if self.config['enable_webrender']:
            env['MOZ_WEBRENDER'] = '1'

        return_code = self.run_command(cmd,
                                       cwd=dirs['abs_work_dir'],
                                       output_timeout=300,
                                       output_parser=parser,
                                       env=env)

        tbpl_status, log_level = parser.evaluate_parser(return_code)
        self.buildbot_status(tbpl_status, level=log_level)

        return return_code
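The crash-reporting environment set up across these harness examples follows one pattern; a minimal sketch with an invented helper name and placeholder arguments:

def crash_report_env(blob_upload_dir, stackwalk_path=None):
    """Save minidumps into the upload dir; symbolicate when a stackwalk tool is available."""
    env = {'MINIDUMP_SAVE_PATH': blob_upload_dir}
    if stackwalk_path:
        env['MINIDUMP_STACKWALK'] = stackwalk_path
    return env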