Example #1
    TestRunner.configure_logger(args.output_directory,
                                level=getattr(L, context_dict['log_level']))

    test_configs = []
    for test_config_file in args.test_configs:
        test_configs += object_from_command_line(test_config_file, 'TESTS')

    test_devices = []
    for test_device_file in args.test_devices:
        test_devices += object_from_command_line(test_device_file, 'DEVICES')

    if args.test_regex:
        test_configs = filter_tests(test_configs, args.test_regex)

    if args.v2:
        L.warning(
            "Argument -2 is DEPRECATED and will be ignored. Test runner v2 is the default now"
        )

    # This call shouldn't throw. If it does then we need to tweak the TestRunner.
    return TestRunner(TestRunContext(context_dict), test_devices,
                      test_configs).run()


if __name__ == "__main__":
    # pylint: disable=broad-except
    try:
        sys.exit(main())
    except Exception as ex:
        L.error("Unrecoverable error: {}\n".format(exception_to_string(ex)))
        sys.exit(-1)
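
For context, filter_tests above only needs to narrow the TESTS list down to the configs whose names match the supplied regex. A minimal sketch of such a helper, assuming each test config is a dict with a 'name' key (as suggested by remote_test_config['name'] further down); the real implementation may differ:

import re

def filter_tests(test_configs, test_regex):
    # Keep only the configs whose 'name' matches the regular expression.
    pattern = re.compile(test_regex)
    return [config for config in test_configs if pattern.search(config['name'])]
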
    def run_one(self, test_run):
        L.info("Running test: {}".format(test_run.name()))
        L.debug("Test Config:\n{}".format(
            json.dumps(test_run.config(), indent=2)))

        test_run.set_ran()

        steps = [
            TestRunStep(up=self._prepare_output_directory,
                        up_desc="Prepare output directory"),
            TestRunStep(up=self._prepare_logger,
                        up_desc="Prepare logging",
                        down=self._restore_logger,
                        down_desc="Restore Logging"),
            TestRunStep(up=self._prepare_devices,
                        up_desc="Prepare devices",
                        down=self._release_devices,
                        down_desc="Release Devices"),
            TestRunStep(up=self._prepare_test_class,
                        up_desc="Create test class"),
            TestRunStep(up=self._setup_test,
                        down=self._teardown_test,
                        up_desc="Run test setup",
                        down_desc="Run test teardown"),
            TestRunStep(up=self._report_info, up_desc="Report info for test"),
            TestRunStep(up=self._execute_test, up_desc="Execute the test"),
        ]

        def exception_string(ex):
            '''KeyboardInterrupt carries no message, so make it clear that this is what happened.'''
            if isinstance(ex, KeyboardInterrupt):
                return "Keyboard interrupt"
            return str(ex)

        last_step_index = 0
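        # Run each step's 'up' action in order; stop at the first failure but
        # remember how far we got so the matching teardown steps can still run.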
        for last_step_index, step in enumerate(steps):
            if not step.up:
                continue
            try:
                L.info("Executing test step: {}".format(step.up_desc))
                step.up(test_run)
            except BaseException as ex:
                L.error(
                    "Exception occurred during test step '{}'\nEXCEPTION: {}".
                    format(step.up_desc, exception_string(ex)))
                test_run.register_exception(step.up_desc, ex)
                break

        for step_index in reversed(range(0, last_step_index + 1)):
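            # Unwind in reverse order: run the teardown of every step up to and
            # including the last one reached, even if its 'up' action failed.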
            step = steps[step_index]
            if not step.down:
                continue
            try:
                L.info("Executing test step: {}".format(step.down_desc))
                step.down(test_run)
            except BaseException as ex:
                L.error(
                    "Exception occurred during teardown in test step '{}'\nEXCEPTION: {}"
                    .format(step.down_desc, exception_string(ex)))
                test_run.register_exception(step.down_desc, ex)
                # Keep running teardown steps even on failure

        L.debug("Forcing garbage collection to clean up after test run")
        gc.collect()
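
The run_one method above only relies on four attributes of each step. A minimal sketch of what TestRunStep could look like, inferred from that usage (the real class may carry more state):

class TestRunStep:
    '''Pairs a setup ('up') action with its matching teardown ('down') action.'''

    def __init__(self, up=None, up_desc="", down=None, down_desc=""):
        self.up = up                # callable taking the test run, or None
        self.up_desc = up_desc      # human-readable description for logging
        self.down = down            # teardown callable, or None
        self.down_desc = down_desc  # human-readable description for logging
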
    def run(self):
        if self._context['allow_manual']:
            xv_leak_tools.manual_input.allow_manual_input()

        for itest, test_run in enumerate(self._test_runs):
            try:
                L.info("\n\n {} Test {} / {} {}\n".format(
                    STARS, itest + 1, len(self._test_runs), STARS))

                self.run_one(test_run)

                if test_run.passed():
                    L.info("Test {} passed".format(test_run.name()))
                    continue

                if test_run.user_interrupt():
                    L.error("Abandoning tests due to keyboard interrupt!")
                    break

                exceptions = test_run.exceptions()
                first_exception = exceptions[0]
                # LeakTestFail is different from other exceptions. A LeakTestFail means that the
                # test failed an expectation (assertion). Any other type of exception means
                # something happened we weren't expecting.
                if test_run.failed():
                    L.error(
                        "Test {} failed at step '{}' due to assertion:\n{}".
                        format(test_run.name(), first_exception[0],
                               exception_to_string(first_exception[1])))
                else:
                    L.error(
                        "Test {} errored at step '{}' due to exception:\n{}".
                        format(test_run.name(), first_exception[0],
                               exception_to_string(first_exception[1])))

                if len(exceptions) > 1:
                    L.warning("Subsequent errors occurred in the test run!")
                    for failure in exceptions[1:]:
                        L.warning("Test step '{}' errored with:\n{}".format(
                            failure[0], exception_to_string(failure[1])))

                if not self._context['stop_on_fail']:
                    continue

                L.error('Abandoning further tests due to test failure')
                break
            except BaseException as ex:
                L.error(
                    "FATAL ERROR: Unexpected error thrown from test suite. Test run will be "
                    "aborted: {}".format(exception_to_string(ex)))

        num_failures = self._summarise_run(self._test_runs)

        self._device_discovery.cleanup()

        # Do this just in case we ever end up running again without restarting the interpreter
        xv_leak_tools.manual_input.disallow_manual_input()

        # Need to terminate logging in order to close log files and thus chown them to non-root.
        L.terminate()
        self._ensure_files_not_owned_by_root()
        return num_failures
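
exception_to_string is used throughout to render exceptions for the log. A minimal sketch of such a helper, assuming it simply flattens the exception type, message and traceback into text (the real implementation may format differently):

import traceback

def exception_to_string(ex):
    # Render the exception class, message and traceback as one string.
    return "".join(
        traceback.format_exception(type(ex), ex, ex.__traceback__)).strip()
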
    def test(self):
        L.info("Running test {} completely on remote device".format(
            self.remote_test_config['name']))

        remote_config_path = os.path.join(self.test_device.temp_directory(),
                                          'test_config.py')

        self.connector_helper.write_remote_file_from_contents(
            remote_config_path, json.dumps([self.remote_test_config]))

        cmd = [
            'run_tests.py',
            self.test_device.output_directory(), '-c', remote_config_path
        ]

        # TODO: Use .requires_root to figure out if the test needs root.
        ret, stdout, stderr = self.connector_helper.execute_python(cmd,
                                                                   root=True)

        # TODO: Reconsider all this. Maybe we shouldn't display the output here as it screws up the
        # logs. It might be better to send ERROR logs to stderr and then just dump stderr here.
        if stdout:
            # TODO: Make the logger send error and other levels to the correct output file
            L.info('*' * 80)
            L.info('BEGIN stdout from remote machine:')
            L.info('*' * 80)
            for line in stdout.splitlines():
                # TODO: Logger not satisfying my needs!
                print(line)
            L.info('*' * 80)
            L.info('END stdout from remote machine')
            L.info('*' * 80)

        if stderr:
            L.error('*' * 80)
            L.error('BEGIN stderr from remote machine:')
            L.error('*' * 80)
            for line in stderr.splitlines():
                # TODO: Logger not satisfying my needs!
                print(line)
            L.error('*' * 80)
            L.error('END stderr from remote machine')
            L.error('*' * 80)

        if ret == 0:
            L.info('Remote test execution succeeded')
            return

        L.error('Remote test execution failed')
        self.failTest('Remote test run failed!')