Example #1
 def test_logcat_service_create_output_excerpts(self, clear_adb_mock,
                                                stop_proc_mock,
                                                start_proc_mock,
                                                FastbootProxy,
                                                MockAdbProxy):
     mock_serial = '1'
     ad = android_device.AndroidDevice(serial=mock_serial)
     logcat_service = logcat.Logcat(ad)
     logcat_service._start()
     # Generate logs before the file pointer is created.
     # This message will not be captured in the excerpt.
     NOT_IN_EXCERPT = 'Not in excerpt.\n'
     with open(logcat_service.adb_logcat_file_path, 'a') as f:
         f.write(NOT_IN_EXCERPT)
     # With the file pointer created, generate logs and make an excerpt.
     logcat_service._open_logcat_file()
     FILE_CONTENT = 'Some log.\n'
     with open(logcat_service.adb_logcat_file_path, 'a') as f:
         f.write(FILE_CONTENT)
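     # Build the runtime test info for 'test_foo' and request the first excerpt.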
     test_output_dir = os.path.join(self.tmp_dir, 'test_foo')
     mock_record = mock.MagicMock()
     mock_record.begin_time = 123
     test_run_info = runtime_test_info.RuntimeTestInfo(
         'test_foo', test_output_dir, mock_record)
     actual_path1 = logcat_service.create_output_excerpts(test_run_info)[0]
     expected_path1 = os.path.join(test_output_dir, 'test_foo-123',
                                   'logcat,1,fakemodel,test_foo-123.txt')
     self.assertEqual(actual_path1, expected_path1)
     self.assertTrue(os.path.exists(expected_path1))
     self.AssertFileContains(FILE_CONTENT, expected_path1)
     self.AssertFileDoesNotContain(NOT_IN_EXCERPT, expected_path1)
     # Generate some new logs and do another excerpt.
     FILE_CONTENT = 'Some more logs!!!\n'
     with open(logcat_service.adb_logcat_file_path, 'a') as f:
         f.write(FILE_CONTENT)
     test_output_dir = os.path.join(self.tmp_dir, 'test_bar')
     mock_record = mock.MagicMock()
     mock_record.begin_time = 456
     test_run_info = runtime_test_info.RuntimeTestInfo(
         'test_bar', test_output_dir, mock_record)
     actual_path2 = logcat_service.create_output_excerpts(test_run_info)[0]
     expected_path2 = os.path.join(test_output_dir, 'test_bar-456',
                                   'logcat,1,fakemodel,test_bar-456.txt')
     self.assertEqual(actual_path2, expected_path2)
     self.assertTrue(os.path.exists(expected_path2))
     self.AssertFileContains(FILE_CONTENT, expected_path2)
     self.AssertFileDoesNotContain(FILE_CONTENT, expected_path1)
     logcat_service.stop()
Example #2
 def _teardown_class(self):
     """Proxy function to guarantee the base implementation of
     teardown_class is called.
     """
     stage_name = STAGE_NAME_TEARDOWN_CLASS
     record = records.TestResultRecord(stage_name, self.TAG)
     record.test_begin()
     self.current_test_info = runtime_test_info.RuntimeTestInfo(
         stage_name, self.log_path, record)
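     # Point the `expects` recorder at this stage's record so that expect
     # failures raised during teardown_class are attributed to it.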
     expects.recorder.reset_internal_states(record)
     try:
         with self._log_test_stage(stage_name):
             self.teardown_class()
     except signals.TestAbortAll as e:
         setattr(e, 'results', self.results)
         raise
     except Exception as e:
         logging.exception('Error encountered in %s.', stage_name)
         record.test_error(e)
         record.update_record()
         self.results.add_class_error(record)
         self.summary_writer.dump(record.to_dict(),
                                  records.TestSummaryEntryType.RECORD)
     else:
         if expects.recorder.has_error:
             record.update_record()
             self.results.add_class_error(record)
             self.summary_writer.dump(record.to_dict(),
                                      records.TestSummaryEntryType.RECORD)
     finally:
         self._clean_up()
Example #3
 def test_logcat_service_create_output_excerpts(self, clear_adb_mock,
                                                stop_proc_mock,
                                                start_proc_mock,
                                                FastbootProxy,
                                                MockAdbProxy):
     mock_serial = '1'
     ad = android_device.AndroidDevice(serial=mock_serial)
     logcat_service = logcat.Logcat(ad)
     logcat_service.start()
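     # Write some logs and request an excerpt for 'test_foo'.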
     FILE_CONTENT = 'Some log.\n'
     with open(logcat_service.adb_logcat_file_path, 'w') as f:
         f.write(FILE_CONTENT)
     test_output_dir = os.path.join(self.tmp_dir, 'test_foo')
     mock_record = mock.MagicMock()
     mock_record.begin_time = 123
     test_run_info = runtime_test_info.RuntimeTestInfo(
         'test_foo', test_output_dir, mock_record)
     actual_path1 = logcat_service.create_output_excerpts(test_run_info)[0]
     expected_path1 = os.path.join(test_output_dir, 'test_foo-123',
                                   'adblog,fakemodel,1.txt')
     self.assertTrue(os.path.exists(expected_path1))
     self.assertEqual(actual_path1, expected_path1)
     self.AssertFileContains(FILE_CONTENT, expected_path1)
     self.assertFalse(os.path.exists(logcat_service.adb_logcat_file_path))
     # Generate some new logs and do another excerpt.
     FILE_CONTENT = 'Some more logs!!!\n'
     with open(logcat_service.adb_logcat_file_path, 'w') as f:
         f.write(FILE_CONTENT)
     test_output_dir = os.path.join(self.tmp_dir, 'test_bar')
     mock_record = mock.MagicMock()
     mock_record.begin_time = 456
     test_run_info = runtime_test_info.RuntimeTestInfo(
         'test_bar', test_output_dir, mock_record)
     actual_path2 = logcat_service.create_output_excerpts(test_run_info)[0]
     expected_path2 = os.path.join(test_output_dir, 'test_bar-456',
                                   'adblog,fakemodel,1.txt')
     self.assertTrue(os.path.exists(expected_path2))
     self.assertEqual(actual_path2, expected_path2)
     self.AssertFileContains(FILE_CONTENT, expected_path2)
     self.AssertFileDoesNotContain(FILE_CONTENT, expected_path1)
     self.assertFalse(os.path.exists(logcat_service.adb_logcat_file_path))
Example #4
 def _clean_up(self):
     """The final stage of a test class execution."""
     stage_name = STAGE_NAME_CLEAN_UP
     record = records.TestResultRecord(stage_name, self.TAG)
     record.test_begin()
     self.current_test_info = runtime_test_info.RuntimeTestInfo(
         stage_name, self.log_path, record)
     expects.recorder.reset_internal_states(record)
     with self._log_test_stage(stage_name):
         # Write controller info and summary to summary file.
         self._record_controller_info()
         self._controller_manager.unregister_controllers()
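         # If any `expects` call failed during clean-up, record it as a
         # class-level error.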
         if expects.recorder.has_error:
             record.test_error()
             record.update_record()
             self.results.add_class_error(record)
             self.summary_writer.dump(record.to_dict(),
                                      records.TestSummaryEntryType.RECORD)
Example #5
    def _setup_class(self):
        """Proxy function to guarantee the base implementation of setup_class
        is called.

        Returns:
            If `self.results` is returned instead of None, this means something
            has gone wrong, and the rest of the test class should not execute.
        """
        # Setup for the class.
        class_record = records.TestResultRecord(STAGE_NAME_SETUP_CLASS,
                                                self.TAG)
        class_record.test_begin()
        self.current_test_info = runtime_test_info.RuntimeTestInfo(
            STAGE_NAME_SETUP_CLASS, self.log_path, class_record)
        expects.recorder.reset_internal_states(class_record)
        try:
            with self._log_test_stage(STAGE_NAME_SETUP_CLASS):
                self.setup_class()
        except signals.TestAbortSignal:
            # Throw abort signals to outer try block for handling.
            raise
        except Exception as e:
            # Setup class failed for unknown reasons.
            # Fail the class and skip all tests.
            logging.exception('Error in %s#setup_class.', self.TAG)
            class_record.test_error(e)
            self.results.add_class_error(class_record)
            self._exec_procedure_func(self._on_fail, class_record)
            class_record.update_record()
            self.summary_writer.dump(class_record.to_dict(),
                                     records.TestSummaryEntryType.RECORD)
            self._skip_remaining_tests(e)
            return self.results
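        # setup_class itself succeeded, but an `expects` call recorded an
        # error; fail the class and skip the remaining tests.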
        if expects.recorder.has_error:
            self._exec_procedure_func(self._on_fail, class_record)
            class_record.test_error()
            class_record.update_record()
            self.summary_writer.dump(class_record.to_dict(),
                                     records.TestSummaryEntryType.RECORD)
            self.results.add_class_error(class_record)
            self._skip_remaining_tests(
                class_record.termination_signal.exception)
            return self.results
Example #6
 def _write_logcat_file_and_assert_excerpts_exists(
         logcat_file_content, test_begin_time, test_name):
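     """Nested helper: appends `logcat_file_content` to the logcat output
     file, requests an excerpt for `test_name`, asserts the excerpt exists at
     the expected path, and returns that path. Closes over `self`,
     `logcat_service`, and `mock_serial` from the enclosing test.
     """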
     with open(logcat_service.adb_logcat_file_path, 'a') as f:
         f.write(logcat_file_content)
     test_output_dir = os.path.join(self.tmp_dir, test_name)
     mock_record = mock.MagicMock()
     mock_record.begin_time = test_begin_time
     test_run_info = runtime_test_info.RuntimeTestInfo(
         test_name, test_output_dir, mock_record)
     actual_path = logcat_service.create_output_excerpts(
         test_run_info)[0]
     expected_path = os.path.join(
         test_output_dir, '{test_name}-{test_begin_time}'.format(
             test_name=test_name, test_begin_time=test_begin_time),
         'logcat,{mock_serial},fakemodel,{test_name}-{test_begin_time}.txt'
         .format(mock_serial=mock_serial,
                 test_name=test_name,
                 test_begin_time=test_begin_time))
     self.assertEqual(actual_path, expected_path)
     self.assertTrue(os.path.exists(expected_path))
     return expected_path
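A minimal usage sketch of how this nested helper might be driven from the enclosing test method. It assumes the same context as Example #1 (a started logcat service, a call to `_open_logcat_file()`, and the `AssertFileContains`/`AssertFileDoesNotContain` helpers), none of which is shown in this excerpt:

 # Hypothetical driver code inside the same test method (assumed context).
 logcat_service._open_logcat_file()
 excerpt1 = _write_logcat_file_and_assert_excerpts_exists(
     'Some log.\n', 123, 'test_foo')
 excerpt2 = _write_logcat_file_and_assert_excerpts_exists(
     'Some more logs!!!\n', 456, 'test_bar')
 self.AssertFileContains('Some more logs!!!\n', excerpt2)
 self.AssertFileDoesNotContain('Some more logs!!!\n', excerpt1)
 logcat_service.stop()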
Example #7
    def _setup_generated_tests(self):
        """Proxy function to guarantee the base implementation of
        setup_generated_tests is called.

        Returns:
            True if setup is successful, False otherwise.
        """
        stage_name = STAGE_NAME_SETUP_GENERATED_TESTS
        record = records.TestResultRecord(stage_name, self.TAG)
        record.test_begin()
        self.current_test_info = runtime_test_info.RuntimeTestInfo(
            stage_name, self.log_path, record)
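        # Any failure here is recorded as a class error and aborts the class
        # by returning False.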
        try:
            with self._log_test_stage(stage_name):
                self.setup_generated_tests()
                return True
        except Exception as e:
            logging.exception('%s failed for %s.', stage_name, self.TAG)
            record.test_error(e)
            self.results.add_class_error(record)
            self.summary_writer.dump(record.to_dict(),
                                     records.TestSummaryEntryType.RECORD)
            return False
Example #8
    def exec_one_test(self, test_name, test_method):
        """Executes one test and update test results.

        Executes setup_test, the test method, and teardown_test; then creates a
        records.TestResultRecord object with the execution information and adds
        the record to the test class's test results.

        Args:
            test_name: string, Name of the test.
            test_method: function, The test method to execute.
        """
        tr_record = records.TestResultRecord(test_name, self.TAG)
        tr_record.uid = getattr(test_method, 'uid', None)
        tr_record.test_begin()
        self.current_test_info = runtime_test_info.RuntimeTestInfo(
            test_name, self.log_path, tr_record)
        expects.recorder.reset_internal_states(tr_record)
        logging.info('%s %s', TEST_CASE_TOKEN, test_name)
        # Whether teardown_test raised an error.
        teardown_test_failed = False
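        # Nested try blocks: the innermost wraps setup_test so setup failures
        # surface as test errors, the middle one runs the test body and
        # guarantees teardown_test in its `finally`, and the outer one maps
        # signals and exceptions to the record's final result.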
        try:
            try:
                try:
                    self._setup_test(test_name)
                except signals.TestFailure as e:
                    raise_with_traceback(signals.TestError(
                        e.details, e.extras))
                test_method()
            except (signals.TestPass, signals.TestAbortSignal):
                raise
            except Exception:
                logging.exception('Exception occurred in %s.',
                                  self.current_test_name)
                raise
            finally:
                before_count = expects.recorder.error_count
                try:
                    self._teardown_test(test_name)
                except signals.TestAbortSignal:
                    raise
                except Exception as e:
                    logging.exception(e)
                    tr_record.test_error()
                    tr_record.add_error(STAGE_NAME_TEARDOWN_TEST, e)
                    teardown_test_failed = True
                else:
                    # Check if anything failed by `expects`.
                    if before_count < expects.recorder.error_count:
                        teardown_test_failed = True
        except (signals.TestFailure, AssertionError) as e:
            tr_record.test_fail(e)
        except signals.TestSkip as e:
            # Test skipped.
            tr_record.test_skip(e)
        except signals.TestAbortSignal as e:
            # Abort signals, pass along.
            tr_record.test_fail(e)
            raise
        except signals.TestPass as e:
            # Explicit test pass.
            tr_record.test_pass(e)
        except Exception as e:
            # Exception happened during test.
            tr_record.test_error(e)
        else:
            # No exception was raised from the test or teardown. If `expects`
            # recorded an error, the test should fail with the first error in
            # `expects`.
            if expects.recorder.has_error and not teardown_test_failed:
                tr_record.test_fail()
            # Otherwise the test passed.
            elif not teardown_test_failed:
                tr_record.test_pass()
        finally:
            tr_record.update_record()
            try:
                if tr_record.result in (
                        records.TestResultEnums.TEST_RESULT_ERROR,
                        records.TestResultEnums.TEST_RESULT_FAIL):
                    self._exec_procedure_func(self._on_fail, tr_record)
                elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
                    self._exec_procedure_func(self._on_pass, tr_record)
                elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
                    self._exec_procedure_func(self._on_skip, tr_record)
            finally:
                logging.info(RESULT_LINE_TEMPLATE, tr_record.test_name,
                             tr_record.result)
                self.results.add_record(tr_record)
                self.summary_writer.dump(tr_record.to_dict(),
                                         records.TestSummaryEntryType.RECORD)
                self.current_test_info = None
                self.current_test_name = None
Example #9
    def exec_one_test(self, test_name, test_method, record=None):
        """Executes one test and update test results.

    Executes setup_test, the test method, and teardown_test; then creates a
    records.TestResultRecord object with the execution information and adds
    the record to the test class's test results.

    Args:
      test_name: string, Name of the test.
      test_method: function, The test method to execute.
      record: records.TestResultRecord, optional arg for injecting a record
        object to use for this test execution. If not set, a new one is created
        created. This is meant for passing information between consecutive test
        case execution for retry purposes. Do NOT abuse this for "magical"
        features.

    Returns:
      TestResultRecord, the test result record object of the test execution.
      This object is strictly for read-only purposes. Modifying this record
      will not change what is reported in the test run's summary yaml file.
    """
        tr_record = record or records.TestResultRecord(test_name, self.TAG)
        tr_record.uid = getattr(test_method, 'uid', None)
        tr_record.test_begin()
        self.current_test_info = runtime_test_info.RuntimeTestInfo(
            test_name, self.log_path, tr_record)
        expects.recorder.reset_internal_states(tr_record)
        logging.info('%s %s', TEST_CASE_TOKEN, test_name)
        # Whether teardown_test raised an error.
        teardown_test_failed = False
        try:
            try:
                try:
                    self._setup_test(test_name)
                except signals.TestFailure as e:
                    _, _, traceback = sys.exc_info()
                    raise signals.TestError(e.details,
                                            e.extras).with_traceback(traceback)
                test_method()
            except (signals.TestPass, signals.TestAbortSignal):
                raise
            except Exception:
                logging.exception('Exception occurred in %s.',
                                  self.current_test_info.name)
                raise
            finally:
                before_count = expects.recorder.error_count
                try:
                    self._teardown_test(test_name)
                except signals.TestAbortSignal:
                    raise
                except Exception as e:
                    logging.exception('Exception occurred in %s of %s.',
                                      STAGE_NAME_TEARDOWN_TEST,
                                      self.current_test_info.name)
                    tr_record.test_error()
                    tr_record.add_error(STAGE_NAME_TEARDOWN_TEST, e)
                    teardown_test_failed = True
                else:
                    # Check if anything failed by `expects`.
                    if before_count < expects.recorder.error_count:
                        teardown_test_failed = True
        except (signals.TestFailure, AssertionError) as e:
            tr_record.test_fail(e)
        except signals.TestSkip as e:
            # Test skipped.
            tr_record.test_skip(e)
        except signals.TestAbortSignal as e:
            # Abort signals, pass along.
            tr_record.test_fail(e)
            raise
        except signals.TestPass as e:
            # Explicit test pass.
            tr_record.test_pass(e)
        except Exception as e:
            # Exception happened during test.
            tr_record.test_error(e)
        else:
            # No exception was raised from the test or teardown. If `expects`
            # recorded an error, the test should fail with the first error in
            # `expects`.
            if expects.recorder.has_error and not teardown_test_failed:
                tr_record.test_fail()
            # Otherwise the test passed.
            elif not teardown_test_failed:
                tr_record.test_pass()
        finally:
            tr_record.update_record()
            try:
                if tr_record.result in (
                        records.TestResultEnums.TEST_RESULT_ERROR,
                        records.TestResultEnums.TEST_RESULT_FAIL):
                    self._exec_procedure_func(self._on_fail, tr_record)
                elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
                    self._exec_procedure_func(self._on_pass, tr_record)
                elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
                    self._exec_procedure_func(self._on_skip, tr_record)
            finally:
                logging.info(RESULT_LINE_TEMPLATE, tr_record.test_name,
                             tr_record.result)
                self.results.add_record(tr_record)
                self.summary_writer.dump(tr_record.to_dict(),
                                         records.TestSummaryEntryType.RECORD)
                self.current_test_info = None
        return tr_record
Example #10
    def exec_one_test(self, test_name, test_method, args=(), **kwargs):
        """Executes one test and update test results.

        Executes setup_test, the test method, and teardown_test; then creates a
        records.TestResultRecord object with the execution information and adds
        the record to the test class's test results.

        Args:
            test_name: Name of the test.
            test_method: The test method.
            args: A tuple of positional args to pass to the test method.
            kwargs: Extra keyword args to pass to the test method.
        """
        tr_record = records.TestResultRecord(test_name, self.TAG)
        tr_record.test_begin()
        self.current_test_info = runtime_test_info.RuntimeTestInfo(
            test_name, self.log_path, tr_record)
        logging.info('%s %s', TEST_CASE_TOKEN, test_name)
        teardown_test_failed = False
        try:
            try:
                try:
                    self._setup_test(test_name)
                except signals.TestFailure as e:
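                    # Escalate a setup_test failure to a test error while
                    # preserving the original traceback.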
                    new_e = signals.TestError(e.details, e.extras)
                    _, _, new_e.__traceback__ = sys.exc_info()
                    raise new_e
                if args or kwargs:
                    test_method(*args, **kwargs)
                else:
                    test_method()
            except signals.TestPass:
                raise
            except Exception:
                logging.exception('Exception occurred in %s.',
                                  self.current_test_name)
                raise
            finally:
                try:
                    self._teardown_test(test_name)
                except signals.TestAbortSignal:
                    raise
                except Exception as e:
                    logging.exception(e)
                    tr_record.add_error('teardown_test', e)
                    teardown_test_failed = True
        except (signals.TestFailure, AssertionError) as e:
            tr_record.test_fail(e)
        except signals.TestSkip as e:
            # Test skipped.
            tr_record.test_skip(e)
        except signals.TestAbortSignal as e:
            # Abort signals, pass along.
            tr_record.test_fail(e)
            raise e
        except signals.TestPass as e:
            # Explicit test pass.
            tr_record.test_pass(e)
        except Exception as e:
            # Exception happened during test.
            tr_record.test_error(e)
        else:
            if not teardown_test_failed:
                tr_record.test_pass()
        finally:
            tr_record.update_record()
            try:
                if tr_record.result in (
                        records.TestResultEnums.TEST_RESULT_ERROR,
                        records.TestResultEnums.TEST_RESULT_FAIL):
                    self._exec_procedure_func(self._on_fail, tr_record)
                elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
                    self._exec_procedure_func(self._on_pass, tr_record)
                elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
                    self._exec_procedure_func(self._on_skip, tr_record)
            finally:
                self.results.add_record(tr_record)
                self.summary_writer.dump(tr_record.to_dict(),
                                         records.TestSummaryEntryType.RECORD)
                self.current_test_info = None
                self.current_test_name = None
Example #11
    def run(self, test_names=None):
        """Runs tests within a test class.

        One of these test method lists will be executed, shown here in priority
        order:

        1. The test_names list, which is passed from the cmd line. Invalid
           names are guarded by cmd line arg parsing.
        2. The self.tests list defined in the test class. Invalid names are
           ignored.
        3. All functions that match the test method naming convention in the
           test class.

        Args:
            test_names: A list of strings that are the test method names
                requested in the cmd line.

        Returns:
            The test results object of this class.
        """
        # Executes pre-setup procedures, like generating test methods.
        if not self._setup_generated_tests():
            return self.results
        logging.info('==========> %s <==========', self.TAG)
        # Devise the actual test methods to run in the test class.
        if not test_names:
            if self.tests:
                # Specified by run list in class.
                test_names = list(self.tests)
            else:
                # No test method specified by user, execute all in test class.
                test_names = self.get_existing_test_names()
        self.results.requested = test_names
        self.summary_writer.dump(self.results.requested_test_names_dict(),
                                 records.TestSummaryEntryType.TEST_NAME_LIST)
        tests = self._get_test_methods(test_names)
        try:
            # Setup for the class.
            class_record = records.TestResultRecord(STAGE_NAME_SETUP_CLASS,
                                                    self.TAG)
            class_record.test_begin()
            self.current_test_info = runtime_test_info.RuntimeTestInfo(
                STAGE_NAME_SETUP_CLASS, self.log_path, class_record)
            try:
                self._setup_class()
            except signals.TestAbortSignal:
                # Throw abort signals to outer try block for handling.
                raise
            except Exception as e:
                # Setup class failed for unknown reasons.
                # Fail the class and skip all tests.
                logging.exception('Error in %s#setup_class.', self.TAG)
                class_record.test_error(e)
                self.results.add_class_error(class_record)
                self.summary_writer.dump(class_record.to_dict(),
                                         records.TestSummaryEntryType.RECORD)
                self._exec_procedure_func(self._on_fail, class_record)
                self._skip_remaining_tests(e)
                return self.results
            # Run tests in order.
            for test_name, test_method in tests:
                self.exec_one_test(test_name, test_method)
            return self.results
        except signals.TestAbortClass as e:
            e.details = 'Test class aborted due to: %s' % e.details
            self._skip_remaining_tests(e)
            return self.results
        except signals.TestAbortAll as e:
            e.details = 'All remaining tests aborted due to: %s' % e.details
            self._skip_remaining_tests(e)
            # Piggy-back test results on this exception object so we don't lose
            # results from this test class.
            setattr(e, 'results', self.results)
            raise e
        finally:
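            # Class teardown and controller cleanup always run, even when the
            # class or the entire run is aborted.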
            self._teardown_class()
            self._unregister_controllers()
            logging.info('Summary for test class %s: %s', self.TAG,
                         self.results.summary_str())