def run_tests(self, test_labels, extra_tests=None, full_suite=False, **kwargs):
    # type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
    """Build and run the test suite for the given labels.

    test_labels: dotted-path labels of the tests to run.
    extra_tests: additional TestCase objects appended to the suite.
    full_suite: forwarded to write_instrumentation_reports when the run passes.

    Exits the process with status 1 if the suite cannot be built.
    """
    self.setup_test_environment()
    try:
        suite = self.build_suite(test_labels, extra_tests)
    except AttributeError:
        # Building the suite raised AttributeError; print the traceback plus
        # a debugging hint.  NOTE(review): assumes the error came from a bad
        # test label rather than some other AttributeError -- confirm.
        traceback.print_exc()
        print()
        print(" This is often caused by a test module/class/function that doesn't exist or ")
        print(" import properly. You can usually debug in a `manage.py shell` via e.g. ")
        print(" import zerver.tests.test_messages")
        print(" from zerver.tests.test_messages import StreamMessagesTest")
        print(" StreamMessagesTest.test_message_to_stream")
        print()
        sys.exit(1)
    # We have to do the next line to avoid flaky scenarios where we
    # run a single test and getting an SA connection causes data from
    # a Django connection to be rolled back mid-test.
    get_sqlalchemy_connection()
    result = self.run_suite(suite)
    self.teardown_test_environment()
    # NOTE(review): the declared return type is bool, but this returns
    # result.failed directly -- verify the result class actually exposes a
    # boolean `failed` attribute.
    failed = result.failed
    if not failed:
        # Instrumentation reports are only written for a fully passing run.
        write_instrumentation_reports(full_suite=full_suite)
    return failed
def run_tests(self, test_labels, extra_tests=None, full_suite=False, **kwargs):
    # type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
    """Run the tests named by test_labels and report whether any failed.

    extra_tests are appended to the built suite; full_suite is forwarded to
    write_instrumentation_reports on a passing run.  Exits with status 1 if
    the suite cannot even be built.
    """
    self.setup_test_environment()
    try:
        test_suite = self.build_suite(test_labels, extra_tests)
    except AttributeError:
        # Suite construction failed; show the traceback followed by a
        # debugging hint for the common bad-test-label case.
        traceback.print_exc()
        hint_lines = (
            "",
            " This is often caused by a test module/class/function that doesn't exist or ",
            " import properly. You can usually debug in a `manage.py shell` via e.g. ",
            " import zerver.tests.test_messages",
            " from zerver.tests.test_messages import StreamMessagesTest",
            " StreamMessagesTest.test_message_to_stream",
            "",
        )
        for hint in hint_lines:
            print(hint)
        sys.exit(1)
    # Grab the SQLAlchemy connection before running anything: otherwise a
    # single-test run can have data from a Django connection rolled back
    # mid-test when the SA connection is first acquired.
    get_sqlalchemy_connection()
    test_result = self.run_suite(test_suite)
    self.teardown_test_environment()
    failed = self.suite_result(test_suite, test_result)
    if not failed:
        write_instrumentation_reports(full_suite=full_suite)
    return failed
def run_tests(
    self,
    test_labels: List[str],
    extra_tests: Optional[List[TestCase]] = None,
    full_suite: bool = False,
    include_webhooks: bool = False,
    **kwargs: Any,
) -> Tuple[bool, List[str]]:
    """Run the labeled tests; return (failed, list_of_failed_test_names).

    full_suite and include_webhooks are forwarded to
    write_instrumentation_reports when the whole run passes.
    """
    self.setup_test_environment()
    try:
        built_suite = self.build_suite(test_labels, extra_tests)
    except AttributeError:
        # Most likely only reachable when running serially on Python <= 3.4.
        # Labels are already normalized with the full prefix (e.g.
        # `./tools/test-backend test_alert_words` arrives here as
        # ['zerver.tests.test_alert_words']), so check each one for an
        # import problem, which produces a much clearer error.
        for label in test_labels:
            check_import_error(label)
        # If no label had an import error, the AttributeError came from
        # something unforeseen; re-raise so it can be debugged directly.
        raise
    self.test_imports(test_labels, built_suite)
    if self.parallel == 1:
        # Serial mode: the databases are (re)created right here.  Parallel
        # mode creates them in init_worker instead, which is why this isn't
        # done in setup_test_environment (that runs in both modes, and the
        # mode is only known once build_suite() has decided it).  In this
        # code path _worker_id is always 0.
        destroy_test_databases(_worker_id)
        create_test_databases(_worker_id)
    # Acquire the SQLAlchemy connection up front to avoid flaky runs where
    # first touching SA mid-test rolls back data from a Django connection.
    get_sqlalchemy_connection()
    run_result = self.run_suite(built_suite)
    self.teardown_test_environment()
    failed = self.suite_result(built_suite, run_result)
    if not failed:
        write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
    return failed, run_result.failed_tests
def run_tests(self, test_labels, extra_tests=None, full_suite=False, **kwargs):
    # type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
    """Build and run the test suite for the given labels.

    Returns a tuple of (failed, failed_test_names).  Exits the process with
    status 1 if the suite cannot be built.
    """
    self.setup_test_environment()
    try:
        suite = self.build_suite(test_labels, extra_tests)
    except AttributeError:
        # Suite construction failed; print the traceback plus a debugging
        # hint.  NOTE(review): assumes the AttributeError came from a bad
        # test label rather than some other cause -- confirm.
        traceback.print_exc()
        print()
        print(" This is often caused by a test module/class/function that doesn't exist or ")
        print(" import properly. You can usually debug in a `manage.py shell` via e.g. ")
        print(" import zerver.tests.test_messages")
        print(" from zerver.tests.test_messages import StreamMessagesTest")
        print(" StreamMessagesTest.test_message_to_stream")
        print()
        sys.exit(1)
    if self.parallel == 1:
        # We are running in serial mode so create the databases here.
        # For parallel mode, the databases are created in init_worker.
        # We don't want to create and destroy DB in setup_test_environment
        # because it will be called for both serial and parallel modes.
        # However, at this point we know in which mode we would be running
        # since that decision has already been made in build_suite().
        destroy_test_databases(self.database_id)
        create_test_databases(self.database_id)
    # We have to do the next line to avoid flaky scenarios where we
    # run a single test and getting an SA connection causes data from
    # a Django connection to be rolled back mid-test.
    get_sqlalchemy_connection()
    result = self.run_suite(suite)
    self.teardown_test_environment()
    failed = self.suite_result(suite, result)
    if not failed:
        # Instrumentation reports are only written for a fully passing run.
        write_instrumentation_reports(full_suite=full_suite)
    return failed, result.failed_tests
def run_tests(self, test_labels: List[str],
              extra_tests: Optional[List[TestCase]]=None,
              full_suite: bool=False,
              include_webhooks: bool=False,
              **kwargs: Any) -> Tuple[bool, List[str]]:
    """Run the labeled tests; return (failed, list_of_failed_test_names).

    full_suite and include_webhooks are forwarded to
    write_instrumentation_reports when the whole run passes.
    """
    self.setup_test_environment()
    try:
        built_suite = self.build_suite(test_labels, extra_tests)
    except AttributeError:
        # Most likely only reachable when running serially on Python <= 3.4.
        # Labels are already normalized with their full prefix (e.g.
        # `./tools/test-backend test_alert_words` arrives here as
        # ['zerver.tests.test_alert_words']), so check each one for an
        # import problem, which produces a much clearer error.
        for label in test_labels:
            check_import_error(label)
        # No label had an import error, so the AttributeError came from
        # something unforeseen; re-raise so it can be debugged directly.
        raise
    self.test_imports(test_labels, built_suite)
    if self.parallel == 1:
        # Serial mode: the databases are (re)created right here.  Parallel
        # mode creates them in init_worker instead, which is why this isn't
        # done in setup_test_environment (that runs in both modes, and the
        # mode is only known once build_suite() has decided it).
        destroy_test_databases(self.database_id)
        create_test_databases(self.database_id)
    # Acquire the SQLAlchemy connection up front to avoid flaky runs where
    # first touching SA mid-test rolls back data from a Django connection.
    get_sqlalchemy_connection()
    run_result = self.run_suite(built_suite)
    self.teardown_test_environment()
    failed = self.suite_result(built_suite, run_result)
    if not failed:
        write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
    return failed, run_result.failed_tests
def run_tests(self, test_labels, extra_tests=None, full_suite=False, **kwargs):
    # type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
    """Run the tests named by test_labels.

    Returns a tuple of (failed, failed_test_names).  Exits with status 1 if
    the suite cannot even be built.
    """
    self.setup_test_environment()
    try:
        test_suite = self.build_suite(test_labels, extra_tests)
    except AttributeError:
        # Suite construction failed; show the traceback followed by a
        # debugging hint for the common bad-test-label case.
        traceback.print_exc()
        hint_lines = (
            "",
            " This is often caused by a test module/class/function that doesn't exist or ",
            " import properly. You can usually debug in a `manage.py shell` via e.g. ",
            " import zerver.tests.test_messages",
            " from zerver.tests.test_messages import StreamMessagesTest",
            " StreamMessagesTest.test_message_to_stream",
            "",
        )
        for hint in hint_lines:
            print(hint)
        sys.exit(1)
    if self.parallel == 1:
        # Serial mode: the databases are (re)created right here.  Parallel
        # mode creates them in init_worker instead, which is why this isn't
        # done in setup_test_environment (that runs in both modes, and the
        # mode is only known once build_suite() has decided it).
        destroy_test_databases(self.database_id)
        create_test_databases(self.database_id)
    # Grab the SQLAlchemy connection before running anything: otherwise a
    # single-test run can have data from a Django connection rolled back
    # mid-test when the SA connection is first acquired.
    get_sqlalchemy_connection()
    test_result = self.run_suite(test_suite)
    self.teardown_test_environment()
    failed = self.suite_result(test_suite, test_result)
    if not failed:
        write_instrumentation_reports(full_suite=full_suite)
    return failed, test_result.failed_tests