def send_start_campaign_info(self, testRequestId, test_suite_name, email):
    """
    Send start test suite message on TSMB bus

    :param testRequestId: test suite execution unique identifier
    :param test_suite_name: test suite name
    :param email: user email
    """
    if self.is_reporting_enabled:
        # Store the userId that initiated the test suite execution
        self._userId = email
        # Initialize test suite report message
        self._init_test_suite_report_message(testRequestId, test_suite_name, "unknown")
        if self.__test_suite_report_msg:
            # Test suite execution is starting
            self.__test_suite_report_msg.status = tsmb.TEST_VERDICT.RUNNING
            # Set test suite start time
            self.__test_suite_report_msg.startTime = utctime_iso8601()
            # Send test suite report message
            self.__logger and self.__logger.info(
                "Start test suite (uuid:{0})".format(
                    str(self.__test_suite_report_msg.testRequestId)))
            self.__send_message(self.__test_suite_report_msg)
        else:
            self.__logger and self.__logger.error(
                "Start test suite (uuid:{0}), "
                "but no test suite report message was created".format(
                    str(testRequestId)))
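
# Hedged usage sketch (not part of the module): the `self.__logger and self.__logger.info(...)`
# idiom used throughout these methods is a None-safe short-circuit; when no logger was injected,
# the expression evaluates to None and no call is made. The `DummyReporter` class below is
# hypothetical and only illustrates that pattern.
import logging

class DummyReporter(object):
    def __init__(self, logger=None):
        self.__logger = logger

    def ping(self):
        # Invokes logger.info("ping") only when a logger is present; silently a no-op otherwise
        self.__logger and self.__logger.info("ping")

DummyReporter().ping()                               # no logger -> nothing happens
DummyReporter(logging.getLogger(__name__)).ping()    # logger present -> info() is called
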
def stop_campaign(self, status, verdict, campaign_info=None):
    """
    Send stop campaign event to the server

    :param status: campaign status
    :type status: str
    :param verdict: campaign verdict
    :type verdict: str
    :param campaign_info: campaign information
        (execution_rate, pass_rate, fail_rate and some campaign statistics)
    :type campaign_info: dict
    """
    if not self._campaign_id:
        self.__logger and self.__logger.warning(
            "Stop campaign info message not sent! "
            "Campaign ID does not exist!")
        return None

    # Compute campaign stop time and duration
    stop_time = utctime_iso8601()
    exec_time = stop_time - self._campaign_start_time

    status = DataModel.STATUS[status]
    if status != DataModel.STATUS.CANCELLED:
        if verdict == Global.BLOCKED:
            verdict = DataModel.VERDICT.BLOCKED
        elif verdict == Global.FAILURE:
            verdict = DataModel.VERDICT.FAILED
        elif verdict == Global.SUCCESS:
            verdict = DataModel.VERDICT.PASSED
        else:
            verdict = DataModel.VERDICT.NA
    else:
        verdict = DataModel.VERDICT.NA

    campaign_info = campaign_info or {}
    payload = {
        "status": status,
        "verdict": verdict,
        "stopTime": str(stop_time.format(ISO8601_TIME_FORMAT)),
        "duration": exec_time.total_seconds() * 1000,
        "results": {
            "executionRate": campaign_info.get("execution_rate", ""),
            "passRate": campaign_info.get("pass_rate", ""),
            "failRate": campaign_info.get("fail_rate", ""),
            "blockedRate": campaign_info.get("blocked_rate", ""),
            "validRate": campaign_info.get("valid_rate", ""),
            "invalidRate": campaign_info.get("invalid_rate", ""),
            "inconclusiveRate": campaign_info.get("inconclusive_rate", ""),
            self._test_framework: campaign_info.get("stats", "")
        }
    }
    header = {'requestId': self._campaign_id}
    header.update(self._header)
    return {'header': header, 'payload': payload}
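
# Hedged sketch (not part of the module): stop_campaign()/start_campaign() return an envelope
# of the form {'header': {...}, 'payload': {...}}. How it is transported is outside this section;
# the snippet below only shows a plausible JSON serialization of such an envelope. All field
# values are illustrative, not real DataModel constants.
import json

_example_stop_campaign_message = {
    "header": {"requestId": "00000000-0000-0000-0000-000000000000"},
    "payload": {
        "status": "COMPLETED",          # illustrative DataModel.STATUS value
        "verdict": "PASSED",            # illustrative DataModel.VERDICT value
        "stopTime": "2024-01-01T12:00:00.000Z",
        "duration": 125000.0,           # milliseconds
        "results": {"executionRate": "100", "passRate": "95", "failRate": "5"},
    },
}
print(json.dumps(_example_stop_campaign_message, indent=2))
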
def send_stop_tc_info(self, verdict, comment=""):
    """
    Send stop test case message on TSMB bus

    :param verdict: test case verdict
    :param comment: test case execution comments
    """
    # Check that tsmb reporting is turned on and the test suite report message has already been sent
    if self.is_reporting_enabled:
        if self.__test_suite_report_msg:
            if self.__test_case_report_msg:
                # Set test case execution information
                # -------------------------------------
                # Compute the test case verdict for the test suite reporting system
                if verdict in TSMBLiveReporting.acsTcVerdictToTsmbTcVerdictMapping:
                    tsmb_verdict = TSMBLiveReporting.acsTcVerdictToTsmbTcVerdictMapping[verdict]
                else:
                    tsmb_verdict = tsmb.TEST_VERDICT.NA

                # Set test case verdict
                self.__test_case_report_msg.verdict = tsmb_verdict
                # Set comment associated to the test case execution
                self.__test_case_report_msg.comment = comment
                # Set test case stop time
                self.__test_case_report_msg.stopTime = utctime_iso8601()

                # Send test case report message
                self.__logger and self.__logger.info(
                    "Stop test case (name={0}, order={1}, verdict={2})".format(
                        str(self.__test_case_report_msg.name),
                        str(self.__test_case_report_msg.sequenceNumber),
                        str(self.__test_case_report_msg.verdict)))
                self.__send_message(self.__test_case_report_msg)
            else:
                self.__logger and self.__logger.error(
                    "Stop test case, but no test case starting procedure was done")
        else:
            self.__logger and self.__logger.error(
                "Stop test case, "
                "but no test suite starting message was previously sent")
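
# Hedged sketch (not part of the module): the verdict translation above relies on a class-level
# mapping (acsTcVerdictToTsmbTcVerdictMapping) from ACS verdict strings to TSMB verdicts, with a
# fallback to NA for unknown values. The mapping contents below are illustrative, not the real
# table; dict.get() expresses the same lookup-with-fallback in one call.
_EXAMPLE_VERDICT_MAPPING = {
    "PASS": "PASSED",
    "FAIL": "FAILED",
    "BLOCKED": "BLOCKED",
}

def _example_translate_verdict(acs_verdict):
    # Unknown verdicts degrade to "NA" instead of raising
    return _EXAMPLE_VERDICT_MAPPING.get(acs_verdict, "NA")

assert _example_translate_verdict("PASS") == "PASSED"
assert _example_translate_verdict("INTERRUPTED") == "NA"
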
def start_campaign(self, campaign_uuid, campaign_name, email):
    """
    Send start campaign event to the server

    :param campaign_uuid: campaign execution instance unique identifier
    :type campaign_uuid: str
    :param campaign_name: campaign name
    :type campaign_name: str
    :param email: user email
    :type email: str
    """
    if self._campaign_id:
        self.__logger and self.__logger.warning(
            "Start campaign info message not sent! "
            "Campaign ID already exists!")
        return None

    self._campaign_id = campaign_uuid

    # Create the campaign and set it to "Running"
    self._campaign_start_time = utctime_iso8601()
    campaign_start_time = str(
        self._campaign_start_time.format(ISO8601_TIME_FORMAT))

    # Update BenchConf instance
    BenchConf.instance().user_email = email

    payload = {
        'id': campaign_uuid,
        'name': campaign_name,
        'userEmail': email,
        'type': 'test',
        'status': DataModel.STATUS.RUNNING,
        'verdict': DataModel.VERDICT.NA,
        'startTime': campaign_start_time,
    }
    header = {'requestId': campaign_uuid}
    header.update(self._header)
    return {'header': header, 'payload': payload}
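
# Hedged sketch (not part of the module): utctime_iso8601() and ISO8601_TIME_FORMAT come from
# elsewhere in the project, and the exact time object they produce is not shown here. A
# plain-datetime stand-in producing an ISO 8601 UTC timestamp would look like this:
from datetime import datetime, timezone

def _example_utc_iso8601():
    # e.g. "2024-01-01T12:00:00.000000+00:00"
    return datetime.now(timezone.utc).isoformat()

print(_example_utc_iso8601())
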
def send_start_tc_info(self, tc_name, tc_order_in_test_suite):
    """
    Send start test case message on TSMB bus

    :param tc_name: test case name
    :param tc_order_in_test_suite: test case order defined in the test suite
    """
    # Check that tsmb reporting is turned on and the test suite report message has already been sent
    if self.is_reporting_enabled:
        if self.__test_suite_report_msg:
            # Initialize test case report message
            self._init_test_case_report_message(
                self.__test_suite_report_msg.testRequestId, tc_name,
                tc_order_in_test_suite)
            if self.__test_case_report_msg:
                # Set test case execution information
                # -------------------------------------
                # Test case execution is starting
                self.__test_case_report_msg.verdict = tsmb.TEST_VERDICT.RUNNING
                # Set test case start time
                self.__test_case_report_msg.startTime = utctime_iso8601()
                # Only send the start test case report message if required
                if self.__testCaseDetailedReportMode:
                    # Send test case report message
                    self.__logger and self.__logger.info(
                        "Start test case (name={0}, sequence order={1})".format(
                            str(tc_name), str(tc_order_in_test_suite)))
                    self.__send_message(self.__test_case_report_msg)
        else:
            self.__logger and self.__logger.error(
                "Start test case {0}, "
                "but no test suite starting message was previously sent".format(
                    str(tc_name)))
def send_stop_campaign_info(self, status):
    """
    Send stop test suite message on TSMB bus

    :param status: test suite execution status
    """
    if self.is_reporting_enabled:
        if self.__test_suite_report_msg:
            status = tsmb.TEST_VERDICT.FAILED if bool(status) else tsmb.TEST_VERDICT.PASSED
            # The build name has been retrieved at the test case execution ending phase
            # Convert test suite status to report system value
            # Feature not ready
            # if status in TSMBLiveReporting.acsTestSuiteStatusToTsmbTestSuiteStatusMapping:
            #     tsmb_status = TSMBLiveReporting.acsTestSuiteStatusToTsmbTestSuiteStatusMapping[status]
            # else:
            #     tsmb_status = ""
            self.__test_suite_report_msg.status = status
            # Retrieve comment associated to test suite status
            self.__test_suite_report_msg.comment = ""
            self.__test_suite_report_msg.stopTime = utctime_iso8601()

            # Add test suite execution information
            # Compute test suite execution rate / test case pass rate / test case failure rate
            # TODO: shall be done in the reporting system to take into account all
            # results from different test benches
            execution_rate = CampaignMetrics.instance().execution_rate
            pass_rate = CampaignMetrics.instance().pass_rate
            fail_rate = CampaignMetrics.instance().fail_rate

            # Specific ACS data for test suite execution
            # TODO: shall be done in the reporting system (info shall be captured at test case level)
            # to take into account all results from different test benches
            acs_stats = {
                "TotalBootCount": CampaignMetrics.instance().total_boot_count,
                "ConnectFailureCount": CampaignMetrics.instance().connect_failure_count,
                "MeanTimeBeforeFailure": CampaignMetrics.instance().mtbf,
                "TimeToCriticalFailure": CampaignMetrics.instance().time_to_first_critical_failure,
                "CriticalFailureCount": CampaignMetrics.instance().critical_failure_count,
                "UnexpectedRebootCount": CampaignMetrics.instance().unexpected_reboot_count,
                "BootFailureCount": CampaignMetrics.instance().boot_failure_count
            }
            self.__test_suite_report_msg.additionalData.update({
                "executionRate": execution_rate,
                "passRate": pass_rate,
                "failRate": fail_rate,
                "ACS": acs_stats
            })

            # Send test suite report message
            self.__logger and self.__logger.info(
                "Stop test suite (uuid:{0})".format(
                    str(self.__test_suite_report_msg.testRequestId)))
            self.__send_message(self.__test_suite_report_msg)

            # Test suite execution is completed on the test bench, delete the test suite message context
            self.__test_suite_report_msg = None
        else:
            self.__logger and self.__logger.error(
                "Stop test suite, "
                "but no test suite starting message was previously sent")
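
# Hedged sketch (not part of the module): CampaignMetrics.instance() above suggests a
# singleton-style accessor; its real implementation lives elsewhere in the project. The
# class below is made up and only illustrates that accessor pattern.
class _ExampleMetrics(object):
    _instance = None

    def __init__(self):
        self.execution_rate = 0.0
        self.pass_rate = 0.0
        self.fail_rate = 0.0

    @classmethod
    def instance(cls):
        # Lazily create and reuse a single shared object
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

assert _ExampleMetrics.instance() is _ExampleMetrics.instance()
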
def stop_testcase(self,
                  verdict,
                  execution_nb,
                  tc_parameters=None,
                  tc_properties=None,
                  tc_comments=None,
                  iteration=False,
                  device_info=None):
    """
    Send stop test case event to the server

    :param verdict: tc verdict
    :type verdict: str
    :param execution_nb: number of test executions
    :type execution_nb: int
    :param tc_parameters: (optional) dict of test execution parameters
    :type tc_parameters: dict
    :param tc_properties: (optional) properties of the test case (b2b iteration, retries, ...)
    :type tc_properties: dict
    :param tc_comments: (optional) list of test execution comments
    :type tc_comments: list
    :param iteration: (optional) True if the test case has at least two iterations
        and is the "parent" test case
    :type iteration: bool
    :param device_info: (optional) additional device info of the DUT
    :type device_info: dict
    """
    if not self._campaign_id:
        self.__logger and self.__logger.warning(
            "Stop test case info message not sent! "
            "Campaign ID does not exist!")
        return None

    if not self._test_id or (not self._test_it_id and iteration):
        self.__logger and self.__logger.warning(
            "There is no currently running test case to stop.")
        return None

    if self._rerun and self._test_id != self._test_parent_id and not iteration:
        bak_test_id = self._test_id
        bak_start_time = self._tc_start_time
        self._test_id = self._test_parent_id
        self.stop_testcase(verdict,
                           execution_nb,
                           tc_parameters=tc_parameters,
                           tc_properties=tc_properties,
                           tc_comments=tc_comments,
                           device_info=device_info)
        self._test_id = bak_test_id
        self._tc_start_time = bak_start_time

    # Compute test case verdict for the test campaign report tool
    if verdict == Verdict.INTERRUPTED:
        status = DataModel.STATUS.CANCELLED
        verdict = DataModel.VERDICT.NA
    else:
        status = DataModel.STATUS.COMPLETED
        verdict = DataModel.VERDICT[verdict]

    # Compute stop time and duration (in milliseconds) from the relevant start time
    # (iteration or test case)
    stop_time = utctime_iso8601()
    start_time = self._tc_it_start_time if iteration else self._tc_start_time
    exec_time = stop_time - start_time
    duration = (exec_time.microseconds / 1000
                + exec_time.seconds * 1000
                + exec_time.days * 24 * 3600 * 1000)

    result_node = self._additional_tc_it_results if iteration else self._additional_tc_results

    if tc_parameters:
        self._test_case_conf.update({"parameters": tc_parameters})
    if tc_properties:
        self._test_case_conf.update({"properties": tc_properties})
    if tc_comments:
        self._test_case_conf.update({"comments": tc_comments})

    payload = {
        "status": status,
        "verdict": verdict,
        "stopTime": str(stop_time.format(ISO8601_TIME_FORMAT)),
        "duration": duration,
        "result": result_node,
        "nbTries": execution_nb,
        self._test_framework: self._test_case_conf
    }

    # Remove crash events, if any, for the next test case
    if self._crash_events:
        self._crash_events = []

    self._update_device_info(payload, device_info, iteration=iteration)

    header = {
        'requestId': self._test_it_id if iteration else self._test_id
    }
    header.update(self._header)

    # Clean data
    if not iteration:
        # Clean external tc results (for the next run)
        self._additional_tc_results = {}
        self._test_id = None
        self._tc_start_time = None
        self._tc_device_info = {}
    else:
        self._additional_tc_it_results = {}
        self._test_it_id = None
        self._tc_it_start_time = None

    return {'header': header, 'payload': payload}
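
# Hedged sketch (not part of the module): the manual millisecond computation in stop_testcase()
# decomposes a timedelta into days/seconds/microseconds, so each term must be scaled to
# milliseconds (hence the extra * 1000 on the days term). The check below compares it against
# the simpler total_seconds() * 1000 form used in stop_campaign().
from datetime import timedelta

_delta = timedelta(days=1, seconds=42, microseconds=500000)
_manual_ms = (_delta.microseconds / 1000
              + _delta.seconds * 1000
              + _delta.days * 24 * 3600 * 1000)
assert _manual_ms == _delta.total_seconds() * 1000  # 86442500.0 ms
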
def start_testcase(self, tc_name, tc_order=1, device_info=None, iteration=False):
    """
    Send start test case event to the server

    :param tc_name: test case name
    :type tc_name: str
    :param tc_order: the test case order
    :type tc_order: int
    :param device_info: (optional) additional device info of the DUT
    :type device_info: dict
    :param iteration: (optional) True if the test case has at least two iterations
        and is the "parent" test case
    :type iteration: bool
    """
    if not self._campaign_id:
        self.__logger and self.__logger.warning(
            "Start test message not sent! "
            "Campaign ID does not exist!")
        return None

    if self._test_id and (self._test_it_id or not iteration):
        self.__logger and self.__logger.warning(
            "Start test message not sent (test_id already exists: {})".format(
                tc_name))
        return None

    if self._rerun and not self._is_rerun_parent(tc_order) and not iteration:
        # Update parent test case
        self.start_testcase(tc_name,
                            name2order(tc_name),
                            device_info=device_info)
        self._test_parent_id = self.__test_cases[name2order(tc_name) - 1]

    start_time = utctime_iso8601()
    # Set the correct start time according to the step's kind
    if iteration:
        self._test_it_id = str(uuid.uuid4())
        self._tc_it_start_time = start_time
    else:
        self._test_id = self.__test_cases[tc_order - 1]
        self._tc_start_time = start_time

    payload = {
        "campaignId": self._campaign_id,
        "startTime": str(start_time.format(ISO8601_TIME_FORMAT)),
        "status": DataModel.STATUS.RUNNING
    }
    self._update_device_info(payload, device_info, iteration=iteration)

    if iteration:
        payload.update({
            "id": self._test_it_id,
            "testCase": os.path.basename(tc_name),
            "parentId": self._test_id
        })

    header = {
        'requestId': self._test_it_id if iteration else self._test_id
    }
    header.update(self._header)
    return {'header': header, 'payload': payload}
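
# Hedged sketch (not part of the module): when iteration=True, start_testcase() generates a
# fresh UUID for the iteration and links it to the enclosing test case via "parentId". The dict
# below only illustrates that relationship; the field values are made up.
import uuid as _uuid

_parent_test_id = str(_uuid.uuid4())
_iteration_payload = {
    "id": str(_uuid.uuid4()),          # per-iteration identifier (_test_it_id)
    "testCase": "MY_TEST_CASE",
    "parentId": _parent_test_id,       # links the iteration to its parent test case
    "status": "RUNNING",               # illustrative DataModel.STATUS value
}
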