def on_test_stop(self) -> None:
    """Perform actions after a test is finished."""
    if self._cluster_instance_num == -1:
        return

    self._log(f"c{self._cluster_instance_num}: called `on_test_stop`")

    # Search for errors in cluster logfiles.
    errors = logfiles.search_cluster_artifacts()

    with locking.FileLockIfXdist(self.cluster_lock):
        # Only one test runs on a worker at a time, so deleting the worker's rules
        # file right after its test finishes is safe: the rules then apply only from
        # the moment they were added (via `logfiles.add_ignore_rule`) until the end
        # of the test.
        # The exception is a scheduled restart. When a failed test left the cluster
        # instance marked for restart, other tests are still finishing on it; if the
        # ignored error keeps getting printed into the log file, those tests would
        # report it. Keep the rules file in that case.
        restart_scheduled = any(self.instance_dir.glob(f"{RESTART_NEEDED_GLOB}_*"))
        if not restart_scheduled:
            logfiles.clean_ignore_rules(ignore_file_id=self.worker_id)

        # Remove both kinds of resource status files ("locked" and "in use")
        # created by this worker.
        for glob_prefix in (RESOURCE_LOCKED_GLOB, RESOURCE_IN_USE_GLOB):
            for status_file in self.instance_dir.glob(
                f"{glob_prefix}_*_{self.worker_id}"
            ):
                status_file.unlink()

        # Remove the marker file that indicates a test is running on this worker.
        (self.instance_dir / f"{TEST_RUNNING_GLOB}_{self.worker_id}").unlink(
            missing_ok=True
        )

    if errors:
        logfiles.report_artifacts_errors(errors)
def on_test_stop(self) -> None:
    """Perform actions after the test finished.

    Under the cluster-wide lock: remove the per-worker resource status files and
    per-worker/per-test marker files, then search cluster log artifacts and report
    any errors found.
    """
    if self._cluster_instance_num == -1:
        return

    with helpers.FileLockIfXdist(self.cluster_lock):
        # NOTE(review): the guard above reads `self._cluster_instance_num` while the
        # log message reads `self.cluster_instance_num` — presumably a property over
        # the same value; confirm both exist.
        self._log(f"c{self.cluster_instance_num}: called `on_test_stop`")

        # Remove resource locking files created by the worker.
        # Glob results are `Path` objects — use `Path.unlink` instead of `os.remove`,
        # and iterate the generator directly instead of materializing a list.
        for f in self.instance_dir.glob(f"{RESOURCE_LOCKED_GLOB}_*_{self.worker_id}"):
            f.unlink()

        # Remove "resource in use" files created by the worker.
        for f in self.instance_dir.glob(f"{RESOURCE_IN_USE_GLOB}_*_{self.worker_id}"):
            f.unlink()

        # Remove file that indicates that a test is running on the worker.
        # `missing_ok=True` replaces the try/except FileNotFoundError boilerplate.
        (self.instance_dir / f"{TEST_RUNNING_GLOB}_{self.worker_id}").unlink(
            missing_ok=True
        )

        # Remove file that indicates the test was a singleton.
        (self.instance_dir / TEST_SINGLETON_FILE).unlink(missing_ok=True)

        # Search for errors in cluster logfiles and report them.
        errors = logfiles.search_cluster_artifacts()
        if errors:
            logfiles.report_artifacts_errors(errors)