def _reuse_dev_cluster(self) -> clusterlib.ClusterLib:
    """Reuse cluster that was already started outside of test framework."""
    instance_num = 0
    self.cm._cluster_instance_num = instance_num
    cluster_nodes.set_cluster_env(instance_num)
    state_dir = cluster_nodes.get_cluster_env().state_dir

    # The instance dir must exist even though the cluster wasn't started by us.
    instance_dir = self.cm.lock_dir / f"{CLUSTER_DIR_TEMPLATE}{instance_num}"
    instance_dir.mkdir(exist_ok=True, parents=True)

    # Reuse a cached cluster object when available, otherwise build a fresh one.
    cluster_obj = self.cm.cache.cluster_obj or cluster_nodes.get_cluster_type().get_cluster_obj()

    # Create faucet addresses unless the addresses data is already present.
    addrs_data_present = (state_dir / cluster_nodes.ADDRS_DATA).exists()
    if not addrs_data_present:
        addrs_dir = state_dir / "addrs_data"
        addrs_dir.mkdir(exist_ok=True, parents=True)
        cluster_nodes.setup_test_addrs(cluster_obj, addrs_dir)

    # Reload cluster data if needed.
    self._reload_cluster_obj(state_dir=state_dir)

    return cluster_obj
def _setup_dev_cluster(self) -> None:
    """Set up cluster instance that was already started outside of test framework."""
    state_dir = cluster_nodes.get_cluster_env().work_dir / f"{cluster_nodes.STATE_CLUSTER}0"

    # Nothing to do when the addresses data was already created.
    if (state_dir / cluster_nodes.ADDRS_DATA).exists():
        return

    self.cm._log("c0: setting up dev cluster")

    # Keep "addrs_data" directly inside the cluster state dir so it can be reused
    # (in normal non-`DEV_CLUSTER_RUNNING` setup we want "addrs_data" stored among
    # tests artifacts, so it can be used during cleanup etc.).
    addrs_dir = state_dir / "addrs_data"
    addrs_dir.mkdir(exist_ok=True, parents=True)

    cluster_obj = cluster_nodes.get_cluster_type().get_cluster_obj()
    cluster_nodes.setup_test_addrs(cluster_obj=cluster_obj, destination_dir=addrs_dir)
def _restart(self, start_cmd: str = "", stop_cmd: str = "") -> bool:  # noqa: C901
    """Restart cluster.

    Not called under global lock!

    Args:
        start_cmd: Custom start command; empty string means the default start script.
        stop_cmd: Custom stop command; empty string means the default stop script.

    Returns:
        True when the cluster is up (or the restart was deliberately skipped),
        False when the cluster could not be started.

    Raises:
        RuntimeError: When `FORBID_RESTART` is set and the cluster was already started.
    """
    # pylint: disable=too-many-branches
    cluster_running_file = self.cm.instance_dir / CLUSTER_RUNNING_FILE

    # Don't restart cluster if it was started outside of test framework.
    if configuration.DEV_CLUSTER_RUNNING:
        self.cm._log(
            f"c{self.cm.cluster_instance_num}: ignoring restart, dev cluster is running"
        )
        if cluster_running_file.exists():
            LOGGER.warning(
                "Ignoring requested cluster restart as 'DEV_CLUSTER_RUNNING' is set."
            )
        else:
            helpers.touch(cluster_running_file)
        return True

    # Fail if cluster restart is forbidden and it was already started.
    if configuration.FORBID_RESTART and cluster_running_file.exists():
        raise RuntimeError("Cannot restart cluster when 'FORBID_RESTART' is set.")

    self.cm._log(
        f"c{self.cm.cluster_instance_num}: called `_restart`, start_cmd='{start_cmd}', "
        f"stop_cmd='{stop_cmd}'"
    )

    startup_files = cluster_nodes.get_cluster_type().cluster_scripts.prepare_scripts_files(
        destdir=self.cm._create_startup_files_dir(self.cm.cluster_instance_num),
        instance_num=self.cm.cluster_instance_num,
        start_script=start_cmd,
        stop_script=stop_cmd,
    )

    state_dir = cluster_nodes.get_cluster_env().state_dir

    self.cm._log(
        f"c{self.cm.cluster_instance_num}: in `_restart`, new files "
        f"start_cmd='{startup_files.start_script}', "
        f"stop_cmd='{startup_files.stop_script}'"
    )

    excp: Optional[Exception] = None
    for i in range(2):
        if i > 0:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\nretrying"
            )
            time.sleep(0.2)

        # Stop the cluster first; a failure to stop is logged but not fatal.
        try:
            # Lazy `%s` args so the message is formatted only when the record
            # is actually emitted (avoids eager f-string formatting in logging).
            LOGGER.info("Stopping cluster with `%s`.", startup_files.stop_script)
            helpers.run_command(str(startup_files.stop_script))
        except Exception as err:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: failed to stop cluster:\n{err}"
            )

        # Save artifacts only when produced during this test run.
        if cluster_running_file.exists():
            artifacts.save_start_script_coverage(
                log_file=state_dir / CLUSTER_START_CMDS_LOG,
                pytest_config=self.cm.pytest_config,
            )
            artifacts.save_cluster_artifacts(
                save_dir=self.cm.pytest_tmp_dir, state_dir=state_dir
            )

        shutil.rmtree(state_dir, ignore_errors=True)

        # Best-effort kill of a possibly lingering supervisor process.
        with contextlib.suppress(Exception):
            _kill_supervisor(self.cm.cluster_instance_num)

        try:
            cluster_obj = cluster_nodes.start_cluster(
                cmd=str(startup_files.start_script),
                args=startup_files.start_script_args,
            )
        except Exception as err:
            LOGGER.error("Failed to start cluster: %s", err)
            excp = err
        else:
            break
    else:
        # Both start attempts failed - mark this cluster instance as dead.
        self.cm._log(
            f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\ncluster dead"
        )
        if not configuration.IS_XDIST:
            pytest.exit(msg=f"Failed to start cluster, exception: {excp}", returncode=1)
        helpers.touch(self.cm.instance_dir / CLUSTER_DEAD_FILE)
        return False

    # Create temp dir for faucet addresses data.
    # Pytest's mktemp adds number to the end of the dir name, so keep the trailing '_'
    # as separator. Resulting dir name is e.g. 'addrs_data_ci3_0'.
    tmp_path = Path(
        self.cm.tmp_path_factory.mktemp(f"addrs_data_ci{self.cm.cluster_instance_num}_")
    )

    # Setup faucet addresses.
    cluster_nodes.setup_test_addrs(cluster_obj=cluster_obj, destination_dir=tmp_path)

    # Create file that indicates that the cluster is running.
    if not cluster_running_file.exists():
        helpers.touch(cluster_running_file)

    return True
def _restart(self, start_cmd: str = "", stop_cmd: str = "") -> bool:  # noqa: C901
    """Restart cluster.

    Not called under global lock!

    Args:
        start_cmd: Custom start command; empty string means the default start script.
        stop_cmd: Custom stop command; empty string means the default stop script.

    Returns:
        True when the cluster is up (or the restart was deliberately skipped),
        False when the cluster could not be started.

    Raises:
        RuntimeError: When `FORBID_RESTART` is set and the cluster was already started.
    """
    # pylint: disable=too-many-branches
    cluster_running_file = self.cm.instance_dir / CLUSTER_RUNNING_FILE

    # Don't restart cluster if it was started outside of test framework.
    if DEV_CLUSTER_RUNNING:
        if cluster_running_file.exists():
            LOGGER.warning(
                "Ignoring requested cluster restart as 'DEV_CLUSTER_RUNNING' is set."
            )
        else:
            # `Path.touch` instead of `open(..., "a").close()` - same effect, idiomatic.
            cluster_running_file.touch()
        return True

    # Fail if cluster restart is forbidden and it was already started.
    if FORBID_RESTART and cluster_running_file.exists():
        raise RuntimeError("Cannot restart cluster when 'FORBID_RESTART' is set.")

    self.cm._log(
        f"c{self.cm.cluster_instance_num}: called `_restart`, start_cmd='{start_cmd}', "
        f"stop_cmd='{stop_cmd}'"
    )

    startup_files = cluster_nodes.get_cluster_type().cluster_scripts.prepare_scripts_files(
        destdir=self.cm._create_startup_files_dir(self.cm.cluster_instance_num),
        instance_num=self.cm.cluster_instance_num,
        start_script=start_cmd,
        stop_script=stop_cmd,
    )

    state_dir = cluster_nodes.get_cluster_env().state_dir

    self.cm._log(
        f"c{self.cm.cluster_instance_num}: in `_restart`, new files "
        f"start_cmd='{startup_files.start_script}', "
        f"stop_cmd='{startup_files.stop_script}'"
    )

    excp: Optional[Exception] = None
    for i in range(2):
        if i > 0:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\nretrying"
            )
            time.sleep(0.2)

        # Stop the cluster first; a failure to stop is logged but not fatal.
        try:
            cluster_nodes.stop_cluster(cmd=str(startup_files.stop_script))
        except Exception as err:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: failed to stop cluster:\n{err}"
            )

        # Save artifacts only when produced during this test run.
        if cluster_running_file.exists():
            cli_coverage.save_start_script_coverage(
                log_file=state_dir / CLUSTER_START_CMDS_LOG,
                pytest_config=self.cm.pytest_config,
            )
            self._restart_save_cluster_artifacts(clean=True)

        # Best-effort kill of a possibly lingering supervisor process.
        try:
            _kill_supervisor(self.cm.cluster_instance_num)
        except Exception:
            pass

        try:
            cluster_obj = cluster_nodes.start_cluster(
                cmd=str(startup_files.start_script),
                args=startup_files.start_script_args,
            )
        except Exception as err:
            # Lazy `%s` arg so the message is formatted only when emitted.
            LOGGER.error("Failed to start cluster: %s", err)
            excp = err
        else:
            break
    else:
        # Both start attempts failed - mark this cluster instance as dead.
        self.cm._log(
            f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\ncluster dead"
        )
        if not helpers.IS_XDIST:
            pytest.exit(msg=f"Failed to start cluster, exception: {excp}", returncode=1)
        (self.cm.instance_dir / CLUSTER_DEAD_FILE).touch()
        return False

    # Setup faucet addresses.
    tmp_path = Path(self.cm.tmp_path_factory.mktemp("addrs_data"))
    cluster_nodes.setup_test_addrs(cluster_obj, tmp_path)

    # Create file that indicates that the cluster is running.
    if not cluster_running_file.exists():
        cluster_running_file.touch()

    return True
def _restart(self, start_cmd: str = "", stop_cmd: str = "") -> bool:  # noqa: C901
    """Restart cluster.

    Not called under global lock!

    Args:
        start_cmd: Custom start command; empty string means the default start script.
        stop_cmd: Custom stop command; empty string means the default stop script.

    Returns:
        True when the cluster is up (or the restart was deliberately skipped),
        False when the cluster could not be started.
    """
    # Don't restart cluster if it was started outside of test framework.
    if self.cm.num_of_instances == 1 and DEV_CLUSTER_RUNNING:
        return True

    # Using `_locked_log` because restart is not called under global lock.
    self.cm._locked_log(
        f"c{self.cm.cluster_instance}: called `_restart`, start_cmd='{start_cmd}', "
        f"stop_cmd='{stop_cmd}'"
    )

    startup_files = cluster_nodes.get_cluster_type().cluster_scripts.prepare_scripts_files(
        destdir=self.cm._create_startup_files_dir(self.cm.cluster_instance),
        instance_num=self.cm.cluster_instance,
        start_script=start_cmd,
        stop_script=stop_cmd,
    )

    self.cm._locked_log(
        f"c{self.cm.cluster_instance}: in `_restart`, new files "
        f"start_cmd='{startup_files.start_script}', "
        f"stop_cmd='{startup_files.stop_script}'"
    )

    excp: Optional[Exception]
    for i in range(2):
        # Reset on every attempt so a stale exception isn't reported later.
        excp = None
        if i > 0:
            self.cm._locked_log(
                f"c{self.cm.cluster_instance}: failed to start cluster, retrying"
            )
            time.sleep(0.2)

        # Stop the cluster first; a failure to stop is not fatal.
        try:
            cluster_nodes.stop_cluster(cmd=str(startup_files.stop_script))
        except Exception:
            pass

        self._restart_save_cluster_artifacts(clean=True)

        # Best-effort kill of a possibly lingering supervisor process.
        try:
            _kill_supervisor(self.cm.cluster_instance)
        except Exception:
            pass

        try:
            cluster_obj = cluster_nodes.start_cluster(
                cmd=str(startup_files.start_script),
                args=startup_files.start_script_args,
            )
        except Exception as err:
            # Lazy `%s` arg so the message is formatted only when emitted.
            LOGGER.error("Failed to start cluster: %s", err)
            excp = err
        else:
            break
    else:
        # Both start attempts failed - mark this cluster instance as dead.
        if not helpers.IS_XDIST:
            pytest.exit(msg=f"Failed to start cluster, exception: {excp}", returncode=1)
        # `Path.touch` instead of `open(..., "a").close()` - same effect, idiomatic.
        (self.cm.instance_dir / CLUSTER_DEAD_FILE).touch()
        return False

    # Setup faucet addresses.
    tmp_path = Path(self.cm.tmp_path_factory.mktemp("addrs_data"))
    cluster_nodes.setup_test_addrs(cluster_obj, tmp_path)

    # Create file that indicates that the cluster is running.
    cluster_running_file = self.cm.instance_dir / CLUSTER_RUNNING_FILE
    if not cluster_running_file.exists():
        cluster_running_file.touch()

    return True