def is_statistics_driver_finished(config, test_name):
    with ssh_with_config(config.statistics_host, config) as ssh_client:
        # If the driver JVM is no longer running, there is nothing left to wait for.
        if not ssh_client.is_java_running():
            return True
        driver_log_path = posixpath.join(config.remote_home, test_name,
                                         "ignite.log")
        cluster_stopped_line = ("Cluster is considered overloaded"
                                " and will be stopped.")
        return ssh_client.check_file_contains_text(driver_log_path,
                                                   cluster_stopped_line)
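# A minimal usage sketch, not part of the original module: a runner could poll
# is_statistics_driver_finished until the driver stops. wait_for_statistics_driver
# is a hypothetical name; the poll interval reuses common.test_running_poll_time,
# which the prepopulation wait loop further below also relies on.
def wait_for_statistics_driver(config, test_name):
    while not is_statistics_driver_finished(config, test_name):
        time.sleep(common.test_running_poll_time)
    logger.info("Statistics driver on %s has finished.", config.statistics_host)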
def __call__(self, host, config, *args, **kwargs):
    with ssh_with_config(host, config) as ssh_client:
        preparator = self._preparator_cls(ssh_client, config, *args, **kwargs)
        preparator.prepare()
        try:
            node_type = config.determine_node_type(host)
            logger.info("Bootstrapped %s on %s.",
                        node_type.cluster_name, host)
        except HostOfUnknownTypeException:
            logger.info("Bootstrapped %s.", host)
def download_statistics_report(config, test_name):
    reportdir = os.path.join(get_script_workdir(), test_name)
    remote_reportdir = posixpath.join(config.remote_home, test_name,
                                      common.reportdir_name)
    if os.path.exists(reportdir):
        # Move aside a report directory left over from a previous run.
        timestamp = int(time.time())
        os.rename(reportdir, reportdir + "_" + str(timestamp))
    os.makedirs(reportdir)
    with ssh_with_config(config.statistics_host, config) as ssh_client:
        ssh_client.download(remote_reportdir, reportdir)
    logger.info("Downloaded statistics reports to %s.", reportdir)
def _start_server(self, host):
    start_command = (
        "{0}/bin/kafka-server-start.sh"
        " {0}/config/server.properties > kafka.log").format(
            self._config.kafka_dir_name)
    with ssh_with_config(host, self._config) as ssh_client:
        self._prestart_common(ssh_client)
        ssh_client.ssh_exec(start_command, in_background=True)
        check_started = partial(KafkaClusterStarter._check_started,
                                ssh_client=ssh_client)
        self._wait_till_started(host, "Kafka", check_started)
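# A sketch of what the _check_started predicate bound above might look like; this
# is an assumption, since the real KafkaClusterStarter._check_started is not shown
# in these snippets. It reuses check_file_contains_text from the other helpers, and
# the exact line to search for in kafka.log depends on the Kafka version.
@staticmethod
def _check_started(ssh_client):
    return ssh_client.check_file_contains_text("kafka.log", "started")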
def _start_server(self, host):
    host_index = self._config.zookeeper_hosts.index(host) + 1
    start_command = "{}/bin/zkServer.sh start".format(
        self._config.zookeeper_dir_name)
    with ssh_with_config(host, self._config) as ssh_client:
        self._prestart_common(ssh_client)
        # Every Zookeeper node needs a unique server id in its data directory.
        ssh_client.ssh_exec("echo '{}' > {}/myid".format(
            host_index, common.zookeeper_data_dir))
        ssh_client.ssh_exec(start_command)
        check_started = partial(self._check_started, ssh_client=ssh_client)
        self._wait_till_started(host, "Zookeeper", check_started)
def _start(self, host, test_name):
    with ssh_with_config(host, self._config) as ssh_client:
        self._prestart_common(ssh_client)
        testdir = posixpath.join(self._config.remote_home, test_name)
        if ssh_client.path_exists(testdir):
            # Back up files left over from previous runs, if any.
            timestamp = int(time.time())
            ssh_client.sudo_exec("mv {0} {0}_{1}".format(testdir, timestamp))
        ssh_client.ssh_exec("mkdir -p " + testdir)
        start_command = IgniteStarter.__start_command_pattern.format(
            testdir=testdir,
            remote_home=self._config.remote_home,
            start_script=self._start_script)
        self._do_before_start(ssh_client, testdir)
        ssh_client.ssh_exec(start_command, in_background=True)
        log_file = posixpath.join(testdir, "ignite.log")
        check_started = partial(ssh_client.has_ignite_started,
                                log_file=log_file)
        self._wait_till_started(host, "Ignite", check_started)
def _start_first_client_and_wait_for_prepopulate(self):
    host = self._config.client_hosts[0]
    self._start(host, self._test_name)
    log_file = posixpath.join(self._config.remote_home, self._test_name,
                              "ignite.log")
    prepopulation_start_time = time.time()
    timeout_expiration_reported = False
    logger.info("Waiting for the first client to prepopulate caches.")
    while True:
        with ssh_with_config(host, self._config) as ssh_client:
            text_to_find = "Setting up load tests driver"
            if ssh_client.check_file_contains_text(log_file, text_to_find):
                logger.info("Prepopulation finished.")
                return
        if not timeout_expiration_reported:
            passed_time = time.time() - prepopulation_start_time
            if passed_time > common.prepopulation_timeout:
                logger.warning(
                    "Cache prepopulation on node %s has not completed"
                    " in %.2f seconds. Continuing to wait.",
                    host, common.prepopulation_timeout)
                timeout_expiration_reported = True
        time.sleep(common.test_running_poll_time)
def _ensure_gmond_stopped(self, host, config):
    with ssh_with_config(host, config) as ssh_client:
        ssh_client.stop_process("gmond")
        logger.info("Stopped gmond sender on %s; it is not used in the"
                    " current test.", host)
def stop_java_forcibly(host, config, expected_stopped_process_name):
    with ssh_with_config(host, config) as ssh_client:
        if ssh_client.stop_process("java"):
            logger.info("%s on %s was stopped forcibly.",
                        expected_stopped_process_name, host)
def cleanup_host(host, config):
    with ssh_with_config(host, config) as ssh_client:
        cleaner = Cleaner(ssh_client, config)
        cleaner.cleanup()
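# A minimal composition sketch, not from the original module: tearing down a test
# by forcibly stopping Java and cleaning every client host plus the statistics
# host. teardown_test is a hypothetical name; client_hosts and statistics_host are
# config attributes that the snippets above already use.
def teardown_test(config):
    for host in config.client_hosts + [config.statistics_host]:
        stop_java_forcibly(host, config, "Test process")
        cleanup_host(host, config)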