def kill_test(self, backtrace_with_reason):
    if not Setup.tester_obj():
        LOGGER.error("no test was registered using 'Setup.set_tester_obj()', not killing")
        return
    test_pid = os.getpid()
    Setup.tester_obj().result.addFailure(Setup.tester_obj(), backtrace_with_reason)
    os.kill(test_pid, signal.SIGUSR2)
    self.terminate()
def kill_test(self, backtrace_with_reason):
    if not Setup.tester_obj():
        LOGGER.error("no test was registered using 'Setup.set_tester_obj()', not killing")
        return
    if not self.signal_sent:
        _test_pid = os.getpid()
        Setup.tester_obj().result.addFailure(Setup.tester_obj(), backtrace_with_reason)
        os.kill(_test_pid, signal.SIGUSR2)
    else:
        raise Exception(f"stop test signal already sent once, ignoring: {str(backtrace_with_reason[1])}")
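Both variants abort the run by signalling their own process with SIGUSR2 after recording the failure, which only works if a handler is installed for that signal; the default disposition for SIGUSR2 would terminate the interpreter before unittest could report anything. Below is a hypothetical minimal sketch of what the receiving side might look like, not code from this project:

import signal

class KilledByKillTest(Exception):
    """Hypothetical marker exception raised when kill_test() signals us."""

def _sigusr2_handler(signum, frame):  # pylint: disable=unused-argument
    # Turn the signal into an exception so the currently running test
    # method unwinds and unittest can finish its reporting.
    raise KilledByKillTest("test run aborted by kill_test()")

signal.signal(signal.SIGUSR2, _sigusr2_handler)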
def add_file_logger(level: int = logging.DEBUG) -> None:
    cmd_path = "-".join(click.get_current_context().command_path.split()[1:])
    logdir = Setup.make_new_logdir(update_latest_symlink=False, postfix=f"-{cmd_path}")
    handler = logging.FileHandler(os.path.join(logdir, "hydra.log"))
    handler.setLevel(level)
    LOGGER.addHandler(handler)
def configure_logging():
    from sdcm.cluster import Setup

    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'default': {
                '()': MultilineMessagesFormatter,
                'format': '< t:%(asctime)s f:%(filename)-15s l:%(lineno)-4s c:%(name)-20s p:%(levelname)-5s > %(message)s',
            },
        },
        'filters': {
            'filter_remote': {
                '()': FilterRemote,
            },
        },
        'handlers': {
            'console': {
                'level': 'INFO',
                'formatter': 'default',
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout',  # default is stderr
                'filters': ['filter_remote'],
            },
            'outfile': {
                'level': 'DEBUG',
                '()': MakeFileHandler,
                'filename': '{}/sct.log'.format(Setup.logdir()),
                'mode': 'w',
                'formatter': 'default',
            },
        },
        'loggers': {
            '': {  # root logger
                'handlers': ['console', 'outfile'],
                'level': 'DEBUG',
                'propagate': True,
            },
            'botocore': {'level': 'CRITICAL'},
            'boto3': {'level': 'CRITICAL'},
            'paramiko.transport': {'level': 'CRITICAL'},
            'cassandra.connection': {'level': 'INFO'},
            'invoke': {'level': 'CRITICAL'},
        },
    })
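configure_logging references three helpers defined elsewhere: MultilineMessagesFormatter, FilterRemote, and MakeFileHandler. In dictConfig, the '()' key means "call this factory to construct the object", so MakeFileHandler must be a callable returning a logging.Handler. A minimal sketch under the assumption that its job is to create the log directory before opening the file (a plain logging.FileHandler raises if the directory is missing):

import logging
import os

def MakeFileHandler(filename, mode='a', encoding=None, delay=False):
    # Assumed behavior: ensure the target directory exists, then
    # delegate to the stock FileHandler; dictConfig invokes this
    # through the '()' factory key of the 'outfile' handler.
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    return logging.FileHandler(filename, mode=mode, encoding=encoding, delay=delay)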
def _create_test_id():
    """Return unified test-id.

    Returns:
        str -- generated test-id for the whole run.
    """
    # avoid cyclic dependencies between cluster and db_stats
    from sdcm.cluster import Setup

    return Setup.test_id()
def clean_resources(ctx, post_behavior, user, test_id, logdir, dry_run, backend):
    """Clean cloud resources.

    There are different options for how to run the cleanup:

    - To clean resources for the latest run according to post behavior
      $ hydra clean-resources --post-behavior
    - The same as above but with an altered logdir
      $ hydra clean-resources --post-behavior --logdir /path/to/logdir
    - To clean resources for some Test ID according to post behavior (test run status extracted from logdir)
      $ hydra clean-resources --post-behavior --test-id TESTID
    - The same as above but with an altered logdir
      $ hydra clean-resources --post-behavior --test-id TESTID --logdir /path/to/logdir
    - To clean resources for the latest run ignoring post behavior
      $ hydra clean-resources
    - The same as above but with an altered logdir
      $ hydra clean-resources --logdir /path/to/logdir
    - To clean all resources belonging to some Test ID:
      $ hydra clean-resources --test-id TESTID
    - To clean all resources belonging to some user:
      $ hydra clean-resources --user vasya.pupkin

    You can also add the --dry-run option to see what would be cleaned.
    """
    add_file_logger()

    user_param = {"RunByUser": user} if user else {}

    if not post_behavior and user and not test_id and not logdir:
        click.echo(f"Clean all resources belonging to user `{user}'")
        params = (user_param, )
    else:
        if not logdir and (post_behavior or not test_id):
            logdir = Setup.base_logdir()
        if not test_id and (latest_test_id := search_test_id_in_latest(logdir)):
            click.echo(f"Latest TestId in {logdir} is {latest_test_id}")
            test_id = (latest_test_id, )
        if not test_id:
            click.echo(clean_resources.get_help(ctx))
            return
        if post_behavior:
            click.echo(f"Clean resources according to post behavior for the following Test IDs: {test_id}")
        else:
            click.echo(f"Clean all resources for the following Test IDs: {test_id}")
        params = ({"TestId": tid, **user_param} for tid in test_id)
def run_test(argv, backend, config, logdir):
    if config:
        os.environ['SCT_CONFIG_FILES'] = str(list(config))
    if backend:
        os.environ['SCT_CLUSTER_BACKEND'] = backend
    if logdir:
        os.environ['_SCT_LOGDIR'] = logdir

    logfile = os.path.join(Setup.logdir(), 'output.log')
    sys.stdout = OutputLogger(logfile, sys.stdout)
    sys.stderr = OutputLogger(logfile, sys.stderr)

    unittest.main(module=None, argv=['python -m unittest', argv],
                  testRunner=xmlrunner.XMLTestRunner(
                      stream=sys.stderr,
                      output=os.path.join(Setup.logdir(), 'test-reports')),
                  failfast=False, buffer=False, catchbreak=True)
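OutputLogger is not defined in this section; from its use here it has to behave like a tee, mirroring everything written to stdout/stderr into output.log while still passing it to the original stream. A minimal sketch under that assumption (the real class may buffer or filter differently):

class OutputLogger:
    """Hypothetical tee-style stream wrapper: writes go to a log file
    and to the wrapped terminal stream."""

    def __init__(self, filename, terminal):
        self.terminal = terminal
        self.log = open(filename, "a", buffering=1)  # line-buffered

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        self.terminal.flush()
        self.log.flush()

    def isatty(self):
        # Some consumers (click, xmlrunner) probe the stream for TTY support.
        return False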
def clean_resources(ctx, user, test_id, logdir, config_file):  # pylint: disable=too-many-arguments,too-many-branches
    add_file_logger()

    params = dict()
    if config_file or logdir:
        if not logdir:
            logdir = Setup.base_logdir()
        if logdir and not test_id:
            test_id = (search_test_id_in_latest(logdir), )
        if not logdir or not all(test_id):
            click.echo(clean_resources.get_help(ctx))
            return
        # need to pass SCTConfiguration verification,
        # but must not affect the result
        os.environ['SCT_CLUSTER_BACKEND'] = "aws"
        if config_file:
            os.environ['SCT_CONFIG_FILES'] = str(list(config_file))
        config = SCTConfiguration()
        for _test_id in test_id:
            params['TestId'] = _test_id
            clean_resources_according_post_behavior(params, config, logdir)
    else:
        if not (user or test_id):
            click.echo(clean_resources.get_help(ctx))
            return
        if user:
            params['RunByUser'] = user
        if test_id:
            for _test_id in test_id:
                params['TestId'] = _test_id
                clean_cloud_resources(params)
                click.echo('cleaned instances for {}'.format(params))
        else:
            clean_cloud_resources(params)
            click.echo('cleaned instances for {}'.format(params))
def get_test_details(self):
    # avoid cyclic dependencies between cluster and db_stats
    from sdcm.cluster import Setup

    test_details = {}
    test_details['sct_git_commit'] = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
    test_details['job_name'] = get_job_name()
    test_details['job_url'] = os.environ.get('BUILD_URL', '')
    test_details['start_host'] = platform.node()
    test_details['test_duration'] = self.params.get(key='test_duration', default=60)
    test_details['start_time'] = time.time()
    test_details['grafana_snapshots'] = []
    test_details['grafana_screenshots'] = []
    test_details['grafana_annotations'] = []
    test_details['prometheus_data'] = ""
    test_details['test_id'] = Setup.test_id()
    test_details['log_files'] = {}
    return test_details
def _create_test_id(doc_id_with_timestamp=False):
    """Return doc_id equal to the unified test-id.

    Generate a doc_id for the ES document from the unified global test-id;
    if doc_id_with_timestamp is True, create the ES document with the
    global test_id plus a timestamp.

    :param doc_id_with_timestamp: add timestamp to test_id, defaults to False
    :type doc_id_with_timestamp: bool, optional
    :returns: doc_id for the document in ES
    :rtype: {str}
    """
    # avoid cyclic dependencies between cluster and db_stats
    from sdcm.cluster import Setup

    doc_id = Setup.test_id()
    if doc_id_with_timestamp:
        doc_id += "_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f"))
    return doc_id
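For illustration, the two shapes the returned doc_id can take (the UUID below is made up; the suffix follows the %Y%m%d_%H%M%S_%f format used above):

# plain:          "f8c5a5e2-0000-4b5f-8d8e-123456789abc"
# with timestamp: "f8c5a5e2-0000-4b5f-8d8e-123456789abc_20200317_142501_123456"
doc_id = _create_test_id(doc_id_with_timestamp=True)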
def setUp(self):
    Setup.set_tester_obj(self)
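setUp registers the tester object in Setup so that other components, such as the kill_test wrappers below, can reach it later. For illustration, a stripped-down stand-in for that registry; the real Setup lives in sdcm.cluster and does far more:

class Setup:  # hypothetical stand-in, for illustration only
    _tester = None

    @classmethod
    def set_tester_obj(cls, tester):
        # Keep a process-wide reference to the currently running tester.
        cls._tester = tester

    @classmethod
    def tester_obj(cls):
        return cls._tester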
def kill_test(self, backtrace_with_reason):
    self.terminate()
    if not Setup.tester_obj():
        LOGGER.error("no test was registered using 'Setup.set_tester_obj()', not killing")
        return
    Setup.tester_obj().kill_test(backtrace_with_reason)
def kill_test(self, backtrace_with_reason) -> None:
    self.terminate()
    if tester := Setup.tester_obj():
        tester.kill_test(backtrace_with_reason)