def __init__(self):
    """Collect facts about the running Kibana container under test.

    Reads the expected version from ./bin/elastic-version, resolves the
    image name/tag (honoring staging builds), and snapshots the node
    process, its environment, and `docker inspect` metadata.
    """
    self.version = run('./bin/elastic-version', stdout=PIPE).stdout.decode().strip()
    # Read --image-flavor once; keep both attribute names for existing callers.
    self.flavor = config.getoption('--image-flavor')
    self.image_flavor = self.flavor
    self.url = 'http://localhost:5601'
    # Kibana runs as a node process inside the container.
    self.process = host.process.get(comm='node')
    # One VAR=value pair per line of `env` output; skip lines without '='
    # (e.g. a trailing blank line) which would otherwise crash dict().
    self.environment = dict(
        line.split('=', 1)
        for line in self.stdout_of('env').split('\n')
        if '=' in line
    )
    # Staging builds append the build number to the version tag.
    if 'STAGING_BUILD_NUM' in os.environ:
        self.tag = '%s-%s' % (self.version, os.environ['STAGING_BUILD_NUM'])
    else:
        self.tag = self.version
    # The "full" flavor uses the bare repository name; other flavors are
    # suffixed (e.g. kibana-oss).
    if self.flavor != 'full':
        self.image = 'docker.elastic.co/kibana/kibana-%s:%s' % (self.flavor, self.tag)
    else:
        self.image = 'docker.elastic.co/kibana/kibana:%s' % self.tag
    self.docker_metadata = json.loads(
        run(['docker', 'inspect', self.image], stdout=PIPE).stdout.decode())[0]
def assert_healthy(self):
    """Assert the cluster reports the expected node count and health.

    Single-node clusters may legitimately be yellow (unassigned replica
    shards); two-node clusters must be green.
    """
    single_node = config.getoption('--single-node')
    expected_nodes = 1 if single_node else 2
    acceptable_statuses = ('yellow', 'green') if single_node else ('green',)
    assert self.get_node_count() == expected_nodes
    assert self.get_cluster_status() in acceptable_statuses
def __init__(self):
    """Collect facts about the running Elasticsearch container under test.

    Resolves the image name/tag (honoring staging builds), configures HTTP
    auth for the platinum flavor, waits for the cluster to be healthy, and
    resets index state so each test starts from a clean slate.
    """
    self.version = run('./bin/elastic-version', stdout=PIPE).stdout.decode().strip()
    # Use the module-level `config` consistently -- the original mixed in
    # `pytest.config`, which is deprecated and removed in modern pytest.
    # Read --image-flavor once and reuse it below.
    self.flavor = config.getoption('--image-flavor')
    self.url = 'http://localhost:9200'
    # Only the platinum flavor ships with security enabled.
    if self.flavor == 'platinum':
        self.auth = HTTPBasicAuth('elastic', Elasticsearch.bootstrap_pwd)
    else:
        self.auth = ''
    # Staging builds append the build number to the version tag.
    if 'STAGING_BUILD_NUM' in os.environ:
        self.tag = '%s-%s' % (self.version, os.environ['STAGING_BUILD_NUM'])
    else:
        self.tag = self.version
    # The "full" flavor uses the bare repository name; other flavors are
    # suffixed (e.g. elasticsearch-oss).
    if self.flavor != 'full':
        self.image = 'docker.elastic.co/elasticsearch/elasticsearch-%s:%s' % (self.flavor, self.tag)
    else:
        self.image = 'docker.elastic.co/elasticsearch/elasticsearch:%s' % self.tag
    self.docker_metadata = json.loads(
        run(['docker', 'inspect', self.image], stdout=PIPE).stdout.decode())[0]
    self.assert_healthy()
    # Elasticsearch runs as a java process inside the container.
    self.process = host.process.get(comm='java')
    # Start each test with a clean slate.
    assert self.load_index_template().status_code == codes.ok
    assert self.delete().status_code == codes.ok
def get_docker_log(self):
    """Return this container's docker-compose log output as text."""
    compose_file = 'docker-compose-{}.yml'.format(config.getoption('--image-flavor'))
    cmd = ['docker-compose', '-f', compose_file, 'logs', self.get_hostname()]
    return run(cmd, stdout=PIPE).stdout.decode()
def __init__(self):
    """Gather handles on the running Logstash container under test."""
    self.name = container_name
    # Logstash runs as a java process inside the container.
    self.process = host.process.get(comm='java')
    self.settings_file = host.file('/usr/share/logstash/config/logstash.yml')
    self.image_flavor = config.getoption('--image-flavor')
    self.image = 'docker.elastic.co/logstash/logstash-{}:{}'.format(
        self.image_flavor, version)
def __init__(self):
    """Collect runtime facts about the Kibana container under test."""
    self.url = 'http://localhost:5601'
    # Kibana runs as a node process inside the container.
    self.process = host.process.get(comm='node')
    self.image_flavor = config.getoption('--image-flavor')
    # One VAR=value pair per line of `env` output.
    env_pairs = (
        line.split('=', 1)
        for line in self.stdout_of('env').split('\n')
    )
    self.environment = dict(env_pairs)
def __init__(self):
    """Collect facts about the running Logstash container under test.

    Reads the expected version from ./bin/elastic-version, snapshots the
    java process and settings file, and loads `docker inspect` metadata
    for the image.
    """
    self.version = run('./bin/elastic-version', stdout=PIPE).stdout.decode().strip()
    # Read --image-flavor once; keep both attribute names for existing callers.
    self.flavor = config.getoption('--image-flavor')
    self.image_flavor = self.flavor
    self.name = container_name
    # Logstash runs as a java process inside the container.
    self.process = host.process.get(comm='java')
    self.settings_file = host.file('/usr/share/logstash/config/logstash.yml')
    # NOTE(review): the image tag uses the module-level `version`, while
    # self.tag (below) folds in STAGING_BUILD_NUM -- confirm the image
    # reference should not use self.tag instead.
    self.image = 'docker.elastic.co/logstash/logstash-%s:%s' % (
        self.image_flavor, version)
    # Staging builds append the build number to the version tag.
    if 'STAGING_BUILD_NUM' in os.environ:
        self.tag = '%s-%s' % (self.version, os.environ['STAGING_BUILD_NUM'])
    else:
        self.tag = self.version
    self.docker_metadata = json.loads(
        run(['docker', 'inspect', self.image], stdout=PIPE).stdout.decode())[0]
def __init__(self):
    """Connect to the running Elasticsearch container and reset its state.

    Configures HTTP auth for the platinum flavor, asserts the cluster is
    healthy, then reloads the index template and deletes existing indices
    so every test starts from a clean slate.
    """
    self.url = 'http://localhost:9200'
    flavor = config.getoption('--image-flavor')
    # Only the platinum flavor ships with security enabled.
    self.auth = (
        HTTPBasicAuth('elastic', Elasticsearch.bootstrap_pwd)
        if flavor == 'platinum'
        else ''
    )
    self.assert_healthy()
    # Elasticsearch runs as a java process inside the container.
    self.process = host.process.get(comm='java')
    # Start each test with a clean slate.
    assert self.load_index_template().status_code == codes.ok
    assert self.delete().status_code == codes.ok
def __init__(self):
    """Collect runtime facts about the Kibana (OpenDistro build) container."""
    def _version_info(flag):
        # Ask the helper script for a version string and trim whitespace.
        return run(['../bin/version-info', flag], stdout=PIPE).stdout.decode().strip()

    self.version = _version_info('--es')
    self.url = 'http://localhost:5601'
    # Kibana runs as a node process inside the container.
    self.process = host.process.get(comm='node')
    # One VAR=value pair per line of `env` output.
    self.environment = dict(
        line.split('=', 1)
        for line in self.stdout_of('env').split('\n')
    )
    self.tag = _version_info('--od')
    self.image = config.getoption('--image') + self.tag
    inspect_output = run(
        ['docker', 'inspect', self.image], stdout=PIPE).stdout.decode()
    self.docker_metadata = json.loads(inspect_output)[0]
def pytest_runtest_teardown(item):
    """
    Pytest hook to get node information after the test executed.

    This creates a folder with the name of the test case, copies the folders
    defined in the shared_dir_mount attribute of each openswitch container and
    the /var/log/messages file inside.

    FIXME: document the item argument
    """
    # e.g. "test_foo.py" -> "test_foo"
    test_suite = splitext(basename(item.parent.name))[0]
    from pytest import config
    topology_log_dir = config.getoption('--topology-log-dir')
    if not topology_log_dir:
        # Log collection is opt-in via --topology-log-dir.
        return
    else:
        # Unique per-test destination: <suite>_<test>_<timestamp>.
        path_name = join(
            topology_log_dir,
            '{}_{}_{}'.format(
                test_suite,
                item.name,
                datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
            )
        )
    # Being extra-prudent here
    if exists(path_name):
        rmtree(path_name)
    if 'topology' not in item.funcargs:
        # No topology fixture available: fall back to copying the
        # statically known openswitch log paths.
        from topology_docker_openswitch.openswitch import LOG_PATHS
        for log_path in LOG_PATHS:
            try:
                destination = join(path_name, basename(log_path))
                try:
                    rmtree(destination)
                except:  # best-effort cleanup; nothing to do on failure
                    pass
                copytree(log_path, destination)
                # NOTE(review): this removes path_name right after copying
                # into it, discarding earlier iterations' copies -- confirm
                # this is intentional.
                rmtree(path_name)
            except Error as err:
                # shutil.Error carries a list of (src, dst, message) tuples.
                errors = err.args[0]
                for error in errors:
                    src, dest, msg = error
                    warning(
                        'Unable to copy file {}, Error {}'.format(
                            src, msg
                        )
                    )
        return
    topology = item.funcargs['topology']
    if topology.engine != 'docker':
        # Only docker-engine topologies expose shared directories to copy.
        return
    logs_path = '/var/log/messages'
    for node in topology.nodes:
        node_obj = topology.get(node)
        if node_obj.metadata.get('type', None) != 'openswitch':
            # NOTE(review): this returns, aborting collection for every
            # remaining node -- `continue` may have been intended.
            return
        shared_dir = node_obj.shared_dir
        try:
            # Dump syslog into the shared dir so it lands in the copy below.
            commands = ['cat {}'.format(logs_path)]
            log_commands(
                commands,
                join(node_obj.shared_dir_mount, 'container_logs'),
                node_obj._docker_exec, prefix=r'sh -c "', suffix=r'"'
            )
        except:  # best-effort; warn and keep collecting from this node
            warning(
                'Unable to get {} from node {}.'.format(
                    logs_path, node_obj.identifier
                )
            )
        bash_shell = node_obj.get_shell('bash')
        try:
            # Stage any coredumps in /tmp inside the container.
            core_path = '/var/diagnostics/coredump'
            bash_shell.send_command(
                'ls -1 {}/core* 2>/dev/null'.format(core_path),
                silent=True
            )
            core_files = bash_shell.get_response(silent=True).splitlines()
            for core_file in core_files:
                bash_shell.send_command(
                    'cp {core_file} /tmp'.format(**locals()),
                    silent=True
                )
        except:  # best-effort; warn and keep collecting from this node
            warning(
                'Unable to get coredumps from node {}.'.format(
                    node_obj.identifier
                )
            )
        try:
            # Copy the node's shared dir into the per-test folder, then
            # remove the original.
            copytree(shared_dir, join(path_name, basename(shared_dir)))
            rmtree(shared_dir)
        except Error as err:
            errors = err.args[0]
            for error in errors:
                src, dest, msg = error
                warning(
                    'Unable to copy file {}, Error {}'.format(
                        src, msg
                    )
                )
def pytest_runtest_teardown(item):
    """
    Pytest hook to get node information after the test executed.

    This creates a folder with the name of the test case, copies the folders
    defined in the shared_dir_mount attribute of each openswitch container and
    the /var/log/messages file inside.

    FIXME: document the item argument
    """
    # e.g. "test_foo.py" -> "test_foo"
    test_suite = splitext(basename(item.parent.name))[0]
    from pytest import config
    topology_log_dir = config.getoption('--topology-log-dir')
    if not topology_log_dir:
        # Log collection is opt-in via --topology-log-dir.
        return
    else:
        # Unique per-test destination: <suite>_<test>_<timestamp>.
        path_name = join(
            topology_log_dir,
            '{}_{}_{}'.format(test_suite, item.name,
                              datetime.now().strftime('%Y_%m_%d_%H_%M_%S')))
    # Being extra-prudent here
    if exists(path_name):
        rmtree(path_name)
    if 'topology' not in item.funcargs:
        # No topology fixture available: fall back to copying the
        # statically known openswitch log paths.
        from topology_docker_openswitch.openswitch import LOG_PATHS
        for log_path in LOG_PATHS:
            try:
                destination = join(path_name, basename(log_path))
                try:
                    rmtree(destination)
                except:  # best-effort cleanup; nothing to do on failure
                    pass
                copytree(log_path, destination)
                # NOTE(review): this removes path_name right after copying
                # into it, discarding earlier iterations' copies -- confirm
                # this is intentional.
                rmtree(path_name)
            except Error as err:
                # shutil.Error carries a list of (src, dst, message) tuples.
                errors = err.args[0]
                for error in errors:
                    src, dest, msg = error
                    warning('Unable to copy file {}, Error {}'.format(
                        src, msg))
        return
    topology = item.funcargs['topology']
    if topology.engine != 'docker':
        # Only docker-engine topologies expose shared directories to copy.
        return
    logs_path = '/var/log/messages'
    for node in topology.nodes:
        node_obj = topology.get(node)
        if node_obj.metadata.get('type', None) != 'openswitch':
            # NOTE(review): this returns, aborting collection for every
            # remaining node -- `continue` may have been intended.
            return
        shared_dir = node_obj.shared_dir
        try:
            # Dump syslog into the shared dir so it lands in the copy below.
            commands = ['cat {}'.format(logs_path)]
            log_commands(commands,
                         join(node_obj.shared_dir_mount, 'container_logs'),
                         node_obj._docker_exec, prefix=r'sh -c "',
                         suffix=r'"')
        except:  # best-effort; warn and keep collecting from this node
            warning('Unable to get {} from node {}.'.format(
                logs_path, node_obj.identifier))
        bash_shell = node_obj.get_shell('bash')
        try:
            # Stage any coredumps in /tmp inside the container.
            core_path = '/var/diagnostics/coredump'
            bash_shell.send_command(
                'ls -1 {}/core* 2>/dev/null'.format(core_path), silent=True)
            core_files = bash_shell.get_response(silent=True).splitlines()
            for core_file in core_files:
                bash_shell.send_command(
                    'cp {core_file} /tmp'.format(**locals()), silent=True)
        except:  # best-effort; warn and keep collecting from this node
            warning('Unable to get coredumps from node {}.'.format(
                node_obj.identifier))
        try:
            # Copy the node's shared dir into the per-test folder, then
            # remove the original.
            copytree(shared_dir, join(path_name, basename(shared_dir)))
            rmtree(shared_dir)
        except Error as err:
            errors = err.args[0]
            for error in errors:
                src, dest, msg = error
                warning('Unable to copy file {}, Error {}'.format(src, msg))