def check_redis_instances(step, instances_count, instance_type, serv_as): server = getattr(world, serv_as) instances = getattr(world, 'redis_instances', {}) count = 0 for instance in instances: if CONF.feature.platform in ['cloudstack', 'idcf']: cloud = Cloud() node = cloud.get_node(server) ip = filter(lambda x: x.address == server.public_ip, node.driver.ex_list_public_ip())[0] try: rule = node.driver.ex_add_port_forwarding_rule(node, ip, 'TCP', instance, instance) except: rules = node.driver.ex_list_port_forwarding_rule() rule = filter(lambda x: x.public_port == instance and x.address == ip, rules)[0] LOG.info('Rule for open port add: %s %s - %s' % (ip, instance, instance)) info = {} try: LOG.debug('Try connect to redis instance: %s:%s:%s' % (server.public_ip, instance, instances[instance])) r = redis.Redis(host=server.public_ip, port=instance, password=instances[instance], socket_timeout=5) info = r.info() except redis.ConnectionError, e: LOG.error('Connection to redis: %s:%s with password %s is FAILED' % (server.public_ip, instance, instances[instance])) raise redis.ConnectionError('Connection to redis: %s:%s with password %s is FAILED' % (server.public_ip, instance, instances[instance])) finally:
def action_on_redis(step, action, instance_number, serv_as):
    """Perform a read or write `action` against the N-th redis instance on a server.

    :param action: 'write' stores a test key, 'read' reads it back and verifies.
    :param instance_number: 1-based index into the port-sorted instance map.
    :param serv_as: name of the server attribute stored on `world`.
    """
    server = getattr(world, serv_as)
    instances = getattr(world, 'redis_instances', {})
    # sorted() gives a deterministic order by port; instance is a (port, password) pair
    instance = sorted(instances.items())[int(instance_number) - 1]
    if CONF.feature.platform in ['cloudstack', 'idcf']:
        cloud = Cloud()
        node = cloud.get_node(server)
        # Py3 fix: filter() returns an iterator, so the old `filter(...)[0]` would fail
        ip = next(x for x in node.driver.ex_list_public_ip() if x.address == server.public_ip)
        try:
            rule = node.driver.ex_add_port_forwarding_rule(node, ip, 'TCP', instance[0], instance[0])
        except Exception:
            # the rule may already exist: look it up instead of failing
            rules = node.driver.ex_list_port_forwarding_rule()
            rule = next(x for x in rules if x.public_port == instance[0] and x.address == ip)
        LOG.info('Rule for open port add: %s %s - %s' % (ip, instance[0], instance[0]))
    r = redis.Redis(host=server.public_ip, port=instance[0], password=instance[1],
                    socket_timeout=5, db=0)
    if action == 'write':
        LOG.info('Insert test key to %s:%s' % (server.public_ip, instance[0]))
        r.set('test_key', 'test_value')
    elif action == 'read':
        LOG.info('Read test key from %s:%s' % (server.public_ip, instance[0]))
        # NOTE(review): redis-py 3 returns bytes unless decode_responses=True —
        # confirm the client configuration if this comparison starts failing
        data = r.get('test_key')
        if not data == 'test_value':
            LOG.error('Receive bad key value from redis instance: %s:%s' % (server.public_ip, instance[0]))
            raise AssertionError('Receive bad key value from redis instance: %s:%s' % (server.public_ip, instance[0]))
    if CONF.feature.platform in ['cloudstack', 'idcf']:
        node.driver.ex_delete_port_forwarding_rule(node, rule)
        LOG.info('Rule for open port was delete')
def assert_cf_work(step, serv_as):
    """Check that the vmc CLI on the server reports the CloudFoundry banner."""
    time.sleep(180)  # give CloudFoundry time to come up before probing
    target = getattr(world, serv_as)
    node = Cloud().node_from_server(target)
    output = node.run("/bin/bash -c 'source /usr/local/rvm/scripts/rvm; vmc info'")[0]
    world.assert_not_in("VMware's Cloud Application Platform", output,
                        'CF client not work, message: %s' % output)
def check_count_redis_instances(step, instances_count, serv_as):
    """Compare the number of running redis-server processes with the expected count."""
    expected = int(instances_count)
    server = getattr(world, serv_as)
    node = Cloud().get_node(server)
    out = node.run('ps -A | grep redis-server')
    actual = len(out[0].splitlines())
    if actual != expected:
        LOG.error('Redis processes: %s' % out[0])
        raise AssertionError('Invalid redis processes count, must be: %s but %s'
                             % (instances_count, actual))
def add_test_app(step, serv_as):
    """Upload the env.rb helper and push the test application via the vmc client."""
    server = getattr(world, serv_as)
    node = Cloud().node_from_server(server)
    node.run('mkdir env')
    script = resources('scripts/env.rb')
    node.put(path='/root/env/env.rb', content=script.get())
    push_cmd = ("/bin/bash -c 'source /usr/local/rvm/scripts/rvm; "
                "vmc login --email [email protected] --passwd password; "
                "echo Y | vmc push testapp --url %s --mem 64 --path /root/env/'" % world.D1)
    out = node.run(push_cmd)[0]
    world.assert_not_in('Starting Application: OK', out, 'Application is not starting: %s' % out)
def check_processes(step, count, serv_as):
    """Count running scalarizr processes on the server and compare with `count`."""
    time.sleep(60)  # let the process list settle after lifecycle changes
    node = Cloud().get_node(getattr(world, serv_as))
    ps_output = node.run("ps aux | grep scalarizr")[0]
    running = sum(1 for line in ps_output.splitlines() if "bin/scalarizr" in line)
    LOG.info("Scalarizr count of processes %s" % running)
    world.assert_not_equal(running, int(count),
                           "Scalarizr processes is: %s but processes \n%s" % (running, ps_output))
def given_server_in_cloud(step, mbeh):
    """Create a fresh cloud node from an image matching the requested behavior alias.

    :param mbeh: 'mbeh1' selects a mysql2 image, 'mbeh2' selects a www image.
    :raises AssertionError: for an unknown alias (previously a latent NameError).
    """
    n = getattr(world, 'cloud_server', None)
    if n:
        n.destroy()
    cloud = Cloud()
    LOG.info('Create node in cloud')
    if mbeh == 'mbeh1':
        image = images(CONF.main.platform).filter(
            {'dist': get_scalr_dist_name(CONF.main.dist), 'behavior': 'mysql2'}).first()
    elif mbeh == 'mbeh2':
        image = images(CONF.main.platform).filter(
            {'dist': get_scalr_dist_name(CONF.main.dist), 'behavior': 'www'}).first()
    else:
        # previously `image` stayed unbound here and crashed below with NameError
        raise AssertionError('Unknown behavior alias: %s' % mbeh)
    LOG.debug('Use image: %s' % image)
    # Py3 fix: dict.keys() is a non-subscriptable view, so the old `.keys()[0]` would fail
    node = cloud.create_node(image=next(iter(image)),
                             userdata='scm-branch=%s' % CONF.main.branch)
    setattr(world, 'cloud_server', node)
def assert_at_user_on_server(cloud: Cloud, server: api.Server, expected_user: str):
    """Fail unless `expected_user` exists among the server's local accounts."""
    node = cloud.get_node(server)
    if CONF.feature.dist.is_windows:
        list_users_cmd = 'net user'
    else:
        list_users_cmd = 'cut -d : -f 1 /etc/passwd'
    with node.remote_connection() as conn:
        user_list = conn.run(list_users_cmd).std_out.split()
    assert expected_user in user_list, \
        f'User {expected_user} was not found on the server! User list output: {user_list}'
def get_mount_table(cloud: Cloud, server: Server) -> tp.Dict[str, str]:
    """Return {mount_point: device} parsed from `mount` output on the server."""
    LOG.info('Save mount table from server "%s"' % server.id)
    node = cloud.get_node(server)
    table = {}
    for line in node.run('mount').std_out.splitlines():
        if not line:
            continue
        fields = line.split()
        # `mount` lines look like: <device> on <mount_point> type ...
        table[fields[2]] = fields[0]
    LOG.debug('Mount table:\n %s' % table)
    return table
def test_webhooks(self, context: dict, cloud: Cloud, farm: Farm, servers: dict, testenv,
                  ssl_verify: bool, webhooks: list, expected_results: list):
    """Fire an AccountEvent and verify configured webhooks are delivered as expected.

    Sets up a flask receiver behind nginx on the F1 server, reconfigures the Scalr
    webhook engine, triggers the event via szradm, then checks delivery results and
    that the workflow-engine log stays clean.
    """
    server = servers.get('F1')
    # Enable the labs workflow engine and apply the ssl_verify value under test
    params = {
        "scalr.system.webhooks.scalr_labs_workflow_engine": True,
        "scalr.system.webhooks.ssl_verify": ssl_verify,
        "scalr.system.webhooks.retry_interval": 5,
        "scalr.system.webhooks.use_proxy": False}
    lib_scalr.update_scalr_config(testenv, params)
    # Restart both services so the new config takes effect
    testenv.restart_service("workflow-engine")
    testenv.restart_service("zmq_service")
    node = cloud.get_node(server)
    # Stage nginx proxy config, preparation script and the flask webhook receiver
    node.put_file("/tmp/default", resources('configs/nginx_to_flask_proxy.conf').get().decode("utf-8"))
    node.put_file("/tmp/prepare_flask.sh", resources('scripts/prepare_flask.sh').get().decode("utf-8"))
    node.put_file('webhooks.py', resources('scripts/webhooks.py').get().decode("utf-8"))  # Put flask script
    with node.remote_connection() as conn:
        # Run preparation script
        conn.run("sudo bash /tmp/prepare_flask.sh")
        # Run flask in background process
        conn.run("gunicorn -D -w 1 --bind localhost:5000 webhooks:app")
    # Sanity check: the HTTP endpoint must answer before webhooks are configured
    lib_apache.assert_check_http_get_answer(server)
    lib_webhooks.configure_webhooks(webhooks, server, farm, context)
    # Trigger the custom event that the webhooks listen for
    result = lib_node.execute_command(cloud, server, 'szradm --fire-event AccountEvent')
    assert not result.std_err, "Command szradm --fire-event AccountEvent failed with %s" % result.std_err
    lib_webhooks.assert_webhooks(context['test_webhooks'], expected_results, server_id=server.id)
    assert not testenv.check_service_log("workflow-engine", "Traceback"), \
        "Found Traceback in workflow-engine service log!"
def test_chef_deployment_linux(self, context: dict, cloud: Cloud, servers: dict):
    """Verify chef executed fine"""
    server = servers['M1']
    # Record bootstrap time so follow-up tests can detect an unwanted chef re-run
    context['chef_deployment_time'] = provision.get_chef_bootstrap_stat(cloud.get_node(server))
    lib_node.assert_process_has_options(cloud, server, process='memcached', options='-m 1024')
    lib_node.assert_process_has_options(cloud, server, process='chef-client', options='--daemonize')
    provision.assert_chef_node_name_equal_hostname(cloud, server)
def test_checking_config_changes(self, cloud: Cloud, servers: dict):
    """Checking config changes: INTERVAL"""
    new_interval = 15
    node = cloud.get_node(servers['M1'])
    provision.change_chef_client_interval_value(node, new_interval)
    provision.assert_chef_client_interval_value(node, new_interval)
    provision.assert_chef_runs_time(node, new_interval)
def assert_scalarizr_log_errors(cloud: Cloud, server: Server, log_type: str = None):
    """Check scalarizr log for errors (defaults to the debug log)."""
    node = cloud.get_node(server)
    effective_type = log_type if log_type else 'debug'
    if CONF.feature.dist.is_windows:
        validate_scalarizr_log_errors(cloud, node, windows=True, server=server,
                                      log_type=effective_type)
    else:
        validate_scalarizr_log_errors(cloud, node, log_type=effective_type)
def test_chef_after_resume_linux(self, context: dict, cloud: Cloud, servers: dict, farm: Farm):
    """Verify chef not executed after resume"""
    server = servers['M1']
    node = cloud.get_node(server)
    # Bootstrap timestamp must match the one recorded before suspend
    current_bootstrap_time = provision.get_chef_bootstrap_stat(node)
    assert context['chef_deployment_time'] == current_bootstrap_time, \
        'Chef was started after resume!'
    provision.check_process_status(node, 'memcached', False)
    lib_node.assert_process_has_options(cloud, server, process='chef-client', options='--daemonize')
    provision.assert_chef_node_name_equal_hostname(cloud, server)
def assert_szr_terminated_in_log(cloud: Cloud, server: Server):
    # TODO: PP > consolidate win/linux methods
    """Assert the Windows scalarizr debug log contains a 'Scalarizr terminated' entry.

    :returns: True when the marker is present.
    :raises AssertionError: when the marker is absent.
    """
    node = cloud.get_node(server)
    # Only the install path differs between CI repos; raw strings avoid the
    # fragile escaped-backslash soup of the previous duplicated commands
    if CONF.feature.ci_repo == 'buildbot':
        log_path = r'C:\Program Files\Scalarizr\var\log\scalarizr_debug.log'
    else:
        log_path = r'C:\opt\scalarizr\var\log\scalarizr_debug.log'
    out = node.run(f'findstr /c:"Scalarizr terminated" "{log_path}"')
    if 'Scalarizr terminated' in out.std_out:
        return True
    raise AssertionError("Not see 'Scalarizr terminated' in debug log")
def validate_string_in_file(cloud: Cloud, server: Server, file_path: str, value: str,
                            invert: bool = False):
    """Assert `file_path` on the server contains `value` (or does not, when invert=True)."""
    LOG.info('Verify file "%s" in %s %s "%s"'
             % (file_path, server.id, 'does not contain' if invert else 'contains', value))
    node = cloud.get_node(server)
    grep_result = node.run('cat %s | grep %s' % (file_path, value)).std_out.strip()
    found = bool(grep_result)
    # found must disagree with invert: present when invert=False, absent when invert=True
    assert found != invert, \
        'File %s %s: %s. Result of grep: %s' \
        % (file_path, 'contains' if invert else 'does not contain', value, grep_result)
def test_chef_solo_bootstrapping(self, context: dict, cloud: Cloud, farm: Farm, role_options: str):
    """Bootstrapping role with chef-solo"""
    # Rebuild the farm from scratch with only the chef-solo role under test
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=[role_options])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    # The chef-solo recipe drops a marker file named after the role options
    lib_server.assert_file_exist(cloud.get_node(server), f'C:\{role_options}')
    provision.assert_script_data_deleted(cloud, server)
def assert_chef_node_name_equal_hostname(cloud: Cloud, server: api.Server):
    """Assert the chef node_name configured in client.rb matches the server hostname."""
    hostname = lib_server.get_hostname_by_server_format(server)
    node = cloud.get_node(server)
    if CONF.feature.dist.is_windows:
        command = 'findstr node_name c:\\chef\\client.rb'
    else:
        command = 'cat /etc/chef/client.rb | grep node_name'
    with node.remote_connection() as conn:
        result = conn.run(command).std_out
    # line looks like: node_name "the-name" — take the token and strip the quotes
    node_name = result.strip().split()[1][1:-1]
    if node_name != hostname:
        raise AssertionError(f'Chef node_name "{node_name}" != hostname on server "{hostname}"')
def szradm_execute_command(command: str, cloud: Cloud, server: Server, format_output: bool = True):
    """Run an szradm command on the server, optionally parsing its JSON output.

    :param format_output: append --format=json and parse the result when True.
    :raises AssertionError: when the remote command exits with a non-zero status.
    """
    if format_output:
        command = f"{command} --format=json"
    if CONF.feature.dist.id == 'coreos':
        # coreos keeps szradm outside the default PATH
        command = f'PATH=$PATH:/opt/bin; {command}'
    LOG.info(f'Execute the command: {command} a remote host: {server.id}')
    node = cloud.get_node(server)
    with node.remote_connection() as conn:
        result = conn.run(command)
        if result.status_code:
            # bug fix: this path previously referenced an undefined name `out`
            # and crashed with NameError instead of reporting the szradm error
            raise AssertionError(f"An error has occurred while execute szradm:\n {result.std_err}")
    # explicit conditional instead of `and/or`, which mis-returned std_out
    # whenever the parsed JSON happened to be falsy (e.g. an empty list)
    return json.loads(result.std_out) if format_output else result.std_out
def test_bootstrapping_form_chef_role(self, context: dict, cloud: Cloud, farm: Farm):
    """Bootstrapping from chef role"""
    # Start from a clean farm carrying only the winchef-role
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=['winchef-role'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    lib_server.assert_file_exist(cloud.get_node(server), 'C:\chef_result_file')
    lib_server.assert_scalarizr_log_errors(cloud, server)
    lifecycle.assert_szr_version_last(server)
    lifecycle.assert_hostname(server)
def set_iptables_rule(cloud: Cloud, server: Server, port: tp.Union[int, tuple, list]):
    """Insert iptables rule in the top of the list"""
    LOG.info('Insert iptables rule to server %s for opening port %s' % (server, port))
    node = cloud.get_node(server)
    source_ip = get_external_local_ip()
    LOG.info('My IP address: %s' % source_ip)
    if isinstance(port, (tuple, list)):
        # exactly two values -> a port range (a:b); otherwise a comma list
        separator = ':' if len(port) == 2 else ','
        port = separator.join(str(p) for p in port)
    node.run('iptables -I INPUT -p tcp -s %s --dport %s -j ACCEPT' % (source_ip, port))
def agent_restart(cloud: Cloud, server: Server):
    """Restart the Scalarizr Windows service on the server via a remote connection."""
    # TODO: PP > consolidate win/linux methods
    node = cloud.get_node(server)
    with node.remote_connection() as conn:
        LOG.info('Restart scalarizr via winrm')
        LOG.debug('Stop scalarizr')
        stop_out = conn.run('net stop Scalarizr')
        time.sleep(3)
        LOG.debug(stop_out)
        start_out = conn.run('net start Scalarizr')
        LOG.debug(start_out)
        time.sleep(15)  # give the service time to fully initialize
def get_config_from_message(cloud: Cloud, server: Server, config_group: str, message: str) -> dict:
    """Fetch the `config_group` section from the body of a named szradm message.

    :raises AssertionError: when no message with the given name exists on the server
        (previously surfaced as an uninformative StopIteration).
    """
    node = cloud.get_node(server)
    LOG.info(f'Get messages from server {server.id}')
    messages = lib_server.get_szr_messages(node)
    matched = next((m for m in messages if m['name'] == message), None)
    if matched is None:
        raise AssertionError(f'Message "{message}" not found on server {server.id}')
    msg_id = matched['id']
    LOG.info(f'Message id for {message} is {msg_id}')
    cmd = f'szradm message-details {msg_id} --json'
    if CONF.feature.dist.id == 'coreos':
        # coreos keeps szradm outside the default PATH
        cmd = "/opt/bin/" + cmd
    message_details = json.loads(node.run(cmd).std_out)['body']
    LOG.info(f'Message details is {message_details}')
    LOG.info(f'Returning message part {config_group}')
    return message_details[config_group]
def assert_path_exist(cloud: Cloud, server: Server, path: str):
    """Validate path exist in server"""
    LOG.info(f'Verify path {path} exist in server {server.id}')
    node = cloud.get_node(server)
    with node.remote_connection() as conn:
        attempts_left = 5
        while attempts_left:
            # retry a few times: the path may appear with a delay
            if conn.run('/bin/ls %s' % path).status_code == 0:
                return
            attempts_left -= 1
            time.sleep(15)
    LOG.error(f'Path {path} does not exist in server {server.id}')
    raise AssertionError(f'Path {path} does not exist in server {server.id}')
def assert_chef_bootstrap_failed(cloud: Cloud, server: api.Server):
    """Assert scalarizr_debug.log records a failed chef-client bootstrap (exit code 1)."""
    node = cloud.get_node(server)
    assertion_msg = "Chef bootstrap marker not found in scalarizr_debug.log"
    if CONF.feature.dist.is_windows:
        failure_marker = 'chef-client" exited with code 1'
        cmd = 'findstr /C:"Command \\"C:\opscode\chef\\bin\chef-client\\" exited with code 1"' \
              ' "C:\opt\scalarizr\\var\log\scalarizr_debug.log"'
        assert failure_marker in node.run(cmd).std_out, assertion_msg
    else:
        failure_markers = [
            'Command "/usr/bin/chef-client" exited with code 1',
            'Command /usr/bin/chef-client exited with code 1']
        # bug fix: the marker contains spaces, so it must be quoted — unquoted,
        # grep treated everything after the first word as file names
        assert any(node.run(f"grep '{m}' /var/log/scalarizr_debug.log").std_out.strip()
                   for m in failure_markers), \
            assertion_msg
def test_bootstrapping_with_chef(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstrapping with chef"""
    # Launch a farm with the 'winchef' role and wait for the server to come up
    lib_farm.add_role_to_farm(context, farm, role_options=['winchef'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server  # save for the follow-up tests in this class
    # Basic health: clean scalarizr log and up-to-date agent version
    lib_server.assert_scalarizr_log_errors(cloud, server)
    lifecycle.assert_szr_version_last(server)
    # Chef-side checks: node registered and the expected recipes in the run list
    provision.assert_node_exists_on_chef_server(server)
    orchestration.assert_recipes_in_runlist(server, ['windows_file_create', 'revizorenv', 'revizor_chef_multi'])
    node = cloud.get_node(server)
    # Marker files created by the recipes must exist on the server
    lib_server.assert_file_exist(node, 'C:\chef_result_file')
    lib_server.assert_file_exist(node, 'C:\changed_result')
    provision.assert_chef_node_name_equal_hostname(cloud, server)
    provision.assert_chef_log_contains_text(server, "revizor_chef_variable=REVIZOR_CHEF_VARIABLE_VALUE_WORK")
def delete_volume(cloud: Cloud, device_id: str):
    """Delete the cloud volume with `device_id`, retrying while it is still attached.

    :raises AssertionError: when no volume with the given id exists.
    """
    LOG.info('Delete volume: %s' % device_id)
    volume = [v for v in cloud.list_volumes() if v.id == device_id]
    if volume:
        volume = volume[0]
    else:
        raise AssertionError('Can\'t found Volume in cloud with ID: %s' % device_id)
    for i in range(10):
        try:
            cloud._driver._conn.destroy_volume(volume)
            break
        except Exception as e:
            # Py3 fix: Exception has no `.message` attribute — the old code raised
            # AttributeError inside this handler; use str(e) instead
            if 'attached' in str(e):
                LOG.warning('Volume %s currently attached to server' % device_id)
            time.sleep(60)
    else:
        # keep the original best-effort (non-raising) behavior, but make the
        # exhausted-retries case visible instead of failing silently
        LOG.error('Could not delete volume %s after 10 attempts' % device_id)
def create_partitions_on_volume(cloud: Cloud, server: Server, mnt_point: str):
    """Create a partition table on the volume at `mnt_point` using a helper script."""
    script_name = 'create_partitions.sh'
    remote_path = Path('/tmp', script_name)
    script_body = resources(Path('scripts', script_name)).get().decode()
    node = cloud.get_node(server)
    LOG.info('Creating partitions table for volume on %s' % mnt_point)
    # The script template takes the mount point via %-substitution
    node.put_file(str(remote_path), script_body % mnt_point)
    result = node.run(f'source {remote_path}')
    # The last four output lines list the created partitions
    partition_table = result.std_out.strip('\n').splitlines()[-4:]
    LOG.debug('Created partitions table for volume:\n%s' % '\n'.join(partition_table))
    assert all(entry.startswith('/dev/') for entry in partition_table), \
        'Create volume partitions failed: %s' % result.std_err
    LOG.info('Partitions table for volume was successfully created')
def assert_script_data_deleted(cloud: Cloud, server: api.Server):
    """Verify the bin/data dirs of the last executed script task were cleaned up."""
    node = cloud.get_node(server)
    server.scriptlogs.reload()
    LOG.info('Check script executed data was deleted')
    if not server.scriptlogs:
        raise AssertionError("No orchestration logs found on %s" % server.id)
    # Task dirs on disk use the execution id without dashes
    task_dir = server.scriptlogs[0].execution_id.replace('-', '')
    if CONF.feature.dist.is_windows:
        cmd = f'dir c:\\opt\\scalarizr\\var\\lib\\tasks\\{task_dir} /b /s /ad | findstr /e "\\bin \\data"'
    else:
        cmd = f'find /var/lib/scalarizr/tasks/{task_dir} -type d -regex ".*/\\(bin\\|data\\)"'
    with node.remote_connection() as conn:
        result = conn.run(cmd)
        assert not result.std_err, f"Command: {cmd} execution error:\n {result.std_err}"
        folders = [entry for entry in result.std_out.splitlines() if entry.strip()]
        assert not folders, f"Find script data {folders} on {server.id}"
def assert_attached_disks_size(cloud: Cloud, server: Server, disks: tp.List[tp.Tuple[str, str, int]]):
    """Verify each expected disk is attached to the Windows server with the right size/label.

    :param disks: tuples of (drive caption, expected label, expected size in GB).
    """
    node = cloud.get_node(server)
    # wmic returns requested columns in alphabetical order: Capacity, Caption, Label
    out = node.run('wmic volume get Caption,Capacity,Label').std_out
    server_disks = [line.split() for line in out.splitlines() if line.strip()][1:]  # [1:] skips the header row
    for d, label, size in disks:
        for disk in server_disks:
            if disk[1] == d:  # disk[1] is the Caption (drive letter)
                server_size = int(disk[0]) // 1000000000  # bytes -> decimal GB (not GiB)
                if server_size != size:
                    raise AssertionError("Disk %s is of wrong size - %s " % (disk[1], server_size))
                # NOTE(review): this tests that the server-side label (disk[2]) is a
                # substring of the expected label — direction looks possibly inverted;
                # confirm intended containment before changing
                if len(disk) > 2 and disk[2] not in label:
                    raise AssertionError("Disk %s has incorrect or no label '%s'. Should be '%s'." % (
                        disk[1], disk[2], label))
                break
        else:
            # for-else: no server disk matched this expected caption
            raise AssertionError("Disk not found! All server disks %s" % server_disks)
def initialize_world():
    """Record the test start time and attach a Cloud handle to the shared world object."""
    world.test_start_time = datetime.now()
    world.cloud = Cloud()