def test_webhooks_in_proxy(self, context: dict, cloud: Cloud, farm: Farm, servers: dict, testenv):
    """Verify webhook delivery works when Scalr's webhook traffic goes through a proxy.

    Boots a proxy server, points the ``system.webhooks`` Scalr setting at it,
    fires an AccountEvent from an existing server and asserts both HTTP and
    HTTPS webhooks are delivered successfully on the first attempt.
    """
    # 'F1' is expected to have been stored by a previous test in this class
    # (servers is a shared fixture dict) — TODO confirm ordering dependency.
    server = servers.get('F1')
    lib_farm.add_role_to_farm(context, farm, dist='ubuntu1404')
    farm.launch()
    proxy_server = lib_server.wait_server_status(
        context, cloud, farm, status=ServerStatus.RUNNING)
    servers['P1'] = proxy_server
    # Remote script (git.io shortlink) presumably installs/configures the
    # proxy software on the new server — verify target of the shortlink.
    lib_server.execute_script(context, farm, proxy_server,
                              script_name='https://git.io/vA52O',
                              is_local=True, synchronous=True)
    lib_scalr.configure_scalr_proxy(testenv, proxy_server, 'system.webhooks')
    # Restart Scalr services so the new proxy setting takes effect.
    testenv.restart_service("workflow-engine")
    testenv.restart_service("zmq_service")
    webhooks = [
        {'schema': 'http', 'endpoint': '/', 'trigger_event': 'AccountEvent', 'name': 'http_normal'},
        {'schema': 'https', 'endpoint': '/', 'trigger_event': 'AccountEvent', 'name': 'https_normal'}
    ]
    lib_webhooks.configure_webhooks(webhooks, server, farm, context)
    result = lib_node.execute_command(cloud, server, 'szradm --fire-event AccountEvent')
    assert not result.std_err, "Command szradm --fire-event AccountEvent failed with %s" % result.std_err
    # Both webhooks must succeed (HTTP 200) on the very first attempt.
    expected_results = [
        {'webhook_name': 'http_normal', 'expected_response': 200, 'attempts': 1, 'error': None},
        {'webhook_name': 'https_normal', 'expected_response': 200, 'attempts': 1, 'error': None}
    ]
    lib_webhooks.assert_webhooks(context['test_webhooks'], expected_results, server_id=server.id)
    assert not testenv.check_service_log("workflow-engine", "Traceback"), \
        "Found Traceback in workflow-engine service log!"
def test_import_server(self, context: dict, cloud: Cloud, farm: Farm):
    """Import an externally-launched cloud server into Scalr via discovery manager."""
    farm.launch()
    node = discovery.run_server_in_cloud(cloud)
    # GCE identifies instances by name/zone; other platforms by id/location.
    location = CONF.feature.platform.location
    instance_id = node.id
    if CONF.feature.platform.is_gce:
        location, instance_id = node.extra['zone'].name, node.name
    role_id = IMPL.discovery_manager.get_system_role_id(
        node.cloud._name,
        location,
        CONF.feature.dist.id)
    farm_role_id = lib_farm.add_role_to_farm(context, farm, role=Role(id=role_id)).id
    IMPL.discovery_manager.import_server(node.cloud._name, farm_role_id, instance_id=instance_id)
    farm.roles.reload()
    # The imported server must appear as the single server of the single role.
    assert farm.roles[0].role_id == str(role_id)
    assert len(farm.roles) == 1
    assert len(farm.roles[0].servers) == 1
    assert farm.roles[0].servers[0].cloud_server_id == instance_id
    lifecycle.assert_server_status(farm.roles[0].servers[0], ServerStatus.RUNNING)
def test_efs_bootstrapping(self, efs: dict, context: dict, farm: Farm, cloud: Cloud):
    """Attach EFS storage to a role and verify the mount survives a reboot.

    Links an existing EFS file system to the farm, boots a server with the
    'efs' role option, writes files into the mount point and checks disk
    types, fstab entry and file count before and after a reboot.
    """
    lib_farm.clear(farm)
    farm.terminate()
    context['linked_services'] = {'efs': {'cloud_id': efs['fileSystemId']}}
    efs_mount_point = "/media/efsmount"
    lib_farm.link_efs_cloud_service_to_farm(farm, efs)
    lib_farm.add_role_to_farm(context, farm, role_options=['efs'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    lifecycle.assert_attached_disk_types(context, cloud, farm)
    lifecycle.assert_path_exist(cloud, server, efs_mount_point)
    # Seed the share with files so persistence can be asserted after reboot.
    lifecycle.create_files(cloud, server, count=100, directory=efs_mount_point)
    mount_table = lifecycle.get_mount_table(cloud, server)
    lifecycle.assert_mount_point_in_fstab(
        cloud, server, mount_table=mount_table, mount_point=efs_mount_point)
    # Reboot server
    lib_server.execute_server_action(server, 'reboot')
    lib_server.assert_server_message(cloud, farm, msgtype='in', msg='RebootFinish', server=server)
    # Check after reboot: mount must come back and the 100 files must persist.
    lifecycle.assert_attached_disk_types(context, cloud, farm)
    lifecycle.assert_path_exist(cloud, server, efs_mount_point)
    lifecycle.assert_file_count(cloud, server, count=100, directory=efs_mount_point)
def test_update_from_stable_to_branch_on_startup_and_new_package(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Update scalarizr from stable to branch on startup with new pkg.

    Boots a clean image, installs scalarizr from 'stable', then publishes a
    fresh branch copy package and updates to it through the UI, verifying
    script execution works before and after a scalarizr restart.
    """
    farm.terminate()
    lib_farm.clear(farm)
    image = update.get_clean_image(cloud)
    role = lib_role.create_role(image)
    farm.launch()
    farm_role = lib_farm.add_role_to_farm(context, farm, role=Role.get(role['role']['id']))
    # Install scalarizr manually while the server is still PENDING.
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.PENDING)
    szr_ver = lib_node.install_scalarizr_to_server(server, cloud, custom_branch='stable')
    # Grace period before the hard reboot — presumably lets install settle;
    # TODO confirm why 120s specifically.
    time.sleep(120)
    lib_server.execute_server_action(server, 'reboot', hard=True)
    server = lib_server.wait_server_status(context, cloud, farm,
                                           status=ServerStatus.RUNNING, server=server)
    update.assert_scalarizr_version(server, cloud, szr_ver)
    # Produce a new package on a copy of the 'system' branch and switch the
    # farm role to it, then drive the update from the UI.
    update.create_branch_copy(context, branch='system')
    update.waiting_new_package(context)
    lib_role.change_branch_in_farm_role(farm_role, context['branch_copy_name'])
    update.start_scalarizr_update_via_ui(server)
    update.wait_szrupd_status(server, 'in-progress')
    update.wait_szrupd_status(server, 'completed')
    lifecycle.assert_szr_version_last(server, branch=context['branch_copy_name'])
    # Smoke-test scripting on the updated agent, then again after restart.
    lib_server.execute_script(context, farm, server,
                              script_name='Windows ping-pong. CMD', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Windows ping-pong. CMD',
                                         log_contains='pong', new_only=True)
    lib_node.reboot_scalarizr(cloud, server)
    lib_server.execute_script(context, farm, server,
                              script_name='Windows ping-pong. CMD', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Windows ping-pong. CMD',
                                         log_contains='pong', new_only=True)
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='debug')
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='update')
def test_update_from_branch_to_stable_on_startup(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Update scalarizr from branch to stable on startup.

    Installs scalarizr from the default branch on a clean image, with the
    farm role pinned to 'stable' (role option 'branch_stable'), so the agent
    is updated to stable during startup; then smoke-tests scripting.
    """
    farm.terminate()
    lib_farm.clear(farm)
    image = update.get_clean_image(cloud)
    role = lib_role.create_role(image)
    farm.launch()
    lib_farm.add_role_to_farm(context, farm,
                              role=Role.get(role['role']['id']),
                              role_options=['branch_stable'])
    # Install scalarizr manually while still PENDING (default branch).
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.PENDING)
    szr_ver = lib_node.install_scalarizr_to_server(server, cloud)
    # Grace period before the hard reboot — TODO confirm why 120s.
    time.sleep(120)
    lib_server.execute_server_action(server, 'reboot', hard=True)
    server = lib_server.wait_server_status(context, cloud, farm,
                                           status=ServerStatus.RUNNING, server=server)
    update.assert_scalarizr_version(server, cloud, szr_ver)
    # Scripting must work right after bootstrap and after an agent restart.
    lib_server.execute_script(context, farm, server,
                              script_name='Windows ping-pong. CMD', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Windows ping-pong. CMD',
                                         log_contains='pong', new_only=True)
    lib_node.reboot_scalarizr(cloud, server)
    lib_server.execute_script(context, farm, server,
                              script_name='Windows ping-pong. CMD', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Windows ping-pong. CMD',
                                         log_contains='pong', new_only=True)
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='debug')
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='update')
def test_failed_hostname(self, context: dict, cloud: Cloud, farm: Farm):
    """Failed bootstrap by hostname.

    Boots a role configured with an invalid hostname (role option
    'failed_hostname') and asserts the server ends up in FAILED state.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=['failed_hostname'])
    farm.launch()
    # FAILED is the expected terminal status here — bootstrap must not succeed.
    lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.FAILED)
def test_update_to_branch_from_ui(self, context: dict, cloud: Cloud, farm: Farm, servers: dict, branch: str):
    """Update scalarizr from release to branch via UI.

    Boots a server pinned to *branch*, switches its farm role to 'system'
    and triggers the update through the UI, asserting the HostUpdate message
    arrives and the agent runs the new version.
    """
    farm.terminate()
    lib_farm.clear(farm)
    farm.launch()
    farm_role = lib_farm.add_role_to_farm(context, farm,
                                          role_options=['branch_{}'.format(branch)])
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    lifecycle.assert_szr_version_last(server, branch=branch)
    # Switch the role to 'system' and update via the UI.
    lib_role.change_branch_in_farm_role(farm_role, 'system')
    update.start_scalarizr_update_via_ui(server)
    update.wait_szrupd_status(server, 'completed')
    lib_server.assert_server_message(cloud, farm, msgtype='in', msg='HostUpdate', server=server)
    lifecycle.assert_szr_version_last(server, branch='system')
    # Scripting must keep working after the update and after an agent restart.
    lib_server.execute_script(context, farm, server,
                              script_name='Windows ping-pong. CMD', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Windows ping-pong. CMD',
                                         log_contains='pong', new_only=True)
    lib_node.reboot_scalarizr(cloud, server)
    lib_server.execute_script(context, farm, server,
                              script_name='Windows ping-pong. CMD', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Windows ping-pong. CMD',
                                         log_contains='pong', new_only=True)
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='debug')
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='update')
def test_update_from_branch_on_startup(self, context: dict, cloud: Cloud, farm: Farm, servers: dict, branch: str):
    """Update scalarizr from release to branch on startup.

    Installs scalarizr from *branch* onto a clean image while the server is
    PENDING, hard-reboots, and asserts the installed version survives the
    bootstrap; then smoke-tests Linux scripting.
    """
    image = update.get_clean_image(cloud)
    role = lib_role.create_role(image)
    farm.launch()
    lib_farm.add_role_to_farm(context, farm, role=Role.get(role['role']['id']))
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.PENDING)
    szr_ver = lib_node.install_scalarizr_to_server(server, cloud, custom_branch=branch)
    lib_server.execute_server_action(server, 'reboot', hard=True)
    server = lib_server.wait_server_status(context, cloud, farm,
                                           status=ServerStatus.RUNNING, server=server)
    update.assert_scalarizr_version(server, cloud, szr_ver)
    # Scripting must work after bootstrap and again after agent restart.
    lib_server.execute_script(context, farm, server,
                              script_name='Linux ping-pong', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Linux ping-pong',
                                         log_contains='pong', new_only=True)
    lib_node.reboot_scalarizr(cloud, server)
    lib_server.execute_script(context, farm, server,
                              script_name='Linux ping-pong', synchronous=True)
    lib_server.assert_last_script_result(context, cloud, server,
                                         name='Linux ping-pong',
                                         log_contains='pong', new_only=True)
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='debug')
    lib_server.assert_scalarizr_log_errors(cloud, server, log_type='update')
def farm(request: FixtureRequest) -> Farm:
    """Session farm fixture: create (or reuse) a farm, yield it, then clean up.

    If ``CONF.main.farm_id`` is unset, a temporary farm named ``tmprev-…`` is
    created and its id stored back into CONF so subsequent calls reuse it.
    On teardown the farm is terminated and cleared only when either all
    tests passed (with stop_farm enabled) or a TestEnv id is configured;
    temporary ``tmprev`` farms are additionally destroyed.
    """
    if CONF.main.farm_id is None:
        LOG.info('Farm ID not set, create a new farm for test')
        # Farm comment encodes the revizor run parameters for later inspection.
        test_farm = Farm.create(f'tmprev-{datetime.now().strftime("%d%m%H%M%f")}',
                                'Revizor farm for tests\n'
                                f'RV_BRANCH={CONF.feature.branch}\n'
                                f'RV_PLATFORM={CONF.feature.platform.name}\n'
                                f'RV_DIST={CONF.feature.dist.dist}\n')
        CONF.main.farm_id = test_farm.id
    else:
        LOG.info(f'Farm ID is set in config, use it: {CONF.main.farm_id}')
        test_farm = Farm.get(CONF.main.farm_id)
        lib_farm.clear(test_farm)
    LOG.info(f'Returning test farm: {test_farm.id}')
    try:
        yield test_farm
    finally:
        failed_count = request.session.testsfailed
        LOG.info('Failed tests: %s' % failed_count)
        # Stop/clear only on a fully green run (or always on a TestEnv),
        # so a failed run leaves the farm up for debugging.
        if (failed_count == 0 and CONF.feature.stop_farm) or (CONF.feature.stop_farm and CONF.scalr.te_id):
            LOG.info('Clear and stop farm...')
            test_farm.terminate()
            IMPL.farm.clear_roles(test_farm.id)
            if test_farm.name.startswith('tmprev'):
                LOG.info('Delete working temporary farm')
                try:
                    LOG.info('Wait all servers in farm terminated before delete')
                    wait_until(lib_server.farm_servers_state,
                               args=(test_farm, 'terminated'),
                               timeout=1800,
                               error_text='Servers in farm not terminated too long')
                    test_farm.destroy()
                except Exception as e:
                    # Best-effort delete: a leftover farm is logged, not fatal.
                    LOG.warning(f'Farm cannot be deleted: {str(e)}')
        LOG.info('Farm finalize complete')
def test_start_farm(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Start the farm and verify the first server bootstraps on 'system'."""
    # CloudStack environments need a long settle period before launch —
    # TODO confirm why 30 minutes.
    if CONF.feature.platform.is_cloudstack:
        time.sleep(1800)
    farm.launch()
    master = lib_server.expect_server_bootstraping_for_role(context, cloud, farm)
    servers['M1'] = master
    lib_node.assert_scalarizr_version(master, 'system')
def test_bootstrapping_role_with_at(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstrapping role with Ansible Tower.

    Boots a role with AT integration and orchestration enabled, then checks
    the host is registered on the AT server and the scalr-ansible user exists.
    """
    lib_farm.add_role_to_farm(context, farm,
                              role_options=['ansible-tower', 'ansible-orchestration'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server
    provision.assert_hostname_exists_on_at_server(server)
    provision.assert_at_user_on_server(cloud, server, 'scalr-ansible')
def test_restart_farm(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Terminate the farm, wait for all servers to die, relaunch and re-bootstrap."""
    farm.terminate()
    # All servers must reach 'terminated' before the relaunch.
    lib_server.wait_servers_state(farm, 'terminated')
    farm.launch()
    master = lib_server.expect_server_bootstraping_for_role(context, cloud, farm)
    servers['M1'] = master
    lifecycle.assert_hostname(master)
def test_nonblank_volume(self, context: dict, cloud: Cloud, farm: Farm):
    """Check partition table recognized as a non-blank volume.

    Attaches storage built from a pre-made snapshot (expected to carry a
    partition table) and asserts bootstrap fails because the volume is not
    blank.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm)
    # Snapshot id is prepared by an earlier test/fixture via context —
    # TODO confirm which step populates 'volume_snapshot_id'.
    snapshot_id = context['volume_snapshot_id']
    lifecycle.add_storage_to_role(context, farm, snapshot_id)
    farm.launch()
    # FAILED is the expected outcome: the non-blank volume must be rejected.
    lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.FAILED)
def test_restart_bootstrap(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstraping on restart.

    Clears and restarts the farm with a fresh default role, then verifies a
    server boots to RUNNING and has the expected hostname.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm)
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server
    lifecycle.assert_hostname(server)
def test_bootstrapping(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstrap a role (chef + termination preferences; storages on Linux only)."""
    # Windows roles use winchef and have no 'storages' option.
    if CONF.feature.dist.is_windows:
        role_options = ['winchef', 'termination_preferences']
    else:
        role_options = ['storages', 'chef', 'termination_preferences']
    lib_farm.add_role_to_farm(context, farm, role_options=role_options)
    farm.launch()
    master = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = master
    lifecycle.assert_szr_version_last(master)
def test_rebundle_cloudinit_server(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Verify cloudinit server rebundling work.

    Rebundles the running 'M1' server into a new role, replaces the farm's
    roles with it and verifies a fresh server bootstraps cleanly.
    """
    # 'M1' must exist from an earlier test in this class (shared fixture dict).
    bundle_id = rebundle.start_server_rebundle(servers['M1'])
    rebundle.assert_bundle_task_created(servers['M1'], bundle_id)
    role_id = rebundle.wait_bundle_complete(servers['M1'], bundle_id)
    farm.clear_roles()
    lib_farm.add_role_to_farm(context, farm, role=Role.get(role_id))
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    lifecycle.assert_szr_version_last(server)
    lib_server.assert_scalarizr_log_errors(cloud, server)
def test_chef_bootstrap_failure(self, context: dict, cloud: Cloud, farm: Farm):
    """Chef bootstrap failure.

    Boots a role whose chef run is designed to fail ('chef-fail') and checks
    the server fails with the expected status message and chef log content.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=['chef-fail'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.FAILED)
    # Failure must be attributed to the beforeHostUp chef-client run.
    lib_server.validate_failed_status_message(server,
                                              "beforeHostUp",
                                              "/usr/bin/chef-client exited with code 1")
    provision.assert_chef_log_contains_text(server, "ERROR: undefined method `fatal!'")
    provision.assert_chef_bootstrap_failed(cloud, server)
def test_chef_solo_bootstrapping(self, context: dict, cloud: Cloud, farm: Farm, role_options: str):
    """Bootstrapping role with chef-solo.

    Boots a role with the parametrized chef-solo option and verifies the
    recipe created its marker file on C:\\ and that script data was removed
    from the server afterwards.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=[role_options])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    node = cloud.get_node(server)
    # Fixed: f'C:\{role_options}' relied on the invalid escape sequence '\{'
    # (DeprecationWarning, future SyntaxError); escape the backslash explicitly.
    # The resulting path value is unchanged.
    lib_server.assert_file_exist(node, f'C:\\{role_options}')
    provision.assert_script_data_deleted(cloud, server)
def test_eph_bootstrap(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstraping with ephemeral.

    Boots a Windows role with an ephemeral disk and verifies CPU count and
    that the disk is attached as Z:\\ with the expected label and size.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=['ephemeral'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server
    lifecycle.assert_vcpu_count(server)
    # (mount point, label, size) — size unit presumably GB; TODO confirm.
    windows.assert_attached_disks_size(cloud, server, [('Z:\\', 'test_label', 4)])
    lifecycle.assert_szr_version_last(server)
def test_bootstrapping_form_chef_role(self, context: dict, cloud: Cloud, farm: Farm):
    """Bootstrapping from chef role (Windows).

    Boots a role defined by a winchef role and checks the chef run created
    its marker file, the scalarizr log is clean and the hostname/version are
    as expected.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=['winchef-role'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    node = cloud.get_node(server)
    # Fixed: 'C:\chef_result_file' contained the invalid escape sequence '\c'
    # (DeprecationWarning, future SyntaxError); a raw string yields the
    # identical path value without the warning.
    lib_server.assert_file_exist(node, r'C:\chef_result_file')
    lib_server.assert_scalarizr_log_errors(cloud, server)
    lifecycle.assert_szr_version_last(server)
    lifecycle.assert_hostname(server)
def test_bootstrapping(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstrapping.

    Boots a role with storages and iptables disabled, then verifies CPU
    count, version, hostname, that the Scalr ports are NOT filtered by
    iptables (invert=True) and that BlockDeviceMounted messages arrived.
    """
    lib_farm.add_role_to_farm(context, farm, role_options=['storages', 'noiptables'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server
    lifecycle.assert_vcpu_count(server)
    lifecycle.assert_szr_version_last(server)
    lifecycle.assert_hostname(server)
    # invert=True: with 'noiptables' these ports must NOT be firewalled.
    lifecycle.assert_iptables_ports_status(cloud, server,
                                           [8008, 8010, 8012, 8013, 8014],
                                           invert=True)
    lifecycle.assert_server_message_count(context, server, 'BlockDeviceMounted')
    lib_server.assert_scalarizr_log_errors(cloud, server)
def test_bootstrapping_from_chef_role(self, context: dict, cloud: Cloud, farm: Farm):
    """Bootstrapping from chef role (Linux).

    Boots a chef-role-based server and verifies the node is registered on
    the chef server, the node name matches the hostname and the chef log
    shows the revizor variable was applied.
    """
    lib_farm.clear(farm)
    farm.terminate()
    lib_farm.add_role_to_farm(context, farm, role_options=['chef-role'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    lib_server.assert_scalarizr_log_errors(cloud, server)
    lifecycle.assert_szr_version_last(server)
    provision.assert_node_exists_on_chef_server(server)
    provision.assert_chef_node_name_equal_hostname(cloud, server)
    provision.assert_chef_log_contains_text(server,
                                            "revizor_chef_variable=REVIZOR_CHEF_VARIABLE_VALUE_WORK")
def test_start_cloudinit_server(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Check cloudinit server started.

    Launches a raw cloud server, confirms cloud-init is installed, images
    it into a non-scalarized cloudinit role and verifies a farm server
    bootstraps from that role.
    """
    node = discovery.run_server_in_cloud(cloud)
    cloudinit.assert_cloudinit_installed(node)
    image = lib_node.create_image_from_node(node, cloud)
    role = lib_role.create_role(image, non_scalarized=True, has_cloudinit=True)
    lib_farm.add_role_to_farm(context, farm, role=Role.get(role['role']['id']))
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server
    lifecycle.assert_vcpu_count(server)
    lifecycle.assert_szr_version_last(server)
def test_bootstrapping_chef_role(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstrapping chef role firstly.

    Boots a chef-enabled role and verifies the chef run configured the
    expected daemons (memcached, chef-client) and registered the node on
    the chef server with the revizor variable applied.
    """
    lib_farm.add_role_to_farm(context, farm, role_options=['chef'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server
    lib_server.assert_scalarizr_log_errors(cloud, server)
    lifecycle.assert_szr_version_last(server)
    # Both processes are expected to have been started by the chef recipes.
    lib_node.assert_process_has_options(cloud, server, process='memcached', options='-m 1024')
    lib_node.assert_process_has_options(cloud, server, process='chef-client', options='--daemonize')
    provision.assert_node_exists_on_chef_server(server)
    provision.assert_chef_node_name_equal_hostname(cloud, server)
    provision.assert_chef_log_contains_text(server,
                                            "revizor_chef_variable=REVIZOR_CHEF_VARIABLE_VALUE_WORK")
def test_chef_bootstrapping_via_cookbooks_with_hostname_configured(self, context: dict, cloud: Cloud, farm: Farm):
    """Chef bootstrapping with hostname configured via cookbooks.

    Generates a unique hostname, passes it to the chef-hostname cookbook via
    context and asserts the booted server actually carries that hostname.
    """
    hostname = f'hostname-LIX{uuid4().hex[16:24]}'
    lib_farm.clear(farm)
    farm.terminate()
    context['chef_hostname_for_cookbook'] = hostname
    lib_farm.add_role_to_farm(context, farm, role_options=['chef-hostname'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    lib_server.assert_scalarizr_log_errors(cloud, server)
    server.reload()
    # Fixed: the assertion message referenced 'saerver.hostname' (typo),
    # which would raise NameError instead of reporting the mismatch when
    # the assertion fails.
    assert server.hostname == hostname, \
        f'Hostname on server "{server.hostname}" != chef hostname configured via the cookbook "{hostname}"'
def add_role_to_farm(context: dict,
                     farm: Farm,
                     behavior: str = None,
                     dist: str = None,
                     role: Role = None,
                     role_name: str = None,
                     role_options: tp.List[str] = None,
                     alias: str = None) -> FarmRole:
    """Add a role to *farm* and return the newly created FarmRole.

    The role is resolved, in priority order, from the explicit ``role``
    object, a role id from config/context, or by behavior lookup. The
    created FarmRole and its parameters are stored back into ``context``
    under ``'<alias>_role'`` and ``'role_params_<id>'``.

    :raises AssertionError: if the resolved role id is not numeric, or the
        role does not appear in the farm after being added.
    :raises NotFound: if no role matches the id/behavior.
    """
    behavior = (behavior or CONF.feature.behavior).strip()
    role_name = (role_name or '').strip()
    if role:
        role_id = role.id  # FIXME: Use Role object below
    else:
        role_id = CONF.feature.role_id or context.get(f'{role_name}_id', None)
    if role_options:
        LOG.debug(f'Additional role options: {role_options}')
    if role_id:
        # A role id may arrive as an int or a digit-only string (e.g. from an
        # environment variable); anything else is a configuration error.
        # Fixed: the previous message said the id "can't be only in digit
        # format", which stated the opposite of what this check enforces.
        if not isinstance(role_id, int) and not role_id.isdigit():
            raise AssertionError(f'Role id must be an integer or a digit-only string, got: {role_id!r}')
        LOG.info(f'Get role by id: {role_id}')
        role = IMPL.role.get(role_id)
    else:
        role = get_role_by_behavior(behavior, dist=dist)
    if not role:
        raise NotFound('Role with id or by mask "%s" not found in Scalr' % (role_id or behavior))
    # Snapshot existing farm-role ids so the new one can be identified below.
    previously_added_roles = [r.id for r in farm.roles]
    alias = alias or role['name']
    LOG.info(f'Add role {role["id"]} with alias {alias} to farm')
    role_params = setup_farmrole_params(
        context, farm, role_options=role_options, alias=alias, behaviors=behavior)
    farm.add_role(role['id'], options=role_params.to_json())
    # Give Scalr a moment to materialize the new farm role before reloading.
    time.sleep(5)
    farm.roles.reload()
    added_role = [r for r in farm.roles if r.id not in previously_added_roles]
    if not added_role:
        raise AssertionError(f'Added role "{role["name"]}" not found in farm')
    LOG.debug(f'Save role object with name {added_role[0].alias}')
    context[f'{added_role[0].alias}_role'] = added_role[0]
    context[f'role_params_{added_role[0].id}'] = role_params
    return added_role[0]  # TODO: Scalr return addedFarmRoleIds
def test_push_statistics(self, farm: Farm, context: dict, cloud: Cloud, servers: dict, testenv):
    """Verify push statistics work in scalarizr master.

    Forces GCE/master, boots one server per dist (ubuntu1604, centos7,
    win2012) and checks all of them push stats that land in InfluxDB.
    """
    roles = {}
    # Pin platform and branch for this scenario regardless of run config.
    CONF.feature.platform = Platform('gce')
    CONF.feature.branch = 'master'
    for dist in ('ubuntu1604', 'centos7', 'win2012'):
        roles[dist] = lib_farm.add_role_to_farm(context, farm, dist=dist)
    farm.launch()
    for index, role in enumerate(roles.values()):
        servers[f'A{index}'] = lib_server.wait_server_status(
            context, cloud, farm, role=role, status=ServerStatus.RUNNING)
    for server in servers.values():
        lib_server.assert_scalarizr_log_errors(cloud, server)
    assert_stats_received(servers, testenv, 'Pushing')
    assert_push_stats_in_influx(testenv, servers)
def test_bootstrapping_with_chef(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstrapping with chef (Windows).

    Boots a winchef role, verifies the expected recipes ran (marker files
    on C:\\), the node is registered on the chef server and the revizor
    variable was applied.
    """
    lib_farm.add_role_to_farm(context, farm, role_options=['winchef'])
    farm.launch()
    server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = server
    lib_server.assert_scalarizr_log_errors(cloud, server)
    lifecycle.assert_szr_version_last(server)
    provision.assert_node_exists_on_chef_server(server)
    orchestration.assert_recipes_in_runlist(server,
                                            ['windows_file_create', 'revizorenv', 'revizor_chef_multi'])
    node = cloud.get_node(server)
    # Fixed: both path literals used invalid escape sequences ('\c')
    # (DeprecationWarning, future SyntaxError); raw strings yield the
    # identical path values without the warnings.
    lib_server.assert_file_exist(node, r'C:\chef_result_file')
    lib_server.assert_file_exist(node, r'C:\changed_result')
    provision.assert_chef_node_name_equal_hostname(cloud, server)
    provision.assert_chef_log_contains_text(server,
                                            "revizor_chef_variable=REVIZOR_CHEF_VARIABLE_VALUE_WORK")
def test_bootstrapping(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Bootstrap a Windows role with storages and verify attached disk layout."""
    lib_farm.add_role_to_farm(context, farm, role_options=['storages'])
    farm.launch()
    master = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
    servers['M1'] = master
    lifecycle.assert_vcpu_count(master)
    # Disk layout is only asserted on platforms where attachment is supported.
    disk_platforms = (Platform.EC2, Platform.GCE, Platform.AZURE)
    if CONF.feature.platform in disk_platforms:
        expected_disks = [
            ('E:\\', 'test_label', 1),
            ('F:\\', '', 2),
            ('C:\\diskmount\\', '', 3),
        ]
        windows.assert_attached_disks_size(cloud, master, expected_disks)
    lifecycle.assert_szr_version_last(master)
    lifecycle.assert_hostname(master)
def test_farm_stop_resume(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
    """Suspend the whole farm, resume it, and verify every server comes back."""

    def _wait_all(status):
        # Wait until every server of the farm reaches the given status.
        for srv in farm.servers:
            lib_server.wait_server_status(context, cloud, farm, server=srv, status=status)

    farm.servers.reload()
    farm.suspend()
    provision.wait_for_farm_state(farm, FarmStatus.SUSPENDED)
    _wait_all(ServerStatus.SUSPENDED)
    farm.resume()
    _wait_all(ServerStatus.RESUMING)
    # Each server must report RebootFinish and a ResumeComplete event.
    for srv in farm.servers:
        lib_server.assert_server_message(cloud, farm, msgtype='in', msg='RebootFinish', server=srv)
        lib_server.assert_server_event(srv, ['ResumeComplete'])
    _wait_all(ServerStatus.RUNNING)
def having_a_stopped_farm(step):
    """Clear all roles from farm and stop farm"""
    farm = Farm.get(CONF.main.farm_id)
    world.farm = farm
    IMPL.farm.clear_roles(farm.id)
    LOG.info('Clear farm')
    # Only terminate when the farm is actually running.
    if farm.running:
        LOG.info('Terminate farm %s' % farm.id)
        farm.terminate()
def give_empty_running_farm():
    """Strip roles, vhosts and domains from the configured farm and ensure it runs."""
    # RV_FARM_ID env var overrides the configured farm id.
    farm_id = os.environ.get('RV_FARM_ID', CONF.main.farm_id)
    farm = Farm.get(farm_id)
    world.farm = farm
    farm.roles.reload()
    if len(farm.roles):
        LOG.info('Clear farm roles')
        IMPL.farm.clear_roles(farm.id)
    farm.vhosts.reload()
    for vhost in farm.vhosts:
        LOG.info('Delete vhost: %s' % vhost.name)
        vhost.delete()
    # Domains may not be available on every setup — best-effort cleanup.
    try:
        farm.domains.reload()
        for domain in farm.domains:
            LOG.info('Delete domain: %s' % domain.name)
            domain.delete()
    except Exception:
        pass
    if farm.terminated:
        farm.launch()
    LOG.info('Return empty running farm: %s' % farm.id)