def disable_external_auth_openldap():
    """Revert the appliance to database authentication and remove the
    OpenLDAP external-auth configuration files, then restart EVM.

    NOTE(review): ``manageiq_remoteuser`` holds a redacted value ('******');
    presumably it is the path of the httpd remote-user config file -- confirm.
    """
    auth = DatabaseAuthSetting()
    auth.update()
    sssd_conf = '/etc/sssd/sssd.conf'
    httpd_auth = '/etc/pam.d/httpd-auth'
    manageiq_remoteuser = '******'
    manageiq_ext_auth = '/etc/httpd/conf.d/manageiq-external-auth.conf'
    command = 'rm -rf {} && rm -rf {} && rm -rf {} && rm -rf {}'.format(
        sssd_conf, httpd_auth, manageiq_ext_auth, manageiq_remoteuser)
    current_appliance = get_or_create_current_appliance()
    with current_appliance.ssh_client as ssh:
        assert ssh.run_command(command)
        ssh.run_command('systemctl restart evmserverd')
    # FIX: reuse the appliance fetched above instead of calling
    # get_or_create_current_appliance() a second time.
    current_appliance.wait_for_web_ui()
    appliance.current_appliance.server.logout()
def pytest_configure(config): art_client = get_client( art_config=env.get('artifactor', {}), pytest_config=config) # just in case if not store.slave_manager: with diaper: atexit.register(shutdown, config) if art_client: config._art_proc = spawn_server(config, art_client) wait_for( net_check, func_args=[art_client.port, '127.0.0.1'], func_kwargs={'force': True}, num_sec=10, message="wait for artifactor to start") art_client.ready = True else: config._art_proc = None from utils.log import artifactor_handler artifactor_handler.artifactor = art_client if store.slave_manager: artifactor_handler.slaveid = store.slaveid config._art_client = art_client art_client.fire_hook('setup_merkyl', ip=get_or_create_current_appliance().address)
def simulate(instance=None, message=None, request=None, target_type=None,
             target_object=None, execute_methods=None, attributes_values=None,
             pre_clear=True, appliance=None):
    """Runs the simulation of specified Automate object."""
    appliance = appliance or get_or_create_current_appliance()
    view = navigate_to(appliance.server, 'AutomateSimulation')
    if pre_clear:
        # Reset the form to a known default state before filling it in.
        view.avp.clear()
        view.fill({
            'instance': 'Request',
            'message': 'create',
            'request': '',
            'target_type': '<None>',
            'execute_methods': True,
        })
    values = {
        'instance': instance,
        'message': message,
        'request': request,
        'target_type': target_type,
        'target_object': target_object,
        'execute_methods': execute_methods,
        'avp': attributes_values,
    }
    view.fill(values)
    view.submit_button.click()
    view.flash.assert_no_error()
    view.flash.assert_message('Automation Simulation has been run')
def simulate(
        instance=None, message=None, request=None, target_type=None,
        target_object=None, execute_methods=None, attributes_values=None,
        pre_clear=True, appliance=None):
    """Runs the simulation of specified Automate object."""
    if not appliance:
        appliance = get_or_create_current_appliance()
    simulation_view = navigate_to(appliance.server, 'AutomateSimulation')
    if pre_clear:
        # Clear leftovers from a previous run and restore form defaults.
        simulation_view.avp.clear()
        defaults = {
            'instance': 'Request',
            'message': 'create',
            'request': '',
            'target_type': '<None>',
            'execute_methods': True,
        }
        simulation_view.fill(defaults)
    simulation_view.fill({
        'instance': instance,
        'message': message,
        'request': request,
        'target_type': target_type,
        'target_object': target_object,
        'execute_methods': execute_methods,
        'avp': attributes_values,
    })
    simulation_view.submit_button.click()
    simulation_view.flash.assert_no_error()
    simulation_view.flash.assert_message('Automation Simulation has been run')
def main():
    """CLI entry point: install VDDK on the current or a named appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--address', default=None,
        help='hostname or ip address of target appliance')
    parser.add_argument('--vddk_url', help='url to download vddk pkg')
    parser.add_argument(
        '--reboot', action="store_true",
        help='reboot after installation ' + '(required for proper operation)')
    parser.add_argument(
        '--force', action="store_true",
        help='force installation if version detected')
    args = parser.parse_args()
    if args.address:
        target = IPAppliance(address=urlparse(args.address).netloc)
    else:
        target = get_or_create_current_appliance()
    target.install_vddk(
        reboot=args.reboot, force=args.force,
        vddk_url=args.vddk_url, log_callback=log)
def setup_external_auth_openldap(**data):
    """Sets up the appliance for an external authentication with OpenLdap.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaddress: Address of the OpenLDAP server.
        domain_name: Domain used to build the appliance FQDN.
        cert_filepath: Path of the certificate file on the LDAP server.

    (FIX: the docstring previously described ``ipaserver``/``iparealm``/
    ``credentials`` keys, which this function never reads.)
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaddress'],
    }
    current_appliance = get_or_create_current_appliance()
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = current_appliance.address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['domain_name'])
    with SSHClient(**connect_kwargs) as ldapserver_ssh:
        # updating the /etc/hosts is a workaround due to the
        # https://bugzilla.redhat.com/show_bug.cgi?id=1360928
        command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
        ldapserver_ssh.run_command(command)
        ldapserver_ssh.get_file(remote_file=data['cert_filepath'],
                                local_path=conf_path.strpath)
    ensure_browser_open()
    appliance.current_appliance.server.login_admin()
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", True))
    auth.setup()
    current_appliance.configure_appliance_for_openldap_ext_auth(appliance_fqdn)
    appliance.current_appliance.server.logout()
def main():
    """CLI entry point: install the NetApp SDK on an appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--address', default=None,
        help='hostname or ip address of target appliance')
    parser.add_argument(
        '--sdk_url',
        default=cfme_data.get("basic_info", {}).get("netapp_sdk_url"),
        help='url to download sdk pkg')
    parser.add_argument(
        '--restart', action="store_true",
        help='restart evmserverd after installation ' +
             '(required for proper operation)')
    args = parser.parse_args()
    if args.address:
        target = IPAppliance(address=args.address)
    else:
        target = get_or_create_current_appliance()
    print('Address: {}'.format(target.address))
    print('SDK URL: {}'.format(args.sdk_url))
    print('Restart: {}'.format(args.restart))
    target.install_netapp_sdk(
        sdk_url=args.sdk_url, reboot=args.restart, log_callback=log)
def test_check_compliance_history(request, virtualcenter_provider, vmware_vm):
    """This test checks if compliance history link in a VM details screen work.

    Steps:
        * Create any VM compliance policy
        * Assign it to a policy profile
        * Assign the policy profile to any VM
        * Perform the compliance check for the VM
        * Go to the VM details screen
        * Click on "History" row in Compliance InfoBox

    Result:
        Compliance history screen with last 10 checks should be opened
    """
    policy_name = "Check compliance history policy {}".format(fauxfactory.gen_alpha())
    policy = VMCompliancePolicy(
        policy_name,
        active=True,
        scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vmware_vm.name))
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy.create()
    policy_profile = PolicyProfile(policy.description, policies=[policy])
    request.addfinalizer(lambda: policy_profile.delete() if policy_profile.exists else None)
    policy_profile.create()
    virtualcenter_provider.assign_policy_profiles(policy_profile.description)
    request.addfinalizer(
        lambda: virtualcenter_provider.unassign_policy_profiles(policy_profile.description))
    vmware_vm.check_compliance()
    vmware_vm.open_details(["Compliance", "History"])
    appliance = get_or_create_current_appliance()
    history_screen_title = Text(
        appliance.browser.widgetastic, "//span[@id='explorer_title_text']").text
    expected_title = '"Compliance History" for Virtual Machine "{}"'.format(vmware_vm.name)
    assert history_screen_title == expected_title
def pytest_runtest_teardown(item, nextitem):
    """pytest hook: fire artifactor finish/sanitize hooks and push the
    result (plus browser/jenkins metadata) to ostriz.
    """
    # FIX: ``name, location`` were unpacked here but never used.  The call is
    # kept in case it has side effects -- TODO confirm it can be dropped.
    get_test_idents(item)
    app = get_or_create_current_appliance()
    ip = app.address
    fire_art_test_hook(
        item, 'finish_test',
        slaveid=store.slaveid, ip=ip, wait_for_task=True)
    fire_art_test_hook(item, 'sanitize', words=words)
    # Jenkins build coordinates, if we are running under CI.
    jenkins_data = {
        'build_url': os.environ.get('BUILD_URL'),
        'build_number': os.environ.get('BUILD_NUMBER'),
        'git_commit': os.environ.get('GIT_COMMIT'),
        'job_name': os.environ.get('JOB_NAME')
    }
    try:
        caps = app.browser.widgetastic.selenium.capabilities
        param_dict = {
            'browserName': caps['browserName'],
            'browserPlatform': caps['platform'],
            'browserVersion': caps['version']
        }
    except Exception as e:
        # No live browser (or capabilities missing) -- report without them.
        logger.error(e)
        param_dict = None
    fire_art_test_hook(
        item, 'ostriz_send', env_params=param_dict,
        slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item),
        jenkins=jenkins_data)
def pytest_sessionstart(session):
    """pytest hook: abort the session early when the appliance already has
    conflicting providers configured.
    """
    current = get_or_create_current_appliance()
    try:
        current.check_no_conflicting_providers()
    except ApplianceException as e:
        raise pytest.UsageError("Conflicting providers were found: {}".format(e))
def __init__(self, name=None, credentials=None, zone=None, key=None, appliance=None):
    """Store basic identity data; default to the current appliance."""
    self.appliance = appliance or get_or_create_current_appliance()
    self.name = name
    # Falsy credentials collapse to an empty mapping.
    self.credentials = credentials or {}
    self.zone = zone
    self.key = key
def __init__(self, name=None, description=None, image_type=None,
             script_type=None, script_data=None, appliance=None):
    """Plain attribute holder; falls back to the current appliance."""
    self.appliance = appliance or get_or_create_current_appliance()
    self.name = name
    self.description = description
    self.image_type = image_type
    self.script_type = script_type
    self.script_data = script_data
def get_appliance(appliance_ip):
    """Checks an appliance is not None and if so, loads the appropriate things"""
    # Local import -- presumably avoids a circular dependency; confirm
    # before hoisting to module level.
    from utils.appliance import IPAppliance, get_or_create_current_appliance
    if appliance_ip:
        return IPAppliance(appliance_ip)
    return get_or_create_current_appliance()
def __init__(self, request):
    """A simple adapter to aid in Merkyl Log Inspection during a test.

    This class is really only useful during a test and is designed to
    abstract away accessing the request object. The hooks which are fired
    can be done so during the test without this class/fixture, this is
    merely a convenience and does nothing special.
    """
    current = get_or_create_current_appliance()
    self.node = request.node
    self.ip = current.address
def test_retire_service(provider, myservice, register_event):
    """Tests my service

    Metadata:
        test_flag: provision
    """
    event_builder = EventBuilder(get_or_create_current_appliance())
    retirement_event = event_builder.new_event(
        target_type='Service',
        target_name=myservice.service_name,
        event_type='service_retired')
    register_event(retirement_event)
    myservice.retire()
def disable_external_auth_ipa():
    """Unconfigure external auth."""
    current_appliance = get_or_create_current_appliance()
    with current_appliance.ssh_client as ssh:
        ensure_browser_open()
        appliance.current_appliance.server.login_admin()
        # Switch the UI back to database authentication first.
        DatabaseAuthSetting().update()
        assert ssh.run_command("appliance_console_cli --uninstall-ipa")
        current_appliance.wait_for_web_ui()
    appliance.current_appliance.server.logout()
def __init__(self, name=None, hostname=None, port=None, credentials=None,
             key=None, appliance=None, **kwargs):
    """Hawkular provider data holder.

    Accepts an optional ``db_id`` keyword; all other attributes are stored
    as given, with falsy credentials collapsing to an empty mapping.
    """
    self.name = name
    self.hostname = hostname
    self.port = port
    self.provider_type = 'Hawkular'
    if not credentials:
        credentials = {}
    self.credentials = credentials
    self.key = key
    # FIX: dict.get replaces the manual "'db_id' in kwargs" conditional.
    self.db_id = kwargs.get('db_id')
    self.appliance = appliance or get_or_create_current_appliance()
def test_login(method):
    """ Tests that the appliance can be logged into and shows dashboard page. """
    current = get_or_create_current_appliance()
    login_page = navigate_to(current.server, 'LoginScreen')
    assert login_page.is_displayed
    login_page.login_admin(method=method)
    dashboard = current.browser.create_view(BaseLoggedInPage)
    assert dashboard.is_displayed
    dashboard.logout()
    # After logout the login screen should be shown again.
    login_page.flush_widget_cache()
    assert login_page.is_displayed
def test_vm_create(request, vm_crud, provider, register_event):
    """ Test whether vm_create_complete event is emitted.

    Prerequisities:
        * A provider that is set up and able to deploy VMs

    Steps:
        * Create a Control setup (action, policy, profile) that apply a tag on a VM when
            ``VM Create Complete`` event comes
        * Deploy the VM outside of CFME (directly in the provider)
        * Refresh provider relationships and wait for VM to appear
        * Assert the tag appears.

    Metadata:
        test_flag: provision
    """
    # Action: tag the VM with My Company Tags / Environment / Development.
    action = Action(
        fauxfactory.gen_alpha(),
        "Tag",
        dict(tag=("My Company Tags", "Environment", "Development")))
    action.create()
    request.addfinalizer(action.delete)
    policy = VMControlPolicy(fauxfactory.gen_alpha())
    policy.create()
    request.addfinalizer(policy.delete)
    policy.assign_events("VM Create Complete")
    # NOTE(review): the finalizer invokes assign_events() with no arguments,
    # presumably to clear the assigned events on teardown -- confirm.
    request.addfinalizer(policy.assign_events)
    policy.assign_actions_to_event("VM Create Complete", action)
    profile = PolicyProfile(fauxfactory.gen_alpha(), policies=[policy])
    profile.create()
    request.addfinalizer(profile.delete)
    provider.assign_policy_profiles(profile.description)
    request.addfinalizer(lambda: provider.unassign_policy_profiles(profile.description))
    # Expect the vm_create event for this VM to be captured.
    event = EventBuilder(get_or_create_current_appliance()).new_event(
        target_type='VmOrTemplate',
        target_name=vm_crud.name,
        event_type='vm_create')
    register_event(event)
    vm_crud.create_on_provider()
    provider.refresh_provider_relationships()
    vm_crud.wait_to_appear()

    def _check():
        # True once the Environment/Development tag shows up on the VM.
        return any(
            tag.category.display_name == "Environment" and
            tag.display_name == "Development"
            for tag in vm_crud.get_tags())

    wait_for(_check, num_sec=300, delay=15, message="tags to appear")
def __init__(
        self, name=None, credentials=None, key=None, zone=None,
        provider_data=None, appliance=None):
    """Provider data holder; defaults to the current appliance."""
    self.appliance = appliance or get_or_create_current_appliance()
    self.name = name
    self.credentials = credentials or {}
    self.key = key
    self.provider_data = provider_data
    self.zone = zone
    # UI accordion label changed between product versions.
    self.vm_name = version.pick({version.LOWEST: "VMs", '5.5': "VMs and Instances"})
    self.template_name = "Templates"
def pytest_runtest_protocol(item):
    """pytest hookwrapper: report session/test metadata to artifactor
    before each test runs.
    """
    global session_ver
    global session_build
    global session_stream
    if not session_ver:
        # First test of the session: cache appliance version info and fire
        # the one-time session_info hook.
        session_ver = str(version.current_version())
        session_build = store.current_appliance.build
        session_stream = store.current_appliance.version.stream()
        fire_art_hook(
            item.config, 'session_info',
            version=session_ver,
            build=session_build,
            stream=session_stream)
    tier = item.get_marker('tier')
    if tier:
        tier = tier.args[0]
    requirement = item.get_marker('requirement')
    if requirement:
        requirement = requirement.args[0]
    try:
        params = item.callspec.params
        param_dict = {p: get_name(v) for p, v in params.iteritems()}
    # FIX: was a bare ``except:`` which also swallows SystemExit and
    # KeyboardInterrupt; only the "item is not parametrized" case is expected.
    except Exception:
        param_dict = {}
    ip = get_or_create_current_appliance().address
    # This pre_start_test hook is needed so that filedump is able to get the
    # test object set up before the logger starts logging. As the logger fires
    # a nested hook to the filedumper, and we can't specify order in riggerlib.
    meta = item.get_marker('meta')
    if meta and 'blockers' in meta.kwargs:
        blocker_spec = meta.kwargs['blockers']
        blockers = []
        for blocker in blocker_spec:
            if isinstance(blocker, int):
                blockers.append(BZ(blocker).url)
            else:
                blockers.append(Blocker.parse(blocker).url)
    else:
        blockers = []
    fire_art_test_hook(
        item, 'pre_start_test',
        slaveid=store.slaveid, ip=ip)
    fire_art_test_hook(
        item, 'start_test',
        slaveid=store.slaveid, ip=ip,
        tier=tier, requirement=requirement,
        param_dict=param_dict, issues=blockers)
    yield
def __init__(self, name=None, depot_type=None, uri=None, userid=None,
             password=None, access_url=None, pxe_dir=None, windows_dir=None,
             customize_dir=None, menu_filename=None, appliance=None):
    """Plain attribute holder for a PXE depot configuration."""
    self.appliance = appliance or get_or_create_current_appliance()
    self.name = name
    self.depot_type = depot_type
    self.uri = uri
    self.userid = userid
    self.password = password
    self.access_url = access_url
    self.pxe_dir = pxe_dir
    self.windows_dir = windows_dir
    self.customize_dir = customize_dir
    self.menu_filename = menu_filename
def shutdown(config):
    """Tear down the artifactor server spawned by pytest_configure.

    On the master (no slave_manager) this also collects artifacts, fires the
    finish_session hook and terminates the client before waiting on the
    server process.
    """
    with lock:
        proc = config._art_proc
        if proc:
            if not store.slave_manager:
                write_line('collecting artifacts')
                fire_art_hook(config, 'finish_session')
            fire_art_hook(config, 'teardown_merkyl',
                          ip=get_or_create_current_appliance().address)
            if not store.slave_manager:
                config._art_client.terminate()
                # Re-read the process handle -- presumably terminate() may
                # have cleared it; confirm.
                proc = config._art_proc
                if proc:
                    proc.wait()
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaserver'],
    }
    current_appliance = get_or_create_current_appliance()
    # Generate a unique FQDN for this appliance within the IPA realm.
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = current_appliance.address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['iparealm'].lower())
    with SSHClient(**connect_kwargs) as ipaserver_ssh:
        # Register the appliance in the IPA server's /etc/hosts (replacing
        # any stale entry for the same address).
        ipaserver_ssh.run_command('cp /etc/hosts /etc/hosts_bak')
        ipaserver_ssh.run_command(
            "sed -i -r '/^{}/d' /etc/hosts".format(appliance_address))
        command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
        ipaserver_ssh.run_command(command)
    with current_appliance.ssh_client as ssh:
        result = ssh.run_command(
            'appliance_console_cli --host {}'.format(appliance_fqdn)).success
        if not current_appliance.is_pod:
            assert result
        else:
            # appliance_console_cli fails when calls hostnamectl --host. it seems docker issue
            # raise BZ ?
            assert str(ssh.run_command('hostname')).rstrip() == appliance_fqdn
        ensure_browser_open()
        appliance.current_appliance.server.login_admin()
        # IPA requires synced clocks; add the server as NTP source if missing.
        if data["ipaserver"] not in get_ntp_servers():
            set_ntp_servers(data["ipaserver"])
            sleep(120)
        auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
        auth.setup()
        creds = credentials.get(data.pop("credentials"), {})
        data.update(**creds)
        assert ssh.run_command(
            "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
            "--ipaprincipal {principal} --ipapassword {password}".format(
                **data))
    appliance.current_appliance.server.login_admin()
def test_bad_password(request):
    """ Tests logging in with a bad password. """
    appliance = get_or_create_current_appliance()
    request.addfinalizer(lambda: navigate_to(appliance.server, 'LoginScreen'))
    login_page = navigate_to(appliance.server, 'LoginScreen')
    username = conf.credentials['default']['username']
    password = "******"
    cred = Credential(principal=username, secret=password)
    user = User(credential=cred)
    user.name = 'Administrator'
    with error.expected("Sorry, the username or password you entered is incorrect."):
        login_page.log_in(user)
    # FIX: was ``login.page.is_displayed`` -- ``login`` is not defined in this
    # scope and would raise NameError; the intent is the login page object.
    assert login_page.is_displayed
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaserver'],
    }
    current_appliance = get_or_create_current_appliance()
    # Unique FQDN for this appliance inside the IPA realm.
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = current_appliance.address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['iparealm'].lower())
    with SSHClient(**connect_kwargs) as ipaserver_ssh:
        # Replace any stale /etc/hosts entry for this address on the IPA host.
        ipaserver_ssh.run_command('cp /etc/hosts /etc/hosts_bak')
        ipaserver_ssh.run_command("sed -i -r '/^{}/d' /etc/hosts".format(appliance_address))
        command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
        ipaserver_ssh.run_command(command)
    with current_appliance.ssh_client as ssh:
        result = ssh.run_command(
            'appliance_console_cli --host {}'.format(appliance_fqdn)).success
        if not current_appliance.is_pod:
            assert result
        else:
            # appliance_console_cli fails when calls hostnamectl --host. it seems docker issue
            # raise BZ ?
            assert str(ssh.run_command('hostname')).rstrip() == appliance_fqdn
        ensure_browser_open()
        appliance.current_appliance.server.login_admin()
        # Clocks must be in sync with the IPA server for Kerberos to work.
        if data["ipaserver"] not in get_ntp_servers():
            set_ntp_servers(data["ipaserver"])
            sleep(120)
        auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
        auth.setup()
        creds = credentials.get(data.pop("credentials"), {})
        data.update(**creds)
        assert ssh.run_command(
            "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
            "--ipaprincipal {principal} --ipapassword {password}".format(**data)
        )
    appliance.current_appliance.server.login_admin()
def backup_orig_state(extend_db_partition):
    """Fixture: snapshot the appliance database and identity files, then
    restore everything (and restart EVM) on teardown.
    """
    app = get_or_create_current_appliance()
    app.backup_database("/var/www/miq/orig_db.backup")
    identity_files = ("GUID", "REGION", "certs/v2_key")
    for path in identity_files:
        app.ssh_client.run_command("cp /var/www/miq/vmdb/{}{{,.bak}}".format(path))
    if not app.db_partition_extended:
        app.extend_db_partition()
    yield
    # Teardown: put the original database and identity files back.
    app.stop_evm_service()
    app.drop_database()
    app.restore_database("/var/www/miq/orig_db.backup")
    for path in identity_files:
        app.ssh_client.run_command("cp /var/www/miq/vmdb/{}{{.bak,}}".format(path))
    app.start_evm_service()
    app.wait_for_web_ui()
def test_vm_capture(request, provider, setup_provider, register_event):
    """ tests that generalize and capture vm azure events are received and parsed by CFME
    """
    mgmt = provider.mgmt
    vm = VM.factory(random_vm_name(context='capture'), provider)
    if not mgmt.does_vm_exist(vm.name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
        vm.refresh_relationships()
    # deferred delete vm
    request.addfinalizer(vm.delete_from_provider)
    # register event
    builder = EventBuilder(get_or_create_current_appliance())
    # FIX: raw string -- '\s' is not a valid string escape and becomes an
    # error in newer Python versions; the regex itself is unchanged.
    capt_regexp = r'^\s*resourceId:.*?{}.*?^\s*status:.*?^\s*value:\s*Succeeded'.format(vm.name)
    full_data_attr = {
        'full_data': 'will be ignored',
        'cmp_func': lambda _, y: bool(re.search(capt_regexp, y, re.M | re.U | re.S))}
    generalize_event = builder.new_event(
        full_data_attr, source='AZURE',
        event_type='virtualMachines_generalize_EndRequest')
    register_event(generalize_event)
    capture_event = builder.new_event(
        full_data_attr, source='AZURE',
        event_type='virtualMachines_capture_EndRequest')
    register_event(capture_event)
    # capture vm
    image_name = vm.name
    resource_group = provider.data['provisioning']['resource_group']
    mgmt.capture_vm(vm.name, resource_group, 'templates', image_name)
    # delete remaining image
    container = 'system'
    blob_images = mgmt.list_blob_images(container)
    # NOTE(review): the comment said "removing both json and vhd files" but
    # only the last matching blob is removed -- confirm intent.
    test_image = [img for img in blob_images if image_name in img][-1]
    mgmt.remove_blob_image(test_image, container)
def main():
    """CLI entry point: install VDDK on the current or a named appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--address',
        help='hostname or ip address of target appliance', default=None)
    parser.add_argument('--vddk_url', help='url to download vddk pkg')
    parser.add_argument(
        '--reboot', action="store_true",
        help='reboot after installation ' + '(required for proper operation)')
    parser.add_argument(
        '--force', action="store_true",
        help='force installation if version detected')
    args = parser.parse_args()
    if not args.address:
        target_appliance = get_or_create_current_appliance()
    else:
        target_appliance = IPAppliance(address=urlparse(args.address).netloc)
    target_appliance.install_vddk(
        reboot=args.reboot,
        force=args.force,
        vddk_url=args.vddk_url,
        log_callback=log)
def test_order_catalog_item(provider, setup_provider, catalog_item, request,
                            register_event):
    """Tests order catalog item

    Metadata:
        test_flag: provision
    """
    vm_name = catalog_item.provisioning_data["vm_name"]
    request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
    catalog_item.create()
    # Expect a service_provisioned event for this catalog item.
    event = EventBuilder(get_or_create_current_appliance()).new_event(
        target_type='Service',
        target_name=catalog_item.name,
        event_type='service_provisioned')
    register_event(event)
    service_catalogs = ServiceCatalogs(catalog_item.name)
    service_catalogs.order()
    logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
    cells = {'Description': catalog_item.name}
    row, __ = wait_for(
        requests.wait_for_request, [cells, True],
        fail_func=requests.reload, num_sec=1400, delay=20)
    assert row.request_state.text == 'Finished'
def test_check_compliance_history(request, virtualcenter_provider, vmware_vm):
    """This test checks if compliance history link in a VM details screen work.

    Steps:
        * Create any VM compliance policy
        * Assign it to a policy profile
        * Assign the policy profile to any VM
        * Perform the compliance check for the VM
        * Go to the VM details screen
        * Click on "History" row in Compliance InfoBox

    Result:
        Compliance history screen with last 10 checks should be opened
    """
    policy = VMCompliancePolicy(
        "Check compliance history policy {}".format(fauxfactory.gen_alpha()),
        active=True,
        scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vmware_vm.name))
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy.create()
    profile = PolicyProfile(policy.description, policies=[policy])
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    profile.create()
    virtualcenter_provider.assign_policy_profiles(profile.description)
    request.addfinalizer(
        lambda: virtualcenter_provider.unassign_policy_profiles(profile.description))
    vmware_vm.check_compliance()
    vmware_vm.open_details(["Compliance", "History"])
    current = get_or_create_current_appliance()
    title_widget = Text(current.browser.widgetastic, "//span[@id='explorer_title_text']")
    assert title_widget.text == '"Compliance History" for Virtual Machine "{}"'.format(
        vmware_vm.name)
def test_manage_nsg_group(provider, setup_provider, register_event):
    """ tests that create/remove azure network security groups events are received
    and parsed by CFME
    """
    nsg_name = random_vm_name(context='nsg')
    resource_group = provider.data['provisioning']['resource_group']
    # registering add/remove network security group events
    # we need to check raw data by regexps, since many azure events aren't parsed by CFME yet
    builder = EventBuilder(get_or_create_current_appliance())
    # FIX: raw strings -- '\s' is not a valid string escape and becomes an
    # error in newer Python versions; the pattern text is unchanged.
    fd_regexp = (r'^\s*resourceId:.*?{nsg}.*?^\s*status:.*?^\s*value:\s*{stat}.*?^'
                 r'\s*subStatus:.*?^\s*value:\s*{sstat}')

    def add_cmp(_, y):
        # FIX: the result was computed but never returned, so the comparison
        # function always evaluated to None (falsy) and could never match.
        return bool(re.search(
            fd_regexp.format(nsg=nsg_name, stat='Accepted', sstat='Created'),
            y, re.M | re.U | re.S))

    fd_add_attr = {'full_data': 'will be ignored', 'cmp_func': add_cmp}

    def rm_cmp(_, y):
        # FIX: same missing-return defect as add_cmp above.
        return bool(re.search(
            fd_regexp.format(nsg=nsg_name, stat='Succeeded', sstat=' '),
            y, re.M | re.U | re.S))

    fd_rm_attr = {'full_data': 'will be ignored', 'cmp_func': rm_cmp}
    add_event = builder.new_event(
        fd_add_attr, source=provider.type.upper(),
        event_type='networkSecurityGroups_write_EndRequest')
    register_event(add_event)
    remove_event = builder.new_event(
        fd_rm_attr, source=provider.type.upper(),
        event_type='networkSecurityGroups_delete_EndRequest')
    register_event(remove_event)
    # creating and removing network security group
    provider.mgmt.create_netsec_group(nsg_name, resource_group)
    provider.mgmt.remove_netsec_group(nsg_name, resource_group)
def test_distributed_vm_power_control(request, test_vm, vmware_provider, verify_vm_running,
                                      register_event, soft_assert):
    """Tests that a replication parent appliance can control the power state of a
    VM being managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def finalize():
        # Destroy both replication appliances on teardown.
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    # Configure replication and the provider on the first (child) appliance.
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        vmware_provider.create()
        wait_for_a_provider()
    appl2.ipapp.browser_steal = True
    builder = EventBuilder(get_or_create_current_appliance())
    # Both expected events target the same VM; only event_type differs.
    base_evt = partial(builder.new_event, target_type='VmOrTemplate',
                       target_name=test_vm.name)
    # Drive the power-off from the second (parent) appliance.
    with appl2.ipapp:
        register_event(base_evt(event_type='vm_poweroff'),
                       base_evt(event_type='request_vm_poweroff'))
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        flash.assert_message_contain("Stop initiated")
        navigate_to(test_vm.provider, 'Details')
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().state == 'currentstate-off')
        soft_assert(
            not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def main():
    """CLI entry point: install the NetApp SDK on an appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--address',
        help='hostname or ip address of target appliance', default=None)
    parser.add_argument(
        '--sdk_url',
        help='url to download sdk pkg',
        default=cfme_data.get("basic_info", {}).get("netapp_sdk_url"))
    parser.add_argument(
        '--restart', action="store_true",
        help='restart evmserverd after installation ' +
             '(required for proper operation)')
    args = parser.parse_args()
    if not args.address:
        target_appliance = get_or_create_current_appliance()
    else:
        target_appliance = IPAppliance(address=args.address)
    print('Address: {}'.format(target_appliance.address))
    print('SDK URL: {}'.format(args.sdk_url))
    print('Restart: {}'.format(args.restart))
    target_appliance.install_netapp_sdk(
        sdk_url=args.sdk_url, reboot=args.restart, log_callback=log)
def test_db_migrate(stabilize_current_appliance, db_url, db_version, db_desc):
    """Restore a downloaded database dump onto the appliance, migrate it,
    fix up GUID/REGION/v2_key, then verify the web UI accepts a login.
    """
    app = get_or_create_current_appliance()
    # Download the database
    logger.info("Downloading database: {}".format(db_desc))
    url_basename = os_path.basename(db_url)
    rc, out = app.ssh_client.run_command(
        'curl -o "/tmp/{}" "{}"'.format(url_basename, db_url), timeout=30)
    assert rc == 0, "Failed to download database: {}".format(out)
    # The v2_key is potentially here
    v2key_url = os_path.join(os_path.dirname(db_url), "v2_key")
    # Drop vmdb_production DB
    app.drop_database()
    # restore new DB and migrate it
    with app.ssh_client as ssh:
        rc, out = ssh.run_command('createdb vmdb_production', timeout=30)
        assert rc == 0, "Failed to create clean database: {}".format(out)
        rc, out = ssh.run_command(
            'pg_restore -v --dbname=vmdb_production /tmp/{}'.format(url_basename),
            timeout=600)
        assert rc == 0, "Failed to restore new database: {}".format(out)
        rc, out = ssh.run_rake_command("db:migrate", timeout=300)
        assert rc == 0, "Failed to migrate new database: {}".format(out)
        # FIX: raw string so '\s' stays a literal regex escape.
        rc, out = ssh.run_rake_command(
            r'db:migrate:status 2>/dev/null | grep "^\s*down"', timeout=30)
        assert rc != 0, "Migration failed; migrations in 'down' state found: {}".format(out)
        # fetch GUID and REGION from the DB and use it to replace data in
        # /var/www/miq/vmdb/GUID and /var/www/miq/vmdb/REGION respectively
        data_query = {
            'guid': 'select guid from miq_servers',
            'region': 'select region from miq_regions'
        }
        for data_type, db_query in data_query.items():
            data_filepath = '/var/www/miq/vmdb/{}'.format(data_type.upper())
            rc, out = ssh.run_command(
                'psql -d vmdb_production -t -c "{}"'.format(db_query), timeout=15)
            assert rc == 0, "Failed to fetch {}: {}".format(data_type, out)
            db_data = out.strip()
            assert db_data, "No {} found in database; query '{}' returned no records".format(
                data_type, db_query)
            rc, out = ssh.run_command(
                "echo -n '{}' > {}".format(db_data, data_filepath), timeout=15)
            assert rc == 0, "Failed to replace data in {} with '{}': \n{}".format(
                data_filepath, db_data, out)
        # fetch v2_key
        try:
            rc, out = ssh.run_command(
                'curl "{}"'.format(v2key_url), timeout=15)
            assert rc == 0, "Failed to download v2_key: {}".format(out)
            assert ":key:" in out, "Not a v2_key file: {}".format(out)
            rc, out = ssh.run_command(
                'curl -o "/var/www/miq/vmdb/certs/v2_key" "{}"'.format(v2key_url),
                timeout=15)
            assert rc == 0, "Failed to download v2_key: {}".format(out)
        # or change all invalid (now unavailable) passwords to 'invalid'
        except AssertionError:
            rc, out = ssh.run_command("fix_auth -i invalid", timeout=45)
            assert rc == 0, "Failed to change invalid passwords: {}".format(out)
        # start evmserverd, wait for web UI to start and try to log in
        try:
            app.start_evm_service()
        except ApplianceException:
            rc, out = app.ssh_client.run_rake_command("evm:start")
            assert rc == 0, "Couldn't start evmserverd: {}".format(out)
        app.wait_for_web_ui(timeout=600)
        # Reset user's password, just in case (necessary for customer DBs)
        rc, out = ssh.run_rails_command(
            '"u = User.find_by_userid(\'admin\'); u.password = \'{}\'; u.save!"'
            .format(app.user.credential.secret))
        # FIX: the message had two placeholders for three format arguments, so
        # the command output ``out`` was silently dropped from the message.
        assert rc == 0, "Failed to change UI password of {} to {}: {}".format(
            app.user.credential.principal, app.user.credential.secret, out)
    login(app.user)
def stabilize_current_appliance(backup_orig_state):
    """Reboot the appliance and stop EVM so the database can be replaced."""
    app = get_or_create_current_appliance()
    # No point waiting for the UI -- evmserverd is stopped right away.
    app.reboot(wait_for_web_ui=False)
    app.stop_evm_service()
def new_paginator():
    """ Simple function to avoid module level import """
    browser = get_or_create_current_appliance().browser.widgetastic
    return PaginationPane(parent=browser)
"""A set of functions for dealing with the paginator controls.""" from cfme.exceptions import PaginatorException from widgetastic_manageiq import PaginationPane from utils.appliance import get_or_create_current_appliance appliance = get_or_create_current_appliance() new_paginator = PaginationPane(parent=appliance.browser.widgetastic) def page_controls_exist(): """ Simple check to see if page controls exist. """ return new_paginator.is_displayed def _page_nums(): return new_paginator.pages_amount def check_all(): """ selects all items """ new_paginator.check_all() def uncheck_all(): """ unselects all items """ new_paginator.uncheck_all() def next(): """ Returns the Next button locator.""" new_paginator.next_page()
def test_ssui_login(context):
    """Log in to the server within the requested UI context."""
    current = get_or_create_current_appliance()
    with current.context.use(context):
        current.server.login()
def appliance():
    """Fixture: return the shared current appliance instance."""
    current = get_or_create_current_appliance()
    return current