def test_service_ansible_playbook_negative():
    """Verify the Add button stays disabled on the Ansible playbook
    catalog item form when only name and description are filled in
    (the mandatory provisioning fields are left empty).
    """
    add_view = navigate_to(AnsiblePlaybookCatalogItem("", "", {}), "Add")
    form_values = {
        "name": fauxfactory.gen_alphanumeric(),
        "description": fauxfactory.gen_alphanumeric(),
    }
    add_view.fill(form_values)
    assert not add_view.add.active
def category():
    """Yield a freshly created tag Category; delete it on teardown."""
    new_cat = Category(
        name=fauxfactory.gen_alphanumeric(8).lower(),
        description=fauxfactory.gen_alphanumeric(32),
        display_name=fauxfactory.gen_alphanumeric(32),
    )
    new_cat.create()
    yield new_cat
    new_cat.delete()
def test_charge_report_filter_tag(appliance, infra_provider, request):
    """Tests creation of chargeback report that is filtered by tag."""
    fields = [
        "CPU Used",
        "CPU Used Cost",
        "Memory Used",
        "Memory Used Cost",
        "Owner",
        "vCPUs Allocated Cost",
        "Total Cost",
    ]
    report = appliance.collections.reports.create(
        menu_name=fauxfactory.gen_alphanumeric(),
        title=fauxfactory.gen_alphanumeric(),
        base_report_on="Chargeback for Vms",
        report_fields=fields,
        filter={
            "filter_show_costs": "My Company Tag",
            "filter_tag_cat": "Location",
            "filter_tag_value": "Chicago",
        },
    )
    # Register cleanup directly; the lambda captures the created report.
    request.addfinalizer(lambda: _cleanup_report(report))
    report.queue(wait_for_finish=True)
def ansible_catalog_item(ansible_repository):
    """Yield an Ansible playbook catalog item; delete it on teardown if
    it still exists.
    """
    repo_name = ansible_repository.name
    cat_item = AnsiblePlaybookCatalogItem(
        fauxfactory.gen_alphanumeric(),
        fauxfactory.gen_alphanumeric(),
        display_in_catalog=True,
        provisioning={
            "repository": repo_name,
            "playbook": "dump_all_variables.yml",
            "machine_credential": "CFME Default Credential",
            "create_new": True,
            "provisioning_dialog_name": fauxfactory.gen_alphanumeric(),
            "extra_vars": [("some_var", "some_value")],
        },
        retirement={
            "repository": repo_name,
            "playbook": "dump_all_variables.yml",
            "machine_credential": "CFME Default Credential",
            "extra_vars": [("some_var", "some_value")],
        },
    )
    cat_item.create()
    yield cat_item
    if cat_item.exists:
        cat_item.delete()
def test_provider_crud(request, rest_api, from_detail):
    """Test the CRUD on provider using REST API.

    Steps:
        * POST /api/providers (method ``create``) <- {"hostname":...,
          "name":..., "type": "EmsVmware"}
        * Remember the provider ID.
        * Delete it either way:
            * DELETE /api/providers/<id>
            * POST /api/providers (method ``delete``) <- list of dicts
              containing hrefs to the providers, in this case just list
              with one dict.

    Metadata:
        test_flag: rest
    """
    if "create" not in rest_api.collections.providers.action.all:
        pytest.skip("Create action is not implemented in this version")

    # The provider type string changed between product versions.
    if current_version() < "5.5":
        provider_type = "EmsVmware"
    else:
        provider_type = "ManageIQ::Providers::Vmware::InfraManager"

    provider = rest_api.collections.providers.action.create(
        hostname=fauxfactory.gen_alphanumeric(),
        name=fauxfactory.gen_alphanumeric(),
        type=provider_type,
    )[0]

    # Delete either via the entity's own action or the collection action.
    if from_detail:
        provider.action.delete()
    else:
        rest_api.collections.providers.action.delete(provider)
    provider.wait_not_exists(num_sec=30, delay=0.5)
def ansible_catalog_item(appliance, ansible_repository):
    """Yield an Ansible playbook catalog item created through the
    appliance catalog-items collection; delete it on teardown.
    """
    repo_name = ansible_repository.name
    catalog_items = appliance.collections.catalog_items
    cat_item = catalog_items.create(
        catalog_items.ANSIBLE_PLAYBOOK,
        fauxfactory.gen_alphanumeric(),
        fauxfactory.gen_alphanumeric(),
        display_in_catalog=True,
        provisioning={
            "repository": repo_name,
            "playbook": "dump_all_variables.yml",
            "machine_credential": "CFME Default Credential",
            "create_new": True,
            "provisioning_dialog_name": fauxfactory.gen_alphanumeric(),
            "extra_vars": [("some_var", "some_value")],
        },
        retirement={
            "repository": repo_name,
            "playbook": "dump_all_variables.yml",
            "machine_credential": "CFME Default Credential",
            "extra_vars": [("some_var", "some_value")],
        },
    )
    yield cat_item
    if cat_item.exists:
        cat_item.delete()
def test_charge_report_filter_owner(appliance, infra_provider, request):
    """Tests creation of chargeback report that is filtered by owner."""
    fields = [
        "Network I/O Used",
        "Network I/O Used Cost",
        "Storage Used",
        "Storage Used Cost",
        "Disk I/O Used",
        "Disk I/O Used Cost",
        "Owner",
        "Total Cost",
    ]
    report = appliance.collections.reports.create(
        menu_name=fauxfactory.gen_alphanumeric(),
        title=fauxfactory.gen_alphanumeric(),
        base_report_on="Chargeback for Vms",
        report_fields=fields,
        filter={
            "filter_show_costs": "Owner",
            "filter_owner": "Administrator",
        },
    )
    # Register cleanup directly; the lambda captures the created report.
    request.addfinalizer(lambda: _cleanup_report(report))
    report.queue(wait_for_finish=True)
def test_charge_report_filter_owner(setup_first_provider, request):
    """Tests creation of chargeback report that is filtered by owner."""
    report = CustomReport(
        menu_name=fauxfactory.gen_alphanumeric(),
        title=fauxfactory.gen_alphanumeric(),
        base_report_on="Chargebacks",
        report_fields=[
            "Network I/O Used",
            "Network I/O Used Cost",
            "Storage Used",
            "Storage Used Cost",
            "Disk I/O Used",
            "Disk I/O Used Cost",
            "Owner",
            "Total Cost",
        ],
        filter_show_costs="Owner",
        filter_owner="Administrator",
    )
    report.create()
    # Register cleanup directly; the lambda captures the created report.
    request.addfinalizer(lambda: _cleanup_report(report))
    flash.assert_message_match('Report "{}" was added'.format(report.menu_name))
    report.queue(wait_for_finish=True)
def test_charge_report_filter_tag(setup_first_provider, request):
    """Tests creation of chargeback report that is filtered by tag."""
    report = CustomReport(
        menu_name=fauxfactory.gen_alphanumeric(),
        title=fauxfactory.gen_alphanumeric(),
        base_report_on="Chargebacks",
        report_fields=[
            "CPU Used",
            "CPU Used Cost",
            "Memory Used",
            "Memory Used Cost",
            "Owner",
            "vCPUs Allocated Cost",
            "Total Cost",
        ],
        filter_show_costs="My Company Tag",
        filter_tag_cat="Location",
        filter_tag_value="Chicago",
    )
    report.create()
    # Register cleanup directly; the lambda captures the created report.
    request.addfinalizer(lambda: _cleanup_report(report))
    flash.assert_message_match('Report "{}" was added'.format(report.menu_name))
    report.queue(wait_for_finish=True)
def new_project(appliance):
    """Yield a new project under the root tenant; delete it on teardown."""
    projects = appliance.collections.projects
    project = projects.create(
        name="project{}".format(fauxfactory.gen_alphanumeric()),
        description="project_des{}".format(fauxfactory.gen_alphanumeric()),
        parent=projects.get_root_tenant(),
    )
    yield project
    project.delete()
def test_password_max_character_validation():
    """Exercise password length validation by attempting an Amazon
    discovery with a 51-character secret.
    """
    over_long_secret = fauxfactory.gen_alphanumeric(51)
    credential = Credential(
        principal=fauxfactory.gen_alphanumeric(5),
        secret=over_long_secret,
        verify_secret=over_long_secret,
    )
    discover(credential, d_type="Amazon")
def test_edit_categories(rest_api, categories, multiple):
    """Edit one category (detail action) or several at once (collection
    action) via the REST API and wait for the changes to be visible.
    """
    collection = rest_api.collections.categories
    if "edit" not in collection.action.all:
        pytest.skip("Edit categories action is not implemented in this version")
    if multiple:
        new_names = []
        payload = []
        for ctg in categories:
            fresh = fauxfactory.gen_alphanumeric().lower()
            new_names.append(fresh)
            ctg.reload()
            payload.append({
                "href": ctg.href,
                "description": "test_category_{}".format(fresh),
            })
        collection.action.edit(*payload)
        # NOTE(review): the edited description carries a "test_category_"
        # prefix but the wait polls on the bare suffix — presumably
        # find_by matches as intended here; confirm against the client.
        for fresh in new_names:
            wait_for(
                lambda: collection.find_by(description=fresh),
                num_sec=180,
                delay=10,
            )
    else:
        ctg = collection.find_by(description=categories[0].description)[0]
        fresh = "test_category_{}".format(fauxfactory.gen_alphanumeric().lower())
        ctg.action.edit(description=fresh)
        wait_for(
            lambda: collection.find_by(description=fresh),
            num_sec=180,
            delay=10,
        )
def test_edit_roles(rest_api, roles, multiple):
    """Edit one role (detail action) or several at once (collection
    action) via the REST API and wait for the changes to be visible.
    """
    collection = rest_api.collections.roles
    if "edit" not in collection.action.all:
        pytest.skip("Edit roles action is not implemented in this version")
    if multiple:
        new_names = []
        payload = []
        for role in roles:
            suffix = fauxfactory.gen_alphanumeric()
            new_names.append(suffix)
            role.reload()
            payload.append({
                "href": role.href,
                "name": "role_name_{}".format(suffix),
            })
        collection.action.edit(*payload)
        # NOTE(review): the edited name carries a "role_name_" prefix but
        # the wait polls on the bare suffix — presumably find_by matches
        # as intended here; confirm against the client.
        for suffix in new_names:
            wait_for(
                lambda: collection.find_by(name=suffix),
                num_sec=180,
                delay=10,
            )
    else:
        role = collection.find_by(name=roles[0].name)[0]
        new_name = "role_name_{}".format(fauxfactory.gen_alphanumeric())
        role.action.edit(name=new_name)
        wait_for(
            lambda: collection.find_by(name=new_name),
            num_sec=180,
            delay=10,
        )
def vm_ownership(enable_candu, provider, appliance):
    """Fixture: assign ownership of the provider's chargeback VM to a
    freshly created user and yield that user's name.

    Chargeback reports in these tests are filtered on VM owner, so the VM
    must have an owner assigned.  Teardown unsets the ownership and
    deletes the user.
    """
    vm_name = provider.data['cap_and_util']['chargeback_vm']
    vm = appliance.provider_based_collection(provider, coll_type='vms').instantiate(
        vm_name, provider)
    if not vm.exists_on_provider:
        pytest.skip('Skipping test, {} VM does not exist'.format(vm_name))
    # Make sure the VM is powered on before assigning ownership.
    vm.mgmt.ensure_state(VmState.RUNNING)

    group_collection = appliance.collections.groups
    cb_group = group_collection.instantiate(description='EvmGroup-user')
    # Throwaway user (in the EvmGroup-user group) that will own the VM.
    user = appliance.collections.users.create(
        name="{}_{}".format(provider.name, fauxfactory.gen_alphanumeric()),
        credential=Credential(principal='uid{}'.format(fauxfactory.gen_alphanumeric()),
                              secret='secret'),
        email='*****@*****.**',
        groups=cb_group,
        cost_center='Workload',
        value_assign='Database')

    vm.set_ownership(user=user)
    logger.info('Assigned VM OWNERSHIP for {} running on {}'.format(vm_name, provider.name))
    yield user.name

    # Teardown: release ownership before removing the user.
    vm.unset_ownership()
    if user:
        user.delete()
def test_edit_rates(rest_api, rates, multiple):
    """Edit chargeback rates via the REST API — either all at once through
    the collection action or a single rate through its detail action —
    then wait until the edited description is visible.

    Bugfix: the single-rate branch previously polled
    ``rest_api.collections.categories`` for the new description, so the
    wait could never observe the edited *rate*; it now polls ``rates``.
    """
    if multiple:
        new_descriptions = []
        rates_data_edited = []
        for rate in rates:
            new_description = fauxfactory.gen_alphanumeric().lower()
            new_descriptions.append(new_description)
            rate.reload()
            rates_data_edited.append({
                "href": rate.href,
                # NOTE(review): the "test_category_" prefix looks like a
                # copy/paste from the categories test; kept as-is since the
                # generated data is otherwise arbitrary.
                "description": "test_category_{}".format(new_description),
            })
        rest_api.collections.rates.action.edit(*rates_data_edited)
        for new_description in new_descriptions:
            wait_for(
                lambda: rest_api.collections.rates.find_by(description=new_description),
                num_sec=180,
                delay=10,
            )
    else:
        rate = rest_api.collections.rates.find_by(description=rates[0].description)[0]
        new_description = 'test_rate_{}'.format(fauxfactory.gen_alphanumeric().lower())
        rate.action.edit(description=new_description)
        wait_for(
            # Poll the *rates* collection (was: categories) for the edit.
            lambda: rest_api.collections.rates.find_by(description=new_description),
            num_sec=180,
            delay=10,
        )
def test_positive_update_key(self):
    """@test: Create gpg key with valid name and valid gpg key via file import
    then update its gpg key file

    @feature: GPG Keys

    @assert: gpg key is updated
    """
    gpg_key = make_gpg_key({'organization-id': self.org['id']})
    # Fresh random content that must differ from the current key content.
    new_content = gen_alphanumeric(gen_integer(20, 50))
    self.assertNotEqual(gpg_key['content'], new_content)
    local_key = create_gpg_key_file(new_content)
    self.assertIsNotNone(local_key, 'GPG Key file must be created')
    remote_key = '/tmp/{0}'.format(gen_alphanumeric())
    ssh.upload_file(local_file=local_key, remote_file=remote_key)
    GPGKey.update({
        'key': remote_key,
        'name': gpg_key['name'],
        'organization-id': self.org['id'],
    })
    gpg_key = GPGKey.info({
        'name': gpg_key['name'],
        'organization-id': self.org['id'],
    })
    self.assertEqual(gpg_key['content'], new_content)
def chargeback_report_custom(appliance, vm_ownership, assign_custom_rate, provider):
    """Create a Chargeback report based on a custom rate; Queue the report.

    Yields the data rows of the generated report (skips the test when the
    report comes back empty); deletes the report on teardown.

    Improvement: the saved report rows are fetched once — the original
    re-queried ``saved_reports.all()[0].data.rows`` a second time for the
    yield, which is slow and could observe a different saved report.
    """
    owner = vm_ownership
    data = {
        'menu_name': '{}_{}'.format(provider.name, fauxfactory.gen_alphanumeric()),
        'title': '{}_{}'.format(provider.name, fauxfactory.gen_alphanumeric()),
        'base_report_on': 'Chargeback for Vms',
        'report_fields': ['Memory Allocated Cost', 'Memory Allocated over Time Period',
                          'Owner', 'vCPUs Allocated over Time Period',
                          'vCPUs Allocated Cost',
                          'Storage Allocated', 'Storage Allocated Cost'],
        'filter': {
            'filter_show_costs': 'Owner',
            'filter_owner': owner,
            'interval_end': 'Today (partial)'
        }
    }
    report = appliance.collections.reports.create(is_candu=True, **data)

    logger.info('Queuing chargeback report with custom rate for {} provider'.format(
        provider.name))
    report.queue(wait_for_finish=True)

    rows = list(report.saved_reports.all()[0].data.rows)
    if not rows:
        # pytest.skip raises, so the delete below is not reached (same as
        # the original behavior for an empty report).
        pytest.skip('Empty report')
    else:
        yield rows
    if report.exists:
        report.delete()
def test_schedule_crud(appliance, current_server_time):
    """CRUD test for system schedules: create, cancel an edit, reset an
    edit, save an edit (moving the start date one day earlier — covers
    BZ 1569127), then delete.
    """
    current_time, _ = current_server_time
    # Schedule two days out so the later minus-one-day update stays in the future.
    start_date = current_time + relativedelta.relativedelta(days=2)
    schedule = appliance.collections.system_schedules.create(
        name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        start_date=start_date
    )
    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_message('Schedule "{}" was saved'.format(schedule.name))
    # test for bz 1569127
    start_date_updated = start_date - relativedelta.relativedelta(days=1)
    updates = {
        'name': fauxfactory.gen_alphanumeric(),
        'description': fauxfactory.gen_alphanumeric(),
    }
    # Cancelled edit must not change anything.
    schedule.update(updates, cancel=True)
    view.flash.assert_message(
        'Edit of Schedule "{}" was cancelled by the user'.format(schedule.name))
    # Reset edit must discard the pending changes.
    schedule.update(updates, reset=True)
    view.flash.assert_message('All changes have been reset')
    # Real edit: new name and the earlier start date.
    with update(schedule):
        schedule.name = fauxfactory.gen_alphanumeric()
        schedule.start_date = start_date_updated
    view.flash.assert_message('Schedule "{}" was saved'.format(schedule.name))
    # Cancelled delete first, then the real delete.
    schedule.delete(cancel=True)
    schedule.delete()
    # NOTE(review): the delete flash is checked against the *description*,
    # not the name — confirm this matches the product message.
    view.flash.assert_message('Schedule "{}": Delete successful'.format(schedule.description))
def test_service_ansible_playbook_crud(appliance, ansible_repository):
    """CRUD an Ansible playbook catalog item: create, rename and switch
    its playbook, verify the change on the Details page, then delete.

    Polarion:
        assignee: sbulage
        casecomponent: Ansible
        caseimportance: critical
        initialEstimate: 1/6h
        tags: ansible_embed
    """
    cat_item = appliance.collections.catalog_items.create(
        appliance.collections.catalog_items.ANSIBLE_PLAYBOOK,
        fauxfactory.gen_alphanumeric(),
        fauxfactory.gen_alphanumeric(),
        provisioning={
            "repository": ansible_repository.name,
            "playbook": "dump_all_variables.yml",
            "machine_credential": "CFME Default Credential",
            "create_new": True,
            "provisioning_dialog_name": fauxfactory.gen_alphanumeric()
        }
    )
    assert cat_item.exists
    # Update: rename the item and swap the provisioning playbook.
    with update(cat_item):
        new_name = "edited_{}".format(fauxfactory.gen_alphanumeric())
        cat_item.name = new_name
        cat_item.provisioning = {
            "playbook": "copy_file_example.yml"
        }
    view = navigate_to(cat_item, "Details")
    assert new_name in view.entities.title.text
    assert view.entities.provisioning.info.get_text_of("Playbook") == "copy_file_example.yml"
    cat_item.delete()
    assert not cat_item.exists
def test_cust_template_duplicate_name_error_validation(collection):
    """Test to validate duplication in customization templates.

    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
        casecomponent: WebUI
    """
    shared_kwargs = dict(
        name=fauxfactory.gen_alphanumeric(8),
        description=fauxfactory.gen_alphanumeric(16),
        image_type='RHEL-6',
        script_type='Kickstart',
        script_data='Testing the script',
    )
    template_name = collection.create(**shared_kwargs)
    # A second template with the identical name must be rejected.
    with pytest.raises(Exception, match='Name has already been taken'):
        collection.create(**shared_kwargs)
    collection.delete(False, template_name)
def vm_ownership(enable_candu, clean_setup_provider, provider, appliance):
    """Fixture: assign ownership of the provider's chargeback VM to a
    freshly created user and yield that user's name; teardown unsets the
    ownership and deletes the user.
    """
    # In these tests, Metering report is filtered on VM owner.So,VMs have to be
    # assigned ownership.
    vm_name = provider.data['cap_and_util']['chargeback_vm']

    if not provider.mgmt.does_vm_exist(vm_name):
        pytest.skip("Skipping test, {} VM does not exist".format(vm_name))
    # Make sure the VM is powered on before assigning ownership.
    provider.mgmt.start_vm(vm_name)
    provider.mgmt.wait_vm_running(vm_name)

    group_collection = appliance.collections.groups
    cb_group = group_collection.instantiate(description='EvmGroup-user')
    # Throwaway user (in the EvmGroup-user group) that will own the VM.
    user = appliance.collections.users.create(
        name=fauxfactory.gen_alphanumeric(),
        credential=Credential(principal='uid' + '{}'.format(fauxfactory.gen_alphanumeric()),
                              secret='secret'),
        email='*****@*****.**',
        groups=cb_group,
        cost_center='Workload',
        value_assign='Database')

    vm = VM.factory(vm_name, provider)
    try:
        vm.set_ownership(user=user.name)
        logger.info('Assigned VM OWNERSHIP for {} running on {}'.format(vm_name, provider.name))
        yield user.name
    finally:
        # Always release ownership and remove the throwaway user.
        vm.unset_ownership()
        user.delete()
def test_check_package_presence(request, fleecing_vm, ssh_client, analysis_profile): """This test checks compliance by presence of a certain cfme-appliance package which is expected to be present on an appliance.""" # TODO: If we step out from provisioning a full appliance for fleecing, this might need revisit condition = VMCondition( "Compliance testing condition {}".format(fauxfactory.gen_alphanumeric(8)), expression=("fill_find(field=VM and Instance.Guest Applications : Name, " "skey=STARTS WITH, value=cfme-appliance, check=Check Count, ckey= = , cvalue=1)") ) request.addfinalizer(lambda: diaper(condition.delete)) policy = VMCompliancePolicy("Compliance {}".format(fauxfactory.gen_alphanumeric(8))) request.addfinalizer(lambda: diaper(policy.delete)) policy.create() policy.assign_conditions(condition) profile = PolicyProfile( "Compliance PP {}".format(fauxfactory.gen_alphanumeric(8)), policies=[policy] ) request.addfinalizer(lambda: diaper(profile.delete)) profile.create() fleecing_vm.assign_policy_profiles(profile.description) request.addfinalizer(lambda: fleecing_vm.unassign_policy_profiles(profile.description)) with update(analysis_profile): analysis_profile.categories = [ "check_services", "check_accounts", "check_software", "check_vmconfig", "check_system"] do_scan(fleecing_vm) assert fleecing_vm.check_compliance_and_wait()
def publish_to_template(self, template_name, email=None, first_name=None, last_name=None):
    """Publish this VM to a template through the provisioning form.

    Args:
        template_name: Name the new template should get.
        email: Requester e-mail; generated from the names when omitted.
        first_name: Requester first name; random when omitted.
        last_name: Requester last name; random when omitted.

    Returns:
        Template: handle for the newly published template.

    Raises:
        ValueError: when provisioning data for this provider is missing
            from cfme_data.yaml.
    """
    self.load_details()
    lcl_btn("Publish this VM to a Template")
    first_name = first_name or fauxfactory.gen_alphanumeric()
    last_name = last_name or fauxfactory.gen_alphanumeric()
    email = email or "{}@{}.test".format(first_name, last_name)
    try:
        prov_data = cfme_data["management_systems"][self.provider.key]["provisioning"]
    except (KeyError, IndexError):
        raise ValueError("You have to specify the correct options in cfme_data.yaml")
    provisioning_data = {
        "first_name": first_name,
        "last_name": last_name,
        "email": email,
        "vm_name": template_name,
        "host_name": {"name": prov_data.get("host")},
        "datastore_name": {"name": prov_data.get("datastore")},
    }
    from cfme.provisioning import provisioning_form
    fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
    cells = {'Description': 'Publish from [{}] to [{}]'.format(self.name, template_name)}
    # Wait (up to 15 minutes) for the publish request to complete.
    row, __ = wait_for(
        requests.wait_for_request, [cells],
        fail_func=requests.reload, num_sec=900, delay=20)
    return Template(template_name, self.provider)
def new_category():
    """Yield a 'tag_vis_'-prefixed Category; delete it on teardown."""
    prefix = "tag_vis_"
    cat = Category(
        name=prefix + fauxfactory.gen_alpha(8).lower(),
        description=prefix + fauxfactory.gen_alphanumeric(),
        display_name=prefix + fauxfactory.gen_alphanumeric(),
    )
    cat.create()
    yield cat
    cat.delete(cancel=False)
def random_labels(provider, appliance):
    """Fixture: attach one randomly-named label to a random instance of
    each object kind in TEST_OBJECTS; remove those labels on teardown.

    Yields a list of ``label_data`` namedtuples:
    (instance, label_name, label_value, status_code, json_content).
    ``status_code``/``json_content`` record the ``set_label`` result, or
    ``(None, traceback)`` when the call raised.

    Bugfixes vs. the original:
      * the cleanup loop sat after a ``return`` and therefore never ran;
        the fixture now ``yield``s so teardown executes (e.g. when
        test_labels_remove is skipped);
      * the cleanup loop unpacked 4 fields from the 5-field namedtuple
        (a ValueError) and referenced the loop-leaked ``instance`` rather
        than each record's own instance;
      * bare ``except:`` narrowed to ``except Exception``.
    """
    label_data = namedtuple(
        'label_data',
        ['instance', 'label_name', 'label_value', 'status_code', 'json_content'])
    data_collection = []
    for test_obj in TEST_OBJECTS:
        instance = test_obj.collection_obj(appliance).get_random_instances().pop()
        # First char alphabetic, remainder alphanumeric (random lengths).
        label_key = (fauxfactory.gen_alpha(1) +
                     fauxfactory.gen_alphanumeric(random.randrange(1, 62)))
        value = fauxfactory.gen_alphanumeric(random.randrange(1, 63))
        try:
            status_code, json_content = instance.set_label(label_key, value)
        except Exception:
            status_code, json_content = None, format_exc()
        data_collection.append(
            label_data(instance, label_key, value, status_code, json_content)
        )
    yield data_collection
    # Teardown: remove any label that was successfully applied.
    for instance, label_key, _, status_code, _ in data_collection:
        if status_code and label_key in instance.get_labels():
            instance.remove_label(label_key)
def test_discovery_error_azure_cloud(appliance):
    """ Test Azure discovery with fake data
    prerequisites:
        * appliance supporting discovery

    Steps:
        * Navigate Cloud provider discovery and select Azure
        * Fill all fields with fake data
        * Start Discovery
        * Even with wrong data discovery will start with the proper flash message
          assert it
        * Check for provider should not discover
    """
    # Random (invalid) Azure credentials — discovery should still start.
    cred = Credential(
        principal=fauxfactory.gen_alphanumeric(5),
        secret=fauxfactory.gen_alphanumeric(8),
        tenant_id=fauxfactory.gen_alphanumeric(10),
        subscription_id=fauxfactory.gen_alphanumeric(10))
    collection = appliance.collections.cloud_providers
    view = navigate_to(collection, 'All')
    # Provider count before discovery, for the final no-new-provider check.
    initial_count = len(view.entities.entity_names)
    collection.discover(cred, AzureProvider)
    view = appliance.browser.create_view(CloudProvidersView)
    view.flash.assert_success_message('Cloud Providers: Discovery successfully initiated')
    # While waiting for new provider, TimeOutError will come (Negative Test)
    with pytest.raises(TimedOutError):
        collection.wait_for_new_provider(timeout=120)
    # With fake credentials no provider may have been added.
    assert len(view.entities.entity_names) <= initial_count
def schedule(appliance):
    """Yield a system schedule with random name and description; delete
    it on teardown."""
    new_schedule = appliance.collections.system_schedules.create(
        name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
    )
    yield new_schedule
    new_schedule.delete()
def test_user_change_password(request):
    """Create a user, log in as them, change their password as admin, and
    verify login works with the new credentials.
    """
    user = ac.User(
        name="user {}".format(fauxfactory.gen_alphanumeric()),
        credential=Credential(
            principal="user_principal_{}".format(fauxfactory.gen_alphanumeric()),
            secret="very_secret",
            verify_secret="very_secret"
        ),
        email="*****@*****.**",
        group=usergrp,
    )
    user.create()
    # Ensure the user is removed and we end up logged back in as admin.
    request.addfinalizer(user.delete)
    request.addfinalizer(login.login_admin)
    # Verify the fresh credentials work.
    login.logout()
    assert not login.logged_in()
    login.login(user.credential.principal, user.credential.secret)
    assert login.current_full_name() == user.name
    # Back to admin to perform the password change.
    login.login_admin()
    with update(user):
        # Credential objects are replaced wholesale, not mutated in place.
        user.credential = Credential(
            principal=user.credential.principal,
            secret="another_very_secret",
            verify_secret="another_very_secret",
        )
    login.logout()
    assert not login.logged_in()
    # The new password must now be accepted.
    login.login(user.credential.principal, user.credential.secret)
    assert login.current_full_name() == user.name
def test_rssfeedwidget_crud(appliance):
    """CRUD an RSS dashboard widget: create it, check the flash message,
    update the active flag, switch the feed (internal -> named external
    -> custom URL), then delete it.
    """
    widgets = appliance.collections.dashboard_report_widgets
    widget = widgets.create(
        widgets.RSS,
        fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        active=True,
        type="Internal",
        feed="Administrative Events",
        rows="8",
        visibility="<To All Users>",
    )
    view = widget.create_view(AllDashboardWidgetsView)
    view.flash.assert_message('Widget "{}" was saved'.format(widget.title))
    # Basic update: deactivate.
    with update(widget):
        widget.active = False
    # Switch to a predefined external feed.
    with update(widget):
        widget.type = "External"
        widget.external = "SlashDot"
    # ...and then to a custom feed address.
    with update(widget):
        widget.type = "External"
        widget.external = "http://rss.example.com/"
    widget.delete()
def test_check_files(request, fleecing_vm, ssh_client, analysis_profile):
    """This test checks presence and contents of a certain file. Due to caching, an existing
    file is checked.

    Builds a condition matching /etc/sudo.conf containing "sudoers_policy",
    wraps it in a compliance policy and a policy profile, assigns the
    profile to the VM, scans it and asserts compliance.  Finalizers
    (errors swallowed by ``diaper``) remove everything created.
    """
    check_file_name = "/etc/sudo.conf"
    check_file_contents = "sudoers_policy"  # The file contains: `Plugin sudoers_policy sudoers.so`
    condition = VMCondition(
        "Compliance testing condition {}".format(fauxfactory.gen_alphanumeric(8)),
        expression=("fill_find(VM and Instance.Files : Name, "
                    "=, {}, Check Any, Contents, INCLUDES, {})".format(
                        check_file_name, check_file_contents))
    )
    request.addfinalizer(lambda: diaper(condition.delete))
    policy = VMCompliancePolicy("Compliance {}".format(fauxfactory.gen_alphanumeric(8)))
    request.addfinalizer(lambda: diaper(policy.delete))
    policy.create()
    policy.assign_conditions(condition)
    profile = PolicyProfile(
        "Compliance PP {}".format(fauxfactory.gen_alphanumeric(8)),
        policies=[policy]
    )
    request.addfinalizer(lambda: diaper(profile.delete))
    profile.create()
    fleecing_vm.assign_policy_profiles(profile.description)
    request.addfinalizer(lambda: fleecing_vm.unassign_policy_profiles(profile.description))
    # The scan must collect this file's contents for the condition to evaluate.
    with update(analysis_profile):
        analysis_profile.files = [(check_file_name, True)]
        analysis_profile.categories = [
            "check_services", "check_accounts", "check_software", "check_vmconfig",
            "check_system"]
    do_scan(fleecing_vm, ("Configuration", "Files"))
    assert fleecing_vm.check_compliance_and_wait()
def test_migration_playbooks(request, appliance, v2v_providers, host_creds, conversion_tags,
                             ansible_repository, form_data_vm_obj_single_datastore):
    """Test for migrating vms with pre and post playbooks.

    Creates a machine credential plus provision/retire Ansible catalog
    items, an infrastructure mapping and a migration plan that runs the
    provision playbook before migration and the retire playbook after;
    waits for the plan to finish and verifies the migrated VM by
    comparing MAC addresses.
    """
    creds = credentials[v2v_providers.vmware_provider.data.templates.get("rhel7_minimal").creds]
    CREDENTIALS = (
        "Machine",
        {
            "username": creds.username,
            "password": creds.password,
            # NOTE(review): key is spelled "privilage_escalation" —
            # presumably matches the UI form field name; confirm before
            # "fixing" the typo.
            "privilage_escalation": "sudo",
        },
    )
    credential = appliance.collections.ansible_credentials.create(
        name="{type}_credential_{cred}".format(type=CREDENTIALS[0],
                                               cred=fauxfactory.gen_alpha()),
        credential_type=CREDENTIALS[0],
        **CREDENTIALS[1]
    )
    # Catalog items wrapping the pre/post migration playbooks.
    provision_catalog = catalog_item(
        request, appliance, credential.name, ansible_repository, "provision"
    )
    retire_catalog = catalog_item(
        request, appliance, credential.name, ansible_repository, "retire"
    )
    infrastructure_mapping_collection = appliance.collections.v2v_mappings
    mapping = infrastructure_mapping_collection.create(
        form_data_vm_obj_single_datastore.form_data
    )

    @request.addfinalizer
    def _cleanup():
        infrastructure_mapping_collection.delete(mapping)

    # vm_obj is a list, with only 1 VM object, hence [0]
    src_vm_obj = form_data_vm_obj_single_datastore.vm_list[0]

    migration_plan_collection = appliance.collections.v2v_plans
    migration_plan = migration_plan_collection.create(
        name="plan_{}".format(fauxfactory.gen_alphanumeric()),
        description="desc_{}".format(fauxfactory.gen_alphanumeric()),
        infra_map=mapping.name,
        vm_list=form_data_vm_obj_single_datastore.vm_list,
        start_migration=True,
        pre_playbook=provision_catalog.name,
        post_playbook=retire_catalog.name,
    )

    # explicit wait for spinner of in-progress status card
    view = appliance.browser.create_view(
        navigator.get_class(migration_plan_collection, "All").VIEW.pick()
    )
    wait_for(
        func=view.progress_card.is_plan_started,
        func_args=[migration_plan.name],
        message="migration plan is starting, be patient please",
        delay=5,
        num_sec=280,
        handle_exception=True,
        fail_cond=False
    )

    # wait until plan is in progress
    wait_for(
        func=view.plan_in_progress,
        func_args=[migration_plan.name],
        message="migration plan is in progress, be patient please",
        delay=15,
        num_sec=3600,
    )
    view.switch_to("Completed Plans")
    view.wait_displayed()
    migration_plan_collection.find_completed_plan(migration_plan)
    logger.info(
        "For plan %s, migration status after completion: %s, total time elapsed: %s",
        migration_plan.name,
        view.migration_plans_completed_list.get_vm_count_in_plan(migration_plan.name),
        view.migration_plans_completed_list.get_clock(migration_plan.name),
    )
    # validate MAC address matches between source and target VMs
    assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name)
    migrated_vm = get_migrated_vm_obj(src_vm_obj, v2v_providers.rhv_provider)
    assert src_vm_obj.mac_address == migrated_vm.mac_address
def catalog():
    """Yield the *name* of a freshly created catalog.

    NOTE(review): the fixture yields the catalog name (a string), not the
    Catalog object, and performs no teardown — confirm callers depend on
    that before changing it.
    """
    catalog_name = "cat_" + fauxfactory.gen_alphanumeric()
    new_catalog = Catalog(name=catalog_name, description="my catalog")
    new_catalog.create()
    yield catalog_name
def __init__( self, cpu=4, ram=16384, distro=None, provisioning_server=None, image_dir=None, org_id=None, lce_id=None, organization_ids=None, location_ids=None): """Manage a virtual machine with satellite capsule product setup for client provisioning. :param int cpu: The number of CPUs usage. :param int ram: the number of RAM usage in mega bytes. :param str distro: The OS distro to use to provision the virtual machine, it's also used in capsule setup to prepare the satellite products content. :param str provisioning_server: the provisioning server url :param str image_dir: the images location path on the provisioning server. :param int org_id: The organization id used to subscribe the virtual machine and to create the products contents that the virtual machine will use to setup the capsule. :param int lce_id: the lifecycle environment used for the subscription of virtual machine :param List[int] organization_ids: the organization ids of organizations that will use the capsule. :param List[int] location_ids: the location ids for which the content will be synchronized. 
""" # ensure that capsule configuration exist and validate if not setting_is_set('capsule'): raise CapsuleVirtualMachineError('capsule configuration not set') name_prefix = gen_alphanumeric(4).lower() self._capsule_instance_name = ( '{0}-{1}'.format(name_prefix, settings.capsule.instance_name) ) self._capsule_domain = settings.clients.provisioning_server.split( '.', 1)[1] self._capsule_hostname = ( '{0}.{1}'.format( self._capsule_instance_name, self._capsule_domain ) ) super(CapsuleVirtualMachine, self).__init__( cpu=cpu, ram=ram, distro=distro, provisioning_server=provisioning_server, image_dir=image_dir, domain=self._capsule_domain, hostname=self._capsule_hostname, target_image=self._capsule_instance_name ) self._capsule_org_id = org_id self._capsule_lce_id = lce_id if organization_ids is None: organization_ids = [] self._capsule_organization_ids = organization_ids if location_ids is None: location_ids = [] self._capsule_location_ids = location_ids self._capsule = None self._capsule_org = None self._capsule_lce = None
def test_vmware_vimapi_hotadd_disk(appliance, request, testing_group, provider, testing_vm,
                                   domain, cls):
    """ Tests hot adding a disk to vmware vm. This test exercises the ``VMware_HotAdd_Disk``
    method, located in ``/Integration/VMware/VimApi``

    Steps:
        * It creates an instance in ``System/Request`` that can be accessible from eg. a button.
        * Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``.
          The button shall belong in the VM and instance button group.
        * After the button is created, it goes to a VM's summary page, clicks the button.
        * The test waits until the capacity of disks is raised.

    Metadata:
        test_flag: hotdisk, provision

    Polarion:
        assignee: dmisharo
        initialEstimate: 1/8h
    """
    # Automate method supplying the size (GB) of the disk to hot-add.
    meth = cls.methods.create(
        name='load_value_{}'.format(fauxfactory.gen_alpha()),
        script=dedent('''\
            # Sets the capacity of the new disk.
            $evm.root['size'] = 1 # GB
            exit MIQ_OK
            '''))

    request.addfinalizer(meth.delete_if_exists)

    # Instance that calls the method and is accessible from the button
    instance = cls.instances.create(
        name="VMware_HotAdd_Disk_{}".format(fauxfactory.gen_alpha()),
        fields={
            "meth4": {'value': meth.name},  # To get the value
            "rel5": {'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"},
        },
    )

    request.addfinalizer(instance.delete_if_exists)

    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = testing_group.buttons.create(
        text=button_name,
        hover=button_name,
        system="Request",
        request=instance.name)
    request.addfinalizer(button.delete_if_exists)

    def _get_disk_capacity():
        # Reads "Total Allocation" from the VM's datastore allocation summary.
        view = testing_vm.load_details(refresh=True)
        return view.entities.summary(
            'Datastore Allocation Summary').get_text_of('Total Allocation')

    original_disk_capacity = _get_disk_capacity()
    logger.info('Initial disk allocation: %s', original_disk_capacity)

    class CustomButtonView(View):
        custom_button = Dropdown(testing_group.text)

    view = appliance.browser.create_view(CustomButtonView)
    view.custom_button.item_select(button.text)

    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_no_error()
    try:
        # The hot-added disk should raise the reported allocation.
        wait_for(lambda: _get_disk_capacity() > original_disk_capacity, num_sec=180, delay=5)
    finally:
        logger.info('End disk capacity: %s', _get_disk_capacity())
def domain(request, domain_collection):
    """Yield an enabled automate domain with a random name; delete it on
    teardown if it still exists."""
    new_domain = domain_collection.create(
        name=fauxfactory.gen_alphanumeric(),
        enabled=True,
    )
    yield new_domain
    if new_domain.exists:
        new_domain.delete()
def policy_profile_name():
    """Return a random, prefixed policy-profile name for compliance tests."""
    prefix = "compliance_testing: policy profile "
    return fauxfactory.gen_alphanumeric(43, start=prefix)
def policy_name():
    """Return a random, prefixed policy name for compliance tests."""
    prefix = "compliance_testing: policy "
    return fauxfactory.gen_alphanumeric(35, start=prefix)
def enable_external(self, db_address, region=0, db_name=None, db_username=None,
                    db_password=None):
    """Enables external database

    Args:
        db_address: Address of the external database
        region: Number of region to join
        db_name: Name of the external DB
        db_username: Username to access the external DB
        db_password: Password to access the external DB

    Returns a tuple of (exitstatus, script_output) for reporting, if desired

    Raises:
        ApplianceException: when the configuration command/script exits non-zero.
    """
    self.logger.info(
        'Enabling external DB (db_address {}, region {}) on {}.'.format(
            db_address, region, self.address))
    # reset the db address and clear the cached db object if we have one
    self.address = db_address
    clear_property_cache(self, 'client')

    # default
    db_name = db_name or 'vmdb_production'
    db_username = db_username or conf.credentials['database']['username']
    db_password = db_password or conf.credentials['database']['password']

    client = self.ssh_client

    if self.appliance.has_cli:
        if not client.is_pod:
            # copy v2 key from the DB master so both appliances share encryption
            master_client = client(hostname=self.address)
            rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
            master_client.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
            client.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        # enable external DB with cli
        status, out = client.run_command(
            'appliance_console_cli '
            '--hostname {0} --region {1} --dbname {2} --username {3} --password {4}'
            .format(self.address, region, db_name, db_username, db_password))
    else:
        # no cli, use the enable external db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'host': self.address,
            'region': region,
            'database': db_name,
            'username': db_username,
            'password': db_password
        }
        # Find and load our rb template with replacements.
        # FIX: previously loaded 'enable-internal-db.rbt' here (copy-paste from
        # enable_internal); the external path must use the external template.
        rbt = datafile.data_path_for_filename('enable-external-db.rbt',
                                              scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)

        # Init SSH client and sent rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)

        # Run the rb script, clean it up when done
        status, out = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    if status != 0:
        self.logger.error('error enabling external db')
        self.logger.error(out)
        msg = ('Appliance {} failed to enable external DB running on {}'
               .format(self.appliance.hostname, db_address))
        self.logger.error(msg)
        from . import ApplianceException
        raise ApplianceException(msg)

    return status, out
def name_suffix():
    """Return a random alphanumeric suffix for building unique test names."""
    return fauxfactory.gen_alphanumeric()
def vm_name():
    """Return a unique VM name for the quota provisioning tests."""
    return 'test_quota_prov_{}'.format(fauxfactory.gen_alphanumeric())
def new_credential():
    """Return a Credential with a random 'uid'-prefixed principal and fixed secret."""
    principal = 'uid{}'.format(fauxfactory.gen_alphanumeric())
    return Credential(principal=principal, secret='secret')
def enable_internal(self, region=0, key_address=None, db_password=None,
                    ssh_password=None, db_disk=None):
    """Enables internal database

    Args:
        region: Region number of the CFME appliance.
        key_address: Address of CFME appliance where key can be fetched.
        db_disk: Path of the db disk for --dbdisk appliance_console_cli.
            If not specified it tries to load it from the appliance.

    Note:
        If key_address is None, a new encryption key is generated for the
        appliance.

    Returns a tuple of (exitstatus, script_output) from the configuration run.

    Raises:
        Exception: when the appliance_console_cli invocation reports failure.
    """
    # self.logger.info('Enabling internal DB (region {}) on {}.'.format(region, self.address))
    # Point at the appliance itself and drop any cached DB client.
    self.address = self.appliance.hostname
    clear_property_cache(self, 'client')
    client = self.ssh_client

    # Defaults
    db_password = db_password or conf.credentials['database']['password']
    ssh_password = ssh_password or conf.credentials['ssh']['password']

    if not db_disk:
        # Pick the first unpartitioned disk when the caller did not name one.
        try:
            db_disk = self.appliance.unpartitioned_disks[0]
        except IndexError:
            db_disk = None
            self.logger.warning(
                'Failed to set --dbdisk from the appliance. On 5.9.0.3+ it will fail.')

    # make sure the dbdisk is unmounted, RHOS ephemeral disks come up mounted
    result = client.run_command('umount {}'.format(db_disk))
    if not result.success:
        # FIX: the message previously lacked a '{}' placeholder, so .format(result)
        # silently dropped the umount output from the log.
        self.logger.warning('umount non-zero return, output was: {}'.format(result))

    if self.appliance.has_cli:
        base_command = 'appliance_console_cli --region {}'.format(region)
        # use the cli
        if key_address:
            command_options = ('--internal --fetch-key {key} -p {db_pass} -a {ssh_pass}'
                               .format(key=key_address, db_pass=db_password,
                                       ssh_pass=ssh_password))
        else:
            command_options = '--internal --force-key -p {db_pass}'.format(
                db_pass=db_password)
        if db_disk:
            command_options = ' '.join([command_options, '--dbdisk {}'.format(db_disk)])
        status, out = client.run_command(' '.join([base_command, command_options]))
        if status != 0 or 'failed' in out.lower():
            raise Exception('Could not set up the database:\n{}'.format(out))
    else:
        # no cli, use the enable internal db script
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': region,
            'postgres_version': self.postgres_version
        }
        # Find and load our rb template with replacements
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt',
                                              scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)
        # sent rb file over to /tmp
        remote_file = '/tmp/{}'.format(fauxfactory.gen_alphanumeric())
        client.put_file(rb.name, remote_file)
        # Run the rb script, clean it up when done
        status, out = client.run_command('ruby {}'.format(remote_file))
        client.run_command('rm {}'.format(remote_file))

    self.logger.info('Output from appliance db configuration: %s', out)
    return status, out
def test_control_alert_copy(random_alert):
    """Copy an alert under a random description, then delete the copy."""
    copied = random_alert.copy(description=fauxfactory.gen_alphanumeric())
    copied.delete()
from collections import namedtuple from copy import copy from fauxfactory import gen_alphanumeric, gen_integer import pytest from cfme.containers.provider import ContainersProvider from cfme.utils.version import current_version from cfme.common.provider_views import ContainerProvidersView pytestmark = [ pytest.mark.uncollectif(lambda: current_version() < "5.8.0.3"), pytest.mark.provider([ContainersProvider], scope='module') ] alphanumeric_name = gen_alphanumeric(10) long_alphanumeric_name = gen_alphanumeric(100) integer_name = str(gen_integer(0, 100000000)) provider_names = alphanumeric_name, integer_name, long_alphanumeric_name AVAILABLE_SEC_PROTOCOLS = ('SSL trusting custom CA', 'SSL without validation', 'SSL') DEFAULT_SEC_PROTOCOLS = ('SSL trusting custom CA', 'SSL without validation', 'SSL') checked_item = namedtuple('TestItem', ['default_sec_protocol', 'metrics_sec_protocol']) TEST_ITEMS = (checked_item('SSL trusting custom CA', 'SSL trusting custom CA'), checked_item('SSL trusting custom CA', 'SSL without validation'), checked_item('SSL trusting custom CA', 'SSL'),
def policy(policy_class):
    """Yield a freshly created policy of the given class; delete on teardown."""
    new_policy = policy_class(fauxfactory.gen_alphanumeric())
    new_policy.create()
    yield new_policy
    new_policy.delete()
def test_cloud_catalog_item(appliance, vm_name, setup_provider, provider, dialog, catalog,
                            request, provisioning):
    """Tests cloud catalog item

    Metadata:
        test_flag: provision

    Polarion:
        assignee: nansari
        casecomponent: Services
        initialEstimate: 1/4h
    """
    # Make sure provider inventory is fresh before ordering the catalog item.
    wait_for(provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
    vm = appliance.collections.cloud_instances.instantiate(
        "{}0001".format(vm_name), provider)
    request.addfinalizer(lambda: vm.cleanup_on_provider())
    image = provisioning['image']['name']
    item_name = "{}-service-{}".format(provider.name, fauxfactory.gen_alphanumeric())
    # Base provisioning payload; provider-specific keys are merged in below.
    inst_args = {
        'catalog': {
            'catalog_name': {'name': image, 'provider': provider.name},
            'vm_name': vm_name
        },
        'environment': {
            'availability_zone': provisioning.get('availability_zone', None),
            'security_groups': [provisioning.get('security_group', None)],
            'cloud_tenant': provisioning.get('cloud_tenant', None),
            'cloud_network': provisioning.get('cloud_network', None),
            'cloud_subnet': provisioning.get('cloud_subnet', None),
            'resource_groups': provisioning.get('resource_group', None)
        },
        'properties': {
            'instance_type': partial_match(provisioning.get('instance_type', None)),
            'guest_keypair': provisioning.get('guest_keypair', None)
        }
    }
    # GCE specific
    if provider.one_of(GCEProvider):
        recursive_update(inst_args, {
            'properties': {
                'boot_disk_size': provisioning['boot_disk_size'],
                'is_preemptible': True
            }
        })
    # Azure specific
    if provider.one_of(AzureProvider):
        recursive_update(inst_args, {
            'customize': {
                'admin_username': provisioning['customize_username'],
                'root_password': provisioning['customize_password']
            }
        })
    catalog_item = appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
        prov_data=inst_args)
    request.addfinalizer(catalog_item.delete)
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
    service_catalogs.order()
    logger.info('Waiting for cfme provision request for service %s', item_name)
    request_description = item_name
    # partial_check because the request description contains more than the item name.
    provision_request = appliance.collections.requests.instantiate(
        request_description, partial_check=True)
    provision_request.wait_for_request()
    msg = "Request failed with the message {}".format(provision_request.rest.message)
    assert provision_request.is_succeeded(), msg
def schedule(appliance):
    """Yield a system schedule with random name/description; delete after use."""
    sched = appliance.collections.system_schedules.create(
        name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric())
    yield sched
    sched.delete()
def control_policy(request):
    """Yield a control policy of the parametrized class; delete on teardown."""
    new_policy = request.param(fauxfactory.gen_alphanumeric())
    new_policy.create()
    yield new_policy
    new_policy.delete()
def test_custom_button_dialog_container_obj(appliance, dialog, request, setup_obj, button_group):
    """ Test custom button with dialog and InspectMe method

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: high
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create custom button group with the Object type
            2. Create a custom button with service dialog
            3. Navigate to object Details page
            4. Check for button group and button
            5. Select/execute button from group dropdown for selected entities
            6. Fill dialog and submit
            7. Check for the proper flash message related to button execution
            8. Check request in automation log

    Bugzilla:
        1729903
        1732489
    """
    group, obj_type = button_group
    # Note: No need to set display_for dialog only work with Single entity
    button = group.buttons.create(
        text=fauxfactory.gen_alphanumeric(),
        hover=fauxfactory.gen_alphanumeric(),
        dialog=dialog,
        system="Request",
        request="InspectMe",
    )
    request.addfinalizer(button.delete_if_exists)
    view = navigate_to(setup_obj, "Details")
    custom_button_group = Dropdown(view, group.hover)
    assert custom_button_group.has_item(button.text)
    custom_button_group.item_select(button.text)
    dialog_view = view.browser.create_view(TextInputDialogView, wait="10s")
    assert dialog_view.service_name.fill("Custom Button Execute")
    # Clear the automation log so the later request count starts from zero.
    assert appliance.ssh_client.run_command(
        'echo -n "" > /var/www/miq/vmdb/log/automation.log')
    # Submit order
    dialog_view.submit.click()
    # Flash assertion skipped for providers while BZ 1732489 blocks 5.10/5.11.
    if not (BZ(1732489, forced_streams=["5.10", "5.11"]).blocks and obj_type == "PROVIDER"):
        view.flash.assert_message("Order Request was Submitted")
    # Check for request in automation log
    try:
        wait_for(
            log_request_check,
            [appliance, 1],
            timeout=300,
            message="Check for expected request count",
            delay=20,
        )
    except TimedOutError:
        assert False, "Expected 1 requests not found in automation log"
def random_host_control_policy():
    """Yield a host control policy with a random name; delete on teardown."""
    host_policy = policies.HostControlPolicy(fauxfactory.gen_alphanumeric())
    host_policy.create()
    yield host_policy
    host_policy.delete()
def original_method(request, original_method_write_data, original_class):
    """Create an inline automate method whose script writes the fixture payload."""
    script = METHOD_TORSO.format(original_method_write_data)
    return original_class.methods.create(
        name=fauxfactory.gen_alphanumeric(),
        location='inline',
        script=script)
def test_custom_button_expression_container_obj(appliance, request, setup_obj, button_group,
                                                expression):
    """ Test custom button as per expression enablement/visibility.

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create custom button group with the Object type
            2. Create a custom button with expression (Tag)
                a. Enablement Expression
                b. Visibility Expression
            3. Navigate to object Detail page
            4. Check: button should not enable/visible without tag
            5. Check: button should enable/visible with tag
    """
    group, obj_type = button_group
    # Expression payload keyed by the parametrized kind ("enablement"/"visibility").
    exp = {expression: {"tag": "My Company Tags : Department", "value": "Engineering"}}
    disabled_txt = "Tag - My Company Tags : Department : Engineering"
    button = group.buttons.create(
        text=fauxfactory.gen_alphanumeric(),
        hover=fauxfactory.gen_alphanumeric(),
        display_for="Single entity",
        system="Request",
        request="InspectMe",
        **exp)
    request.addfinalizer(button.delete_if_exists)
    tag_cat = appliance.collections.categories.instantiate(
        name="department", display_name="Department")
    tag = tag_cat.collections.tags.instantiate(
        name="engineering", display_name="Engineering")
    view = navigate_to(setup_obj, "Details")
    custom_button_group = Dropdown(view, group.text)
    if tag in setup_obj.get_tags():
        # Object already tagged: the button starts enabled/visible and must
        # flip once the tag is removed.
        if expression == "enablement":
            assert custom_button_group.item_enabled(button.text)
            setup_obj.remove_tag(tag)
            assert not custom_button_group.is_enabled
            assert re.search(disabled_txt, custom_button_group.hover)
        elif expression == "visibility":
            assert button.text in custom_button_group.items
            setup_obj.remove_tag(tag)
            assert not custom_button_group.is_displayed
    else:
        # Object untagged: the button starts disabled/hidden and must flip
        # once the tag is added.
        if expression == "enablement":
            assert not custom_button_group.is_enabled
            assert re.search(disabled_txt, custom_button_group.hover)
            setup_obj.add_tag(tag)
            assert custom_button_group.item_enabled(button.text)
        elif expression == "visibility":
            assert not custom_button_group.is_displayed
            setup_obj.add_tag(tag)
            assert button.text in custom_button_group.items
def original_method_write_data():
    """Return a random 32-char payload for the original automate method to write."""
    return fauxfactory.gen_alphanumeric(32)
def new_role():
    """Yield a role named 'tag_vis_role_<random>'; delete on teardown."""
    role = ac.Role(name='tag_vis_role_{}'.format(fauxfactory.gen_alphanumeric()))
    role.create()
    yield role
    role.delete()
def test_automate_disabled_domains_in_domain_priority(request, klass):
    """When the admin clicks on a instance that has duplicate entries in two different domains.
    If one domain is disabled it is still displayed in the UI for the domain priority.

    Polarion:
        assignee: ghubale
        initialEstimate: 1/12h
        caseimportance: low
        caseposneg: negative
        testtype: functional
        startsin: 5.7
        casecomponent: Automate
        tags: automate
        title: Test automate disabled domains in domain priority
        testSteps:
            1. create two domains
            2. attach the same automate code to both domains.
            3. disable one domain
            4. click on a instance and see domains displayed.
        expectedResults:
            1.
            2.
            3.
            4. CFME should not display disabled domains or it should be like
               'domain_name (Disabled)'

    Bugzilla:
        1331017
    """
    schema_field = fauxfactory.gen_alphanumeric()
    # Create one more domain
    other_domain = klass.appliance.collections.domains.create(
        name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alpha(),
        enabled=True)
    request.addfinalizer(other_domain.delete_if_exists)
    # Minimal inline method that just logs; enough to duplicate across domains.
    method = klass.methods.create(
        name=fauxfactory.gen_alphanumeric(),
        display_name=fauxfactory.gen_alphanumeric(),
        location='inline',
        script='$evm.log(:info, ":P")',
    )
    request.addfinalizer(method.delete_if_exists)
    klass.schema.add_fields({'name': schema_field, 'type': 'Method', 'data_type': 'String'})
    instance = klass.instances.create(
        name=fauxfactory.gen_alphanumeric(),
        display_name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        fields={schema_field: {'value': method.name}})
    request.addfinalizer(instance.delete_if_exists)
    # Copy method and instance to other domain
    method.copy_to(other_domain)
    instance.copy_to(other_domain)
    view = navigate_to(instance, 'Details')
    # Read domain priority to check whether any domain is not disabled
    domain_priority = view.domain_priority.read().split(' ')
    assert "(Disabled)" not in domain_priority
    # Disable the other domain
    with update(other_domain):
        other_domain.enabled = False
    view = navigate_to(instance, 'Details')
    # Read domain priority to check whether other domain is disabled
    domain_priority = view.domain_priority.read().split(' ')
    assert "(Disabled)" in domain_priority
def copy_method_write_data():
    """Return a random 32-char payload for the copied automate method to write."""
    return fauxfactory.gen_alphanumeric(32)
def validate_args(self):
    """Validate and default the docker-runner arguments.

    Fills in defaults via ``check_arg``, derives values that depend on other
    args, prints a message for each fatal problem, and exits with status 127
    if any problem was found.
    """
    # Error counter; a non-zero total at the end aborts the run.
    ec = 0
    appliance = self.args.get('appliance', None)
    # Resolve a named appliance from config when no explicit appliance given.
    if self.args.get('appliance_name', None) and not appliance:
        self.args['appliance'] = docker_conf['appliances'][self.args['appliance_name']]
    self.check_arg('nowait', False)
    self.check_arg('banner', False)
    self.check_arg('watch', False)
    self.check_arg('output', True)
    self.check_arg('dry_run', False)
    self.check_arg('server_ip', None)
    if not self.args['server_ip']:
        self.args['server_ip'] = my_ip_address()
    self.check_arg('sprout', False)
    self.check_arg('provision_appliance', False)
    if self.args['provision_appliance']:
        # Provisioning requires template, provider and VM name together.
        if not self.args['provision_template'] or not self.args['provision_provider'] or \
                not self.args['provision_vm_name']:
            print(
                "You don't have all the required options to provision an appliance"
            )
            ec += 1
    self.check_arg('sprout_stream', None)
    if self.args['sprout'] and not self.args['sprout_stream']:
        print("You need to supply a stream for sprout")
        ec += 1
    self.check_arg('appliance_name', None)
    self.check_arg('appliance', None)
    # NOTE(review): `not a != b` means "a == b" — with both defaulting to None
    # this fires when neither was supplied, but the double negation is
    # suspicious; confirm the intended condition.
    if not self.args['appliance_name'] != self.args['appliance'] and \
            not self.args['provision_appliance'] and not self.args['sprout']:
        print(
            "You must supply either an appliance OR an appliance name from config"
        )
        ec += 1
    self.check_arg('branch', 'origin/master')
    self.check_arg('pr', None)
    self.check_arg('dev_pr', None)
    self.check_arg('cfme_repo', None)
    self.check_arg('cfme_repo_dir', '/cfme_tests_te')
    self.check_arg('cfme_cred_repo', None)
    self.check_arg('cfme_cred_repo_dir', '/cfme-qe-yamls')
    self.check_arg('dev_repo', None)
    if not self.args['cfme_repo']:
        print("You must supply a CFME REPO")
        ec += 1
    if not self.args['cfme_cred_repo']:
        print("You must supply a CFME Credentials REPO")
        ec += 1
    self.check_arg('selff', 'cfme/sel_ff_chrome')
    self.check_arg('gh_token', None)
    self.check_arg('gh_owner', None)
    self.check_arg('gh_repo', None)
    self.check_arg('gh_dev_repo', None)
    self.check_arg('gh_dev_owner', None)
    if self.args['dev_pr']:
        # dev_pr needs both the dev repo and its owner to be configured.
        dev_check = [self.args[i] for i in ['gh_dev_repo', 'gh_dev_owner']]
        if not all(dev_check):
            print(
                "To use dev_pr you must have a gh_dev_repo and gh_dev_owner defined"
            )
            ec += 1
    self.check_arg('browser', 'firefox')
    self.check_arg('pytest', None)
    self.check_arg('pytest_con', 'py_test_base')
    if not self.args['pytest']:
        print("You must specify a py.test command")
        ec += 1
    self.check_arg('update_pip', False)
    self.check_arg('wheel_host_url', None)
    self.check_arg('auto_gen_test', False)
    self.check_arg('artifactor_dir', '/log_depot')
    self.check_arg('log_depot', None)
    if not self.args['log_depot']:
        print("You must specify a log_depot")
        ec += 1
    # Auto test generation against a PR needs full GitHub credentials.
    if self.args['pr'] and self.args['auto_gen_test'] and not \
            all([self.args['gh_token'], self.args['gh_owner'], self.args['gh_repo']]):
        print(
            "You chose to use Auto Test Gen, without supplying GitHub details"
        )
        ec += 1
    self.check_arg('capture', False)
    self.check_arg('test_id', fauxfactory.gen_alphanumeric(8))
    self.check_arg('prtester', False)
    self.check_arg('trackerbot', None)
    self.check_arg('wharf', False)
    self.check_arg('sprout_username', None)
    self.check_arg('sprout_password', None)
    self.check_arg('sprout_description', None)
    if ec:
        sys.exit(127)
def original_domain(request, domain_collection):
    """Create an enabled automate domain; register cleanup via a finalizer."""
    dom = domain_collection.create(name=fauxfactory.gen_alphanumeric(), enabled=True)
    request.addfinalizer(dom.delete_if_exists)
    return dom
def test_vm_retire_extend(appliance, request, testing_vm, soft_assert):
    """ Tests extending a retirement using an AE method.

    Prerequisities:
        * A running VM on any provider.

    Steps:
        * It creates a button pointing to ``Request/vm_retire_extend`` instance. The button
          should live in the VM and Instance button group.
        * Then it sets a retirement date for the VM
        * Then it waits until the retirement date is set
        * Then it clicks the button that was created and it waits for the retirement date
          to extend.

    Metadata:
        test_flag: retire, provision
    """
    num_days = 5
    soft_assert(testing_vm.retirement_date == 'Never',
                "The retirement date is not 'Never'!")
    retirement_date = generate_retirement_date(delta=num_days)
    testing_vm.set_retirement_date(retirement_date)
    wait_for(lambda: testing_vm.retirement_date != 'Never',
             message="retirement date set")
    set_date = testing_vm.retirement_date
    vm_retire_date_fmt = VM.RETIRE_DATE_FMT.pick(appliance.version)
    # Date format check is skipped while BZ 1419150 blocks the 5.6 stream.
    if not BZ(1419150, forced_streams='5.6').blocks:
        soft_assert(
            set_date == retirement_date.strftime(vm_retire_date_fmt),
            "The retirement date '{}' did not match expected date '{}'".format(
                set_date, retirement_date.strftime(vm_retire_date_fmt)))
    # Create the vm_retire_extend button and click on it
    grp_name = "grp_{}".format(fauxfactory.gen_alphanumeric())
    grp = appliance.collections.button_groups.create(
        text=grp_name,
        hover=grp_name,
        type=appliance.collections.button_groups.VM_INSTANCE)
    request.addfinalizer(lambda: grp.delete_if_exists())
    btn_name = "btn_{}".format(fauxfactory.gen_alphanumeric())
    button = grp.buttons.create(text=btn_name,
                                hover=btn_name,
                                system="Request",
                                request="vm_retire_extend")
    request.addfinalizer(lambda: button.delete_if_exists())
    navigate_to(testing_vm, 'Details')

    class TestDropdownView(InfraVmSummaryView):
        # Dropdown named after the button group created above.
        group = Dropdown(grp.text)

    view = appliance.browser.create_view(TestDropdownView)
    view.group.item_select(button.text)
    # CFME automate vm_retire_extend method defaults to extending the date by 14 days
    extend_duration_days = 14
    extended_retirement_date = retirement_date + timedelta(days=extend_duration_days)
    # Check that the WebUI updates with the correct date
    wait_for(
        lambda: testing_vm.retirement_date >= extended_retirement_date.strftime(
            vm_retire_date_fmt),
        num_sec=60,
        message="Check for extension of the VM retirement date by {} days".format(
            extend_duration_days))
# -*- coding: utf-8 -*- """This module contains tests that check priority of domains.""" import fauxfactory import pytest from cfme import test_requirements from cfme.automate.explorer.domain import DomainCollection from cfme.automate.simulation import simulate from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.update import update from cfme.utils.wait import wait_for pytestmark = [test_requirements.automate] FILE_LOCATION = "/var/www/miq/vmdb/test_ae_{}".format( fauxfactory.gen_alphanumeric(16)) METHOD_TORSO = """ $evm.log("info", "Automate Method Started") File.open("%s", "w") do |file| file.write "{}" end $evm.log("info", "Automate Method Ended") exit MIQ_OK """ % FILE_LOCATION @pytest.fixture(scope="module") def domain_collection(appliance): return DomainCollection(appliance)