def new_vm(provider, request):
    """Provision a 'cockpit' VM or cloud instance on *provider*.

    Deploys only when the VM is not already present on the provider and
    registers a finalizer that removes it afterwards.
    """
    factory_cls = Instance if provider.one_of(CloudProvider) else InfraVm
    vm = factory_cls.factory(random_vm_name(context='cockpit'), provider)
    if not provider.mgmt.does_vm_exist(vm.name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    request.addfinalizer(vm.cleanup_on_provider)
    return vm
def new_vm(appliance, provider, request):
    """Instantiate a 'cockpit' VM/instance entity and deploy it if absent.

    Cleanup on the provider is registered as a test finalizer.
    """
    collection = (appliance.collections.cloud_instances
                  if provider.one_of(CloudProvider)
                  else appliance.collections.infra_vms)
    vm = collection.instantiate(random_vm_name(context='cockpit'), provider)
    if not provider.mgmt.does_vm_exist(vm.name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    request.addfinalizer(vm.cleanup_on_provider)
    return vm
def provision_vm(provider, template):
    """Return a 'snpst' VM built from *template*, deploying it when missing."""
    vm = VM.factory(random_vm_name(context="snpst"), provider,
                    template_name=template.name)
    if not provider.mgmt.does_vm_exist(vm.name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def testing_instance(setup_provider, provider):
    """ Fixture to provision instance on the provider """
    instance = Instance.factory(random_vm_name('pwr-c'), provider)
    if not provider.mgmt.does_vm_exist(instance.name):
        instance.create_on_provider(allow_skip="default")
    elif instance.provider.type == "ec2" and \
            provider.mgmt.is_vm_state(instance.name, provider.mgmt.states['deleted']):
        # EC2 keeps terminated instances visible for a while; rename the dead
        # one out of the way so a fresh instance can reuse the name.
        provider.mgmt.set_name(
            instance.name, 'test_terminated_{}'.format(fauxfactory.gen_alphanumeric(8)))
        instance.create_on_provider(allow_skip="default", find_in_cfme=True)
    provider.refresh_provider_relationships()

    # Make sure the instance shows up
    try:
        wait_for(lambda: instance.exists,
                 fail_condition=False,
                 num_sec=600,
                 delay=15,
                 fail_func=provider.refresh_provider_relationships)
    except TimedOutError:
        pytest.fail('Failed to find instance in CFME after creating on provider: {}'
                    .format(instance.name))

    yield instance

    logger.info('Fixture cleanup, deleting test instance: %s', instance.name)
    try:
        provider.mgmt.delete_vm(instance.name)
    except Exception:
        # Best-effort teardown; leftovers are handled by provider cleanup jobs.
        logger.exception('Exception when deleting testing_instance: %s', instance.name)
def catalog_item(appliance, provider, dialog, catalog, provisioning):
    """Create an RHV catalog item that provisions a service VM from ISO.

    All provisioning knobs (template, host, datastore, ISO file, kickstart,
    root password, vlan) come from the provider's ``provisioning`` yaml data.
    """
    # NOTE(review): iso_image_type is unpacked but never used below — confirm
    # whether it can be dropped from the lookup tuple.
    iso_template, host, datastore, iso_file, iso_kickstart,\
        iso_root_password, iso_image_type, vlan = map(
            provisioning.get,
            ('pxe_template', 'host', 'datastore', 'iso_file', 'iso_kickstart',
             'iso_root_password', 'iso_image_type', 'vlan'))

    provisioning_data = {
        'catalog': {'catalog_name': {'name': iso_template, 'provider': provider.name},
                    'vm_name': random_vm_name('iso_service'),
                    'provision_type': 'ISO',
                    'iso_file': {'name': iso_file}},
        'environment': {'host_name': {'name': host},
                        'datastore_name': {'name': datastore}},
        'customize': {'custom_template': {'name': iso_kickstart},
                      'root_password': iso_root_password},
        'network': {'vlan': partial_match(vlan)},
    }

    item_name = fauxfactory.gen_alphanumeric()
    return appliance.collections.catalog_items.create(
        appliance.collections.catalog_items.RHV,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
        prov_data=provisioning_data
    )
def vm_big(request, provider, setup_provider_modscope, big_template_modscope):
    """Fixture wrapping ``_get_vm`` for the big template with an 'action' name."""
    vm_name = random_vm_name("action", max_length=16)
    return _get_vm(request, provider, big_template_modscope.name, vm_name)
def test_template_set_ownership(appliance, request, provider, setup_provider, vm_crud): """ Sets ownership to an infra template. First publishes a template from a VM, then tries to unset an ownership of that template, then sets it back and in the end removes the template. VM is removed via fixture. Tests BZ 1446801 in RHCF3-14353 Polarion: assignee: apagac casecomponent: Infra caseimportance: medium initialEstimate: 1/6h """ # setup the test # publish a vm to a template template = vm_crud.publish_to_template(template_name=random_vm_name(context='ownrs')) # instantiate a user representing no owner user_no_owner = appliance.collections.users.instantiate(name="<No Owner>") # instantiate a user representing Administrator user_admin = appliance.collections.users.instantiate(name="Administrator") # run the test try: # unset ownership template.set_ownership(user=user_no_owner) # set ownership back to admin template.set_ownership(user=user_admin) finally: # in every case, delete template we created template.mgmt.delete()
def catalog_item(appliance, provider, provisioning, tagcontrol_dialog, catalog):
    """Create a catalog item from the provider's 'template' yaml data.

    The provision type is forced to the provider-native clone mechanism for
    RHV and VMware providers.
    """
    # NOTE(review): iso_file is fetched but never used below — confirm.
    template, host, datastore, iso_file, vlan = map(
        provisioning.get,
        ('template', 'host', 'datastore', 'iso_file', 'vlan'))

    provisioning_data = {
        'catalog': {'catalog_name': {'name': template, 'provider': provider.name},
                    'vm_name': random_vm_name('service')},
        'environment': {'host_name': {'name': host},
                        'datastore_name': {'name': datastore}},
        'network': {'vlan': partial_match(vlan)},
    }

    # Pick the provider-native clone mechanism.
    if provider.type == 'rhevm':
        provisioning_data['catalog']['provision_type'] = 'Native Clone'
    elif provider.type == 'virtualcenter':
        provisioning_data['catalog']['provision_type'] = 'VMware'

    item_name = fauxfactory.gen_alphanumeric()
    catalog_item = appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=tagcontrol_dialog,
        prov_data=provisioning_data)
    return catalog_item
def catalog_item(appliance, provider, dialog, catalog, provisioning,
                 setup_pxe_servers_vm_prov):
    """Create a PXE-provisioned catalog item from the provider yaml data."""
    # generate_tests makes sure these have values
    pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart, pxe_root_password,\
        pxe_image_type, pxe_vlan = map(
            provisioning.get,
            (
                'pxe_template', 'host', 'datastore', 'pxe_server', 'pxe_image',
                'pxe_kickstart', 'pxe_root_password', 'pxe_image_type', 'vlan'
            )
        )

    # NOTE(review): pxe_image_type is unpacked but not used below — confirm.
    provisioning_data = {
        'catalog': {'catalog_name': {'name': pxe_template, 'provider': provider.name},
                    'provision_type': 'PXE',
                    'pxe_server': pxe_server,
                    'pxe_image': {'name': pxe_image},
                    'vm_name': random_vm_name('pxe_service')},
        'environment': {'datastore_name': {'name': datastore},
                        'host_name': {'name': host}},
        'customize': {'root_password': pxe_root_password,
                      'custom_template': {'name': pxe_kickstart}},
        'network': {'vlan': partial_match(pxe_vlan)},
    }

    item_name = fauxfactory.gen_alphanumeric()
    return appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
        prov_data=provisioning_data)
def vm_obj(provider, setup_provider_modscope, small_template_modscope):
    """Yield a fresh 'attrs' VM object; clean it up on the provider afterwards."""
    created_vm = VM.factory(random_vm_name('attrs'),
                            provider,
                            template_name=small_template_modscope.name)
    yield created_vm
    created_vm.cleanup_on_provider()
def small_vm(provider, small_template_modscope):
    """Deploy a small 'reconfig' VM, yield it, and remove it on teardown."""
    infra_vms = provider.appliance.collections.infra_vms
    reconfig_vm = infra_vms.instantiate(random_vm_name(context="reconfig"),
                                        provider,
                                        small_template_modscope.name)
    reconfig_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    reconfig_vm.refresh_relationships()
    yield reconfig_vm
    reconfig_vm.cleanup_on_provider()
def small_vm(provider, small_template_modscope):
    """Provision a small 'reconfig' VM via VM.factory; delete it afterwards."""
    name = random_vm_name(context='reconfig')
    reconfig_vm = VM.factory(name, provider, small_template_modscope.name)
    reconfig_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    reconfig_vm.refresh_relationships()
    yield reconfig_vm
    reconfig_vm.delete_from_provider()
def new_vm(provider):
    """Yield a 'timelines' infra VM registered in CFME; removed on teardown."""
    timeline_vm = provider.appliance.collections.infra_vms.instantiate(
        random_vm_name('timelines', max_length=16), provider)
    timeline_vm.create_on_provider(find_in_cfme=True)
    logger.debug('Fixture new_vm set up! Name: %r Provider: %r',
                 timeline_vm.name, timeline_vm.provider.name)
    yield timeline_vm
    timeline_vm.cleanup_on_provider()
def vmware_vm(request, virtualcenter_provider):
    """Provision a 'control' VM on the VMware provider; finalizer removes it."""
    infra_vms = virtualcenter_provider.appliance.collections.infra_vms
    control_vm = infra_vms.instantiate(random_vm_name("control"),
                                       virtualcenter_provider)
    control_vm.create_on_provider(find_in_cfme=True)
    request.addfinalizer(control_vm.cleanup_on_provider)
    return control_vm
def new_vm(request, provider):
    """Create a 'timelines' VM directly on the provider (not waited for in CFME)."""
    timeline_vm = VM.factory(random_vm_name('timelines', max_length=16), provider)
    logger.debug('Fixture new_vm set up! Name: %r', timeline_vm.name)
    logger.info('Will create %r on Provider: %r',
                timeline_vm.name, timeline_vm.provider.name)
    timeline_vm.create_on_provider(find_in_cfme=False, timeout=500)
    yield timeline_vm
    logger.debug('Fixture new_vm teardown! Name: %r Provider: %r',
                 timeline_vm.name, timeline_vm.provider.name)
    timeline_vm.provider.mgmt.delete_vm(timeline_vm.name)
def new_vm(request, a_provider):
    """Return a 'timelines' VM on *a_provider*, deploying it only when absent."""
    vm = VM.factory(random_vm_name("timelines", max_length=16), a_provider)
    request.addfinalizer(vm.delete_from_provider)
    if a_provider.mgmt.does_vm_exist(vm.name):
        return vm
    logger.info("deploying %s on provider %s", vm.name, a_provider.key)
    vm.create_on_provider(allow_skip="default", find_in_cfme=True)
    return vm
def provision_vm(provider, template):
    """Return a 'snpst' infra VM from *template*, creating it when missing."""
    name = random_vm_name(context="snpst")
    vm = provider.appliance.collections.infra_vms.instantiate(name, provider,
                                                              template.name)
    if provider.mgmt.does_vm_exist(name):
        return vm
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def vm_obj(provider, setup_provider_modscope, small_template_modscope):
    """Yield a new 'attrs' VM/instance from the provider-appropriate collection."""
    vm_collection = provider.appliance.provider_based_collection(provider)
    created_vm = vm_collection.instantiate(
        random_vm_name('attrs'),
        provider,
        template_name=small_template_modscope.name)
    yield created_vm
    created_vm.cleanup_on_provider()
def new_instance(provider):
    """Provision a 'cloud-timeline' instance and delete it on teardown.

    The instance is created on the provider and waited for in CFME; teardown
    removes it from the provider only if it still exists there.
    """
    inst = Instance.factory(random_vm_name('cloud-timeline', max_length=20), provider)
    logger.debug('Fixture new_instance set up! Name: %r Provider: %r',
                 inst.name, inst.provider.name)
    inst.create_on_provider(allow_skip="default", find_in_cfme=True)
    yield inst
    # Fixed copy-pasted message: this is new_instance's teardown, not new_vm's.
    logger.debug('Fixture new_instance teardown! Name: %r Provider: %r',
                 inst.name, inst.provider.name)
    if inst.provider.mgmt.does_vm_exist(inst.name):
        inst.provider.mgmt.delete_vm(inst.name)
def vm_crud(provider):
    """Yield a freshly provisioned 'ownrs' VM; cleanup failures are logged only."""
    test_vm = VM.factory(random_vm_name(context='ownrs'), provider)
    test_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield test_vm
    try:
        test_vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s',
                         test_vm.name, provider.name)
def new_vm(setup_provider_modscope, provider, request):
    """Provision a 'migrate' VM from the small template, unless it exists."""
    name = random_vm_name(context='migrate')
    migrate_vm = VM.factory(name, provider,
                            template_name=provider.data['small_template'])
    if not provider.mgmt.does_vm_exist(name):
        migrate_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    request.addfinalizer(migrate_vm.cleanup_on_provider)
    return migrate_vm
def create_instance(appliance, provider, template_name):
    """Ensure a 'pwr-c' instance exists on *provider* and return it.

    On EC2, a terminated instance still holding the name is renamed aside
    before re-provisioning.
    """
    instance = appliance.collections.cloud_instances.instantiate(
        random_vm_name('pwr-c'), provider, template_name)
    if not instance.exists_on_provider:
        instance.create_on_provider(allow_skip="default", find_in_cfme=True)
    elif instance.provider.one_of(EC2Provider) and instance.mgmt.state == VmState.DELETED:
        # The terminated instance still owns the name on EC2 — rename it aside.
        graveyard_name = 'test_terminated_{}'.format(fauxfactory.gen_alphanumeric(8))
        instance.mgmt.rename(graveyard_name)
        instance.create_on_provider(allow_skip="default", find_in_cfme=True)
    return instance
def vm_crud(provider):
    """Yield an 'ownrs' VM from the provider-appropriate collection."""
    test_vm = provider.appliance.provider_based_collection(provider).instantiate(
        random_vm_name(context='ownrs'), provider)
    test_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield test_vm
    try:
        test_vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s',
                         test_vm.name, provider.name)
def vm_name(provider):
    """Yield a fresh 'scat' VM name; delete whichever VM got created with it.

    Provisioning may append a '0001' suffix to the requested name, so teardown
    checks for the suffixed variant first and cleans up whichever one exists.
    """
    vm_name = random_vm_name('scat')
    yield vm_name
    scat_vm = "{}0001".format(vm_name)
    # Reuse scat_vm instead of re-formatting the same suffix a second time.
    vm_name_to_cleanup = scat_vm if scat_vm in provider.mgmt.list_vm() else vm_name
    VM.factory(vm_name_to_cleanup, provider).cleanup_on_provider()
def archived_vm(appliance, source_provider):
    """Create a VM, delete it on the provider, and wait until CFME archives it.

    Returns the archived VM's name.
    """
    archived = appliance.collections.infra_vms.instantiate(
        random_vm_name(context='v2v-auto'), source_provider)
    if not source_provider.mgmt.does_vm_exist(archived.name):
        archived.create_on_provider(find_in_cfme=True, allow_skip="default")
    # Deleting on the provider is what pushes the VM into the archived state.
    archived.mgmt.delete()
    archived.wait_for_vm_state_change(desired_state='archived',
                                      timeout=900,
                                      from_details=False,
                                      from_any_provider=True)
    return archived.name
def vm_name(provider):
    """Yield a fresh 'scat' VM name; best-effort delete the VM on teardown."""
    vm_name = random_vm_name('scat')
    yield vm_name
    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    # Narrowed from a bare ``except:`` (which also swallowed SystemExit /
    # KeyboardInterrupt); the mgmt_sys classes raise plain Exception.
    except Exception:
        logger.warning('Failed to clean up VM %s on provider %s', vm_name, provider.key)
def create_instance(provider):
    """Provision a 'pwr-c' cloud instance on *provider* and return it.

    Handles the EC2 quirk where a terminated instance of the same name is
    still listed: the dead instance is renamed out of the way first.
    """
    instance = Instance.factory(random_vm_name('pwr-c'), provider)
    if not provider.mgmt.does_vm_exist(instance.name):
        instance.create_on_provider(allow_skip="default", find_in_cfme=True)
    elif instance.provider.type == "ec2" and \
            provider.mgmt.is_vm_state(instance.name, provider.mgmt.states['deleted']):
        # Rename the terminated leftover so the fresh instance can take the name.
        provider.mgmt.set_name(
            instance.name, 'test_terminated_{}'.format(fauxfactory.gen_alphanumeric(8)))
        instance.create_on_provider(allow_skip="default", find_in_cfme=True)
    return instance
def new_instance(request, provider):
    """Ensure a 'timelines' instance exists on *provider*; finalizer deletes it."""
    instance = Instance.factory(random_vm_name("timelines", max_length=16), provider)
    request.addfinalizer(instance.delete_from_provider)
    if not provider.mgmt.does_vm_exist(instance.name):
        logger.info("deploying %s on provider %s", instance.name, provider.key)
        instance.create_on_provider(allow_skip="default", find_in_cfme=True)
    if instance.provider.one_of(EC2Provider):
        # Give EC2 time to settle before the test starts polling.
        ec2_sleep()
    return instance
def retire_ec2_s3_vm(provider):
    """Yield an S3-backed paravirtual EC2 instance built from a public AMI.

    Args:
        provider: provider crud object from fixture
    """
    s3_vm = VM.factory(random_vm_name('retire'),
                       provider,
                       template_name='amzn-ami-pv-2015.03.rc-1.x86_64-s3')
    s3_vm.create_on_provider(find_in_cfme=True, allow_skip="default", timeout=1200)
    yield s3_vm
    s3_vm.cleanup_on_provider()
def retire_vm(small_template, provider):
    """Yield a generic 'retire' VM built from *small_template*; clean up after.

    Args:
        small_template: small template fixture, template on provider
        provider: provider crud object from fixture
    """
    generic_vm = VM.factory(random_vm_name('retire'), provider,
                            template_name=small_template.name)
    generic_vm.create_on_provider(find_in_cfme=True, allow_skip="default", timeout=1200)
    yield generic_vm
    generic_vm.cleanup_on_provider()
def test_vm(virtualcenter_provider):
    """Yield a 'distpwr' VM, reusing an existing deployment when present.

    Teardown deletes the VM on the provider, logging any failure.
    """
    name = random_vm_name('distpwr')
    dist_vm = VM.factory(name, virtualcenter_provider)
    if virtualcenter_provider.mgmt.does_vm_exist(name):
        logger.info("recycling deployed vm %r on provider %r", name,
                    virtualcenter_provider.key)
    else:
        logger.info("deploying %r on provider %r", name, virtualcenter_provider.key)
        dist_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield dist_vm
    try:
        virtualcenter_provider.mgmt.delete_vm(vm_name=name)
    except Exception:
        logger.exception('Failed deleting VM "%r" on "%r"', name,
                         virtualcenter_provider.name)
def vm_obj(provider, setup_provider_modscope, small_template_modscope):
    """Yield a 'snpsht' VM, deploying it only when absent; best-effort delete."""
    name = random_vm_name('snpsht')
    snapshot_vm = VM.factory(name, provider,
                             template_name=small_template_modscope.name)
    if not provider.mgmt.does_vm_exist(name):
        snapshot_vm.create_on_provider(find_in_cfme=True, allow_skip='default')
    yield snapshot_vm
    try:
        provider.mgmt.delete_vm(snapshot_vm.name)
    except Exception:
        logger.warning("Failed to delete vm `{}`.".format(snapshot_vm.name))
def new_vm(setup_provider, provider):
    """Yield a 'migrate' VM from the provider's small template.

    Skips the test when the small template is missing from the provider yaml.
    """
    name = random_vm_name(context='migrate')
    try:
        template_name = provider.data.templates.small_template.name
    except AttributeError:
        pytest.skip('Could not find templates.small_template.name in provider yaml: {}'
                    .format(provider.data))
    migrate_vm = provider.appliance.collections.infra_vms.instantiate(
        name, provider, template_name)
    if not provider.mgmt.does_vm_exist(name):
        migrate_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield migrate_vm
    migrate_vm.cleanup_on_provider()
def retire_vm(small_template, provider):
    """Yield a 'retire' VM/instance from the provider-appropriate collection.

    Args:
        small_template: small template fixture, template on provider
        provider: provider crud object from fixture
    """
    vm_collection = provider.appliance.provider_based_collection(provider)
    generic_vm = vm_collection.instantiate(random_vm_name('retire'),
                                           provider,
                                           template_name=small_template.name)
    generic_vm.create_on_provider(find_in_cfme=True, allow_skip="default", timeout=1200)
    yield generic_vm
    generic_vm.cleanup_on_provider()
def retire_ec2_s3_vm(provider):
    """Yield an S3-backed paravirtual instance from a public EC2 AMI.

    Args:
        provider: provider crud object from fixture
    """
    vm_collection = provider.appliance.provider_based_collection(provider)
    s3_vm = vm_collection.instantiate(
        random_vm_name('retire'),
        provider,
        template_name='amzn-ami-pv-2015.03.rc-1.x86_64-s3')
    s3_vm.create_on_provider(find_in_cfme=True, allow_skip="default", timeout=1200)
    yield s3_vm
    s3_vm.cleanup_on_provider()
def vm_obj(request, provider, setup_provider, console_template):
    """
    Create a VM on the provider with the given console template and return it.

    The VM is cleaned up on the provider when the test is done.
    """
    collection = provider.appliance.provider_based_collection(provider)
    vm_obj = collection.instantiate(random_vm_name('html5-con'),
                                    provider,
                                    template_name=console_template.name)
    # Register cleanup *before* provisioning so the VM is removed even when
    # create_on_provider fails partway. The bound method is passed directly;
    # the previous ``lambda`` wrapper was redundant.
    request.addfinalizer(vm_obj.cleanup_on_provider)
    vm_obj.create_on_provider(timeout=2400, find_in_cfme=True, allow_skip="default")
    return vm_obj
def vm_crud(provider):
    """Yield an 'ownrs' VM; skip the test when the provider template is missing."""
    test_vm = provider.appliance.provider_based_collection(provider).instantiate(
        random_vm_name(context='ownrs'), provider)
    try:
        test_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    except KeyError:
        msg = 'Missing template for provider {}'.format(provider.key)
        logger.exception(msg)
        pytest.skip(msg)
    yield test_vm
    try:
        test_vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s',
                         test_vm.name, provider.name)
def vm_obj(virtualcenter_provider):
    """Yield a 'distpwr' VM on VMware, reusing an existing deployment if any."""
    name = random_vm_name('distpwr')
    collection = virtualcenter_provider.appliance.provider_based_collection(
        virtualcenter_provider)
    dist_vm = collection.instantiate(name, virtualcenter_provider)
    if virtualcenter_provider.mgmt.does_vm_exist(name):
        logger.info("recycling deployed vm %r on provider %r", name,
                    virtualcenter_provider.key)
    else:
        logger.info("deploying %r on provider %r", name, virtualcenter_provider.key)
        dist_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield dist_vm
    dist_vm.cleanup_on_provider()
def small_vm(appliance, provider, small_template_modscope):
    """Function-scoped small VM for reconfigure tests.

    Function scope is deliberate: reconfigure requests are identified by VM
    name in the UI, and two identical requests for the same VM name (e.g. from
    the cold and then hot add/remove-hardware tests) cannot be told apart.
    A fresh name per test keeps every reconfigure request unambiguous.
    """
    reconfig_vm = appliance.collections.infra_vms.instantiate(
        random_vm_name(context='reconfig'),
        provider,
        small_template_modscope.name)
    reconfig_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    reconfig_vm.refresh_relationships()
    yield reconfig_vm
    reconfig_vm.cleanup_on_provider()
def testing_vm(setup_provider, provider):
    """Yield an 'ae-hd' VM built from the provider's full_template.

    Skips when the template is not configured; always deletes the VM after.
    """
    collection = provider.appliance.provider_based_collection(provider)
    try:
        template_name = provider.data['full_template']['name']
    except KeyError:
        pytest.skip('Unable to identify full_template for provider: {}'.format(provider))
    ae_vm = collection.instantiate(random_vm_name("ae-hd"), provider,
                                   template_name=template_name)
    try:
        ae_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
        yield ae_vm
    finally:
        ae_vm.delete_from_provider()
def test_manage_nsg_group(appliance, provider, setup_provider, register_event):
    """
    tests that create/remove azure network security groups events are
    received and parsed by CFME

    Metadata:
        test_flag: events
    """
    nsg_name = random_vm_name(context='nsg')
    resource_group = provider.data['provisioning']['resource_group']

    # registering add/remove network security group events
    # we need to check raw data by regexps, since many azure events aren't parsed by CFME yet

    def add_cmp(_, y):
        # In 5.9 version `y` is a dict, not a yaml stream.
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; events come from the provider here —
        # confirm whether safe_load would suffice.
        data = yaml.load(y) if appliance.version < '5.9' else y
        return (data['resourceId'].endswith(nsg_name) and
                (data['status']['value'] == 'Accepted' and
                 data['subStatus']['value'] == 'Created') or
                data['status']['value'] == 'Succeeded')

    fd_add_attr = {'full_data': 'will be ignored',
                   'cmp_func': add_cmp}

    # add network security group event
    register_event(fd_add_attr, source=provider.type.upper(),
                   event_type='networkSecurityGroups_write_EndRequest')

    def rm_cmp(_, y):
        # In 5.9 version `y` is a dict, not a yaml stream.
        data = yaml.load(y) if appliance.version < '5.9' else y
        return data['resourceId'].endswith(
            nsg_name) and data['status']['value'] == 'Succeeded'

    fd_rm_attr = {'full_data': 'will be ignored',
                  'cmp_func': rm_cmp}

    # remove network security group
    register_event(fd_rm_attr, source=provider.type.upper(),
                   event_type='networkSecurityGroups_delete_EndRequest')

    # creating and removing network security group
    provider.mgmt.create_netsec_group(nsg_name, resource_group)
    provider.mgmt.remove_netsec_group(nsg_name, resource_group)
def catalog_item(appliance, provider, dialog, catalog, provisioning):
    """Create an RHV catalog item that provisions a service VM from ISO.

    The provisioning knobs come from the provider's ``provisioning`` yaml.
    """
    # NOTE(review): iso_image_type is unpacked but never used below — confirm.
    (iso_template, host, datastore, iso_file, iso_kickstart, iso_root_password,
     iso_image_type, vlan) = tuple(
        map(provisioning.get,
            ('pxe_template', 'host', 'datastore', 'iso_file', 'iso_kickstart',
             'iso_root_password', 'iso_image_type', 'vlan')))

    provisioning_data = {
        'catalog': {
            'catalog_name': {
                'name': iso_template,
                'provider': provider.name
            },
            'vm_name': random_vm_name('iso_service'),
            'provision_type': 'ISO',
            'iso_file': {
                'name': iso_file
            }
        },
        'environment': {
            'host_name': {
                'name': host
            },
            'datastore_name': {
                'name': datastore
            }
        },
        'customize': {
            'custom_template': {
                'name': iso_kickstart
            },
            'root_password': iso_root_password
        },
        'network': {
            'vlan': partial_match(vlan)
        },
    }

    item_name = fauxfactory.gen_alphanumeric(15, start="cat_item_")
    return appliance.collections.catalog_items.create(
        appliance.collections.catalog_items.RHV,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
        prov_data=provisioning_data)
def test_vm_capture(request, provider, setup_provider, register_event):
    """ tests that generalize and capture vm azure events are received and parsed by CFME """
    mgmt = provider.mgmt
    vm = VM.factory(random_vm_name(context='capture'), provider)
    if not mgmt.does_vm_exist(vm.name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
        vm.refresh_relationships()

    # deferred delete vm
    request.addfinalizer(vm.delete_from_provider)

    def cmp_function(_, y):
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input — confirm whether safe_load suffices here.
        data = yaml.load(y)
        return data['resourceId'].endswith(
            vm.name) and data['status']['value'] == 'Succeeded'

    full_data_attr = {'full_data': 'will be ignored',
                      'cmp_func': cmp_function}

    # generalize event
    register_event(full_data_attr, source='AZURE',
                   event_type='virtualMachines_generalize_EndRequest')
    # capture event
    register_event(full_data_attr, source='AZURE',
                   event_type='virtualMachines_capture_EndRequest')

    # capture vm
    image_name = vm.name
    resource_group = provider.data['provisioning']['resource_group']
    mgmt.capture_vm(vm.name, 'templates', image_name,
                    resource_group=resource_group)

    # delete remaining image
    container = 'system'
    blob_images = mgmt.list_blob_images(container)
    # removing both json and vhd files
    test_image = [img for img in blob_images if image_name in img][-1]
    mgmt.remove_blob_image(test_image, container)
def vm(request, provider, small_template_modscope, ssa_analysis_profile):
    """Provision and start an SSA test VM; best-effort cleanup afterwards."""
    ssa_vm = VM.factory(random_vm_name("ssa", max_length=16), provider,
                        template_name=small_template_modscope.name)
    ssa_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    provider.mgmt.start_vm(ssa_vm.name)
    provider.mgmt.wait_vm_running(ssa_vm.name)

    @request.addfinalizer
    def _finalize():
        try:
            ssa_vm.cleanup_on_provider()
            provider.refresh_provider_relationships()
        except Exception as e:
            logger.exception(e)

    return ssa_vm
def retire_ec2_s3_vm(provider):
    """Yield an S3-backed PV EC2 instance; best-effort provider-side cleanup.

    Args:
        provider: provider crud object from fixture
    """
    s3_vm = VM.factory(random_vm_name('retire'), provider,
                       template_name='amzn-ami-pv-2015.03.rc-1.x86_64-s3')
    s3_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield s3_vm
    try:
        if provider.mgmt.does_vm_exist(s3_vm.name):
            provider.mgmt.delete_vm(s3_vm.name)
    except Exception:
        logger.warning('Failed to delete vm from provider: {}'.format(s3_vm.name))
def vm_crud(provider):
    """Deploy a 'pblsh' VM straight on the provider and wait for CFME to see it.

    Skips the test when the small template cannot be resolved from the yaml.
    """
    collection = provider.appliance.provider_based_collection(provider)
    name = random_vm_name(context='pblsh')
    publish_vm = collection.instantiate(name, provider)
    try:
        deploy_template(publish_vm.provider.key,
                        name,
                        provider.data.templates.small_template.name,
                        timeout=2500)
    except (KeyError, AttributeError):
        pytest.skip("Skipping as small_template could not be found on the provider")
    publish_vm.wait_to_appear(timeout=900, load_details=False)
    yield publish_vm
    try:
        publish_vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s',
                         publish_vm.name, provider.name)
def _get_vm_name(request):
    """Helper function to get vm name from test requirement mark.

    At first we try to get a requirement value from ``pytestmark`` module
    list. If it's missing we can try to look up it in the test function
    itself. There is one restriction for it: we cannot get the test function
    mark from module scoped fixtures.

    Raises:
        CFMEException: when no requirement mark can be found at all.
    """
    # ``pytestmark`` may be absent from the module; treat that the same as
    # "no requirement mark" instead of letting AttributeError escape.
    req = [
        mark.args[0]
        for mark in getattr(request.module, "pytestmark", [])
        if mark.name == "requirement"
    ]
    if not req and request.scope == "function":
        try:
            req = request.function.requirement.args
        except AttributeError:
            raise CFMEException("VM name can not be obtained")
    if not req:
        # Previously this fell through to an opaque IndexError on ``req[0]``
        # (e.g. for module-scoped fixtures without a module-level mark).
        raise CFMEException("VM name can not be obtained")
    return random_vm_name(req[0])
def get_vm(request, appliance, source_provider, template_type, datastore='nfs'):
    """ Helper method that takes template, source provider and datastore and
    creates VM on source provider to migrate.

    Args:
        request: pytest request object (used to register VM cleanup)
        appliance: appliance object
        source_provider: Provider on which vm is created
        template_type: Template used for creating VM
        datastore: datastore in which VM is created. If no datastore is
                   provided then by default VM is created on nfs datastore

    returns: Vm object
    """
    source_datastores_list = source_provider.data.get("datastores", [])
    # NOTE(review): raises IndexError when no datastore of the requested type
    # exists in the provider yaml — confirm that is the intended failure mode.
    source_datastore = [
        d.name for d in source_datastores_list if d.type == datastore
    ][0]
    collection = source_provider.appliance.provider_based_collection(
        source_provider)
    vm_name = random_vm_name("v2v-auto")
    template = _get_template(source_provider, template_type)
    vm_obj = collection.instantiate(vm_name,
                                    source_provider,
                                    template_name=template.name)
    power_on_vm = True
    if 'win10' in template.name:
        # TODO Get the VM to the correct power state within the fixture/test, not here
        # the fixture or test
        # Need to leave this off, otherwise migration fails
        # because when migration process tries to power off the VM if it is powered off
        # and for win10, it hibernates and that state of filesystem is unsupported
        power_on_vm = False
    vm_obj.create_on_provider(
        timeout=2400,
        find_in_cfme=True,
        allow_skip="default",
        datastore=source_datastore,
        power_on=power_on_vm,
    )
    request.addfinalizer(lambda: vm_obj.cleanup_on_provider())
    return vm_obj
def test_publish_vm_to_template(request, setup_provider, vm_crud):
    """ Try to publish VM to template.

    Steps:
        1) Deploy a VM and make sure it is stopped, otherwise the Publish
           button isn't available
        2) Publish the VM to a template
        3) Check that the template exists
    """
    vm_crud.mgmt.ensure_state(VmState.STOPPED)
    vm_crud.refresh_relationships()

    published = vm_crud.publish_to_template(random_vm_name(context='pblsh'))
    request.addfinalizer(published.delete)

    assert published.exists, 'Published template does not exist.'
def vm(request, provider, small_template, ssa_analysis_profile):
    """Provision and start an SSA test VM; best-effort cleanup afterwards."""
    ssa_vm = provider.appliance.collections.infra_vms.instantiate(
        random_vm_name("ssa", max_length=16), provider, small_template.name)
    ssa_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    ssa_vm.mgmt.ensure_state(VmState.RUNNING)

    @request.addfinalizer
    def _finalize():
        try:
            ssa_vm.cleanup_on_provider()
            provider.refresh_provider_relationships()
        except Exception as e:
            logger.exception(e)

    return ssa_vm
def proxy_machine():
    """Deploy a proxy VM from the yaml-configured template.

    Yields an ``(ip, port)`` pair for the proxy; the VM is deleted from the
    provider once the consuming test finishes.
    """
    machine_name = random_vm_name('proxy')
    settings = conf.cfme_data.get("proxy_template")
    mgmt = get_mgmt(settings["provider"])
    deploy_template(settings["provider"],
                    machine_name,
                    template_name=settings["template_name"])
    yield mgmt.get_ip_address(machine_name), settings['port']
    mgmt.delete_vm(machine_name)
def _get_vm_name(request):
    """Helper function to get vm name from test requirement mark.

    At first we try to get a requirement value from ``pytestmark`` module
    list. If it's missing we can try to look up it in the test function
    itself. There is one restriction for it: we cannot get the test function
    mark from module scoped fixtures.

    Unless ``--no-assignee-vm-name`` is passed, the resolved assignee is
    appended to the generated name.
    """
    try:
        req = [mark.args[0] for mark in request.module.pytestmark
               if mark.name == "requirement"]
    except AttributeError:
        req = None
        logger.debug("Could not get the requirement from pytestmark")
    if not req and request.scope == "function":
        try:
            req = [mark.args for mark in request.function.pytestmark
                   if mark.name == 'requirement'][0]
        except AttributeError:
            raise CFMEException("VM name can not be obtained")
    # NOTE(review): if req is still empty/None here, ``req[0]`` raises an
    # opaque IndexError/TypeError rather than CFMEException — confirm intended.
    vm_name = random_vm_name(req[0])

    if not request.config.getoption('--no-assignee-vm-name'):
        if isinstance(request.node, pytest.Function):
            # Single test: take the assignee straight from its docstring.
            assignee = get_parsed_docstring(
                request.node, request.session._docstrings_cache).get('assignee', '')
        else:
            # Fetch list of tests in the module object
            test_list = [
                item
                for item in dir(request.module)
                if item.startswith('test_') and not ('test_requirements' == item)
            ]
            # Find out assignee for each test in test_list
            assignee_list = list()
            for test in test_list:
                nodeid = f'{request.node.fspath.strpath}::{test}'
                try:
                    assignee_list.append(
                        request.session._docstrings_cache[nodeid]['assignee'])
                except KeyError:
                    continue
            # If all tests have same assignee, set length will be 1, else set assignee='module'
            assignee = assignee_list[0] if len(set(assignee_list)) == 1 else 'module'
        vm_name = f'{vm_name}-{assignee}'
    return vm_name
def get_vm(request, appliance, second_provider, template, datastore='nfs'):
    """Create a migration source VM on *second_provider* from *template*.

    Win10 templates are left powered off: migration tries to power the VM
    off, and a hibernated win10 leaves the filesystem in an unsupported state.
    """
    datastores = second_provider.data.get('datastores', [])
    source_datastore = [d.name for d in datastores if d.type == datastore][0]
    collection = second_provider.appliance.provider_based_collection(second_provider)
    vm_obj = collection.instantiate(random_vm_name('v2v-auto'),
                                    second_provider,
                                    template_name=template(second_provider)['name'])
    # See docstring: win10 must stay powered off for migration to succeed.
    power_on_vm = template.__name__ != 'win10_template'
    vm_obj.create_on_provider(timeout=2400,
                              find_in_cfme=True,
                              allow_skip="default",
                              datastore=source_datastore,
                              power_on=power_on_vm)
    request.addfinalizer(vm_obj.cleanup_on_provider)
    return vm_obj
def retire_vm(small_template, provider):
    """Yield a 'retire' VM from *small_template*; best-effort provider cleanup.

    Args:
        small_template: small template fixture, template on provider
        provider: provider crud object from fixture
    """
    generic_vm = VM.factory(random_vm_name('retire'), provider,
                            template_name=small_template.name)
    generic_vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield generic_vm
    try:
        if provider.mgmt.does_vm_exist(generic_vm.name):
            provider.mgmt.delete_vm(generic_vm.name)
    except Exception:
        logger.warning('Failed to delete vm from provider: {}'.format(generic_vm.name))
def vm_list(request, appliance, second_provider, provider):
    """Fixture to provide list of vm objects

    Instantiates one VM per template name and deploys it on the second
    provider (into the datastore given by ``request.param``) unless it
    already exists there.

    Returns:
        list: the instantiated VM objects.
    """
    # TODO: Need to add list of vm and its configuration in cfme_data.yaml
    templates = [second_provider.data.templates.big_template['name']]
    # BUG FIX: `vms` was never initialized, so the append below raised a
    # NameError (or leaked into a global of the same name).
    vms = []
    for template in templates:
        vm_name = random_vm_name(context='v2v-auto')
        collection = appliance.provider_based_collection(second_provider)
        vm = collection.instantiate(vm_name, second_provider,
                                    template_name=template)
        if not second_provider.mgmt.does_vm_exist(vm_name):
            logger.info("deploying {} on provider {}".format(
                vm_name, second_provider.key))
            vm.create_on_provider(allow_skip="default", datastore=request.param)
        vms.append(vm)
    return vms
def create_vm_with_clone(request, create_vm, provider, appliance):
    """Fixture to provision a VM and clone it

    Clones ``create_vm`` under a fresh random name and waits for the clone
    to appear, registering provider-side cleanup of the clone.

    Returns:
        tuple: (original vm object, cloned vm object).
    """
    first_name = fauxfactory.gen_alphanumeric()
    last_name = fauxfactory.gen_alphanumeric()
    # BUG FIX: this was a plain string literal, so the placeholders were
    # never interpolated and the literal text "{first_name}..." was used as
    # the e-mail address; it must be an f-string.
    email = f"{first_name}.{last_name}@test.com"
    provision_type = 'VMware'
    vm_name = random_vm_name(context=None, max_length=15)
    create_vm.clone_vm(email, first_name, last_name, vm_name, provision_type)
    vm2 = appliance.collections.infra_vms.instantiate(vm_name, provider)
    wait_for(lambda: vm2.exists, timeout=120)

    @request.addfinalizer
    def _cleanup():
        vm2.cleanup_on_provider()
        provider.refresh_provider_relationships()

    return create_vm, vm2
def get_vm(request, appliance, second_provider, template, datastore='nfs'):
    """Provision a v2v source VM from *template* on the second provider.

    The first datastore whose type matches *datastore* is used as the
    deployment target; cleanup on the provider is registered as a finalizer.
    """
    candidates = [
        ds.name
        for ds in second_provider.data.get('datastores', [])
        if ds.type == datastore
    ]
    chosen_datastore = candidates[0]
    vm_collection = second_provider.appliance.provider_based_collection(
        second_provider)
    name = random_vm_name('v2v-auto')
    source_vm = vm_collection.instantiate(
        name,
        second_provider,
        template_name=template(second_provider)['name'],
    )
    source_vm.create_on_provider(
        timeout=2400,
        find_in_cfme=True,
        allow_skip="default",
        datastore=chosen_datastore,
    )
    request.addfinalizer(lambda: source_vm.cleanup_on_provider())
    return source_vm
def vm(provider, appliance, collection, setup_provider_modscope, small_template_modscope):
    """Creates new VM or instance.

    Deploys the VM from the small template only when the REST collection does
    not already contain it, yields the REST record, then deletes it through
    the REST API and waits for the record to disappear.
    """
    name = random_vm_name('snpsht')
    created = VM.factory(name, provider,
                         template_name=small_template_modscope.name)
    # Skip deployment when the REST collection already knows this name.
    if not collection.find_by(name=name):
        created.create_on_provider(find_in_cfme=True, allow_skip='default')
    yield collection.get(name=name)
    # Teardown through the REST API.
    matches = appliance.rest_api.collections.vms.find_by(name=name)
    if matches:
        rest_vm = matches[0]
        rest_vm.action.delete()
        rest_vm.wait_not_exists(num_sec=600, delay=5)
def catalog_item_setups(request, appliance, provider, setup_provider_modscope,
                        provisioning, catalog, dialog):
    """This fixture is used to create two catalog items instance pointing to method

    Both catalog items share one provisioning payload built from the
    provider's ``provisioning`` data; each item registers its own
    ``delete_if_exists`` finalizer.
    """
    template = provisioning.get('template')
    host = provisioning.get('host')
    datastore = provisioning.get('datastore')
    vlan = provisioning.get('vlan')
    provisioning_data = {
        'catalog': {
            'catalog_name': {'name': template, 'provider': provider.name},
            'vm_name': random_vm_name('service'),
        },
        'environment': {
            'host_name': {'name': host},
            'datastore_name': {'name': datastore},
        },
        'network': {'vlan': partial_match(vlan)},
    }
    created_items = []
    for _ in range(2):
        catalog_item = appliance.collections.catalog_items.create(
            provider.catalog_item_type,
            name=fauxfactory.gen_alphanumeric(15, start="cat_item_"),
            description="my catalog",
            display_in=True,
            catalog=catalog,
            dialog=dialog,
            prov_data=provisioning_data)
        created_items.append(catalog_item)
        request.addfinalizer(catalog_item.delete_if_exists)
    yield created_items, catalog_item
def test_vm_capture(appliance, request, provider, register_event):
    """tests that generalize and capture vm azure events are received and parsed by CFME

    Metadata:
        test_flag: events, provision
    """
    vm = appliance.collections.cloud_instances.instantiate(
        random_vm_name(context='capture'), provider)
    if not vm.exists_on_provider:
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    vm.refresh_relationships()
    # deferred delete vm
    request.addfinalizer(vm.cleanup_on_provider)

    def cmp_function(_, y):
        # In 5.9 version `y` is a dict, not a yaml stream.
        # NOTE(review): yaml.load without an explicit Loader is deprecated in
        # PyYAML >= 5.1 and unsafe on untrusted input — consider
        # yaml.safe_load if the event payload carries no custom tags.
        data = yaml.load(y) if appliance.version < '5.9' else y
        return (data['resourceId'].endswith(vm.name)
                and data['status']['value'] == 'Succeeded')

    full_data_attr = {'full_data': 'will be ignored', 'cmp_func': cmp_function}
    # Register both the generalize and the capture events.
    for event_type in ('virtualMachines_generalize_EndRequest',
                       'virtualMachines_capture_EndRequest'):
        register_event(full_data_attr, source='AZURE', event_type=event_type)
    # capture vm
    vm.mgmt.capture(container='templates', image_name=vm.name)
    # delete remaining image
    # removing both json and vhd files, find_templates returns blob objects
    blob_images = provider.mgmt.find_templates(
        container='system', name=vm.name, only_vhd=False)
    logger.info('Found blobs on system container: %s', blob_images)
    for blob in blob_images:
        logger.info('Deleting blob %s', blob)
        blob.cleanup()