Ejemplo n.º 1
0
def test_scope_windows_registry_stuck(request, appliance, infra_provider,
                                      policy_collection,
                                      policy_profile_collection):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
    # Create a compliance policy whose scope expression checks a Windows
    # registry key — this is the malformed scope that historically broke CFME.
    policy = policy_collection.create(
        VMCompliancePolicy,
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=
        r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
        r"some value, INCLUDES, some content)")
    # Only delete at teardown if the policy still exists at that point.
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    profile = policy_profile_collection.create(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy])
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    # Now assign this malformed profile to a VM
    vm = VM.factory(
        InfraVm.get_first_vm(provider=infra_provider).name, infra_provider)
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    navigate_to(appliance.server, 'Dashboard')
    view = navigate_to(InfraVm, 'All')
    # The VM listing title must not contain an exception message.
    assert "except" not in view.entities.title.text.lower()
    vm.unassign_policy_profiles(profile.description)
Ejemplo n.º 2
0
def new_snapshot(test_vm, has_name=True, memory=False, create_description=True):
    """Build an (uncreated) ``InfraVm.Snapshot`` for *test_vm*.

    Args:
        test_vm: VM object the snapshot will belong to.
        has_name: when False the snapshot is built without a name.
        memory: whether the snapshot should include VM memory.
        create_description: when False the snapshot gets no description.

    Returns:
        An ``InfraVm.Snapshot`` instance (not yet created on the provider).
    """
    name = fauxfactory.gen_alphanumeric(8)
    return InfraVm.Snapshot(
        # Fixed typo: was "snpshot_{}" — keep the name prefix consistent
        # with the description prefix below.
        name="snapshot_{}".format(name) if has_name else None,
        description="snapshot_{}".format(name) if create_description else None,
        memory=memory,
        parent_vm=test_vm
    )
Ejemplo n.º 3
0
def test_create_snapshot_via_ae(appliance, request, domain, small_test_vm):
    """This test checks whether the vm.create_snapshot works in AE.

    Prerequisites:
        * A VMware provider
        * A VM that has been discovered by CFME

    Steps:
        * Clone the Request class inside the System namespace into a new domain
        * Add a method named ``snapshot`` and insert the provided code there.
        * Add an instance named ``snapshot`` and set the method from previous step
            as ``meth5``
        * Run the simulation of the method against the VM, preferably setting
            ``snap_name`` to something that can be checked
        * Wait until snapshot with such name appears.
    """
    # PREPARE
    # Load the Ruby automate method body shipped with the test data.
    file = data_path.join("ui").join("automate").join(
        "test_create_snapshot_via_ae.rb")
    with file.open("r") as f:
        method_contents = f.read()
    miq_domain = DomainCollection(appliance).instantiate(name='ManageIQ')
    miq_class = miq_domain.namespaces.instantiate(
        name='System').classes.instantiate(name='Request')
    # Copy the stock /System/Request class into the editable test domain.
    miq_class.copy_to(domain)
    request_cls = domain.namespaces.instantiate(
        name='System').classes.instantiate(name='Request')
    request.addfinalizer(request_cls.delete)
    method = request_cls.methods.create(name="snapshot",
                                        location='inline',
                                        script=method_contents)
    request.addfinalizer(method.delete)
    # Wire the method into the instance through its ``meth5`` field.
    instance = request_cls.instances.create(
        name="snapshot", fields={"meth5": {
            'value': "snapshot"
        }})
    request.addfinalizer(instance.delete)

    # SIMULATE
    snap_name = fauxfactory.gen_alpha()
    snapshot = InfraVm.Snapshot(name=snap_name, parent_vm=small_test_vm)
    simulate(appliance=appliance,
             instance="Request",
             request="snapshot",
             target_type='VM and Instance',
             target_object=small_test_vm.name,
             execute_methods=True,
             attributes_values={"snap_name": snap_name})

    # Refresh the browser between polls so the snapshot tree is re-read.
    wait_for(lambda: snapshot.exists,
             timeout="2m",
             delay=10,
             fail_func=small_test_vm.provider.browser.refresh,
             handle_exception=True,
             message="Waiting for snapshot create")

    # Clean up if it appeared
    snapshot.delete()
def ssa_vm(request, local_setup_provider, provider, vm_analysis_provisioning_data,
           appliance, analysis_type):
    """ Fixture to provision instance on the provider """
    vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
    vm = VM.factory(vm_name, provider, template_name=vm_analysis_provisioning_data.image)
    request.addfinalizer(lambda: vm.cleanup_on_provider())

    # 'image' was only needed for the template choice above, not for provisioning.
    provision_data = vm_analysis_provisioning_data.copy()
    del provision_data['image']

    vm.create_on_provider(find_in_cfme=True, **provision_data)

    # OpenStack instances need a floating IP before they are reachable.
    if provider.one_of(OpenStackProvider):
        public_net = provider.data['public_network']
        vm.provider.mgmt.assign_floating_ip(vm.name, public_net)

    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    # Poll until the VM reports an IP; restart it if the provider shows it stopped.
    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, provider.mgmt.vm_status(vm_name), provider.mgmt.is_vm_stopped(vm_name)))
        if provider.mgmt.is_vm_stopped(vm_name):
            provider.mgmt.start_vm(vm_name)

        ip = provider.mgmt.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = provider.mgmt.get_ip_address(vm_name)
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(
            hostname=connect_ip, username=vm_analysis_provisioning_data['username'],
            password=vm_analysis_provisioning_data['password'], port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_provisioning_data['image']
    vm.connect_ip = connect_ip

    # TODO:  if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        cfme_rel = InfraVm.CfmeRelationship(vm)
        cfme_rel.set_relationship(appliance.server.name, appliance.server_id())

    yield vm

    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
def new_vm(provider, request):
    """Return a 'cockpit'-named test VM on *provider*, creating it if absent."""
    # Pick the factory matching the provider family, then build the VM object.
    factory = Instance.factory if provider.one_of(CloudProvider) else InfraVm.factory
    vm = factory(random_vm_name(context='cockpit'), provider)
    # Only provision (and register cleanup) when the VM is not already there.
    if not provider.mgmt.does_vm_exist(vm.name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
        request.addfinalizer(vm.cleanup_on_provider)
    return vm
Ejemplo n.º 6
0
def new_vm(provider, request):
    """Return a 'cockpit'-named test VM on *provider*, provisioning it when absent."""
    vm_name = random_vm_name(context='cockpit')
    if provider.one_of(CloudProvider):
        vm = Instance.factory(vm_name, provider)
    else:
        vm = InfraVm.factory(vm_name, provider)
    # Already present on the backend: nothing to create, no cleanup needed.
    if provider.mgmt.does_vm_exist(vm.name):
        return vm
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    request.addfinalizer(vm.cleanup_on_provider)
    return vm
Ejemplo n.º 7
0
def test_vm_reconfig_resize_disk_snapshot(request,
                                          disk_type,
                                          disk_mode,
                                          full_vm,
                                          memory=False):
    """

    Bugzilla:
        1631448

    Polarion:
        assignee: nansari
        initialEstimate: 1/8h
        startsin: 5.11
        casecomponent: Infra
        caseposneg: negative
        setup:
            1. Have a VM running on vsphere provider
        testSteps:
            1. Go to Compute -> infrastructure -> Virtual Machines -> Select Vm
            2. Create a snapshot for selected VM
            3. Go to VM reconfiguration and try to resize disk of the VM
        expectedResults:
            1. VM selected
            2. Snapshot created
            3. Resize is not allowed when snapshots are attached
    """

    # Create a snapshot so disk resize should become unavailable.
    snapshot = InfraVm.Snapshot(
        name=fauxfactory.gen_alphanumeric(start="snap_"),
        description=fauxfactory.gen_alphanumeric(start="desc_"),
        memory=memory,
        parent_vm=full_vm)
    snapshot.create()
    request.addfinalizer(snapshot.delete)

    view = navigate_to(full_vm, 'Reconfigure')
    # Take the first disk row of the reconfigure table.
    row = next(r for r in view.disks_table.rows())

    # The Delete action should still be enabled
    assert row.actions.widget.is_enabled

    # The resize widget (column 9) should not be displayed while a snapshot exists
    assert not row[9].widget.is_displayed
def test_edit_management_relationship(appliance, new_vm):
    """
    check that Edit Management Relationship works for the VM

    Bugzilla:
        1534400

    Polarion:
        assignee: jhenner
        casecomponent: WebUI
        caseimportance: high
        initialEstimate: 1/6h
    """
    relationship = InfraVm.CfmeRelationship(new_vm)

    # Run the set/unset cycle twice so the VM ends up without a relationship.
    for _ in range(2):
        relationship.set_relationship(appliance.server.name,
                                      appliance.server.sid)
        relationship.remove_relationship()
Ejemplo n.º 9
0
def do_vm_provisioning(appliance, template_name, provider, vm_name, provisioning_data, request,
                       smtp_test=None, num_sec=1500, wait=True):
    """Provision a VM from *template_name* through the CFME provisioning form.

    Args:
        appliance: CFME appliance under test.
        template_name: name of the source template.
        provider: provider object the VM is provisioned on.
        vm_name: name for the new VM.
        provisioning_data: dict filled into the provisioning form; its
            ``request`` section is populated here.
        request: pytest request fixture (accepted for signature compatibility;
            not used in this function body).
        smtp_test: optional SMTP test fixture; when truthy, also wait for the
            approval/completion e-mails.  Defaults to ``None`` so callers that
            do no e-mail checking may omit it (some call sites in this file do).
        num_sec: timeout in seconds for the provision request to finish.
        wait: when False, return right after submitting the form.
    """
    # generate_tests makes sure these have values
    vm = InfraVm(name=vm_name, provider=provider, template_name=template_name)
    note = ('template {} to vm {} on provider {}'.format(template_name, vm_name, provider.key))
    provisioning_data.update({
        'request': {
            'email': '*****@*****.**',
            'first_name': 'Template',
            'last_name': 'Provisioner',
            'notes': note}})
    view = navigate_to(vm, 'Provision')
    view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
    view.flash.assert_no_error()
    if not wait:
        return

    # Provision Re important in this test
    logger.info('Waiting for cfme provision request for vm %s', vm_name)
    request_description = 'Provision from [{}] to [{}]'.format(template_name, vm_name)
    provision_request = appliance.collections.requests.instantiate(request_description)
    provision_request.wait_for_request(method='ui', num_sec=num_sec)
    assert provision_request.is_succeeded(method='ui'), \
        "Provisioning failed with the message {}".format(provision_request.row.last_message.text)

    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
    wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)

    if smtp_test:
        # Wait for e-mails to appear
        def verify():
            approval = dict(subject_like="%%Your Virtual Machine configuration was Approved%%")
            expected_text = "Your virtual machine request has Completed - VM:%%{}".format(vm_name)
            return (
                len(smtp_test.get_emails(**approval)) > 0 and
                len(smtp_test.get_emails(subject_like=expected_text)) > 0
            )

        wait_for(verify, message="email receive check", delay=30)
Ejemplo n.º 10
0
    def _provisioner(template, provisioning_data, delayed=None):
        """Provision a VM from *template* via the CFME provisioning form.

        Closure over ``vm_name``, ``provider``, ``request`` and ``appliance``
        from the enclosing fixture (not visible in this chunk) — TODO confirm
        against the enclosing definition.

        Args:
            template: source template name.
            provisioning_data: dict filled into the provisioning form.
            delayed: optional datetime (compared against ``datetime.utcnow()``);
                when given, assert that the request does NOT finish before that
                time (i.e. the provisioning was actually postponed).

        Returns:
            The InfraVm object for the provisioned VM.
        """
        vm = InfraVm(name=vm_name, provider=provider, template_name=template)
        view = navigate_to(vm, 'Provision')
        view.form.fill_with(provisioning_data,
                            on_change=view.form.submit_button)
        base_view = vm.appliance.browser.create_view(BaseLoggedInPage)
        base_view.flash.assert_no_error()

        request.addfinalizer(
            lambda: VM.factory(vm_name, provider).cleanup_on_provider())
        request_description = 'Provision from [{}] to [{}]'.format(
            template, vm_name)
        provision_request = appliance.collections.requests.instantiate(
            description=request_description)
        if delayed is not None:
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            try:
                # If the request finishes before the scheduled time, the
                # postponement did not work — fail the test.
                wait_for(provision_request.is_finished,
                         fail_func=provision_request.update,
                         num_sec=total_seconds,
                         delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass

        logger.info('Waiting for vm %s to appear on provider %s', vm_name,
                    provider.key)
        wait_for(provider.mgmt.does_vm_exist, [vm_name],
                 fail_func=provider.refresh_provider_relationships,
                 handle_exception=True,
                 num_sec=600)

        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s', vm_name)
        provision_request.wait_for_request()
        msg = "Provisioning failed with the message {}".format(
            provision_request.rest.message)
        assert provision_request.is_succeeded(), msg
        return vm
Ejemplo n.º 11
0
def test_scope_windows_registry_stuck(request, appliance, infra_provider, policy_collection,
        policy_profile_collection):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
    # Create a compliance policy with a Windows-registry scope expression —
    # the malformed scope that historically broke CFME.
    policy = policy_collection.create(
        VMCompliancePolicy,
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
        r"some value, INCLUDES, some content)"
    )
    # Only delete at teardown if the policy/profile still exists at that point.
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    profile = policy_profile_collection.create(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy]
    )
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    # Now assign this malformed profile to a VM
    vm = VM.factory(InfraVm.get_first_vm(provider=infra_provider).name, infra_provider)
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    navigate_to(appliance.server, 'Dashboard')
    view = navigate_to(InfraVm, 'All')
    # The VM listing title must not contain an exception message.
    assert "except" not in view.entities.title.text.lower()
    vm.unassign_policy_profiles(profile.description)
Ejemplo n.º 12
0
    def _ssa_single_vm():
        """Provision a single VM for Smart State Analysis and return it.

        Closure over ``vm_analysis_provisioning_data``, ``analysis_type``,
        ``provider``, ``request`` and ``appliance`` from the enclosing
        fixture (not visible in this chunk) — TODO confirm against the
        enclosing definition.
        """
        template_name = vm_analysis_provisioning_data['image']
        vm_name = f'test-ssa-{fauxfactory.gen_alphanumeric()}-{analysis_type}'
        collection = provider.appliance.provider_based_collection(provider)
        vm = collection.instantiate(
            vm_name,
            provider,
            template_name=vm_analysis_provisioning_data.image)
        # 'image' was only needed for the template choice; drop it before provisioning.
        provision_data = vm_analysis_provisioning_data.copy()
        del provision_data['image']

        # Compliance tests and RHV providers go through the full CFME
        # provisioning workflow; everything else deploys straight from template.
        if "test_ssa_compliance" in request._pyfuncitem.name or provider.one_of(
                RHEVMProvider):
            provisioning_data = {
                "catalog": {
                    'vm_name': vm_name
                },
                "environment": {
                    'automatic_placement': True
                }
            }

            # RHV additionally needs a VLAN selection.
            if provider.one_of(RHEVMProvider):
                provisioning_data.update({
                    "network": {
                        'vlan': partial_match(provision_data['vlan'])
                    }
                })

            do_vm_provisioning(vm_name=vm_name,
                               appliance=appliance,
                               provider=provider,
                               provisioning_data=provisioning_data,
                               template_name=template_name,
                               request=request,
                               num_sec=2500)
        else:
            deploy_template(vm.provider.key,
                            vm_name,
                            template_name,
                            timeout=2500)
            vm.wait_to_appear(timeout=900, load_details=False)

        request.addfinalizer(lambda: vm.cleanup_on_provider())

        # OpenStack instances need a floating IP before they are reachable.
        if provider.one_of(OpenStackProvider):
            public_net = provider.data['public_network']
            vm.mgmt.assign_floating_ip(public_net)

        logger.info("VM %s provisioned, waiting for IP address to be assigned",
                    vm_name)

        vm.mgmt.ensure_state(VmState.RUNNING)

        try:
            connect_ip, _ = wait_for(find_pingable,
                                     func_args=[vm.mgmt],
                                     timeout="10m",
                                     delay=5,
                                     fail_condition=None)
        except TimedOutError:
            pytest.fail('Timed out waiting for pingable address on SSA VM')

        # Check that we can at least get the uptime via ssh this should only be possible
        # if the username and password have been set via the cloud-init script so
        # is a valid check
        if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
            logger.info("Waiting for %s to be available via SSH", connect_ip)

            ssh_client = ssh.SSHClient(
                hostname=connect_ip,
                username=credentials[
                    vm_analysis_provisioning_data.credentials]['username'],
                password=credentials[
                    vm_analysis_provisioning_data.credentials]['password'],
                port=22)
            wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
            vm.ssh = ssh_client
        vm.system_type = detect_system_type(vm)
        logger.info("Detected system type: %s", vm.system_type)
        vm.image = vm_analysis_provisioning_data['image']
        vm.connect_ip = connect_ip

        # TODO:  if rhev and iscsi, it need direct_lun
        if provider.type == 'rhevm':
            logger.info("Setting a relationship between VM and appliance")
            cfme_rel = InfraVm.CfmeRelationship(vm)
            cfme_rel.set_relationship(appliance.server.name,
                                      appliance.server_id())
        # Close the SSH client if we have one
        request.addfinalizer(lambda: vm.ssh.close()
                             if getattr(vm, 'ssh', None) else None)
        return vm
Ejemplo n.º 13
0
def generated_request(appliance, a_provider, provider_data, provisioning,
                      template_name, vm_name):
    """Creates a provision request, that is not automatically approved, and returns the search data.

    After finishing the test, request should be automatically deleted.

    Slightly modified code from :py:module:`cfme.tests.infrastructure.test_provisioning`
    """
    first_name = fauxfactory.gen_alphanumeric()
    last_name = fauxfactory.gen_alphanumeric()
    notes = fauxfactory.gen_alphanumeric()
    e_mail = "{}@{}.test".format(first_name, last_name)
    host, datastore = map(provisioning.get, ('host', 'datastore'))
    vm = InfraVm(name=vm_name,
                 provider=a_provider,
                 template_name=template_name)
    view = navigate_to(vm, 'Provision')

    # NOTE(review): num_vms '10' presumably trips the auto-approval threshold
    # so the request stays pending (see docstring) — confirm against policy.
    provisioning_data = {
        'request': {
            'email': e_mail,
            'first_name': first_name,
            'last_name': last_name,
            'notes': notes
        },
        'catalog': {
            'vm_name': vm_name,
            'num_vms': '10'
        },
        'environment': {
            'host_name': {
                'name': host
            },
            'datastore_name': {
                'name': datastore
            }
        },
    }

    # Same thing, different names. :\
    if provider_data["type"] == 'rhevm':
        provisioning_data['catalog']['provision_type'] = 'Native Clone'
    elif provider_data["type"] == 'virtualcenter':
        provisioning_data['catalog']['provision_type'] = 'VMware'

    try:
        provisioning_data['network'] = {
            'vlan': partial_match(provisioning['vlan'])
        }
    except KeyError:
        # provisioning['vlan'] is required for rhevm provisioning
        if provider_data["type"] == 'rhevm':
            # NOTE(review): pytest.fail already raises; the ``raise`` is redundant
            # (pytest.fail raises before ``raise`` evaluates), behavior unchanged.
            raise pytest.fail(
                'rhevm requires a vlan value in provisioning info')

    view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
    # '###' matches the numeric suffix CFME appends for multi-VM requests.
    request_cells = {
        "Description":
        "Provision from [{}] to [{}###]".format(template_name, vm_name),
    }
    provision_request = appliance.collections.requests.instantiate(
        cells=request_cells)
    yield provision_request

    # Teardown: log back in and remove the request we created.
    browser().get(appliance.url)
    appliance.server.login_admin()

    provision_request.remove_request()
    def _ssa_single_vm():
        """Provision a single VM for Smart State Analysis and return it.

        Closure over ``vm_analysis_provisioning_data``, ``analysis_type``,
        ``provider``, ``request`` and ``appliance`` from the enclosing
        fixture (not visible in this chunk) — TODO confirm against the
        enclosing definition.
        """
        template_name = vm_analysis_provisioning_data['image']
        vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
        collection = provider.appliance.provider_based_collection(provider)
        vm = collection.instantiate(vm_name,
                                    provider,
                                    template_name=vm_analysis_provisioning_data.image)
        # 'image' was only needed for the template choice; drop it before provisioning.
        provision_data = vm_analysis_provisioning_data.copy()
        del provision_data['image']

        # Compliance tests use the full CFME provisioning workflow; everything
        # else deploys straight from template.
        if "test_ssa_compliance" in request._pyfuncitem.name:
            provisioning_data = {"catalog": {'vm_name': vm_name},
                                 "environment": {'automatic_placement': True}}
            do_vm_provisioning(vm_name=vm_name, appliance=appliance, provider=provider,
                               provisioning_data=provisioning_data, template_name=template_name,
                               request=request, smtp_test=False, num_sec=2500)
        else:
            deploy_template(vm.provider.key, vm_name, template_name, timeout=2500)
            vm.wait_to_appear(timeout=900, load_details=False)

        request.addfinalizer(lambda: vm.delete_from_provider())

        # OpenStack instances need a floating IP before they are reachable.
        if provider.one_of(OpenStackProvider):
            public_net = provider.data['public_network']
            vm.provider.mgmt.assign_floating_ip(vm.name, public_net)

        logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

        # Poll until the VM reports an IP; restart it if the provider shows it stopped.
        @wait_for_decorator(timeout="20m", delay=5)
        def get_ip_address():
            logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
                vm_name, provider.mgmt.vm_status(vm_name), provider.mgmt.is_vm_stopped(vm_name)))
            if provider.mgmt.is_vm_stopped(vm_name):
                provider.mgmt.start_vm(vm_name)

            ip = provider.mgmt.current_ip_address(vm_name)
            logger.info("Fetched IP for %s: %s", vm_name, ip)
            return ip is not None

        connect_ip = provider.mgmt.get_ip_address(vm_name)
        assert connect_ip is not None

        # Check that we can at least get the uptime via ssh this should only be possible
        # if the username and password have been set via the cloud-init script so
        # is a valid check
        if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
            logger.info("Waiting for %s to be available via SSH", connect_ip)

            ssh_client = ssh.SSHClient(
                hostname=connect_ip,
                username=credentials[vm_analysis_provisioning_data.credentials]['username'],
                password=credentials[vm_analysis_provisioning_data.credentials]['password'],
                port=22)
            wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
            vm.ssh = ssh_client
        vm.system_type = detect_system_type(vm)
        logger.info("Detected system type: %s", vm.system_type)
        vm.image = vm_analysis_provisioning_data['image']
        vm.connect_ip = connect_ip

        # TODO:  if rhev and iscsi, it need direct_lun
        if provider.type == 'rhevm':
            logger.info("Setting a relationship between VM and appliance")
            cfme_rel = InfraVm.CfmeRelationship(vm)
            cfme_rel.set_relationship(appliance.server.name, appliance.server_id())
        # Close the SSH client if we have one
        request.addfinalizer(lambda: vm.ssh.close() if getattr(vm, 'ssh', None) else None)
        return vm