def get_conversion_data(target_provider):
    """Collect the conversion-host data needed for a transformation mapping.

    For a RHV target the engine's private SSH key is read off the RHV-M
    machine and the provider's hosts act as conversion hosts; for any other
    (OpenStack) target the key comes from the provider yaml and the
    conversion instances are listed there directly.

    Args:
        target_provider: the migration target provider object.

    Returns:
        dict with keys ``resource_type``, ``private_key``, ``auth_user``
        and ``hosts``.

    Skips the calling test when no conversion host/instance is configured.
    """
    if target_provider.one_of(RHEVMProvider):
        resource_type = "ManageIQ::Providers::Redhat::InfraManager::Host"
        engine_key = conf.credentials[target_provider.data["ssh_creds"]]
        auth_user = engine_key.username
        # Read the engine key straight from the RHV-M machine; use a context
        # manager so the SSH connection is closed as soon as the key is read
        # (the original left it open).
        with ssh.SSHClient(
                hostname=target_provider.hostname,
                username=engine_key.username,
                password=engine_key.password) as ssh_client:
            private_key = ssh_client.run_command(
                "cat /etc/pki/ovirt-engine/keys/engine_id_rsa").output

        try:
            hosts = [h.name for h in target_provider.hosts.all()]
        except KeyError:
            pytest.skip("No conversion host on provider")

    else:
        resource_type = "ManageIQ::Providers::Openstack::CloudManager::Vm"
        instance_key = conf.credentials[target_provider.data["private-keys"][
            "conversion_host_ssh_key"]["credentials"]]
        auth_user = instance_key.username
        # For OSP the private key is stored directly in the credentials yaml.
        private_key = instance_key.password
        try:
            hosts = target_provider.data["conversion_instances"]
        except KeyError:
            pytest.skip("No conversion instance on provider")

    return {
        "resource_type": resource_type,
        "private_key": private_key,
        "auth_user": auth_user,
        "hosts": hosts,
    }
def ssh_client(vm_obj, console_template):
    """Yield an SSH client connected to the console VM for use in tests."""
    template_creds = credentials.get(console_template.creds)
    client = ssh.SSHClient(
        hostname=vm_obj.ip_address,
        username=template_creds.username,
        password=template_creds.password,
    )
    with client as vm_ssh_client:
        yield vm_ssh_client
def pwsh_ssh(az_pwsh_vm):
    """Yield an SSH client to the Azure pwsh VM using the host_default creds."""
    host_creds = credentials['host_default']
    client = ssh.SSHClient(
        hostname=az_pwsh_vm.ip_address,
        username=host_creds['username'],
        password=host_creds['password'],
    )
    with client as vm_ssh_client:
        yield vm_ssh_client
def ssa_vm(request, local_setup_provider, provider, vm_analysis_provisioning_data,
           appliance, analysis_type):
    """ Fixture to provision instance on the provider

    Provisions a VM from the analysis template, waits for it to get an IP
    (powering it back on if needed), attaches an SSH client for guests whose
    filesystem supports it, detects the guest system type, and yields the VM.
    The VM is cleaned up and the SSH connection closed on teardown.
    """
    vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
    vm = VM.factory(vm_name, provider, template_name=vm_analysis_provisioning_data.image)
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))

    # 'image' was only needed to pick the template above; the remaining keys
    # are passed straight through to create_on_provider.
    provision_data = vm_analysis_provisioning_data.copy()
    del provision_data['image']

    vm.create_on_provider(find_in_cfme=True, **provision_data)

    # OpenStack VMs need a floating IP before they are reachable from outside.
    if provider.one_of(OpenStackProvider):
        public_net = provider.data['public_network']
        vm.provider.mgmt.assign_floating_ip(vm.name, public_net)

    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    # Poll until the provider reports an IP; restart the VM if it powered off.
    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, provider.mgmt.vm_status(vm_name), provider.mgmt.is_vm_stopped(vm_name)))
        if provider.mgmt.is_vm_stopped(vm_name):
            provider.mgmt.start_vm(vm_name)

        ip = provider.mgmt.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = provider.mgmt.get_ip_address(vm_name)
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(
            hostname=connect_ip, username=vm_analysis_provisioning_data['username'],
            password=vm_analysis_provisioning_data['password'], port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        # NOTE: vm.ssh is only set for SSH-able guests; teardown below uses
        # getattr so non-SSH guests don't break cleanup.
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_provisioning_data['image']
    vm.connect_ip = connect_ip

    # TODO:  if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        cfme_rel = Vm.CfmeRelationship(vm)
        cfme_rel.set_relationship(appliance.server.name, appliance.server_id())

    yield vm

    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
def test_provision_cloud_init(appliance, request, setup_provider, provider,
                              provisioning, setup_ci_template, vm_name):
    """ Tests provisioning from a template with cloud_init

    Provisions a VM with a cloud-init custom template and verifies the
    cloud-init credentials work by SSH-ing to the new VM.

    Metadata:
        test_flag: cloud_init, provision
    """
    # Prefer a dedicated cloud-init image when the provider yaml defines one.
    image = provisioning.get('ci-image') or provisioning['image']['name']
    note = (
        'Testing provisioning from image {} to vm {} on provider {}'.format(
            image, vm_name, provider.key))
    logger.info(note)

    mgmt_system = provider.mgmt

    inst_args = {
        'request': {
            'notes': note
        },
        'customize': {
            'custom_template': {
                'name': provisioning['ci-template']
            }
        }
    }
    # for image selection in before_fill
    inst_args['template_name'] = image

    # Provider-specific tweaks so the VM ends up reachable over SSH.
    if provider.one_of(AzureProvider):
        inst_args['environment'] = {'public_ip_address': "New"}
    if provider.one_of(OpenStackProvider):
        # Grab a free floating IP up front so we know the address to SSH to.
        ip_pool = provider.data['public_network']
        floating_ip = mgmt_system.get_first_floating_ip(pool=ip_pool)
        provider.refresh_provider_relationships()
        inst_args['environment'] = {'public_ip_address': floating_ip}
    if provider.one_of(InfraProvider) and appliance.version > '5.9':
        inst_args['customize']['customize_type'] = 'Specification'

    logger.info('Instance args: {}'.format(inst_args))

    collection = appliance.provider_based_collection(provider)
    instance = collection.create(vm_name, provider, form_values=inst_args)
    request.addfinalizer(instance.cleanup_on_provider)
    provision_request = provider.appliance.collections.requests.instantiate(
        vm_name, partial_check=True)
    provision_request.wait_for_request()
    wait_for(lambda: instance.ip_address is not None, num_sec=60)
    connect_ip = instance.ip_address
    assert connect_ip, "VM has no IP"

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    with ssh.SSHClient(hostname=connect_ip,
                       username=provisioning['ci-username'],
                       password=provisioning['ci-pass']) as ssh_client:
        wait_for(ssh_client.uptime, num_sec=200, handle_exception=True)
# --- Ejemplo n.º 6 · 0 (snippet-site scrape artifact, not code) ---
    def get_ca_cert(connection_info):
        """Fetch OpenShift's CA certificate from the master machine.

        Args:
            connection_info (dict): SSHClient kwargs (username, password and
                hostname) for the OCP master.

        Returns:
            str: the contents of ``/etc/origin/master/ca.crt``.
        """
        with ssh.SSHClient(**connection_info) as provider_ssh:
            # exec_command returns (stdin, stdout, stderr); only stdout is needed.
            _, stdout, _ = provider_ssh.exec_command("cat /etc/origin/master/ca.crt")
            # readlines() keeps the newlines, so the join reproduces the file
            # verbatim; the original wrapped this in a redundant str() call.
            return "".join(stdout.readlines())
def test_provision_cloud_init(request, setup_provider, provider, provisioning,
                              setup_ci_template, vm_name):
    """ Tests provisioning from a template with cloud_init

    Provisions an instance with a cloud-init custom template, then verifies
    the cloud-init credentials by SSH-ing to the instance.

    Metadata:
        test_flag: cloud_init, provision
    """
    # Prefer a dedicated cloud-init image when the provider yaml defines one.
    image = provisioning.get('ci-image') or provisioning['image']['name']
    note = ('Testing provisioning from image {} to vm {} on provider {}'.format(
        image, vm_name, provider.key))
    logger.info(note)

    mgmt_system = provider.mgmt

    instance = Instance.factory(vm_name, provider, image)

    request.addfinalizer(instance.delete_from_provider)
    # TODO: extend inst_args for other providers except EC2 if needed
    inst_args = {
        'request': {'email': '*****@*****.**',
                    'first_name': 'Image',
                    'last_name': 'Provisioner',
                    'notes': note},
        'catalog': {'vm_name': vm_name},
        'properties': {'instance_type': provisioning['instance_type'],
                       'guest_keypair': provisioning['guest_keypair']},
        'environment': {'availability_zone': provisioning['availability_zone'],
                        'cloud_network': provisioning['cloud_network'],
                        'security_groups': [provisioning['security_group']]},
        'customize': {'custom_template': {'name': provisioning['ci-template']}}
    }

    # OpenStack instances need a floating IP to be reachable over SSH.
    if provider.one_of(OpenStackProvider):
        floating_ip = mgmt_system.get_first_floating_ip()
        inst_args['environment']['public_ip_address'] = floating_ip

    logger.info('Instance args: {}'.format(inst_args))

    instance.create(**inst_args)

    connect_ip, tc = wait_for(mgmt_system.get_ip_address, [vm_name], num_sec=300,
                              handle_exception=True)

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    with ssh.SSHClient(hostname=connect_ip, username=provisioning['ci-username'],
                       password=provisioning['ci-pass']) as ssh_client:
        wait_for(ssh_client.uptime, num_sec=200, handle_exception=True)
# --- Ejemplo n.º 8 · 0 (snippet-site scrape artifact, not code) ---
def test_provision_cloud_init(appliance, setup_provider, provider, setup_ci_template,
                              vm_name, smtp_test, request, provisioning):
    """Tests cloud init provisioning

    Provisions an infra VM via Native Clone with a cloud-init template and
    verifies the cloud-init credentials by SSH-ing to the new VM.

    Metadata:
        test_flag: cloud_init, provision
        suite: infra_provisioning
    """
    # generate_tests makes sure these have values
    template = provisioning.get('ci-image') or provisioning['image']['name']
    host, datastore, vlan = map(provisioning.get, ('host', 'datastore', 'vlan'))

    mgmt_system = provider.get_mgmt_system()

    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))

    provisioning_data = {
        'catalog': {
            'provision_type': 'Native Clone',
            'vm_name': vm_name},
        'environment': {
            'host_name': {'name': host},
            'datastore_name': {'name': datastore}},
        'network': {
            'vlan': vlan},
        'customize': {
            'custom_template': {'name': [provisioning['ci-template']]}}
    }

    do_vm_provisioning(appliance, template, provider, vm_name, provisioning_data, request,
                       smtp_test, num_sec=900)

    connect_ip, tc = wait_for(mgmt_system.get_ip_address, [vm_name], num_sec=300,
                              handle_exception=True)

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    with ssh.SSHClient(hostname=connect_ip, username=provisioning['ci-username'],
                       password=provisioning['ci-pass']) as ssh_client:
        wait_for(ssh_client.uptime, num_sec=200, handle_exception=True)
def test_vm_console(request, appliance, setup_provider, context,
                    configure_websocket, configure_console_vnc, order_service,
                    take_screenshot, console_template, provider):
    """Test Myservice VM Console in SSUI.

    Orders a service, launches its VM console, logs in through the console
    and verifies the login via SSH (by comparing logged-in user counts and
    round-tripping a file created through the console).

    Metadata:
        test_flag: ssui

    Polarion:
        assignee: apagac
        casecomponent: Infra
        caseimportance: medium
        initialEstimate: 1/2h
    """
    if (provider.one_of(VMwareProvider) and provider.version >= 6.5 or
            'html5_console' in provider.data.get('excluded_test_flags', [])):
        pytest.skip(
            'VNC consoles are unsupported on VMware ESXi 6.5 and later')

    catalog_item = order_service
    service_name = catalog_item.name
    console_vm_username = credentials[
        provider.data.templates.console_template.creds].username
    console_vm_password = credentials[
        provider.data.templates.console_template.creds].password
    with appliance.context.use(context):
        myservice = MyService(appliance, service_name)
        vm_obj = myservice.launch_vm_console(catalog_item)
        vm_console = vm_obj.vm_console
        if provider.one_of(OpenStackProvider):
            public_net = provider.data['public_network']
            vm_obj.mgmt.assign_floating_ip(public_net)
        request.addfinalizer(vm_console.close_console_window)
        request.addfinalizer(appliance.server.logout)
        # Cirros on OSP lacks `who --count`, hence the alternate form there.
        ssh_who_command = ("who --count"
                           if not provider.one_of(OpenStackProvider) else
                           "who -aH")
        with ssh.SSHClient(hostname=vm_obj.ip_address,
                           username=console_vm_username,
                           password=console_vm_password) as vm_ssh_client:
            try:
                assert vm_console.wait_for_connect(180), (
                    "VM Console did not reach 'connected'"
                    " state")
                user_count_before_login = vm_ssh_client.run_command(
                    ssh_who_command, ensure_user=True)
                logger.info("Output of '{}' is {} before login".format(
                    ssh_who_command, user_count_before_login))
                # NOTE(review): this assertion was mangled by credential
                # scrubbing in the source ("login:"******); reconstructed to
                # mirror the Password assertion below — confirm timeout value.
                assert vm_console.wait_for_text(
                    text_to_find="login:",
                    timeout=200), ("VM Console"
                                   " didn't prompt for Login")
                # Enter Username:
                vm_console.send_keys("{}".format(console_vm_username))
                assert vm_console.wait_for_text(
                    text_to_find="Password",
                    timeout=200), ("VM Console"
                                   " didn't prompt for Password")
                # Enter Password:
                vm_console.send_keys("{}".format(console_vm_password))
                logger.info("Wait to get the '$' prompt")
                if not provider.one_of(OpenStackProvider):
                    vm_console.wait_for_text(
                        text_to_find=provider.data.templates.console_template.
                        prompt_text,
                        timeout=200)

                def _validate_login():
                    # the following try/except is required to handle the exception thrown by SSH
                    # while connecting to VMware VM.It throws "[Error 104]Connection reset by Peer".
                    try:
                        user_count_after_login = vm_ssh_client.run_command(
                            ssh_who_command, ensure_user=True)
                        logger.info("Output of '{}' is {} after login".format(
                            ssh_who_command, user_count_after_login))
                        return user_count_before_login < user_count_after_login
                    except Exception as e:
                        logger.info("Exception: {}".format(e))
                        logger.info(
                            "Trying again to perform 'who --count' over ssh.")
                        return False

                # Number of users before login would be 0 and after login would be 180
                # If below assertion would fail user_count_after_login is also 0,
                # denoting login failed
                wait_for(func=_validate_login, timeout=300, delay=5)
                # create file on system
                vm_console.send_keys("touch blather")
                wait_for(func=vm_ssh_client.run_command,
                         func_args=["ls blather"],
                         func_kwargs={'ensure_user': True},
                         fail_condition=lambda result: result.rc != 0,
                         delay=1,
                         num_sec=10)
                # if file was created in previous steps it will be removed here
                # we will get instance of SSHResult
                command_result = vm_ssh_client.run_command("rm blather",
                                                           ensure_user=True)
                assert command_result

            except Exception:
                # Take a screenshot if an exception occurs
                vm_console.switch_to_console()
                take_screenshot("ConsoleScreenshot")
                vm_console.switch_to_appliance()
                raise
def test_vm_console(request, appliance, setup_provider, context, configure_websocket,
        configure_console_vnc, order_catalog_item_in_ops_ui, take_screenshot,
        console_template):
    """Test Myservice VM Console in SSUI.

    Orders a catalog item, launches its VM console, logs in through the
    console and verifies the login via SSH (user-count comparison plus
    round-tripping a file created through the console).
    """
    catalog_item = order_catalog_item_in_ops_ui
    service_name = catalog_item.name
    console_vm_username = credentials[catalog_item.provider.data.templates.console_template
                            .creds].username
    console_vm_password = credentials[catalog_item.provider.data.templates.console_template
                            .creds].password
    with appliance.context.use(context):
        myservice = MyService(appliance, service_name)
        vm_obj = myservice.launch_vm_console(catalog_item)
        vm_console = vm_obj.vm_console
        request.addfinalizer(vm_console.close_console_window)
        request.addfinalizer(appliance.server.logout)
        with ssh.SSHClient(hostname=vm_obj.ip_address, username=console_vm_username,
                password=console_vm_password) as vm_ssh_client:
            try:
                assert vm_console.wait_for_connect(180), ("VM Console did not reach 'connected'"
                    " state")
                user_count_before_login = vm_ssh_client.run_command("who --count", ensure_user=True)
                logger.info("Output of who --count is {} before login"
                    .format(user_count_before_login))
                # NOTE(review): this assertion was mangled by credential
                # scrubbing in the source ("login:"******); reconstructed to
                # mirror the Password assertion below — confirm timeout value.
                assert vm_console.wait_for_text(text_to_find="login:", timeout=200), ("VM Console"
                    " didn't prompt for Login")
                # Enter Username:
                vm_console.send_keys("{}".format(console_vm_username))
                assert vm_console.wait_for_text(text_to_find="Password", timeout=200), ("VM Console"
                " didn't prompt for Password")
                # Enter Password:
                vm_console.send_keys("{}".format(console_vm_password))
                logger.info("Wait to get the '$' prompt")
                vm_console.wait_for_text(text_to_find=catalog_item.provider.data.templates.
                    console_template.prompt_text, timeout=200)

                def _validate_login():
                    # the following try/except is required to handle the exception thrown by SSH
                    # while connecting to VMware VM.It throws "[Error 104]Connection reset by Peer".
                    try:
                        user_count_after_login = vm_ssh_client.run_command("who --count",
                                                    ensure_user=True)
                        logger.info("Output of 'who --count' is {} after login"
                        .format(user_count_after_login))
                        return user_count_before_login < user_count_after_login
                    except Exception as e:
                        logger.info("Exception: {}".format(e))
                        logger.info("Trying again to perform 'who --count' over ssh.")
                        return False

                # Number of users before login would be 0 and after login would be 180
                # If below assertion would fail user_count_after_login is also 0,
                # denoting login failed
                wait_for(func=_validate_login, timeout=300, delay=5)
                # create file on system
                vm_console.send_keys("touch blather")
                wait_for(func=vm_ssh_client.run_command, func_args=["ls blather"],
                    func_kwargs={'ensure_user': True},
                    fail_condition=lambda result: result.rc != 0, delay=1, num_sec=10)
                # if file was created in previous steps it will be removed here
                # we will get instance of SSHResult
                command_result = vm_ssh_client.run_command("rm blather", ensure_user=True)
                assert command_result

            except Exception:
                # Take a screenshot if an exception occurs; a bare `raise`
                # (instead of `raise e`) preserves the original traceback.
                vm_console.switch_to_console()
                take_screenshot("ConsoleScreenshot")
                vm_console.switch_to_appliance()
                raise
# --- Ejemplo n.º 11 · 0 (snippet-site scrape artifact, not code) ---
def test_html5_vm_console(appliance, provider, configure_websocket, vm_obj,
        configure_vmware_console_for_test, take_screenshot):
    """
    Test the HTML5 console support for a particular provider.

    The supported providers are:

        VMware
        Openstack
        RHV

    For a given provider, and a given VM, the console will be opened, and then:

        - The console's status will be checked.
        - A command that creates a file will be sent through the console.
        - Using ssh we will check that the command worked (i.e. that the file
          was created.
    """
    console_vm_username = credentials[provider.data.templates.get('console_template')
                            ['creds']].get('username')
    console_vm_password = credentials[provider.data.templates.get('console_template')
                            ['creds']].get('password')

    vm_obj.open_console(console='VM Console')
    assert vm_obj.vm_console, 'VMConsole object should be created'
    vm_console = vm_obj.vm_console
    try:
        # If the banner/connection-status element exists we can get
        # the connection status text and if the console is healthy, it should connect.
        assert vm_console.wait_for_connect(180), "VM Console did not reach 'connected' state"

        # Get the login screen image, and make sure it is a jpeg file:
        screen = vm_console.get_screen()
        assert imghdr.what('', screen) == 'jpeg'

        # NOTE(review): this assertion was mangled by credential scrubbing in
        # the source ("login:"******); reconstructed to mirror the Password
        # assertion below — confirm timeout value.
        assert vm_console.wait_for_text(text_to_find="login:", timeout=200), ("VM Console"
            " didn't prompt for Login")

        # Enter Username:
        vm_console.send_keys(console_vm_username)

        assert vm_console.wait_for_text(text_to_find="Password", timeout=200), ("VM Console"
            " didn't prompt for Password")
        # Enter Password:
        vm_console.send_keys("{}\n".format(console_vm_password))

        time.sleep(5)  # wait for login to complete

        # This regex can find if there is a word 'login','password','incorrect' present in
        # text, irrespective of its case
        regex_for_login_password = re.compile(r'\blogin\b | \bpassword\b| \bincorrect\b',
         flags=re.I | re.X)

        def _validate_login():
            """
            Try to read what is on present on the last line in console.

            If it is word 'login', enter username, if 'password' enter password, in order
            to make the login successful
            """
            if vm_console.find_text_on_screen(text_to_find='login', current_line=True):
                vm_console.send_keys(console_vm_username)

            if vm_console.find_text_on_screen(text_to_find='Password', current_line=True):
                vm_console.send_keys("{}\n".format(console_vm_password))
            # if the login attempt failed for some reason (happens with RHOS-cirros),
            # last line of the console will contain one of the following words:
            # [login, password, incorrect]
            # if so, regex_for_login_password will find it and result will not be []
            # .split('\n')[-1] splits the console text on '\n' & picks last item of resulting list
            result = regex_for_login_password.findall(vm_console.get_screen_text().split('\n')[-1])
            return result == []

        # if _validate_login() returns True, it means we did not find any of words
        # [login, password, incorrect] on last line of console text, which implies login success
        wait_for(func=_validate_login, timeout=300, delay=5)

        logger.info("Wait to get the '$' prompt")
        if provider.one_of(VMwareProvider):
            vm_console.wait_for_text(text_to_find=provider.data.templates.get('console_template')
                            ['prompt_text'], timeout=200)
        else:
            time.sleep(15)

        # create file on system
        vm_console.send_keys("touch blather")
        if not (BZ.bugzilla.get_bug(1491387).is_opened):
            # Test pressing ctrl-alt-delete...we should be able to get a new login prompt:
            vm_console.send_ctrl_alt_delete()
            # NOTE(review): reconstructed from a scrubbed line — the original
            # message text was lost; confirm wording/timeout against upstream.
            assert vm_console.wait_for_text(text_to_find="login:", timeout=200), (
                "Text 'login:' not found, VM Console didn't prompt for Login")

        if not provider.one_of(OpenStackProvider):
            assert vm_console.send_fullscreen(), ("VM Console Toggle Full Screen button does"
            " not work")

        with ssh.SSHClient(hostname=vm_obj.ip_address, username=console_vm_username,
                password=console_vm_password) as ssh_client:
            # if file was created in previous steps it will be removed here
            # we will get instance of SSHResult
            # Sometimes Openstack drops characters from word 'blather' hence try to remove
            # file using partial file name. Known issue, being worked on.
            command_result = ssh_client.run_command("rm blather", ensure_user=True)
            assert command_result
    except Exception:
        # Take a screenshot if an exception occurs (narrowed from a bare
        # except: so KeyboardInterrupt/SystemExit pass straight through)
        vm_console.switch_to_console()
        take_screenshot("ConsoleScreenshot")
        vm_console.switch_to_appliance()
        raise
    finally:
        vm_console.close_console_window()
        # Logout is required because when running the Test back 2 back against RHV and VMware
        # Providers, following issue would arise:
        # If test for RHV is just finished, code would proceed to adding VMware Provider and once it
        # is added, then it will navigate to Infrastructure -> Virtual Machines Page, it will see
        # "Page Does not Exists" Error, because the browser will try to go to the
        # VM details page of RHV VM which is already deleted
        # at the End of test for RHV Provider Console and test would fail.
        # Logging out would get rid of this issue.
        appliance.server.logout()
# --- Ejemplo n.º 12 · 0 (snippet-site scrape artifact, not code) ---
def temp_pod_ansible_appliance(provider, appliance_data, template_tags):
    """Deploy a podified CFME appliance on OpenShift via openshift-ansible.

    Renders the ansible inventory from the image tags, copies it to the OCP
    master, runs the openshift-management playbook, then yields an
    IPAppliance pointed at the deployed project.  The project is deleted on
    teardown.
    """
    tags = template_tags
    params = appliance_data.copy()
    # Unique, lowercase project name (OpenShift project names must be lowercase).
    project = 'test-pod-ansible-{t}'.format(
        t=fauxfactory.gen_alphanumeric().lower())
    try:
        with ssh.SSHClient(
                hostname=params['openshift_creds']['hostname'],
                username=params['openshift_creds']['ssh']['username'],
                password=params['openshift_creds']['ssh']['password'],
                oc_username=params['openshift_creds']['username'],
                oc_password=params['openshift_creds']['password'],
                project=project,
                is_pod=True) as ssh_client:

            # copying ansible configuration file to openshift server
            fulfilled_config = ansible_config.format(
                host=provider.provider_data['hostname'],
                subdomain=provider.provider_data['base_url'],
                proj=project,
                app_ui_url=tags['cfme-openshift-app-ui']['url'],
                app_ui_tag=tags['cfme-openshift-app-ui']['tag'],
                app_url=tags['cfme-openshift-app']['url'],
                app_tag=tags['cfme-openshift-app']['tag'],
                ansible_url=tags['cfme-openshift-embedded-ansible']['url'],
                ansible_tag=tags['cfme-openshift-embedded-ansible']['tag'],
                httpd_url=tags['cfme-openshift-httpd']['url'],
                httpd_tag=tags['cfme-openshift-httpd']['tag'],
                memcached_url=tags['cfme-openshift-memcached']['url'],
                memcached_tag=tags['cfme-openshift-memcached']['tag'],
                db_url=tags['cfme-openshift-postgresql']['url'],
                db_tag=tags['cfme-openshift-postgresql']['tag'])
            logger.info(
                "ansible config file:\n {conf}".format(conf=fulfilled_config))
            # flush + fsync so the file is fully on disk before put_file reads it
            with tempfile.NamedTemporaryFile('w') as f:
                f.write(fulfilled_config)
                f.flush()
                os.fsync(f.fileno())
                # NOTE(review): NamedTemporaryFile names are already absolute
                # (e.g. /tmp/tmpXXXX), so this join returns f.name unchanged
                # on POSIX — confirm the intent.
                remote_file = os.path.join('/tmp', f.name)
                ssh_client.put_file(f.name, remote_file, ensure_host=True)

            # run ansible deployment
            ansible_cmd = ('/usr/bin/ansible-playbook -v -i {inventory_file} '
                           '/usr/share/ansible/openshift-ansible/playbooks/'
                           'openshift-management/config.yml').format(
                               inventory_file=remote_file)
            cmd_result = ssh_client.run_command(ansible_cmd, ensure_host=True)
            logger.info(u"deployment result: {result}".format(
                result=cmd_result.output))
            ssh_client.run_command('rm -f {f}'.format(f=remote_file))

            assert cmd_result.success
            # retrieve data of created appliance
            assert provider.mgmt.is_vm_running(
                project), "Appliance was not deployed correctly"
            params['db_host'] = provider.mgmt.expose_db_ip(project)
            params['project'] = project
            params['hostname'] = provider.mgmt.get_appliance_url(project)
            # create instance of appliance
            with IPAppliance(**params) as appliance:
                # framework will try work with default appliance if browser restarts w/o this
                # workaround
                holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
                holder.held_appliance = appliance
                yield appliance
    finally:
        # Always tear the project down, even when deployment failed mid-way.
        if provider.mgmt.does_vm_exist(project):
            provider.mgmt.delete_vm(project)
# --- Ejemplo n.º 13 · 0 (snippet-site scrape artifact, not code) ---
def test_provision_cloud_init_payload(appliance, request, setup_provider,
                                      provider, provisioning, vm_name):
    """
    Tests that options specified in VM provisioning dialog in UI are properly passed as a cloud-init
    payload to the newly provisioned VM.

    Metadata:
        test_flag: cloud_init, provision

    Polarion:
        assignee: None
        initialEstimate: None
    """
    # Skip providers whose spec does not declare a cloud-init capable image.
    image = provisioning.get('ci-image', None)
    if not image:
        pytest.skip('No ci-image found in provider specification.')

    note = (
        'Testing provisioning from image {image} to vm {vm} on provider {provider}'
        .format(image=image, vm=vm_name, provider=provider.key))
    logger.info(note)

    # Cloud-init settings the provisioning dialog should forward to the VM.
    # NOTE(review): 'root_password' appears redacted ('******') in this source.
    ci_payload = {
        'root_password': '******',
        'address_mode': 'Static',
        'hostname': 'cimachine',
        'ip_address': '169.254.0.1',
        'subnet_mask': '29',
        'gateway': '169.254.0.2',
        'dns_servers': '169.254.0.3',
        'dns_suffixes': 'virt.lab.example.com',
        'custom_template': {
            'name': 'oVirt cloud-init'
        }
    }

    # Build the provisioning form values, folding the cloud-init payload
    # into the 'customize' tab.
    customize_values = {'customize_type': 'Specification'}
    customize_values.update(ci_payload)
    inst_args = {
        'request': {'notes': note},
        'customize': customize_values,
        'template_name': image,
    }
    logger.info('Instance args: {}'.format(inst_args))

    # Provision VM
    collection = appliance.provider_based_collection(provider)
    created_instance = collection.create(vm_name, provider, form_values=inst_args)
    request.addfinalizer(created_instance.cleanup_on_provider)
    provision_request = provider.appliance.collections.requests.instantiate(
        vm_name, partial_check=True)
    provision_request.wait_for_request()

    connect_ip = wait_for(
        find_global_ipv6, func_args=[created_instance], num_sec=600, delay=20).out
    logger.info('Connect IP: {}'.format(connect_ip))

    # Connect to the newly provisioned VM
    # NOTE(review): the SSH username appears redacted ('******') in this source.
    with ssh.SSHClient(hostname=connect_ip,
                       username='******',
                       password=ci_payload['root_password']) as ssh_client:
        # Check that correct hostname has been set
        hostname_result = ssh_client.run_command('hostname')
        assert hostname_result.success
        assert hostname_result.output.strip() == ci_payload['hostname']

        # Obtain network configuration script for eth0 and store it in a list
        ifcfg_result = ssh_client.run_command(
            'cat /etc/sysconfig/network-scripts/ifcfg-eth0')
        assert ifcfg_result.success
        config_list = ifcfg_result.output.split('\n')

        # Compare contents of network script with cloud-init payload
        assert 'BOOTPROTO=none' in config_list, 'Address mode was not set to static'
        for expected in ('IPADDR={}'.format(ci_payload['ip_address']),
                         'PREFIX={}'.format(ci_payload['subnet_mask']),
                         'GATEWAY={}'.format(ci_payload['gateway']),
                         'DNS1={}'.format(ci_payload['dns_servers']),
                         'DOMAIN={}'.format(ci_payload['dns_suffixes'])):
            assert expected in config_list
    def _ssa_single_vm():
        """Provision a single VM for Smart State Analysis and return it.

        Deploys from the template named in ``vm_analysis_provisioning_data``
        (via the full provisioning-request flow for the compliance test,
        otherwise via direct template deployment), waits for an IP address,
        attaches an SSH client where the filesystem type allows it, and
        records ``system_type``, ``image`` and ``connect_ip`` on the VM
        object.  VM deletion and SSH-client close are registered as
        finalizers on ``request``.
        """
        template_name = vm_analysis_provisioning_data['image']
        vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
        collection = provider.appliance.provider_based_collection(provider)
        vm = collection.instantiate(vm_name,
                                    provider,
                                    template_name=vm_analysis_provisioning_data.image)
        provision_data = vm_analysis_provisioning_data.copy()
        del provision_data['image']

        # The compliance test needs the VM to go through the full provisioning
        # request flow; all other callers deploy the template directly.
        if "test_ssa_compliance" in request._pyfuncitem.name:
            provisioning_data = {"catalog": {'vm_name': vm_name},
                                 "environment": {'automatic_placement': True}}
            do_vm_provisioning(vm_name=vm_name, appliance=appliance, provider=provider,
                               provisioning_data=provisioning_data, template_name=template_name,
                               request=request, smtp_test=False, num_sec=2500)
        else:
            deploy_template(vm.provider.key, vm_name, template_name, timeout=2500)
            vm.wait_to_appear(timeout=900, load_details=False)

        request.addfinalizer(lambda: vm.delete_from_provider())

        # OpenStack instances need a floating IP before they are reachable.
        if provider.one_of(OpenStackProvider):
            public_net = provider.data['public_network']
            vm.provider.mgmt.assign_floating_ip(vm.name, public_net)

        logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

        @wait_for_decorator(timeout="20m", delay=5)
        def get_ip_address():
            # Keep the VM powered on while polling for an address.
            logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
                vm_name, provider.mgmt.vm_status(vm_name), provider.mgmt.is_vm_stopped(vm_name)))
            if provider.mgmt.is_vm_stopped(vm_name):
                provider.mgmt.start_vm(vm_name)

            ip = provider.mgmt.current_ip_address(vm_name)
            logger.info("Fetched IP for %s: %s", vm_name, ip)
            return ip is not None

        connect_ip = provider.mgmt.get_ip_address(vm_name)
        assert connect_ip is not None

        # Check that we can at least get the uptime via ssh this should only be possible
        # if the username and password have been set via the cloud-init script so
        # is a valid check
        if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
            logger.info("Waiting for %s to be available via SSH", connect_ip)

            ssh_client = ssh.SSHClient(
                hostname=connect_ip,
                username=credentials[vm_analysis_provisioning_data.credentials]['username'],
                password=credentials[vm_analysis_provisioning_data.credentials]['password'],
                port=22)
            wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
            vm.ssh = ssh_client
        vm.system_type = detect_system_type(vm)
        logger.info("Detected system type: %s", vm.system_type)
        vm.image = vm_analysis_provisioning_data['image']
        vm.connect_ip = connect_ip

        # TODO:  if rhev and iscsi, it need direct_lun
        if provider.type == 'rhevm':
            logger.info("Setting a relationship between VM and appliance")
            cfme_rel = InfraVm.CfmeRelationship(vm)
            cfme_rel.set_relationship(appliance.server.name, appliance.server_id())
        # Close the SSH client if we have one
        request.addfinalizer(lambda: vm.ssh.close() if getattr(vm, 'ssh', None) else None)
        return vm
def test_create_azure_vm_from_azure_image(connect_az_account, cfme_vhd,
                                          upload_image_to_azure, vm_ip):
    """
    To run this test Azure account is required.

    Azure VM is provisioned from another VM using Powershell, that can be run on any provider.

    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/2h
        setup: # Virtual Machine Name - as it appears in Azure
               $VMName = "myVmName"
               $ResourceGroupName = "CFMEQE-Main"
               Break
               # Existing Azure Deployment Values - Video with instructions
               forthcoming.
               $AvailabilitySetName = "cfmeqe-as-free"
               $AzureLocation = "East US"
               $VMDeploymentSize= "Standard_A1"
               $StorageAccountName = "cfmeqestore"
               $BlobContainerName = "templates"
               $VHDName = "cfme-azure-56013.vhd"
               $VirtualNetworkName = "cfmeqe"
               $NetworkSecurityGroupName = "cfmeqe-nsg"
               $VirtualNetworkSubnetName = "default"
               $VirtualNetworkAddressPrefix = "10.0.0.0/16"
               $VirtualNetworkSubnetAddressPrefix = "10.0.0.0/24"
               # Create VM Components
               $StorageAccount = Get-AzureRmStorageAccount -ResourceGroupName
               $ResourceGroupName -Name $StorageAccountName
               $InterfaceName = $VMName
               $NetworkSecurityGroupID = Get-AzureRmNetworkSecurityGroup -Name
               $NetworkSecurityGroupName -ResourceGroupName $ResourceGroupName
               $PIp = New-AzureRmPublicIpAddress -Name $InterfaceName
               -ResourceGroupName $ResourceGroupName -Location $AzureLocation
               -AllocationMethod Dynamic -Force
               $SubnetConfig = New-AzureRmVirtualNetworkSubnetConfig -Name
               $VirtualNetworkSubnetName -AddressPrefix
               $VirtualNetworkSubnetAddressPrefix
               $VNet = New-AzureRmVirtualNetwork -Name $VirtualNetworkName
               -ResourceGroupName $ResourceGroupName -Location $AzureLocation
               -AddressPrefix $VirtualNetworkAddressPrefix -Subnet $SubnetConfig
               -Force
               $Interface = New-AzureRmNetworkInterface -Name $InterfaceName
               -ResourceGroupName $ResourceGroupName -Location $AzureLocation
               -SubnetId $VNet.Subnets[0].Id -PublicIpAddressId $PIp.Id -Force
               $AvailabilitySet = Get-AzureRmAvailabilitySet -ResourceGroupName
               $ResourceGroupName -Name $AvailabilitySetName
               $VirtualMachine = New-AzureRmVMConfig -VMName $VMName -VMSize
               $VMDeploymentSize -AvailabilitySetID $AvailabilitySet.Id
               $VirtualMachine = Add-AzureRmVMNetworkInterface -VM $VirtualMachine
               -Id $Interface.Id
               $OSDiskUri = $StorageAccount.PrimaryEndpoints.Blob.ToString() +
               $BlobContainerName + "/" + $VHDName
               $VirtualMachine = Set-AzureRmVMOSDisk -VM $VirtualMachine -Name
               $VMName -VhdUri $OSDiskUri -CreateOption attach -Linux
               # Create the Virtual Machine
               New-AzureRmVM -ResourceGroupName $ResourceGroupName -Location
               $AzureLocation -VM $VirtualMachine
        testSteps:
            1. Make the VM
            2. Config SSH support
            3. Config DNS is desired.
            4. SSH into new VM with Azure Public IP address and verify it has booted
            correctly.
            5. Use HTTP to DNS into the appliance web ui and make sure
            you can log in.
        startsin: 5.6
        teardown: When you're done, delete everything.  Make sure at a minimum that the
                  VM is completely Stopped in Azure.
    """
    # Wrap the freshly provisioned VM's IP in an appliance object.
    # NOTE(review): 'appliance' here is the imported module (not a fixture) --
    # it is referenced as appliance.IPAppliance; confirm the import at file top.
    app = appliance.IPAppliance.from_url(vm_ip)

    # Credentials for the provisioned VM from CFME image, this is different to the VM that runs
    # powershell scripts as Azure has specific requirements for login/password.
    # These credentials are used in the script create_vm.ps1 to provision the VM.
    username = credentials['azure_appliance']['username']
    password = credentials['azure_appliance']['password']

    with ssh.SSHClient(hostname=vm_ip, username=username,
                       password=password) as app_ssh_client:

        # permit root login over ssh for future appliance configuration
        command = 'sed -i "s/.*PermitRootLogin.*/PermitRootLogin yes/g" /etc/ssh/sshd_config'
        # 'echo <password> | sudo -S' feeds the sudo password on stdin since
        # the login user is not root.
        config = app_ssh_client.run_command('echo {} | sudo -S {}'.format(
            password, command),
                                            ensure_user=True)
        assert config.success

        # restart sshd to apply configuration changes
        restart = app_ssh_client.run_command(
            'echo {} | sudo -S systemctl restart sshd'.format(password),
            ensure_user=True)
        assert restart.success

        # unlock root password
        unlock = app_ssh_client.run_command(
            'echo {} | sudo -S passwd -u root'.format(password),
            ensure_user=True)
        assert unlock.success

    # Configure the appliance and wait until its web UI answers.
    app.configure()
    app.wait_for_web_ui()

    # Check we can login
    logged_in_page = app.server.login()
    assert logged_in_page.is_displayed
# --- Ejemplo n.º 16 ("Example #16", score 0 -- paste artifact from a code-sample aggregator) ---
def instance(request, local_setup_provider, provider, vm_name,
             vm_analysis_data, appliance):
    """Fixture to provision an instance on the provider for analysis tests.

    Creates the VM from the template in ``vm_analysis_data``, waits for an IP
    address, attaches an SSH client where the filesystem type allows it, and
    yields the VM object with ``system_type``, ``image`` and ``connect_ip``
    set.  The VM is cleaned up via a ``request`` finalizer and the SSH client
    is closed after the test.
    """

    vm = VM.factory(vm_name, provider, template_name=vm_analysis_data['image'])
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))

    provision_data = vm_analysis_data.copy()
    del provision_data['image']
    vm.create_on_provider(find_in_cfme=True, **provision_data)

    # OpenStack instances need a floating IP before they are reachable.
    if provider.type == "openstack":
        vm.provider.mgmt.assign_floating_ip(vm.name, 'public')

    logger.info("VM %s provisioned, waiting for IP address to be assigned",
                vm_name)

    mgmt_system = provider.get_mgmt_system()

    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        # Keep the VM powered on while polling for an address.
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, mgmt_system.vm_status(vm_name),
            mgmt_system.is_vm_stopped(vm_name)))
        if mgmt_system.is_vm_stopped(vm_name):
            mgmt_system.start_vm(vm_name)

        ip = mgmt_system.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = mgmt_system.get_ip_address(vm_name)
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(hostname=connect_ip,
                                   username=vm_analysis_data['username'],
                                   password=vm_analysis_data['password'],
                                   port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_data['image']
    vm.connect_ip = connect_ip

    # TODO:  This is completely wrong and needs to be fixed
    #   CFME relationship is suppose to be set to the appliance, which is required
    #   to be placed within the same datastore that the VM resides
    #
    #   Also, if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        from cfme.infrastructure.virtual_machines import Vm
        cfme_rel = Vm.CfmeRelationship(vm)
        server_name = appliance.server_name()
        cfme_rel.set_relationship(str(server_name), configuration.server_id())

    yield vm

    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
# --- Ejemplo n.º 17 ("Example #17", score 0 -- paste artifact from a code-sample aggregator) ---
    def _ssa_single_vm():
        """Provision a single VM for Smart State Analysis and return it.

        A newer variant of the SSA provisioning helper: RHEV providers (and
        the compliance test) go through the full provisioning-request flow
        (with a VLAN picked for RHEV), other providers deploy the template
        directly.  Waits for a pingable address via ``find_pingable``,
        attaches an SSH client where the filesystem type allows it, and
        records ``system_type``, ``image`` and ``connect_ip`` on the VM
        object.  Cleanup is registered on ``request``.
        """
        template_name = vm_analysis_provisioning_data['image']
        vm_name = f'test-ssa-{fauxfactory.gen_alphanumeric()}-{analysis_type}'
        collection = provider.appliance.provider_based_collection(provider)
        vm = collection.instantiate(
            vm_name,
            provider,
            template_name=vm_analysis_provisioning_data.image)
        provision_data = vm_analysis_provisioning_data.copy()
        del provision_data['image']

        # Compliance test and RHEV providers need the full provisioning
        # request flow; all other cases deploy the template directly.
        if "test_ssa_compliance" in request._pyfuncitem.name or provider.one_of(
                RHEVMProvider):
            provisioning_data = {
                "catalog": {
                    'vm_name': vm_name
                },
                "environment": {
                    'automatic_placement': True
                }
            }

            # RHEV additionally needs a VLAN selected on the network tab.
            if provider.one_of(RHEVMProvider):
                provisioning_data.update({
                    "network": {
                        'vlan': partial_match(provision_data['vlan'])
                    }
                })

            do_vm_provisioning(vm_name=vm_name,
                               appliance=appliance,
                               provider=provider,
                               provisioning_data=provisioning_data,
                               template_name=template_name,
                               request=request,
                               num_sec=2500)
        else:
            deploy_template(vm.provider.key,
                            vm_name,
                            template_name,
                            timeout=2500)
            vm.wait_to_appear(timeout=900, load_details=False)

        request.addfinalizer(lambda: vm.cleanup_on_provider())

        # OpenStack instances need a floating IP before they are reachable.
        if provider.one_of(OpenStackProvider):
            public_net = provider.data['public_network']
            vm.mgmt.assign_floating_ip(public_net)

        logger.info("VM %s provisioned, waiting for IP address to be assigned",
                    vm_name)

        vm.mgmt.ensure_state(VmState.RUNNING)

        try:
            connect_ip, _ = wait_for(find_pingable,
                                     func_args=[vm.mgmt],
                                     timeout="10m",
                                     delay=5,
                                     fail_condition=None)
        except TimedOutError:
            pytest.fail('Timed out waiting for pingable address on SSA VM')

        # Check that we can at least get the uptime via ssh this should only be possible
        # if the username and password have been set via the cloud-init script so
        # is a valid check
        if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
            logger.info("Waiting for %s to be available via SSH", connect_ip)

            ssh_client = ssh.SSHClient(
                hostname=connect_ip,
                username=credentials[
                    vm_analysis_provisioning_data.credentials]['username'],
                password=credentials[
                    vm_analysis_provisioning_data.credentials]['password'],
                port=22)
            wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
            vm.ssh = ssh_client
        vm.system_type = detect_system_type(vm)
        logger.info("Detected system type: %s", vm.system_type)
        vm.image = vm_analysis_provisioning_data['image']
        vm.connect_ip = connect_ip

        # TODO:  if rhev and iscsi, it need direct_lun
        if provider.type == 'rhevm':
            logger.info("Setting a relationship between VM and appliance")
            cfme_rel = InfraVm.CfmeRelationship(vm)
            cfme_rel.set_relationship(appliance.server.name,
                                      appliance.server_id())
        # Close the SSH client if we have one
        request.addfinalizer(lambda: vm.ssh.close()
                             if getattr(vm, 'ssh', None) else None)
        return vm
def test_webmks_vm_console(request, appliance, provider, vm_obj,
                           configure_vmware_console_for_test):
    """Test the VMware WebMKS console support for a particular provider.

    The supported providers are:

        VMware vSphere6 and vSphere6.5

    For a given provider, and a given VM, the console will be opened, and then:

        - The console's status will be checked.
        - A command that creates a file will be sent through the console.
        - Using ssh we will check that the command worked (i.e. that the file
          was created.)
    """
    # Look up the console template's credentials once instead of twice.
    console_template_creds = credentials[provider.data.templates.get(
        'console_template')['creds']]
    console_vm_username = console_template_creds.get('username')
    console_vm_password = console_template_creds.get('password')

    vm_obj.open_console(console='VM Console', invokes_alert=True)
    assert vm_obj.vm_console, 'VMConsole object should be created'
    vm_console = vm_obj.vm_console

    request.addfinalizer(vm_console.close_console_window)
    request.addfinalizer(appliance.server.logout)

    # Get the login screen image, and make sure it is a jpeg file:
    screen = vm_console.get_screen(180)
    assert imghdr.what('', screen) == 'jpeg'

    with ssh.SSHClient(hostname=vm_obj.ip_address,
                       username=console_vm_username,
                       password=console_vm_password) as vm_ssh_client:
        # NOTE(review): this line was corrupted in the original source by a
        # credential-redaction artifact ('text_to_find="login:"******"..."');
        # reconstructed from the parallel wait_for_text call below -- confirm
        # the exact prompt text and timeout against upstream.
        assert vm_console.wait_for_text(text_to_find="login:", timeout=200), \
            "VM Console didn't prompt for Login"

        result_before_login = vm_ssh_client.run_command("who --count",
                                                        ensure_user=True)
        # Enter Username:
        vm_console.send_keys(console_vm_username)

        assert vm_console.wait_for_text(text_to_find="Password", timeout=200),\
            "VM Console didn't prompt for Password"
        # Enter Password:
        vm_console.send_keys("{}\n".format(console_vm_password))

        result_after_login = vm_ssh_client.run_command("who --count",
                                                       ensure_user=True)
        # 'who --count' output ends with '# users=N'.  Compare the counts
        # numerically: the original compared the raw strings, which is a
        # lexicographic comparison and breaks as soon as the counts have a
        # different number of digits (e.g. '9' < '10' is False for strings).
        users_before = int(result_before_login.output.split('=')[-1].strip())
        users_after = int(result_after_login.output.split('=')[-1].strip())
        # A successful console login adds a logged-in user; if it failed the
        # count does not grow.
        assert users_before < users_after, "Login Failed"

    # This regex can find if there is a word 'login','password','incorrect' present in
    # text, irrespective of its case
    regex_for_login_password = re.compile(
        r'\blogin\b | \bpassword\b| \bincorrect\b', flags=re.I | re.X)

    def _validate_login():
        """
        Try to read what is on present on the last line in console.

        If it is word 'login', enter username, if 'password' enter password, in order
        to make the login successful
        """
        if vm_console.find_text_on_screen(text_to_find='login',
                                          current_line=True):
            vm_console.send_keys(console_vm_username)

        if vm_console.find_text_on_screen(text_to_find='Password',
                                          current_line=True):
            vm_console.send_keys("{}\n".format(console_vm_password))
        # if the login attempt failed for some reason (happens with RHOS-cirros),
        # last line of the console will contain one of the following words:
        # [login, password, incorrect]
        # if so, regex_for_login_password will find it and result will not be []
        # .split('\n')[-1] splits the console text on '\n' & picks last item of resulting list
        result = regex_for_login_password.findall(
            vm_console.get_screen_text().split('\n')[-1])
        return result == []

    # if _validate_login() returns True, it means we did not find any of words
    # [login, password, incorrect] on last line of console text, which implies login success
    wait_for(func=_validate_login, timeout=300, delay=5)

    logger.info("Wait to get the '$' prompt")

    vm_console.wait_for_text(text_to_find=provider.data.templates.get(
        'console_template')['prompt_text'],
                             timeout=200)

    with ssh.SSHClient(hostname=vm_obj.ip_address,
                       username=console_vm_username,
                       password=console_vm_password) as vm_ssh_client:
        # create file on system
        vm_console.send_keys("touch blather\n")
        # 'ls blather' only succeeds once the file exists.  The original
        # compared the SSHResult object to 0, which is never true; check the
        # result's .success flag instead so the wait can actually terminate.
        wait_for(func=lambda: vm_ssh_client.run_command(
                     "ls blather", ensure_user=True).success,
                 delay=1,
                 num_sec=10)
        # if file was created in previous steps it will be removed here
        # we will get instance of SSHResult
        # Sometimes Openstack drops characters from word 'blather' hence try to remove
        # file using partial file name. Known issue, being worked on.
        command_result = vm_ssh_client.run_command("rm blather",
                                                   ensure_user=True)
        assert command_result