Example #1
    def suspend_vm(self, instance_name):
        if self.is_vm_suspended(instance_name):
            return True

        instance = self._find_instance_by_name(instance_name)
        instance.suspend()
        wait_for(self.is_vm_suspended, [instance_name])
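Note: every example on this page polls through the same `wait_for` helper from the
test framework's wait utilities. Below is a minimal, illustrative sketch of the
calling convention the examples assume -- not the framework's implementation, which
also supports options seen later on this page (`fail_func`, `fail_condition`,
`handle_exception`, `timeout`):

# Illustrative sketch only -- NOT the framework's actual wait_for.
import time


class TimedOutError(Exception):
    """Raised when the condition does not become truthy within num_sec."""


def wait_for(func, func_args=None, func_kwargs=None, num_sec=120, delay=10,
             message="condition"):
    """Call func(*func_args, **func_kwargs) every `delay` seconds until it
    returns a truthy value; raise TimedOutError after `num_sec` seconds."""
    func_args = func_args or []
    func_kwargs = func_kwargs or {}
    start = time.time()
    while time.time() - start < num_sec:
        result = func(*func_args, **func_kwargs)
        if result:
            # The real helper returns (result, elapsed) so callers can
            # unpack, e.g. `row, __ = wait_for(...)`.
            return result, time.time() - start
        time.sleep(delay)
    raise TimedOutError("Timed out waiting for: {}".format(message))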
Example #2
    def _provisioner(template, provisioning_data, delayed=None):
        pytest.sel.force_navigate('infrastructure_provision_vms', context={
            'provider': provider,
            'template_name': template,
        })

        vm_name = provisioning_data["vm_name"]
        fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
        flash.assert_no_errors()

        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        if delayed is not None:
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
            cells = {'Description': row_description}
            try:
                row, __ = wait_for(requests.wait_for_request, [cells],
                                   fail_func=requests.reload, num_sec=total_seconds, delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass
        logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
        wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)

        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s', vm_name)
        row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
        cells = {'Description': row_description}
        row, __ = wait_for(requests.wait_for_request, [cells],
                           fail_func=requests.reload, num_sec=900, delay=20)
        assert row.last_message.text == 'Vm Provisioned Successfully'
        return VM.factory(vm_name, provider)
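Hypothetical usage of the inner helper above (the template name and provisioning
data are made up). Passing `delayed` makes the helper verify that the request does
NOT complete before the scheduled time:

# Illustrative only:
from datetime import datetime, timedelta

vm = _provisioner('rhel7-template', {'vm_name': 'test_prov_vm_1'})
vm = _provisioner('rhel7-template', {'vm_name': 'test_prov_vm_2'},
                  delayed=datetime.utcnow() + timedelta(minutes=10))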
Example #3
def upload_template(rhosip, sshname, sshpass, username, password, auth_url, provider, image_url,
                    template_name):
    try:
        print("RHOS:{} Starting template {} upload...".format(provider, template_name))

        kwargs = make_kwargs_rhos(cfme_data, provider)
        ssh_client = make_ssh_client(rhosip, sshname, sshpass)

        kwargs['image_url'] = image_url
        if template_name is None:
            template_name = cfme_data['basic_info']['appliance_template']

        export = make_export(username, password, kwargs.get('tenant_id'), auth_url)

        if not check_image_exists(template_name, export, ssh_client):
            output = upload_qc2_file(ssh_client, kwargs.get('image_url'), template_name, export,
                                     provider)

            image_id = get_image_id(output)

            wait_for(check_image_status, [image_id, export, ssh_client],
                     fail_condition=False, delay=5, num_sec=300)
        else:
            print("RHOS:{} Found image with name {}. Exiting...".format(provider, template_name))
        ssh_client.close()
    except Exception as e:
        print(e)
        return False
    finally:
        print("RHOS:{} End template {} upload...".format(provider, template_name))
Example #4
    def resume_vm(self, instance_name):
        if self.is_vm_running(instance_name):
            return True

        instance = self._find_instance_by_name(instance_name)
        instance.resume()
        wait_for(self.is_vm_running, [instance_name])
Example #5
def test_power_on_or_off_after_provision(provisioner, prov_data, provider, started):
    """ Tests setting the desired power state after provisioning.

    Prerequisites:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, choose whether the VM should be
            powered on after provisioning.
        * Submit the provisioning request and wait for it to finish.
        * The VM should become steady in the desired VM power state.

    Metadata:
        test_flag: provision
    """
    vm_name = "test_prov_dlg_{}".format(fauxfactory.gen_alphanumeric())
    prov_data["vm_name"] = vm_name
    prov_data["power_on"] = started
    template_name = provider.data['provisioning']['template']

    provisioner(template_name, prov_data)

    wait_for(
        lambda: provider.mgmt.does_vm_exist(vm_name) and
        (provider.mgmt.is_vm_running if started else provider.mgmt.is_vm_stopped)(vm_name),
        num_sec=240, delay=5
    )
Example #6
def test_action_raise_automation_event(
        request, assign_policy_for_testing, vm, vm_on, ssh_client, vm_crud_refresh):
    """ This test tests actions 'Raise Automation Event'.

    This test sets the policy that it raises an automation event VM after it's powered on.
    Then it checks logs whether that really happened.

    Metadata:
        test_flag: actions, provision
    """
    # Set up the policy and prepare finalizer
    assign_policy_for_testing.assign_actions_to_event("VM Power Off", ["Raise Automation Event"])
    request.addfinalizer(lambda: assign_policy_for_testing.assign_events())
    # Stop the VM
    vm.stop_vm()
    vm_crud_refresh()

    # Search the logs
    def search_logs():
        rc, stdout = ssh_client.run_command(
            "cat /var/www/miq/vmdb/log/automation.log | grep 'MiqAeEvent.build_evm_event' |"
            " grep 'event=<\"vm_poweroff\">' | grep 'id: %s'" % vm.soap.object.id
            # not guid, but the ID
        )
        if rc != 0:  # Nothing found, so shortcut
            return False
        found = [event for event in stdout.strip().split("\n") if len(event) > 0]
        if not found:
            return False
        else:
            logger.info("Found event: `%s`" % event[-1].strip())
            return True
    wait_for(search_logs, num_sec=180, message="log search")
Example #7
def test_action_untag(request, assign_policy_for_testing, vm, vm_off, vm_crud_refresh):
    """ Tests action untag

    Metadata:
        test_flag: actions, provision
    """
    tag_unassign_action = explorer.Action(
        fauxfactory.gen_alphanumeric(),
        action_type="Remove Tags",
        action_values={"cat_service_level": True}
    )
    assign_policy_for_testing.assign_actions_to_event("VM Power On", [tag_unassign_action])

    def finalize():
        assign_policy_for_testing.assign_events()
        tag_unassign_action.delete()
    request.addfinalizer(finalize)

    vm.start_vm()
    vm_crud_refresh()
    try:
        wait_for(
            lambda: not any(
                [tag.category == "service_level" and tag.tag_name == "gold" for tag in vm.soap.tags]
            ),
            num_sec=600,
            message="tag presence check"
        )
    except TimedOutError:
        pytest.fail("Tags were not unassigned!")
Example #8
def test_ssa_groups(provider, instance, soft_assert):
    """ Tests SSA fetches correct results for groups

    Metadata:
        test_flag: vm_analysis
    """
    group = fauxfactory.gen_alphanumeric()
    expected = None

    if instance.system_type != WINDOWS:
        # Add a new group
        instance.ssh.run_command("groupdel {0} || groupadd {0}".format(group))
        expected = instance.ssh.run_command("cat /etc/group | wc -l").output.strip('\n')

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))

    # Check that all data has been fetched
    current = instance.get_detail(properties=('Security', 'Groups'))
    if instance.system_type != WINDOWS:
        assert current == expected

    # Make sure created group is in the list
    instance.open_details(("Security", "Groups"))
    if instance.system_type != WINDOWS:
        if not instance.paged_table.find_row_on_all_pages('Name', group):
            pytest.fail("Group {0} was not found".format(group))
Example #9
def test_action_power_on_audit(
        request, assign_policy_for_testing, vm, vm_off, ssh_client, vm_crud_refresh):
    """ This test tests action 'Generate Audit Event'.

    This test sets the policy that it logs powering on of the VM. Then it powers up the vm and
    checks whether audit logs contain message about that.

    Metadata:
        test_flag: actions, provision
    """
    # Set up the policy and prepare finalizer
    assign_policy_for_testing.assign_actions_to_event("VM Power On", ["Generate Audit Event"])
    request.addfinalizer(lambda: assign_policy_for_testing.assign_events())
    # Start the VM
    vm.start_vm()
    vm_crud_refresh()
    policy_desc = assign_policy_for_testing.description

    # Search the logs
    def search_logs():
        rc, stdout = ssh_client.run_command(
            "cat /var/www/miq/vmdb/log/audit.log | grep '%s'" % policy_desc
        )
        if rc != 0:  # Nothing found, so shortcut
            return False
        for line in stdout.strip().split("\n"):
            if "Policy success" not in line or "MiqAction.action_audit" not in line:
                continue
            match_string = "policy: [%s], event: [VM Power On]" % (policy_desc)
            if match_string in line:
                logger.info("Found corresponding log message: %s" % line.strip())
                return True
        else:
            return False
    wait_for(search_logs, num_sec=180, message="log search")
Example #10
    def cleanup_host():
        try:
            logger.info('Cleaning up host %s on provider %s' % (prov_host_name, provider_crud.key))
            mgmt_system = provider_crud.get_mgmt_system()
            host_list = mgmt_system.list_host()
            if host_provisioning['ip_addr'] in host_list:
                wait_for(mgmt_system.is_host_connected, [host_provisioning['ip_addr']])
                mgmt_system.remove_host_from_cluster(host_provisioning['ip_addr'])

            ipmi = test_host.get_ipmi()
            ipmi.power_off()

            # During host provisioning, the host name gets changed from what's specified
            # at creation time. If host provisioning succeeds, the original name is
            # reverted to; otherwise the changed names are retained upon failure.
            renamed_host_name1 = "{} ({})".format('IPMI', host_provisioning['ipmi_address'])
            renamed_host_name2 = "{} ({})".format('VMware ESXi', host_provisioning['ip_addr'])

            host_list_ui = host.get_all_hosts()
            if host_provisioning['hostname'] in host_list_ui:
                test_host.delete(cancel=False)
                host.wait_for_host_delete(test_host)
            elif renamed_host_name1 in host_list_ui:
                host_renamed_obj1 = host.Host(name=renamed_host_name1)
                host_renamed_obj1.delete(cancel=False)
                host.wait_for_host_delete(host_renamed_obj1)
            elif renamed_host_name2 in host_list_ui:
                host_renamed_obj2 = host.Host(name=renamed_host_name2)
                host_renamed_obj2.delete(cancel=False)
                host.wait_for_host_delete(host_renamed_obj2)
        except Exception:
            # The mgmt_sys classes raise Exception :\
            logger.warning('Failed to clean up host %s on provider %s' %
                           (prov_host_name, provider_crud.key))
Example #11
def test_ssa_users(provider, instance, soft_assert):
    """ Tests SSA fetches correct results for users list

    Metadata:
        test_flag: vm_analysis
    """
    username = fauxfactory.gen_alphanumeric()
    expected = None

    # In the Windows case we can't add new users (yet),
    # so we simply check that the user list doesn't cause any Rails errors
    if instance.system_type != WINDOWS:
        # Add a new user
        instance.ssh.run_command("userdel {0} || useradd {0}".format(username))
        expected = instance.ssh.run_command("cat /etc/passwd | wc -l").output.strip('\n')

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))

    # Check that all data has been fetched
    current = instance.get_detail(properties=('Security', 'Users'))
    if instance.system_type != WINDOWS:
        assert current == expected

    # Make sure created user is in the list
    instance.open_details(("Security", "Users"))
    if instance.system_type != WINDOWS:
        if not instance.paged_table.find_row_on_all_pages('Name', username):
            pytest.fail("User {0} was not found".format(username))
Example #12
def test_action_tag(request, assign_policy_for_testing, vm, vm_off, vm_crud_refresh):
    """ Tests action tag

    Metadata:
        test_flag: actions, provision
    """
    if any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
           for tag in vm.crud.get_tags()):
        vm.crud.remove_tag(("Service Level", "Gold"))

    tag_assign_action = actions.Action(
        fauxfactory.gen_alphanumeric(),
        action_type="Tag",
        action_values={"tag": ("My Company Tags", "Service Level", "Gold")}
    )
    assign_policy_for_testing.assign_actions_to_event("VM Power On", [tag_assign_action])

    @request.addfinalizer
    def finalize():
        assign_policy_for_testing.assign_events()
        tag_assign_action.delete()

    vm.start_vm()
    vm_crud_refresh()
    try:
        wait_for(
            lambda: any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
                        for tag in vm.crud.get_tags()),
            num_sec=600,
            message="tag presence check"
        )
    except TimedOutError:
        pytest.fail("Tags were not assigned!")
Example #13
def _filter(
        zone=None,
        user=None,
        time_period=None,
        task_status_queued=None,
        task_status_running=None,
        task_status_ok=None,
        task_status_error=None,
        task_status_warn=None,
        task_state=None):
    """ Does filtering of the results in table. Needs to be on the correct page before called.

    If there was no change in the form and the apply button does not appear, nothing happens.

    Args:
        zone: Value for 'Zone' select
        user: Value for 'User' select
        time_period: Value for 'Time period' select.
        task_status_*: :py:class:`bool` values for checkboxes
        task_state: Value for 'Task State' select.
    """
    fill(filter_form, locals())
    try:
        wait_for(lambda: sel.is_displayed(buttons.apply), num_sec=5)
        sel.click(buttons.apply)
    except TimedOutError:
        pass
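A hypothetical call of the helper above (the select values are made up and depend
on the appliance's data):

# Illustrative only -- show queued and running tasks for a given user today.
_filter(
    zone='Default Zone',
    user='admin',
    time_period='Today',
    task_status_queued=True,
    task_status_running=True,
)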
Example #14
    def create(self, cancel=False, wait=False):
        view = navigate_to(self, 'Add')
        view.form.fill({
            'cloud_provider': self.provider.name,
            'name': self.name
        })
        if cancel:
            view.form.cancel_button.click()
        else:
            view.form.save_button.click()

        all_view = self.create_view(TenantAllView)
        wait_for(lambda: all_view.is_displayed, fail_condition=False, num_sec=120, delay=3,
                 fail_func=lambda: all_view.flush_widget_cache(), handle_exception=True)
        if cancel:
            if self.appliance.version >= '5.8':
                msg = 'Add of Cloud Tenant was cancelled by the user'
            else:
                msg = 'Add of new Cloud Tenant was cancelled by the user'
            all_view.entities.flash.assert_success_message(msg)
        else:
            all_view.entities.flash.assert_success_message(
                'Cloud Tenant "{}" created'.format(self.name))
        if wait:
            def refresh():
                """Refresh a few things"""
                self.provider.refresh_provider_relationships()
                all_view.flush_widget_cache()
                self.browser.refresh()

            wait_for(lambda: self.exists, timeout=600, message='Wait for cloud tenant to appear',
                     delay=10, fail_func=refresh)
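Judging by the `TenantAllView` reference and the attributes used (`self.name`,
`self.provider`), this is a cloud tenant's `create` method. A hypothetical caller
(the class name and constructor arguments are assumptions) might look like:

# Illustrative only -- `Tenant` and its constructor are assumed names.
tenant = Tenant(name='qa-tenant', provider=provider)
tenant.create(wait=True)  # blocks until the tenant shows up on the provider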
Example #15
def queue_canned_report(*path):
    """Queue report from selection of pre-prepared reports.

    Args:
        *path: Path in tree after All Reports
    Returns: Value of Run At in the table so the run can then be checked.
    """
    sel.force_navigate("report_canned_info", context={"path": path})
    toolbar.select("Queue")
    flash.assert_no_errors()
    tabstrip.select_tab("Saved Reports")

    def _get_state():
        try:
            first_row = list(records_table.rows())[0]
        except IndexError:
            return False
        return sel.text(first_row.status).strip().lower() == "finished"

    wait_for(
        _get_state,
        delay=1,
        message="wait for report generation finished",
        fail_func=reload_view
    )
    return sel.text(list(records_table.rows())[0].run_at).encode("utf-8")
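A hypothetical call (the path components name a canned report in the tree under
"All Reports" and will differ per appliance):

# Illustrative only -- the report path is made up.
run_at = queue_canned_report(
    "Configuration Management", "Virtual Machines", "Guest OS Information")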
Example #16
def templatize_vm(api, template_name, cluster, temp_vm_name, provider):
    """Templatizes the temporary VM. The result is a template with two disks.

    Args:
        api: API to chosen RHEVM provider.
        template_name: Name of the final template.
        cluster: Cluster to save the final template onto.
    """
    try:
        if api.templates.get(template_name) is not None:
            print("RHEVM:{} Warning: found finished template with this name.".format(provider))
            print("RHEVM:{} Skipping this step, attempting to continue...".format(provider))
            return
        temporary_vm = api.vms.get(temp_vm_name)
        actual_cluster = api.clusters.get(cluster)
        new_template = params.Template(name=template_name, vm=temporary_vm, cluster=actual_cluster)
        api.templates.add(new_template)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # Check that the template is really there
        if not api.templates.get(template_name):
            print("RHEVM:{} templatizing temporary VM failed".format(provider))
            sys.exit(127)
        print("RHEVM:{} successfully templatized the temporary VM".format(provider))
    except Exception as e:
        print("RHEVM:{} templatizing temporary VM failed".format(provider))
        print(e)
Example #17
    def validate_stats(self, ui=False):
        """ Validates that the detail page matches the Providers information.

        This method logs into the provider using the mgmt_system interface and collects
        a set of statistics to be matched against the UI. The details page is then refreshed
        continuously until the matching of all items is complete. A error will be raised
        if the match is not complete within a certain defined time period.
        """

        # If we're not using db, make sure we are on the provider detail page
        if ui:
            self.load_details()

        # Initial bullet check
        if self._do_stats_match(self.mgmt, self.STATS_TO_MATCH, ui=ui):
            self.mgmt.disconnect()
            return
        else:
            # Set off a Refresh Relationships
            method = 'ui' if ui else None
            self.refresh_provider_relationships(method=method)

            refresh_timer = RefreshTimer(time_for_refresh=300)
            wait_for(self._do_stats_match,
                     [self.mgmt, self.STATS_TO_MATCH, refresh_timer],
                     {'ui': ui},
                     message="do_stats_match_db",
                     num_sec=1000,
                     delay=60)

        self.mgmt.disconnect()
Example #18
def rates(request, rest_api):
    chargeback = rest_api.collections.chargebacks.find_by(rate_type='Compute')[0]
    data = [{
        'description': 'test_rate_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),
        'rate': 1,
        'group': 'cpu',
        'per_time': 'daily',
        'per_unit': 'megahertz',
        'chargeback_rate_id': chargeback.id
    } for _index in range(0, 3)]

    rates = rest_api.collections.rates.action.create(*data)
    for rate in data:
        wait_for(
            lambda: rest_api.collections.rates.find_by(description=rate.get('description')),
            num_sec=180,
            delay=10,
        )

    @request.addfinalizer
    def _finished():
        ids = [rate.id for rate in rates]
        delete_rates = [rate for rate in rest_api.collections.rates if rate.id in ids]
        if len(delete_rates) != 0:
            rest_api.collections.rates.action.delete(*delete_rates)

    return rates
Example #19
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface, temp_vm_name,
                   provider):
    """Adds a second disk to a temporary VM.

    Args:
        api: API to chosen RHEVM provider.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        if len(api.vms.get(temp_vm_name).disks.list()) > 1:
            print("RHEVM:{} Warning: found more than one disk in existing VM.".format(provider))
            print("RHEVM:{} Skipping this step, attempting to continue...".format(provider))
            return
        actual_sdomain = api.storagedomains.get(sdomain)
        temp_vm = api.vms.get(temp_vm_name)
        params_disk = params.Disk(storage_domain=actual_sdomain, size=disk_size,
                                  interface=disk_interface, format=disk_format)
        temp_vm.disks.add(params_disk)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # Check that there are two disks
        if len(api.vms.get(temp_vm_name).disks.list()) < 2:
            print("RHEVM:{} Disk failed to add".format(provider))
            sys.exit(127)
        print("RHEVM:{} Successfully added Disk".format(provider))
    except Exception as e:
        print("RHEVM:{} add_disk_to_temp_vm failed".format(provider))
        print(e)
Example #20
def test_edit_categories(rest_api, categories, multiple):
    if "edit" not in rest_api.collections.categories.action.all:
        pytest.skip("Edit categories action is not implemented in this version")

    if multiple:
        new_names = []
        ctgs_data_edited = []
        for ctg in categories:
            # Build the full description up front (matching the single-edit branch
            # below) so the wait can search for the exact value that was submitted
            new_name = "test_category_{}".format(fauxfactory.gen_alphanumeric().lower())
            new_names.append(new_name)
            ctg.reload()
            ctgs_data_edited.append({
                "href": ctg.href,
                "description": new_name,
            })
        rest_api.collections.categories.action.edit(*ctgs_data_edited)
        for new_name in new_names:
            wait_for(
                lambda: rest_api.collections.categories.find_by(description=new_name),
                num_sec=180,
                delay=10,
            )
    else:
        ctg = rest_api.collections.categories.find_by(description=categories[0].description)[0]
        new_name = 'test_category_{}'.format(fauxfactory.gen_alphanumeric().lower())
        ctg.action.edit(description=new_name)
        wait_for(
            lambda: rest_api.collections.categories.find_by(description=new_name),
            num_sec=180,
            delay=10,
        )
Example #21
def test_edit_roles(rest_api, roles, multiple):
    if "edit" not in rest_api.collections.roles.action.all:
        pytest.skip("Edit roles action is not implemented in this version")

    if multiple:
        new_names = []
        roles_data_edited = []
        for role in roles:
            # Build the full name up front (matching the single-edit branch below)
            # so the wait can search for the exact value that was submitted
            new_name = "role_name_{}".format(fauxfactory.gen_alphanumeric())
            new_names.append(new_name)
            role.reload()
            roles_data_edited.append({
                "href": role.href,
                "name": new_name,
            })
        rest_api.collections.roles.action.edit(*roles_data_edited)
        for new_name in new_names:
            wait_for(
                lambda: rest_api.collections.roles.find_by(name=new_name),
                num_sec=180,
                delay=10,
            )
    else:
        role = rest_api.collections.roles.find_by(name=roles[0].name)[0]
        new_name = 'role_name_{}'.format(fauxfactory.gen_alphanumeric())
        role.action.edit(name=new_name)
        wait_for(
            lambda: rest_api.collections.roles.find_by(name=new_name),
            num_sec=180,
            delay=10,
        )
Example #22
def service_catalogs(request, rest_api):
    name = fauxfactory.gen_alphanumeric()
    rest_api.collections.service_catalogs.action.add(
        name=name,
        description="description_{}".format(name),
        service_templates=[]
    )[0]
    wait_for(
        lambda: rest_api.collections.service_catalogs.find_by(name=name),
        num_sec=180,
        delay=10,
    )

    scls_data = [{
        "name": "name_{}_{}".format(name, index),
        "description": "description_{}_{}".format(name, index),
        "service_templates": []
    } for index in range(1, 5)]
    scls = rest_api.collections.service_catalogs.action.add(*scls_data)

    @request.addfinalizer
    def _finished():
        scls = [_ for _ in rest_api.collections.service_catalogs]
        if len(scls) != 0:
            rest_api.collections.service_catalogs.action.delete(*scls)

    return scls
Example #23
def test_edit_multiple_services(rest_api, services):
    """Tests editing multiple service catalogs at time.
    Prerequisities:
        * An appliance with ``/api`` available.
    Steps:
        * POST /api/services (method ``edit``) with the list of dictionaries used to edit
        * Check if the services with ``new_name`` each exists
    Metadata:
        test_flag: rest
    """
    new_names = []
    services_data_edited = []
    for ser in services:
        new_name = fauxfactory.gen_alphanumeric()
        new_names.append(new_name)
        services_data_edited.append({
            "href": ser.href,
            "name": new_name,
        })
    rest_api.collections.services.action.edit(*services_data_edited)
    for new_name in new_names:
        wait_for(
            lambda: rest_api.collections.services.find_by(name=new_name),
            num_sec=180,
            delay=10,
        )
Example #24
def test_edit_rates(rest_api, rates, multiple):
    if multiple:
        new_descriptions = []
        rates_data_edited = []
        for rate in rates:
            # Build the full description up front (matching the single-edit branch
            # below) so the wait can search for the exact value that was submitted
            new_description = "test_rate_{}".format(fauxfactory.gen_alphanumeric().lower())
            new_descriptions.append(new_description)
            rate.reload()
            rates_data_edited.append({
                "href": rate.href,
                "description": new_description,
            })
        rest_api.collections.rates.action.edit(*rates_data_edited)
        for new_description in new_descriptions:
            wait_for(
                lambda: rest_api.collections.rates.find_by(description=new_description),
                num_sec=180,
                delay=10,
            )
    else:
        rate = rest_api.collections.rates.find_by(description=rates[0].description)[0]
        new_description = 'test_rate_{}'.format(fauxfactory.gen_alphanumeric().lower())
        rate.action.edit(description=new_description)
        wait_for(
            lambda: rest_api.collections.rates.find_by(description=new_description),
            num_sec=180,
            delay=10,
        )
Example #25
def test_provision(request, provision_data, provider, rest_api):
    """Tests provision via REST API.
    Prerequisites:
        * Have a provider set up with templates suitable for provisioning.
    Steps:
        * POST /api/provision_requests (method ``create``) the JSON with provisioning data. The
            request is returned.
        * Query the request by its id until the state turns to ``finished`` or ``provisioned``.
    Metadata:
        test_flag: rest, provision
    """

    vm_name = provision_data["vm_fields"]["vm_name"]
    request.addfinalizer(
        lambda: provider.mgmt.delete_vm(vm_name) if provider.mgmt.does_vm_exist(vm_name) else None)
    # Use a distinct name so the pytest `request` fixture is not shadowed
    provision_request = rest_api.collections.provision_requests.action.create(**provision_data)[0]

    def _finished():
        provision_request.reload()
        if provision_request.status.lower() in {"error"}:
            pytest.fail("Error when provisioning: `{}`".format(provision_request.message))
        return provision_request.request_state.lower() in {"finished", "provisioned"}

    wait_for(_finished, num_sec=600, delay=5, message="REST provisioning finishes")
    assert provider.mgmt.does_vm_exist(vm_name), "The VM {} does not exist!".format(vm_name)
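The test only relies on `provision_data["vm_fields"]["vm_name"]`; a minimal
illustrative payload might look like the following, where every key other than
`vm_fields["vm_name"]` is an assumption about a typical request body rather than
something the test above requires:

# Illustrative only:
provision_data = {
    "version": "1.1",
    "template_fields": {"guid": "<template-guid>"},
    "vm_fields": {"vm_name": "test_rest_prov_vm", "number_of_cpus": 1},
    "requester": {"user_name": "admin", "auto_approve": True},
}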
Example #26
def test_appliance_replicate_database_disconnection_with_backlog(request, provider):
    """Tests a database disconnection with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now stop the DB on the replication parent
        provider.create()
        stop_db_process(appl2.address)
        sleep(60)
        start_db_process(appl2.address)
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
Example #27
def register_appliances(appliance_set, appliances_to_register):
    with appliance_set.primary.browser_session():
        red_hat_updates.register_appliances(*appliances_to_register)

        logger.info('Waiting for appliance statuses to change to Registered')
        wait_for(red_hat_updates.are_registered,
                 func_args=appliances_to_register,
                 num_sec=120,
                 delay=REFRESH_SEC,
                 fail_func=red_hat_updates.refresh)
        logger.info('Done')

        logger.info('Waiting for implicit update check after registration')
        # The update check doesn't have to find any available updates, but it still has to run
        wait_for(red_hat_updates.checked_updates,
                 func_args=appliances_to_register,
                 num_sec=300,
                 delay=REFRESH_SEC,
                 fail_func=red_hat_updates.refresh)
        logger.info('Done')

        # And all registered appliances should be registered and subscribed
        assert red_hat_updates.are_registered(appliances_to_register),\
            'Failed to register all specified appliances'
        assert red_hat_updates.are_subscribed(appliances_to_register),\
            'Failed to subscribe all specified appliances'
Example #28
def test_retire_service_future(rest_api, services):
    """Test retiring a service
    Prerequisites:
        * An appliance with ``/api`` available.
    Steps:
        * Retrieve list of entities using GET /api/services , pick the first one
        * POST /api/service/<id> (method ``retire``) with the ``retire_date``
    Metadata:
        test_flag: rest
    """
    assert "retire" in rest_api.collections.services.action.all

    retire_service = services[0]
    date = (datetime.datetime.now() + datetime.timedelta(days=5)).strftime('%m/%d/%y')
    future = {
        "date": date,
        "warn": "4",
    }
    date_before = retire_service.updated_at
    retire_service.action.retire(future)

    def _finished():
        retire_service.reload()
        return retire_service.updated_at > date_before

    wait_for(_finished, num_sec=600, delay=5, message="REST service retire finishes")
Example #29
def test_appliance_replicate_sync_role_change_with_backlog(request, provider):
    """Tests that a role change is replicated with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now disable DB sync role
        provider.create()
        conf.set_server_roles(database_synchronization=False)
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=True,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        conf.set_server_roles(database_synchronization=True)
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
Example #30
def test_automation_requests(request, rest_api, automation_requests_data, multiple):
    """Test adding the automation request
     Prerequisities:
        * An appliance with ``/api`` available.
    Steps:
        * POST /api/automation_request - (method ``create``) add request
        * Retrieve list of entities using GET /api/automation_request and find just added request
    Metadata:
        test_flag: rest, requests
    """

    if "automation_requests" not in rest_api.collections:
        pytest.skip("automation request collection is not implemented in this version")

    if multiple:
        requests = rest_api.collections.automation_requests.action.create(*automation_requests_data)
    else:
        requests = rest_api.collections.automation_requests.action.create(
            automation_requests_data[0])

    def _finished():
        for request in requests:
            request.reload()
            if request.status.lower() in {"error"}:
                pytest.fail("Error in automation request: `{}`".format(request.message))
            # Mirror the provisioning test above: wait for the finished state
            if request.request_state.lower() not in {"finished"}:
                return False
        return True

    wait_for(_finished, num_sec=600, delay=5, message="REST automation_request finishes")
Example #31
def instance(request, local_setup_provider, provider, vm_name,
             vm_analysis_data):
    """ Fixture to provision instance on the provider """

    template = vm_analysis_data.get('image', None)
    host_name, datastore_name = map(vm_analysis_data.get,
                                    ('host', 'datastore'))

    mgmt_system = provider.get_mgmt_system()

    provisioning_data = {
        'vm_name': vm_name,
        'host_name': {
            'name': [host_name]
        },
        'datastore_name': {
            'name': [datastore_name]
        },
    }

    try:
        provisioning_data['vlan'] = vm_analysis_data['vlan']
    except KeyError:
        # provisioning['vlan'] is required for rhevm provisioning
        if provider.type == 'rhevm':
            pytest.fail('rhevm requires a vlan value in provisioning info')

    vm = VM.factory(vm_name, provider)

    connect_ip = None
    if provider.type == "openstack":
        image = vm_analysis_data['image']
        vm = VM.factory(vm_name, provider, image)
        request.addfinalizer(vm.delete_from_provider)
        connect_ip = mgmt_system.get_first_floating_ip()
        provider.refresh_provider_relationships(method='ui')
        inst_args = {
            'email': '*****@*****.**',
            'first_name': 'Image',
            'last_name': 'Provisioner',
            'template_name': image,
            'notes': ('Testing provisioning from image {} to vm {} on provider {}'
                      .format(image, vm_name, provider.key)),
            'instance_type': vm_analysis_data['instance_type'],
            'availability_zone': vm_analysis_data['availability_zone'],
            'security_groups': [vm_analysis_data['security_group']],
            'cloud_network': vm_analysis_data['cloud_network'],
            'public_ip_address': connect_ip,
        }
        vm.create(**inst_args)
    else:
        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        do_vm_provisioning(template,
                           provider,
                           vm_name,
                           provisioning_data,
                           request,
                           None,
                           num_sec=6000)
    logger.info("VM %s provisioned, waiting for IP address to be assigned",
                vm_name)

    @pytest.wait_for(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, mgmt_system.vm_status(vm_name),
            mgmt_system.is_vm_stopped(vm_name)))
        if mgmt_system.is_vm_stopped(vm_name):
            mgmt_system.start_vm(vm_name)

        ip = mgmt_system.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = mgmt_system.get_ip_address(vm_name)
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh. This should only be possible
    # if the username and password have been set via the cloud-init script, so it is
    # a valid check.
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(hostname=connect_ip,
                                   username=vm_analysis_data['username'],
                                   password=vm_analysis_data['password'],
                                   port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=False)
        vm.ssh = ssh_client

    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_data['image']
    vm.connect_ip = connect_ip

    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        from cfme.infrastructure.virtual_machines import Vm
        cfme_rel = Vm.CfmeRelationship(vm)
        cfme_rel.set_relationship(str(configuration.server_name()),
                                  configuration.server_id())
    return vm
Example #32
def run(**kwargs):

    for provider in list_provider_keys("scvmm"):

        kwargs = make_kwargs_scvmm(cfme_data, provider,
                                   kwargs.get('image_url'),
                                   kwargs.get('template_name'))
        check_kwargs(**kwargs)
        mgmt_sys = cfme_data['management_systems'][provider]
        host_fqdn = mgmt_sys['hostname_fqdn']
        creds = credentials[mgmt_sys['credentials']]

        # For powershell to work, we need to extract the User Name from the Domain
        user = creds['username'].split('\\')
        if len(user) == 2:
            username_powershell = user[1]
        else:
            username_powershell = user[0]

        username_scvmm = creds['domain'] + "\\" + creds['username']

        scvmm_args = {
            "hostname": mgmt_sys['ipaddress'],
            "username": username_powershell,
            "password": creds['password'],
            "domain": creds['domain'],
            "provisioning": mgmt_sys['provisioning']
        }
        client = SCVMMSystem(**scvmm_args)

        url = kwargs.get('image_url', None)

        # Template name equals either the user input, or we extract the name from the URL
        new_template_name = kwargs.get('template_name', None)
        if new_template_name is None:
            new_template_name = os.path.basename(url)[:-4]

        print("SCVMM:{} started template {} upload".format(
            provider, new_template_name))
        print("SCVMM:{} Make Template out of the VHD {}".format(
            provider, new_template_name))

        # use_library is either user input or we use the cfme_data value
        use_library = kwargs.get('library', None)
        if use_library is None:
            use_library = mgmt_sys['template_upload'].get('library',
                                                          None) + "\\VHDS\\"

        print("SCVMM:{} Template Library: {}".format(provider, use_library))

        # The VHD name is changed to match the template_name.
        new_vhd_name = new_template_name + '.vhd'

        use_network = mgmt_sys['template_upload'].get('network', None)
        use_os_type = mgmt_sys['template_upload'].get('os_type', None)
        cores = mgmt_sys['template_upload'].get('cores', None)
        ram = mgmt_sys['template_upload'].get('ram', None)

        # Uses PowerShell Get-SCVMTemplate to return a list of templates; aborts if the
        # template already exists.
        if not check_template_exists(client, new_template_name):
            if kwargs.get('upload'):
                upload_vhd(client, url, use_library, new_vhd_name)
            if kwargs.get('template'):
                print("SCVMM:{} Make Template out of the VHD {}".format(
                    provider, new_template_name))

                make_template(client, host_fqdn, new_template_name,
                              use_library, use_network, use_os_type,
                              username_scvmm, cores, ram)
            try:
                wait_for(check_template_exists, [client, new_template_name],
                         fail_condition=False,
                         delay=5)
                print("SCVMM:{} template {} uploaded successfully".format(
                    provider, new_template_name))
                print("SCVMM:{} Adding template {} to trackerbot".format(
                    provider, new_template_name))
                trackerbot.trackerbot_add_provider_template(
                    kwargs.get('stream'), provider,
                    kwargs.get('template_name'))
            except Exception as e:
                print(e)
                print(
                    "SCVMM:{} Exception occured while verifying the template {} upload"
                    .format(provider, new_template_name))
        else:
            print(
                "SCVMM: A Template with that name already exists in the SCVMMLibrary"
            )
Example #33
def fill_field(field=None, key=None, value=None):
    """ Fills the 'Field' type of form.

    Args:
        field: Name of the field to compare (Host.VMs, ...).
        key: Operation to do (=, <, >=, IS NULL, ...).
        value: Value to check against.
    Returns: See :py:func:`cfme.web_ui.fill`.
    """
    field_norm = field.strip().lower()
    if "date updated" in field_norm or "date created" in field_norm or "boot time" in field_norm:
        no_date = False
    else:
        no_date = True
    fill(
        field_form,
        dict(
            type="Field",
            field=field,
            key=key,
            value=value if no_date else None,
        ),
    )
    # In case of advanced search box
    if sel.is_displayed(field_form.user_input):
        user_input = value is None
    else:
        user_input = None
    fill(field_form.user_input, user_input)
    if not no_date:
        # Flip the right part of form
        if isinstance(value, basestring) and not re.match(
                r"^[0-9]{2}/[0-9]{2}/[0-9]{4}$", value):
            if not sel.is_displayed(field_date_form.dropdown_select):
                sel.click(date_switch_buttons.to_relative)
            fill(field_date_form, {"dropdown_select": value})
            sel.click(buttons.commit)
        else:
            # Specific selection
            if not sel.is_displayed(field_date_form.input_select_date):
                sel.click(date_switch_buttons.to_specific)
            if isinstance(value, (tuple, list)) and len(value) == 2:
                date, time = value
            elif isinstance(value, basestring):  # is in correct format mm/dd/yyyy
                # Date only (for now)
                date = value[:]
                time = None
            else:
                raise TypeError(
                    "fill_field expects a 2-tuple (date, time) or string with date"
                )
            # TODO datetime.datetime support
            fill(field_date_form.input_select_date, date)
            # Try waiting a little bit for time field
            # If we don't wait, committing the expression will glitch
            try:
                wait_for(lambda: sel.is_displayed(field_date_form.input_select_time),
                         num_sec=6)
                # It appeared, so if the time is to be set, we will set it (passing None glitches)
                if time:
                    fill(field_date_form.input_select_time, time)
            except TimedOutError:
                # Did not appear, ignore that
                pass
            finally:
                # And finally, commit the expression :)
                sel.click(buttons.commit)
    else:
        sel.click(buttons.commit)
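Hypothetical calls of the helper above (field names, operators and values are
illustrative):

fill_field("VM and Instance : Name", "=", "test_vm_01")           # plain field
fill_field("VM and Instance : Boot Time", "BEFORE", "This Week")  # relative date
fill_field("VM and Instance : Boot Time", "BEFORE",
           ("04/21/2016", "12:00"))                               # specific date + time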
Example #34
def resource_usage(vm_ownership, appliance, provider):
    # Retrieve resource usage values from metric_rollups table.
    average_cpu_used_in_mhz = 0
    average_memory_used_in_mb = 0
    average_network_io = 0
    average_disk_io = 0
    vm_name = provider.data['cap_and_util']['chargeback_vm']

    metrics = appliance.db['metrics']
    rollups = appliance.db['metric_rollups']
    ems = appliance.db['ext_management_systems']
    logger.info('DELETING METRICS DATA FROM METRICS AND METRIC_ROLLUPS tables')
    appliance.db.session.query(metrics).delete()
    appliance.db.session.query(rollups).delete()

    provider_id = appliance.db.session.query(ems).filter(
        ems.name == provider.name).first().id

    # Chargeback reporting is done on rollups and not real-time values. So, we capture C&U
    # data and force hourly rollups by running these commands through the Rails console.

    logger.info('CAPTURING PERF DATA FOR VM {} running on {}'.format(
        vm_name, provider.name))
    appliance.ssh_client.run_rails_command(
        "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
        vm.perf_capture('realtime',1.hour.ago.utc, Time.now.utc);\
        vm.perf_rollup_range('realtime',1.hour.ago.utc, Time.now.utc)\"".
        format(provider_id, repr(vm_name)))
    wait_for(count_records_rollups_table, [appliance, provider],
             timeout=60,
             fail_condition=0,
             message="rollups")

    # Since we are collecting C&U data for > 1 hour, there will be multiple hourly records per VM
    # in the metric_rollups DB table. The values from these hourly records are summed up.

    with appliance.db.transaction:
        providers = (appliance.db.session.query(rollups.id).join(
            ems, rollups.parent_ems_id == ems.id).filter(
                rollups.capture_interval_name == 'hourly',
                rollups.resource_name == vm_name, ems.name == provider.name,
                rollups.timestamp >= date.today()))
    for record in appliance.db.session.query(rollups).filter(
            rollups.id.in_(providers.subquery())):
        if record.cpu_usagemhz_rate_average is None:
            continue
        average_cpu_used_in_mhz += record.cpu_usagemhz_rate_average
        average_memory_used_in_mb += record.derived_memory_used
        average_network_io += record.net_usage_rate_average
        average_disk_io += record.disk_usage_rate_average

    average_cpu_used_in_mhz = average_cpu_used_in_mhz / 24
    average_memory_used_in_mb = average_memory_used_in_mb / 24
    average_network_io = average_network_io / 24
    average_disk_io = average_disk_io / 24

    return {
        "average_cpu_used_in_mhz": average_cpu_used_in_mhz,
        "average_memory_used_in_mb": average_memory_used_in_mb,
        "average_network_io": average_network_io,
        "average_disk_io": average_disk_io
    }
Example #35
def test_drift_analysis(request, provider, instance, soft_assert):
    """ Tests drift analysis is correct

    Metadata:
        test_flag: vm_analysis
    """

    instance.load_details()
    drift_num_orig = 0
    drift_orig = InfoBlock("Relationships", "Drift History").text
    if drift_orig != 'None':
        drift_num_orig = int(drift_orig)
    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15,
             timeout="15m",
             fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(lambda: int(InfoBlock("Relationships", "Drift History").text) ==
             drift_num_orig + 1,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)
    drift_new = int(InfoBlock("Relationships", "Drift History").text)

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    instance.add_tag(tag, single_value=False)
    request.addfinalizer(lambda: instance.remove_tag(tag))

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15,
             timeout="15m",
             fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(lambda: int(InfoBlock("Relationships", "Drift History").text) ==
             drift_new + 1,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)

    # check drift difference
    soft_assert(
        not instance.equal_drift_results('Department (1)', 'My Company Tags',
                                         0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    toolbar.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    toolbar.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
Example #36
def test_ssa_vm(provider, instance, soft_assert):
    """ Tests SSA can be performed and returns sane results

    Metadata:
        test_flag: vm_analysis
    """

    e_users = None
    e_groups = None
    e_packages = None
    e_services = None
    e_icon_part = instance.system_type['icon']

    if instance.system_type != WINDOWS:
        e_users = instance.ssh.run_command(
            "cat /etc/passwd | wc -l").output.strip('\n')
        e_groups = instance.ssh.run_command(
            "cat /etc/group | wc -l").output.strip('\n')
        e_packages = instance.ssh.run_command(
            instance.system_type['package-number']).output.strip('\n')
        e_services = instance.ssh.run_command(
            instance.system_type['services-number']).output.strip('\n')

    logger.info(
        "Expecting to have {} users, {} groups, {} packages and {} services".
        format(e_users, e_groups, e_packages, e_services))

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15,
             timeout="15m",
             fail_func=lambda: toolbar.select('Reload'))

    # Check release and quadicon
    quadicon_os_icon = instance.find_quadicon().os
    details_os_icon = instance.get_detail(properties=('Properties',
                                                      'Operating System'),
                                          icon_href=True)
    logger.info("Icons: %s, %s", details_os_icon, quadicon_os_icon)

    # We shouldn't use get_detail anymore - it takes too much time
    c_lastanalyzed = InfoBlock.text('Lifecycle', 'Last Analyzed')
    c_users = InfoBlock.text('Security', 'Users')
    c_groups = InfoBlock.text('Security', 'Groups')
    c_packages = 0
    c_services = 0
    if instance.system_type != WINDOWS:
        c_packages = InfoBlock.text('Configuration', 'Packages')
        c_services = InfoBlock.text('Configuration', 'Init Processes')

    logger.info(
        "SSA shows {} users, {} groups, {} packages and {} services".format(
            c_users, c_groups, c_packages, c_services))

    soft_assert(c_lastanalyzed != 'Never', "Last Analyzed is set to Never")
    soft_assert(
        e_icon_part in details_os_icon,
        "details icon: '{}' not in '{}'".format(e_icon_part, details_os_icon))
    soft_assert(
        e_icon_part in quadicon_os_icon,
        "quad icon: '{}' not in '{}'".format(e_icon_part, quadicon_os_icon))

    if instance.system_type != WINDOWS:
        soft_assert(c_users == e_users,
                    "users: '{}' != '{}'".format(c_users, e_users))
        soft_assert(c_groups == e_groups,
                    "groups: '{}' != '{}'".format(c_groups, e_groups))
        soft_assert(c_packages == e_packages,
                    "packages: '{}' != '{}'".format(c_packages, e_packages))
        if not BZ("1312971").blocks:
            soft_assert(
                c_services == e_services,
                "services: '{}' != '{}'".format(c_services, e_services))
    else:
        # Make sure windows-specific data is not empty
        c_patches = InfoBlock.text('Security', 'Patches')
        c_applications = InfoBlock.text('Configuration', 'Applications')
        c_win32_services = InfoBlock.text('Configuration', 'Win32 Services')
        c_kernel_drivers = InfoBlock.text('Configuration', 'Kernel Drivers')
        c_fs_drivers = InfoBlock.text('Configuration', 'File System Drivers')

        soft_assert(c_patches != '0', "patches: '{}' != '0'".format(c_patches))
        soft_assert(c_applications != '0',
                    "applications: '{}' != '0'".format(c_applications))
        soft_assert(c_win32_services != '0',
                    "win32 services: '{}' != '0'".format(c_win32_services))
        soft_assert(c_kernel_drivers != '0',
                    "kernel drivers: '{}' != '0'".format(c_kernel_drivers))
        soft_assert(c_fs_drivers != '0',
                    "fs drivers: '{}' != '0'".format(c_fs_drivers))

    image_label = 'Parent VM'
    if provider.type == 'openstack':
        image_label = 'VM Template'
    # 5.4 doesn't have Parent VM field
    if version.current_version() > "5.5" and provider.type != 'openstack':
        c_image = InfoBlock.text('Relationships', image_label)
        soft_assert(c_image == instance.image,
                    "image: '{}' != '{}'".format(c_image, instance.image))
Example #37
def test_ssa_template(request, local_setup_provider, provider, soft_assert,
                      vm_analysis_data):
    """ Tests SSA can be performed on a template

    Metadata:
        test_flag: vm_analysis
    """

    template_name = vm_analysis_data['image']
    template = Template.factory(template_name, provider, template=True)

    # Set credentials to all hosts set for this datastore
    if provider.type != 'openstack':
        datastore_name = vm_analysis_data['datastore']
        test_datastore = datastore.Datastore(datastore_name, provider.key)
        host_list = cfme_data.get('management_systems',
                                  {})[provider.key].get('hosts', [])
        host_names = test_datastore.get_hosts()
        for host_name in host_names:
            test_host = host.Host(name=host_name)
            hosts_data = [x for x in host_list if x.name == host_name]
            if len(hosts_data) > 0:
                host_data = hosts_data[0]

                if not test_host.has_valid_credentials:
                    creds = host.get_credentials_from_config(
                        host_data['credentials'])
                    test_host.update(updates={'credentials': creds},
                                     validate_credentials=True)

    template.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(template_name),
             delay=15,
             timeout="10m",
             fail_func=lambda: toolbar.select('Reload'))

    # Check release and quadicon
    quadicon_os_icon = template.find_quadicon().os
    details_os_icon = template.get_detail(properties=('Properties',
                                                      'Operating System'),
                                          icon_href=True)
    logger.info("Icons: {}, {}".format(details_os_icon, quadicon_os_icon))

    # We shouldn't use get_detail anymore - it takes too much time
    c_users = InfoBlock.text('Security', 'Users')
    c_groups = InfoBlock.text('Security', 'Groups')
    c_packages = 0
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        c_packages = InfoBlock.text('Configuration', 'Packages')

    logger.info("SSA shows {} users, {} groups and {} packages".format(
        c_users, c_groups, c_packages))

    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        soft_assert(c_users != '0', "users: '{}' != '0'".format(c_users))
        soft_assert(c_groups != '0', "groups: '{}' != '0'".format(c_groups))
        soft_assert(c_packages != '0',
                    "packages: '{}' != '0'".format(c_packages))
    else:
        # Make sure windows-specific data is not empty
        c_patches = InfoBlock.text('Security', 'Patches')
        c_applications = InfoBlock.text('Configuration', 'Applications')
        c_win32_services = InfoBlock.text('Configuration', 'Win32 Services')
        c_kernel_drivers = InfoBlock.text('Configuration', 'Kernel Drivers')
        c_fs_drivers = InfoBlock.text('Configuration', 'File System Drivers')

        soft_assert(c_patches != '0', "patches: '{}' != '0'".format(c_patches))
        soft_assert(c_applications != '0',
                    "applications: '{}' != '0'".format(c_applications))
        soft_assert(c_win32_services != '0',
                    "win32 services: '{}' != '0'".format(c_win32_services))
        soft_assert(c_kernel_drivers != '0',
                    "kernel drivers: '{}' != '0'".format(c_kernel_drivers))
        soft_assert(c_fs_drivers != '0',
                    "fs drivers: '{}' != '0'".format(c_fs_drivers))
Example #38
0
def test_db_backup_schedule(request, db_backup_data):
    """ Test scheduled one-type backup on given machines using smb/nfs
    """

    # ---- Create new db backup schedule set to run in the next 6 min
    dt = get_schedulable_datetime()
    # the dash is there to make strftime not use a leading zero
    hour = dt.strftime('%-H')
    minute = dt.strftime('%-M')

    sched_args = {
        'name': db_backup_data.schedule_name,
        'description': db_backup_data.schedule_description,
        'active': True,
        'run_type': "Once",
        'run_every': None,
        'time_zone': "UTC",
        'start_date': dt,
        'start_hour': hour,
        'start_min': minute,
        'depot_name': fauxfactory.gen_alphanumeric(),
    }

    if db_backup_data.protocol_type == 'smb':
        sched_args.update({
            'protocol': 'Samba',
            'uri': db_backup_data.hostname,
            'username': db_backup_data.credentials['username'],
            'password': db_backup_data.credentials['password'],
            'password_verify': db_backup_data.credentials['password'],
        })
    else:
        sched_args.update({
            'protocol': 'Network File System',
            'uri': db_backup_data.hostname,
        })

    if db_backup_data.protocol_type == 'nfs':
        path_on_host = urlparse('nfs://' + db_backup_data.hostname).path
    else:
        path_on_host = db_backup_data.path_on_host
    full_path = get_full_path_to_file(path_on_host,
                                      db_backup_data.schedule_name)

    sched = DatabaseBackupSchedule(**sched_args)
    sched.create()
    flash.assert_message_contain('Schedule "{}" was saved'.format(
        db_backup_data.schedule_name))

    # ----

    # ---- Add cleanup finalizer
    def delete_sched_and_files():
        with get_ssh_client(db_backup_data.hostname,
                            db_backup_data.credentials) as ssh:
            ssh.run_command('rm -rf {}'.format(full_path))
        sched.delete()
        flash.assert_message_contain('Schedule "{}": Delete successful'.format(
            db_backup_data.schedule_description))

    request.addfinalizer(delete_sched_and_files)
    # ----

    # ---- Wait for schedule to run
    # check last date at schedule's table
    wait_for(lambda: sched.last_date != '',
             num_sec=600,
             delay=30,
             fail_func=sel.refresh,
             message='Schedule failed to run in 10mins from being set up')
    # ----

    # ---- Check if the db backup file exists
    with get_ssh_client(db_backup_data.hostname,
                        db_backup_data.credentials) as ssh:

        assert ssh.run_command('cd "{}"'.format(path_on_host))[0] == 0,\
            "Could not cd into '{}' over ssh".format(path_on_host)
        # Find files no more than 5 minutes old, count them and remove newline
        file_check_cmd = "find {}/* -cmin -5 | wc -l | tr -d '\n' ".format(
            full_path)

        wait_for(lambda: ssh.run_command(file_check_cmd)[1] == '1',
                 delay=5,
                 num_sec=60,
                 message="File '{}' not found on share".format(full_path))
Example #39
0
def do_vm_provisioning(template_name,
                       provider,
                       vm_name,
                       provisioning_data,
                       request,
                       smtp_test,
                       num_sec=1500,
                       wait=True):
    # generate_tests makes sure these have values
    vm = Vm(name=vm_name, provider=provider, template_name=template_name)
    navigate_to(vm, 'ProvisionVM')

    note = ('template {} to vm {} on provider {}'.format(
        template_name, vm_name, provider.key))
    provisioning_data.update({
        'email': '*****@*****.**',
        'first_name': 'Template',
        'last_name': 'Provisioner',
        'notes': note,
    })

    fill(provisioning_form,
         provisioning_data,
         action=provisioning_form.submit_button)
    flash.assert_no_errors()
    if not wait:
        return

    # The provision request is the important part of this test
    logger.info('Waiting for cfme provision request for vm %s', vm_name)
    row_description = 'Provision from [{}] to [{}]'.format(
        template_name, vm_name)
    cells = {'Description': row_description}
    try:
        row, __ = wait_for(requests.wait_for_request, [cells],
                           fail_func=requests.reload,
                           num_sec=num_sec,
                           delay=20)
    except Exception as e:
        requests.debug_requests()
        raise e
    assert normalize_text(row.status.text) == 'ok' and \
        normalize_text(row.request_state.text) == 'finished', \
        "Provisioning failed with the message {}".format(row.last_message.text)

    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vm %s to appear on provider %s', vm_name,
                provider.key)
    wait_for(provider.mgmt.does_vm_exist, [vm_name],
             handle_exception=True,
             num_sec=600)

    if smtp_test:
        # Wait for e-mails to appear
        def verify():
            approval = dict(
                subject_like="%%Your Virtual Machine configuration was Approved%%")
            expected_text = "Your virtual machine request has Completed - VM:%%{}".format(
                vm_name)
            return (len(smtp_test.get_emails(**approval)) > 0 and
                    len(smtp_test.get_emails(subject_like=expected_text)) > 0)

        wait_for(verify, message="email receive check", delay=30)
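normalize_text is used here (and in the approval test further down) to compare UI strings loosely; a plausible sketch, assuming it just lowercases and collapses whitespace:

import re


def normalize_text(text):
    # Lowercase and collapse all runs of whitespace to single spaces
    return re.sub(r'\s+', ' ', text.strip().lower())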
Example #40
0
def test_db_migrate(db_url, db_version, db_desc, v2key_url):
    """ This is a destructive test - it _will_ destroy your database """
    app = pytest.store.current_appliance

    # initiate evmserverd stop
    app.stop_evm_service()

    # in the meantime, download the database
    logger.info("Downloading database: {}".format(db_desc))
    url_basename = os_path.basename(db_url)
    rc, out = app.ssh_client.run_command('wget "{}" -O "/tmp/{}"'.format(
        db_url, url_basename),
                                         timeout=30)
    assert rc == 0, "Failed to download database: {}".format(out)

    # wait 30sec until evmserverd is down
    wait_for(app.is_evm_service_running,
             num_sec=30,
             fail_condition=True,
             delay=5,
             message="Failed to stop evmserverd in 30 seconds")

    # restart postgres to clear connections, remove old DB, restore it and migrate it
    with app.ssh_client as ssh:
        rc, out = ssh.run_command(
            'systemctl restart rh-postgresql94-postgresql', timeout=30)
        assert rc == 0, "Failed to restart postgres service: {}".format(out)
        rc, out = ssh.run_command('dropdb vmdb_production', timeout=15)
        assert rc == 0, "Failed to remove old database: {}".format(out)
        rc, out = ssh.run_command('createdb vmdb_production', timeout=30)
        assert rc == 0, "Failed to create clean database: {}".format(out)
        rc, out = ssh.run_command(
            'pg_restore -v --dbname=vmdb_production /tmp/{}'.format(
                url_basename),
            timeout=420)
        assert rc == 0, "Failed to restore new database: {}".format(out)
        rc, out = ssh.run_rake_command("db:migrate", timeout=300)
        assert rc == 0, "Failed to migrate new database: {}".format(out)
        rc, out = ssh.run_rake_command(
            'db:migrate:status 2>/dev/null | grep "^\s*down"', timeout=30)
        assert rc != 0, "Migration failed; migrations in 'down' state found: {}".format(
            out)
        # fetch GUID and REGION from the DB and use it to replace data in /var/www/miq/vmdb/GUID
        # and /var/www/miq/vmdb/REGION respectively
        data_query = {
            'guid': 'select guid from miq_servers',
            'region': 'select region from miq_regions'
        }
        for data_type, db_query in data_query.items():
            data_filepath = '/var/www/miq/vmdb/{}'.format(data_type.upper())
            rc, out = ssh.run_command(
                'psql -d vmdb_production -t -c "{}"'.format(db_query),
                timeout=15)
            assert rc == 0, "Failed to fetch {}: {}".format(data_type, out)
            db_data = out.strip()
            assert db_data, "No {} found in database; query '{}' returned no records".format(
                data_type, db_query)
            rc, out = ssh.run_command("echo -n '{}' > {}".format(
                db_data, data_filepath),
                                      timeout=15)
            assert rc == 0, "Failed to replace data in {} with '{}': {}".format(
                data_filepath, db_data, out)
        # fetch v2_key
        if v2key_url:
            rc, out = ssh.run_command(
                'wget "{}" -O "/var/www/miq/vmdb/certs/v2_key"'.format(
                    v2key_url),
                timeout=15)
            assert rc == 0, "Failed to download v2_key: {}".format(out)
        # or change all invalid (now unavailable) passwords to 'invalid'
        else:
            rc, out = ssh.run_command("fix_auth -i invalid", timeout=45)
            assert rc == 0, "Failed to change invalid passwords: {}".format(
                out)
    rc, out = app.ssh_client.run_command(
        'systemctl stop rh-postgresql94-postgresql', timeout=30)
    assert rc == 0, "Failed to stop postgres service: {}".format(out)
    # start evmserverd, wait for web UI to start and try to log in as admin
    app.start_evm_service()
    app.wait_for_web_ui(timeout=600)
    login_admin()
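The rc/out-and-assert pattern repeats a dozen times in this test. A small wrapper (hypothetical, not part of the suite) would cut the noise:

def run_or_fail(ssh, command, error_message, timeout=30):
    # Hypothetical helper: run a command over ssh and assert it exited 0
    rc, out = ssh.run_command(command, timeout=timeout)
    assert rc == 0, "{}: {}".format(error_message, out)
    return out

With it, the dropdb/createdb pair, for example, collapses to two one-liners.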
Example #41
0
    def provision_from_template(cls,
                                template_name,
                                vm_name,
                                wait_min=None,
                                cpus=1,
                                memory=1024,
                                vlan=None,
                                first_name="Shadowman",
                                last_name="RedHat",
                                email="*****@*****.**"):
        """Provision VM from template.

        Works independently of the management system; tags the appropriate VMDB objects
        so the provisioning goes through without problems.

        Args:
            template_name: Name of the template to use.
            vm_name: VM Name.
            wait_min: How many minutes to wait for the provisioning to finish.
            cpus: How many CPUs the VM should have.
            memory: How much memory (in MB) the VM should have.
            vlan: Where to connect the VM. Required for RHEV-M.
            first_name: First name of the requester.
            last_name: Surname of the requester.
            email: E-mail of the requester.
        Returns: :py:class:`MiqVM` object with freshly provisioned VM.
        """
        vm_table = cfmedb['vms']
        for vm in cfmedb.session.query(vm_table.name, vm_table.guid)\
            .filter(vm_table.template == True):  # NOQA
            # The previous line is correct; SQLAlchemy overloads `==`, so changing it to `is` breaks the filter.
            if vm.name.strip() == template_name.strip():
                template_guid = vm.guid
                break
        else:
            raise Exception("Template %s not found!" % template_name)
        template = cls(template_guid)
        # Tag provider
        for tag in template.provider.tags:
            if tag.category == "prov_scope" and tag.tag_name == "all":
                break
        else:
            logger.info("Tagging provider %s" % template.provider.name)
            template.provider.add_tag(("prov_scope", "all"))
        # Tag all provider's hosts
        for host in template.provider.hosts:
            for tag in host.tags:
                if tag.category == "prov_scope" and tag.tag_name == "all":
                    break
            else:
                logger.info("Tagging host %s" % host.name)
                host.add_tag(("prov_scope", "all"))
        # Tag all provider's datastores
        for datastore in template.provider.datastores:
            ds_name = datastore.name
            if is_datastore_banned(ds_name):
                logger.info("Skipping datastore %s" % ds_name)
                continue
            for tag in datastore.tags:
                if tag.category == "prov_scope" and tag.tag_name == "all":
                    break
            else:
                logger.info("Tagging datastore %s" % ds_name)
                datastore.add_tag(("prov_scope", "all"))
        # Create request
        template_fields = client.pipeoptions(dict(guid=template_guid))
        vm_fields = dict(number_of_cpu=cpus, vm_memory=memory, vm_name=vm_name)
        if vlan:  # RHEV-M requires this field
            vm_fields["vlan"] = vlan
        vm_fields = client.pipeoptions(vm_fields)
        requester = client.pipeoptions(
            dict(owner_first_name=first_name,
                 owner_last_name=last_name,
                 owner_email=email))
        try:
            req_id = client.service.VmProvisionRequest("1.1", template_fields,
                                                       vm_fields, requester,
                                                       "", "").id
        except WebFault as e:
            if "'Network/vLan' is required" in e.message:
                raise TypeError(
                    "You have to specify `vlan` parameter for this function! (RHEV-M?)"
                )
            else:
                raise
        logger.info("Waiting for VM provisioning request approval")
        wait_for(lambda: client.service.GetVmProvisionRequest(req_id).
                 approval_state == "approved",
                 num_sec=180,
                 delay=5,
                 message="VM provision approval")

        def check_whether_provisioning_finished():
            request = client.service.GetVmProvisionRequest(req_id)
            if request.status.lower().strip() == "error":
                raise Exception(
                    request.message)  # change the exception class here
            return request.status.lower().strip() == "ok" and len(
                request.vms) > 0

        logger.info("Waiting for VM provisioning to be done")
        wait_for(check_whether_provisioning_finished,
                 num_sec=(wait_min * 60 if wait_min else 300),
                 delay=5,
                 message="provisioning")
        vm_guid = client.service.GetVmProvisionRequest(req_id).vms[0].guid
        new_vm = MiqVM(client.service.FindVmByGuid(vm_guid).guid)
        # some basic sanity checks though they should always pass
        assert new_vm.name == vm_name
        assert new_vm.object.guid == vm_guid
        logger.info("VM has been provisioned")
        return new_vm
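provision_from_template uses Python's for/else three times for its "tag only if missing" checks: the else branch runs only when the loop finished without hitting break. A standalone illustration of the idiom:

def ensure_tag(tags, wanted=("prov_scope", "all")):
    # Illustrative only: returns True when the tag had to be added
    for tag in tags:
        if tag == wanted:
            break          # found -> the else block is skipped
    else:
        tags.append(wanted)  # runs only because no break happened
        return True
    return False


tags = [("env", "dev")]
assert ensure_tag(tags) is True   # tag was missing, got appended
assert ensure_tag(tags) is False  # second call finds it and breaks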
Example #42
0
 def fill_data(self,
               name="test_name",
               description="test_description",
               active=True,
               action_type="vm",
               filter_type="all",
               filter_value="",
               log_type="Samba",
               uri="samba-or-nfs-fqdn-or-ip",
               user_id="smb_only_user",
               password="******",
               verify="smb_only_pass",
               timer_type="Monthly",
               timer_subtype="6",
               time_zone="UTC",
               start_date="4/5/2063",
               start_hour="1",
               start_min="45"):
     self.fill_field_by_locator(name, *self._name_field_locator)
     self.fill_field_by_locator(description,
                                *self._description_field_locator)
     self.toggle_checkbox(active, *self._active_checkbox_locator)
     self.select_dropdown_by_value(action_type,
                                   *self._action_type_selectbox_locator)
     self._wait_for_results_refresh()
     if action_type == 'db_backup':
         self.select_dropdown_by_value(
             log_type, *self._log_type_selectbox_locator)
         if log_type == 'Samba':
             self._wait_for_visible_element(
                 *self._smb_uri_field_locator)
             self.fill_field_by_locator(uri,
                                        *self._smb_uri_field_locator)
             self.fill_field_by_locator(
                 user_id, *self._smb_user_id_field_locator)
             self.fill_field_by_locator(
                 password, *self._smb_password_field_locator)
             self.fill_field_by_locator(verify,
                                        *self._smb_verify_field_locator)
             self._wait_for_visible_element(
                 *self._smb_validate_button_locator)
         elif log_type == 'Network File System':
             self._wait_for_visible_element(
                 *self._nfs_uri_field_locator)
             self.fill_field_by_locator(uri,
                                        *self._nfs_uri_field_locator)
         else:
             raise Exception("Unknown database backup type")
     else:
         self.select_dropdown_by_value(
             filter_type, *self._filter_type_selectbox_locator)
         if filter_value:
             self._wait_for_visible_element(
                 *self._filter_value_selectbox_locator)
             self.select_dropdown_by_value(
                 filter_value, *self._filter_value_selectbox_locator)
     self.select_dropdown_by_value(timer_type,
                                   *self._timer_type_selectbox_locator)
     if timer_type != "Once":
         self.select_dropdown_by_value(
             timer_subtype, *self._timer_subtype_selectbox_locator)
     if self.will_change_time_zone(time_zone):
         # set the starting hour to "1" to be able to check for element change later
         self.select_dropdown_by_value(
             "1", *self._start_hour_selectbox_locator)
         # select timezone by substring in text
         self.select_dropdown_substring(
             time_zone, *self._time_zone_selectbox_locator)
         # wait for timezone javascript to reset the starting date and time
         wait_for(self.is_starting_date_reset, num_sec=3)
         # now we can continue filling out the date and time
     self.selenium.find_element(
         *self._start_date_field_locator)._parent.execute_script(
             "$j('#miq_date_1').attr('value', '%s')" % start_date)
     self.select_dropdown_by_value(start_hour,
                                   *self._start_hour_selectbox_locator)
     self.select_dropdown_by_value(start_min,
                                   *self._start_min_selectbox_locator)
     self._wait_for_results_refresh()
Example #43
0
 def wait_powered_off(self, wait_time=120):
     return wait_for(lambda: self.is_powered_off,
                     num_sec=wait_time,
                     message="wait for power off",
                     delay=5)
Example #44
0
def instance(request, local_setup_provider, provider, vm_name, vm_analysis_data, appliance):
    """ Fixture to provision instance on the provider """

    vm = VM.factory(vm_name, provider, template_name=vm_analysis_data['image'])
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))

    provision_data = vm_analysis_data.copy()
    del provision_data['image']
    vm.create_on_provider(find_in_cfme=True, **provision_data)

    if provider.type == "openstack":
        vm.provider.mgmt.assign_floating_ip(vm.name, 'public')

    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    mgmt_system = provider.get_mgmt_system()

    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, mgmt_system.vm_status(vm_name), mgmt_system.is_vm_stopped(vm_name)))
        if mgmt_system.is_vm_stopped(vm_name):
            mgmt_system.start_vm(vm_name)

        ip = mgmt_system.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = mgmt_system.get_ip_address(vm_name)
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh. This should only be possible
    # if the username and password have been set via the cloud-init script, so it is
    # a valid check.
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(hostname=connect_ip, username=vm_analysis_data['username'],
                                   password=vm_analysis_data['password'], port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_data['image']
    vm.connect_ip = connect_ip

    # TODO:  This is completely wrong and needs to be fixed
    #   The CFME relationship is supposed to be set to the appliance, which is required
    #   to be placed within the same datastore where the VM resides
    #
    #   Also, if rhev and iscsi, it needs direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        from cfme.infrastructure.virtual_machines import Vm
        cfme_rel = Vm.CfmeRelationship(vm)
        server_name = appliance.server_name()
        cfme_rel.set_relationship(str(server_name), configuration.server_id())

    yield vm

    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
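wait_for_decorator runs the decorated check immediately, at definition time, rather than saving it for later. A minimal sketch of that behaviour (the real helper also understands "20m"-style timeout strings; here the timeout is plain seconds):

import time


def wait_for_decorator(timeout=60, delay=5):
    # Sketch: poll the decorated zero-arg function until it returns truthy
    def decorator(func):
        start = time.time()
        while not func():
            if time.time() - start > timeout:
                raise RuntimeError("{} never returned truthy".format(func.__name__))
            time.sleep(delay)
        return func
    return decorator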
Example #45
0
    def test_retire_service_future(self, rest_api, services):
        """Test retiring a service
        Prerequisities:
            * An appliance with ``/api`` available.
        Steps:
            * Retrieve list of entities using GET /api/services , pick the first one
            * POST /api/service/<id> (method ``retire``) with the ``retire_date``
        Metadata:
            test_flag: rest
        """
        assert "retire" in rest_api.collections.services.action.all

        retire_service = services[0]
        date = (datetime.datetime.now() +
                datetime.timedelta(days=5)).strftime('%m/%d/%y')
        future = {
            "date": date,
            "warn": "4",
        }
        date_before = retire_service.updated_at
        retire_service.action.retire(future)

        def _finished():
            retire_service.reload()
            if retire_service.updated_at > date_before:
                return True
            return False

        wait_for(_finished,
                 num_sec=600,
                 delay=5,
                 message="REST automation_request finishes")

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
    def test_set_service_owner(self, rest_api, services):
        if "set_ownership" not in rest_api.collections.services.action.all:
            pytest.skip(
                "Set owner action for service is not implemented in this version")
        service = services[0]
        user = rest_api.collections.users.get(userid='admin')
        data = {"owner": {"href": user.href}}
        service.action.set_ownership(data)
        service.reload()
        assert hasattr(service, "evm_owner")
        assert service.evm_owner.userid == user.userid

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
    def test_set_services_owner(self, rest_api, services):
        if "set_ownership" not in rest_api.collections.services.action.all:
            pytest.skip(
                "Set owner action for service is not implemented in this version")
        data = []
        user = rest_api.collections.users.get(userid='admin')
        for service in services:
            tmp_data = {"href": service.href, "owner": {"href": user.href}}
            data.append(tmp_data)
        rest_api.collections.services.action.set_ownership(*data)
        for service in services:
            service.reload()
            assert hasattr(service, "evm_owner")
            assert service.evm_owner.userid == user.userid
Example #46
0
 def wait_suspended(self, wait_time=160):
     return wait_for(lambda: self.is_suspended,
                     num_sec=wait_time,
                     message="wait for suspend",
                     delay=5)
Example #47
0
def vm(request, provider, local_setup_provider, small_template_modscope,
       vm_name):
    if provider.type == "rhevm":
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.type == "virtualcenter":
        kwargs = {}
    elif provider.type == "openstack":
        kwargs = {}
        if 'small_template_flavour' in provider.data:
            kwargs = {
                "flavour_name": provider.data.get('small_template_flavour')
            }
    elif provider.type == "scvmm":
        kwargs = {
            "host_group":
            provider.data.get("provisioning", {}).get("host_group",
                                                      "All Hosts")
        }
    else:
        kwargs = {}

    try:
        deploy_template(provider.key,
                        vm_name,
                        template_name=small_template_modscope,
                        allow_skip="default",
                        power_on=True,
                        **kwargs)
    except TimedOutError as e:
        logger.exception(e)
        try:
            provider.mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip(
                "{} is quite likely overloaded! Check its status!\n{}: {}".
                format(provider.key,
                       type(e).__name__, str(e)))

    @request.addfinalizer
    def _finalize():
        """if getting REST object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name %s", vm_name)
        if provider.mgmt.is_vm_suspended(vm_name):
            logger.info("Powering up VM %s to shut it down correctly.",
                        vm_name)
            provider.mgmt.start_vm(vm_name)
        if provider.mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM %s", vm_name)
            provider.mgmt.stop_vm(vm_name)
        if provider.mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM %s in %s", vm_name,
                        provider.mgmt.__class__.__name__)
            provider.mgmt.delete_vm(vm_name)

    # Make it appear in the provider
    provider.refresh_provider_relationships()

    # Get the REST API object
    api = wait_for(
        lambda: get_vm_object(vm_name),
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider, vm_name, api)
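VMWrapper (also returned by the SOAP variant of this fixture in the next example) is not shown in these excerpts; given how it is constructed and consumed, it is plausibly just a light value object:

from collections import namedtuple

# Assumption: the third slot holds whichever API handle (REST or SOAP)
# the fixture resolved for the VM.
VMWrapper = namedtuple('VMWrapper', ['provider', 'name', 'api'])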
Example #48
0
def vm(request, provider, small_template, vm_name):
    try:
        setup_provider(provider.key)
    except FlashMessageException as e:
        e.skip_and_log("Provider failed to set up")

    if isinstance(provider.mgmt, mgmt_system.RHEVMSystem):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif isinstance(provider.mgmt, mgmt_system.VMWareSystem):
        kwargs = {}
    elif isinstance(provider.mgmt, mgmt_system.SCVMMSystem):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}

    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=small_template,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError as e:
        logger.exception(e)
        try:
            provider.mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM {}!".format(vm_name))
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider.key, type(e).__name__, str(e)))

    def finalize():
        """if getting SOAP object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name {}".format(vm_name))
        if provider.mgmt.is_vm_suspended(vm_name):
            logger.info("Powering up VM {} to shut it down correctly.".format(vm_name))
            provider.mgmt.start_vm(vm_name)
        if provider.mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM {}".format(vm_name))
            provider.mgmt.stop_vm(vm_name)
        if provider.mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM {} in {}".format(vm_name, provider.mgmt.__class__.__name__))
            provider.mgmt.delete_vm(vm_name)
    request.addfinalizer(finalize)

    # Make it appear in the provider
    provider.refresh_provider_relationships()

    # Get the SOAP object
    soap = wait_for(
        lambda: get_vm_object(vm_name),
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider.mgmt, vm_name, soap)
Example #49
0
 def wait_for_existence(self, existence, **kwargs):
     return wait_for(
         lambda: self.exists, fail_condition=not existence, **kwargs)
Example #50
0
def test_action_initiate_smartstate_analysis(request,
                                             assign_policy_for_testing, vm,
                                             vm_off, vm_crud_refresh):
    """ This test tests actions 'Initiate SmartState Analysis for VM'.

    This test sets the policy that it analyses VM after it's powered on. Then it checks whether
    that really happened.

    Metadata:
        test_flag: actions, provision
    """
    # Set host credentials for VMWare
    if isinstance(vm.provider, mgmtsystem.virtualcenter.VMWareSystem):
        set_host_credentials(request, vm.provider, vm)

    # Set up the policy and prepare finalizer
    assign_policy_for_testing.assign_actions_to_event(
        "VM Power On", ["Initiate SmartState Analysis for VM"])
    request.addfinalizer(lambda: assign_policy_for_testing.assign_events())
    switched_on = datetime.utcnow()
    # Start the VM
    vm.crud.power_control_from_cfme(option=vm.crud.POWER_ON,
                                    cancel=False,
                                    from_details=True)

    # Wait for VM being tried analysed by CFME
    def wait_analysis_tried():
        if current_version() > "5.5":
            vm.api.reload()
        try:
            return vm.api.last_scan_attempt_on.replace(
                tzinfo=None) >= switched_on
        except AttributeError:
            return False

    try:
        wait_for(wait_analysis_tried,
                 num_sec=360,
                 message="wait for analysis attempt",
                 delay=5)
    except TimedOutError:
        pytest.fail("CFME did not even try analysing the VM {}".format(
            vm.name))

    # Check that analyse job has appeared in the list
    # Wait for the task to finish
    @pytest.wait_for(delay=15,
                     timeout="8m",
                     fail_func=lambda: tb.select('Reload'))
    def is_vm_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        tab_name = pick({
            LOWEST: "All VM Analysis Tasks",
            '5.6': "All VM and Container Analysis Tasks",
        })
        if not pytest.sel.is_displayed(tasks.tasks_table) or \
           not tabs.is_tab_selected(tab_name):
            pytest.sel.force_navigate('tasks_all_vm')
        vm_analysis_finished = tasks.tasks_table.find_row_by_cells({
            'task_name': "Scan from Vm {}".format(vm.name),
            'state': 'finished'
        })
        return vm_analysis_finished is not None

    # Wait for VM analysis to finish
    def wait_analysis_finished():
        if current_version() > "5.5":
            vm.api.reload()
        try:
            return vm.api.last_scan_on.replace(tzinfo=None) >= switched_on
        except AttributeError:
            return False

    try:
        wait_for(wait_analysis_finished,
                 num_sec=600,
                 message="wait for analysis finished",
                 delay=60)
    except TimedOutError:
        pytest.fail("CFME did not finish analysing the VM {}".format(vm.name))
Example #51
0
def create_image(template_name, image_description, bucket_name, key_name):
    """Imports the Image file uploaded to bucket inside amazon s3, checks the image status.
       Waits for import_image task to complete. creates name tag and assigns template_name to
       imported image.

        Args:
            template_name: Name of the template, this will be assigned to Name tag of the
            imported image.
            image_description: description to be set to imported image.
            bucket_name: bucket_name from where image file is imported. This is created in Amazon S3
            service.
            key_name: Keyname inside the create bucket.
    """
    temp_up = cfme_data['template_upload']['template_upload_ec2']
    aws_cli_tool_client_username = credentials['host_default']['username']
    aws_cli_tool_client_password = credentials['host_default']['password']
    sshclient = make_ssh_client(temp_up['aws_cli_tool_client'],
                                aws_cli_tool_client_username,
                                aws_cli_tool_client_password)

    print("AMAZON EC2: Creating JSON file beofre importing the image ...")
    upload_json = """[
  {{
    "Description": "{description}",
    "Format": "vhd",
    "UserBucket": {{
        "S3Bucket": "{bucket_name}",
        "S3Key": "{key_name}"
    }}
}}]""".format(description=image_description,
              bucket_name=bucket_name,
              key_name=key_name)
    command = 'cat <<EOT > import_image.json\n{}\nEOT'.format(upload_json)
    sshclient.run_command(command)

    print("AMAZON EC2: Running import-image command and grep ImportTaskId ...")
    command = "aws ec2 import-image --description 'test_cfme_ami_image_upload' --disk-containers " \
              "file://import_image.json | grep ImportTaskId"
    output = sshclient.run_command(command)
    importtask_id = re.findall(r'import-ami-[a-zA-Z0-9_]*', str(output))[0]

    def check_import_task_status():
        check_status_command = "aws ec2 describe-import-image-tasks --import-task-ids {} | grep " \
                               "-w Status".format(importtask_id)
        import_status_output = sshclient.run_command(check_status_command)
        return 'completed' in str(import_status_output)

    print(
        "AMAZON EC2: Waiting for import-image task to be completed, this may take a while ..."
    )
    wait_for(check_import_task_status,
             fail_condition=False,
             delay=5,
             timeout='1h')

    print(
        "AMAZON EC2: Retrieve AMI ImageId from describe-import-image-tasks...")
    command = "aws ec2 describe-import-image-tasks --import-task-ids {} | grep ImageId".format(
        importtask_id)
    output = sshclient.run_command(command)
    ami_image_id = re.findall(r'ami-[a-zA-Z0-9_]*', str(output))[0]

    print("AMAZON EC2: Creating Tag for imported image ...")
    command = "aws ec2 create-tags --resources {} --tags Key='Name'," \
              "Value='{}'".format(ami_image_id, template_name)
    sshclient.run_command(command)
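make_ssh_client comes from the template-upload utilities and is not shown here. A self-contained stand-in on top of paramiko, assuming the real client exposes run_command and close (the real one returns a richer result object, which is why the code above wraps output in str()):

import paramiko


class SimpleSSHClient(object):
    """Hypothetical stand-in for the suite's ssh client."""

    def __init__(self, hostname, username, password):
        self._client = paramiko.SSHClient()
        self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self._client.connect(hostname, username=username, password=password)

    def run_command(self, command, timeout=None):
        _, stdout, _ = self._client.exec_command(command, timeout=timeout)
        rc = stdout.channel.recv_exit_status()
        return rc, stdout.read()

    def close(self):
        self._client.close()


def make_ssh_client(hostname, username, password):
    return SimpleSSHClient(hostname, username, password)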
Example #52
0
def test_run_datastore_analysis(request, setup_provider, provider, datastore,
                                soft_assert):
    """Tests smarthost analysis

    Metadata:
        test_flag: datastore_analysis
    """

    # Check if there is a host with valid credentials
    host_names = datastore.get_hosts()
    assert len(host_names) != 0, "No hosts attached to this datastore found"
    for host_name in host_names:
        host_qi = Quadicon(host_name, 'host')
        if 'checkmark' in host_qi.creds:
            break
    else:
        # If not, get credentials for one of the present hosts
        found_host = False
        for host_name in host_names:
            host_data = get_host_data_by_name(provider.key, host_name)
            if host_data is None:
                continue

            found_host = True
            test_host = host.Host(name=host_name, provider=provider)

            # Add them to the host
            wait_for(lambda: test_host.exists,
                     delay=10,
                     num_sec=120,
                     fail_func=sel.refresh)
            if not test_host.has_valid_credentials:
                test_host.update(
                    updates={
                        'credentials':
                        host.get_credentials_from_config(
                            host_data['credentials'])
                    })
                wait_for(lambda: test_host.has_valid_credentials,
                         delay=10,
                         num_sec=120,
                         fail_func=sel.refresh)

                # And remove them again when the test is finished
                def test_host_remove_creds():
                    test_host.update(
                        updates={
                            'credentials':
                            host.Host.Credential(
                                principal="", secret="", verify_secret="")
                        })

                request.addfinalizer(test_host_remove_creds)
            break

        assert found_host,\
            "No credentials found for any of the hosts attached to datastore {}"\
            .format(datastore.name)

    # TODO add support for events
    # register_event(
    #     None,
    #     "datastore",
    #     datastore_name,
    #     ["datastore_analysis_request_req", "datastore_analysis_complete_req"]
    # )

    # Initiate analysis
    datastore.run_smartstate_analysis()
    wait_for(lambda: is_datastore_analysis_finished(datastore.name),
             delay=15,
             timeout="15m",
             fail_func=lambda: tb.select('Reload the current display'))

    ds_str = "Datastores Type"
    c_datastore = datastore.get_detail('Properties', ds_str)
    # Check results of the analysis and the datastore type
    soft_assert(
        c_datastore == datastore.type.upper(),
        'Datastore type does not match the type defined in yaml: '
        'expected "{}" but was "{}"'.format(datastore.type.upper(), c_datastore))
    for row_name in CONTENT_ROWS_TO_CHECK:
        value = InfoBlock('Content', row_name).text
        soft_assert(value != '0',
                    'Expected value for {} to be non-zero'.format(row_name))
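is_datastore_analysis_finished is defined elsewhere; judging from the VM-analysis check in Example #50, it most likely scans the tasks table for a finished row. A sketch reusing the tasks page model from that example (the task_name and state strings below are guesses):

def is_datastore_analysis_finished(datastore_name):
    # Assumption: datastore scans show up in the same tasks table as VM scans
    row = tasks.tasks_table.find_row_by_cells({
        'task_name': "SmartState Analysis for [{}]".format(datastore_name),
        'state': 'finished',
    })
    return row is not None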
Example #53
0
def test_provision_approval(setup_provider, provider, vm_name, smtp_test,
                            request, edit, provisioning):
    """ Tests provisioning approval. Tests couple of things.

    * Approve manually
    * Approve by editing the request to conform

    Prerequisities:
        * A provider that can provision.
        * Automate role enabled
        * User with e-mail set so you can receive and view them

    Steps:
        * Create a provisioning request that does not get automatically approved (eg. ``num_vms``
            bigger than 1)
        * Wait for an e-mail to come, informing you that the auto-approval was unsuccessful.
        * Depending on whether you want to do manual approval or edit approval, do:
            * MANUAL: manually approve the request in UI
            * EDIT: Edit the request in UI so it conforms the rules for auto-approval.
        * Wait for an e-mail with approval
        * Wait until the request finishes
        * Wait until an email, informing about finished provisioning, comes.

    Metadata:
        test_flag: provision
        suite: infra_provisioning
    """
    # generate_tests makes sure these have values
    template, host, datastore = map(provisioning.get,
                                    ('template', 'host', 'datastore'))

    # It will provision two of them
    vm_names = [vm_name + "001", vm_name + "002"]
    request.addfinalizer(
        lambda: [cleanup_vm(vmname, provider) for vmname in vm_names])

    provisioning_data = {
        'vm_name': vm_name,
        'host_name': {
            'name': [host]
        },
        'datastore_name': {
            'name': [datastore]
        },
        'num_vms': "2",
    }

    # Same thing, different names. :\
    if provider.type == 'rhevm':
        provisioning_data['provision_type'] = 'Native Clone'
    elif provider.type == 'virtualcenter':
        provisioning_data['provision_type'] = 'VMware'

    try:
        provisioning_data['vlan'] = provisioning['vlan']
    except KeyError:
        # provisioning['vlan'] is required for rhevm provisioning
        if provider.type == 'rhevm':
            pytest.fail('rhevm requires a vlan value in provisioning info')

    do_vm_provisioning(template,
                       provider,
                       vm_name,
                       provisioning_data,
                       request,
                       smtp_test,
                       wait=False)
    wait_for(lambda: any("your request for a new vms was not autoapproved" in
                         normalize_text(mail["subject"])
                         for mail in smtp_test.get_emails()),
             num_sec=90,
             delay=5)
    wait_for(lambda: any("virtual machine request was not approved" in
                         normalize_text(mail["subject"])
                         for mail in smtp_test.get_emails()),
             num_sec=90,
             delay=5)

    cells = {
        'Description': 'Provision from [{}] to [{}###]'.format(template, vm_name)
    }
    wait_for(lambda: requests.go_to_request(cells), num_sec=80, delay=5)
    if edit:
        # Automatic approval after editing the request to conform
        with requests.edit_request(cells) as form:
            fill(form.num_vms, "1")
            new_vm_name = vm_name + "-xx"
            fill(form.vm_name, new_vm_name)
        vm_names = [new_vm_name]  # Will be just one now
        cells = {
            'Description': 'Provision from [{}] to [{}]'.format(template, new_vm_name)
        }
        request.addfinalizer(lambda: cleanup_vm(new_vm_name, provider))
    else:
        # Manual approval
        requests.approve_request(cells, "Approved")
        vm_names = [vm_name + "001", vm_name + "002"]  # There will be two VMs
        request.addfinalizer(
            lambda: [cleanup_vm(vmname, provider) for vmname in vm_names])
    wait_for(lambda: any("your virtual machine configuration was approved" in
                         normalize_text(mail["subject"])
                         for mail in smtp_test.get_emails()),
             num_sec=120,
             delay=5)

    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vms %s to appear on provider %s',
                ", ".join(vm_names), provider.key)
    wait_for(lambda: all(map(provider.mgmt.does_vm_exist, vm_names)),
             handle_exception=True,
             num_sec=600)

    row, __ = wait_for(requests.wait_for_request, [cells],
                       fail_func=requests.reload,
                       num_sec=1500,
                       delay=20)
    assert normalize_text(row.status.text) == 'ok' \
        and normalize_text(row.request_state.text) == 'finished'

    # Wait for e-mails to appear
    def verify():
        expected = "your virtual machine request has completed vm {}".format(
            normalize_text(vm_name))
        return sum(1 for mail in smtp_test.get_emails()
                   if expected in normalize_text(mail["subject"])) == len(vm_names)

    wait_for(verify, message="email receive check", delay=5)
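The three e-mail waits in this test differ only in the expected subject substring; a hypothetical helper built from the same pieces would de-duplicate them:

def wait_for_email(smtp_test, subject_substring, num_sec=120, delay=5):
    # Hypothetical: block until at least one captured e-mail matches
    wait_for(
        lambda: any(subject_substring in normalize_text(mail["subject"])
                    for mail in smtp_test.get_emails()),
        num_sec=num_sec,
        delay=delay,
        message="email '{}' received".format(subject_substring))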
Example #54
0
 def test_delete(self, verify_vm_stopped, vm, vm_name, rest_api):
     assert "delete" in vm.action
     vm.action.delete()
     wait_for(lambda: not rest_api.collections.vms.find_by(name=vm_name),
              num_sec=240,
              delay=5)
Example #55
0
def test_provision_from_template_using_rest(request, setup_provider, provider,
                                            provisioning, vm_name, rest_api):
    """ Tests provisioning from a template using the REST API.

    Metadata:
        test_flag: provision
    """
    if "flavors" not in rest_api.collections.all_names:
        pytest.skip("This appliance does not have `flavors` collection.")
    image_guid = rest_api.collections.templates.find_by(
        name=provisioning['image']['name'])[0].guid
    instance_type = (provisioning['instance_type'].split(":")[0].strip()
                     if ":" in provisioning['instance_type']
                     and provider.type == "ec2" else
                     provisioning['instance_type'])
    flavors = rest_api.collections.flavors.find_by(name=instance_type)
    assert len(flavors) > 0
    # TODO: Multi search when it works
    for flavor in flavors:
        if flavor.ems.name == provider.name:
            flavor_id = flavor.id
            break
    else:
        pytest.fail("Cannot find flavour {} for provider {}".format(
            instance_type, provider.name))

    provision_data = {
        "version": "1.1",
        "template_fields": {
            "guid": image_guid,
        },
        "vm_fields": {
            "vm_name": vm_name,
            "instance_type": flavor_id,
            "request_type": "template",
            "availability_zone": provisioning["availability_zone"],
            "security_groups": [provisioning["security_group"]],
            "guest_keypair": provisioning["guest_keypair"]
        },
        "requester": {
            "user_name": "admin",
            "owner_first_name": "Administrator",
            "owner_last_name": "Administratorovich",
            "owner_email": "*****@*****.**",
            "auto_approve": True,
        },
        "tags": {},
        "additional_values": {},
        "ems_custom_attributes": {},
        "miq_custom_attributes": {}
    }

    request.addfinalizer(lambda: provider.mgmt.delete_vm(vm_name)
                         if provider.mgmt.does_vm_exist(vm_name) else None)
    request = rest_api.collections.provision_requests.action.create(
        **provision_data)[0]

    def _finished():
        request.reload()
        if request.status.lower() in {"error"}:
            pytest.fail("Error when provisioning: `{}`".format(
                request.message))
        return request.request_state.lower() in {"finished", "provisioned"}

    wait_for(_finished,
             num_sec=600,
             delay=5,
             message="REST provisioning finishes")
    wait_for(lambda: provider.mgmt.does_vm_exist(vm_name),
             num_sec=600,
             delay=5,
             message="VM {} becomes visible".format(vm_name))
Example #56
0
 def wait_generated(self, timeout=600):
     wait_for(self.check_status,
              num_sec=timeout,
              delay=5,
              fail_condition=lambda result: result != "Complete")
Example #57
0
def test_provision_with_boot_volume(request, setup_provider, provider,
                                    provisioning, vm_name, soft_assert, domain,
                                    copy_domains):
    """ Tests provisioning from a template and attaching one booting volume.

    Metadata:
        test_flag: provision, volumes
    """

    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s' %
            (image, vm_name, provider.key))

    with provider.mgmt.with_volume(
            1, imageRef=provider.mgmt.get_template_id(image)) as volume:
        # Set up automate
        cls = automate.Class(name="Methods",
                             namespace=automate.Namespace.make_path(
                                 "Cloud",
                                 "VM",
                                 "Provisioning",
                                 "StateMachines",
                                 parent=domain))
        method = automate.Method(name="openstack_CustomizeRequest", cls=cls)
        with update(method):
            method.data = dedent('''\
                $evm.root["miq_provision"].set_option(
                    :clone_options, {
                        :image_ref => nil,
                        :block_device_mapping_v2 => [{
                            :boot_index => 0,
                            :uuid => "%s",
                            :device_name => "vda",
                            :source_type => "volume",
                            :destination_type => "volume",
                            :delete_on_termination => false
                        }]
                    }
                )
            ''' % (volume, ))

        def _finish_method():
            with update(method):
                method.data = """prov = $evm.root["miq_provision"]"""

        request.addfinalizer(_finish_method)
        instance = Instance.factory(vm_name, provider, image)
        request.addfinalizer(instance.delete_from_provider)
        inst_args = {
            'email': '*****@*****.**',
            'first_name': 'Image',
            'last_name': 'Provisioner',
            'notes': note,
            'instance_type': provisioning['instance_type'],
            'availability_zone': provisioning['availability_zone'],
            'security_groups': [provisioning['security_group']],
            'guest_keypair': provisioning['guest_keypair']
        }

        if isinstance(provider, OpenStackProvider):
            inst_args['cloud_network'] = provisioning['cloud_network']

        sel.force_navigate("clouds_instances_by_provider")
        instance.create(**inst_args)

        soft_assert(vm_name in provider.mgmt.volume_attachments(volume))
        soft_assert(provider.mgmt.volume_attachments(volume)[vm_name] == "vda")
        instance.delete_from_provider()  # To make it possible to delete the volume
        wait_for(lambda: not instance.does_vm_exist_on_provider(),
                 num_sec=180,
                 delay=5)
Example #58
0
def test_verify_revert_snapshot(full_test_vm, provider, soft_assert,
                                register_event, request):
    """Tests revert snapshot

    Metadata:
        test_flag: snapshot, provision
    """
    if provider.one_of(RHEVMProvider):
        snapshot1 = new_snapshot(full_test_vm, has_name=False)
    else:
        snapshot1 = new_snapshot(full_test_vm)
    ssh_kwargs = {
        'hostname': snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name),
        'username': credentials[provider.data['full_template']['creds']]['username'],
        'password': credentials[provider.data['full_template']['creds']]['password'],
    }
    ssh_client = SSHClient(**ssh_kwargs)
    # We need to wait for ssh to become available on the vm, which can take a while.
    # Without this wait, the ssh command would fail with a 'port 22 not available' error.
    # The easiest way to solve this is to mask the exception with 'handle_exception=True'
    # and wait for successful completion of the ssh command.
    # The 'fail_func' ensures we close the connection that failed with an exception.
    # Without it, the connection would hang and wait_for would fail with a timeout.
    wait_for(lambda: ssh_client.run_command('touch snapshot1.txt').rc == 0,
             num_sec=300,
             delay=20,
             handle_exception=True,
             fail_func=ssh_client.close)
    snapshot1.create()
    register_event(target_type='VmOrTemplate',
                   target_name=full_test_vm.name,
                   event_type='vm_snapshot_complete')
    register_event(target_type='VmOrTemplate',
                   target_name=full_test_vm.name,
                   event_type='vm_snapshot')
    ssh_client.run_command('touch snapshot2.txt')
    if provider.one_of(RHEVMProvider):
        snapshot2 = new_snapshot(full_test_vm, has_name=False)
    else:
        snapshot2 = new_snapshot(full_test_vm)
    snapshot2.create()

    if provider.one_of(RHEVMProvider):
        full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_OFF,
                                             cancel=False)
        full_test_vm.wait_for_vm_state_change(
            desired_state=full_test_vm.STATE_OFF, timeout=900)

    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(lambda: snapshot1.active,
             num_sec=300,
             delay=20,
             fail_func=sel.refresh)
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_OFF,
                                          timeout=720)
    full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_ON,
                                         cancel=False)
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_ON,
                                          timeout=900)
    current_state = full_test_vm.find_quadicon().state
    soft_assert(current_state.startswith('currentstate-on'),
                "Quadicon state is {}".format(current_state))
    soft_assert(full_test_vm.provider.mgmt.is_vm_running(full_test_vm.name),
                "vm not running")
    wait_for(lambda: ssh_client.run_command('test -e snapshot1.txt').rc == 0,
             num_sec=400,
             delay=20,
             handle_exception=True,
             fail_func=ssh_client.close)
    try:
        result = ssh_client.run_command('test -e snapshot1.txt')
        assert not result.rc
        result = ssh_client.run_command('test -e snapshot2.txt')
        assert result.rc
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        logger.exception('Revert to snapshot %s Failed', snapshot1.name)
    ssh_client.close()
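new_snapshot is defined elsewhere in the snapshot test module; a plausible sketch, assuming it builds the page model's Snapshot object with random names (all attribute names below are guesses):

import fauxfactory


def new_snapshot(test_vm, has_name=True):
    # Sketch: random name/description; RHEV snapshots are created nameless
    return Vm.Snapshot(
        name="snap_{}".format(fauxfactory.gen_alphanumeric(8)) if has_name else None,
        description="desc_{}".format(fauxfactory.gen_alphanumeric(8)),
        memory=False,
        parent_vm=test_vm,
    )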
Example #59
0
def test_provision_from_template_with_attached_disks(request, setup_provider,
                                                     provider, provisioning,
                                                     vm_name, disks,
                                                     soft_assert, domain, cls,
                                                     copy_domains):
    """ Tests provisioning from a template and attaching disks

    Metadata:
        test_flag: provision
    """

    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s' %
            (image, vm_name, provider.key))

    DEVICE_NAME = "/dev/sd{}"
    device_mapping = []

    with provider.mgmt.with_volumes(1, n=disks) as volumes:
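        # Build (volume, device) pairs, mapping the volumes to /dev/sdb,
        # /dev/sdc, ... (/dev/sda is left for the root disk).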
        for i, volume in enumerate(volumes):
            device_mapping.append(
                (volume, DEVICE_NAME.format(chr(ord("b") + i))))
        # Set up automate

        cls = automate.Class(name="Methods",
                             namespace=automate.Namespace.make_path(
                                 "Cloud",
                                 "VM",
                                 "Provisioning",
                                 "StateMachines",
                                 parent=domain))

        method = automate.Method(name="openstack_PreProvision", cls=cls)

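        # ONE_FIELD and VOLUME_METHOD are module-level string templates that
        # are not shown in this example. A minimal sketch of what they might
        # look like, assuming the automate method builds a Ruby
        # :block_device_mapping hash from the (volume, device) pairs
        # (hypothetical, for illustration only):
        #
        #     ONE_FIELD = '{:volume_id => "%s", :device_name => "%s"}'
        #     VOLUME_METHOD = ('prov = $evm.root["miq_provision"]\n'
        #                      'prov.set_option(:clone_options,'
        #                      ' {:block_device_mapping => [%s]})')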
        with update(method):
            disk_mapping = []
            for mapping in device_mapping:
                disk_mapping.append(ONE_FIELD % mapping)
            method.data = VOLUME_METHOD % ", ".join(disk_mapping)

        def _finish_method():
            with update(method):
                method.data = """prov = $evm.root["miq_provision"]"""

        request.addfinalizer(_finish_method)
        instance = Instance.factory(vm_name, provider, image)
        request.addfinalizer(instance.delete_from_provider)
        inst_args = {
            'email': '*****@*****.**',
            'first_name': 'Image',
            'last_name': 'Provisioner',
            'notes': note,
            'instance_type': provisioning['instance_type'],
            'availability_zone': provisioning['availability_zone'],
            'security_groups': [provisioning['security_group']],
            'guest_keypair': provisioning['guest_keypair']
        }

        if isinstance(provider, OpenStackProvider):
            inst_args['cloud_network'] = provisioning['cloud_network']

        sel.force_navigate("clouds_instances_by_provider")
        instance.create(**inst_args)

        for volume_id in volumes:
            soft_assert(vm_name in provider.mgmt.volume_attachments(volume_id))
        for volume, device in device_mapping:
            soft_assert(
                provider.mgmt.volume_attachments(volume)[vm_name] == device)
        instance.delete_from_provider()  # To make it possible to delete the volume
        wait_for(lambda: not instance.does_vm_exist_on_provider(),
                 num_sec=180,
                 delay=5)
Example #60
def test_provision_with_additional_volume(request, setup_provider,
                                          provisioning, provider, vm_name,
                                          soft_assert, copy_domains, domain):
    """ Tests provisioning with setting specific image from AE and then also making it create and
    attach an additional 3G volume.

    Metadata:
        test_flag: provision, volumes
    """

    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s' %
            (image, vm_name, provider.key))

    # Set up automate
    cls = automate.Class(name="Methods",
                         namespace=automate.Namespace.make_path(
                             "Cloud",
                             "VM",
                             "Provisioning",
                             "StateMachines",
                             parent=domain))
    method = automate.Method(name="openstack_CustomizeRequest", cls=cls)
    try:
        image_id = provider.mgmt.get_template_id(
            provider.data["small_template"])
    except KeyError:
        pytest.skip("No small_template in provider data!")
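    # The automate method below overrides the clone options so the instance
    # boots from a new 3 GB volume created from the image
    # (block_device_mapping_v2 with source_type "image" and destination_type
    # "volume") instead of booting the image directly.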
    with update(method):
        method.data = dedent('''\
            $evm.root["miq_provision"].set_option(
              :clone_options, {
                :image_ref => nil,
                :block_device_mapping_v2 => [{
                  :boot_index => 0,
                  :uuid => "%s",
                  :device_name => "vda",
                  :source_type => "image",
                  :destination_type => "volume",
                  :volume_size => 3,
                  :delete_on_termination => false
                }]
              }
            )
        ''' % (image_id, ))

    def _finish_method():
        with update(method):
            method.data = """prov = $evm.root["miq_provision"]"""

    request.addfinalizer(_finish_method)
    instance = Instance.factory(vm_name, provider, image)
    request.addfinalizer(instance.delete_from_provider)
    inst_args = {
        'email': '*****@*****.**',
        'first_name': 'Image',
        'last_name': 'Provisioner',
        'notes': note,
        'instance_type': provisioning['instance_type'],
        'availability_zone': provisioning['availability_zone'],
        'security_groups': [provisioning['security_group']],
        'guest_keypair': provisioning['guest_keypair']
    }

    if isinstance(provider, OpenStackProvider):
        inst_args['cloud_network'] = provisioning['cloud_network']

    sel.force_navigate("clouds_instances_by_provider")
    instance.create(**inst_args)

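    # 'os-extended-volumes:volumes_attached' is the Nova extended attribute
    # listing the IDs of the volumes attached to the server.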
    prov_instance = provider.mgmt._find_instance_by_name(vm_name)
    try:
        assert hasattr(prov_instance, 'os-extended-volumes:volumes_attached')
        volumes_attached = getattr(prov_instance,
                                   'os-extended-volumes:volumes_attached')
        assert len(volumes_attached) == 1
        volume_id = volumes_attached[0]["id"]
        assert provider.mgmt.volume_exists(volume_id)
        volume = provider.mgmt.get_volume(volume_id)
        assert volume.size == 3
    finally:
        instance.delete_from_provider()
        wait_for(lambda: not instance.does_vm_exist_on_provider(),
                 num_sec=180,
                 delay=5)
        if "volume_id" in locals():  # To handle the case of 1st or 2nd assert
            if provider.mgmt.volume_exists(volume_id):
                provider.mgmt.delete_volume(volume_id)