Example #1
def test_proxy_invalid(appliance, prepare_proxy_invalid, provider):
    """ Check whether invalid default and invalid specific provider proxy settings
     result in the provider refresh not working.

    Bugzilla:
        1623550

    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
        testSteps:
            1. Configure default proxy to invalid entry.
            2. Configure specific proxy to invalid entry.
            3. Wait for the provider refresh to complete and verify that the settings cause an error.
    """
    provider.refresh_provider_relationships()

    view = navigate_to(provider, 'Details')

    if appliance.version >= '5.10':
        view.toolbar.view_selector.select('Summary View')

    def last_refresh_failed():
        view.toolbar.reload.click()
        return 'Timed out connecting to server' in (
            view.entities.summary('Status').get_text_of('Last Refresh'))

    wait_for(last_refresh_failed, fail_condition=False, num_sec=240, delay=5)
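
Every example on this page revolves around wait_for. A minimal sketch of the polling contract these tests rely on (the names and defaults below are illustrative assumptions, not the library's exact signature):

import time

def wait_for_sketch(check, num_sec=120, delay=5, fail_condition=False, fail_func=None):
    # Poll `check` until its result differs from `fail_condition`.
    # Hedged illustration only: the real wait_for also accepts func_args,
    # message, handle_exception, a callable fail_condition, and more.
    deadline = time.time() + num_sec
    while time.time() < deadline:
        result = check()
        if result != fail_condition:  # fail_condition=False means "wait until truthy"
            return result
        if fail_func is not None:
            fail_func()  # e.g. reload the page between attempts
        time.sleep(delay)
    raise TimeoutError('timed out waiting for {!r}'.format(check))

Under this contract, the call above keeps clicking reload and re-reading the summary until last_refresh_failed() returns True or 240 seconds elapse.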
def test_appliance_replicate_database_disconnection_with_backlog(request, virtualcenter_provider,
                                                                 appliance):
    """Tests a database disconnection with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()
    replication_conf = appliance.server.zone.region.replication

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.hostname)
        # Replication is up and running, now stop the DB on the replication parent
        virtualcenter_provider.create()
        appl2.db.stop_db_service()
        sleep(60)
        appl2.db.start_db_service()
        wait_for(replication_conf.get_replication_status, fail_condition=False, num_sec=360,
                 delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
        assert replication_conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
Example #3
def test_run_datastore_analysis(setup_provider, datastore, soft_assert, datastores_hosts_setup,
                                clear_all_tasks, appliance):
    """Tests smarthost analysis

    Metadata:
        test_flag: datastore_analysis
    """
    # Initiate analysis
    try:
        datastore.run_smartstate_analysis(wait_for_task_result=True)
    except MenuItemNotFound:
        # TODO: need to update to cover all datastores
        pytest.skip('Smart State analysis is disabled for {} datastore'.format(datastore.name))
    details_view = navigate_to(datastore, 'DetailsFromProvider')
    # c_datastore = details_view.entities.properties.get_text_of("Datastore Type")

    # Check results of the analysis and the datastore type
    # TODO need to clarify datastore type difference
    # soft_assert(c_datastore == datastore.type.upper(),
    #             'Datastore type does not match the type defined in yaml:' +
    #             'expected "{}" but was "{}"'.format(datastore.type.upper(), c_datastore))

    wait_for(lambda: details_view.entities.content.get_text_of(CONTENT_ROWS_TO_CHECK[0]),
             delay=15, timeout="3m",
             fail_condition='0',
             fail_func=appliance.server.browser.refresh)
    managed_vms = details_view.entities.relationships.get_text_of('Managed VMs')
    if managed_vms != '0':
        for row_name in CONTENT_ROWS_TO_CHECK:
            value = details_view.entities.content.get_text_of(row_name)
            soft_assert(value != '0',
                        'Expected value for {} to be non-empty'.format(row_name))
    else:
        assert details_view.entities.content.get_text_of(CONTENT_ROWS_TO_CHECK[-1]) != '0'
def test_delete_datastore_appear_after_refresh(provider, appliance):
    """ Tests delete datastore

    Metadata:
        test_flag: delete_object
    """
    datastore_collection = appliance.collections.datastores
    data_store = provider.data['remove_test']['datastore']
    test_datastore = datastore_collection.instantiate(name=data_store, provider=provider)

    if test_datastore.host_count > 0:
        test_datastore.delete_all_attached_hosts()
    if test_datastore.vm_count > 0:
        test_datastore.delete_all_attached_vms()

    test_datastore.delete(cancel=False)
    wait_for(lambda: not test_datastore.exists,
             delay=20,
             timeout=1200,
             message="Wait datastore to disappear",
             fail_func=test_datastore.browser.refresh)

    provider.refresh_provider_relationships()
    wait_for(lambda: test_datastore.exists,
             delay=20,
             timeout=1200,
             message="Wait datastore to appear",
             fail_func=test_datastore.browser.refresh)
def test_appliance_replicate_sync_role_change_with_backlog(request, virtualcenter_provider,
                                                           appliance):
    """Tests that a role change is replicated with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()
    replication_conf = appliance.server.zone.region.replication

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        server_settings = appliance.server.settings
        configure_db_replication(appl2.hostname)
        # Replication is up and running, now disable DB sync role
        virtualcenter_provider.create()
        server_settings.disable_server_roles('database_synchronization')
        wait_for(replication_conf.get_replication_status, fail_condition=True, num_sec=360,
                 delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
        server_settings.enable_server_roles('database_synchronization')
        wait_for(replication_conf.get_replication_status, fail_condition=False, num_sec=360,
                 delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
        assert replication_conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
Example #6
    def create(self, name, provider, tenant, network_manager, has_external_gw=False,
               ext_network=None, ext_network_subnet=None):
        """Create network router

        Args:
            name: (str) name of the router
            provider: crud object of the OpenStack cloud provider
            tenant: (str) name of the tenant in which to place the router
            network_manager: (str) name of the network manager
            has_external_gw: (bool) whether the router has an external gateway
            ext_network: (str) name of the external cloud network
                to be connected as a gateway to the router.
                Used only if has_external_gw is True
            ext_network_subnet: (str) name of the subnet of ext_network.
                Used only if has_external_gw is True
        Returns: instance of cfme.networks.network_router.NetworkRouter
        """
        view = navigate_to(self, 'Add')
        form_params = {'network_manager': network_manager,
                       'router_name': name,
                       'cloud_tenant': tenant}
        if has_external_gw:
            form_params.update({'ext_gateway': has_external_gw,
                                'network_name': ext_network,
                                'subnet_name': ext_network_subnet})
        view.fill(form_params)
        view.add.click()
        view.flash.assert_success_message('Network Router "{}" created'.format(name))
        router = self.instantiate(name, provider, ext_network)
        # Refresh provider's relationships to have new router displayed
        wait_for(provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
        wait_for(lambda: router.exists, timeout=100, fail_func=router.browser.refresh)
        return router
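
A hedged usage sketch for the method above; the collection attribute, tenant, and network names are assumptions for illustration:

# Hypothetical call site: collection path and OpenStack object names are assumptions.
router = appliance.collections.network_routers.create(
    name='test-router',
    provider=provider,
    tenant='admin',
    network_manager='{} Network Manager'.format(provider.name),
    has_external_gw=True,
    ext_network='public',
    ext_network_subnet='public_subnet')
assert router.exists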
Example #7
    def reorder_elements(self, add_element, second_element, element_data):
        """Method to add element and interchange element positions.
           This method updates a dialog and adds a second element.The position
           of two elements are then interchanged to test for error.

        Args:
            add_element - flag if second element needs to be added.
            second_element - The second element to be added to the dialog.
            element_data - Already existing first element's data.
        """
        view = navigate_to(self, 'Edit')
        view.element_tree.click_path(*self.tree_path[1:])
        # Add a new element and then interchange position (BZ-1238721)
        if add_element:
            view.plus_btn.item_select("Add a new Element to this Box")
            view.fill(second_element.get('element_information'))
            # Views are not nested in 5.8, hence the need to check for the options value
            if second_element.get('options') is not None:
                view.fill(second_element.get('options'))
            view.element_tree.click_path(*self.tree_path[1:])
        dragged_el = element_data.get('element_information').get("ele_label")
        dropped_el = second_element.get('element_information').get("ele_label")
        view.dragndrop.drag_and_drop(self.element_loc(dragged_el), self.element_loc(dropped_el))

        view.save_button.click()
        view = self.create_view(DetailsDialogView)
        wait_for(
            lambda: view.is_displayed, delay=15, num_sec=300,
            message="waiting for view to be displayed"
        )
        assert view.is_displayed
        view.flash.assert_no_error()
Example #8
def wait_for_alert(smtp, alert, delay=None, additional_checks=None):
    """DRY waiting function

    Args:
        smtp: smtp_test funcarg
        alert: Alert name
        delay: Optional timeout in seconds, passed to wait_for as num_sec
        additional_checks: Additional checks to perform on the mails. Keys are names of the mail
            sections, values the values to look for.
    """
    logger.info("Waiting for informative e-mail of alert %s to come", alert.description)
    additional_checks = additional_checks or {}

    def _mail_arrived():
        for mail in smtp.get_emails():
            if "Alert Triggered: {}".format(alert.description) in mail["subject"]:
                if not additional_checks:
                    return True
                else:
                    for key, value in additional_checks.items():
                        if value in mail.get(key, ""):
                            return True
        return False
    wait_for(
        _mail_arrived,
        num_sec=delay,
        delay=5,
        message="wait for e-mail to come!"
    )
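
For illustration, a call against the helper above; the fixture names and the mail-section key are assumptions:

# Hypothetical usage: allow up to 30 minutes for the alert e-mail, and only
# accept mails whose "text" section mentions the VM's name.
wait_for_alert(
    smtp_test,
    alert,
    delay=30 * 60,
    additional_checks={"text": vm.name})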
def test_action_run_ansible_playbook(request, ansible_catalog_item, ansible_action,
        policy_for_testing, vmware_vm, ansible_credential, service_request, service, host_type,
        inventory):
    """Tests a policy with ansible playbook action against localhost, manual address,
       target machine and unavailable address.
    """
    if host_type == "manual_address":
        inventory["inventory"]["hosts"] = vmware_vm.ip_address
    if host_type in ["manual_address", "target_machine"]:
        with update(ansible_catalog_item):
            ansible_catalog_item.provisioning = {"machine_credential": ansible_credential.name}
    with update(ansible_action):
        ansible_action.run_ansible_playbook = inventory
    vmware_vm.add_tag("Service Level", "Gold")
    request.addfinalizer(lambda: vmware_vm.remove_tag("Service Level", "Gold"))
    wait_for(service_request.exists, num_sec=600)
    service_request.wait_for_request()
    view = navigate_to(service, "Details")
    if host_type == "localhost":
        assert view.provisioning.details.get_text_of("Hosts") == "localhost"
        assert view.provisioning.results.get_text_of("Status") == "successful"
    elif host_type == "manual_address":
        assert view.provisioning.details.get_text_of("Hosts") == vmware_vm.ip_address
        assert view.provisioning.results.get_text_of("Status") == "successful"
    elif host_type == "target_machine":
        assert view.provisioning.details.get_text_of("Hosts") == vmware_vm.ip_address
        assert view.provisioning.results.get_text_of("Status") == "successful"
    elif host_type == "unavailable_address":
        assert view.provisioning.details.get_text_of("Hosts") == "unavailable_address"
        assert view.provisioning.results.get_text_of("Status") == "failed"
def wait_for_termination(provider, instance):
    """ Waits for VM/instance termination and refreshes power states and relationships
    """
    view = navigate_to(instance, 'Details')
    pwr_mgmt = view.entities.summary('Power Management')
    state_change_time = pwr_mgmt.get_text_of('State Changed On')
    provider.refresh_provider_relationships()
    logger.info("Refreshing provider relationships and power states")
    refresh_timer = RefreshTimer(time_for_refresh=300)
    wait_for(provider.is_refreshed,
             [refresh_timer],
             message="Waiting for provider.is_refreshed",
             num_sec=1000,
             delay=60,
             handle_exception=True)
    wait_for_ui_state_refresh(instance, provider, state_change_time, timeout=720)
    term_states = {instance.STATE_TERMINATED, instance.STATE_ARCHIVED, instance.STATE_UNKNOWN}
    if pwr_mgmt.get_text_of('Power State') not in term_states:
        """Wait for one more state change as transitional state also changes "State Changed On" time
        """
        logger.info("Instance is still powering down. please wait before termination")
        state_change_time = pwr_mgmt.get_text_of('State Changed On')
        wait_for_ui_state_refresh(instance, provider, state_change_time, timeout=720)

    return (instance.mgmt.state == VmState.DELETED
            if provider.one_of(EC2Provider)
            else pwr_mgmt.get_text_of('Power State') in term_states)
Example #11
def test_alert_vm_turned_on_more_than_twice_in_past_15_minutes(request, provider, full_template_vm,
        smtp_test, alert_collection, setup_for_alerts):
    """ Tests alerts for vm turned on more than twice in 15 minutes

    Metadata:
        test_flag: alerts, provision
    """
    vm = full_template_vm
    alert = alert_collection.instantiate("VM Power On > 2 in last 15 min")
    with update(alert):
        alert.active = True
        alert.emails = fauxfactory.gen_email()

    setup_for_alerts(request, [alert], "VM Power On", vm.name, provider)

    if not provider.mgmt.is_vm_stopped(vm.name):
        provider.mgmt.stop_vm(vm.name)
    provider.refresh_provider_relationships()
    vm.wait_for_vm_state_change(vm.STATE_OFF)
    for i in range(5):
        vm.power_control_from_cfme(option=vm.POWER_ON, cancel=False)
        wait_for(lambda: provider.mgmt.is_vm_running(vm.name), num_sec=300,
                 message="Check if vm is running")
        vm.wait_for_vm_state_change(vm.STATE_ON)
        vm.power_control_from_cfme(option=vm.POWER_OFF, cancel=False)
        wait_for(lambda: provider.mgmt.is_vm_stopped(vm.name), num_sec=300,
                 message="Check if vm is stopped")
        vm.wait_for_vm_state_change(vm.STATE_OFF)

    wait_for_alert(smtp_test, alert, delay=16 * 60)
Example #12
    def delete(self, cancel=True):
        """ Deletes a network provider from CFME """
        view = navigate_to(self, 'Details')
        wait_for(lambda: view.toolbar.configuration.item_enabled('Remove this Network Provider'),
                 num_sec=10)
        view.toolbar.configuration.item_select('Remove this Network Provider',
                                               handle_alert=not cancel)
Example #13
    def go(self, _tries=0, *args, **kwargs):
        nav_args = {'use_resetter': True, 'wait_for_view': 10, 'force': False}
        self.log_message("Beginning Navigation...", level="info")
        start_time = time.time()
        if _tries > 2:
            # Need at least three tries:
            # 1: login_admin handles an alert or CannotContinueWithNavigation appears.
            # 2: Everything should work. If not, NavigationError.
            raise exceptions.NavigationError(self._name)

        _tries += 1
        for arg in nav_args:
            if arg in kwargs:
                nav_args[arg] = kwargs.pop(arg)
        self.check_for_badness(self.pre_navigate, _tries, nav_args, *args, **kwargs)
        here = False
        resetter_used = False
        waited = False
        force_used = False
        try:
            here = self.check_for_badness(self.am_i_here, _tries, nav_args, *args, **kwargs)
        except NotImplementedError:
            nav_args['wait_for_view'] = 0
            self.log_message(
                "is_displayed not implemented for {} view".format(self.VIEW or ""), level="warn")
        except Exception as e:
            self.log_message(
                "Exception raised [{}] whilst checking if already here".format(e), level="error")
        if not here or nav_args['force']:
            if nav_args['force']:
                force_used = True
            self.log_message("Prerequisite Needed")
            self.prerequisite_view = self.prerequisite()
            try:
                self.check_for_badness(self.step, _tries, nav_args, *args, **kwargs)
            except (exceptions.CandidateNotFound, exceptions.ItemNotFound) as e:
                self.log_message(
                    "Item/Tree Exception raised [{}] whilst running step, trying refresh"
                    .format(e), level="error"
                )
                self.appliance.browser.widgetastic.refresh()
                self.check_for_badness(self.step, _tries, nav_args, *args, **kwargs)
        if nav_args['use_resetter']:
            resetter_used = True
            self.check_for_badness(self.resetter, _tries, nav_args, *args, **kwargs)
        self.check_for_badness(self.post_navigate, _tries, nav_args, *args, **kwargs)
        view = self.view if self.VIEW is not None else None
        duration = int((time.time() - start_time) * 1000)
        if view and nav_args['wait_for_view'] and not os.environ.get(
                'DISABLE_NAVIGATE_ASSERT', False):
            waited = True
            wait_for(
                lambda: view.is_displayed, num_sec=nav_args['wait_for_view'],
                message="Waiting for view [{}] to display".format(view.__class__.__name__)
            )
        self.log_message(
            self.construct_message(here, resetter_used, view, duration, waited, force_used),
            level="info"
        )
        return view
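
Because go() pops every recognized key out of kwargs into nav_args, callers can tune a single navigation. A hedged sketch (the object and destination names are assumptions):

# Hypothetical call: force re-navigation even if already on the page,
# skip the resetter, and give the view up to 30 seconds to render.
view = navigate_to(provider, 'Details', force=True, use_resetter=False, wait_for_view=30)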
Example #14
    def _check_result(result):
        # check that result contains data to catch bugs like BZ 1414845
        assert result, 'The result should not be empty'

        if success is not None:
            assert 'success' in result
            assert result['success'] is success
        elif 'success' in result and last_response:
            # expect True if 'success' is present and HTTP status is success
            assert result['success'], 'The response "success" is {}'.format(result['success'])

        # if the request succeeded and there is a 'task_id' present in the response,
        # check the corresponding resource in /api/task/:task_id
        if task_wait and 'task_id' in result and result.get('success') and last_response:
            task = rest_api.get_entity('tasks', result['task_id'])
            task.wait_exists(num_sec=5)
            wait_for(
                lambda: task.state.lower() == 'finished',
                fail_func=task.reload,
                num_sec=task_wait,
                message='task state finished',
            )
            task_message = getattr(task, 'message', '')
            assert task.status.lower() == 'ok', (
                'Task failed with status "{}", message "{}"'.format(task.status, task_message))
def test_session_timeout(request, appliance):
    """Sets the timeout to shortest possible time and waits if it really times out."""

    auth_settings = appliance.server.authentication

    @request.addfinalizer  # Wow, why did we not figure this out before?!
    def _finalize():
        quit()
        ensure_browser_open()
        auth_settings.set_session_timeout(hours="24", minutes="0")

    auth_settings.set_session_timeout(hours="0", minutes="5")
    # Wait 10 minutes
    time.sleep(10 * 60)
    # Try getting timeout
    # I had to use wait_for because on 5.4 and upstream builds it made weird errors
    wait_for(
        lambda: appliance.browser.widgetastic.selenium.find_elements_by_xpath(
            "//div[(@id='flash_div' or @id='login_div') and contains(normalize-space(.), "
            "'Session was timed out due to inactivity')]"),
        num_sec=60,
        delay=5,
        fail_func=lambda: appliance.browser.widgetastic.selenium.click(
            "//a[normalize-space(text())='Cloud Intelligence']"
        )
    )
def test_ansible_group_id_in_payload(service_catalog, service_request, service):
    """Test if group id is presented in manageiq payload.

    Bugzilla:
        1480019

    In order to get the manageiq payload, the service's standard output should be parsed.

    Polarion:
        assignee: sbulage
        casecomponent: Ansible
        caseimportance: medium
        initialEstimate: 1/6h
        tags: ansible_embed
    """
    service_catalog.order()
    service_request.wait_for_request()
    view = navigate_to(service, "Details")
    stdout = view.provisioning.standart_output
    wait_for(lambda: stdout.is_displayed, timeout=10)
    pre = stdout.text
    json_str = pre.split("--------------------------------")
    # Standard output has several sections split by --------------------------------
    # The required data is located in the 6th section
    # Then we need to replace or remove some characters to get a parsable JSON string
    result_dict = json.loads(json_str[5].replace('", "', "").replace('\\"', '"').replace(
        '\\, "', '",').split('" ] } PLAY')[0])
    assert "group" in result_dict["manageiq"]
Example #17
def create_resource(rest_api, col_name, col_data, col_action='create', substr_search=False):
    """Creates new resource in collection."""
    collection = getattr(rest_api.collections, col_name)
    try:
        action = getattr(collection.action, col_action)
    except AttributeError:
        raise OptionNotAvailable(
            "Action `{}` for {} is not implemented in this version".format(col_action, col_name))

    entities = action(*col_data)
    action_response = rest_api.response
    search_str = '%{}%' if substr_search else '{}'
    for entity in col_data:
        if entity.get('name'):
            wait_for(lambda: collection.find_by(
                name=search_str.format(entity.get('name'))) or False, num_sec=180, delay=10)
        elif entity.get('description'):
            wait_for(lambda: collection.find_by(
                description=search_str.format(entity.get('description'))) or False,
                num_sec=180, delay=10)
        else:
            raise NotImplementedError

    # make sure action response is preserved
    rest_api.response = action_response
    return entities
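
A sketch of how a test might call the helper above; the collection name and payload keys are illustrative assumptions:

# Hypothetical payload: create two categories in one POST, then wait until
# both are found by name before returning.
new_categories = [
    {'name': 'test_cat_{}'.format(i), 'description': 'Test category {}'.format(i)}
    for i in range(2)]
entities = create_resource(appliance.rest_api, 'categories', new_categories)
assert len(entities) == 2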
def import_and_check(appliance, infra_map, error_text, filetype='csv', content=False,
                     table_hover=False, alert=False):
    plan_view = migration_plan(appliance, infra_map)
    temp_file = tempfile.NamedTemporaryFile(suffix='.{}'.format(filetype))
    if content:
        with open(temp_file.name, 'w') as f:
            f.write(content)
    try:
        plan_view.vms.hidden_field.fill(temp_file.name)
    except UnexpectedAlertPresentException:
        pass
    if table_hover:
        wait_for(lambda: plan_view.vms.is_displayed,
                 timeout=60, message='Wait for VMs view', delay=5)
        if table_hover == 'duplicate':
            plan_view.vms.table[0][1].widget.click()  # widget stands for tooltip widget
        else:
            plan_view.vms.table[0][1].widget.click()
        error_msg = plan_view.vms.popover_text.read()
    else:
        if alert:
            error_msg = plan_view.browser.get_alert().text
            plan_view.browser.handle_alert()
        else:
            error_msg = plan_view.vms.error_text.text
    plan_view.cancel_btn.click()
    return bool(error_msg == error_text)
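
A hedged example of driving the helper above; the expected error text and CSV content are assumptions:

# Hypothetical: a CSV with a duplicate VM row should surface a popover error.
assert import_and_check(
    appliance, infra_map,
    error_text='Duplicate VM',
    content='vm_name\nvm1\nvm1',
    table_hover='duplicate')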
def test_custom_button_ansible_credential_list(custom_service_button, service_catalog, service,
        service_request, appliance):
    """Test if credential list matches when the Ansible Playbook Service Dialog is invoked from a
    Button versus a Service Order Screen.

    Bugzilla:
        1448918

    Polarion:
        assignee: sbulage
        casecomponent: Automate
        caseimportance: medium
        initialEstimate: 1/3h
        tags: ansible_embed
    """
    service_catalog.order()
    service_request.wait_for_request()
    view = navigate_to(service, "Details")
    view.toolbar.custom_button(custom_service_button.group.text).item_select(
        custom_service_button.text)
    credentials_dropdown = BootstrapSelect(
        appliance.browser.widgetastic,
        locator=".//select[@id='credential']/.."
    )
    wait_for(lambda: credentials_dropdown.is_displayed, timeout=30)
    all_options = [option.text for option in credentials_dropdown.all_options]
    assert ["<Default>", "CFME Default Credential"] == all_options
def templatize_vm(api, template_name, cluster, temp_vm_name, provider):
    """Templatizes temporary VM. Result is template with two disks.

    Args:
        api: API to chosen RHEVM provider.
        template_name: Name of the final template.
        cluster: Cluster to save the final template onto.
    """
    try:
        if api.templates.get(template_name) is not None:
            logger.info("RHEVM:%r Warning: found finished template with this name (%r).",
                    provider, template_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue", provider)
            return
        temporary_vm = api.vms.get(temp_vm_name)
        actual_cluster = api.clusters.get(cluster)
        new_template = params.Template(name=template_name, vm=temporary_vm, cluster=actual_cluster)
        api.templates.add(new_template)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # check, if template is really there
        if not api.templates.get(template_name):
            logger.error("RHEVM:%r templatizing temporary VM failed", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully templatized the temporary VM", provider)
    except Exception:
        logger.exception("RHEVM:%r templatizing temporary VM failed", provider)
Example #21
    def update(self, updates):
        """Update this Alert Profile in UI.

        Args:
            updates: Provided by update() context manager.
        """
        view = navigate_to(self, "Edit")
        changed = view.fill(updates)
        if changed:
            view.save_button.click()
        else:
            view.cancel_button.click()
        for attrib, value in updates.items():
            setattr(self, attrib, value)
        view = self.create_view(AlertProfileDetailsView)
        wait_for(lambda: view.is_displayed, timeout=10,
            message="wait AlertProfileDetailsView is displayed")
        view.flash.assert_no_error()
        if changed:
            view.flash.assert_message(
                'Alert Profile "{}" was saved'.format(
                    updates.get("description", self.description)))
        else:
            view.flash.assert_message(
                'Edit of Alert Profile "{}" was cancelled by the user'.format(self.description))
Example #22
def test_cluster_graph_screen(provider, cluster, host, graph_type, interval, enable_candu):
    """Test Cluster graphs for Hourly and Daily Interval

    prerequisites:
        * C&U enabled appliance

    Steps:
        * Navigate to Cluster
        * Check graph displayed or not
        * Select interval Hourly/Daily
        * Zoom graph to get Table
        * Compare table and graph data

    Polarion:
        assignee: nachandr
        caseimportance: medium
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    host.capture_historical_data()
    cluster.wait_candu_data_available(timeout=1200)

    view = navigate_to(cluster, "Utilization")
    view.options.interval.fill(interval)

    # Check whether the graph is displayed
    try:
        graph = getattr(view, graph_type)
    except AttributeError as e:
        logger.error(e)
    assert graph.is_displayed

    def refresh():
        provider.browser.refresh()
        view.options.interval.fill(interval)

    # wait; the graph sometimes takes time to load
    wait_for(lambda: len(graph.all_legends) > 0, delay=5, timeout=200, fail_func=refresh)

    # The zoom-in button is not available on normal graphs, except for Host and VM.
    # We have to use the vm or host average graph for the zoom-in operation.
    graph_zoom = ["cluster_host", "cluster_vm"]
    avg_graph = graph_type if graph_type in graph_zoom else "{}_vm_host_avg".format(graph_type)
    try:
        avg_graph = getattr(view, avg_graph)
    except AttributeError as e:
        logger.error(e)
    avg_graph.zoom_in()
    view = view.browser.create_view(UtilizationZoomView)

    # wait; the graph sometimes takes time to load
    wait_for(lambda: len(view.chart.all_legends) > 0, delay=5, timeout=300, fail_func=refresh)
    assert view.chart.is_displayed
    view.flush_widget_cache()
    legends = view.chart.all_legends
    graph_data = view.chart.all_data
    # Clear the table widget's cache before reading, else the headers will mismatch.
    view.table.clear_cache()
    table_data = view.table.read()
    compare_data(table_data=table_data, graph_data=graph_data, legends=legends)
Example #23
def test_update_ha_webui(ha_appliances_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match, func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Cause failover to occur
    result = ha_appliances_with_providers[0].ssh_client.run_command(
        'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(result.output)

    def is_failover_started():
        return ha_appliances_with_providers[2].ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log").success

    wait_for(is_failover_started, timeout=450, handle_exception=True,
             message='Waiting for HA failover')
    ha_appliances_with_providers[2].wait_for_evm_service()
    ha_appliances_with_providers[2].wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider, ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
Example #24
def test_update_scap_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers and scap hardened, also confirms that the
        provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    appliance_with_providers.appliance_console.scap_harden_appliance()
    rules_failures = appliance_with_providers.appliance_console.scap_check_rules()
    assert not rules_failures, "Some rules have failed, check log"
    update_appliance(appliance_with_providers)

    wait_for(do_appliance_versions_match, func_args=(appliance, appliance_with_providers),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Re-harden appliance and confirm rules are applied.
    rules_failures = appliance_with_providers.appliance_console.scap_check_rules()
    assert not rules_failures, "Some rules have failed, check log"
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
Example #25
def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request,
                                 old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    providers_before_upgrade = set(replicated_appliances_with_providers[0].managed_provider_names)
    update_appliance(replicated_appliances_with_providers[0])
    update_appliance(replicated_appliances_with_providers[1])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')

    # Assert providers exist after the upgrade and are replicated to the second appliance
    assert providers_before_upgrade == set(
        replicated_appliances_with_providers[1].managed_provider_names), 'Providers are missing'
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
Example #26
    def get_logging_url(self):

        def report_kibana_failure():
            raise RuntimeError("Kibana not found in the window title or content")

        browser_instance = browser()

        all_windows_before = browser_instance.window_handles
        appliance_window = browser_instance.current_window_handle

        self.monitor.item_select('External Logging')

        all_windows_after = browser_instance.window_handles

        new_windows = set(all_windows_after) - set(all_windows_before)

        if not new_windows:
            raise RuntimeError("No logging window was open!")

        logging_window = new_windows.pop()
        browser_instance.switch_to_window(logging_window)

        logging_url = browser_instance.current_url

        wait_for(lambda: "kibana" in
                         browser_instance.title.lower() + " " +
                         browser_instance.page_source.lower(),
                 fail_func=report_kibana_failure, num_sec=60, delay=5)

        browser_instance.close()
        browser_instance.switch_to_window(appliance_window)

        return logging_url
Example #27
    def validate_stats(self, ui=False):
        """ Validates that the detail page matches the Providers information.

        This method logs into the provider using the mgmt_system interface and collects
        a set of statistics to be matched against the UI. The details page is then refreshed
        continuously until the matching of all items is complete. A error will be raised
        if the match is not complete within a certain defined time period.
        """

        # If we're not using db, make sure we are on the provider detail page
        if ui:
            self.load_details()

        # Initial bullet check
        if self._do_stats_match(self.mgmt, self.STATS_TO_MATCH, ui=ui):
            self.mgmt.disconnect()
            return
        else:
            # Set off a Refresh Relationships
            method = 'ui' if ui else None
            self.refresh_provider_relationships(method=method)

            refresh_timer = RefreshTimer(time_for_refresh=300)
            wait_for(self._do_stats_match,
                     [self.mgmt, self.STATS_TO_MATCH, refresh_timer],
                     {'ui': ui},
                     message="do_stats_match_db",
                     num_sec=1000,
                     delay=60)

        self.mgmt.disconnect()
def create_image(ec2, ami_name, bucket_name):
    """
    Create the image from a given bucket+file defined by the ami_name
    :param ec2: mgmtsystem:EC2System object
    :param ami_name: name of the file in the bucket, will be used for AMI name too
    :param bucket_name: name of the s3 bucket where the image is
    :return: None
    """
    logger.info('EC2:%r: Adding image %r from bucket %r...', ec2.api.region, ami_name, bucket_name)
    import_task_id = ec2.import_image(
        s3bucket=bucket_name, s3key=ami_name, description=ami_name)

    logger.info('EC2:%r: Monitoring image task id %r...', ec2.api.region, import_task_id)
    wait_for(ec2.get_image_id_if_import_completed,
             func_args=[import_task_id],
             fail_condition=False,
             delay=5,
             timeout='90m',
             message='Importing image to EC2')

    ami_id = ec2.get_image_id_if_import_completed(import_task_id)

    logger.info("EC2:%r: Copying image to set 'name' attribute %r...", ec2.api.region, ami_name)
    ec2.copy_image(source_region=ec2.api.region.name, source_image=ami_id, image_id=ami_name)

    logger.info("EC2:%r: Removing original un-named imported image %r...", ec2.api.region, ami_id)
    ec2.deregister_image(image_id=ami_id)
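
A hedged invocation of the helper above; the bucket and file names are assumptions:

# Hypothetical: import 'cfme-image.vhd' from bucket 'cfme-images' as an AMI
# named after the file.
create_image(ec2, ami_name='cfme-image.vhd', bucket_name='cfme-images')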
Example #29
    def check_compliance_multiple_images(self, image_entities, check_on_entity=True, timeout=240):
        """Initiates compliance check and waits for it to finish on several Images.

        Args:
            image_entities: list of Image entities that need to perform compliance check on them
            check_on_entity (bool): check the compliance status on the entity summary view if True,
                                    only run compliance otherwise.
            timeout (seconds): time to wait for the compliance status
        """

        # Choose Check Compliance of Last Known Configuration
        images_view = navigate_to(self, 'All')
        self.check_image_entities(image_entities)
        wait_for(lambda: images_view.toolbar.policy.is_enabled, num_sec=5,
                 message='Policy drop down menu is disabled after checking some Images')
        images_view.toolbar.policy.item_select('Check Compliance of Last Known Configuration',
                                               handle_alert=True)
        images_view.flash.assert_no_error()

        # Verify Image summary
        if check_on_entity:
            for image_instance in image_entities:
                original_state = 'never verified'
                try:
                    wait_for(
                        lambda: image_instance.compliance_status.lower() != original_state,
                        num_sec=timeout, delay=5,
                        message='compliance state of Image ID, "{}", still matches {}'
                                .format(image_instance.id, original_state)
                    )
                except TimedOutError:
                    logger.error('compliance state of Image ID, "{}", is {}'
                                 .format(image_instance.id, image_instance.compliance_status))
                    raise TimedOutError('Timeout exceeded; waited too long for the'
                                        ' compliance check to finish ({}).'.format(timeout))
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface, temp_vm_name,
                   provider):
    """Adds second disk to a temporary VM.

    Args:
        api: API to chosen RHEVM provider.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        if len(api.vms.get(temp_vm_name).disks.list()) > 1:
            logger.info("RHEVM:%r Warning: found more than one disk in existing VM (%r).",
                    provider, temp_vm_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return
        actual_sdomain = api.storagedomains.get(sdomain)
        temp_vm = api.vms.get(temp_vm_name)
        storage_id = params.StorageDomains(storage_domain=[params.StorageDomain
            (id=actual_sdomain.get_id())])
        params_disk = params.Disk(storage_domains=storage_id, size=disk_size,
                                  interface=disk_interface, format=disk_format)
        temp_vm.disks.add(params_disk)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # check, if there are two disks
        if len(api.vms.get(temp_vm_name).disks.list()) < 2:
            logger.error("RHEVM:%r Disk failed to add", provider)
            sys.exit(127)
        logger.info("RHEVM:%r Successfully added disk", provider)
    except Exception:
        logger.exception("RHEVM:%r add_disk_to_temp_vm failed:", provider)
def test_host_drift_analysis(appliance, request, a_host, soft_assert,
                             set_host_credentials):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """

    # tabs changed, hack until configure.tasks is refactored for collections and versioned widgets
    destination = 'AllTasks' if appliance.version >= '5.9' else 'AllOtherTasks'

    # get drift history num
    view = navigate_to(a_host, 'Details')
    drift_num_orig = int(
        view.entities.summary('Relationships').get_text_of('Drift History'))

    # clear table
    col = appliance.collections.tasks.filter({'tab': destination})
    col.delete_all()

    # initiate 1st analysis
    a_host.run_smartstate_analysis(wait_for_task_result=True)

    # wait for drift history num+1
    navigate_to(a_host, 'Details')
    wait_for(lambda: (view.entities.summary('Relationships').get_text_of(
        'Drift History') == str(drift_num_orig + 1)),
             delay=20,
             num_sec=360,
             message="Waiting for Drift History count to increase",
             fail_func=appliance.server.browser.refresh)

    # add a tag and a finalizer to remove it
    added_tag = appliance.collections.categories.instantiate(
        display_name='Department').collections.tags.instantiate(
            display_name='Accounting')
    a_host.add_tag(added_tag)
    request.addfinalizer(lambda: a_host.remove_tag(added_tag))

    # initiate 2nd analysis
    a_host.run_smartstate_analysis(wait_for_task_result=True)

    # wait for drift history num+2
    navigate_to(a_host, 'Details')
    wait_for(lambda: (view.entities.summary('Relationships').get_text_of(
        'Drift History') == str(drift_num_orig + 2)),
             delay=20,
             num_sec=360,
             message="Waiting for Drift History count to increase",
             fail_func=appliance.server.browser.refresh)

    # check drift difference
    soft_assert(
        a_host.equal_drift_results(
            '{} (1)'.format(added_tag.category.display_name),
            'My Company Tags', 0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    drift_analysis_view = appliance.browser.create_view(HostDriftAnalysis)

    # Accounting tag should not be displayed, because it was changed to True
    drift_analysis_view.toolbar.same_values_attributes.click()
    soft_assert(
        not drift_analysis_view.drift_analysis.
        check_section_attribute_availability('{}'.format(
            added_tag.category.display_name)),
        "{} row should be hidden, but not".format(added_tag.display_name))

    # Accounting tag should be displayed now
    drift_analysis_view.toolbar.different_values_attributes.click()
    soft_assert(
        drift_analysis_view.drift_analysis.
        check_section_attribute_availability('{} (1)'.format(
            added_tag.category.display_name)),
        "{} row should be visible, but not".format(added_tag.display_name))
Example #32
    def go(self, _tries=0, *args, **kwargs):
        nav_args = {'use_resetter': True, 'wait_for_view': False}
        self.log_message("Beginning Navigation...", level="info")
        start_time = time.time()
        if _tries > 2:
            # Need at least three tries:
            # 1: login_admin handles an alert or CannotContinueWithNavigation appears.
            # 2: Everything should work. If not, NavigationError.
            raise exceptions.NavigationError(self._name)

        _tries += 1
        for arg in nav_args:
            if arg in kwargs:
                nav_args[arg] = kwargs.pop(arg)
        self.check_for_badness(self.pre_navigate, _tries, nav_args, *args,
                               **kwargs)
        here = False
        resetter_used = False
        waited = False
        try:
            here = self.check_for_badness(self.am_i_here, _tries, nav_args,
                                          *args, **kwargs)
        except Exception as e:
            self.log_message(
                "Exception raised [{}] whilst checking if already here".format(
                    e),
                level="error")
        if not here:
            self.log_message("Prerequisite Needed")
            self.prerequisite_view = self.prerequisite()
            try:
                self.check_for_badness(self.step, _tries, nav_args, *args,
                                       **kwargs)
            except (exceptions.CandidateNotFound,
                    exceptions.ItemNotFound) as e:
                self.log_message(
                    "Item/Tree Exception raised [{}] whilst running step, trying refresh"
                    .format(e),
                    level="error")
                self.appliance.browser.widgetastic.refresh()
                self.check_for_badness(self.step, _tries, nav_args, *args,
                                       **kwargs)
        if nav_args['use_resetter']:
            resetter_used = True
            self.check_for_badness(self.resetter, _tries, nav_args, *args,
                                   **kwargs)
        self.check_for_badness(self.post_navigate, _tries, nav_args, *args,
                               **kwargs)
        view = self.view if self.VIEW is not None else None
        duration = int((time.time() - start_time) * 1000)
        if view and nav_args['wait_for_view'] and not os.environ.get(
                'DISABLE_NAVIGATE_ASSERT', False):
            waited = True
            wait_for(lambda: view.is_displayed,
                     num_sec=10,
                     message="Waiting for view [{}] to display".format(
                         view.__class__.__name__))
        self.log_message(self.construct_message(here, resetter_used, view,
                                                duration, waited),
                         level="info")
        return view
Example #33
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('appliance',
                        help='hostname or ip address of parent appliance')
    parser.add_argument('-c',
                        action='append',
                        dest='children',
                        help='hostname or ip address of child appliance')
    args = parser.parse_args()
    print("Appliance: {}".format(args.appliance))
    if args.children:
        for child in args.children:
            print("Child: {}".format(child))

    local_key_name = "v2_key_" + fauxfactory.gen_alphanumeric(8)

    ssh_creds = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
    }

    def is_ssh_running(address):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = s.connect_ex((address, 22))
        return result == 0

    def generate_key(address):
        with SSHClient(hostname=address, **ssh_creds) as client:
            print('Connecting to Appliance...')
            status, out = client.run_command(
                'ruby /var/www/miq/vmdb/tools/fix_auth.rb --key --verbose')
            if status != 0:
                print('Creating new encryption key failed.')
                print(out)
                sys.exit(1)
            else:
                print('New encryption key created.')
                if args.children:
                    # Only copy locally if needed for child appliances
                    client.get_file('/var/www/miq/vmdb/certs/v2_key',
                                    local_key_name)

    def update_db_yaml(address):
        with SSHClient(hostname=address, **ssh_creds) as client:
            client.run_command('cd /var/www/miq/vmdb')
            status, out = client.run_rails_command(
                '\'puts MiqPassword.encrypt("smartvm");\'')
            if status != 0:
                print('Retrieving encrypted db password failed on {}'.format(
                    address))
                sys.exit(1)
            else:
                encrypted_pass = out
                status, out = client.run_command(
                    ('cd /var/www/miq/vmdb; '
                     'sed -i.`date +%m-%d-%Y` "s/password: .*/password: {}/g" config/database.yml'.format(
                         re.escape(encrypted_pass))))
                if status != 0:
                    print('Updating database.yml failed on {}'.format(address))
                    print(out)
                    sys.exit(1)
                else:
                    print('Updating database.yml succeeded on {}'.format(
                        address))

    def update_password(address):
        with SSHClient(hostname=address, **ssh_creds) as client:
            status, out = client.run_command(
                'ruby /var/www/miq/vmdb/tools/fix_auth.rb --hostname localhost --password smartvm'
            )
            if status != 0:
                print('Updating DB password failed on {}'.format(address))
                print(out)
                sys.exit(1)
            else:
                print('DB password updated on {}'.format(address))

    def put_key(address):
        print('copying key to {}'.format(address))
        with SSHClient(hostname=address, **ssh_creds) as client:
            client.put_file(local_key_name, '/var/www/miq/vmdb/certs/v2_key')

    def restart_appliance(address):
        print('Restarting evmserverd on {}'.format(address))
        with SSHClient(hostname=address, **ssh_creds) as client:
            status, out = client.run_command('systemctl restart evmserverd')
            if status != 0:
                print("Restarting evmserverd failed on {}".format(address))
                sys.exit(1)
            else:
                print("Restarting succeeded on {}".format(address))

    # make sure ssh is ready on each appliance
    wait_for(func=is_ssh_running,
             func_args=[args.appliance],
             delay=10,
             num_sec=600)

    # generate key on master appliance
    generate_key(args.appliance)
    update_db_yaml(args.appliance)

    # copy to other appliances
    if args.children:
        for child in args.children:
            wait_for(func=is_ssh_running,
                     func_args=[child],
                     delay=10,
                     num_sec=600)
            put_key(child)
            update_db_yaml(child)

    # restart master appliance (and children, if provided)
    restart_appliance(args.appliance)
    if args.children:
        for child in args.children:
            restart_appliance(child)
    print("Appliance(s) restarted with new key in place.")

    # update encrypted passwords in each database-owning appliance.

    update_password(args.appliance)
    if args.children:
        for child in args.children:
            update_password(child)

    # Restart again!
    restart_appliance(args.appliance)
    if args.children:
        for child in args.children:
            restart_appliance(child)

    print("Done!")
def test_dual_vm_migration_cancel_migration(
        request, appliance, v2v_providers, host_creds, conversion_tags,
        form_data_multiple_vm_obj_single_datastore, soft_assert,
        cancel_migration_after_percent):
    # TODO: Improve this test to cover cancel operation at various stages in migration.
    # This test will make use of migration request details page to track status of migration
    infrastructure_mapping_collection = appliance.collections.v2v_mappings
    mapping = infrastructure_mapping_collection.create(
        form_data_multiple_vm_obj_single_datastore.form_data)

    @request.addfinalizer
    def _cleanup():
        infrastructure_mapping_collection.delete(mapping)

    migration_plan_collection = appliance.collections.v2v_plans
    migration_plan = migration_plan_collection.create(
        name="plan_{}".format(fauxfactory.gen_alphanumeric()),
        description="desc_{}".format(fauxfactory.gen_alphanumeric()),
        infra_map=mapping.name,
        vm_list=form_data_multiple_vm_obj_single_datastore.vm_list,
        start_migration=True)
    # as migration is started, try to track progress using migration plan request details page
    view = appliance.browser.create_view(
        navigator.get_class(migration_plan_collection, 'All').VIEW.pick())
    wait_for(func=view.progress_card.is_plan_started,
             func_args=[migration_plan.name],
             message="migration plan is starting, be patient please",
             delay=5,
             num_sec=150,
             handle_exception=True)
    view.progress_card.select_plan(migration_plan.name)
    view = appliance.browser.create_view(
        navigator.get_class(migration_plan_collection, 'Details').VIEW)
    view.wait_displayed()
    request_details_list = view.migration_request_details_list
    vms = request_details_list.read()

    def _get_plan_status_and_cancel():
        migration_plan_in_progress_tracker = []
        for vm in vms:
            clock_reading1 = request_details_list.get_clock(vm)
            time.sleep(1)  # wait 1 sec to see if clock is ticking
            logger.info("For vm %s, current message is %s", vm,
                        request_details_list.get_message_text(vm))
            current_progress_text = request_details_list.get_progress_description(
                vm)
            if request_details_list.progress_percent(
                    vm) > cancel_migration_after_percent:
                request_details_list.cancel_migration(vm, confirmed=True)
            logger.info("For vm %s, current progress description is %s", vm,
                        current_progress_text)
            clock_reading2 = request_details_list.get_clock(vm)
            logger.info("clock_reading1: %s, clock_reading2:%s",
                        clock_reading1, clock_reading2)
            logger.info("For vm %s, is currently in progress: %s", vm,
                        request_details_list.is_in_progress(vm))
            migration_plan_in_progress_tracker.append(
                request_details_list.is_in_progress(vm)
                and (clock_reading1 < clock_reading2))
        return not any(migration_plan_in_progress_tracker)

    wait_for(func=_get_plan_status_and_cancel,
             message="migration plan is in progress,"
             "be patient please",
             delay=5,
             num_sec=3600)

    for vm in vms:
        soft_assert(request_details_list.is_cancelled(vm))
        soft_assert(
            request_details_list.progress_percent(vm) < 100.0
            or "Virtual machine migrated"
            not in request_details_list.get_message_text(vm))
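
A side note on the clock heuristic above: zero-padded HH:MM:SS readings compare correctly as plain strings, which is what makes clock_reading1 < clock_reading2 a valid "timer is still ticking" check.

assert "00:09:59" < "00:10:03"        # lexicographic order matches time order
assert not ("01:00:00" < "00:59:59")
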
Example No. 35
0
    def wait_generated(self, timeout=600):
        wait_for(self.check_status,
                 num_sec=timeout,
                 delay=5,
                 fail_condition=lambda result: result != "Complete",
                 fail_func=self.refresh)
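
A minimal, runnable sketch of the callable fail_condition used above, assuming the standalone wait_for package; polling repeats while fail_condition(result) is truthy, i.e. until the function returns "Complete" (the status source here is simulated):

import time

from wait_for import wait_for

_start = time.time()

def check_status():
    # Toy stand-in for a report's status field.
    return "Complete" if time.time() - _start > 3 else "Running"

result, duration = wait_for(check_status,
                            num_sec=10,
                            delay=1,
                            fail_condition=lambda result: result != "Complete")
print(result)  # "Complete"
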
Example No. 36
0
    def create(
        self,
        text,
        hover,
        type="Default",
        image="fa-user",
        icon_color="#000000",
        display=True,
        group=None,
        dialog=None,
        display_for=None,
        submit=None,
        playbook_cat_item=None,
        inventory=None,
        hosts=None,
        open_url=None,
        system=None,
        request=None,
        attributes=None,
        visibility=None,
        enablement=None,
        roles=None,
    ):
        self.group = group or self.parent

        view = navigate_to(self, "Add")
        view.options.fill({"type": type})
        view.fill({
            "options": {
                "text": text,
                "display": display,
                "hover": hover,
                "image": image,
                "icon_color": icon_color,
                "open_url": open_url,
                "display_for": display_for,
                "submit": submit,
                "form": {
                    "dialog": dialog,
                    "playbook_cat_item": playbook_cat_item,
                    "inventory": inventory,
                    "hosts": hosts,
                },
            }
        })

        if visibility:
            # TODO: extend visibility expression variations if needed.
            if self.group.type in EVM_TAG_OBJS:
                tag = "EVM {obj_type}.{tag}".format(obj_type=self.group.type,
                                                    tag=visibility["tag"])
            elif self.group.type in BUILD_TAG_OBJS:
                _type = "Switch" if self.group.type == "Virtual Infra Switch" else self.group.type
                tag = "{obj_type}.Build.{tag}".format(obj_type=_type,
                                                      tag=visibility["tag"])
            else:
                tag = "{obj_type}.{tag}".format(obj_type=self.group.type,
                                                tag=visibility["tag"])

            if view.advanced.visibility.define_exp.is_displayed:
                view.advanced.visibility.define_exp.click()
            view.advanced.visibility.expression.fill_tag(
                tag=tag, value=visibility["value"])

        if enablement:
            # TODO: extend enablement expression variations if needed.
            if self.group.type in EVM_TAG_OBJS:
                tag = "EVM {obj_type}.{tag}".format(obj_type=self.group.type,
                                                    tag=enablement["tag"])
            elif self.group.type in BUILD_TAG_OBJS:
                _type = "Switch" if self.group.type == "Virtual Infra Switch" else self.group.type
                tag = "{obj_type}.Build.{tag}".format(obj_type=_type,
                                                      tag=enablement["tag"])
            else:
                tag = "{obj_type}.{tag}".format(obj_type=self.group.type,
                                                tag=enablement["tag"])

            if view.advanced.enablement.define_exp.is_displayed:
                view.advanced.enablement.define_exp.click()

            view.advanced.enablement.expression.fill_tag(
                tag=tag, value=enablement["value"])
            view.advanced.enablement.disabled_text.fill("Tag - {} : {}".format(
                enablement["tag"], enablement["value"]))

        view.fill({"advanced": {"system": system, "request": request}})

        if attributes:
            view.advanced.attributes.fill(attributes)

        if roles:
            view.advanced.role_show.fill("<By Role>")
            view.advanced.roles.wait_displayed("20s")
            view.advanced.roles.fill(roles)
        else:
            view.advanced.role_show.fill("<To All>")

        try:
            # add button slow to enable?
            wait_for(lambda: not view.add_button.disabled,
                     timeout=5,
                     handle_exception=True)
        except TimedOutError:
            logger.exception(
                'Timed out waiting for add button on button group form')
            raise CFMEException(
                'Custom button group add form button did not activate')

        view.add_button.click()
        view.flash.assert_no_error()

        return self.instantiate(
            self.group,
            text=text,
            hover=hover,
            type=type,
            display=display,
            dialog=dialog,
            display_for=display_for,
            submit=submit,
            playbook_cat_item=playbook_cat_item,
            inventory=inventory,
            hosts=hosts,
            image=image,
            icon_color=icon_color,
            open_url=open_url,
            system=system,
            request=request,
            attributes=attributes,
            visibility=visibility,
            enablement=enablement,
            roles=roles,
        )
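
A hypothetical call into create() above; the group, tag, and request names are illustrative, not taken from the source:

button = group.buttons.create(
    text='btn_demo',
    hover='btn_demo',
    system='Request',
    request='InspectMe',  # assumed automate entry point
    visibility={'tag': 'My Company Tags : Department', 'value': 'Engineering'},
)
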
Example No. 37
0
    def fill_field(self, field=None, key=None, value=None):
        """ Fills the 'Field' type of form.

        Args:
            field: Name of the field to compare (Host.VMs, ...).
            key: Operation to do (=, <, >=, IS NULL, ...).
            value: Value to check against.
        """
        field_norm = field.strip().lower()
        if ("date updated" in field_norm or "date created" in field_norm
                or "boot time" in field_norm or "timestamp" in field_norm):
            no_date = False
        else:
            no_date = True
        view = self.field_form_view
        view.fill(
            dict(
                type="Field",
                field=field,
                key=key,
                value=value if no_date else None,
            ))
        # In case of advanced search box
        if view.user_input.is_displayed:
            user_input = value is None
            view.user_input.fill(user_input)
        if not no_date:
            # Flip the right part of form
            view = self.field_date_form
            if (isinstance(value, basestring)
                    and not re.match(r"^[0-9]{2}/[0-9]{2}/[0-9]{4}$", value)):
                if not view.dropdown_select.is_displayed:
                    self.click_switch_to_relative()
                view.fill({"dropdown_select": value})
                self.click_commit()
            else:
                # Specific selection
                if not view.input_select_date.is_displayed:
                    self.click_switch_to_specific()
                if (isinstance(value, tuple)
                        or isinstance(value, list)) and len(value) == 2:
                    date, time = value
                elif isinstance(value,
                                basestring):  # is in correct format mm/dd/yyyy
                    # Date only (for now)
                    date = value[:]
                    time = None
                else:
                    raise TypeError(
                        "fill_field expects a 2-tuple (date, time) or string with date"
                    )
                # TODO datetime.datetime support
                view.input_select_date.fill(date)
                # Try waiting a little bit for time field
                # If we don't wait, committing the expression will glitch
                try:
                    wait_for(lambda: view.input_select_time.is_displayed,
                             num_sec=6)
                    # It appeared, so if the time is to be set, we will set it
                    # (passing None glitches)
                    if time:
                        view.input_select_time.fill(time)
                except TimedOutError:
                    # Did not appear, ignore that
                    pass
                finally:
                    # And finally, commit the expression :)
                    self.click_commit()
        else:
            self.click_commit()
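
Hypothetical calls against fill_field() above; the field names, operators, and the expression_editor instance are illustrative:

# Plain field comparison: commits immediately.
expression_editor.fill_field("Host / Node.VMs", ">", "5")
# Specific date plus time, passed as a 2-tuple (mm/dd/yyyy, HH:MM).
expression_editor.fill_field("VM and Instance.Boot Time", "BEFORE", ("04/17/2020", "14:00"))
# A non-date string flips the form to the relative-date dropdown path.
expression_editor.fill_field("VM and Instance.Boot Time", "BEFORE", "3 Days Ago")
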
Example No. 38
0
    def wait_for_new_provider(self, timeout=1000):
        view = navigate_to(self, 'All')
        logger.info('Waiting for a provider to appear...')
        wait_for(lambda: int(view.entities.paginator.items_amount), fail_condition=0,
                 message="Wait for any provider to appear", num_sec=timeout,
                 fail_func=view.browser.refresh)
Example No. 39
0
def test_vm_retire_extend(appliance, request, create_vm, soft_assert):
    """ Tests extending a retirement using an AE method.

    Polarion:
        assignee: dgaikwad
        casecomponent: Automate
        initialEstimate: 1/3h
        setup:
            1. A running VM on any provider.
        testSteps:
            1. It creates a button pointing to ``Request/vm_retire_extend`` instance. The button
               should live in the VM and Instance button group.
            2. Then it sets a retirement date for the VM
            3. Then it waits until the retirement date is set
            4. Then it clicks the button that was created and it waits for the retirement date to
               extend.

    Bugzilla:
        1627758
    """
    num_days = 5
    soft_assert(create_vm.retirement_date == 'Never',
                "The retirement date is not 'Never'!")
    retirement_date = generate_retirement_date(delta=num_days)
    create_vm.set_retirement_date(when=retirement_date)
    wait_for(lambda: create_vm.retirement_date != 'Never',
             message="retirement date set")
    set_date = create_vm.retirement_date
    vm_retire_date_fmt = create_vm.RETIRE_DATE_FMT

    soft_assert(
        set_date == retirement_date.strftime(vm_retire_date_fmt),
        "The retirement date '{}' did not match expected date '{}'".format(
            set_date, retirement_date.strftime(vm_retire_date_fmt)))

    # Create the vm_retire_extend button and click on it
    grp_name = fauxfactory.gen_alphanumeric(start="grp_")
    grp = appliance.collections.button_groups.create(
        text=grp_name,
        hover=grp_name,
        type=appliance.collections.button_groups.VM_INSTANCE)
    request.addfinalizer(lambda: grp.delete_if_exists())
    btn_name = fauxfactory.gen_alphanumeric(start="btn_")
    button = grp.buttons.create(text=btn_name,
                                hover=btn_name,
                                system="Request",
                                request="vm_retire_extend")
    request.addfinalizer(lambda: button.delete_if_exists())

    navigate_to(create_vm, 'Details')

    class TestDropdownView(InfraVmSummaryView):
        group = Dropdown(grp.text)

    view = appliance.browser.create_view(TestDropdownView)
    view.group.item_select(button.text)

    # CFME automate vm_retire_extend method defaults to extending the date by 14 days
    extend_duration_days = 14
    extended_retirement_date = retirement_date + timedelta(
        days=extend_duration_days)

    # Check that the WebUI updates with the correct date
    wait_for(
        lambda: create_vm.retirement_date >= extended_retirement_date.strftime(
            vm_retire_date_fmt),
        num_sec=60,
        message="Check for extension of the VM retirement date by {} days".
        format(extend_duration_days))
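
The final wait boils down to plain datetime arithmetic; a minimal sketch (the display format is an assumption standing in for RETIRE_DATE_FMT):

from datetime import datetime, timedelta

retirement_date = datetime.utcnow() + timedelta(days=5)   # what set_retirement_date stored
extended = retirement_date + timedelta(days=14)           # vm_retire_extend default
print(extended.strftime('%m/%d/%y %H:%M UTC'))            # assumed display format
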
Example No. 40
0
def verify_server_compliant(provider, server):
    wait_for(lambda: server.is_compliant(),
             delay=DELAY,
             num_sec=NUM_SEC,
             message='Server {} must be Compliant'.format(server.name))
Example No. 41
0
    def create(self,
               name=None,
               display_name=None,
               location='inline',
               script=None,
               data=None,
               cancel=False,
               validate=True,
               repository=None,
               playbook=None,
               machine_credential=None,
               hosts=None,
               max_ttl=None,
               logging_output=None,
               escalate_privilege=None,
               verbosity=None,
               playbook_input_parameters=None,
               inputs=None,
               embedded_method=None):

        add_page = navigate_to(self, 'Add')

        if self.browser.product_version > '5.11' and location.islower():
            location = location.capitalize()

        add_page.fill({'location': location})
        if location.lower() == 'inline':
            add_page.fill({
                'inline_name': name,
                'inline_display_name': display_name,
                'script': script,
                'data': data,
                'inputs': inputs,
                'embedded_method': embedded_method
            })
        if location.lower() == 'playbook':
            add_page.fill({
                'playbook_name': name,
                'playbook_display_name': display_name,
                'repository': repository
            })
            wait_for(lambda: add_page.playbook.is_displayed,
                     delay=0.5,
                     num_sec=2)
            add_page.fill({
                'playbook': playbook,
                'machine_credential': machine_credential,
                'hosts': hosts,
                'max_ttl': max_ttl,
                'logging_output': logging_output,
                'escalate_privilege': escalate_privilege,
                'verbosity': verbosity,
                'playbook_input_parameters': playbook_input_parameters
            })
            validate = False
        if validate:
            add_page.validate_button.click()
            add_page.wait_displayed()
            add_page.flash.assert_no_error()
            add_page.flash.assert_message('Data validated successfully')
        if cancel:
            add_page.cancel_button.click()
            add_page.flash.assert_no_error()
            add_page.flash.assert_message(
                'Add of new Automate Method was cancelled by the user', wait=3)
            return None
        else:
            add_page.add_button.click()
            add_page.flash.assert_no_error()

            # TODO(BZ-1704439): Remove the work-around once this BZ got fixed
            if BZ(1704439).blocks:
                view = self.create_view(ClassDetailsView)
                view.flash.assert_message(
                    f'Automate Method "{name}" was added')
                self.browser.refresh()

            return self.instantiate(
                name=name,
                display_name=display_name,
                location=location,
                script=script,
                data=data,
                repository=repository,
                playbook=playbook,
                machine_credential=machine_credential,
                hosts=hosts,
                max_ttl=max_ttl,
                logging_output=logging_output,
                escalate_privilege=escalate_privilege,
                verbosity=verbosity,
                playbook_input_parameters=playbook_input_parameters,
                inputs=inputs)
def test_host_drift_analysis(request, setup_provider, provider, host,
                             soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    test_host = host_obj.Host(name=host['name'], provider=provider)

    wait_for(lambda: test_host.exists,
             delay=20,
             num_sec=120,
             fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships',
                                              'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(updates={
            'credentials':
            host_obj.get_credentials_from_config(host['credentials'])
        },
                         validate_credentials=True)

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials':
                    host_obj.Host.Credential(
                        principal="", secret="", verify_secret="")
                })

    # clear table
    view = navigate_to(Tasks, 'AllOtherTasks')
    view.delete.item_select('Delete All', handle_alert=True)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        finished = False
        view = navigate_to(Tasks, 'AllOtherTasks')
        host_analysis_row = view.tabs.allothertasks.table.row(
            task_name="SmartState Analysis for '{}'".format(test_host.name))
        if host_analysis_row.state.text == 'Finished':
            finished = True
            # select the row and delete the task
            host_analysis_row[0].check()
            view.delete.item_select('Delete', handle_alert=True)
        else:
            view.reload.click()
        return finished

    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for the drift history count to reach num + 1
    wait_for(lambda: int(test_host.get_detail('Relationships', 'Drift History')
                         ) == drift_num_orig + 1,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)

    # add a tag and a finalizer to remove it
    test_host.add_tag(category='Department', tag='Accounting')
    request.addfinalizer(
        lambda: test_host.remove_tag(category='Department', tag='Accounting'))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for the drift history count to reach num + 2
    wait_for(lambda: int(test_host.get_detail('Relationships', 'Drift History')
                         ) == drift_num_orig + 2,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)

    # check drift difference
    soft_assert(
        not test_host.equal_drift_results('Department (1)', 'My Company Tags',
                                          0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because its value changed between the analyses
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
Example No. 43
0
    def wait_for_results_to_load(self):
        wait_for(lambda: bool(int(self.filter_result_header.text.split()[0])),
                 delay=5,
                 num_sec=60)
Example No. 44
0
def test_service_retirement_from_automate_method(request, generic_catalog_item,
                                                 custom_instance):
    """
    Bugzilla:
        1700524
        1753669

    Polarion:
        assignee: dgaikwad
        initialEstimate: 1/8h
        caseposneg: positive
        startsin: 5.11
        casecomponent: Automate
        testSteps:
            1. Create service catalog item and order
            2. Create a writeable domain and copy ManageIQ/System/Request to this domain
            3. Create retire_automation_service instance and set meth5 to retire_automation_service.
            4. Create retire_automation_service method with sample code given below:
               > service = $evm.root['service']
               > $evm.log(:info, "create_retire_request for  service #{service}")
               > request = $evm.execute(:create_retire_request, service)
               > $evm.log(:info, "Create request for create_retire_request #{request}")
            5. Execute this method using simulation
        expectedResults:
            1. Service provision request should be provisioned successfully
            2.
            3.
            4.
            5. Service should be retired successfully
    """
    # Order the catalog item; delete the request once the service request reaches 'finished'
    service_request = generic_catalog_item.appliance.rest_api.collections.service_templates.get(
        name=generic_catalog_item.name).action.order()
    request.addfinalizer(lambda: service_request.action.delete())
    wait_for(lambda: service_request.request_state == "finished",
             fail_func=service_request.reload,
             timeout=180,
             delay=10)

    # Ruby code to execute create_retire_request
    script = dedent("""
        service = $evm.root['service']
        $evm.log(:info, 'create_retire_request for service #{service}')
        request = $evm.execute(:create_retire_request, service)
        $evm.log(:info, 'Create request for create_retire_request #{request}')
        """)
    instance = custom_instance(ruby_code=script)
    with LogValidator("/var/www/miq/vmdb/log/automation.log",
                      matched_patterns=[
                          '.*Create request for create_retire_request.*'
                      ]).waiting(timeout=120):

        # Executing automate method
        simulate(
            appliance=generic_catalog_item.appliance,
            target_type="Service",
            target_object=f"{generic_catalog_item.name}",
            message="create",
            request=f"{instance.name}",
            execute_methods=True,
        )

    retire_request = generic_catalog_item.appliance.rest_api.collections.requests.get(
        description=f"Service Retire for: {generic_catalog_item.name}")
    wait_for(lambda: retire_request.request_state == "finished",
             fail_func=retire_request.reload,
             timeout=180,
             delay=10)
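
Both waits above share one pattern: poll a REST request record, re-fetching it between checks. A small helper sketch under that assumption (req stands in for any rest_api request entity):

from wait_for import wait_for

def wait_until_finished(req, timeout=180):
    # fail_func re-fetches the record so request_state is fresh on every poll.
    wait_for(lambda: req.request_state == "finished",
             fail_func=req.reload,
             timeout=timeout,
             delay=10)
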
Example No. 45
0
    def vm_default_args_rest(self):
        """Represents dictionary used for REST API Instance provision with minimum required default
        args
        """
        from cfme.cloud.provider.azure import AzureProvider
        from cfme.cloud.provider.ec2 import EC2Provider

        if not self.provider.is_refreshed():
            self.provider.refresh_provider_relationships()
            wait_for(self.provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
        provisioning = self.provider.data['provisioning']

        provider_rest = self.appliance.rest_api.collections.providers.get(name=self.provider.name)

        # find out image guid
        image_name = provisioning['image']['name']
        image = self.appliance.rest_api.collections.templates.get(name=image_name,
                                                                  ems_id=provider_rest.id)
        # find out flavor
        if ':' in provisioning['instance_type'] and self.provider.one_of(EC2Provider):
            instance_type = provisioning['instance_type'].split(':')[0].strip()
        else:
            instance_type = provisioning['instance_type']
        flavor = self.appliance.rest_api.collections.flavors.get(name=instance_type,
                                                                 ems_id=provider_rest.id)
        # find out cloud network
        cloud_network_name = provisioning.get('cloud_network').strip()
        if self.provider.one_of(EC2Provider, AzureProvider):
            cloud_network_name = cloud_network_name.split()[0]
        cloud_network = self.appliance.rest_api.collections.cloud_networks.get(
            name=cloud_network_name, enabled='true')
        # find out cloud subnet
        cloud_subnet = self.appliance.rest_api.collections.cloud_subnets.get(
            cloud_network_id=cloud_network['id'])
        # find out availability zone
        azone_id = None
        av_zone_name = provisioning.get('availability_zone')
        if av_zone_name:
            azone_id = self.appliance.rest_api.collections.availability_zones.get(
                name=av_zone_name, ems_id=flavor.ems_id).id
        # find out cloud tenant
        tenant_name = provisioning.get('cloud_tenant')
        if tenant_name:
            try:
                tenant = self.appliance.rest_api.collections.cloud_tenants.get(
                    name=tenant_name,
                    ems_id=provider_rest.id,
                    enabled='true')
            except IndexError:
                raise ItemNotFound("Tenant {} not found on provider {}".format(
                    tenant_name, self.provider.name))

        resource_group_id = None
        if self.provider.one_of(AzureProvider):
            resource_groups = self.appliance.rest_api.get(
                '{}?attributes=resource_groups'.format(provider_rest._href))['resource_groups']
            resource_group_name = provisioning.get('resource_group')
            for res_group in resource_groups:
                if (res_group['name'] == resource_group_name and
                        res_group['ems_id'] == provider_rest.id):
                    resource_group_id = res_group['id']
                    break

        inst_args = {
            "version": "1.1",
            "template_fields": {
                "guid": image.guid
            },
            "vm_fields": {
                "placement_auto": False,
                "vm_name": self.name,
                "instance_type": flavor['id'],
                "request_type": "template",
                "cloud_network": cloud_network['id'],
                "cloud_subnet": cloud_subnet['id'],

            },
            "requester": {
                "user_name": "admin",
                "owner_email": "*****@*****.**",
                "auto_approve": True,
            },
            "tags": {
            },
            "ems_custom_attributes": {
            },
            "miq_custom_attributes": {
            }
        }
        if tenant_name:
            inst_args['vm_fields']['cloud_tenant'] = tenant['id']
        if resource_group_id:
            inst_args['vm_fields']['resource_group'] = resource_group_id
        if azone_id:
            inst_args['vm_fields']['placement_availability_zone'] = azone_id
        if self.provider.one_of(EC2Provider):
            inst_args['vm_fields']['monitoring'] = 'basic'

        return inst_args
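
A hedged sketch of consuming the payload built above; the provision_requests endpoint and the list-shaped return value follow the usual CFME REST conventions but should be treated as assumptions:

payload = instance.vm_default_args_rest  # may be a property or a method() in the real model
response = appliance.rest_api.collections.provision_requests.action.create(**payload)
provision_request = response[0]  # assumed: create returns the new request records
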
Example No. 46
0
    def create(self, vm_name, provider, form_values=None, cancel=False, check_existing=False,
               find_in_cfme=False, wait=True, request_description=None, auto_approve=False,
               override=False):
        """Provisions an vm/instance with the given properties through CFME

        Args:
            vm_name: the vm/instance's name
            provider: provider object
            form_values: dictionary of form values for provisioning, structured into tabs
            cancel: boolean, whether or not to cancel form filling
            check_existing: verify if such vm_name exists
            find_in_cfme: verify that vm was created and appeared in CFME
            wait: wait for vm provision request end
            request_description: request description that test needs to search in request table.
            auto_approve: if true the request is approved before waiting for completion.
            override: To override any failure related exception

        Note:
            Calling create on a sub-class of instance will generate the properly formatted
            dictionary when the correct fields are supplied.
        """
        vm = self.instantiate(vm_name, provider)
        if check_existing and vm.exists:
            return vm
        if not provider.is_refreshed():
            provider.refresh_provider_relationships()
            wait_for(provider.is_refreshed, func_kwargs={'refresh_delta': 10}, timeout=600)
        if not form_values:
            form_values = vm.vm_default_args
        else:
            inst_args = vm.vm_default_args
            form_values = recursive_update(inst_args, form_values)
        env = form_values.get('environment') or {}
        if env.get('automatic_placement'):
            form_values['environment'] = {'automatic_placement': True}
        form_values.update({'provider_name': provider.name})
        if not form_values.get('template_name'):
            template_name = (provider.data.get('provisioning').get('image', {}).get('name') or
                             provider.data.get('provisioning').get('template'))
            vm.template_name = template_name
            form_values.update({'template_name': template_name})
        view = navigate_to(self, 'Provision')
        view.form.fill(form_values)

        if cancel:
            view.form.cancel_button.click()
            view = self.browser.create_view(BaseLoggedInPage)
            view.flash.assert_success_message(self.ENTITY.PROVISION_CANCEL)
            view.flash.assert_no_error()
        else:
            view.form.submit_button.click()

            view = vm.appliance.browser.create_view(RequestsView)
            # This flash message is not flashed in 5.10 (BZ 1608967).
            if not BZ(1608967, forced_streams=['5.10']).blocks:
                wait_for(lambda: view.flash.messages, fail_condition=[], timeout=10, delay=2,
                         message='wait for Flash Success')
            view.flash.assert_no_error()
            if wait:
                if request_description is None:
                    request_description = 'Provision from [{}] to [{}]'.format(
                        form_values.get('template_name'), vm.name)
                provision_request = vm.appliance.collections.requests.instantiate(
                    request_description)
                logger.info('Waiting for cfme provision request for vm %s', vm.name)
                if auto_approve:
                    provision_request.approve_request(method='ui', reason="Approved")
                provision_request.wait_for_request(method='ui', num_sec=1200)
                if provision_request.is_succeeded(method='ui'):
                    logger.info('Waiting for vm %s to appear on provider %s', vm.name,
                                provider.key)
                    wait_for(provider.mgmt.does_vm_exist, [vm.name],
                             handle_exception=True, num_sec=600)
                elif override:
                    logger.info('Overriding exception to check failure condition.')
                else:
                    raise Exception(
                        "Provisioning vm {} failed with: {}"
                        .format(vm.name, provision_request.row.last_message.text)
                    )
        if find_in_cfme:
            vm.wait_to_appear(timeout=800)

        return vm
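
A hypothetical invocation of the collection method above (the collection name and VM name are illustrative):

vm = appliance.collections.infra_vms.create(
    'test-prov-0001',
    provider,
    auto_approve=True,   # approve the request instead of waiting on an operator
    find_in_cfme=True,   # additionally wait for the VM to appear in CFME
)
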
Example No. 47
0
def test_vmware_vimapi_hotadd_disk(appliance, request, testing_group, provider,
                                   testing_vm, domain, cls):
    """ Tests hot adding a disk to vmware vm.

    This test exercises the ``VMware_HotAdd_Disk`` method, located in ``/Integration/VMware/VimApi``

    Steps:
        * It creates an instance in ``System/Request`` that can be accessible from eg. a button.
        * Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``. The
            button shall belong in the VM and instance button group.
        * After the button is created, it goes to a VM's summary page, clicks the button.
        * The test waits until the capacity of disks is raised.

    Metadata:
        test_flag: hotdisk, provision

    Polarion:
        assignee: dmisharo
        initialEstimate: 1/8h
    """
    meth = cls.methods.create(
        name='load_value_{}'.format(fauxfactory.gen_alpha()),
        script=dedent('''\
            # Sets the capacity of the new disk.

            $evm.root['size'] = 1  # GB
            exit MIQ_OK
            '''))

    request.addfinalizer(meth.delete_if_exists)

    # Instance that calls the method and is accessible from the button
    instance = cls.instances.create(
        name="VMware_HotAdd_Disk_{}".format(fauxfactory.gen_alpha()),
        fields={
            "meth4": {
                'value': meth.name
            },  # To get the value
            "rel5": {
                'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"
            },
        },
    )

    request.addfinalizer(instance.delete_if_exists)

    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = testing_group.buttons.create(text=button_name,
                                          hover=button_name,
                                          system="Request",
                                          request=instance.name)
    request.addfinalizer(button.delete_if_exists)

    def _get_disk_capacity():
        view = testing_vm.load_details(refresh=True)
        return view.entities.summary(
            'Datastore Allocation Summary').get_text_of('Total Allocation')

    original_disk_capacity = _get_disk_capacity()
    logger.info('Initial disk allocation: %s', original_disk_capacity)

    class CustomButtonView(View):
        custom_button = Dropdown(testing_group.text)

    view = appliance.browser.create_view(CustomButtonView)
    view.custom_button.item_select(button.text)

    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_no_error()
    try:
        wait_for(lambda: _get_disk_capacity() > original_disk_capacity,
                 num_sec=180,
                 delay=5)
    finally:
        logger.info('End disk capacity: %s', _get_disk_capacity())
Example No. 48
0
    def wait_for_filter_option_to_load(self):
        wait_for(lambda: bool(self.filter_dropdown.items), delay=5, num_sec=60)
Example No. 49
0
    def update(self, updates):
        machine_credential_fill_dict = {
            "username": updates.get("username"),
            "password": updates.get("password"),
            "private_key": updates.get("private_key"),
            "private_key_phrase": updates.get("private_key_phrase"),
            "privilage_escalation": updates.get("privilage_escalation"),
            "privilage_escalation_username": updates.get("privilage_escalation_username"),
            "privilage_escalation_password": updates.get("privilage_escalation_password"),
            "vault_password": updates.get("vault_password")
        }
        scm_credential_fill_dict = {
            "username": updates.get("username"),
            "password": updates.get("password"),
            "private_key": updates.get("private_key"),
            "private_key_phrase": updates.get("private_key_phrase")
        }
        amazon_credential_fill_dict = {
            "access_key": updates.get("access_key"),
            "secret_key": updates.get("secret_key"),
            "sts_token": updates.get("sts_token"),
        }
        vmware_credential_fill_dict = {
            "username": updates.get("username"),
            "password": updates.get("password"),
            "vcenter_host": updates.get("vcenter_host")
        }
        openstack_credential_fill_dict = {
            "username": updates.get("username"),
            "password": updates.get("password"),
            "authentication_url": updates.get("authentication_url"),
            "project": updates.get("project"),
            "domain": updates.get("domain")
        }
        credential_type_map = {
            "Machine": machine_credential_fill_dict,
            "Scm": scm_credential_fill_dict,
            "Amazon": amazon_credential_fill_dict,
            "VMware": vmware_credential_fill_dict,
            "OpenStack": openstack_credential_fill_dict
        }
        edit_page = navigate_to(self, "Edit")
        changed = edit_page.fill({"name": updates.get("name")})
        form_changed = edit_page.credential_form.fill(
            credential_type_map[self.credential_type])
        if changed or form_changed:
            edit_page.save_button.click()
        else:
            edit_page.cancel_button.click()
        view = self.create_view(CredentialsListView)
        # No-op wait used as a fixed pause so the list view settles and a
        # StaleElementReferenceException is avoided.
        wait_for(lambda: False, silent_failure=True, timeout=5)
        assert view.is_displayed
        view.flash.assert_no_error()
        if changed or form_changed:
            view.flash.assert_message(
                'Modification of Credential "{}" has been successfully queued.'
                .format(updates.get("name", self.name)))
        else:
            view.flash.assert_message(
                'Edit of Credential "{}" was canceled by the user.'.format(
                    self.name))
Example No. 50
0
    def wait_for_running(self, timeout=600):
        result, wait = wait_for(lambda: self.running,
                                num_sec=timeout,
                                fail_condition=False,
                                delay=10)
        return result
Example No. 51
0
    def create(self,
               cancel=False,
               validate_credentials=True,
               validate=True,
               force=False):
        """Creates the manager through UI

        Args:
            cancel (bool): Whether to cancel out of the creation.  The cancel is done
                after all the information present in the manager has been filled in the UI.
            validate_credentials (bool): Whether to validate credentials - if True and the
                credentials are invalid, an error will be raised.
            validate (bool): Whether we want to wait for the manager's data to load
                and show up in its detail page. True will also wait, False will only set it up.
            force (bool): Whether to force the creation even if the manager already exists.
                True will try anyway; False will check for its existence and leave, if present.
        """
        def config_profiles_loaded():
            # Workaround - without this, validation of provider failed
            config_profiles_names = [
                prof.name for prof in self.config_profiles
            ]
            logger.info("UI: %s\nYAML: %s", set(config_profiles_names),
                        set(self.yaml_data['config_profiles']))
            return all([
                cp in config_profiles_names
                for cp in self.yaml_data['config_profiles']
            ])

        if not force and self.exists:
            return
        form_dict = self.__dict__
        form_dict.update(self.credentials.view_value_mapping)
        if self.appliance.version < '5.8':
            form_dict['provider_type'] = self.type
        view = navigate_to(self, 'Add')
        view.entities.form.fill(form_dict)
        if validate_credentials:
            view.entities.form.validate.click()
            view.flash.assert_success_message(
                'Credential validation was successful')
        if cancel:
            view.entities.cancel.click()
            view.flash.assert_success_message(
                'Add of Provider was cancelled by the user')
        else:
            view.entities.add.click()
            success_message = '{} Provider "{}" was added'.format(
                self.type, self.name)
            view.flash.assert_success_message(success_message)
            view.flash.assert_success_message(self.refresh_flash_msg)
            if validate:
                try:
                    self.yaml_data['config_profiles']
                except KeyError as e:
                    logger.exception(e)
                    raise

                wait_for(config_profiles_loaded,
                         fail_func=self.refresh_relationships,
                         handle_exception=True,
                         num_sec=180,
                         delay=30)
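
The all(...) comprehension in config_profiles_loaded() is simply a subset test; a runnable toy equivalent:

ui_profiles = {'profile-a', 'profile-b', 'profile-c'}  # names read from the UI
expected = {'profile-a', 'profile-b'}                  # names from the YAML data
assert expected <= ui_profiles  # same truth value as the all(...) comprehension
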
Example No. 52
0
    def create(self, name, credential_type, **credentials):
        add_page = navigate_to(self, "Add")
        machine_credential_fill_dict = {
            "username": credentials.get("username"),
            "password": credentials.get("password"),
            "private_key": credentials.get("private_key"),
            "private_key_phrase": credentials.get("private_key_phrase"),
            "privilage_escalation": credentials.get("privilage_escalation"),
            "privilage_escalation_username": credentials.get("privilage_escalation_username"),
            "privilage_escalation_password": credentials.get("privilage_escalation_password"),
            "vault_password": credentials.get("vault_password")
        }
        scm_credential_fill_dict = {
            "username": credentials.get("username"),
            "password": credentials.get("password"),
            "private_key": credentials.get("private_key"),
            "private_key_phrase": credentials.get("private_key_phrase")
        }
        amazon_credential_fill_dict = {
            "access_key": credentials.get("access_key"),
            "secret_key": credentials.get("secret_key"),
            "sts_token": credentials.get("sts_token"),
        }
        vmware_credential_fill_dict = {
            "username": credentials.get("username"),
            "password": credentials.get("password"),
            "vcenter_host": credentials.get("vcenter_host")
        }
        openstack_credential_fill_dict = {
            "username": credentials.get("username"),
            "password": credentials.get("password"),
            "authentication_url": credentials.get("authentication_url"),
            "project": credentials.get("project"),
            "domain": credentials.get("domain")
        }
        credential_type_map = {
            "Machine": machine_credential_fill_dict,
            "Scm": scm_credential_fill_dict,
            "Amazon": amazon_credential_fill_dict,
            "VMware": vmware_credential_fill_dict,
            "OpenStack": openstack_credential_fill_dict,
        }

        add_page.fill({"name": name, "credential_type": credential_type})
        add_page.credential_form.fill(credential_type_map[credential_type])
        add_page.add_button.click()
        credentials_list_page = self.create_view(CredentialsListView)
        # Without this, a StaleElementReferenceException can be raised
        wait_for(lambda: False, silent_failure=True, timeout=5)
        assert credentials_list_page.is_displayed
        credentials_list_page.flash.assert_success_message(
            'Add of Credential "{}" has been successfully queued.'.format(
                name))

        credential = self.instantiate(name, credential_type, **credentials)

        wait_for(lambda: credential.exists,
                 fail_func=credentials_list_page.browser.selenium.refresh,
                 delay=5,
                 timeout=300)

        return credential
def test_gap_collection(appliance, provider, element, graph_type, order_data):
    """ Test gap collection data

    prerequisites:
        * C&U enabled appliance

    Steps:
        * Navigate to Configuration > Diagnostics > Zone Gap Collection Page
        * Order old data
        * Navigate to VM or Host Utilization page
        * Check for Hourly data
        * Check for Daily data

    Polarion:
        assignee: nachandr
        casecomponent: CandU
        caseimportance: medium
        initialEstimate: 1/4h
    """
    if element == 'host':
        collection = appliance.collections.hosts
        for test_host in provider.data['hosts']:
            if not test_host.get('test_fleece', False):
                continue
            element = collection.instantiate(name=test_host.name,
                                             provider=provider)
    elif element == 'vm':
        collection = appliance.provider_based_collection(provider)
        element = collection.instantiate('cu-24x7', provider)

    date = datetime.now() - timedelta(days=1)
    element.wait_candu_data_available(timeout=1200)

    view = navigate_to(element, 'candu')
    view.options.interval.fill(graph_type.capitalize())
    try:
        graph = getattr(view, 'vm_cpu')
    except AttributeError:
        graph = getattr(view.interval_type, 'host_cpu')
    assert graph.is_displayed

    def refresh():
        provider.browser.refresh()
        view = navigate_to(element, 'candu')
        view.options.interval.fill(graph_type.capitalize())

    # wait; the graph can take some time to load
    wait_for(lambda: len(graph.all_legends) > 0,
             delay=5,
             timeout=600,
             fail_func=refresh)

    # check collected data for cpu graph
    view.options.calendar.fill(date)
    graph_data = 0
    for leg in graph.all_legends:
        graph.display_legends(leg)
        for data in graph.data_for_legends(leg).values():
            graph_data += float(data[leg].replace(',',
                                                  '').replace('%',
                                                              '').split()[0])
    assert graph_data > 0
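
The legend parsing above strips thousands separators and percent signs before converting; a runnable toy with a made-up reading:

raw = "1,234.5 % (avg)"  # illustrative legend value
value = float(raw.replace(',', '').replace('%', '').split()[0])
assert value == 1234.5
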
Example No. 54
0
def test_migration_playbooks(request, appliance, v2v_providers, host_creds, conversion_tags,
                             ansible_repository, form_data_vm_obj_single_datastore):
    """Test for migrating vms with pre and post playbooks"""
    creds = credentials[v2v_providers.vmware_provider.data.templates.get("rhel7_minimal").creds]
    CREDENTIALS = (
        "Machine",
        {
            "username": creds.username,
            "password": creds.password,
            "privilage_escalation": "sudo",
        },
    )
    credential = appliance.collections.ansible_credentials.create(
        name="{type}_credential_{cred}".format(type=CREDENTIALS[0], cred=fauxfactory.gen_alpha()),
        credential_type=CREDENTIALS[0],
        **CREDENTIALS[1]
    )

    provision_catalog = catalog_item(
        request, appliance, credential.name, ansible_repository, "provision"
    )
    retire_catalog = catalog_item(
        request, appliance, credential.name, ansible_repository, "retire"
    )

    infrastructure_mapping_collection = appliance.collections.v2v_mappings
    mapping = infrastructure_mapping_collection.create(
        form_data_vm_obj_single_datastore.form_data
    )

    @request.addfinalizer
    def _cleanup():
        infrastructure_mapping_collection.delete(mapping)

    # vm_obj is a list, with only 1 VM object, hence [0]
    src_vm_obj = form_data_vm_obj_single_datastore.vm_list[0]

    migration_plan_collection = appliance.collections.v2v_plans
    migration_plan = migration_plan_collection.create(
        name="plan_{}".format(fauxfactory.gen_alphanumeric()),
        description="desc_{}".format(fauxfactory.gen_alphanumeric()),
        infra_map=mapping.name,
        vm_list=form_data_vm_obj_single_datastore.vm_list,
        start_migration=True,
        pre_playbook=provision_catalog.name,
        post_playbook=retire_catalog.name,
    )

    # explicit wait for spinner of in-progress status card
    view = appliance.browser.create_view(
        navigator.get_class(migration_plan_collection, "All").VIEW.pick()
    )
    wait_for(
        func=view.progress_card.is_plan_started,
        func_args=[migration_plan.name],
        message="migration plan is starting, be patient please",
        delay=5,
        num_sec=280,
        handle_exception=True,
        fail_condition=False
    )

    # wait until plan is in progress
    wait_for(
        func=view.plan_in_progress,
        func_args=[migration_plan.name],
        message="migration plan is in progress, be patient please",
        delay=15,
        num_sec=3600,
    )
    view.switch_to("Completed Plans")
    view.wait_displayed()
    migration_plan_collection.find_completed_plan(migration_plan)
    logger.info(
        "For plan %s, migration status after completion: %s, total time elapsed: %s",
        migration_plan.name,
        view.migration_plans_completed_list.get_vm_count_in_plan(migration_plan.name),
        view.migration_plans_completed_list.get_clock(migration_plan.name),
    )

    # validate MAC address matches between source and target VMs
    assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name)
    migrated_vm = get_migrated_vm_obj(src_vm_obj, v2v_providers.rhv_provider)
    assert src_vm_obj.mac_address == migrated_vm.mac_address
Example No. 55
0
def small_test_vm(setup_provider, provider, small_template, request):
    vm = provision_vm(provider, small_template)
    yield vm
    # Retry the provider-side cleanup until it completes without raising.
    wait_for(lambda: vm.cleanup_on_provider() or True,
             handle_exception=True,
             timeout=900)
Example No. 56
0
def resource_usage(vm_ownership, appliance, provider):
    # Retrieve resource usage values from metric_rollups table.
    average_cpu_used_in_mhz = 0
    average_memory_used_in_mb = 0
    average_network_io = 0
    average_disk_io = 0
    average_storage_used = 0
    consumed_hours = 0
    vm_name = provider.data['cap_and_util']['chargeback_vm']

    metrics = appliance.db.client['metrics']
    rollups = appliance.db.client['metric_rollups']
    ems = appliance.db.client['ext_management_systems']
    logger.info('Deleting METRICS DATA from metrics and metric_rollups tables')

    appliance.db.client.session.query(metrics).delete()
    appliance.db.client.session.query(rollups).delete()

    # Chargeback reporting is done on hourly and daily rollup values, not real-time
    # values. So we capture C&U data and force hourly rollups by running these
    # commands through the Rails console.

    def verify_records_metrics_table(appliance, provider):
        # Verify that rollups are present in the metric_rollups table.
        vm_name = provider.data['cap_and_util']['chargeback_vm']

        ems = appliance.db.client['ext_management_systems']
        metrics = appliance.db.client['metrics']

        result = appliance.ssh_client.run_rails_command(
            "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
            vm.perf_capture('realtime', 1.hour.ago.utc, Time.now.utc)\""
            .format(provider.id, repr(vm_name)))
        assert result.success, "Failed to capture VM C&U data:".format(result.output)

        with appliance.db.client.transaction:
            result = (
                appliance.db.client.session.query(metrics.id)
                .join(ems, metrics.parent_ems_id == ems.id)
                .filter(metrics.capture_interval_name == 'realtime',
                metrics.resource_name == vm_name,
                ems.name == provider.name, metrics.timestamp >= date.today())
            )

        for record in appliance.db.client.session.query(metrics).filter(
                metrics.id.in_(result.subquery())):
            if (record.cpu_usagemhz_rate_average or
               record.cpu_usage_rate_average or
               record.derived_memory_used or
               record.net_usage_rate_average or
               record.disk_usage_rate_average):
                return True
        return False

    wait_for(verify_records_metrics_table, [appliance, provider], timeout=600,
        fail_condition=False, message='Waiting for VM real-time data')

    # New C&U data may sneak in since 1)C&U server roles are running and 2)collection for clusters
    # and hosts is on.This would mess up our Chargeback calculations, so we are disabling C&U
    # collection after data has been fetched for the last hour.

    appliance.server.settings.disable_server_roles(
        'ems_metrics_coordinator', 'ems_metrics_collector')
    result = appliance.ssh_client.run_rails_command(
        "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
        vm.perf_rollup_range(1.hour.ago.utc, Time.now.utc,'realtime')\"".
        format(provider.id, repr(vm_name)))
    assert result.success, "Failed to rollup VM C&U data:".format(result.output)

    wait_for(verify_records_rollups_table, [appliance, provider], timeout=600, fail_condition=False,
        message='Waiting for hourly rollups')

    # Since we are collecting C&U data for > 1 hour, there will be multiple hourly records
    # per VM in the metric_rollups DB table. The values from these hourly records are summed up.

    with appliance.db.client.transaction:
        result = (
            appliance.db.client.session.query(rollups.id)
            .join(ems, rollups.parent_ems_id == ems.id)
            .filter(rollups.capture_interval_name == 'hourly', rollups.resource_name == vm_name,
            ems.name == provider.name, rollups.timestamp >= date.today())
        )

    for record in appliance.db.client.session.query(rollups).filter(
            rollups.id.in_(result.subquery())):
        consumed_hours = consumed_hours + 1
        if (record.cpu_usagemhz_rate_average or
           record.cpu_usage_rate_average or
           record.derived_memory_used or
           record.net_usage_rate_average or
           record.disk_usage_rate_average):
            average_cpu_used_in_mhz = average_cpu_used_in_mhz + record.cpu_usagemhz_rate_average
            average_memory_used_in_mb = average_memory_used_in_mb + record.derived_memory_used
            average_network_io = average_network_io + record.net_usage_rate_average
            average_disk_io = average_disk_io + record.disk_usage_rate_average

    for record in appliance.db.client.session.query(rollups).filter(
            rollups.id.in_(result.subquery())):
        if record.derived_vm_used_disk_storage:
            average_storage_used = average_storage_used + record.derived_vm_used_disk_storage

    # Convert storage used in bytes to GB
    average_storage_used = average_storage_used * math.pow(2, -30)

    return {"average_cpu_used_in_mhz": average_cpu_used_in_mhz,
            "average_memory_used_in_mb": average_memory_used_in_mb,
            "average_network_io": average_network_io,
            "average_disk_io": average_disk_io,
            "average_storage_used": average_storage_used,
            "consumed_hours": consumed_hours}
Example No. 57
0
    def create(self,
               name,
               infra_map,
               vm_list,
               description=None,
               csv_import=False,
               start_migration=False):
        """Create new migration plan in UI
        Args:
            name: (string) plan name
            description: (string) plan description
            infra_map: (object) infra map object name
            vm_list: (list) list of vm objects
            csv_import: (bool) flag for importing vms
            start_migration: (bool) flag for start migration
        """
        view = navigate_to(self, 'Add')
        view.general.fill({
            'infra_map': infra_map,
            'name': name,
            'description': description
        })

        if csv_import:
            view.general.select_vm.select(
                "Import a CSV file with a list of VMs to be migrated")
            view.next_btn.click()
            temp_file = tempfile.NamedTemporaryFile(suffix='.csv')
            with open(temp_file.name, 'w') as file:
                headers = ['Name', 'Provider']
                writer = csv.DictWriter(file, fieldnames=headers)
                writer.writeheader()
                for vm in vm_list:
                    writer.writerow({
                        'Name': vm.name,
                        'Provider': vm.provider.name
                    })
            view.vms.hidden_field.fill(temp_file.name)
        else:
            view.next_btn.click()

        wait_for(lambda: view.vms.table.is_displayed,
                 timeout=60,
                 message='Wait for VMs view',
                 delay=2)
        for vm in vm_list:
            view.vms.filter_by_name(vm.name)
            for row in view.vms.table.rows():
                if vm.name in row.vm_name.read():
                    row[0].fill(True)
            view.vms.clear_filters.click()
        view.next_btn.click()
        view.next_btn.click()

        if start_migration:
            view.options.run_migration.select("Start migration immediately")
        view.options.create.click()
        wait_for(lambda: view.results.msg.is_displayed,
                 timeout=60,
                 message='Wait for Results view')

        base_flash = "Migration Plan: '{}'".format(name)
        if start_migration:
            base_flash = "{} is in progress".format(base_flash)
        else:
            base_flash = "{} has been saved".format(base_flash)
        assert view.results.msg.text == base_flash
        view.results.close.click()
        return self.instantiate(name)
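For context, a call to this method could look like the sketch below; migration_plan_collection, infra_map and vm_obj are placeholder names rather than identifiers from the source:

# Hypothetical usage; the collection, mapping and VM objects are placeholders.
plan = migration_plan_collection.create(
    name='migration_plan_01',
    infra_map=infra_map.name,
    vm_list=[vm_obj],
    description='plan created from an automated test',
    csv_import=True,        # build the VM list from a generated CSV file
    start_migration=False)  # save the plan without starting the migration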
Example #58
0
def test_create_snapshot_via_ae(appliance, request, domain, small_test_vm):
    """This test checks whether the vm.create_snapshot works in AE.

    Prerequisites:
        * A VMware provider
        * A VM that has been discovered by CFME

    Steps:
        * Clone the Request class inside the System namespace into a new domain
        * Add a method named ``snapshot`` and insert the provided code there.
        * Add an instance named ``snapshot`` and set the method from the previous
            step as ``meth5``
        * Run the simulation of the method against the VM, preferably setting
            ``snap_name`` to something that can be checked
        * Wait until snapshot with such name appears.

    Polarion:
        assignee: apagac
        casecomponent: Infra
        caseimportance: medium
        initialEstimate: 1/3h
    """
    # PREPARE
    file = data_path.join("ui").join("automate").join(
        "test_create_snapshot_via_ae.rb")
    with file.open("r") as f:
        method_contents = f.read()
    miq_domain = DomainCollection(appliance).instantiate(name='ManageIQ')
    miq_class = miq_domain.namespaces.instantiate(
        name='System').classes.instantiate(name='Request')
    miq_class.copy_to(domain)
    request_cls = domain.namespaces.instantiate(
        name='System').classes.instantiate(name='Request')
    request.addfinalizer(request_cls.delete)
    method = request_cls.methods.create(name="snapshot",
                                        location='inline',
                                        script=method_contents)
    request.addfinalizer(method.delete)
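    # 'meth5' is the schema field of the copied Request class that points
    # at the inline 'snapshot' method created above.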
    instance = request_cls.instances.create(
        name="snapshot", fields={"meth5": {
            'value': "snapshot"
        }})
    request.addfinalizer(instance.delete)

    # SIMULATE
    snap_name = fauxfactory.gen_alpha()
    snapshot = InfraVm.Snapshot(name=snap_name, parent_vm=small_test_vm)
    simulate(appliance=appliance,
             instance="Request",
             request="snapshot",
             target_type='VM and Instance',
             target_object=small_test_vm.name,
             execute_methods=True,
             attributes_values={"snap_name": snap_name})

    wait_for(lambda: snapshot.exists,
             timeout="2m",
             delay=10,
             fail_func=small_test_vm.provider.browser.refresh,
             handle_exception=True,
             message="Waiting for snapshot create")

    # Clean up if it appeared
    snapshot.delete()
def test_cloud_catalog_item(appliance, vm_name, setup_provider, provider,
                            dialog, catalog, request, provisioning):
    """Tests cloud catalog item

    Metadata:
        test_flag: provision

    Polarion:
        assignee: nansari
        casecomponent: Services
        initialEstimate: 1/4h
    """
    wait_for(provider.is_refreshed,
             func_kwargs=dict(refresh_delta=10),
             timeout=600)
    vm = appliance.collections.cloud_instances.instantiate(
        "{}0001".format(vm_name), provider)

    request.addfinalizer(vm.cleanup_on_provider)
    image = provisioning['image']['name']
    item_name = "{}-service-{}".format(provider.name,
                                       fauxfactory.gen_alphanumeric())

    inst_args = {
        'catalog': {
            'catalog_name': {
                'name': image,
                'provider': provider.name
            },
            'vm_name': vm_name
        },
        'environment': {
            'availability_zone': provisioning.get('availability_zone', None),
            'security_groups': [provisioning.get('security_group', None)],
            'cloud_tenant': provisioning.get('cloud_tenant', None),
            'cloud_network': provisioning.get('cloud_network', None),
            'cloud_subnet': provisioning.get('cloud_subnet', None),
            'resource_groups': provisioning.get('resource_group', None)
        },
        'properties': {
            'instance_type':
            partial_match(provisioning.get('instance_type', None)),
            'guest_keypair': provisioning.get('guest_keypair', None)
        }
    }
    # GCE specific
    if provider.one_of(GCEProvider):
        recursive_update(
            inst_args, {
                'properties': {
                    'boot_disk_size': provisioning['boot_disk_size'],
                    'is_preemptible': True
                }
            })
    # Azure specific
    if provider.one_of(AzureProvider):
        recursive_update(
            inst_args, {
                'customize': {
                    'admin_username': provisioning['customize_username'],
                    'root_password': provisioning['customize_password']
                }
            })

    catalog_item = appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
        prov_data=inst_args)
    request.addfinalizer(catalog_item.delete)
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog,
                                       catalog_item.name)
    service_catalogs.order()
    logger.info('Waiting for cfme provision request for service %s', item_name)
    request_description = item_name
    provision_request = appliance.collections.requests.instantiate(
        request_description, partial_check=True)
    provision_request.wait_for_request()
    msg = "Request failed with the message {}".format(
        provision_request.rest.message)
    assert provision_request.is_succeeded(), msg
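The recursive_update calls above merge provider-specific keys into the nested inst_args dict without clobbering sibling entries. A minimal sketch of such a helper, offered as an assumption about its behavior rather than the project's actual implementation:

def recursive_update(base, updates):
    """Merge nested dicts in place: recurse into dict values, overwrite leaves."""
    for key, value in updates.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            recursive_update(base[key], value)
        else:
            base[key] = value
    return base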
Example #60
0
def verify_revert_snapshot(full_test_vm,
                           provider,
                           soft_assert,
                           register_event,
                           request,
                           active_snapshot=False):
    if provider.one_of(RHEVMProvider):
        # RHV snapshots have only description, no name
        snapshot1 = new_snapshot(full_test_vm, has_name=False)
    else:
        snapshot1 = new_snapshot(full_test_vm)
    full_template = provider.data.templates.full_template
    # Define parameters of the ssh connection
    ssh_kwargs = {
        'hostname': snapshot1.parent_vm.mgmt.ip,
        'username': credentials[full_template.creds]['username'],
        'password': credentials[full_template.creds]['password']
    }
    ssh_client = SSHClient(**ssh_kwargs)
    # We need to wait for ssh to become available on the vm, it can take a while. Without
    # this wait, the ssh command would fail with 'port 22 not available' error.
    # Easiest way to solve this is just mask the exception with 'handle_exception = True'
    # and wait for successful completion of the ssh command.
    # The 'fail_func' ensures we close the connection that failed with exception.
    # Without this, the connection would hang there and wait_for would fail with timeout.
    wait_for(lambda: ssh_client.run_command('touch snapshot1.txt').success,
             num_sec=400,
             delay=20,
             handle_exception=True,
             fail_func=ssh_client.close,
             message="Waiting for successful SSH connection")
    # Create first snapshot
    snapshot1.create()
    ssh_client.run_command('touch snapshot2.txt')

    # If we are not testing 'revert to active snapshot' situation, we create another snapshot
    if not active_snapshot:
        if provider.one_of(RHEVMProvider):
            snapshot2 = new_snapshot(full_test_vm, has_name=False)
        else:
            snapshot2 = new_snapshot(full_test_vm)
        snapshot2.create()

    # VM on RHV provider must be powered off before snapshot revert
    if provider.one_of(RHEVMProvider):
        full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_OFF,
                                             cancel=False)
        full_test_vm.wait_for_vm_state_change(
            desired_state=full_test_vm.STATE_OFF, timeout=900)

    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for snapshot %s to become active', snapshot1.name)
    wait_for(lambda: snapshot1.active,
             num_sec=300,
             delay=20,
             fail_func=provider.browser.refresh,
             message="Waiting for the first snapshot to become active")
    # VM state after revert should be OFF
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_OFF,
                                          timeout=720)
    # Let's power it ON again
    full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_ON,
                                         cancel=False)
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_ON,
                                          timeout=900)
    soft_assert(full_test_vm.mgmt.is_running, "vm not running")
    # Wait for successful ssh connection
    wait_for(lambda: ssh_client.run_command('test -e snapshot1.txt').success,
             num_sec=400,
             delay=10,
             handle_exception=True,
             fail_func=ssh_client.close,
             message="Waiting for successful SSH connection after revert")
    try:
        result = ssh_client.run_command('test -e snapshot1.txt')
        assert result.success  # file found, RC=0
        result = ssh_client.run_command('test -e snapshot2.txt')
        assert result.failed  # file not found, RC=1
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        logger.exception('Revert to snapshot %s failed', snapshot1.name)
        raise
    finally:
        ssh_client.close()
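A test could drive this helper roughly as follows; the test name is hypothetical and the fixtures simply mirror the helper's signature:

# Hypothetical caller: revert to an older (non-active) snapshot.
def test_vm_revert_snapshot(full_test_vm, provider, soft_assert,
                            register_event, request):
    verify_revert_snapshot(full_test_vm, provider, soft_assert,
                           register_event, request, active_snapshot=False)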