Example No. 1
# BZ 1485310 was not fixed for versions < 5.9
COLLECTIONS_BUGGY_HREF_SLUG_IN_58 = {'policy_actions', 'automate_domains'}


@pytest.mark.tier(3)
@pytest.mark.parametrize("collection_name", COLLECTIONS_ALL)
@pytest.mark.uncollectif(
    lambda appliance, collection_name: collection_name == 'automate'  # doesn't have 'href'
    or (collection_name in COLLECTIONS_BUGGY_HREF_SLUG_IN_58 and appliance.version < '5.9')
    or (collection_name in COLLECTIONS_REMOVED_IN_59 and appliance.version >= '5.9'))
@pytest.mark.meta(blockers=[
    BZ(1547852,
       forced_streams=['5.9', 'upstream'],
       unblock=lambda collection_name: collection_name != 'pictures'),
    BZ(1503852,
       forced_streams=['5.8', '5.9', 'upstream'],
       unblock=lambda collection_name: collection_name not in
       {'requests', 'service_requests'}),
    BZ(1510238,
       forced_streams=['5.8', '5.9', 'upstream'],
       unblock=lambda collection_name: collection_name != 'vms')
])
def test_attributes_present(appliance, collection_name):
    """Tests that the expected attributes are present in all collections.

    Metadata:
        test_flag: rest
    """
Example No. 2

@pytest.fixture(scope="function")
def prepare_proxy_invalid(provider, appliance):
    prov_type = provider.type
    # 192.0.2.1 is from TEST-NET-1 which doesn't exist on the internet (RFC5737).
    appliance.set_proxy('192.0.2.1', '1234', prov_type='default')
    appliance.set_proxy('192.0.2.1', '1234', prov_type=prov_type)
    yield
    appliance.reset_proxy(prov_type)
    appliance.reset_proxy()
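

The fixture above points all appliance traffic at an unreachable TEST-NET-1 proxy; a hedged sketch of how it might be exercised — the timeout expectation and the import path are assumptions, not taken from this example:

import pytest
from cfme.utils.wait import TimedOutError, wait_for  # assumed import path for these helpers


def test_proxy_invalid(prepare_proxy_invalid, provider):
    """Provider refresh should not complete while the appliance points at a dead proxy."""
    provider.refresh_provider_relationships()
    # With 192.0.2.1 (TEST-NET-1) configured as the proxy, is_refreshed is expected
    # to keep returning False, so wait_for should time out.
    with pytest.raises(TimedOutError):
        wait_for(provider.is_refreshed,
                 func_kwargs={"refresh_delta": 120},
                 timeout=300)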


@pytest.mark.meta(blockers=[
    BZ(1623862,
       forced_streams=['5.9', '5.10'],
       unblock=lambda provider: provider.one_of(AzureProvider))
])
def test_proxy_valid(appliance, proxy_machine, proxy_ssh,
                     prepare_proxy_default, provider):
    """ Check whether valid proxy settings works.

    Steps:
     * Configure appliance to use proxy for default provider.
     * Configure the appliance to not use the proxy for the specific provider.
     * Check whether the provider is accessed through the proxy by checking the
       proxy logs."""
    provider.refresh_provider_relationships()
    validate_proxy_logs(provider, proxy_ssh, appliance.hostname)
    wait_for(provider.is_refreshed,
             func_kwargs={"refresh_delta": 120},
Example No. 3
                elif host_name.startswith(host):
                    break
                elif ip_from_provider is not None and ip_from_provider == host_ip:
                    break
            else:
                if host_ip == ip_from_provider:
                    break
        else:
            soft_assert(
                False,
                "Hostname {} not found in {}".format(host_name, provider_name))


@pytest.mark.rhv2
@pytest.mark.meta(
    blockers=[BZ(1504010, forced_streams=['5.7', '5.8', 'upstream'])])
def test_operations_vm_on(soft_assert, appliance, request):
    adb = appliance.db.client
    vms = adb['vms']
    hosts = adb['hosts']
    storages = adb['storages']

    path = ["Operations", "Virtual Machines", "Online VMs (Powered On)"]
    report = CannedSavedReport.new(path)

    vms_in_db = adb.session.query(
        vms.name.label('vm_name'), vms.location.label('vm_location'),
        vms.last_scan_on.label('vm_last_scan'),
        storages.name.label('storages_name'),
        hosts.name.label('hosts_name')).outerjoin(
            hosts, vms.host_id == hosts.id).outerjoin(
Example No. 4
@pytest.mark.tier(3)
def test_delete_default_user(appliance):
    """Test for deleting default user Administrator.

    Steps:
        * Login as Administrator user
        * Try deleting the user
    """
    user = appliance.collections.users.instantiate(name='Administrator')
    with pytest.raises(RBACOperationBlocked):
        user.delete()


@pytest.mark.tier(3)
@pytest.mark.meta(automates=[BZ(1090877)])
@pytest.mark.meta(blockers=[BZ(1408479)], forced_streams=["5.7", "upstream"])
def test_current_user_login_delete(appliance, request):
    """Test for deleting current user login.

    Steps:
        * Login as Admin user
        * Create a new user
        * Login with the new user
        * Try deleting the user
    """
    group_name = "EvmGroup-super_administrator"
    group = group_collection(appliance).instantiate(description=group_name)

    user = new_user(appliance, [group])
    request.addfinalizer(user.delete)
Example No. 5
    'web_services': 'Web Services Credentials'
}


def get_host_data_by_name(provider_key, host_name):
    for host_obj in conf.cfme_data.get('management_systems',
                                       {})[provider_key].get('hosts', []):
        if host_name == host_obj['name']:
            return host_obj
    return None
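

A brief usage sketch for the helper above; the provider key and host name are hypothetical:

host_data = get_host_data_by_name('vsphere-nested-01', 'esxi-host-01.example.com')  # hypothetical values
if host_data is None:
    pytest.skip('Host not present in cfme_data for this provider')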


@pytest.mark.rhv1
@pytest.mark.meta(blockers=[
    BZ(1584261,
       forced_streams=['5.8'],
       unblock=lambda creds: creds == 'default'),
    BZ(1584280,
       forced_streams=['5.9'],
       unblock=lambda provider, creds: not provider.one_of(RHEVMProvider) or
       creds != 'web_services'),
    BZ(1619626,
       forced_streams=['5.9', '5.10'],
       unblock=lambda provider: not provider.one_of(RHEVMProvider))
])
@pytest.mark.parametrize("creds", ["default", "remote_login", "web_services"],
                         ids=["default", "remote", "web"])
@pytest.mark.uncollectif(
    lambda provider, creds: creds in ['remote_login', 'web_services']
    and provider.one_of(RHEVMProvider),
    reason="Not relevant for RHEVM Provider.")
Example No. 6
        view.flash.assert_success_message(text='Start initiated', partial=True)

        if_scvmm_refresh_provider(testing_vm.provider)
        testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON,
                                            timeout=720,
                                            from_details=True)
        wait_for_last_boot_timestamp_refresh(testing_vm,
                                             last_boot_time,
                                             timeout=600)
        soft_assert(testing_vm.provider.mgmt.is_vm_running(testing_vm.name),
                    "vm not running")


@pytest.mark.rhv3
@pytest.mark.meta(
    blockers=[BZ(1496383, forced_streams=['5.7', '5.8', '5.9', 'upstream'])])
def test_no_template_power_control(provider, soft_assert):
    """ Ensures that no power button is displayed for templates.

    Prerequisites:
        * An infra provider that has some templates.

    Steps:
        * Open the view of all templates of the provider
        * Verify the Power toolbar button is not visible
        * Select some template using the checkbox
        * Verify the Power toolbar button is not visible
        * Click on some template to get into the details page
        * Verify the Power toolbar button is not visible
    """
    view = navigate_to(provider, 'ProviderTemplates')
Example No. 7
pytestmark = [
    pytest.mark.meta(server_roles="+automate",
                     blockers=[GH('ManageIQ/integration_tests:7297')]),
    test_requirements.ssui, pytest.mark.long_running,
    pytest.mark.provider(gen_func=providers,
                         filters=[
                             ProviderFilter(
                                 classes=[InfraProvider, CloudProvider],
                                 required_fields=['provisioning'])
                         ])
]


@pytest.mark.rhv1
@pytest.mark.meta(blockers=[
    BZ(1544535, forced_streams=['5.9']),
    GH('ManageIQ/integration_tests:7297')
])
@pytest.mark.parametrize('context', [ViaSSUI])
def test_myservice_crud(appliance, setup_provider, context, order_service):
    """Test Myservice crud in SSUI."""
    catalog_item = order_service
    with appliance.context.use(context):
        my_service = MyService(appliance, catalog_item.name)
        my_service.set_ownership("Administrator", "EvmGroup-approver")
        my_service.update(
            {'description': '{}_edited'.format(catalog_item.name)})
        if appliance.version > "5.8":
            my_service.edit_tags("Cost Center", "Cost Center 001")
        my_service.delete()
Example No. 8
PAGING_DATA = [
    (0, 0),
    (1, 0),
    (11, 13),
    (1, 10000),
]


@pytest.mark.uncollectif(lambda: current_version() < '5.9')
@pytest.mark.parametrize('paging',
                         PAGING_DATA,
                         ids=['{},{}'.format(d[0], d[1]) for d in PAGING_DATA])
@pytest.mark.meta(blockers=[
    BZ(1489885,
       forced_streams=['5.9', 'upstream'],
       unblock=lambda paging: paging[0] != 0),
])
def test_rest_paging(appliance, paging):
    """Tests paging when offset and limit are specified.

    Metadata:
        test_flag: rest
    """
    limit, offset = paging
    url_string = '{}{}'.format(appliance.rest_api.collections.features._href,
                               '?limit={}&offset={}'.format(limit, offset))
    response = appliance.rest_api.get(url_string)

    if response['count'] <= offset:
        expected_subcount = 0
Example No. 9
    network_provider = collection.instantiate(prov_class=NetworkProvider,
                                              name=net_manager)

    collection = appliance.collections.cloud_networks
    ovn_network = collection.create(test_name, 'tenant', network_provider,
                                    net_manager, 'None')

    yield ovn_network
    if ovn_network.exists:
        ovn_network.delete()


@test_requirements.rhev
@pytest.mark.meta(blockers=[
    GH('ManageIQ/integration_tests:8128'),
    BZ(1649886, unblock=lambda provider: not provider.one_of(RHEVMProvider))
])
def test_provision_vm_to_virtual_network(appliance, setup_provider, provider,
                                         request, provisioning, network):
    """ Tests provisioning a vm from a template to a virtual network

    Metadata:
        test_flag: provision

    Polarion:
        assignee: jhenner
        casecomponent: Provisioning
        initialEstimate: 1/4h
    """
    vm_name = random_vm_name('provd')
Example No. 10
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ

pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.tier(2),
    pytest.mark.provider(classes=[CloudProvider, InfraProvider],
                         selector=ONE_PER_CATEGORY,
                         scope="module"),
]


@pytest.mark.parametrize("navigation",
                         ["provider_vms", "all_vms", "vm_summary"])
@pytest.mark.meta(blockers=[
    BZ(1717483, forced_streams=['5.11']),
    BZ(1686617,
       forced_streams=["5.10"],
       unblock=lambda navigation: navigation != 'provider_vms'),
    BZ(1686619,
       forced_streams=["5.10"],
       unblock=lambda navigation: navigation != 'provider_vms'),
    BZ(1717539, unblock=lambda navigation: navigation != 'provider_vms')
])
def test_policy_simulation_ui(appliance, provider, navigation):
    """
    Bugzilla:
        1670456
        1686617
        1686619
        1688359
Example No. 11
        expected_count = 1 if submit == "Submit all" else entity_count

        try:
            wait_for(
                lambda: log.matches[request_pattern] == expected_count,
                timeout=300,
                message="wait for expected match count",
                delay=5,
            )
        except TimedOutError:
            assert False, "Expected '{}' requests and '{}' requests found in automation log".format(
                expected_count, log.matches[request_pattern])


@pytest.mark.meta(blockers=[
    BZ(1685555, unblock=lambda button_group: "SWITCH" not in button_group)
])
def test_custom_button_dialog_infra_obj(appliance, dialog, request, setup_obj,
                                        button_group):
    """ Test custom button with dialog and InspectMe method

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: high
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
Example No. 12
    vm.mgmt.ensure_state(VmState.RUNNING)
    return


@pytest.fixture(scope="function")
def vm_off(vm):
    """ Ensures that the VM is off when the control goes to the test."""
    vm.mgmt.wait_for_steady_state()
    vm.mgmt.ensure_state(VmState.STOPPED)
    return


@pytest.mark.provider(
    [VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
    scope="module")
@pytest.mark.meta(blockers=[BZ(1531547)], automates=[1531547])
def test_action_start_virtual_machine_after_stopping(request, vm, vm_on,
                                                     policy_for_testing):
    """ This test tests action 'Start Virtual Machine'

    This test sets a policy that turns the VM on when it is turned off
    (https://www.youtube.com/watch?v=UOn4gxj2Dso), then turns the VM off and waits for it to come
    back alive.

    Bugzilla:
        1531547

    Metadata:
        test_flag: actions, provision

    Polarion:
Example No. 13
        myservice = service
    if myservice.exists:
        myservice.delete()


def test_provision_stack(order_stack):
    """Tests stack provisioning

    Metadata:
        test_flag: provision
    """
    provision_request, stack = order_stack
    assert provision_request.is_succeeded()


@pytest.mark.meta(blockers=[BZ(1575935, forced_streams=['5.8', '5.9'])])
def test_reconfigure_service(appliance, service_catalogs, request):
    """Tests service reconfiguring

    Metadata:
        test_flag: provision
    """
    provision_request = service_catalogs.order()
    provision_request.wait_for_request(method='ui')
    last_message = provision_request.get_request_row_from_ui()['Last Message'].text
    service_name = last_message.split()[2].strip('[]')
    myservice = MyService(appliance, service_name)
    request.addfinalizer(lambda: _cleanup(service=myservice))
    assert provision_request.is_succeeded()
    myservice.reconfigure_service()
Example No. 14
            object_name = host_name
    elif vm_name is not None:
        object_name = vm_name

    with appliance.db.client.transaction:
        provs = (appliance.db.client.session.query(metrics_tbl.id).join(
            ems, metrics_tbl.parent_ems_id == ems.id).filter(
                metrics_tbl.resource_name == object_name,
                ems.name == provider.name))
    return appliance.db.client.session.query(metrics_tbl).filter(
        metrics_tbl.id.in_(provs.subquery()))


# Tests to check that specific metrics are being collected
@pytest.mark.meta(blockers=[
    BZ(1525296, unblock=lambda provider: not provider.one_of(CloudProvider))
])
def test_raw_metric_vm_cpu(metrics_collection, appliance, provider):
    vm_name = provider.data['cap_and_util']['capandu_vm']
    if provider.category == "infra":
        query = query_metric_db(appliance, provider,
                                'cpu_usagemhz_rate_average', vm_name)
        average_rate = attrgetter('cpu_usagemhz_rate_average')
    elif provider.category == "cloud":
        query = query_metric_db(appliance, provider, 'cpu_usage_rate_average',
                                vm_name)
        average_rate = attrgetter('cpu_usage_rate_average')

    for record in query:
        if average_rate(record) is not None:
            assert average_rate(record) > 0, 'Zero VM CPU Usage'
Example No. 15
    collection = provider.appliance.provider_based_collection(provider)
    vm_obj = collection.instantiate(random_vm_name('html5-con'),
                                    provider,
                                    template_name=console_template.name)

    request.addfinalizer(lambda: vm_obj.cleanup_on_provider())
    vm_obj.create_on_provider(timeout=2400,
                              find_in_cfme=True,
                              allow_skip="default")
    return vm_obj


@pytest.mark.tier(2)
@test_requirements.html5
@pytest.mark.provider([VMwareProvider], selector=ONE)
@pytest.mark.meta(automates=[BZ(1514594)])
def test_html5_console_ports_present(appliance, setup_provider, provider):
    """
    Bugzilla:
        1514594

    Check to see if the Add/Edit provider screen has the Host VNC Start Port
    and Host VNC End port. Only applicable to versions of VMware that support VNC console.

    Polarion:
        assignee: apagac
        casecomponent: Appliance
        initialEstimate: 1/4h
        startsin: 5.8
    """
    edit_view = navigate_to(provider, 'Edit')
Example No. 16
class TestRESTSnapshots(object):
    """Tests actions with VM/instance snapshots using REST API."""
    @pytest.mark.rhv2
    def test_create_snapshot(self, vm_snapshot):
        """Creates VM/instance snapshot using REST API.

        Metadata:
            test_flag: rest
        """
        vm, snapshot = vm_snapshot
        vm.snapshots.get(description=snapshot.description)

    @pytest.mark.rhv3
    @pytest.mark.parametrize('method', ['post', 'delete'],
                             ids=['POST', 'DELETE'])
    def test_delete_snapshot_from_detail(self, vm_snapshot, method):
        """Deletes VM/instance snapshot from detail using REST API.

        Testing BZ 1466225

        Metadata:
            test_flag: rest
        """
        __, snapshot = vm_snapshot
        delete_resources_from_detail([snapshot],
                                     method=method,
                                     num_sec=300,
                                     delay=5)

    @pytest.mark.rhv3
    def test_delete_snapshot_from_collection(self, vm_snapshot):
        """Deletes VM/instance snapshot from collection using REST API.

        Metadata:
            test_flag: rest
        """
        vm, snapshot = vm_snapshot
        delete_resources_from_collection([snapshot],
                                         vm.snapshots,
                                         not_found=True,
                                         num_sec=300,
                                         delay=5)

    @pytest.mark.meta(blockers=[
        BZ(1550551,
           forced_streams=['5.8', '5.9', 'upstream'],
           unblock=lambda provider: not provider.one_of(RHEVMProvider))
    ])
    def test_delete_snapshot_race(self, request, appliance, collection, vm):
        """Tests creation of snapshot while delete is in progress.

        Testing race condition described in BZ 1550551

        The expected result is either success or a reasonable error message.
        An unexpected result would be success where no snapshot is created.

        Metadata:
            test_flag: rest
        """
        # create and delete snapshot #1
        __, snap1 = vm_snapshot(request, appliance, collection, vm)
        snap1.action.delete()

        # create snapshot #2 without waiting for delete
        # of snapshot #1 to finish
        try:
            vm_snapshot(request, appliance, collection, vm)
        except AssertionError as err:
            # The `vm_snapshot` call uses `assert_response`, which checks the status of the Task.
            # AssertionError is raised when the Task failed, and the Task message is included
            # in the error message.
            # The error message can be different after BZ 1550551 is fixed.
            if 'Please wait for the operation to finish' not in str(err):
                raise

    @pytest.mark.rhv2
    @pytest.mark.uncollectif(
        lambda provider: not provider.one_of(InfraProvider))
    def test_revert_snapshot(self, appliance, provider, vm_snapshot):
        """Reverts VM/instance snapshot using REST API.

        Metadata:
            test_flag: rest
        """
        __, snapshot = vm_snapshot

        snapshot.action.revert()
        if provider.one_of(RHEVMProvider):
            assert_response(appliance, success=False)
            result = appliance.rest_api.response.json()
            assert 'Revert is allowed only when vm is down' in result['message']
        else:
            assert_response(appliance)
Example No. 17
    if appliance.version.is_in_series('5.8'):
        message = 'Button "{}" was added'.format(button_name)
    else:
        message = 'Custom Button "{}" was added'.format(button_name)
    view.flash.assert_success_message(message)


def test_edit_tags(catalog_item):
    tag = catalog_item.add_tag()
    catalog_item.remove_tag(tag)


@pytest.mark.skip(
    'Catalog items are converted to collections. Refactoring is required')
@pytest.mark.meta(
    blockers=[BZ(1531512, forced_streams=["5.8", "5.9", "upstream"])])
def test_catalog_item_duplicate_name(catalog_item):
    catalog_item.create()
    with pytest.raises(Exception, match="Name has already been taken"):
        catalog_item.create()


@pytest.mark.skip(
    'Catalog items are converted to collections. Refactoring is required')
@pytest.mark.meta(blockers=[BZ(1460891, forced_streams=["5.8", "upstream"])])
def test_permissions_catalog_item_add(catalog_item):
    """Test that a catalog can be added only with the right permissions."""
    tac.single_task_permission_test(
        [['Everything', 'Services', 'Catalogs Explorer', 'Catalog Items']],
        {'Add Catalog Item': catalog_item.create})
Example No. 18
    col = temp_appliance_extended_db.collections.bottlenecks
    view = navigate_to(col, 'All')
    row = view.report.event_details[0]
    # Selecting the row by a unique value
    db_row = db_events.filter(db_tbl.message == row[5].text)
    # Compare bottleneck's table timestamp with db
    assert row[0].text == db_row[0][0].strftime(
        parsetime.american_with_utc_format)
    # Changing time zone
    view.report.time_zone.fill('(GMT-04:00) La Paz')
    row = view.report.event_details[0]
    assert row[0].text == (
        db_row[0][0] - timedelta(hours=4)).strftime("%m/%d/%y %H:%M:%S -04")


@pytest.mark.meta(blockers=[BZ(1507565, forced_streams=["5.8"])])
@pytest.mark.tier(2)
def test_bottlenecks_summary_event_groups(temp_appliance_extended_db,
                                          db_restore, db_tbl, db_events):
    """ Checks event_groups selectbox in summary tab. It should filter events by type

    Polarion:
        assignee: nachandr
        initialEstimate: 1/4h
        casecomponent: Optimize
    """
    col = temp_appliance_extended_db.collections.bottlenecks
    view = navigate_to(col, 'All')
    # Enabling this option to show all possible values
    view.summary.show_host_events.fill(True)
    view.summary.event_groups.fill('Capacity')
Example No. 19
def pytest_runtest_protocol(item):
    holder = item.config.pluginmanager.getplugin('appliance-holder')
    global session_ver
    global session_build
    global session_stream
    appliance = holder.held_appliance
    if not session_ver:
        session_ver = str(appliance.version)
        session_build = appliance.build
        session_stream = appliance.version.stream()
        if str(session_ver) not in session_build:
            session_build = "{}-{}".format(str(session_ver), session_build)
        try:
            proc = subprocess.Popen(['git', 'describe', '--tags'],
                                    stdout=subprocess.PIPE)
            proc.wait()
            session_fw_version = proc.stdout.read().strip()
        except:
            session_fw_version = None
        fire_art_hook(item.config,
                      'session_info',
                      version=session_ver,
                      build=session_build,
                      stream=session_stream,
                      fw_version=session_fw_version)

    tier = item.get_marker('tier')
    if tier:
        tier = tier.args[0]

    requirement = item.get_marker('requirement')
    if requirement:
        requirement = requirement.args[0]

    try:
        params = item.callspec.params
        param_dict = {p: get_name(v) for p, v in params.iteritems()}
    except:
        param_dict = {}
    ip = appliance.hostname
    # This pre_start_test hook is needed so that filedump is able to get the test
    # object set up before the logger starts logging, as the logger fires a nested hook
    # to the filedumper and we can't specify order in riggerlib.
    meta = item.get_marker('meta')
    if meta and 'blockers' in meta.kwargs:
        blocker_spec = meta.kwargs['blockers']
        blockers = []
        for blocker in blocker_spec:
            if isinstance(blocker, int):
                blockers.append(BZ(blocker).url)
            else:
                blockers.append(Blocker.parse(blocker).url)
    else:
        blockers = []
    fire_art_test_hook(item, 'pre_start_test', slaveid=store.slaveid, ip=ip)
    fire_art_test_hook(item,
                       'start_test',
                       slaveid=store.slaveid,
                       ip=ip,
                       tier=tier,
                       requirement=requirement,
                       param_dict=param_dict,
                       issues=blockers)
    yield
Example No. 20
    def create(self,
               vm_name,
               provider,
               form_values=None,
               cancel=False,
               check_existing=False,
               find_in_cfme=False,
               wait=True,
               request_description=None,
               auto_approve=False,
               override=False):
        """Provisions an vm/instance with the given properties through CFME

        Args:
            vm_name: the vm/instance's name
            provider: provider object
            form_values: dictionary of form values for provisioning, structured into tabs
            cancel: boolean, whether or not to cancel form filling
            check_existing: verify if such vm_name exists
            find_in_cfme: verify that vm was created and appeared in CFME
            wait: wait for vm provision request end
            request_description: request description that test needs to search in request table.
            auto_approve: if true the request is approved before waiting for completion.
            override: To override any failure related exception

        Note:
            Calling create on a sub-class of instance will generate the properly formatted
            dictionary when the correct fields are supplied.
        """
        vm = self.instantiate(vm_name, provider)
        if check_existing and vm.exists:
            return vm
        if not provider.is_refreshed():
            provider.refresh_provider_relationships()
            wait_for(provider.is_refreshed,
                     func_kwargs={'refresh_delta': 10},
                     timeout=600)
        if not form_values:
            form_values = vm.vm_default_args
        else:
            inst_args = vm.vm_default_args
            form_values = recursive_update(inst_args, form_values)
        env = form_values.get('environment') or {}
        if env.get('automatic_placement'):
            form_values['environment'] = {'automatic_placement': True}
        form_values.update({'provider_name': provider.name})
        if not form_values.get('template_name'):
            template_name = (
                provider.data.get('provisioning').get('image', {}).get('name')
                or provider.data.get('provisioning').get('template'))
            vm.template_name = template_name
            form_values.update({'template_name': template_name})
        view = navigate_to(self, 'Provision')
        view.form.fill(form_values)

        if cancel:
            view.form.cancel_button.click()
            view = self.browser.create_view(BaseLoggedInPage)
            view.flash.assert_success_message(self.ENTITY.PROVISION_CANCEL)
            view.flash.assert_no_error()
        else:
            view.form.submit_button.click()

            view = vm.appliance.browser.create_view(RequestsView)
            if not BZ(1608967, forced_streams=['5.10']).blocks:
                wait_for(lambda: view.flash.messages,
                         fail_condition=[],
                         timeout=10,
                         delay=2,
                         message='wait for Flash Success')
            # This flash message is not flashed in 5.10.
            if self.appliance.version < '5.10':
                wait_for(lambda: view.flash.messages,
                         fail_condition=[],
                         timeout=10,
                         delay=2,
                         message='wait for Flash Success')
            view.flash.assert_no_error()
            if wait:
                if request_description is None:
                    request_description = 'Provision from [{}] to [{}]'.format(
                        form_values.get('template_name'), vm.name)
                provision_request = vm.appliance.collections.requests.instantiate(
                    request_description)
                logger.info('Waiting for cfme provision request for vm %s',
                            vm.name)
                if auto_approve:
                    provision_request.approve_request(method='ui',
                                                      reason="Approved")
                provision_request.wait_for_request(method='ui', num_sec=900)
                if provision_request.is_succeeded(method='ui'):
                    logger.info('Waiting for vm %s to appear on provider %s',
                                vm.name, provider.key)
                    wait_for(provider.mgmt.does_vm_exist, [vm.name],
                             handle_exception=True,
                             num_sec=600)
                elif override:
                    logger.info(
                        'Overriding exception to check failure condition.')
                else:
                    raise Exception(
                        "Provisioning vm {} failed with: {}".format(
                            vm.name, provision_request.row.last_message.text))
        if find_in_cfme:
            vm.wait_to_appear(timeout=800)

        return vm
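
A minimal sketch of driving the create() method above from a test; provider_based_collection, random_vm_name and cleanup_on_provider are taken from other examples on this page, while the fixture names and the import path are assumptions:

from cfme.utils.generators import random_vm_name  # assumed import path


def test_provision_via_collection(appliance, provider, setup_provider, request):
    # provider_based_collection is assumed to return the VM collection whose create() is shown above
    collection = appliance.provider_based_collection(provider)
    vm = collection.create(
        random_vm_name('prov'),
        provider,
        form_values={'environment': {'automatic_placement': True}},
        find_in_cfme=True,
        wait=True,
    )
    request.addfinalizer(vm.cleanup_on_provider)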
Example No. 21
             timeout="35m",
             fail_func=lambda: toolbar.select('Reload'))

    # Check that all data has been fetched
    current = instance.get_detail(properties=('Configuration', 'Packages'))
    assert current == expected

    # Make sure new package is listed
    instance.open_details(("Configuration", "Packages"))
    if not instance.paged_table.find_row_on_all_pages('Name', package_name):
        pytest.fail("Package {0} was not found".format(package_name))


@pytest.mark.long_running
@pytest.mark.uncollectif(
    BZ(1491576, forced_streams=['5.7']).blocks, 'BZ 1491576')
def test_ssa_files(provider, instance, policy_profile, soft_assert):
    """Tests that instances can be scanned for specific file."""

    if instance.system_type == WINDOWS:
        pytest.skip("We cannot verify Windows files yet")

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15,
             timeout="35m",
             fail_func=lambda: toolbar.select('Reload'))

    # Check that all data has been fetched
    current = instance.get_detail(properties=('Configuration', 'Files'))
    assert current != '0', "No files were scanned"
Example No. 22
                             appliance=appliance)


@pytest.fixture()
def vm_name():
    return random_vm_name('ci')


@pytest.mark.rhv2
@pytest.mark.tier(3)
@pytest.mark.uncollectif(
    lambda provider, appliance: provider.one_of(GCEProvider) and appliance.version < "5.9",
    reason="GCE supports cloud_init in 5.9+ BZ 1395757")
@pytest.mark.uncollectif(lambda provider: provider.one_of(VMwareProvider),
                         reason="BZ 1568038")
@pytest.mark.meta(blockers=[BZ(1619744, forced_streams=['5.9', '5.10'])])
def test_provision_cloud_init(appliance, request, setup_provider, provider,
                              provisioning, setup_ci_template, vm_name):
    """ Tests provisioning from a template with cloud_init

    Metadata:
        test_flag: cloud_init, provision

    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
    """
    image = provisioning.get('ci-image') or provisioning['image']['name']
    note = (
        'Testing provisioning from image {} to vm {} on provider {}'.format(
            image, vm_name, provider.key))
Example No. 23
import cfme.configure.access_control as ac
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.common.vm import VM
from cfme.utils import testgen
from cfme.utils.blockers import BZ


def pytest_generate_tests(metafunc):
    argnames, argvalues, idlist = testgen.all_providers(metafunc, required_fields=['ownership_vm'])
    testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")


pytestmark = [
    test_requirements.ownership,
    pytest.mark.meta(blockers=[BZ(1380781, forced_streams=["5.7"])]),
    pytest.mark.tier(3)
]


@pytest.yield_fixture(scope="module")
def role_only_user_owned(appliance):
    appliance.server.login_admin()
    role = ac.Role(name='role_only_user_owned_' + fauxfactory.gen_alphanumeric(),
                   vm_restriction='Only User Owned')
    role.create()
    yield role
    appliance.server.login_admin()
    role.delete()

Example No. 24
    prov = collections.instantiate(prov_class=RHEVMProvider,
                                   name=fauxfactory.gen_alphanumeric(5),
                                   endpoints=endpoint)
    try:
        prov.create()
    except AssertionError:
        view = appliance.browser.create_view(prov.endpoints_form)
        text = view.default.api_port.value
        assert text == prov.default_endpoint.api_port[0:15]


@pytest.mark.rhv1
@pytest.mark.usefixtures('has_no_infra_providers')
@pytest.mark.meta(blockers=[
    BZ(1559796,
       forced_streams=['5.8', '5.9'],
       unblock=lambda provider: not provider.one_of(RHEVMProvider))
])
@pytest.mark.tier(1)
def test_providers_discovery(request, appliance, provider):
    """Tests provider discovery

    Metadata:
        test_flag: crud

    Polarion:
        assignee: pvala
        initialEstimate: 1/8h
    """
    appliance.collections.infra_providers.discover(provider,
                                                   cancel=False,
Example No. 25
    buttongroup = appliance.collections.button_groups.create(
        text=fauxfactory.gen_alphanumeric(),
        hover="btn_desc_{}".format(fauxfactory.gen_alphanumeric()),
        type=appliance.collections.button_groups.HOST)
    request.addfinalizer(buttongroup.delete_if_exists)
    button = buttongroup.buttons.create(text=fauxfactory.gen_alphanumeric(),
                                        hover="btn_hvr_{}".format(
                                            fauxfactory.gen_alphanumeric()),
                                        system="Request",
                                        request="InspectMe")
    request.addfinalizer(button.delete_if_exists)
    host = appliance.collections.hosts.all(provider)[0]
    host.execute_button(buttongroup.hover, button.text, handle_alert=None)


@pytest.mark.meta(blockers=[BZ(1460774, forced_streams=["5.8", "upstream"])])
@pytest.mark.tier(2)
def test_button_avp_displayed(appliance, dialog, request):
    """This test checks whether the Attribute/Values pairs are displayed in the dialog.
       automates 1229348
    Steps:
        * Open a dialog to create a button.
        * Locate the section with attribute/value pairs.
    """
    # This is optional, our nav tree does not have unassigned button
    buttongroup = appliance.collections.button_groups.create(
        text=fauxfactory.gen_alphanumeric(),
        hover="btn_desc_{}".format(fauxfactory.gen_alphanumeric()),
        type=appliance.collections.button_groups.VM_INSTANCE)
    request.addfinalizer(buttongroup.delete_if_exists)
    buttons_collection = appliance.collections.buttons
Example No. 26
    pytest.mark.meta(server_roles="+automate"), test_requirements.ssui,
    pytest.mark.long_running,
    pytest.mark.ignore_stream("upstream"),
    pytest.mark.provider(gen_func=providers,
                         filters=[
                             ProviderFilter(
                                 classes=[InfraProvider, CloudProvider],
                                 required_fields=['provisioning'])
                         ])
]


@pytest.mark.rhv2
@pytest.mark.meta(blockers=[
    BZ(1633540,
       forced_streams=['5.10'],
       unblock=lambda provider: not provider.one_of(RHEVMProvider))
])
@pytest.mark.parametrize('context', [ViaSSUI])
def test_service_catalog_crud_ssui(appliance, setup_provider, context,
                                   order_service):
    """Tests Service Catalog in SSUI.

    Metadata:
        test_flag: ssui

    Polarion:
        assignee: sshveta
        initialEstimate: None
    """
    collection.assign_policy_profiles_multiple_entities(
        random_image_instances, conditions, 'OpenSCAP profile')

    # Verify Image summary
    collection.check_compliance_multiple_images(random_image_instances)


def get_table_attr(instance, table_name, attr):
    # Trying to read the table <table_name> attribute <attr>
    view = navigate_to(instance, 'Details', force=True)
    table = getattr(view.entities, table_name, None)
    if table:
        return table.read().get(attr)
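

A short usage sketch for the helper above; the instance variable, table name and attribute are hypothetical:

scan_status = get_table_attr(image_instance, 'compliance', 'Status')  # hypothetical table/attribute names
soft_assert(scan_status is not None, 'Status attribute missing from the Compliance table')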


@pytest.mark.meta(blockers=[BZ(1620068, forced_streams=["5.9", "5.10"])])
@pytest.mark.parametrize(('test_item'), TEST_ITEMS)
def test_containers_smartstate_analysis(provider, test_item,
                                        delete_all_container_tasks,
                                        soft_assert, random_image_instances,
                                        appliance):
    """
    Polarion:
        assignee: juwatts
        caseimportance: medium
        casecomponent: Containers
        initialEstimate: 1/6h
    """
    collection = appliance.collections.container_images
    # create conditions list that will match the images that we want to check
    conditions = []
Example No. 28
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy_profile = policy_profile_collection.create(policy.description, policies=[policy])
    request.addfinalizer(lambda: policy_profile.delete() if policy_profile.exists else None)
    virtualcenter_provider.assign_policy_profiles(policy_profile.description)
    request.addfinalizer(lambda: virtualcenter_provider.unassign_policy_profiles(
        policy_profile.description))
    vmware_vm.check_compliance()
    vmware_vm.open_details(["Compliance", "History"])
    appliance = get_or_create_current_appliance()
    history_screen_title = Text(appliance.browser.widgetastic,
        "//span[@id='explorer_title_text']").text
    assert history_screen_title == '"Compliance History" for Virtual Machine "{}"'.format(
        vmware_vm.name)


@pytest.mark.meta(blockers=[BZ(1395965, forced_streams=["5.6", "5.7"]),
                            BZ(1491576, forced_streams=["5.7"])])
def test_delete_all_actions_from_compliance_policy(request, policy_collection):
    """We should not allow a compliance policy to be saved
    if there are no actions on the compliance event.

    Steps:
        * Create a compliance policy
        * Remove all actions

    Result:
        The policy shouldn't be saved.
    """
    policy = policy_collection.create(VMCompliancePolicy, fauxfactory.gen_alphanumeric())
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    with pytest.raises(AssertionError):
Example No. 29
from cfme.base.credential import Credential
from cfme.common.vm import VM
from cfme.common.provider import BaseProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.intelligence.reports.reports import CustomReport
from datetime import date
from fixtures.provider import setup_or_skip
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.wait import wait_for

pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.meta(blockers=[
        BZ(1433984, forced_streams=["5.7", "5.8", "upstream"]),
        BZ(1468729, forced_streams=["5.9"]),
        BZ(1511099,
           forced_streams=["5.7", "5.8"],
           unblock=lambda provider: not provider.one_of(GCEProvider))
    ]),
    pytest.mark.provider(
        [VMwareProvider, RHEVMProvider, AzureProvider, GCEProvider],
        scope='module',
        required_fields=[(['cap_and_util', 'test_chargeback'], True)]),
    test_requirements.chargeback,
]


@pytest.yield_fixture(scope="module")
def clean_setup_provider(request, provider):
Example No. 30
def test_vm_disk_reconfig_via_rest(appliance, create_vm):
    """
    Polarion:
        assignee: pvala
        casecomponent: Infra
        caseimportance: high
        initialEstimate: 1/10h
        setup:
            1. Add an infrastructure provider. Test for vcenter and rhv provider.
            2. Provision a VM.
        testSteps:
            1. Add a disk to the VM.
            2. Remove the disk from VM
        expectedResults:
            1. The disk must be added successfully.
            2. The disk must be removed successfully.
    Bugzilla:
        1618517
        1666593
        1620161
        1691635
        1692801
    """
    vm_id = appliance.rest_api.collections.vms.get(name=create_vm.name).id
    # get initial disks for later comparison
    initial_disks = [disk.filename for disk in create_vm.configuration.disks]

    # add a disk to VM
    add_data = [{
        "disk_size_in_mb": 20,
        "sync": True,
        "persistent": True,
        "thin_provisioned": False,
        "dependent": True,
        "bootable": False,
    }]
    vm_reconfig_via_rest(appliance, "disk_add", vm_id, add_data)

    # assert the new disk was added
    assert wait_for(
        lambda: create_vm.configuration.num_disks > len(initial_disks),
        fail_func=create_vm.refresh_relationships,
        delay=5,
        timeout=200,
    )

    # Disk GUID is displayed instead of disk name in the disks table for a rhev VM, and passing
    # disk GUID to the delete method results in failure, so skip this part until the BZ is fixed.
    if not (BZ(1691635).blocks and create_vm.provider.one_of(RHEVMProvider)):

        # there will always be 2 disks after the disk has been added
        disks_present = [
            disk.filename for disk in create_vm.configuration.disks
        ]
        disk_added = list(set(disks_present) - set(initial_disks))[0]

        # remove the newly added disk from VM
        delete_data = [{"disk_name": disk_added, "delete_backing": False}]
        vm_reconfig_via_rest(appliance, "disk_remove", vm_id, delete_data)

        # assert the disk was removed
        try:
            wait_for(
                lambda: create_vm.configuration.num_disks == len(initial_disks),
                fail_func=create_vm.refresh_relationships,
                delay=5,
                timeout=200,
            )
        except TimedOutError:
            assert False, "Number of disks expected was {expected}, found {actual}".format(
                expected=len(initial_disks),
                actual=create_vm.configuration.num_disks)