item_name = fauxfactory.gen_alphanumeric()
    catalog_item = CatalogItem(item_type=catalog_item_type,
                               name=item_name,
                               description="my catalog",
                               display_in=True,
                               catalog=catalog,
                               dialog=tagcontrol_dialog,
                               catalog_name=template,
                               provider=provider,
                               prov_data=provisioning_data)
    return catalog_item


@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream")
@pytest.mark.meta(blockers=[BZ(1434990, forced_streams=["5.7", "upstream"])])
def test_tagdialog_catalog_item(appliance, provider, setup_provider,
                                catalog_item, request):
    """Tests tag dialog catalog item
    Metadata:
        test_flag: provision
    """
    vm_name = catalog_item.provisioning_data['catalog']["vm_name"]
    request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
    catalog_item.create()
    dialog_values = {'default_select_value': "Gold"}
    service_catalogs = ServiceCatalogs(appliance,
                                       catalog=catalog_item.catalog,
                                       name=catalog_item.name,
                                       dialog_values=dialog_values)
    service_catalogs.order()
            request="Call_Instance",
            execute_methods=True,
        )
    view = instance.create_view(AutomateSimulationView)
    assert (view.result_tree.click_path(
        f"ManageIQ/SYSTEM / PROCESS / {instance.klass.name}",
        f"ManageIQ/System / {instance.klass.name} / Call_Instance",
        f"{instance.domain.name}/System / {instance.klass.name} / {instance.name}",
        "values",
        "Hash",
        "Key",
    ).text == "Key")


@pytest.mark.tier(2)
@pytest.mark.meta(blockers=[BZ(1630800, forced_streams=["5.11"])],
                  automates=[1630800])
def test_simulation_copy_button(appliance):
    """
    Bugzilla:
        1630800

    Polarion:
        assignee: ghubale
        initialEstimate: 1/8h
        caseposneg: positive
        startsin: 5.10
        casecomponent: Automate
        testSteps:
            1. Go to Automation > Automate > Simulation
            2. Fill in any required fields to enable submit button and click on 'Submit'
Example #3
0
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.common.provider import BaseProvider
from cfme.common.vm import VM
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.intelligence.reports.reports import CustomReport
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
from fixtures.provider import setup_or_skip

pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.meta(blockers=[
        BZ(1531600, forced_streams=["5.9"]),
        BZ(1511099,
           forced_streams=["5.7", "5.8"],
           unblock=lambda provider: not provider.one_of(GCEProvider)),
        BZ(1531554, forced_streams=["5.8"])
    ]),
    pytest.mark.provider(
        [VMwareProvider, RHEVMProvider, AzureProvider, GCEProvider],
        scope='module',
        required_fields=[(['cap_and_util', 'test_chargeback'], True)]),
    test_requirements.chargeback,
]


@pytest.yield_fixture(scope="module")
def clean_setup_provider(request, provider):
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for

from wrapanapi import VmState

pf1 = ProviderFilter(classes=[CloudInfraProvider],
    required_fields=[(['cap_and_util', 'test_chargeback'], True)])
pf2 = ProviderFilter(classes=[SCVMMProvider], inverted=True)  # SCVMM doesn't support C&U

pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.meta(blockers=[BZ(1511099, forced_streams=['5.9', '5.8'],
                                  unblock=lambda provider: not provider.one_of(GCEProvider)),
                               ]),
    pytest.mark.provider(gen_func=providers, filters=[pf1, pf2], scope='module'),
    pytest.mark.usefixtures('has_no_providers_modscope', 'setup_provider_modscope'),
    test_requirements.chargeback,
]

# Allowed deviation between the reported value in the Chargeback report and the estimated value.
DEVIATION = 1


@pytest.fixture(scope="module")
def vm_ownership(enable_candu, provider, appliance):
    # In these tests, chargeback reports are filtered on VM owner.So,VMs have to be
    # assigned ownership.
    vm_name = provider.data['cap_and_util']['chargeback_vm']
# -*- coding: utf-8 -*-
import pytest

from cfme import test_requirements
from cfme.common.host_views import HostDriftAnalysis
from cfme.infrastructure.host import Host
from cfme.infrastructure.provider import InfraProvider
from cfme.utils import testgen
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for

pytestmark = [
    test_requirements.drift,
    pytest.mark.tier(3),
    pytest.mark.meta(blockers=[BZ(1635126, forced_streams=['5.10'])]),
]


def pytest_generate_tests(metafunc):
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, [InfraProvider], required_fields=['hosts'])
    argnames += ['host']

    new_idlist = []
    new_argvalues = []

    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))

        for test_host in args['provider'].data['hosts']:
Example #6
0
    result.start_monitoring()
    # has_name is false if testing RHEVMProvider
    snapshot = new_snapshot(create_vm,
                            has_name=(not provider.one_of(RHEVMProvider)))
    snapshot.create()
    # check for the size as "read" check
    if provider.appliance.version >= "5.11" and provider.one_of(RHEVMProvider):
        assert snapshot.size
    snapshot.delete()
    provider.refresh_provider_relationships(wait=600)
    assert result.validate(wait="60s")


@test_requirements.rhev
@pytest.mark.provider([RHEVMProvider])
@pytest.mark.meta(automates=[BZ(1443411)])
def test_delete_active_vm_snapshot(create_vm):
    """
    Check that it's not possible to delete an Active VM from RHV snapshots

    Bugzilla:
        1443411

    Polarion:
        assignee: jhenner
        casecomponent: Infra
        caseimportance: medium
        caseposneg: negative
        initialEstimate: 1/12h
    """
    view = navigate_to(create_vm, 'SnapshotsAll')
Example #7
0
        datetimes.append((date_from, date_to, file.name))

    # Check for the gaps
    if len(datetimes) > 1:
        for i in range(len(datetimes) - 1):
            dt = datetimes[i + 1][0] - datetimes[i][1]
            assert dt.total_seconds() >= 0.0, \
                "Negative gap between log files ({}, {})".format(
                    datetimes[i][2], datetimes[i + 1][2])


@pytest.mark.tier(3)
@pytest.mark.nondestructive
@pytest.mark.meta(blockers=[
    BZ(1341502,
       unblock=lambda log_depot: log_depot.protocol != "anon_ftp",
       forced_streams=["5.6", "5.7", "5.8", "upstream"])
])
def test_collect_log_depot(log_depot, appliance, configured_depot, request):
    """ Boilerplate test to verify functionality of this concept

    Will be extended and improved.
    """
    # Wipe the FTP contents in the end
    @request.addfinalizer
    def _clear_ftp():
        with log_depot.ftp as ftp:
            ftp.cwd(ftp.upload_dir)
            ftp.recursively_delete()

    # Prepare empty workspace
            3. Select domain which like to import
        expectedResults:
            1.
            2.
            3. Import should work. Check imported or not.
    """
    assert import_datastore.exists


@pytest.mark.tier(2)
@pytest.mark.meta(automates=[1720611])
@pytest.mark.parametrize("upload_file",
                         ["datastore_blank.zip", "dialog_blank.yml"],
                         ids=["datastore", "dialog"])
@pytest.mark.uncollectif(lambda upload_file: upload_file == "dialog_blank.yml"
                         and BZ(1720611, forced_streams=['5.10']).blocks,
                         reason='Blank dialog import blocked by BZ 1720611')
def test_upload_blank_file(appliance, upload_file):
    """
    Bugzilla:
        1720611

    Polarion:
        assignee: ghubale
        initialEstimate: 1/8h
        caseposneg: negative
        startsin: 5.10
        casecomponent: Automate
        testSteps:
            1. Create blank zip(test.zip) and yaml(test.yml) file
            2. Navigate to Automation > Automate > Import/Export and upload test.zip file
    """
    with update(ansible_action):
        ansible_action.run_ansible_playbook = {
            "inventory": {
                "localhost": True
            }
        }
    create_vm.add_tag()
    wait_for(ansible_service_request.exists, num_sec=600)
    ansible_service_request.wait_for_request()
    view = navigate_to(ansible_service, "Details")
    assert view.provisioning.details.get_text_of("Hosts") == "localhost"
    assert view.provisioning.results.get_text_of("Status") == "Finished"


@pytest.mark.meta(blockers=[BZ(1822533, forced_streams=["5.11", "5.10"])])
@pytest.mark.tier(3)
@pytest.mark.parametrize("create_vm", ["full_template"], indirect=True)
def test_action_run_ansible_playbook_manual_address(
    request,
    ansible_catalog_item,
    ansible_action,
    policy_for_testing,
    create_vm,
    ansible_credential,
    ansible_service_request,
    ansible_service,
    appliance,
):
    """Tests a policy with ansible playbook action against manual address.
@pytest.mark.polarion('10031')
def test_check_compliance(random_image_instance):
    """Assign the 'OpenSCAP profile' policy profile to a random image
    instance and trigger a compliance check on it."""
    profile = 'OpenSCAP profile'
    random_image_instance.assign_policy_profiles(profile)
    random_image_instance.check_compliance()


def get_table_attr(instance, table_name, attr):
    """Read attribute *attr* from table *table_name* on the Details view.

    Returns None when the view has no table of that name (or the table
    widget is falsy).
    """
    details_view = refresh_and_navigate(instance, 'Details')
    target_table = getattr(details_view.entities, table_name, None)
    if not target_table:
        return None
    return target_table.read().get(attr)


@pytest.mark.meta(blockers=[BZ(1620068, forced_streams=["5.9", "5.10"])])
@pytest.mark.parametrize(('test_item'), TEST_ITEMS)
def test_containers_smartstate_analysis(provider, test_item, soft_assert,
                                        delete_all_container_tasks,
                                        random_image_instance):

    if test_item.is_openscap:
        random_image_instance.assign_policy_profiles('OpenSCAP profile')
    else:
        random_image_instance.unassign_policy_profiles('OpenSCAP profile')

    random_image_instance.perform_smartstate_analysis(wait_for_finish=True)

    view = navigate_to(random_image_instance, 'Details')
    for tbl, attr, verifier in test_item.tested_attr:
@pytest.fixture
def create_vm(appliance, provider, request):
    """Provision a VM on the tested provider, reusing it if it already
    exists, and clean it up afterwards."""
    vm = appliance.collections.infra_vms.instantiate(
        fauxfactory.gen_alphanumeric(15, start="test_clone_"), provider)
    logger.info("provider_key: %s", provider.key)

    # Deploy only when the VM is not already present on the provider.
    if not provider.mgmt.does_vm_exist(vm.name):
        logger.info("deploying %s on provider %s", vm.name, provider.key)
        vm.create_on_provider(allow_skip="default", find_in_cfme=True)
    yield vm
    vm.cleanup_on_provider()


@pytest.mark.provider([VMwareProvider], **filter_fields)
@pytest.mark.meta(blockers=[BZ(1685201)])
@test_requirements.provision
def test_vm_clone(appliance, provider, clone_vm_name, create_vm):
    """
    Polarion:
        assignee: jhenner
        casecomponent: Provisioning
        initialEstimate: 1/6h
    """
    provision_type = 'VMware'
    create_vm.clone_vm("*****@*****.**", "first", "last", clone_vm_name,
                       provision_type)
    request_description = clone_vm_name
    request_row = appliance.collections.requests.instantiate(
        request_description, partial_check=True)
    request_row.wait_for_request(method='ui')
Example #12
0
    if not report_crud_dir.exists:
        report_crud_dir.mkdir()
    for file_name in report_crud_dir.listdir():
        if file_name.isfile() and file_name.basename.endswith(".yaml"):
            result.append(file_name.basename)
    return result


@pytest.fixture(params=crud_files_reports())
def custom_report_values(request):
    """Yield the parsed YAML payload of each report CRUD fixture file."""
    yaml_path = report_crud_dir.join(request.param)
    with yaml_path.open(mode="r") as rep_file:
        return yaml.safe_load(rep_file)


@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[BZ(1541324, forced_streams=["5.9"])])
@pytest.mark.parametrize("group", GROUPS)
def test_shuffle_top_level(appliance, group, report_menus):
    """
    Polarion:
        assignee: pvala
        casecomponent: report
        initialEstimate: 1/6h
    """
    # Shuffle the order
    with report_menus.manage_folder(group) as folder:
        order = shuffle(folder.fields)
        for item in reversed(order):
            folder.move_first(item)
    # Now go and read the tree
    view = navigate_to(appliance.collections.reports, "All")
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.common.provider import CloudInfraProvider
from cfme.common.vm import VM
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.wait import wait_for

pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.meta(blockers=[
        BZ(1511099,
           forced_streams=['5.9', '5.8'],
           unblock=lambda provider: not provider.one_of(GCEProvider)),
    ]),
    pytest.mark.provider([CloudInfraProvider],
                         scope='module',
                         required_fields=[(['cap_and_util',
                                            'test_chargeback'], True)]),
    pytest.mark.usefixtures('has_no_providers_modscope',
                            'setup_provider_modscope'),
    test_requirements.chargeback,
]

# Allowed deviation between the reported value in the Chargeback report and the estimated value.
DEVIATION = 1

    else:
        query = query_metric_db(appliance, provider, 'derived_memory_used',
                                vm_name)
        average_rate = attrgetter('derived_memory_used')

    for record in query:
        if average_rate(record) is not None:
            assert average_rate(record) > 0, 'Zero VM Memory Usage'
            break


@pytest.mark.uncollectif(
    lambda provider: current_version() < "5.7" and provider.type == 'gce')
@pytest.mark.meta(blockers=[
    BZ(1408963,
       forced_streams=["5.7", "5.8", "upstream"],
       unblock=lambda provider: provider.type != 'rhevm')
])
def test_raw_metric_vm_network(metrics_collection, appliance, provider):
    vm_name = provider.data['cap_and_util']['capandu_vm']
    query = query_metric_db(appliance, provider, 'net_usage_rate_average',
                            vm_name)

    for record in query:
        if record.net_usage_rate_average is not None:
            assert record.net_usage_rate_average > 0, 'Zero VM Network IO'
            break


@pytest.mark.uncollectif(lambda provider: provider.type == 'ec2')
@pytest.mark.meta(blockers=[
Example #15
0

@pytest.mark.tier(1)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.parametrize(
    "display", DISPLAY_NAV.keys(), ids=[item.replace(" ", "_") for item in DISPLAY_NAV.keys()]
)
@pytest.mark.uncollectif(
    lambda context, button_group: context == ViaSSUI and "GENERIC" in button_group
)
@pytest.mark.meta(
    blockers=[
        BZ(
            1650066,
            forced_streams=["5.9", "5.10"],
            unblock=lambda display, context: not (
                context is ViaSSUI and display in ["List", "Single and list"]
            ),
        )
    ]
)
def test_custom_button_display(request, appliance, context, display, objects, button_group):
    """ Test custom button display on a targeted page

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: critical
        caseposneg: positive
        testtype: functional
        startsin: 5.8
                "Virtual machine migrated" not in request_details_list.get_message_text(vm_detail))

    # Test1: Check if instance is on openstack/rhevm provider
    soft_assert(not provider.mgmt.find_vms(name=vm_obj.name))

    if provider.one_of(OpenStackProvider):
        # Test2: Check if instance has any volumes attached
        server = provider.mgmt.get_vm(name=vm_obj.name)
        soft_assert(not server.attached_volumes)

        # Test3: Check if instance has any ports attached
        soft_assert(provider.mgmt.get_ports(uuid=server.uuid))


@pytest.mark.tier(1)
@pytest.mark.meta(blockers=[BZ(1746592)], automates=[1755632])
def test_retry_migration_plan(cancel_migration_plan):
    """
    Test to cancel migration and then retry migration
    Polarion:
        assignee: sshveta
        initialEstimate: 1/4h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.10
        casecomponent: V2V

    Bugzilla:
        1755632
        1746592
Example #17
0
def test_custom_button_automate(request, appliance, context, submit, objects, button_group):
    """ Test custom button for automate and requests count as per submit

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: high
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create custom button group with the Object type
            2. Create a custom button with specific submit option and Single and list display
            3. Navigate to object type pages (All and Details)
            4. Check for button group and button
            5. Select/execute button from group dropdown for selected entities
            6. Check for the proper flash message related to button execution
            7. Check automation log requests. Submitted as per selected submit option or not.
            8. Submit all: single request for all entities execution
            9. One by one: separate requests for all entities execution

    Bugzilla:
        1650066
    """

    group, obj_type = button_group
    # The button is always created in the classic UI, regardless of which
    # context the test exercises afterwards.
    with appliance.context.use(ViaUI):
        button = group.buttons.create(
            text=fauxfactory.gen_alphanumeric(),
            hover=fauxfactory.gen_alphanumeric(),
            display_for="Single and list",
            submit=submit,
            system="Request",
            request="InspectMe",
        )
        request.addfinalizer(button.delete_if_exists)

    with appliance.context.use(context):
        # Rebind the navigator to the flavor matching the active context.
        navigate_to = ssui_nav if context is ViaSSUI else ui_nav

        # BZ-1650066: no custom button on All page
        destinations = (
            ["Details"]
            if context == ViaSSUI and BZ(1650066, forced_streams=["5.9", "5.10"]).blocks
            else ["All", "Details"]
        )
        for destination in destinations:
            # objects maps obj_type -> destination -> (entity, navigation destination name)
            obj = objects[obj_type][destination][0]
            dest_name = objects[obj_type][destination][1]
            view = navigate_to(obj, dest_name)
            custom_button_group = Dropdown(view, group.text)
            assert custom_button_group.has_item(button.text)

            # Entity count depends on the destination for `All` available entities and
            # `Details` means a single entity.

            if destination == "All":
                # Some views expose the paginator directly, others nest it
                # under `entities`; fall back accordingly.
                try:
                    paginator = view.paginator
                except AttributeError:
                    paginator = view.entities.paginator

                # Only entities visible on the current page can be selected.
                entity_count = min(paginator.items_amount, paginator.items_per_page)
                view.entities.paginator.check_all()
            else:
                entity_count = 1

            # Clear the automation log
            assert appliance.ssh_client.run_command(
                'echo -n "" > /var/www/miq/vmdb/log/automation.log'
            )

            custom_button_group.item_select(button.text)

            # SSUI not support flash messages
            if context is ViaUI:
                view.flash.assert_message('"{button}" was executed'.format(button=button.text))

            # Submit all: single request for all entity execution
            # One by one: separate requests for all entity execution
            expected_count = 1 if submit == "Submit all" else entity_count
            try:
                wait_for(
                    log_request_check,
                    [appliance, expected_count],
                    timeout=600,
                    message="Check for expected request count",
                    delay=20,
                )
            except TimedOutError:
                assert False, "Expected {count} requests not found in automation log".format(
                    count=str(expected_count)
                )
# -*- coding: utf-8 -*-
import fauxfactory
import pytest

from cfme import test_requirements
from cfme.utils.blockers import BZ
from cfme.utils.log import logger

# NOTE(review): ``pytestmark`` was previously assigned twice in a row; the
# first assignment (``[pytest.mark.tier(3)]``) was dead code, immediately
# shadowed by the second. Keep only the complete definition.
pytestmark = [
    pytest.mark.tier(3),
    pytest.mark.meta(blockers=[BZ(1531600, forced_streams=["5.9"])]),
    test_requirements.chargeback,
]


def _cleanup_report(report):
    """Best-effort deletion of *report*; failures are logged, never raised."""
    try:
        logger.info('Cleaning up report %s', report.menu_name)
        report.delete()
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
        # catch Exception so interpreter-exit signals still propagate.
        logger.warning('Failed to clean up report %s', report.menu_name)


# These tests are meant to catch issues such as BZ 1203022
def test_charge_report_filter_owner(appliance, infra_provider, request):
    """Tests creation of chargeback report that is filtered by owner


    Polarion:
        assignee: nachandr
Example #19
0
        small_vm.wait_for_vm_state_change(small_vm.STATE_OFF)
    else:
        raise Exception("Unknown power state - unable to continue!")


@pytest.fixture(scope='function')
def ensure_vm_running(small_vm):
    """Make sure the VM is powered on before the test starts."""
    if not small_vm.is_pwr_option_available_in_cfme(small_vm.POWER_ON):
        raise Exception("Unknown power state - unable to continue!")
    small_vm.mgmt.ensure_state(VmState.RUNNING)
    small_vm.wait_for_vm_state_change(small_vm.STATE_ON)


@pytest.mark.rhv1
@pytest.mark.meta(blockers=[BZ(1591606, forced_streams=['5.9', '5.10'])])
@pytest.mark.parametrize('change_type', ['cores_per_socket', 'sockets', 'memory'])
def test_vm_reconfig_add_remove_hw_cold(provider, small_vm, ensure_vm_stopped, change_type):
    """Reconfigure one hardware property of a powered-off VM and revert it.

    The ``ensure_vm_stopped`` fixture guarantees the VM is off, so this
    exercises the cold-reconfiguration path for each parametrized
    ``change_type`` (cores per socket, sockets, memory).
    """
    orig_config = small_vm.configuration.copy()
    new_config = prepare_new_config(orig_config, change_type)

    # Apply new config
    reconfigure_vm(small_vm, new_config)

    # Revert back to original config
    reconfigure_vm(small_vm, orig_config)


@pytest.mark.rhv1
@pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:6996')])
@pytest.mark.parametrize('disk_type', ['thin', 'thick'])
    steps = (
        ['workers', 'worker_base'] +
        [step if step != '*' else worker.advanced for step in worker.path])
    patch = {'memory_threshold': new_threshold}
    for step in steps[::-1]:
        patch = {step: patch}
    appliance.server.update_advanced_settings(patch)
    if appliance.version < '5.11' and new_threshold in v510:
        new_threshold = v510[new_threshold]
    return new_threshold


@test_requirements.settings
@pytest.mark.tier(2)
@pytest.mark.meta(blockers=[
    BZ(1787350),
    BZ(1799443,
       unblock=lambda worker, set_memory_threshold: worker.path !=
       QUEUE_WORKER_DEFAULTS_PATH or set_memory_threshold !=
       set_memory_threshold_in_ui)
],
                  automates=[1658373, 1715633])
@pytest.mark.parametrize(
    "set_memory_threshold",
    [set_memory_threshold_in_ui, set_memory_threshold_in_advanced_settings],
    ids=["in_UI", "in_advanced_setting"])
@pytest.mark.parametrize("worker", WORKERS, ids=[x.id for x in WORKERS])
def test_set_memory_threshold(appliance, worker, request,
                              set_memory_threshold):
    """
    Bugzilla:
Example #21
0
def check_ftp(appliance,
              ftp,
              server_name,
              server_zone_id,
              check_ansible_logs=False):
    """Validate collected log archives sitting on the FTP depot.

    Asserts that zip archives named after ``<server_name>_<server_zone_id>``
    exist, that Models/Dialogs archives are also present on appliances >= 5.10
    (unless blocked by BZ 1656318), that the timestamps embedded in each
    archive name parse correctly, optionally (``check_ansible_logs``) that a
    non-empty ansible log is inside the archive (inspected over SSH), and
    that consecutive archives leave no negative time gap between them.
    """
    server_string = server_name + "_" + str(server_zone_id)
    with ftp:
        # Files must have been created after start with server string in it (for ex. EVM_1)
        date_group = '(_.*?){4}'
        zip_files = ftp.filesystem.search(re.compile(r"^.*{}{}[.]zip$".format(
            server_string, date_group)),
                                          directories=False)
        assert zip_files, "No logs found!"
        # Collection of Models and Dialogs introduced in 5.10
        if appliance.version >= '5.10' and not BZ(
                1656318, forced_streams=["5.10"]).blocks:
            models_files = ftp.filesystem.search(re.compile(
                r"^Models_.*{}[.]zip$".format(server_string)),
                                                 directories=False)
            assert models_files, 'No models files found'
            dialogs_files = ftp.filesystem.search(re.compile(
                r"^Dialogs_.*{}[.]zip$".format(server_string)),
                                                  directories=False)
            assert dialogs_files, 'No dialogs files found'

    # Check the times of the files by names
    datetimes = []
    for zip_file in zip_files:
        # files looks like "Current_region_0_default_1_EVM_1_20170127_043343_20170127_051010.zip"
        # 20170127_043343 - date and time
        date = zip_file.name.split("_")
        # underscore-separated fields 7-10 carry the start/end date+time pairs
        date_from = date[7] + date[8]
        # removing ".zip" from last item
        date_to = date[9] + date[10][:-4]
        try:
            date_from = datetime.strptime(date_from, "%Y%m%d%H%M%S")
            date_to = datetime.strptime(date_to, "%Y%m%d%H%M%S")
            # if the file is correct, check ansible logs (~/ROOT/var/log/tower/setup-*) are there
            logs_ansible = "ROOT/var/log/tower/setup" if zip_file.name.startswith("Current") \
                else "log/ansible_tower"
            if ftp.login != 'anonymous' and check_ansible_logs:  # can't login as anon using SSH
                with SSHClient(hostname=ftp.host,
                               username=ftp.login,
                               password=ftp.password) as log_ssh:
                    result = log_ssh.run_command(
                        "unzip -l ~{} | grep {}".format(
                            zip_file.path, logs_ansible),
                        ensure_user=True)
                    assert '.log' in result.output
                    # first column of `unzip -l` output is the file size
                    log_file_size = result.output.split()[0]
                    assert int(log_file_size) > 0, "Log file is empty!"

        except ValueError:
            assert False, "Wrong file matching of {}".format(zip_file.name)
        datetimes.append((date_from, date_to, zip_file.name))

    # Check for the gaps
    if len(datetimes) > 1:
        for i in range(len(datetimes) - 1):
            # start of the next archive must not predate the end of the previous
            dt = datetimes[i + 1][0] - datetimes[i][1]
            assert dt.total_seconds() >= 0.0, (
                "Negative gap between log files ({}, {})".format(
                    datetimes[i][2], datetimes[i + 1][2]))
Example #22
0
    pytest.mark.tier(3), test_requirements.cloud,
    pytest.mark.usefixtures('has_no_providers_modscope',
                            'setup_provider_modscope'),
    pytest.mark.provider([EC2Provider, OpenStackProvider], scope="module")
]


@pytest.fixture()
def keypair(appliance, provider):
    """Create a cloud keypair on the provider and hand it to the test."""
    new_key = appliance.collections.cloud_keypairs.create(
        name=fauxfactory.gen_alphanumeric(), provider=provider)
    assert new_key.exists
    yield new_key


@pytest.mark.meta(automates=[BZ(1718833)])
def test_keypair_crud(appliance, provider):
    """ This will test whether it will create new Keypair and then deletes it.
    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/4h
        testSteps:
            1. Create keypair.
            2. Read keypair.
            3. Delete keypair.
    """
    new_keypair = appliance.collections.cloud_keypairs.create(
        name=fauxfactory.gen_alphanumeric(),
        provider=provider,
    )
    assert new_keypair.exists


@pytest.fixture(scope="function")
def vm_off(vm):
    """ Ensures that the VM is off when the control goes to the test."""
    vm.mgmt.wait_for_steady_state()
    vm.mgmt.ensure_state(VmState.STOPPED)
    return


@pytest.mark.provider(
    [VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
    scope="module",
    selector=ONE_PER_TYPE)
@pytest.mark.meta(blockers=[BZ(1531547)], automates=[1531547])
def test_action_start_virtual_machine_after_stopping(request, vm, vm_on,
                                                     policy_for_testing):
    """ This test tests action 'Start Virtual Machine'

    This test sets the policy that it turns on the VM when it is turned off
    (https://www.youtube.com/watch?v=UOn4gxj2Dso), then turns the VM off and waits for it coming
    back alive.

    Bugzilla:
        1531547

    Metadata:
        test_flag: actions, provision

    Polarion:
    selected = dict(infra=[], cloud=[], container=[])
    # we want to collect these provider categories
    for cat in selected.keys():
        selected[cat].extend(
            {  # quick and dirty uniqueness for types/versions
                prov.klass
                for prov in required_providers
                if prov.category == cat
            }
        )
    return selected


@pytest.mark.tier(0)
@test_requirements.general_ui
@pytest.mark.meta(automates=[BZ(1671844)])
def test_provider_type_support(appliance, soft_assert):
    """Test availability of GCE provider in downstream CFME builds

    Polarion:
        assignee: pvala
        initialEstimate: 1/10h
        casecomponent: WebUI
    """
    classes_to_test = provider_classes(appliance)
    for category, providers in classes_to_test.items():
        try:
            collection = getattr(appliance.collections, providers[0].collection_name)
        except AttributeError:
            msg = 'Missing collection name for a provider class, cannot test UI field'
            logger.exception(msg)
Example #25
0
    stack.wait_for_exists()
    return provision_request, stack


def _cleanup(appliance=None, provision_request=None, service=None):
    """Delete the service behind a stack provisioning request, if it exists.

    When *service* is not given, the service name is recovered from the
    request's "Last Message" cell in the UI.
    """
    if service:
        myservice = service
    else:
        # Message looks like "... service [name] ..." - third token, bracketed.
        last_message = provision_request.get_request_row_from_ui()['Last Message'].text
        service_name = last_message.split()[2].strip('[]')
        myservice = MyService(appliance, service_name)
    if myservice.exists:
        myservice.delete()


@pytest.mark.meta(blockers=[BZ(1628384, forced_streams=['5.10'])])
def test_provision_stack(order_stack):
    """Tests stack provisioning

    Metadata:
        test_flag: provision

    Polarion:
        assignee: jhenner
        initialEstimate: 1/3h
    """
    provision_request, _stack = order_stack
    assert provision_request.is_succeeded()


@pytest.mark.meta(blockers=[BZ(1628384, forced_streams=['5.10'])])
Example #26
0
from cfme.infrastructure import host
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils import conf
from cfme.utils.blockers import BZ
from cfme.utils.update import update
from cfme.utils.wait import wait_for

# Module-wide marks: every test here runs at tier 3, is parametrized over all
# infra providers whose yaml declares 'hosts', and is blocked on BZ 1635126
# for the 5.10 stream.
pytestmark = [
    pytest.mark.tier(3),
    pytest.mark.provider([InfraProvider],
                         required_fields=['hosts'],
                         scope='module'),
    pytest.mark.meta(blockers=[BZ(1635126, forced_streams=['5.10'])]),
]

msgs = {
    'virtualcenter': {
        'default':
        'Cannot complete login due to an incorrect user name or password',
        'remote_login':
        '******',
        'web_services':
        'Cannot complete login due to an incorrect user name or password'
    },
    'rhevm':
    'Login failed due to a bad username or password.',
    'scvmm':
    'Check credentials. Remote error message: WinRM::WinRMAuthorizationError'
Example #27
0
class TestCustomAttributesRESTAPI(object):
    """REST API CRUD tests for custom attributes on the parametrized collections.

    Each test obtains a resource through the ``get_resource`` fixture, attaches
    custom attributes to it with ``add_custom_attributes`` and exercises one
    REST operation (add/edit/delete) against them.
    """
    @pytest.mark.uncollectif(lambda appliance, provider, collection_name:
                             _uncollectif(appliance, provider, collection_name)
                             )
    @pytest.mark.rhv2
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    def test_add(self, request, collection_name, get_resource):
        """Test adding custom attributes to resource using REST API.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: 1/4h
        """
        resource = get_resource[collection_name]()
        attributes = add_custom_attributes(request, resource)
        for attr in attributes:
            record = resource.custom_attributes.get(id=attr.id)
            assert record.name == attr.name
            assert record.value == attr.value

    @pytest.mark.uncollectif(lambda appliance, provider, collection_name:
                             _uncollectif(appliance, provider, collection_name)
                             )
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    def test_delete_from_detail_post(self, request, collection_name,
                                     get_resource):
        """Test deleting custom attributes from detail using POST method.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: 1/4h
        """
        attributes = add_custom_attributes(request,
                                           get_resource[collection_name]())
        delete_resources_from_detail(attributes, method='POST')

    @pytest.mark.uncollectif(
        lambda appliance, provider, collection_name: appliance.version < '5.9'
        or  # BZ 1422596 was not fixed for versions < 5.9
        _uncollectif(appliance, provider, collection_name))
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    def test_delete_from_detail_delete(self, request, collection_name,
                                       get_resource):
        """Test deleting custom attributes from detail using DELETE method.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: 1/4h
        """
        attributes = add_custom_attributes(request,
                                           get_resource[collection_name]())
        delete_resources_from_detail(attributes, method='DELETE')

    @pytest.mark.uncollectif(lambda appliance, provider, collection_name:
                             _uncollectif(appliance, provider, collection_name)
                             )
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    def test_delete_from_collection(self, request, collection_name,
                                    get_resource):
        """Test deleting custom attributes from collection using REST API.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: 1/4h
        """
        resource = get_resource[collection_name]()
        attributes = add_custom_attributes(request, resource)
        collection = resource.custom_attributes
        delete_resources_from_collection(attributes,
                                         collection=collection,
                                         not_found=True)

    @pytest.mark.uncollectif(lambda appliance, provider, collection_name:
                             _uncollectif(appliance, provider, collection_name)
                             )
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    def test_delete_single_from_collection(self, request, collection_name,
                                           get_resource):
        """Test deleting single custom attribute from collection using REST API.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: 1/4h
        """
        resource = get_resource[collection_name]()
        attributes = add_custom_attributes(request, resource)
        attribute = attributes[0]
        collection = resource.custom_attributes
        delete_resources_from_collection([attribute],
                                         collection=collection,
                                         not_found=True)

    @pytest.mark.uncollectif(lambda appliance, provider, collection_name:
                             _uncollectif(appliance, provider, collection_name)
                             )
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    @pytest.mark.parametrize('from_detail', [True, False],
                             ids=['from_detail', 'from_collection'])
    def test_edit(self, request, from_detail, collection_name, appliance,
                  get_resource):
        """Test editing custom attributes using REST API.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: None
        """
        resource = get_resource[collection_name]()
        attributes = add_custom_attributes(request, resource)
        response_len = len(attributes)
        body = []
        for __ in range(response_len):
            uid = fauxfactory.gen_alphanumeric(5)
            body.append({
                'name': 'ca_name_{}'.format(uid),
                'value': 'ca_value_{}'.format(uid),
                'section': 'metadata'
            })
        if from_detail:
            # Edit each attribute one at a time via its own detail endpoint.
            edited = []
            for i in range(response_len):
                edited.append(attributes[i].action.edit(**body[i]))
                assert_response(appliance)
        else:
            for i in range(response_len):
                # presumably _ref_repr() merges the record's reference (href/id)
                # into the payload so the collection-level edit can target it —
                # TODO confirm against the REST client implementation
                body[i].update(attributes[i]._ref_repr())
            edited = resource.custom_attributes.action.edit(*body)
            assert_response(appliance)
        assert len(edited) == response_len
        for i in range(response_len):
            attributes[i].reload()
            assert edited[i].name == body[i]['name'] == attributes[i].name
            assert edited[i].value == body[i]['value'] == attributes[i].value
            assert edited[i].section == body[i]['section'] == attributes[
                i].section

    @pytest.mark.uncollectif(
        lambda appliance, provider, collection_name:
        # BZ 1516762 was not fixed for versions < 5.9
        (appliance.version < '5.9' and collection_name != 'providers'
         ) or _uncollectif(appliance, provider, collection_name))
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    @pytest.mark.meta(blockers=[
        BZ(1516762,
           forced_streams=['5.9', 'upstream'],
           unblock=lambda collection_name: collection_name not in
           ('vms', 'instances'))
    ])
    @pytest.mark.parametrize('from_detail', [True, False],
                             ids=['from_detail', 'from_collection'])
    def test_bad_section_edit(self, request, from_detail, collection_name,
                              appliance, get_resource):
        """Test that editing custom attributes using REST API and adding invalid section fails.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: 1/4h
        """
        resource = get_resource[collection_name]()
        attributes = add_custom_attributes(request, resource)
        response_len = len(attributes)
        body = []
        for __ in range(response_len):
            body.append({'section': 'bad_section'})
        if from_detail:
            for i in range(response_len):
                with pytest.raises(Exception, match='Api::BadRequestError'):
                    attributes[i].action.edit(**body[i])
                assert_response(appliance, http_status=400)
        else:
            for i in range(response_len):
                body[i].update(attributes[i]._ref_repr())
            with pytest.raises(Exception, match='Api::BadRequestError'):
                resource.custom_attributes.action.edit(*body)
            assert_response(appliance, http_status=400)

    @pytest.mark.uncollectif(
        lambda appliance, provider, collection_name:
        # BZ 1516762 was not fixed for versions < 5.9
        (appliance.version < '5.9' and collection_name != 'providers'
         ) or _uncollectif(appliance, provider, collection_name))
    @pytest.mark.parametrize("collection_name", COLLECTIONS)
    @pytest.mark.meta(blockers=[
        BZ(1516762,
           forced_streams=['5.9', 'upstream'],
           unblock=lambda collection_name: collection_name not in
           ('vms', 'instances'))
    ])
    def test_bad_section_add(self, request, collection_name, appliance,
                             get_resource):
        """Test adding custom attributes with invalid section to resource using REST API.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: 1/4h
        """
        resource = get_resource[collection_name]()
        add_custom_attributes(request, resource)
        uid = fauxfactory.gen_alphanumeric(5)
        body = {
            'name': 'ca_name_{}'.format(uid),
            'value': 'ca_value_{}'.format(uid),
            'section': 'bad_section'
        }
        with pytest.raises(Exception, match='Api::BadRequestError'):
            resource.custom_attributes.action.add(body)
        assert_response(appliance, http_status=400)

    @pytest.mark.uncollectif(lambda appliance, provider, collection_name:
                             # BZ 1544800 was not fixed for versions < 5.9
                             appliance.version < '5.9' or _uncollectif(
                                 appliance, provider, collection_name))
    @pytest.mark.parametrize('collection_name', COLLECTIONS)
    def test_add_duplicate(self, request, collection_name, get_resource):
        """Tests that adding duplicate custom attribute updates the existing one.

        Testing BZ 1544800

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            initialEstimate: None
        """
        resource = get_resource[collection_name]()
        orig_attribute, = add_custom_attributes(request, resource, num=1)

        new_attribute = resource.custom_attributes.action.add({
            'name':
            orig_attribute.name,
            'value':
            'updated_value'
        })[0]
        assert orig_attribute.name == new_attribute.name
        assert orig_attribute.id == new_attribute.id
        assert new_attribute.value == 'updated_value'
Example #28
0
                                        temp_appliance_preconfig_modscope):
    """Set and check timezones are set correctly through appliance conosle cli

    Polarion:
        assignee: sbulage
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/12h
    """
    app = temp_appliance_preconfig_modscope
    app.ssh_client.run_command(
        "appliance_console_cli --timezone {}".format(timezone))
    app.appliance_console.timezone_check(timezone)


@pytest.mark.meta(blockers=[BZ(1598427, forced_streams=['5.9', '5.10'])])
@pytest.mark.tier(2)
def test_appliance_console_cli_set_hostname(configured_appliance):
    """Set the hostname via appliance_console_cli and verify it took effect.

    Polarion:
        assignee: sbulage
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/12h
    """
    expected = 'test.example.com'
    configured_appliance.appliance_console_cli.set_hostname(expected)
    cmd_result = configured_appliance.ssh_client.run_command("hostname -f")
    # short-circuit: only compare the output once the command itself succeeded
    assert cmd_result.success and cmd_result.output.strip() == expected
Example #29
0
def test_targeted_refresh_volume(appliance, create_vm, provider, request):
    """
    AWS naming is EBS

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        initialEstimate: 2/3h
        startsin: 5.9
        testSteps:
            1. Volume CREATE
            2. Volume UPDATE
            3. Volume ATTACH
            4. Volume DETACH
            5. Volume DELETE
    """
    volume_collection = appliance.rest_api.collections.cloud_volumes
    vm = create_vm

    def wait_until_refreshed(name):
        # Poll until a volume with this name becomes visible via the REST API.
        wait_for(lambda: volume_collection.get(name=name),
                 delay=DELAY,
                 timeout=TIMEOUT,
                 handle_exception=True)

    # create
    volume_name = fauxfactory.gen_alpha()
    volume = provider.mgmt.create_volume(vm.mgmt.az, name=volume_name)
    if not volume:
        pytest.fail("Volume wasn't successfully created using API!")
    request.addfinalizer(lambda: cleanup_if_exists(volume))
    wait_until_refreshed(volume_name)

    # update name
    new_volume_name = fauxfactory.gen_alpha()
    volume.rename(new_volume_name)
    wait_until_refreshed(new_volume_name)

    # update size
    if not BZ(1754874, forced_streams=["5.10", "5.11"]).blocks:
        new_size = 20
        volume.resize(new_size)
        # size is reported in bytes by the REST API
        wait_for(lambda: volume_collection.get(name=new_volume_name).size ==
                 (new_size * 1024 * 1024 * 1024),
                 delay=DELAY,
                 timeout=TIMEOUT,
                 handle_exception=True)

    # attach
    volume.attach(vm.mgmt.uuid)
    wait_until_refreshed(new_volume_name)

    # detach
    volume.detach(vm.mgmt.uuid)
    wait_until_refreshed(new_volume_name)

    # delete
    wait_for(lambda: volume.cleanup(),
             delay=DELAY,
             timeout=TIMEOUT,
             handle_exception=True)
    wait_for_deleted(volume_collection, new_volume_name)
Example #30
0
 def create(self,
            name=None,
            display_name=None,
            location='inline',
            script=None,
            data=None,
            cancel=False,
            validate=True,
            repository=None,
            playbook=None,
            machine_credential=None,
            hosts=None,
            max_ttl=None,
            escalate_privilege=None,
            verbosity=None,
            playbook_input_parameters=None):
     """Add a new Automate Method through the UI.

     Fills the 'Add' form according to *location* ('inline' or 'playbook'),
     optionally validates the entered data, and either cancels the form or
     submits it.

     Returns:
         The instantiated method object, or ``None`` when *cancel* is set.
     """
     form = navigate_to(self, 'Add', wait_for_view=True)
     form.fill({'location': location})
     if location == 'inline':
         form.fill({'inline_name': name,
                    'inline_display_name': display_name,
                    'script': script,
                    'data': data})
     elif location == 'playbook':
         form.fill({'playbook_name': name,
                    'playbook_display_name': display_name,
                    'repository': repository})
         # The playbook selector is only populated after a repository is chosen.
         wait_for(lambda: form.playbook.is_displayed, delay=0.5, num_sec=2)
         form.fill({'playbook': playbook,
                    'machine_credential': machine_credential,
                    'hosts': hosts,
                    'max_ttl': max_ttl,
                    'escalate_privilege': escalate_privilege,
                    'verbosity': verbosity,
                    'playbook_input_parameters': playbook_input_parameters})
         # Playbook methods are never validated from this form.
         validate = False
     if validate and not BZ(1499881, forced_streams=['5.9']).blocks:
         form.validate_button.click()
         form.flash.assert_no_error()
         form.flash.assert_message('Data validated successfully')
     if cancel:
         form.cancel_button.click()
         form.flash.assert_no_error()
         form.flash.assert_message(
             'Add of new Automate Method was cancelled by the user')
         return None
     form.add_button.click()
     form.flash.assert_no_error()
     return self.instantiate(name=name,
                             display_name=display_name,
                             location=location,
                             script=script,
                             data=data,
                             repository=repository,
                             playbook=playbook,
                             machine_credential=machine_credential,
                             hosts=hosts,
                             max_ttl=max_ttl,
                             escalate_privilege=escalate_privilege,
                             verbosity=verbosity,
                             playbook_input_parameters=playbook_input_parameters)