Example #1
@pytest.fixture
def log_monitor(user_obj, temp_appliance_preconfig_long):
    """Search evm.log for any plaintext password"""
    result = LogValidator("/var/www/miq/vmdb/log/evm.log",
                          failure_patterns=[f"{user_obj.credential.secret}"],
                          hostname=temp_appliance_preconfig_long.hostname)
    result.start_monitoring()
    yield result
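A minimal sketch of a consumer for the fixture above; the login call is a hypothetical stand-in for any action that could write the credential to the log:
def test_no_plaintext_password(log_monitor, user_obj, temp_appliance_preconfig_long):
    """Sketch: exercise the credential while the fixture monitors evm.log."""
    # hypothetical action that sends the credential through the appliance
    temp_appliance_preconfig_long.server.login(user_obj)
    # fails if the plaintext secret appeared in evm.log
    assert log_monitor.validate(wait="60s")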
Example #2
def test_refresh_with_empty_iot_hub_azure(request, provider, setup_provider):
    """
    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: low
        initialEstimate: 1/6h
        setup: prepare env
               create an IoT Hub in Azure (the free pricing tier is sufficient):
               $ az iot hub create --name rmanes-iothub --resource-group iot_rg
        testSteps:
            1. refresh azure provider
        expectedResults:
            1. no errors found in logs
    Bugzilla:
        1495318
    """
    result = LogValidator("/var/www/miq/vmdb/log/evm.log",
                          failure_patterns=[r".*ERROR.*"])
    result.start_monitoring()
    azure = provider.mgmt
    if not azure.has_iothub():
        iothub_name = fauxfactory.gen_alpha(18, start="potatoiothub_")
        azure.create_iothub(iothub_name)
        request.addfinalizer(lambda: azure.delete_iothub(iothub_name))
        assert azure.has_iothub()
    provider.refresh_provider_relationships(wait=600)
    assert result.validate(wait="60s")
Example #3
def test_retry_migration_plan(cancel_migration_plan):
    """
    Test that a cancelled migration plan can be retried
    Polarion:
        assignee: sshveta
        initialEstimate: 1/4h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.10
        casecomponent: V2V

    Bugzilla:
        1755632
        1746592
    """
    migration_plan = cancel_migration_plan
    view = navigate_to(migration_plan, "Complete")
    # Retry Migration
    view.plans_completed_list.migrate_plan(migration_plan.name)
    assert migration_plan.wait_for_state("Started")

    # Automating BZ 1755632
    retry_interval_log = LogValidator(
        '/var/www/miq/vmdb/log/evm.log',
        matched_patterns=[r'.*to Automate for delivery in \[60\] seconds.*'])
    retry_interval_log.start_monitoring()
    # search logs and wait for validation
    assert retry_interval_log.validate(wait="150s")

    assert migration_plan.wait_for_state("In_Progress")
    assert migration_plan.wait_for_state("Completed")
    assert migration_plan.wait_for_state("Successful")
Example #4
def test_snapshot_crud(create_vm, provider):
    """Tests snapshot crud

    Metadata:
        test_flag: snapshot, provision

    Polarion:
        assignee: prichard
        casecomponent: Infra
        initialEstimate: 1/6h
    """
    result = LogValidator(
        "/var/www/miq/vmdb/log/evm.log",
        failure_patterns=[r".*ERROR.*"],
    )
    result.start_monitoring()
    # has_name is false if testing RHEVMProvider
    snapshot = new_snapshot(create_vm,
                            has_name=(not provider.one_of(RHEVMProvider)))
    snapshot.create()
    # check the snapshot size as a "read" check
    if provider.appliance.version >= "5.11" and provider.one_of(RHEVMProvider):
        assert snapshot.size
    snapshot.delete()
    provider.refresh_provider_relationships(wait=600)
    assert result.validate(wait="60s")
Example #5
def test_check_system_request_calls_depr_conf_mgmt(appliance, copy_instance):
    """
    Polarion:
        assignee: ghubale
        initialEstimate: 1/8h
        caseimportance: low
        caseposneg: positive
        testtype: functional
        startsin: 5.10
        casecomponent: Automate
        tags: automate
        setup:
            1. Copy /System/Request/ansible_tower_job instance to new domain
        testSteps:
            1. Run that instance(ansible_tower_job) using simulation
            2. See automation log
        expectedResults:
            1.
            2. The /System/Request/ansible_tower_job instance should call the newer
               "/AutomationManagement/AnsibleTower/Operations/StateMachines/Job/default" method

    Bugzilla:
        1615444
    """
    search = '/AutomationManagement/AnsibleTower/Operations/StateMachines/Job/default'
    result = LogValidator(
        "/var/www/miq/vmdb/log/automation.log", matched_patterns=[".*{}.*".format(search)]
    )
    result.start_monitoring()
    # Executing the automate instance - 'ansible_tower_job' using simulation
    simulate(
        appliance=appliance,
        request=copy_instance.name
    )
    assert result.validate(wait="60s")
Example #6
def test_attribute_override(appliance, request, provider, setup_provider,
                            buttongroup):
    """ Test custom button attribute override

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: medium
        caseposneg: positive
        testtype: nonfunctional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. create a custom button to request the call_instance_with_message
            2. set the message to create
            3. set the attributes instance, class, namespace to "whatever"
            4. set the attribute message to "my_message"
            5. save it

    Bugzilla:
        1651099
    """
    attributes = [
        ("class", "Request"),
        ("instance", "TestNotification"),
        ("message", "digitronik_msg"),
        ("namespace", "/System"),
    ]
    req = "call_instance_with_message"
    patterns = [
        "[miqaedb:/System/Request/TestNotification#create]",
        "[miqaedb:/System/Request/TestNotification#digitronik_msg]"
    ]

    group = buttongroup("PROVIDER")
    button = group.buttons.create(
        text="btn_{}".format(fauxfactory.gen_alphanumeric(3)),
        hover="hover_{}".format(fauxfactory.gen_alphanumeric(3)),
        system="Request",
        request=req,
        attributes=attributes,
    )
    request.addfinalizer(button.delete_if_exists)

    # Initialize Log Checks
    log = LogValidator("/var/www/miq/vmdb/log/automation.log",
                       matched_patterns=patterns)
    log.start_monitoring()

    # Execute button
    view = navigate_to(provider, "Details")
    custom_button_group = Dropdown(view, group.hover)
    custom_button_group.item_select(button.text)

    # Simulate button
    button.simulate(provider.name, request=req)

    # validate log requests for simulation and actual execution
    assert log.validate(wait="120s")
Example #7
def test_service_provisioning_email(request, appliance, catalog_item):
    """
    Polarion:
        assignee: nansari
        casecomponent: Services
        caseposneg: negative
        initialEstimate: 1/4h

    Bugzilla:
        1668004
    """
    result = LogValidator("/var/www/miq/vmdb/log/automation.log",
                          failure_patterns=[".*Error during substitution.*"])
    result.start_monitoring()
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog,
                                       catalog_item.name)
    service_catalogs.order()
    request_description = (
        "Provisioning Service [{catalog_item_name}] from [{catalog_item_name}]"
        .format(catalog_item_name=catalog_item.name))
    provision_request = appliance.collections.requests.instantiate(
        request_description)
    provision_request.wait_for_request(method='ui')
    request.addfinalizer(provision_request.remove_request)
    assert result.validate(wait="60s")
Example #8
def test_action_power_on_audit(request, vm, vm_off, policy_for_testing):
    """ This test tests action 'Generate Audit Event'.

    This test sets the policy that it logs powering on of the VM. Then it powers up the vm and
    checks whether audit logs contain message about that.

    Metadata:
        test_flag: actions, provision

    Polarion:
        assignee: dgaikwad
        initialEstimate: 1/6h
        casecomponent: Control
    """
    policy_result = LogValidator(
        "/var/www/miq/vmdb/log/audit.log",
        matched_patterns=[
            r'.*policy: \[{}\], event: \[VM Power On\]'.format(
                policy_for_testing.description
            )
        ]
    )
    policy_result.start_monitoring()
    # Set up the policy and prepare finalizer
    policy_for_testing.assign_actions_to_event("VM Power On", ["Generate Audit Event"])

    @request.addfinalizer
    def _cleanup():
        policy_for_testing.unassign_events("VM Power On")

    # Start the VM
    vm.mgmt.ensure_state(VmState.RUNNING)

    # Search the logs and wait for validation
    assert policy_result.validate(wait="180s")
Example #9
def test_appliance_console_cli_external_auth(auth_type, ipa_crud,
                                             configured_appliance):
    """
    Polarion:
        assignee: dgaikwad
        caseimportance: high
        casecomponent: Auth
        initialEstimate: 1/4h
    """
    evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
                            matched_patterns=[f'.*{auth_type} to true.*'],
                            hostname=configured_appliance.hostname)
    evm_tail.start_monitoring()
    cmd_set = f'appliance_console_cli --extauth-opts="/authentication/{auth_type}=true"'
    assert configured_appliance.ssh_client.run_command(cmd_set)
    assert evm_tail.validate(wait="30s")

    evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
                            matched_patterns=[f'.*{auth_type} to false.*'],
                            hostname=configured_appliance.hostname)

    evm_tail.start_monitoring()
    cmd_unset = f'appliance_console_cli --extauth-opts="/authentication/{auth_type}=false"'
    assert configured_appliance.ssh_client.run_command(cmd_unset)
    assert evm_tail.validate(wait="30s")
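The set and unset halves above differ only in the boolean, so a loop over both states is a possible refactor; this sketch uses only the calls already shown:
    for state in ("true", "false"):
        evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
                                matched_patterns=[f'.*{auth_type} to {state}.*'],
                                hostname=configured_appliance.hostname)
        evm_tail.start_monitoring()
        cmd = f'appliance_console_cli --extauth-opts="/authentication/{auth_type}={state}"'
        assert configured_appliance.ssh_client.run_command(cmd)
        assert evm_tail.validate(wait="30s")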
Example #10
def test_delete_vm_on_provider_side(vm_test, provider):
    """ Delete VM on the provider side and refresh relationships in CFME

    Polarion:
        assignee: anikifor
        initialEstimate: 1/4h
        casecomponent: Infra

    Bugzilla:
        1592430
    """
    logs = LogValidator("/var/www/miq/vmdb/log/evm.log",
                        failure_patterns=[".*ERROR.*"])
    logs.start_monitoring()
    vm_test.cleanup_on_provider()
    provider.refresh_provider_relationships()
    try:
        wait_for(provider.is_refreshed,
                 func_kwargs={'refresh_delta': 10},
                 timeout=600)
    except TimedOutError:
        pytest.fail(
            "Provider failed to refresh after VM was removed from the provider"
        )
    assert logs.validate(wait="60s")
Example #11
def test_domain_lock_disabled(klass):
    """
    Polarion:
        assignee: ghubale
        casecomponent: Automate
        caseimportance: medium
        initialEstimate: 1/16h
        tags: automate
    """
    schema_field = fauxfactory.gen_alphanumeric()
    # Disable automate domain
    with update(klass.namespace.domain):
        klass.namespace.domain.enabled = False

    # Adding schema for executing automate method
    klass.schema.add_fields({'name': schema_field, 'type': 'Method', 'data_type': 'String'})

    # Adding automate method
    method = klass.methods.create(
        name=fauxfactory.gen_alphanumeric(),
        display_name=fauxfactory.gen_alphanumeric(),
        location='inline'
    )

    # Adding instance to call automate method
    instance = klass.instances.create(
        name=fauxfactory.gen_alphanumeric(),
        display_name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        fields={schema_field: {'value': method.name}}
    )

    result = LogValidator(
        "/var/www/miq/vmdb/log/automation.log",
        matched_patterns=[r".*ERROR.*"],
    )
    result.start_monitoring()

    # Executing automate method using simulation
    simulate(
        appliance=klass.appliance,
        attributes_values={
            "namespace": klass.namespace.name,
            "class": klass.name,
            "instance": instance.name,
        },
        message="create",
        request="Call_Instance",
        execute_methods=True,
    )
    assert result.validate(wait="60s")

    klass.namespace.domain.lock()
    view = navigate_to(klass.namespace.domain, 'Details')
    assert 'Disabled' in view.title.text
    assert 'Locked' in view.title.text

    # Need to unlock the domain to perform teardown on domain, namespace, class
    klass.namespace.domain.unlock()
Example #12
def test_miq_password_decrypt(klass):
    """
    Polarion:
        assignee: ghubale
        casecomponent: Automate
        initialEstimate: 1/3h

    Bugzilla:
        1720432
    """
    # Ruby script for decrypting password
    script = (
        'require "manageiq-password"\n'
        'root_password = MiqPassword.encrypt("abc")\n'
        '$evm.log("info", "Root Password is #{root_password}")\n'
        'root_password_decrypted = MiqPassword.decrypt(root_password)\n'
        '$evm.log("info", "Decrypted password is #{root_password_decrypted}")')

    # Adding schema for executing method
    klass.schema.add_fields({
        'name': 'execute',
        'type': 'Method',
        'data_type': 'String'
    })

    # Adding automate method
    method = klass.methods.create(name=fauxfactory.gen_alphanumeric(),
                                  display_name=fauxfactory.gen_alphanumeric(),
                                  location='inline',
                                  script=script)

    # Adding instance to call automate method
    instance = klass.instances.create(
        name=fauxfactory.gen_alphanumeric(),
        display_name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        fields={'execute': {
            'value': method.name
        }})

    result = LogValidator(
        "/var/www/miq/vmdb/log/automation.log",
        matched_patterns=[".*Decrypted password is abc.*"],
    )
    result.start_monitoring()

    # Executing method via simulation to check decrypted password
    simulate(
        appliance=klass.appliance,
        attributes_values={
            "namespace": klass.namespace.name,
            "class": klass.name,
            "instance": instance.name,
        },
        message="create",
        request="Call_Instance",
        execute_methods=True,
    )
    assert result.validate()
Example #13
def test_configuration_dropdown_roles_by_server(appliance, request):
    """
    Polarion:
        assignee: pvala
        casecomponent: Configuration
        caseimportance: high
        initialEstimate: 1/15h
        testSteps:
            1. Navigate to Settings -> Configuration -> Diagnostics -> CFME Region ->
                Roles by Servers.
            2. Select a Role and check the `Configuration` dropdown in toolbar.
            3. Check the `Suspend Role` option.
            4. Click the `Suspend Role` option and suspend the role
                and monitor production.log for error -
                `Error caught: [ActiveRecord::RecordNotFound] Couldn't find MiqServer with 'id'=0`
        expectedResults:
            1.
            2. `Configuration` dropdown must be enabled/active.
            3. `Suspend Role` must be enabled.
            4. Role must be suspended and there must be no error in the logs.

    Bugzilla:
        1715466
        1455283
        1404280
        1734393
    """
    # 1
    view = navigate_to(appliance.server.zone.region, "RolesByServers")

    # 2
    view.rolesbyservers.tree.select_item("SmartState Analysis")
    assert view.rolesbyservers.configuration.is_displayed

    # 3
    assert view.rolesbyservers.configuration.item_enabled("Suspend Role")

    # 4
    log = LogValidator(
        "/var/www/miq/vmdb/log/production.log",
        failure_patterns=[
            ".*Error caught: .*ActiveRecord::RecordNotFound.* Couldn't find MiqServer with 'id'=.*"
        ],
    )

    log.start_monitoring()
    view.rolesbyservers.configuration.item_select("Suspend Role",
                                                  handle_alert=True)

    request.addfinalizer(lambda: view.rolesbyservers.configuration.item_select(
        "Start Role", handle_alert=True))

    view.flash.assert_message("Suspend successfully initiated")

    assert log.validate(wait="20s")

    if BZ(1734393, forced_streams=["5.10"]).blocks:
        view.rolesbyservers.tree.select_item("SmartState Analysis")
    assert "available" in view.rolesbyservers.tree.currently_selected_role
Example #14
def test_service_ansible_verbosity(
    appliance,
    request,
    local_ansible_catalog_item,
    ansible_service_catalog,
    ansible_service_request,
    ansible_service,
    verbosity,
):
    """Check if the different Verbosity levels can be applied to service and
    monitor the std out
    Bugzilla:
        1460788
    Polarion:
        assignee: sbulage
        casecomponent: Ansible
        caseimportance: medium
        initialEstimate: 1/6h
        tags: ansible_embed
    """
    # verbosity[0] is the numeric level, giving a pattern like "verbosity"=>0.
    pattern = '"verbosity"=>{}'.format(verbosity[0])
    with update(local_ansible_catalog_item):
        local_ansible_catalog_item.provisioning = {"verbosity": verbosity}
        local_ansible_catalog_item.retirement = {"verbosity": verbosity}
    # Log Validator
    log = LogValidator("/var/www/miq/vmdb/log/evm.log",
                       matched_patterns=[pattern])
    # Start the log check for the given pattern
    log.start_monitoring()

    @request.addfinalizer
    def _revert():
        service = MyService(appliance, local_ansible_catalog_item.name)
        if ansible_service_request.exists():
            ansible_service_request.wait_for_request()
            appliance.rest_api.collections.service_requests.action.delete(
                id=service_request.id)
        if service.exists:
            service.delete()

    ansible_service_catalog.order()
    ansible_service_request.wait_for_request()
    # 'request_descr' and 'service_request' are used in the finalizer to remove
    # the first service request
    request_descr = (
        f"Provisioning Service [{local_ansible_catalog_item.name}] "
        f"from [{local_ansible_catalog_item.name}]")
    service_request = appliance.rest_api.collections.service_requests.get(
        description=request_descr)
    # Search evm.log for a string such as '"verbosity"=>0', since the playbook's
    # standard output is logged there
    assert log.validate(wait="60s")
    logger.info("Pattern found {}".format(log.matched_patterns))

    view = navigate_to(ansible_service, "Details")
    assert verbosity[0] == view.provisioning.details.get_text_of("Verbosity")
    assert verbosity[0] == view.retirement.details.get_text_of("Verbosity")
Example #15
def test_action_prevent_host_ssa(request, host, host_policy):
    """Tests preventing Smart State Analysis on a host.

    This test sets the policy that prevents host analysis.

    Bugzilla:
        1437910

    Metadata:
        test_flag: actions, provision

    Polarion:
        assignee: jdupuy
        initialEstimate: 1/4h
        casecomponent: Control
    """
    host_policy.assign_actions_to_event(
        "Host Analysis Request", ["Prevent current event from proceeding"])

    @request.addfinalizer
    def _cleanup():
        host_policy.unassign_events("Host Analysis Request")

    policy_result = LogValidator(
        "/var/www/miq/vmdb/log/policy.log",
        matched_patterns=[
            '.*Prevent current event from proceeding.*Host Analysis Request.*{}'
            .format(host.name)
        ])
    policy_result.start_monitoring()

    view = navigate_to(host, "Details")

    def _scan():
        return view.entities.summary("Relationships").get_text_of(
            "Drift History")

    original = _scan()
    view.toolbar.configuration.item_select("Perform SmartState Analysis",
                                           handle_alert=True)
    view.flash.assert_success_message(
        '"{}": Analysis successfully initiated'.format(host.name))
    try:
        # The Drift History count is expected NOT to change: the policy
        # prevents the analysis, so this wait_for should time out
        wait_for(
            lambda: _scan() != original,
            num_sec=60,
            delay=5,
            fail_func=view.browser.refresh,
            message="Check if Drift History field is changed",
        )
    except TimedOutError:
        assert policy_result.validate(wait="120s")
    else:
        pytest.fail("CFME did not prevent analysing the Host {}".format(
            host.name))
Example #16
def test_provider_log_level(appliance, provider, log_exists):
    """
    Tests that log level in advanced settings affects log files

    Bugzilla:
        1633656
        1640718

    Metadata:
        test_flag: log

    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
        casecomponent: Configuration
        testSteps:
            1. Change log level to info
            2. Refresh provider
            3. Check logs do contain info messages
            4. Change log level to warn
            5. Refresh provider
            6. Check there are no info messages in the log
            7. Reset log level back
    """
    assert log_exists, f"Log file {provider.log_name}.log doesn't exist"
    log_level = appliance.server.advanced_settings['log'][f'level_{provider.log_name}']
    log = f'/var/www/miq/vmdb/log/{provider.log_name}.log'
    # set log level to info
    wait_for(lambda: appliance.server.update_advanced_settings(
        {'log': {f'level_{provider.log_name}': 'info'}}), timeout=300)
    lv_info = LogValidator(log, matched_patterns=['.*INFO.*'], failure_patterns=['.*DEBUG.*'])
    lv_info.start_monitoring()
    provider.refresh_provider_relationships(wait=600)
    assert lv_info.validate(wait="60s")

    # set log level to warn
    wait_for(lambda: appliance.server.update_advanced_settings(
        {'log': {f'level_{provider.log_name}': 'warn'}}), timeout=300)
    lv = LogValidator(log, failure_patterns=['.*INFO.*'])

    def _no_info():
        lv.start_monitoring()
        provider.refresh_provider_relationships(wait=600)
        try:
            assert lv.validate()
        except FailPatternMatchError:
            # INFO lines were found; signal wait_for to retry
            return False
        # no INFO lines matched the failure pattern
        return True

    # after changing the log level it doesn't take effect immediately, so might require 1-2 extra
    # times to make sure there are no unwanted messages (from before the log change)
    wait_for(_no_info, num_sec=900, delay=40, message="no INFOs in the log")
    # set log level back
    appliance.server.update_advanced_settings(
        {'log': {f'level_{provider.log_name}': log_level}})
Example #17
def test_send_text_custom_report_with_long_condition(appliance, setup_provider,
                                                     smtp_test, request,
                                                     get_report):
    """
    Polarion:
        assignee: pvala
        casecomponent: Reporting
        caseimportance: medium
        initialEstimate: 1/3h
        setup:
            1. Create a report containing 1 or 2 columns
                and add a report filter with a long condition.(Refer BZ for more detail)
            2. Create a schedule for the report and check send_txt.
        testSteps:
            1. Queue the schedule and monitor evm log.
        expectedResults:
            1. There should be no error in the log and report must be sent successfully.

    Bugzilla:
        1677839
    """
    report = get_report("long_condition_report.yaml",
                        "test_long_condition_report")
    data = {
        "timer": {
            "hour": "12",
            "minute": "10"
        },
        "email": {
            "to_emails": "*****@*****.**"
        },
        "email_options": {
            "send_if_empty": True,
            "send_txt": True
        },
    }
    schedule = report.create_schedule(**data)
    request.addfinalizer(schedule.delete_if_exists)

    # prepare LogValidator
    log = LogValidator("/var/www/miq/vmdb/log/evm.log",
                       failure_patterns=[".*negative argument.*"])

    log.start_monitoring()
    schedule.queue()

    # assert that the mail was sent
    assert len(smtp_test.wait_for_emails(
        wait=200, to_address=data["email"]["to_emails"])) == 1
    # assert that the pattern was not found in the logs
    assert log.validate(), "Found error message in the logs."
Example #18
def test_infrastructure_hosts_refresh_multi(appliance,
                                            setup_provider_min_hosts,
                                            provider):
    """
    Polarion:
        assignee: prichard
        casecomponent: Infra
        caseimportance: low
        initialEstimate: 1/6h
        testSteps:
            1. Navigate to the Compute > Infrastructure > Providers view.
            2. Click on a provider quadicon, and then the hosts link along the top row of the view.
            3. Select all hosts (need at least 2 hosts) by checking the box in upper left of
               quadicons.
            4. Click "Refresh Relationships and Power States" under the Configuration
               dropdown, and then click "OK" when prompted.
        expectedResults:
            1. Providers view is displayed.
            2. Hosts view is displayed.
            3.
            4. "Refresh initiated for X Hosts from the CFME Database" is displayed in green
               banner where "X" is the number of selected hosts. Properties for each host are
               refreshed. Making changes to test pre-commithooks
    """
    num_refresh = 2
    my_slice = slice(0, num_refresh, None)
    hosts_view = navigate_to(provider.collections.hosts, "All")
    num_hosts = hosts_view.entities.paginator.items_amount
    if num_hosts < num_refresh:
        pytest.skip('not enough hosts in appliance UI to run test')
    evm_tail = LogValidator(
        '/var/www/miq/vmdb/log/evm.log',
        matched_patterns=[
            f"'Refresh Provider' successfully initiated for "
            f"{num_refresh} Hosts"
        ],
        hostname=appliance.hostname)
    evm_tail.start_monitoring()
    for h in hosts_view.entities.get_all(slice=my_slice):
        h.check()
    hosts_view.toolbar.configuration.item_select(
        'Refresh Relationships and Power States', handle_alert=True)
    hosts_view.flash.assert_success_message(
        f'Refresh initiated for {num_refresh} Hosts from the CFME Database')
    try:
        wait_for(provider.is_refreshed,
                 func_kwargs={'force_refresh': False},
                 num_sec=300,
                 delay=10)
    except TimedOutError:
        pytest.fail("Hosts were not refreshed within given time")
    assert evm_tail.validate(wait="30s")
Example #19
def test_automate_methods_from_dynamic_dialog_should_run_as_per_designed(
        request, appliance, import_datastore, import_data, import_dialog,
        catalog, soft_assert):
    """
    Bugzilla:
        1571000

    Polarion:
        assignee: nansari
        casecomponent: Services
        testtype: functional
        initialEstimate: 1/16h
        startsin: 5.9
        tags: service
    """
    sd, ele_label = import_dialog
    catalog_item = appliance.collections.catalog_items.create(
        appliance.collections.catalog_items.GENERIC,
        name=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        display_in=True,
        catalog=catalog,
        dialog=sd,
    )
    request.addfinalizer(catalog_item.delete_if_exists)
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog,
                                       catalog_item.name)
    patterns = [
        ".*CC- dialog_instance1 value=.*",
        ".*CC- dialog_instance2 value=.*",
        ".*CC- dialog_instance3 value=.*",
    ]

    # Check that the automate method is triggered exactly once by each of the
    # three instances after the catalog item is ordered
    result = LogValidator("/var/www/miq/vmdb/log/automation.log",
                          matched_patterns=patterns)
    result.start_monitoring()
    view = navigate_to(service_catalogs, "Order")
    for pattern in patterns:
        soft_assert(result.matches[pattern] == 1)

    with LogValidator("/var/www/miq/vmdb/log/automation.log",
                      failure_patterns=patterns).waiting(timeout=120):
        # 'label1' (the value of ele_label), 'label2' and 'label3' are the element
        # names of three different text boxes attached to the service dialog
        for ele_name in [ele_label, "label2", "label3"]:
            # Checking if automate method is not triggered after updating values of dialog widgets
            view.fields(ele_name).input.fill(fauxfactory.gen_alphanumeric())
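The failure-pattern check above uses the waiting() context manager, which starts monitoring on entry and validates on exit. Assuming it handles matched_patterns the same way, the earlier positive check could be written in the same style; the explicit start_monitoring()/matches form remains necessary when asserting an exact match count, as this test does:
    # sketch, assuming waiting() also validates matched_patterns on exit
    with LogValidator("/var/www/miq/vmdb/log/automation.log",
                      matched_patterns=patterns).waiting(timeout=120):
        navigate_to(service_catalogs, "Order")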
Example #20
def test_custom_button_automate_service_vm(request, appliance, service_vm,
                                           button_group):
    """ Test custom button execution on SSUI vm resource detail page

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/2h
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button

    Bugzilla:
        1427430
        1450473
        1454910
    """

    service, _ = service_vm
    with appliance.context.use(ViaUI):
        button = button_group.buttons.create(
            text=fauxfactory.gen_alphanumeric(),
            hover=fauxfactory.gen_alphanumeric(),
            system="Request",
            request="InspectMe",
        )
        request.addfinalizer(button.delete_if_exists)

    # Check for UI and SSUI destinations.
    for context in [ViaUI, ViaSSUI]:
        with appliance.context.use(context):
            nav_to = ssui_nav if context is ViaSSUI else ui_nav

            # Navigate to VM Details page of service
            view = nav_to(service, "VMDetails")

            # start log check
            log = LogValidator("/var/www/miq/vmdb/log/automation.log",
                               matched_patterns=["Attributes - Begin"])
            log.start_monitoring()

            # Execute custom button on service vm
            custom_button_group = Dropdown(view, button_group.text)
            custom_button_group.item_select(button.text)

            # validate request in log
            assert log.validate(wait="120s")
Example #21
def test_codename_in_log(appliance):
    """
    Check whether the log contains a mention of the appliance codename

    Polarion:
        assignee: jhenner
        casecomponent: Appliance
        initialEstimate: 1/60h
    """
    log = '/var/www/miq/vmdb/log/evm.log'
    lv = LogValidator(log,
                      matched_patterns=[r'.*Codename: \w+$'],
                      hostname=appliance.hostname)
    lv.start_monitoring()
    appliance.ssh_client.run_command('appliance_console_cli --server=restart')
    assert lv.validate(wait="60s")
    appliance.wait_for_web_ui()
Example #22
def test_provider_refresh_relationship(provider, setup_provider):
    """Tests provider refresh

    Bugzilla:
        1353285
        1756984

    Polarion:
        assignee: ghubale
        casecomponent: Infra
        caseimportance: high
        initialEstimate: 1/8h
        tags: relationship
    """
    result = LogValidator("/var/www/miq/vmdb/log/evm.log", failure_patterns=[r".*ERROR.*"])
    result.start_monitoring()
    provider.refresh_provider_relationships(method='ui', wait=600)
    assert result.validate(wait="60s")
Example #23
def test_appliance_console_external_auth_all(configured_appliance):
    """ Commands:
    1. 'ap' launches appliance_console,
    2. RETURN clears info screen,
    3. '13' change ext auth options,
    4. '1' and '2' select the auth options to change,
    5. '5' apply changes.

    Polarion:
        assignee: dgaikwad
        caseimportance: high
        casecomponent: Auth
        initialEstimate: 1/4h
    """

    evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
                            matched_patterns=[
                                '.*sso_enabled to true.*',
                                '.*saml_enabled to true.*',
                                '.*local_login_disabled to true.*'
                            ],
                            hostname=configured_appliance.hostname)
    evm_tail.start_monitoring()
    command_set = ('ap', RETURN, TimedCommand('13', 20), '1', '2',
                   TimedCommand('5', 20), RETURN, RETURN)
    configured_appliance.appliance_console.run_commands(command_set)
    assert evm_tail.validate(wait="30s")

    evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
                            matched_patterns=[
                                '.*sso_enabled to false.*',
                                '.*saml_enabled to false.*',
                                '.*local_login_disabled to false.*'
                            ],
                            hostname=configured_appliance.hostname)

    evm_tail.start_monitoring()
    command_set = ('ap', RETURN, TimedCommand('13', 20), '1', '2',
                   TimedCommand('5', 20), RETURN, RETURN)
    configured_appliance.appliance_console.run_commands(command_set)
    assert evm_tail.validate(wait="30s")
Example #24
def test_configuration_database_garbage_collection(appliance):
    """
        Navigate to Settings -> Configuration -> Diagnostics -> CFME Region -> Database
        Submit 'Run database Garbage Collection Now' and check the UI/logs for errors.

    Polarion:
        assignee: tpapaioa
        casecomponent: Configuration
        caseimportance: medium
        initialEstimate: 1/12h
    """
    evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
                            matched_patterns=[
                                r'.*Queued the action: \[Database GC\] being run for user:.*'
                            ],
                            failure_patterns=[r'.*ERROR.*'])
    evm_tail.start_monitoring()
    view = navigate_to(appliance.server.zone.region, 'Database')
    view.submit_db_garbage_collection_button.click()
    view.flash.assert_message('Database Garbage Collection successfully initiated')
    assert evm_tail.validate(wait="30s")
Example #25
def test_action_prevent_ssa(request, configure_fleecing, vm, vm_on,
                            policy_for_testing):
    """Tests preventing Smart State Analysis.

    This test sets the policy that prevents VM analysis.

    Bugzilla:
        1433084

    Metadata:
        test_flag: actions, provision

    Polarion:
        assignee: jdupuy
        initialEstimate: 1/4h
        casecomponent: Control
    """
    policy_for_testing.assign_actions_to_event(
        "VM Analysis Request", ["Prevent current event from proceeding"])

    @request.addfinalizer
    def _cleanup():
        policy_for_testing.unassign_events("VM Analysis Request")

    policy_result = LogValidator(
        "/var/www/miq/vmdb/log/policy.log",
        matched_patterns=[
            '.*Prevent current event from proceeding.*VM Analysis Request.*{}'.
            format(vm.name)
        ])
    policy_result.start_monitoring()

    wait_for_ssa_enabled(vm)

    try:
        # do_scan is expected to time out: the policy prevents the analysis,
        # so the prevention message must appear in policy.log instead
        do_scan(vm)
    except TimedOutError:
        assert policy_result.validate(wait="120s")
    else:
        pytest.fail("CFME did not prevent analysing the VM {}".format(vm.name))
Example #26
def test_appliance_console_external_auth(auth_type, ipa_crud,
                                         configured_appliance):
    """ Commands:
    1. 'ap' launches appliance_console,
    2. RETURN clears info screen,
    3. '13' change ext auth options,
    4. 'auth_type' auth type to change,
    5. '5' apply changes.

    Polarion:
        assignee: dgaikwad
        caseimportance: high
        casecomponent: Auth
        initialEstimate: 1/4h
    """
    # TODO this depends on the auth_type options being disabled when the test is run
    # TODO it assumes that first switch is to true, then false.

    evm_tail = LogValidator(
        '/var/www/miq/vmdb/log/evm.log',
        matched_patterns=['.*{} to true.*'.format(auth_type.option)],
        hostname=configured_appliance.hostname)
    evm_tail.start_monitoring()
    command_set = ('ap', RETURN, '13', auth_type.index, '5', RETURN, RETURN)
    configured_appliance.appliance_console.run_commands(command_set,
                                                        timeout=30)
    assert evm_tail.validate(wait="30s")

    evm_tail = LogValidator(
        '/var/www/miq/vmdb/log/evm.log',
        matched_patterns=['.*{} to false.*'.format(auth_type.option)],
        hostname=configured_appliance.hostname)

    evm_tail.start_monitoring()
    command_set = ('ap', RETURN, '13', auth_type.index, '5', RETURN, RETURN)
    configured_appliance.appliance_console.run_commands(command_set,
                                                        timeout=30)
    assert evm_tail.validate(wait="30s")
Example #27
def test_automate_service_quota_runs_only_once(appliance,
                                               generic_catalog_item):
    """
    Polarion:
        assignee: dgaikwad
        casecomponent: Automate
        caseimportance: medium
        initialEstimate: 1/4h
        tags: automate

    Bugzilla:
        1317698
    """
    pattern = ".*Getting Tenant Quota Values for:.*"
    result = LogValidator("/var/www/miq/vmdb/log/automation.log",
                          matched_patterns=[pattern])
    result.start_monitoring()
    service_catalogs = ServiceCatalogs(appliance,
                                       catalog=generic_catalog_item.catalog,
                                       name=generic_catalog_item.name)
    provision_request = service_catalogs.order()
    provision_request.wait_for_request()
    assert result.matches[pattern] == 1
Example #28
def test_task_id_for_method_automation_log(request, generic_catalog_item):
    """
    Polarion:
        assignee: dgaikwad
        initialEstimate: 1/30h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.10
        casecomponent: Automate
        tags: automate
        setup:
            1. Add existing or new automate method to newly created domain or create generic service
        testSteps:
            1. Run that instance using simulation or order service catalog item
            2. See automation log
        expectedResults:
            1.
            2. Task id should be included in automation log for method logs.

    Bugzilla:
        1592428
    """
    result = LogValidator("/var/www/miq/vmdb/log/automation.log",
                          matched_patterns=[".*Q-task_id.*"])
    result.start_monitoring()
    service_request = generic_catalog_item.appliance.rest_api.collections.service_templates.get(
        name=generic_catalog_item.name).action.order()
    request.addfinalizer(service_request.action.delete)

    # Need to wait until automation logs with 'Q-task_id' are generated, which happens after the
    # service_request becomes active.
    wait_for(lambda: service_request.request_state == "active",
             fail_func=service_request.reload,
             timeout=60,
             delay=3)
    assert result.validate(wait="60s")
Example #29
def test_quota_source_value(request, entity, search, copy_quota_instance, generic_catalog_item):
    """
    Polarion:
        assignee: ghubale
        initialEstimate: 1/8h
        caseposneg: positive
        startsin: 5.10
        casecomponent: Automate

    Bugzilla:
        1319910
    """
    # Changing quota source value
    copy_quota_instance.update({"fields": {'quota_source_type': {'value': entity}}})

    # Setting cpu quota for 'My Company' tenant
    root_tenant = copy_quota_instance.appliance.collections.tenants.get_root_tenant()
    root_tenant.set_quota(**{"cpu_cb": True, "cpu": 3})
    request.addfinalizer(lambda: root_tenant.set_quota(**{"cpu_cb": False}))

    result = LogValidator(
        "/var/www/miq/vmdb/log/automation.log", matched_patterns=[".*{}.*".format(search)]
    )
    result.start_monitoring()
    service_catalogs = ServiceCatalogs(
        copy_quota_instance.appliance, catalog=generic_catalog_item.catalog,
        name=generic_catalog_item.name
    )
    request_description = 'Provisioning Service [{name}] from [{name}]'.format(
        name=service_catalogs.name)
    provision_request = copy_quota_instance.appliance.collections.requests.instantiate(
        description=request_description)
    service_catalogs.order()
    provision_request.wait_for_request(method='ui')
    request.addfinalizer(lambda: provision_request.remove_request(method="rest"))
    assert result.validate(wait="60s")
Example #30
def test_user_requester_for_lifecycle_provision(request, appliance, provider,
                                                setup_provider, new_users,
                                                generic_catalog_item,
                                                infra_validate_request,
                                                service_validate_request,
                                                provisioning):
    """
    Polarion:
        assignee: ghubale
        casecomponent: Automate
        caseimportance: high
        initialEstimate: 1/6h
        tags: automate

    Bugzilla:
         1671563
         1720273
         1728706
    """
    script = """
    user = $evm.root['user']
    $evm.log(:info, "This is the user: #{user.userid}")

    $evm.log("info", "Listing Root Object Attributes:")
    $evm.root.attributes.sort.each { |k, v| $evm.log("info", "\t#{k}: #{v}") }
    $evm.log("info", "===========================================")
    """
    infra_validate_request.update(updates={"script": script})
    service_validate_request.update(updates={"script": script})

    with new_users[0]:
        # Log in with first user and order service catalog
        result = LogValidator(
            "/var/www/miq/vmdb/log/automation.log",
            matched_patterns=[
                ".*This is the user: {name}.*".format(
                    name=new_users[0].credential.principal)
            ],
        )
        result.start_monitoring()
        service_catalogs = ServiceCatalogs(
            appliance,
            catalog=generic_catalog_item.catalog,
            name=generic_catalog_item.name)
        provision_request = service_catalogs.order()
        provision_request.wait_for_request()
        assert result.validate(wait="60s")

    with new_users[1]:
        # Log in with second user and provision instance via lifecycle
        result = LogValidator(
            "/var/www/miq/vmdb/log/automation.log",
            matched_patterns=[
                ".*This is the user: {name}.*".format(
                    name=new_users[1].credential.principal)
            ],
        )
        result.start_monitoring()
        prov_data = {
            "catalog": {
                'vm_name': random_vm_name(context='provision')
            },
            "environment": {
                'automatic_placement': True
            },
        }
        do_vm_provisioning(appliance,
                           template_name=provisioning["template"],
                           provider=provider,
                           vm_name=prov_data['catalog']['vm_name'],
                           provisioning_data=prov_data,
                           wait=False,
                           request=None)
        request_description = 'Provision from [{template}] to [{vm}{msg}]'.format(
            template=provisioning["template"],
            vm=prov_data['catalog']['vm_name'],
            msg='')
        provision_request = appliance.collections.requests.instantiate(
            request_description)
        provision_request.wait_for_request(method='ui')
        request.addfinalizer(provision_request.remove_request)
        assert result.validate(wait="60s")
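All of the examples above share one LogValidator lifecycle. A distilled sketch, with 'action' standing in for whatever behavior a test triggers (imports omitted, matching the examples above):
def exercise_log_validator(action):
    """Sketch of the shared pattern; 'action' is any callable under test."""
    validator = LogValidator(
        "/var/www/miq/vmdb/log/evm.log",             # log file to tail
        matched_patterns=[r".*expected message.*"],  # lines that must appear
        failure_patterns=[r".*ERROR.*"],             # lines that must not appear
    )
    validator.start_monitoring()  # record the current end of the log first
    action()                      # trigger the behavior under test
    # poll the newly written log lines until matched or the wait expires
    assert validator.validate(wait="60s")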