Example #1
def rhn_mirror_setup(appliance_set):
    """Sets up RHN mirror feature on primary appliance and checks secondary are subscribed
    """

    appliance_set.primary.browser_steal = True
    with appliance_set.primary():
        set_server_roles(rhn_mirror=True)

    appliance_set.primary.restart_evm_service()

    with appliance_set.primary.ssh_client as ssh:

        def is_repotrack_running():
            status, output = ssh.run_command('pgrep repotrack')
            if status == 0:
                return True
            return False

        logger.info('Waiting for repotrack to start')
        wait_for(func=is_repotrack_running, delay=REFRESH_SEC, num_sec=300)
        logger.info('Done')

        logger.info('Waiting for repotrack to finish')
        wait_for(func=is_repotrack_running,
                 delay=REFRESH_SEC,
                 fail_condition=True,
                 num_sec=900)
        logger.info('Done')

        # Check that repo folder exists on primary and contains cfme-appliance pkg
        assert ssh.run_command('ls -m1 /repo/mirror | grep cfme-appliance')[0] == 0,\
            "/repo/mirror on {} doesn't exist or doesn't contain cfme-appliance pkg"\
            .format(appliance_set.primary.name)

    logger.info('Waiting for web UI to start')
    appliance_set.primary.wait_for_web_ui()
    logger.info('Done')

    # Check /etc/yum.repos.d/cfme-mirror.repo file exists on secondary appliances
    for appliance in appliance_set.secondary:
        with appliance.ssh_client as ssh:

            def repo_file_exists():
                status, output = ssh.run_command(
                    'ls /etc/yum.repos.d/cfme-mirror.repo')
                if status == 0:
                    return True
                return False

            logger.info(
                'Waiting for repository files to be created on secondary appliances'
            )
            wait_for(func=repo_file_exists, delay=REFRESH_SEC, num_sec=120)
            logger.info('Done')

    # And confirm that all appliances are subscribed
    appliance_set.primary.browser_steal = True
    with appliance_set.primary():
        assert red_hat_updates.are_subscribed(),\
            'Failed to subscribe all appliances (secondary via proxy)'
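A minimal sketch of the two-phase wait used above, assuming `wait_for` from the framework's utils.wait module (it presumably keeps polling `func` until the result differs from `fail_condition`, which defaults to False). The helper name is hypothetical; the examples use the module-level REFRESH_SEC constant where this sketch takes a `delay` parameter:

from utils.wait import wait_for  # assumed module path

def wait_for_process(ssh, name, delay=5, start_timeout=300, finish_timeout=900):
    def is_running():
        # pgrep exits 0 while at least one matching process is alive
        return ssh.run_command('pgrep {}'.format(name))[0] == 0

    # Phase 1: block until the process appears (func becomes True).
    wait_for(func=is_running, delay=delay, num_sec=start_timeout)
    # Phase 2: invert the success test, so a True result now means
    # "keep waiting"; returns once the process is gone.
    wait_for(func=is_running, fail_condition=True,
             delay=delay, num_sec=finish_timeout)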
Example #2
def test_appliance_replicate_sync_role_change(request, virtualcenter_provider, appliance):
    """Tests that a role change is replicated

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now disable DB sync role
        conf.set_server_roles(database_synchronization=False)
        navigate_to(appliance.server.zone.region, 'Replication')
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=True,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        conf.set_server_roles(database_synchronization=True)
        navigate_to(appliance.server.zone.region, 'Replication')
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        virtualcenter_provider.create()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
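The disable/enable round-trip above flips only the `fail_condition` polarity between the two waits; a hedged helper capturing that, under the same assumptions about `conf`, `sel`, and `wait_for` as the example (the helper name is hypothetical):

def wait_replication_state(active, num_sec=360):
    # active=True blocks until get_replication_status() reports up;
    # active=False blocks until it reports down. fail_condition names
    # the value that means "not there yet".
    wait_for(lambda: conf.get_replication_status(navigate=False),
             fail_condition=not active, num_sec=num_sec, delay=10,
             fail_func=sel.refresh, message="get_replication_status")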
Example #3
def configure_db_replication(db_address):
    """Enables the sync role and configures the appliance to replicate to
       the db_address specified. Then, it waits for the UI to show the replication
       as active and the backlog as empty.
    """
    conf.set_replication_worker_host(db_address)
    view = current_appliance.server.browser.create_view(ServerView)
    view.flash.assert_message(
        "Configuration settings saved for CFME Server")  # may be partial
    navigate_to(current_appliance.server, 'Server')
    conf.set_server_roles(database_synchronization=True)
    navigate_to(current_appliance.server.zone.region, 'Replication')
    rep_status, _ = wait_for(
        conf.get_replication_status,
        func_kwargs={'navigate': False},
        fail_condition=False,
        num_sec=360,
        delay=10,
        fail_func=current_appliance.server.browser.refresh,
        message="get_replication_status")
    assert rep_status
    wait_for(lambda: conf.get_replication_backlog(navigate=False) == 0,
             fail_condition=False,
             num_sec=120,
             delay=10,
             fail_func=current_appliance.server.browser.refresh,
             message="get_replication_backlog")
Example #4
def test_appliance_replicate_sync_role_change(request, virtualcenter_provider, appliance):
    """Tests that a role change is replicated

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now disable DB sync role
        conf.set_server_roles(database_synchronization=False)
        navigate_to(appliance.server.zone.region, 'Replication')
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=True,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        conf.set_server_roles(database_synchronization=True)
        navigate_to(appliance.server.zone.region, 'Replication')
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        virtualcenter_provider.create()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
Example #5
def ensure_websocket_role_disabled():
    # TODO: This is a temporary solution until we find something better.
    roles = configuration.get_server_roles()
    if 'websocket' in roles and roles['websocket']:
        logger.info('Disabling the websocket role to ensure we get no intrusive popups')
        roles['websocket'] = False
        configuration.set_server_roles(**roles)
Example #6
def test_appliance_replicate_sync_role_change_with_backlog(request, provider):
    """Tests that a role change is replicated with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now disable DB sync role
        provider.create()
        conf.set_server_roles(database_synchronization=False)
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=True,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        conf.set_server_roles(database_synchronization=True)
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
Example #7
def test_db_restore(request, soft_assert, virtualcenter_provider_crud, ec2_provider_crud):

    appl1, appl2 = get_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        # Manage infra and cloud providers and set some roles before taking a DB backup
        config.set_server_roles(automate=True)
        roles = config.get_server_roles()
        virtualcenter_provider_crud.setup()
        wait_for_a_provider()
        ec2_provider_crud.setup()
        cloud_provider.wait_for_a_provider()

        providers_appl1 = appl1.ipapp.managed_known_providers
        appl1.ipapp.db.backup()

    # Fetch v2_key and DB backup from the first appliance
    with appl1.ipapp.ssh_client as ssh:
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/tmp/evm_db.backup", dump_filename)

    with appl2.ipapp.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        # Restore DB on the second appliance
        appl2.ipapp.evmserverd.stop()
        appl2.ipapp.db.drop()
        appl2.ipapp.db.restore()
        appl2.ipapp.start_evm_service()
        appl2.ipapp.wait_for_web_ui()
        wait_for_a_provider()
        cloud_provider.wait_for_a_provider()

        # Assert providers on the second appliance
        providers_appl2 = appl2.ipapp.managed_known_providers
        assert set(providers_appl2).issubset(providers_appl1),\
            'Restored DB is missing some providers'

        # Verify that existing provider can detect new VMs on the second appliance
        vm = provision_vm(request, virtualcenter_provider_crud)
        soft_assert(vm.find_quadicon().data['state'] == 'currentstate-on')
        soft_assert(vm.provider.mgmt.is_vm_running(vm.name), "vm running")

        # Assert server roles on the second appliance
        for role, is_enabled in config.get_server_roles(db=False).iteritems():
            if is_enabled:
                assert roles[role], "Role '{}' is selected but should not be".format(role)
            else:
                assert not roles[role], "Role '{}' is not selected but should be".format(role)
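The v2_key/backup shuffle above is the same two-step copy performed twice; a sketch of a reusable helper, assuming the `get_file`/`put_file` signatures shown in the example and `fauxfactory` for collision-free temp names (the helper itself is hypothetical):

import fauxfactory

def copy_between_appliances(src_ssh, dst_ssh, remote_path, dest_path=None):
    # Stage the file locally under a unique name, then push it to the
    # destination appliance (same path unless dest_path overrides it).
    local_path = '/tmp/transfer_{}'.format(fauxfactory.gen_alphanumeric())
    src_ssh.get_file(remote_path, local_path)
    dst_ssh.put_file(local_path, dest_path or remote_path)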
Example #8
def configure_db_replication(db_address):
    """Enables the sync role and configures the appliance to replicate to
       the db_address specified. Then, it waits for the UI to show the replication
       as active and the backlog as empty.
    """
    conf.set_replication_worker_host(db_address)
    flash.assert_message_contain(
        "Configuration settings saved for CFME Server")
    try:
        sel.force_navigate("cfg_settings_currentserver_server")
    except WebDriverException:
        sel.handle_alert()
        sel.force_navigate("cfg_settings_currentserver_server")
    conf.set_server_roles(database_synchronization=True)
    sel.force_navigate("cfg_diagnostics_region_replication")
    wait_for(lambda: conf.get_replication_status(navigate=False),
             fail_condition=False,
             num_sec=360,
             delay=10,
             fail_func=sel.refresh,
             message="get_replication_status")
    assert conf.get_replication_status()
    wait_for(lambda: conf.get_replication_backlog(navigate=False) == 0,
             fail_condition=False,
             num_sec=120,
             delay=10,
             fail_func=sel.refresh,
             message="get_replication_backlog")
Example #9
def configure_db_replication(db_address):
    """Enables the sync role and configures the appliance to replicate to
       the db_address specified. Then, it waits for the UI to show the replication
       as active and the backlog as empty.
    """
    conf.set_replication_worker_host(db_address)
    flash.assert_message_contain("Configuration settings saved for CFME Server")
    try:
        sel.force_navigate("cfg_settings_currentserver_server")
    except WebDriverException:
        sel.handle_alert()
        sel.force_navigate("cfg_settings_currentserver_server")
    conf.set_server_roles(database_synchronization=True)
    sel.force_navigate("cfg_diagnostics_region_replication")
    wait_for(
        lambda: conf.get_replication_status(navigate=False),
        fail_condition=False,
        num_sec=360,
        delay=10,
        fail_func=sel.refresh,
        message="get_replication_status",
    )
    assert conf.get_replication_status()
    wait_for(
        lambda: conf.get_replication_backlog(navigate=False) == 0,
        fail_condition=False,
        num_sec=120,
        delay=10,
        fail_func=sel.refresh,
        message="get_replication_backlog",
    )
Example #10
def test_appliance_replicate_sync_role_change_with_backlog(request, provider):
    """Tests that a role change is replicated with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now disable DB sync role
        provider.create()
        conf.set_server_roles(database_synchronization=False)
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=True,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        conf.set_server_roles(database_synchronization=True)
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
                 num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
Example #11
def ensure_websocket_role_disabled():
    # TODO: This is a temporary solution until we find something better.
    roles = configuration.get_server_roles()
    if 'websocket' in roles and roles['websocket']:
        logger.info('Disabling the websocket role to ensure we get no intrusive popups')
        roles['websocket'] = False
        configuration.set_server_roles(**roles)
Example #12
def local_setup_provider(request, setup_provider_modscope, provider,
                         vm_analysis_new):
    if provider.type == 'rhevm' and version.current_version() < "5.5":
        # See https://bugzilla.redhat.com/show_bug.cgi?id=1300030
        pytest.skip(
            "SSA is not supported on RHEVM for appliances earlier than 5.5 and upstream"
        )
    if GH("ManageIQ/manageiq:6506").blocks:
        pytest.skip("Upstream provisioning is blocked by" +
                    "https://github.com/ManageIQ/manageiq/issues/6506")
    if provider.type == 'virtualcenter':
        store.current_appliance.install_vddk(reboot=True)
        store.current_appliance.wait_for_web_ui()
        try:
            sel.refresh()
        except AttributeError:
            # In case no browser is started
            pass

        set_host_credentials(request, vm_analysis_new, provider)

    # Make sure all roles are set
    roles = configuration.get_server_roles(db=False)
    roles["automate"] = True
    roles["smartproxy"] = True
    roles["smartstate"] = True
    configuration.set_server_roles(**roles)
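The closing role block recurs in several examples below; a small hypothetical helper over the same get_server_roles/set_server_roles API that merges the requested roles into whatever is currently enabled:

def enable_roles(*names):
    # Read the roles from the UI (db=False, as above) and switch on only
    # the requested ones, leaving the rest untouched.
    roles = configuration.get_server_roles(db=False)
    for name in names:
        roles[name] = True
    configuration.set_server_roles(**roles)

The example's tail would then collapse to enable_roles("automate", "smartproxy", "smartstate").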
Example #13
def test_db_restore(request, soft_assert):

    appl1, appl2 = get_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        # Manage infra and cloud providers and set some roles before taking a DB backup
        config.set_server_roles(automate=True)
        roles = config.get_server_roles()
        provider_crud = setup_a_provider('infra', 'virtualcenter', validate=True)
        provider_mgmt = provider_crud.get_mgmt_system()
        wait_for_a_provider()
        setup_a_provider('cloud', 'ec2', validate=True)
        cloud_provider.wait_for_a_provider()

        providers_appl1 = appl1.ipapp.managed_providers
        appl1.ipapp.backup_database()

    # Fetch v2_key and DB backup from the first appliance
    with appl1.ipapp.ssh_client as ssh:
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/tmp/evm_db.backup", dump_filename)

    with appl2.ipapp.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        # Restore DB on the second appliance
        appl2.ipapp.restore_database()
        appl2.ipapp.wait_for_web_ui()
        wait_for_a_provider()
        cloud_provider.wait_for_a_provider()

        # Assert providers on the second appliance
        providers_appl2 = appl2.ipapp.managed_providers
        assert set(providers_appl2).issubset(providers_appl1),\
            'Restored DB is missing some providers'

        # Verify that existing provider can detect new VMs on the second appliance
        vm = provision_vm(request, provider_crud, provider_mgmt)
        soft_assert(vm.find_quadicon().state == 'currentstate-on')
        soft_assert(vm.provider_crud.get_mgmt_system().is_vm_running(vm.name),
                    "vm running")

        # Assert server roles on the second appliance
        for role, is_enabled in config.get_server_roles(db=False).iteritems():
            if is_enabled:
                assert roles[role], "Role '%s' is selected but should not be" % role
            else:
                assert not roles[role], "Role '%s' is not selected but should be" % role
Example #14
def automate_role_set(request):
    """ Sets the Automate role that the VM can be provisioned.

    Sets the Automate role state back when finished the module tests.
    """
    from cfme.configure import configuration
    roles = configuration.get_server_roles()
    old_roles = dict(roles)
    roles["automate"] = True
    configuration.set_server_roles(**roles)
    yield
    configuration.set_server_roles(**old_roles)
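The function above reads as a pytest yield fixture (the @pytest.fixture decorator is presumably stripped from the excerpt); a hypothetical test opting in so that its provisioning runs with the Automate role guaranteed on:

import pytest

@pytest.mark.usefixtures('automate_role_set')
def test_vm_provisioning():
    # Runs with the Automate role enabled; the fixture restores the
    # previous role set during teardown.
    pass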
Example #15
def automate_role_set(request):
    """ Sets the Automate role that the VM can be provisioned.

    Sets the Automate role state back when finished the module tests.
    """
    from cfme.configure import configuration
    roles = configuration.get_server_roles()
    old_roles = dict(roles)
    roles["automate"] = True
    configuration.set_server_roles(**roles)
    yield
    configuration.set_server_roles(**old_roles)
Example #16
def rhn_mirror_setup(appliance_set):
    """Sets up RHN mirror feature on primary appliance and checks secondary are subscribed
    """

    appliance_set.primary.browser_steal = True
    with appliance_set.primary():
        set_server_roles(rhn_mirror=True)

    appliance_set.primary.restart_evm_service()

    with appliance_set.primary.ssh_client as ssh:
        def is_repotrack_running():
            status, output = ssh.run_command('pgrep repotrack')
            if status == 0:
                return True
            return False

        logger.info('Waiting for repotrack to start')
        wait_for(func=is_repotrack_running, delay=REFRESH_SEC, num_sec=300)
        logger.info('Done')

        logger.info('Waiting for repotrack to finish')
        wait_for(func=is_repotrack_running, delay=REFRESH_SEC, fail_condition=True, num_sec=900)
        logger.info('Done')

        # Check that repo folder exists on primary and contains cfme-appliance pkg
        assert ssh.run_command('ls -m1 /repo/mirror | grep cfme-appliance')[0] == 0,\
            "/repo/mirror on {} doesn't exist or doesn't contain cfme-appliance pkg"\
            .format(appliance_set.primary.name)

    logger.info('Waiting for web UI to start')
    appliance_set.primary.wait_for_web_ui()
    logger.info('Done')

    # Check /etc/yum.repos.d/cfme-mirror.repo file exists on secondary appliances
    for appliance in appliance_set.secondary:
        with appliance.ssh_client as ssh:
            def repo_file_exists():
                status, output = ssh.run_command('ls /etc/yum.repos.d/cfme-mirror.repo')
                if status == 0:
                    return True
                return False

            logger.info('Waiting for repository files to be created on secondary appliances')
            wait_for(func=repo_file_exists, delay=REFRESH_SEC, num_sec=120)
            logger.info('Done')

    # And confirm that all appliances are subscribed
    appliance_set.primary.browser_steal = True
    with appliance_set.primary():
        assert red_hat_updates.are_subscribed(),\
            'Failed to subscribe all appliances (secondary via proxy)'
Example #17
def configure_db_replication(db_address):
    """Enables the sync role and configures the appliance to replicate to
       the db_address specified. Then, it waits for the UI to show the replication
       as active and the backlog as empty.
    """
    conf.set_replication_worker_host(db_address)
    flash.assert_message_contain("Configuration settings saved for CFME Server")
    navigate_to(current_appliance.server, 'Server')
    conf.set_server_roles(database_synchronization=True)
    navigate_to(current_appliance.server.zone.region, 'Replication')
    wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
             num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
    assert conf.get_replication_status()
    wait_for(lambda: conf.get_replication_backlog(navigate=False) == 0, fail_condition=False,
             num_sec=120, delay=10, fail_func=sel.refresh, message="get_replication_backlog")
Example #18
def local_setup_provider(request, setup_provider_modscope, provider, vm_analysis_data, appliance):

    # TODO: allow for vddk parameterization
    if provider.one_of(VMwareProvider):
        appliance.install_vddk(reboot=True, wait_for_web_ui_after_reboot=True)
        appliance.browser.quit_browser()
        appliance.browser.open_browser()
        set_host_credentials(request, provider, vm_analysis_data)

    # Make sure all roles are set
    roles = configuration.get_server_roles(db=False)
    roles["automate"] = True
    roles["smartproxy"] = True
    roles["smartstate"] = True
    configuration.set_server_roles(**roles)
Example #19
def local_setup_provider(request, setup_provider_modscope, provider, vm_analysis_data, appliance):

    # TODO: allow for vddk parameterization
    if provider.one_of(VMwareProvider):
        appliance.install_vddk(reboot=True, wait_for_web_ui_after_reboot=True)
        appliance.browser.quit_browser()
        appliance.browser.open_browser()
        set_host_credentials(request, provider, vm_analysis_data)

    # Make sure all roles are set
    roles = configuration.get_server_roles(db=False)
    roles["automate"] = True
    roles["smartproxy"] = True
    roles["smartstate"] = True
    configuration.set_server_roles(**roles)
Example #20
def configure_db_replication(db_address):
    """Enables the sync role and configures the appliance to replicate to
       the db_address specified. Then, it waits for the UI to show the replication
       as active and the backlog as empty.
    """
    conf.set_replication_worker_host(db_address)
    flash.assert_message_contain("Configuration settings saved for CFME Server")
    navigate_to(current_appliance.server, 'Server')
    conf.set_server_roles(database_synchronization=True)
    navigate_to(current_appliance.server.zone.region, 'Replication')
    wait_for(lambda: conf.get_replication_status(navigate=False), fail_condition=False,
             num_sec=360, delay=10, fail_func=sel.refresh, message="get_replication_status")
    assert conf.get_replication_status()
    wait_for(lambda: conf.get_replication_backlog(navigate=False) == 0, fail_condition=False,
             num_sec=120, delay=10, fail_func=sel.refresh, message="get_replication_backlog")
Example #21
def set_roles_for_sm():
    roles = get_server_roles()
    roles["storage_metrics_processor"] = True
    roles["storage_metrics_collector"] = True
    roles["storage_metrics_coordinator"] = True
    roles["storage_inventory"] = True
    return set_server_roles(**roles)
Example #22
def configure_websocket():
    """
    Enable websocket role if it is disabled.

    Currently the fixture cfme/fixtures/base.py,
    disables the websocket role to avoid intrusive popups.
    """
    roles = configuration.get_server_roles()
    if 'websocket' in roles and not roles['websocket']:
        logger.info('Enabling the websocket role to allow console connections')
        roles['websocket'] = True
        configuration.set_server_roles(**roles)
    # Yield unconditionally so the fixture is valid even when the role
    # was already enabled, then switch it off again on teardown.
    yield
    roles['websocket'] = False
    logger.info('Disabling the websocket role to avoid intrusive popups')
    configuration.set_server_roles(**roles)
Example #23
def enable_candu():
    # Snapshot the roles before entering the try block so that the
    # finally clause can always restore them.
    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': True,
        'ems_metrics_collector': True,
        'ems_metrics_processor': True,
        'automate': False,
        'smartstate': False})
    try:
        set_server_roles(**new_roles)
        candu.enable_all()
        yield
    finally:
        candu.disable_all()
        set_server_roles(**original_roles)
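The snapshot/restore guard above generalizes to a context manager; a hypothetical equivalent built with contextlib on the same role API:

from contextlib import contextmanager

@contextmanager
def temporary_roles(**overrides):
    # Snapshot the current roles, apply the overrides, and restore the
    # snapshot on exit even if the body raises.
    original = get_server_roles()
    changed = original.copy()
    changed.update(overrides)
    set_server_roles(**changed)
    try:
        yield
    finally:
        set_server_roles(**original)

Usage would read: with temporary_roles(ems_metrics_collector=True, smartstate=False): ...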
Example #24
def configure_websocket():
    """
    Enable websocket role if it is disabled.

    Currently the fixture cfme/fixtures/base.py,
    disables the websocket role to avoid intrusive popups.
    """
    roles = configuration.get_server_roles()
    if 'websocket' in roles and not roles['websocket']:
        logger.info('Enabling the websocket role to allow console connections')
        roles['websocket'] = True
        configuration.set_server_roles(**roles)
    # Yield unconditionally so the fixture is valid even when the role
    # was already enabled, then switch it off again on teardown.
    yield
    roles['websocket'] = False
    logger.info('Disabling the websocket role to avoid intrusive popups')
    configuration.set_server_roles(**roles)
Example #25
def set_roles_for_sm():
    roles = get_server_roles()
    roles["storage_metrics_processor"] = True
    roles["storage_metrics_collector"] = True
    roles["storage_metrics_coordinator"] = True
    roles["storage_inventory"] = True
    return set_server_roles(**roles)
Example #26
def enable_candu():
    # Snapshot the roles before entering the try block so that the
    # finally clause can always restore them.
    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': True,
        'ems_metrics_collector': True,
        'ems_metrics_processor': True,
        'automate': False,
        'smartstate': False
    })
    try:
        set_server_roles(**new_roles)
        candu.enable_all()
        yield
    finally:
        candu.disable_all()
        set_server_roles(**original_roles)
Example #27
def local_setup_provider(request, setup_provider_modscope, provider, vm_analysis_data):
    if provider.type == 'rhevm' and version.current_version() < "5.5":
        # See https://bugzilla.redhat.com/show_bug.cgi?id=1300030
        pytest.skip("SSA is not supported on RHEVM for appliances earlier than 5.5 and upstream")
    if GH("ManageIQ/manageiq:6506").blocks:
        pytest.skip("Upstream provisioning is blocked by" +
                    "https://github.com/ManageIQ/manageiq/issues/6506")
    if provider.type == 'virtualcenter':
        store.current_appliance.install_vddk(reboot=True, wait_for_web_ui_after_reboot=True)
        ensure_browser_open()
        set_host_credentials(request, provider, vm_analysis_data)

    # Make sure all roles are set
    roles = configuration.get_server_roles(db=False)
    roles["automate"] = True
    roles["smartproxy"] = True
    roles["smartstate"] = True
    configuration.set_server_roles(**roles)
Example #28
def server_roles(fixtureconf):
    """The fixture that does the work. See usage in :py:mod:`fixtures.server_roles`"""

    # Disable all server roles
    # and then figure out which ones should be enabled
    roles_with_vals = {k: False for k in available_roles}
    if 'clear_roles' in fixtureconf:
        # Only user interface
        roles_with_vals['user_interface'] = True
    elif 'set_default_roles' in fixtureconf:
        # The ones specified in YAML
        roles_list = cfme_data["server_roles"]["sets"]["default"]
        roles_with_vals.update({k: True for k in roles_list})
    elif 'server_roles' in fixtureconf:
        # The ones that are already enabled and enable/disable the ones specified
        # -server_role, +server_role or server_role
        roles_with_vals = get_server_roles()
        fixture_roles = fixtureconf['server_roles']
        if isinstance(fixture_roles, basestring):
            fixture_roles = fixture_roles.split(' ')
        for role in fixture_roles:
            if role.startswith('-'):
                roles_with_vals[role[1:]] = False
            elif role.startswith('+'):
                roles_with_vals[role[1:]] = True
            else:
                roles_with_vals[role] = True
    elif 'server_roles_cfmedata' in fixtureconf:
        roles_list = cfme_data
        # Drills down into cfme_data YAML by selector, expecting a list
        # of roles at the end. A KeyError here probably means the YAML
        # selector is wrong
        for selector in fixtureconf['server_roles_cfmedata']:
            roles_list = roles_list[selector]
        roles_with_vals.update({k: True for k in roles_list})
    else:
        raise Exception('No server role changes defined.')

    if not available_roles.issuperset(set(roles_with_vals)):
        unknown_roles = ', '.join(set(roles_with_vals) - available_roles)
        raise Exception('Unknown server role(s): {}'.format(unknown_roles))

    set_server_roles(**roles_with_vals)
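A hypothetical invocation covering the '+'/'-' grammar parsed above, assuming fixtureconf is populated from a same-named pytest marker, as the docstring's pointer to fixtures.server_roles suggests:

import pytest

@pytest.mark.fixtureconf(server_roles='+automate -smartproxy database_operations')
def test_with_tweaked_roles(server_roles):
    # '+automate' forces the role on, '-smartproxy' forces it off, and a
    # bare name such as 'database_operations' also enables it.
    pass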
Example #29
def enable_candu():
    # C&U data collection consumes a lot of memory and CPU. So we disable some server roles
    # that are not needed for Chargeback reporting.
    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': True,
        'ems_metrics_collector': True,
        'ems_metrics_processor': True,
        'automate': False,
        'smartstate': False})

    set_server_roles(**new_roles)
    candu.enable_all()

    yield

    set_server_roles(**original_roles)
    candu.disable_all()
Example #30
def test_server_roles_changing(request, roles):
    """ Test that sets and verifies the server roles in configuration.

    If there is no forced interrupt, it cleans after, so the roles are intact after the testing.
    Todo:
        - Use for parametrization on more roles set?
        - Change the yaml role list to dict.
    """
    request.addfinalizer(partial(configuration.set_server_roles,
                                 **configuration.get_server_roles()))   # For reverting back
    # Set roles
    configuration.set_server_roles(**roles)
    flash.assert_no_errors()
    # Get roles and check
    for role, is_enabled in configuration.get_server_roles().iteritems():
        if is_enabled:
            assert roles[role], "Role '%s' is selected but should not be" % role
        else:
            assert not roles[role], "Role '%s' is not selected but should be" % role
Example #31
def test_server_roles_changing(request, roles):
    """ Test that sets and verifies the server roles in configuration.

    If there is no forced interrupt, it cleans after, so the roles are intact after the testing.
    Todo:
        - Use for parametrization on more roles set?
        - Change the yaml role list to dict.
    """
    request.addfinalizer(partial(configuration.set_server_roles,
                                 **configuration.get_server_roles()))   # For reverting back
    # Set roles
    configuration.set_server_roles(**roles)
    flash.assert_no_errors()
    # Get roles and check; use UI because the changes take a while to propagate to DB
    for role, is_enabled in configuration.get_server_roles(db=False).iteritems():
        if is_enabled:
            assert roles[role], "Role '%s' is selected but should not be" % role
        else:
            assert not roles[role], "Role '%s' is not selected but should be" % role
Example #32
def enable_candu(db):

    # C&U data collection consumes a lot of memory and CPU. So we disable some server roles
    # that are not needed for Chargeback reporting.
    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': True,
        'ems_metrics_collector': True,
        'ems_metrics_processor': True,
        'automate': False,
        'smartstate': False})

    set_server_roles(**new_roles)
    candu.enable_all()

    yield

    candu.disable_all()
    set_server_roles(**original_roles)
Example #33
def add_server_roles(server_roles, server_roles_mode="add"):
    # Disable all server roles
    # and then figure out which ones should be enabled
    roles_with_vals = {k: False for k in available_roles}
    if server_roles is None:
        # Only user interface
        roles_with_vals['user_interface'] = True
    elif server_roles == "default":
        # The ones specified in YAML
        roles_list = cfme_data["server_roles"]["sets"]["default"]
        roles_with_vals.update({k: True for k in roles_list})
    elif server_roles_mode == "add":
        # The ones that are already enabled and enable/disable the ones specified
        # -server_role, +server_role or server_role
        roles_with_vals = get_server_roles()
        if isinstance(server_roles, basestring):
            server_roles = server_roles.split(' ')
        for role in server_roles:
            if role.startswith('-'):
                roles_with_vals[role[1:]] = False
            elif role.startswith('+'):
                roles_with_vals[role[1:]] = True
            else:
                roles_with_vals[role] = True
    elif server_roles_mode == "cfmedata":
        roles_list = cfme_data
        # Drills down into cfme_data YAML by selector, expecting a list
        # of roles at the end. A KeyError here probably means the YAML
        # selector is wrong
        for selector in server_roles:
            roles_list = roles_list[selector]
        roles_with_vals.update({k: True for k in roles_list})
    else:
        raise Exception('No server role changes defined.')

    if not available_roles.issuperset(set(roles_with_vals)):
        unknown_roles = ', '.join(set(roles_with_vals) - available_roles)
        raise Exception('Unknown server role(s): {}'.format(unknown_roles))

    set_server_roles(**roles_with_vals)
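Hypothetical calls exercising the four branches above:

add_server_roles(None)                      # bare appliance: user_interface only
add_server_roles("default")                 # the set under server_roles/sets/default in cfme_data
add_server_roles("+automate -smartstate")   # adjust whatever is currently enabled
add_server_roles(["server_roles", "sets", "default"], server_roles_mode="cfmedata")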
Example #34
def add_server_roles(server_roles, server_roles_mode="add"):
    # Disable all server roles
    # and then figure out which ones should be enabled
    roles_with_vals = {k: False for k in available_roles}
    if server_roles is None:
        # Only user interface
        roles_with_vals['user_interface'] = True
    elif server_roles == "default":
        # The ones specified in YAML
        roles_list = cfme_data["server_roles"]["sets"]["default"]
        roles_with_vals.update({k: True for k in roles_list})
    elif server_roles_mode == "add":
        # The ones that are already enabled and enable/disable the ones specified
        # -server_role, +server_role or server_role
        roles_with_vals = get_server_roles()
        if isinstance(server_roles, basestring):
            server_roles = server_roles.split(' ')
        for role in server_roles:
            if role.startswith('-'):
                roles_with_vals[role[1:]] = False
            elif role.startswith('+'):
                roles_with_vals[role[1:]] = True
            else:
                roles_with_vals[role] = True
    elif server_roles_mode == "cfmedata":
        roles_list = cfme_data
        # Drills down into cfme_data YAML by selector, expecting a list
        # of roles at the end. A KeyError here probably means the YAML
        # selector is wrong
        for selector in server_roles:
            roles_list = roles_list[selector]
        roles_with_vals.update({k: True for k in roles_list})
    else:
        raise Exception('No server role changes defined.')

    if not available_roles.issuperset(set(roles_with_vals)):
        unknown_roles = ', '.join(set(roles_with_vals) - available_roles)
        raise Exception('Unknown server role(s): {}'.format(unknown_roles))

    set_server_roles(**roles_with_vals)
Example #35
def test_server_roles_changing(request, roles):
    """ Test that sets and verifies the server roles in configuration.

    If there is no forced interrupt, it cleans after, so the roles are intact after the testing.
    Note:
      TODO:
      - Use for parametrization on more roles set?
      - Change the yaml role list to dict.
    """
    request.addfinalizer(
        partial(configuration.set_server_roles,
                **configuration.get_server_roles()))  # For reverting back
    # Set roles
    configuration.set_server_roles(db=False, **roles)
    flash.assert_no_errors()
    # Get roles and check; use UI because the changes take a while to propagate to DB
    for role, is_enabled in configuration.get_server_roles(db=False).iteritems():
        if is_enabled:
            assert roles[role], "Role '{}' is selected but should not be".format(role)
        else:
            assert not roles[role], "Role '{}' is not selected but should be".format(role)
Example #36
def resource_usage(vm_ownership, appliance, provider):
    # Retrieve resource usage values from metric_rollups table.
    average_cpu_used_in_mhz = 0
    average_memory_used_in_mb = 0
    average_network_io = 0
    average_disk_io = 0
    average_storage_used = 0
    consumed_hours = 0
    vm_name = provider.data['cap_and_util']['chargeback_vm']

    metrics = appliance.db.client['metrics']
    rollups = appliance.db.client['metric_rollups']
    ems = appliance.db.client['ext_management_systems']
    logger.info('Deleting METRICS DATA from metrics and metric_rollups tables')

    appliance.db.client.session.query(metrics).delete()
    appliance.db.client.session.query(rollups).delete()

    provider_id = appliance.db.client.session.query(ems).filter(
        ems.name == provider.name).first().id

    # Chargeback reporting is done on hourly and daily rollup values, not real-time values. So
    # we capture C&U data and force hourly rollups by running these commands through the Rails
    # console.

    def verify_records_metrics_table(appliance, provider):
        # Verify that rollups are present in the metric_rollups table.
        vm_name = provider.data['cap_and_util']['chargeback_vm']

        ems = appliance.db.client['ext_management_systems']
        metrics = appliance.db.client['metrics']

        rc, out = appliance.ssh_client.run_rails_command(
            "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
            vm.perf_capture('realtime', 1.hour.ago.utc, Time.now.utc)\"".
            format(provider_id, repr(vm_name)))
        assert rc == 0, "Failed to capture VM C&U data:".format(out)

        with appliance.db.client.transaction:
            result = (appliance.db.client.session.query(metrics.id).join(
                ems, metrics.parent_ems_id == ems.id).filter(
                    metrics.capture_interval_name == 'realtime',
                    metrics.resource_name == vm_name,
                    ems.name == provider.name,
                    metrics.timestamp >= date.today()))

        for record in appliance.db.client.session.query(metrics).filter(
                metrics.id.in_(result.subquery())):
            if record.cpu_usagemhz_rate_average:
                return True
        return False

    wait_for(verify_records_metrics_table, [appliance, provider],
             timeout=600,
             fail_condition=False,
             message='Waiting for VM real-time data')

    # New C&U data may sneak in, since (1) the C&U server roles are running and (2) collection
    # for clusters and hosts is on. This would skew our Chargeback calculations, so we disable
    # C&U collection once data has been fetched for the last hour.

    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': False,
        'ems_metrics_collector': False
    })

    set_server_roles(**new_roles)

    rc, out = appliance.ssh_client.run_rails_command(
        "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
        vm.perf_rollup_range(1.hour.ago.utc, Time.now.utc,'realtime')\"".
        format(provider_id, repr(vm_name)))
    assert rc == 0, "Failed to rollup VM C&U data:".format(out)

    wait_for(verify_records_rollups_table, [appliance, provider],
             timeout=600,
             fail_condition=False,
             message='Waiting for hourly rollups')

    # Since we are collecting C&U data for > 1 hour, there will be multiple hourly records per
    # VM in the metric_rollups DB table. The values from these hourly records are summed up.

    with appliance.db.client.transaction:
        result = (appliance.db.client.session.query(rollups.id).join(
            ems, rollups.parent_ems_id == ems.id).filter(
                rollups.capture_interval_name == 'hourly',
                rollups.resource_name == vm_name, ems.name == provider.name,
                rollups.timestamp >= date.today()))

    for record in appliance.db.client.session.query(rollups).filter(
            rollups.id.in_(result.subquery())):
        consumed_hours = consumed_hours + 1
        if (record.cpu_usagemhz_rate_average or record.cpu_usage_rate_average
                or record.derived_memory_used or record.net_usage_rate_average
                or record.disk_usage_rate_average):
            average_cpu_used_in_mhz = average_cpu_used_in_mhz + record.cpu_usagemhz_rate_average
            average_memory_used_in_mb = average_memory_used_in_mb + record.derived_memory_used
            average_network_io = average_network_io + record.net_usage_rate_average
            average_disk_io = average_disk_io + record.disk_usage_rate_average

    for record in appliance.db.client.session.query(rollups).filter(
            rollups.id.in_(result.subquery())):
        if record.derived_vm_used_disk_storage:
            average_storage_used = average_storage_used + record.derived_vm_used_disk_storage

    # Convert storage used in Bytes to GB
    average_storage_used = average_storage_used * math.pow(2, -30)

    return {
        "average_cpu_used_in_mhz": average_cpu_used_in_mhz,
        "average_memory_used_in_mb": average_memory_used_in_mb,
        "average_network_io": average_network_io,
        "average_disk_io": average_disk_io,
        "average_storage_used": average_storage_used,
        "consumed_hours": consumed_hours
    }
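For reference, the storage conversion above multiplies by 2**-30, i.e. it converts bytes to binary gigabytes (GiB); an equivalent, more explicit form:

BYTES_PER_GIB = 2 ** 30
average_storage_used = average_storage_used / float(BYTES_PER_GIB)  # bytes -> GiB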