Example #1
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.get_yaml_config()
    if str(yaml['log']['level_rails']).lower() != level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        yaml['log']['level_rails'] = level
        store.current_appliance.set_yaml_config(yaml)

        attempts = 0
        detected = False
        while (not detected and attempts < 60):
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if attempts >= 60:
            # Log an error but continue; the appliance may simply be slow to log
            # that the level changed.
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
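A minimal usage sketch, assuming set_rails_loglevel is in scope and that the appliance's default level_rails is 'info' (both are assumptions, not part of the snippet above): a pytest fixture that raises the level for one test and restores it afterwards.

import pytest


@pytest.fixture
def rails_debug_logging():
    # Raise rails log verbosity before the test body runs.
    set_rails_loglevel('debug')
    yield
    # Restore the assumed default afterwards.
    set_rails_loglevel('info')


def test_with_verbose_rails_logging(rails_debug_logging):
    # Runs with level_rails == 'debug'; the fixture resets it on teardown.
    pass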
Example #2
def wait_for_miq_server_workers_started(evm_tail=None, poll_interval=5):
    """Waits for the CFME's workers to be started by tailing evm.log for:
    'INFO -- : MIQ(MiqServer#wait_for_started_workers) All workers have been started'
    Verified works with 5.5 and 5.6 appliances.
    """
    if evm_tail is None:
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

    attempts = 0
    detected = False
    max_attempts = 60
    while (not detected and attempts < max_attempts):
        logger.debug('Attempting to detect MIQ Server workers started: {}'.format(attempts))
        for line in evm_tail:
            if 'MiqServer#wait_for_started_workers' in line:
                if 'All workers have been started' in line:
                    logger.info('Detected MIQ Server is ready.')
                    detected = True
                    break
        time.sleep(poll_interval)  # Allow more log lines to accumulate
        attempts += 1
    if attempts >= max_attempts:
        logger.error('Could not detect MIQ Server workers started in {}s.'.format(
            poll_interval * max_attempts))
    evm_tail.close()
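A short usage sketch mirroring how the replication workloads below call this function: open the tail before restarting the server so the "All workers have been started" line cannot be missed, then pass the tail in. restart_evm_service is a hypothetical helper, not part of the snippet above.

evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
evm_tail.set_initial_file_end()
restart_evm_service()  # hypothetical helper that restarts the MIQ server process
wait_for_miq_server_workers_started(evm_tail=evm_tail, poll_interval=5)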
Example #3
def standup_perf_ui(ui_worker_pid, soft_assert):
    logger.info('Opening /var/www/miq/vmdb/log/production.log for tail')
    prod_tail = SSHTail('/var/www/miq/vmdb/log/production.log')
    prod_tail.set_initial_file_end()

    ensure_browser_open()
    pages = analyze_page_stat(perf_click(ui_worker_pid, prod_tail, False, login_admin), soft_assert)

    return pages, prod_tail
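A hedged sketch of how the return values might be consumed, assuming perf_click and analyze_page_stat are available in the calling module, that analyze_page_stat returns a list of page-stat records, and that nav_to_dashboard is a purely illustrative navigation callable.

pages, prod_tail = standup_perf_ui(ui_worker_pid, soft_assert)
# Reuse the same production.log tail for further measured clicks.
pages.extend(analyze_page_stat(
    perf_click(ui_worker_pid, prod_tail, False, nav_to_dashboard), soft_assert))
prod_tail.close()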
Example #4
def standup_perf_ui(ui_worker_pid, soft_assert):
    logger.info('Opening /var/www/miq/vmdb/log/production.log for tail')
    prod_tail = SSHTail('/var/www/miq/vmdb/log/production.log')
    prod_tail.set_initial_file_end()

    ensure_browser_open()
    pages = analyze_page_stat(perf_click(ui_worker_pid, prod_tail, False, login_admin), soft_assert)

    return pages, prod_tail
Example #5
def test_containers_smartstate_analysis(provider, ssh_client):
    """Smart State analysis functionality check for single container image.
    Steps:
        1. Perform smart state analysis
            Expected: Green message showing: "...Analysis successfully Initiated"
        2. Waiting for analysis finish
            Expected: 'finished'
        3. check task succession in log
            Expected: LOG_VERIFICATION_TAGS are shown in the log
        4. verify that detail was added
            Expected: all RESULT_DETAIL_FIELDS are shown an pass the function"""
    delete_all_vm_tasks()
    # step 1
    navigate_to(Image, 'All')
    tb.select('List View')
    count = list_tbl.row_count()
    if not count:
        pytest.skip('Images table is empty! - cannot perform SSA test -> Skipping...')
    try:
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
    except Exception:  # TODO: Should we add a more specific exception?
        pytest.skip('Cannot continue test, probably due to containerized appliance\n'
                    'Traceback: \n{}'.format(format_exc()))
    evm_tail.set_initial_file_end()
    list_tbl.select_rows_by_indexes(choice(range(count)))
    tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
    sel.handle_alert()
    flash.assert_message_contain('Analysis successfully initiated')
    # step 2
    ssa_timeout = '5M'
    try:
        tasks.wait_analysis_finished('Container image analysis',
                                     'vm', delay=5, timeout=ssa_timeout)
    except TimedOutError:
        pytest.fail('Timeout exceeded; waited too long for SSA to finish ({}).'
                    .format(ssa_timeout))
    # Step 3
    check_log(evm_tail.raw_string(), LOG_VERIFICATION_TAGS)
    # Step 4
    time_queued = tasks_table.rows_as_list()[0].updated.text
    tasks_table.click_cell('Updated', value=time_queued)
    for field, verify_func in RESULT_DETAIL_FIELDS.items():
        assert verify_func(InfoBlock.text('Configuration', field))
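For context, check_log above only receives the raw text captured by the SSHTail plus a collection of expected markers. A minimal sketch of such a helper (names illustrative, not the suite's real implementation) could be:

def check_log(log_text, verification_tags):
    """Fail if any expected verification tag is missing from the captured log."""
    missing = [tag for tag in verification_tags if tag not in log_text]
    assert not missing, 'Tags not found in evm.log: {}'.format(missing)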
Example #6
def wait_for_miq_server_workers_started(evm_tail=None, poll_interval=5):
    """Waits for the CFME's workers to be started by tailing evm.log for:
    'INFO -- : MIQ(MiqServer#wait_for_started_workers) All workers have been started'
    Verified works with 5.5 and 5.6 appliances.
    """
    if evm_tail is None:
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

    attempts = 0
    detected = False
    max_attempts = 60
    while (not detected and attempts < max_attempts):
        logger.debug('Attempting to detect MIQ Server workers started: {}'.format(attempts))
        for line in evm_tail:
            if 'MiqServer#wait_for_started_workers' in line:
                if 'All workers have been started' in line:
                    logger.info('Detected MIQ Server is ready.')
                    detected = True
                    break
        time.sleep(poll_interval)  # Allow more log lines to accumulate
        attempts += 1
    if attempts >= max_attempts:
        logger.error('Could not detect MIQ Server workers started in {}s.'.format(
            poll_interval * max_attempts))
    evm_tail.close()
Example #7
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = get_yaml_config('vmdb')
    if str(yaml['log']['level_rails']).lower() != level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        yaml['log']['level_rails'] = level
        set_yaml_config("vmdb", yaml)

        attempts = 0
        detected = False
        while (not detected and attempts < 60):
            logger.debug(
                'Attempting to detect log level_rails change: {}'.format(
                    attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info(
                            'Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if attempts >= 60:
            # Log an error but continue; the appliance may simply be slow to log
            # that the level changed.
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()  # close the tail once detection has finished
    else:
        logger.info('Log level_rails already set to {}'.format(level))
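The '#{}' prefix built at the top of this function relies on evm.log prefixing every message with the emitting worker's PID, so matching '#<pid>' narrows the tail to the UI worker before looking for the level-change message. A representative (not verbatim) line and the two membership checks used above:

sample_line = ('[----] I, [2016-01-01T00:00:00.000000 #12345:abcdef]  INFO -- : '
               'Log level for production.log has been changed to [DEBUG]')
ui_worker_pid = '#12345'
assert ui_worker_pid in sample_line
assert 'Log level for production.log has been changed to' in sample_line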
Example #8
def test_workload_capacity_and_utilization_rep(appliance, request, scenario, setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']}
    master_appliance = IPAppliance(address=scenario['replication_master']['ip_address'],
                                   openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(ssh_client_master,
            appliance.address)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if 0 < time_left < 300:
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
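A hedged sketch of the scenario structure this workload reads; only keys referenced in the test above are shown, and the values are placeholders rather than real hosts or credentials.

scenario = {
    'name': 'cap-and-util-rep-example',
    'replication': 'pglogical',              # or 'rubyrep'
    'replication_master': {
        'ip_address': '10.0.0.2',
        'ssh': {'username': 'root', 'password': 'changeme'},
    },
    'providers': ['vmware-small-provider'],  # provider keys to add via create_rest()
    'refresh_sleep_time': 600,               # seconds to wait after adding providers
    'total_time': 3600,                      # seconds of C&U collection/processing
}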
Example #9
def test_workload_capacity_and_utilization_rep(request, scenario):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    set_pglogical_replication(ssh_client_master, replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    clean_appliance(ssh_client)
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    clean_appliance(ssh_client_master, False)  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': get_server_roles_workload_cap_and_util(separator=', '),
            'scenario': scenario
        }
    else:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': get_server_roles_workload_cap_and_util_rep(separator=', '),
            'scenario': scenario
        }
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    set_server_roles_workload_cap_and_util(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    set_cap_and_util_all_via_rails(ssh_client)

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        set_pglogical_replication(ssh_client, replication_type=':remote')
        # Setup master appliance to :global
        set_pglogical_replication(ssh_client_master,
                                  replication_type=':global')
        # Setup master to subscribe:
        add_pglogical_replication_subscription(
            ssh_client_master, cfme_performance['appliance']['ip_address'])
    else:
        # Setup local towards Master
        set_rubyrep_replication(ssh_client,
                                scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        set_server_roles_workload_cap_and_util_rep(ssh_client)

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if 0 < time_left < 300:
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        set_pglogical_replication(ssh_client_master, replication_type=':none')
    else:
        set_server_roles_workload_cap_and_util(ssh_client)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
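Both replication workloads stop their SmemMemoryMonitor through the same finalizer contract: set grafana_urls, clear the signal flag, then join. A minimal stand-in thread honoring that contract is sketched below; it is illustrative only and assumes (rather than documents) how the real monitor behaves.

import threading
import time


class MonitorSketch(threading.Thread):
    """Samples until its `signal` flag is cleared; join() returns after the last sample."""

    def __init__(self):
        super(MonitorSketch, self).__init__()
        self.signal = True
        self.grafana_urls = None

    def run(self):
        while self.signal:
            # ...collect and record one memory sample here...
            time.sleep(1)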