Example #1
    def __init__(self, remote_filename, **kwargs):
        # Pattern lists are popped off so the remaining keyword arguments
        # can be forwarded untouched to SSHTail.
        self.skip_patterns = kwargs.pop('skip_patterns', [])
        self.failure_patterns = kwargs.pop('failure_patterns', [])
        self.matched_patterns = kwargs.pop('matched_patterns', [])

        self._remote_file_tail = SSHTail(remote_filename, **kwargs)
        # Seed a hit counter for every pattern that should be tracked.
        self._matches = {key: 0 for key in self.matched_patterns}
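A minimal sketch (assuming `re` is imported) of how a companion method on the same class might consume the tail; the `validate` name and the skip/failure/match handling are illustrative assumptions, not part of the original:

    def validate(self):
        # Hypothetical companion method: walk new lines from the tail,
        # skip ignored lines, raise on failure patterns, count matches.
        for line in self._remote_file_tail:
            if any(re.search(p, line) for p in self.skip_patterns):
                continue
            for pattern in self.failure_patterns:
                if re.search(pattern, line):
                    raise AssertionError(
                        'failure pattern {!r} matched: {}'.format(pattern, line))
            for pattern in self.matched_patterns:
                if re.search(pattern, line):
                    self._matches[pattern] += 1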
Example #2
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.get_yaml_config()
    if not str(yaml['log']['level_rails']).lower() == level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        yaml_data = {'log': {'level_rails': level}}
        store.current_appliance.set_yaml_config(yaml_data)

        attempts = 0
        detected = False
        while not detected and attempts < 60:
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if attempts >= 60:
            # Log an error but continue, since the appliance could simply be slow
            # at logging that the log level changed
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
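A minimal usage sketch, assuming `store.current_appliance` already points at a configured appliance:

    # Raise the rails log level and wait for the default MiqUiWorker
    # process to confirm the change in evm.log.
    set_rails_loglevel('debug')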
Example #3
def test_workload_capacity_and_utilization_rep(appliance, request, scenario,
                                               setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    master_appliance = IPAppliance(
        hostname=scenario['replication_master']['ip_address'],
        openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical one
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info(f'Clean appliance under test ({ssh_client})')
    appliance.clean_appliance()
    logger.info(f'Clean master appliance ({ssh_client_master})')
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {
            'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    else:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(f'Finished cleaning up monitoring thread in {timediff}')

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm,
                                                  poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(
            ssh_client_master, appliance.hostname)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(
            scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while elapsed_time < total_time:
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if 0 < time_left < 300:
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
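The elapsed-time loop above sleeps in chunks of at most 300 seconds so progress keeps being logged while C&U data accumulates. A standalone sketch of the same pattern, with illustrative names only:

    import logging
    import time

    def sleep_logging_progress(total_time, chunk=300):
        # Illustrative helper mirroring the loop above: block until total_time
        # seconds have elapsed, waking at most every `chunk` seconds to log.
        log = logging.getLogger(__name__)
        starttime = time.time()
        elapsed_time = 0
        while elapsed_time < total_time:
            elapsed_time = time.time() - starttime
            time_left = total_time - elapsed_time
            log.info('Time elapsed: %s/%s', round(elapsed_time, 2), total_time)
            if 0 < time_left < chunk:
                time.sleep(time_left)
            elif time_left > 0:
                time.sleep(chunk)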