Example #1
class LogValidator(object):
    # Constructor excerpt; the full class is shown in Example #4.
    def __init__(self, remote_filename, **kwargs):
        # Pattern lists are popped off kwargs; whatever remains is forwarded to SSHTail.
        self.skip_patterns = kwargs.pop('skip_patterns', [])
        self.failure_patterns = kwargs.pop('failure_patterns', [])
        self.matched_patterns = kwargs.pop('matched_patterns', [])

        self._remote_file_tail = SSHTail(remote_filename, **kwargs)
        self._matches = {key: 0 for key in self.matched_patterns}
Example #2
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.advanced_settings
    if str(yaml['log']['level_rails']).lower() != level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        log_yaml = yaml.get('log', {})
        log_yaml['level_rails'] = level
        store.current_appliance.update_advanced_settings({'log': log_yaml})

        attempts = 0
        detected = False
        while not detected and attempts < 60:
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if attempts >= 60:
            # Log the error but continue, as the appliance could just be slow at logging
            # that the log level changed
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
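The detection loop above repeatedly iterates the SSHTail object and sleeps between passes until the expected line shows up or 60 attempts pass. Below is a minimal sketch of that polling pattern as a reusable helper, assuming only an SSHTail-like object that yields newly appended lines on each iteration; the helper name and signature are illustrative, not part of any existing API.

import time

def wait_for_log_line(tail, substrings, attempts=60, poll_interval=1):
    # Return True once a freshly tailed line contains all given substrings,
    # False if the attempt budget is exhausted first.
    for _ in range(attempts):
        for line in tail:
            if all(s in line for s in substrings):
                return True
        time.sleep(poll_interval)  # allow more log lines to accumulate
    return False

With such a helper, the detection loop above reduces to a single call, e.g. wait_for_log_line(evm_tail, [ui_worker_pid, 'Log level for production.log has been changed to']).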
Example #3
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.get_yaml_config()
    if str(yaml['log']['level_rails']).lower() != level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        yaml_data = {'log': {'level_rails': level}}
        store.current_appliance.set_yaml_config(yaml_data)

        attempts = 0
        detected = False
        while not detected and attempts < 60:
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if attempts >= 60:
            # Log the error but continue, as the appliance could just be slow at logging
            # that the log level changed
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
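This version is identical to Example #2 except for how the appliance configuration is updated: Example #2 merges the new value into the existing advanced settings, while this one pushes a nested dict through the older get_yaml_config()/set_yaml_config() pair. A short side-by-side sketch, using only the calls shown in the two examples above (level is the target log level, as in those functions):

# Style used in Example #2: merge into the existing 'log' section.
log_yaml = store.current_appliance.advanced_settings.get('log', {})
log_yaml['level_rails'] = level
store.current_appliance.update_advanced_settings({'log': log_yaml})

# Style used in Example #3: push a nested dict through set_yaml_config.
store.current_appliance.set_yaml_config({'log': {'level_rails': level}})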
Example #4
class LogValidator(object):
    """
    Log content validator class provides methods
    to monitor the log content before test is started,
    and validate the content of log during test execution,
    according to predefined patterns.
    Predefined patterns are:

    * Logs which should be skipped. Skip further checks on particular line if matched
    * Logs which should cause failure of test.
    * Logs which are expected to be matched, otherwise fail.

    The priority of patterns to be checked are defined in above order.
    Skipping patterns have priority over other ones,
    to be possible to skip particular ERROR log,
    but fail for wider range of other ERRORs.

    Note: If failures pattern matched in log; It will raise `FailPatternMatchError`

    Args:
        remote_filename: path to the remote log file
        skip_patterns: array of skip regex patterns
        failure_patterns: array of failure regex patterns
        matched_patterns: array of expected regex patterns to be matched

    Usage:
        .. code-block:: python

          evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
                                  skip_patterns=['PARTICULAR_ERROR'],
                                  failure_patterns=['.*ERROR.*'],
                                  matched_patterns=['PARTICULAR_INFO'])
          evm_tail.start_monitoring()
          do_some_stuff()
          evm_tail.validate()       # evm_tail.validate(wait="30s")

        or

        .. code-block:: python

          with LogValidator('/var/www/miq/vmdb/log/evm.log',
                            skip_patterns=['PARTICULAR_ERROR'],
                            failure_patterns=['.*ERROR.*'],
                            matched_patterns=['PARTICULAR_INFO']):
              do_some_stuff()
    """

    def __init__(self, remote_filename, **kwargs):
        self.skip_patterns = kwargs.pop('skip_patterns', [])
        self.failure_patterns = kwargs.pop('failure_patterns', [])
        self.matched_patterns = kwargs.pop('matched_patterns', [])

        self._remote_file_tail = SSHTail(remote_filename, **kwargs)
        self._matches = {key: 0 for key in self.matched_patterns}

    def start_monitoring(self):
        """Start monitoring log before action"""
        self._remote_file_tail.set_initial_file_end()
        logger.info("Log monitoring has been started on remote file")

    def _check_skip_logs(self, line):
        for pattern in self.skip_patterns:
            if re.search(pattern, line):
                logger.info(
                    "Skip pattern %s was matched on line %s so skipping this line", pattern, line
                )
                return True
        return False

    def _check_fail_logs(self, line):
        for pattern in self.failure_patterns:
            if re.search(pattern, line):
                logger.error("Failure pattern %s was matched on line %s", pattern, line)
                raise FailPatternMatchError(pattern, "Expected failure pattern found in log.", line)

    def _check_match_logs(self, line):
        for pattern in self.matched_patterns:
            if re.search(pattern, line):
                logger.info("Expected pattern %s was matched on line %s", pattern, line)
                self._matches[pattern] += 1

    @property
    def _is_valid(self):
        for pattern, count in self.matches.items():
            if count == 0:
                logger.info("Expected '%s' pattern not found", pattern)
                return False
        return True

    @property
    def matches(self):
        """Collect match count in log

        Returns (dict): Pattern match count dictionary
        """

        for line in self._remote_file_tail:
            if self._check_skip_logs(line):
                continue
            self._check_fail_logs(line)
            self._check_match_logs(line)

        logger.info("Matches found: {}".format(self._matches))
        return self._matches

    def validate(self, wait=None, message="waiting for log validation", **kwargs):
        """Validate log pattern

        Args:
            wait: Timeout to wait for the expected patterns to be matched
            message: Message passed to `wait_for` while waiting.
        Returns (bool): True if all expected patterns matched in the log, else False
        Raises:
            TimedOutError: If the expected patterns did not match within the timeout
            FailPatternMatchError: If a failure pattern matched
        """

        if wait:
            wait_for(lambda: self._is_valid, delay=5, timeout=wait, message=message, **kwargs)
            return True
        else:
            return self._is_valid

    @contextmanager
    def waiting(self, **kwargs):
        self.start_monitoring()
        yield
        self.validate(**kwargs)

    __enter__ = start_monitoring

    def __exit__(self, exc_type, exc_value, traceback):
        self.validate()
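To illustrate the priority rules described in the class docstring, here is a hedged usage sketch: one known ERROR is whitelisted through skip_patterns, any other ERROR raises FailPatternMatchError as soon as the tailed log is read, and validate() only passes once the expected line has appeared. The regex patterns and restart_workers() are placeholders, not taken from any real test.

validator = LogValidator(
    '/var/www/miq/vmdb/log/evm.log',
    skip_patterns=[r'ERROR.*KnownHarmlessFailure'],   # checked first: matching lines are ignored
    failure_patterns=[r'.*ERROR.*'],                  # checked second: any other ERROR raises
    matched_patterns=[r'Worker.*started'],            # checked last: must match at least once
)
validator.start_monitoring()
restart_workers()               # placeholder for the action under test
validator.validate(wait="60s")  # raises TimedOutError if the expected pattern never appears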
Example #5
def test_workload_capacity_and_utilization_rep(appliance, request, scenario,
                                               setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    master_appliance = IPAppliance(
        hostname=scenario['replication_master']['ip_address'],
        openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info(f'Clean appliance under test ({ssh_client})')
    appliance.clean_appliance()
    logger.info(f'Clean master appliance ({ssh_client_master})')
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {
            'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    else:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(f'Finished cleaning up monitoring thread in {timediff}')

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm,
                                                  poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(
            ssh_client_master, appliance.hostname)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(
            scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while elapsed_time < total_time:
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if 0 < time_left < 300:
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
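The C&U collection phase above waits out scenario['total_time'] by sleeping in chunks of at most 300 seconds while logging progress. A minimal sketch of that wait loop as a standalone helper; the helper name and signature are illustrative:

import time

def sleep_for_duration(total_time, chunk=300, log=None):
    # Sleep in chunks of at most `chunk` seconds until `total_time` seconds
    # have elapsed, optionally logging progress; returns the measured elapsed time.
    starttime = time.time()
    elapsed_time = 0
    while elapsed_time < total_time:
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        if log:
            log.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if 0 < time_left < chunk:
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(chunk)
    return elapsed_time

The test above could then record the result as quantifiers['Elapsed_Time'] = round(sleep_for_duration(total_time, log=logger), 2).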