Example #1
def test_queue_infrastructure(request, ssh_client, enable_candu):
    local_evm_gz = str(log_path.join('evm.perf.log.gz'))
    local_evm = str(log_path.join('evm.perf.log'))
    local_top_gz = str(log_path.join('top_output.perf.log.gz'))
    local_top = str(log_path.join('top_output.perf.log'))

    def clean_up_log_files(files):
        for clean_file in files:
            # Clean up collected log files as they can be huge in case of exception
            if os.path.exists(clean_file):
                logger.info('Removing: %s', clean_file)
                os.remove(clean_file)
    request.addfinalizer(lambda: clean_up_log_files([local_evm, local_evm_gz, local_top,
        local_top_gz]))

    sleep_time = perf_tests['test_queue']['infra_time']

    logger.info('Waiting: %s', sleep_time)
    time.sleep(sleep_time)

    collect_log(ssh_client, 'evm', local_evm_gz)
    collect_log(ssh_client, 'top_output', local_top_gz, strip_whitespace=True)

    logger.info('Calling gunzip %s', local_evm_gz)
    subprocess.call(['gunzip', local_evm_gz])

    logger.info('Calling gunzip {}'.format(local_top_gz))
    subprocess.call(['gunzip', local_top_gz])

    # Post process evm log and top_output log for charts and csvs
    perf_process_evm(local_evm, local_top)
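
The pattern worth noting above is registering the cleanup with request.addfinalizer before the long sleep and log collection, so the large collected files are removed even when the test raises. A minimal, self-contained sketch of the same pattern using plain pytest (the file name and fixture are hypothetical, not part of the example above):

import os

import pytest


@pytest.fixture
def scratch_log(request, tmp_path):
    # Hypothetical stand-in for the collected evm/top_output logs
    log_file = tmp_path / "collected.log"

    def clean_up_log_files():
        # Runs after the test finishes, even if it failed
        if log_file.exists():
            os.remove(str(log_file))

    request.addfinalizer(clean_up_log_files)
    return log_file


def test_collects_and_cleans(scratch_log):
    scratch_log.write_text(u"collected output")
    assert scratch_log.exists()
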
Example #2
def test_queue_infrastructure(request, ssh_client, enable_candu):
    local_evm_gz = str(log_path.join('evm.perf.log.gz'))
    local_evm = str(log_path.join('evm.perf.log'))
    local_top_gz = str(log_path.join('top_output.perf.log.gz'))
    local_top = str(log_path.join('top_output.perf.log'))

    def clean_up_log_files(files):
        for clean_file in files:
            # Clean up collected log files as they can be huge in case of exception
            if os.path.exists(clean_file):
                logger.info('Removing: %s', clean_file)
                os.remove(clean_file)

    request.addfinalizer(lambda: clean_up_log_files(
        [local_evm, local_evm_gz, local_top, local_top_gz]))

    sleep_time = perf_tests['test_queue']['infra_time']

    logger.info('Waiting: %s', sleep_time)
    time.sleep(sleep_time)

    collect_log(ssh_client, 'evm', local_evm_gz)
    collect_log(ssh_client, 'top_output', local_top_gz, strip_whitespace=True)

    logger.info('Calling gunzip %s', local_evm_gz)
    subprocess.call(['gunzip', local_evm_gz])

    logger.info('Calling gunzip {}'.format(local_top_gz))
    subprocess.call(['gunzip', local_top_gz])

    # Post process evm log and top_output log for charts and csvs
    perf_process_evm(local_evm, local_top)
Example #3
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)

    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_hook_callback('filedump', 'pre', parse_setup_dir,
                               name="filedump_dir_setup")

    initialize(art)
    ip = urlparse(env['base_url']).hostname

    art.configure_plugin('merkyl', ip=ip)
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('reporter')
    art.fire_hook('start_session', run_id=run_id)
Example #4
def pytest_sessionfinish(session, exitstatus):
    udf_log_file = log_path.join('unused_data_files.log')

    if udf_log_file.check():
        # Clean up old udf log if it exists
        udf_log_file.remove()

    if session.config.option.udf_report is False:
        # Bail out here if not making a report
        return

    # Output an unused data files log after a test run
    data_files = set()
    for dirpath, dirnames, filenames in os.walk(str(data_path)):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            data_files.add(filepath)
    unused_data_files = data_files - seen_data_files

    if unused_data_files:
        # Write the log of unused data files out, minus the data dir prefix
        udf_log = ''.join(
            (line[len(str(data_path)):] + '\n' for line in unused_data_files))
        udf_log_file.write(udf_log + '\n')

        # Throw a notice into the terminal reporter to check the log
        tr = reporter()
        tr.write_line('')
        tr.write_sep(
            '-', '%d unused data files after test run, check %s' %
            (len(unused_data_files), udf_log_file.basename))
Example #5
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = local(self.config.get('log_dir', log_path))
     self.log_dir.ensure(dir=True)
     self.artifact_dir = local(
         self.config.get('artifact_dir', log_path.join('artifacts')))
     self.artifact_dir.ensure(dir=True)
     self.logger = create_logger(
         'artifactor',
         self.log_dir.join('artifactor.log').strpath)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     if not self.log_dir:
         print("!!! Log dir must be specified in yaml")
         sys.exit(127)
     if not self.artifact_dir:
         print("!!! Artifact dir must be specified in yaml")
         sys.exit(127)
     self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(
         random_port())
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {
         'artifactor_config': self.config,
         'log_dir': self.log_dir.strpath,
         'artifact_dir': self.artifact_dir.strpath,
         'artifacts': dict(),
         'old_artifacts': dict()
     }
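
log_path, local() and the join/ensure/strpath calls above all come from py.path.local. A small standalone sketch of just those py.path calls, using a temporary directory as a stand-in for the project's log_path:

import tempfile

import py

# Stand-in for utils.path.log_path
log_path = py.path.local(tempfile.mkdtemp())

artifact_dir = log_path.join('artifacts')
artifact_dir.ensure(dir=True)            # create the directory if it is missing
print(artifact_dir.strpath)              # plain string form of the path

log_file = log_path.join('artifactor.log')
with log_file.open('w', ensure=True) as f:   # ensure=True creates missing parents
    f.write('hello\n')
print(log_file.check())                  # True once the file exists
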
Example #6
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)

    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.strpath
    if 'artifact_dir' not in art_config:
        art_config['artifact_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_plugin(post_result.PostResult, "post-result")
    art.register_plugin(ostriz.Ostriz, "ostriz")

    initialize(art)

    art.configure_plugin('merkyl')
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('reporter')
    art.configure_plugin('post-result')
    art.configure_plugin('ostriz')
    art.fire_hook('start_session', run_id=run_id)
Example #7
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument(
        '-f',
        '--force',
        default=True,
        action='store_false',
        dest='prompt',
        help='Do not prompt before deleting VMs (danger zone!)')
    parser.add_argument(
        '--max-hours',
        default=24,
        help='Max hours since the VM was created or last powered on '
        '(varies by provider, default 24)')
    parser.add_argument(
        '--provider',
        dest='providers',
        action='append',
        default=None,
        help='Provider(s) to inspect, can be used multiple times',
        metavar='PROVIDER')
    parser.add_argument('--outfile',
                        dest='outfile',
                        default=log_path.join('cleanup_old_vms.log').strpath,
                        help='Output file for the VM list')
    parser.add_argument(
        'text_to_match',
        nargs='*',
        default=['^test_', '^jenkins', '^i-'],
        help='Regex in the name of vm to be affected, can be used multiple times'
        ' (Defaults to \'^test_\' and \'^jenkins\')')

    args = parser.parse_args()
    return args
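
A quick note on the append action used for --provider: each occurrence of the flag appends to the same list, and omitting it leaves the default (None) untouched. A tiny standalone illustration with made-up provider keys:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--provider', dest='providers', action='append', default=None,
                    metavar='PROVIDER')

# Repeating the flag accumulates values in order
args = parser.parse_args(['--provider', 'vsphere55', '--provider', 'rhos7'])
assert args.providers == ['vsphere55', 'rhos7']

# Without the flag, providers stays None rather than becoming an empty list
assert parser.parse_args([]).providers is None
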
Example #8
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)

    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(softassert.SoftAssert, "softassert")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_plugin(post_result.PostResult, "post-result")
    art.register_hook_callback('filedump', 'pre', parse_setup_dir,
                               name="filedump_dir_setup")

    initialize(art)

    art.configure_plugin('merkyl')
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('softassert')
    art.configure_plugin('reporter')
    art.configure_plugin('post-result')
    art.fire_hook('start_session', run_id=run_id)
Example #9
def pytest_collection_modifyitems(session, config, items):
    from fixtures.pytest_store import store
    len_collected = len(items)

    new_items = []

    from utils.path import log_path
    with log_path.join('uncollected.log').open('w') as f:
        for item in items:
            # First filter out all items that have the uncollect mark
            if item.get_marker('uncollect') or not uncollectif(item):
                # If an uncollect marker has been added,
                # give it priority for the explanation
                uncollect = item.get_marker('uncollect')
                marker = uncollect or item.get_marker('uncollectif')
                if marker:
                    reason = marker.kwargs.get('reason', "No reason given")
                else:
                    reason = None
                f.write("{} - {}\n".format(item.name, reason))
            else:
                new_items.append(item)

    items[:] = new_items

    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['uncollectif'] = filtered_count
Example #10
def pytest_sessionfinish(session, exitstatus):
    udf_log_file = log_path.join('unused_data_files.log')

    if udf_log_file.check():
        # Clean up old udf log if it exists
        udf_log_file.remove()

    if session.config.option.udf_report is False:
        # Bail out here if not making a report
        return

    # Output an unused data files log after a test run
    data_files = set()
    for dirpath, dirnames, filenames in os.walk(str(data_path)):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            data_files.add(filepath)
    unused_data_files = data_files - seen_data_files

    if unused_data_files:
        # Write the log of unused data files out, minus the data dir prefix
        udf_log = ''.join(
            (line[len(str(data_path)):] + '\n' for line in unused_data_files)
        )
        udf_log_file.write(udf_log + '\n')

        # Throw a notice into the terminal reporter to check the log
        tr = reporter()
        tr.write_line('')
        tr.write_sep(
            '-',
            '%d unused data files after test run, check %s' % (
                len(unused_data_files), udf_log_file.basename
            )
        )
Example #11
def messages_to_statistics_csv(messages, statistics_file_name):
    all_statistics = []
    for msg_id in messages:
        msg = messages[msg_id]

        added = False
        if len(all_statistics) > 0:
            for msg_statistics in all_statistics:
                if msg_statistics.cmd == msg.msg_cmd:

                    if msg.del_time > 0:
                        msg_statistics.delivertimes.append(float(msg.del_time))
                        msg_statistics.gets += 1
                    msg_statistics.dequeuetimes.append(float(msg.deq_time))
                    msg_statistics.totaltimes.append(float(msg.total_time))
                    msg_statistics.puts += 1
                    added = True
                    break

        if not added:
            msg_statistics = MiqMsgLists()
            msg_statistics.cmd = msg.msg_cmd
            if msg.del_time > 0:
                msg_statistics.delivertimes.append(float(msg.del_time))
                msg_statistics.gets = 1
            msg_statistics.dequeuetimes.append(float(msg.deq_time))
            msg_statistics.totaltimes.append(float(msg.total_time))
            msg_statistics.puts = 1
            all_statistics.append(msg_statistics)

    csvdata_path = log_path.join('csv_output', statistics_file_name)
    outputfile = csvdata_path.open('w', ensure=True)

    try:
        csvfile = csv.writer(outputfile)
        metrics = ['samples', 'min', 'avg', 'median', 'max', 'std', '90', '99']
        measurements = ['deq_time', 'del_time', 'total_time']
        headers = ['cmd', 'puts', 'gets']
        for measurement in measurements:
            for metric in metrics:
                headers.append('{}_{}'.format(measurement, metric))

        csvfile.writerow(headers)

        # Contents of CSV
        for msg_statistics in sorted(all_statistics, key=lambda x: x.cmd):
            if msg_statistics.gets > 1:
                logger.debug('Samples/Avg/90th/Std: {} : {} : {} : {},Cmd: {}'.format(
                    str(len(msg_statistics.totaltimes)).rjust(7),
                    str(round(numpy.average(msg_statistics.totaltimes), 3)).rjust(7),
                    str(round(numpy.percentile(msg_statistics.totaltimes, 90), 3)).rjust(7),
                    str(round(numpy.std(msg_statistics.totaltimes), 3)).rjust(7),
                    msg_statistics.cmd))
            stats = [msg_statistics.cmd, msg_statistics.puts, msg_statistics.gets]
            stats.extend(generate_statistics(msg_statistics.dequeuetimes, 3))
            stats.extend(generate_statistics(msg_statistics.delivertimes, 3))
            stats.extend(generate_statistics(msg_statistics.totaltimes, 3))
            csvfile.writerow(stats)
    finally:
        outputfile.close()
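
generate_statistics is not shown in this example; judging from the csv headers (samples, min, avg, median, max, std, 90, 99) it presumably returns one value per metric for a list of timings. A rough standalone equivalent under that assumption:

import numpy


def generate_statistics(values, precision=3):
    # Assumed contract: one entry per metric in the headers above
    if not values:
        return [0, 0, 0, 0, 0, 0, 0, 0]
    arr = numpy.array(values, dtype=float)
    return [
        len(arr),
        round(float(numpy.amin(arr)), precision),
        round(float(numpy.average(arr)), precision),
        round(float(numpy.median(arr)), precision),
        round(float(numpy.amax(arr)), precision),
        round(float(numpy.std(arr)), precision),
        round(float(numpy.percentile(arr, 90)), precision),
        round(float(numpy.percentile(arr, 99)), precision),
    ]


print(generate_statistics([0.2, 0.4, 1.5, 0.9]))
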
Example #12
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)

    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.strpath
    if 'artifact_dir' not in art_config:
        art_config['artifact_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_plugin(post_result.PostResult, "post-result")
    art.register_plugin(ostriz.Ostriz, "ostriz")

    initialize(art)

    art.configure_plugin('merkyl')
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('reporter')
    art.configure_plugin('post-result')
    art.configure_plugin('ostriz')
    art.fire_hook('start_session', run_id=run_id)
Example #13
    def pytest_sessionfinish(self, exitstatus):
        # Now master/standalone needs to move all the reports to an appliance for the source report
        if store.parallelizer_role != 'master':
            manager().collect()

        # for slaves, everything is done at this point
        if store.parallelizer_role == 'slave':
            return

        # on master/standalone, merge all the collected reports and bring them back
        manager().merge()

        try:
            global ui_coverage_percent
            last_run = json.load(log_path.join('coverage', 'merged', '.last_run.json').open())
            ui_coverage_percent = last_run['result']['covered_percent']
            style = {'bold': True}
            if ui_coverage_percent > 40:
                style['green'] = True
            else:
                style['red'] = True
            store.write_line('UI Coverage Result: {}%'.format(ui_coverage_percent),
                **style)
        except Exception as ex:
            logger.error('Error printing coverage report to terminal')
            logger.exception(ex)
Example #14
def pytest_collection_modifyitems(session, config, items):
    from fixtures.pytest_store import store
    len_collected = len(items)

    new_items = []

    from utils.path import log_path
    with log_path.join('uncollected.log').open('w') as f:
        for item in items:
            # First filter out all items that have the uncollect mark
            if item.get_marker('uncollect') or not uncollectif(item):
                # If an uncollect marker has been added,
                # give it priority for the explanation
                uncollect = item.get_marker('uncollect')
                marker = uncollect or item.get_marker('uncollectif')
                if marker:
                    reason = marker.kwargs.get('reason', "No reason given")
                else:
                    reason = None
                f.write("{} - {}\n".format(item.name, reason))
            else:
                new_items.append(item)

    items[:] = new_items

    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['uncollectif'] = filtered_count
Example #15
def update_template_log(appliance_template, action, provider=None, failed_providers=None):
    try:
        trackerbot_ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', args.trackerbot_url)[0]
        creds = credentials['host_default']
        sshclient = make_ssh_client(trackerbot_ip, creds['username'], creds['password'])
        template_resultlog = log_path.join('template_result.log').strpath
        if action == 'create':
            command = 'mkdir -p /home/amavinag/{}'.format(appliance_template)
            sshclient.run_command(command)
            sshclient.put_file(template_resultlog, remote_file='/home/amavinag/{}/{}'.format(
                appliance_template, provider))
        if action == 'merge':
            with open(template_resultlog, 'w') as report:
                command = 'cd /home/amavinag/{}/&&cat {}'.format(appliance_template,
                                                                 ' '.join(failed_providers))
                status, output = sshclient.run_command(command)
                if 'No such file or directory' in output:
                    command = 'cd /home/amavinag/{}/&&cat *'.format(appliance_template)
                    status, output = sshclient.run_command(command)
                report.write(output)
                report.close()
        elif action == 'remove':
            sshclient.run_command('cd /home/amavinag/&&rm -rf {}'.format(
                appliance_template))
        sshclient.close()
    except Exception as e:
        print(e)
        return False
Example #16
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = local(self.config.get('log_dir', log_path))
     self.log_dir.ensure(dir=True)
     self.artifact_dir = local(self.config.get('artifact_dir', log_path.join('artifacts')))
     self.artifact_dir.ensure(dir=True)
     self.logger = create_logger('artifactor', self.log_dir.join('artifactor.log').strpath)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     if not self.log_dir:
         print("!!! Log dir must be specified in yaml")
         sys.exit(127)
     if not self.artifact_dir:
         print("!!! Artifact dir must be specified in yaml")
         sys.exit(127)
     self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(random_port())
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {
         'artifactor_config': self.config,
         'log_dir': self.log_dir.strpath,
         'artifact_dir': self.artifact_dir.strpath,
         'artifacts': dict(),
         'old_artifacts': dict()
     }
Example #17
def generate_hourly_charts_and_csvs(hourly_buckets, charts_dir):
    for cmd in sorted(hourly_buckets):
        current_csv = 'hourly_' + cmd + '.csv'
        csv_rawdata_path = log_path.join('csv_output', current_csv)

        logger.info('Writing {} csvs/charts'.format(cmd))
        output_file = csv_rawdata_path.open('w', ensure=True)
        csvwriter = csv.DictWriter(output_file, fieldnames=MiqMsgBucket().headers,
            delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writeheader()
        for dt in sorted(hourly_buckets[cmd].keys()):
            linechartxaxis = []
            avgdeqtimings = []
            mindeqtimings = []
            maxdeqtimings = []
            avgdeltimings = []
            mindeltimings = []
            maxdeltimings = []
            cmd_put = []
            cmd_get = []

            sortedhr = sorted(hourly_buckets[cmd][dt].keys())
            for hr in sortedhr:
                linechartxaxis.append(str(hr))
                bk = hourly_buckets[cmd][dt][hr]

                avgdeqtimings.append(round(bk.avg_deq, 2))
                mindeqtimings.append(round(bk.min_deq, 2))
                maxdeqtimings.append(round(bk.max_deq, 2))
                avgdeltimings.append(round(bk.avg_del, 2))
                mindeltimings.append(round(bk.min_del, 2))
                maxdeltimings.append(round(bk.max_del, 2))
                cmd_put.append(bk.total_put)
                cmd_get.append(bk.total_get)
                bk.date = dt
                bk.hour = hr
                csvwriter.writerow(dict(bk))

            lines = {}
            lines['Put ' + cmd] = cmd_put
            lines['Get ' + cmd] = cmd_get
            line_chart_render(cmd + ' Command Put/Get Count', 'Hour during ' + dt,
                '# Count of Commands', linechartxaxis, lines,
                charts_dir.join('/{}-{}-cmdcnt.svg'.format(cmd, dt)))

            lines = {}
            lines['Average Dequeue Timing'] = avgdeqtimings
            lines['Min Dequeue Timing'] = mindeqtimings
            lines['Max Dequeue Timing'] = maxdeqtimings
            line_chart_render(cmd + ' Dequeue Timings', 'Hour during ' + dt, 'Time (s)',
                linechartxaxis, lines, charts_dir.join('/{}-{}-dequeue.svg'.format(cmd, dt)))

            lines = {}
            lines['Average Deliver Timing'] = avgdeltimings
            lines['Min Deliver Timing'] = mindeltimings
            lines['Max Deliver Timing'] = maxdeltimings
            line_chart_render(cmd + ' Deliver Timings', 'Hour during ' + dt, 'Time (s)',
                linechartxaxis, lines, charts_dir.join('/{}-{}-deliver.svg'.format(cmd, dt)))
        output_file.close()
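
line_chart_render is not shown either; it takes a title, axis labels, x labels, a dict of series and an output path, which maps directly onto pygal's Line chart. A hedged sketch of what such a helper could look like (the use of pygal is an assumption, not confirmed by this example):

import pygal


def line_chart_render(title, x_title, y_title, x_labels, lines, target_path):
    chart = pygal.Line()
    chart.title = title
    chart.x_title = x_title
    chart.y_title = y_title
    chart.x_labels = x_labels
    for name, values in sorted(lines.items()):
        chart.add(name, values)
    # target_path is assumed to behave like the py.path objects used above
    chart.render_to_file(str(target_path))
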
Example #18
def pages_to_csv(pages, file_name):
    csvdata_path = log_path.join('csv_output', file_name)
    outputfile = csvdata_path.open('w', ensure=True)
    csvwriter = csv.DictWriter(outputfile, fieldnames=PageStat().headers, delimiter=',',
        quotechar='\'', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writeheader()
    for page in pages:
        csvwriter.writerow(dict(page))
Example #19
 def _inc_test_count(test):
     error = ""
     if 'statuses' in test:
         test_counts[test['statuses']['overall']] += 1
     else:
         error += str(test)
     with log_path.join('no_status.log').open('a') as f:
         f.write(error)
Example #20
def pages_to_csv(pages, file_name):
    csvdata_path = log_path.join('csv_output', file_name)
    outputfile = csvdata_path.open('w', ensure=True)
    csvwriter = csv.DictWriter(outputfile, fieldnames=PageStat().headers, delimiter=',',
        quotechar='\'', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writeheader()
    for page in pages:
        csvwriter.writerow(dict(page))
Example #21
def create_logger(logger_name,
                  filename=None,
                  max_file_size=None,
                  max_backups=None):
    """Creates and returns the named logger

    If the logger already exists, it will be destroyed and recreated
    with the current config in env.yaml

    """
    # If the logger already exists, destroy it
    # TODO: remove the need to destroy the logger
    logging.root.manager.loggerDict.pop(logger_name, None)

    # Grab the logging conf
    conf = _load_conf(logger_name)

    log_path.ensure(dir=True)
    if filename:
        log_file = filename
    else:
        log_file = str(log_path.join('{}.log'.format(logger_name)))

    # log_file is dynamic, so we can't use logging.config.dictConfig here without creating
    # a custom RotatingFileHandler class. At some point, we should do that, and move the
    # entire logging config into env.yaml

    file_formatter = logging.Formatter(conf['file_format'])
    file_handler = RotatingFileHandler(log_file,
                                       maxBytes=max_file_size
                                       or conf['max_file_size'],
                                       backupCount=max_backups
                                       or conf['max_file_backups'],
                                       encoding='utf8')
    file_handler.setFormatter(file_formatter)

    logger = logging.getLogger(logger_name)
    logger.addHandler(file_handler)

    syslog_settings = _get_syslog_settings()
    if syslog_settings:
        lid = fauxfactory.gen_alphanumeric(8)
        fmt = '%(asctime)s [' + lid + '] %(message)s'
        syslog_formatter = SyslogMsecFormatter(fmt=fmt)
        syslog_handler = SysLogHandler(address=syslog_settings)
        syslog_handler.setFormatter(syslog_formatter)
        logger.addHandler(syslog_handler)
    logger.setLevel(conf['level'])
    if conf['errors_to_console']:
        stream_formatter = logging.Formatter(conf['stream_format'])
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.ERROR)
        stream_handler.setFormatter(stream_formatter)

        logger.addHandler(stream_handler)

    logger.addFilter(_RelpathFilter())
    return logger
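
Outside the project's env.yaml-driven configuration, the rotating-file part of this setup is plain stdlib logging. A minimal, self-contained sketch (path, size and backup count are arbitrary examples):

import logging
from logging.handlers import RotatingFileHandler

log = logging.getLogger('example')
log.setLevel(logging.INFO)

handler = RotatingFileHandler('/tmp/example.log',
                              maxBytes=10 * 1024 * 1024,  # rotate around 10 MiB
                              backupCount=5,
                              encoding='utf8')
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
log.addHandler(handler)

log.info('rotating file handler configured')
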
Example #22
def generate_raw_data_csv(rawdata_dict, csv_file_name):
    csv_rawdata_path = log_path.join('csv_output', csv_file_name)
    output_file = csv_rawdata_path.open('w', ensure=True)
    csvwriter = csv.DictWriter(output_file, fieldnames=rawdata_dict[list(rawdata_dict)[0]].headers,
        delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writeheader()
    sorted_rd_keys = sorted(rawdata_dict.keys())
    for key in sorted_rd_keys:
        csvwriter.writerow(dict(rawdata_dict[key]))
Example #23
def create_logger(logger_name, filename=None, max_file_size=None, max_backups=None):
    """Creates and returns the named logger

    If the logger already exists, it will be destroyed and recreated
    with the current config in env.yaml

    """
    # If the logger already exists, destroy it
    if logger_name in logging.root.manager.loggerDict:
        del (logging.root.manager.loggerDict[logger_name])

    # Grab the logging conf
    conf = _load_conf(logger_name)

    log_path.ensure(dir=True)
    if filename:
        log_file = filename
    else:
        log_file = str(log_path.join("{}.log".format(logger_name)))

    # log_file is dynamic, so we can't use logging.config.dictConfig here without creating
    # a custom RotatingFileHandler class. At some point, we should do that, and move the
    # entire logging config into env.yaml

    file_formatter = logging.Formatter(conf["file_format"])
    file_handler = RotatingFileHandler(
        log_file,
        maxBytes=max_file_size or conf["max_file_size"],
        backupCount=max_backups or conf["max_file_backups"],
        encoding="utf8",
    )
    file_handler.setFormatter(file_formatter)

    logger = logging.getLogger(logger_name)
    logger.addHandler(file_handler)

    syslog_settings = _get_syslog_settings()
    if syslog_settings:
        lid = fauxfactory.gen_alphanumeric(8)
        fmt = "%(asctime)s [" + lid + "] %(message)s"
        syslog_formatter = SyslogMsecFormatter(fmt=fmt)
        syslog_handler = SysLogHandler(address=syslog_settings)
        syslog_handler.setFormatter(syslog_formatter)
        logger.addHandler(syslog_handler)
    logger.setLevel(conf["level"])
    if conf["errors_to_console"]:
        stream_formatter = logging.Formatter(conf["stream_format"])
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.ERROR)
        stream_handler.setFormatter(stream_formatter)

        logger.addHandler(stream_handler)

    logger.addFilter(_RelpathFilter())
    return logger
Example #24
def _configure_warnings():
    # Capture warnings
    warnings.simplefilter('once')
    logging.captureWarnings(True)
    wlog = logging.getLogger('py.warnings')
    wlog.addFilter(WarningsRelpathFilter())
    wlog.addFilter(WarningsDeduplicationFilter())
    file_handler = RotatingFileHandler(
        str(log_path.join('py.warnings.log')), encoding='utf8')
    wlog.addHandler(file_handler)
    wlog.propagate = False
Example #25
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument("--tracketbot-url", dest="trackerbot_url",
                        help="tracker bot url to make api call",
                        default='http://10.16.4.32/trackerbot/api')
    parser.add_argument("--stream", dest="stream",
                        help="stream to generate the template test result")
    parser.add_argument("--output", dest="output", help="target file name",
                        default=log_path.join('template_tester_results.html').strpath)
    args = parser.parse_args()
    return args
Example #26
def create_logger(logger_name, filename=None):
    """Creates and returns the named logger

    If the logger already exists, it will be destroyed and recreated
    with the current config in env.yaml

    """
    # If the logger already exists, destroy it
    if logger_name in logging.root.manager.loggerDict:
        del(logging.root.manager.loggerDict[logger_name])

    # Grab the logging conf
    conf = _load_conf(logger_name)

    log_path.ensure(dir=True)
    if filename:
        log_file = filename
    else:
        log_file = str(log_path.join('%s.log' % logger_name))

    relpath_filter = _RelpathFilter()

    # log_file is dynamic, so we can't use logging.config.dictConfig here without creating
    # a custom RotatingFileHandler class. At some point, we should do that, and move the
    # entire logging config into env.yaml

    file_formatter = logging.Formatter(conf['file_format'])
    file_handler = RotatingFileHandler(log_file, maxBytes=conf['max_file_size'],
        backupCount=conf['max_file_backups'], encoding='utf8')
    file_handler.setFormatter(file_formatter)

    logger = logging.getLogger(logger_name)
    logger.addHandler(file_handler)

    syslog_settings = _get_syslog_settings()
    if syslog_settings:
        lid = generate_random_string(8)
        fmt = '%(asctime)s [' + lid + '] %(message)s'
        syslog_formatter = SyslogMsecFormatter(fmt=fmt)
        syslog_handler = SysLogHandler(address=syslog_settings)
        syslog_handler.setFormatter(syslog_formatter)
        logger.addHandler(syslog_handler)
    logger.setLevel(conf['level'])
    if conf['errors_to_console']:
        stream_formatter = logging.Formatter(conf['stream_format'])
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.ERROR)
        stream_handler.setFormatter(stream_formatter)

        logger.addHandler(stream_handler)

    logger.addFilter(relpath_filter)
    return logger
Example #27
def pytest_runtest_setup(item):
    global recorder
    if vid_options and vid_options['enabled']:
        vid_log_path = log_path.join(vid_options['dir'])
        vid_dir, vid_name = get_path_and_file_name(item)
        full_vid_path = vid_log_path.join(vid_dir)
        try:
            os.makedirs(full_vid_path.strpath)
        except OSError:
            pass
        vid_name = vid_name + ".ogv"
        recorder = Recorder(full_vid_path.join(vid_name).strpath)
        recorder.start()
Example #28
def pytest_runtest_setup(item):
    global recorder
    if vid_options and vid_options['enabled']:
        vid_log_path = log_path.join(vid_options['dir'])
        vid_dir, vid_name = get_path_and_file_name(item)
        full_vid_path = vid_log_path.join(vid_dir)
        try:
            os.makedirs(full_vid_path.strpath)
        except OSError:
            pass
        vid_name = vid_name + ".ogv"
        recorder = Recorder(full_vid_path.join(vid_name).strpath)
        recorder.start()
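
The try/except OSError around os.makedirs is the Python 2 idiom for "create the directory tree unless it already exists". On Python 3 the same intent is a one-liner (the path shown is hypothetical):

import os

# Python 3 equivalent of the try/except OSError guard above
os.makedirs('/tmp/video/tests/test_example', exist_ok=True)
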
Example #29
def pytest_sessionfinish(session, exitstatus):
    failed_tests_template = template_env.get_template('failed_browser_tests.html')
    outfile = log_path.join('failed_browser_tests.html')

    # Clean out any old reports
    try:
        outfile.remove(ignore_errors=True)
    except ENOENT:
        pass

    # Generate a new one if needed
    if failed_test_tracking['tests']:
        failed_tests_report = failed_tests_template.render(**failed_test_tracking)
        outfile.write(failed_tests_report)
Example #30
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument("--tracketbot-url", dest="trackerbot_url",
                        help="tracker bot url to make api call",
                        default='http://10.16.4.32/trackerbot/api')
    parser.add_argument("--stream", dest="stream",
                        help="stream to generate the template test result")
    parser.add_argument("--template", dest="appliance_template",
                        help="appliance/latest template name",
                        default=None)
    parser.add_argument("--output", dest="output", help="target file name",
                        default=log_path.join('template_tester_results.log').strpath)
    args = parser.parse_args()
    return args
Example #31
 def collect_reports(self):
     coverage_dir = log_path.join('coverage')
     # clean out old coverage dir if it exists
     if coverage_dir.check():
         coverage_dir.remove(rec=True, ignore_errors=True)
     # Then ensure the empty dir exists
     coverage_dir.ensure(dir=True)
     # then copy the remote coverage dir into it
     logger.info("Collecting coverage reports to {}".format(
         coverage_dir.strpath))
     logger.info("Report collection can take several minutes")
     self.ssh_client.get_file(rails_root.join('coverage').strpath,
                              log_path.strpath,
                              recursive=True)
Example #32
def pytest_sessionfinish(session, exitstatus):
    failed_tests_template = template_env.get_template('failed_browser_tests.html')
    outfile = log_path.join('failed_browser_tests.html')

    # Clean out any old reports
    try:
        outfile.remove(ignore_errors=True)
    except ENOENT:
        pass

    # Generate a new one if needed
    if failed_test_tracking['tests']:
        failed_tests_report = failed_tests_template.render(**failed_test_tracking)
        outfile.write(failed_tests_report)
Example #33
def main(run_id, port):
    """Main function for running artifactor server"""
    port = port if port else random_port()
    try:
        run(port, run_id)
        print ("Artifactor server running on port: ", port)
    except Exception as e:
        import traceback
        import sys
        with log_path.join('artifactor_crash.log').open('w') as f:
            print(e, file=f)
            print(e, file=sys.stderr)
            tb = '\n'.join(traceback.format_tb(sys.exc_traceback))
            print(tb, file=f)
            print(tb, file=sys.stderr)
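
sys.exc_traceback is a Python 2-only alias that no longer exists on Python 3; traceback.format_exc() gives the same text on both. A hedged, standalone variant of the crash logging above under that substitution (the crash-log path is a placeholder for log_path.join('artifactor_crash.log')):

from __future__ import print_function
import sys
import traceback


def log_crash(exc, crash_log='/tmp/artifactor_crash.log'):
    # Write the exception and its traceback to the crash log and to stderr
    tb = traceback.format_exc()
    with open(crash_log, 'w') as f:
        print(exc, file=f)
        print(tb, file=f)
    print(exc, file=sys.stderr)
    print(tb, file=sys.stderr)


try:
    raise RuntimeError('server failed to start')
except Exception as e:
    log_crash(e)
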
Example #34
def main(run_id, port):
    """Main function for running artifactor server"""
    port = port if port else random_port()
    try:
        run(port, run_id)
        print("Artifactor server running on port: ", port)
    except Exception as e:
        import traceback
        import sys
        with log_path.join('artifactor_crash.log').open('w') as f:
            print(e, file=f)
            print(e, file=sys.stderr)
            tb = '\n'.join(traceback.format_tb(sys.exc_traceback))
            print(tb, file=f)
            print(tb, file=sys.stderr)
Example #35
 def collect_reports(self):
     coverage_dir = log_path.join('coverage')
     # clean out old coverage dir if it exists
     if coverage_dir.check():
         coverage_dir.remove(rec=True, ignore_errors=True)
     # Then ensure the empty dir exists
     coverage_dir.ensure(dir=True)
     # then copy the remote coverage dir into it
     logger.info("Collecting coverage reports to {}".format(coverage_dir.strpath))
     logger.info("Report collection can take several minutes")
     self.ssh_client.get_file(
         rails_root.join('coverage').strpath,
         log_path.strpath,
         recursive=True
     )
Example #36
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--nic-template',
                        help='NIC Name template to be removed', default="test*", type=str)
    parser.add_argument('--pip-template',
                        help='PIP Name template to be removed', default="test*", type=str)
    parser.add_argument('--days-old',
                        help='--days-old argument to find stack items older than X days ',
                        default="7", type=int)
    parser.add_argument("--output", dest="output", help="target file name, default "
                                                        "'cleanup_azure.log' in "
                                                        "utils.path.log_path",
                        default=log_path.join('cleanup_azure.log').strpath)
    args = parser.parse_args()
    return args
Example #37
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--exclude-volumes',
                        nargs='+',
                        help='List of volumes, which should be excluded.')
    parser.add_argument('--exclude-eips',
                        nargs='+',
                        help='List of EIPs, which should be '
                        'excluded. Allocation_id or public IP are allowed.')
    parser.add_argument("--output",
                        dest="output",
                        help="target file name, default "
                        "'cleanup_ec2.log' in utils.path.log_path",
                        default=log_path.join('cleanup_ec2.log').strpath)
    args = parser.parse_args()
    return args
Example #38
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--max-hours', dest='maxhours', type=int, default=24, help='Max hours '
        'since the instance was created. (Default is 24 hours.)')
    parser.add_argument('--exclude-instances', nargs='+', help='List of instances, '
        'which should be excluded.')
    parser.add_argument('--exclude-volumes', nargs='+', help='List of volumes, which should be '
        'excluded.')
    parser.add_argument('--exclude-eips', nargs='+', help='List of EIPs, which should be '
        'excluded. Allocation_id or public IP are allowed.')
    parser.add_argument('text_to_match', nargs='*', default=None,
                        help='Regex in the name of vm to be affected, can be used multiple times'
                             "['^test_', '^jenkins', '^i-']")
    parser.add_argument("--output", dest="output", help="target file name",
                        default=log_path.join('ec2_instance_list.log').strpath)
    args = parser.parse_args()
    return args
Example #39
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument(
        '-f',
        '--force',
        default=True,
        action='store_false',
        dest='prompt',
        help='Do not prompt before deleting VMs (danger zone!)')
    parser.add_argument(
        '--max-hours',
        default=24,
        help='Max hours since the VM was created or last powered on '
        '(varies by provider, default 24)')
    parser.add_argument(
        '--provider',
        dest='providers',
        action='append',
        default=None,
        help='Provider(s) to inspect, can be used multiple times',
        metavar='PROVIDER')
    parser.add_argument('-l',
                        '--list',
                        default=False,
                        action='store_true',
                        dest='list_vms',
                        help='list vms of the specified "provider_type"')
    parser.add_argument(
        '--provider-type',
        dest='provider_type',
        default='ec2, gce, azure',
        help='comma separated list of the provider type, useful in case of gce,'
        'azure, ec2 to get the insight into cost/vm listing')
    parser.add_argument('--outfile',
                        dest='outfile',
                        default=log_path.join('cleanup_old_vms.log').strpath,
                        help='Output file for the VM list')
    parser.add_argument(
        'text_to_match',
        nargs='*',
        default=['^test_', '^jenkins', '^i-'],
        help='Regex in the name of vm to be affected, can be used multiple times'
        ' (Defaults to "^test_" and "^jenkins")')

    args = parser.parse_args()
    return args
Example #40
 def print_report(self):
     try:
         last_run = json.load(log_path.join('coverage', '.last_run.json').open())
         coverage = last_run['result']['covered_percent']
         # TODO: Make the happy vs. sad coverage color configurable, and set it to something
         # good once we know what good is
         style = {'bold': True}
         if coverage > 40:
             style['green'] = True
         else:
             style['red'] = True
         self.reporter.line('UI Coverage Result: {}%'.format(coverage), **style)
     except KeyboardInterrupt:
         # don't block this, so users can cancel out
         raise
     except:
         logger.error('Error printing coverage report to terminal, traceback follows')
         logger.error(traceback.format_exc())
Example #41
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('-f', '--force', default=True, action='store_false', dest='prompt',
                        help='Do not prompt before deleting VMs (danger zone!)')
    parser.add_argument('--max-hours', default=24,
                        help='Max hours since the VM was created or last powered on '
                             '(varies by provider, default 24)')
    parser.add_argument('--provider', dest='providers', action='append', default=None,
                        help='Provider(s) to inspect, can be used multiple times',
                        metavar='PROVIDER')
    parser.add_argument('--outfile', dest='outfile',
                        default=log_path.join('cleanup_old_vms.log').strpath,
                        help='Output file for the VM list')
    parser.add_argument('text_to_match', nargs='*', default=['^test_', '^jenkins', '^i-'],
                        help='Regex in the name of vm to be affected, can be used multiple times'
                             ' (Defaults to \'^test_\' and \'^jenkins\')')

    args = parser.parse_args()
    return args
Example #42
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = self.config.get('log_dir', log_path.join('artifacts').strpath)
     if not os.path.isdir(self.log_dir):
         os.makedirs(self.log_dir)
     log_file_name = os.path.join(self.log_dir, "artifactor_log.txt")
     self.logger = create_logger('artifactor_logger', log_file_name)
     if not os.path.isdir(self.log_dir):
         os.makedirs(self.log_dir)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     if not self.log_dir:
         print "!!! Log dir must be specified in yaml"
         sys.exit(127)
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {'artifactor_config': self.config, 'log_dir': self.config['log_dir'],
                         'artifacts': dict()}
Example #43
def pytest_configure(config):
    if not art_client:
        return

    if SLAVEID:
        art_client.port = config.option.artifactor_port
    else:
        import artifactor
        from artifactor.plugins import merkyl, logger, video, filedump, reporter
        from artifactor import parse_setup_dir

        art = artifactor.artifactor

        if 'log_dir' not in art_config:
            art_config['log_dir'] = log_path.join('artifacts').strpath
        art.set_config(art_config)

        art.register_plugin(merkyl.Merkyl, "merkyl")
        art.register_plugin(logger.Logger, "logger")
        art.register_plugin(video.Video, "video")
        art.register_plugin(filedump.Filedump, "filedump")
        art.register_plugin(reporter.Reporter, "reporter")
        art.register_hook_callback('filedump',
                                   'pre',
                                   parse_setup_dir,
                                   name="filedump_dir_setup")

        artifactor.initialize()
        ip = urlparse(env['base_url']).hostname

        art.configure_plugin('merkyl', ip=ip)
        art.configure_plugin('logger')
        art.configure_plugin('video')
        art.configure_plugin('filedump')
        art.configure_plugin('reporter')
        art.fire_hook('start_session', run_id=config.getvalue('run_id'))

        # Stash this where slaves can find it
        config.option.artifactor_port = art_client.port
        log.logger.info('artifactor listening on port %d', art_client.port)
Example #44
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('-f', '--force', default=True, action='store_false', dest='prompt',
                        help='Do not prompt before deleting VMs (danger zone!)')
    parser.add_argument('--max-hours', default=24,
                        help='Max hours since the VM was created or last powered on '
                             '(varies by provider, default 24)')
    parser.add_argument('--provider', dest='providers', action='append', default=None,
                        help='Provider(s) to inspect, can be used multiple times',
                        metavar='PROVIDER')
    parser.add_argument('text_to_match', nargs='*', default=['^test_', '^jenkins', '^i-'],
                        help='Regex in the name of vm to be affected, can be used multiple times'
                             ' (Defaults to "^test_" and "^jenkins")')
    parser.add_argument('-l', '--list', default=False, action='store_true', dest='list_vms',
                        help='list vms of the specified "provider_type"')
    parser.add_argument('--provider-type', dest='provider_type', default='ec2, gce, azure',
                        help='comma separated list of the provider type, useful in case of gce,'
                             'azure, ec2 to get the insight into cost/vm listing')
    parser.add_argument('--outfile', dest='outfile', default=log_path.join(
        'instance_list.log').strpath, help='Output file for the VM list')
    args = parser.parse_args()
    return args
Example #45
 def print_report(self):
     try:
         last_run = json.load(
             log_path.join('coverage', '.last_run.json').open())
         coverage = last_run['result']['covered_percent']
         # TODO: Make the happy vs. sad coverage color configurable, and set it to something
         # good once we know what good is
         style = {'bold': True}
         if coverage > 40:
             style['green'] = True
         else:
             style['red'] = True
         self.reporter.line('UI Coverage Result: {}%'.format(coverage),
                            **style)
     except KeyboardInterrupt:
         # don't block this, so users can cancel out
         raise
     except:
         logger.error(
             'Error printing coverage report to terminal, traceback follows'
         )
         logger.error(traceback.format_exc())
Example #46
def pytest_collection_modifyitems(session, config, items):
    len_collected = len(items)

    new_items = []

    from utils.path import log_path
    with log_path.join('uncollected.log').open('w') as f:
        for item in items:
            # First filter out all items that have the uncollect mark
            if item.get_marker('uncollect') or not uncollectif(item):
                # If an uncollect marker has been added,
                # give it priority for the explanation
                uncollect = item.get_marker('uncollect')
                marker = uncollect or item.get_marker('uncollectif')
                if marker:
                    reason = marker.kwargs.get('reason', "No reason given")
                else:
                    reason = None
                f.write("{} - {}\n".format(item.name, reason))
            else:
                new_items.append(item)

    items[:] = new_items

    len_filtered = len(items)
    filtered_count = len_collected - len_filtered

    if filtered_count:
        # A warning should go into log/cfme.log when a test has this mark applied.
        # It might be good to write uncollected test names out via terminalreporter,
        # but I suspect it would be extremely spammy. It might be useful in the
        # --collect-only output?

        from fixtures.pytest_store import store
        store.terminalreporter.write('collected %d items' % len_filtered,
                                     bold=True)
        store.terminalreporter.write(' (uncollected %d items)\n' %
                                     filtered_count)
Example #47
def pytest_configure(config):
    if not art_client:
        return

    if SLAVEID:
        art_client.port = config.option.artifactor_port
    else:
        import artifactor
        from artifactor.plugins import merkyl, logger, video, filedump, reporter
        from artifactor import parse_setup_dir

        art = artifactor.artifactor

        if 'log_dir' not in art_config:
            art_config['log_dir'] = log_path.join('artifacts').strpath
        art.set_config(art_config)

        art.register_plugin(merkyl.Merkyl, "merkyl")
        art.register_plugin(logger.Logger, "logger")
        art.register_plugin(video.Video, "video")
        art.register_plugin(filedump.Filedump, "filedump")
        art.register_plugin(reporter.Reporter, "reporter")
        art.register_hook_callback('filedump', 'pre', parse_setup_dir,
                                   name="filedump_dir_setup")

        artifactor.initialize()
        ip = urlparse(env['base_url']).hostname

        art.configure_plugin('merkyl', ip=ip)
        art.configure_plugin('logger')
        art.configure_plugin('video')
        art.configure_plugin('filedump')
        art.configure_plugin('reporter')
        art.fire_hook('start_session', run_id=config.getvalue('run_id'))

        # Stash this where slaves can find it
        config.option.artifactor_port = art_client.port
        log.logger.info('artifactor listening on port %d', art_client.port)
Example #48
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--nic-template',
                        help='NIC Name template to be removed',
                        default="test*",
                        type=str)
    parser.add_argument('--pip-template',
                        help='PIP Name template to be removed',
                        default="test*",
                        type=str)
    parser.add_argument(
        '--days-old',
        help='--days-old argument to find stack items older than X days ',
        default="7",
        type=int)
    parser.add_argument("--output",
                        dest="output",
                        help="target file name, default "
                        "'cleanup_azure.log' in "
                        "utils.path.log_path",
                        default=log_path.join('cleanup_azure.log').strpath)
    args = parser.parse_args()
    return args
Example #49
def parse_cmd_line():
    """
    Specify and parse arguments
    :return: args, kwargs, the usual
    """
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--outfile',
                        default=log_path.join('list_provider_vms.log').strpath,
                        dest='outfile')
    parser.add_argument('--tag',
                        default=None,
                        dest='tag',
                        action='append',
                        help='A provider tag to match a group of providers instead of all '
                             'providers from cfme_data. Can be used multiple times')
    parser.add_argument('provider',
                        default=None,
                        nargs='*',
                        help='Provider keys, can be used multiple times. If none are given '
                             'the script will use all providers from cfme_data or match tags')

    args = parser.parse_args()
    return args
Example #50
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy', None):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        provider_data = utils.conf.provider_data
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url', None),
             }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy', None):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster', None))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host', None) and kwargs.get('place_policy_aff', None):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = [f for f in flavors if f in available_flavors]
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool', None) or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'], deploy_args['vm_name'],
                                                    kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('Clone failed')
        if kwargs.get('cleanup', None):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
            return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args['vm_name']], num_sec=1200,
                                  fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure', None):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy', None):
                app = IPAppliance(address=ip)
            else:
                app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy', None):
            app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile', None) or kwargs.get('deploy', None):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
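

# A minimal sketch (hypothetical consumer, not part of this script) of parsing the
# key=value line that the outfile block above writes.
def read_appliance_ip(outfile_path):
    with open(outfile_path) as f:
        pairs = dict(line.strip().split('=', 1) for line in f if '=' in line)
    return pairs['appliance_ip_address']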
Ejemplo n.º 51
0
# on the appliance
#: Corresponds to Rails.root in the rails env
rails_root = local('/var/www/miq/vmdb')
#: coverage root, should match what's in the coverage hook and merger scripts
appliance_coverage_root = rails_root.join('coverage')

# local
coverage_data = scripts_data_path.join('coverage')
gemfile = coverage_data.join('coverage_gem.rb')
bundler_d = rails_root.join('bundler.d')
coverage_hook_file_name = 'coverage_hook.rb'
coverage_hook = coverage_data.join(coverage_hook_file_name)
coverage_merger = coverage_data.join('coverage_merger.rb')
thing_toucher = coverage_data.join('thing_toucher.rb')
coverage_output_dir = log_path.join('coverage')
coverage_results_archive = coverage_output_dir.join('coverage-results.tgz')
coverage_appliance_conf = conf_path.join('.ui-coverage')

# This is set in sessionfinish, and should be reliably readable
# in post-yield sessionfinish hook wrappers and all hooks thereafter
ui_coverage_percent = None
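

# A minimal sketch (assumed consumer code, not part of this module) of what the
# comment above describes: a post-yield sessionfinish hookwrapper reading
# ui_coverage_percent after the coverage code has set it.
import pytest


@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(session, exitstatus):
    yield  # the coverage sessionfinish code runs first and sets ui_coverage_percent
    if ui_coverage_percent is not None:
        print('UI coverage: {}%'.format(ui_coverage_percent))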


def _thing_toucher_async(ssh_client):
    # for use in a subprocess to kick off the thing toucher
    result = ssh_client.run_rails_command('thing_toucher.rb', timeout=0)
    return result.rc == 0


def clean_coverage_dir():
Ejemplo n.º 52
0
def main(appliance, jenkins_url, jenkins_user, jenkins_token, job_name):
    appliance_version = str(appliance.version).strip()
    print('Looking for appliance version {} in {}'.format(
        appliance_version, job_name))
    client = jenkins.Jenkins(jenkins_url,
                             username=jenkins_user,
                             password=jenkins_token)
    build_numbers = get_build_numbers(client, job_name)
    if not build_numbers:
        print('No builds for job {}'.format(job_name))
        return 1

    # Find the builds with appliance version
    eligible_build_numbers = set()
    for build_number in build_numbers:
        try:
            artifacts = client.get_build_info(job_name,
                                              build_number)['artifacts']
            if not artifacts:
                raise ValueError()
        except (KeyError, ValueError):
            print('No artifacts for {}/{}'.format(job_name, build_number))
            continue

        artifacts = group_list_dict_by(artifacts, 'fileName')
        if 'appliance_version' not in artifacts:
            print('appliance_version not in artifacts of {}/{}'.format(
                job_name, build_number))
            continue

        build_appliance_version = download_artifact(
            jenkins_user, jenkins_token, jenkins_url, job_name, build_number,
            artifacts['appliance_version']['relativePath']).strip()

        if Version(build_appliance_version) < Version(appliance_version):
            print('Build {} already has lower version ({})'.format(
                build_number, build_appliance_version))
            print('Ending here')
            break

        if 'coverage-results.tgz' not in artifacts:
            print('coverage-results.tgz not in artifacts of {}/{}'.format(
                job_name, build_number))
            continue

        if build_appliance_version == appliance_version:
            print('Build {} was found to contain what is needed'.format(
                build_number))
            eligible_build_numbers.add(build_number)
        else:
            print(
                'Skipping build {} because it does not have correct version ({})'
                .format(build_number, build_appliance_version))

    if not eligible_build_numbers:
        print('Could not find coverage reports for {} in {}'.format(
            appliance_version, job_name))
        return 2

    # Stop the evm service; it is not needed while merging coverage
    print('Stopping evmserverd')
    appliance.evmserverd.stop()
    # Install the coverage tools on the appliance
    print('Installing simplecov')
    appliance.coverage._install_simplecov()
    # Upload the merger
    print('Installing coverage merger')
    appliance.coverage._upload_coverage_merger()
    with appliance.ssh_client as ssh:
        if not ssh.run_command('mkdir -p /var/www/miq/vmdb/coverage'):
            print(
                'Could not create /var/www/miq/vmdb/coverage on the appliance!'
            )
            return 3
        # Download all the coverage reports
        for build_number in eligible_build_numbers:
            print('Downloading the coverage report from build {}'.format(
                build_number))
            download_url = jenkins_artifact_url(
                jenkins_user, jenkins_token, jenkins_url, job_name,
                build_number, 'log/coverage/coverage-results.tgz')
            cmd = ssh.run_command(
                'curl -k -o /var/www/miq/vmdb/coverage/tmp.tgz {}'.format(
                    quote(download_url)))
            if not cmd:
                print('Could not download! - {}'.format(str(cmd)))
                return 4
            print('Extracting the coverage report from build {}'.format(
                build_number))
            extract_command = ' && '.join([
                'cd /var/www/miq/vmdb/coverage',
                'tar xf tmp.tgz --strip-components=1',
                'rm -f tmp.tgz',
            ])
            cmd = ssh.run_command(extract_command)
            if not cmd:
                print('Could not extract! - {}'.format(str(cmd)))
                return 5

        # Now run the merger
        print('Running the merger')
        cmd = ssh.run_command(
            'cd /var/www/miq/vmdb; time bin/rails runner coverage_merger.rb')
        if not cmd:
            print('Failure running the merger - {}'.format(str(cmd)))
            return 6
        else:
            print('Coverage report generation was successful')
            print(str(cmd))
        print('Packing the generated HTML')
        cmd = ssh.run_command(
            'cd /var/www/miq/vmdb/coverage; tar cfz /tmp/merged.tgz merged')
        if not cmd:
            print('Could not compress! - {}'.format(str(cmd)))
            return 7
        print('Grabbing the generated HTML')
        ssh.get_file('/tmp/merged.tgz', log_path.strpath)
        print('Decompressing the generated HTML')
        rc = subprocess.call([
            'tar', 'xf',
            log_path.join('merged.tgz').strpath, '-C', log_path.strpath
        ])
        if rc == 0:
            print('Done!')
        else:
            print('Failure to extract')
            return 8
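
# A tiny standalone illustration (not part of the script above) of the single shell
# line that ' && '.join() builds, so each step only runs if the previous one succeeded.
parts = [
    'cd /var/www/miq/vmdb/coverage',
    'tar xf tmp.tgz --strip-components=1',
    'rm -f tmp.tgz',
]
assert ' && '.join(parts) == (
    'cd /var/www/miq/vmdb/coverage && tar xf tmp.tgz --strip-components=1 && rm -f tmp.tgz')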
Ejemplo n.º 53
0
    reuse_dir: True
    plugins:
        post-result:
            enabled: True
            plugin: post_result
"""
from collections import defaultdict

from artifactor import ArtifactorBasePlugin

from utils.path import log_path

# preseed the normal statuses, but let defaultdict handle
# any unexpected statuses, which should probably never happen

test_report = log_path.join('test-report.json')
test_counts = defaultdict(int, {
    'passed': 0,
    'failed': 0,
    'skipped': 0,
    'error': 0,
    'xpassed': 0,
    'xfailed': 0
})
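
# A small standalone illustration (not part of the plugin) of why defaultdict(int, ...)
# is used above: an unexpected status key simply starts counting from zero instead of
# raising a KeyError.
_example_counts = defaultdict(int, {'passed': 0, 'failed': 0})
_example_counts['passed'] += 1   # preseeded key
_example_counts['rerun'] += 1    # unexpected key, handled transparently
assert _example_counts['rerun'] == 1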


class PostResult(ArtifactorBasePlugin):
    def plugin_initialize(self):
        self.register_plugin_hook('finish_session', self.post_result)
        test_report.check() and test_report.remove()
Ejemplo n.º 54
0
def wait_for_ajax():
    """
    Waits until all ajax timers are complete; in other words, until there are no
    more pending ajax requests and the page load has finished completely.

    Raises:
        TimedOutError: when ajax did not load in time
    """

    _thread_local.ajax_log_msg = ''

    def _nothing_in_flight():
        """Checks if there is no ajax in flight and also logs current status
        """
        prev_log_msg = _thread_local.ajax_log_msg

        try:
            running = in_flight()
        except Exception as e:
            # if 'jquery' appears in the error message, a non-cfme page (e.g. a proxy
            # error page) is displayed; that case should be handled by something else
            if "jquery" not in str(e).lower():
                raise
            return True
        anything_in_flight = False
        anything_in_flight |= running["jquery"] > 0
        anything_in_flight |= running["prototype"] > 0
        anything_in_flight |= running["spinner"]
        anything_in_flight |= running["document"] != "complete"
        log_msg = ', '.join(["{}: {}".format(k, str(v)) for k, v in running.iteritems()])
        # Log the message only if it's different from the last one
        if prev_log_msg != log_msg:
            _thread_local.ajax_log_msg = log_msg
            logger.trace('Ajax running: {}'.format(log_msg))
        if (not anything_in_flight) and prev_log_msg:
            logger.trace('Ajax done')

        return not anything_in_flight

    wait_for(
        _nothing_in_flight,
        num_sec=_thread_local.ajax_timeout, delay=0.1, message="wait for ajax", quiet=True,
        silent_failure=True)

    # If we are not supposed to take page screenshots...well...then...don't.
    if store.config and not store.config.getvalue('page_screenshots'):
        return

    url = browser().current_url
    url = url.replace(base_url(), '')
    url = url.replace("/", '_')
    if url not in urls:
        logger.info('Taking picture of page: {}'.format(url))
        ss, sse = take_screenshot()
        if ss:
            ss_path = log_path.join('page_screenshots')
            if not ss_path.exists():
                ss_path.mkdir()
            with ss_path.join("{}.png".format(url)).open('wb') as f:
                f.write(base64.b64decode(ss))
        urls.append(url)
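
# A minimal sketch (hypothetical condition function, not from this module) of the
# wait_for contract used above: the callable is polled every `delay` seconds until it
# returns a truthy value or `num_sec` elapses.
def _example_nothing_pending():
    pending_requests = 0  # stand-in for a real "requests in flight" counter
    return pending_requests == 0


wait_for(_example_nothing_pending, num_sec=30, delay=0.1,
         message="wait for example condition", quiet=True)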
Ejemplo n.º 55
0
def pages_to_statistics_csv(pages, filters, report_file_name):
    all_statistics = []
    for page in pages:
        # Determine if the page matches a pattern and swap request to pattern
        for p_filter in filters:
            results = p_filter.search(page.request.strip())
            if results:
                page.request = p_filter.pattern
                break
        added = False

        if len(all_statistics) > 0:
            for pg_statistics in all_statistics:
                if pg_statistics.request == page.request:
                    if page.seleniumtime > 0:
                        pg_statistics.seleniumtimes.append(
                            int(page.seleniumtime))
                    pg_statistics.completedintimes.append(
                        float(page.completedintime))
                    if page.viewstime > 0:
                        pg_statistics.viewstimes.append(float(page.viewstime))
                    pg_statistics.activerecordtimes.append(
                        float(page.activerecordtime))
                    pg_statistics.selectcounts.append(int(page.selectcount))
                    pg_statistics.cachedcounts.append(int(page.cachedcount))
                    pg_statistics.uncachedcounts.append(int(
                        page.uncachedcount))
                    added = True
                    break

        if not added:
            pg_statistics = PageStatLists()
            pg_statistics.request = page.request
            if page.seleniumtime > 0:
                pg_statistics.seleniumtimes.append(int(page.seleniumtime))
            pg_statistics.completedintimes.append(float(page.completedintime))
            if page.viewstime > 0:
                pg_statistics.viewstimes.append(float(page.viewstime))
            pg_statistics.activerecordtimes.append(float(
                page.activerecordtime))
            pg_statistics.selectcounts.append(int(page.selectcount))
            pg_statistics.cachedcounts.append(int(page.cachedcount))
            pg_statistics.uncachedcounts.append(int(page.uncachedcount))
            all_statistics.append(pg_statistics)

    csvdata_path = log_path.join('csv_output', report_file_name)
    if csvdata_path.isfile():
        logger.info('Appending to: {}'.format(report_file_name))
        outputfile = csvdata_path.open('a', ensure=True)
        appending = True
    else:
        logger.info('Writing to: {}'.format(report_file_name))
        outputfile = csvdata_path.open('w', ensure=True)
        appending = False

    try:
        csvfile = csv.writer(outputfile)
        if not appending:
            metrics = [
                'samples', 'min', 'avg', 'median', 'max', 'std', '90', '99'
            ]
            measurements = [
                'sel_time', 'c_time', 'v_time', 'ar_time', 's_count',
                'c_count', 'uc_count'
            ]
            headers = ['pattern']
            for measurement in measurements:
                for metric in metrics:
                    headers.append('{}_{}'.format(measurement, metric))
            csvfile.writerow(headers)

        # Contents of CSV
        for page_statistics in all_statistics:
            if len(page_statistics.completedintimes) > 1:
                logger.debug(
                    'Samples/Avg/90th/Std: {} : {} : {} : {} Pattern: {}'.
                    format(
                        str(len(page_statistics.completedintimes)).rjust(7),
                        str(
                            round(
                                numpy.average(
                                    page_statistics.completedintimes),
                                2)).rjust(7),
                        str(
                            round(
                                numpy.percentile(
                                    page_statistics.completedintimes, 90),
                                2)).rjust(7),
                        str(
                            round(numpy.std(page_statistics.completedintimes),
                                  2)).rjust(7), page_statistics.request))
            stats = [page_statistics.request]
            stats.extend(generate_statistics(page_statistics.seleniumtimes))
            stats.extend(generate_statistics(page_statistics.completedintimes))
            stats.extend(generate_statistics(page_statistics.viewstimes))
            stats.extend(generate_statistics(
                page_statistics.activerecordtimes))
            stats.extend(generate_statistics(page_statistics.selectcounts))
            stats.extend(generate_statistics(page_statistics.cachedcounts))
            stats.extend(generate_statistics(page_statistics.uncachedcounts))
            csvfile.writerow(stats)
    finally:
        outputfile.close()

    logger.debug('Size of Aggregated list of pages: {}'.format(
        len(all_statistics)))
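
# A small standalone illustration (not part of the module) of the header row written
# above: a 'pattern' column plus one column per (measurement, metric) pair.
_metrics = ['samples', 'min', 'avg', 'median', 'max', 'std', '90', '99']
_measurements = ['sel_time', 'c_time', 'v_time', 'ar_time', 's_count', 'c_count', 'uc_count']
_headers = ['pattern'] + ['{}_{}'.format(m, s) for m in _measurements for s in _metrics]
assert len(_headers) == 1 + len(_measurements) * len(_metrics)  # 57 columns in total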
Ejemplo n.º 56
0
def test_collect_log_depot(depot_type, depot_machine, depot_credentials,
                           depot_ftp, depot_configured, soft_assert, request):
    """ Boilerplate test to verify functionality of this concept

    Will be extended and improved.
    """
    # Wipe the FTP contents in the end
    @request.addfinalizer
    def _clear_ftp():
        with depot_ftp() as ftp:
            ftp.cwd(ftp.upload_dir)
            ftp.recursively_delete()

    # Prepare empty workspace
    with depot_ftp() as ftp:
        # move to upload folder
        ftp.cwd(ftp.upload_dir)
        # delete all files
        ftp.recursively_delete()

    # Start the collection
    configure.ServerLogDepot.collect_all()
    # Check it on FTP
    with depot_ftp() as ftp:
        # Files must have been created after start
        zip_files = ftp.filesystem.search(re.compile(r"^.*?[.]zip$"),
                                          directories=False)
        assert zip_files, "No logs found!"

        # And each file's timestamp must be earlier than the current time.
        for file in zip_files:
            soft_assert(file.local_time < parsetime.now(),
                        "{} is timestamped in the future.".format(file.name))

        # No file contains 'unknown_unknown' sequence
        # BZ: 1018578
        bad_files = ftp.filesystem.search(
            re.compile(r"^.*?unknown_unknown.*?[.]zip$"), directories=False)
        if bad_files:
            print_list = []
            for file in bad_files:
                random_name = "{}.zip".format(fauxfactory.gen_alphanumeric())
                download_file_name = log_path.join(random_name).strpath
                file.download(download_file_name)
                print_list.append((file, random_name))

            pytest.fail("BUG1018578: Files {} present!".format(", ".join(
                "{} as {}".format(f, r) for f, r in print_list)))

    # Check the times of the files by names
    datetimes = []
    regexp = re.compile(
        r"^.*?_(?P<y1>[0-9]{4})(?P<m1>[0-9]{2})(?P<d1>[0-9]{2})_"
        r"(?P<h1>[0-9]{2})(?P<M1>[0-9]{2})(?P<S1>[0-9]{2})"
        r"_(?P<y2>[0-9]{4})(?P<m2>[0-9]{2})(?P<d2>[0-9]{2})_"
        r"(?P<h2>[0-9]{2})(?P<M2>[0-9]{2})(?P<S2>[0-9]{2})[.]zip$")
    failed = False
    for file in zip_files:
        data = regexp.match(file.name)
        if not soft_assert(data, "Wrong file matching of {}".format(
                file.name)):
            failed = True
            continue
        data = {key: int(value) for key, value in data.groupdict().iteritems()}
        date_from = parsetime(data["y1"], data["m1"], data["d1"], data["h1"],
                              data["M1"], data["S1"])
        date_to = parsetime(data["y2"], data["m2"], data["d2"], data["h2"],
                            data["M2"], data["S2"])
        datetimes.append((date_from, date_to, file.name))

    if not failed:
        # Check for the gaps
        if len(datetimes) > 1:
            for i in range(len(datetimes) - 1):
                dt = datetimes[i + 1][0] - datetimes[i][1]
                soft_assert(
                    dt.total_seconds() >= 0.0,
                    "Negative gap between log files ({}, {})".format(
                        datetimes[i][2], datetimes[i + 1][2]))
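
# A minimal standalone illustration (hypothetical filename) of the naming convention
# parsed above: <prefix>_YYYYMMDD_HHMMSS_YYYYMMDD_HHMMSS.zip, i.e. the collection
# interval's start and end encoded in the file name.
import re
_example_re = re.compile(
    r"^.*?_(?P<y1>[0-9]{4})(?P<m1>[0-9]{2})(?P<d1>[0-9]{2})_"
    r"(?P<h1>[0-9]{2})(?P<M1>[0-9]{2})(?P<S1>[0-9]{2})"
    r"_(?P<y2>[0-9]{4})(?P<m2>[0-9]{2})(?P<d2>[0-9]{2})_"
    r"(?P<h2>[0-9]{2})(?P<M2>[0-9]{2})(?P<S2>[0-9]{2})[.]zip$")
assert _example_re.match("Region_1_20161122_100520_20161122_103012.zip")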
Ejemplo n.º 57
0
def vdebug(self, message, *args, **kws):
    self._log(VDEBUG_LEVEL, message, args, **kws)


logging.Logger.vdebug = vdebug

logging.addLevelName(VDEBUG_LEVEL, "VDEBUG")

logging.Logger.trace = trace
logging.addLevelName(TRACE_LEVEL, "TRACE")

logger = logging.getLogger('cfme-performance')
logger.setLevel(cfme_performance['logging']['level'])

formatter = logging.Formatter(
    '%(asctime)s : %(threadName)-11s : %(levelname)7s : %(message)s (%(source)s)'
)

# Main Log File
filehandler = logging.FileHandler(
    log_path.join('cfme-performance.log').strpath, 'a')
filehandler.setLevel(cfme_performance['logging']['level'])
filehandler.setFormatter(formatter)
logger.addFilter(_RelpathFilter())
logger.addHandler(filehandler)
logger.propagate = False

# Log warnings to cfme-performance logger
warnings.showwarning = _showwarning
warnings.simplefilter('default')
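
# A minimal sketch (assumed behaviour; the real _RelpathFilter is defined elsewhere in
# this module) of a logging.Filter that injects the %(source)s field the formatter
# above expects on every record.
import logging
import os


class _ExampleRelpathFilter(logging.Filter):
    def filter(self, record):
        # attach a short "file:line" source attribute so the format string can use it
        record.source = '{}:{}'.format(os.path.basename(record.pathname), record.lineno)
        return True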