def messages_to_statistics_csv(messages, statistics_file_name):
    all_statistics = []
    for msg_id in messages:
        msg = messages[msg_id]

        added = False
        if len(all_statistics) > 0:
            for msg_statistics in all_statistics:
                if msg_statistics.cmd == msg.msg_cmd:

                    if msg.del_time > 0:
                        msg_statistics.delivertimes.append(float(msg.del_time))
                        msg_statistics.gets += 1
                    msg_statistics.dequeuetimes.append(float(msg.deq_time))
                    msg_statistics.totaltimes.append(float(msg.total_time))
                    msg_statistics.puts += 1
                    added = True
                    break

        if not added:
            msg_statistics = MiqMsgLists()
            msg_statistics.cmd = msg.msg_cmd
            if msg.del_time > 0:
                msg_statistics.delivertimes.append(float(msg.del_time))
                msg_statistics.gets = 1
            msg_statistics.dequeuetimes.append(float(msg.deq_time))
            msg_statistics.totaltimes.append(float(msg.total_time))
            msg_statistics.puts = 1
            all_statistics.append(msg_statistics)

    csvdata_path = log_path.join('csv_output', statistics_file_name)
    outputfile = csvdata_path.open('w', ensure=True)

    try:
        csvfile = csv.writer(outputfile)
        metrics = ['samples', 'min', 'avg', 'median', 'max', 'std', '90', '99']
        measurements = ['deq_time', 'del_time', 'total_time']
        headers = ['cmd', 'puts', 'gets']
        for measurement in measurements:
            for metric in metrics:
                headers.append('{}_{}'.format(measurement, metric))

        csvfile.writerow(headers)

        # Contents of CSV
        for msg_statistics in sorted(all_statistics, key=lambda x: x.cmd):
            if msg_statistics.gets > 1:
                logger.debug('Samples/Avg/90th/Std: %s: %s : %s : %s,Cmd: %s',
                    str(len(msg_statistics.totaltimes)).rjust(7),
                    str(round(numpy.average(msg_statistics.totaltimes), 3)).rjust(7),
                    str(round(numpy.percentile(msg_statistics.totaltimes, 90), 3)).rjust(7),
                    str(round(numpy.std(msg_statistics.totaltimes), 3)).rjust(7),
                    msg_statistics.cmd)
            stats = [msg_statistics.cmd, msg_statistics.puts, msg_statistics.gets]
            stats.extend(generate_statistics(msg_statistics.dequeuetimes, 3))
            stats.extend(generate_statistics(msg_statistics.delivertimes, 3))
            stats.extend(generate_statistics(msg_statistics.totaltimes, 3))
            csvfile.writerow(stats)
    finally:
        outputfile.close()
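
The generate_statistics helper called above is not shown in this example; a minimal sketch of what it could look like, assuming it returns one value per metric in the CSV header (samples, min, avg, median, max, std, 90th and 99th percentile), rounded to the given precision:

import numpy

def generate_statistics(values, precision=3):
    # Hypothetical helper: one entry per metric named in the header row above.
    if not values:
        return [0, 0, 0, 0, 0, 0, 0, 0]
    return [
        len(values),
        round(float(numpy.amin(values)), precision),
        round(float(numpy.average(values)), precision),
        round(float(numpy.median(values)), precision),
        round(float(numpy.amax(values)), precision),
        round(float(numpy.std(values)), precision),
        round(float(numpy.percentile(values, 90)), precision),
        round(float(numpy.percentile(values, 99)), precision),
    ]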
Example #2
def pytest_sessionfinish(session, exitstatus):
    udf_log_file = log_path.join('unused_data_files.log')

    if udf_log_file.check():
        # Clean up old udf log if it exists
        udf_log_file.remove()

    if session.config.option.udf_report is False:
        # Short out here if not making a report
        return

    # Output an unused data files log after a test run
    data_files = set()
    for dirpath, dirnames, filenames in os.walk(str(data_path)):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            data_files.add(filepath)
    unused_data_files = data_files - seen_data_files

    if unused_data_files:
        # Write the log of unused data files out, minus the data dir prefix
        udf_log = ''.join(
            (line[len(str(data_path)):] + '\n' for line in unused_data_files)
        )
        udf_log_file.write(udf_log + '\n')

        # Throw a notice into the terminal reporter to check the log
        tr = reporter()
        tr.write_line('')
        tr.write_sep(
            '-',
            '%d unused data files after test run, check %s' % (
                len(unused_data_files), udf_log_file.basename
            )
        )
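
seen_data_files is populated elsewhere in the plugin; a minimal sketch of one way it could be recorded, assuming data file access is funnelled through a single helper (the name data_path_for is hypothetical):

import os

seen_data_files = set()

def data_path_for(relative_path):
    # Hypothetical accessor: resolve a file under data_path and remember that it
    # was used, so pytest_sessionfinish can report the files that were never touched.
    full_path = os.path.join(str(data_path), relative_path)
    seen_data_files.add(full_path)
    return full_path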
Example #3
def collect_logs(app):
    log_files = DEFAULT_FILES
    local_dir = DEFAULT_LOCAL
    try:
        log_files = env.log_collector.log_files
    except (AttributeError, KeyError):
        logger.info('No log_collector.log_files in env, using the default: %s', log_files)
        pass
    try:
        local_dir = log_path.join(env.log_collector.local_dir)
    except (AttributeError, KeyError):
        logger.info('No log_collector.local_dir in env, using the default: %s', local_dir)
        pass

    # Handle local dir existing
    local_dir.ensure(dir=True)

    with app.ssh_client as ssh_client:
        logger.info(f'Starting log collection on appliance {app.hostname}')
        tarred_dir_name = f'log-collector-{app.hostname}'
        # fetch each file individually; missing files are logged and skipped below
        tar_dir_path = os.path.join(local_dir.strpath, tarred_dir_name)
        tarball_path = f'{tar_dir_path}.tar.gz'
        os.mkdir(tar_dir_path)
        for f in log_files:
            try:
                ssh_client.get_file(f, tar_dir_path)
            except scp.SCPException as ex:
                logger.error("Failed to transfer file %s: %s", f, ex)
        logger.debug('Creating tar file for appliance %s', app)
        subprocess.run(['tar', '-C', local_dir.strpath, '-czvf', tarball_path, tarred_dir_name],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
        shutil.rmtree(tar_dir_path)
        logger.info('Wrote the following file %s', tarball_path)
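
DEFAULT_FILES and DEFAULT_LOCAL are module-level defaults referenced above; an illustrative definition (the concrete log file names are assumptions, not taken from this example):

# Assumed defaults: appliance logs to fetch and the local directory to store them in.
DEFAULT_FILES = ['/var/www/miq/vmdb/log/evm.log',
                 '/var/www/miq/vmdb/log/production.log']
DEFAULT_LOCAL = log_path.join('appliance_logs')
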
def run(port, run_id=None):
    art_config = env.get("artifactor", {})
    art_config["server_port"] = int(port)
    art = Artifactor(None)

    if "log_dir" not in art_config:
        art_config["log_dir"] = log_path.strpath
    if "artifact_dir" not in art_config:
        art_config["artifact_dir"] = log_path.join("artifacts").strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_plugin(post_result.PostResult, "post-result")
    art.register_plugin(ostriz.Ostriz, "ostriz")

    initialize(art)

    art.configure_plugin("merkyl")
    art.configure_plugin("logger")
    art.configure_plugin("video")
    art.configure_plugin("filedump")
    art.configure_plugin("reporter")
    art.configure_plugin("post-result")
    art.configure_plugin("ostriz")
    art.fire_hook("start_session", run_id=run_id)
def parse_cmdline():
    parser = trackerbot.cmdline_parser()
    parser.add_argument(
        '--mark-usable',
        default=None,
        action='store_true',
        help="Mark all added templates as usable",
    )
    parser.add_argument(
        '--provider-key',
        default=None,
        dest='selected_provider',
        nargs='*',
        help='A specific provider key to sync for',
    )
    parser.add_argument(
        '--outfile',
        dest='outfile',
        default=log_path.join('sync_template_tracker_report.log').strpath,
        help='Output file for tabulated reports on ProviderTemplate actions'
    )
    parser.add_argument(
        '--verbose',
        default=False,
        action='store_true',
        help='Log to stdout'
    )
    args = parser.parse_args()
    return dict(args._get_kwargs())
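
dict(args._get_kwargs()) relies on a private argparse attribute; the documented equivalent, shown here for reference, is vars():

# Equivalent using the public argparse API instead of the private _get_kwargs():
#     return vars(args)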
Example #6
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--exclude-volumes',
                        nargs='+',
                        help='List of volumes, which should be excluded.')
    parser.add_argument('--exclude-eips',
                        nargs='+',
                        help='List of EIPs, which should be '
                        'excluded. Allocation_id or public IP are allowed.')
    parser.add_argument('--exclude-elbs',
                        nargs='+',
                        help='List of ELBs, which should be excluded.')
    parser.add_argument(
        '--exclude-enis',
        nargs='+',
        help='List of ENIs, which should be excluded. ENI ID is allowed.')
    parser.add_argument('--exclude_stacks',
                        nargs='+',
                        help='List of Stacks, which should be excluded')
    parser.add_argument('--stack-template',
                        help='Stack name template to be removed',
                        default="test",
                        type=str)
    parser.add_argument("--output",
                        dest="output",
                        help="target file name, default "
                        "'cleanup_ec2.log' in utils.path.log_path",
                        default=log_path.join('cleanup_ec2.log').strpath)
    args = parser.parse_args()
    return args
Example #7
def pytest_collection_modifyitems(session, config, items):
    from cfme.fixtures.pytest_store import store
    len_collected = len(items)

    new_items = []

    from cfme.utils.path import log_path
    with log_path.join('uncollected.log').open('w') as f:
        for item in items:
            # First filter out all items who have the uncollect mark
            uncollect_marker = item.get_closest_marker('uncollect')
            if uncollect_marker:
                reason = uncollect_marker.kwargs.get('reason')
                if reason is None:
                    raise ValueError(REASON_REQUIRED.format(item.name))
                f.write(f'{item.name} - {reason}\n')
            else:
                result, reason = uncollectif(item)
                if result:
                    f.write(f'{item.name} - {reason}\n')
                else:
                    new_items.append(item)

    items[:] = new_items

    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['uncollectif'] = filtered_count
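
uncollectif(item) is expected to return a (result, reason) pair; a minimal sketch under that assumption, evaluating an 'uncollectif' marker whose first positional argument is a boolean condition:

def uncollectif(item):
    # Hypothetical evaluation of the 'uncollectif' marker: uncollect the item
    # when the marker condition is truthy, and report why.
    marker = item.get_closest_marker('uncollectif')
    if marker is None:
        return False, None
    condition = marker.args[0] if marker.args else False
    reason = marker.kwargs.get('reason', 'No reason given')
    return bool(condition), reason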
Example #9
def pytest_sessionfinish(session, exitstatus):
    udf_log_file = log_path.join('unused_data_files.log')

    if udf_log_file.check():
        # Clean up old udf log if it exists
        udf_log_file.remove()

    if session.config.option.udf_report is False:
        # Short out here if not making a report
        return

    # Output an unused data files log after a test run
    data_files = set()
    for dirpath, dirnames, filenames in os.walk(str(data_path)):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            data_files.add(filepath)
    unused_data_files = data_files - seen_data_files

    if unused_data_files:
        # Write the log of unused data files out, minus the data dir prefix
        udf_log = ''.join(
            (line[len(str(data_path)):] + '\n' for line in unused_data_files))
        udf_log_file.write(udf_log + '\n')

        # Throw a notice into the terminal reporter to check the log
        tr = reporter()
        tr.write_line('')
        tr.write_sep(
            '-', '%d unused data files after test run, check %s' %
            (len(unused_data_files), udf_log_file.basename))
Example #10
 def pytest_sessionstart(self):
     if isinstance(self.held_appliance, DummyAppliance):
         return
     if pytest.store.parallelizer_role != 'slave':
         with log_path.join('appliance_version').open(
                 'w') as appliance_version:
             appliance_version.write(self.held_appliance.version.vstring)
Example #11
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = local(self.config.get("log_dir", log_path))
     self.log_dir.ensure(dir=True)
     self.artifact_dir = local(self.config.get("artifact_dir", log_path.join("artifacts")))
     self.artifact_dir.ensure(dir=True)
     self.logger = create_logger("artifactor", self.log_dir.join("artifactor.log").strpath)
     self.squash_exceptions = self.config.get("squash_exceptions", False)
     if not self.log_dir:
         print("!!! Log dir must be specified in yaml")
         sys.exit(127)
     if not self.artifact_dir:
         print("!!! Artifact dir must be specified in yaml")
         sys.exit(127)
     self.config["zmq_socket_address"] = "tcp://127.0.0.1:{}".format(random_port())
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {
         "artifactor_config": self.config,
         "log_dir": self.log_dir.strpath,
         "artifact_dir": self.artifact_dir.strpath,
         "artifacts": dict(),
         "old_artifacts": dict(),
     }
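
An illustrative config dict that parse_config would accept; only the keys read above are taken from the code, the values are assumptions:

example_artifactor_config = {
    "log_dir": log_path.strpath,
    "artifact_dir": log_path.join("artifacts").strpath,
    "squash_exceptions": False,
    # any remaining keys (e.g. per-plugin sections) stay in self.config and are
    # exposed to plugins via global_data["artifactor_config"]
}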
Example #12
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)

    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.strpath
    if 'artifact_dir' not in art_config:
        art_config['artifact_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_plugin(post_result.PostResult, "post-result")
    art.register_plugin(ostriz.Ostriz, "ostriz")

    initialize(art)

    art.configure_plugin('merkyl')
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('reporter')
    art.configure_plugin('post-result')
    art.configure_plugin('ostriz')
    art.fire_hook('start_session', run_id=run_id)
Example #13
def pytest_collection_modifyitems(session, config, items):
    from fixtures.pytest_store import store
    len_collected = len(items)

    new_items = []

    from cfme.utils.path import log_path
    with log_path.join('uncollected.log').open('w') as f:
        for item in items:
            # First filter out all items who have the uncollect mark
            uncollect_marker = item.get_marker('uncollect')
            if uncollect_marker:
                uncollect_reason = uncollect_marker.kwargs.get('reason', "No reason given")
                f.write("{} - {}\n".format(item.name, uncollect_reason))
            else:
                uncollectif_result, uncollectif_reason = uncollectif(item)
                if uncollectif_result:
                    f.write("{} - {}\n".format(item.name, uncollectif_reason))
                else:
                    new_items.append(item)

    items[:] = new_items

    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['uncollectif'] = filtered_count
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument(
        '-f',
        '--force',
        default=True,
        action='store_false',
        dest='prompt',
        help='Do not prompt before deleting VMs (danger zone!)')
    parser.add_argument(
        '--max-hours',
        default=24,
        help='Max hours since the VM was created or last powered on '
        '(varies by provider, default 24)')
    parser.add_argument(
        '--provider',
        dest='providers',
        action='append',
        default=None,
        help='Provider(s) to inspect, can be used multiple times',
        metavar='PROVIDER')
    parser.add_argument('--outfile',
                        dest='outfile',
                        default=log_path.join('cleanup_old_vms.log').strpath,
                        help='Output file listing the affected VMs')
    parser.add_argument(
        'text_to_match',
        nargs='*',
        default=['^test_', '^jenkins', '^i-'],
        help='Regex in the name of vm to be affected, can be used multiple times'
        ' (defaults to \'^test_\', \'^jenkins\' and \'^i-\')')

    args = parser.parse_args()
    return args
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = local(self.config.get('log_dir', log_path))
     self.log_dir.ensure(dir=True)
     self.artifact_dir = local(self.config.get('artifact_dir', log_path.join('artifacts')))
     self.artifact_dir.ensure(dir=True)
     self.logger = create_logger('artifactor', self.log_dir.join('artifactor.log').strpath)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     if not self.log_dir:
         print("!!! Log dir must be specified in yaml")
         sys.exit(127)
     if not self.artifact_dir:
         print("!!! Artifact dir must be specified in yaml")
         sys.exit(127)
     self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(random_port())
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {
         'artifactor_config': self.config,
         'log_dir': self.log_dir.strpath,
         'artifact_dir': self.artifact_dir.strpath,
         'artifacts': dict(),
         'old_artifacts': dict()
     }
Example #16
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--nic-template',
                        help='NIC Name template to be removed',
                        default="test",
                        type=str)
    parser.add_argument('--pip-template',
                        help='PIP Name template to be removed',
                        default="test",
                        type=str)
    parser.add_argument(
        '--days-old',
        help='Find stack items older than this many days',
        default="7",
        type=int)
    parser.add_argument("--output",
                        dest="output",
                        help="target file name, default "
                        "'cleanup_azure.log' in "
                        "utils.path.log_path",
                        default=log_path.join('cleanup_azure.log').strpath)
    parser.add_argument('--remove-unused-blobs',
                        help='Removal of unused blobs',
                        default=True)
    args = parser.parse_args()
    return args
def parse_cmd_line():
    """
    Specify and parse arguments
    :return: args, kwargs, the usual
    """
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--outfile',
                        default=log_path.join('list_provider_vms.log').strpath,
                        dest='outfile')
    parser.add_argument(
        '--tag',
        default=None,
        dest='tag',
        action='append',
        help='A provider tag to match a group of providers instead of all '
        'providers from cfme_data. Can be used multiple times')
    parser.add_argument(
        '--provider',
        default=None,
        action='append',
        help='Provider keys, can be used multiple times. If none are given '
        'the script will use all providers from cfme_data or match tags')

    args = parser.parse_args()
    return args
def pytest_collection_modifyitems(session, config, items):
    from fixtures.pytest_store import store
    len_collected = len(items)

    new_items = []

    from cfme.utils.path import log_path
    with log_path.join('uncollected.log').open('w') as f:
        for item in items:
            # First filter out all items who have the uncollect mark
            if item.get_marker('uncollect') or not uncollectif(item):
                # if a uncollect marker has been added,
                # give it priority for the explanation
                uncollect = item.get_marker('uncollect')
                marker = uncollect or item.get_marker('uncollectif')
                if marker:
                    reason = marker.kwargs.get('reason', "No reason given")
                else:
                    reason = None
                f.write("{} - {}\n".format(item.name, reason))
            else:
                new_items.append(item)

    items[:] = new_items

    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['uncollectif'] = filtered_count
Example #19
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = local(self.config.get('log_dir', log_path))
     self.log_dir.ensure(dir=True)
     self.artifact_dir = local(
         self.config.get('artifact_dir', log_path.join('artifacts')))
     self.artifact_dir.ensure(dir=True)
     self.logger = create_logger(
         'artifactor',
         self.log_dir.join('artifactor.log').strpath)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     if not self.log_dir:
         print("!!! Log dir must be specified in yaml")
         sys.exit(127)
     if not self.artifact_dir:
         print("!!! Artifact dir must be specified in yaml")
         sys.exit(127)
     self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(
         random_port())
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {
         'artifactor_config': self.config,
         'log_dir': self.log_dir.strpath,
         'artifact_dir': self.artifact_dir.strpath,
         'artifacts': dict(),
         'old_artifacts': dict()
     }
Example #21
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--exclude-volumes', nargs='+',
                        help='List of volumes, which should be excluded.')
    parser.add_argument('--exclude-eips', nargs='+',
                        help='List of EIPs, which should be '
                             'excluded. Allocation_id or public IP are allowed.')
    parser.add_argument('--exclude-elbs', nargs='+',
                        help='List of ELBs, which should be excluded.')
    parser.add_argument('--exclude-enis', nargs='+',
                        help='List of ENIs, which should be excluded. ENI ID is allowed.')
    parser.add_argument('--exclude_stacks', nargs='+',
                        help='List of Stacks, which should be excluded')
    parser.add_argument('--exclude_snapshots', nargs='+',
                        help='List of snapshots, which should be excluded. Snapshot ID is allowed')
    parser.add_argument('--exclude_queues', nargs='+',
                        help='List of queues, which should be excluded. Queue Url is allowed')
    parser.add_argument('--stack-template',
                        help='Stack name template to be removed', default="test", type=str)
    parser.add_argument('--bucket-name',
                        help='Specified Bucket will be removed', default="smartstate", type=str)
    parser.add_argument("--output", dest="output", help="target file name, default "
                                                        "'cleanup_ec2.log' in utils.path.log_path",
                        default=log_path.join('cleanup_ec2.log').strpath)
    args = parser.parse_args()
    return args
def parse_cmdline():
    parser = trackerbot.cmdline_parser()
    parser.add_argument(
        '--mark-usable',
        default=None,
        action='store_true',
        help="Mark all added templates as usable",
    )
    parser.add_argument(
        '--provider-key',
        default=None,
        dest='selected_provider',
        nargs='*',
        help='A specific provider key to sync for',
    )
    parser.add_argument(
        '--outfile',
        dest='outfile',
        default=log_path.join('sync_template_tracker_report.log').strpath,
        help='Output file for tabulated reports on ProviderTemplate actions')
    parser.add_argument('--verbose',
                        default=False,
                        action='store_true',
                        help='Log to stdout')
    args = parser.parse_args()
    return dict(args._get_kwargs())
def generate_hourly_charts_and_csvs(hourly_buckets, charts_dir):
    for cmd in sorted(hourly_buckets):
        current_csv = 'hourly_' + cmd + '.csv'
        csv_rawdata_path = log_path.join('csv_output', current_csv)

        logger.info('Writing %s csvs/charts', cmd)
        output_file = csv_rawdata_path.open('w', ensure=True)
        csvwriter = csv.DictWriter(output_file, fieldnames=MiqMsgBucket().headers,
            delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writeheader()
        for dt in sorted(hourly_buckets[cmd].keys()):
            linechartxaxis = []
            avgdeqtimings = []
            mindeqtimings = []
            maxdeqtimings = []
            avgdeltimings = []
            mindeltimings = []
            maxdeltimings = []
            cmd_put = []
            cmd_get = []

            sortedhr = sorted(hourly_buckets[cmd][dt].keys())
            for hr in sortedhr:
                linechartxaxis.append(str(hr))
                bk = hourly_buckets[cmd][dt][hr]

                avgdeqtimings.append(round(bk.avg_deq, 2))
                mindeqtimings.append(round(bk.min_deq, 2))
                maxdeqtimings.append(round(bk.max_deq, 2))
                avgdeltimings.append(round(bk.avg_del, 2))
                mindeltimings.append(round(bk.min_del, 2))
                maxdeltimings.append(round(bk.max_del, 2))
                cmd_put.append(bk.total_put)
                cmd_get.append(bk.total_get)
                bk.date = dt
                bk.hour = hr
                csvwriter.writerow(dict(bk))

            lines = {}
            lines['Put ' + cmd] = cmd_put
            lines['Get ' + cmd] = cmd_get
            line_chart_render(cmd + ' Command Put/Get Count', 'Hour during ' + dt,
                '# Count of Commands', linechartxaxis, lines,
                charts_dir.join('/{}-{}-cmdcnt.svg'.format(cmd, dt)))

            lines = {}
            lines['Average Dequeue Timing'] = avgdeqtimings
            lines['Min Dequeue Timing'] = mindeqtimings
            lines['Max Dequeue Timing'] = maxdeqtimings
            line_chart_render(cmd + ' Dequeue Timings', 'Hour during ' + dt, 'Time (s)',
                linechartxaxis, lines, charts_dir.join('/{}-{}-dequeue.svg'.format(cmd, dt)))

            lines = {}
            lines['Average Deliver Timing'] = avgdeltimings
            lines['Min Deliver Timing'] = mindeltimings
            lines['Max Deliver Timing'] = maxdeltimings
            line_chart_render(cmd + ' Deliver Timings', 'Hour during ' + dt, 'Time (s)',
                linechartxaxis, lines, charts_dir.join('/{}-{}-deliver.svg'.format(cmd, dt)))
        output_file.close()
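
line_chart_render is called with (title, x-axis title, y-axis title, x labels, a dict of series, output path); a minimal sketch of such a helper built on pygal (the use of pygal here is an assumption):

import pygal

def line_chart_render(title, xtitle, ytitle, xlabels, lines, output_path):
    # Hypothetical chart helper: one line chart per call, one series per entry
    # in the `lines` dict, rendered to an SVG file.
    chart = pygal.Line(title=title, x_title=xtitle, y_title=ytitle)
    chart.x_labels = xlabels
    for label, values in sorted(lines.items()):
        chart.add(label, values)
    chart.render_to_file(str(output_path))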
Example #25
 def _inc_test_count(test):
     error = ""
     if 'statuses' in test:
         test_counts[test['statuses']['overall']] += 1
     else:
         error += str(test)
     with log_path.join('no_status.log').open('a') as f:
         f.write(error)
Example #26
 def _inc_test_count(test):
     error = ""
     if "statuses" in test:
         test_counts[test["statuses"]["overall"]] += 1
     else:
         error += str(test)
     with log_path.join("no_status.log").open("a") as f:
         f.write(error)
Example #27
    def _install_coverage_hook(self):
        # Clean appliance coverage dir
        self.ipapp.ssh_client.run_command('rm -rf {}'.format(
            appliance_coverage_root.strpath))
        # Put the coverage hook in the miq config path
        self.ipapp.ssh_client.put_file(
            coverage_hook.strpath,
            rails_root.join('config', coverage_hook_file_name).strpath)
        # XXX: Once the manageiq PR 17302 makes it into the 5.9 and 5.8 stream we
        #      can remove all the code in this function after this.   This is only
        #      a temporary fix so we can start acquiring code coverage statistics.
        #
        # See if we need to install the patch.   If not just return.
        # The patch will create the file lib/code_coverage.rb under the rails root.
        # so if that is there we assume the patch is already installed.
        result = self.ipapp.ssh_client.run_command(
            'cd {}; [ -e lib/code_coverage.rb ]'.format(rails_root))
        if result.success:
            return True
        # place patch on the system
        self.log.info('Patching system with manageiq patch #17302')
        coverage_hook_patch_name = 'manageiq-17302.patch'
        local_coverage_hook_patch = coverage_data.join(
            coverage_hook_patch_name)
        remote_coverage_hook_patch = rails_root.join(coverage_hook_patch_name)
        self.ipapp.ssh_client.put_file(local_coverage_hook_patch.strpath,
                                       remote_coverage_hook_patch.strpath)
        # See if we need to install the patch command:
        result = self.ipapp.ssh_client.run_command('rpm -q patch')
        if not result.success:
            # Setup yum repositories and install patch
            local_yum_repo = log_path.join('yum.local.repo')
            remote_yum_repo = '/etc/yum.repos.d/local.repo'
            repo_data = cfme_data['basic_info']['local_yum_repo']
            yum_repo_data = '''
[{name}]
name={name}
baseurl={baseurl}
enabled={enabled}
gpgcheck={gpgcheck}
'''.format(name=repo_data['name'],
            baseurl=repo_data['baseurl'],
            enabled=repo_data['enabled'],
            gpgcheck=repo_data['gpgcheck'])
            with open(local_yum_repo.strpath, 'w') as f:
                f.write(yum_repo_data)
            self.ipapp.ssh_client.put_file(local_yum_repo.strpath,
                                           remote_yum_repo)
            self.ipapp.ssh_client.run_command('yum install -y patch')
            # Remove the yum repo just in case a test of registering the system might
            # happen and this repo cause problems with the test.
            self.ipapp.ssh_client.run_command('rm {}'.format(remote_yum_repo))
        # patch system.
        result = self.ipapp.ssh_client.run_command(
            'cd {}; patch -p1 < {}'.format(rails_root.strpath,
                                           remote_coverage_hook_patch.strpath))
        return result.success
def generate_raw_data_csv(rawdata_dict, csv_file_name):
    csv_rawdata_path = log_path.join('csv_output', csv_file_name)
    output_file = csv_rawdata_path.open('w', ensure=True)
    csvwriter = csv.DictWriter(output_file,
        fieldnames=next(iter(rawdata_dict.values())).headers,
        delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writeheader()
    sorted_rd_keys = sorted(rawdata_dict.keys())
    for key in sorted_rd_keys:
        csvwriter.writerow(dict(rawdata_dict[key]))
Example #30
    def _install_coverage_hook(self):
        # Clean appliance coverage dir
        self.ipapp.ssh_client.run_command('rm -rf {}'.format(appliance_coverage_root.strpath))
        # Put the coverage hook in the miq config path
        self.ipapp.ssh_client.put_file(
            coverage_hook.strpath,
            rails_root.join('config', coverage_hook_file_name).strpath)
        # XXX: Once the manageiq PR 17302 makes it into the 5.9 and 5.8 stream we
        #      can remove all the code in this function after this.   This is only
        #      a temporary fix so we can start acquiring code coverage statistics.
        #
        # See if we need to install the patch.   If not just return.
        # The patch will create the file lib/code_coverage.rb under the rails root.
        # so if that is there we assume the patch is already installed.
        result = self.ipapp.ssh_client.run_command('cd {}; [ -e lib/code_coverage.rb ]'.format(
            rails_root))
        if result.success:
            return True
        # place patch on the system
        self.log.info('Patching system with manageiq patch #17302')
        coverage_hook_patch_name = 'manageiq-17302.patch'
        local_coverage_hook_patch = coverage_data.join(coverage_hook_patch_name)
        remote_coverage_hook_patch = rails_root.join(coverage_hook_patch_name)
        self.ipapp.ssh_client.put_file(
            local_coverage_hook_patch.strpath,
            remote_coverage_hook_patch.strpath)
        # See if we need to install the patch command:
        result = self.ipapp.ssh_client.run_command('rpm -q patch')
        if not result.success:
            # Setup yum repositories and install patch
            local_yum_repo = log_path.join('yum.local.repo')
            remote_yum_repo = '/etc/yum.repos.d/local.repo'
            repo_data = cfme_data['basic_info']['local_yum_repo']
            yum_repo_data = '''
[{name}]
name={name}
baseurl={baseurl}
enabled={enabled}
gpgcheck={gpgcheck}
'''.format(
                name=repo_data['name'],
                baseurl=repo_data['baseurl'],
                enabled=repo_data['enabled'],
                gpgcheck=repo_data['gpgcheck'])
            with open(local_yum_repo.strpath, 'w') as f:
                f.write(yum_repo_data)
            self.ipapp.ssh_client.put_file(local_yum_repo.strpath, remote_yum_repo)
            self.ipapp.ssh_client.run_command('yum install -y patch')
            # Remove the yum repo just in case a test of registering the system might
            # happen and this repo cause problems with the test.
            self.ipapp.ssh_client.run_command('rm {}'.format(remote_yum_repo))
        # patch system.
        result = self.ipapp.ssh_client.run_command('cd {}; patch -p1 < {}'.format(
            rails_root.strpath,
            remote_coverage_hook_patch.strpath))
        return result.success
Example #31
def pytest_unconfigure(config):
    yield  # since hookwrapper, let hookimpl run
    if config.getoption('--collect-logs'):
        logger.info('Starting log collection on appliances')
        log_files = DEFAULT_FILES
        local_dir = DEFAULT_LOCAL
        try:
            log_files = env.log_collector.log_files
        except (AttributeError, KeyError):
            logger.info(
                'No log_collector.log_files in env, use default files: %s',
                log_files)
            pass
        try:
            local_dir = log_path.join(env.log_collector.local_dir)
        except (AttributeError, KeyError):
            logger.info(
                'No log_collector.local_dir in env, use default local_dir: %s',
                local_dir)
            pass

        # Handle local dir existing
        local_dir.ensure(dir=True)
        from cfme.test_framework.appliance import PLUGIN_KEY
        holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
        if holder is None:
            # No appliances to fetch logs from
            logger.warning('No logs collected, appliance holder is empty')
            return

        written_files = []
        for app in holder.appliances:
            with app.ssh_client as ssh_client:
                tar_file = 'log-collector-{}.tar.gz'.format(app.hostname)
                logger.debug(
                    'Creating tar file on app %s:%s with log files %s', app,
                    tar_file, ' '.join(log_files))
                # wrap the files in ls, redirecting stderr, to ignore files that don't exist
                tar_result = ssh_client.run_command(
                    'tar -czvf {tar} $(ls {files} 2>/dev/null)'.format(
                        tar=tar_file, files=' '.join(log_files)))
                try:
                    assert tar_result.success
                except AssertionError:
                    logger.exception(
                        'Tar command non-zero RC when collecting logs on %s: %s',
                        app, tar_result.output)
                    continue
                ssh_client.get_file(tar_file, local_dir.strpath)
            written_files.append(tar_file)
        logger.info('Wrote the following files to local log path: %s',
                    written_files)
Example #32
def pytest_runtest_setup(item):
    global recorder
    if vid_options and vid_options['enabled']:
        vid_log_path = log_path.join(vid_options['dir'])
        vid_dir, vid_name = get_path_and_file_name(item)
        full_vid_path = vid_log_path.join(vid_dir)
        try:
            os.makedirs(full_vid_path.strpath)
        except OSError:
            pass
        vid_name = vid_name + ".ogv"
        recorder = Recorder(full_vid_path.join(vid_name).strpath)
        recorder.start()
    yield
Example #34
def pytest_sessionfinish(session, exitstatus):
    failed_tests_template = template_env.get_template('failed_browser_tests.html')
    outfile = log_path.join('failed_browser_tests.html')

    # Clean out any old reports
    try:
        outfile.remove(ignore_errors=True)
    except ENOENT:
        pass

    # Generate a new one if needed
    if failed_test_tracking['tests']:
        failed_tests_report = failed_tests_template.render(**failed_test_tracking)
        outfile.write(failed_tests_report)
Example #36
def main(run_id, port):
    """Main function for running artifactor server"""
    port = port if port else random_port()
    try:
        run(port, run_id)
        print("Artifactor server running on port: ", port)
    except Exception as e:
        import traceback
        import sys
        with log_path.join('artifactor_crash.log').open('w') as f:
            print(e, file=f)
            print(e, file=sys.stderr)
            tb = '\n'.join(traceback.format_tb(sys.exc_info()[2]))
            print(tb, file=f)
            print(tb, file=sys.stderr)
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--nic-template',
                        help='NIC Name template to be removed', default="test", type=str)
    parser.add_argument('--pip-template',
                        help='PIP Name template to be removed', default="test", type=str)
    parser.add_argument('--days-old',
                        help='Find stack items older than this many days',
                        default="7", type=int)
    parser.add_argument("--output", dest="output", help="target file name, default "
                                                        "'cleanup_azure.log' in "
                                                        "utils.path.log_path",
                        default=log_path.join('cleanup_azure.log').strpath)
    args = parser.parse_args()
    return args
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument("--tracketbot-url", dest="trackerbot_url",
                        help="tracker bot url to make api call",
                        default=None)
    parser.add_argument("--stream", dest="stream",
                        help="stream to generate the template test result")
    parser.add_argument("--template", dest="appliance_template",
                        help="appliance/latest template name")
    parser.add_argument("--provider", dest="provider",
                        help="provider under test")
    parser.add_argument("--output", dest="output", help="target file name",
                        default=log_path.join('template_tester_results.log').strpath)
    args = parser.parse_args()
    return args
Example #39
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--exclude-volumes', nargs='+',
                        help='List of volumes, which should be excluded.')
    parser.add_argument('--exclude-eips', nargs='+',
                        help='List of EIPs, which should be '
                             'excluded. Allocation_id or public IP are allowed.')
    parser.add_argument('--exclude-elbs', nargs='+',
                        help='List of ELBs, which should be excluded.')
    parser.add_argument('--exclude-enis', nargs='+',
                        help='List of ENIs, which should be excluded. ENI ID is allowed.')
    parser.add_argument("--output", dest="output", help="target file name, default "
                                                        "'cleanup_ec2.log' in utils.path.log_path",
                        default=log_path.join('cleanup_ec2.log').strpath)
    args = parser.parse_args()
    return args
Example #40
def main(run_id, port):
    """Main function for running artifactor server"""
    port = port if port else random_port()
    try:
        run(port, run_id)
        print("Artifactor server running on port: ", port)
    except Exception as e:
        import traceback
        import sys

        with log_path.join("artifactor_crash.log").open("w") as f:
            print(e, file=f)
            print(e, file=sys.stderr)
            tb = "\n".join(traceback.format_tb(sys.exc_info()[2]))
            print(tb, file=f)
            print(tb, file=sys.stderr)
def pytest_unconfigure(config):
    yield  # since hookwrapper, let hookimpl run
    if config.getoption('--collect-logs'):
        logger.info('Starting log collection on appliances')
        log_files = DEFAULT_FILES
        local_dir = DEFAULT_LOCAL
        try:
            log_files = env.log_collector.log_files
        except (AttributeError, KeyError):
            logger.info('No log_collector.log_files in env, use default files: %s', log_files)
            pass
        try:
            local_dir = log_path.join(env.log_collector.local_dir)
        except (AttributeError, KeyError):
            logger.info('No log_collector.local_dir in env, use default local_dir: %s', local_dir)
            pass

        # Handle local dir existing
        local_dir.ensure(dir=True)
        from cfme.test_framework.appliance import PLUGIN_KEY
        holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
        if holder is None:
            # No appliances to fetch logs from
            logger.warning('No logs collected, appliance holder is empty')
            return

        written_files = []
        for app in holder.appliances:
            with app.ssh_client as ssh_client:
                tar_file = 'log-collector-{}.tar.gz'.format(
                    app.hostname)
                logger.debug('Creating tar file on app %s:%s with log files %s',
                             app, tar_file, ' '.join(log_files))
                # wrap the files in ls, redirecting stderr, to ignore files that don't exist
                tar_result = ssh_client.run_command(
                    'tar -czvf {tar} $(ls {files} 2>/dev/null)'
                    .format(tar=tar_file, files=' '.join(log_files)))
                try:
                    assert tar_result.success
                except AssertionError:
                    logger.exception('Tar command non-zero RC when collecting logs on %s: %s',
                                     app, tar_result.output)
                    continue
                ssh_client.get_file(tar_file, local_dir.strpath)
            written_files.append(tar_file)
        logger.info('Wrote the following files to local log path: %s', written_files)
def pull_merged_coverage_data(ssh, coverage_dir):
    """Pulls merged coverage data to log directory.

    Args:
        ssh:  ssh client
        coverage_dir:  Directory where the coverage archive was extracted.

    Returns:
        Nothing
    """
    logger.info('Packing the generated HTML')
    cmd = ssh.run_command('cd {}; tar cfz /tmp/merged.tgz merged'.format(coverage_dir))
    if cmd.failed:
        raise Exception('Could not compress! - {}'.format(str(cmd)))
    logger.info('Grabbing the generated HTML')
    ssh.get_file('/tmp/merged.tgz', log_path.strpath)
    logger.info('Locally decompressing the generated HTML')
    subprocess.check_call(
        ['tar', 'xf', log_path.join('merged.tgz').strpath, '-C', log_path.strpath])
    logger.info('Done!')
def pull_merged_coverage_data(ssh, coverage_dir):
    """Pulls merged coverage data to log directory.

    Args:
        ssh:  ssh client
        coverage_dir:  Directory where the coverage archive was extracted.

    Returns:
        Nothing
    """
    logger.info('Packing the generated HTML')
    ssh_run_cmd(
        ssh=ssh,
        cmd='cd {}; tar cfz /tmp/merged.tgz merged'.format(coverage_dir),
        error_msg='Could not archive results!')
    logger.info('Grabbing the generated HTML')
    ssh.get_file('/tmp/merged.tgz', log_path.strpath)
    logger.info('Locally decompressing the generated HTML')
    subprocess.check_call(
        ['tar', 'xf', log_path.join('merged.tgz').strpath, '-C', log_path.strpath])
    logger.info('Done!')
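
ssh_run_cmd is called with keyword arguments ssh, cmd and error_msg; a minimal sketch of such a wrapper, raising with the supplied message when the command fails (mirroring the inline check in the run_command variant above):

def ssh_run_cmd(ssh, cmd, error_msg):
    # Hypothetical wrapper: run the command over ssh and fail loudly,
    # so callers do not have to repeat the result check.
    result = ssh.run_command(cmd)
    if result.failed:
        raise Exception('{} - {}'.format(error_msg, str(result)))
    return result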
Example #45
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument("--tracketbot-url",
                        dest="trackerbot_url",
                        help="tracker bot url to make api call",
                        default=None)
    parser.add_argument("--stream",
                        dest="stream",
                        help="stream to generate the template test result")
    parser.add_argument("--template",
                        dest="appliance_template",
                        help="appliance/latest template name")
    parser.add_argument("--provider",
                        dest="provider",
                        help="provider under test")
    parser.add_argument(
        "--output",
        dest="output",
        help="target file name",
        default=log_path.join('template_tester_results.log').strpath)
    args = parser.parse_args()
    return args
Example #46
def parse_cmd_line():
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('-f', '--force', default=True, action='store_false', dest='dryrun',
                        help='Do NOT dry-run (DANGER zone!)')
    parser.add_argument('--max-hours', default=24,
                        help='Max hours since the VM was created or last powered on '
                             '(varies by provider, default 24)')
    parser.add_argument('--provider', dest='providers', action='append', default=None,
                        help='Provider(s) to inspect, can be used multiple times.',
                        metavar='PROVIDER')
    parser.add_argument('--tag', dest='tags', action='append', default=None,
                        help='Tag to filter providers by, like "extcloud". '
                             'Can be used multiple times')
    parser.add_argument('--outfile', dest='outfile',
                        default=log_path.join('cleanup_old_vms.log').strpath,
                        help='Output file listing the affected VMs')
    parser.add_argument('text_to_match', nargs='*', default=['^test_', '^jenkins', '^i-'],
                        help='Regex in the name of vm to be affected, can be used multiple times'
                             ' (defaults to \'^test_\', \'^jenkins\' and \'^i-\')')

    args = parser.parse_args()
    return args
def pull_merged_coverage_data(ssh, coverage_dir):
    """Pulls merged coverage data to log directory.

    Args:
        ssh:  ssh client
        coverage_dir:  Directory where the coverage archive was extracted.

    Returns:
        Nothing
    """
    logger.info('Packing the generated HTML')
    cmd = ssh.run_command(
        'cd {}; tar cfz /tmp/merged.tgz merged'.format(coverage_dir))
    if cmd.failed:
        raise Exception('Could not compress! - {}'.format(str(cmd)))
    logger.info('Grabbing the generated HTML')
    ssh.get_file('/tmp/merged.tgz', log_path.strpath)
    logger.info('Locally decompressing the generated HTML')
    subprocess.check_call([
        'tar', 'xf',
        log_path.join('merged.tgz').strpath, '-C', log_path.strpath
    ])
    logger.info('Done!')
def parse_cmd_line():
    """
    Specify and parse arguments
    :return: args, kwargs, the usual
    """
    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--outfile',
                        default=log_path.join('list_provider_vms.log').strpath,
                        dest='outfile')
    parser.add_argument('--tag',
                        default=None,
                        dest='tag',
                        action='append',
                        help='A provider tag to match a group of providers instead of all '
                             'providers from cfme_data. Can be used multiple times')
    parser.add_argument('provider',
                        default=None,
                        nargs='*',
                        help='Provider keys, can be used multiple times. If none are given '
                             'the script will use all providers from cfme_data or match tags')

    args = parser.parse_args()
    return args
Example #49
    reuse_dir: True
    plugins:
        post-result:
            enabled: True
            plugin: post_result
"""
from collections import defaultdict

from artifactor import ArtifactorBasePlugin

from cfme.utils.path import log_path

# preseed the normal statuses, but let defaultdict handle
# any unexpected statuses, which should probably never happen

test_report = log_path.join('test-report.json')
test_counts = defaultdict(int, {
    'passed': 0,
    'failed': 0,
    'skipped': 0,
    'error': 0,
    'xpassed': 0,
    'xfailed': 0
})


class PostResult(ArtifactorBasePlugin):
    def plugin_initialize(self):
        self.register_plugin_hook('finish_session', self.post_result)
        if test_report.check():
            test_report.remove()
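
The finish_session hook registered above is not shown; a minimal sketch of a summary writer consistent with the test_report path and test_counts declared earlier (how the per-test results reach it is an assumption):

import json

def write_test_report(counts):
    # Hypothetical summary writer: persist the tallied statuses to test-report.json.
    test_report.write(json.dumps(dict(counts), indent=2))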
Example #50
# paths to all of the coverage-related files

# on the appliance
#: Corresponds to Rails.root in the rails env
rails_root = local('/var/www/miq/vmdb')
#: coverage root, should match what's in the coverage hook and merger scripts
appliance_coverage_root = rails_root.join('coverage')

# local
coverage_data = scripts_data_path.join('coverage')
gemfile = coverage_data.join('coverage_gem.rb')
bundler_d = rails_root.join('bundler.d')
coverage_hook_file_name = 'coverage_hook.rb'
coverage_hook = coverage_data.join(coverage_hook_file_name)
coverage_merger = coverage_data.join('coverage_merger.rb')
coverage_output_dir = log_path.join('coverage')
coverage_results_archive = coverage_output_dir.join('coverage-results.tgz')
coverage_appliance_conf = conf_path.join('.ui-coverage')

# This is set in sessionfinish, and should be reliably readable
# in post-yield sessionfinish hook wrappers and all hooks thereafter
ui_coverage_percent = None


def clean_coverage_dir():
    try:
        coverage_output_dir.remove(ignore_errors=True)
    except ENOENT:
        pass
    coverage_output_dir.ensure(dir=True)
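
# Illustrative only (not part of the original snippet): the py.path calls used
# by clean_coverage_dir() behave roughly like this; the path is hypothetical.
from py.path import local

d = local('/tmp/coverage_example')
d.ensure(dir=True)             # create the directory (and parents) if missing
d.remove(ignore_errors=True)   # recursive delete; a missing directory is not fatal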
Exemple #51
0
cur = connection.cursor()
cur.execute("""
    CREATE TABLE emails (
        from_address TEXT,
        to_address TEXT,
        subject TEXT,
        time TIMESTAMP DEFAULT (datetime('now','localtime')),
        text TEXT
    )
    """)
connection.commit()
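
# Hypothetical insert matching the schema above; the addresses and text are
# illustrative. The time column falls back to its DEFAULT of the current local time.
cur.execute(
    "INSERT INTO emails (from_address, to_address, subject, text) VALUES (?, ?, ?, ?)",
    ("sender@example.com", "rcpt@example.com", "test subject", "test body"))
connection.commit()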

# To write the e-mails into the files
files_lock = threading.RLock()  # To prevent filename collisions
test_name = None  # Name of the test which currently runs
email_path = log_path.join("emails")
email_folder = None  # Name of the root folder for testing

template_env = Environment(loader=FileSystemLoader(template_path.strpath))


def write(what, end="\n"):
    """Wrapper that forces flush on each write

    Args:
        what: What to print
        end: Ending character. Default LF
    """
    sys.stdout.write(what)
    if end is not None:
        sys.stdout.write(end)
    sys.stdout.flush()
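
# Usage sketch: because write() flushes on every call, partial progress output
# shows up immediately even without a trailing newline.
write("processing", end="")
write(" ... done")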
Exemple #52
0
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url'),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_dict['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # destroy_vm returns True on success; negate it so the script exits with
        # POSIX-style codes (0/False means success, 1/True means failure)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so fall back to a hardcoded default;
        # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
        flavor = kwargs.get('flavor') or 'c3.xlarge'
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd':
            {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': 0,
            'ssh_pwauth': 1
        }
        cloud_init = "#cloud-config\n{}".format(yaml.safe_dump(cloud_init_dict,
                                                               default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = [f for f in flavors if f in available_flavors]
        try:
            flavor = kwargs.get('flavor') or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'], deploy_args['vm_name'],
                                                    kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('provider.deploy_template failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args['vm_name']], num_sec=1200,
                                  fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
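
# Hypothetical consumer of the outfile written above (illustrative only); the
# file name matches the 'appliance_ip_address_1' default used for --deploy runs.
with open('appliance_ip_address_1') as f:
    for line in f:
        key, _, value = line.strip().partition('=')
        if key == 'appliance_ip_address':
            print(value)
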
def wait_for_ajax():
    """
    Waits until all ajax timers are complete; in other words, it blocks until there are no
    more pending ajax requests and the page load has finished completely.

    Raises:
        TimedOutError: when ajax did not load in time
    """

    execute_script("""
        try {
            angular.element('error-modal').hide();
        } catch(err) {
        }""")

    _thread_local.ajax_log_msg = ''

    def _nothing_in_flight():
        """Checks if there is no ajax in flight and also logs current status
        """
        prev_log_msg = _thread_local.ajax_log_msg

        # 5.5.z and 5.7.0.4+
        if not store.current_appliance.is_miqqe_patch_candidate:
            try:
                anything_in_flight = in_flight("return ManageIQ.qe.anythingInFlight()")
            except Exception as e:
                # if jQuery in error message, a non-cfme page (proxy error) is displayed
                # should be handled by something else
                if "jquery" not in str(e).lower():
                    raise
                return True
            running = execute_script("return ManageIQ.qe.inFlight()")
            log_msg = ', '.join(["{}: {}".format(k, str(v)) for k, v in running.iteritems()])
        # 5.6.z, 5.7.0.{1,2,3}
        else:
            try:
                running = in_flight(js.in_flight)
            except Exception as e:
                # if jQuery in error message, a non-cfme page (proxy error) is displayed
                # should be handled by something else
                if "jquery" not in str(e).lower():
                    raise
                return True
            anything_in_flight = False
            anything_in_flight |= running["jquery"] > 0
            anything_in_flight |= running["prototype"] > 0
            anything_in_flight |= running["spinner"]
            anything_in_flight |= running["document"] != "complete"
            anything_in_flight |= running["autofocus"] > 0
            anything_in_flight |= running["debounce"] > 0
            anything_in_flight |= running["miqQE"] > 0
            log_msg = ', '.join(["{}: {}".format(k, str(v)) for k, v in running.iteritems()])

        # Log the message only if it's different from the last one
        if prev_log_msg != log_msg:
            _thread_local.ajax_log_msg = log_msg
            logger.trace('Ajax running: %s', log_msg)
        if (not anything_in_flight) and prev_log_msg:
            logger.trace('Ajax done')

        return not anything_in_flight

    wait_for(
        _nothing_in_flight,
        num_sec=_thread_local.ajax_timeout, delay=0.1, message="wait for ajax", quiet=True,
        silent_failure=True)

    # If we are not supposed to take page screenshots, just return.
    if store.config and not store.config.getvalue('page_screenshots'):
        return

    url = browser().current_url
    url = url.replace(base_url(), '')
    url = url.replace("/", '_')
    if url not in urls:
        logger.info('Taking picture of page: %s', url)
        ss, sse = take_screenshot()
        if ss:
            ss_path = log_path.join('page_screenshots')
            if not ss_path.exists():
                ss_path.mkdir()
            with ss_path.join("{}.png".format(url)).open('wb') as f:
                f.write(base64.b64decode(ss))
        urls.append(url)
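
# For reference, an illustrative shape of the 'running' dict inspected by the
# non-miqQE branch above (keys taken from the code; the values are examples).
running = {
    'jquery': 0,             # outstanding jQuery XHRs
    'prototype': 0,          # outstanding Prototype.js requests
    'spinner': False,        # spinner widget visible?
    'document': 'complete',  # document.readyState
    'autofocus': 0,
    'debounce': 0,
    'miqQE': 0,
}
# With these values every term of the |= chain is falsy, so anything_in_flight
# stays False and _nothing_in_flight() returns True.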
Exemple #55
0
    def pytest_sessionstart(self):
        if isinstance(self.held_appliance, DummyAppliance) or self.held_appliance.is_dev:
            return
        if pytest.store.parallelizer_role != 'slave':
            with log_path.join('appliance_version').open('w') as appliance_version:
                appliance_version.write(self.held_appliance.version.vstring)
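
# Illustrative follow-up (not part of the original snippet): the file written by
# this hook can later be read back, e.g. from reporting code.
version_file = log_path.join('appliance_version')
if version_file.check():
    appliance_version = version_file.read().strip()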