Example #1
import copy  # stdlib; static, dynamic, strings and MongoDB come from the surrounding project

def main():
    binary_info = static.get_binary_info()
    arch_info = static.get_arch_info()
    performance = dynamic.run_benchmarks(timeout=0.4, cores=[1, 2, 3, 4, 5, 6, 7, 8])

    info = dict()
    info['arch'] = arch_info
    info['bins'] = binary_info
    info['tests'] = performance

    mongo = MongoDB()
    mongo.arch.insert_one(copy.copy(info))

    print(strings.to_json(info, 'performance.json'))
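
A note on the copy.copy(info) call above: if MongoDB here wraps pymongo, then insert_one() mutates the document it receives by adding an '_id' field, so inserting a copy keeps info clean for the JSON dump that follows. A minimal sketch, assuming pymongo and a reachable server:

import copy
from pymongo import MongoClient

info = {'arch': 'x86_64'}
collection = MongoClient().benchmarks.arch    # hypothetical database/collection names
collection.insert_one(copy.copy(info))        # the copy, not info, receives the '_id'
assert '_id' not in info                      # the original dict stays JSON-serializable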
Example #2
def main():
    options, args = create_parser().parse()

    logger.debug('Settings: {d}'.format(d=to_json(options.__dict__)))

    tester = FlowTester(**options.__dict__)
    tester.run()
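
The FlowTester(**options.__dict__) call forwards every parsed option as a keyword argument. A minimal sketch of the same pattern with argparse and a stub tester (the real create_parser() and FlowTester are project code and not shown here):

import argparse

class StubTester:
    """Stand-in for FlowTester, only to illustrate the kwargs hand-off."""
    def __init__(self, timeout=30, cores=None):
        self.timeout = timeout
        self.cores = cores or [1]

    def run(self):
        print('timeout={0}, cores={1}'.format(self.timeout, self.cores))

parser = argparse.ArgumentParser()
parser.add_argument('--timeout', type=int, default=30)
parser.add_argument('--cores', type=int, nargs='+')
options = parser.parse_args(['--timeout', '60', '--cores', '1', '2'])

StubTester(**vars(options)).run()    # vars(options) is options.__dict__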
Example #3
def main():
    parser = create_parser()
    options, args = parse_args(parser)

    binary_info = static.get_binary_info()
    arch_info = static.get_arch_info()

    info = dict()
    info['arch'] = arch_info
    info['bins'] = binary_info

    for arg in args:
        path, value = str(arg).split('=', 1)  # maxsplit=1: exactly two parts even if value contains '='
        inject(info, path, value)

    io.mkdir(options.output)
    print(strings.to_json(info, options.output))
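
The inject(info, path, value) helper above takes a path=value pair from the command line and stores the value inside the nested info dict; its implementation is not shown. A hypothetical version that treats the path as dot-separated keys could look like this:

def inject(target, path, value):
    """Set value at a dot-separated path, creating nested dicts along the way."""
    keys = path.split('.')
    for key in keys[:-1]:
        target = target.setdefault(key, {})
    target[keys[-1]] = value

info = {}
inject(info, 'arch.cpu.model', 'Xeon E5')    # as if 'arch.cpu.model=Xeon E5' came from args
print(info)                                  # {'arch': {'cpu': {'model': 'Xeon E5'}}}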
Example #4
def main():
    parser = create_parser()
    options, args, includes = parse_args(parser)

    print(includes)

    performance = dynamic.run_benchmarks(
        tests=includes,
        timeout=options.timeout,
        tries=options.tries,
        cores=options.cores
    )

    info = dict()
    info['tests'] = performance

    io.mkdir(options.output)
    print(strings.to_json(info, options.output))
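
io.mkdir() and strings.to_json() are project helpers; judging from the usage above, to_json() both writes the JSON file and returns the serialized text. A rough standard-library approximation of that assumed behavior:

import json
import os

def to_json(obj, path):
    """Guess at the project helper: write obj to path and return the JSON text."""
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    text = json.dumps(obj, indent=4)
    with open(path, 'w') as fp:
        fp.write(text)
    return text

print(to_json({'tests': []}, 'out/performance.json'))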
Example #5
    # method excerpt, presumably FlowTester.run(); os, shutil.rmtree and the project
    # helpers (pluck, browse, lists, io, mkdir, FlowJson, to_json) are assumed imported
    def run(self):
        logger.debug("Running tests...")
        for test_name, test_option in self.tests.items():
            self.browse_test_config_files(test_name, test_option)
            logger.debug("{test_name}:{test_option[problem]}".format(
                test_name=test_name, test_option=test_option))
            self.setup_test_paths(test_name, test_option)
            executors = self.prepare_test_executor(test_name, test_option)

            for executor in executors:
                logger.debug(
                    "{test_name}:{e.environment[problem_config]}: running".
                    format(test_name=test_name, e=executor))
                environment = executor.environment

                # purge output directory
                if os.path.exists(environment['output_path']):
                    rmtree(environment['output_path'])

                # run test
                executor.run()

                # save info about test
                logger.debug(
                    "{test_name}:{e.environment[problem_config]}: generating report"
                    .format(test_name=test_name, e=executor))
                json_report = self.generate_report(executor)

                # get comparisons
                if self.compare_result:
                    logger.debug(
                        "{test_name}:{e.environment[problem_config]}: comparing output result"
                        .format(test_name=test_name, e=executor))
                    comparisons = self.compare_results_files(environment)
                    if not comparisons or max(pluck(comparisons,
                                                    'exit_code')) == 0:
                        json_report['correct'] = True
                    else:
                        json_report['correct'] = False
                        json_report['comparisons'] = [
                            ex.environment['file'] for ex in comparisons
                            if ex.exit_code != 0
                        ]

                if self.save_stderr:
                    json_report['stderr'] = executor.stderr
                if self.save_stdout:
                    json_report['stdout'] = executor.stdout

                # create name for json file based on settings
                info_json = executor.environment['info_json'].format(
                    **json_report)
                info_json = io.join_path(self.output_dir, info_json)
                mkdir(info_json, is_file=True)

                # merge artifacts (so far only one - profiler info)
                profilers = browse(environment['output_path'])
                profilers = lists.filter(
                    profilers, lambda x: self.select_artifact_rule.match(x))

                # merge report with profiler
                json_report = FlowJson.merge_json_info(json_report, profilers)
                json_report = FlowJson.clean_json(json_report)
                logger.debug(to_json(json_report, info_json))
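
The correctness rule in the middle of run() marks a test as correct when there are no comparisons at all or when every comparison exited with code 0. A small self-contained illustration, with pluck() and the comparison objects replaced by stand-ins:

class Comparison(object):
    def __init__(self, exit_code):
        self.exit_code = exit_code

def pluck(items, attr):
    """Stand-in for the project's pluck(): collect one attribute from each item."""
    return [getattr(item, attr) for item in items]

comparisons = [Comparison(0), Comparison(1)]
correct = not comparisons or max(pluck(comparisons, 'exit_code')) == 0
print(correct)    # False: one comparison returned a non-zero exit code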