Example 1
    def compare_results_files(self, environment):
        # locate ref output dir
        problem_config = os.path.basename(environment['problem_config'])
        ref_dir = io.join_path(environment['ref_output_path'], problem_config)
        output_dir = environment['output_path']

        # grab all files in dir
        files_to_compare = browse(ref_dir)
        files_to_compare = [
            f.replace(ref_dir, '').lstrip('/\\') for f in files_to_compare
        ]

        # compare them
        comparisons = []
        for filename in files_to_compare:
            variables = {
                'ndiff': self.bins['ndiff'],
                'ref': io.join_path(ref_dir, filename),
                'res': io.join_path(output_dir, filename),
                'file': filename,
                'name': "comparison {file}".format(file=filename)
            }
            comparison = exec_single(
                command="""perl {ndiff} "{ref}" "{res}" """,
                variables=variables,
                plugins=[
                    PluginEnv(variables),
                    PluginBriefInfo(name=variables['name'])
                ])
            comparisons.append(comparison)
        return comparisons
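
The method relies on a `browse` helper to enumerate every file under the reference directory; that helper is not part of this excerpt. A minimal stand-in based on `os.walk` (name and behaviour assumed, for illustration only) could look like this:

import os

def browse(root):
    # hypothetical stand-in for the browse helper used above:
    # recursively collect the full path of every file below root
    found = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            found.append(os.path.join(dirpath, filename))
    return found

Since compare_results_files strips the ref_dir prefix afterwards, any helper that returns full paths in this form would work equally well.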
Example 2
    def insert_one(self, dirname):
        with timer.measured("Processing one, folder {dirname}".format(dirname=dirname), False):
            env = self.mongo._extract_environment(io.join_path(dirname, "environment.json"))
            cal = self.mongo._extract_calibration(io.join_path(dirname, "performance.json"))

            env["cal"] = cal

            self.mongo.insert_process(dirname, env)
Example 3
    def insert_one(self, dirname):
        with timer.measured(
                'Processing one, folder {dirname}'.format(dirname=dirname),
                False):
            env = self.mongo._extract_environment(
                io.join_path(dirname, 'environment.json'))
            cal = self.mongo._extract_calibration(
                io.join_path(dirname, 'performance.json'))

            env['cal'] = cal

            self.mongo.insert_process(dirname, env)
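
Both variants wrap the work in the `timer.measured` context manager from the flowrunner utilities, whose implementation is not shown here. A minimal sketch of such a timing context manager (the meaning of the boolean flag is assumed) might be:

import time
from contextlib import contextmanager

@contextmanager
def measured(label, print_start=True):
    # hypothetical stand-in for timer.measured: time the wrapped block
    if print_start:
        print('{label}: started'.format(label=label))
    start = time.time()
    try:
        yield
    finally:
        print('{label}: took {t:.3f} s'.format(label=label, t=time.time() - start))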
Example 4
    def setup_test_paths(self, test_name, test_option):
        test_option['input_path'] = io.join_path(self.tests_root, test_name,
                                                 test_option['input'])
        test_option['output_path'] = io.join_path(self.tests_root, test_name,
                                                  test_option['output'])
        test_option['ref_output_path'] = io.join_path(
            self.tests_root, test_name, test_option['ref_output'])
        test_option['problem_config'] = lists.prepend_path(
            test_option['problem'], self.tests_root, test_name)

        del test_option['input']
        del test_option['output']
        del test_option['ref_output']
        del test_option['problem']
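
`lists.prepend_path` is assumed to join a common directory prefix onto every entry of the problem list; the real helper is not included in these excerpts, but a possible sketch is:

import os

def prepend_path(items, *prefix_parts):
    # hypothetical equivalent of lists.prepend_path:
    # join the given prefix parts in front of each item
    return [os.path.join(*(prefix_parts + (item,))) for item in items]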
Example 5
    def browse_test_config_files(self, test_name, test_option):
        # browse .con and .yaml config files
        cwd = io.join_path(self.tests_root, test_name)
        cons = check_output_secure('ls *.con', cwd=cwd).strip().split()
        yamls = check_output_secure('ls *.yaml', cwd=cwd).strip().split()

        # union these lists and filter them
        test_option['problem'] = lists.union(cons, yamls)
        test_option['problem'] = lists.filter(test_option['problem'],
                                              lambda x: bool(x))
        test_option['problem'] = lists.filter(
            test_option['problem'], lambda x: self.select_ini_rule.match(x))
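
The listing shells out to `ls` through `check_output_secure` to enumerate the config files. A pure-Python enumeration with `glob` (a sketch only, not the project's actual helper) would avoid spawning a shell:

import glob
import os

def browse_config_files(cwd, select_ini_rule):
    # collect *.con and *.yaml config files without spawning a shell
    cons = [os.path.basename(p) for p in glob.glob(os.path.join(cwd, '*.con'))]
    yamls = [os.path.basename(p) for p in glob.glob(os.path.join(cwd, '*.yaml'))]
    problems = sorted(set(cons) | set(yamls))
    return [p for p in problems if select_ini_rule.match(p)]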
Example 6
                io.join_path(dirname, 'performance.json'))

            env['cal'] = cal

            self.mongo.insert_process(dirname, env)

    def insert_many(self, dirname, filters=[]):
        dirs = io.listdir(dirname)
        for f in filters:
            dirs = lists.filter(dirs, f)

        with timer.measured(
                'Processing many, folder {dirname}'.format(dirname=dirname),
                False):
            for dir in dirs:
                self.insert_one(dir)


mongo = MongoDB()
mongo.remove_all()
mongo.close()
mongo = MongoDB()

experiments = Experiments(MongoDB())
# experiments.insert_many('/home/jan-hybs/Dropbox/meta', [lambda x: str(x).startswith('test')])

meta_folder = r'c:\Users\Jan\Dropbox\meta'
experiments.insert_many(io.join_path(meta_folder, 'article', 'A'))
experiments.insert_many(io.join_path(meta_folder, 'article', 'B'))
experiments.insert_many(io.join_path(meta_folder, 'article', 'C'))
# experiments.insert_one('/home/jan-hybs/Dropbox/meta/test-13')
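
The commented-out call shows how the filters are meant to be used: each filter is a predicate applied to the directory names returned by `io.listdir`. A standalone sketch of that filtering step (helper names assumed, not the project's actual code):

import os

def list_dirs(dirname, filters=None):
    # hypothetical equivalent of io.listdir followed by repeated lists.filter calls
    dirs = [os.path.join(dirname, d) for d in os.listdir(dirname)]
    for predicate in (filters or []):
        dirs = [d for d in dirs if predicate(d)]
    return dirs

# e.g. keep only experiment folders whose name starts with 'test':
# list_dirs('/home/jan-hybs/Dropbox/meta', [lambda d: os.path.basename(d).startswith('test')])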
Example 7
            env = self.mongo._extract_environment(io.join_path(dirname, "environment.json"))
            cal = self.mongo._extract_calibration(io.join_path(dirname, "performance.json"))

            env["cal"] = cal

            self.mongo.insert_process(dirname, env)

    def insert_many(self, dirname, filters=[]):
        dirs = io.listdir(dirname)
        for f in filters:
            dirs = lists.filter(dirs, f)

        with timer.measured("Processing many, folder {dirname}".format(dirname=dirname), False):
            for dir in dirs:
                self.insert_one(dir)


mongo = MongoDB()
mongo.remove_all()
mongo.close()
mongo = MongoDB()

experiments = Experiments(MongoDB())
# experiments.insert_many('/home/jan-hybs/Dropbox/meta', [lambda x: str(x).startswith('test')])

meta_folder = r"c:\Users\Jan\Dropbox\meta"
experiments.insert_many(io.join_path(meta_folder, "article", "A"))
experiments.insert_many(io.join_path(meta_folder, "article", "B"))
experiments.insert_many(io.join_path(meta_folder, "article", "C"))
# experiments.insert_one('/home/jan-hybs/Dropbox/meta/test-13')
Example 8
    def __init__(self, **kwargs):
        if kwargs.get('flow_root'):
            self.flow_root = os.path.abspath(kwargs.get('flow_root'))

        if kwargs.get('test_root'):
            self.tests_root = os.path.abspath(kwargs.get('test_root'))
        else:
            self.tests_root = io.join_path(self.flow_root, 'tests')

        self.randomize_output = bool(kwargs.get('randomize_output', False))

        self.nproc = kwargs.get('nproc', [1, 2, 3])
        self.output_dir = os.path.abspath(
            io.join_path(self.tests_root, kwargs.get('tests_output',
                                                     '_output')))

        output_timestamp_dir = kwargs.get('output_timestamp_dir',
                                          '%Y-%m-%d_%H-%M-%S')
        if output_timestamp_dir:
            self.output_dir = io.join_path(
                self.output_dir,
                datetime.datetime.now().strftime(output_timestamp_dir))

        self.select_dir_rule = re.compile(
            kwargs.get('select_dir_rule', r'\d+_.*'))
        self.select_ini_rule = re.compile(kwargs.get('select_ini_rule', r'.*'))
        self.select_artifact_rule = re.compile(
            kwargs.get('select_artifact_rule', r'.*/profiler.*\.json$'))
        self.compare_result = kwargs.get('compare-result', False)
        self.save_stderr = kwargs.get('save-stderr', True)
        self.save_stdout = kwargs.get('save-stdout', True)

        f = kwargs.get("flow123d")
        m = kwargs.get("mpiexec")
        n = kwargs.get("ndiff")
        self.bins = {
            "flow123d": f or io.join_path(self.flow_root, 'build_tree', 'bin', 'flow123d'),
            "mpiexec": m or io.join_path(self.flow_root, 'build_tree', 'bin', 'mpiexec'),
            "ndiff": n or io.join_path(self.flow_root, 'bin', 'ndiff', 'ndiff.pl'),
        }

        all_tests = sorted(os.listdir(self.tests_root))

        # filter folders
        all_tests = lists.filter(
            all_tests,
            lambda x: os.path.isdir(io.join_path(self.tests_root, x)))
        self.selected_tests = lists.filter(
            all_tests, lambda x: self.select_dir_rule.match(x))
        self.tests = {
            test: self.test_template()
            for test in self.selected_tests
        }

        for val in [
                'flow_root',
                'tests_root',
                'selected_tests',
                'nproc',
                'output_dir',
                'randomize_output',
        ]:
            logger.debug("{name:20s}: {val:s}".format(name=val,
                                                      val=str(
                                                          getattr(self, val))))
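
The class this constructor belongs to is not named in the excerpt. Assuming a runner class built around it, the options would normally arrive as a dict: keys such as 'compare-result', 'save-stderr' and 'save-stdout' contain a hyphen, so they cannot be written as literal keyword arguments and have to be supplied via dict unpacking (for example from a parsed JSON/YAML config). A hypothetical usage sketch:

# FlowTestRunner is an assumed name; only __init__ is shown in the excerpt
config = {
    'flow_root': '/opt/flow123d',
    'nproc': [1, 2],
    'compare-result': True,
    'save-stdout': True,
    'save-stderr': False,
}
runner = FlowTestRunner(**config)
runner.run()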
Example 9
    def run(self):
        logger.debug("Running tests...")
        for test_name, test_option in self.tests.items():
            self.browse_test_config_files(test_name, test_option)
            logger.debug("{test_name}:{test_option[problem]}".format(
                test_name=test_name, test_option=test_option))
            self.setup_test_paths(test_name, test_option)
            executors = self.prepare_test_executor(test_name, test_option)

            for executor in executors:
                logger.debug(
                    "{test_name}:{e.environment[problem_config]}: running".
                    format(test_name=test_name, e=executor))
                environment = executor.environment

                # purge output directory
                if os.path.exists(environment['output_path']):
                    rmtree(environment['output_path'])

                # run test
                executor.run()

                # save info about test
                logger.debug(
                    "{test_name}:{e.environment[problem_config]}: generating report"
                    .format(test_name=test_name, e=executor))
                json_report = self.generate_report(executor)

                # get comparisons
                if self.compare_result:
                    logger.debug(
                        "{test_name}:{e.environment[problem_config]}: comparing output result"
                        .format(test_name=test_name, e=executor))
                    comparisons = self.compare_results_files(environment)
                    if not comparisons or max(pluck(comparisons,
                                                    'exit_code')) == 0:
                        json_report['correct'] = True
                    else:
                        json_report['correct'] = False
                        json_report['comparisons'] = [
                            ex.environment['file'] for ex in comparisons
                            if ex.exit_code != 0
                        ]

                if self.save_stderr:
                    json_report['stderr'] = executor.stderr
                if self.save_stdout:
                    json_report['stdout'] = executor.stdout

                # create name for json file based on settings
                info_json = executor.environment['info_json'].format(
                    **json_report)
                info_json = io.join_path(self.output_dir, info_json)
                mkdir(info_json, is_file=True)

                # merge artifacts (so far only one - profiler info)
                profilers = browse(environment['output_path'])
                profilers = lists.filter(
                    profilers, lambda x: self.select_artifact_rule.match(x))

                # merge report with profiler
                json_report = FlowJson.merge_json_info(json_report, profilers)
                json_report = FlowJson.clean_json(json_report)
                logger.debug(to_json(json_report, info_json))
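
`pluck(comparisons, 'exit_code')` gathers one attribute from every comparison object returned by compare_results_files. The helper is not part of this listing; a one-line stand-in (name and behaviour assumed) would be:

def pluck(items, attribute):
    # hypothetical stand-in: extract the named attribute from each item
    return [getattr(item, attribute) for item in items]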
Example 10
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author:   Jan Hybs

import logging
import traceback
import datetime
import os

from flowrunner.utils import io

logging.basicConfig(
    filename=io.join_path(os.getcwd(), "python.log"),
    level=logging.NOTSET,
    format="%(asctime)s %(name)s %(levelname)-4s: %(message)s",
)


class Logger(object):
    def __init__(self, name=__name__, debug=True):
        self.logger = logging.getLogger(name)
        self.level = 0

        # add console log
        if __debug__ or debug:
            stream = logging.StreamHandler()
            stream.setLevel(logging.NOTSET)
            self.logger.addHandler(stream)

        with open("python.log", "a+") as fp:
            fp.write("-" * 110)
Example 11
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author:   Jan Hybs

import logging
import traceback
import datetime
import os

from flowrunner.utils import io

logging.basicConfig(
    filename=io.join_path(os.getcwd(), 'python.log'),
    level=logging.NOTSET,
    format='%(asctime)s %(name)s %(levelname)-4s: %(message)s'
)


class Logger(object):
    def __init__(self, name=__name__, debug=True):
        self.logger = logging.getLogger(name)
        self.level = 0

        # add console log
        if __debug__ or debug:
            stream = logging.StreamHandler()
            stream.setLevel(logging.NOTSET)
            self.logger.addHandler(stream)

        # append a separator line so consecutive runs are easy to tell apart
        with open('python.log', 'a+') as fp:
            fp.write('-' * 110 + '\n')
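
Only __init__ is shown, so a usage sketch can rely solely on the wrapped standard-library logger; messages pass through the console handler added above and, via basicConfig, also end up in python.log:

# minimal usage sketch based only on what the excerpt shows
log = Logger(name='flowrunner.demo', debug=True)
log.logger.debug('runner initialised')
log.logger.warning('reference output is missing')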