Exemplo n.º 1
0
 def setUp(self):
     """Prepare shared fixtures: env vars, a PipelineGenerator and the
     command lines expected for the two experiments."""
     super(TestMakePipeline, self).setUp()
     # dict() makes the same shallow copy as the identity
     # dict-comprehension it replaces, without the noise.
     self.env_vars = dict(self.config['before_run']['environment_variables'])
     self.runner = PipelineGenerator(self.config)
     # Expected rendered commands for the two design points.
     self.scripts1 = ['./script_a --option 0.1', './script_b 0.2']
     self.scripts2 = ['./script_a --option 0.3', './script_b 0.4']
Exemplo n.º 2
0
    def test_run_optimum_within_bounds(self):
        """One full iteration should converge on an optimum that lies
        inside the design space."""
        gen = PipelineGenerator(self.config_with_optimum(15, 3))
        designer = gen.new_designer_from_config(
            model_selection='manual', manual_formula=self.full_formula)

        design = designer.new_design()
        collection = gen.new_pipeline_collection(design)

        runner = self.__class__.executor(workdir=self.work_dir,
                                         base_command='{script}')
        response = runner.run_pipeline_collection(collection)
        optimum = designer.update_factors_from_response(response)

        self.assertTrue(optimum.converged)
        expected = {'FactorA': 15, 'FactorB': 3}
        for name in design.columns:
            self.assertTrue(np.isclose(optimum.predicted_optimum[name],
                                       expected[name]))
Exemplo n.º 3
0
    def test_screening_finds_optimum_at_origin(self):
        """With screening enabled, the designer should move into the
        optimization phase and predict the origin without converging yet."""
        gen = PipelineGenerator(self.config_with_optimum(0, 0))
        designer = gen.new_designer_from_config(
            skip_screening=False,
            model_selection='manual',
            manual_formula=self.full_formula)

        design = designer.new_design()
        collection = gen.new_pipeline_collection(design)

        runner = self.__class__.executor(workdir=self.work_dir,
                                         base_command='{script}')
        response = runner.run_pipeline_collection(collection)
        optimum = designer.update_factors_from_response(response)

        self.assertEqual(designer._phase, 'optimization')
        self.assertFalse(optimum.converged)
        expected = {'FactorA': 0, 'FactorB': 0}
        for name in design.columns:
            self.assertTrue(np.isclose(optimum.predicted_optimum[name],
                                       expected[name]))
Exemplo n.º 4
0
class TestMakePipeline(BaseGeneratorTestCase):
    """Tests for PipelineGenerator.new_pipeline_collection rendering."""

    def setUp(self):
        super(TestMakePipeline, self).setUp()
        # dict() makes the same shallow copy as the identity
        # dict-comprehension it replaces, without the noise.
        self.env_vars = dict(
            self.config['before_run']['environment_variables'])
        self.runner = PipelineGenerator(self.config)
        # Expected rendered commands for the two design points.
        self.scripts1 = ['./script_a --option 0.1', './script_b 0.2']
        self.scripts2 = ['./script_a --option 0.3', './script_b 0.4']

    def test_render_experiments_without_id_column(self):
        """Without an id column, experiments are keyed by integer position."""
        pipeline_collection = self.runner.new_pipeline_collection(
            self.dummy_design)
        expected = {
            0: self.scripts1,
            1: self.scripts2,
            'ENV_VARIABLES': self.env_vars,
            'SETUP_SCRIPTS': None,
            'RESULTS_FILE': self.config['results_file'],
            'WORKDIR': self.config['working_directory'],
            'JOBNAMES': ['ScriptWithOptions', 'ScriptWithSub']
        }
        self.assertDictEqual(expected, pipeline_collection)

    def test_render_experiments_with_id_column(self):
        """With an id column, experiments are keyed by that column's
        values ('A'/'B' here — presumably from the base-class dummy
        design; confirm against the fixture)."""
        new_collection = self.runner.new_pipeline_collection(
            self.dummy_design, 'Exp Id')
        self.maxDiff = None  # show the full dict diff on failure
        expected = {
            'A': self.scripts1,
            'B': self.scripts2,
            'ENV_VARIABLES': self.env_vars,
            'SETUP_SCRIPTS': None,
            'RESULTS_FILE': self.config['results_file'],
            'WORKDIR': self.config['working_directory'],
            'JOBNAMES': ['ScriptWithOptions', 'ScriptWithSub']
        }
        self.assertDictEqual(expected, new_collection)
Exemplo n.º 5
0
    def test_run_optimum_outside_bounds(self):
        """An optimum outside the design space must not be reached after a
        single constrained iteration."""
        gen = PipelineGenerator(self.config_with_optimum(5, 5))
        designer = gen.new_designer_from_config(
            model_selection='manual', manual_formula=self.full_formula)

        design = designer.new_design()
        collection = gen.new_pipeline_collection(design)

        runner = self.__class__.executor(workdir=self.work_dir,
                                         base_command='{script}')
        response = runner.run_pipeline_collection(collection)
        optimum = designer.update_factors_from_response(response)

        self.assertFalse(optimum.converged)
        expected = {'FactorA': 5, 'FactorB': 5}
        for name in design.columns:
            # The true optimum lies outside the bounds while the search is
            # constrained to the design space, so the prediction must not
            # match it after one iteration.
            self.assertFalse(np.isclose(optimum.predicted_optimum[name],
                                        expected[name]))
Exemplo n.º 6
0
from doepipeline.designer import BaseExperimentDesigner
import pandas as pd
import numpy as np


class ExampleDesigner(BaseExperimentDesigner):
    """Minimal designer stub: emits a seeded random 2-row design and bumps
    every value by one on each response update."""

    def __init__(self, *args, **kwargs):
        super(ExampleDesigner, self).__init__(*args, **kwargs)
        self.design = None
        # Fixed seed keeps new_design deterministic across runs.
        np.random.seed(123456789)

    def update_factors_from_response(self, response):
        # The response content is ignored; the design is simply shifted.
        self.design += 1
        return self.design

    def new_design(self, factor_settings=None):
        n_factors = len(self.factors)
        values = np.random.randint(10, size=(2, n_factors))
        self.design = pd.DataFrame(values,
                                   index=['A', 'B'],
                                   columns=self.factors.keys())
        return self.design


if __name__ == '__main__':
    # Demo of one full doepipeline iteration: build design -> render
    # pipeline -> execute -> feed results back to the designer.
    # NOTE(review): PipelineGenerator and LocalPipelineExecutor are not
    # imported anywhere in this file as shown — as written this block
    # raises NameError. The imports presumably exist in the complete
    # example; TODO confirm and add them.
    generator = PipelineGenerator.from_yaml('example_pipeline.yaml')
    designer = generator.new_designer_from_config(ExampleDesigner)
    design = designer.new_design()
    pipeline = generator.new_pipeline_collection(design)
    executor = LocalPipelineExecutor()
    results = executor.run_pipeline_collection(pipeline)
    design = designer.update_factors_from_response(results)
Exemplo n.º 7
0
    def setUp(self):
        """Build a two-step pipeline config, a two-experiment design and
        the rendered pipeline collection shared by the tests."""
        # Step one passes FactorA as a command-line option.
        first_step = {
            'script': './script_a',
            'factors': {
                'FactorA': {
                    'factor_name': 'Factor A',
                    'script_option': '--option'
                }
            }
        }
        # Step two substitutes FactorB into the command template.
        second_step = {
            'script': './script_b {% FactorB %}',
            'factors': {
                'FactorB': {
                    'factor_name': 'Factor B',
                    'substitute': True
                }
            }
        }
        factor_spec = {
            'FactorA': {'min': 0, 'max': 1, 'low_init': 0, 'high_init': .2},
            'FactorB': {'min': 0, 'max': 1, 'low_init': .1, 'high_init': .3}
        }
        spec = {
            'type': 'CCC',
            'factors': factor_spec,
            'responses': {'ResponseA': {'criterion': 'maximize'}}
        }

        self.work_dir = os.path.join(os.getcwd(), 'work_dir')
        os.makedirs(self.work_dir, exist_ok=True)

        self.env_vars = {'MYPATH': '~/a/path'}
        self.outfile = 'my_results.txt'
        self.config = {
            'working_directory': self.work_dir,
            'design': spec,
            'results_file': self.outfile,
            'before_run': {'environment_variables': self.env_vars},
            'pipeline': ['ScriptOne', 'ScriptTwo'],
            'ScriptOne': first_step,
            'ScriptTwo': second_step
        }
        self.design = pd.DataFrame(
            [['One', .1, .2], ['Two', .3, .4]],
            columns=['Exp Id', 'FactorA', 'FactorB'])
        self.generator = PipelineGenerator(copy.deepcopy(self.config))
        self.pipeline = self.generator.new_pipeline_collection(
            self.design, 'Exp Id')
Exemplo n.º 8
0
class ExecutorTestCase(unittest.TestCase):
    """Shared fixture for executor tests.

    Subclasses select the executor under test via the class attributes
    below.
    """

    executor_class = MockBaseExecutor
    init_args = tuple()
    init_kwargs = dict()

    def setUp(self):
        """Build a two-step pipeline config, a two-experiment design and
        the rendered pipeline collection shared by the tests."""
        # Step one passes FactorA as a command-line option.
        first_step = {
            'script': './script_a',
            'factors': {
                'FactorA': {
                    'factor_name': 'Factor A',
                    'script_option': '--option'
                }
            }
        }
        # Step two substitutes FactorB into the command template.
        second_step = {
            'script': './script_b {% FactorB %}',
            'factors': {
                'FactorB': {
                    'factor_name': 'Factor B',
                    'substitute': True
                }
            }
        }
        factor_spec = {
            'FactorA': {'min': 0, 'max': 1, 'low_init': 0, 'high_init': .2},
            'FactorB': {'min': 0, 'max': 1, 'low_init': .1, 'high_init': .3}
        }
        spec = {
            'type': 'CCC',
            'factors': factor_spec,
            'responses': {'ResponseA': {'criterion': 'maximize'}}
        }

        self.work_dir = os.path.join(os.getcwd(), 'work_dir')
        os.makedirs(self.work_dir, exist_ok=True)

        self.env_vars = {'MYPATH': '~/a/path'}
        self.outfile = 'my_results.txt'
        self.config = {
            'working_directory': self.work_dir,
            'design': spec,
            'results_file': self.outfile,
            'before_run': {'environment_variables': self.env_vars},
            'pipeline': ['ScriptOne', 'ScriptTwo'],
            'ScriptOne': first_step,
            'ScriptTwo': second_step
        }
        self.design = pd.DataFrame(
            [['One', .1, .2], ['Two', .3, .4]],
            columns=['Exp Id', 'FactorA', 'FactorB'])
        self.generator = PipelineGenerator(copy.deepcopy(self.config))
        self.pipeline = self.generator.new_pipeline_collection(
            self.design, 'Exp Id')

    @mock.patch('os.makedirs')
    def test_execute_commands_returns_tuple(self, *args):
        """execute_command yields a length-3 sequence of strings."""
        runner = self.executor_class(*self.init_args, **self.init_kwargs)
        outcome = runner.execute_command('test')
        self.assertIsInstance(outcome, Sequence)
        self.assertEqual(3, len(outcome))
        for item in outcome:
            self.assertIsInstance(item, str)
Exemplo n.º 9
0
 def test_bad_config_raises_valueerror(self):
     """A pipeline config with a missing step must be rejected."""
     # Create bad pipeline by "skipping" one step in pipeline.
     bad_config = copy.deepcopy(self.config)
     bad_config['pipeline'].pop()
     # assertRaises takes the callable plus its arguments directly;
     # the lambda wrapper was unnecessary.
     self.assertRaises(ValueError, PipelineGenerator, bad_config)
Exemplo n.º 10
0
 def test_bad_yaml_raises_valueerror(self):
     """A non-path, non-file YAML argument must be rejected."""
     # assertRaises takes the callable plus its arguments directly;
     # the lambda wrapper was unnecessary.
     self.assertRaises(ValueError, PipelineGenerator.from_yaml, {})
Exemplo n.º 11
0
 def test_creating_from_yaml_doesnt_crash(self):
     """Smoke test: from_yaml accepts both a path and an open file."""
     PipelineGenerator.from_yaml(self.yaml_path)
     with open(self.yaml_path) as stream:
         PipelineGenerator.from_yaml(stream)
Exemplo n.º 12
0
 def test_creating_doesnt_crash(self):
     """Smoke test: constructing a generator from a valid config works."""
     PipelineGenerator(self.config)