예제 #1
0
 def setUp(self):
     """Prepare a runner and expected script command lists for the tests.

     Pulls the environment-variable mapping out of the shared config
     (``self.config`` is provided by the parent fixture) and builds the
     ``PipelineGenerator`` under test.
     """
     super(TestMakePipeline, self).setUp()
     # dict() copies the mapping directly; the original identity
     # comprehension (with a stray line-continuation backslash) added
     # nothing over a plain shallow copy.
     self.env_vars = dict(
         self.config['before_run']['environment_variables'])
     self.runner = PipelineGenerator(self.config)
     # Expected rendered commands for the two design rows.
     self.scripts1 = ['./script_a --option 0.1', './script_b 0.2']
     self.scripts2 = ['./script_a --option 0.3', './script_b 0.4']
예제 #2
0
    def test_run_optimum_within_bounds(self):
        """An optimum inside the design bounds is found and flagged converged."""
        expected = {'FactorA': 15, 'FactorB': 3}

        gen = PipelineGenerator(self.config_with_optimum(15, 3))
        planner = gen.new_designer_from_config(
            model_selection='manual', manual_formula=self.full_formula)
        layout = planner.new_design()
        collection = gen.new_pipeline_collection(layout)

        runner = self.__class__.executor(workdir=self.work_dir,
                                         base_command='{script}')
        responses = runner.run_pipeline_collection(collection)
        outcome = planner.update_factors_from_response(responses)

        self.assertTrue(outcome.converged)
        # Every factor's predicted optimum should match the planted one.
        for name in layout.columns:
            self.assertTrue(
                np.isclose(outcome.predicted_optimum[name], expected[name]))
예제 #3
0
    def test_screening_finds_optimum_at_origin(self):
        """With screening enabled, an origin optimum is located but not converged."""
        gen = PipelineGenerator(self.config_with_optimum(0, 0))
        planner = gen.new_designer_from_config(
            skip_screening=False,
            model_selection='manual',
            manual_formula=self.full_formula)
        layout = planner.new_design()
        collection = gen.new_pipeline_collection(layout)

        runner = self.__class__.executor(workdir=self.work_dir,
                                         base_command='{script}')
        responses = runner.run_pipeline_collection(collection)
        outcome = planner.update_factors_from_response(responses)

        # Screening should have handed off to the optimization phase,
        # but a single iteration is not enough to converge.
        self.assertEqual(planner._phase, 'optimization')
        self.assertFalse(outcome.converged)

        expected = {'FactorA': 0, 'FactorB': 0}
        for name in layout.columns:
            self.assertTrue(
                np.isclose(outcome.predicted_optimum[name], expected[name]))
예제 #4
0
    def test_run_optimum_outside_bounds(self):
        """An optimum planted outside the bounds is not reached in one iteration."""
        gen = PipelineGenerator(self.config_with_optimum(5, 5))

        planner = gen.new_designer_from_config(
            model_selection='manual', manual_formula=self.full_formula)
        layout = planner.new_design()
        collection = gen.new_pipeline_collection(layout)

        runner = self.__class__.executor(workdir=self.work_dir,
                                         base_command='{script}')
        responses = runner.run_pipeline_collection(collection)
        outcome = planner.update_factors_from_response(responses)

        self.assertFalse(outcome.converged)
        expected = {'FactorA': 5, 'FactorB': 5}
        for name in layout.columns:
            # The true optimum lies outside the bounds, and the search is
            # constrained to the design space, so one iteration must NOT
            # land on the planted values.
            self.assertFalse(
                np.isclose(outcome.predicted_optimum[name], expected[name]))
예제 #5
0
    def setUp(self):
        """Build a two-script pipeline fixture with a CCC design and run it
        through ``PipelineGenerator.new_pipeline_collection``."""
        # Step one passes FactorA as a command-line option.
        first_step = {
            'script': './script_a',
            'factors': {
                'FactorA': {'factor_name': 'Factor A',
                            'script_option': '--option'}
            }
        }
        # Step two substitutes FactorB into a template placeholder.
        second_step = {
            'script': './script_b {% FactorB %}',
            'factors': {
                'FactorB': {'factor_name': 'Factor B',
                            'substitute': True}
            }
        }
        design_spec = {
            'type': 'CCC',
            'factors': {
                'FactorA': {'min': 0, 'max': 1,
                            'low_init': 0, 'high_init': .2},
                'FactorB': {'min': 0, 'max': 1,
                            'low_init': .1, 'high_init': .3}
            },
            'responses': {'ResponseA': {'criterion': 'maximize'}}
        }

        self.work_dir = os.path.join(os.getcwd(), 'work_dir')
        os.makedirs(self.work_dir, exist_ok=True)

        self.env_vars = {'MYPATH': '~/a/path'}
        self.outfile = 'my_results.txt'
        self.config = {
            'working_directory': self.work_dir,
            'design': design_spec,
            'results_file': self.outfile,
            'before_run': {'environment_variables': self.env_vars},
            'pipeline': ['ScriptOne', 'ScriptTwo'],
            'ScriptOne': first_step,
            'ScriptTwo': second_step
        }
        # Two-row design: one experiment per row, keyed by 'Exp Id'.
        self.design = pd.DataFrame(
            [['One', .1, .2],
             ['Two', .3, .4]],
            columns=['Exp Id', 'FactorA', 'FactorB'])
        self.generator = PipelineGenerator(copy.deepcopy(self.config))
        self.pipeline = self.generator.new_pipeline_collection(
            self.design, 'Exp Id')
예제 #6
0
 def test_bad_config_raises_valueerror(self):
     """A config whose pipeline list omits a declared step must be rejected."""
     # Create bad pipeline by "skipping" one step in pipeline
     bad_config = copy.deepcopy(self.config)
     bad_config['pipeline'].pop()
     # assertRaises takes the callable and its arguments directly;
     # wrapping the call in a lambda was redundant.
     self.assertRaises(ValueError, PipelineGenerator, bad_config)
예제 #7
0
 def test_creating_doesnt_crash(self):
     """Constructing a PipelineGenerator from a valid config must not raise."""
     PipelineGenerator(self.config)