    def test_norecorder(self):
        logging.debug('')
        logging.debug('test_norecorder')

        # Check response to no recorder set.
        self.model.driver.iterator = ListCaseIterator([])
        self.model.run()

    def test_nooutput(self):
        logging.debug('')
        logging.debug('test_nooutput')

        # Create cases with missing output 'dc.sum_z'.
        cases = []
        for i in range(2):
            inputs = [('driven.x', numpy_random.normal(size=4)),
                      ('driven.y', numpy_random.normal(size=10))]
            outputs = [('driven.rosen_suzuki', None),
                       ('driven.sum_z', None)]
            cases.append(Case(inputs, outputs))

        self.model.driver.iterator = ListCaseIterator(cases)
        results = ListCaseRecorder()
        self.model.driver.recorders = [results]
        self.model.driver.error_policy = 'RETRY'

        self.model.run()

        self.assertEqual(len(results), len(cases))
        msg = "driver: Exception getting case outputs: " \
            "driven: 'DrivenComponent' object has no attribute 'sum_z'"
        for case in results.cases:
            self.assertEqual(case.msg, msg)

    def run_cases(self, sequential, forced_errors=False, retry=True):
        """ Evaluate cases, either sequentially or across multiple servers. """
        self.model.driver.sequential = sequential
        if not sequential:
            # Try to ensure more than one worker is used.
            self.model.driven.sleep = 0.2
        self.model.driver.iterator = ListCaseIterator(self.cases)
        results = ListCaseRecorder()
        self.model.driver.recorders = [results]
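        # 'RETRY' keeps going and records failed cases; 'ABORT' raises on the first error.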
        self.model.driver.error_policy = 'RETRY' if retry else 'ABORT'

        if retry:
            self.model.run()
            self.assertEqual(len(results), len(self.cases))
            self.verify_results(forced_errors)
        else:
            try:
                self.model.run()
            except Exception as err:
                startmsg = 'driver: Run aborted: Traceback '
                endmsg = 'driven: Forced error'
                self.assertEqual(str(err)[:len(startmsg)], startmsg)
                self.assertEqual(str(err)[-len(endmsg):], endmsg)
            else:
                self.fail("Exception expected")

    def test_noresource(self):
        logging.debug('')
        logging.debug('test_noresource')

        # Check response to unsupported resource.
        self.model.driver.extra_reqs = {'no-such-resource': 0}
        self.model.driver.sequential = False
        self.model.driver.iterator = ListCaseIterator([])
        assert_raises(self, 'self.model.run()', globals(), locals(),
                      RuntimeError,
                      'driver: No servers supporting required resources')

    def test_output_errors(self):
        # Request a nonexistent output ('driven.foobar'); with error_policy
        # 'RETRY' the run should complete rather than abort.
        inputs = [('driven.x', numpy_random.normal(size=4)),
                  ('driven.y', numpy_random.normal(size=10)),
                  ('driven.raise_error', False),
                  ('driven.stop_exec', False)]
        outputs = ['driven.rosen_suzuki', 'driven.foobar']
        self.cases = [Case(inputs, outputs, label='1')]
        self.model.driver.sequential = True
        self.model.driver.iterator = ListCaseIterator(self.cases)
        self.model.driver.recorders = [ListCaseRecorder()]
        self.model.driver.error_policy = 'RETRY'
        self.model.run()

    def test_2d_filter2(self):
        pf = ParetoFilter()
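        # Eight (x, y) grid points; only the non-dominated points should end up
        # in pf.pareto_set after filtering on both criteria.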
        x = [1, 1, 2, 2, 2, 3, 3, 3]
        y = [2, 3, 1, 2, 3, 1, 2, 3]
        cases = []
        for x_0, y_0 in zip(x, y):
            cases.append(Case(outputs=[("x", x_0), ("y", y_0)]))

        pf.case_sets = [ListCaseIterator(cases)]
        pf.criteria = ['x', 'y']
        pf.execute()

        x_p, y_p = zip(*[(case['x'], case['y']) for case in pf.pareto_set])

    def test_1d_filter(self):
        pf = ParetoFilter()
        x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        cases = [Case(outputs=[("x", x_0)]) for x_0 in x]
        pf.case_sets = [ListCaseIterator(cases)]
        pf.criteria = ['x']
        pf.execute()

        # With a single criterion, only the smallest value is non-dominated.
        x_p = [case['x'] for case in pf.pareto_set]
        x_dom = [case['x'] for case in pf.dominated_set]

        self.assertEqual([1], x_p)
        self.assertEqual([2, 3, 4, 5, 6, 7, 8, 9, 10], x_dom)

    def test_save_load(self):
        logging.debug('')
        logging.debug('test_save_load')

        self.model.driver.iterator = ListCaseIterator(self.cases)
        results = ListCaseRecorder()
        self.model.driver.recorders = [results]

        # Set local dir in case we're running in a different directory.
        py_dir = self.directory

        # Exercise check_save_load().
        retcode = check_save_load(self.model, py_dir=py_dir)
        self.assertEqual(retcode, 0)

    def test_bad_case_set(self):
        pf = ParetoFilter()
        x = [1, 1, 2, 2, 2, 3, 3, 3]
        y = [2, 3, 1, 2, 3, 1, 2, 3]
        cases = []
        for x_0, y_0 in zip(x, y):
            cases.append(Case(outputs=[("x", x_0), ("y", y_0)]))

        # Criteria that match none of the recorded outputs should raise ValueError.
        pf.case_sets = [ListCaseIterator(cases)]
        pf.criteria = ['z', 'w']
        try:
            pf.execute()
        except ValueError as err:
            self.assertEqual(str(err), ": no cases provided had all of the outputs "
                             "matching the provided criteria, ['z', 'w']")
        else:
            self.fail('Expected ValueError')
Example #10
    def setUp(self):
        # Build 20 cases; the last ten carry an error message.
        cases = []
        for i in range(20):
            inputs = [('comp1.x', float(i)), ('comp1.y', i * 2.)]
            outputs = [('comp1.z', i * 1.5),
                       ('comp2.normal', NormalDistribution(float(i), 0.5))]
            if i < 10:
                msg = ''
            else:
                msg = 'had an error'
            case = Case(inputs=inputs, msg=msg)
            case._outputs = dict(outputs)
            cases.append(case)
        self.caseiter = ListCaseIterator(cases)
        self.varnames = ['comp2.normal', 'comp1.x', 'comp1.z']
Example #11
    def test_warm_start(self):
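        # Train one MetaModel at two points, record those points as Cases, then
        # warm-start a second MetaModel from the cases (via ListCaseIterator) and
        # check that it reproduces the plain Simple() component.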
        metamodel = MetaModel()
        metamodel.name = 'meta'
        metamodel.surrogate = {'default': KrigingSurrogate()}
        metamodel.model = Simple()
        metamodel.recorder = DumbRecorder()
        simple = Simple()

        cases = []

        metamodel.a = 1.
        metamodel.b = 2.
        metamodel.train_next = True
        metamodel.run()
        inputs = [('meta2.a', metamodel.a), ('meta2.b', metamodel.b)]
        outputs = [('meta2.c', metamodel.c.mu), ('meta2.d', metamodel.d.mu)]
        cases.append(Case(inputs=inputs, outputs=outputs))

        metamodel.a = 3.
        metamodel.b = 5.
        metamodel.train_next = True
        metamodel.run()
        inputs = [('meta2.a', metamodel.a), ('meta2.b', metamodel.b)]
        outputs = [('meta2.c', metamodel.c.mu), ('meta2.d', metamodel.d.mu)]
        cases.append(Case(inputs=inputs, outputs=outputs))

        case_iter = ListCaseIterator(cases)

        metamodel2 = MetaModel()
        metamodel2.name = 'meta2'
        metamodel2.surrogate = {'default': KrigingSurrogate()}
        metamodel2.model = Simple()
        metamodel2.recorder = DumbRecorder()
        metamodel2.warm_start_data = case_iter

        metamodel2.a = simple.a = 1
        metamodel2.b = simple.b = 2
        metamodel2.run()
        simple.run()

        self.assertEqual(metamodel2.c.getvalue(), 3.)
        self.assertEqual(metamodel2.d.getvalue(), -1.)
        self.assertEqual(metamodel2.c.getvalue(), simple.c)
        self.assertEqual(metamodel2.d.getvalue(), simple.d)

    def test_run_stop_step_resume(self):
        logging.debug('')
        logging.debug('test_run_stop_step_resume')

        self.generate_cases()
        stop_case = self.cases[1]  # Stop after 2 cases run.
        stop_case['driven.stop_exec'] = True
        self.model.driver.iterator = ListCaseIterator(self.cases)
        results = ListCaseRecorder()
        self.model.driver.recorders = [results]
        self.model.driver.sequential = True

        try:
            self.model.run()
        except RunStopped:
            self.assertEqual(len(results), 2)
            self.verify_results()
        else:
            self.fail('Expected RunStopped')

        self.model.driver.step()
        self.assertEqual(len(results), 3)
        self.verify_results()

        self.model.driver.step()
        self.assertEqual(len(results), 4)
        self.verify_results()

        self.model.driver.resume()
        self.assertEqual(len(results), len(self.cases))
        self.verify_results()

        try:
            self.model.driver.resume()
        except RuntimeError as exc:
            self.assertEqual(str(exc), 'driver: Run already complete')
        else:
            self.fail('Expected RuntimeError')
Example #13
    def get_iterator(self):
        """Return a ListCaseIterator over the stored cases."""
        return ListCaseIterator(self.cases)