Example #1
    def test_warm_start(self):
        # create metamodel with warm_restart = True
        mm = MetaModelUnStructured()
        mm.add_input('x1', 0.)
        mm.add_input('x2', 0.)
        mm.add_output('y1', 0.)
        mm.add_output('y2', 0.)
        mm.default_surrogate = ResponseSurface()
        mm.warm_restart = True

        # add to problem
        prob = Problem()
        prob.model.add_subsystem('mm', mm)
        prob.setup(check=False)

        # provide initial training data
        mm.metadata['train:x1'] = [1.0, 3.0]
        mm.metadata['train:x2'] = [1.0, 4.0]
        mm.metadata['train:y1'] = [3.0, 1.0]
        mm.metadata['train:y2'] = [1.0, 7.0]

        # run against a data point and check result
        prob['mm.x1'] = 2.0
        prob['mm.x2'] = 3.0
        prob.run_model()

        assert_rel_error(self, prob['mm.y1'], 1.9085, .001)
        assert_rel_error(self, prob['mm.y2'], 3.9203, .001)

        # Add a 3rd training point; with warm_restart, this moves the
        # estimate for that point back to where it should be.
        mm.metadata['train:x1'] = [2.0]
        mm.metadata['train:x2'] = [3.0]
        mm.metadata['train:y1'] = [2.0]
        mm.metadata['train:y2'] = [4.0]

        mm.train = True  # currently need to explicitly tell the metamodel to re-train

        prob.run_model()
        assert_rel_error(self, prob['mm.y1'], 2.0, .00001)
        assert_rel_error(self, prob['mm.y2'], 4.0, .00001)
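Both examples are methods of a unittest.TestCase and rely on imports from the surrounding test module. A minimal scaffold that would let them run standalone is sketched below; the import paths are an assumption for an early OpenMDAO 2.x release (later versions renamed the component to MetaModelUnStructuredComp and moved assert_rel_error to openmdao.utils.assert_utils), so adjust them to your installed version.

import unittest

# NOTE: these import paths are an assumption for an early OpenMDAO 2.x release;
# adjust them if your version has renamed or relocated these names.
from openmdao.api import Problem, Group, MetaModelUnStructured, \
    ResponseSurface, FloatKrigingSurrogate
from openmdao.devtools.testutil import assert_rel_error


class MetaModelUnStructuredTestCase(unittest.TestCase):

    # paste test_warm_start and/or test_basics from the examples here
    pass


if __name__ == '__main__':
    unittest.main()
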
Example #3
    def test_basics(self):
        # create a metamodel component
        mm = MetaModelUnStructured()

        mm.add_input('x1', 0.)
        mm.add_input('x2', 0.)

        mm.add_output('y1', 0.)
        mm.add_output('y2', 0., surrogate=FloatKrigingSurrogate())

        mm.default_surrogate = ResponseSurface()

        # add metamodel to a problem
        prob = Problem(model=Group())
        prob.model.add_subsystem('mm', mm)
        prob.setup(check=False)

        # check that surrogates were properly assigned
        surrogate = mm._metadata('y1').get('surrogate')
        self.assertTrue(isinstance(surrogate, ResponseSurface))

        surrogate = mm._metadata('y2').get('surrogate')
        self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))

        # populate training data
        mm.metadata['train:x1'] = [1.0, 2.0, 3.0]
        mm.metadata['train:x2'] = [1.0, 3.0, 4.0]
        mm.metadata['train:y1'] = [3.0, 2.0, 1.0]
        mm.metadata['train:y2'] = [1.0, 4.0, 7.0]

        # run problem for provided data point and check prediction
        prob['mm.x1'] = 2.0
        prob['mm.x2'] = 3.0

        self.assertTrue(mm.train)   # training will occur before 1st run
        prob.run_model()

        assert_rel_error(self, prob['mm.y1'], 2.0, .00001)
        assert_rel_error(self, prob['mm.y2'], 4.0, .00001)

        # run problem for interpolated data point and check prediction
        prob['mm.x1'] = 2.5
        prob['mm.x2'] = 3.5

        self.assertFalse(mm.train)  # training will not occur before 2nd run
        prob.run_model()

        assert_rel_error(self, prob['mm.y1'], 1.5934, .001)

        # change default surrogate, re-setup and check that metamodel re-trains
        mm.default_surrogate = FloatKrigingSurrogate()
        prob.setup(check=False)

        surrogate = mm._metadata('y1').get('surrogate')
        self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))

        self.assertTrue(mm.train)  # training will occur after re-setup
        mm.warm_restart = True     # use existing training data

        prob['mm.x1'] = 2.5
        prob['mm.x2'] = 3.5

        prob.run_model()
        assert_rel_error(self, prob['mm.y1'], 1.5, 1e-2)
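For reference, the same API can be exercised outside the unittest harness. The sketch below is built only from the calls used in the examples above and assumes the same early OpenMDAO 2.x names; it trains a single surrogate and prints a prediction instead of asserting on it.

# Minimal usage sketch, assuming early OpenMDAO 2.x import names (see note above).
from openmdao.api import Problem, MetaModelUnStructured, ResponseSurface

# Build a metamodel with two inputs and one output (names are illustrative).
mm = MetaModelUnStructured()
mm.add_input('x1', 0.)
mm.add_input('x2', 0.)
mm.add_output('y1', 0.)
mm.default_surrogate = ResponseSurface()

prob = Problem()
prob.model.add_subsystem('mm', mm)
prob.setup(check=False)

# Training data is supplied through the 'train:' metadata keys.
mm.metadata['train:x1'] = [1.0, 2.0, 3.0]
mm.metadata['train:x2'] = [1.0, 3.0, 4.0]
mm.metadata['train:y1'] = [3.0, 2.0, 1.0]

# The surrogate is trained automatically on the first run after the
# training data changes.
prob['mm.x1'] = 2.5
prob['mm.x2'] = 3.5
prob.run_model()
print(prob['mm.y1'])  # surrogate prediction at (2.5, 3.5)
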