Example #1
    def test_unequal_training_outputs(self):
        mm = MetaModelUnStructuredComp()
        mm.add_input('x', 0.)
        mm.add_input('y', 0.)
        mm.add_output('f', 0.)

        mm.options['default_surrogate'] = FloatKrigingSurrogate()

        prob = Problem()
        prob.model.add_subsystem('mm', mm)
        prob.setup(check=False)

        mm.options['train:x'] = [1.0, 1.0, 1.0, 1.0]
        mm.options['train:y'] = [1.0, 2.0, 3.0, 4.0]
        mm.options['train:f'] = [1.0, 1.0]

        prob['mm.x'] = 1.0
        prob['mm.y'] = 1.0

        with self.assertRaises(RuntimeError) as cm:
            prob.run_model()

        expected = (
            "MetaModelUnStructuredComp: Each variable must have the same number"
            " of training points. Expected 4 but found"
            " 2 points for 'f'.")
        self.assertEqual(str(cm.exception), expected)
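These examples are test methods excerpted from a unittest.TestCase for MetaModelUnStructuredComp, so the module-level imports are not shown. Below is a minimal sketch of what the snippets assume; the openmdao.api names are confirmed by the examples themselves, while the exact modules providing the test helpers assert_rel_error and TestLogger differ between OpenMDAO releases, so they are only indicated in comments.

import sys
from io import StringIO
import unittest

import numpy as np

from openmdao.api import Problem, Group, IndepVarComp, \
    MetaModelUnStructuredComp, FloatKrigingSurrogate, KrigingSurrogate, ResponseSurface

# Test helpers used in the assertions (import paths vary by OpenMDAO release):
#   assert_rel_error(test, actual, expected, tol) -- asserts agreement within a relative tolerance
#   TestLogger -- captures the warnings/errors emitted by prob.setup(check=True)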
Example #2
    def test_sin_metamodel_rmse(self):
        # create a MetaModelUnStructuredComp with Kriging, using the eval_rmse option
        sin_mm = MetaModelUnStructuredComp()
        sin_mm.add_input('x', 0.)
        sin_mm.add_output('f_x', 0.)

        sin_mm.options['default_surrogate'] = KrigingSurrogate(eval_rmse=True)

        # add it to a Problem
        prob = Problem()
        prob.model.add_subsystem('sin_mm', sin_mm)
        prob.setup(check=False)

        # train the surrogate and check predicted value
        sin_mm.options['train:x'] = np.linspace(0, 10, 20)
        sin_mm.options['train:f_x'] = np.sin(sin_mm.options['train:x'])

        prob['sin_mm.x'] = 2.1

        prob.run_model()

        assert_rel_error(self, prob['sin_mm.f_x'], np.sin(2.1), 1e-4)  # mean
        self.assertTrue(
            sin_mm._metadata('f_x')['rmse'] < 1e-5)  # std deviation
Example #3
    def test_array_inputs(self):
        mm = MetaModelUnStructuredComp()
        mm.add_input('x', np.zeros((2, 2)))
        mm.add_output('y1', 0.)
        mm.add_output('y2', 0.)

        mm.options['default_surrogate'] = FloatKrigingSurrogate()

        prob = Problem()
        prob.model.add_subsystem('mm', mm)
        prob.setup(check=False)

        mm.options['train:x'] = [[[1.0, 1.0], [1.0, 1.0]],
                                 [[2.0, 1.0], [1.0, 1.0]],
                                 [[1.0, 2.0], [1.0, 1.0]],
                                 [[1.0, 1.0], [2.0, 1.0]],
                                 [[1.0, 1.0], [1.0, 2.0]]]
        mm.options['train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
        mm.options['train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]

        prob['mm.x'] = [[1.0, 2.0], [1.0, 1.0]]
        prob.run_model()

        assert_rel_error(self, prob['mm.y1'], 1.0, .00001)
        assert_rel_error(self, prob['mm.y2'], 7.0, .00001)
Example #4
    def test_metamodel_feature(self):
        # create a MetaModelUnStructuredComp, specifying surrogates for the outputs
        import numpy as np

        from openmdao.api import Problem, MetaModelUnStructuredComp, FloatKrigingSurrogate

        trig = MetaModelUnStructuredComp()

        x_train = np.linspace(0, 10, 20)

        trig.add_input('x', 0., training_data=x_train)

        trig.add_output('sin_x',
                        0.,
                        training_data=.5 * np.sin(x_train),
                        surrogate=FloatKrigingSurrogate())
        trig.add_output('cos_x', 0., training_data=.5 * np.cos(x_train))

        trig.options['default_surrogate'] = FloatKrigingSurrogate()

        # add it to a Problem, run and check the predicted values
        prob = Problem()
        prob.model.add_subsystem('trig', trig)
        prob.setup(check=False)

        prob['trig.x'] = 2.1
        prob.run_model()

        assert_rel_error(self, prob['trig.sin_x'], .5 * np.sin(prob['trig.x']),
                         1e-4)
        assert_rel_error(self, prob['trig.cos_x'], .5 * np.cos(prob['trig.x']),
                         1e-4)
Example #5
    def test_vectorized_kriging(self):
        # Test for coverage (handling the rmse)
        size = 3

        # create a vectorized MetaModelUnStructuredComp for sine
        trig = MetaModelUnStructuredComp(
            vec_size=size, default_surrogate=KrigingSurrogate(eval_rmse=True))
        trig.add_input('x', np.zeros(size))
        trig.add_output('y', np.zeros(size))

        # add it to a Problem
        prob = Problem()
        prob.model.add_subsystem('trig', trig)
        prob.setup(check=False)

        # provide training data
        trig.options['train:x'] = np.linspace(0, 10, 20)
        trig.options['train:y'] = .5 * np.sin(trig.options['train:x'])

        # train the surrogate and check predicted value
        prob['trig.x'] = np.array([2.1, 3.2, 4.3])
        prob.run_model()
        assert_rel_error(self, prob['trig.y'],
                         np.array(.5 * np.sin(prob['trig.x'])), 1e-4)
        self.assertEqual(len(prob.model.trig._metadata('y')['rmse']), 3)
Example #6
    def test_vectorized(self):
        size = 3

        # create a vectorized MetaModelUnStructuredComp for sine
        trig = MetaModelUnStructuredComp(
            vec_size=size, default_surrogate=FloatKrigingSurrogate())
        trig.add_input('x', np.zeros(size))
        trig.add_output('y', np.zeros(size))

        # add it to a Problem
        prob = Problem()
        prob.model.add_subsystem('trig', trig)
        prob.setup(check=False)

        # provide training data
        trig.options['train:x'] = np.linspace(0, 10, 20)
        trig.options['train:y'] = .5 * np.sin(trig.options['train:x'])

        # train the surrogate and check predicted value
        prob['trig.x'] = np.array([2.1, 3.2, 4.3])
        prob.run_model()
        assert_rel_error(self, prob['trig.y'],
                         np.array(.5 * np.sin(prob['trig.x'])), 1e-4)

        data = prob.check_partials(out_stream=None)

        abs_errors = data['trig'][('y', 'x')]['abs error']
        self.assertTrue(len(abs_errors) > 0)
        for match in abs_errors:
            abs_error = float(match)
            self.assertTrue(abs_error < 1.e-6)
Example #7
    def test_sin_metamodel(self):
        # create a MetaModelUnStructuredComp for sine and add it to a Problem
        sin_mm = MetaModelUnStructuredComp()
        sin_mm.add_input('x', 0.)
        sin_mm.add_output('f_x', 0.)

        prob = Problem()
        prob.model.add_subsystem('sin_mm', sin_mm)

        # check that missing surrogate is detected in check_config
        testlogger = TestLogger()
        prob.setup(check=True, logger=testlogger)

        # Conclude setup but don't run model.
        prob.final_setup()

        msg = ("No default surrogate model is defined and the "
               "following outputs do not have a surrogate model:\n"
               "['f_x']\n"
               "Either specify a default_surrogate, or specify a "
               "surrogate model for all outputs.")
        self.assertEqual(len(testlogger.get('error')), 1)
        self.assertTrue(msg in testlogger.get('error')[0])

        # check that output with no specified surrogate gets the default
        sin_mm.options['default_surrogate'] = FloatKrigingSurrogate()
        prob.setup(check=False)
        surrogate = sin_mm._metadata('f_x').get('surrogate')
        self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate),
                        'sin_mm.f_x should get the default surrogate')

        # check error message when no training data is provided
        with self.assertRaises(RuntimeError) as cm:
            prob.run_model()

        msg = (
            "MetaModelUnStructuredComp: The following training data sets must be "
            "provided as options for sin_mm: ['train:x', 'train:f_x']")
        self.assertEqual(str(cm.exception), msg)

        # train the surrogate and check predicted value
        sin_mm.options['train:x'] = np.linspace(0, 10, 20)
        sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

        prob['sin_mm.x'] = 2.1

        prob.run_model()

        assert_rel_error(self, prob['sin_mm.f_x'],
                         .5 * np.sin(prob['sin_mm.x']), 1e-4)
Example #8
    def test_metamodel_feature_vector2d(self):
        # similar to previous example, but processes 3 inputs/outputs at a time
        import numpy as np

        from openmdao.api import Problem, MetaModelUnStructuredComp, FloatKrigingSurrogate

        size = 3

        # create a vectorized MetaModelUnStructuredComp for sine and cosine
        trig = MetaModelUnStructuredComp(
            vec_size=size, default_surrogate=FloatKrigingSurrogate())
        trig.add_input('x', np.zeros(size))
        trig.add_output('y', np.zeros((size, 2)))

        # add it to a Problem
        prob = Problem()
        prob.model.add_subsystem('trig', trig)
        prob.setup(check=False)

        # provide training data
        trig.options['train:x'] = np.linspace(0, 10, 20)
        trig.options['train:y'] = np.column_stack(
            (.5 * np.sin(trig.options['train:x']),
             .5 * np.cos(trig.options['train:x'])))

        # train the surrogate and check predicted value
        prob['trig.x'] = np.array([2.1, 3.2, 4.3])
        prob.run_model()
        assert_rel_error(
            self, prob['trig.y'],
            np.column_stack(
                (.5 * np.sin(prob['trig.x']), .5 * np.cos(prob['trig.x']))),
            1e-4)
Example #9
    def test_metamodel_feature_vector(self):
        # Like the simple sine example, but with an input of length n instead of a scalar.
        # The expected behavior is that the output is also of length n, with
        # each entry being an independent prediction.
        # It's as if you stamped out n copies of the metamodel, ran n scalars
        # through their inputs, and muxed all of those outputs into one contiguous
        # array, except that you skip the n copies and work directly on the array.
        import numpy as np

        from openmdao.api import Problem, MetaModelUnStructuredComp, FloatKrigingSurrogate

        size = 3

        # create a vectorized MetaModelUnStructuredComp for sine
        trig = MetaModelUnStructuredComp(
            vec_size=size, default_surrogate=FloatKrigingSurrogate())
        trig.add_input('x', np.zeros(size))
        trig.add_output('y', np.zeros(size))

        # add it to a Problem
        prob = Problem()
        prob.model.add_subsystem('trig', trig)
        prob.setup(check=False)

        # provide training data
        trig.options['train:x'] = np.linspace(0, 10, 20)
        trig.options['train:y'] = .5 * np.sin(trig.options['train:x'])

        # train the surrogate and check predicted value
        prob['trig.x'] = np.array([2.1, 3.2, 4.3])
        prob.run_model()
        assert_rel_error(self, prob['trig.y'],
                         np.array(.5 * np.sin(prob['trig.x'])), 1e-4)
Example #10
    def test_metamodel_feature2d(self):
        # similar to previous example, but output is 2d
        import numpy as np

        from openmdao.api import Problem, MetaModelUnStructuredComp, FloatKrigingSurrogate

        # create a MetaModelUnStructuredComp that predicts sine and cosine as an array
        trig = MetaModelUnStructuredComp(
            default_surrogate=FloatKrigingSurrogate())
        trig.add_input('x', 0)
        trig.add_output('y', np.zeros(2))

        # add it to a Problem
        prob = Problem()
        prob.model.add_subsystem('trig', trig)
        prob.setup(check=False)

        # provide training data
        trig.options['train:x'] = np.linspace(0, 10, 20)
        trig.options['train:y'] = np.column_stack(
            (.5 * np.sin(trig.options['train:x']),
             .5 * np.cos(trig.options['train:x'])))

        # train the surrogate and check predicted value
        prob['trig.x'] = 2.1
        prob.run_model()
        assert_rel_error(
            self, prob['trig.y'],
            np.append(.5 * np.sin(prob['trig.x']),
                      .5 * np.cos(prob['trig.x'])), 1e-4)
Example #11
    def test_derivatives(self):
        mm = MetaModelUnStructuredComp()
        mm.add_input('x', 0.)
        mm.add_output('f', 0.)

        mm.options['default_surrogate'] = FloatKrigingSurrogate()

        prob = Problem()
        prob.model.add_subsystem('p',
                                 IndepVarComp('x', 0.),
                                 promotes_outputs=['x'])
        prob.model.add_subsystem('mm', mm, promotes_inputs=['x'])
        prob.setup()

        mm.options['train:x'] = [0., .25, .5, .75, 1.]
        mm.options['train:f'] = [1., .75, .5, .25, 0.]

        prob['x'] = 0.125
        prob.run_model()

        data = prob.check_partials(out_stream=None)

        Jf = data['mm'][('f', 'x')]['J_fwd']
        Jr = data['mm'][('f', 'x')]['J_rev']

        assert_rel_error(self, Jf[0][0], -1., 1.e-3)
        assert_rel_error(self, Jr[0][0], -1., 1.e-3)

        abs_errors = data['mm'][('f', 'x')]['abs error']
        self.assertTrue(len(abs_errors) > 0)
        for match in abs_errors:
            abs_error = float(match)
            self.assertTrue(abs_error < 1.e-6)

        # Complex step
        prob.setup(force_alloc_complex=True)
        prob.model.mm.set_check_partial_options(wrt='*', method='cs')
        data = prob.check_partials(out_stream=None)

        abs_errors = data['mm'][('f', 'x')]['abs error']
        self.assertTrue(len(abs_errors) > 0)
        for match in abs_errors:
            abs_error = float(match)
            self.assertTrue(abs_error < 1.e-6)
Example #12
    def test_sin_metamodel_preset_data(self):
        # preset training data
        x = np.linspace(0, 10, 200)
        f_x = .5 * np.sin(x)

        # create a MetaModelUnStructuredComp for sine and add it to a Problem
        sin_mm = MetaModelUnStructuredComp()
        sin_mm.add_input('x', 0., training_data=x)
        sin_mm.add_output('f_x', 0., training_data=f_x)

        prob = Problem()
        prob.model.add_subsystem('sin_mm', sin_mm)

        # check that missing surrogate is detected in check_setup
        testlogger = TestLogger()
        prob.setup(check=True, logger=testlogger)

        # Conclude setup but don't run model.
        prob.final_setup()

        msg = ("No default surrogate model is defined and the "
               "following outputs do not have a surrogate model:\n"
               "['f_x']\n"
               "Either specify a default_surrogate, or specify a "
               "surrogate model for all outputs.")
        self.assertEqual(len(testlogger.get('error')), 1)
        self.assertTrue(msg in testlogger.get('error')[0])

        # check that output with no specified surrogate gets the default
        sin_mm.options['default_surrogate'] = FloatKrigingSurrogate()
        prob.setup(check=False)

        surrogate = sin_mm._metadata('f_x').get('surrogate')
        self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate),
                        'sin_mm.f_x should get the default surrogate')

        prob['sin_mm.x'] = 2.22

        prob.run_model()

        assert_rel_error(self, prob['sin_mm.f_x'],
                         .5 * np.sin(prob['sin_mm.x']), 1e-4)
Example #13
    def test_error_no_surrogate(self):
        # The error message from above should also be raised and readable even if the
        # user chooses to skip checking the model.
        sin_mm = MetaModelUnStructuredComp()
        sin_mm.add_input('x', 0.)
        sin_mm.add_output('f_x', 0.)

        prob = Problem()
        prob.model.add_subsystem('sin_mm', sin_mm)

        prob.setup(check=False)

        sin_mm.options['train:x'] = np.linspace(0, 10, 20)
        sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

        with self.assertRaises(RuntimeError) as cm:
            prob.run_model()

        msg = ("Metamodel 'sin_mm': No surrogate specified for output 'f_x'")
        self.assertEqual(str(cm.exception), msg)
Example #14
    def test_metamodel_vector_errors(self):
        # first dimension of all inputs/outputs must be 3
        mm = MetaModelUnStructuredComp(vec_size=3)

        with self.assertRaises(RuntimeError) as cm:
            mm.add_input('x', np.zeros(2))
        self.assertEqual(str(cm.exception),
                         "Metamodel: First dimension of input 'x' must be 3")

        with self.assertRaises(RuntimeError) as cm:
            mm.add_output('y', np.zeros(4))
        self.assertEqual(str(cm.exception),
                         "Metamodel: First dimension of output 'y' must be 3")
Example #15
    def test_warning_bug(self):
        # Make sure we don't warn that we are doing FD when the surrogate has analytic derivs.

        x_train = np.arange(0., 10.)
        y_train = np.arange(10., 20.)
        z_train = x_train**2 + y_train**2

        p = Problem()
        p.model = m = Group()

        params = IndepVarComp()
        params.add_output('x', val=0.)
        params.add_output('y', val=0.)

        m.add_subsystem('params', params, promotes=['*'])

        sm = MetaModelUnStructuredComp(default_surrogate=ResponseSurface())
        sm.add_input('x', val=0.)
        sm.add_input('y', val=0.)
        sm.add_output('z', val=0.)

        sm.options['train:x'] = x_train
        sm.options['train:y'] = y_train
        sm.options['train:z'] = z_train

        # With or without the line below, the result is the same;
        # only when method is set to 'fd' does the RuntimeWarning disappear.
        sm.declare_partials('*', '*', method='exact')

        m.add_subsystem('sm', sm, promotes=['*'])

        m.add_design_var('x', lower=0., upper=10.)
        m.add_design_var('y', lower=0., upper=10.)
        m.add_objective('z')

        p.setup(check=True)

        stderr = sys.stderr
        str_err = StringIO()
        sys.stderr = str_err
        try:
            p.final_setup()
        finally:
            sys.stderr = stderr

        output = str_err.getvalue()
        self.assertTrue('finite difference' not in output)
Example #16
    def test_basics(self):
        # create a metamodel component
        mm = MetaModelUnStructuredComp()

        mm.add_input('x1', 0.)
        mm.add_input('x2', 0.)

        mm.add_output('y1', 0.)
        mm.add_output('y2', 0., surrogate=FloatKrigingSurrogate())

        mm.options['default_surrogate'] = ResponseSurface()

        # add metamodel to a problem
        prob = Problem(model=Group())
        prob.model.add_subsystem('mm', mm)
        prob.setup(check=False)

        # check that surrogates were properly assigned
        surrogate = mm._metadata('y1').get('surrogate')
        self.assertTrue(isinstance(surrogate, ResponseSurface))

        surrogate = mm._metadata('y2').get('surrogate')
        self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))

        # populate training data
        mm.options['train:x1'] = [1.0, 2.0, 3.0]
        mm.options['train:x2'] = [1.0, 3.0, 4.0]
        mm.options['train:y1'] = [3.0, 2.0, 1.0]
        mm.options['train:y2'] = [1.0, 4.0, 7.0]

        # run problem for provided data point and check prediction
        prob['mm.x1'] = 2.0
        prob['mm.x2'] = 3.0

        self.assertTrue(mm.train)  # training will occur before 1st run
        prob.run_model()

        assert_rel_error(self, prob['mm.y1'], 2.0, .00001)
        assert_rel_error(self, prob['mm.y2'], 4.0, .00001)

        # run problem for interpolated data point and check prediction
        prob['mm.x1'] = 2.5
        prob['mm.x2'] = 3.5

        self.assertFalse(mm.train)  # training will not occur before 2nd run
        prob.run_model()

        assert_rel_error(self, prob['mm.y1'], 1.5934, .001)

        # change default surrogate, re-setup and check that metamodel re-trains
        mm.options['default_surrogate'] = FloatKrigingSurrogate()
        prob.setup(check=False)

        surrogate = mm._metadata('y1').get('surrogate')
        self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))

        self.assertTrue(mm.train)  # training will occur after re-setup
Example #17
    def test_derivatives_vectorized_multiD(self):
        vec_size = 5

        mm = MetaModelUnStructuredComp(vec_size=vec_size)
        mm.add_input('x', np.zeros((vec_size, 2, 3)))
        mm.add_input('xx', np.zeros((vec_size, 1)))
        mm.add_output('y', np.zeros((vec_size, 4, 2)))

        mm.options['default_surrogate'] = FloatKrigingSurrogate()

        prob = Problem()
        prob.model.add_subsystem('mm', mm)
        prob.setup(check=False)

        mm.options['train:x'] = [[[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]],
                                 [[2.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
                                 [[1.0, 1.0, 2.0], [1.0, 2.0, 1.0]],
                                 [[1.0, 1.0, 1.0], [2.0, 1.0, 1.0]],
                                 [[1.0, 2.0, 1.0], [1.0, 2.0, 2.0]]]

        mm.options['train:xx'] = [1.0, 2.0, 1.0, 1.0, 2.0]

        mm.options['train:y'] = [[[30.0, 10.0], [30.0, 25.0], [50.0, 10.7],
                                  [15.0, 25.7]],
                                 [[20.0, 40.0], [20.0, 40.0], [80.0, 30.3],
                                  [12.0, 20.7]],
                                 [[10.0, 70.0], [10.0, 70.0], [20.0, 10.9],
                                  [13.0, 15.7]],
                                 [[60.0, -30.0], [60.0, -30.0], [50.0, 50.5],
                                  [14.0, 10.7]],
                                 [[-20.0, 30.0], [-20.0, 30.0], [20.2, 10.0],
                                  [15.0, 60.7]]]

        prob['mm.x'] = [[[1.3, 1.3, 1.3], [1.5, 1.5, 1.5]],
                        [[1.4, 1.4, 1.4], [1.5, 1.5, 1.5]],
                        [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]],
                        [[1.5, 1.5, 1.5], [1.4, 1.4, 1.4]],
                        [[1.5, 1.5, 1.5], [1.3, 1.3, 1.3]]]

        prob['mm.xx'] = [[1.4], [1.5], [1.6], [1.5], [1.4]]

        prob.run_model()

        data = prob.check_partials(out_stream=None)

        abs_errors = data['mm'][('y', 'x')]['abs error']
        self.assertTrue(len(abs_errors) > 0)
        for match in abs_errors:
            abs_error = float(match)
            self.assertTrue(abs_error < 1.e-5)

        abs_errors = data['mm'][('y', 'xx')]['abs error']
        self.assertTrue(len(abs_errors) > 0)
        for match in abs_errors:
            abs_error = float(match)
            self.assertTrue(abs_error < 1.e-5)

        # Complex step
        prob.setup(force_alloc_complex=True)
        prob.model.mm.set_check_partial_options(wrt='*', method='cs')
        data = prob.check_partials(out_stream=None)

        abs_errors = data['mm'][('y', 'x')]['abs error']
        self.assertTrue(len(abs_errors) > 0)
        for match in abs_errors:
            abs_error = float(match)
            self.assertTrue(abs_error < 1.e-5)

        abs_errors = data['mm'][('y', 'xx')]['abs error']
        self.assertTrue(len(abs_errors) > 0)
        for match in abs_errors:
            abs_error = float(match)
            self.assertTrue(abs_error < 1.e-5)
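Distilled from the tests above, here is a minimal standalone sketch of the basic workflow the examples exercise: declare the metamodel's I/O, choose a surrogate, attach training data through the 'train:<name>' options, and run the model to get a prediction. It mirrors Example #7 outside of a test class and only summarizes the pattern; it is not additional library behavior.

import numpy as np
from openmdao.api import Problem, MetaModelUnStructuredComp, FloatKrigingSurrogate

# declare the metamodel's inputs/outputs and pick a default surrogate
sin_mm = MetaModelUnStructuredComp()
sin_mm.add_input('x', 0.)
sin_mm.add_output('f_x', 0.)
sin_mm.options['default_surrogate'] = FloatKrigingSurrogate()

# wrap the component in a Problem
prob = Problem()
prob.model.add_subsystem('sin_mm', sin_mm)
prob.setup(check=False)

# provide training data through the 'train:<name>' options
sin_mm.options['train:x'] = np.linspace(0, 10, 20)
sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

# the surrogate is trained on the first run after the training data changes
prob['sin_mm.x'] = 2.1
prob.run_model()
print(prob['sin_mm.f_x'])  # approximately .5 * np.sin(2.1)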