Example #1
    def test_single_input_parameter(self):

        # Model
        interp = om.MetaModelUnStructuredComp()

        # Training Data
        x_train = np.linspace(0, 10, 20)
        y_train = np.linspace(0, 20, 20)

        # Inputs
        interp.add_input('simple_x', 0., training_data=x_train)

        # Outputs
        interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

        # Surrogate Model
        interp.options['default_surrogate'] = om.ResponseSurface()

        prob = om.Problem()
        prob.model.add_subsystem('interp', interp)
        prob.setup()

        with self.assertRaises(Exception) as context:
            viz = MetaModelVisualization(interp)

        msg = 'Must have more than one input value'
        self.assertTrue(msg in str(context.exception))
Example #2
    def test_response_surface(self):
        import numpy as np

        import openmdao.api as om

        prob = om.Problem()

        prob.model.add_subsystem('p', om.IndepVarComp('x', 2.1))

        sin_mm = om.MetaModelUnStructuredComp()
        sin_mm.add_input('x', 0.)
        sin_mm.add_output('f_x', 0., surrogate=om.ResponseSurface())

        prob.model.add_subsystem('sin_mm', sin_mm)

        prob.model.connect('p.x', 'sin_mm.x')

        prob.setup(check=True)

        # train the surrogate and check predicted value
        sin_mm.options['train:x'] = np.linspace(0, 3.14, 20)
        sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

        prob['sin_mm.x'] = 2.1

        prob.run_model()

        assert_near_equal(prob['sin_mm.f_x'], .5 * np.sin(prob['sin_mm.x']),
                          2e-3)
Example #3
    def test_missing_training_data_in_parameter(self):
        
        # Model
        interp = om.MetaModelUnStructuredComp()

        # Training Data
        x_train = np.linspace(0,10,20)
        y_train = np.linspace(0,20,20)

        # Inputs
        interp.add_input('simple_x', 0., training_data=x_train)
        interp.add_input('sin_x', 0.)

        # Outputs
        interp.add_output('cos_x', 0., training_data=.5*np.cos(y_train))

        # Surrogate Model
        interp.options['default_surrogate'] = om.ResponseSurface()

        prob = om.Problem()
        prob.model.add_subsystem('interp', interp)
        prob.setup()

        with self.assertRaises(Exception) as context:
            viz = UnstructuredMetaModelVisualization(prob, interp)

        msg = "No training data present for one or more parameters"
        self.assertTrue(msg in str(context.exception))
Example #4
    def test_not_top_level_prob(self):
        # Model
        interp = om.MetaModelUnStructuredComp()

        # Training Data
        x_train1 = np.linspace(0, 10, 20)
        x_train2 = np.linspace(0, 20, 20)
        x_train3 = np.linspace(0, 30, 20)
        x_train4 = np.linspace(0, 40, 20)
        y_train = np.linspace(10, 20, 20)

        # Inputs
        interp.add_input('input_1', 0., training_data=x_train1)
        interp.add_input('input_2', 0., training_data=x_train2)
        interp.add_input('input_3', 0., training_data=x_train3)
        interp.add_input('input_4', 0., training_data=x_train4)

        # Outputs
        interp.add_output('output_1', 0., training_data=.5 * np.cos(y_train))
        interp.add_output('output_2', 0., training_data=.5 * np.sin(y_train))

        # Surrogate Model
        interp.options['default_surrogate'] = om.ResponseSurface()

        prob = om.Problem(model=interp)
        prob.setup()
        prob.final_setup()
Example #5
    def test_make_predictions(self):

        # Model
        interp = om.MetaModelUnStructuredComp()

        # Training Data
        x_train = np.linspace(0, 10, 20)
        y_train = np.linspace(10, 20, 20)

        # Inputs
        interp.add_input('simple_x', 0., training_data=x_train)
        interp.add_input('sin_x', 0., training_data=x_train)

        # Outputs
        interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

        # Surrogate Model
        interp.options['default_surrogate'] = om.ResponseSurface()

        prob = om.Problem()
        prob.model.add_subsystem('interp', interp)
        prob.setup()

        viz = MetaModelVisualization(interp)
        resolution = 50
        data = {
            'simple_x': np.array([np.random.rand(resolution**2, 1)]),
            'sin_x': np.array([np.random.rand(resolution**2, 1)])
        }
        pred_array = viz._make_predictions(data)

        self.assertTrue(pred_array.shape == (resolution**2, 1))
Example #6
    def test_training_point_array_for_nan_values(self):

        # Model
        interp = om.MetaModelUnStructuredComp()

        # Training Data
        x_train = np.linspace(0, 10, 20)
        y_train = np.linspace(0, 20, 20)

        # Inputs
        interp.add_input('x', 0., training_data=x_train)
        interp.add_input('y', 0., training_data=x_train)

        # Outputs
        interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

        # Surrogate Model
        interp.options['default_surrogate'] = om.ResponseSurface()

        prob = om.Problem()
        prob.model.add_subsystem('interp', interp)
        prob.setup()

        viz = MetaModelVisualization(interp)
        training_points_output = viz._unstructured_training_points()

        for i in range(0, 2):
            self.assertFalse(np.any(np.isnan(training_points_output[:, i])))
Example #7
    def test_training_point_array_width(self):

        # Model
        interp = om.MetaModelUnStructuredComp()

        # Training Data
        x_train = np.linspace(0, 10, 20)
        y_train = np.linspace(0, 20, 20)

        # Inputs
        interp.add_input('x', 0., training_data=x_train)
        interp.add_input('y', 0., training_data=x_train)

        # Outputs
        interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

        # Surrogate Model
        interp.options['default_surrogate'] = om.ResponseSurface()

        prob = om.Problem()
        prob.model.add_subsystem('interp', interp)
        prob.setup()

        viz = MetaModelVisualization(interp)
        training_points_output = viz._unstructured_training_points()

        self.assertTrue(training_points_output.shape[1] == 2)
Example #8
    def test_basics(self):
        # create a metamodel component
        mm = om.MetaModelUnStructuredComp()

        mm.add_input('x1', 0.)
        mm.add_input('x2', 0.)

        mm.add_output('y1', 0.)
        mm.add_output('y2', 0., surrogate=om.KrigingSurrogate())

        mm.options['default_surrogate'] = om.ResponseSurface()

        # add metamodel to a problem
        prob = om.Problem()
        prob.model.add_subsystem('mm', mm)
        prob.setup()

        # check that surrogates were properly assigned
        surrogate = mm._metadata('y1').get('surrogate')
        self.assertTrue(isinstance(surrogate, om.ResponseSurface))

        surrogate = mm._metadata('y2').get('surrogate')
        self.assertTrue(isinstance(surrogate, om.KrigingSurrogate))

        # populate training data
        mm.options['train:x1'] = [1.0, 2.0, 3.0]
        mm.options['train:x2'] = [1.0, 3.0, 4.0]
        mm.options['train:y1'] = [3.0, 2.0, 1.0]
        mm.options['train:y2'] = [1.0, 4.0, 7.0]

        # run problem for provided data point and check prediction
        prob['mm.x1'] = 2.0
        prob['mm.x2'] = 3.0

        self.assertTrue(mm.train)  # training will occur before 1st run
        prob.run_model()

        assert_near_equal(prob['mm.y1'], 2.0, .00001)
        assert_near_equal(prob['mm.y2'], 4.0, .00001)

        # run problem for interpolated data point and check prediction
        prob['mm.x1'] = 2.5
        prob['mm.x2'] = 3.5

        self.assertFalse(mm.train)  # training will not occur before 2nd run
        prob.run_model()

        assert_near_equal(prob['mm.y1'], 1.5934, .001)

        # change default surrogate, re-setup and check that metamodel re-trains
        mm.options['default_surrogate'] = om.KrigingSurrogate()
        prob.setup()

        surrogate = mm._metadata('y1').get('surrogate')
        self.assertTrue(isinstance(surrogate, om.KrigingSurrogate))

        self.assertTrue(mm.train)  # training will occur after re-setup
Example #9
    def setup(self):
        # Training Data
        x_train = np.linspace(0, 10, 20)
        y_train = np.linspace(0, 20, 20)

        # Inputs
        self.add_input('simple_x', 0., training_data=x_train)
        self.add_input('sin_x', 0., training_data=x_train)

        # Outputs
        self.add_output('cos_x', 0., training_data=.5*np.cos(y_train))

        # Surrogate Model
        self.options['default_surrogate'] = om.ResponseSurface()
Example #10
    def test_warning_bug(self):
        # Make sure we don't warn that we are doing FD when the surrogate has analytic derivs.

        x_train = np.arange(0., 10.)
        y_train = np.arange(10., 20.)
        z_train = x_train**2 + y_train**2

        p = om.Problem()
        p.model = m = om.Group()

        params = om.IndepVarComp()
        params.add_output('x', val=0.)
        params.add_output('y', val=0.)

        m.add_subsystem('params', params, promotes=['*'])

        sm = om.MetaModelUnStructuredComp(
            default_surrogate=om.ResponseSurface())
        sm.add_input('x', val=0.)
        sm.add_input('y', val=0.)
        sm.add_output('z', val=0.)

        sm.options['train:x'] = x_train
        sm.options['train:y'] = y_train
        sm.options['train:z'] = z_train

        # With or without the line below, the behavior is the same;
        # the RuntimeWarning only disappears when method is set to 'fd'.
        sm.declare_partials('*', '*', method='exact')

        m.add_subsystem('sm', sm, promotes=['*'])

        m.add_design_var('x', lower=0., upper=10.)
        m.add_design_var('y', lower=0., upper=10.)
        m.add_objective('z')

        p.setup(check=True)

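        # Capture stderr while final_setup runs (this fragment assumes `import sys`
        # and `from io import StringIO` at module level).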
        stderr = sys.stderr
        str_err = StringIO()
        sys.stderr = str_err
        try:
            p.final_setup()
        finally:
            sys.stderr = stderr

        output = str_err.getvalue()
        self.assertTrue('finite difference' not in output)
Example #11
    def test_setup_message(self):
        x_train = np.arange(0., 10.)
        y_train = np.arange(10., 20.)
        z_train = x_train**2 + y_train**2

        p = om.Problem()
        p.model = model = om.Group()

        params = om.IndepVarComp()
        params.add_output('x', val=0.)
        params.add_output('y', val=0.)

        model.add_subsystem('params', params, promotes=['*'])

        sm = om.MetaModelUnStructuredComp(
            default_surrogate=om.ResponseSurface())
        sm.add_input('x', val=0.)
        sm.add_input('y', val=0.)
        sm.add_output('z', val=0.)

        sm.options['train:x'] = x_train
        sm.options['train:y'] = y_train
        sm.options['train:z'] = z_train

        # With or without the line below, the behavior is the same;
        # the RuntimeWarning only disappears when method is set to 'fd'.
        sm.declare_partials('*', '*', method='exact')

        model.add_subsystem('sm', sm, promotes=['*'])

        model.add_design_var('x', lower=0., upper=10.)
        model.add_design_var('y', lower=0., upper=10.)
        model.add_objective('z')

        p.setup()

        with self.assertRaises(RuntimeError) as cm:
            p.driver.scaling_report()

        msg = "Either 'run_model' or 'final_setup' must be called before the scaling report can be generated."
        self.assertEqual(str(cm.exception), msg)

        # Now, make sure it runs run_model automatically as long as we final_setup.
        p.final_setup()
        p.driver.scaling_report(show_browser=False)
Example #12
    def test_response_surface(self):

        prob = om.Problem()

        sin_mm = om.MetaModelUnStructuredComp()
        sin_mm.add_input('x', 2.1)
        sin_mm.add_output('f_x', 0., surrogate=om.ResponseSurface())

        prob.model.add_subsystem('sin_mm', sin_mm)

        prob.setup(check=True)

        # train the surrogate and check predicted value
        sin_mm.options['train:x'] = np.linspace(0, 3.14, 20)
        sin_mm.options['train:f_x'] = .5*np.sin(sin_mm.options['train:x'])

        prob.set_val('sin_mm.x', 2.1)

        prob.run_model()

        assert_near_equal(prob.get_val('sin_mm.f_x'), .5*np.sin(prob.get_val('sin_mm.x')), 2e-3)
Example #13
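# NOTE (assumption): this snippet is a fragment. It assumes that `tmp` (the raw
# engine-deck data), `xt`, and `yt` (pre-allocated training arrays) were created
# earlier, e.g. loaded from a data file, and that Mach has already been written
# into column 0 of xt; only the remaining column assignments are shown below.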
# Altitude in column 1 of xt
xt[:, 1] = tmp[:, 1] / 1e3
# Trottle in column 2 of xt
xt[:, 2] = tmp[:, 2]

# Thrust in column 0 of yt
yt[:, 0] = tmp[:, 3] / 1e5
# tsfc in column 1 of yt
yt[:, 1] = tmp[:, 6] / 3600.

# Set the limits of x
xlimits = np.array([[0.0, 0.9], [0., 43.], [0, 1]])

# Initial surrogate call
#### CHANGE THIS TO KRIGING SURROGATE WHEN GENERAL PLOTS ARE WORKING
interp = om.MetaModelUnStructuredComp(default_surrogate=om.ResponseSurface())
# Inputs
interp.add_input('Mach', 0., training_data=xt[:, 0])
interp.add_input('Alt', 0., training_data=xt[:, 1])
interp.add_input('Throttle', 0., training_data=xt[:, 2])

# Outputs
interp.add_output('Thrust', training_data=yt[:, 0])
interp.add_output('TSFC', training_data=yt[:, 1])

# Create the problem setup
prob = om.Problem()
prob.model.add_subsystem('interp', interp)
prob.setup()

### Start of class ###
Example #14
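# NOTE (assumption): this snippet is a fragment and relies on imports and a test
# function defined elsewhere. A minimal preamble that would make it self-contained
# might look like the following (names are illustrative only):
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   import openmdao.api as om
#
#   def function(x, y):               # hypothetical 2-D test function
#       return np.sin(x) * np.cos(y)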
def create_model(name):
    # Setup training domain
    l = 1
    nb_sp = 25
    d1 = np.linspace(-l, l, nb_sp)
    d2 = d1
    X, Y = np.meshgrid(d1, d2)
    Z = function(X, Y)

    # Setup prediction domain
    a = np.linspace(-l, l, 100)
    pd1 = a * np.sin(a**2)
    pd2 = a * np.cos(a**2)
    po = []
    err = []

    # Computing the surface
    coord = []
    res = []
    for i in d1:
        for j in d2:
            res.append(function(i, j))
            coord.append([i, j])

    # Create the training dataset
    xi = np.array(coord)
    yi = np.transpose(np.atleast_2d(res))

    # Create and train the model
    if name == 'Kg':
        k = om.KrigingSurrogate()
    elif name == 'RS':
        k = om.ResponseSurface()

    print('train start')
    k.train(xi, yi)
    print('train stop')

    # Make a prediction
    for i in range(0, len(a)):
        pres = k.predict(np.array([pd1[i], pd2[i]]))
        if name == 'Kg':
            po.append(pres[-1][-1])
            err.append(po[-1] - function(pd1[i], pd2[i]))
        elif name == 'RS':
            po.append(pres[-1])
            err.append(po[-1] - function(pd1[i], pd2[i]))
    rms = np.sqrt(np.mean(np.array(err)**2))
    print('Root mean-square error {} : {}'.format(name, rms))

    # Plot the training set
    plt.figure()
    ax = plt.axes(projection='3d')
    # ax.plot_surface(X, Y, Z)
    ax.plot_wireframe(X, Y, Z)

    # Plot the predicted set
    ax.scatter(pd1, pd2, po, color='red')
    ax.plot(pd1, pd2, err, color='gray')

    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # plt.show()
    return rms