def test_single_input_parameter(self):
    # Model
    interp = om.MetaModelUnStructuredComp()

    # Training Data
    x_train = np.linspace(0, 10, 20)
    y_train = np.linspace(0, 20, 20)

    # Inputs
    interp.add_input('simple_x', 0., training_data=x_train)

    # Outputs
    interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

    # Surrogate Model
    interp.options['default_surrogate'] = om.ResponseSurface()

    prob = om.Problem()
    prob.model.add_subsystem('interp', interp)
    prob.setup()

    with self.assertRaises(Exception) as context:
        viz = MetaModelVisualization(interp)

    msg = 'Must have more than one input value'
    self.assertTrue(msg in str(context.exception))
def test_not_top_level_prob(self):
    # Model
    interp = om.MetaModelUnStructuredComp()

    # Training Data
    x_train1 = np.linspace(0, 10, 20)
    x_train2 = np.linspace(0, 20, 20)
    x_train3 = np.linspace(0, 30, 20)
    x_train4 = np.linspace(0, 40, 20)
    y_train = np.linspace(10, 20, 20)

    # Inputs
    interp.add_input('input_1', 0., training_data=x_train1)
    interp.add_input('input_2', 0., training_data=x_train2)
    interp.add_input('input_3', 0., training_data=x_train3)
    interp.add_input('input_4', 0., training_data=x_train4)

    # Outputs
    interp.add_output('output_1', 0., training_data=.5 * np.cos(y_train))
    interp.add_output('output_2', 0., training_data=.5 * np.sin(y_train))

    # Surrogate Model
    interp.options['default_surrogate'] = om.ResponseSurface()

    prob = om.Problem(model=interp)
    prob.setup()
    prob.final_setup()
def setUp(self):
    self.mm = mm = om.MetaModelUnStructuredComp()

    filename = os.path.join(self.csv_dir, 'unstructured_data_points.csv')

    # Training Data
    x_train1 = np.genfromtxt(filename, delimiter=',', usecols=0)
    x_train2 = np.genfromtxt(filename, delimiter=',', usecols=1)
    x_train3 = np.genfromtxt(filename, delimiter=',', usecols=2)
    y = np.sin(x_train1 * x_train2 * x_train3)

    # Inputs
    mm.add_input('input_1', 0., training_data=x_train1)
    mm.add_input('input_2', 0., training_data=x_train2)
    mm.add_input('input_3', 0., training_data=x_train3)

    # Outputs
    mm.add_output('output_1', 0., training_data=y)

    # Surrogate Model
    mm.options['default_surrogate'] = om.KrigingSurrogate()

    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()
    prob.final_setup()
def test_make_predictions(self):
    # Model
    interp = om.MetaModelUnStructuredComp()

    # Training Data
    x_train = np.linspace(0, 10, 20)
    y_train = np.linspace(10, 20, 20)

    # Inputs
    interp.add_input('simple_x', 0., training_data=x_train)
    interp.add_input('sin_x', 0., training_data=x_train)

    # Outputs
    interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

    # Surrogate Model
    interp.options['default_surrogate'] = om.ResponseSurface()

    prob = om.Problem()
    prob.model.add_subsystem('interp', interp)
    prob.setup()

    viz = MetaModelVisualization(interp)
    resolution = 50
    data = {
        'simple_x': np.array([np.random.rand(resolution**2, 1)]),
        'sin_x': np.array([np.random.rand(resolution**2, 1)])
    }
    pred_array = viz._make_predictions(data)

    self.assertTrue(pred_array.shape == (resolution**2, 1))
def test_training_point_array_for_nan_values(self):
    # Model
    interp = om.MetaModelUnStructuredComp()

    # Training Data
    x_train = np.linspace(0, 10, 20)
    y_train = np.linspace(0, 20, 20)

    # Inputs
    interp.add_input('x', 0., training_data=x_train)
    interp.add_input('y', 0., training_data=x_train)

    # Outputs
    interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

    # Surrogate Model
    interp.options['default_surrogate'] = om.ResponseSurface()

    prob = om.Problem()
    prob.model.add_subsystem('interp', interp)
    prob.setup()

    viz = MetaModelVisualization(interp)
    training_points_output = viz._unstructured_training_points()

    for i in range(0, 2):
        self.assertFalse(np.any(np.isnan(training_points_output[:, i])))
def test_training_point_array_width(self):
    # Model
    interp = om.MetaModelUnStructuredComp()

    # Training Data
    x_train = np.linspace(0, 10, 20)
    y_train = np.linspace(0, 20, 20)

    # Inputs
    interp.add_input('x', 0., training_data=x_train)
    interp.add_input('y', 0., training_data=x_train)

    # Outputs
    interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

    # Surrogate Model
    interp.options['default_surrogate'] = om.ResponseSurface()

    prob = om.Problem()
    prob.model.add_subsystem('interp', interp)
    prob.setup()

    viz = MetaModelVisualization(interp)
    training_points_output = viz._unstructured_training_points()

    self.assertTrue(training_points_output.shape[1] == 2)
def test_missing_training_data_in_parameter(self):
    # Model
    interp = om.MetaModelUnStructuredComp()

    # Training Data
    x_train = np.linspace(0, 10, 20)
    y_train = np.linspace(0, 20, 20)

    # Inputs
    interp.add_input('simple_x', 0., training_data=x_train)
    interp.add_input('sin_x', 0.)

    # Outputs
    interp.add_output('cos_x', 0., training_data=.5 * np.cos(y_train))

    # Surrogate Model
    interp.options['default_surrogate'] = om.ResponseSurface()

    prob = om.Problem()
    prob.model.add_subsystem('interp', interp)
    prob.setup()

    with self.assertRaises(Exception) as context:
        viz = MetaModelVisualization(interp)

    msg = "No training data present for one or more parameters"
    self.assertTrue(msg in str(context.exception))
def test_metamodel_feature_vector2d(self):
    # similar to previous example, but processes 3 inputs/outputs at a time
    import numpy as np
    import openmdao.api as om

    size = 3

    # create a vectorized MetaModelUnStructuredComp for sine and cosine
    trig = om.MetaModelUnStructuredComp(vec_size=size,
                                        default_surrogate=om.KrigingSurrogate())
    trig.add_input('x', np.zeros(size))
    trig.add_output('y', np.zeros((size, 2)))

    # add it to a Problem
    prob = om.Problem()
    prob.model.add_subsystem('trig', trig)
    prob.setup()

    # provide training data
    trig.options['train:x'] = np.linspace(0, 10, 20)
    trig.options['train:y'] = np.column_stack((.5 * np.sin(trig.options['train:x']),
                                               .5 * np.cos(trig.options['train:x'])))

    # train the surrogate and check predicted value
    prob['trig.x'] = np.array([2.1, 3.2, 4.3])
    prob.run_model()
    assert_near_equal(prob['trig.y'],
                      np.column_stack((.5 * np.sin(prob['trig.x']),
                                       .5 * np.cos(prob['trig.x']))),
                      1e-4)
def test_array_inputs(self):
    mm = om.MetaModelUnStructuredComp()
    mm.add_input('x', np.zeros((2, 2)))
    mm.add_output('y1', 0.)
    mm.add_output('y2', 0.)
    mm.options['default_surrogate'] = om.KrigingSurrogate()

    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()

    mm.options['train:x'] = [[[1.0, 1.0], [1.0, 1.0]],
                             [[2.0, 1.0], [1.0, 1.0]],
                             [[1.0, 2.0], [1.0, 1.0]],
                             [[1.0, 1.0], [2.0, 1.0]],
                             [[1.0, 1.0], [1.0, 2.0]]]
    mm.options['train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
    mm.options['train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]

    prob['mm.x'] = [[1.0, 2.0], [1.0, 1.0]]
    prob.run_model()

    assert_near_equal(prob['mm.y1'], 1.0, .00001)
    assert_near_equal(prob['mm.y2'], 7.0, .00001)
def test_metamodel_feature_vector(self):
    # Like the simple sine example, but with an input of length n instead of a scalar.
    # The expected behavior is that the output is also of length n, with each entry
    # being an independent prediction.
    # It's as if you stamped out n copies of the metamodel, ran n scalars through
    # their inputs, then muxed all of those outputs into one contiguous array,
    # except that you skip the n-copies step and do it all as an array.
    import numpy as np
    import openmdao.api as om

    size = 3

    # create a vectorized MetaModelUnStructuredComp for sine
    trig = om.MetaModelUnStructuredComp(vec_size=size,
                                        default_surrogate=om.KrigingSurrogate())
    trig.add_input('x', np.zeros(size))
    trig.add_output('y', np.zeros(size))

    # add it to a Problem
    prob = om.Problem()
    prob.model.add_subsystem('trig', trig)
    prob.setup()

    # provide training data
    trig.options['train:x'] = np.linspace(0, 10, 20)
    trig.options['train:y'] = .5 * np.sin(trig.options['train:x'])

    # train the surrogate and check predicted value
    prob['trig.x'] = np.array([2.1, 3.2, 4.3])
    prob.run_model()
    assert_near_equal(prob['trig.y'],
                      np.array(.5 * np.sin(prob['trig.x'])),
                      1e-4)
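# A minimal sketch (not part of the test suite) of the scalar-loop behavior that the
# comment in test_metamodel_feature_vector describes. It assumes a hypothetical
# non-vectorized component named 'trig_scalar' trained on the same data; the names
# and loop below are illustrative only:
#
#     predictions = []
#     for xi in [2.1, 3.2, 4.3]:
#         prob['trig_scalar.x'] = xi
#         prob.run_model()
#         predictions.append(float(prob['trig_scalar.y']))
#     # np.array(predictions) should match prob['trig.y'] from the vectorized component.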
def test_nearest_neighbor(self):
    import numpy as np
    import openmdao.api as om

    prob = om.Problem()

    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 2.1)
    sin_mm.add_output('f_x', 0.,
                      surrogate=om.NearestNeighbor(interpolant_type='linear'))

    prob.model.add_subsystem('sin_mm', sin_mm)

    prob.setup(check=True)

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 10, 20)
    sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

    prob.set_val('sin_mm.x', 2.1)

    prob.run_model()

    assert_near_equal(prob.get_val('sin_mm.f_x'),
                      .5 * np.sin(prob.get_val('sin_mm.x')),
                      2e-3)
def test_surrogate_message_format(self):
    prob = om.Problem()
    prob.model.add_subsystem('p', om.IndepVarComp('x', 2.1))

    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 0.)
    sin_mm.add_output('f_x', 0., surrogate=om.KrigingSurrogate())

    prob.model.add_subsystem('sin_mm', sin_mm)

    prob.model.connect('p.x', 'sin_mm.x')

    prob.setup(check=True)

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 10, 1)
    sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

    prob['sin_mm.x'] = 2.1

    with self.assertRaises(ValueError) as cm:
        prob.run_model()

    self.assertEqual(str(cm.exception),
                     'sin_mm: KrigingSurrogate requires at least 2 training points.')
def test_response_surface(self):
    import numpy as np
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('p', om.IndepVarComp('x', 2.1))

    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 0.)
    sin_mm.add_output('f_x', 0., surrogate=om.ResponseSurface())

    prob.model.add_subsystem('sin_mm', sin_mm)

    prob.model.connect('p.x', 'sin_mm.x')

    prob.setup(check=True)

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 3.14, 20)
    sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

    prob['sin_mm.x'] = 2.1

    prob.run_model()

    assert_near_equal(prob['sin_mm.f_x'], .5 * np.sin(prob['sin_mm.x']), 2e-3)
def test_vectorized(self):
    size = 3

    # create a vectorized MetaModelUnStructuredComp for sine
    trig = om.MetaModelUnStructuredComp(vec_size=size,
                                        default_surrogate=om.KrigingSurrogate())
    trig.add_input('x', np.zeros(size))
    trig.add_output('y', np.zeros(size))

    # add it to a Problem
    prob = om.Problem()
    prob.model.add_subsystem('trig', trig)
    prob.setup()

    # provide training data (option names follow the 'train:<var>' convention)
    trig.options['train:x'] = np.linspace(0, 10, 20)
    trig.options['train:y'] = .5 * np.sin(trig.options['train:x'])

    # train the surrogate and check predicted value
    prob['trig.x'] = np.array([2.1, 3.2, 4.3])
    prob.run_model()
    assert_near_equal(prob['trig.y'],
                      np.array(.5 * np.sin(prob['trig.x'])),
                      1e-4)

    data = prob.check_partials(out_stream=None)
    assert_check_partials(data, atol=1e-6, rtol=1e-6)
def test_derivatives(self):
    mm = om.MetaModelUnStructuredComp()
    mm.add_input('x', 0.)
    mm.add_output('f', 0.)
    mm.options['default_surrogate'] = om.KrigingSurrogate()

    prob = om.Problem()
    prob.model.add_subsystem('p', om.IndepVarComp('x', 0.),
                             promotes_outputs=['x'])
    prob.model.add_subsystem('mm', mm,
                             promotes_inputs=['x'])
    prob.setup()

    mm.options['train:x'] = [0., .25, .5, .75, 1.]
    mm.options['train:f'] = [1., .75, .5, .25, 0.]

    prob['x'] = 0.125

    prob.run_model()

    data = prob.check_partials(out_stream=None)

    Jf = data['mm'][('f', 'x')]['J_fwd']
    assert_near_equal(Jf[0][0], -1., 1.e-3)

    assert_check_partials(data, atol=1e-6, rtol=1e-6)

    # Complex step
    prob.setup(force_alloc_complex=True)
    prob.model.mm.set_check_partial_options(wrt='*', method='cs')
    data = prob.check_partials(out_stream=None)
    assert_check_partials(data, atol=1e-11, rtol=1e-11)
def test_two_vector_inputs(self):
    mm = om.MetaModelUnStructuredComp()
    mm.add_input('x1', np.zeros(4))
    mm.add_input('x2', np.zeros(4))
    mm.add_output('y1', 0.)
    mm.add_output('y2', 0.)
    mm.options['default_surrogate'] = om.KrigingSurrogate()

    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()

    mm.options['train:x1'] = [[1.0, 1.0, 1.0, 1.0],
                              [2.0, 1.0, 1.0, 1.0],
                              [1.0, 2.0, 1.0, 1.0],
                              [1.0, 1.0, 2.0, 1.0],
                              [1.0, 1.0, 1.0, 2.0]]
    mm.options['train:x2'] = [[1.0, 1.0, 1.0, 1.0],
                              [2.0, 1.0, 1.0, 1.0],
                              [1.0, 2.0, 1.0, 1.0],
                              [1.0, 1.0, 2.0, 1.0],
                              [1.0, 1.0, 1.0, 2.0]]
    mm.options['train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
    mm.options['train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]

    prob['mm.x1'] = [1.0, 2.0, 1.0, 1.0]
    prob['mm.x2'] = [1.0, 2.0, 1.0, 1.0]
    prob.run_model()

    assert_near_equal(prob['mm.y1'], 1.0, .00001)
    assert_near_equal(prob['mm.y2'], 7.0, .00001)
def test_nearest_neighbor_rbf_options(self):
    prob = om.Problem()

    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 2.1)
    sin_mm.add_output('f_x', 0.,
                      surrogate=om.NearestNeighbor(interpolant_type='rbf', num_neighbors=3))

    prob.model.add_subsystem('sin_mm', sin_mm)

    prob.setup(check=True)

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 10, 20)
    sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

    prob.set_val('sin_mm.x', 2.1)

    prob.run_model()

    assert_near_equal(prob.get_val('sin_mm.f_x'),
                      .5 * np.sin(prob.get_val('sin_mm.x')),
                      5e-3)
def test_kriging_options_eval_rmse(self):
    prob = om.Problem()

    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 2.1)
    sin_mm.add_output('f_x', 0., surrogate=om.KrigingSurrogate(eval_rmse=True))

    prob.model.add_subsystem('sin_mm', sin_mm)

    prob.setup(check=True)

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 10, 20)
    sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

    prob.set_val('sin_mm.x', 2.1)

    prob.run_model()

    print("mean")
    assert_near_equal(prob.get_val('sin_mm.f_x'),
                      .5 * np.sin(prob.get_val('sin_mm.x')),
                      1e-4)
    print("std")
    assert_near_equal(sin_mm._metadata('f_x')['rmse'][0, 0], 0.0, 1e-4)
def test_sin_metamodel_rmse(self):
    # create MetaModelUnStructuredComp with Kriging, using the rmse option
    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 0.)
    sin_mm.add_output('f_x', 0.)

    sin_mm.options['default_surrogate'] = om.KrigingSurrogate(eval_rmse=True)

    # add it to a Problem
    prob = om.Problem()
    prob.model.add_subsystem('sin_mm', sin_mm)
    prob.setup()

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 10, 20)
    sin_mm.options['train:f_x'] = np.sin(sin_mm.options['train:x'])

    prob['sin_mm.x'] = 2.1

    prob.run_model()

    assert_near_equal(prob['sin_mm.f_x'], np.sin(2.1), 1e-4)  # mean
    self.assertTrue(sin_mm._metadata('f_x')['rmse'] < 1e-5)  # std deviation
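# Usage note, kept as a comment so it reads alongside the tests: with eval_rmse=True the
# Kriging surrogate stores a prediction-uncertainty estimate next to the mean, and the
# assertions above and below read it back through the (private) _metadata() accessor, e.g.
#
#     rmse = sin_mm._metadata('f_x')['rmse']   # a 1x1 array for the scalar output used here
#
# This is only a sketch of what the assertions exercise, not a documented public API.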
def test_kriging_options_eval_rmse(self):
    import numpy as np
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('p', om.IndepVarComp('x', 2.1))

    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 0.)
    sin_mm.add_output('f_x', 0., surrogate=om.KrigingSurrogate(eval_rmse=True))

    prob.model.add_subsystem('sin_mm', sin_mm)

    prob.model.connect('p.x', 'sin_mm.x')

    prob.setup(check=True)

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 10, 20)
    sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

    prob['sin_mm.x'] = 2.1

    prob.run_model()

    print("mean")
    assert_near_equal(prob['sin_mm.f_x'], .5 * np.sin(prob['sin_mm.x']), 1e-4)
    print("std")
    assert_near_equal(sin_mm._metadata('f_x')['rmse'][0, 0], 0.0, 1e-4)
def test_vectorized_kriging(self):
    # Test for coverage (handling the rmse)
    size = 3

    # create a vectorized MetaModelUnStructuredComp for sine
    trig = om.MetaModelUnStructuredComp(vec_size=size,
                                        default_surrogate=om.KrigingSurrogate(eval_rmse=True))
    trig.add_input('x', np.zeros(size))
    trig.add_output('y', np.zeros(size))

    # add it to a Problem
    prob = om.Problem()
    prob.model.add_subsystem('trig', trig)
    prob.setup()

    # provide training data
    trig.options['train:x'] = np.linspace(0, 10, 20)
    trig.options['train:y'] = .5 * np.sin(trig.options['train:x'])

    # train the surrogate and check predicted value
    prob['trig.x'] = np.array([2.1, 3.2, 4.3])
    prob.run_model()
    assert_near_equal(prob['trig.y'],
                      np.array(.5 * np.sin(prob['trig.x'])),
                      1e-4)
    self.assertEqual(len(prob.model.trig._metadata('y')['rmse']), 3)
def test_vectorized(self):
    size = 3

    # create a vectorized MetaModelUnStructuredComp for sine
    trig = om.MetaModelUnStructuredComp(vec_size=size,
                                        default_surrogate=om.KrigingSurrogate())
    trig.add_input('x', np.zeros(size))
    trig.add_output('y', np.zeros(size))

    # add it to a Problem
    prob = om.Problem()
    prob.model.add_subsystem('trig', trig)
    prob.setup()

    # provide training data
    trig.options['train:x'] = np.linspace(0, 10, 20)
    trig.options['train:y'] = .5 * np.sin(trig.options['train:x'])

    # train the surrogate and check predicted value
    prob['trig.x'] = np.array([2.1, 3.2, 4.3])
    prob.run_model()
    assert_near_equal(prob['trig.y'],
                      np.array(.5 * np.sin(prob['trig.x'])),
                      1e-4)

    data = prob.check_partials(out_stream=None)

    abs_errors = data['trig'][('y', 'x')]['abs error']
    self.assertTrue(len(abs_errors) > 0)
    for match in abs_errors:
        abs_error = float(match)
        self.assertTrue(abs_error < 1.e-6)
def test_nearest_neighbor_rbf_options(self):
    import numpy as np
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('p', om.IndepVarComp('x', 2.1))

    sin_mm = om.MetaModelUnStructuredComp()
    sin_mm.add_input('x', 0.)
    sin_mm.add_output('f_x', 0.,
                      surrogate=om.NearestNeighbor(interpolant_type='rbf', num_neighbors=3))

    prob.model.add_subsystem('sin_mm', sin_mm)

    prob.model.connect('p.x', 'sin_mm.x')

    prob.setup(check=True)

    # train the surrogate and check predicted value
    sin_mm.options['train:x'] = np.linspace(0, 10, 20)
    sin_mm.options['train:f_x'] = .5 * np.sin(sin_mm.options['train:x'])

    prob['sin_mm.x'] = 2.1

    prob.run_model()

    assert_near_equal(prob['sin_mm.f_x'], .5 * np.sin(prob['sin_mm.x']), 5e-3)
def test_metamodel_feature2d(self):
    # similar to previous example, but output is 2d
    import numpy as np
    import openmdao.api as om

    # create a MetaModelUnStructuredComp that predicts sine and cosine as an array
    trig = om.MetaModelUnStructuredComp(default_surrogate=om.KrigingSurrogate())
    trig.add_input('x', 0)
    trig.add_output('y', np.zeros(2))

    # add it to a Problem
    prob = om.Problem()
    prob.model.add_subsystem('trig', trig)
    prob.setup()

    # provide training data
    trig.options['train:x'] = np.linspace(0, 10, 20)
    trig.options['train:y'] = np.column_stack((.5 * np.sin(trig.options['train:x']),
                                               .5 * np.cos(trig.options['train:x'])))

    # train the surrogate and check predicted value
    prob['trig.x'] = 2.1
    prob.run_model()
    assert_near_equal(prob['trig.y'],
                      np.append(.5 * np.sin(prob['trig.x']),
                                .5 * np.cos(prob['trig.x'])),
                      1e-4)
def test_metamodel_feature(self):
    # create a MetaModelUnStructuredComp, specifying surrogates for the outputs
    import numpy as np
    import openmdao.api as om

    trig = om.MetaModelUnStructuredComp()

    x_train = np.linspace(0, 10, 20)

    trig.add_input('x', 0., training_data=x_train)

    trig.add_output('sin_x', 0.,
                    training_data=.5 * np.sin(x_train),
                    surrogate=om.KrigingSurrogate())
    trig.add_output('cos_x', 0.,
                    training_data=.5 * np.cos(x_train))

    trig.options['default_surrogate'] = om.KrigingSurrogate()

    # add it to a Problem, run and check the predicted values
    prob = om.Problem()
    prob.model.add_subsystem('trig', trig)
    prob.setup()

    prob['trig.x'] = 2.1
    prob.run_model()

    assert_near_equal(prob['trig.sin_x'], .5 * np.sin(prob['trig.x']), 1e-4)
    assert_near_equal(prob['trig.cos_x'], .5 * np.cos(prob['trig.x']), 1e-4)
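# Usage note (a summary of what the test above and test_basics below exercise, not extra
# test code): an output declared with an explicit surrogate= argument keeps that surrogate,
# while outputs declared without one ('cos_x' here) fall back to whatever is set in
# options['default_surrogate'].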
def test_unequal_training_outputs(self):
    mm = om.MetaModelUnStructuredComp()
    mm.add_input('x', 0.)
    mm.add_input('y', 0.)
    mm.add_output('f', 0.)
    mm.options['default_surrogate'] = om.KrigingSurrogate()

    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()

    mm.options['train:x'] = [1.0, 1.0, 1.0, 1.0]
    mm.options['train:y'] = [1.0, 2.0, 3.0, 4.0]
    mm.options['train:f'] = [1.0, 1.0]

    prob['mm.x'] = 1.0
    prob['mm.y'] = 1.0

    with self.assertRaises(RuntimeError) as cm:
        prob.run_model()

    expected = ("MetaModelUnStructuredComp (mm): Each variable must have the same number"
                " of training points. Expected 4 but found"
                " 2 points for 'f'.")
    self.assertEqual(str(cm.exception), expected)
def test_2darray_outputs(self):
    mm = om.MetaModelUnStructuredComp()
    mm.add_input('x', np.zeros((2, 2)))
    mm.add_output('y', np.zeros((2, 2)))
    mm.options['default_surrogate'] = om.KrigingSurrogate()

    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()

    mm.options['train:x'] = [[[1.0, 1.0], [1.0, 1.0]],
                             [[2.0, 1.0], [1.0, 1.0]],
                             [[1.0, 2.0], [1.0, 1.0]],
                             [[1.0, 1.0], [2.0, 1.0]],
                             [[1.0, 1.0], [1.0, 2.0]]]
    mm.options['train:y'] = [[[3.0, 1.0], [3.0, 1.0]],
                             [[2.0, 4.0], [2.0, 4.0]],
                             [[1.0, 7.0], [1.0, 7.0]],
                             [[6.0, -3.0], [6.0, -3.0]],
                             [[-2.0, 3.0], [-2.0, 3.0]]]

    prob['mm.x'] = [[1.0, 2.0], [1.0, 1.0]]
    prob.run_model()

    assert_near_equal(prob['mm.y'], np.array([[1.0, 7.0], [1.0, 7.0]]), .00001)
def test_basics(self):
    # create a metamodel component
    mm = om.MetaModelUnStructuredComp()

    mm.add_input('x1', 0.)
    mm.add_input('x2', 0.)

    mm.add_output('y1', 0.)
    mm.add_output('y2', 0., surrogate=om.KrigingSurrogate())

    mm.options['default_surrogate'] = om.ResponseSurface()

    # add metamodel to a problem
    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()

    # check that surrogates were properly assigned
    surrogate = mm._metadata('y1').get('surrogate')
    self.assertTrue(isinstance(surrogate, om.ResponseSurface))

    surrogate = mm._metadata('y2').get('surrogate')
    self.assertTrue(isinstance(surrogate, om.KrigingSurrogate))

    # populate training data
    mm.options['train:x1'] = [1.0, 2.0, 3.0]
    mm.options['train:x2'] = [1.0, 3.0, 4.0]
    mm.options['train:y1'] = [3.0, 2.0, 1.0]
    mm.options['train:y2'] = [1.0, 4.0, 7.0]

    # run problem for provided data point and check prediction
    prob['mm.x1'] = 2.0
    prob['mm.x2'] = 3.0

    self.assertTrue(mm.train)  # training will occur before 1st run
    prob.run_model()

    assert_near_equal(prob['mm.y1'], 2.0, .00001)
    assert_near_equal(prob['mm.y2'], 4.0, .00001)

    # run problem for interpolated data point and check prediction
    prob['mm.x1'] = 2.5
    prob['mm.x2'] = 3.5

    self.assertFalse(mm.train)  # training will not occur before 2nd run
    prob.run_model()

    assert_near_equal(prob['mm.y1'], 1.5934, .001)

    # change default surrogate, re-setup and check that metamodel re-trains
    mm.options['default_surrogate'] = om.KrigingSurrogate()
    prob.setup()

    surrogate = mm._metadata('y1').get('surrogate')
    self.assertTrue(isinstance(surrogate, om.KrigingSurrogate))

    self.assertTrue(mm.train)  # training will occur after re-setup
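# Pattern note, summarizing what test_basics asserts rather than adding new behavior:
# training data for a variable named 'x1' is supplied through the auto-generated option
# 'train:x1' (the 'train:' prefix plus the variable name), and, as the mm.train assertions
# above indicate, the component trains lazily on the next run_model() and again after
# options are changed and setup() is re-run.
#
#     mm.options['train:x1'] = [1.0, 2.0, 3.0]   # 'train:' + variable name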
def test_derivatives_vectorized_multiD(self):
    vec_size = 5

    mm = om.MetaModelUnStructuredComp(vec_size=vec_size)
    mm.add_input('x', np.zeros((vec_size, 2, 3)))
    mm.add_input('xx', np.zeros((vec_size, 1)))
    mm.add_output('y', np.zeros((vec_size, 4, 2)))
    mm.options['default_surrogate'] = om.KrigingSurrogate()

    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()

    mm.options['train:x'] = [[[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]],
                             [[2.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
                             [[1.0, 1.0, 2.0], [1.0, 2.0, 1.0]],
                             [[1.0, 1.0, 1.0], [2.0, 1.0, 1.0]],
                             [[1.0, 2.0, 1.0], [1.0, 2.0, 2.0]]]
    mm.options['train:xx'] = [1.0, 2.0, 1.0, 1.0, 2.0]
    mm.options['train:y'] = [[[30.0, 10.0], [30.0, 25.0], [50.0, 10.7], [15.0, 25.7]],
                             [[20.0, 40.0], [20.0, 40.0], [80.0, 30.3], [12.0, 20.7]],
                             [[10.0, 70.0], [10.0, 70.0], [20.0, 10.9], [13.0, 15.7]],
                             [[60.0, -30.0], [60.0, -30.0], [50.0, 50.5], [14.0, 10.7]],
                             [[-20.0, 30.0], [-20.0, 30.0], [20.2, 10.0], [15.0, 60.7]]]

    prob['mm.x'] = [[[1.3, 1.3, 1.3], [1.5, 1.5, 1.5]],
                    [[1.4, 1.4, 1.4], [1.5, 1.5, 1.5]],
                    [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]],
                    [[1.5, 1.5, 1.5], [1.4, 1.4, 1.4]],
                    [[1.5, 1.5, 1.5], [1.3, 1.3, 1.3]]]
    prob['mm.xx'] = [[1.4], [1.5], [1.6], [1.5], [1.4]]

    prob.run_model()

    data = prob.check_partials(out_stream=None)
    assert_check_partials(data, atol=1e-5, rtol=1e-5)

    # Complex step
    prob.setup(force_alloc_complex=True)
    prob.model.mm.set_check_partial_options(wrt='*', method='cs')
    data = prob.check_partials(out_stream=None)
    assert_check_partials(data, atol=1e-11, rtol=1e-11)
def test_metamodel_vector_errors(self):
    # first dimension of all inputs/outputs must be 3
    mm = om.MetaModelUnStructuredComp(vec_size=3)

    with self.assertRaises(RuntimeError) as cm:
        mm.add_input('x', np.zeros(2))
    self.assertEqual(str(cm.exception),
                     "<class MetaModelUnStructuredComp>: First dimension of input 'x' must be 3")

    with self.assertRaises(RuntimeError) as cm:
        mm.add_output('y', np.zeros(4))
    self.assertEqual(str(cm.exception),
                     "<class MetaModelUnStructuredComp>: First dimension of output 'y' must be 3")
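# For contrast with the error cases above, a minimal sketch (hypothetical variable names,
# not part of the test suite) of shapes that are accepted when vec_size=3: the first
# dimension must equal vec_size, while trailing dimensions are free, as the vectorized
# 2d-output test earlier in this file also shows.
#
#     mm_ok = om.MetaModelUnStructuredComp(vec_size=3)
#     mm_ok.add_input('x', np.zeros(3))          # first dimension is 3
#     mm_ok.add_output('y', np.zeros((3, 2)))    # trailing dimensions are free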