def setUp(self):
    # 1d identity model with unit runtime
    self.md = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x, var=1, out=1, runtime=1)
        >> gr.cp_marginals(x0={"dist": "uniform", "loc": 0, "scale": 1})
        >> gr.cp_copula_independence()
    )
    # 2d model; output depends on the first input only
    self.md_2d = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0], var=2, out=1)
        >> gr.cp_marginals(
            x0={"dist": "uniform", "loc": 0, "scale": 1},
            x1={"dist": "uniform", "loc": 0, "scale": 1},
        )
        >> gr.cp_copula_independence()
    )
    # Mixed model: one bounded (deterministic) input, one random input
    self.md_mixed = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0] + x[1], var=2, out=1)
        >> gr.cp_bounds(x0=(-1, +1))
        >> gr.cp_marginals(x1={"dist": "uniform", "loc": 0, "scale": 1})
        >> gr.cp_copula_independence()
    )
def setUp(self):
    ## Smooth model: two continuous outputs of a single input
    self.md_smooth = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: [x, x + 1], var=["x"], out=["y", "z"])
        >> gr.cp_marginals(x={"dist": "uniform", "loc": 0, "scale": 2})
        >> gr.cp_copula_independence()
    )
    self.df_smooth = self.md_smooth >> gr.ev_df(
        df=pd.DataFrame(dict(x=[0, 1, 2]))
    )
    ## Tree model: constant output plus a step (threshold) output
    self.md_tree = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: [0, x < 5], var=["x"], out=["y", "z"])
        >> gr.cp_marginals(x={"dist": "uniform", "loc": 0, "scale": 2})
        >> gr.cp_copula_independence()
    )
    self.df_tree = self.md_tree >> gr.ev_df(
        df=pd.DataFrame(dict(x=np.linspace(0, 10, num=8)))
    )
    ## Cluster model: two well-separated groups labelled by c
    self.df_cluster = pd.DataFrame(
        dict(
            x=[0.1, 0.2, 0.3, 0.4, 1.1, 1.2, 1.3, 1.4],
            y=[0.3, 0.2, 0.1, 0.0, 1.3, 1.2, 1.1, 1.0],
            c=[0, 0, 0, 0, 1, 1, 1, 1],
        )
    )
def test_opt(self):
    # Invariant checks on the argument interface
    self.inv_test.md_arg(gr.eval_min, df_arg="df_start")
    self.inv_test.df_arg(gr.eval_min, df_arg="df_start", acc_none="always")

    # Constrained quadratic bowl; the optimum lies on a constraint boundary
    md_bowl = (
        gr.Model("Constrained bowl")
        >> gr.cp_function(
            fun=lambda x: x[0]**2 + x[1]**2,
            var=["x", "y"],
            out=["f"],
        )
        >> gr.cp_function(
            fun=lambda x: (x[0] + x[1] + 1),
            var=["x", "y"],
            out=["g1"],
        )
        >> gr.cp_function(
            fun=lambda x: -(-x[0] + x[1] - np.sqrt(2 / 10)),
            var=["x", "y"],
            out=["g2"],
        )
        >> gr.cp_bounds(
            x=(-1, +1),
            y=(-1, +1),
        )
    )

    df_res = md_bowl >> gr.ev_min(
        out_min="f",
        out_geq=["g1"],
        out_leq=["g2"],
    )

    # Check result against the analytically-known optimum
    self.assertTrue(abs(df_res.x[0] + np.sqrt(1 / 20)) < 1e-6)
    self.assertTrue(abs(df_res.y[0] - np.sqrt(1 / 20)) < 1e-6)

    # Check errors for violated invariants: unknown output names must raise
    with self.assertRaises(ValueError):
        gr.eval_min(md_bowl, out_min="FALSE")
    with self.assertRaises(ValueError):
        gr.eval_min(md_bowl, out_min="f", out_geq=["FALSE"])
    with self.assertRaises(ValueError):
        gr.eval_min(md_bowl, out_min="f", out_eq=["FALSE"])

    # Multiple restarts return one row per restart
    df_multi = gr.eval_min(
        md_bowl,
        out_min="f",
        out_geq=["g1"],
        out_leq=["g2"],
        n_restart=2,
    )
    self.assertTrue(df_multi.shape[0] == 2)
def test_grad_fd(self):
    """Checks the finite-difference gradient code."""
    ## Accuracy against precomputed gradient values
    df_grad = gr.eval_grad_fd(
        self.model_2d, df_base=self.df_2d_nominal, append=False
    )
    self.assertTrue(
        np.allclose(df_grad[self.df_2d_grad.columns], self.df_2d_grad)
    )
    ## Restricting var= yields only the requested partials
    df_grad_sub = gr.eval_grad_fd(
        self.model_2d, df_base=self.df_2d_nominal, var=["x"], append=False
    )
    self.assertTrue(set(df_grad_sub.columns) == set(["Df_Dx", "Dg_Dx"]))
    ## Flags: var="rand" / var="det" select random vs deterministic inputs
    md_test = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0] + x[1] ** 2, var=2, out=1)
        >> gr.cp_marginals(x0={"dist": "norm", "loc": 0, "scale": 1})
    )
    df_base = pd.DataFrame(dict(x0=[0, 1], x1=[0, 1]))
    ## Multiple base points evaluated at once
    df_true = pd.DataFrame(dict(Dy0_Dx0=[1, 1], Dy0_Dx1=[0, 2]))
    df_rand = gr.eval_grad_fd(md_test, df_base=df_base, var="rand", append=False)
    self.assertTrue(gr.df_equal(df_true[["Dy0_Dx0"]], df_rand, close=True))
    df_det = gr.eval_grad_fd(md_test, df_base=df_base, var="det", append=False)
    self.assertTrue(gr.df_equal(df_true[["Dy0_Dx1"]], df_det, close=True))
def setUp(self):
    # Identity model with a single standard-normal input
    self.md = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x, var=["x"], out=["y"])
        >> gr.cp_marginals(x=dict(dist="norm", loc=0, scale=1))
        >> gr.cp_copula_independence()
    )
def __init__(self):
    """Set up shared fixture values."""
    # Identity model with default names (var "x0", out "y0") and unit runtime
    self.md = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x, var=1, out=1, runtime=1)
    )
    # NOTE(review): bounds are declared on "x1" while the model's default
    # input is "x0" -- presumably intentional for the test; confirm.
    self.md_var_det = self.md >> gr.cp_bounds(x1=(0, 1))
    self.df = pd.DataFrame(data={"x": [0.0], "y": [0.5]})
    # declare fixtures for type-checking tests
    self.type_tests = [(1, 2), 2, [1, 8]]
def test_nls(self):
    ## Setup: model with a fittable parameter x0 and a pinned parameter x2
    md_feat = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0] * x[1] + x[2], var=3, out=1)
        >> gr.cp_bounds(x0=[-1, +1], x2=[0, 0])
        >> gr.cp_marginals(x1=dict(dist="norm", loc=0, scale=1))
    )
    # Constant model: no feature dependence at all
    md_const = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0], var=1, out=1)
        >> gr.cp_bounds(x0=(-1, +1))
    )
    df_response = md_feat >> gr.ev_df(
        df=gr.df_make(x0=0.1, x1=[-1, -0.5, +0, +0.5, +1], x2=0)
    )
    df_data = df_response[["x1", "y0"]]

    ## Model with features recovers the generating parameter
    df_true = gr.df_make(x0=0.1)
    df_fit = md_feat >> gr.ev_nls(df_data=df_data, append=False)
    pd.testing.assert_frame_equal(
        df_fit,
        df_true,
        check_exact=False,
        check_dtype=False,
        check_column_type=False,
    )

    ## Fitting synonym keeps the non-fitted variables
    md_feat_fit = df_data >> gr.ft_nls(md=md_feat, verbose=False)
    self.assertTrue(set(md_feat_fit.var) == set(["x1", "x2"]))

    ## Constant model fits the mean of the data
    df_const = gr.df_make(x0=0)
    df_fit = md_const >> gr.ev_nls(df_data=gr.df_make(y0=[-1, 0, +1]))
    pd.testing.assert_frame_equal(
        df_fit,
        df_const,
        check_exact=False,
        check_dtype=False,
        check_column_type=False,
    )
def test_drop_out(self):
    """Checks that output column names are properly dropped"""
    md = gr.Model() >> gr.cp_function(lambda x: x[0] + 1, var=1, out=1)
    # df_in carries a stale "y0" column; evaluation must overwrite it
    df_in = gr.df_make(x0=[0, 1, 2], y0=[0, 1, 2])
    df_true = gr.df_make(x0=[0, 1, 2], y0=[1, 2, 3])
    df_res = md >> gr.ev_df(df=df_in)
    self.assertTrue(gr.df_equal(df_res, df_true, close=True))
def test_dag(self):
    # Two chained functions: f1 consumes x0 and f0's output y0
    md = (
        gr.Model("model")
        >> gr.cp_function(lambda x: x, var=1, out=1)
        >> gr.cp_function(lambda x: x[0] + x[1], var=["x0", "y0"], out=1)
    )
    # Expected DAG; edge labels are the stringified variable sets carried
    G_true = nx.DiGraph()
    G_true.add_edge("(var)", "f0", label="{}".format({"x0"}))
    G_true.add_edge("f0", "(out)", label="{}".format({"y0"}))
    G_true.add_edge("(var)", "f1", label="{}".format({"x0"}))
    G_true.add_edge("f0", "f1", label="{}".format({"y0"}))
    G_true.add_edge("f1", "(out)", label="{}".format({"y1"}))
    nx.set_node_attributes(G_true, "model", "parent")
    self.assertTrue(
        nx.is_isomorphic(
            md.make_dag(),
            G_true,
            node_match=lambda u, v: u == v,
            edge_match=lambda u, v: u == v,
        )
    )
def setUp(self):
    self.md = models.make_test()
    # Mixed model: one bounded (deterministic) input plus two random inputs
    self.md_mixed = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0], var=3, out=1)
        >> gr.cp_bounds(x2=(0, 1))
        >> gr.cp_marginals(
            x0={"dist": "uniform", "loc": 0, "scale": 1},
            x1={"dist": "uniform", "loc": 0, "scale": 1},
        )
        >> gr.cp_copula_independence()
    )
def setUp(self):
    ## Linear limit state w/ MPP off initial guess
    self.beta_true = 3
    self.md = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: self.beta_true * 2 - x[0] - np.sqrt(3) * x[1],
            var=2,
            out=["g"],
        )
        >> gr.cp_marginals(
            x0=dict(dist="norm", loc=0, scale=1, sign=1),
            x1=dict(dist="norm", loc=0, scale=1, sign=1),
        )
        >> gr.cp_copula_independence()
    )
def test_var_outer(self):
    ## Pass-through cases
    df_test = pd.DataFrame(dict(x0=[0]))
    # Model with no random variables accepts an empty random frame
    md_no_rand = gr.Model() >> gr.cp_function(fun=lambda x: x, var=1, out=1)
    md_no_rand.var_outer(pd.DataFrame(), df_det="nom")
    # Model with no deterministic variables accepts df_det="nom"
    md_no_det = md_no_rand >> gr.cp_marginals(
        x0={"dist": "uniform", "loc": 0, "scale": 1}
    )
    md_no_det.var_outer(df_test, df_det="nom")
    ## Mismatched frames must raise
    with self.assertRaises(ValueError):
        self.model_3d.var_outer(self.df_2d, df_det=self.df_2d)
def test_function_model(self):
    md_base = gr.Model() >> gr.cp_function(
        fun=lambda x: x, var=1, out=1, name="name", runtime=1
    )
    ## Base constructor preserves the source model's metadata
    func = gr.FunctionModel(md_base)
    self.assertTrue(md_base.var == func.var)
    self.assertTrue(md_base.out == func.out)
    self.assertTrue(md_base.name == func.name)
    self.assertTrue(md_base.runtime(1) == func.runtime)
    ## Copy preserves all attributes
    func_copy = func.copy()
    self.assertTrue(func_copy.var == func.var)
    self.assertTrue(func_copy.out == func.out)
    self.assertTrue(func_copy.name == func.name)
    self.assertTrue(func_copy.runtime == func.runtime)
def setUp(self):
    ## Linear limit state w/ MPP off initial guess
    self.beta_true = 3
    self.md = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: self.beta_true * 2 - x[0] - np.sqrt(3) * x[1],
            var=2,
            out=["g"],
        )
        >> gr.cp_marginals(
            x0=dict(dist="norm", loc=0, scale=1, sign=1),
            x1=dict(dist="norm", loc=0, scale=1, sign=1),
        )
        >> gr.cp_copula_independence()
    )
    ## Linear limit state w/ lognormal marginals
    self.md_log = (
        gr.Model()
        >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(
                g=gr.exp(gr.sqrt(2) * 1) - df.x * df.y
            ),
            var=["x", "y"],
            out=["g"],
        )
        >> gr.cp_marginals(
            x=dict(dist="lognorm", loc=0, scale=1, s=1),
            y=dict(dist="lognorm", loc=0, scale=1, s=1),
        )
        >> gr.cp_copula_independence()
    )
    # Known most-probable-point for the lognormal limit state
    self.df_mpp = gr.df_make(
        x=gr.exp(gr.sqrt(2)/2),
        y=gr.exp(gr.sqrt(2)/2),
        beta_g=1.0,
        g=0.0,
    )
    ## Cantilever beam for flatten test
    self.md_beam = models.make_cantilever_beam()
def test_comp_model(self):
    """Test model composition"""
    md_inner = (
        gr.Model("inner")
        >> gr.cp_function(fun=lambda x: x[0] + x[1], var=2, out=1)
        >> gr.cp_marginals(x0=dict(dist="norm", loc=0, scale=1))
        >> gr.cp_copula_independence()
    )

    ## Deterministic composition exposes the inner inputs directly
    md_det = gr.Model("outer_det") >> gr.cp_md_det(md=md_inner)
    self.assertTrue(set(md_det.var) == {"x0", "x1"})
    self.assertTrue(md_det.out == ["y0"])
    gr.eval_df(md_det, df=gr.df_make(x0=0, x1=0))

    ## Sample-based composition exposes distribution parameters as inputs
    md_sample = gr.Model("outer_det") >> gr.cp_md_sample(
        md=md_inner, param=dict(x0=("loc", "scale"))
    )
    self.assertTrue(set(md_sample.var) == {"x0_loc", "x0_scale", "x1"})
    self.assertTrue(set(md_sample.out) == {"y0"})
    gr.eval_df(md_sample, df=gr.df_make(x0_loc=0, x0_scale=1, x1=0))
def test_nls(self):
    ## Ground-truth model: exponential feature plus additive noise
    c_true = 2
    a_true = 1
    md_true = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: a_true * np.exp(x[0] * c_true) + x[1],
            var=["x", "epsilon"],
            out=["y"],
        )
        >> gr.cp_marginals(epsilon={"dist": "norm", "loc": 0, "scale": 0.5})
        >> gr.cp_copula_independence()
    )
    df_data = md_true >> gr.ev_monte_carlo(
        n=5, seed=101, df_det=gr.df_make(x=[0, 1, 2, 3, 4])
    )

    ## Model to fit: c and a are free parameters
    md_param = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: x[2] * np.exp(x[0] * x[1]),
            var=["x", "c", "a"],
            out=["y"],
        )
        >> gr.cp_bounds(c=[0, 4], a=[0.1, 2.0])
    )

    ## Fit the model
    md_fit = df_data >> gr.ft_nls(
        md=md_param,
        verbose=False,
        uq_method="linpool",
    )

    ## Unidentifiable model throws warning
    # -------------------------
    md_unidet = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: x[2] / x[3] * np.exp(x[0] * x[1]),
            var=["x", "c", "a", "z"],
            out=["y"],
        )
        >> gr.cp_bounds(c=[0, 4], a=[0.1, 2.0], z=[0, 1])
    )
    with self.assertWarns(RuntimeWarning):
        gr.fit_nls(
            df_data,
            md=md_unidet,
            uq_method="linpool",
        )

    ## True parameters in wide confidence region
    # -------------------------
    alpha = 1e-3
    self.assertTrue(
        (md_fit.density.marginals["c"].q(alpha / 2) <= c_true)
        and (c_true <= md_fit.density.marginals["c"].q(1 - alpha / 2))
    )
    self.assertTrue(
        (md_fit.density.marginals["a"].q(alpha / 2) <= a_true)
        and (a_true <= md_fit.density.marginals["a"].q(1 - alpha / 2))
    )

    ## Model with fixed parameter
    # -------------------------
    md_fixed = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: x[2] * np.exp(x[0] * x[1]),
            var=["x", "c", "a"],
            out=["y"],
        )
        >> gr.cp_bounds(c=[0, 4], a=[1, 1])
    )
    md_fit_fixed = df_data >> gr.ft_nls(
        md=md_fixed, verbose=False, uq_method="linpool"
    )
    # Test that fixed model can evaluate successfully
    gr.eval_monte_carlo(md_fit_fixed, n=1, df_det="nom")

    ## Trajectory model
    # -------------------------
    md_base = models.make_trajectory_linear()
    md_fit = data.df_trajectory_windowed >> gr.ft_nls(
        md=md_base, method="SLSQP", tol=1e-3
    )
    df_tmp = md_fit >> gr.ev_nominal(df_det="nom")
def test_comp_function(self):
    """Test comp_function()"""
    md_new0 = gr.comp_function(self.md, fun=lambda x: x, var=1, out=1)
    md_new1 = gr.comp_function(md_new0, fun=lambda x: x, var=1, out=1)
    ## Operations above should not affect self.md
    md_named = gr.comp_function(
        self.md,
        fun=lambda x: [x, 2 * x],
        var=["foo"],
        out=["bar1", "bar2"],
        name="test",
    )

    ## Default var and out names
    self.assertEqual(md_new0.var, ["x0"])
    self.assertEqual(md_new0.out, ["y0"])
    ## New default names iterate the counter
    self.assertEqual(set(md_new1.var), set(["x0", "x1"]))
    self.assertEqual(set(md_new1.out), set(["y0", "y1"]))
    ## Output names assigned correctly (also tests for copy issues)
    self.assertEqual(set(md_named.out), set(["bar1", "bar2"]))
    ## Function name assigned correctly
    self.assertEqual(md_named.functions[0].name, "test")

    ## Invariant checks
    with self.assertRaises(ValueError):
        # Missing function
        gr.comp_function(self.md, fun=None, var=["foo"], out=["bar"])
    with self.assertRaises(ValueError):
        # Missing var
        gr.comp_function(self.md, fun=lambda x: x, var=None, out=["bar"])
    with self.assertRaises(ValueError):
        # Missing out
        gr.comp_function(self.md, fun=lambda x: x, var=["foo"], out=None)
    with self.assertRaises(ValueError):
        # Intersection var / out names
        self.md >> gr.cp_function(lambda x: x, var=["x"], out=["x"], name="f0")
    with self.assertRaises(ValueError):
        # Non-unique function names
        self.md >> gr.cp_function(
            lambda x: x, var=1, out=1, name="f0"
        ) >> gr.cp_function(lambda x: x, var=1, out=1, name="f0")

    ## DAG invariant checks
    with self.assertRaises(ValueError):
        # Cycle by input
        self.md >> gr.cp_function(
            fun=lambda x: x[0], var=["y0"], out=1
        ) >> gr.cp_function(fun=lambda x: x[0], var=1, out=["y0"])
    with self.assertRaises(ValueError):
        # Non-unique output
        self.md >> gr.cp_function(
            fun=lambda x: x[0], var=1, out=["y0"]
        ) >> gr.cp_function(fun=lambda x: x[0], var=1, out=["y0"])

    ## Check vectorized builder
    md_vec = gr.comp_vec_function(
        self.md,
        fun=lambda df: df.assign(y0=df.x0),
        var=1,
        out=1,
    )
    self.assertTrue(
        gr.df_equal(
            gr.df_make(x0=0, y0=0),
            md_vec >> gr.ev_df(df=gr.df_make(x0=0)),
        )
    )
def test_nls(self):
    ## Setup: model with a fittable parameter x0 and a pinned parameter x2
    md_feat = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0] * x[1] + x[2], var=3, out=1)
        >> gr.cp_bounds(x0=[-1, +1], x2=[0, 0])
        >> gr.cp_marginals(x1=dict(dist="norm", loc=0, scale=1))
    )
    md_const = (
        gr.Model()
        >> gr.cp_function(fun=lambda x: x[0], var=1, out=1)
        >> gr.cp_bounds(x0=(-1, +1))
    )
    df_response = md_feat >> gr.ev_df(
        df=gr.df_make(x0=0.1, x1=[-1, -0.5, +0, +0.5, +1], x2=0)
    )
    df_data = df_response[["x1", "y0"]]

    ## Model with features recovers the generating parameter
    df_true = gr.df_make(x0=0.1)
    df_fit = md_feat >> gr.ev_nls(df_data=df_data, append=False)
    pd.testing.assert_frame_equal(
        df_fit,
        df_true,
        check_exact=False,
        check_dtype=False,
        check_column_type=False,
    )

    ## Fitting synonym
    md_feat_fit = df_data >> gr.ft_nls(md=md_feat, verbose=False)
    self.assertTrue(set(md_feat_fit.var) == set(["x1", "x2"]))

    ## Constant model
    df_const = gr.df_make(x0=0)
    df_fit = md_const >> gr.ev_nls(df_data=gr.df_make(y0=[-1, 0, +1]))
    pd.testing.assert_frame_equal(
        df_fit,
        df_const,
        check_exact=False,
        check_dtype=False,
        check_column_type=False,
    )

    ## Multiple restarts works: one row per restart
    df_multi = gr.eval_nls(md_feat, df_data=df_data, n_restart=2)
    self.assertTrue(df_multi.shape[0] == 2)

    ## Specified initial guess converges to the same optimum
    df_spec = gr.eval_nls(
        md_feat,
        df_data=df_data,
        df_init=gr.df_make(x0=0.5),
        append=False,
    )
    pd.testing.assert_frame_equal(
        df_spec,
        df_true,
        check_exact=False,
        check_dtype=False,
        check_column_type=False,
    )
    # Raises if incorrect guess data
    with self.assertRaises(ValueError):
        gr.eval_nls(md_feat, df_data=df_data, df_init=gr.df_make(foo=0.5))
def test_nls(self):
    ## Ground-truth model: exponential feature plus additive noise
    c_true = 2
    a_true = 1
    md_true = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: a_true * np.exp(x[0] * c_true) + x[1],
            var=["x", "epsilon"],
            out=["y"],
        )
        >> gr.cp_marginals(epsilon={"dist": "norm", "loc": 0, "scale": 0.5})
        >> gr.cp_copula_independence()
    )
    df_data = md_true >> gr.ev_sample(
        n=5, seed=101, df_det=gr.df_make(x=[0, 1, 2, 3, 4])
    )

    ## Model to fit: c and a are free parameters
    md_param = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: x[2] * np.exp(x[0] * x[1]),
            var=["x", "c", "a"],
            out=["y"],
        )
        >> gr.cp_bounds(c=[0, 4], a=[0.1, 2.0])
    )

    ## Fit the model
    md_fit = df_data >> gr.ft_nls(
        md=md_param,
        verbose=False,
        uq_method="linpool",
    )

    ## Unidentifiable model throws warning
    # -------------------------
    md_unidet = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: x[2] / x[3] * np.exp(x[0] * x[1]),
            var=["x", "c", "a", "z"],
            out=["y"],
        )
        >> gr.cp_bounds(c=[0, 4], a=[0.1, 2.0], z=[0, 1])
    )
    with self.assertWarns(RuntimeWarning):
        gr.fit_nls(
            df_data,
            md=md_unidet,
            uq_method="linpool",
        )

    ## True parameters in wide confidence region
    # -------------------------
    alpha = 1e-3
    self.assertTrue(
        (md_fit.density.marginals["c"].q(alpha / 2) <= c_true)
        and (c_true <= md_fit.density.marginals["c"].q(1 - alpha / 2))
    )
    self.assertTrue(
        (md_fit.density.marginals["a"].q(alpha / 2) <= a_true)
        and (a_true <= md_fit.density.marginals["a"].q(1 - alpha / 2))
    )

    ## Model with fixed parameter
    # -------------------------
    md_fixed = (
        gr.Model()
        >> gr.cp_function(
            fun=lambda x: x[2] * np.exp(x[0] * x[1]),
            var=["x", "c", "a"],
            out=["y"],
        )
        >> gr.cp_bounds(c=[0, 4], a=[1, 1])
    )
    md_fit_fixed = df_data >> gr.ft_nls(
        md=md_fixed, verbose=False, uq_method="linpool"
    )
    # Test that fixed model can evaluate successfully
    gr.eval_sample(md_fit_fixed, n=1, df_det="nom")

    ## Trajectory model
    # -------------------------
    md_base = models.make_trajectory_linear()
    md_fit = data.df_trajectory_windowed >> gr.ft_nls(
        md=md_base, method="SLSQP", tol=1e-3
    )
    df_tmp = md_fit >> gr.ev_nominal(df_det="nom")

    ## Select output for fitting
    # -------------------------
    # Split model has inconsistent "true" parameter value
    md_split = (
        gr.Model("Split")
        >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(
                f=1 * df.c * df.x,
                g=2 * df.c * df.x,
            ),
            var=["c", "x"],
            out=["f", "g"],
        )
        >> gr.cp_bounds(
            x=(-1, +1),
            c=(-1, +1),
        )
    )
    df_split = (
        gr.df_make(x=gr.linspace(-1, +1, 100))
        >> gr.tf_mutate(f=X.x, g=X.x)
    )
    # Fitting both outputs: cannot achieve mse ~= 0
    df_both = (
        df_split
        >> gr.ft_nls(md_split, out=["f", "g"])
        >> gr.ev_df(df_split >> gr.tf_rename(f_t=X.f, g_t=X.g))
        >> gr.tf_summarize(
            mse_f=gr.mse(X.f, X.f_t),
            mse_g=gr.mse(X.g, X.g_t),
        )
    )
    self.assertTrue(df_both.mse_f[0] > 0)
    self.assertTrue(df_both.mse_g[0] > 0)
    # Fitting "f" only: exact on f, off on g
    df_f = (
        df_split
        >> gr.ft_nls(md_split, out=["f"])
        >> gr.ev_df(df_split >> gr.tf_rename(f_t=X.f, g_t=X.g))
        >> gr.tf_summarize(
            mse_f=gr.mse(X.f, X.f_t),
            mse_g=gr.mse(X.g, X.g_t),
        )
    )
    self.assertTrue(df_f.mse_f[0] < 1e-16)
    self.assertTrue(df_f.mse_g[0] > 0)
    # Fitting "g" only: exact on g, off on f
    df_g = (
        df_split
        >> gr.ft_nls(md_split, out=["g"])
        >> gr.ev_df(df_split >> gr.tf_rename(f_t=X.f, g_t=X.g))
        >> gr.tf_summarize(
            mse_f=gr.mse(X.f, X.f_t),
            mse_g=gr.mse(X.g, X.g_t),
        )
    )
    self.assertTrue(df_g.mse_f[0] > 0)
    self.assertTrue(df_g.mse_g[0] < 1e-16)