    def test_propagate_uncertainty_error(self):
        '''
        It tests that a TypeError is raised when the model_uncertain
        argument of the function propagate_uncertainty is neither a
        Python function nor a Pyomo ConcreteModel.
        '''
        from idaes.apps.uncertainty_propagation.examples.rooney_biegler import (
            rooney_biegler_model)
        variable_name = ['asymptote', 'rate_constant']
        data = pd.DataFrame(data=[[1, 8.3], [2, 10.3], [3, 19.0],
                                  [4, 16.0], [5, 15.6], [7, 19.8]],
                            columns=['hour', 'y'])

        def SSE(model, data):
            expr = sum((data.y[i] - model.response_function[data.hour[i]])**2
                       for i in data.index)
            return expr

        parmest_class = parmest.Estimator(rooney_biegler_model, data,
                                          variable_name, SSE)
        obj, theta, cov = parmest_class.theta_est(calc_cov=True)
        model_uncertain = ConcreteModel()
        model_uncertain.asymptote = Var(initialize=15)
        model_uncertain.rate_constant = Var(initialize=0.5)
        model_uncertain.obj = Objective(
            expr=model_uncertain.asymptote
            * (1 - exp(-model_uncertain.rate_constant * 10)),
            sense=minimize)

        # Passing an int (1) instead of a function or ConcreteModel
        # (model_uncertain is built above but intentionally not passed)
        with pytest.raises(TypeError):
            propagate_results = propagate_uncertainty(1, theta, cov,
                                                      variable_name)
    def test_propagate_uncertainty(self):
        '''
        It tests the function propagate_uncertainty with Rooney & Biegler's
        model.
        '''
        from idaes.apps.uncertainty_propagation.examples.rooney_biegler import (
            rooney_biegler_model)
        variable_name = ['asymptote', 'rate_constant']
        data = pd.DataFrame(data=[[1, 8.3], [2, 10.3], [3, 19.0],
                                  [4, 16.0], [5, 15.6], [7, 19.8]],
                            columns=['hour', 'y'])

        def SSE(model, data):
            expr = sum((data.y[i] - model.response_function[data.hour[i]])**2
                       for i in data.index)
            return expr

        parmest_class = parmest.Estimator(rooney_biegler_model, data,
                                          variable_name, SSE)
        obj, theta, cov = parmest_class.theta_est(calc_cov=True)
        model_uncertain = ConcreteModel()
        model_uncertain.asymptote = Var(initialize=15)
        model_uncertain.rate_constant = Var(initialize=0.5)
        model_uncertain.obj = Objective(
            expr=model_uncertain.asymptote
            * (1 - exp(-model_uncertain.rate_constant * 10)),
            sense=minimize)

        propagate_results = propagate_uncertainty(model_uncertain, theta,
                                                  cov, variable_name)
        np.testing.assert_array_almost_equal(
            propagate_results.gradient_f,
            [0.9950625870024135, 0.9451480001755206])
        assert list(propagate_results.gradient_c) == []
        np.testing.assert_array_almost_equal(propagate_results.dsdp.toarray(),
                                             [[1., 0.], [0., 1.]])
        assert list(propagate_results.propagation_c) == []
        assert propagate_results.propagation_f == pytest.approx(
            5.45439337747349)
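    # Note (added sketch, not part of the original tests): the asserted
    # propagation_f value follows from the first-order error-propagation
    # formula sigma_f = g @ Sigma_p @ g, where g is the objective gradient
    # w.r.t. the uncertain parameters and Sigma_p is the covariance returned
    # by parmest. Because dsdp is the identity here (asserted above) and
    # there are no constraints, gradient_f is exactly g. np.asarray(cov)
    # below handles cov being either a DataFrame or a NumPy array:
    #
    #     g = np.asarray(propagate_results.gradient_f)
    #     sigma_f = g @ np.asarray(cov) @ g
    #     assert sigma_f == pytest.approx(5.45439337747349)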
    def test_propagate_uncertainty1(self):
        '''
        It tests the function propagate_uncertainty with

        min f:  p1*x1 + p2*(x2^2) + p1*p2
        s.t. c1: x1 + x2 = p1
             c2: x2 + x3 = p2
             0 <= x1, x2, x3 <= 10
             p1 = 10
             p2 = 5

        Variables = (x1, x2, x3)
        Parameters (fixed variables) = (p1, p2)
        '''
        ### Create optimization model
        m = ConcreteModel()
        m.dual = Suffix(direction=Suffix.IMPORT)

        m.x1 = Var()
        m.x2 = Var()
        m.x3 = Var()

        # Define parameters
        m.p1 = Var(initialize=10)
        m.p2 = Var(initialize=5)
        m.p1.fix()
        m.p2.fix()

        # Define constraints
        m.con1 = Constraint(expr=m.x1 + m.x2 - m.p1 == 0)
        m.con2 = Constraint(expr=m.x2 + m.x3 - m.p2 == 0)

        # Define objective
        m.obj = Objective(expr=m.p1 * m.x1 + m.p2 * (m.x2**2) + m.p1 * m.p2,
                          sense=minimize)

        ### Solve optimization model
        opt = SolverFactory('ipopt', tee=True)
        opt.solve(m)

        ### Analytic solution
        '''
        At the optimal solution, none of the bounds are active. As long as
        the active set does not change (i.e., none of the bounds become
        active), the first order optimality conditions reduce to a simple
        linear system.
        '''
        # dual variables (multipliers)
        v2_ = 0
        v1_ = m.p1()

        # primal variables
        x2_ = (v1_ + v2_) / (2 * m.p2())
        x1_ = m.p1() - x2_
        x3_ = m.p2() - x2_

        ### Analytic sensitivity
        '''
        Using the analytic solution above, we can compute the sensitivities
        of x and v to perturbations in p1 and p2. The matrix dx_dp contains
        the sensitivities of x to perturbations in p.
        '''
        # Initialize sensitivity matrix Nx x Np
        # Rows: variables x
        # Columns: parameters p
        dx_dp = np.zeros((3, 2))
        # dx2/dp1 = 1/(2 * p2)
        dx_dp[1, 0] = 1 / (2 * m.p2())
        # dx2/dp2 = -(v1 + v2)/(2 * p2**2)
        dx_dp[1, 1] = -(v1_ + v2_) / (2 * m.p2()**2)
        # dx1/dp1 = 1 - dx2/dp1
        dx_dp[0, 0] = 1 - dx_dp[1, 0]
        # dx1/dp2 = 0 - dx2/dp2
        dx_dp[0, 1] = 0 - dx_dp[1, 1]
        # dx3/dp1 = 0 - dx2/dp1
        dx_dp[2, 0] = 0 - dx_dp[1, 0]
        # dx3/dp2 = 1 - dx2/dp2
        dx_dp[2, 1] = 1 - dx_dp[1, 1]

        '''
        Similarly, we can compute the gradients df_dx, df_dp and the
        Jacobians dc_dx, dc_dp.
        '''
        # Initialize 1 x 3 array to store (\partial f)/(\partial x)
        # Elements: variables x
        df_dx = np.zeros(3)
        # df/dx1 = p1
        df_dx[0] = m.p1()
        # df/dx2 = 2 * p2 * x2
        df_dx[1] = 2 * m.p2() * x2_
        # df/dx3 = 0

        # Initialize 1 x 2 array to store (\partial f)/(\partial p)
        # Elements: parameters p
        df_dp = np.zeros(2)
        # df/dp1 = x1 + p2
        df_dp[0] = x1_ + m.p2()
        # df/dp2 = x2**2 + p1
        df_dp[1] = x2_**2 + m.p1()

        # Initialize 2 x 3 array to store (\partial c)/(\partial x)
        # Rows: constraints c
        # Columns: variables x
        dc_dx = np.zeros((2, 3))
        # dc1/dx1 = 1
        dc_dx[0, 0] = 1
        # dc1/dx2 = 1
        dc_dx[0, 1] = 1
        # dc2/dx2 = 1
        dc_dx[1, 1] = 1
        # dc2/dx3 = 1
        dc_dx[1, 2] = 1
        # Remaining entries are 0

        # Initialize 2 x 2 array to store (\partial c)/(\partial p)
        # Rows: constraints c
        # Columns: parameters p
        dc_dp = np.zeros((2, 2))
        # dc1/dp1 = -1
        dc_dp[0, 0] = -1
        # dc2/dp2 = -1
        dc_dp[1, 1] = -1

        ### Uncertainty propagation
        '''
        Now let's test the uncertainty propagation package. We will assume
        p has covariance sigma_p = [[2, 0], [0, 1]].
        '''
        ## Prepare inputs
        # Covariance matrix
        sigma_p = np.array([[2, 0], [0, 1]])

        # Nominal values for uncertain parameters
        theta = {'p1': m.p1(), 'p2': m.p2()}

        # Names of uncertain parameters
        theta_names = ['p1', 'p2']

        # Important to unfix the parameters!
        # Otherwise k_aug will complain about too few degrees of freedom
        m.p1.unfix()
        m.p2.unfix()

        ## Run package
        results = propagate_uncertainty(m, theta, sigma_p, theta_names)

        ## Check results
        tmp_f = (df_dp + df_dx @ dx_dp)
        sigma_f = tmp_f @ sigma_p @ tmp_f.transpose()

        tmp_c = (dc_dp + dc_dx @ dx_dp)
        sigma_c = tmp_c @ sigma_p @ tmp_c.transpose()

        # This currently just checks that the order of the outputs did not
        # change.
        # TODO: improve test robustness by using this information to set
        # var_idx and theta_idx. This way the test will still work
        # regardless of the order. In other words, the analytic solution
        # needs to be reordered to match the variable/constraint order from
        # this package. Alternately, the results could be converted into a
        # Pandas DataFrame.
        assert results.col == ['x1', 'x2', 'p1', 'p2', 'x3']
        assert results.row == ['con1', 'con2', 'obj']
        var_idx = np.array([True, True, False, False, True])
        theta_idx = np.array([False, False, True, True, False])

        # Check that the gradient of the objective w.r.t. x matches
        np.testing.assert_array_almost_equal(results.gradient_f[var_idx],
                                             np.array(df_dx))

        # Check that the gradient of the objective w.r.t. p (parameters)
        # matches
        np.testing.assert_array_almost_equal(results.gradient_f[theta_idx],
                                             np.array(df_dp))

        # Check that the Jacobian of the constraints w.r.t. x matches
        np.testing.assert_array_almost_equal(
            results.gradient_c.toarray()[:, var_idx], np.array(dc_dx))

        # Check that the Jacobian of the constraints w.r.t. p (parameters)
        # matches
        np.testing.assert_array_almost_equal(
            results.gradient_c.toarray()[:, theta_idx], np.array(dc_dp))

        # Check that the NLP sensitivity results for the variables (x) match
        np.testing.assert_array_almost_equal(
            results.dsdp.toarray()[var_idx, :], np.array(dx_dp))

        # Check that the NLP sensitivity results for the parameters (p)
        # match
        np.testing.assert_array_almost_equal(
            results.dsdp.toarray()[theta_idx, :],
            np.array([[1, 0], [0, 1]]))

        # Check that the uncertainty propagation results for the constraints
        # match
        np.testing.assert_array_almost_equal(results.propagation_c,
                                             np.sum(sigma_c))

        # Check that the uncertainty propagation results for the objective
        # match
        assert results.propagation_f == pytest.approx(sigma_f)
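    # Note (added sketch, not part of the original test): an equivalent way
    # to form tmp_f is to contract the full objective gradient with the NLP
    # sensitivities. Since results.dsdp stacks dx_dp in the x rows and the
    # identity in the p rows (verified by the assertions above, and assuming
    # its columns follow theta_names order), gradient_f @ dsdp equals
    # df_dx @ dx_dp + df_dp = tmp_f:
    #
    #     tmp_f2 = results.gradient_f @ results.dsdp.toarray()
    #     sigma_f2 = tmp_f2 @ sigma_p @ tmp_f2.transpose()
    #     assert sigma_f2 == pytest.approx(sigma_f)
    #
    # The TODO above could be addressed the same way: key the outputs on
    # results.col with a pandas Series so the checks no longer depend on
    # the column order, e.g.
    #
    #     grad_f = pd.Series(results.gradient_f, index=results.col)
    #     np.testing.assert_array_almost_equal(
    #         grad_f[['x1', 'x2', 'x3']].to_numpy(), df_dx)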