def setInitialGuess(problem, p, n_params, params_in_vector=True):
    """Load the initial-guess values `p` into the Albany problem's parameters.

    With `params_in_vector` True, all `n_params` entries of `p` are packed
    into the single distributed parameter vector at index 0; otherwise each
    entry is written into its own scalar parameter vector at index j.
    """
    if not params_in_vector:
        # One scalar Tpetra vector per parameter index.
        for idx in range(n_params):
            vec = Tpetra.Vector(problem.getParameterMap(idx), dtype="d")
            vec[0] = p[idx]
            problem.setParameter(idx, vec)
        return
    # Single distributed vector holding every entry of p.
    vec = Tpetra.Vector(problem.getParameterMap(0), dtype="d")
    for idx in range(n_params):
        vec[idx] = p[idx]
    problem.setParameter(0, vec)
def set_theta_star(self, theta_star):
    """Store `theta_star` on the instance and push it into the problem.

    Mirrors the module's parameter-loading convention: a single packed
    vector at parameter index 0 when `self.params_in_vector` is set,
    otherwise one scalar vector per parameter index.
    """
    self.theta_star = theta_star
    if self.params_in_vector:
        vec = Tpetra.Vector(self.problem.getParameterMap(0), dtype="d")
        for idx in range(self.n_params):
            vec[idx] = theta_star[idx]
        self.problem.setParameter(0, vec)
        return
    for idx in range(self.n_params):
        vec = Tpetra.Vector(self.problem.getParameterMap(idx), dtype="d")
        vec[0] = theta_star[idx]
        self.problem.setParameter(idx, vec)
def evaluate_responses(X, Y, problem, recompute=False):
    """Evaluate two cumulative response contributions on the X x Y grid.

    Results are cached on disk: unless `recompute` is True, a previous run's
    'Z1_2.txt'/'Z2_2.txt' files are loaded instead of re-solving.

    Parameters
    ----------
    X, Y : sequences of parameter values for entries 0 and 1 of the
        (vector-valued) parameter 0.  Assumes that parameter vector has at
        least two local entries — TODO confirm against the input deck.
    problem : Albany problem handle (unused when the cache is hit).
    recompute : force re-solving even if cache files exist.

    Returns
    -------
    Z1, Z2 : (len(Y), len(X)) arrays of response contributions (0,0) and (0,1).
    """
    if not recompute and os.path.isfile('Z1_2.txt'):
        Z1 = np.loadtxt('Z1_2.txt')
        Z2 = np.loadtxt('Z2_2.txt')
    else:
        # Fix: the original fetched MPI.COMM_WORLD and its rank here but
        # never used either value; the dead locals are removed.
        parameter_map = problem.getParameterMap(0)
        parameter = Tpetra.Vector(parameter_map, dtype="d")
        n_x = len(X)
        n_y = len(Y)
        Z1 = np.zeros((n_y, n_x))
        Z2 = np.zeros((n_y, n_x))
        for i in range(n_x):
            parameter[0] = X[i]
            for j in range(n_y):
                parameter[1] = Y[j]
                problem.setParameter(0, parameter)
                problem.performSolve()
                Z1[j, i] = problem.getCumulativeResponseContribution(0, 0)
                Z2[j, i] = problem.getCumulativeResponseContribution(0, 1)
        # Persist so later calls can skip the (expensive) solves.
        np.savetxt('Z1_2.txt', Z1)
        np.savetxt('Z2_2.txt', Z2)
    return Z1, Z2
def test_all(self):
    """Sweep parameter 0 over five values on a fixed 10x10 mesh and compare
    the first response against precomputed targets (asserted on rank 0 only).
    """
    cls = self.__class__
    rank = cls.comm.getRank()
    file_dir = os.path.dirname(__file__)

    # Create an Albany problem:
    filename = "input_dirichlet_mixed_paramsT.yaml"
    parameter = Utils.createParameterList(file_dir + "/" + filename, cls.parallelEnv)
    # Pin the mesh resolution so the hard-coded target values below stay valid.
    parameter.sublist("Discretization").set("1D Elements", 10)
    parameter.sublist("Discretization").set("2D Elements", 10)
    problem = Utils.createAlbanyProblem(parameter, cls.parallelEnv)
    parameter_map_0 = problem.getParameterMap(0)
    para_0_new = Tpetra.Vector(parameter_map_0, dtype="d")
    parameter_map_1 = problem.getParameterMap(1)
    para_1_new = Tpetra.Vector(parameter_map_1, dtype="d")
    # NOTE(review): para_1_new is filled but never passed to
    # problem.setParameter(1, ...) — confirm whether that call is missing or
    # parameter 1 is meant to keep its value from the input file.
    para_1_new[:] = 0.333333
    n_values = 5
    para_0_values = np.linspace(-1, 1, n_values)
    responses = np.zeros((n_values, ))
    # Reference QoI values for the five parameter-0 samples above.
    responses_target = np.array(
        [0.69247527, 0.48990929, 0.35681844, 0.29320271, 0.2990621])
    tol = 1e-8
    for i in range(0, n_values):
        # Re-solve for each parameter value and record response 0.
        para_0_new[:] = para_0_values[i]
        problem.setParameter(0, para_0_new)
        problem.performSolve()
        response = problem.getResponse(0)
        responses[i] = response.getData()[0]
    print("p = " + str(para_0_values))
    print("QoI = " + str(responses))
    if rank == 0:
        self.assertTrue(
            np.abs(np.amax(responses - responses_target)) < tol)
def importanceSamplingEstimator(theta_0, C, theta_star, F_star, P_star,
                                samples_0, problem, F_id=1,
                                params_in_vector=True):
    """Importance-sampling estimate of P[F > F_star[i]] for each threshold.

    For each threshold i, the nominal samples (centered on `theta_0`) are
    shifted to be centered on `theta_star[i]`; the problem is solved at each
    shifted sample and, when response contribution `F_id` exceeds
    `F_star[i]`, the corresponding importance weight is accumulated.

    Parameters
    ----------
    theta_0 : nominal parameter vector (length n_params).
    C : covariance matrix of the sampling distribution.
    theta_star : (n_l, n_params) array of shifted centers.
    F_star : length-n_l array of response thresholds.
    P_star : length-n_l array of normalizing probabilities.
    samples_0 : (n_samples, n_params) array of nominal samples.
    problem : Albany problem handle.
    F_id : index of the cumulative response contribution used as F.
    params_in_vector : True if all parameters live in one packed vector.

    Returns
    -------
    P : length-n_l array of estimated probabilities.
    """
    invC = np.linalg.inv(C)
    n_l = len(F_star)
    P = np.zeros((n_l, ))
    n_samples = np.shape(samples_0)[0]
    n_params = np.shape(samples_0)[1]
    # Loop over the lambdas
    for i in range(0, n_l):
        # Loop over the samples
        for j in range(0, n_samples):
            # Shift the nominal sample so it is centered on theta_star[i].
            sample = samples_0[j, :] + theta_star[i, :] - theta_0
            if params_in_vector:
                parameter_map = problem.getParameterMap(0)
                parameter = Tpetra.Vector(parameter_map, dtype="d")
                # Fix: the original reused `j` as the inner index, shadowing
                # the sample index; use `k` as in the else-branch below.
                for k in range(0, n_params):
                    parameter[k] = sample[k]
                problem.setParameter(0, parameter)
            else:
                for k in range(0, n_params):
                    parameter_map = problem.getParameterMap(k)
                    parameter = Tpetra.Vector(parameter_map, dtype="d")
                    parameter[0] = sample[k]
                    problem.setParameter(k, parameter)
            problem.performSolve()
            # Importance weight: exp(-(theta*-theta_0)^T C^-1 (sample-theta*)).
            if problem.getCumulativeResponseContribution(0, F_id) > F_star[i]:
                P[i] += np.exp(-invC.dot(theta_star[i, :] - theta_0)
                               .dot(sample - theta_star[i, :]))
        P[i] = P_star[i] * P[i] / n_samples
    return P
def main(parallelEnv):
    """Monte-Carlo sweep of a scalar parameter through an Albany solve.

    Draws 200 uniform samples of the first parameter, solves the problem at
    each, collects the first response, and (on rank 0, when `printPlot` is
    set) saves histograms and a scatter plot to 'UQ.jpeg'.
    """
    comm = MPI.COMM_WORLD
    myGlobalRank = comm.rank

    # Create an Albany problem:
    filename = "input_dirichletT.yaml"
    parameter = Utils.createParameterList(filename, parallelEnv)
    problem = Utils.createAlbanyProblem(parameter, parallelEnv)

    param_vec = Tpetra.Vector(problem.getParameterMap(0), dtype="d")

    N = 200
    p_min = -2.
    p_max = 2.

    # Generate N samples randomly chosen in [p_min, p_max]:
    p = np.random.uniform(p_min, p_max, N)
    QoI = np.zeros((N,))

    # Loop over the N samples and evaluate the quantity of interest:
    for sample_id in range(N):
        param_vec[0] = p[sample_id]
        problem.setParameter(0, param_vec)
        problem.performSolve()
        QoI[sample_id] = problem.getResponse(0).getData()[0]

    if myGlobalRank == 0 and printPlot:
        f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 6))
        ax1.hist(p)
        ax1.set_ylabel('Counts')
        ax1.set_xlabel('Random parameter')
        ax2.scatter(p, QoI)
        ax2.set_ylabel('Quantity of interest')
        ax2.set_xlabel('Random parameter')
        ax3.hist(QoI)
        ax3.set_ylabel('Counts')
        ax3.set_xlabel('Quantity of interest')
        plt.savefig('UQ.jpeg', dpi=800)
        plt.close()
def mixedImportanceSamplingEstimator(theta_0, C, theta_star, F_star, P_star,
                                     samples_0, problem, angle_1, angle_2,
                                     F_id=1, params_in_vector=True):
    """Importance-sampling estimate of P[F > F_star[i]] with an angular shortcut.

    Like `importanceSamplingEstimator`, but first computes the (normalized)
    sensitivity direction at `theta_star[i]` and classifies each sample by
    the angle between that direction and the sample's offset from `theta_0`:
    angles below `angle_1` are assumed above the threshold, angles above
    `angle_2` below it, and only the ambiguous cone in between triggers a
    full solve.

    Returns the length-n_l array P of estimated probabilities.
    """
    invC = np.linalg.inv(C)
    n_l = len(F_star)
    P = np.zeros((n_l, ))
    n_samples = np.shape(samples_0)[0]
    n_params = np.shape(samples_0)[1]
    # ("Weigth" is the spelling of the external PyAlbany API; keep it.)
    problem.updateCumulativeResponseContributionWeigth(0, 0, -1)
    problem.updateCumulativeResponseContributionWeigth(0, F_id, 0)
    # Loop over the lambdas
    for i in range(0, n_l):
        # Compute the normal of I - lambda F (= normal of F)
        n_theta_star = np.zeros((n_params, ))
        if params_in_vector:
            # Bug fix: the original called problem.getParameter(0) here,
            # which returns a vector, not the map needed by Tpetra.Vector;
            # every sibling function uses getParameterMap(0).
            parameter_map = problem.getParameterMap(0)
            parameter = Tpetra.Vector(parameter_map, dtype="d")
            for j in range(0, n_params):
                parameter[j] = theta_star[i, j]
            problem.setParameter(0, parameter)
        else:
            for k in range(0, n_params):
                parameter_map = problem.getParameterMap(k)
                parameter = Tpetra.Vector(parameter_map, dtype="d")
                parameter[0] = theta_star[i, k]
                problem.setParameter(k, parameter)
        problem.performSolve()
        if params_in_vector:
            n_theta_star = -problem.getSensitivity(0, 0).getData(0)
        else:
            for k in range(0, n_params):
                n_theta_star[k] = -problem.getSensitivity(0, k).getData(0)[0]
        norm = np.linalg.norm(n_theta_star)
        n_theta_star /= norm
        # Loop over the samples
        for j in range(0, n_samples):
            vector_2 = samples_0[j, :] - theta_0
            unit_vector_2 = vector_2 / np.linalg.norm(vector_2)
            dot_product = np.dot(n_theta_star, unit_vector_2)
            shifted_sample_angles = np.arccos(dot_product)
            sample = samples_0[j, :] + theta_star[i, :] - theta_0
            if shifted_sample_angles < angle_1:
                # Tight cone around the sensitivity direction: assume above.
                current_F_above = True
            elif shifted_sample_angles > angle_2:
                # Wide angle: assume below the threshold.
                current_F_above = False
            else:
                # Ambiguous: solve the problem to decide.
                if params_in_vector:
                    # Bug fix: getParameter(0) -> getParameterMap(0), as above.
                    parameter_map = problem.getParameterMap(0)
                    parameter = Tpetra.Vector(parameter_map, dtype="d")
                    # Fix: original reused `j` here, shadowing the sample index.
                    for k in range(0, n_params):
                        parameter[k] = sample[k]
                    problem.setParameter(0, parameter)
                else:
                    for k in range(0, n_params):
                        parameter_map = problem.getParameterMap(k)
                        parameter = Tpetra.Vector(parameter_map, dtype="d")
                        parameter[0] = sample[k]
                        problem.setParameter(k, parameter)
                problem.performSolve()
                current_F_above = problem.getCumulativeResponseContribution(
                    0, F_id) > F_star[i]
            if current_F_above:
                # Importance weight exp(-(theta*-theta_0)^T C^-1 (sample-theta*)).
                P[i] += np.exp(-invC.dot(theta_star[i, :] - theta_0)
                               .dot(sample - theta_star[i, :]))
        P[i] = P_star[i] * P[i] / n_samples
    return P
def test_all(self):
    """Run solve -> analysis -> solve on the mixed-parameter Dirichlet
    problem and check the optimized parameters and responses against
    precomputed targets (asserted on rank 0 only); finally reset
    parameter 0 to zero and check the resulting response.
    """
    cls = self.__class__
    rank = cls.comm.getRank()
    file_dir = os.path.dirname(__file__)

    # Create an Albany problem:
    filename = "input_dirichlet_mixed_paramsT.yaml"
    parameter = Utils.createParameterList(
        file_dir + "/" + filename, cls.parallelEnv
    )
    # Pin the mesh resolution so the hard-coded targets below stay valid.
    parameter.sublist("Discretization").set("1D Elements", 10)
    parameter.sublist("Discretization").set("2D Elements", 10)
    problem = Utils.createAlbanyProblem(parameter, cls.parallelEnv)
    # Reference values for this mesh / input deck.
    g_target_before = 0.35681844
    g_target_after = 0.17388298
    g_target_2 = 0.19570272
    p_0_target = 0.39886689
    p_1_norm_target = 5.37319376038225
    tol = 1e-8
    problem.performSolve()
    response_before_analysis = problem.getResponse(0)
    # performAnalysis() updates the parameters in place (optimization step).
    problem.performAnalysis()
    para_0 = problem.getParameter(0)
    para_1 = problem.getParameter(1)
    print(para_0.getData())
    print(para_1.getData())
    para_1_norm = Utils.norm(para_1.getData(), cls.comm)
    print(para_1_norm)
    if rank == 0:
        self.assertTrue(np.abs(para_0[0] - p_0_target) < tol)
        self.assertTrue(np.abs(para_1_norm - p_1_norm_target) < tol)
    # Re-solve with the optimized parameters.
    problem.performSolve()
    response_after_analysis = problem.getResponse(0)
    print("Response before analysis " + str(response_before_analysis.getData()))
    print("Response after analysis " + str(response_after_analysis.getData()))
    if rank == 0:
        self.assertTrue(np.abs(response_before_analysis[0] - g_target_before) < tol)
        self.assertTrue(np.abs(response_after_analysis[0] - g_target_after) < tol)
    # Overwrite parameter 0 with zeros and check the response once more.
    parameter_map_0 = problem.getParameterMap(0)
    para_0_new = Tpetra.Vector(parameter_map_0, dtype="d")
    para_0_new[:] = 0.0
    problem.setParameter(0, para_0_new)
    problem.performSolve()
    response = problem.getResponse(0)
    print("Response after setParameter " + str(response.getData()))
    if rank == 0:
        self.assertTrue(np.abs(response[0] - g_target_2) < tol)