def test_samples():
    samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
    rz = np.array([[1.0, 0.8], [0.8, 1.0]])
    ntf_obj = Decorrelate(samples_z=samples_z, corr_z=rz)
    np.testing.assert_allclose(ntf_obj.samples_u,
                               [[0.3, 0.19999999999999998], [0.2, 2.4000000000000004]],
                               rtol=1e-09)
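# The expected values above follow from the decorrelation these tests assert: H is the lower-triangular
# Cholesky factor of corr_z (see test_h below), and each uncorrelated sample u solves H @ u = z.
# A minimal NumPy check, independent of UQpy, reproducing the asserted numbers:

import numpy as np

samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
rz = np.array([[1.0, 0.8], [0.8, 1.0]])

H = np.linalg.cholesky(rz)                     # [[1.0, 0.0], [0.8, 0.6]]
samples_u = np.linalg.solve(H, samples_z.T).T  # [[0.3, 0.2], [0.2, 2.4]]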
plt.figure()
plt.title('Correlated standard normal samples')
plt.scatter(nataf_obj.samples_z[:, 0], nataf_obj.samples_z[:, 1])
plt.grid(True)
plt.xlabel('$Z_1$')
plt.ylabel('$Z_2$')
plt.show()

# %% md
#
# We can use the :code:`Decorrelate` class to transform the correlated standard normal samples to the
# uncorrelated standard normal space :code:`U`.

# %%
samples_u = Decorrelate(nataf_obj.samples_z, nataf_obj.corr_z).samples_u

# %% md
#
# We can visualize the uncorrelated (standard normal) samples by plotting them on axes of each distribution's range.

# %%
plt.figure()
plt.title('Uncorrelated standard normal samples')
plt.scatter(samples_u[:, 0], samples_u[:, 1])
plt.grid(True)
plt.xlabel('$U_1$')
plt.ylabel('$U_2$')
plt.show()
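# %% md
#
# As a quick sanity check, the sample correlation matrix of the decorrelated samples should be close to the
# identity matrix; any deviation is due only to the finite sample size.

# %%
import numpy as np  # typically already imported at the top of the example

print(np.corrcoef(samples_u[:, 0], samples_u[:, 1]))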
def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.ndarray] = None):
    """
    Runs FORM.

    :param seed_u: The initial starting point for the `Hasofer-Lind` algorithm, given as a point in the
     uncorrelated standard normal space of **U**. Either `seed_u` or `seed_x` must be provided, but not both.
    :param seed_x: The initial starting point for the `Hasofer-Lind` algorithm, given as a point in the
     parameter space of **X**. Either `seed_u` or `seed_x` must be provided, but not both.
    """
    self.logger.info("UQpy: Running FORM...")

    if seed_u is None and seed_x is None:
        seed = np.zeros(self.dimension)
    elif seed_u is None and seed_x is not None:
        self.nataf_object.run(samples_x=seed_x.reshape(1, -1), jacobian=False)
        seed_z = self.nataf_object.samples_z
        # Map the seed to the uncorrelated standard normal space.
        seed = np.squeeze(Decorrelate(seed_z, self.nataf_object.corr_z).samples_u)
    elif seed_u is not None and seed_x is None:
        seed = np.squeeze(seed_u)
    else:
        raise ValueError("UQpy: Only one seed (seed_x or seed_u) must be provided")

    u_record = list()
    x_record = list()
    g_record = list()
    alpha_record = list()
    error_record = list()

    converged = False
    k = 0
    beta = np.zeros(shape=(self.n_iterations + 1,))
    u = np.zeros([self.n_iterations + 1, self.dimension])
    u[0, :] = seed
    g_record.append(0.0)
    dg_u_record = np.zeros([self.n_iterations + 1, self.dimension])

    while not converged:
        self.logger.info("Iteration number: %i", k)

        # FORM always starts from the standard normal space
        if k == 0:
            if seed_x is not None:
                x = seed_x
            else:
                seed_z = Correlate(samples_u=seed.reshape(1, -1),
                                   corr_z=self.nataf_object.corr_z).samples_z
                self.nataf_object.run(samples_z=seed_z.reshape(1, -1), jacobian=True)
                x = self.nataf_object.samples_x
                self.jacobian_zx = self.nataf_object.jxz
        else:
            z = Correlate(u[k, :].reshape(1, -1), self.nataf_object.corr_z).samples_z
            self.nataf_object.run(samples_z=z, jacobian=True)
            x = self.nataf_object.samples_x
            self.jacobian_zx = self.nataf_object.jxz

        self.x = x
        u_record.append(u)
        x_record.append(x)
        self.logger.info("Design point Y: {0}\n".format(u[k, :])
                         + "Design point X: {0}\n".format(self.x)
                         + "Jacobian Jzx: {0}\n".format(self.jacobian_zx))

        # 2. Evaluate the limit-state function and its gradient at point u_k, and the direction cosines.
        dg_u, qoi, _ = self._derivatives(point_u=u[k, :],
                                         point_x=self.x,
                                         runmodel_object=self.runmodel_object,
                                         nataf_object=self.nataf_object,
                                         df_step=self.df_step,
                                         order="first")
        g_record.append(qoi)

        dg_u_record[k + 1, :] = dg_u
        norm_grad = np.linalg.norm(dg_u_record[k + 1, :])
        alpha = dg_u / norm_grad
        self.logger.info("Directional cosines (alpha): {0}\n".format(alpha)
                         + "Gradient (dg_y): {0}\n".format(dg_u_record[k + 1, :])
                         + "norm dg_y: {0}".format(norm_grad))

        self.alpha = alpha.squeeze()
        alpha_record.append(self.alpha)
        beta[k] = -np.inner(u[k, :].T, self.alpha)
        beta[k + 1] = beta[k] + qoi / norm_grad
        self.logger.info("Beta: {0}\n".format(beta[k])
                         + "Pf: {0}".format(stats.norm.cdf(-beta[k])))

        u[k + 1, :] = -beta[k + 1] * self.alpha

        if (self.tol1 is not None) and (self.tol2 is not None) and (self.tol3 is not None):
            error1 = np.linalg.norm(u[k + 1, :] - u[k, :])
            error2 = np.linalg.norm(beta[k + 1] - beta[k])
            error3 = np.linalg.norm(dg_u_record[k + 1, :] - dg_u_record[k, :])
            error_record.append([error1, error2, error3])
            if error1 <= self.tol1 and error2 <= self.tol2 and error3 < self.tol3:
                converged = True
            else:
                k = k + 1

        if (self.tol1 is None) and (self.tol2 is None) and (self.tol3 is None):
            error1 = np.linalg.norm(u[k + 1, :] - u[k, :])
            error2 = np.linalg.norm(beta[k + 1] - beta[k])
            error3 = np.linalg.norm(dg_u_record[k + 1, :] - dg_u_record[k, :])
            error_record.append([error1, error2, error3])
            if error1 <= 1e-3 or error2 <= 1e-3 or error3 < 1e-3:
                converged = True
            else:
                k = k + 1

        elif (self.tol1 is not None) and (self.tol2 is None) and (self.tol3 is None):
            error1 = np.linalg.norm(u[k + 1, :] - u[k, :])
            error_record.append(error1)
            if error1 <= self.tol1:
                converged = True
            else:
                k = k + 1

        elif (self.tol1 is None) and (self.tol2 is not None) and (self.tol3 is None):
            error2 = np.linalg.norm(beta[k + 1] - beta[k])
            error_record.append(error2)
            if error2 <= self.tol2:
                converged = True
            else:
                k = k + 1

        elif (self.tol1 is None) and (self.tol2 is None) and (self.tol3 is not None):
            error3 = np.linalg.norm(dg_u_record[k + 1, :] - dg_u_record[k, :])
            error_record.append(error3)
            if error3 < self.tol3:
                converged = True
            else:
                k = k + 1

        elif (self.tol1 is not None) and (self.tol2 is not None) and (self.tol3 is None):
            error1 = np.linalg.norm(u[k + 1, :] - u[k, :])
            error2 = np.linalg.norm(beta[k + 1] - beta[k])
            error_record.append([error1, error2])
            if error1 <= self.tol1 and error2 <= self.tol2:
                converged = True
            else:
                k = k + 1

        elif (self.tol1 is not None) and (self.tol2 is None) and (self.tol3 is not None):
            error1 = np.linalg.norm(u[k + 1, :] - u[k, :])
            error3 = np.linalg.norm(dg_u_record[k + 1, :] - dg_u_record[k, :])
            error_record.append([error1, error3])
            if error1 <= self.tol1 and error3 < self.tol3:
                converged = True
            else:
                k = k + 1

        elif (self.tol1 is None) and (self.tol2 is not None) and (self.tol3 is not None):
            error2 = np.linalg.norm(beta[k + 1] - beta[k])
            error3 = np.linalg.norm(dg_u_record[k + 1, :] - dg_u_record[k, :])
            error_record.append([error2, error3])
            if error2 <= self.tol2 and error3 < self.tol3:
                converged = True
            else:
                k = k + 1

        self.logger.error("Error: %s", error_record[-1])

        if converged is True or k > self.n_iterations:
            break

    if k > self.n_iterations:
        self.logger.info("UQpy: Maximum number of iterations {0} was reached before convergence."
                         .format(self.n_iterations))
        self.error_record = error_record
        self.u_record = [u_record]
        self.x_record = [x_record]
        self.g_record = [g_record]
        self.dg_u_record = [dg_u_record[:k]]
        self.alpha_record = [alpha_record]
    else:
        if self.call is None:
            self.beta_record = [beta[:k]]
            self.error_record = error_record
            self.beta = [beta[k]]
            self.DesignPoint_U = [u[k, :]]
            self.DesignPoint_X = [np.squeeze(self.x)]
            self.failure_probability = [stats.norm.cdf(-self.beta[-1])]
            self.iterations = [k]
            self.u_record = [u_record[:k]]
            self.x_record = [x_record[:k]]
            self.g_record = [g_record]
            self.dg_u_record = [dg_u_record[:k]]
            self.alpha_record = [alpha_record]
        else:
            self.beta_record = self.beta_record + [beta[:k]]
            self.beta = self.beta + [beta[k]]
            self.error_record = self.error_record + error_record
            self.DesignPoint_U = self.DesignPoint_U + [u[k, :]]
            self.DesignPoint_X = self.DesignPoint_X + [np.squeeze(self.x)]
            self.failure_probability = self.failure_probability + [stats.norm.cdf(-beta[k])]
            self.iterations = self.iterations + [k]
            self.u_record = self.u_record + [u_record[:k]]
            self.x_record = self.x_record + [x_record[:k]]
            self.g_record = self.g_record + [g_record]
            self.dg_u_record = self.dg_u_record + [dg_u_record[:k]]
            self.alpha_record = self.alpha_record + [alpha_record]
        self.call = True
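# The loop in run() implements the Hasofer-Lind-Rackwitz-Fiessler (HL-RF) recursion:
#     alpha_k    = grad g(u_k) / ||grad g(u_k)||
#     beta_{k+1} = -u_k . alpha_k + g(u_k) / ||grad g(u_k)||
#     u_{k+1}    = -beta_{k+1} * alpha_k
# Below is a minimal standalone sketch of that recursion (plain NumPy/SciPy, not the UQpy API) for a linear
# limit state g(u) = a @ u + b in the standard normal space, whose exact solution is beta = b / ||a|| and
# pf = Phi(-beta). The values of a and b are illustrative assumptions chosen so the result can be checked by hand.

import numpy as np
from scipy import stats

a = np.array([2.0, 1.0])  # assumed gradient of the linear limit state
b = 6.0                   # assumed offset; exact beta = 6 / sqrt(5) ~ 2.683


def g(u):
    return a @ u + b


u = np.zeros(2)                            # seed at the origin of the standard normal space
beta = 0.0
for _ in range(100):
    dg = a                                 # gradient of a linear limit state is constant
    norm_grad = np.linalg.norm(dg)
    alpha = dg / norm_grad
    beta = -u @ alpha + g(u) / norm_grad   # beta_{k+1} = beta_k + g(u_k) / ||grad g||
    u_new = -beta * alpha                  # u_{k+1} = -beta_{k+1} * alpha
    if np.linalg.norm(u_new - u) <= 1e-3:  # same 1e-3 default tolerance as in run()
        u = u_new
        break
    u = u_new

print("beta =", beta, "(exact:", b / np.linalg.norm(a), ")")
print("pf   =", stats.norm.cdf(-beta))
print("u*   =", u)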
def test_corr_z_1():
    samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
    rz = np.array([[1.0, 0.8], [0.8, 1.0]])
    ntf_obj = Decorrelate(samples_z=samples_z, corr_z=rz)
    assert (ntf_obj.corr_z == np.array([[1.0, 0.8], [0.8, 1.0]])).all()
def test_h():
    samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
    rz = np.array([[1.0, 0.8], [0.8, 1.0]])
    ntf_obj = Decorrelate(samples_z=samples_z, corr_z=rz)
    np.testing.assert_allclose(ntf_obj.H, [[1.0, 0.0], [0.8, 0.6]], rtol=1e-09)
def test_corr_z():
    rz = np.array([[1.0, 0.0], [0.0, 1.0]])
    with pytest.raises(Exception):
        Decorrelate(corr_z=rz)
def test_samples_u():
    samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
    with pytest.raises(Exception):
        Decorrelate(samples_z=samples_z)