def test_sub_two_scalars(self):
    """Subtraction of two constant (zero-Jacobian) Ad_arrays; operands stay unchanged."""
    a = Ad_array(1, 0)
    b = Ad_array(3, 0)
    c = a - b
    self.assertTrue(c.val == -2 and c.jac == 0)
    self.assertTrue(a.val == 1 and a.jac == 0)
    # Bug fix: the last check previously re-tested a.jac instead of b.jac.
    self.assertTrue(b.val == 3 and b.jac == 0)
def test_sub_two_ad_variables(self):
    """Subtraction of two AD variables; result and untouched operands checked."""
    lhs = Ad_array(4, 1.0)
    rhs = Ad_array(9, 3)
    diff = lhs - rhs
    self.assertTrue(np.allclose(diff.val, -5) and np.allclose(diff.jac, -2))
    self.assertTrue(lhs.val == 4 and np.allclose(lhs.jac, 1.0))
    self.assertTrue(rhs.val == 9 and rhs.jac == 3)
def test_add_two_scalars(self):
    """Addition of two constant Ad_arrays; operands must remain unchanged."""
    lhs = Ad_array(1, 0)
    rhs = Ad_array(-10, 0)
    total = lhs + rhs
    self.assertTrue(total.val == -9 and total.jac == 0)
    self.assertTrue(lhs.val == 1 and lhs.jac == 0)
    self.assertTrue(rhs.val == -10 and rhs.jac == 0)
def test_mul_scal_ad_scal(self):
    """Product of two constant Ad_arrays; operands must remain unchanged."""
    lhs = Ad_array(3, 0)
    rhs = Ad_array(2, 0)
    prod = lhs * rhs
    self.assertTrue(prod.val == 6 and prod.jac == 0)
    self.assertTrue(lhs.val == 3 and lhs.jac == 0)
    self.assertTrue(rhs.val == 2 and rhs.jac == 0)
def test_mul_ad_var_ad_var(self):
    """Product rule for two AD variables: jac = a.val*b.jac + b.val*a.jac."""
    lhs = Ad_array(3, 3)
    rhs = Ad_array(2, -4)
    prod = lhs * rhs
    # Expected: val = 3*2 = 6, jac = 3*(-4) + 2*3 = -6.
    self.assertTrue(prod.val == 6 and prod.jac == -6)
    self.assertTrue(lhs.val == 3 and lhs.jac == 3)
    self.assertTrue(rhs.val == 2 and rhs.jac == -4)
def apply(self, mobility_inner, direction_inner, mobility_bound, direction_bound):
    """Compute transmissibility via upwinding over faces.

    Use the monotonicity expression for deciding directionality. Idea:
    'face value' = 'left cell value' * Heaviside('flux from left')
                 + 'right cell value' * Heaviside('flux from right').

    Parameters:
        mobility_inner: cell-wise mobility; plain array or Ad_array.
        direction_inner: cell-wise direction-determining quantity; plain
            array or Ad_array.
        mobility_bound: face-wise boundary mobility data (used on Dirichlet
            faces).
        direction_bound: face-wise boundary direction data (used on
            Dirichlet faces).

    Returns:
        Upwinded face mobility; an Ad_array if the inputs carry AD data.
    """
    # TODO only implemented for scalar relative permeabilities so far
    # TODO so far not for periodic boundary conditions.
    # Shorthands for private attributes: regularized Heaviside; and, for
    # i in {0, 1}, the cell-to-face restriction to the left/right neighbor
    # cell and the matching Dirichlet-face indicator.
    hs = self._heaviside
    cf_inner = self._cf_inner
    cf_is_dir = self._cf_is_dir
    # Determine direction-determining cell values to the left (0) and
    # right (1) of each face. Use Dirichlet boundary data where suitable.
    # Neglect Neumann boundaries since face transmissibilities at Neumann
    # boundaries anyhow do not play a role.
    # Determine the Jacobian manually - only in the interior of the domain.
    # NOTE(review): on Dirichlet faces only the value entries are
    # overridden; the Jacobian rows of those faces keep the interior
    # contribution. This matches "only in the interior" above, but confirm
    # downstream consumers expect non-zero jac rows on Dirichlet faces.
    if isinstance(direction_inner, Ad_array):
        dir_f_val = [
            cf_inner[i] * direction_inner.val for i in range(0, 2)
        ]
        for i in range(0, 2):
            dir_f_val[i][cf_is_dir[i]] = direction_bound[cf_is_dir[i]]
        dir_f_jac = [
            cf_inner[i] * direction_inner.jac for i in range(0, 2)
        ]
        dir_f = [Ad_array(dir_f_val[i], dir_f_jac[i]) for i in range(0, 2)]
    else:
        # Plain-array case: override Dirichlet faces in place.
        dir_f = [cf_inner[i] * direction_inner for i in range(0, 2)]
        for i in range(0, 2):
            dir_f[i][cf_is_dir[i]] = direction_bound[cf_is_dir[i]]
    # Do the same for the mobility as for the direction-determining arrays.
    if isinstance(mobility_inner, Ad_array):
        mob_f_val = [cf_inner[i] * mobility_inner.val for i in range(0, 2)]
        for i in range(0, 2):
            mob_f_val[i][cf_is_dir[i]] = mobility_bound[cf_is_dir[i]]
        mob_f_jac = [cf_inner[i] * mobility_inner.jac for i in range(0, 2)]
        mob_f = [Ad_array(mob_f_val[i], mob_f_jac[i]) for i in range(0, 2)]
    else:
        mob_f = [cf_inner[i] * mobility_inner for i in range(0, 2)]
        for i in range(0, 2):
            mob_f[i][cf_is_dir[i]] = mobility_bound[cf_is_dir[i]]
    # Evaluate the Heaviside function of the "flux directions":
    # hs_f_01 ~ flux from the left neighbor, hs_f_10 ~ from the right.
    hs_f_01 = hs(dir_f[0] - dir_f[1])
    hs_f_10 = hs(dir_f[1] - dir_f[0])
    # Determine the face mobility by utilizing the general idea (see above).
    face_mobility = mob_f[0] * hs_f_01 + mob_f[1] * hs_f_10
    return face_mobility
def _apply_ad(self, cellwise_field) -> Ad_array:
    """Compute transmissibility via harmonic averaging over faces.

    Parameters:
        cellwise_field: Ad_array of a cell-wise field. Supported shapes of
            ``val``: 1d (scalar field per cell) or 3d with the two leading
            dimensions equal to 3 (a 3x3 tensor per cell); ``jac`` is
            assumed to have the matching shape — TODO confirm against
            callers.

    Returns:
        Ad_array of harmonically averaged face transmissibilities.

    Raises:
        RuntimeError: for Aavatsmark transmissibilities (not implemented
            in AD) or an unsupported field shape.
    """
    # References to private variables
    data = self._data
    tpfa = self._tpfa
    if data.get("Aavatsmark_transmissibilities", False):
        raise RuntimeError(
            "AD version of Aavatsmark_transmissibilities not implemented.")
    # Get connectivity and grid based data: cell indices per half-face
    # (plain and periodic variants) and face-center-to-cell-center vectors.
    ci = tpfa.ci
    ci_periodic = tpfa.ci_periodic
    fc_cc = tpfa.fc_cc
    # Euclidean length of each face-center-to-cell-center vector.
    dist_face_cell = np.power(np.power(fc_cc, 2).sum(axis=0), 0.5)
    # Consider two cases: scalar and tensor valued fields.
    # assert (cellwise_field.val, np.ndarray)
    # Case 1: Scalar valued fields. Half-face transmissibility = field / d.
    # Fancy indexing copies, so the in-place divisions below do not touch
    # cellwise_field itself.
    if len(cellwise_field.val.shape) == 1:
        t_cf_val = cellwise_field.val[ci]
        t_cf_jac = cellwise_field.jac[ci]
        t_cf_val /= dist_face_cell
        t_cf_jac /= dist_face_cell
    # Case 2: Tensor valued fields (3x3 per cell): n^T K n / d^3, with n
    # the (unnormalized) face-cell vector.
    elif len(cellwise_field.val.shape) == 3 and all(
            [cellwise_field.val.shape[i] == 3 for i in range(0, 2)]):
        t_cf_tensor_val = cellwise_field.val[::, ::, ci]
        t_cf_tensor_jac = cellwise_field.jac[::, ::, ci]
        tn_cf_val = (t_cf_tensor_val * fc_cc).sum(axis=1)
        tn_cf_jac = (t_cf_tensor_jac * fc_cc).sum(axis=1)
        ntn_cf_val = (tn_cf_val * fc_cc).sum(axis=0)
        ntn_cf_jac = (tn_cf_jac * fc_cc).sum(axis=0)
        dist_face_cell_3 = np.power(dist_face_cell, 3)
        t_cf_val = np.divide(ntn_cf_val, dist_face_cell_3)
        t_cf_jac = np.divide(ntn_cf_jac, dist_face_cell_3)
    else:
        raise RuntimeError("Type of cell-wise field not supported.")
    # Continue with AD representation and utilize chain rule.
    t_face = Ad_array(t_cf_val, sps.diags(t_cf_jac).tocsc())
    # The final harmonic averaging using a linear operator representation
    # of bincount: t_face = (sum_i d_i) / (sum_i 1/t_i) per face.
    # TODO test!
    t_face = (self.bincount_fi_periodic * dist_face_cell) * (
        (self.bincount_fi_periodic * t_face**(-1))**(-1))
    # Project column space of t.jac onto the actual cells: proj maps
    # half-face columns back to cell columns.
    # TODO is there not a better way to create the projection matrix? By
    # correct indexing?
    c = np.arange(len(ci_periodic))
    proj = sps.coo_matrix((np.ones_like(c), (c, ci_periodic))).tocsr()
    t_face.jac = t_face.jac * proj
    return t_face
def test_copy_vector(self):
    """copy() must deep-copy val and jac so later edits do not leak through."""
    original = Ad_array(np.ones((3, 1)), np.ones((3, 1)))
    duplicate = original.copy()
    self.assertTrue(np.allclose(original.val, duplicate.val))
    self.assertTrue(np.allclose(original.jac, duplicate.jac))
    # Mutate the original; the copy must be unaffected.
    original.val[0] = 3
    original.jac[2] = 4
    self.assertTrue(np.allclose(duplicate.val, np.ones((3, 1))))
    self.assertTrue(np.allclose(duplicate.jac, np.ones((3, 1))))
def test_copy_scalar(self):
    """copy() of a scalar Ad_array must be independent of the original."""
    original = Ad_array(1, 0)
    duplicate = original.copy()
    self.assertTrue(original.val == duplicate.val)
    self.assertTrue(original.jac == duplicate.jac)
    # Rebind the original's fields; the copy must keep its values.
    original.val = 2
    original.jac = 3
    self.assertTrue(duplicate.val == 1)
    self.assertTrue(duplicate.jac == 0)
def test_add_var_with_scal(self):
    """Adding a plain int to an AD variable; the int operand is untouched."""
    var = Ad_array(3, 2)
    scal = 3
    res = var + scal
    self.assertTrue(np.allclose(res.val, 6) and np.allclose(res.jac, 2))
    self.assertTrue(var.val == 3 and np.allclose(var.jac, 2))
    self.assertTrue(scal == 3)
def test_sign_advar(self):
    """af.sign returns elementwise signs and leaves the operand intact."""
    var = Ad_array(np.array([1, -10, 3, -np.pi]), np.eye(4))
    result = af.sign(var)
    self.assertTrue(np.all(result == [1, -1, 1, -1]))
    self.assertTrue(
        np.allclose(var.val, [1, -10, 3, -np.pi])
        and np.allclose(var.jac, np.eye(4)))
def test_mul_scar_ad_var(self):
    """Left-multiplication by a plain scalar scales both val and jac."""
    var = Ad_array(3, 3)
    scal = 3
    res = scal * var
    self.assertTrue(res.val == 9 and res.jac == 9)
    self.assertTrue(var.val == 3 and var.jac == 3)
    self.assertTrue(scal == 3)
def test_log_sparse_jac(self):
    """log of an Ad_array with sparse Jacobian: chain rule diag(1/val) @ J."""
    values = np.array([1, 2, 3])
    jacobian = sps.csc_matrix(np.array([[3, 2, 1], [5, 6, 1], [2, 3, 5]]))
    var = Ad_array(values, jacobian)
    result = af.log(var)
    expected_jac = np.dot(np.diag(1 / values), jacobian.A)
    self.assertTrue(
        np.all(result.val == np.log(values))
        and np.all(result.jac == expected_jac))
def test_sub_scal_with_var(self):
    """Right-subtraction: scalar minus AD variable negates the Jacobian."""
    var = Ad_array(3, 2)
    scal = 3
    res = scal - var
    self.assertTrue(np.allclose(res.val, 0) and np.allclose(res.jac, -2))
    self.assertTrue(var.val == 3 and var.jac == 2)
    self.assertTrue(scal == 3)
def test_mul_sps_advar(self):
    """Sparse-matrix times AD vector: jac transforms as A @ J."""
    jacobian = sps.csc_matrix(np.array([[1, 3, 1], [5, 0, 0], [5, 1, 2]]))
    var = Ad_array(np.array([1, 2, 3]), jacobian)
    matrix = sps.csc_matrix(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
    result = matrix * var
    self.assertTrue(np.all(result.val == [14, 32, 50]))
    self.assertTrue(np.all(result.jac == matrix * jacobian.A))
def test_full_jac(self):
    """full_jac() must return the Jacobian given at construction.

    Cleanup: removed the unused J1/J2 sparse blocks left over from an
    abandoned block-Jacobian variant (see the old commented-out
    ``np.array([J1, J2])``).
    """
    J = np.array([
        [1, 3, 5, 1, 2],
        [1, 5, 1, 2, 5],
        [6, 2, 4, 6, 0],
        [2, 4, 1, 9, 9],
        [6, 2, 1, 45, 2],
    ])
    a = Ad_array(np.array([1, 2, 3, 4, 5]), J.copy())
    self.assertTrue(np.sum(a.full_jac() != J) == 0)
def test_mul_advar_vectors(self):
    """A @ a + b combines Jacobians as A @ Ja + Jb; operands stay intact.

    Cleanup: the precomputed ``jac`` was previously unused and the
    assertion recomputed the same expression inline.
    """
    Ja = sps.csc_matrix(np.array([[1, 3, 1], [5, 0, 0], [5, 1, 2]]))
    Jb = sps.csc_matrix(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
    a = Ad_array(np.array([1, 2, 3]), Ja)
    b = Ad_array(np.array([1, 1, 1]), Jb)
    A = sps.csc_matrix(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
    f = A * a + b
    jac = A * Ja + Jb
    self.assertTrue(np.all(f.val == [15, 33, 51]))
    self.assertTrue(np.sum(f.full_jac() != jac) == 0)
    # The operand Jacobians must be untouched by the arithmetic.
    self.assertTrue(
        np.sum(Ja != sps.csc_matrix(
            np.array([[1, 3, 1], [5, 0, 0], [5, 1, 2]]))) == 0)
    self.assertTrue(
        np.sum(Jb != sps.csc_matrix(
            np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))) == 0)
def test_log_vector(self):
    """log of an AD vector: jac = diag(1/val) @ J via sparse arithmetic."""
    values = np.array([1, 2, 3])
    jacobian = sps.csc_matrix(np.array([[3, 2, 1], [5, 6, 1], [2, 3, 5]]))
    var = Ad_array(values, jacobian)
    result = af.log(var)
    expected = sps.diags(1 / values) * jacobian
    self.assertTrue(
        np.all(result.val == np.log(values))
        and np.all(result.jac.A == expected))
def concatenate(variables, axis=0):
    """Concatenate a sequence of Ad_arrays into a single Ad_array.

    Values are concatenated along ``axis``; Jacobians are always stacked
    vertically (row-wise), i.e. ``axis`` does not affect the Jacobian
    layout.

    Parameters:
        variables: iterable of Ad_array-like objects exposing ``val`` and
            ``jac``.
        axis: axis along which the value arrays are concatenated.

    Returns:
        Ad_array with concatenated values and vertically stacked Jacobians.
    """
    vals = [var.val for var in variables]
    # Bug fix: keep the Jacobians in a plain list. Wrapping sparse matrices
    # in np.array created a wasteful (and deprecated) object array; also
    # removed a dead ``jacs_stacked = []`` assignment that was immediately
    # overwritten.
    jacs = [var.jac for var in variables]
    vals_stacked = np.concatenate(vals, axis=axis)
    jacs_stacked = sps.vstack(jacs)
    return Ad_array(vals_stacked, jacs_stacked)
def test_exp_vector(self):
    """exp of an AD vector: jac = diag(exp(val)) @ J; J stays unchanged."""
    values = np.array([1, 2, 3])
    dense_jac = np.array([[3, 2, 1], [5, 6, 1], [2, 3, 5]])
    var = Ad_array(values, sps.csc_matrix(dense_jac))
    result = af.exp(var)
    expected_jac = np.dot(np.diag(np.exp(values)), dense_jac)
    self.assertTrue(
        np.all(result.val == np.exp(values))
        and np.all(result.jac == expected_jac))
    self.assertTrue(
        np.all(dense_jac == np.array([[3, 2, 1], [5, 6, 1], [2, 3, 5]])))
def _ad_apply(self, face_transmissibility, potential, bc): tpfa = self._tpfa # Inner contribution matrix_dictionary = tpfa.data[pp.DISCRETIZATION_MATRICES][tpfa.keyword] flux_matrix = matrix_dictionary[tpfa.flux_matrix_key] bound_flux_matrix = matrix_dictionary[tpfa.bound_flux_matrix_key] # Compute flux and jacobian including bound flux data. # Determine value of face transmissibilities. if isinstance(face_transmissibility, Ad_array): face_transmissibility_val = face_transmissibility.val elif isinstance(face_transmissibility, np.ndarray): face_transmissibility_val = face_transmissibility else: raise RuntimeError("Type not implemented.") # How the flux and its jacobian are computed: # flux.val = diag(face_transmissibility.val) * flux_matrix * potential.val # = diag(flux_matrix * potential.val) * face_transmissibility.val; hence: # Start assuming face_transmissibility is constant flux_val = (sps.diags(face_transmissibility_val).tocsc() * flux_matrix * potential.val) flux_jac = (sps.diags(face_transmissibility_val).tocsc() * flux_matrix * potential.jac) # Boundary contribution - require some care with neumann boundary conditions. # How the boundary flux and its jacobian is computed: # bc_value = diag(t_b) * bound_flux_matrix * bc = diag(bound_flux_matrix * bc) * t_b # For the jacobian use the latter formula. t_b_val = face_transmissibility_val # Enforce Neumann BCs by setting the transmissibility equal 1: is_neu = tpfa.is_neu t_b_val[is_neu] = 1 # flux_val += sps.diags(t_b_val).tocsc() * bound_flux_matrix * bc # TODO rm? 
flux_val += t_b_val * (bound_flux_matrix * bc) # Now account in the Jacobian for nonlinear face_transmissibility if isinstance(face_transmissibility, Ad_array): flux_jac += (sps.diags(flux_matrix * potential.val) * face_transmissibility.jac) # Boundary contribution - require some care with neumann boundary conditions t_b_jac = face_transmissibility.jac # Enforce Neumann BCs by setting the transmissibility equal 1: neumann_rows = np.arange(len(t_b_val))[is_neu] pp.utils.sparse_mat.zero_rows(t_b_jac, neumann_rows) flux_jac += sps.diags(bound_flux_matrix * bc) * t_b_jac return Ad_array(flux_val, flux_jac)
def test_advar_mul_vec(self):
    """AD vector times plain vector: elementwise product, jac = diag(A) @ J."""
    var = Ad_array(np.array([1, 2, 3]), sps.diags([3, 2, 1]))
    vec = np.array([1, 3, 10])
    result = var * vec
    expected_val = np.array([1, 6, 30])
    expected_jac = np.diag([3, 6, 10])
    self.assertTrue(
        np.all(result.val == expected_val)
        and np.all(result.jac == expected_jac))
    # The operand must remain untouched.
    self.assertTrue(
        np.all(var.val == np.array([1, 2, 3]))
        and np.all(var.jac == np.diag([3, 2, 1])))
def tanh(var):
    """Hyperbolic tangent with AD support.

    Chain rule: d/dx tanh(x) = 1 - tanh(x)^2.

    Parameters:
        var: Ad_array, Local_Ad_array, or plain numeric/ndarray input.

    Returns:
        Same flavor as the input, with the derivative propagated for the
        AD types.
    """
    if isinstance(var, Ad_array):
        # Hoisted: the original evaluated np.tanh(var.val) twice per branch.
        val = np.tanh(var.val)
        jac = sps.diags(1 - val ** 2).tocsc() * var.jac
        return Ad_array(val, jac)
    elif isinstance(var, Local_Ad_array):
        val = np.tanh(var.val)
        jac = (1 - val ** 2) * var.jac
        return Local_Ad_array(val, jac)
    else:
        return np.tanh(var)
def abs(var):
    """Elementwise absolute value; the derivative is sign(var).

    Supports Ad_array, Local_Ad_array and plain numeric inputs.
    """
    if isinstance(var, Ad_array):
        return Ad_array(np.abs(var.val), var.diagvec_mul_jac(sign(var)))
    if isinstance(var, Local_Ad_array):
        return Local_Ad_array(np.abs(var.val), sign(var) * var.jac)
    return np.abs(var)
def log(var):
    """Natural logarithm; the derivative 1/val is applied to the Jacobian.

    Supports Ad_array, Local_Ad_array and plain numeric inputs.
    """
    if isinstance(var, Ad_array):
        return Ad_array(np.log(var.val), var.diagvec_mul_jac(1 / var.val))
    if isinstance(var, Local_Ad_array):
        return Local_Ad_array(np.log(var.val), (1 / var.val) * var.jac)
    return np.log(var)
def test_advar_m_mul_vec_n(self):
    """Rectangular projection then elementwise product of an AD vector.

    Bug fix: the reference Jacobians had swapped entries
    (``[[1, 0, 3], ...]`` instead of ``[[3, 0, 1], ...]``), which went
    unnoticed because the old sum-of-difference check let the errors
    +2 and -2 cancel. Correct values (consistent with test_mul_sps_advar
    and test_advar_mul_vec): Jy = R @ diag(3, 2, 1) and
    Jz = diag(vec) @ Jy. The checks are now elementwise.
    """
    x = Ad_array(np.array([1, 2, 3]), sps.diags([3, 2, 1]))
    vec = np.array([1, 2])
    R = sps.csc_matrix(np.array([[1, 0, 1], [0, 1, 0]]))
    y = R * x
    z = y * vec
    Jy = np.array([[3, 0, 1], [0, 2, 0]])
    Jz = np.array([[3, 0, 1], [0, 4, 0]])
    self.assertTrue(np.all(y.val == [4, 2]))
    self.assertTrue(np.all(y.full_jac().A == Jy))
    self.assertTrue(np.all(z.val == [4, 4]))
    self.assertTrue(np.all(z.full_jac().A == Jz))
def cos(var):
    """Elementwise cosine; the chain rule applies -sin(val) to the Jacobian.

    Supports Ad_array, Local_Ad_array and plain numeric inputs.
    """
    if isinstance(var, Ad_array):
        # TODO use capabilties offered by forward_mode.py
        deriv = -sps.diags(np.sin(var.val)).tocsc() * var.jac
        return Ad_array(np.cos(var.val), deriv)
    if isinstance(var, Local_Ad_array):
        return Local_Ad_array(np.cos(var.val), -np.sin(var.val) * var.jac)
    return np.cos(var)
def test_log_scalar_times_ad_var(self):
    """log(c*x) has Jacobian diag(1/val) @ J — the constant factor cancels."""
    values = np.array([1, 2, 3])
    jacobian = sps.diags(np.array([1, 1, 1]))
    var = Ad_array(values, jacobian)
    factor = 2
    result = af.log(factor * var)
    expected_jac = sps.diags(1 / values) * jacobian
    self.assertTrue(
        np.allclose(result.val, np.log(factor * values))
        and np.allclose(result.jac.A, expected_jac.A))
    # The operand must remain untouched.
    self.assertTrue(
        np.all(var.val == [1, 2, 3]) and np.all(var.jac.A == jacobian.A))
def test_rpower_advar_vector_scalar(self):
    """Scalar base raised to an AD exponent: d/dx c^x = c^x * ln(c) * J."""
    jacobian = sps.csc_matrix(np.array([[1, 2], [2, 3], [0, 1]]))
    var = Ad_array(np.array([1, 2, 3]), jacobian)
    result = 3**var
    expected_jac = np.array([
        [3 * np.log(3) * 1, 3 * np.log(3) * 2],
        [9 * np.log(3) * 2, 9 * np.log(3) * 3],
        [27 * np.log(3) * 0, 27 * np.log(3) * 1],
    ])
    self.assertTrue(np.all(result.val == [3, 9, 27]))
    self.assertTrue(np.all(result.jac.A == expected_jac))
def __call__(self, var, zerovalue: float = 0.5):
    """Evaluate the (regularized) Heaviside function of ``var``.

    For AD inputs the value is the exact Heaviside (with 0 at the origin)
    while the Jacobian is taken from ``self._regularization``.

    Parameters:
        var: Ad_array, Local_Ad_array, or plain numeric/ndarray input.
        zerovalue: value returned at var == 0 for plain inputs.
            NOTE(review): the AD branches hard-code 0.0 at the origin,
            ignoring this parameter — confirm whether that asymmetry is
            intended.

    Returns:
        Same flavor as the input.
    """
    if isinstance(var, Ad_array):
        val = np.heaviside(var.val, 0.0)
        regularization = self._regularization(var)
        jac = regularization.jac
        return Ad_array(val, jac)
    elif isinstance(var, Local_Ad_array):
        val = np.heaviside(var.val, 0.0)
        regularization = self._regularization(var)
        jac = regularization.jac
        return Local_Ad_array(val, jac)
    else:
        # Bug fix: np.heaviside requires a second argument (the value at
        # x == 0); the original one-argument call raised a TypeError.
        return np.heaviside(var, zerovalue)