def nabla_grad(self, o, a):
    j = Index()
    if a.rank() > 0:
        ii = tuple(indices(a.rank()))
        return as_tensor(a[ii].dx(j), (j,) + ii)
    else:
        return as_tensor(a.dx(j), (j,))
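# Illustrative usage sketch (not part of the handler above; assumes a
# legacy UFL release exposing VectorElement and element-based
# Coefficient). The lowering puts the derivative index first, so
# nabla_grad(u)[j, i] == u[i].dx(j), the transpose of grad(u).
from ufl import Coefficient, VectorElement, grad, nabla_grad, triangle

element = VectorElement("Lagrange", triangle, 1)
u = Coefficient(element)
assert nabla_grad(u).ufl_shape == grad(u).ufl_shape == (2, 2)
# nabla_grad(u) == transpose(grad(u)) for this vector-valued u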
def generic_pseudo_inverse_expr(A):
    """Compute the Moore-Penrose pseudo-inverse of A: (A.T*A)^-1 * A.T."""
    i, j, k = indices(3)
    ATA = as_tensor(A[k, i] * A[k, j], (i, j))
    ATAinv = inverse_expr(ATA)
    q, r, s = indices(3)
    return as_tensor(ATAinv[r, q] * A[s, q], (r, s))
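# Quick numerical sanity check (NumPy, not UFL) of the Moore-Penrose
# formula (A^T A)^{-1} A^T implemented above, for a full-column-rank A.
import numpy as np

A = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 6.0]])                       # 3x2, full column rank
pinv = np.linalg.inv(A.T @ A) @ A.T
assert np.allclose(pinv @ A, np.eye(2))          # left inverse of A
assert np.allclose(pinv, np.linalg.pinv(A))      # agrees with numpy.linalg.pinv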
def _function_from_ufl_component_tensor(expression: Product, indices: tuple_of(IndexBase)):
    factor_1 = expression.ufl_operands[0]
    factor_2 = expression.ufl_operands[1]
    assert isinstance(factor_1, (Number, ScalarValue)) or isinstance(factor_2, (Number, ScalarValue))
    if isinstance(factor_1, (Number, ScalarValue)):
        factor_2 = as_tensor(factor_2, indices)
    else:  # isinstance(factor_2, (Number, ScalarValue))
        factor_1 = as_tensor(factor_1, indices)
    return _function_from_ufl_product(factor_1, factor_2)
def apply_single_function_pullbacks(r, element):
    """Apply an appropriate pullback to something in physical space.

    :arg r: An expression wrapped in ReferenceValue.
    :arg element: The element this expression lives in.
    :returns: a pulled back expression."""
    mapping = element.mapping()
    if r.ufl_shape != element.reference_value_shape():
        error("Expecting reference space expression with shape '%s', got '%s'"
              % (element.reference_value_shape(), r.ufl_shape))
    if mapping in {"physical", "identity",
                   "contravariant Piola", "covariant Piola",
                   "double contravariant Piola", "double covariant Piola",
                   "L2 Piola"}:
        # Base case in recursion through elements. If the element
        # advertises a mapping we know how to handle, do that
        # directly.
        f = apply_known_single_pullback(r, element)
        if f.ufl_shape != element.value_shape():
            error("Expecting pulled back expression with shape '%s', got '%s'"
                  % (element.value_shape(), f.ufl_shape))
        return f
    elif mapping in {"symmetries", "undefined"}:
        # Need to pull back each unique piece of the reference space thing
        gsh = element.value_shape()
        rsh = r.ufl_shape
        if mapping == "symmetries":
            subelem = element.sub_elements()[0]
            fcm = element.flattened_sub_element_mapping()
            offsets = (product(subelem.reference_value_shape()) * i for i in fcm)
            elements = repeat(subelem)
        else:
            elements = sub_elements_with_mappings(element)
            # Python >= 3.8 has an initial keyword argument to
            # accumulate, but 3.7 does not.
            offsets = chain([0],
                            accumulate(product(e.reference_value_shape())
                                       for e in elements))
        rflat = as_vector([r[idx] for idx in numpy.ndindex(rsh)])
        g_components = []
        # For each unique piece in reference space, apply the appropriate pullback
        for offset, subelem in zip(offsets, elements):
            sub_rsh = subelem.reference_value_shape()
            rm = product(sub_rsh)
            rsub = [rflat[offset + i] for i in range(rm)]
            rsub = as_tensor(numpy.asarray(rsub).reshape(sub_rsh))
            rmapped = apply_single_function_pullbacks(rsub, subelem)
            # Flatten into the pulled back expression for the whole thing
            g_components.extend([rmapped[idx]
                                 for idx in numpy.ndindex(rmapped.ufl_shape)])
        # And reshape appropriately
        f = as_tensor(numpy.asarray(g_components).reshape(gsh))
        if f.ufl_shape != element.value_shape():
            error("Expecting pulled back expression with shape '%s', got '%s'"
                  % (element.value_shape(), f.ufl_shape))
        return f
    else:
        error("Unhandled mapping type '%s'" % mapping)
def apply_known_single_pullback(r, element):
    """Apply pullback with given mapping.

    :arg r: Expression wrapped in ReferenceValue
    :arg element: The element defining the mapping
    """
    # Need to pass in r rather than the physical space thing, because
    # the latter may be a ListTensor or similar, rather than a
    # Coefficient/Argument (in the case of mixed elements, see below
    # in apply_single_function_pullbacks), to which we cannot apply ReferenceValue
    mapping = element.mapping()
    domain = r.ufl_domain()
    if mapping == "physical":
        return r
    elif mapping == "identity":
        return r
    elif mapping == "contravariant Piola":
        J = Jacobian(domain)
        detJ = JacobianDeterminant(domain)
        transform = (1.0 / detJ) * J
        # Apply transform "row-wise" to TensorElement(PiolaMapped, ...)
        *k, i, j = indices(len(r.ufl_shape) + 1)
        kj = (*k, j)
        f = as_tensor(transform[i, j] * r[kj], (*k, i))
        return f
    elif mapping == "covariant Piola":
        K = JacobianInverse(domain)
        # Apply transform "row-wise" to TensorElement(PiolaMapped, ...)
        *k, i, j = indices(len(r.ufl_shape) + 1)
        kj = (*k, j)
        f = as_tensor(K[j, i] * r[kj], (*k, i))
        return f
    elif mapping == "L2 Piola":
        detJ = JacobianDeterminant(domain)
        return r / detJ
    elif mapping == "double contravariant Piola":
        J = Jacobian(domain)
        detJ = JacobianDeterminant(domain)
        # Apply transform "row-wise" to TensorElement(PiolaMapped, ...)
        *k, i, j, m, n = indices(len(r.ufl_shape) + 2)
        kmn = (*k, m, n)
        f = as_tensor((1.0 / detJ)**2 * J[i, m] * r[kmn] * J[j, n], (*k, i, j))
        return f
    elif mapping == "double covariant Piola":
        K = JacobianInverse(domain)
        # Apply transform "row-wise" to TensorElement(PiolaMapped, ...)
        *k, i, j, m, n = indices(len(r.ufl_shape) + 2)
        kmn = (*k, m, n)
        f = as_tensor(K[m, i] * r[kmn] * K[n, j], (*k, i, j))
        return f
    else:
        error("Should never be reached!")
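# NumPy sanity check (illustrative, not UFL) of the covariant Piola map
# used above: with K = J^{-1}, the transformed value g_i = K_ji ghat_j
# satisfies the chain rule for gradients, J^T g = ghat.
import numpy as np

J = np.array([[2.0, 1.0],
              [0.0, 3.0]])          # Jacobian dx/dX of an affine map
K = np.linalg.inv(J)                # JacobianInverse, dX/dx
ghat = np.array([1.0, -2.0])        # reference-space gradient
g = K.T @ ghat                      # covariant Piola: g_i = K_ji ghat_j
assert np.allclose(J.T @ g, ghat)   # chain rule recovered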
def _as_tensor(self, indices):
    "UFL operator: A^indices := as_tensor(A, indices)."
    if not isinstance(indices, tuple):
        error("Expecting a tuple of Index objects to A^indices := as_tensor(A, indices).")
    if not all(isinstance(i, Index) for i in indices):
        error("Expecting a tuple of Index objects to A^indices := as_tensor(A, indices).")
    return as_tensor(self, indices)
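# Usage sketch, assuming this method is bound to Expr.__xor__ so that
# the docstring's A^indices notation is available (legacy UFL API):
from ufl import Coefficient, TensorElement, indices, triangle

element = TensorElement("Lagrange", triangle, 1)
A = Coefficient(element)
i, j = indices(2)
At = A[i, j] ^ (j, i)          # same as as_tensor(A[i, j], (j, i)), a transpose
assert At.ufl_shape == (2, 2)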
def indexed(self, o, Ap, ii):
    # TODO: (Partially) duplicated in nesting rules
    # Propagate zeros
    if isinstance(Ap, Zero):
        return self.independent_operator(o)

    # Untangle as_tensor(C[kk], jj)[ii] -> C[ll] to simplify
    # resulting expression
    if isinstance(Ap, ComponentTensor):
        B, jj = Ap.ufl_operands
        if isinstance(B, Indexed):
            C, kk = B.ufl_operands
            kk = list(kk)
            if all(j in kk for j in jj):
                rep = dict(zip(jj, ii))
                Cind = [rep.get(k, k) for k in kk]
                expr = Indexed(C, MultiIndex(tuple(Cind)))
                assert expr.ufl_free_indices == o.ufl_free_indices
                assert expr.ufl_shape == o.ufl_shape
                return expr

    # Otherwise a more generic approach
    r = len(Ap.ufl_shape) - len(ii)
    if r:
        kk = indices(r)
        op = Indexed(Ap, MultiIndex(ii.indices() + kk))
        op = as_tensor(op, kk)
    else:
        op = Indexed(Ap, ii)
    return op
def component_tensor(self, o, Ap, ii):
    if isinstance(Ap, Zero):
        op = self.independent_operator(o)
    else:
        Ap, jj = as_scalar(Ap)
        op = as_tensor(Ap, ii.indices() + jj)
    return op
def _make_identity(self, sh):
    "Create a higher order identity tensor to represent dv/dv."
    res = None
    if sh == ():
        # Scalar dv/dv is scalar
        return FloatValue(1.0)
    elif len(sh) == 1:
        # Vector v makes dv/dv the identity matrix
        return Identity(sh[0])
    else:
        # TODO: Add a type for this higher order identity?
        # II[i0,i1,i2,j0,j1,j2] = 1 if all((i0==j0, i1==j1, i2==j2)) else 0
        # Tensor v makes dv/dv some kind of higher rank identity tensor
        ind1 = ()
        ind2 = ()
        for d in sh:
            i, j = indices(2)
            dij = Identity(d)[i, j]
            if res is None:
                res = dij
            else:
                res *= dij
            ind1 += (i,)
            ind2 += (j,)
        fp = as_tensor(res, ind1 + ind2)
    return fp
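# NumPy illustration of the higher order identity built above for a
# variable of shape (2, 3): II[i0, i1, j0, j1] = delta_{i0 j0} delta_{i1 j1},
# which acts as dv/dv under double contraction.
import numpy as np

II = np.einsum('ij,kl->ikjl', np.eye(2), np.eye(3))
v = np.arange(6.0).reshape(2, 3)
assert np.allclose(np.einsum('ikjl,jl->ik', II, v), v)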
def dot(self, o, a, b):
    ai = indices(len(a.ufl_shape) - 1)
    bi = indices(len(b.ufl_shape) - 1)
    k = (Index(),)
    # Creates a single IndexSum over a Product
    s = a[ai + k] * b[k + bi]
    return as_tensor(s, ai + bi)
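# Sketch of the lowering above (legacy UFL API assumed): dot(u, v)
# becomes a single IndexSum over a Product, because UFL applies
# Einstein summation to an index repeated within a product.
from ufl import Coefficient, Index, VectorElement, triangle

element = VectorElement("Lagrange", triangle, 1)
u, v = Coefficient(element), Coefficient(element)
k = Index()
s = u[k] * v[k]               # IndexSum over the Product u_k v_k
assert s.ufl_shape == ()      # fully contracted, scalar valued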
def indexed(self, o, Ap, ii):
    # TODO: (Partially) duplicated in generic rules
    # Reuse if untouched
    if Ap is o.ufl_operands[0]:
        return o

    # Untangle as_tensor(C[kk], jj)[ii] -> C[ll] to simplify
    # resulting expression
    if isinstance(Ap, ComponentTensor):
        B, jj = Ap.ufl_operands
        if isinstance(B, Indexed):
            C, kk = B.ufl_operands
            kk = list(kk)
            if all(j in kk for j in jj):
                Cind = list(kk)
                for i, j in zip(ii, jj):
                    Cind[kk.index(j)] = i
                return Indexed(C, MultiIndex(tuple(Cind)))

    # Otherwise a more generic approach
    r = len(Ap.ufl_shape) - len(ii)
    if r:
        kk = indices(r)
        op = Indexed(Ap, MultiIndex(ii.indices() + kk))
        op = as_tensor(op, kk)
    else:
        op = Indexed(Ap, ii)
    return op
def dot(self, o, a, b):
    ai = indices(a.rank() - 1)
    bi = indices(b.rank() - 1)
    k = indices(1)
    # Create an IndexSum over a Product
    s = a[ai + k] * b[k + bi]
    return as_tensor(s, ai + bi)
def indexed(self, o, Ap, ii):
    # TODO: (Partially) duplicated in nesting rules
    # Propagate zeros
    if isinstance(Ap, Zero):
        return self.independent_operator(o)

    # Untangle as_tensor(C[kk], jj)[ii] -> C[ll] to simplify
    # resulting expression
    if isinstance(Ap, ComponentTensor):
        B, jj = Ap.ufl_operands
        if isinstance(B, Indexed):
            C, kk = B.ufl_operands
            kk = list(kk)
            if all(j in kk for j in jj):
                Cind = list(kk)
                for i, j in zip(ii, jj):
                    Cind[kk.index(j)] = i
                return Indexed(C, MultiIndex(tuple(Cind)))

    # Otherwise a more generic approach
    r = len(Ap.ufl_shape) - len(ii)
    if r:
        kk = indices(r)
        op = Indexed(Ap, MultiIndex(ii.indices() + kk))
        op = as_tensor(op, kk)
    else:
        op = Indexed(Ap, ii)
    return op
def nabla_grad(self, o, a):
    sh = a.ufl_shape
    if sh == ():
        return Grad(a)
    else:
        j = Index()
        ii = tuple(indices(len(sh)))
        return as_tensor(a[ii].dx(j), (j,) + ii)
def _getitem(self, component):
    # Treat component consistently as tuple below
    if not isinstance(component, tuple):
        component = (component,)

    shape = self.ufl_shape

    # Analyse slices (:) and Ellipsis (...)
    all_indices, slice_indices, repeated_indices = create_slice_indices(
        component, shape, self.ufl_free_indices)

    # Check that we have the right number of indices for a tensor with
    # this shape
    if len(shape) != len(all_indices):
        error("Invalid number of indices {0} for expression of rank {1}.".format(
            len(all_indices), len(shape)))

    # Special case for simplifying foo[...] => foo, foo[:] => foo or
    # similar
    if len(slice_indices) == len(all_indices):
        return self

    # Special case for simplifying as_tensor(ai,(i,))[i] => ai
    if isinstance(self, ComponentTensor):
        if all_indices == self.indices().indices():
            return self.ufl_operands[0]

    # Apply all indices to index self, yielding a scalar valued
    # expression
    mi = MultiIndex(all_indices)
    a = Indexed(self, mi)

    # TODO: I think applying as_tensor after index sums results in
    # cleaner expression graphs.

    # If the Ellipsis or any slices were found, wrap as tensor valued
    # with the slice indices created at the top here
    if slice_indices:
        a = as_tensor(a, slice_indices)

    # If any repeated indices were found, apply implicit summation
    # over those
    for i in repeated_indices:
        mi = MultiIndex((i,))
        a = IndexSum(a, mi)

    # Check for zero (last so we can get indices etc from a, could
    # possibly be done faster by checking early instead)
    if isinstance(self, Zero):
        shape = a.ufl_shape
        fi = a.ufl_free_indices
        fid = a.ufl_index_dimensions
        a = Zero(shape, fi, fid)

    return a
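# Sketch of what this __getitem__ supports (legacy UFL API assumed):
# fixed indices, slices, Ellipsis, and implicit summation over
# repeated indices.
from ufl import Coefficient, Index, TensorElement, triangle

element = TensorElement("Lagrange", triangle, 1)
A = Coefficient(element)
i = Index()
row = A[0, :]          # slice -> vector-valued expression of shape (2,)
tr = A[i, i]           # repeated index -> IndexSum (the trace), scalar
assert A[...] is A     # the foo[...] => foo special case above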
def _div(self, o):
    if not isinstance(o, _valid_types):
        return NotImplemented
    sh = self.ufl_shape
    if sh:
        ii = indices(len(sh))
        d = Division(self[ii], o)
        return as_tensor(d, ii)
    return Division(self, o)
def facet_jacobian(self, o):
    if self._preserve_types[o._ufl_typecode_]:
        return o
    domain = o.ufl_domain()
    J = self.jacobian(Jacobian(domain))
    RFJ = CellFacetJacobian(domain)
    i, j, k = indices(3)
    return as_tensor(J[i, k] * RFJ[k, j], (i, j))
def pseudo_inverse_expr(A):
    """Compute the Moore-Penrose pseudo-inverse of A: (A.T*A)^-1 * A.T."""
    m, n = A.ufl_shape
    if n == 1:
        # Simpler special case for 1d
        i, j, k = indices(3)
        return as_tensor(A[i, j], (j, i)) / (A[k, 0] * A[k, 0])
    else:
        # Generic formulation
        return generic_pseudo_inverse_expr(A)
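# Numerical check (NumPy) of the one-column special case above: for a
# single-column A, pinv(A) = A^T / (A^T A).
import numpy as np

a = np.array([[1.0], [2.0], [2.0]])    # shape (3, 1)
pinv = a.T / (a.T @ a)                 # A^T divided by the scalar |a|^2
assert np.allclose(pinv, np.linalg.pinv(a))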
def jacobian_inverse(self, o):
    # grad(K) == K_ji rgrad(K)_rj
    if is_cellwise_constant(o):
        return self.independent_terminal(o)
    if not o._ufl_is_terminal_:
        error("ReferenceValue can only wrap a terminal")
    r = indices(len(o.ufl_shape))
    i, j = indices(2)
    Do = as_tensor(o[j, i] * ReferenceGrad(o)[r + (j,)], r + (i,))
    return Do
def product(self, o, da, db):
    # Even though arguments to o are scalar, da and db may be
    # tensor valued
    a, b = o.ufl_operands
    (da, db), ii = as_scalars(da, db)
    pa = Product(da, b)
    pb = Product(a, db)
    s = Sum(pa, pb)
    if ii:
        s = as_tensor(s, ii)
    return s
def elem_op(op, *args):
    "UFL operator: Take the elementwise application of operator op on scalar values from one or more tensor arguments."
    args = [as_ufl(arg) for arg in args]
    sh = args[0].shape()
    ufl_assert(all(sh == x.shape() for x in args),
               "Cannot take elementwise operation with different shapes.")
    if sh == ():
        return op(*args)

    def op_ind(ind, *args):
        return op(*[x[ind] for x in args])
    return as_tensor(elem_op_items(op_ind, (), *args))
def reference_value(self, o):
    # grad(o) == grad(rv(f)) -> K_ji*rgrad(rv(f))_rj
    f = o.ufl_operands[0]
    if not f._ufl_is_terminal_:
        error("ReferenceValue can only wrap a terminal")
    domain = f.ufl_domain()
    K = JacobianInverse(domain)
    r = indices(len(o.ufl_shape))
    i, j = indices(2)
    Do = as_tensor(K[j, i] * ReferenceGrad(o)[r + (j,)], r + (i,))
    return Do
def inverse_expr(A):
    "Compute the inverse of A."
    sh = A.ufl_shape
    if sh == ():
        return 1.0 / A
    elif sh[0] == sh[1]:
        if sh[0] == 1:
            return as_tensor(((1.0 / A[0, 0],),))
        else:
            return adj_expr(A) / determinant_expr(A)
    else:
        return pseudo_inverse_expr(A)
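# NumPy check of the adjugate-over-determinant route taken above for
# square matrices, written out for the 2x2 case.
import numpy as np

A = np.array([[3.0, 1.0],
              [2.0, 4.0]])
adj = np.array([[A[1, 1], -A[0, 1]],
                [-A[1, 0], A[0, 0]]])              # adj(A) for 2x2
assert np.allclose(adj / np.linalg.det(A), np.linalg.inv(A))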
def cell_coordinate(self, o):
    "Compute from physical coordinates if they are known, using the appropriate mappings."
    if self._preserve_types[o._ufl_typecode_]:
        return o
    domain = o.ufl_domain()
    K = self.jacobian_inverse(JacobianInverse(domain))
    x = self.spatial_coordinate(SpatialCoordinate(domain))
    x0 = CellOrigin(domain)
    i, j = indices(2)
    X = as_tensor(K[i, j] * (x[j] - x0[j]), (i,))
    return X
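# NumPy illustration of the relation inverted above: for an affine
# cell x = x0 + J X, the reference coordinate is X = K (x - x0) with
# K the Jacobian inverse (values here are illustrative only).
import numpy as np

J = np.array([[2.0, 0.5],
              [0.0, 1.5]])           # dx/dX for an affine cell
x0 = np.array([1.0, 2.0])            # CellOrigin in physical space
X = np.array([0.25, 0.5])            # a reference point
x = x0 + J @ X                       # forward map
K = np.linalg.inv(J)
assert np.allclose(K @ (x - x0), X)  # cell_coordinate recovers X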
def reference_grad(self, o):
    # grad(o) == grad(rgrad(rv(f))) -> K_ji*rgrad(rgrad(rv(f)))_rj
    f = o.ufl_operands[0]
    valid_operand = f._ufl_is_in_reference_frame_ or isinstance(
        f, (JacobianInverse, SpatialCoordinate))
    if not valid_operand:
        error("ReferenceGrad can only wrap a reference frame type!")
    domain = f.ufl_domain()
    K = JacobianInverse(domain)
    r = indices(len(o.ufl_shape))
    i, j = indices(2)
    Do = as_tensor(K[j, i] * ReferenceGrad(o)[r + (j,)], r + (i,))
    return Do
def compute_gprimeterm(ngrads, vval, vcomp, wshape, wcomp):
    # Apply gradients directly to argument vval,
    # and get the right indexed scalar component(s)
    kk = indices(ngrads)
    Dvkk = apply_grads(vval)[vcomp + kk]
    # Place scalar component(s) Dvkk into the right tensor positions
    if wshape:
        Ejj, jj = unit_indexed_tensor(wshape, wcomp)
    else:
        Ejj, jj = 1, ()
    gprimeterm = as_tensor(Ejj * Dvkk, jj + kk)
    return gprimeterm
def elem_op(op, *args):
    "UFL operator: Take the elementwise application of operator *op* on scalar values from one or more tensor arguments."
    args = [as_ufl(arg) for arg in args]
    sh = args[0].ufl_shape
    if not all(sh == x.ufl_shape for x in args):
        error("Cannot take elementwise operation with different shapes.")
    if sh == ():
        return op(*args)

    def op_ind(ind, *args):
        return op(*[x[ind] for x in args])
    return as_tensor(elem_op_items(op_ind, (), *args))
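# Minimal usage sketch (legacy UFL API assumed, with elem_op and sin
# importable from the top-level ufl namespace): apply a scalar
# operator entrywise to a matrix-valued expression.
from ufl import Coefficient, TensorElement, elem_op, sin, triangle

element = TensorElement("Lagrange", triangle, 1)
A = Coefficient(element)
S = elem_op(sin, A)                # S[i, j] == sin(A[i, j])
assert S.ufl_shape == A.ufl_shape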
def alternative_dot(self, o, a, b):  # TODO: Test this
    ash = a.ufl_shape
    bsh = b.ufl_shape
    ai = indices(len(ash) - 1)
    bi = indices(len(bsh) - 1)
    # Simplification for tensors where the dot-sum dimension has
    # length 1
    if ash[-1] == 1:
        k = (FixedIndex(0),)
    else:
        k = (Index(),)
    # Potentially creates a single IndexSum over a Product
    s = a[ai + k] * b[k + bi]
    return as_tensor(s, ai + bi)
def coefficient(self, o):
    # Define dw/dw := d/ds [w + s v] = v

    debug("In CoefficientAD.coefficient:")
    debug("o = %s" % o)
    debug("self._w = %s" % self._w)
    debug("self._v = %s" % self._v)

    # Find o among w
    for (w, v) in zip(self._w, self._v):
        if o == w:
            return (w, v)

    # If o is not among coefficient derivatives, return do/dw=0
    oprimesum = Zero(o.shape())
    oprimes = self._cd._data.get(o)
    if oprimes is None:
        if self._cd._data:
            # TODO: Make it possible to silence this message in particular?
            # It may be good to have for debugging...
            warning("Assuming d{%s}/d{%s} = 0." % (o, self._w))
    else:
        # Make sure we have a tuple to match the self._v tuple
        if not isinstance(oprimes, tuple):
            oprimes = (oprimes,)
        ufl_assert(len(oprimes) == len(self._v),
                   "Got a tuple of arguments, "
                   "expecting a matching tuple of coefficient derivatives.")

        # Compute do/dw_j = do/dw_h : v.
        # Since we may actually have a tuple of oprimes and vs in a
        # 'mixed' space, sum over them all to get the complete inner
        # product. Using indices to define a non-compound inner product.
        for (oprime, v) in zip(oprimes, self._v):
            so, oi = as_scalar(oprime)
            rv = len(v.shape())
            oi1 = oi[:-rv]
            oi2 = oi[-rv:]
            prod = so * v[oi2]
            if oi1:
                oprimesum += as_tensor(prod, oi1)
            else:
                oprimesum += prod

    # Example:
    # (f : g) -> (dfdu : v) : g + ditto
    # shape(f) == shape(g) == shape(dfdu : v)
    # shape(dfdu) == shape(f) + shape(v)
    return (o, oprimesum)
def reference_value(self, o):
    # grad(o) == grad(rv(f)) -> K_ji*rgrad(rv(f))_rj
    f = o.ufl_operands[0]
    if f.ufl_element().mapping() == "physical":
        # TODO: Do we need to be more careful for immersed things?
        return ReferenceGrad(o)

    if not f._ufl_is_terminal_:
        error("ReferenceValue can only wrap a terminal")
    domain = f.ufl_domain()
    K = JacobianInverse(domain)
    r = indices(len(o.ufl_shape))
    i, j = indices(2)
    Do = as_tensor(K[j, i] * ReferenceGrad(o)[r + (j,)], r + (i,))
    return Do
def indexed(self, o):
    A, jj = o.operands()
    A2, Ap = self.visit(A)
    o = self.reuse_if_possible(o, A2, jj)

    if isinstance(Ap, Zero):
        op = self._make_zero_diff(o)
    else:
        r = Ap.rank() - len(jj)
        if r:
            ii = indices(r)
            op = Indexed(Ap, jj._indices + ii)
            op = as_tensor(op, ii)
        else:
            op = Indexed(Ap, jj)
    return (o, op)
def _make_ones_diff(self, o):
    ufl_assert(o.shape() == self._var_shape,
               "This is only used by VariableDerivative, yes?")
    # Define a scalar value with the right indices
    # (kind of cumbersome this... any simpler way?)
    sh = o.shape()
    fi = o.free_indices()
    idims = dict(o.index_dimensions())

    if self._var_free_indices:
        # Currently assuming only one free variable index
        i, = self._var_free_indices
        if i not in idims:
            fi = unique_indices(fi + (i,))
            idims[i] = self._var_index_dimensions[i]

    # Create a 1 with index annotations
    one = IntValue(1, (), fi, idims)

    res = None
    if sh == ():
        return one
    elif len(sh) == 1:
        # FIXME: If sh == (1,), I think this will get the wrong shape?
        # I think we should make sure sh=(1,...,1) is always converted to () early.
        fp = Identity(sh[0])
    else:
        ind1 = ()
        ind2 = ()
        for d in sh:
            i, j = indices(2)
            dij = Identity(d)[i, j]
            if res is None:
                res = dij
            else:
                res *= dij
            ind1 += (i,)
            ind2 += (j,)
        fp = as_tensor(res, ind1 + ind2)

    # Apply index annotations
    if fi:
        fp *= one

    return fp
def coefficient(self, o):
    # Define dw/dw := d/ds [w + s v] = v

    # Return corresponding argument if we can find o among w
    do = self._w2v.get(o)
    if do is not None:
        return do

    # Look for o among coefficient derivatives
    dos = self._cd.get(o)
    if dos is None:
        # If o is not among coefficient derivatives, return
        # do/dw=0
        do = Zero(o.ufl_shape)
        return do
    else:
        # Compute do/dw_j = do/dw_h : v.
        # Since we may actually have a tuple of oprimes and vs in a
        # 'mixed' space, sum over them all to get the complete inner
        # product. Using indices to define a non-compound inner product.

        # Example:
        # (f:g) -> (dfdu:v):g + f:(dgdu:v)
        # shape(dfdu) == shape(f) + shape(v)
        # shape(f) == shape(g) == shape(dfdu : v)

        # Make sure we have a tuple to match the self._v tuple
        if not isinstance(dos, tuple):
            dos = (dos,)
        if len(dos) != len(self._v):
            error("Got a tuple of arguments, expecting a matching tuple of coefficient derivatives.")
        dosum = Zero(o.ufl_shape)
        for do, v in zip(dos, self._v):
            so, oi = as_scalar(do)
            rv = len(v.ufl_shape)
            oi1 = oi[:-rv]
            oi2 = oi[-rv:]
            prod = so * v[oi2]
            if oi1:
                dosum += as_tensor(prod, oi1)
            else:
                dosum += prod
        return dosum
def product(self, o, *ops):
    # Start with a zero with the right shape and indices
    fp = self._make_zero_diff(o)
    # Get operands and their derivatives
    ops2, dops2 = unzip(ops)
    o = self.reuse_if_possible(o, *ops2)
    for i in range(len(ops)):
        # Get scalar representation of differentiated value of operand i
        dop = dops2[i]
        dop, ii = as_scalar(dop)
        # Replace operand i with its differentiated value in product
        fpoperands = ops2[:i] + [dop] + ops2[i + 1:]
        p = Product(*fpoperands)
        # Wrap product in tensor again
        if ii:
            p = as_tensor(p, ii)
        # Accumulate terms
        fp += p
    return (o, fp)
def contraction(a, a_axes, b, b_axes):
    "UFL operator: Take the contraction of a and b over given axes."
    ai, bi = a_axes, b_axes
    ufl_assert(len(ai) == len(bi),
               "Contraction must be over the same number of axes.")
    ash = a.shape()
    bsh = b.shape()
    # Use lists so the contracted index slots can be overwritten below
    aii = list(indices(a.rank()))
    bii = list(indices(b.rank()))
    cii = indices(len(ai))
    shape = [None] * len(ai)
    for i, j in enumerate(ai):
        aii[j] = cii[i]
        shape[i] = ash[j]
    for i, j in enumerate(bi):
        bii[j] = cii[i]
        ufl_assert(shape[i] == bsh[j], "Shape mismatch in contraction.")
    s = a[tuple(aii)] * b[tuple(bii)]
    cii = set(cii)
    ii = tuple(i for i in (aii + bii) if i not in cii)
    return as_tensor(s, ii)
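# NumPy check of the contraction semantics above: contracting axis 1
# of a with axis 0 of b leaves the free indices of a followed by those
# of b, which matches numpy.tensordot.
import numpy as np

rng = np.random.default_rng(0)
a = rng.random((2, 3))
b = rng.random((3, 4))
c = np.tensordot(a, b, axes=([1], [0]))   # like contraction(a, (1,), b, (0,))
assert c.shape == (2, 4)
assert np.allclose(c, a @ b)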
def division(self, o, a, b):
    f, fp = a
    g, gp = b
    o = self.reuse_if_possible(o, f, g)
    ufl_assert(is_ufl_scalar(f), "Not expecting nonscalar numerator")
    ufl_assert(is_true_ufl_scalar(g), "Not expecting nonscalar denominator")

    # With o = f/g:
    # do_df = 1/g
    # do_dg = -f/g**2 = -o/g
    # op = do_df*fp + do_dg*gp = (fp - o*gp) / g

    # Get o and gp as scalars, multiply, then wrap as a tensor again
    so, oi = as_scalar(o)
    sgp, gi = as_scalar(gp)
    o_gp = so * sgp
    if oi or gi:
        o_gp = as_tensor(o_gp, oi + gi)
    op = (fp - o_gp) / g

    return (o, op)