def dot(self, o, a, b):
    """Expand dot(a, b) in index notation: contract the last axis of a
    with the first axis of b via an implicit IndexSum."""
    left = indices(a.rank() - 1)
    right = indices(b.rank() - 1)
    shared = indices(1)
    # The repeated index 'shared' produces an IndexSum over the Product.
    summand = a[left + shared] * b[shared + right]
    return as_tensor(summand, left + right)
def nabla_grad(self, o, a):
    """Expand nabla_grad(a): differentiate componentwise and prepend
    the derivative index, i.e. (nabla_grad(a))[j, ii] = a[ii].dx(j)."""
    j = Index()
    if a.rank() == 0:
        # Scalar operand: result is a vector of partial derivatives.
        return as_tensor(a.dx(j), (j,))
    ii = tuple(indices(a.rank()))
    return as_tensor(a[ii].dx(j), (j,) + ii)
def _div(self, o):
    """Divide this expression componentwise by the scalar o,
    returning NotImplemented for unsupported divisor types."""
    if not isinstance(o, _valid_types):
        return NotImplemented
    shape = self.shape()
    if not shape:
        # Scalar case: plain scalar division.
        return Division(self, o)
    # Tensor case: divide each scalar component, then rewrap as a tensor.
    ii = indices(len(shape))
    return as_tensor(Division(self[ii], o), ii)
def as_scalar(expression):
    """Given a scalar or tensor valued expression A, returns either of the
    tuples::

      (a, b) = (A, ())
      (a, b) = (A[indices], indices)

    such that a is always a scalar valued expression."""
    ii = indices(expression.rank())
    if not ii:
        # Already scalar valued; no indexing needed.
        return expression, ii
    return expression[ii], ii
def contraction(a, a_axes, b, b_axes):
    """UFL operator: Take the contraction of a and b over given axes.

    a_axes and b_axes are sequences of axis numbers (of equal length);
    corresponding axes are summed over, and the remaining axes of a
    followed by the remaining axes of b form the result's shape.
    """
    ai, bi = a_axes, b_axes
    ufl_assert(len(ai) == len(bi),
               "Contraction must be over the same number of axes.")
    ash = a.shape()
    bsh = b.shape()
    aii = indices(a.rank())
    bii = indices(b.rank())
    cii = indices(len(ai))
    shape = [None] * len(ai)
    # BUG FIX: indices() returns an (immutable) tuple, so the original
    # in-place assignments aii[j] = cii[i] raised TypeError; rebuild the
    # tuples by slicing instead, substituting the shared contraction index.
    for i, j in enumerate(ai):
        aii = aii[:j] + (cii[i],) + aii[j + 1:]
        shape[i] = ash[j]
    for i, j in enumerate(bi):
        bii = bii[:j] + (cii[i],) + bii[j + 1:]
        ufl_assert(shape[i] == bsh[j], "Shape mismatch in contraction.")
    # Repeated indices cii trigger implicit IndexSums over the product.
    s = a[aii] * b[bii]
    cii = set(cii)
    # Remaining (non-contracted) indices become the result's tensor axes.
    ii = tuple(i for i in (aii + bii) if i not in cii)
    return as_tensor(s, ii)
def compute_gprimeterm(ngrads, vval, vcomp, wshape, wcomp):
    """Apply ngrads gradients to the argument value vval, pick out the
    scalar component(s), and place them into the tensor positions
    described by (wshape, wcomp)."""
    grad_ii = indices(ngrads)
    # Scalar component(s) of the repeatedly differentiated argument.
    dv = apply_grads(vval)[vcomp + grad_ii]
    # Unit tensor that positions the scalar in the result tensor.
    if not wshape:
        unit, free_ii = 1, ()
    else:
        unit, free_ii = unit_indexed_tensor(wshape, wcomp)
    return as_tensor(unit * dv, free_ii + grad_ii)
def compute_gprimeterm(ngrads, vval, vcomp, wshape, wcomp):
    """Apply ngrads gradients directly to the argument vval and embed the
    resulting scalar component(s) at tensor position (wshape, wcomp)."""
    kk = indices(ngrads)
    # Indexed scalar component(s) of the differentiated argument.
    scalar = apply_grads(vval)[vcomp + kk]
    # Either a positioned unit tensor or a plain scalar factor of 1.
    Ejj, jj = unit_indexed_tensor(wshape, wcomp) if wshape else (1, ())
    return as_tensor(Ejj * scalar, jj + kk)
def contraction(a, a_axes, b, b_axes):
    """UFL operator: Take the contraction of a and b over given axes.

    a_axes and b_axes are sequences of axis numbers (of equal length);
    corresponding axes are summed over, and the remaining axes of a
    followed by the remaining axes of b form the result's shape.
    """
    ai, bi = a_axes, b_axes
    ufl_assert(
        len(ai) == len(bi),
        "Contraction must be over the same number of axes.")
    ash = a.shape()
    bsh = b.shape()
    aii = indices(a.rank())
    bii = indices(b.rank())
    cii = indices(len(ai))
    shape = [None] * len(ai)
    # BUG FIX: indices() returns an (immutable) tuple, so the original
    # in-place assignments aii[j] = cii[i] raised TypeError; rebuild the
    # tuples by slicing instead, substituting the shared contraction index.
    for i, j in enumerate(ai):
        aii = aii[:j] + (cii[i],) + aii[j + 1:]
        shape[i] = ash[j]
    for i, j in enumerate(bi):
        bii = bii[:j] + (cii[i],) + bii[j + 1:]
        ufl_assert(shape[i] == bsh[j], "Shape mismatch in contraction.")
    # Repeated indices cii trigger implicit IndexSums over the product.
    s = a[aii] * b[bii]
    cii = set(cii)
    # Remaining (non-contracted) indices become the result's tensor axes.
    ii = tuple(i for i in (aii + bii) if i not in cii)
    return as_tensor(s, ii)
def indexed(self, o):
    """Differentiation visitor handler for Indexed expressions.

    Returns the pair (o, op) where o is the (possibly reused) visited
    expression and op its derivative.
    """
    # o is Indexed(A, jj): split into the indexed expression A and
    # its MultiIndex jj.
    A, jj = o.operands()
    # Visit the operand: A2 is the (possibly rebuilt) expression,
    # Ap its derivative.
    A2, Ap = self.visit(A)
    # Rebuild o only if the operand actually changed.
    o = self.reuse_if_possible(o, A2, jj)
    if isinstance(Ap, Zero):
        # Derivative of the operand vanished: produce a Zero with
        # matching shape/indices.
        op = self._make_zero_diff(o)
    else:
        # The derivative Ap may have extra rank beyond the indices jj
        # (e.g. extra variable-derivative axes); index those with fresh
        # indices and wrap back into a tensor.
        r = Ap.rank() - len(jj)
        if r:
            ii = indices(r)
            # NOTE(review): reaches into the private _indices tuple of
            # the MultiIndex to concatenate with the new indices.
            op = Indexed(Ap, jj._indices + ii)
            op = as_tensor(op, ii)
        else:
            op = Indexed(Ap, jj)
    return (o, op)
def _make_ones_diff(self, o): ufl_assert(o.shape() == self._var_shape, "This is only used by VariableDerivative, yes?") # Define a scalar value with the right indices # (kind of cumbersome this... any simpler way?) sh = o.shape() fi = o.free_indices() idims = dict(o.index_dimensions()) if self._var_free_indices: # Currently assuming only one free variable index i, = self._var_free_indices if i not in idims: fi = unique_indices(fi + (i, )) idims[i] = self._var_index_dimensions[i] # Create a 1 with index annotations one = IntValue(1, (), fi, idims) res = None if sh == (): return one elif len(sh) == 1: # FIXME: If sh == (1,), I think this will get the wrong shape? # I think we should make sure sh=(1,...,1) is always converted to () early. fp = Identity(sh[0]) else: ind1 = () ind2 = () for d in sh: i, j = indices(2) dij = Identity(d)[i, j] if res is None: res = dij else: res *= dij ind1 += (i, ) ind2 += (j, ) fp = as_tensor(res, ind1 + ind2) # Apply index annotations if fi: fp *= one return fp
def _make_ones_diff(self, o): ufl_assert(o.shape() == self._var_shape, "This is only used by VariableDerivative, yes?") # Define a scalar value with the right indices # (kind of cumbersome this... any simpler way?) sh = o.shape() fi = o.free_indices() idims = dict(o.index_dimensions()) if self._var_free_indices: # Currently assuming only one free variable index i, = self._var_free_indices if i not in idims: fi = unique_indices(fi + (i,)) idims[i] = self._var_index_dimensions[i] # Create a 1 with index annotations one = IntValue(1, (), fi, idims) res = None if sh == (): return one elif len(sh) == 1: # FIXME: If sh == (1,), I think this will get the wrong shape? # I think we should make sure sh=(1,...,1) is always converted to () early. fp = Identity(sh[0]) else: ind1 = () ind2 = () for d in sh: i, j = indices(2) dij = Identity(d)[i, j] if res is None: res = dij else: res *= dij ind1 += (i,) ind2 += (j,) fp = as_tensor(res, ind1 + ind2) # Apply index annotations if fi: fp *= one return fp
def unit_indexed_tensor(shape, component):
    """Return (E, jj) where E is an outer product of Identity rows picking
    out 'component' of a tensor of the given shape, and jj are the fresh
    free indices of E. Returns (0, ()) for a scalar shape."""
    from ufl.constantvalue import Identity
    from ufl.operators import outer # a bit of circular dependency issue here
    rank = len(shape)
    if rank == 0:
        return 0, ()
    jj = indices(rank)
    E = None
    # One Kronecker-delta row per axis, combined by outer products.
    for dim, comp, idx in zip(shape, component, jj):
        delta_row = Identity(dim)[comp, idx]
        E = delta_row if E is None else outer(E, delta_row)
    return E, jj
def _mult(a, b):
    """Implement the UFL '*' operator for two expressions, dispatching on
    operand ranks: matrix-matrix/matrix-vector dot products, scalar-tensor
    products, and plain scalar products with implicit index sums."""
    # Discover repeated indices, which results in index sums
    ai = a.free_indices()
    bi = b.free_indices()
    ii = ai + bi
    ri = repeated_indices(ii)

    # Pick out valid non-scalar products here (dot products):
    # - matrix-matrix (A*B, M*grad(u)) => A . B
    # - matrix-vector (A*v) => A . v
    s1, s2 = a.shape(), b.shape()
    r1, r2 = len(s1), len(s2)

    if r1 == 2 and r2 in (1, 2):
        ufl_assert(not ri, "Not expecting repeated indices in non-scalar product.")

        # Check for zero, simplifying early if possible
        if isinstance(a, Zero) or isinstance(b, Zero):
            # Dot product shape: drop a's last axis and b's first axis.
            shape = s1[:-1] + s2[1:]
            fi = single_indices(ii)
            idims = mergedicts((a.index_dimensions(), b.index_dimensions()))
            idims = subdict(idims, fi)
            return Zero(shape, fi, idims)

        # Return dot product in index notation
        ai = indices(a.rank() - 1)
        bi = indices(b.rank() - 1)
        k = indices(1)

        # Create an IndexSum over a Product
        s = a[ai + k] * b[k + bi]
        return as_tensor(s, ai + bi)

    elif not (r1 == 0 and r2 == 0):
        # Scalar - tensor product
        if r2 == 0:
            # Normalize so a is the scalar factor and b the tensor.
            a, b = b, a
            s1, s2 = s2, s1

        # Check for zero, simplifying early if possible
        if isinstance(a, Zero) or isinstance(b, Zero):
            shape = s2
            fi = single_indices(ii)
            idims = mergedicts((a.index_dimensions(), b.index_dimensions()))
            idims = subdict(idims, fi)
            return Zero(shape, fi, idims)

        # Repeated indices are allowed, like in:
        #v[i]*M[i,:]

        # Apply product to scalar components
        ii = indices(b.rank())
        p = Product(a, b[ii])

        # Wrap as tensor again
        p = as_tensor(p, ii)
        # TODO: Should we apply IndexSum or as_tensor first?

        # Apply index sums
        for i in ri:
            p = IndexSum(p, i)

        return p

    # Scalar products use Product and IndexSum for implicit sums:
    p = Product(a, b)
    for i in ri:
        p = IndexSum(p, i)
    return p
# # You should have received a copy of the GNU Lesser General Public License # along with UFL. If not, see <http://www.gnu.org/licenses/>. # # Modified by Anders Logg, 2008 # Modified by Kristian Oelgaard, 2009 # # First added: 2008-03-14 # Last changed: 2013-01-11 from ufl.indexing import indices from ufl.integral import Measure from ufl.geometry import Cell # Default indices i, j, k, l = indices(4) p, q, r, s = indices(4) # Default measures for integration dx = Measure(Measure.CELL, Measure.DOMAIN_ID_DEFAULT) ds = Measure(Measure.EXTERIOR_FACET, Measure.DOMAIN_ID_DEFAULT) dS = Measure(Measure.INTERIOR_FACET, Measure.DOMAIN_ID_DEFAULT) dP = Measure(Measure.POINT, Measure.DOMAIN_ID_DEFAULT) dE = Measure(Measure.MACRO_CELL, Measure.DOMAIN_ID_DEFAULT) dc = Measure(Measure.SURFACE, Measure.DOMAIN_ID_DEFAULT) # Cell types cell1D = Cell("cell1D", 1) cell2D = Cell("cell2D", 2) cell3D = Cell("cell3D", 3) vertex = Cell("vertex", 0)
def analyse_key(ii, rank):
    """Takes something the user might input as an index tuple
    inside [], which could include complete slices (:) and
    ellipsis (...), and returns tuples of actual UFL index objects.

    The return value is a tuple (indices, axis_indices),
    each being a tuple of IndexBase instances.

    The return value 'indices' corresponds to all
    input objects of these types:
    - Index
    - FixedIndex
    - int => Wrapped in FixedIndex

    The return value 'axis_indices' corresponds to all
    input objects of these types:
    - Complete slice (:) => Replaced by a single new index
    - Ellipsis (...) => Replaced by multiple new indices
    """
    # Wrap in tuple
    if not isinstance(ii, (tuple, MultiIndex)):
        ii = (ii, )
    else:
        # Flatten nested tuples, happens with f[...,ii] where ii is a tuple of indices
        jj = []
        for j in ii:
            if isinstance(j, (tuple, MultiIndex)):
                jj.extend(j)
            else:
                jj.append(j)
        ii = tuple(jj)

    # Convert all indices to Index or FixedIndex objects.
    # If there is an ellipsis, split the indices into before and after.
    axis_indices = set()
    pre = []
    post = []
    indexlist = pre
    for i in ii:
        if i == Ellipsis:
            # Switch from pre to post list when an ellipsis is encountered
            ufl_assert(indexlist is pre, "Found duplicate ellipsis.")
            indexlist = post
        else:
            # Convert index to a proper type
            if isinstance(i, int):
                idx = FixedIndex(i)
            elif isinstance(i, IndexBase):
                idx = i
            elif isinstance(i, slice):
                if i == slice(None):
                    # A complete slice becomes a fresh axis index.
                    idx = Index()
                    axis_indices.add(idx)
                else:
                    # TODO: Use ListTensor to support partial slices?
                    error( "Partial slices not implemented, only complete slices like [:]" )
            else:
                # Diagnostic dump before rejecting an unknown index object;
                # helps debug shadowed/duplicated Index classes.
                print '\n', '=' * 60
                print Index, id(Index)
                print type(i), id(type(i))
                print str(i)
                print repr(i)
                print type(i).__module__
                print Index.__module__
                print '\n', '=' * 60
                error("Can't convert this object to index: %r" % i)

            # Store index in pre or post list
            indexlist.append(idx)

    # Handle ellipsis as a number of complete slices,
    # that is create a number of new axis indices
    num_axis = rank - len(pre) - len(post)
    if indexlist is post:
        ellipsis_indices = indices(num_axis)
        axis_indices.update(ellipsis_indices)
    else:
        ellipsis_indices = ()

    # Construct final tuples to return
    all_indices = tuple(chain(pre, ellipsis_indices, post))
    # Keep axis_indices in the order they appear in all_indices.
    axis_indices = tuple(i for i in all_indices if i in axis_indices)
    return all_indices, axis_indices
def inner(self, o, a, b):
    """Expand inner(a, b): index both operands with the same fresh
    indices so the repeated indices produce implicit IndexSums over
    a componentwise product."""
    ufl_assert(a.rank() == b.rank())
    shared = indices(a.rank())
    product = a[shared] * b[shared]
    return product
def outer(self, o, a, b):
    """Expand outer(a, b) as a Product over disjoint index sets,
    rewrapped as a tensor of rank a.rank() + b.rank()."""
    aa = indices(a.rank())
    bb = indices(b.rank())
    # No shared indices, so no implicit sums occur here.
    product = a[aa] * b[bb]
    return as_tensor(product, aa + bb)
def sym(self, o, A):
    """Expand sym(A) componentwise as (A + A^T)/2."""
    i, j = indices(2)
    symmetric_part = (A[i, j] + A[j, i]) / 2
    return as_matrix(symmetric_part, (i, j))
def transposed(self, o, A):
    """Expand transpose(A) by rewrapping components with swapped indices."""
    row, col = indices(2)
    return as_tensor(A[row, col], (col, row))
def nabla_grad(self, o, a):
    """Expand nabla_grad(a) in terms of Grad(a): the derivative axis
    (last in Grad) is moved to the front."""
    rank = o.rank()
    ii = indices(rank)
    # Rotate index tuple so the final (derivative) index comes first.
    rotated = ii[-1:] + ii[:-1]
    return as_tensor(Grad(a)[ii], rotated)