def test_sympy_subs_symmetric(mapper, expected):
    """
    Apply ``subs(mapper)`` to the symbols a..e and check the substituted
    list matches ``expected``.
    """
    # Fix: the original bound the list to `input`, shadowing the builtin
    symbols = [Symbol(name) for name in 'abcde']
    substituted = [s.subs(mapper) for s in symbols]
    assert substituted == expected
def test_loops_in_transitive_closure():
    """
    A cycle in the mapper (b -> c -> d -> e -> b) must be left intact by
    ``transitive_closure`` rather than being followed forever.
    """
    a, b, c, d, e = [Symbol(name) for name in 'abcde']
    cyclic = {a: b, b: c, c: d, d: e, e: b}
    assert transitive_closure(cyclic) == {a: b, b: c, c: d, d: e, e: b}
def test_symbols_args_vs_kwargs(self):
    """
    Unlike Functions, Symbols don't require the use of a kwarg to specify
    the name. This test basically checks that `Symbol('s') is Symbol(name='s')`,
    i.e. that we don't make any silly mistakes when it gets to compute the
    cache key.
    """
    # Positional and keyword name must hit the same cache entry
    assert Symbol('v') is Symbol(name='v')
    assert Dimension('d100') is Dimension(name='d100')
class SharedData(ThreadArray):
    """
    An Array of structs, each struct containing data shared by one producer and
    one consumer thread.
    """

    # C struct field names for the per-thread id and flag slots
    _field_id = 'id'
    _field_flag = 'flag'

    # Symbolic handles for the two mandatory struct fields; every SharedData
    # gets these in addition to any user-supplied `fields`
    _symbolic_id = Symbol(name=_field_id, dtype=np.int32)
    _symbolic_flag = VolatileInt(name=_field_flag)

    def __init_finalize__(self, *args, **kwargs):
        # Extra fields whose values are only known at runtime; stored as a
        # tuple so the object remains hashable/immutable-ish
        self.dynamic_fields = tuple(kwargs.pop('dynamic_fields', ()))

        super().__init_finalize__(*args, **kwargs)

    @classmethod
    def __pfields_setup__(cls, **kwargs):
        # The struct layout: user-declared fields followed by the mandatory
        # `id` and `flag` fields, as (C name, C ctype) pairs
        fields = as_list(kwargs.get('fields')) + [cls._symbolic_id, cls._symbolic_flag]
        return [(i._C_name, i._C_ctype) for i in fields]

    @cached_property
    def symbolic_id(self):
        # Public accessor for the class-level `id` Symbol
        return self._symbolic_id

    @cached_property
    def symbolic_flag(self):
        # Public accessor for the class-level `flag` VolatileInt
        return self._symbolic_flag

    # Pickling support
    _pickle_kwargs = ThreadArray._pickle_kwargs + ['dynamic_fields']
def test_transitive_closure():
    """
    Chains in the mapper collapse onto their endpoint: a -> b -> c -> d
    becomes {a: d, b: d, c: d}, while the independent f -> e is untouched.
    """
    a, b, c, d, e, f = [Symbol(name) for name in 'abcdef']
    closed = transitive_closure({a: b, b: c, c: d, f: e})
    assert closed == {a: d, b: d, c: d, f: e}
def _index_matrix(self, offset):
    """
    Build the matrix of indirection indices for all grid points adjacent to
    this object's coordinates, plus a mapping from each distinct index
    expression to a uniquely-named Symbol.
    """
    # Note about the use of *memoization*
    # Since this method is called by `_interpolation_indices`, using
    # memoization avoids a proliferation of symbolically identical
    # ConditionalDimensions for a given set of indirection indices

    # One row of indirection indices per adjacent grid point
    index_matrix = []
    for increments in self._point_increments:
        row = tuple(idx + ii + offset
                    for ii, idx in zip(increments, self._coordinate_indices))
        index_matrix.append(row)

    # A unique symbol for each distinct indirection index, in first-seen order
    unique_indices = filter_ordered(flatten(index_matrix))
    points = OrderedDict((p, Symbol(name='ii_%s_%d' % (self.name, i)))
                         for i, p in enumerate(unique_indices))

    return index_matrix, points
def test_symbols(self):
    """
    Test that ``Symbol(name='s') != Scalar(name='s') != Dimension(name='s')``.
    They all:

        * rely on the same caching mechanism
        * boil down to creating a sympy.Symbol
        * created with the same args/kwargs (``name='s'``)
    """
    created = [Symbol(name='s'), Scalar(name='s'), Dimension(name='s')]
    sy, sc, d = created

    # Same name, yet three distinct cached objects
    assert sy is not sc
    assert sc is not d
    assert sy is not d

    # And each is an instance of its own class
    for obj, cls in zip(created, (Symbol, Scalar, Dimension)):
        assert isinstance(obj, cls)
def __new__(cls, *args, **kwargs):
    # Delegate construction to Symbol.__new__ so instances of this subclass
    # go through Symbol's creation (and caching) logic unchanged.
    return Symbol.__new__(cls, *args, **kwargs)
def to_ops_dat(function, block):
    """
    Generate the IET nodes declaring and initializing the OPS ``ops_dat``
    handle(s) for `function` within the given OPS `block`.

    Returns a `(res, dats)` pair: `res` is the list of declaration/assignment
    nodes (dim/base/d_p/d_m initializers followed by the ops_decl_dat calls),
    and `dats` maps accessor names to the symbolic ops_dat handles.
    """
    # TimeFunctions drop the time dimension from the spatial metadata arrays
    ndim = function.ndim - (1 if function.is_TimeFunction else 0)

    # Per-function metadata arrays passed to ops_decl_dat: shape, base offset,
    # and positive/negative halo depths
    dim = SymbolicArray(name="%s_dim" % function.name, dimensions=(ndim, ), dtype=np.int32)
    base = SymbolicArray(name="%s_base" % function.name, dimensions=(ndim, ), dtype=np.int32)
    d_p = SymbolicArray(name="%s_d_p" % function.name, dimensions=(ndim, ), dtype=np.int32)
    d_m = SymbolicArray(name="%s_d_m" % function.name, dimensions=(ndim, ), dtype=np.int32)

    res = []
    dats = {}
    ops_decl_dat_call = []

    if function.is_TimeFunction:
        # One ops_dat per time slot, stored in a C array of length `time_dims`
        time_pos = function._time_position
        time_index = function.indices[time_pos]
        time_dims = function.shape[time_pos]

        # Strip the time dimension out of shape/padding/halo
        dim_shape = function.shape[:time_pos] + function.shape[time_pos + 1:]
        padding = function.padding[:time_pos] + function.padding[time_pos + 1:]
        halo = function.halo[:time_pos] + function.halo[time_pos + 1:]
        base_val = [0 for i in range(ndim)]
        # d_p is positive (right halo), d_m negative (left halo) — OPS convention,
        # presumably; verify against the OPS ops_decl_dat documentation
        d_p_val = tuple([p[0] + h[0] for p, h in zip(padding, halo)])
        d_m_val = tuple([-(p[1] + h[1]) for p, h in zip(padding, halo)])

        ops_dat_array = SymbolicArray(
            name="%s_dat" % function.name,
            dimensions=[time_dims],
            dtype="ops_dat",
        )

        # Declare the C array holding one ops_dat per time slot
        ops_decl_dat_call.append(
            Element(
                cgen.Statement(
                    "%s %s[%s]" % (ops_dat_array.dtype, ops_dat_array.name, time_dims))))

        # One ops_decl_dat call per time slot, assigned into the array
        for i in range(time_dims):
            access = FunctionTimeAccess(function, i)
            ops_dat_access = ArrayAccess(ops_dat_array, i)
            call = Call("ops_decl_dat", [
                block, 1, dim, base, d_m, d_p, access,
                String(function._C_typedata),
                String("%s%s%s" % (function.name, time_index, i))
            ], False)
            # Keyed e.g. "u{t}{i}"; value is a symbolic access ops_dat_array[t+i]
            dats["%s%s%s" % (function.name, time_index, i)] = ArrayAccess(
                ops_dat_array, Symbol("%s%s" % (time_index, i)))
            ops_decl_dat_call.append(Element(cgen.Assign(ops_dat_access, call)))
    else:
        # Single ops_dat for time-invariant functions
        ops_dat = OPSDat("%s_dat" % function.name)
        dats[function.name] = ops_dat

        d_p_val = tuple(
            [p[0] + h[0] for p, h in zip(function.padding, function.halo)])
        d_m_val = tuple(
            [-(p[1] + h[1]) for p, h in zip(function.padding, function.halo)])
        dim_shape = function.shape
        base_val = [0 for i in function.shape]

        # Declare-and-initialize in one statement
        ops_decl_dat_call.append(
            Element(
                cgen.Initializer(
                    ops_dat,
                    Call("ops_decl_dat", [
                        block, 1, dim, base, d_m, d_p,
                        FunctionTimeAccess(function, 0),
                        String(function._C_typedata),
                        String(function.name)
                    ], False))))

    # Metadata array initializers must precede the ops_decl_dat calls,
    # which read them
    res.append(Expression(ClusterizedEq(Eq(dim, ListInitializer(dim_shape)))))
    res.append(Expression(ClusterizedEq(Eq(base, ListInitializer(base_val)))))
    res.append(Expression(ClusterizedEq(Eq(d_p, ListInitializer(d_p_val)))))
    res.append(Expression(ClusterizedEq(Eq(d_m, ListInitializer(d_m_val)))))
    res.extend(ops_decl_dat_call)

    return res, dats
def symbolic_base(self):
    # A dtype-less Symbol carrying this object's name, for use as the base
    # symbol in symbolic expressions.
    return Symbol(name=self.name, dtype=None)
def test_ctypes_to_cstr(dtype, expected):
    """
    Check that a Symbol's C ctype renders to the expected C type string.
    """
    sym = Symbol(name='a', dtype=dtype)
    rendered = ctypes_to_cstr(sym._C_ctype)
    assert rendered == expected