def _arg_defaults(self): """A map of default argument values defined by this Grid.""" args = ReducerMap() for k, v in self.dimension_map.items(): args.update(k._arg_defaults(_min=0, size=v.loc)) if configuration['mpi']: distributor = self.distributor args[distributor._obj_comm.name] = distributor._obj_comm.value args[distributor._obj_neighborhood.name] = distributor._obj_neighborhood.value return args
def _arg_defaults(self, alias=None):
    key = alias or self
    mapper = {self: key}
    mapper.update({getattr(self, i): getattr(key, i)
                   for i in self._sub_functions})
    args = ReducerMap()

    # Add in the sparse data (as well as any SubFunction data) belonging to
    # self's local domain only
    for k, v in self._dist_scatter().items():
        args[mapper[k].name] = v
        for i, s, o in zip(mapper[k].indices, v.shape, k.staggered):
            args.update(i._arg_defaults(_min=0, size=s+o))

    # Add MPI-related data structures
    args.update(self.grid._arg_defaults())

    return args
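
# Toy illustration (hypothetical names) of the `mapper` built above: when an
# alias is given, self and each of its SubFunctions are rebound to the
# alias's homonymous objects, so the defaults get keyed by the alias's names.
class ToySparse:
    _sub_functions = ('coordinates',)

    def __init__(self, name, coordinates):
        self.name = name
        self.coordinates = coordinates

src = ToySparse('src', 'src_coords')
alias = ToySparse('alias', 'alias_coords')
mapper = {src: alias}
mapper.update({getattr(src, i): getattr(alias, i)
               for i in src._sub_functions})
assert mapper['src_coords'] == 'alias_coords'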
def _arg_defaults(self, alias=None): """ A map of default argument values defined by this symbol. Parameters ---------- alias : DiscreteFunction, optional To bind the argument values to different names. """ key = alias or self args = ReducerMap({key.name: self._data_buffer}) # Collect default dimension arguments from all indices for i, s, o in zip(key.indices, self.shape, self.staggered): args.update(i._arg_defaults(_min=0, size=s+o)) # Add MPI-related data structures if self.grid is not None: args.update(self.grid._arg_defaults()) return args
def _prepare_arguments(self, **kwargs):
    """
    Process runtime arguments passed to ``.apply()`` and derive
    default values for any remaining arguments.
    """
    overrides, defaults = split(self.input, lambda p: p.name in kwargs)

    # Process data-carrier overrides
    args = ReducerMap()
    for p in overrides:
        args.update(p._arg_values(**kwargs))
        try:
            args = ReducerMap(args.reduce_all())
        except ValueError:
            raise ValueError("Override `%s` is incompatible with overrides `%s`" %
                             (p, [i for i in overrides if i.name in args]))

    # Process data-carrier defaults
    for p in defaults:
        if p.name in args:
            # E.g., SubFunctions
            continue
        for k, v in p._arg_values(**kwargs).items():
            if k in args and args[k] != v:
                raise ValueError("Default `%s` is incompatible with other args as "
                                 "`%s=%s`, while `%s=%s` is expected. Perhaps you "
                                 "forgot to override `%s`?" %
                                 (p, k, v, k, args[k], p))
            args[k] = v
    args = args.reduce_all()

    # All DiscreteFunctions should be defined on the same Grid
    grids = {getattr(p, 'grid', None) for p in self.input} - {None}
    if len(grids) > 1 and configuration['mpi']:
        raise ValueError("Multiple Grids found")
    try:
        grid = grids.pop()
    except KeyError:
        grid = None

    # Process Dimensions (derived go after as they might need/affect their parents)
    derived, main = split(self.dimensions, lambda i: i.is_Derived)
    for d in main:
        args.update(d._arg_values(args, self._dspace[d], grid, **kwargs))
    for d in derived:
        args.update(d._arg_values(args, self._dspace[d], grid, **kwargs))

    # Process Objects (which may need some `args`)
    for o in self.objects:
        args.update(o._arg_values(args, **kwargs))

    # Sanity check
    for p in self.parameters:
        p._arg_check(args, self._dspace[p])

    # Turn arguments into a format suitable for the generated code
    # E.g., instead of NumPy arrays for Functions, the generated code expects
    # pointers to ctypes.Struct
    for p in self.parameters:
        try:
            args.update(kwargs.get(p.name, p)._arg_as_ctype(args, alias=p))
        except AttributeError:
            # User-provided floats/ndarray obviously do not have `_arg_as_ctype`
            args.update(p._arg_as_ctype(args, alias=p))

    # Add in the profiler argument
    args[self._profiler.name] = self._profiler.timer.reset()

    # Add in any backend-specific argument
    args.update(kwargs.pop('backend', {}))

    # Execute autotuning and adjust arguments accordingly
    args = self._autotune(args, kwargs.pop('autotune', configuration['autotuning']))

    # Check all user-provided keywords are known to the Operator
    if not configuration['ignore-unknowns']:
        for k, v in kwargs.items():
            if k not in self._known_arguments:
                raise ValueError("Unrecognized argument %s=%s" % (k, v))

    return args
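
# A minimal stand-in for `split` as used above (a sketch, not necessarily
# the library's implementation): partition an iterable by a predicate into
# (matching, non-matching).
def toy_split(iterable, predicate):
    yes, no = [], []
    for i in iterable:
        (yes if predicate(i) else no).append(i)
    return yes, no

kwargs = {'u': None}
overrides, defaults = toy_split(['u', 'v', 'damp'], lambda p: p in kwargs)
assert (overrides, defaults) == (['u'], ['v', 'damp'])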
def prepare_arguments(self, **kwargs):
    """
    Process runtime arguments passed to ``.apply()`` and derive
    default values for any remaining arguments.
    """
    # Process data-carriers (first overrides, then fill up with whatever is needed)
    args = ReducerMap()
    args.update([p._arg_values(**kwargs) for p in self.input if p.name in kwargs])
    args.update([p._arg_values() for p in self.input if p.name not in args])
    args = args.reduce_all()

    # Process dimensions (derived go after as they might need/affect their parents)
    derived, main = split(self.dimensions, lambda i: i.is_Derived)
    for p in main:
        args.update(p._arg_values(args, self._dspace[p], **kwargs))
    for p in derived:
        args.update(p._arg_values(args, self._dspace[p], **kwargs))

    # Sanity check
    for p in self.input:
        p._arg_check(args, self._dspace[p])

    # Derive additional values for DLE arguments
    # TODO: This is not pretty, but it works for now. Ideally, the
    # DLE arguments would be massaged into the IET so as to comply
    # with the rest of the argument derivation procedure.
    for arg in self.dle_args:
        dim = arg.argument
        osize = args[arg.original_dim.symbolic_size.name]
        if dim.symbolic_size in self.parameters:
            if arg.value is None:
                args[dim.symbolic_size.name] = osize
            elif isinstance(arg.value, int):
                args[dim.symbolic_size.name] = arg.value
            else:
                args[dim.symbolic_size.name] = arg.value(osize)

    # Add in the profiler argument
    args[self.profiler.name] = self.profiler.new()

    # Add in any backend-specific argument
    args.update(kwargs.pop('backend', {}))

    # Execute autotuning and adjust arguments accordingly
    if kwargs.pop('autotune', False):
        args = self._autotune(args)

    # Check all user-provided keywords are known to the Operator
    for k, v in kwargs.items():
        if k not in self.known_arguments:
            raise ValueError("Unrecognized argument %s=%s passed to `apply`" % (k, v))

    return args
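
# The three DLE-argument cases above in isolation (hypothetical helper
# name): the argument's value may be absent (inherit the original
# dimension's size), a constant, or a callable of that size.
def toy_derive_dle_size(value, osize):
    if value is None:
        return osize
    elif isinstance(value, int):
        return value
    else:
        return value(osize)

assert toy_derive_dle_size(None, 100) == 100
assert toy_derive_dle_size(16, 100) == 16
assert toy_derive_dle_size(lambda n: n // 2, 100) == 50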
def _prepare_arguments(self, **kwargs):
    """
    Process runtime arguments passed to ``.apply()`` and derive
    default values for any remaining arguments.
    """
    overrides, defaults = split(self.input, lambda p: p.name in kwargs)

    # Process data-carrier overrides
    args = ReducerMap()
    for p in overrides:
        args.update(p._arg_values(**kwargs))
        try:
            args = ReducerMap(args.reduce_all())
        except ValueError:
            raise ValueError("Override `%s` is incompatible with overrides `%s`" %
                             (p, [i for i in overrides if i.name in args]))

    # Process data-carrier defaults
    for p in defaults:
        if p.name in args:
            # E.g., SubFunctions
            continue
        for k, v in p._arg_values(**kwargs).items():
            if k in args and args[k] != v:
                raise ValueError("Default `%s` is incompatible with other args as "
                                 "`%s=%s`, while `%s=%s` is expected. Perhaps you "
                                 "forgot to override `%s`?" %
                                 (p, k, v, k, args[k], p))
            args[k] = v
    args = args.reduce_all()

    # All DiscreteFunctions should be defined on the same Grid
    grids = {getattr(kwargs[p.name], 'grid', None) for p in overrides}
    grids.update({getattr(p, 'grid', None) for p in defaults})
    grids.discard(None)
    if len(grids) > 1 and configuration['mpi']:
        raise ValueError("Multiple Grids found")
    try:
        grid = grids.pop()
        args.update(grid._arg_values(**kwargs))
    except KeyError:
        grid = None

    # Process Dimensions. A topological sorting is used so that derived
    # Dimensions are processed after their parents (note that a leaf
    # Dimension can have an arbitrarily long list of ancestors)
    dag = DAG(self.dimensions,
              [(i, i.parent) for i in self.dimensions if i.is_Derived])
    for d in reversed(dag.topological_sort()):
        args.update(d._arg_values(args, self._dspace[d], grid, **kwargs))

    # Process Objects (which may need some `args`)
    for o in self.objects:
        args.update(o._arg_values(args, grid=grid, **kwargs))

    # Sanity check
    for p in self.parameters:
        p._arg_check(args, self._dspace[p])
    for d in self.dimensions:
        if d.is_Derived:
            d._arg_check(args, self._dspace[d])

    # Turn arguments into a format suitable for the generated code
    # E.g., instead of NumPy arrays for Functions, the generated code expects
    # pointers to ctypes.Struct
    for p in self.parameters:
        try:
            args.update(kwargs.get(p.name, p)._arg_as_ctype(args, alias=p))
        except AttributeError:
            # User-provided floats/ndarray obviously do not have `_arg_as_ctype`
            args.update(p._arg_as_ctype(args, alias=p))

    # Execute autotuning and adjust arguments accordingly
    args = self._autotune(args, kwargs.pop('autotune', configuration['autotuning']))

    # Check all user-provided keywords are known to the Operator
    if not configuration['ignore-unknowns']:
        for k, v in kwargs.items():
            if k not in self._known_arguments:
                raise ValueError("Unrecognized argument %s=%s" % (k, v))

    # Attach `grid` to the arguments map
    args = ArgumentsMap(grid, **args)

    return args
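
# A sketch of the parents-before-children ordering that
# `reversed(dag.topological_sort())` achieves above, here with the standard
# library's graphlib (Python >= 3.9) and hypothetical dimension names.
from graphlib import TopologicalSorter

ts = TopologicalSorter()
ts.add('t', 'time')   # derived `t` depends on its parent `time`
ts.add('x')           # `x` has no parent
order = list(ts.static_order())
assert order.index('time') < order.index('t')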
def _arg_as_ctype(self, args, alias=None):
    key = alias or self
    # Replace the NumPy array with the ctypes dataobj expected by the
    # generated code
    return ReducerMap({key.name: self._C_make_dataobj(args[key.name])})
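
# A simplified, hypothetical version of the dataobj packing: wrap a NumPy
# array into a ctypes struct holding a raw data pointer plus per-dimension
# sizes. The real struct has more fields; this only shows the idea.
import ctypes
import numpy as np

class ToyDataObj(ctypes.Structure):
    _fields_ = [('data', ctypes.c_void_p),
                ('size', ctypes.POINTER(ctypes.c_int))]

def toy_make_dataobj(array):
    obj = ToyDataObj()
    obj.data = array.ctypes.data
    sizes = (ctypes.c_int * array.ndim)(*array.shape)
    obj.size = ctypes.cast(sizes, ctypes.POINTER(ctypes.c_int))
    return obj

u = np.zeros((4, 4), dtype=np.float32)
dobj = toy_make_dataobj(u)
assert dobj.size[0] == 4 and dobj.size[1] == 4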
def run(expr):
    # Return semantic (rebuilt expression, factorization candidates)
    if expr.is_Number:
        return expr, {'coeffs': expr}
    elif expr.is_Function:
        return expr, {'funcs': expr}
    elif expr.is_Pow:
        return expr, {'pows': expr}
    elif expr.is_Symbol or expr.is_Indexed or expr.is_Atom:
        return expr, {}
    elif expr.is_Add:
        args, candidates = zip(*[run(arg) for arg in expr.args])
        candidates = ReducerMap.fromdicts(*candidates)

        funcs = candidates.getall('funcs', [])
        pows = candidates.getall('pows', [])
        coeffs = candidates.getall('coeffs', [])

        # Functions/Pows are collected first, coefficients afterwards
        # Note: below we use sets, but SymPy will ensure determinism
        args = set(args)
        w_funcs = {i for i in args if any(j in funcs for j in i.args)}
        args -= w_funcs
        w_pows = {i for i in args if any(j in pows for j in i.args)}
        args -= w_pows
        w_coeffs = {i for i in args if any(j in coeffs for j in i.args)}
        args -= w_coeffs

        # Collect common funcs
        w_funcs = collect(expr.func(*w_funcs), funcs, evaluate=False)
        try:
            w_funcs = Add(*[Mul(k, collect_const(v)) for k, v in w_funcs.items()])
        except AttributeError:
            assert w_funcs == 0

        # Collect common pows
        w_pows = collect(expr.func(*w_pows), pows, evaluate=False)
        try:
            w_pows = Add(*[Mul(k, collect_const(v)) for k, v in w_pows.items()])
        except AttributeError:
            assert w_pows == 0

        # Collect common coefficients
        w_coeffs = collect_const(expr.func(*w_coeffs))

        rebuilt = Add(w_funcs, w_pows, w_coeffs, *args)
        return rebuilt, {}
    elif expr.is_Mul:
        args, candidates = zip(*[run(arg) for arg in expr.args])
        # Always collect coefficients
        rebuilt = collect_const(expr.func(*args))
        try:
            rebuilt = Mul(*rebuilt.args)
        except AttributeError:
            pass
        return rebuilt, ReducerMap.fromdicts(*candidates)
    elif expr.is_Equality:
        args, candidates = zip(*[run(expr.lhs), run(expr.rhs)])
        return expr.func(*args, evaluate=False), ReducerMap.fromdicts(*candidates)
    else:
        args, candidates = zip(*[run(arg) for arg in expr.args])
        return expr.func(*args), ReducerMap.fromdicts(*candidates)
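
# The two SymPy primitives driving `run` in isolation: `collect` with
# evaluate=False groups the f(x)-carrying terms into a dict (keyed by the
# collected factor, with 1 for the remainder), then `collect_const` factors
# out the common numeric coefficient of each group.
from sympy import Add, Mul, Function, collect, collect_const, expand, symbols

a, b, x = symbols('a b x')
f = Function('f')

expr = 2*a*f(x) + 2*b*f(x) + 4*a + 4*b
grouped = collect(expr, f(x), evaluate=False)   # {f(x): 2*a + 2*b, 1: 4*a + 4*b}
rebuilt = Add(*[Mul(k, collect_const(v)) for k, v in grouped.items()])
print(rebuilt)                                  # 2*(a + b)*f(x) + 4*(a + b)
assert expand(rebuilt - expr) == 0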