def integrate_definite(poly, a, b):
    """Evaluate the definite integral of *poly* over ``[a, b]``.

    Computes the antiderivative, substitutes the bounds for the
    polynomial's base variable, and returns their difference as a
    symbolic ``Sum``.
    """
    antideriv = integrate(poly)
    value_at_lower = pymbolic.substitute(antideriv, {poly.base: a})
    value_at_upper = pymbolic.substitute(antideriv, {poly.base: b})

    from pymbolic.primitives import Sum
    return Sum((value_at_upper, -value_at_lower))
def kill_trivial_assignments(assignments, retain_names=frozenset()):
    """Remove trivial assignments by inlining them into the survivors.

    :arg assignments: an iterable of ``(name, value)`` pairs.
    :arg retain_names: a collection of names that must be kept even if
        their assignment is trivial.  The default is ``frozenset()``
        rather than ``set()`` to avoid the mutable-default-argument
        pitfall (a plain ``set()`` default is shared across calls).
    :returns: a list of ``(name, value)`` pairs for the retained
        assignments, with all rejected (trivial) assignments
        substituted back into the surviving values.
    """
    logger.info("kill trivial assignments (plain): start")

    approved_assignments = []
    rejected_assignments = []

    for name, value in assignments:
        if name in retain_names or is_assignment_nontrivial(name, value):
            approved_assignments.append((name, value))
        else:
            rejected_assignments.append((name, value))

    # un-substitute rejected assignments
    unsubst_rej = make_one_step_subst(rejected_assignments)

    result = []
    from pymbolic import substitute
    for name, expr in approved_assignments:
        r = substitute(expr, unsubst_rej)
        result.append((name, r))

    logger.info(
        "kill trivial assignments (plain): done, {nrej} assignments killed"
        .format(nrej=len(rejected_assignments)))

    return result
def _parse_expr(self, expr):
    """Parse the string *expr* and replace this object's global
    constants in the resulting expression tree.
    """
    from pymbolic import parse, substitute

    # Parse first, then fold in the known constants.
    with_constants = substitute(parse(expr), self.constants)
    return with_constants
def get_expr_dataset(self, expression, description=None, unit=None):
    """Prepare a time-series dataset for a given expression.

    @arg expression: A C{pymbolic} expression that may involve
      the time-series variables and the constants in this
      :class:`LogManager`. If there is data from multiple ranks for
      a quantity occuring in this expression, an aggregator may have
      to be specified.
    @return: C{(description, unit, table)}, where C{table}
      is a list of tuples C{(tick_nbr, value)}.

    Aggregators are specified as follows:
    - C{qty.min}, C{qty.max}, C{qty.avg}, C{qty.sum}, C{qty.norm2}
    - C{qty[rank_nbr]}
    - C{qty.loc}
    """
    parsed = self._parse_expr(expression)
    parsed, dep_data = self._get_expr_dep_data(parsed)

    # aggregate table data
    for dd in dep_data:
        table = self.get_table(dd.name)
        table.sort(["step"])
        dd.table = table.aggregated(["step"], "value", dd.agg_func).data

    # evaluate unit and description, if necessary
    if unit is None:
        from pymbolic import substitute, parse

        # Map each substituted log variable to its recorded unit string.
        unit_dict = dict((dd.varname, dd.qdat.unit) for dd in dep_data)

        from pytools import all
        if all(v is not None for v in six.itervalues(unit_dict)):
            # All dependencies have units: parse them and evaluate the
            # expression symbolically on the units themselves.
            unit_dict = dict(
                (k, parse(v)) for k, v in six.iteritems(unit_dict))
            unit = substitute(parsed, unit_dict)
        else:
            # At least one dependency has no unit; the combined unit
            # cannot be derived.
            unit = None

    if description is None:
        description = expression

    # compile and evaluate
    from pymbolic import compile
    compiled = compile(parsed, [dd.varname for dd in dep_data])

    data = []

    for key, values in _join_by_first_of_tuple(dd.table for dd in dep_data):
        try:
            data.append((key, compiled(*values)))
        except ZeroDivisionError:
            # Skip ticks where the expression divides by zero
            # (e.g. a zero-valued denominator quantity at that step).
            pass

    return (description, unit, data)
def get_expr_dataset(self, expression, description=None, unit=None):
    """Prepare a time-series dataset for a given expression.

    @arg expression: A C{pymbolic} expression that may involve
      the time-series variables and the constants in this
      :class:`LogManager`. If there is data from multiple ranks for
      a quantity occuring in this expression, an aggregator may have
      to be specified.
    @return: C{(description, unit, table)}, where C{table}
      is a list of tuples C{(tick_nbr, value)}.

    Aggregators are specified as follows:
    - C{qty.min}, C{qty.max}, C{qty.avg}, C{qty.sum}, C{qty.norm2}
    - C{qty[rank_nbr]}
    - C{qty.loc}
    """
    parsed = self._parse_expr(expression)
    parsed, dep_data = self._get_expr_dep_data(parsed)

    # aggregate table data
    for dd in dep_data:
        table = self.get_table(dd.name)
        table.sort(["step"])
        dd.table = table.aggregated(["step"], "value", dd.agg_func).data

    # evaluate unit and description, if necessary
    if unit is None:
        from pymbolic import substitute, parse

        # Per-variable unit strings for every dependency.
        unit_dict = dict((dd.varname, dd.qdat.unit) for dd in dep_data)

        from pytools import all
        if all(v is not None for v in six.itervalues(unit_dict)):
            # Every dependency carries a unit: evaluate the expression
            # on the parsed units to obtain the combined unit.
            unit_dict = dict((k, parse(v)) for k, v in six.iteritems(unit_dict))
            unit = substitute(parsed, unit_dict)
        else:
            # A missing unit makes the combined unit undeterminable.
            unit = None

    if description is None:
        description = expression

    # compile and evaluate
    from pymbolic import compile
    compiled = compile(parsed, [dd.varname for dd in dep_data])

    data = []

    for key, values in _join_by_first_of_tuple(dd.table for dd in dep_data):
        try:
            data.append((key, compiled(*values)))
        except ZeroDivisionError:
            # Drop ticks where evaluation divides by zero.
            pass

    return (description, unit, data)
def get_expr_dataset(self, expression, description=None, unit=None):
    """Prepare a time-series dataset for a given expression.

    @arg expression: A C{pymbolic} expression that may involve
      the time-series variables and the constants in this
      L{LogManager}. If there is data from multiple ranks for
      a quantity occuring in this expression, an aggregator may have
      to be specified.
    @return: C{(description, unit, table)}, where C{table}
      is a list of tuples C{(tick_nbr, value)}.

    Aggregators are specified as follows:
    - C{qty.min}, C{qty.max}, C{qty.avg}, C{qty.sum}, C{qty.norm2}
    - C{qty[rank_nbr]}
    """
    parsed = self._parse_expr(expression)
    parsed, dep_data = self._get_expr_dep_data(parsed)

    # aggregate table data
    for dd in dep_data:
        table = self.get_table(dd.name)
        table.sort(["step"])
        dd.table = table.aggregated(["step"], "value", dd.agg_func).data

    # evaluate unit and description, if necessary
    if unit is None:
        from pymbolic import substitute, parse

        # Evaluate the expression on the parsed per-variable units to
        # derive the combined unit.
        # NOTE(review): unlike the newer variant of this method, this
        # assumes every dependency has a non-None unit — parse(None)
        # would fail; confirm against callers.
        unit = substitute(parsed,
                dict((dd.varname, parse(dd.qdat.unit)) for dd in dep_data)
                )

    if description is None:
        description = expression

    # compile and evaluate
    from pymbolic import compile
    compiled = compile(parsed, [dd.varname for dd in dep_data])

    return (description,
            unit,
            [(key, compiled(*values))
                for key, values in _join_by_first_of_tuple(
                    dd.table for dd in dep_data)
                ])
def test():
    """Drive a two-rate Adams-Bashforth method matrix build and print a
    sparsity pattern of the resulting numerical matrix.
    """
    from hedge.timestep.multirate_ab import \
            TwoRateAdamsBashforthMethodBuilder

    from pymbolic import var

    stepper = TwoRateAdamsBashforthMethodBuilder(
            method="Fqsr",
            large_dt=var("dt"),
            substep_count=2,
            order=1)

    # Random 2x2 coupling matrix for the fast/slow system.
    mat = numpy.random.randn(2, 2)

    # Right-hand sides for each fast/slow coupling combination;
    # each multiplies one matrix entry into the corresponding component.
    def f2f_rhs(t, yf, ys):
        return fold_constants(expand(mat[0, 0] * yf()))

    def s2f_rhs(t, yf, ys):
        return fold_constants(expand(mat[0, 1] * ys()))

    def f2s_rhs(t, yf, ys):
        return fold_constants(expand(mat[1, 0] * yf()))

    def s2s_rhs(t, yf, ys):
        return fold_constants(expand(mat[1, 1] * ys()))

    z, vars = make_method_matrix(stepper,
            rhss=(f2f_rhs, s2f_rhs, f2s_rhs, s2s_rhs),
            f_size=1, s_size=1)

    from pymbolic import compile
    num_mat_func = compile(z)
    num_mat = num_mat_func(0.1)

    if False:
        # Debug path (disabled): cross-check the compiled evaluation
        # against direct symbolic substitution of dt=0.1.
        from pymbolic import substitute
        num_mat_2 = numpy.array(
                fold_constants(substitute(z, dt=0.1)),
                dtype=numpy.complex128)

        print(la.norm(num_mat - num_mat_2))

    if True:
        # Print a star/dot sparsity picture, one row per variable.
        for row, ivar in zip(num_mat, vars):
            print("".join("*" if entry else "." for entry in row), ivar)
def map_statement(self, stmt):
    """Split a statement that both reads and writes the same variable(s).

    For each variable that is both read and written by *stmt*, emit a
    new temporary-assignment statement capturing the old value, and
    rewrite *stmt*'s right-hand side expressions to read the temporary
    instead.  Returns the (possibly unchanged) list of statements.
    """
    read_and_written = (
        stmt.get_read_variables() & stmt.get_written_variables())

    if not read_and_written:
        # Nothing to disentangle; keep the statement as-is.
        return [stmt]

    substs = []

    tmp_stmt_ids = []
    new_statements = []

    from dagrt.language import Assign
    from pymbolic import var
    for var_name in read_and_written:
        # Angle brackets are not valid in generated names; flatten them.
        tmp_var_name = self.var_name_gen(
            "temp_" + var_name.replace("<", "_").replace(">", "_"))

        substs.append((var_name, var(tmp_var_name)))

        tmp_stmt_id = self.stmt_id_gen("temp")
        tmp_stmt_ids.append(tmp_stmt_id)

        # temp := old value of the variable, guarded by the same
        # condition and dependencies as the original statement.
        new_tmp_stmt = Assign(
            tmp_var_name, (), var(var_name),
            condition=stmt.condition,
            id=tmp_stmt_id,
            depends_on=stmt.depends_on)

        new_statements.append(new_tmp_stmt)

    from pymbolic import substitute
    new_stmt = (stmt
        .map_expressions(
            lambda expr: substitute(expr, dict(substs)),
            include_lhs=False)
        .copy(
            # lhs will be rewritten, but we don't want that.
            depends_on=stmt.depends_on | frozenset(tmp_stmt_ids)))

    new_statements.append(new_stmt)

    return new_statements
def _get_expr_dep_data(self, parsed):
    """Collect dependency/aggregation info for a parsed expression.

    For each quantity the expression depends on, determine its name
    and aggregation function, and rewrite the expression so each
    dependency is referenced via a fresh ``logvar<n>`` variable.
    Returns ``(rewritten_expr, dep_data)``.
    """
    class Nth:
        # Aggregator that picks the n-th entry of a per-rank list.
        def __init__(self, n):
            self.n = n

        def __call__(self, lst):
            return lst[self.n]

    from pymbolic.mapper.dependency import DependencyMapper

    deps = DependencyMapper(include_calls=False)(parsed)

    # gather information on aggregation expressions
    dep_data = []
    from pymbolic.primitives import Variable, Lookup, Subscript
    for dep_idx, dep in enumerate(deps):
        nonlocal_agg = True

        if isinstance(dep, Variable):
            # Bare quantity name: use its default aggregator.
            name = dep.name

            if name == "math":
                # "math" is a namespace, not a quantity.
                continue

            agg_func = self.quantity_data[name].default_aggregator
            if agg_func is None:
                if self.is_parallel:
                    raise ValueError(
                            "must specify explicit aggregator for '%s'"
                            % name)

                # Serial run: a single rank's value, take entry 0.
                agg_func = lambda lst: lst[0]
        elif isinstance(dep, Lookup):
            # qty.<agg_name> form: explicit named aggregator.
            assert isinstance(dep.aggregate, Variable)
            name = dep.aggregate.name
            agg_name = dep.name

            if agg_name == "loc":
                agg_func = Nth(self.rank)
                nonlocal_agg = False
            elif agg_name == "min":
                agg_func = min
            elif agg_name == "max":
                agg_func = max
            elif agg_name == "avg":
                from pytools import average
                agg_func = average
            elif agg_name == "sum":
                agg_func = sum
            elif agg_name == "norm2":
                from math import sqrt
                agg_func = lambda iterable: sqrt(
                        sum(entry**2 for entry in iterable))
            else:
                raise ValueError("invalid rank aggregator '%s'" % agg_name)
        elif isinstance(dep, Subscript):
            # qty[rank_nbr] form: pick a specific rank's value.
            assert isinstance(dep.aggregate, Variable)
            name = dep.aggregate.name

            from pymbolic import evaluate
            agg_func = Nth(evaluate(dep.index))

        qdat = self.quantity_data[name]

        from pytools import Record

        class DependencyData(Record):
            pass

        this_dep_data = DependencyData(name=name, qdat=qdat,
                agg_func=agg_func, varname="logvar%d" % dep_idx,
                expr=dep, nonlocal_agg=nonlocal_agg)
        dep_data.append(this_dep_data)

    # substitute in the "logvar" variable names
    from pymbolic import var, substitute
    parsed = substitute(parsed,
            dict((dd.expr, var(dd.varname)) for dd in dep_data))

    return parsed, dep_data
def test_substitute():
    """Check that substitute() replaces an attribute-lookup node
    (``x.min``) inside an expression and that the result evaluates.
    """
    from pymbolic import parse, substitute, evaluate

    expr = parse("5+x.min**2")
    lookup_node = parse("x.min")

    substituted = substitute(expr, {lookup_node: 25})
    assert evaluate(substituted) == 630
def make_one_step_subst(assignments):
    """Build a single-step substitution map from the given assignments.

    Dependencies among the assignments are resolved by topological
    sort, so each resulting value has all other assigned names already
    substituted in (and the result simplified via evaluation).
    Raises AssertionError on dependency cycles.
    """
    assignments = dict(assignments)
    unwanted_vars = set(six.iterkeys(assignments))

    # Ensure no re-assignments.
    assert len(unwanted_vars) == len(assignments)

    from loopy.symbolic import get_dependencies
    # For each assigned name, the subset of its dependencies that are
    # themselves being substituted away.
    unwanted_deps = dict(
        (name, get_dependencies(value) & unwanted_vars)
        for name, value in six.iteritems(assignments))

    # {{{ compute substitution order

    # Iterative DFS-based topological sort: a node is appended to
    # `toposort` when it is seen on the stack for the second time
    # (i.e. after all its dependencies have been visited).
    toposort = []
    visited = set()
    visiting = set()

    while unwanted_vars:
        stack = [unwanted_vars.pop()]

        while stack:
            top = stack[-1]

            if top in visiting:
                visiting.remove(top)
                toposort.append(top)

            if top in visited:
                stack.pop()
                continue

            visited.add(top)
            visiting.add(top)

            for dep in unwanted_deps[top]:
                # Check for no cycles.
                assert dep not in visiting
                stack.append(dep)

    # }}}

    # {{{ make substitution

    from pymbolic import substitute

    result = {}
    used_name_to_var = {}

    from pymbolic import evaluate
    from functools import partial
    # Evaluating against a Variable-valued context simplifies the
    # expression while leaving free names symbolic.
    simplify = partial(evaluate, context=used_name_to_var)

    for name in toposort:
        value = assignments[name]
        # Dependencies appear earlier in toposort, so `result` already
        # contains their fully-substituted values.
        value = substitute(value, result)
        used_name_to_var.update(
            (used_name, prim.Variable(used_name))
            for used_name in get_dependencies(value)
            if used_name not in used_name_to_var)

        result[name] = simplify(value)

    # }}}

    return result
def integrate_definite(poly, a, b):
    """Return the definite integral of *poly* over ``[a, b]`` as a
    symbolic sum of the antiderivative's boundary values.
    """
    antideriv = integrate(poly)

    at_lower = pymbolic.substitute(antideriv, {poly.base: a})
    at_upper = pymbolic.substitute(antideriv, {poly.base: b})

    return pymbolic.sum((at_upper, -at_lower))
def generate_butcher(self, stage_coeff_set_names, stage_coeff_sets,
        rhs_funcs, estimate_coeff_set_names, estimate_coeff_sets):
    """
    :arg stage_coeff_set_names: a list of names/string identifiers
        for stage coefficient sets
    :arg stage_coeff_sets: a mapping from set names to stage coefficients
    :arg rhs_funcs: a mapping from set names to right-hand-side
        functions
    :arg estimate_coeffs_set_names: a list of names/string identifiers
        for estimate coefficient sets
    :arg estimate_coeffs_sets: a mapping from estimate coefficient set
        names to cofficients.
    """

    from pymbolic import var
    comp = self.component_id

    dt = self.dt
    t = self.t
    state = self.state

    nstages = len(self.c)

    # {{{ check coefficients for plausibility

    # Each stage's coefficients must sum to its abscissa c[istage].
    for name in stage_coeff_set_names:
        for istage in range(nstages):
            coeff_sum = sum(stage_coeff_sets[name][istage])
            assert abs(coeff_sum - self.c[istage]) < 1e-12, (
                    name, istage, coeff_sum, self.c[istage])

    # }}}

    # {{{ initialization

    last_rhss = {}

    with CodeBuilder(name="initialization") as cb:
        for name in stage_coeff_set_names:
            # FSAL-style optimization: if the first stage equals the
            # last, keep the last RHS in a persistent variable so it
            # can be recycled across steps.
            if (
                    name in self.recycle_last_stage_coeff_set_names
                    and _is_first_stage_same_as_last_stage(
                        self.c, stage_coeff_sets[name])):
                last_rhss[name] = var("<p>last_rhs_" + name)
                cb(last_rhss[name], rhs_funcs[name](t=t, **{comp: state}))

    cb_init = cb

    # }}}

    stage_rhs_vars = {}
    rhs_var_to_unknown = {}

    for name in stage_coeff_set_names:
        stage_rhs_vars[name] = [
                cb.fresh_var(f"rhs_{name}_s{i}") for i in range(nstages)]

        # These are rhss if they are not yet known and pending an implicit solve.
        for i, rhsvar in enumerate(stage_rhs_vars[name]):
            unkvar = cb.fresh_var(f"unk_{name}_s{i}")
            rhs_var_to_unknown[rhsvar] = unkvar

    knowns = set()

    # {{{ stage loop

    last_state_est_var = cb.fresh_var("last_state_est")
    last_state_est_var_valid = False

    with CodeBuilder(name="primary") as cb:
        equations = []
        unknowns = set()

        def make_known(v):
            unknowns.discard(v)
            knowns.add(v)

        for istage in range(nstages):
            for name in stage_coeff_set_names:
                c = self.c[istage]
                my_rhs = stage_rhs_vars[name][istage]

                if (
                        name in self.recycle_last_stage_coeff_set_names
                        and istage == 0
                        and _is_first_stage_same_as_last_stage(
                            self.c, stage_coeff_sets[name])):
                    # Reuse the recycled last RHS instead of
                    # re-evaluating the first stage.
                    cb(my_rhs, last_rhss[name])
                    make_known(my_rhs)
                else:
                    is_implicit = False

                    # Accumulate dt * sum(coeff * rhs) over all
                    # coefficient sets; any not-yet-known RHS makes
                    # this stage implicit.
                    state_increment = 0
                    for src_name in stage_coeff_set_names:
                        coeffs = stage_coeff_sets[src_name][istage]
                        for src_istage, coeff in enumerate(coeffs):
                            rhsval = stage_rhs_vars[src_name][src_istage]
                            if rhsval not in knowns:
                                unknowns.add(rhsval)
                                is_implicit = True

                            state_increment += dt * coeff * rhsval

                    state_est = state + state_increment
                    if (self.state_filter is not None
                            and not (
                                # reusing last output state
                                c == 0
                                and all(
                                    len(stage_coeff_sets[src_name][istage]) == 0
                                    for src_name in stage_coeff_set_names))):
                        state_est = self.state_filter(state_est)

                    if is_implicit:
                        rhs_expr = rhs_funcs[name](
                                t=t + c * dt, **{comp: state_est})

                        from dagrt.expression import collapse_constants
                        # Residual form my_rhs - f(...) = 0 for the
                        # implicit solver.
                        solve_expression = collapse_constants(
                                my_rhs - rhs_expr,
                                list(unknowns) + [self.state],
                                cb.assign, cb.fresh_var)
                        equations.append(solve_expression)

                        if istage + 1 == nstages:
                            last_state_est_var_valid = False

                    else:
                        if istage + 1 == nstages:
                            # Stash the final stage's state estimate so
                            # the output estimate can reuse it.
                            cb(last_state_est_var, state_est)
                            state_est = last_state_est_var
                            last_state_est_var_valid = True

                        rhs_expr = rhs_funcs[name](
                                t=t + c * dt, **{comp: state_est})

                        cb(my_rhs, rhs_expr)
                        make_known(my_rhs)

                # {{{ emit solve if possible

                if unknowns and len(unknowns) == len(equations):
                    # got a square system, let's solve
                    assignees = [unk.name for unk in unknowns]

                    from pymbolic import substitute
                    subst_dict = {
                            rhs_var.name: rhs_var_to_unknown[rhs_var]
                            for rhs_var in unknowns}

                    cb.assign_implicit(
                            assignees=assignees,
                            solve_components=[
                                rhs_var_to_unknown[unk].name
                                for unk in unknowns],
                            expressions=[
                                substitute(eq, subst_dict)
                                for eq in equations],

                            # TODO: Could supply a starting guess
                            other_params={
                                "guess": state},
                            solver_id="solve")

                    del equations[:]
                    knowns.update(unknowns)
                    unknowns.clear()

                # }}}

        # Compute solution estimates.
        estimate_vars = [
                cb.fresh_var("est_" + name)
                for name in estimate_coeff_set_names]

        for iest, name in enumerate(estimate_coeff_set_names):
            out_coeffs = estimate_coeff_sets[name]

            if (
                    last_state_est_var_valid
                    and  # noqa: W504
                    _is_last_stage_same_as_output(self.c,
                        stage_coeff_sets, out_coeffs)):
                # Output coincides with the last stage estimate.
                state_est = last_state_est_var
            else:
                state_increment = 0
                for src_name in stage_coeff_set_names:
                    state_increment += sum(
                            coeff * stage_rhs_vars[src_name][src_istage]
                            for src_istage, coeff in enumerate(out_coeffs))

                state_est = state + dt * state_increment

                if self.state_filter is not None:
                    state_est = self.state_filter(state_est)

            cb(estimate_vars[iest], state_est)

        # This updates <t>.
        self.finish(cb, estimate_coeff_set_names, estimate_vars)

        # These updates have to happen *after* finish because before we
        # don't yet know whether finish will accept the new state.
        for name in stage_coeff_set_names:
            if (
                    name in self.recycle_last_stage_coeff_set_names
                    and _is_first_stage_same_as_last_stage(
                        self.c, stage_coeff_sets[name])):
                cb(last_rhss[name], stage_rhs_vars[name][-1])

    cb_primary = cb

    # }}}

    return DAGCode(
            phases={
                "initial": cb_init.as_execution_phase(next_phase="primary"),
                "primary": cb_primary.as_execution_phase(next_phase="primary")
                },
            initial_phase="initial")
def get_normalised_expr(self):
    """Return ``self.expr`` with each of ``self.vars`` renamed to the
    canonical variables ``x0 .. x{dim-1}`` (in order).
    """
    canonical_vars = [pmbl.var("x%d" % d) for d in range(self.dim)]

    normalised = self.expr
    for original_var, canonical_var in zip(self.vars, canonical_vars):
        normalised = pmbl.substitute(normalised, {original_var: canonical_var})

    return normalised