def map_sum(self, expr):
    sum_kind = None

    term_kind_matrix = intern("matrix")
    term_kind_vector = intern("vector")
    term_kind_scalar = intern("scalar")

    result = 0
    for child in expr.children:
        rec_child = self.rec(child)

        if is_zero(rec_child):
            continue

        if isinstance(rec_child, np.ndarray):
            if self.is_kind_matrix(rec_child):
                term_kind = term_kind_matrix
            elif self.is_kind_vector(rec_child):
                term_kind = term_kind_vector
            else:
                raise RuntimeError("unexpected array rank")
        else:
            term_kind = term_kind_scalar

        if sum_kind is None:
            sum_kind = term_kind

        if term_kind != sum_kind:
            raise RuntimeError("encountered %s in sum of kind %s"
                    % (term_kind, sum_kind))

        result = result + rec_child

    return result
def __init__(self, next_attr_name=None, prev_attr_name=None):
    """Initializes this list.

    next_attr_name: The name of the attribute that holds a reference
                    to the next item in the list.

    prev_attr_name: The name of the attribute that holds a reference
                    to the previous item in the list.
    """

    # Keep an interned version of the attribute names. This should
    # speed up the process of looking up the attributes.
    self.next_name = intern(next_attr_name)
    self.prev_name = intern(prev_attr_name)
def memoize_method_nested(inner):
    """Adds a cache to a function nested inside a method. The cache is
    attached to *memoize_cache_context* (if it exists) or *self* in the
    outer (method) namespace.

    Requires Python 2.5 or newer.
    """

    from functools import wraps
    cache_dict_name = intern("_memoize_inner_dic_%s_%s_%d"
            % (inner.__name__, inner.__code__.co_filename,
                inner.__code__.co_firstlineno))

    from inspect import currentframe
    outer_frame = currentframe().f_back
    cache_context = outer_frame.f_locals.get("memoize_cache_context")
    if cache_context is None:
        cache_context = outer_frame.f_locals.get("self")

    try:
        cache_dict = getattr(cache_context, cache_dict_name)
    except AttributeError:
        cache_dict = {}
        setattr(cache_context, cache_dict_name, cache_dict)

    @wraps(inner)
    def new_inner(*args):
        try:
            return cache_dict[args]
        except KeyError:
            result = inner(*args)
            cache_dict[args] = result
            return result

    return new_inner
def realize_conditional(self, node, context_cond=None):
    scope = self.scope_stack[-1]

    cond_name = intern("loopy_cond%d" % self.condition_id_counter)
    self.condition_id_counter += 1
    assert cond_name not in scope.type_map

    scope.type_map[cond_name] = np.int32

    from pymbolic import var
    cond_var = var(cond_name)

    self.add_expression_instruction(
            cond_var, self.parse_expr(node, node.expr))

    cond_expr = cond_var
    if context_cond is not None:
        from pymbolic.primitives import LogicalAnd
        cond_expr = LogicalAnd((cond_var, context_cond))

        self.conditions_data.append((context_cond, cond_var))
    else:
        self.conditions_data.append((None, cond_var))

    self.conditions.append(cond_expr)
def __init__(self, **kwargs):
    kwargs["name"] = intern(kwargs.pop("name"))

    target = kwargs.pop("target", None)
    dtype = kwargs.pop("dtype", None)

    if 'for_atomic' in kwargs:
        for_atomic = kwargs['for_atomic']
    else:
        for_atomic = False

    from loopy.types import to_loopy_type
    dtype = to_loopy_type(
            dtype, allow_auto=True, allow_none=True,
            for_atomic=for_atomic, target=target)

    import loopy as lp
    if dtype is lp.auto:
        warn("Argument/temporary data type for '%s' should be None if "
                "unspecified, not auto. This usage will be disallowed in 2018."
                % kwargs["name"],
                DeprecationWarning, stacklevel=2)

        dtype = None

    kwargs["dtype"] = dtype

    ImmutableRecord.__init__(self, **kwargs)
def normalize_chromosome(c):
    try:
        return NORMALIZE_CHROMOSOME_CACHE[c]
    except KeyError:
        pass

    if not (is_string(c) or is_integer(c)):
        raise TypeError("Chromosome cannot be '%s' : %s" % (c, type(c)))

    result = str(c)

    if result == "0":
        raise ValueError("Chromosome name cannot be 0")
    elif result == "":
        raise ValueError("Chromosome name cannot be empty")

    # only strip off lowercase "chr" since some of the non-chromosomal
    # contigs start with "CHR"
    if result.startswith("chr"):
        result = result[3:]

    # just in case someone is being lazy, capitalize "M", "MT", "X", "Y"
    result = result.upper()

    # standardize the mitochondrial genome to be "MT"
    if result == "M":
        result = "MT"

    # interning strings since the chromosome names probably get constructed
    # or parsed millions of times; this can save memory in tight situations
    # (such as parsing GTF files)
    result = intern(result)

    NORMALIZE_CHROMOSOME_CACHE[c] = result

    return result
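# A minimal usage sketch (not part of the original source), assuming the
# NORMALIZE_CHROMOSOME_CACHE dict and the is_string/is_integer helpers
# referenced above are in scope:
assert normalize_chromosome("chrM") == "MT"  # "chr" stripped, "M" -> "MT"
assert normalize_chromosome(1) == "1"        # integers are stringified
assert normalize_chromosome("x") == "X"      # lowercase names are capitalized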
def memoize_method(method):
    """Supports cache deletion via ``method_name.clear_cache(self)``.

    .. note::
        *clear_cache* support requires Python 2.5 or newer.
    """

    cache_dict_name = intern("_memoize_dic_"+method.__name__)

    def wrapper(self, *args, **kwargs):
        if kwargs:
            key = (_HasKwargs, frozenset(six.iteritems(kwargs))) + args
        else:
            key = args

        try:
            return getattr(self, cache_dict_name)[key]
        except AttributeError:
            result = method(self, *args, **kwargs)
            setattr(self, cache_dict_name, {key: result})
            return result
        except KeyError:
            result = method(self, *args, **kwargs)
            getattr(self, cache_dict_name)[key] = result
            return result

    def clear_cache(self):
        delattr(self, cache_dict_name)

    if sys.version_info >= (2, 5):
        from functools import update_wrapper
        new_wrapper = update_wrapper(wrapper, method)
        new_wrapper.clear_cache = clear_cache

    return new_wrapper
def memoize_method(method):
    """Supports cache deletion via ``method_name.clear_cache(self)``.

    .. note::
        *clear_cache* support requires Python 2.5 or newer.
    """

    return memoize_on_first_arg(method,
            intern("_memoize_dic_"+method.__name__))
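# A minimal usage sketch (not part of the original source): the memoized
# result lives in a per-instance dict, and clear_cache() drops it.
class _Demo(object):
    @memoize_method
    def double(self, x):
        print("computing %d" % x)
        return 2 * x

d = _Demo()
d.double(3)                   # prints "computing 3", returns 6
d.double(3)                   # cache hit: no print
_Demo.double.clear_cache(d)   # deletes the per-instance cache dict
d.double(3)                   # recomputes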
def make_unique_instruction_id(self, insns=None, based_on="insn",
        extra_used_ids=set()):
    if insns is None:
        insns = self.instructions

    used_ids = set(insn.id for insn in insns) | extra_used_ids

    for id_str in generate_unique_names(based_on):
        if id_str not in used_ids:
            return intern(id_str)
def __call__(self, based_on="id"): based_on = self.forced_prefix + based_on for var_name in generate_unique_names(based_on): if not self.is_name_conflicting(var_name): break var_name = intern(var_name) self.existing_names.add(var_name) return var_name
def __init__(self, **kwargs):
    kwargs["name"] = intern(kwargs.pop("name"))

    dtype = kwargs.pop("dtype", None)

    if isinstance(dtype, np.dtype):
        from loopy.tools import PicklableDtype
        kwargs["picklable_dtype"] = PicklableDtype(dtype)
    else:
        kwargs["picklable_dtype"] = dtype

    Record.__init__(self, **kwargs)
def parse_if_necessary(insn, defines):
    if isinstance(insn, InstructionBase):
        yield insn.copy(
                id=intern(insn.id) if isinstance(insn.id, str) else insn.id,
                insn_deps=frozenset(intern(dep) for dep in insn.insn_deps),
                groups=frozenset(intern(grp) for grp in insn.groups),
                conflicts_with_groups=frozenset(
                    intern(grp) for grp in insn.conflicts_with_groups),
                forced_iname_deps=frozenset(
                    intern(iname) for iname in insn.forced_iname_deps),
                predicates=frozenset(
                    intern(pred) for pred in insn.predicates),
                ), []
        return
    elif not isinstance(insn, str):
        raise TypeError("Instructions must be either an Instruction "
                "instance or a parseable string. got '%s' instead."
                % type(insn))

    for insn in insn.split("\n"):
        comment_start = insn.find("#")
        if comment_start >= 0:
            insn = insn[:comment_start]

        insn = insn.strip()
        if not insn:
            continue

        for sub_insn in expand_defines(insn, defines, single_valued=False):
            yield parse_insn(sub_insn)
def der_parse_OID(derblob):
    oidvals = [0]
    for byte in map(ord, derblob):
        if byte & 0x80 != 0x00:
            oidvals[-1] = (oidvals[-1] << 7) | (byte & 0x7f)
        else:
            oidvals[-1] = (oidvals[-1] << 7) | byte
            oidvals.append(0)
    fst = oidvals[0] // 40
    snd = oidvals[0] % 40
    oidvals = [fst, snd] + oidvals[1:-1]
    retval = '.'.join(map(str, oidvals))
    return intern(retval)
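# A round-trip check (not from the original source): the DER-encoded body of
# the RSA OID 1.2.840.113549 decodes via der_parse_OID above. (Works on byte
# strings under Python 2 and on str under Python 3, since ord() accepts
# single-character strings.)
assert der_parse_OID('\x2a\x86\x48\x86\xf7\x0d') == '1.2.840.113549'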
def __setstate__(self, val):
    super(InstructionBase, self).__setstate__(val)

    from loopy.tools import intern_frozenset_of_ids

    if self.id is not None:  # pylint:disable=access-member-before-definition
        self.id = intern(self.id)
    self.depends_on = intern_frozenset_of_ids(self.depends_on)
    self.groups = intern_frozenset_of_ids(self.groups)
    self.conflicts_with_groups = (
            intern_frozenset_of_ids(self.conflicts_with_groups))
    self.within_inames = (
            intern_frozenset_of_ids(self.within_inames))
def cursor_to_dict(cursor):
    """Converts a SQL cursor into a list of dicts.

    Args:
        cursor: The DBAPI cursor which has executed a query.
    Returns:
        A list of dicts where the key is the column header.
    """
    col_headers = list(intern(str(column[0])) for column in cursor.description)
    results = list(
        dict(zip(col_headers, row)) for row in cursor
    )
    return results
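# A minimal usage sketch (not part of the original source), with sqlite3
# standing in for the DBAPI driver:
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (a, b)")
conn.execute("INSERT INTO t VALUES (1, 'x')")
print(cursor_to_dict(conn.execute("SELECT a, b FROM t")))
# -> [{'a': 1, 'b': 'x'}]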
def intern_string(string):
    """Takes a (potentially) unicode string and interns it if it's ascii"""
    if string is None:
        return None

    try:
        if six.PY2:
            string = string.encode("ascii")

        return intern(string)
    except UnicodeEncodeError:
        return string
def __setstate__(self, val):
    super(InstructionBase, self).__setstate__(val)

    from loopy.tools import intern_frozenset_of_ids

    self.id = intern(self.id)
    self.insn_deps = intern_frozenset_of_ids(self.insn_deps)
    self.groups = intern_frozenset_of_ids(self.groups)
    self.conflicts_with_groups = (
            intern_frozenset_of_ids(self.conflicts_with_groups))
    self.forced_iname_deps = (
            intern_frozenset_of_ids(self.forced_iname_deps))
    self.predicates = (
            intern_frozenset_of_ids(self.predicates))
def __call__(self, based_on="id"):
    based_on = self.forced_prefix + based_on

    counter = self.prefix_to_counter.get(based_on, None)

    for counter, var_name in generate_numbered_unique_names(based_on, counter):
        if not self.is_name_conflicting(var_name):
            break

    self.prefix_to_counter[based_on] = counter

    var_name = intern(var_name)

    self.existing_names.add(var_name)
    return var_name
def first_arg_dependent_memoize_nested(nested_func):
    """Provides memoization for nested functions. Typically used to cache
    things that get created inside a :class:`pyopencl.Context`, e.g. programs
    and kernels. Assumes that the first argument of the decorated function is
    an OpenCL object that might go away, such as a :class:`pyopencl.Context`
    or a :class:`pyopencl.CommandQueue`, and will therefore respond to
    :func:`clear_first_arg_caches`.

    .. versionadded:: 2013.1

    Requires Python 2.5 or newer.
    """

    from functools import wraps
    cache_dict_name = intern("_memoize_inner_dic_%s_%s_%d"
            % (nested_func.__name__, nested_func.__code__.co_filename,
                nested_func.__code__.co_firstlineno))

    from inspect import currentframe
    # prevent ref cycle
    try:
        caller_frame = currentframe().f_back
        cache_context = caller_frame.f_globals[
                caller_frame.f_code.co_name]
    finally:
        #del caller_frame
        pass

    try:
        cache_dict = getattr(cache_context, cache_dict_name)
    except AttributeError:
        cache_dict = {}
        _first_arg_dependent_caches.append(cache_dict)
        setattr(cache_context, cache_dict_name, cache_dict)

    @wraps(nested_func)
    def new_nested_func(cl_object, *args):
        try:
            return cache_dict[cl_object][args]
        except KeyError:
            arg_dict = cache_dict.setdefault(cl_object, {})
            result = nested_func(cl_object, *args)
            arg_dict[args] = result
            return result

    return new_nested_func
def add_expression_instruction(self, lhs, rhs):
    scope = self.scope_stack[-1]

    new_id = intern("insn%d" % self.insn_id_counter)
    self.insn_id_counter += 1

    from loopy.kernel.data import Assignment
    insn = Assignment(
            lhs, rhs,
            within_inames=frozenset(
                scope.active_loopy_inames),
            id=new_id,
            predicates=frozenset(self.conditions),
            tags=tuple(self.instruction_tags))

    scope.previous_instruction_id = new_id
    scope.instructions.append(insn)
class RuleArgument(Expression):
    """Represents a (numbered) argument of a :class:`loopy.SubstitutionRule`.
    Only used internally in the rule-aware mappers to match subst rules
    independently of argument names.
    """

    init_arg_names = ("index",)

    def __init__(self, index):
        self.index = index

    def __getinitargs__(self):
        return (self.index,)

    def stringifier(self):
        return StringifyMapper

    mapper_method = intern("map_rule_argument")
class RefStiffnessTOperator(RefDiffOperatorBase):
    mapper_method = intern("map_ref_stiffness_t")

    @staticmethod
    def matrices(out_elem_grp, in_elem_grp):
        if in_elem_grp == out_elem_grp:
            assert in_elem_grp.is_orthogonal_basis()
            mmat = in_elem_grp.mass_matrix()
            return [dmat.T.dot(mmat.T)
                    for dmat in in_elem_grp.diff_matrices()]

        from modepy import vandermonde
        vand = vandermonde(out_elem_grp.basis(), out_elem_grp.unit_nodes)
        grad_vand = vandermonde(out_elem_grp.grad_basis(),
                in_elem_grp.unit_nodes)
        vand_inv_t = np.linalg.inv(vand).T

        weights = in_elem_grp.weights

        return np.einsum('c,bz,acz->abc', weights, vand_inv_t, grad_vand)
class LinearSubscript(Expression):
    """Represents a linear index into a multi-dimensional array, completely
    ignoring any multi-dimensional layout.
    """

    init_arg_names = ("aggregate", "index")

    def __init__(self, aggregate, index):
        self.aggregate = aggregate
        self.index = index

    def __getinitargs__(self):
        return self.aggregate, self.index

    def stringifier(self):
        return StringifyMapper

    mapper_method = intern("map_linear_subscript")
def __init__(self, max_reqs=400000, max_reps=400000, max_events=400000,
        timer=None):
    self._accumulators = {}
    self._cv = Condition()
    self._stopped = True

    self._requests = Deque(maxlen=max_reqs)
    self._replies = Deque(maxlen=max_reps)
    self._events = Deque(maxlen=max_events)

    self._request_handlers = set()
    self._reply_handlers = set()
    self._event_handlers = set()

    self._auth_by_client = defaultdict(lambda: intern("noauth"))

    self._timer = timer if timer else Timer()

    super(QueueStatsLoader, self).__init__()
    self.setDaemon(True)
class Lookup(AlgebraicLeaf):
    """Access to an attribute of an *aggregate*, such as an attribute of
    a class.
    """

    init_arg_names = ("aggregate", "name",)

    def __init__(self, aggregate, name):
        self.aggregate = aggregate
        self.name = name

    def __getinitargs__(self):
        return self.aggregate, self.name

    mapper_method = intern("map_lookup")
class If(Expression):
    """
    .. attribute:: condition
    .. attribute:: then
    .. attribute:: else_
    """

    init_arg_names = ("condition", "then", "else_")

    def __init__(self, condition, then, else_):
        self.condition = condition
        self.then = then
        self.else_ = else_

    def __getinitargs__(self):
        return self.condition, self.then, self.else_

    mapper_method = intern("map_if")
class Slice(Expression):
    """A slice expression as in a[1:7]."""

    init_arg_names = ("children",)

    def __init__(self, children):
        assert isinstance(children, tuple)
        self.children = children

        if len(children) > 3:
            raise ValueError("slice with more than three arguments")

    def __getinitargs__(self):
        return (self.children,)

    def __bool__(self):
        return True

    __nonzero__ = __bool__

    @property
    def start(self):
        if len(self.children) > 1:
            return self.children[0]
        else:
            return None

    @property
    def stop(self):
        if len(self.children) == 1:
            return self.children[0]
        elif len(self.children) > 1:
            return self.children[1]
        else:
            return None

    @property
    def step(self):
        if len(self.children) == 3:
            return self.children[2]
        else:
            return None

    mapper_method = intern("map_slice")
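# A small illustrative check (not from the original source): the start/stop/
# step properties of Slice above mirror Python's own slicing conventions.
s = Slice((1, 7))
assert (s.start, s.stop, s.step) == (1, 7, None)  # like a[1:7]
assert Slice((7,)).stop == 7                      # like a[:7]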
class _SignedFaceOnes(HasDOFDesc, ExpressionBase):
    """Produces DoFs on a face that are :math:`-1` if their corresponding
    face number is odd and :math:`+1` if it is even. *dd* must refer to a
    0D (point-shaped) trace domain.

    This is based on the face order of
    :meth:`meshmode.mesh.MeshElementGroup.face_vertex_indices`.

    .. note::

        This is used as a hack to generate normals with the correct
        orientation in 1D problems, and so far is only intended for this
        particular use case. (If you can think of a better way, please
        speak up!)
    """

    def __init__(self, dd):
        dd = as_dofdesc(dd)
        assert dd.is_trace()
        super(_SignedFaceOnes, self).__init__(dd)

    mapper_method = intern("map_signed_face_ones")
def parametrized_decorator(method):
    cache_dict_name = intern("_memoize_dic_"+method.__name__)

    def wrapper(self, *args, **kwargs):
        cache_args = list(args)
        cache_kwargs = kwargs.copy()

        for i in uncached_args:
            if i < len(cache_args):
                cache_args.pop(i)

        cache_args = tuple(cache_args)

        if kwargs:
            for name in uncached_kwargs:
                cache_kwargs.pop(name, None)

            key = (
                    (_HasKwargs, frozenset(six.iteritems(cache_kwargs)))
                    + cache_args)
        else:
            key = cache_args

        try:
            return getattr(self, cache_dict_name)[key]
        except AttributeError:
            result = method(self, *args, **kwargs)
            setattr(self, cache_dict_name, {key: result})
            return result
        except KeyError:
            result = method(self, *args, **kwargs)
            getattr(self, cache_dict_name)[key] = result
            return result

    def clear_cache(self):
        delattr(self, cache_dict_name)

    if sys.version_info >= (2, 5):
        from functools import update_wrapper
        new_wrapper = update_wrapper(wrapper, method)
        new_wrapper.clear_cache = clear_cache

    return new_wrapper
class Power(Expression):
    """
    .. attribute:: base
    .. attribute:: exponent
    """

    init_arg_names = ("base", "exponent",)

    def __init__(self, base, exponent):
        self.base = base
        self.exponent = exponent

    def __getinitargs__(self):
        return self.base, self.exponent

    mapper_method = intern("map_power")
def map_IfThen(self, node):
    scope = self.scope_stack[-1]

    cond_name = intern("loopy_cond%d" % self.condition_id_counter)
    self.condition_id_counter += 1
    assert cond_name not in scope.type_map

    scope.type_map[cond_name] = np.int32

    from pymbolic import var
    cond_var = var(cond_name)

    self.add_expression_instruction(
            cond_var, self.parse_expr(node, node.expr))

    self.conditions.append(cond_name)

    self.block_nest.append("if")

    for c in node.content:
        self.rec(c)
class TaggedVariable(Variable):
    """This is an identifier with a tag, such as 'matrix$one', where
    'one' identifies this specific use of the identifier. This mechanism
    may then be used to address these uses--such as by prefetching only
    accesses tagged a certain way.
    """

    init_arg_names = ("name", "tag")

    def __init__(self, name, tag):
        Variable.__init__(self, name)
        self.tag = tag

    def __getinitargs__(self):
        return self.name, self.tag

    def stringifier(self):
        return StringifyMapper

    mapper_method = intern("map_tagged_variable")
class OperatorBinding(ExpressionBase):
    init_arg_names = ("op", "field")

    def __init__(self, op, field):
        self.op = op
        self.field = field

    mapper_method = intern("map_operator_binding")

    def __getinitargs__(self):
        return self.op, self.field

    def is_equal(self, other):
        return (other.__class__ == self.__class__
                and other.op == self.op
                and np.array_equal(other.field, self.field))

    def get_hash(self):
        from pytools.obj_array import obj_array_to_hashable
        return hash(
                (self.__class__, self.op, obj_array_to_hashable(self.field)))
def __init_bindata__(self):
    """The object has been setup with structural information in _der_packer
    and _recipe, as well as instance data in _bindata and _offset. We now
    iterate over all the fields in the _recipe to replace some or all
    entries in _bindata with an ASN1Object subclass instance. The last step
    of this procedure is to self-register into _bindata [_offset], so as to
    support future _der_pack() calls.
    """
    if self._recipe[0] != '_NAMED':
        import sys
        sys.exit(1)
    assert self._recipe[0] == '_NAMED', \
            'ASN1ConstructedType instances must have a dictionary in their _recipe'
    (_NAMED, recp) = self._recipe
    self._fields = {}
    # Static recipe is generated from the ASN.1 grammar
    # Iterate over this recipe to form the instance data
    from quick_der import builder
    for (subfld, subrcp) in recp.items():
        if type(subfld) != str:
            raise Exception("ASN.1 recipe keys can only be strings")
        # Interned strings yield faster dictionary lookups
        # Field names in Python are always interned
        subfld = intern(subfld.replace('-', '_'))
        self._fields[subfld] = self._offset  # fallback
        subval = builder.build_asn1(self._context, subrcp,
                self._bindata, self._offset)
        if type(subval) == int:
            # Primitive: Index into _bindata; set in _fields
            self._fields[subfld] += subval
        elif subval.__class__ == ASN1Atom:
            # The following moved into __init_bindata__():
            #     self._bindata [self._offset] = subval
            # Native types may be assigned instead of subval
            pass
            print('Not placing field {} subvalue :: {}'.format(
                subfld, type(subval)))
        elif isinstance(subval, ASN1Object):
            self._fields[subfld] = subval
class CommonSubexpression(Expression):
    """A helper for code generation and caching. Denotes a subexpression that
    should only be evaluated once. If, in code generation, it is assigned to
    a variable, a name starting with :attr:`prefix` should be used.

    .. attribute:: child
    .. attribute:: prefix
    .. attribute:: scope

        One of the values in :class:`cse_scope`. See there for meaning.

    See :class:`pymbolic.mapper.c_code.CCodeMapper` for an example.
    """

    init_arg_names = ("child", "prefix", "scope")

    def __init__(self, child, prefix=None, scope=None):
        """
        :arg scope: Defaults to :attr:`cse_scope.EVALUATION` if given as *None*.
        """
        if scope is None:
            scope = cse_scope.EVALUATION

        self.child = child
        self.prefix = prefix
        self.scope = scope

    def __getinitargs__(self):
        return (self.child, self.prefix, self.scope)

    def get_extra_properties(self):
        """Return a dictionary of extra kwargs to be passed to the
        constructor from the identity mapper.

        This allows derived classes to exist without having to
        extend every mapper that processes them.
        """

        return {}

    mapper_method = intern("map_common_subexpression")
class ProjectionOperator(Operator):
    def __init__(self, dd_in, dd_out):
        super(ProjectionOperator, self).__init__(dd_in, dd_out)

    def __call__(self, expr):
        from pytools.obj_array import obj_array_vectorize

        def project_one(subexpr):
            from pymbolic.primitives import is_constant
            if self.dd_in == self.dd_out:
                # no-op projection, go away
                return subexpr
            elif is_constant(subexpr):
                return subexpr
            else:
                from grudge.symbolic.primitives import OperatorBinding
                return OperatorBinding(self, subexpr)

        return obj_array_vectorize(project_one, expr)

    mapper_method = intern("map_projection")
class Assign(AssignBase):
    """
    .. attribute:: names
    .. attribute:: exprs
    .. attribute:: do_not_return

        a list of bools indicating whether the corresponding entry in names
        and exprs describes an expression that is not needed beyond this
        assignment

    .. attribute:: priority
    """

    def __init__(self, names, exprs, **kwargs):
        Instruction.__init__(self, names=names, exprs=exprs, **kwargs)

        if not hasattr(self, "do_not_return"):
            self.do_not_return = [False] * len(names)

    @memoize_method
    def flop_count(self):
        return sum(mappers.FlopCounter()(expr) for expr in self.exprs)

    def get_assignees(self):
        return set(self.names)

    @memoize_method
    def get_dependencies(self, each_vector=False):
        dep_mapper = _make_dep_mapper(include_subscripts=False)

        from operator import or_
        deps = reduce(or_, (dep_mapper(expr) for expr in self.exprs))

        from pymbolic.primitives import Variable
        deps -= set(Variable(name) for name in self.names)

        if not each_vector:
            self._dependencies = deps

        return deps

    mapper_method = intern("map_insn_assign")
def memoize_on_first_arg(function, cache_dict_name=None):
    """Like :func:`memoize_method`, but for functions that take the object
    to do memoization as first argument.

    Supports cache deletion via ``function_name.clear_cache(self)``.

    .. note::
        *clear_cache* support requires Python 2.5 or newer.
    """

    if cache_dict_name is None:
        cache_dict_name = intern("_memoize_dic_"
                + function.__module__ + function.__name__)

    def wrapper(obj, *args, **kwargs):
        if kwargs:
            key = (_HasKwargs, frozenset(six.iteritems(kwargs))) + args
        else:
            key = args

        try:
            return getattr(obj, cache_dict_name)[key]
        except AttributeError:
            result = function(obj, *args, **kwargs)
            setattr(obj, cache_dict_name, {key: result})
            return result
        except KeyError:
            result = function(obj, *args, **kwargs)
            getattr(obj, cache_dict_name)[key] = result
            return result

    def clear_cache(obj):
        delattr(obj, cache_dict_name)

    if sys.version_info >= (2, 5):
        from functools import update_wrapper
        new_wrapper = update_wrapper(wrapper, function)
        new_wrapper.clear_cache = clear_cache

    return new_wrapper
def memoize_method_nested(inner):
    """Adds a cache to a function nested inside a method. The cache is
    attached to *memoize_cache_context* (if it exists) or *self* in the
    outer (method) namespace.

    Requires Python 2.5 or newer.
    """

    from warnings import warn
    warn("memoize_method_nested is deprecated. Use @memoize_in(self, "
            "'identifier') instead", DeprecationWarning, stacklevel=2)

    from functools import wraps
    cache_dict_name = intern("_memoize_inner_dic_%s_%s_%d"
            % (inner.__name__, inner.__code__.co_filename,
                inner.__code__.co_firstlineno))

    from inspect import currentframe
    outer_frame = currentframe().f_back
    cache_context = outer_frame.f_locals.get("memoize_cache_context")
    if cache_context is None:
        cache_context = outer_frame.f_locals.get("self")

    try:
        cache_dict = getattr(cache_context, cache_dict_name)
    except AttributeError:
        cache_dict = {}
        setattr(cache_context, cache_dict_name, cache_dict)

    @wraps(inner)
    def new_inner(*args):
        try:
            return cache_dict[args]
        except KeyError:
            result = inner(*args)
            cache_dict[args] = result
            return result

    return new_inner
class FromDiscretizationScopedAssign(AssignBase):
    scope_indicator = "(discr)-"
    neglect_for_dofdesc_inference = True

    def __init__(self, name, **kwargs):
        super(FromDiscretizationScopedAssign, self).__init__(
                name=name, **kwargs)

    @memoize_method
    def flop_count(self):
        return 0

    def get_assignees(self):
        return frozenset([self.name])

    def get_dependencies(self):
        return frozenset()

    def __str__(self):
        return "%s <-(from discr)" % self.name

    mapper_method = intern("map_insn_assign_from_discr_scoped")
class Product(_MultiChildExpression):
    """
    .. attribute:: children

        A :class:`tuple`.
    """

    def __mul__(self, other):
        if not is_valid_operand(other):
            return NotImplemented
        if isinstance(other, Product):
            return Product(self.children + other.children)
        if is_zero(other):
            return 0
        if is_zero(other-1):
            return self
        return Product(self.children + (other,))

    def __rmul__(self, other):
        if not is_constant(other):
            return NotImplemented
        if isinstance(other, Product):
            return Product(other.children + self.children)
        if is_zero(other):
            return 0
        if is_zero(other-1):
            return self
        return Product((other,) + self.children)

    def __bool__(self):
        for i in self.children:
            if is_zero(i):
                return False
        return True

    __nonzero__ = __bool__

    mapper_method = intern("map_product")
class OppositePartitionFaceSwap(Operator):
    """
    .. attribute:: unique_id

        An integer corresponding to the
        :attr:`OppositeInteriorFaceSwap.unique_id` which led to the creation
        of this object. This integer is used as an MPI tag offset to keep
        different subexpressions apart in MPI traffic.
    """

    def __init__(self, dd_in=None, dd_out=None, unique_id=None):
        import grudge.symbolic.primitives as prim
        if dd_in is None and dd_out is None:
            raise ValueError("dd_in or dd_out must be specified")
        elif dd_in is None:
            dd_in = dd_out
        elif dd_out is None:
            dd_out = dd_in

        super(OppositePartitionFaceSwap, self).__init__(dd_in, dd_out)

        if not (isinstance(self.dd_in.domain_tag, prim.DTAG_BOUNDARY)
                and isinstance(self.dd_in.domain_tag.tag, prim.BTAG_PARTITION)):
            raise ValueError(
                    "dd_in must be a partition boundary faces domain, not '%s'"
                    % self.dd_in.domain_tag)
        if self.dd_out != self.dd_in:
            raise ValueError("dd_out and dd_in must be identical")

        self.i_remote_part = self.dd_in.domain_tag.tag.part_nr

        assert unique_id is None or isinstance(unique_id, int)
        self.unique_id = unique_id

    init_arg_names = ("dd_in", "dd_out", "unique_id")

    def __getinitargs__(self):
        return (self.dd_in, self.dd_out, self.unique_id)

    mapper_method = intern("map_opposite_partition_face_swap")
class RankDataSwapAssign(Instruction):
    """
    .. attribute:: name
    .. attribute:: field
    .. attribute:: i_remote_rank

        The number of the remote rank that this instruction swaps data with.

    .. attribute:: dd_out
    .. attribute:: comment
    """
    # TODO: We need to be sure this does not conflict with some other tag.
    MPI_TAG_GRUDGE_DATA_BASE = 0x3700d3e

    def __init__(self, name, field, op):
        self.name = name
        self.field = field
        self.i_remote_rank = op.i_remote_part
        self.dd_out = op.dd_out
        self.send_tag = self.MPI_TAG_GRUDGE_DATA_BASE + op.unique_id
        self.recv_tag = self.MPI_TAG_GRUDGE_DATA_BASE + op.unique_id
        self.comment = "Swap data with rank %02d" % self.i_remote_rank

    @memoize_method
    def get_assignees(self):
        return set([self.name])

    @memoize_method
    def get_dependencies(self):
        return _make_dep_mapper(include_subscripts=False)(self.field)

    def __str__(self):
        return ("{\n"
                + "   /* %s */\n" % self.comment
                + "   send_tag = %s\n" % self.send_tag
                + "   recv_tag = %s\n" % self.recv_tag
                + "   %s <- %s\n" % (self.name, self.field)
                + "}")

    mapper_method = intern("map_insn_rank_data_swap")
def add_expression_instruction(self, lhs, rhs):
    scope = self.scope_stack[-1]

    new_id = intern("insn%d" % self.insn_id_counter)
    self.insn_id_counter += 1

    if self.auto_dependencies and scope.previous_instruction_id:
        depends_on = frozenset([scope.previous_instruction_id])
    else:
        depends_on = frozenset()

    from loopy.kernel.data import Assignment
    insn = Assignment(
            lhs, rhs,
            within_inames=frozenset(
                scope.active_loopy_inames),
            depends_on=depends_on,
            id=new_id,
            predicates=frozenset(self.conditions),
            tags=tuple(self.instruction_tags))

    scope.previous_instruction_id = new_id
    scope.instructions.append(insn)
def add_expression_instruction(self, lhs, rhs):
    scope = self.scope_stack[-1]

    new_id = intern("insn%d" % self.insn_id_counter)
    self.insn_id_counter += 1

    if self.auto_dependencies and scope.previous_instruction_id:
        insn_deps = frozenset([scope.previous_instruction_id])
    else:
        insn_deps = frozenset()

    from loopy.kernel.data import Assignment
    insn = Assignment(
            lhs, rhs,
            forced_iname_deps=frozenset(
                scope.active_loopy_inames),
            insn_deps=insn_deps,
            id=new_id,
            predicates=frozenset(self.conditions),
            tags=tuple(self.instruction_tags))

    scope.previous_instruction_id = new_id
    scope.instructions.append(insn)
class Call(AlgebraicLeaf):
    """A function invocation.

    .. attribute:: function

        A :class:`Expression` that evaluates to a function.

    .. attribute:: parameters

        A :class:`tuple` of positional parameters, each element of which is
        a :class:`Expression` or a constant.
    """

    init_arg_names = ("function", "parameters",)

    def __init__(self, function, parameters):
        self.function = function
        self.parameters = parameters

        try:
            arg_count = self.function.arg_count
        except AttributeError:
            pass
        else:
            if len(self.parameters) != arg_count:
                raise TypeError("%s called with wrong number of arguments "
                        "(need %d, got %d)" % (
                            self.function, arg_count, len(parameters)))

    def __getinitargs__(self):
        return self.function, self.parameters

    mapper_method = intern("map_call")
def __init__(self, name, dtype=None, shape=(), is_local=auto,
        dim_tags=None, offset=0, dim_names=None, strides=None, order=None,
        base_indices=None, storage_shape=None, base_storage=None):
    """
    :arg dtype: :class:`loopy.auto` or a :class:`numpy.dtype`
    :arg shape: :class:`loopy.auto` or a shape tuple
    :arg base_indices: :class:`loopy.auto` or a tuple of base indices
    """

    if is_local is None:
        raise ValueError("is_local is None is no longer supported. "
                "Use loopy.auto.")

    if base_indices is None:
        base_indices = (0,) * len(shape)

    ArrayBase.__init__(self, name=intern(name),
            dtype=dtype, shape=shape,
            dim_tags=dim_tags, offset=offset, dim_names=dim_names,
            order="C",
            base_indices=base_indices, is_local=is_local,
            storage_shape=storage_shape, base_storage=base_storage)
    form = BulkPreapprovalsForm(pctx.course)

    return render_course_page(
            pctx,
            "course/generic-course-form.html",
            {
                "form": form,
                "form_description": _("Create Participation Preapprovals"),
            })

# }}}


# {{{ participation query parsing

# {{{ lexer data

_and = intern("and")
_or = intern("or")
_not = intern("not")
_openpar = intern("openpar")
_closepar = intern("closepar")

_id = intern("id")
_email = intern("email")
_email_contains = intern("email_contains")
_user = intern("user")
_user_contains = intern("user_contains")
_institutional_id = intern("institutional_id")
_institutional_id_contains = intern("institutional_id__contains")
_tagged = intern("tagged")
_role = intern("role")
_status = intern("status")
def intern_frozenset_of_ids(fs):
    return frozenset(intern(s) for s in fs)
def is_interned(s):
    return s is None or intern(s) is s
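# An illustrative check (not from the original source), relying on CPython's
# interning semantics: a string built at runtime is a distinct object until
# it is interned, so is_interned() can tell the two apart.
s = intern("hello")
t = "".join(["hel", "lo"])  # equal to s, but a fresh object
assert is_interned(s)
assert not is_interned(t)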
def read(fp):
    """Deserialize an OOPS from an RFC822 format message."""
    msg = BytesParser().parse(fp, headersonly=True)
    id = msg.get('oops-id')
    exc_type = msg.get('exception-type')
    exc_value = msg.get('exception-value')
    datestr = msg.get('date')
    if datestr is not None:
        date = iso8601.parse_date(msg.get('date'))
    else:
        date = None
    topic = msg.get('topic')
    if topic is None:
        topic = msg.get('page-id')
    username = msg.get('user')
    url = msg.get('url')
    try:
        duration = float(msg.get('duration', '-1'))
    except ValueError:
        duration = float(-1)
    informational = msg.get('informational')
    branch_nick = msg.get('branch')
    revno = msg.get('revision')
    reporter = msg.get('oops-reporter')

    # Explicitly use an iterator so we can process the file sequentially.
    lines = iter(msg.get_payload().splitlines(True))

    statement_pat = re.compile(r'^(\d+)-(\d+)(?:@([\w-]+))?\s+(.*)')

    def is_req_var(line):
        return "=" in line and not statement_pat.match(line)

    def is_traceback(line):
        return line.lower().startswith('traceback') or line.startswith(
            '== EXTRA DATA ==')

    req_vars = []
    statements = []
    first_tb_line = ''
    for line in lines:
        first_tb_line = line
        line = line.strip()
        if line == '':
            continue
        else:
            match = statement_pat.match(line)
            if match is not None:
                start, end, db_id, statement = match.groups()
                if db_id is not None:
                    db_id = intern(db_id)  # This string is repeated lots.
                statements.append(
                    [int(start), int(end), db_id, statement])
            elif is_req_var(line):
                key, value = line.split('=', 1)
                req_vars.append([unquote(key), unquote(value)])
            elif is_traceback(line):
                break
    req_vars = dict(req_vars)
    # The rest is traceback.
    tb_text = ''.join([first_tb_line] + list(lines))
    result = dict(id=id, type=exc_type, value=exc_value, time=date,
            topic=topic, tb_text=tb_text, username=username, url=url,
            duration=duration, req_vars=req_vars, timeline=statements,
            branch_nick=branch_nick, revno=revno)
    if informational is not None:
        result['informational'] = informational
    if reporter is not None:
        result['reporter'] = reporter
    return result
def expand_attribute_strings(attribute_strings, quote_char='\"',
        missing_value=""):
    """
    The last column of GTF has a variable number of key value pairs of the
    format: "key1 value1; key2 value2;"

    Parse these into a dictionary mapping each key onto a list of values,
    where the value is None for any row where the key was missing.

    Parameters
    ----------
    attribute_strings : list of str

    quote_char : str
        Quote character to remove from values

    missing_value : any
        If an attribute is missing from a row, give it this value.

    Returns OrderedDict of column->value list mappings, in the order they
    appeared in the attribute strings.
    """
    logging.debug("Memory usage before expanding GTF attributes: %0.4f MB"
            % (memory_usage(),))

    n = len(attribute_strings)

    extra_columns = {}
    column_order = []

    # Split the semi-colon separated attributes in the last column of a GTF
    # into a list of (key, value) pairs.
    kv_generator = (
        # We're slicing the first two elements out of split() because
        # Ensembl release 79 added values like:
        #   transcript_support_level "1 (assigned to previous version 5)";
        # ...which gets mangled by splitting on spaces.
        #
        # TODO: implement a proper parser!
        (i, kv.strip().split(" ", 2)[:2])
        for (i, attribute_string) in enumerate(attribute_strings)
        for kv in attribute_string.split(";")
        # need at least 3 chars for minimal entry like 'k v'
        if len(kv) > 2 and " " in kv)

    #
    # SOME NOTES ABOUT THE BIZARRE STRING INTERNING GOING ON BELOW
    #
    # While parsing millions of repeated strings (e.g. "gene_id" and "TP53"),
    # we can save a lot of memory by making sure there's only one string
    # object per unique string. The canonical way to do this is using
    # the 'intern' function. One problem is that Py2 won't let you intern
    # unicode objects, so to get around this we call intern(str(...)).
    #
    # It also turns out to be faster to check interned strings ourselves
    # using a local dictionary, hence the two dictionaries below
    # and pair of try/except blocks in the loop.
    column_interned_strings = {}
    value_interned_strings = {}

    for i, (column_name, value) in kv_generator:
        try:
            column_name = column_interned_strings[column_name]
            column = extra_columns[column_name]
        except KeyError:
            column_name = intern(str(column_name))
            column_interned_strings[column_name] = column_name
            column = [missing_value] * n
            extra_columns[column_name] = column
            column_order.append(column_name)

        value = value.replace(quote_char, "") if quote_char in value else value

        try:
            value = value_interned_strings[value]
        except KeyError:
            value = intern(str(value))
            value_interned_strings[value] = value

        column[i] = value

    logging.debug("Memory usage after expanding GTF attributes: %0.4f MB"
            % (memory_usage(),))
    logging.info("Extracted GTF attributes: %s" % column_order)

    return OrderedDict(
            (column_name, extra_columns[column_name])
            for column_name in column_order)
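# A small usage sketch (not part of the original source), assuming the
# logging/memory_usage/OrderedDict names used above are in scope:
rows = [
    'gene_id "ENSG0001"; gene_name "TP53";',
    'gene_id "ENSG0002";',
]
cols = expand_attribute_strings(rows)
# cols == OrderedDict([("gene_id", ["ENSG0001", "ENSG0002"]),
#                      ("gene_name", ["TP53", ""])])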
def all_inames(self):
    result = set()
    for dom in self.domains:
        result.update(
                intern(n) for n in dom.get_var_names(dim_type.set))
    return frozenset(result)
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import weakref

from six.moves import intern

dict_name = intern('_internable_dict')


#
# Internable
#

class Internable(object):
    """Class that allows instances to be 'interned'. That is, given an
    instance of this class, one can obtain a canonical (interned) copy.

    This saves memory when there are likely to be many identical instances
    of the class -- users hold references to a single interned object
    instead of references to different objects that are identical.
class Reduction(Expression):
    """Represents a reduction operation on :attr:`expr` across :attr:`inames`.

    .. attribute:: operation

        an instance of :class:`loopy.library.reduction.ReductionOperation`

    .. attribute:: inames

        a list of inames across which reduction on :attr:`expr` is being
        carried out.

    .. attribute:: expr

        The expression (as a :class:`pymbolic.primitives.Expression`)
        on which reduction is performed.

    .. attribute:: allow_simultaneous

        A :class:`bool`. If not *True*, an iname is allowed to be used
        in precisely one reduction, to avoid mis-nesting errors.
    """

    init_arg_names = ("operation", "inames", "expr", "allow_simultaneous")

    def __init__(self, operation, inames, expr, allow_simultaneous=False):
        if isinstance(inames, str):
            inames = tuple(iname.strip() for iname in inames.split(","))
        elif isinstance(inames, Variable):
            inames = (inames,)

        assert isinstance(inames, tuple)

        def strip_var(iname):
            if isinstance(iname, Variable):
                iname = iname.name

            assert isinstance(iname, str)
            return iname

        inames = tuple(strip_var(iname) for iname in inames)

        if isinstance(operation, str):
            from loopy.library.reduction import parse_reduction_op
            operation = parse_reduction_op(operation)

        from loopy.library.reduction import ReductionOperation
        assert isinstance(operation, ReductionOperation)

        self.operation = operation
        self.inames = inames
        self.expr = expr
        self.allow_simultaneous = allow_simultaneous

    def __getinitargs__(self):
        return (self.operation, self.inames, self.expr,
                self.allow_simultaneous)

    def get_hash(self):
        return hash((self.__class__, self.operation, self.inames, self.expr))

    def is_equal(self, other):
        return (other.__class__ == self.__class__
                and other.operation == self.operation
                and other.inames == self.inames
                and other.expr == self.expr)

    def stringifier(self):
        return StringifyMapper

    @property
    @memoize_method
    def inames_set(self):
        return set(self.inames)

    mapper_method = intern("map_reduction")
    operation = parse_reduction_op(name)
    if operation:
        if len(expr.parameters) != 2:
            raise RuntimeError("invalid invocation of "
                    "reduction operation '%s'" % expr.function.name)

        inames, red_expr = expr.parameters
        return self._parse_reduction(operation, inames, self.rec(red_expr))
    else:
        return IdentityMapper.map_call(self, expr)


# {{{ customization to pymbolic parser

_open_dbl_bracket = intern("open_dbl_bracket")

TRAILING_FLOAT_TAG_RE = re.compile("^(.*?)([a-zA-Z]*)$")


class LoopyParser(ParserBase):
    lex_table = [
            (_open_dbl_bracket, pytools.lex.RE(r"\[\[")),
            ] + ParserBase.lex_table

    def parse_float(self, s):
        match = TRAILING_FLOAT_TAG_RE.match(s)

        val = match.group(1)
        tag = frozenset(match.group(2))
        if tag == frozenset("j"):