def __init__(self, target, name, dtype, arg_class, base_name=None,
        shape=None, strides=None, unvec_shape=None, unvec_strides=None,
        offset_for_name=None, stride_for_name_and_axis=None,
        allows_offset=None, is_written=None):
    """Record describing one generated kernel argument.

    Note: *target* is accepted for interface compatibility but is not
    stored on the record.
    """
    from loopy.types import LoopyType
    assert isinstance(dtype, LoopyType)

    # Gather everything that gets stored, then hand it to Record in one go.
    stored = dict(
            name=name, dtype=dtype, arg_class=arg_class,
            base_name=base_name, shape=shape, strides=strides,
            unvec_shape=unvec_shape, unvec_strides=unvec_strides,
            offset_for_name=offset_for_name,
            stride_for_name_and_axis=stride_for_name_and_axis,
            allows_offset=allows_offset, is_written=is_written)
    Record.__init__(self, **stored)
def __init__(
        # All Boolean flags in here should default to False for the
        # string-based interface of make_options (below) to make sense.

        # All defaults are further required to be False when cast to bool
        # for the update() functionality to work.

        self,

        annotate_inames=False,
        trace_assignments=False,
        trace_assignment_values=False,

        skip_arg_checks=False, no_numpy=False, return_dict=False,
        write_wrapper=False, highlight_wrapper=False,
        write_cl=False, highlight_cl=False,
        edit_cl=False, cl_build_options=None,
        ):
    """Option record controlling code generation and invocation.

    :arg cl_build_options: extra OpenCL build options (a list); defaults
        to an empty list.
    """
    # BUG FIX: the default used to be the mutable literal [], which is
    # created once at function-definition time and therefore shared by
    # every instance that does not pass cl_build_options explicitly.
    # Default to None and create a fresh list per call instead.
    if cl_build_options is None:
        cl_build_options = []

    Record.__init__(
            self,

            annotate_inames=annotate_inames,
            trace_assignments=trace_assignments,
            trace_assignment_values=trace_assignment_values,

            skip_arg_checks=skip_arg_checks, no_numpy=no_numpy,
            return_dict=return_dict,
            write_wrapper=write_wrapper, highlight_wrapper=highlight_wrapper,
            write_cl=write_cl, highlight_cl=highlight_cl,
            edit_cl=edit_cl, cl_build_options=cl_build_options,
            )
def __init__(
        self, target, name, dtype, cgen_declarator, arg_class,
        base_name=None, shape=None, strides=None,
        unvec_shape=None, unvec_strides=None,
        offset_for_name=None, stride_for_name_and_axis=None,
        allows_offset=None):
    """Store codegen metadata for one kernel argument, wrapping *dtype*
    in a picklable form bound to *target*.
    """
    from loopy.tools import PicklableDtype

    # Wrap the dtype so the record survives pickling.
    picklable = PicklableDtype(dtype, target=target)

    Record.__init__(
            self,
            name=name,
            picklable_dtype=picklable,
            cgen_declarator=cgen_declarator,
            arg_class=arg_class,
            base_name=base_name,
            shape=shape,
            strides=strides,
            unvec_shape=unvec_shape,
            unvec_strides=unvec_strides,
            offset_for_name=offset_for_name,
            stride_for_name_and_axis=stride_for_name_and_axis,
            allows_offset=allows_offset)
def __init__(self, name, arguments, expression):
    """A named rule mapping an argument tuple onto *expression*.

    *arguments* must already be a tuple (for hashability/stability).
    """
    assert isinstance(arguments, tuple)

    Record.__init__(
            self,
            name=name,
            arguments=arguments,
            expression=expression)
def __init__(self, source, target, variable, var_kind, is_forward):
    """Record one dependency edge between *source* and *target* on
    *variable* (of kind *var_kind*), with direction *is_forward*.
    """
    Record.__init__(
            self,
            source=source,
            target=target,
            variable=variable,
            var_kind=var_kind,
            is_forward=is_forward)
def __init__(self, **kwargs):
    """Intern the (required) ``name`` and normalize ``dtype`` via
    :func:`loopy.types.to_loopy_type` before storing the record fields.
    """
    from loopy.types import to_loopy_type

    # "name" is required; interning speeds up later identifier compares.
    kwargs["name"] = intern(kwargs.pop("name"))

    # "dtype" may be absent, None, or "auto" -- to_loopy_type handles all.
    kwargs["dtype"] = to_loopy_type(
            kwargs.pop("dtype", None), allow_auto=True, allow_none=True)

    Record.__init__(self, **kwargs)
def __init__(self, id, insn_deps, insn_deps_is_final, groups,
        conflicts_with_groups,
        forced_iname_deps_is_final, forced_iname_deps, priority,
        boostable, boostable_into,
        predicates, tags):
    """Normalize, validate, and store the instruction's metadata.

    ``None`` for any of the set-valued arguments is replaced by an empty
    ``frozenset``; ``None`` finality flags default to *False*; ``None``
    tags default to an empty tuple.
    """
    # Default the optional set-valued arguments.
    if insn_deps is None:
        insn_deps = frozenset()
    if groups is None:
        groups = frozenset()
    if conflicts_with_groups is None:
        conflicts_with_groups = frozenset()
    if forced_iname_deps_is_final is None:
        forced_iname_deps_is_final = False
    if insn_deps_is_final is None:
        insn_deps_is_final = False

    # NOTE(review): insn_deps was already defaulted to frozenset() above,
    # so this error cannot fire for insn_deps=None -- only for a
    # non-frozenset value such as a list. Confirm that is the intent.
    if insn_deps_is_final and not isinstance(insn_deps, frozenset):
        raise LoopyError("Setting insn_deps_is_final to True requires "
                "actually specifying insn_deps")

    if tags is None:
        tags = ()

    # Periodically reenable these and run the tests to ensure all
    # performance-relevant identifiers are interned.
    #
    # from loopy.tools import is_interned
    # assert is_interned(id)
    # assert all(is_interned(dep) for dep in insn_deps)
    # assert all(is_interned(grp) for grp in groups)
    # assert all(is_interned(grp) for grp in conflicts_with_groups)
    # assert all(is_interned(iname) for iname in forced_iname_deps)
    # assert all(is_interned(pred) for pred in predicates)

    assert isinstance(forced_iname_deps, frozenset)
    # NOTE(review): the "or insn_deps is None" arm is dead -- insn_deps
    # can no longer be None at this point (defaulted above).
    assert isinstance(insn_deps, frozenset) or insn_deps is None
    assert isinstance(groups, frozenset)
    assert isinstance(conflicts_with_groups, frozenset)

    Record.__init__(self,
            id=id,
            insn_deps=insn_deps,
            insn_deps_is_final=insn_deps_is_final,
            groups=groups, conflicts_with_groups=conflicts_with_groups,
            forced_iname_deps_is_final=forced_iname_deps_is_final,
            forced_iname_deps=forced_iname_deps,
            priority=priority,
            boostable=boostable,
            boostable_into=boostable_into,
            predicates=predicates,
            tags=tags)
def __init__(self, **kwargs):
    """Replace a ``dtype`` keyword with a picklable wrapper.

    A :class:`numpy.dtype` is wrapped in ``PicklableDtype``; any other
    value (including *None*) is stored unchanged.
    """
    dtype = kwargs.pop("dtype", None)

    if isinstance(dtype, np.dtype):
        from loopy.tools import PicklableDtype
        picklable = PicklableDtype(dtype)
    else:
        picklable = dtype

    kwargs["picklable_dtype"] = picklable
    Record.__init__(self, **kwargs)
def __init__(self, source, center1, center2, target,
        expansion1, expansion2, conv_factor):
    """Record a source-to-target interaction via two expansions.

    A string *conv_factor* is first evaluated through :meth:`eval` in
    terms of the geometry arguments.
    """
    # Allow conv_factor to be given symbolically as a string.
    if isinstance(conv_factor, str):
        conv_factor = self.eval(
                conv_factor, source, center1, center2, target)

    Record.__init__(
            self,
            source=source,
            center1=center1,
            center2=center2,
            target=target,
            expansion1=expansion1,
            expansion2=expansion2,
            conv_factor=conv_factor)
def __init__(
        # All Boolean flags in here should default to False for the
        # string-based interface of make_options (below) to make sense.

        # All defaults are further required to be False when cast to bool
        # for the update() functionality to work.

        self,

        annotate_inames=False,
        trace_assignments=False,
        trace_assignment_values=False,

        skip_arg_checks=False, no_numpy=False, return_dict=False,
        write_wrapper=False, highlight_wrapper=False,
        write_cl=False, highlight_cl=False,
        edit_cl=False, cl_build_options=None,
        allow_terminal_colors=None,
        disable_global_barriers=False,
        ):
    """Option record controlling code generation and invocation.

    :arg cl_build_options: extra OpenCL build options (a list); defaults
        to an empty list.
    :arg allow_terminal_colors: if *None*, enabled exactly when the
        :mod:`colorama` package is importable.
    """
    # BUG FIX: the default used to be the mutable literal [], which is
    # created once at function-definition time and therefore shared by
    # every instance that does not pass cl_build_options explicitly.
    # Default to None and create a fresh list per call instead.
    if cl_build_options is None:
        cl_build_options = []

    # Auto-detect terminal color support via colorama availability.
    if allow_terminal_colors is None:
        try:
            import colorama  # noqa
        except ImportError:
            allow_terminal_colors = False
        else:
            allow_terminal_colors = True

    Record.__init__(
            self,

            annotate_inames=annotate_inames,
            trace_assignments=trace_assignments,
            trace_assignment_values=trace_assignment_values,

            skip_arg_checks=skip_arg_checks, no_numpy=no_numpy,
            return_dict=return_dict,
            write_wrapper=write_wrapper, highlight_wrapper=highlight_wrapper,
            write_cl=write_cl, highlight_cl=highlight_cl,
            edit_cl=edit_cl, cl_build_options=cl_build_options,
            allow_terminal_colors=allow_terminal_colors,
            disable_global_barriers=disable_global_barriers,
            )
def get_copy_kwargs(self, **kwargs):
    """Like :meth:`Record.get_copy_kwargs`, but expose ``dtype`` to the
    caller instead of the internal ``picklable_dtype`` field.
    """
    result = Record.get_copy_kwargs(self, **kwargs)

    # Surface the dtype unless the caller already overrode it.
    if "dtype" not in result:
        result["dtype"] = self.dtype

    # The picklable wrapper is an implementation detail; drop it.
    result.pop("picklable_dtype")

    return result
def __init__(
        self, id, insn_deps, insn_deps_is_final,
        forced_iname_deps_is_final, forced_iname_deps, priority,
        boostable, boostable_into, predicates, tags):
    """Normalize defaults, sanity-check inputs, and store them.

    ``None`` finality flags mean "not final"; ``None`` tags become ().
    """
    forced_iname_deps_is_final = (
            False if forced_iname_deps_is_final is None
            else forced_iname_deps_is_final)
    insn_deps_is_final = (
            False if insn_deps_is_final is None else insn_deps_is_final)

    if insn_deps_is_final and not isinstance(insn_deps, frozenset):
        raise LoopyError("Setting insn_deps_is_final to True requires "
                "actually specifying insn_deps")

    tags = () if tags is None else tags

    assert isinstance(forced_iname_deps, frozenset)
    assert isinstance(insn_deps, frozenset) or insn_deps is None

    Record.__init__(
            self,
            id=id,
            insn_deps=insn_deps,
            insn_deps_is_final=insn_deps_is_final,
            forced_iname_deps_is_final=forced_iname_deps_is_final,
            forced_iname_deps=forced_iname_deps,
            priority=priority,
            boostable=boostable,
            boostable_into=boostable_into,
            predicates=predicates,
            tags=tags)
def __init__(self, order, vertex_indices, nodes,
        element_nr_base=None, node_nr_base=None,
        unit_nodes=None, dim=None):
    """
    :arg order: the maximum total degree used for interpolation.
    :arg nodes: ``[ambient_dim, nelements, nunit_nodes]``
        The nodes are assumed to be mapped versions of *unit_nodes*.
    :arg unit_nodes: ``[dim, nunit_nodes]``
        The unit nodes of which *nodes* is a mapped version.

    Do not supply *element_nr_base* and *node_nr_base*, they will be
    automatically assigned.
    """
    # NOTE(review): *dim* is accepted but not stored on the record here --
    # presumably consumed elsewhere (e.g. by a subclass); confirm.
    Record.__init__(self,
            order=order,
            vertex_indices=vertex_indices,
            nodes=nodes,
            unit_nodes=unit_nodes,
            element_nr_base=element_nr_base, node_nr_base=node_nr_base)
def __init__(self, name, c_name, arg_dtypes):
    """Record a callable's user-facing name, its C-level name, and the
    dtypes of its arguments.
    """
    Record.__init__(
            self,
            name=name,
            c_name=c_name,
            arg_dtypes=arg_dtypes)
def __init__(self, vertices, groups, skip_tests=False,
        nodal_adjacency=False,
        facial_adjacency_groups=False,
        boundary_tags=None,
        vertex_id_dtype=np.int32,
        element_id_dtype=np.int32):
    """
    The following are keyword-only:

    :arg skip_tests: Skip mesh tests, in case you want to load a broken
        mesh anyhow and then fix it inside of this data structure.
    :arg nodal_adjacency: One of three options:
        *None*, in which case this information
        will be deduced from vertex adjacency. *False*, in which case
        this information will be marked unavailable (such as if there are
        hanging nodes in the geometry, so that vertex adjacency does not
        convey the full picture), and references to
        :attr:`element_neighbors_starts` and :attr:`element_neighbors`
        will result in exceptions. Lastly, a tuple
        :class:`NodalAdjacency` object.
    :arg facial_adjacency_groups: One of three options:
        *None*, in which case this information
        will be deduced from vertex adjacency. *False*, in which case
        this information will be marked unavailable (such as if there are
        hanging nodes in the geometry, so that vertex adjacency does not
        convey the full picture), and references to
        :attr:`element_neighbors_starts` and :attr:`element_neighbors`
        will result in exceptions. Lastly, a data structure as described in
        :attr:`facial_adjacency_groups` may be passed.
    """
    # Renumber groups into one global element/node numbering.
    el_nr = 0
    node_nr = 0

    new_groups = []
    for g in groups:
        ng = g.join_mesh(el_nr, node_nr)
        new_groups.append(ng)
        el_nr += ng.nelements
        node_nr += ng.nnodes

    # {{{ boundary tags

    if boundary_tags is None:
        boundary_tags = []
    else:
        # Copy: BTAG_ALL/BTAG_REALLY_ALL get appended below.
        boundary_tags = boundary_tags[:]

    if BTAG_NONE in boundary_tags:
        raise ValueError("BTAG_NONE is not allowed to be part of "
                "boundary_tags")
    if BTAG_ALL not in boundary_tags:
        boundary_tags.append(BTAG_ALL)
    if BTAG_REALLY_ALL not in boundary_tags:
        boundary_tags.append(BTAG_REALLY_ALL)

    # Boundary tags are encoded as bits in (negative) neighbor indices,
    # so there can be at most as many as element_id_dtype has bits.
    max_boundary_tag_count = int(
        np.log(np.iinfo(element_id_dtype).max)/np.log(2))
    if len(boundary_tags) > max_boundary_tag_count:
        raise ValueError("too few bits in element_id_dtype to represent all "
                "boundary tags")

    btag_to_index = dict(
            (btag, i) for i, btag in enumerate(boundary_tags))

    # }}}

    # Normalize a (starts, neighbors) tuple into a NodalAdjacency record.
    if nodal_adjacency is not False and nodal_adjacency is not None:
        if not isinstance(nodal_adjacency, NodalAdjacency):
            nb_starts, nbs = nodal_adjacency
            nodal_adjacency = NodalAdjacency(
                    neighbors_starts=nb_starts,
                    neighbors=nbs)

            del nb_starts
            del nbs

    Record.__init__(
            self, vertices=vertices, groups=new_groups,
            _nodal_adjacency=nodal_adjacency,
            _facial_adjacency_groups=facial_adjacency_groups,
            boundary_tags=boundary_tags,
            btag_to_index=btag_to_index,
            vertex_id_dtype=np.dtype(vertex_id_dtype),
            element_id_dtype=np.dtype(element_id_dtype),
            )

    if not skip_tests:
        assert _test_node_vertex_consistency(self)

        for g in self.groups:
            assert g.vertex_indices.dtype == self.vertex_id_dtype

        if nodal_adjacency:
            # BUG FIX: the attribute is "neighbors_starts" (as constructed
            # above), not "neighbor_starts".
            assert nodal_adjacency.neighbors_starts.shape == (
                    self.nelements+1,)
            assert len(nodal_adjacency.neighbors.shape) == 1

            assert (nodal_adjacency.neighbors_starts.dtype
                    == self.element_id_dtype)
            assert nodal_adjacency.neighbors.dtype == self.element_id_dtype

        if facial_adjacency_groups:
            assert len(facial_adjacency_groups) == len(self.groups)
            for fagrp_map in facial_adjacency_groups:
                for fagrp in six.itervalues(fagrp_map):
                    grp = self.groups[fagrp.igroup]

                    fvi = grp.face_vertex_indices()
                    assert fagrp.neighbors.dtype == self.element_id_dtype
                    assert fagrp.neighbors.shape == (
                            grp.nelements, len(fvi))
                    # BUG FIX: the face-id-dtype check applies to
                    # neighbor_faces; "neighbors" was checked (against the
                    # wrong dtype) twice.
                    assert fagrp.neighbor_faces.dtype == self.face_id_dtype
                    assert fagrp.neighbor_faces.shape == (
                            grp.nelements, len(fvi))

                    is_bdry = fagrp.neighbors < 0
                    assert ((1 << btag_to_index[BTAG_REALLY_ALL])
                            & fagrp.neighbors[is_bdry]).all(), \
                            "boundary faces without BTAG_REALLY_ALL found"

        from meshmode.mesh.processing import \
                test_volume_mesh_element_orientations

        if self.dim == self.ambient_dim:
            # only for volume meshes, for now
            assert test_volume_mesh_element_orientations(self), \
                    "negatively oriented elements found"
def __init__(self, id, depends_on, depends_on_is_final,
        groups, conflicts_with_groups,
        no_sync_with,
        within_inames_is_final, within_inames,
        priority,
        boostable, boostable_into, predicates, tags,
        insn_deps=None, insn_deps_is_final=None,
        forced_iname_deps=None, forced_iname_deps_is_final=None):
    """Validate, normalize, and store the instruction's metadata.

    Deprecated spellings (``insn_deps``/``forced_iname_deps``) are
    translated to the new names; string predicates are parsed into
    expressions; ``None`` set-valued arguments become empty frozensets.
    """
    # {{{ backwards compatibility goop

    # insn_deps -> depends_on, forced_iname_deps -> within_inames.
    # Specifying both the old and new name is an error.
    if depends_on is not None and insn_deps is not None:
        raise LoopyError("may not specify both insn_deps and depends_on")
    elif insn_deps is not None:
        warn("insn_deps is deprecated, use depends_on",
                DeprecationWarning, stacklevel=2)

        depends_on = insn_deps
        depends_on_is_final = insn_deps_is_final

    if forced_iname_deps is not None and within_inames is not None:
        raise LoopyError("may not specify both forced_iname_deps "
                "and within_inames")
    elif forced_iname_deps is not None:
        warn("forced_iname_deps is deprecated, use within_inames",
                DeprecationWarning, stacklevel=2)

        within_inames = forced_iname_deps
        within_inames_is_final = forced_iname_deps_is_final

    # Parse string predicates into pymbolic expressions; a leading "!"
    # is the deprecated spelling of logical negation.
    new_predicates = set()
    for pred in predicates:
        if isinstance(pred, str):
            from pymbolic.primitives import LogicalNot
            from loopy.symbolic import parse
            if pred.startswith("!"):
                from warnings import warn
                warn("predicates starting with '!' are deprecated. "
                        "Simply use 'not' instead")
                pred = LogicalNot(parse(pred[1:]))
            else:
                pred = parse(pred)

        new_predicates.add(pred)

    predicates = new_predicates
    del new_predicates

    # }}}

    # Default the optional set-valued arguments and finality flags.
    if depends_on is None:
        depends_on = frozenset()
    if groups is None:
        groups = frozenset()
    if conflicts_with_groups is None:
        conflicts_with_groups = frozenset()
    if no_sync_with is None:
        no_sync_with = frozenset()
    if within_inames is None:
        within_inames = frozenset()
    if within_inames_is_final is None:
        within_inames_is_final = False
    if depends_on_is_final is None:
        depends_on_is_final = False

    # NOTE(review): depends_on was already defaulted to frozenset() above,
    # so this error cannot fire for depends_on=None -- only for a
    # non-frozenset value such as a list.
    if depends_on_is_final and not isinstance(depends_on, frozenset):
        raise LoopyError("Setting depends_on_is_final to True requires "
                "actually specifying depends_on")

    if tags is None:
        tags = frozenset()

    if not isinstance(tags, frozenset):
        # was previously allowed to be tuple
        tags = frozenset(tags)

    # Periodically reenable these and run the tests to ensure all
    # performance-relevant identifiers are interned.
    #
    # from loopy.tools import is_interned
    # assert is_interned(id)
    # assert all(is_interned(dep) for dep in depends_on)
    # assert all(is_interned(grp) for grp in groups)
    # assert all(is_interned(grp) for grp in conflicts_with_groups)
    # assert all(is_interned(iname) for iname in within_inames)
    # assert all(is_interned(pred) for pred in predicates)

    assert isinstance(within_inames, frozenset)
    # NOTE(review): the "or depends_on is None" arm is dead -- depends_on
    # can no longer be None at this point (defaulted above).
    assert isinstance(depends_on, frozenset) or depends_on is None
    assert isinstance(groups, frozenset)
    assert isinstance(conflicts_with_groups, frozenset)

    Record.__init__(self,
            id=id,
            depends_on=depends_on,
            depends_on_is_final=depends_on_is_final,
            no_sync_with=no_sync_with,
            groups=groups, conflicts_with_groups=conflicts_with_groups,
            within_inames_is_final=within_inames_is_final,
            within_inames=within_inames,
            priority=priority,
            boostable=boostable,
            boostable_into=boostable_into,
            predicates=predicates,
            tags=tags)
def __init__(self, *args, **kwargs):
    """Ensure a ``features`` set exists before delegating to Record."""
    kwargs.setdefault('features', set())
    Record.__init__(self, *args, **kwargs)
def __init__(self, sleep_duration):
    """Record holding a single ``sleep_duration`` value."""
    Record.__init__(
            self,
            sleep_duration=sleep_duration)
def __init__(self, vertices, groups, skip_tests=False,
        node_vertex_consistency_tolerance=None,
        nodal_adjacency=False,
        facial_adjacency_groups=False,
        boundary_tags=None,
        vertex_id_dtype=np.int32,
        element_id_dtype=np.int32):
    """
    The following are keyword-only:

    :arg skip_tests: Skip mesh tests, in case you want to load a broken
        mesh anyhow and then fix it inside of this data structure.
    :arg node_vertex_consistency_tolerance: If *False*, do not check
        for consistency between vertex and nodal data. If *None*, use
        the (small, near FP-epsilon) tolerance.
    :arg nodal_adjacency: One of three options:
        *None*, in which case this information
        will be deduced from vertex adjacency. *False*, in which case
        this information will be marked unavailable (such as if there are
        hanging nodes in the geometry, so that vertex adjacency does not
        convey the full picture), and references to
        :attr:`element_neighbors_starts` and :attr:`element_neighbors`
        will result in exceptions. Lastly, a tuple
        :class:`NodalAdjacency` object.
    :arg facial_adjacency_groups: One of three options:
        *None*, in which case this information
        will be deduced from vertex adjacency. *False*, in which case
        this information will be marked unavailable (such as if there are
        hanging nodes in the geometry, so that vertex adjacency does not
        convey the full picture), and references to
        :attr:`element_neighbors_starts` and :attr:`element_neighbors`
        will result in exceptions. Lastly, a data structure as described in
        :attr:`facial_adjacency_groups` may be passed.
    """
    # Renumber groups into one global element/node numbering.
    el_nr = 0
    node_nr = 0

    new_groups = []
    for g in groups:
        ng = g.join_mesh(el_nr, node_nr)
        new_groups.append(ng)
        el_nr += ng.nelements
        node_nr += ng.nnodes

    # {{{ boundary tags

    if boundary_tags is None:
        boundary_tags = []
    else:
        # Copy: BTAG_ALL/BTAG_REALLY_ALL get appended below.
        boundary_tags = boundary_tags[:]

    if BTAG_NONE in boundary_tags:
        raise ValueError("BTAG_NONE is not allowed to be part of "
                "boundary_tags")
    if BTAG_ALL not in boundary_tags:
        boundary_tags.append(BTAG_ALL)
    if BTAG_REALLY_ALL not in boundary_tags:
        boundary_tags.append(BTAG_REALLY_ALL)

    # Boundary tags are encoded as bits in (negative) neighbor indices,
    # so there can be at most as many as element_id_dtype has bits.
    max_boundary_tag_count = int(
        np.log(np.iinfo(element_id_dtype).max)/np.log(2))
    if len(boundary_tags) > max_boundary_tag_count:
        raise ValueError("too few bits in element_id_dtype to represent all "
                "boundary tags")

    btag_to_index = dict(
            (btag, i) for i, btag in enumerate(boundary_tags))

    # }}}

    # Normalize a (starts, neighbors) tuple into a NodalAdjacency record.
    if nodal_adjacency is not False and nodal_adjacency is not None:
        if not isinstance(nodal_adjacency, NodalAdjacency):
            nb_starts, nbs = nodal_adjacency
            nodal_adjacency = NodalAdjacency(
                    neighbors_starts=nb_starts,
                    neighbors=nbs)

            del nb_starts
            del nbs

    Record.__init__(
            self, vertices=vertices, groups=new_groups,
            _nodal_adjacency=nodal_adjacency,
            _facial_adjacency_groups=facial_adjacency_groups,
            boundary_tags=boundary_tags,
            btag_to_index=btag_to_index,
            vertex_id_dtype=np.dtype(vertex_id_dtype),
            element_id_dtype=np.dtype(element_id_dtype),
            )

    if not skip_tests:
        assert _test_node_vertex_consistency(
                self, node_vertex_consistency_tolerance)

        for g in self.groups:
            assert g.vertex_indices.dtype == self.vertex_id_dtype

        if nodal_adjacency:
            assert nodal_adjacency.neighbors_starts.shape == (
                    self.nelements+1,)
            assert len(nodal_adjacency.neighbors.shape) == 1

            assert (nodal_adjacency.neighbors_starts.dtype
                    == self.element_id_dtype)
            assert nodal_adjacency.neighbors.dtype == self.element_id_dtype

        if facial_adjacency_groups:
            assert len(facial_adjacency_groups) == len(self.groups)
            for fagrp_map in facial_adjacency_groups:
                for fagrp in six.itervalues(fagrp_map):
                    grp = self.groups[fagrp.igroup]

                    fvi = grp.face_vertex_indices()
                    assert fagrp.neighbors.dtype == self.element_id_dtype
                    assert fagrp.neighbors.shape == (
                            grp.nelements, len(fvi))
                    # BUG FIX: the face-id-dtype check applies to
                    # neighbor_faces; "neighbors" was checked (against the
                    # wrong dtype) twice.
                    assert fagrp.neighbor_faces.dtype == self.face_id_dtype
                    assert fagrp.neighbor_faces.shape == (
                            grp.nelements, len(fvi))

                    is_bdry = fagrp.neighbors < 0
                    assert ((1 << btag_to_index[BTAG_REALLY_ALL])
                            & fagrp.neighbors[is_bdry]).all(), \
                            "boundary faces without BTAG_REALLY_ALL found"

        from meshmode.mesh.processing import \
                test_volume_mesh_element_orientations

        if self.dim == self.ambient_dim:
            # only for volume meshes, for now
            assert test_volume_mesh_element_orientations(self), \
                    "negatively oriented elements found"
def __init__(self, loop_end=n):
    # NOTE(review): the default binds the module-level ``n`` at
    # function-definition time, not at call time -- confirm this capture
    # is intentional and that ``n`` is defined before this class body runs.
    Record.__init__(self, loop_end=loop_end)
def __init__(self, name, dtype=None, shape=None, dim_tags=None, offset=0,
        dim_names=None, strides=None, order=None, for_atomic=False,
        **kwargs):
    """
    All of the following are optional. Specify either strides or shape.

    :arg name: May contain multiple names separated by
        commas, in which case multiple arguments,
        each with identical properties, are created
        for each name.
    :arg dtype: the :class:`numpy.dtype` of the array.
        If this is *None*, :mod:`loopy` will try to continue without
        knowing the type of this array, where the idea is that precise
        knowledge of the type will become available at invocation time.
        :class:`loopy.CompiledKernel` (and thereby
        :meth:`loopy.LoopKernel.__call__`) automatically add this type
        information based on invocation arguments.

        Note that some transformations, such as :func:`loopy.add_padding`
        cannot be performed without knowledge of the exact *dtype*.

    :arg shape: May be one of the following:

        * *None*. In this case, no shape is intended to be specified,
          only the strides will be used to access the array. Bounds checking
          will not be performed.

        * :class:`loopy.auto`. The shape will be determined by finding the
          access footprint.

        * a tuple like :attr:`numpy.ndarray.shape`.

          Each entry of the tuple is also allowed to be a :mod:`pymbolic`
          expression involving kernel parameters, or a (potentially-comma
          separated) or a string that can be parsed to such an expression.

          Any element of the shape tuple not used to compute strides
          may be *None*.

        * A string which can be parsed into the previous form.

    :arg dim_tags: A comma-separated list of tags as understood by
        :func:`parse_array_dim_tag`.

    :arg strides: May be one of the following:

        * None

        * :class:`loopy.auto`. The strides will be determined by *order*
          and the access footprint.

        * a tuple like :attr:`numpy.ndarray.shape`.

          Each entry of the tuple is also allowed to be a :mod:`pymbolic`
          expression involving kernel parameters, or a (potentially-comma
          separated) or a string that can be parsed to such an expression.

        * A string which can be parsed into the previous form.

    :arg order: "F" or "C" for C (row major) or Fortran
        (column major). Defaults to the *default_order* argument
        passed to :func:`loopy.make_kernel`.
    :arg for_atomic:
        Whether the array is declared for atomic access, and, if necessary,
        using atomic-capable data types.
    :arg offset: Offset from the beginning of the buffer to the point from
        which the strides are counted. May be one of

        * 0
        * a string (that is interpreted as an argument name).
        * :class:`loopy.auto`, in which case an offset argument
          is added automatically, immediately following this argument.
          :class:`loopy.CompiledKernel` is even smarter in its treatment of
          this case and will compile custom versions of the kernel based on
          whether the passed arrays have offsets or not.
    """
    # Reject unknown keyword arguments early (subclasses declare what
    # extras they accept via allowed_extra_kwargs).
    for kwarg_name in kwargs:
        if kwarg_name not in self.allowed_extra_kwargs:
            raise TypeError("invalid kwarg: %s" % kwarg_name)

    import loopy as lp

    from loopy.types import to_loopy_type
    dtype = to_loopy_type(dtype, allow_auto=True, allow_none=True,
            for_atomic=for_atomic)

    # "known" here means concretely given (neither None nor lp.auto).
    strides_known = strides is not None and strides is not lp.auto
    shape_known = shape is not None and shape is not lp.auto

    if strides_known:
        strides = _parse_shape_or_strides(strides)

    if shape_known:
        shape = _parse_shape_or_strides(shape)

    # {{{ check dim_names

    if dim_names is not None:
        if len(dim_names) != len(set(dim_names)):
            raise LoopyError("dim_names are not unique")

        for n in dim_names:
            if not isinstance(n, str):
                raise LoopyError("found non-string '%s' in dim_names"
                        % type(n).__name__)

    # }}}

    # {{{ convert strides to dim_tags (Note: strides override order)

    if dim_tags is not None and strides_known:
        raise TypeError("may not specify both strides and dim_tags")

    if dim_tags is None and strides_known:
        dim_tags = [FixedStrideArrayDimTag(s) for s in strides]
        strides = None

    # }}}

    if dim_tags is not None:
        dim_tags = parse_array_dim_tags(dim_tags,
                n_axes=(len(shape) if shape_known else None),
                use_increasing_target_axes=self.max_target_axes > 1,
                dim_names=dim_names)

    # {{{ determine number of user axes

    # All of shape, dim_tags and dim_names imply an axis count; they
    # must agree wherever more than one is given.
    num_user_axes = None
    if shape_known:
        num_user_axes = len(shape)
    for dim_iterable in [dim_tags, dim_names]:
        if dim_iterable is not None:
            new_num_user_axes = len(dim_iterable)

            if num_user_axes is None:
                num_user_axes = new_num_user_axes
            else:
                if new_num_user_axes != num_user_axes:
                    raise LoopyError("contradictory values for number of "
                            "dimensions of array '%s' from shape, strides, "
                            "dim_tags, or dim_names"
                            % name)

            del new_num_user_axes

    # }}}

    # {{{ convert order to dim_tags

    if order is None and self.max_target_axes > 1:
        # FIXME: Hackety hack. ImageArgs need to generate dim_tags even
        # if no order is specified. Plus they don't care that much.
        order = "C"

    if dim_tags is None and num_user_axes is not None and order is not None:
        dim_tags = parse_array_dim_tags(num_user_axes*[order],
                n_axes=num_user_axes,
                use_increasing_target_axes=self.max_target_axes > 1,
                dim_names=dim_names)
        order = None

    # }}}

    if dim_tags is not None:
        # {{{ find number of target axes

        target_axes = set()
        for dim_tag in dim_tags:
            if isinstance(dim_tag, _StrideArrayDimTagBase):
                target_axes.add(dim_tag.target_axis)

        if target_axes != set(range(len(target_axes))):
            raise LoopyError("target axes for variable '%s' are non-"
                    "contiguous" % self.name)

        num_target_axes = len(target_axes)
        del target_axes

        # }}}

        if not (self.min_target_axes <= num_target_axes <=
                self.max_target_axes):
            raise LoopyError("%s only supports between %d and %d target axes "
                    "('%s' has %d)" % (type(self).__name__,
                        self.min_target_axes, self.max_target_axes,
                        self.name, num_target_axes))

        new_dim_tags = convert_computed_to_fixed_dim_tags(
                name, num_user_axes, num_target_axes,
                shape, dim_tags)

        if new_dim_tags is not None:
            # successfully normalized
            dim_tags = new_dim_tags
            del new_dim_tags

    if dim_tags is not None:
        # for hashability
        dim_tags = tuple(dim_tags)
        order = None

    if strides is not None:
        # Preserve strides if we weren't able to process them yet.
        # That only happens if they're set to loopy.auto (and 'guessed'
        # in loopy.kernel.creation).

        kwargs["strides"] = strides

    if dim_names is not None and not isinstance(dim_names, tuple):
        from warnings import warn
        warn("dim_names is not a tuple when calling ArrayBase constructor",
                DeprecationWarning, stacklevel=2)

    Record.__init__(self,
            name=name,
            dtype=dtype,
            shape=shape,
            dim_tags=dim_tags,
            offset=offset,
            dim_names=dim_names,
            order=order,
            **kwargs)
def __init__(self, *args, **kwargs):
    # Initialize the record, then start with an empty feature set.
    #
    # NOTE(review): ``self.features`` is assigned *after* Record.__init__,
    # so any ``features`` value passed via kwargs is overwritten with an
    # empty set -- confirm this reset is intentional (sibling classes in
    # this codebase merely default ``features`` when it is absent).
    Record.__init__(self, *args, **kwargs)
    self.features = set()
def __init__(self):
    """Record whose ``gpudata`` field is initialized to 0 (a null
    device-pointer placeholder)."""
    Record.__init__(
            self,
            gpudata=0)
def __init__(self, shape, indices, data):
    """Record bundling a shape with index and data arrays."""
    Record.__init__(
            self,
            shape=shape,
            indices=indices,
            data=data)
def __init__(self, axis):
    """Record holding a single ``axis`` value."""
    Record.__init__(
            self,
            axis=axis)
def __init__(self, **kwargs):
    """Wrap each keyword's expression in a common subexpression named
    after the keyword, and store the results as record fields.
    """
    from hedge.optemplate.primitives import make_common_subexpression as cse
    # BUG FIX: use .items() instead of the Python-2-only .iteritems()
    # so this also runs under Python 3; dict() accepts either form.
    Record.__init__(self, dict(
        (name, cse(expr, name)) for name, expr in kwargs.items()))
def __init__(self, **kwargs):
    """Wrap each keyword's expression in a common subexpression named
    after the keyword, and store the results as record fields.
    """
    from hedge.tools.symbolic import make_common_subexpression as cse
    # BUG FIX: use .items() instead of the Python-2-only .iteritems()
    # so this also runs under Python 3; dict() accepts either form.
    Record.__init__(self, dict(
        (name, cse(expr, name)) for name, expr in kwargs.items()))
def __init__(self, name, dtype=None, shape=None, dim_tags=None, offset=0,
        dim_names=None, strides=None, order=None, for_atomic=False,
        **kwargs):
    """
    All of the following are optional. Specify either strides or shape.

    :arg name: May contain multiple names separated by
        commas, in which case multiple arguments,
        each with identical properties, are created
        for each name.
    :arg dtype: the :class:`numpy.dtype` of the array.
        If this is *None*, :mod:`loopy` will try to continue without
        knowing the type of this array, where the idea is that precise
        knowledge of the type will become available at invocation time.
        :class:`loopy.CompiledKernel` (and thereby
        :meth:`loopy.LoopKernel.__call__`) automatically add this type
        information based on invocation arguments.

        Note that some transformations, such as :func:`loopy.add_padding`
        cannot be performed without knowledge of the exact *dtype*.

    :arg shape: May be one of the following:

        * *None*. In this case, no shape is intended to be specified,
          only the strides will be used to access the array. Bounds checking
          will not be performed.

        * :class:`loopy.auto`. The shape will be determined by finding the
          access footprint.

        * a tuple like :attr:`numpy.ndarray.shape`.

          Each entry of the tuple is also allowed to be a :mod:`pymbolic`
          expression involving kernel parameters, or a (potentially-comma
          separated) or a string that can be parsed to such an expression.

          Any element of the shape tuple not used to compute strides
          may be *None*.

        * A string which can be parsed into the previous form.

    :arg dim_tags: A comma-separated list of tags as understood by
        :func:`parse_array_dim_tag`.

    :arg strides: May be one of the following:

        * None

        * :class:`loopy.auto`. The strides will be determined by *order*
          and the access footprint.

        * a tuple like :attr:`numpy.ndarray.shape`.

          Each entry of the tuple is also allowed to be a :mod:`pymbolic`
          expression involving kernel parameters, or a (potentially-comma
          separated) or a string that can be parsed to such an expression.

        * A string which can be parsed into the previous form.

    :arg order: "F" or "C" for C (row major) or Fortran
        (column major). Defaults to the *default_order* argument
        passed to :func:`loopy.make_kernel`.
    :arg for_atomic:
        Whether the array is declared for atomic access, and, if necessary,
        using atomic-capable data types.
    :arg offset: Offset from the beginning of the buffer to the point from
        which the strides are counted. May be one of

        * 0
        * a string (that is interpreted as an argument name).
        * :class:`loopy.auto`, in which case an offset argument
          is added automatically, immediately following this argument.
          :class:`loopy.CompiledKernel` is even smarter in its treatment of
          this case and will compile custom versions of the kernel based on
          whether the passed arrays have offsets or not.
    """
    # Reject unknown keyword arguments early (subclasses declare what
    # extras they accept via allowed_extra_kwargs).
    for kwarg_name in kwargs:
        if kwarg_name not in self.allowed_extra_kwargs:
            raise TypeError("invalid kwarg: %s" % kwarg_name)

    import loopy as lp

    from loopy.types import to_loopy_type
    dtype = to_loopy_type(dtype, allow_auto=True, allow_none=True,
            for_atomic=for_atomic)

    # "known" here means concretely given (neither None nor lp.auto).
    strides_known = strides is not None and strides is not lp.auto
    shape_known = shape is not None and shape is not lp.auto

    if strides_known:
        strides = _parse_shape_or_strides(strides)

    if shape_known:
        shape = _parse_shape_or_strides(shape)

    # {{{ check dim_names

    if dim_names is not None:
        if len(dim_names) != len(set(dim_names)):
            raise LoopyError("dim_names are not unique")

        for n in dim_names:
            if not isinstance(n, str):
                raise LoopyError("found non-string '%s' in dim_names"
                        % type(n).__name__)

    # }}}

    # {{{ convert strides to dim_tags (Note: strides override order)

    if dim_tags is not None and strides_known:
        raise TypeError("may not specify both strides and dim_tags")

    if dim_tags is None and strides_known:
        dim_tags = [FixedStrideArrayDimTag(s) for s in strides]
        strides = None

    # }}}

    if dim_tags is not None:
        dim_tags = parse_array_dim_tags(dim_tags,
                n_axes=(len(shape) if shape_known else None),
                use_increasing_target_axes=self.max_target_axes > 1,
                dim_names=dim_names)

    # {{{ determine number of user axes

    # All of shape, dim_tags and dim_names imply an axis count; they
    # must agree wherever more than one is given.
    num_user_axes = None
    if shape_known:
        num_user_axes = len(shape)
    for dim_iterable in [dim_tags, dim_names]:
        if dim_iterable is not None:
            new_num_user_axes = len(dim_iterable)

            if num_user_axes is None:
                num_user_axes = new_num_user_axes
            else:
                if new_num_user_axes != num_user_axes:
                    raise LoopyError("contradictory values for number of "
                            "dimensions of array '%s' from shape, strides, "
                            "dim_tags, or dim_names"
                            % name)

            del new_num_user_axes

    # }}}

    # {{{ convert order to dim_tags

    if order is None and self.max_target_axes > 1:
        # FIXME: Hackety hack. ImageArgs need to generate dim_tags even
        # if no order is specified. Plus they don't care that much.
        order = "C"

    if dim_tags is None and num_user_axes is not None and order is not None:
        dim_tags = parse_array_dim_tags(num_user_axes*[order],
                n_axes=num_user_axes,
                use_increasing_target_axes=self.max_target_axes > 1,
                dim_names=dim_names)
        order = None

    # }}}

    if dim_tags is not None:
        # {{{ find number of target axes

        target_axes = set()
        for dim_tag in dim_tags:
            if isinstance(dim_tag, _StrideArrayDimTagBase):
                target_axes.add(dim_tag.target_axis)

        if target_axes != set(range(len(target_axes))):
            raise LoopyError("target axes for variable '%s' are non-"
                    "contiguous" % self.name)

        num_target_axes = len(target_axes)
        del target_axes

        # }}}

        if not (self.min_target_axes <= num_target_axes <=
                self.max_target_axes):
            raise LoopyError("%s only supports between %d and %d target axes "
                    "('%s' has %d)" % (type(self).__name__,
                        self.min_target_axes, self.max_target_axes,
                        self.name, num_target_axes))

        new_dim_tags = convert_computed_to_fixed_dim_tags(
                name, num_user_axes, num_target_axes,
                shape, dim_tags)

        if new_dim_tags is not None:
            # successfully normalized
            dim_tags = new_dim_tags
            del new_dim_tags

    if dim_tags is not None:
        # for hashability
        dim_tags = tuple(dim_tags)
        order = None

    if strides is not None:
        # Preserve strides if we weren't able to process them yet.
        # That only happens if they're set to loopy.auto (and 'guessed'
        # in loopy.kernel.creation).

        kwargs["strides"] = strides

    if dim_names is not None and not isinstance(dim_names, tuple):
        # BUG FIX: a stray "pu.db" debugger breakpoint (pudb) was left
        # in this branch; it has been removed.
        from warnings import warn
        warn("dim_names is not a tuple when calling ArrayBase constructor",
                DeprecationWarning, stacklevel=2)

    Record.__init__(self,
            name=name,
            dtype=dtype,
            shape=shape,
            dim_tags=dim_tags,
            offset=offset,
            dim_names=dim_names,
            order=order,
            **kwargs)
def copy(self, **kwargs):
    """Copy the record, resetting the automatically assigned
    ``element_nr_base``/``node_nr_base`` fields unless the caller
    supplies explicit values.
    """
    for base_field in ("element_nr_base", "node_nr_base"):
        kwargs.setdefault(base_field, None)

    return Record.copy(self, **kwargs)
def __init__(self, source, target, dep_descr, variable, var_kind):
    """Record one described dependency between *source* and *target*
    on *variable* (of kind *var_kind*).
    """
    Record.__init__(
            self,
            source=source,
            target=target,
            dep_descr=dep_descr,
            variable=variable,
            var_kind=var_kind)
def __init__(self, *args, **kwargs):
    """Ensure a ``features`` set exists before delegating to Record."""
    kwargs.setdefault("features", set())
    Record.__init__(self, *args, **kwargs)