Code example #1
File: nodes.py Project: cpenny42/dace
class Reduce(Node):
    """ An SDFG node that reduces an N-dimensional array to an 
        (N-k)-dimensional array, with a list of axes to reduce and
        a reduction binary function. """
    from dace.codegen.instrumentation.perfsettings import PerfSettings

    # Properties
    axes = Property(dtype=tuple, allow_none=True)
    wcr = LambdaProperty()
    identity = Property(dtype=object, allow_none=True)
    schedule = Property(dtype=types.ScheduleType,
                        desc="Reduction execution policy",
                        enum=types.ScheduleType,
                        from_string=lambda x: types.ScheduleType[x])

    papi_counters = Property(dtype=list,
                             desc="List of PAPI counter preset identifiers.",
                             default=PerfSettings.perf_default_papi_counters())
    debuginfo = DebugInfoProperty()

    def __init__(self,
                 wcr,
                 axes,
                 wcr_identity=None,
                 schedule=types.ScheduleType.Default,
                 debuginfo=None):
        super(Reduce, self).__init__()
        self.wcr = wcr  # type: ast._Lambda
        self.axes = axes
        self.identity = wcr_identity
        self.schedule = schedule
        self.debuginfo = debuginfo

    def draw_node(self, sdfg, state):
        return dot.draw_node(sdfg, state, self, shape="invtriangle")

    def __str__(self):
        # Autodetect reduction type
        redtype = detect_reduction_type(self.wcr)
        if redtype == types.ReductionType.Custom:
            wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
        else:
            wcrstr = str(redtype)
            wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

        return 'Op: {op}, Axes: {axes}'.format(
            axes=('all' if self.axes is None else str(self.axes)), op=wcrstr)

    def __label__(self, sdfg, state):
        # Autodetect reduction type
        redtype = detect_reduction_type(self.wcr)
        if redtype == types.ReductionType.Custom:
            wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
        else:
            wcrstr = str(redtype)
            wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

        return 'Op: {op}\nAxes: {axes}'.format(
            axes=('all' if self.axes is None else str(self.axes)), op=wcrstr)
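To make the label logic concrete: the expression unparse(ast.parse(self.wcr).body[0].value.body) in __str__ extracts the body of the WCR lambda for display. A minimal, self-contained sketch of that step, using the standard library's ast.unparse (Python 3.9+) in place of the project's unparse helper:

import ast

wcr = 'lambda cur, new: cur + new'  # sample write-conflict resolution lambda
lam = ast.parse(wcr).body[0].value  # the ast.Lambda node
print(ast.unparse(lam.body))        # prints: cur + new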
Code example #2
class AccessNode(Node):
    """ A node that accesses data in the SDFG. Denoted by a circular shape. """

    setzero = Property(dtype=bool, desc="Initialize to zero", default=False)
    debuginfo = DebugInfoProperty()
    data = DataProperty(desc="Data (array, stream, scalar) to access")

    def __init__(self, data, debuginfo=None):
        super(AccessNode, self).__init__()

        # Properties
        self.debuginfo = debuginfo
        if not isinstance(data, str):
            raise TypeError('Data for AccessNode must be a string')
        self.data = data

    @staticmethod
    def from_json(json_obj, context=None):
        ret = AccessNode("Nodata")
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    def __deepcopy__(self, memo):
        node = object.__new__(AccessNode)
        node._data = self._data
        node._setzero = self._setzero
        node._in_connectors = dcpy(self._in_connectors, memo=memo)
        node._out_connectors = dcpy(self._out_connectors, memo=memo)
        node._debuginfo = dcpy(self._debuginfo, memo=memo)
        return node

    @property
    def label(self):
        return self.data

    def __label__(self, sdfg, state):
        return self.data

    def desc(self, sdfg):
        from dace.sdfg import SDFGState, ScopeSubgraphView
        if isinstance(sdfg, (SDFGState, ScopeSubgraphView)):
            sdfg = sdfg.parent
        return sdfg.arrays[self.data]

    def validate(self, sdfg, state):
        if self.data not in sdfg.arrays:
            raise KeyError('Array "%s" not found in SDFG' % self.data)

    def has_writes(self, state):
        for e in state.in_edges(self):
            if not e.data.is_empty():
                return True
        return False

    def has_reads(self, state):
        for e in state.out_edges(self):
            if not e.data.is_empty():
                return True
        return False
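has_writes and has_reads above only inspect the memlets on the node's incident edges. A self-contained sketch of that check; FakeState and the _edge helper are hypothetical stand-ins, not part of dace:

from types import SimpleNamespace

def _edge(empty):  # hypothetical: an edge whose memlet may be empty
    return SimpleNamespace(data=SimpleNamespace(is_empty=lambda: empty))

class FakeState:  # hypothetical stand-in for an SDFG state
    def in_edges(self, node):   # one empty memlet, one real write
        return [_edge(True), _edge(False)]
    def out_edges(self, node):  # only empty memlets attached
        return [_edge(True)]

state, node = FakeState(), object()
print(any(not e.data.is_empty() for e in state.in_edges(node)))   # True
print(any(not e.data.is_empty() for e in state.out_edges(node)))  # False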
Code example #3
File: nodes.py Project: targetsm/dace
class Tasklet(CodeNode):
    """ A node that contains a tasklet: a functional computation procedure
        that can only access external data specified using connectors.

        Tasklets may be implemented in Python, C++, or any other language
        supported by the code generator.
    """

    code = CodeProperty(desc="Tasklet code", default=CodeBlock(""))
    debuginfo = DebugInfoProperty()

    instrument = Property(choices=dtypes.InstrumentationType,
                          desc="Measure execution statistics with given method",
                          default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 label,
                 inputs=None,
                 outputs=None,
                 code="",
                 language=dtypes.Language.Python,
                 location=None,
                 debuginfo=None):
        super(Tasklet, self).__init__(label, location, inputs, outputs)

        self.code = CodeBlock(code, language)
        self.debuginfo = debuginfo

    @property
    def language(self):
        return self.code.language

    @staticmethod
    def from_json(json_obj, context=None):
        ret = Tasklet("dummylabel")
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    @property
    def name(self):
        return self._label

    def validate(self, sdfg, state):
        if not dtypes.validate_name(self.label):
            raise NameError('Invalid tasklet name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not dtypes.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not dtypes.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)

    @property
    def free_symbols(self) -> Set[str]:
        return self.code.get_free_symbols(self.in_connectors.keys()
                                          | self.out_connectors.keys())

    def infer_connector_types(self, sdfg, state):
        # If a Python tasklet, use type inference to figure out all None output
        # connectors
        if all(cval.type is not None for cval in self.out_connectors.values()):
            return
        if self.code.language != dtypes.Language.Python:
            return

        if any(cval.type is None for cval in self.in_connectors.values()):
            raise TypeError('Cannot infer output connectors of tasklet "%s", '
                            'not all input connectors have types' % str(self))

        # Avoid import loop
        from dace.codegen.tools.type_inference import infer_types

        # Get symbols defined at beginning of node, and infer all types in
        # tasklet
        syms = state.symbols_defined_at(self)
        syms.update(self.in_connectors)
        new_syms = infer_types(self.code.code, syms)
        for cname, oconn in self.out_connectors.items():
            if oconn.type is None:
                if cname not in new_syms:
                    raise TypeError('Cannot infer type of tasklet %s output '
                                    '"%s", please specify manually.' %
                                    (self.label, cname))
                self.out_connectors[cname] = new_syms[cname]

    def __str__(self):
        if not self.label:
            return "--Empty--"
        else:
            return self.label
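A hedged usage sketch of the constructor above (assumes a dace version matching this snapshot, with Tasklet importable from this nodes.py; the label and connector names are hypothetical):

t = Tasklet('add', inputs={'a', 'b'}, outputs={'out'}, code='out = a + b')
print(t.name)      # add
print(t.language)  # Language.Python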
Code example #4
File: nodes.py Project: targetsm/dace
class LibraryNode(CodeNode):

    name = Property(dtype=str, desc="Name of node")
    implementation = LibraryImplementationProperty(
        dtype=str,
        allow_none=True,
        desc=("Which implementation this library node will expand into."
              "Must match a key in the list of possible implementations."))
    schedule = Property(
        dtype=dtypes.ScheduleType,
        desc="If set, determines the default device mapping of "
        "the node upon expansion, if expanded to a nested SDFG.",
        choices=dtypes.ScheduleType,
        from_string=lambda x: dtypes.ScheduleType[x],
        default=dtypes.ScheduleType.Default)
    debuginfo = DebugInfoProperty()

    def __init__(self, name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.name = name
        self.label = name

    # Overrides subclasses to return LibraryNode as their JSON type
    @property
    def __jsontype__(self):
        return 'LibraryNode'

    # Based on https://stackoverflow.com/a/2020083/6489142
    def _fullclassname(self):
        module = self.__class__.__module__
        if module is None or module == str.__class__.__module__:
            return self.__class__.__name__  # Avoid reporting __builtin__
        else:
            return module + '.' + self.__class__.__name__

    def to_json(self, parent):
        jsonobj = super().to_json(parent)
        jsonobj['classpath'] = self._fullclassname()
        return jsonobj

    @classmethod
    def from_json(cls, json_obj, context=None):
        if cls == LibraryNode:
            clazz = pydoc.locate(json_obj['classpath'])
            if clazz is None:
                raise TypeError('Unrecognized library node type "%s"' %
                                json_obj['classpath'])
            return clazz.from_json(json_obj, context)
        else:  # Subclasses are actual library nodes
            ret = cls(json_obj['attributes']['name'])
            dace.serialize.set_properties_from_json(ret,
                                                    json_obj,
                                                    context=context)
            return ret

    def expand(self, sdfg, state, *args, **kwargs):
        """Create and perform the expansion transformation for this library
           node."""
        implementation = self.implementation
        library_name = type(self)._dace_library_name
        try:
            config_implementation = Config.get("library", library_name,
                                               "default_implementation")
        except KeyError:
            # Non-standard libraries are not defined in the config schema, and
            # thus might not exist in the config.
            config_implementation = None
        if config_implementation is not None:
            try:
                config_override = Config.get("library", library_name,
                                             "override")
                if config_override and implementation in self.implementations:
                    if implementation is not None:
                        warnings.warn(
                            "Overriding explicitly specified "
                            "implementation {} for {} with {}.".format(
                                implementation, self.label,
                                config_implementation))
                    implementation = config_implementation
            except KeyError:
                config_override = False
        # If not explicitly set, try the node default
        if implementation is None:
            implementation = type(self).default_implementation
            # If no node default, try library default
            if implementation is None:
                import dace.library  # Avoid cyclic dependency
                lib = dace.library._DACE_REGISTERED_LIBRARIES[type(
                    self)._dace_library_name]
                implementation = lib.default_implementation
                # Try the default specified in the config
                if implementation is None:
                    implementation = config_implementation
                    # Otherwise we don't know how to expand
                    if implementation is None:
                        raise ValueError("No implementation or default "
                                         "implementation specified.")
        if implementation not in self.implementations.keys():
            raise KeyError("Unknown implementation for node {}: {}".format(
                type(self).__name__, implementation))
        transformation_type = type(self).implementations[implementation]
        sdfg_id = sdfg.sdfg_id
        state_id = sdfg.nodes().index(state)
        subgraph = {transformation_type._match_node: state.node_id(self)}
        transformation = transformation_type(sdfg_id, state_id, subgraph, 0)
        transformation.apply(sdfg, *args, **kwargs)

    @classmethod
    def register_implementation(cls, name, transformation_type):
        """Register an implementation to belong to this library node type."""
        cls.implementations[name] = transformation_type
        match_node_name = "__" + transformation_type.__name__
        if (hasattr(transformation_type, "_match_node")
                and transformation_type._match_node != match_node_name):
            raise ValueError(
                "Transformation " + transformation_type.__name__ +
                " is already registered with a different library node.")
        transformation_type._match_node = cls(match_node_name)
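The resolution order in expand() above is: explicit implementation (possibly replaced by a config override), then the node default, then the library default, then the config default, else an error. A self-contained sketch of that fallback chain; resolve_implementation is a hypothetical helper that ignores the override path:

def resolve_implementation(explicit, node_default, library_default,
                           config_default):
    for impl in (explicit, node_default, library_default, config_default):
        if impl is not None:
            return impl
    raise ValueError('No implementation or default '
                     'implementation specified.')

print(resolve_implementation(None, None, 'pure', 'MKL'))  # prints: pure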
Code example #5
class Tasklet(CodeNode):
    """ A node that contains a tasklet: a functional computation procedure
        that can only access external data specified using connectors.

        Tasklets may be implemented in Python, C++, or any other language
        supported by the code generator.
    """

    code = CodeProperty(desc="Tasklet code", default=CodeBlock(""))
    debuginfo = DebugInfoProperty()

    instrument = Property(
        choices=dtypes.InstrumentationType,
        desc="Measure execution statistics with given method",
        default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 label,
                 inputs=None,
                 outputs=None,
                 code="",
                 language=dtypes.Language.Python,
                 location=None,
                 debuginfo=None):
        super(Tasklet, self).__init__(label, location, inputs, outputs)

        self.code = CodeBlock(code, language)
        self.debuginfo = debuginfo

    @property
    def language(self):
        return self.code.language

    @staticmethod
    def from_json(json_obj, context=None):
        ret = Tasklet("dummylabel")
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    @property
    def name(self):
        return self._label

    def validate(self, sdfg, state):
        if not dtypes.validate_name(self.label):
            raise NameError('Invalid tasklet name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not dtypes.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not dtypes.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)

    @property
    def free_symbols(self) -> Set[str]:
        return self.code.get_free_symbols(self.in_connectors
                                          | self.out_connectors)

    def __str__(self):
        if not self.label:
            return "--Empty--"
        else:
            return self.label
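free_symbols above delegates to the code object, which subtracts the connector names from the names appearing in the tasklet code. A simplified, self-contained sketch of that idea; get_free_symbols here is a hypothetical stand-in, not the dace implementation:

import ast

def get_free_symbols(code: str, connectors: set) -> set:
    names = {n.id for n in ast.walk(ast.parse(code))
             if isinstance(n, ast.Name)}
    return names - connectors

print(get_free_symbols('out = a * N + b', {'a', 'b', 'out'}))  # {'N'}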
Code example #6
File: nodes.py Project: cpenny42/dace
class Consume(object):
    """ Consume is a scope, like `Map`, that is a part of the parametric 
        graph extension of the SDFG. It creates a producer-consumer 
        relationship between the input stream and the scope subgraph. The
        subgraph is scheduled to a given number of processing elements
        for processing, and they will try to pop elements from the input
        stream until a given quiescence condition is reached. """

    # Properties
    label = Property(dtype=str, desc="Name of the consume node")
    pe_index = Property(dtype=str, desc="Processing element identifier")
    num_pes = SymbolicProperty(desc="Number of processing elements")
    condition = CodeProperty(desc="Quiescence condition", allow_none=True)
    language = Property(enum=types.Language, default=types.Language.Python)
    schedule = Property(dtype=types.ScheduleType,
                        desc="Consume schedule",
                        enum=types.ScheduleType,
                        from_string=lambda x: types.ScheduleType[x])
    chunksize = Property(dtype=int,
                         desc="Maximal size of elements to consume at a time",
                         default=1)
    debuginfo = DebugInfoProperty()
    is_collapsed = Property(dtype=bool,
                            desc="Show this node/scope/state as collapsed",
                            default=False)

    def as_map(self):
        """ Compatibility function that allows to view the consume as a map,
            mainly in memlet propagation. """
        return Map(self.label, [self.pe_index],
                   sbs.Range([(0, self.num_pes - 1, 1)]), self.schedule)

    def __init__(self,
                 label,
                 pe_tuple,
                 condition,
                 schedule=types.ScheduleType.Default,
                 chunksize=1,
                 debuginfo=None):
        super(Consume, self).__init__()

        # Properties
        self.label = label
        self.pe_index, self.num_pes = pe_tuple
        self.condition = condition
        self.schedule = schedule
        self.chunksize = chunksize
        self.debuginfo = debuginfo

    def __str__(self):
        if self.condition is not None:
            return ("%s [%s=0:%s], Condition: %s" %
                    (self._label, self.pe_index, self.num_pes,
                     CodeProperty.to_string(self.condition)))
        else:
            return ("%s [%s=0:%s]" %
                    (self._label, self.pe_index, self.num_pes))

    def validate(self, sdfg, state, node):
        if not data.validate_name(self.label):
            raise NameError('Invalid consume name "%s"' % self.label)

    def get_param_num(self):
        """ Returns the number of consume dimension parameters/symbols. """
        return 1
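A hedged usage sketch of the constructor and __str__ above (assumes this module and its Property machinery are importable; the label and the processing-element tuple ('p', 4) are hypothetical):

c = Consume('producer_consumer', ('p', 4), condition=None)
print(str(c))  # producer_consumer [p=0:4]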
Code example #7
File: nodes.py Project: cpenny42/dace
class NestedSDFG(CodeNode):
    """ An SDFG state node that contains an SDFG of its own, runnable using
        the data dependencies specified using its connectors.

        It is encouraged to use nested SDFGs instead of coarse-grained tasklets
        since they are analyzable with respect to transformations.
        
        @note: A nested SDFG may not contain any of its parent SDFGs
        (i.e., it cannot create recursion).
    """

    label = Property(dtype=str, desc="Name of the SDFG")
    # NOTE: We cannot use SDFG as the type because of an import loop
    sdfg = Property(dtype=graph.OrderedDiGraph, desc="The SDFG")
    schedule = Property(dtype=types.ScheduleType,
                        desc="SDFG schedule",
                        enum=types.ScheduleType,
                        from_string=lambda x: types.ScheduleType[x])
    location = Property(dtype=str, desc="SDFG execution location descriptor")
    debuginfo = DebugInfoProperty()
    is_collapsed = Property(dtype=bool,
                            desc="Show this node/scope/state as collapsed",
                            default=False)

    def __init__(self,
                 label,
                 sdfg,
                 inputs: Set[str],
                 outputs: Set[str],
                 schedule=types.ScheduleType.Default,
                 location="-1",
                 debuginfo=None):
        super(NestedSDFG, self).__init__(inputs, outputs)

        # Properties
        self.label = label
        self.sdfg = sdfg
        self.schedule = schedule
        self.location = location
        self.debuginfo = debuginfo

    def draw_node(self, sdfg, graph):
        return dot.draw_node(sdfg, graph, self, shape="doubleoctagon")

    def __str__(self):
        if not self.label:
            return "SDFG"
        else:
            return self.label

    def validate(self, sdfg, state):
        if not data.validate_name(self.label):
            raise NameError('Invalid nested SDFG name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not data.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not data.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)

        # Recursively validate nested SDFG
        self.sdfg.validate()
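A hedged usage sketch (assumes a dace installation matching this snapshot, with NestedSDFG importable from this module; inner is a hypothetical, empty inner SDFG):

import dace

inner = dace.SDFG('inner_program')  # hypothetical minimal inner SDFG
nested = NestedSDFG('inner_program', inner, inputs={'A'}, outputs={'B'})
print(str(nested))  # inner_program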
Code example #8
File: nodes.py Project: cpenny42/dace
class AccessNode(Node):
    """ A node that accesses data in the SDFG. Denoted by a circular shape. """

    access = Property(enum=types.AccessType,
                      desc="Type of access to this array",
                      default=types.AccessType.ReadWrite)
    setzero = Property(dtype=bool, desc="Initialize to zero", default=False)
    debuginfo2 = DebugInfoProperty()
    data = DataProperty(desc="Data (array, stream, scalar) to access")

    def __init__(self,
                 data,
                 access=types.AccessType.ReadWrite,
                 debuginfo=None):
        super(AccessNode, self).__init__()

        # Properties
        self.debuginfo2 = debuginfo
        self.access = access
        if not isinstance(data, str):
            raise TypeError('Data for AccessNode must be a string')
        self.data = data

    def __deepcopy__(self, memo):
        node = object.__new__(AccessNode)
        node._access = self._access
        node._data = self._data
        node._setzero = self._setzero
        node._in_connectors = self._in_connectors
        node._out_connectors = self._out_connectors
        node.debuginfo2 = dcpy(self.debuginfo2)
        return node

    @property
    def label(self):
        return self.data

    def __label__(self, sdfg, state):
        return self.data

    def desc(self, sdfg):
        from dace.sdfg import SDFGState, ScopeSubgraphView
        if isinstance(sdfg, (SDFGState, ScopeSubgraphView)):
            sdfg = sdfg.parent
        return sdfg.arrays[self.data]

    def draw_node(self, sdfg, graph):
        desc = self.desc(sdfg)
        if isinstance(desc, data.Stream):
            return dot.draw_node(sdfg,
                                 graph,
                                 self,
                                 shape="oval",
                                 style='dashed')
        elif desc.transient:
            return dot.draw_node(sdfg, graph, self, shape="oval")
        else:
            return dot.draw_node(sdfg, graph, self, shape="oval", style='bold')

    def validate(self, sdfg, state):
        if self.data not in sdfg.arrays:
            raise KeyError('Array "%s" not found in SDFG' % self.data)
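Note that this __deepcopy__ shares the connector dictionaries between copies (plain assignment), unlike code example #2, which deep-copies them. A self-contained sketch of the object.__new__ copy pattern; MiniNode is a hypothetical class, not part of dace:

from copy import deepcopy

class MiniNode:
    def __init__(self, data):
        self.data = data
        self.connectors = {'in', 'out'}

    def __deepcopy__(self, memo):
        node = object.__new__(MiniNode)    # bypass __init__
        node.data = self.data
        node.connectors = self.connectors  # shared, as in the code above
        return node

a = MiniNode('A')
b = deepcopy(a)
print(b.data, b.connectors is a.connectors)  # prints: A True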
Code example #9
class Memlet(object):
    """ Data movement object. Represents the data, the subset moved, and the
        manner it is reindexed (`other_subset`) into the destination.
        If there are multiple conflicting writes, this object also specifies
        how they are resolved with a lambda function.
    """

    # Properties
    volume = SymbolicProperty(default=0,
                              desc='The exact number of elements moved '
                              'using this memlet, or the maximum number '
                              'if dynamic=True (with 0 as unbounded)')
    dynamic = Property(default=False,
                       dtype=bool,
                       desc='Is the number of elements moved determined at '
                       'runtime (e.g., data dependent)')
    subset = SubsetProperty(allow_none=True,
                            desc='Subset of elements to move from the data '
                            'attached to this edge.')
    other_subset = SubsetProperty(
        allow_none=True,
        desc='Subset of elements after reindexing to the data not attached '
        'to this edge (e.g., for offsets and reshaping).')
    data = DataProperty(desc='Data descriptor attached to this memlet')
    wcr = LambdaProperty(allow_none=True,
                         desc='If set, defines a write-conflict resolution '
                         'lambda function, which receives two elements '
                         '(the `current` value and the `new` value) and '
                         'returns the value after resolution')

    # Code generation and validation hints
    debuginfo = DebugInfoProperty(desc='Line information to track source and '
                                  'generated code')
    wcr_nonatomic = Property(dtype=bool,
                             default=False,
                             desc='If True, always generates non-conflicting '
                             '(non-atomic) writes in resulting code')
    allow_oob = Property(dtype=bool,
                         default=False,
                         desc='Bypass out-of-bounds validation')

    def __init__(self,
                 expr: str = None,
                 data: str = None,
                 subset: Union[str, subsets.Subset] = None,
                 other_subset: Union[str, subsets.Subset] = None,
                 volume: Union[int, str, symbolic.SymbolicType] = None,
                 dynamic: bool = False,
                 wcr: Union[str, ast.AST] = None,
                 debuginfo: dtypes.DebugInfo = None,
                 wcr_nonatomic: bool = False,
                 allow_oob: bool = False):
        """ 
        Constructs a Memlet.
        :param expr: A string expression for this memlet, provided as an
                     ease-of-use API. Must follow one of the following forms:
                     1. ``ARRAY``,
                     2. ``ARRAY[SUBSET]``,
                     3. ``ARRAY[SUBSET] -> OTHER_SUBSET``.
        :param data: (DEPRECATED) Data descriptor name attached to this memlet.
        :param subset: The subset to take from the data attached to the edge,
                       represented either as a string or a Subset object.
        :param other_subset: The subset to offset into the other side of the
                             memlet, represented either as a string or a Subset 
                             object.
        :param volume: The exact number of elements moved using this
                       memlet, or the maximum number of elements if
                       ``dynamic`` is set to True. If dynamic and this
                       value is set to zero, the number of elements moved
                       is runtime-defined and unbounded.
        :param dynamic: If True, the number of elements moved in this memlet
                        is defined dynamically at runtime.
        :param wcr: A lambda function (represented as a string or Python AST)
                    specifying how write-conflicts are resolved. The lambda
                    function receives two elements, the ``current`` value and
                    the ``new`` value, and returns the value after resolution.
                    For example, summation is represented by
                    ``'lambda cur, new: cur + new'``.
        :param debuginfo: Line information from the generating source code.
        :param wcr_nonatomic: If True, overrides the automatic code generator
                              decision and treats all write-conflict resolution
                              operations as non-atomic, which might cause race
                              conditions in the general case.
        :param allow_oob: If True, bypasses the checks in SDFG validation for
                          out-of-bounds accesses in memlet subsets.
        """

        # Will be set once memlet is added into an SDFG (in try_initialize)
        self._sdfg = None
        self._state = None
        self._edge = None

        # Field caching which subset belongs to source or destination of memlet
        self._is_data_src = None

        # Initialize first by string expression
        self.data = None
        self.subset = None
        self.other_subset = None
        if expr is not None:
            self._parse_memlet_from_str(expr)

        # Set properties
        self.data = self.data or data
        self.subset = self.subset or subset
        self.other_subset = self.other_subset or other_subset

        if volume is not None:
            self.volume = volume
        else:
            if self.subset is not None:
                self.volume = self.subset.num_elements()
            elif self.other_subset is not None:
                self.volume = self.other_subset.num_elements()
            else:
                self.volume = 1

        self.dynamic = dynamic
        self.wcr = wcr
        self.wcr_nonatomic = wcr_nonatomic
        self.debuginfo = debuginfo
        self.allow_oob = allow_oob

    def to_json(self):
        attrs = dace.serialize.all_properties_to_json(self)

        # Fill in new values
        if self.src_subset is not None:
            attrs['src_subset'] = self.src_subset.to_json()
        else:
            attrs['src_subset'] = None
        if self.dst_subset is not None:
            attrs['dst_subset'] = self.dst_subset.to_json()
        else:
            attrs['dst_subset'] = None

        # Fill in legacy (DEPRECATED) values for backwards compatibility
        attrs['num_accesses'] = \
            str(self.volume) if not self.dynamic else -1

        return {"type": "Memlet", "attributes": attrs}

    @staticmethod
    def from_json(json_obj, context=None):
        ret = Memlet()
        dace.serialize.set_properties_from_json(
            ret,
            json_obj,
            context=context,
            ignore_properties={'src_subset', 'dst_subset', 'num_accesses'})
        if context:
            ret._sdfg = context['sdfg']
            ret._state = context['sdfg_state']
        return ret

    def __deepcopy__(self, memo):
        node = object.__new__(Memlet)

        # Set properties
        node._volume = dcpy(self._volume, memo=memo)
        node._dynamic = self._dynamic
        node._subset = dcpy(self._subset, memo=memo)
        node._other_subset = dcpy(self._other_subset, memo=memo)
        node._data = dcpy(self._data, memo=memo)
        node._wcr = dcpy(self._wcr, memo=memo)
        node._wcr_nonatomic = dcpy(self._wcr_nonatomic, memo=memo)
        node._debuginfo = dcpy(self._debuginfo, memo=memo)
        node._allow_oob = self._allow_oob
        node._is_data_src = self._is_data_src

        # Nullify graph references
        node._sdfg = None
        node._state = None
        node._edge = None

        return node

    def is_empty(self) -> bool:
        """ 
        Returns True if this memlet carries no data. Memlets without data are
        primarily used for connecting nodes to scopes without transferring 
        data to them. 
        """
        return (self.data is None and self.src_subset is None
                and self.dst_subset is None)

    @property
    def num_accesses(self):
        """ 
        Returns the total memory movement volume (in elements) of this memlet.
        """
        return self.volume

    @num_accesses.setter
    def num_accesses(self, value):
        self.volume = value

    @staticmethod
    def simple(data,
               subset_str,
               wcr_str=None,
               other_subset_str=None,
               wcr_conflict=True,
               num_accesses=None,
               debuginfo=None,
               dynamic=False):
        """ DEPRECATED: Constructs a Memlet from string-based expressions.
            :param data: The data object or name to access. 
            :type data: Either a string of the data descriptor name or an
                        AccessNode.
            :param subset_str: The subset of `data` that is going to
                               be accessed in string format. Example: '0:N'.
            :param wcr_str: A lambda function (as a string) specifying
                            how write-conflicts are resolved. The lambda
                            function receives two elements, the `current`
                            value and the `new` value, and returns the value
                            after resolution. For example, summation is
                            `'lambda cur, new: cur + new'`.
            :param other_subset_str: The reindexing of `subset` on the other
                                     connected data (as a string).
            :param wcr_conflict: If False, forces non-locked conflict
                                 resolution when generating code. The default
                                 is to let the code generator infer this
                                 information from the SDFG.
            :param num_accesses: The number of times that the moved data
                                 will be subsequently accessed. If
                                 -1, designates that the number of accesses is
                                 unknown at compile time.
            :param debuginfo: Source-code information (e.g., line, file)
                              used for debugging.
            :param dynamic: If True, the number of elements moved in this memlet
                            is defined dynamically at runtime.
        """
        # warnings.warn(
        #     'This function is deprecated, please use the Memlet '
        #     'constructor instead', DeprecationWarning)

        result = Memlet()

        if isinstance(subset_str, subsets.Subset):
            result.subset = subset_str
        else:
            result.subset = SubsetProperty.from_string(subset_str)

        result.dynamic = dynamic

        if num_accesses is not None:
            if num_accesses == -1:
                result.dynamic = True
                result.volume = 0
            else:
                result.volume = num_accesses
        else:
            result.volume = result._subset.num_elements()

        if wcr_str is not None:
            if isinstance(wcr_str, ast.AST):
                result.wcr = wcr_str
            else:
                result.wcr = LambdaProperty.from_string(wcr_str)

        if other_subset_str is not None:
            if isinstance(other_subset_str, subsets.Subset):
                result.other_subset = other_subset_str
            else:
                result.other_subset = SubsetProperty.from_string(
                    other_subset_str)
        else:
            result.other_subset = None

        # If it is an access node or another memlet
        if hasattr(data, 'data'):
            result.data = data.data
        else:
            result.data = data

        result.wcr_nonatomic = not wcr_conflict

        return result

    def _parse_from_subexpr(self, expr: str):
        if expr[-1] != ']':  # No subset given, try to use whole array
            if not dtypes.validate_name(expr):
                raise SyntaxError('Invalid memlet syntax "%s"' % expr)
            return expr, None

        # array[subset] syntax
        arrname, subset_str = expr[:-1].split('[')
        if not dtypes.validate_name(arrname):
            raise SyntaxError('Invalid array name "%s" in memlet' % arrname)
        return arrname, SubsetProperty.from_string(subset_str)

    def _parse_memlet_from_str(self, expr: str):
        """
        Parses a memlet and fills in either the src_subset,dst_subset fields
        or the _data,_subset fields.
        :param expr: A string expression for this memlet, provided as an
                ease-of-use API. Must follow one of the following forms:
                1. ``ARRAY``,
                2. ``ARRAY[SUBSET]``,
                3. ``ARRAY[SUBSET] -> OTHER_SUBSET``.
                Note that modes 2 and 3 are deprecated and will leave
                the memlet uninitialized until inserted into an SDFG.
        """
        expr = expr.strip()
        if '->' not in expr:  # Options 1 and 2
            self.data, self.subset = self._parse_from_subexpr(expr)
            return

        # Option 3
        src_expr, dst_expr = expr.split('->')
        src_expr = src_expr.strip()
        dst_expr = dst_expr.strip()
        if '[' not in src_expr and not dtypes.validate_name(src_expr):
            raise SyntaxError('Expression without data name not yet allowed')

        self.data, self.subset = self._parse_from_subexpr(src_expr)
        self.other_subset = SubsetProperty.from_string(dst_expr)

    def try_initialize(self, sdfg: 'dace.sdfg.SDFG',
                       state: 'dace.sdfg.SDFGState',
                       edge: 'dace.sdfg.graph.MultiConnectorEdge'):
        """ 
        Tries to initialize the internal fields of the memlet (e.g., src/dst 
        subset) once it is added to an SDFG as an edge.
        """
        from dace.sdfg.nodes import AccessNode, CodeNode  # Avoid import loops
        self._sdfg = sdfg
        self._state = state
        self._edge = edge

        # If memlet is code->code, ensure volume=1
        if (isinstance(edge.src, CodeNode) and isinstance(edge.dst, CodeNode)
                and self.volume == 0):
            self.volume = 1

        # Find source/destination of memlet
        try:
            path = state.memlet_path(edge)
        except (ValueError, AssertionError, StopIteration):
            # Cannot initialize yet
            return

        is_data_src = True
        if isinstance(path[-1].dst, AccessNode):
            if path[-1].dst.data == self._data:
                is_data_src = False
        self._is_data_src = is_data_src

        # If subset is None, fill in with entire array
        if (self.data is not None and self.subset is None):
            self.subset = subsets.Range.from_array(sdfg.arrays[self.data])

    def get_src_subset(self, edge: 'dace.sdfg.graph.MultiConnectorEdge',
                       state: 'dace.sdfg.SDFGState'):
        self.try_initialize(state.parent, state, edge)
        return self.src_subset

    def get_dst_subset(self, edge: 'dace.sdfg.graph.MultiConnectorEdge',
                       state: 'dace.sdfg.SDFGState'):
        self.try_initialize(state.parent, state, edge)
        return self.dst_subset

    @staticmethod
    def from_array(dataname, datadesc, wcr=None):
        """ Constructs a Memlet that transfers an entire array's contents.
            :param dataname: The name of the data descriptor in the SDFG.
            :param datadesc: The data descriptor object.
            :param wcr: The conflict resolution lambda.
            :type datadesc: Data
        """
        rng = subsets.Range.from_array(datadesc)
        return Memlet.simple(dataname, rng, wcr_str=wcr)

    def __hash__(self):
        return hash(
            (self.volume, self.src_subset, self.dst_subset, str(self.wcr)))

    def __eq__(self, other):
        return all([
            self.volume == other.volume, self.src_subset == other.src_subset,
            self.dst_subset == other.dst_subset, self.wcr == other.wcr
        ])

    def replace(self, repl_dict):
        """ Substitute a given set of symbols with a different set of symbols.
            :param repl_dict: A dict of string symbol names to symbols with
                              which to replace them.
        """
        repl_to_intermediate = {}
        repl_to_final = {}
        for symbol in repl_dict:
            if str(symbol) != str(repl_dict[symbol]):
                intermediate = symbolic.symbol('__dacesym_' + str(symbol))
                repl_to_intermediate[symbolic.symbol(symbol)] = intermediate
                repl_to_final[intermediate] = repl_dict[symbol]

        if len(repl_to_intermediate) > 0:
            if self.volume is not None and symbolic.issymbolic(self.volume):
                self.volume = self.volume.subs(repl_to_intermediate)
                self.volume = self.volume.subs(repl_to_final)
            if self.subset is not None:
                self.subset.replace(repl_to_intermediate)
                self.subset.replace(repl_to_final)
            if self.other_subset is not None:
                self.other_subset.replace(repl_to_intermediate)
                self.other_subset.replace(repl_to_final)

    def num_elements(self):
        """ Returns the number of elements in the Memlet subset. """
        if self.subset:
            return self.subset.num_elements()
        elif self.other_subset:
            return self.other_subset.num_elements()
        return 0

    def bounding_box_size(self):
        """ Returns a per-dimension upper bound on the maximum number of
            elements in each dimension.

            This bound will be tight in the case of Range.
        """
        if self.src_subset:
            return self.src_subset.bounding_box_size()
        elif self.dst_subset:
            return self.dst_subset.bounding_box_size()
        return []

    # New fields
    @property
    def src_subset(self):
        if self._is_data_src is not None:
            return self.subset if self._is_data_src else self.other_subset
        return self.subset

    @src_subset.setter
    def src_subset(self, new_src_subset):
        if self._is_data_src is not None:
            if self._is_data_src:
                self.subset = new_src_subset
            else:
                self.other_subset = new_src_subset
        else:
            self.subset = new_src_subset

    @property
    def dst_subset(self):
        if self._is_data_src is not None:
            return self.other_subset if self._is_data_src else self.subset
        return self.other_subset

    @dst_subset.setter
    def dst_subset(self, new_dst_subset):
        if self._is_data_src is not None:
            if self._is_data_src:
                self.other_subset = new_dst_subset
            else:
                self.subset = new_dst_subset
        else:
            self.other_subset = new_dst_subset

    def validate(self, sdfg, state):
        if self.data is not None and self.data not in sdfg.arrays:
            raise KeyError('Array "%s" not found in SDFG' % self.data)

    @property
    def free_symbols(self) -> Set[str]:
        """ Returns a set of symbols used in this edge's properties. """
        # Symbolic properties are in volume, and the two subsets
        result = set()
        result |= set(map(str, self.volume.free_symbols))
        if self.src_subset:
            result |= self.src_subset.free_symbols
        if self.dst_subset:
            result |= self.dst_subset.free_symbols
        return result

    def __label__(self, sdfg, state):
        """ Returns a string representation of the memlet for display in a
            graph.

            :param sdfg: The SDFG in which the memlet resides.
            :param state: An SDFGState object in which the memlet resides.
        """
        if self.data is None:
            return self._label(None)
        return self._label(sdfg.arrays[self.data].shape)

    def __str__(self):
        return self._label(None)

    def _label(self, shape):
        result = ''
        if self.data is not None:
            result = self.data

        if self.subset is None:
            return result

        num_elements = self.subset.num_elements()
        if self.dynamic:
            result += '(dyn) '
        elif self.volume != num_elements:
            result += '(%s) ' % SymbolicProperty.to_string(self.volume)
        arrayNotation = True
        try:
            if shape is not None and reduce(operator.mul, shape, 1) == 1:
                # Don't mention array if we're accessing a single element and it's zero
                if all(s == 0 for s in self.subset.min_element()):
                    arrayNotation = False
        except TypeError:
            # Will fail if trying to check the truth value of a sympy expr
            pass
        if arrayNotation:
            result += '[%s]' % str(self.subset)
        if self.wcr is not None and str(self.wcr) != '':
            # Autodetect reduction type
            redtype = detect_reduction_type(self.wcr)
            if redtype == dtypes.ReductionType.Custom:
                wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
            else:
                wcrstr = str(redtype)
                wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

            result += ' (CR: %s)' % wcrstr

        if self.other_subset is not None:
            result += ' -> [%s]' % str(self.other_subset)
        return result

    def __repr__(self):
        return "Memlet (" + self.__str__() + ")"
Code example #10
File: data.py Project: am-ivanov/dace
class Data(object):
    """ Data type descriptors that can be used as references to memory.
        Examples: Arrays, Streams, custom arrays (e.g., sparse matrices).
    """

    dtype = TypeClassProperty(default=dtypes.int32, choices=dtypes.Typeclasses)
    shape = ShapeProperty(default=[])
    transient = Property(dtype=bool, default=False)
    storage = EnumProperty(dtype=dtypes.StorageType, desc="Storage location", default=dtypes.StorageType.Default)
    lifetime = EnumProperty(dtype=dtypes.AllocationLifetime,
                            desc='Data allocation span',
                            default=dtypes.AllocationLifetime.Scope)
    location = DictProperty(key_type=str, value_type=str, desc='Full storage location identifier (e.g., rank, GPU ID)')
    debuginfo = DebugInfoProperty(allow_none=True)

    def __init__(self, dtype, shape, transient, storage, location, lifetime, debuginfo):
        self.dtype = dtype
        self.shape = shape
        self.transient = transient
        self.storage = storage
        self.location = location if location is not None else {}
        self.lifetime = lifetime
        self.debuginfo = debuginfo
        self._validate()

    def validate(self):
        """ Validate the correctness of this object.
            Raises an exception on error. """
        self._validate()

    # Validation of this class is in a separate function, so that this
    # class can call `_validate()` without calling the subclasses'
    # `validate` function.
    def _validate(self):
        if any(not isinstance(s, (int, symbolic.SymExpr, symbolic.symbol, symbolic.sympy.Basic)) for s in self.shape):
            raise TypeError('Shape must be a list or tuple of integer values or symbols')
        return True

    def to_json(self):
        attrs = serialize.all_properties_to_json(self)

        retdict = {"type": type(self).__name__, "attributes": attrs}

        return retdict

    @property
    def toplevel(self):
        return self.lifetime is not dtypes.AllocationLifetime.Scope

    def copy(self):
        raise RuntimeError('Data descriptors are unique and should not be copied')

    def is_equivalent(self, other):
        """ Check for equivalence (shape and type) of two data descriptors. """
        raise NotImplementedError

    def as_arg(self, with_types=True, for_call=False, name=None):
        """Returns a string for a C++ function signature (e.g., `int *A`). """
        raise NotImplementedError

    @property
    def free_symbols(self) -> Set[symbolic.SymbolicType]:
        """ Returns a set of undefined symbols in this data descriptor. """
        result = set()
        for s in self.shape:
            if isinstance(s, sp.Basic):
                result |= set(s.free_symbols)
        return result

    def __repr__(self):
        return 'Abstract Data Container, DO NOT USE'

    @property
    def veclen(self):
        return self.dtype.veclen if hasattr(self.dtype, "veclen") else 1

    @property
    def ctype(self):
        return self.dtype.ctype

    def strides_from_layout(
        self,
        *dimensions: int,
        alignment: symbolic.SymbolicType = 1,
        only_first_aligned: bool = False,
    ) -> Tuple[Tuple[symbolic.SymbolicType], symbolic.SymbolicType]:
        """
        Returns the absolute strides and total size of this data descriptor,
        according to the given dimension ordering and alignment.
        :param dimensions: A sequence of integers representing a permutation
                           of the descriptor's dimensions.
        :param alignment: Padding (in elements) at the end, ensuring stride
                          is a multiple of this number. 1 (default) means no
                          padding.
        :param only_first_aligned: If True, only the first dimension is padded
                                   with ``alignment``. Otherwise all dimensions
                                   are.
        :return: A 2-tuple of (tuple of strides, total size).
        """
        # Verify dimensions
        if tuple(sorted(dimensions)) != tuple(range(len(self.shape))):
            raise ValueError('Every dimension must be given and appear once.')
        # `== True` is deliberate: with symbolic alignment values the
        # comparison may be indeterminate rather than a Python bool.
        if (alignment < 1) == True or (alignment < 0) == True:
            raise ValueError('Invalid alignment value')

        strides = [1] * len(dimensions)
        total_size = 1
        first = True
        for dim in dimensions:
            strides[dim] = total_size
            if not only_first_aligned or first:
                dimsize = (((self.shape[dim] + alignment - 1) // alignment) * alignment)
            else:
                dimsize = self.shape[dim]
            total_size *= dimsize
            first = False

        return (tuple(strides), total_size)

    def set_strides_from_layout(self,
                                *dimensions: int,
                                alignment: symbolic.SymbolicType = 1,
                                only_first_aligned: bool = False):
        """
        Sets the absolute strides and total size of this data descriptor,
        according to the given dimension ordering and alignment.
        :param dimensions: A sequence of integers representing a permutation
                           of the descriptor's dimensions.
        :param alignment: Padding (in elements) at the end, ensuring stride
                          is a multiple of this number. 1 (default) means no
                          padding.
        :param only_first_aligned: If True, only the first dimension is padded
                                   with ``alignment``. Otherwise all dimensions
                                   are.
        """
        strides, totalsize = self.strides_from_layout(*dimensions,
                                                      alignment=alignment,
                                                      only_first_aligned=only_first_aligned)
        self.strides = strides
        self.total_size = totalsize
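To make the stride computation concrete, here is a self-contained recomputation of the strides_from_layout loop for a fixed shape, with no alignment padding (the shape and dimension order are illustrative):

shape = (2, 3, 4)
dimensions = (2, 1, 0)  # stride-1 dimension first, i.e., C (row-major) order
strides = [1] * len(shape)
total_size = 1
for dim in dimensions:
    strides[dim] = total_size
    total_size *= shape[dim]
print(tuple(strides), total_size)  # prints: (12, 4, 1) 24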
Code example #11
class Data(object):
    """ Data type descriptors that can be used as references to memory.
        Examples: Arrays, Streams, custom arrays (e.g., sparse matrices).
    """

    dtype = TypeClassProperty()
    shape = ShapeProperty()
    transient = Property(dtype=bool)
    storage = Property(dtype=dace.types.StorageType,
                       desc="Storage location",
                       enum=dace.types.StorageType,
                       default=dace.types.StorageType.Default,
                       from_string=lambda x: types.StorageType[x])
    location = Property(
        dtype=str,  # Dict[str, symbolic]
        desc='Full storage location identifier (e.g., rank, GPU ID)',
        default='')
    toplevel = Property(dtype=bool,
                        desc="Allocate array outside of state",
                        default=False)
    debuginfo = DebugInfoProperty()

    def __init__(self, dtype, shape, transient, storage, location, toplevel,
                 debuginfo):
        self.dtype = dtype
        self.shape = shape
        self.transient = transient
        self.storage = storage
        self.location = location
        self.toplevel = toplevel
        self.debuginfo = debuginfo
        self._validate()

    def validate(self):
        """ Validate the correctness of this object.
            Raises an exception on error. """
        self._validate()

    # Validation of this class is in a separate function, so that this
    # class can call `_validate()` without calling the subclasses'
    # `validate` function.
    def _validate(self):
        if any(not isinstance(s, (int, symbolic.SymExpr, symbolic.symbol,
                                  symbolic.sympy.Basic)) for s in self.shape):
            raise TypeError('Shape must be a list or tuple of integer values '
                            'or symbols')
        return True

    def copy(self):
        raise RuntimeError(
            'Data descriptors are unique and should not be copied')

    def is_equivalent(self, other):
        """ Check for equivalence (shape and type) of two data descriptors. """
        raise NotImplementedError

    def signature(self, with_types=True, for_call=False, name=None):
        """Returns a string for a C++ function signature (e.g., `int *A`). """
        raise NotImplementedError

    def __repr__(self):
        return 'Abstract Data Container, DO NOT USE'
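A self-contained sketch of the _validate shape check above: every extent must be an integer or a symbolic expression. Plain sympy (a dace dependency) stands in for dace.symbolic here:

import sympy as sp

def validate_shape(shape):
    if any(not isinstance(s, (int, sp.Basic)) for s in shape):
        raise TypeError('Shape must be a list or tuple of integer values '
                        'or symbols')
    return True

N = sp.Symbol('N')
print(validate_shape([2, N]))  # prints: True
# validate_shape([2.5]) would raise TypeError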
Code example #12
File: memlet.py Project: mratsim/dace
class Memlet(object):
    """ Data movement object. Represents the data, the subset moved, and the
        manner it is reindexed (`other_subset`) into the destination.
        If there are multiple conflicting writes, this object also specifies
        how they are resolved with a lambda function.
    """

    # Properties
    veclen = Property(dtype=int, desc="Vector length", default=1)
    num_accesses = SymbolicProperty(default=0)
    subset = SubsetProperty(default=subsets.Range([]))
    other_subset = SubsetProperty(allow_none=True)
    data = DataProperty()
    debuginfo = DebugInfoProperty()
    wcr = LambdaProperty(allow_none=True)
    wcr_conflict = Property(dtype=bool, default=True)
    allow_oob = Property(dtype=bool,
                         default=False,
                         desc='Bypass out-of-bounds validation')

    def __init__(self,
                 data,
                 num_accesses,
                 subset,
                 vector_length,
                 wcr=None,
                 other_subset=None,
                 debuginfo=None,
                 wcr_conflict=True):
        """ Constructs a Memlet.
            :param data: The data object or name to access. Note: this
                         parameter will soon be deprecated.
            :type data: Either a string of the data descriptor name or an
                        AccessNode.
            :param num_accesses: The number of times that the moved data
                                 will be subsequently accessed. If
                                 `dace.dtypes.DYNAMIC` (-1),
                                 designates that the number of accesses is
                                 unknown at compile time.
            :param subset: The subset of `data` that is going to be accessed.
            :param vector_length: The length of a single unit of access to
                                  the data (used for vectorization
                                  optimizations).
            :param wcr: A lambda function specifying how write-conflicts
                        are resolved. The lambda function receives two
                        elements, the `current` value and the `new` value,
                        and returns the value after resolution. For example,
                        summation is `lambda cur, new: cur + new`.
            :param other_subset: The reindexing of `subset` on the other
                                 connected data.
            :param debuginfo: Source-code information (e.g., line, file)
                              used for debugging.
            :param wcr_conflict: If False, forces non-locked conflict
                                 resolution when generating code. The default
                                 is to let the code generator infer this
                                 information from the SDFG.
        """

        # Properties
        self.num_accesses = num_accesses  # type: sympy.expr.Expr
        self.subset = subset  # type: subsets.Subset
        self.veclen = vector_length  # type: int
        if hasattr(data, 'data'):
            data = data.data
        self.data = data  # type: str

        # Annotates memlet with _how_ writing is performed in case of conflict
        self.wcr = wcr
        self.wcr_conflict = wcr_conflict

        # The subset of the other endpoint we are copying from/to (note:
        # carries the dimensionality of the other endpoint too!)
        self.other_subset = other_subset

        self.debuginfo = debuginfo

    def to_json(self, parent_graph=None):
        attrs = dace.serialize.all_properties_to_json(self)

        retdict = {"type": "Memlet", "attributes": attrs}

        return retdict

    @staticmethod
    def from_json(json_obj, context=None):
        if json_obj['type'] != "Memlet":
            raise TypeError("Invalid data type")

        # Create dummy object
        ret = Memlet("", dace.dtypes.DYNAMIC, None, 1)
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)

        return ret

    @staticmethod
    def simple(data,
               subset_str,
               veclen=1,
               wcr_str=None,
               other_subset_str=None,
               wcr_conflict=True,
               num_accesses=None,
               debuginfo=None):
        """ Constructs a Memlet from string-based expressions.
            :param data: The data object or name to access. B{Note:} this
                         parameter will soon be deprecated.
            :type data: Either a string of the data descriptor name or an
                        AccessNode.
            :param subset_str: The subset of `data` that is going to
                               be accessed in string format. Example: '0:N'.
            :param veclen: The length of a single unit of access to
                           the data (used for vectorization optimizations).
            :param wcr_str: A lambda function (as a string) specifying
                            how write conflicts are resolved. The lambda
                            receives two arguments, the `current` value
                            and the `new` value, and returns the value
                            after resolution. For example, summation is
                            `'lambda cur, new: cur + new'`.
            :param other_subset_str: The reindexing of `subset` on the other
                                     connected data (as a string).
            :param wcr_conflict: If False, forces non-locked conflict
                                 resolution when generating code. The default
                                 is to let the code generator infer this
                                 information from the SDFG.
            :param num_accesses: The number of times that the moved data
                                 will be subsequently accessed. If
                                 `dace.dtypes.DYNAMIC` (-1),
                                 designates that the number of accesses is
                                 unknown at compile time.
            :param debuginfo: Source-code information (e.g., line, file)
                              used for debugging.

        """
        subset = SubsetProperty.from_string(subset_str)
        if num_accesses is not None:
            na = num_accesses
        else:
            na = subset.num_elements()

        if wcr_str is not None:
            wcr = LambdaProperty.from_string(wcr_str)
        else:
            wcr = None

        if other_subset_str is not None:
            other_subset = SubsetProperty.from_string(other_subset_str)
        else:
            other_subset = None

        # If it is an access node or another memlet
        if hasattr(data, 'data'):
            data = data.data

        return Memlet(data,
                      na,
                      subset,
                      veclen,
                      wcr=wcr,
                      other_subset=other_subset,
                      wcr_conflict=wcr_conflict,
                      debuginfo=debuginfo)

    @staticmethod
    def from_array(dataname, datadesc, wcr=None):
        """ Constructs a Memlet that transfers an entire array's contents.
            :param dataname: The name of the data descriptor in the SDFG.
            :param datadesc: The data descriptor object.
            :param wcr: The conflict resolution lambda.
            :type datadesc: Data.
        """
        range = subsets.Range.from_array(datadesc)
        return Memlet(dataname, range.num_elements(), range, 1, wcr=wcr)

    def __hash__(self):
        return hash((self.data, self.num_accesses, self.subset, self.veclen,
                     str(self.wcr), self.other_subset))

    def __eq__(self, other):
        return all([
            self.data == other.data, self.num_accesses == other.num_accesses,
            self.subset == other.subset, self.veclen == other.veclen,
            self.wcr == other.wcr, self.other_subset == other.other_subset
        ])

    def num_elements(self):
        """ Returns the number of elements in the Memlet subset. """
        return self.subset.num_elements()

    def bounding_box_size(self):
        """ Returns a per-dimension upper bound on the maximum number of
            elements in each dimension.

            This bound will be tight in the case of Range.
        """
        return self.subset.bounding_box_size()

    def validate(self, sdfg, state):
        if self.data is not None and self.data not in sdfg.arrays:
            raise KeyError('Array "%s" not found in SDFG' % self.data)

    @property
    def free_symbols(self) -> Set[str]:
        """ Returns a set of symbols used in this edge's properties. """
        # Symbolic properties are in num_accesses, and the two subsets
        result = set()
        result |= set(map(str, self.num_accesses.free_symbols))
        if self.subset:
            result |= self.subset.free_symbols
        if self.other_subset:
            result |= self.other_subset.free_symbols
        return result

    def __label__(self, sdfg, state):
        """ Returns a string representation of the memlet for display in a
            graph.

            :param sdfg: The SDFG in which the memlet resides.
            :param state: An SDFGState object in which the memlet resides.
        """
        if self.data is None:
            return self._label(None)
        return self._label(sdfg.arrays[self.data].shape)

    def __str__(self):
        return self._label(None)

    def _label(self, shape):
        result = ''
        if self.data is not None:
            result = self.data

        if self.subset is None:
            return result

        num_elements = self.subset.num_elements()
        if self.num_accesses != num_elements:
            if self.num_accesses == -1:
                result += '(dyn) '
            else:
                result += '(%s) ' % SymbolicProperty.to_string(
                    self.num_accesses)
        arrayNotation = True
        try:
            if shape is not None and reduce(operator.mul, shape, 1) == 1:
                # Omit the subset when accessing the single element at index zero
                if all(s == 0 for s in self.subset.min_element()):
                    arrayNotation = False
        except TypeError:
            # Will fail if trying to check the truth value of a sympy expr
            pass
        if arrayNotation:
            result += '[%s]' % str(self.subset)
        if self.wcr is not None and str(self.wcr) != '':
            # Autodetect reduction type
            redtype = detect_reduction_type(self.wcr)
            if redtype == dtypes.ReductionType.Custom:
                wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
            else:
                wcrstr = str(redtype)
                wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

            result += ' (CR: %s)' % wcrstr

        if self.other_subset is not None:
            result += ' -> [%s]' % str(self.other_subset)
        return result

    def __repr__(self):
        return "Memlet (" + self.__str__() + ")"
Code Example #13
File: nodes.py Project: HappySky2046/dace
class Reduce(Node):
    """ An SDFG node that reduces an N-dimensional array to an
        (N-k)-dimensional array, with a list of axes to reduce and
        a reduction binary function. """

    # Properties
    axes = ListProperty(element_type=int, allow_none=True)
    wcr = LambdaProperty()
    identity = Property(dtype=object, allow_none=True)
    schedule = Property(dtype=dtypes.ScheduleType,
                        desc="Reduction execution policy",
                        choices=dtypes.ScheduleType,
                        from_string=lambda x: dtypes.ScheduleType[x])
    debuginfo = DebugInfoProperty()

    instrument = Property(
        choices=dtypes.InstrumentationType,
        desc="Measure execution statistics with given method",
        default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 wcr,
                 axes,
                 wcr_identity=None,
                 schedule=dtypes.ScheduleType.Default,
                 debuginfo=None):
        super(Reduce, self).__init__()
        self.wcr = wcr  # type: ast._Lambda
        self.axes = axes
        self.identity = wcr_identity
        self.schedule = schedule
        self.debuginfo = debuginfo

    def draw_node(self, sdfg, state):
        return dot.draw_node(sdfg, state, self, shape="invtriangle")

    @staticmethod
    def from_json(json_obj, context=None):
        ret = Reduce("(lambda a, b: (a + b))", None)
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    def __str__(self):
        # Autodetect reduction type
        redtype = detect_reduction_type(self.wcr)
        if redtype == dtypes.ReductionType.Custom:
            wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
        else:
            wcrstr = str(redtype)
            wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

        return 'Op: {op}, Axes: {axes}'.format(
            axes=('all' if self.axes is None else str(self.axes)), op=wcrstr)

    def __label__(self, sdfg, state):
        # Autodetect reduction type
        redtype = detect_reduction_type(self.wcr)
        if redtype == dtypes.ReductionType.Custom:
            wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
        else:
            wcrstr = str(redtype)
            wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

        return 'Op: {op}\nAxes: {axes}'.format(
            axes=('all' if self.axes is None else str(self.axes)), op=wcrstr)
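
A construction sketch matching the signature above; `detect_reduction_type` recognizes the summation lambda, so `__str__` prints a named reduction rather than the raw lambda body.

# Sum-reduce over axis 1, keeping the remaining dimensions.
red = Reduce('lambda a, b: a + b', axes=[1], wcr_identity=0)
print(red)  # "Op: Sum, Axes: [1]"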
Code Example #14
File: nodes.py Project: HappySky2046/dace
class NestedSDFG(CodeNode):
    """ An SDFG state node that contains an SDFG of its own, runnable using
        the data dependencies specified using its connectors.

        It is encouraged to use nested SDFGs instead of coarse-grained tasklets
        since they are analyzable with respect to transformations.

        @note: A nested SDFG cannot create recursion (one of its parent SDFGs).
    """

    label = Property(dtype=str, desc="Name of the SDFG")
    # NOTE: We cannot use SDFG as the type because of an import loop
    sdfg = SDFGReferenceProperty(dtype=graph.OrderedDiGraph, desc="The SDFG")
    schedule = Property(dtype=dtypes.ScheduleType,
                        desc="SDFG schedule",
                        choices=dtypes.ScheduleType,
                        from_string=lambda x: dtypes.ScheduleType[x])
    location = Property(dtype=str, desc="SDFG execution location descriptor")
    debuginfo = DebugInfoProperty()
    is_collapsed = Property(dtype=bool,
                            desc="Show this node/scope/state as collapsed",
                            default=False)

    instrument = Property(
        choices=dtypes.InstrumentationType,
        desc="Measure execution statistics with given method",
        default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 label,
                 sdfg,
                 inputs: Set[str],
                 outputs: Set[str],
                 schedule=dtypes.ScheduleType.Default,
                 location="-1",
                 debuginfo=None):
        super(NestedSDFG, self).__init__(inputs, outputs)

        # Properties
        self.label = label
        self.sdfg = sdfg
        self.schedule = schedule
        self.location = location
        self.debuginfo = debuginfo

    @staticmethod
    def from_json(json_obj, context=None):
        from dace import SDFG  # Avoid import loop

        # We have to load the SDFG first.
        ret = NestedSDFG("nolabel", SDFG('nosdfg'), set(), set())

        dace.serialize.set_properties_from_json(ret, json_obj, context)

        if context and 'sdfg_state' in context:
            ret.sdfg.parent = context['sdfg_state']
        if context and 'sdfg' in context:
            ret.sdfg.parent_sdfg = context['sdfg']

        return ret

    def draw_node(self, sdfg, graph):
        return dot.draw_node(sdfg, graph, self, shape="doubleoctagon")

    def __str__(self):
        if not self.label:
            return "SDFG"
        else:
            return self.label

    def validate(self, sdfg, state):
        if not data.validate_name(self.label):
            raise NameError('Invalid nested SDFG name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not data.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not data.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)

        # Recursively validate nested SDFG
        self.sdfg.validate()
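
A hedged sketch of embedding an inner SDFG with this node type; the connector names `x` and `y` are hypothetical and would have to match non-transient arrays of the inner SDFG for validation to pass.

from dace import SDFG  # assumed import

inner = SDFG('inner')  # inner SDFG, normally populated with states
node = NestedSDFG('work', inner, inputs={'x'}, outputs={'y'})
print(node)  # "work"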
Code Example #15
File: nodes.py Project: HappySky2046/dace
class Tasklet(CodeNode):
    """ A node that contains a tasklet: a functional computation procedure
        that can only access external data specified using connectors.

        Tasklets may be implemented in Python, C++, or any supported
        language by the code generator.
    """

    label = Property(dtype=str, desc="Name of the tasklet")
    code = CodeProperty(desc="Tasklet code")
    code_global = CodeProperty(
        desc="Global scope code needed for tasklet execution", default="")
    code_init = CodeProperty(
        desc="Extra code that is called on DaCe runtime initialization",
        default="")
    code_exit = CodeProperty(
        desc="Extra code that is called on DaCe runtime cleanup", default="")
    location = Property(dtype=str,
                        desc="Tasklet execution location descriptor")
    debuginfo = DebugInfoProperty()

    instrument = Property(
        choices=dtypes.InstrumentationType,
        desc="Measure execution statistics with given method",
        default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 label,
                 inputs=None,
                 outputs=None,
                 code="",
                 language=dtypes.Language.Python,
                 code_global="",
                 code_init="",
                 code_exit="",
                 location="-1",
                 debuginfo=None):
        super(Tasklet, self).__init__(inputs or set(), outputs or set())

        # Properties
        self.label = label
        # The language is stored alongside the code in the CodeProperty
        self.code = {'code_or_block': code, 'language': language}

        self.location = location
        self.code_global = {'code_or_block': code_global, 'language': language}
        self.code_init = {'code_or_block': code_init, 'language': language}
        self.code_exit = {'code_or_block': code_exit, 'language': language}
        self.debuginfo = debuginfo

    @property
    def language(self):
        return self._code['language']

    @staticmethod
    def from_json(json_obj, context=None):
        ret = Tasklet("dummylabel")
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    @property
    def name(self):
        return self._label

    def draw_node(self, sdfg, graph):
        return dot.draw_node(sdfg, graph, self, shape="octagon")

    def validate(self, sdfg, state):
        if not data.validate_name(self.label):
            raise NameError('Invalid tasklet name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not data.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not data.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)

    def __str__(self):
        if not self.label:
            return "--Empty--"
        else:
            return self.label
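
A minimal sketch using the constructor above; note how the `code` string and `language` are combined into the CodeProperty dictionary, which the `language` property reads back.

# A Python tasklet computing b = a + 1 through its two connectors.
t = Tasklet('add_one', inputs={'a'}, outputs={'b'}, code='b = a + 1')
print(t.name)      # "add_one"
print(t.language)  # Language.Python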
Code Example #16
File: nodes.py Project: targetsm/dace
class NestedSDFG(CodeNode):
    """ An SDFG state node that contains an SDFG of its own, runnable using
        the data dependencies specified using its connectors.

        It is encouraged to use nested SDFGs instead of coarse-grained tasklets
        since they are analyzable with respect to transformations.

        @note: A nested SDFG cannot create recursion (one of its parent SDFGs).
    """

    # NOTE: We cannot use SDFG as the type because of an import loop
    sdfg = SDFGReferenceProperty(desc="The SDFG", allow_none=True)
    schedule = Property(dtype=dtypes.ScheduleType,
                        desc="SDFG schedule",
                        allow_none=True,
                        choices=dtypes.ScheduleType,
                        from_string=lambda x: dtypes.ScheduleType[x],
                        default=dtypes.ScheduleType.Default)
    symbol_mapping = DictProperty(
        key_type=str,
        value_type=dace.symbolic.pystr_to_symbolic,
        desc="Mapping between internal symbols and their values, expressed as "
        "symbolic expressions")
    debuginfo = DebugInfoProperty()
    is_collapsed = Property(dtype=bool,
                            desc="Show this node/scope/state as collapsed",
                            default=False)

    instrument = Property(choices=dtypes.InstrumentationType,
                          desc="Measure execution statistics with given method",
                          default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 label,
                 sdfg,
                 inputs: Set[str],
                 outputs: Set[str],
                 symbol_mapping: Dict[str, Any] = None,
                 schedule=dtypes.ScheduleType.Default,
                 location=None,
                 debuginfo=None):
        super(NestedSDFG, self).__init__(label, location, inputs, outputs)

        # Properties
        self.sdfg = sdfg
        self.symbol_mapping = symbol_mapping or {}
        self.schedule = schedule
        self.debuginfo = debuginfo

    @staticmethod
    def from_json(json_obj, context=None):
        from dace import SDFG  # Avoid import loop

        # We have to load the SDFG first.
        ret = NestedSDFG("nolabel", SDFG('nosdfg'), {}, {})

        dace.serialize.set_properties_from_json(ret, json_obj, context)

        if context and 'sdfg_state' in context:
            ret.sdfg.parent = context['sdfg_state']
        if context and 'sdfg' in context:
            ret.sdfg.parent_sdfg = context['sdfg']

        ret.sdfg.parent_nsdfg_node = ret

        ret.sdfg.update_sdfg_list([])

        return ret

    @property
    def free_symbols(self) -> Set[str]:
        return set().union(
            *(map(str,
                  pystr_to_symbolic(v).free_symbols)
              for v in self.symbol_mapping.values()),
            *(map(str,
                  pystr_to_symbolic(v).free_symbols)
              for v in self.location.values()))

    def infer_connector_types(self, sdfg, state):
        # Avoid import loop
        from dace.sdfg.infer_types import infer_connector_types
        # Infer internal connector types
        infer_connector_types(self.sdfg)

    def __str__(self):
        if not self.label:
            return "SDFG"
        else:
            return self.label

    def validate(self, sdfg, state):
        if not dtypes.validate_name(self.label):
            raise NameError('Invalid nested SDFG name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not dtypes.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not dtypes.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)
        connectors = self.in_connectors.keys() | self.out_connectors.keys()
        for dname, desc in self.sdfg.arrays.items():
            # TODO(later): Disallow scalars without access nodes (so that this
            #              check passes for them too).
            if isinstance(desc, data.Scalar):
                continue
            if not desc.transient and dname not in connectors:
                raise NameError('Data descriptor "%s" not found in nested '
                                'SDFG connectors' % dname)
            if dname in connectors and desc.transient:
                raise NameError(
                    '"%s" is a connector but its corresponding array is transient'
                    % dname)

        # Validate undefined symbols
        symbols = set(k for k in self.sdfg.free_symbols if k not in connectors)
        missing_symbols = [s for s in symbols if s not in self.symbol_mapping]
        if missing_symbols:
            raise ValueError('Missing symbols on nested SDFG: %s' %
                             (missing_symbols))

        # Recursively validate nested SDFG
        self.sdfg.validate()
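
A sketch of the symbol-mapping mechanism validated above; the inner symbol `N` and outer symbol `M` are hypothetical, and `free_symbols` assumes `location` defaults to an empty dictionary.

from dace import SDFG  # assumed import

inner = SDFG('inner')
node = NestedSDFG('work', inner, inputs={'x'}, outputs={'y'},
                  symbol_mapping={'N': 'M / 2'})
# free_symbols collects symbols from the mapping values, here {'M'}.
print(node.free_symbols)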
Code Example #17
File: nodes.py Project: targetsm/dace
class Map(object):
    """ A Map is a two-node representation of parametric graphs, containing
        an integer set by which the contents (nodes dominated by an entry
        node and post-dominated by an exit node) are replicated.

        Maps contain a `schedule` property, which specifies how the scope
        should be scheduled (execution order). Code generators can use the
        schedule property to generate appropriate code, e.g., GPU kernels.
    """

    # List of (editable) properties
    label = Property(dtype=str, desc="Label of the map")
    params = ListProperty(element_type=str, desc="Mapped parameters")
    range = RangeProperty(desc="Ranges of map parameters",
                          default=sbs.Range([]))
    schedule = Property(dtype=dtypes.ScheduleType,
                        desc="Map schedule",
                        choices=dtypes.ScheduleType,
                        from_string=lambda x: dtypes.ScheduleType[x],
                        default=dtypes.ScheduleType.Default)
    unroll = Property(dtype=bool, desc="Map unrolling")
    collapse = Property(dtype=int,
                        default=1,
                        desc="How many dimensions to"
                        " collapse into the parallel range")
    debuginfo = DebugInfoProperty()
    is_collapsed = Property(dtype=bool,
                            desc="Show this node/scope/state as collapsed",
                            default=False)

    instrument = Property(choices=dtypes.InstrumentationType,
                          desc="Measure execution statistics with given method",
                          default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 label,
                 params,
                 ndrange,
                 schedule=dtypes.ScheduleType.Default,
                 unroll=False,
                 collapse=1,
                 fence_instrumentation=False,
                 debuginfo=None):
        super(Map, self).__init__()

        # Assign properties
        self.label = label
        self.schedule = schedule
        self.unroll = unroll
        self.collapse = collapse
        self.params = params
        self.range = ndrange
        self.debuginfo = debuginfo
        self._fence_instrumentation = fence_instrumentation

    def __str__(self):
        return self.label + "[" + ", ".join([
            "{}={}".format(i, r) for i, r in zip(
                self._params, [sbs.Range.dim_to_string(d) for d in self._range])
        ]) + "]"

    def validate(self, sdfg, state, node):
        if not dtypes.validate_name(self.label):
            raise NameError('Invalid map name "%s"' % self.label)

    def get_param_num(self):
        """ Returns the number of map dimension parameters/symbols. """
        return len(self.params)
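
A construction sketch for this class; `Range.from_string` is assumed to be available on the subsets module (imported as `sbs` above) for parsing the textual N-D range.

# A 2-D map over i in [0, N) and j in [0, M).
m = Map('compute', ['i', 'j'], sbs.Range.from_string('0:N, 0:M'))
print(m.get_param_num())  # 2
print(m)  # e.g. "compute[i=0:N, j=0:M]" (exact range formatting may vary)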
Code Example #18
class LibraryNode(CodeNode):

    name = Property(dtype=str, desc="Name of node")
    implementation = LibraryImplementationProperty(
        dtype=str,
        allow_none=True,
        desc=("Which implementation this library node will expand into."
              "Must match a key in the list of possible implementations."))
    schedule = EnumProperty(
        dtype=dtypes.ScheduleType,
        desc="If set, determines the default device mapping of "
        "the node upon expansion, if expanded to a nested SDFG.",
        default=dtypes.ScheduleType.Default)
    debuginfo = DebugInfoProperty()

    def __init__(self, name, *args, schedule=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.name = name
        self.label = name
        self.schedule = schedule or dtypes.ScheduleType.Default

    # Overrides subclasses to return LibraryNode as their JSON type
    @property
    def __jsontype__(self):
        return 'LibraryNode'

    def to_json(self, parent):
        jsonobj = super().to_json(parent)
        jsonobj['classpath'] = full_class_path(self)
        return jsonobj

    @classmethod
    def from_json(cls, json_obj, context=None):
        if cls == LibraryNode:
            clazz = pydoc.locate(json_obj['classpath'])
            if clazz is None:
                return UnregisteredLibraryNode.from_json(json_obj, context)
            return clazz.from_json(json_obj, context)
        else:  # Subclasses are actual library nodes
            ret = cls(json_obj['attributes']['name'])
            dace.serialize.set_properties_from_json(ret,
                                                    json_obj,
                                                    context=context)
            return ret

    def expand(self, sdfg, state, *args, **kwargs) -> str:
        """ Create and perform the expansion transformation for this library
            node.
            :return: the name of the expanded implementation
        """
        implementation = self.implementation
        library_name = getattr(type(self), '_dace_library_name', '')
        try:
            if library_name:
                config_implementation = Config.get("library", library_name,
                                                   "default_implementation")
            else:
                config_implementation = None
        except KeyError:
            # Non-standard libraries are not defined in the config schema, and
            # thus might not exist in the config.
            config_implementation = None
        if config_implementation is not None:
            try:
                config_override = Config.get("library", library_name,
                                             "override")
                if config_override and implementation in self.implementations:
                    if implementation is not None:
                        warnings.warn(
                            "Overriding explicitly specified "
                            "implementation {} for {} with {}.".format(
                                implementation, self.label,
                                config_implementation))
                    implementation = config_implementation
            except KeyError:
                config_override = False
        # If not explicitly set, try the node default
        if implementation is None:
            implementation = type(self).default_implementation
            # If no node default, try library default
            if implementation is None:
                import dace.library  # Avoid cyclic dependency
                lib = dace.library._DACE_REGISTERED_LIBRARIES[type(
                    self)._dace_library_name]
                implementation = lib.default_implementation
                # Try the default specified in the config
                if implementation is None:
                    implementation = config_implementation
                    # Otherwise we don't know how to expand
                    if implementation is None:
                        raise ValueError("No implementation or default "
                                         "implementation specified.")
        if implementation not in self.implementations.keys():
            raise KeyError("Unknown implementation for node {}: {}".format(
                type(self).__name__, implementation))
        transformation_type = type(self).implementations[implementation]
        sdfg_id = sdfg.sdfg_id
        state_id = sdfg.nodes().index(state)
        subgraph = {transformation_type._match_node: state.node_id(self)}
        transformation = transformation_type(sdfg, sdfg_id, state_id, subgraph,
                                             0)
        if not transformation.can_be_applied(state, 0, sdfg):
            raise RuntimeError("Library node "
                               "expansion applicability check failed.")
        sdfg.append_transformation(transformation)
        transformation.apply(state, sdfg, *args, **kwargs)
        return implementation

    @classmethod
    def register_implementation(cls, name, transformation_type):
        """Register an implementation to belong to this library node type."""
        cls.implementations[name] = transformation_type
        transformation_type._match_node = cls
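
A sketch of the registration mechanism: `register_implementation` links an expansion transformation class to the node type and back via `_match_node`. Both `ExpandMyNodePure` and `MyLibraryNode` are hypothetical stand-ins.

class ExpandMyNodePure:  # stand-in for an expansion transformation type
    pass

class MyLibraryNode(LibraryNode):  # hypothetical library node subclass
    implementations = {}
    default_implementation = 'pure'

MyLibraryNode.register_implementation('pure', ExpandMyNodePure)
assert MyLibraryNode.implementations['pure'] is ExpandMyNodePure
assert ExpandMyNodePure._match_node is MyLibraryNode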
Code Example #19
File: nodes.py Project: cpenny42/dace
class Tasklet(CodeNode):
    """ A node that contains a tasklet: a functional computation procedure
        that can only access external data specified using connectors. 
        
        Tasklets may be implemented in Python, C++, or any supported 
        language by the code generator. 
    """

    label = Property(dtype=str, desc="Name of the tasklet")
    language = Property(enum=types.Language, default=types.Language.Python)
    code = CodeProperty(desc="Tasklet code")
    code_global = CodeProperty(
        desc="Global scope code needed for tasklet execution", default="")
    code_init = CodeProperty(
        desc="Extra code that is called on DaCe runtime initialization",
        default="")
    code_exit = CodeProperty(
        desc="Extra code that is called on DaCe runtime cleanup", default="")
    location = Property(dtype=str,
                        desc="Tasklet execution location descriptor")
    debuginfo = DebugInfoProperty()

    def __init__(self,
                 label,
                 inputs=None,
                 outputs=None,
                 code="",
                 language=types.Language.Python,
                 code_global="",
                 code_init="",
                 code_exit="",
                 location="-1",
                 debuginfo=None):
        # Avoid shared mutable default arguments for the connector sets
        super(Tasklet, self).__init__(inputs or set(), outputs or set())

        # Properties
        self.label = label
        self.language = language
        self.code = code
        self.location = location
        self.code_global = code_global
        self.code_init = code_init
        self.code_exit = code_exit
        self.debuginfo = debuginfo

    @property
    def name(self):
        return self._label

    def draw_node(self, sdfg, graph):
        return dot.draw_node(sdfg, graph, self, shape="octagon")

    def validate(self, sdfg, state):
        if not data.validate_name(self.label):
            raise NameError('Invalid tasklet name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not data.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not data.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)

    def __str__(self):
        if not self.label:
            return "--Empty--"
        else:
            return self.label
Code Example #20
class Tasklet(CodeNode):
    """ A node that contains a tasklet: a functional computation procedure
        that can only access external data specified using connectors.

        Tasklets may be implemented in Python, C++, or any supported
        language by the code generator.
    """

    code = CodeProperty(desc="Tasklet code", default=CodeBlock(""))
    state_fields = ListProperty(
        element_type=str, desc="Fields that are added to the global state")
    code_global = CodeProperty(
        desc="Global scope code needed for tasklet execution",
        default=CodeBlock("", dtypes.Language.CPP))
    code_init = CodeProperty(
        desc="Extra code that is called on DaCe runtime initialization",
        default=CodeBlock("", dtypes.Language.CPP))
    code_exit = CodeProperty(
        desc="Extra code that is called on DaCe runtime cleanup",
        default=CodeBlock("", dtypes.Language.CPP))
    debuginfo = DebugInfoProperty()

    instrument = EnumProperty(
        dtype=dtypes.InstrumentationType,
        desc="Measure execution statistics with given method",
        default=dtypes.InstrumentationType.No_Instrumentation)

    def __init__(self,
                 label,
                 inputs=None,
                 outputs=None,
                 code="",
                 language=dtypes.Language.Python,
                 state_fields=None,
                 code_global="",
                 code_init="",
                 code_exit="",
                 location=None,
                 debuginfo=None):
        super(Tasklet, self).__init__(label, location, inputs, outputs)

        self.code = CodeBlock(code, language)

        self.state_fields = state_fields or []
        self.code_global = CodeBlock(code_global, dtypes.Language.CPP)
        self.code_init = CodeBlock(code_init, dtypes.Language.CPP)
        self.code_exit = CodeBlock(code_exit, dtypes.Language.CPP)
        self.debuginfo = debuginfo

    @property
    def language(self):
        return self.code.language

    @staticmethod
    def from_json(json_obj, context=None):
        ret = Tasklet("dummylabel")
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    @property
    def name(self):
        return self._label

    def validate(self, sdfg, state):
        if not dtypes.validate_name(self.label):
            raise NameError('Invalid tasklet name "%s"' % self.label)
        for in_conn in self.in_connectors:
            if not dtypes.validate_name(in_conn):
                raise NameError('Invalid input connector "%s"' % in_conn)
        for out_conn in self.out_connectors:
            if not dtypes.validate_name(out_conn):
                raise NameError('Invalid output connector "%s"' % out_conn)

    @property
    def free_symbols(self) -> Set[str]:
        return self.code.get_free_symbols(self.in_connectors.keys()
                                          | self.out_connectors.keys())

    def infer_connector_types(self, sdfg, state):
        # If a MLIR tasklet, simply read out the types (it's explicit)
        if self.code.language == dtypes.Language.MLIR:
            # Inline import because mlir.utils depends on pyMLIR which may not be installed
            # Doesn't cause crashes due to missing pyMLIR if a MLIR tasklet is not present
            from dace.codegen.targets.mlir import utils

            mlir_ast = utils.get_ast(self.code.code)
            mlir_is_generic = utils.is_generic(mlir_ast)
            mlir_entry_func = utils.get_entry_func(mlir_ast, mlir_is_generic)

            mlir_result_type = utils.get_entry_result_type(
                mlir_entry_func, mlir_is_generic)
            mlir_out_name = next(iter(self.out_connectors.keys()))

            if self.out_connectors[
                    mlir_out_name] is None or self.out_connectors[
                        mlir_out_name].ctype == "void":
                self.out_connectors[mlir_out_name] = utils.get_dace_type(
                    mlir_result_type)
            elif self.out_connectors[mlir_out_name] != utils.get_dace_type(
                    mlir_result_type):
                warnings.warn(
                    "Type mismatch between MLIR tasklet out connector and MLIR code"
                )

            for mlir_arg in utils.get_entry_args(mlir_entry_func,
                                                 mlir_is_generic):
                if self.in_connectors[
                        mlir_arg[0]] is None or self.in_connectors[
                            mlir_arg[0]].ctype == "void":
                    self.in_connectors[mlir_arg[0]] = utils.get_dace_type(
                        mlir_arg[1])
                elif self.in_connectors[mlir_arg[0]] != utils.get_dace_type(
                        mlir_arg[1]):
                    warnings.warn(
                        "Type mismatch between MLIR tasklet in connector and MLIR code"
                    )

            return

        # If a Python tasklet, use type inference to figure out all None output
        # connectors
        if all(cval.type is not None for cval in self.out_connectors.values()):
            return
        if self.code.language != dtypes.Language.Python:
            return

        if any(cval.type is None for cval in self.in_connectors.values()):
            raise TypeError('Cannot infer output connectors of tasklet "%s", '
                            'not all input connectors have types' % str(self))

        # Avoid import loop
        from dace.codegen.tools.type_inference import infer_types

        # Get symbols defined at beginning of node, and infer all types in
        # tasklet
        syms = state.symbols_defined_at(self)
        syms.update(self.in_connectors)
        new_syms = infer_types(self.code.code, syms)
        for cname, oconn in self.out_connectors.items():
            if oconn.type is None:
                if cname not in new_syms:
                    raise TypeError('Cannot infer type of tasklet %s output '
                                    '"%s", please specify manually.' %
                                    (self.label, cname))
                self.out_connectors[cname] = new_syms[cname]

    def __str__(self):
        if not self.label:
            return "--Empty--"
        else:
            return self.label
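
A sketch exercising the CodeBlock-based constructor above; `free_symbols` delegates to the code object, so a tasklet whose code only references connectors reports none.

t = Tasklet('axpy', inputs={'a', 'x', 'y'}, outputs={'out'},
            code='out = a * x + y')
print(t.language)      # Language.Python
print(t.free_symbols)  # set(): every name in the code is a connector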
Code Example #21
File: nodes.py Project: cpenny42/dace
class Map(object):
    """ A Map is a two-node representation of parametric graphs, containing
        an integer set by which the contents (nodes dominated by an entry 
        node and post-dominated by an exit node) are replicated.
        
        Maps contain a `schedule` property, which specifies how the scope
        should be scheduled (execution order). Code generators can use the
        schedule property to generate appropriate code, e.g., GPU kernels.
    """
    from dace.codegen.instrumentation.perfsettings import PerfSettings

    # List of (editable) properties
    label = Property(dtype=str, desc="Label of the map")
    params = ParamsProperty(desc="Mapped parameters")
    range = RangeProperty(desc="Ranges of map parameters")
    #   order = OrderProperty(desc="Order of map dimensions", unmapped=True)
    schedule = Property(dtype=types.ScheduleType,
                        desc="Map schedule",
                        enum=types.ScheduleType,
                        from_string=lambda x: types.ScheduleType[x])
    is_async = Property(dtype=bool, desc="Map asynchronous evaluation")
    unroll = Property(dtype=bool, desc="Map unrolling")
    flatten = Property(dtype=bool, desc="Map loop flattening")
    fence_instrumentation = Property(
        dtype=bool, desc="Disable instrumentation in all subnodes")
    papi_counters = Property(dtype=list,
                             desc="List of PAPI counter preset identifiers.",
                             default=PerfSettings.perf_default_papi_counters())
    debuginfo = DebugInfoProperty()
    is_collapsed = Property(dtype=bool,
                            desc="Show this node/scope/state as collapsed",
                            default=False)

    # We cannot have multiple consecutive PAPI start/stops inside the same
    # thread. The following variable is used to recognize the map that
    # started the counters.
    _has_papi_counters = False
    _can_be_supersection_start = True  # We must have supersections synchronized.

    def __init__(self,
                 label,
                 params,
                 ndrange,
                 schedule=types.ScheduleType.Default,
                 unroll=False,
                 is_async=False,
                 flatten=False,
                 fence_instrumentation=False,
                 debuginfo=None):
        super(Map, self).__init__()

        # Assign properties
        self.label = label
        self.schedule = schedule
        self.unroll = unroll
        self.is_async = is_async
        self.flatten = flatten
        self.params = params
        self.range = ndrange
        self.debuginfo = debuginfo
        self._fence_instrumentation = fence_instrumentation

    def __str__(self):
        return self.label + "[" + ", ".join([
            "{}={}".format(i, r)
            for i, r in zip(self._params,
                            [sbs.Range.dim_to_string(d) for d in self._range])
        ]) + "]"

    def validate(self, sdfg, state, node):
        if not data.validate_name(self.label):
            raise NameError('Invalid map name "%s"' % self.label)

    def get_param_num(self):
        """ Returns the number of map dimension parameters/symbols. """
        return len(self.params)
Code Example #22
class Memlet(object):
    """ Data movement object. Represents the data, the subset moved, and the
        manner it is reindexed (`other_subset`) into the destination.
        If there are multiple conflicting writes, this object also specifies
        how they are resolved with a lambda function.
    """

    # Properties
    veclen = Property(dtype=int, desc="Vector length")
    num_accesses = SymbolicProperty()
    subset = SubsetProperty()
    other_subset = SubsetProperty(allow_none=True)
    data = DataProperty()
    debuginfo = DebugInfoProperty()
    wcr = LambdaProperty(allow_none=True)
    wcr_identity = Property(dtype=object, default=None, allow_none=True)
    wcr_conflict = Property(dtype=bool, default=True)
    allow_oob = Property(dtype=bool,
                         default=False,
                         desc='Bypass out-of-bounds validation')

    def __init__(self,
                 data,
                 num_accesses,
                 subset,
                 vector_length,
                 wcr=None,
                 wcr_identity=None,
                 other_subset=None,
                 debuginfo=None,
                 wcr_conflict=True):
        """ Constructs a Memlet.
            @param data: The data object or name to access. B{Note:} this
                         parameter will soon be deprecated.
            @type data: Either a string of the data descriptor name or an
                        AccessNode.
            @param num_accesses: The number of times that the moved data
                                 will be subsequently accessed. If
                                 `dace.types.DYNAMIC` (-1),
                                 designates that the number of accesses is
                                 unknown at compile time.
            @param subset: The subset of `data` that is going to be accessed.
            @param vector_length: The length of a single unit of access to
                                  the data (used for vectorization 
                                  optimizations).
            @param wcr: A lambda function specifying how write-conflicts
                        are resolved. The lambda receives two arguments,
                        the `current` value and the `new` value, and
                        returns the value after resolution. For example,
                        summation is `lambda cur, new: cur + new`.
            @param wcr_identity: Identity value used for the first write 
                                 conflict. B{Note:} this parameter will soon
                                 be deprecated.
            @param other_subset: The reindexing of `subset` on the other 
                                 connected data.
            @param debuginfo: Source-code information (e.g., line, file) 
                              used for debugging.
            @param wcr_conflict: If False, forces non-locked conflict 
                                 resolution when generating code. The default
                                 is to let the code generator infer this 
                                 information from the SDFG.
        """

        # Properties
        self.num_accesses = num_accesses  # type: sympy.expr.Expr
        self.subset = subset  # type: subsets.Subset
        self.veclen = vector_length  # type: int (in elements, default 1)
        if hasattr(data, 'data'):
            data = data.data
        self.data = data  # type: str

        # Annotates memlet with _how_ writing is performed in case of conflict
        self.wcr = wcr
        self.wcr_identity = wcr_identity
        self.wcr_conflict = wcr_conflict

        # The subset of the other endpoint we are copying from/to (note:
        # carries the dimensionality of the other endpoint too!)
        self.other_subset = other_subset

        self.debuginfo = debuginfo

    def toJSON(self, indent=0):
        json = " " * indent + "{\n"
        indent += 2
        json += " " * indent + "\"type\" : \"" + type(self).__name__ + "\",\n"
        json += " " * indent + "\"label\" : \"" + str(self) + "\"\n"
        indent -= 2
        json += " " * indent + "}\n"
        return json

    @staticmethod
    def simple(data,
               subset_str,
               veclen=1,
               wcr_str=None,
               wcr_identity=None,
               other_subset_str=None,
               wcr_conflict=True,
               num_accesses=None,
               debuginfo=None):
        """ Constructs a Memlet from string-based expressions.
            @param data: The data object or name to access. B{Note:} this
                         parameter will soon be deprecated.
            @type data: Either a string of the data descriptor name or an
                        AccessNode.
            @param subset_str: The subset of `data` that is going to 
                               be accessed in string format. Example: '0:N'.
            @param veclen: The length of a single unit of access to
                           the data (used for vectorization optimizations).
            @param wcr_str: A lambda function (as a string) specifying
                            how write-conflicts are resolved. The lambda
                            receives two arguments, the `current` value
                            and the `new` value, and returns the value
                            after resolution. For example, summation is
                            `'lambda cur, new: cur + new'`.
            @param wcr_identity: Identity value used for the first write 
                                 conflict. B{Note:} this parameter will soon
                                 be deprecated.
            @param other_subset_str: The reindexing of `subset` on the other 
                                     connected data (as a string).
            @param wcr_conflict: If False, forces non-locked conflict 
                                 resolution when generating code. The default
                                 is to let the code generator infer this 
                                 information from the SDFG.
            @param num_accesses: The number of times that the moved data
                                 will be subsequently accessed. If
                                 `dace.types.DYNAMIC` (-1),
                                 designates that the number of accesses is
                                 unknown at compile time.
            @param debuginfo: Source-code information (e.g., line, file) 
                              used for debugging.
                                 
        """
        subset = SubsetProperty.from_string(subset_str)
        if num_accesses is not None:
            na = num_accesses
        else:
            na = subset.num_elements()

        if wcr_str is not None:
            wcr = LambdaProperty.from_string(wcr_str)
        else:
            wcr = None

        if other_subset_str is not None:
            other_subset = SubsetProperty.from_string(other_subset_str)
        else:
            other_subset = None

        # If it is an access node or another memlet
        if hasattr(data, 'data'):
            data = data.data

        return Memlet(data,
                      na,
                      subset,
                      veclen,
                      wcr=wcr,
                      wcr_identity=wcr_identity,
                      other_subset=other_subset,
                      wcr_conflict=wcr_conflict,
                      debuginfo=debuginfo)

    @staticmethod
    def from_array(dataname, datadesc):
        """ Constructs a Memlet that transfers an entire array's contents.
            @param dataname: The name of the data descriptor in the SDFG.
            @param datadesc: The data descriptor object.
            @type datadesc: Data.
        """
        range = subsets.Range.from_array(datadesc)
        return Memlet(dataname, range.num_elements(), range, 1)

    def __hash__(self):
        return hash((self.data, self.num_accesses, self.subset, self.veclen,
                     str(self.wcr), self.wcr_identity, self.other_subset))

    def __eq__(self, other):
        return all([
            self.data == other.data, self.num_accesses == other.num_accesses,
            self.subset == other.subset, self.veclen == other.veclen,
            self.wcr == other.wcr, self.wcr_identity == other.wcr_identity,
            self.other_subset == other.other_subset
        ])

    def num_elements(self):
        """ Returns the number of elements in the Memlet subset. """
        return self.subset.num_elements()

    def bounding_box_size(self):
        """ Returns a per-dimension upper bound on the maximum number of
            elements in each dimension.

            This bound will be tight in the case of Range.
        """
        return self.subset.bounding_box_size()

    def validate(self, sdfg, state):
        if self.data not in sdfg.arrays:
            raise KeyError('Array "%s" not found in SDFG' % self.data)

    def __label__(self, sdfg, state):
        """ Returns a string representation of the memlet for display in a 
            graph.

            @param sdfg: The SDFG in which the memlet resides.
            @param state: An SDFGState object in which the memlet resides.
        """
        if self.data is None:
            return self._label(None)
        return self._label(sdfg.arrays[self.data].shape)

    def __str__(self):
        return self._label(None)

    def _label(self, shape):
        result = ''
        if self.data is not None:
            result = self.data

        if self.subset is None:
            return result

        num_elements = self.subset.num_elements()
        if self.num_accesses != num_elements:
            result += '(%s) ' % str(self.num_accesses)
        arrayNotation = True
        try:
            if shape is not None and reduce(operator.mul, shape, 1) == 1:
                # Don't draw array if we're accessing a single element
                arrayNotation = False
        except TypeError:
            # Will fail if trying to check the truth value of a sympy expr
            pass
        if arrayNotation:
            result += '[%s]' % str(self.subset)
        if self.wcr is not None and str(self.wcr) != '':
            # Autodetect reduction type
            redtype = detect_reduction_type(self.wcr)
            if redtype == types.ReductionType.Custom:
                wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
            else:
                wcrstr = str(redtype)
                wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

            result += ' (CR: %s' % wcrstr
            if self.wcr_identity is not None:
                result += ', id: %s' % str(self.wcr_identity)
            result += ')'

        if self.other_subset is not None:
            result += ' -> [%s]' % str(self.other_subset)
        return result

    def __repr__(self):
        return "Memlet (" + self.__str__() + ")"
Code Example #23
class Data(object):
    """ Data type descriptors that can be used as references to memory.
        Examples: Arrays, Streams, custom arrays (e.g., sparse matrices).
    """

    dtype = TypeClassProperty(default=dtypes.int32, choices=dtypes.Typeclasses)
    shape = ShapeProperty(default=[])
    transient = Property(dtype=bool, default=False)
    storage = EnumProperty(dtype=dtypes.StorageType,
                           desc="Storage location",
                           default=dtypes.StorageType.Default)
    lifetime = EnumProperty(dtype=dtypes.AllocationLifetime,
                            desc='Data allocation span',
                            default=dtypes.AllocationLifetime.Scope)
    location = DictProperty(
        key_type=str,
        value_type=symbolic.pystr_to_symbolic,
        desc='Full storage location identifier (e.g., rank, GPU ID)')
    debuginfo = DebugInfoProperty(allow_none=True)

    def __init__(self, dtype, shape, transient, storage, location, lifetime,
                 debuginfo):
        self.dtype = dtype
        self.shape = shape
        self.transient = transient
        self.storage = storage
        self.location = location if location is not None else {}
        self.lifetime = lifetime
        self.debuginfo = debuginfo
        self._validate()

    def validate(self):
        """ Validate the correctness of this object.
            Raises an exception on error. """
        self._validate()

    # Validation of this class is in a separate function, so that this
    # class can call `_validate()` without calling the subclasses'
    # `validate` function.
    def _validate(self):
        if any(not isinstance(s, (int, symbolic.SymExpr, symbolic.symbol,
                                  symbolic.sympy.Basic)) for s in self.shape):
            raise TypeError('Shape must be a list or tuple of integer values '
                            'or symbols')
        return True

    def to_json(self):
        attrs = serialize.all_properties_to_json(self)

        retdict = {"type": type(self).__name__, "attributes": attrs}

        return retdict

    @property
    def toplevel(self):
        return self.lifetime is not dtypes.AllocationLifetime.Scope

    def copy(self):
        raise RuntimeError(
            'Data descriptors are unique and should not be copied')

    def is_equivalent(self, other):
        """ Check for equivalence (shape and type) of two data descriptors. """
        raise NotImplementedError

    def as_arg(self, with_types=True, for_call=False, name=None):
        """Returns a string for a C++ function signature (e.g., `int *A`). """
        raise NotImplementedError

    @property
    def free_symbols(self) -> Set[symbolic.SymbolicType]:
        """ Returns a set of undefined symbols in this data descriptor. """
        result = set()
        for s in self.shape:
            if isinstance(s, sp.Basic):
                result |= set(s.free_symbols)
        return result

    def __repr__(self):
        return 'Abstract Data Container, DO NOT USE'

    @property
    def veclen(self):
        return self.dtype.veclen if hasattr(self.dtype, "veclen") else 1

    @property
    def ctype(self):
        return self.dtype.ctype
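
A sketch of `free_symbols` on a concrete descriptor; `dace.data.Array` is assumed to be a subclass of this base class with a compatible `(dtype, shape)` constructor.

import dace
from dace import symbolic

N = symbolic.symbol('N')
arr = dace.data.Array(dace.float64, [N, 2 * N])
print(arr.free_symbols)  # {N}: the only undefined shape symbol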
Code Example #24
File: reduce.py Project: mratsim/dace
class Reduce(dace.sdfg.nodes.LibraryNode):
    """ An SDFG node that reduces an N-dimensional array to an
        (N-k)-dimensional array, with a list of axes to reduce and
        a reduction binary function. """

    # Global properties
    implementations = {
        'pure': ExpandReducePure,
        'OpenMP': ExpandReduceOpenMP,
        'CUDA (device)': ExpandReduceCUDADevice,
        'CUDA (block)': ExpandReduceCUDABlock,
        # 'CUDA (warp)': ExpandReduceCUDAWarp,
        # 'CUDA (warp allreduce)': ExpandReduceCUDAWarpAllreduce
    }

    default_implementation = 'pure'

    # Properties
    axes = ListProperty(element_type=int, allow_none=True)
    wcr = LambdaProperty(default='lambda a, b: a')
    identity = Property(allow_none=True)
    debuginfo = DebugInfoProperty()

    def __init__(self,
                 wcr='lambda a, b: a',
                 axes=None,
                 identity=None,
                 schedule=dtypes.ScheduleType.Default,
                 debuginfo=None,
                 **kwargs):
        super().__init__(name='Reduce', **kwargs)
        self.wcr = wcr
        self.axes = axes
        self.identity = identity
        self.debuginfo = debuginfo
        self.schedule = schedule

    @staticmethod
    def from_json(json_obj, context=None):
        ret = Reduce("lambda a, b: a", None)
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    def __str__(self):
        # Autodetect reduction type
        redtype = detect_reduction_type(self.wcr)
        if redtype == dtypes.ReductionType.Custom:
            wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
        else:
            wcrstr = str(redtype)
            wcrstr = wcrstr[wcrstr.find('.') + 1:]  # Skip "ReductionType."

        return 'Reduce ({op}), Axes: {axes}'.format(
            axes=('all' if self.axes is None else str(self.axes)), op=wcrstr)

    def __label__(self, sdfg, state):
        return str(self).replace(' Axes', '\nAxes')

    def validate(self, sdfg, state):
        if len(state.in_edges(self)) != 1:
            raise ValueError('Reduce node must have one input')
        if len(state.out_edges(self)) != 1:
            raise ValueError('Reduce node must have one output')
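
A closing sketch tying the library-node pieces together: construct a sum-reduction over all axes and select the registered 'pure' expansion before the node is expanded.

red = Reduce('lambda a, b: a + b', axes=None, identity=0)
red.implementation = 'pure'  # one of the keys in `implementations`
print(red)  # "Reduce (Sum), Axes: all"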