Code Example #1
    def create(cls, node, graph, memory_manager):
        """
        Derive necessary information from ComputeNode, ComputeGraph and MemoryManager to generate the layer code.
        :param node: ComputeNode object of a CNN layer
        :param graph: ComputeGraph object of the CNN
        :param memory_manager: MemoryManager object containing information about input and output buffers.
        :return: Operation object with the derived buffer attributes set.
        """

        attrs = node.attrs
        input_buffer = memory_manager.get_buffer(graph, node.inputs[0])
        output_buffer = memory_manager.get_buffer(graph, node.outputs[0])

        input_id = node.inputs[0]
        input_shape = graph.get_shape(input_id)

        output_id = node.outputs[0]
        output_shape = graph.get_shape(output_id)

        assert (reduce_mult(input_shape) == reduce_mult(output_shape))

        operation = cls(node, graph)
        operation.attributes['name'] = node.name
        operation.attributes['input_buffer'] = input_buffer
        operation.attributes['output_buffer'] = output_buffer

        return operation
Code Example #2
    def create(cls, node, graph, memory_manager):
        """
        Derive necessary information from ComputeNode, ComputeGraph and MemoryManager to generate the layer code.
        :param node: ComputeNode object of a CNN layer
        :param graph: ComputeGraph object of the CNN
        :param memory_manager: MemoryManager object containing information about input and output buffers.
        :return: Operation object with the derived attributes set.
        """

        attrs = node.attrs
        input_buffer = memory_manager.get_buffer(graph, node.inputs[0])
        output_buffer = memory_manager.get_buffer(graph, node.outputs[0])

        input_id = node.inputs[0]
        input_shape = graph.get_shape(input_id)

        output_id = node.outputs[0]
        output_shape = graph.get_shape(output_id)

        assert (reduce_mult(input_shape) == reduce_mult(output_shape))

        if len(input_shape) == 4:
            num_input_channels = input_shape[1]
            input_height = input_shape[2]
            input_width = input_shape[3]
        elif len(input_shape) == 3:
            num_input_channels = input_shape[1]
            input_height = 1
            input_width = input_shape[2]
        elif len(input_shape) == 2:
            num_input_channels = input_shape[0]
            input_height = 1
            input_width = input_shape[1]
        else:
            print(
                "ERROR: Unsupported input shape for reshape layer: {}".format(
                    input_shape))
            exit(1)

        if len(output_shape) == 4:
            hotfix = "[0]"
        else:
            hotfix = ""

        # TODO: Find better alternative to this. See issue #60
        no_change = (input_shape == output_shape)

        operation = cls(node, graph)
        operation.attributes['input_buffer'] = input_buffer
        operation.attributes['num_input_channels'] = num_input_channels
        operation.attributes['input_height'] = input_height
        operation.attributes['input_width'] = input_width
        operation.attributes['no_change'] = no_change
        operation.attributes['output_buffer'] = output_buffer
        operation.attributes['hotfix'] = hotfix

        return operation
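A note on reduce_mult: the helper is not shown in these excerpts, but from its usage it evidently computes the product of all shape entries. The standalone sketch below uses made-up shapes to illustrate that assumption, the size invariant the assert enforces, and the values the 4-D branch above derives.

    from functools import reduce

    def reduce_mult(values):
        # Assumed behaviour: product of all entries of a shape tuple.
        return reduce(lambda a, b: a * b, values, 1)

    input_shape = (1, 8, 1, 128)   # hypothetical NCHW input of a reshape node
    output_shape = (1, 1024)       # hypothetical reshaped output

    # Same invariant as the assert in create(): a reshape must preserve the element count.
    assert reduce_mult(input_shape) == reduce_mult(output_shape)

    # For a 4-D input, the branches above read channels, height and width from axes 1..3.
    num_input_channels, input_height, input_width = input_shape[1], input_shape[2], input_shape[3]
    print(num_input_channels, input_height, input_width)  # 8 1 128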
Code Example #3
    def create(cls, node, graph, memory_manager):
        """
        Derive necessary information from ComputeNode, ComputeGraph and MemoryManager to generate the layer code.
        :param node: ComputeNode object of a CNN layer
        :param graph: ComputeGraph object of the CNN
        :param memory_manager: MemoryManager object containing information about input and output buffers.
        :return: Operation object with the derived attributes set, or None for unsupported input shapes.
        """
        attrs = node.attrs
        input_buffer = memory_manager.get_buffer(graph, node.inputs[0])
        output_buffer = memory_manager.get_buffer(graph, node.outputs[0])

        input_id = node.inputs[0]
        input_shape = graph.get_shape(input_id)

        output_id = node.outputs[0]
        output_shape = graph.get_shape(output_id)

        assert (reduce_mult(input_shape) == reduce_mult(output_shape))

        if len(input_shape) == 4:
            num_input_channels = input_shape[1]
            input_height = input_shape[2]
            input_width = input_shape[3]
            no_change = 0
        elif len(input_shape) == 2:
            num_input_channels = 1
            input_height = 1
            input_width = input_shape[1]
            no_change = 1
        else:
            print("ERROR: Unsupported tensor shape in flatten operation: {}".
                  format(input_shape))
            # None signals that this implementation cannot handle the node,
            # matching the other create() methods shown here.
            return None

        operation = cls(node, graph)
        operation.attributes['input_buffer'] = input_buffer
        operation.attributes['num_input_channels'] = num_input_channels
        operation.attributes['input_height'] = input_height
        operation.attributes['input_width'] = input_width

        operation.attributes['output_buffer'] = output_buffer

        operation.attributes['no_change'] = no_change

        return operation
Code Example #4
    def create(cls, node, graph, memory_manager):
        attrs = node.attrs

        input_buffer = memory_manager.get_buffer(graph, node.inputs[0])
        weight_buffer = memory_manager.get_buffer(graph, node.inputs[1])
        output_buffer = memory_manager.get_buffer(graph, node.outputs[0])

        input_size = reduce_mult(input_buffer.shape)
        output_size = reduce_mult(output_buffer.shape)

        operation = cls(node, graph)

        operation.attributes['input_buffer'] = input_buffer
        operation.attributes['input_size'] = input_size
        operation.attributes['weight_buffer'] = weight_buffer
        operation.attributes['output_buffer'] = output_buffer
        operation.attributes['output_size'] = output_size

        return operation
Code Example #5
    def create(cls, node, graph, memory_manager):
        attrs = node.attrs

        # assert tuple(attrs["pads"]) == (0, 0)
        kernel_shape = attrs["kernel_shape"]

        if not (len(kernel_shape) == 1 or
                (len(kernel_shape) == 2 and kernel_shape[1] == 1)):
            print("{} is not a 1DMaxPool".format(node.name))
            return None

        input_id = node.inputs[0]
        input_shape = graph.get_shape(input_id)
        # input_buffer = "buffer" + input_id
        input_buffer = memory_manager.get_buffer(graph, node.inputs[0])
        num_input_channels = input_shape[1]

        # output_buffer = "buffer" + node.outputs[0]
        output_buffer = memory_manager.get_buffer(graph, node.outputs[0])

        if graph.is_output(node.outputs[0]):
            output_buffer = "output" + node.outputs[0]

        # output_width = graph.get_shape(node.outputs[0], node)[2]

        kernel_size = attrs["kernel_shape"][0]
        kernel_stride = attrs["strides"][0]

        padding = attrs["pads"]
        padding_needed = any(p != 0 for p in padding)

        input_buffer_size = reduce_mult(input_shape)

        operation = cls(node, graph)

        identifier = node.name.replace('.', '_').replace(':', '_').replace('/', '_')

        operation.attributes['identifier'] = identifier

        operation.attributes['num_input_channels'] = num_input_channels
        operation.attributes['input_buffer'] = input_buffer
        operation.attributes['output_buffer'] = output_buffer
        operation.attributes['input_width'] = input_shape[2]
        operation.attributes['kernel_size'] = kernel_size
        operation.attributes['kernel_stride'] = kernel_stride
        operation.attributes['padding_needed'] = padding_needed
        operation.attributes['padding'] = padding

        return operation
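The commented-out output_width line suggests the output width is obtained from the graph rather than computed here. For reference, the sketch below shows the standard 1-D pooling relation between the attributes collected above; the formula and the helper name are assumptions, not taken from the excerpt.

    def pool_output_width(in_width, kernel_size, stride, pads=(0, 0)):
        # Standard 1-D pooling output width (assumption, not part of the excerpt):
        # floor((in_width + pad_begin + pad_end - kernel_size) / stride) + 1
        return (in_width + pads[0] + pads[1] - kernel_size) // stride + 1

    print(pool_output_width(in_width=128, kernel_size=4, stride=4))  # 32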
Code Example #6
    def create(cls, node, graph, memory_manager):
        """
        Derive necessary information from ComputeNode, ComputeGraph and MemoryManager to generate the layer code.
        :param node: ComputeNode object of a CNN layer
        :param graph: ComputeGraph object of the CNN
        :param memory_manager: MemoryManager object containing information about input and output buffers.
        :return: Operation object with the derived attributes set, or None if the Gemm attributes (alpha, beta, transB) are not supported.
        """
        attrs = node.attrs

        if 'alpha' in node.attrs:
            if node.attrs['alpha'] != 1.0:
                return None
        if 'beta' in node.attrs:
            if node.attrs['beta'] != 1.0:
                return None
        if 'transB' in node.attrs:
            if node.attrs['transB'] != 1:
                return None

        input_buffer = memory_manager.get_buffer(graph, node.inputs[0])
        weight_buffer = memory_manager.get_buffer(graph, node.inputs[1])
        bias_buffer = None
        if len(node.inputs) > 2:
            bias_buffer = memory_manager.get_buffer(graph, node.inputs[2])
        output_buffer = memory_manager.get_buffer(graph, node.outputs[0])

        input_size = reduce_mult(input_buffer.shape)
        output_size = reduce_mult(output_buffer.shape)

        operation = cls(node, graph)

        operation.attributes['input_buffer'] = input_buffer
        operation.attributes['input_size'] = input_size
        operation.attributes['weight_buffer'] = weight_buffer
        operation.attributes['bias_buffer'] = bias_buffer
        operation.attributes['output_buffer'] = output_buffer
        operation.attributes['output_size'] = output_size

        return operation
Code Example #7
    def create(cls, node, graph, memory_manager):
        attrs = node.attrs

        input_buffer = memory_manager.get_buffer(graph, node.inputs[0])
        weight_buffer = memory_manager.get_buffer(graph, node.inputs[1])
        output_buffer = memory_manager.get_buffer(graph, node.outputs[0])

        input_size = reduce_mult(input_buffer.shape)
        output_size = reduce_mult(output_buffer.shape)

        operation = cls(node, graph)

        identifier = node.name.replace('.', '_').replace(':', '_').replace('/', '_')

        operation.attributes['name'] = node.name
        operation.attributes['identifier'] = identifier
        operation.attributes['input_buffer'] = input_buffer
        operation.attributes['weight_buffer'] = weight_buffer
        operation.attributes['output_buffer'] = output_buffer

        return operation
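Several of the create() methods above return None when a node does not match their constraints (the 1DMaxPool shape check, the Gemm alpha/beta/transB checks). A dispatcher built on that convention might look like the sketch below; lower_node and implementations are hypothetical names, not taken from the excerpts.

    def lower_node(node, graph, memory_manager, implementations):
        # Try each registered backend class in turn; create() returns None
        # when an implementation cannot handle the given node.
        for impl in implementations:
            operation = impl.create(node, graph, memory_manager)
            if operation is not None:
                return operation
        raise NotImplementedError("no implementation found for node {}".format(node.name))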
Code Example #8
    def get(cls, graph, id: str, name="", alignment=None, dt_string=None):
        """
        Return a Buffer object containing the information passed to this method.
        :param graph: ComputeGraph representing the CNN
        :param id: Unique identifier of the buffer
        :param name: Name of the buffer, can be omitted
        :param alignment: Alignment of the data type in memory. If omitted, an alignment of 4 bytes (the data type size) is assumed.
        :param dt_string: Field to support more data types in the future.
        :return: Buffer object containing the information passed to this method.
        """
        buffer_name = "buffer_"
        is_managed = True
        if graph.is_input(id):
            buffer_name = "input_"
            is_managed = False
        elif graph.is_output(id):
            buffer_name = "output_"
            is_managed = False
            
        if name != "":
            buffer_name = name
            is_managed = False
            
        buffer_name += id        
        shape = graph.get_shape(id)

        # TODO: Refactor this because buffer_depth is a terrible name that does not represent what it should.
        # This variable is about whether we have multiple channels or not
        if len(shape) == 1 or len(shape) == 2:
            buffer_depth = 1
        elif len(shape) == 4:
            buffer_depth = 2
        elif len(shape) == 3:
            buffer_depth = 2  # TODO: When do we actually need depth = 3???
        else:
            buffer_depth = 0

        # TODO infer data types as soon as we support more data types (e.g. fixed-point)
        dt = np.float32  # np.float is no longer available in NumPy; float32 matches dtsize below
        dtsize = 4

        size = reduce_mult(shape)*dtsize
        if alignment is None:
            alignment = dtsize

        return cls(id, buffer_name, shape, size,
                   dt, dtsize, alignment, is_managed, dt_string, buffer_depth)
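As a quick illustration of the naming and sizing logic above, the standalone sketch below mirrors it for a hypothetical 4-D intermediate tensor; the id and shape are made up, and the Buffer class itself is not reproduced.

    from functools import reduce

    shape = (1, 16, 14, 14)       # hypothetical intermediate NCHW tensor
    buffer_id = "conv1_out"       # hypothetical id that is neither a graph input nor output

    buffer_name = "buffer_" + buffer_id                 # -> "buffer_conv1_out"
    buffer_depth = 1 if len(shape) in (1, 2) else 2     # 3-D and 4-D shapes map to 2 above
    dtsize = 4                                          # float32
    size = reduce(lambda a, b: a * b, shape, 1) * dtsize
    print(buffer_name, buffer_depth, size)              # buffer_conv1_out 2 12544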