Example #1
    def set_output_shape(self, source_node, IR_node):
        sym_group = self.model.symbol.get_internals()
        for sym in sym_group:
            if source_node.name == sym.name:
                arg_shape, output_shape, aux_shape = sym.infer_shape(
                    data=self.data_shape)
                for idx in range(len(output_shape)):
                    output_list = list(output_shape[idx])

                    # transpose to channel last
                    if self.data_format not in MXNetParser.channels_last:
                        channel = output_list.pop(1)
                        output_list.append(channel)

                    if IR_node.op == "DataInput":
                        MXNetParser._copy_shape(IR_node,
                                                [-1] + output_list[1:])

                    shape = graph_pb2.TensorShape()
                    for dim in output_list:
                        new_dim = shape.dim.add()
                        if dim is None:
                            new_dim.size = -1
                        else:
                            new_dim.size = dim

                    IR_node.attr["_output_shapes"].list.shape.extend([shape])
                break
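
The channel-last conversion above is just a list rotation: index 1 (the channel) is popped and appended at the end. A minimal standalone sketch of the same permutation, with illustrative values:

# Illustrative sketch of the pop/append transpose used in Example #1.
nchw = [1, 3, 224, 224]      # (N, C, H, W)
nhwc = list(nchw)
channel = nhwc.pop(1)        # remove C
nhwc.append(channel)         # append C last -> (N, H, W, C)
assert nhwc == [1, 224, 224, 3]
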
Example #2
    def _set_output_shape(source_node, IR_node):
        shape = graph_pb2.TensorShape()
        for dim in source_node.layer.output_shape:
            new_dim = shape.dim.add()
            # None (unknown) dims become -1; note a literal 0 would as well
            new_dim.size = dim if dim else -1

        IR_node.attr["_output_shapes"].list.shape.extend([shape])
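
Note that the expression "dim if dim else -1" maps not only None but also a literal 0 to -1. A hypothetical variant that treats only None as unknown, assuming the same graph_pb2 IR module these parsers already have in scope:

def shape_to_proto(dims):
    # Hypothetical helper, not part of the original parser.
    # Maps only None to -1, preserving zero-sized dims.
    shape = graph_pb2.TensorShape()
    for dim in dims:
        shape.dim.add().size = -1 if dim is None else dim
    return shape
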
Example #3
    def _copy_and_reop(source_node, IR_node, new_op=None):
        if new_op is None:
            new_op = source_node.type
        IR_node.name = source_node.name
        IR_node.op = new_op

        if '_output_shape' in source_node.layer['attr']:
            output_list = source_node.layer['attr']['_output_shape']
            shape = graph_pb2.TensorShape()
            for dim in output_list:
                new_dim = shape.dim.add()
                if dim is None:
                    new_dim.size = -1
                else:
                    new_dim.size = int(dim)

            IR_node.attr["_output_shape"].list.shape.extend([shape])

        if 'shape' in source_node.layer['attr']:
            shape_list = source_node.layer['attr']['shape']
            if shape_list is not None:
                for dim in shape_list:
                    new_dim = IR_node.attr["shape"].shape.dim.add()
                    if dim is None:
                        new_dim.size = -1
                    else:
                        new_dim.size = int(dim)
            else:
                IR_node.attr["shape"].shape.unknown_rank = True
Example #4
    def _set_output_shape(self, source_node, IR_node):
        shape = graph_pb2.TensorShape()
        layer_name = source_node.name
        shape_pytorch = self.shape_dict[layer_name]

        new_dim = shape.dim.add()

        # source shape is (batch, C, H, W); emit NHWC
        if len(shape_pytorch) == 4:

            # a batch size of 1 is treated as a dynamic batch dimension
            if shape_pytorch[0] == 1:
                new_dim.size = -1
            else:
                new_dim.size = shape_pytorch[0]
            for index in [2, 3, 1]:  # H, W, C
                new_dim = shape.dim.add()
                dim = shape_pytorch[index]
                new_dim.size = dim if dim else -1
        elif len(shape_pytorch) == 2:
            if shape_pytorch[0] == 1:
                new_dim.size = -1
            else:
                new_dim.size = shape_pytorch[0]
            # pad (batch, C) out to rank 4 with H = W = 1
            for _ in range(2):
                new_dim = shape.dim.add()
                new_dim.size = 1
            new_dim = shape.dim.add()
            dim = shape_pytorch[1]
            new_dim.size = dim if dim else -1

        IR_node.attr["_output_shapes"].list.shape.extend([shape])
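
The loop over [2, 3, 1], with the batch handled first, amounts to the NCHW-to-NHWC permutation (0, 2, 3, 1). Expressed directly, with illustrative values:

# Equivalent index permutation for the rank-4 case in Example #4.
nchw = [8, 3, 224, 224]                   # (N, C, H, W)
nhwc = [nchw[i] for i in (0, 2, 3, 1)]    # (N, H, W, C)
assert nhwc == [8, 224, 224, 3]
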
Example #5
    def rename_InputLayer(self, source_node):
        # only for training
        IR_node = self.IR_graph.node.add()

        # name, op
        IR_node.name = source_node.name
        IR_node.op = "DataInput"
        graph_shape = graph_pb2.TensorShape()
        coreml_node_layer = source_node.layer

        new_dim = graph_shape.dim.add()
        new_dim.size = -1
        new_dim = graph_shape.dim.add()
        new_dim.size = coreml_node_layer.type.imageType.width
        new_dim = graph_shape.dim.add()
        new_dim.size = coreml_node_layer.type.imageType.height
        new_dim = graph_shape.dim.add()

        # CoreML FeatureTypes.proto: GRAYSCALE = 10, RGB = 20, BGR = 30
        if coreml_node_layer.type.imageType.colorSpace == 10:
            new_dim.size = 2
        elif coreml_node_layer.type.imageType.colorSpace == 20:
            new_dim.size = 3
        elif coreml_node_layer.type.imageType.colorSpace == 30:
            new_dim.size = 3
        else:
            assert False, "unsupported colorSpace"
        IR_node.attr["_output_shapes"].list.shape.extend([graph_shape])

        # input edge
        self.convert_inedge(source_node, IR_node)

        # shape attribute, channel last; note the dims below are written as
        # N, W, H, C (width before height), i.e. NWHC rather than NHWC
        new_dim = IR_node.attr['shape'].shape.dim.add()
        new_dim.size = -1
        new_dim = IR_node.attr['shape'].shape.dim.add()
        new_dim.size = coreml_node_layer.type.imageType.width
        new_dim = IR_node.attr['shape'].shape.dim.add()
        new_dim.size = coreml_node_layer.type.imageType.height
        new_dim = IR_node.attr['shape'].shape.dim.add()

        # CoreML FeatureTypes.proto: GRAYSCALE = 10, RGB = 20, BGR = 30
        if coreml_node_layer.type.imageType.colorSpace == 10:
            new_dim.size = 2
        elif coreml_node_layer.type.imageType.colorSpace == 20:
            new_dim.size = 3
        elif coreml_node_layer.type.imageType.colorSpace == 30:
            new_dim.size = 3
        else:
            assert False, "unsupported colorSpace"
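
The two colorSpace branches repeat the same mapping. A table-driven variant (a hypothetical refactor that preserves the original channel counts, including 2 for GRAYSCALE) could be:

# Hypothetical refactor of the colorSpace branches in Example #5.
# CoreML FeatureTypes.proto: GRAYSCALE = 10, RGB = 20, BGR = 30.
CHANNELS_BY_COLORSPACE = {10: 2, 20: 3, 30: 3}

def channels_for(color_space):
    assert color_space in CHANNELS_BY_COLORSPACE, "unsupported colorSpace"
    return CHANNELS_BY_COLORSPACE[color_space]
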
Example #6
    def gen_Input(self):
        IR_node = self.IR_graph.node.add()
        IR_node.name = 'input'
        IR_node.op = "DataInput"

        for node in self.IR_graph.node:
            if node.name in self.src_graph.input_layers:
                node.input.append('input')

        assert len(self.input_shape) == 4
        new_dim = IR_node.attr["shape"].shape.dim.add()
        if self.input_shape[0] == 1:
            new_dim.size = -1
        else:
            new_dim.size = self.input_shape[0]
        for index in [2, 3, 1]:
            new_dim = IR_node.attr["shape"].shape.dim.add()
            new_dim.size = self.input_shape[index]

        shape = graph_pb2.TensorShape()
        new_dim = shape.dim.add()
        shape_pytorch = self.input_shape

        if len(shape_pytorch) == 4:

            if shape_pytorch[0] == 1:
                new_dim.size = -1
            else:
                new_dim.size = shape_pytorch[0]
            for index in [2, 3, 1]:
                new_dim = shape.dim.add()
                dim = shape_pytorch[index]
                new_dim.size = dim if dim else -1
        elif len(shape_pytorch) == 2:  # note: unreachable here, rank 4 is asserted above
            if shape_pytorch[0] == 1:
                new_dim.size = -1
            else:
                new_dim.size = shape_pytorch[0]
            for _ in range(2):
                new_dim = shape.dim.add()
                new_dim.size = 1
            new_dim = shape.dim.add()
            dim = shape_pytorch[1]
            new_dim.size = dim if dim else -1

        IR_node.attr["_output_shapes"].list.shape.extend([shape])
Example #7
    def _set_output_shape(self, source_node, IR_node):
        shape = graph_pb2.TensorShape()
        layer_name = source_node.name
        shape_pytorch = self.shape_dict[layer_name]

        new_dim = shape.dim.add()

        if not shape_pytorch:
            print(
                "Warning: PyTorch cannot infer the output shape of \"{}\" with "
                "operator \"{}\". Setting the output shape manually in the JSON "
                "file is an alternative.".format(source_node.name, source_node.type))
            IR_node.attr["_output_shapes"].list.shape.extend([shape])
            return

        # source shape is (batch, C, H, W); emit NHWC
        if len(shape_pytorch) == 4:

            if shape_pytorch[0] == 1:
                new_dim.size = -1
            else:
                new_dim.size = shape_pytorch[0]
            for index in [2, 3, 1]:
                new_dim = shape.dim.add()
                dim = shape_pytorch[index]
                new_dim.size = dim if dim else -1
        elif len(shape_pytorch) == 2:
            if shape_pytorch[0] == 1:
                new_dim.size = -1
            else:
                new_dim.size = shape_pytorch[0]
            for _ in range(2):
                new_dim = shape.dim.add()
                new_dim.size = 1
            new_dim = shape.dim.add()
            dim = shape_pytorch[1]
            new_dim.size = dim if dim else -1

        IR_node.attr["_output_shapes"].list.shape.extend([shape])
Example #8
    def _set_output_shape(source_node, IR_node):

        shape = graph_pb2.TensorShape()
        source_node_layer = source_node.layer

        layer_name = source_node_layer.output[0]

        shape_coreml = CoremlParser.shape_dict[layer_name]
        # source shape is (seq, batch, C, H, W); emit NHWC (batch is index 1)

        new_dim = shape.dim.add()
        if shape_coreml[1] == 1:
            new_dim.size = -1
        else:
            new_dim.size = shape_coreml[1]
        for index in [3, 4, 2]:  # H, W, C
            new_dim = shape.dim.add()
            dim = shape_coreml[index]
            new_dim.size = dim if dim else -1

        IR_node.attr["_output_shapes"].list.shape.extend([shape])
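
Here the source shape carries a leading sequence axis, so the batch sits at index 1 and (H, W, C) are picked from indices (3, 4, 2). The same selection as a comprehension, with illustrative values:

# Illustrative: (seq, batch, C, H, W) -> (N, H, W, C); seq is dropped.
seq_bchw = [1, 8, 3, 224, 224]
nhwc = [seq_bchw[1]] + [seq_bchw[i] for i in (3, 4, 2)]
assert nhwc == [8, 224, 224, 3]
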
Example #9
def list_to_shape(shape):
    # Build a TensorShape proto from a list of concrete integer dims.
    # Unlike the parsers above, this helper does not map None to -1.
    ret = graph_pb2.TensorShape()
    for dim in shape:
        new_dim = ret.dim.add()
        new_dim.size = dim
    return ret
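
Hypothetical usage of the helper above, assuming fully known dims and an IR_node as in the other examples (None values are not handled here):

shape_proto = list_to_shape([1, 224, 224, 3])
IR_node.attr["_output_shapes"].list.shape.extend([shape_proto])
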
Example #10
    def _set_output_shape(source_node, IR_node, output_shapes):
        # output_shapes is a flat list of dim sizes for one output tensor
        shape = graph_pb2.TensorShape()
        for output_shape in output_shapes:
            new_dim = shape.dim.add()
            new_dim.size = output_shape
        IR_node.attr["_output_shapes"].list.shape.extend([shape])