Example 1
 def map_crop(cls, node):
     # Only the first element of Caffe's per-axis offset list is honored here.
     offset = node.parameters.offset
     if offset:
         kwargs = {'offset': int(offset[0])}
         return Node.create('crop', **kwargs)
     else:
         return Node.create('crop')
Example 2
    def map_scale(cls, node):
        # TODO: The gamma parameter has to be set (in node.data?) and this should work.
        # Also, mean should be set to 0, and var to 1, just to be safe.
        if node.data:
            scale_value = float(node.parameters.filler.value)
            kwargs = {
                'use_scale': True,
                'use_bias': node.parameters.bias_term,
                'gamma': scale_value,
                'epsilon': 0
            }
            if node.parameters.bias_term:
                kwargs['beta'] = float(node.parameters.bias_filler.value)

            cls._convert_output_shape(kwargs, node)
            return Node.create('Affine', **kwargs)
        else:
            return Node.create('Mul')
Example 3
 def map_scale(cls, node):
     # TODO: The gamma parameter has to be set (in node.data?) and this should work.
     # Also, mean should be set to 0, and var to 1, just to be safe.
     if node.data:
          raise NotImplementedError
          # Unreachable: the code below sketches the intended mapping.
          scale_value = float(node.parameters.filler.value)
         kwargs = {
             'scale': False,
             'bias': False,
             'gamma': scale_value,
             'epsilon': 0
         }
         cls._convert_output_shape(kwargs, node)
         return Node.create('Scale', **kwargs)
     else:
         return Node.create('Mul')
Example 4
 def map_eltwise(cls, node):
     operations = {0: 'Mul', 1: 'Add', 2: 'Max'}
     op_code = node.parameters.operation
     try:
         return Node.create(operations[op_code])
     except KeyError:
         raise ConversionError('Unknown elementwise operation: {}'.format(op_code))
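The integer codes follow Caffe's EltwiseParameter.EltwiseOp enum (PROD = 0, SUM = 1, MAX = 2). A self-contained sketch of the same lookup, with a hypothetical helper name and a plain ValueError standing in for ConversionError:

 # Caffe EltwiseParameter.EltwiseOp values: PROD = 0, SUM = 1, MAX = 2.
 CAFFE_ELTWISE_TO_IR = {0: 'Mul', 1: 'Add', 2: 'Max'}

 def eltwise_op_name(op_code):
     # Mirrors map_eltwise's lookup; unknown codes fail loudly.
     try:
         return CAFFE_ELTWISE_TO_IR[op_code]
     except KeyError:
         raise ValueError('Unknown elementwise operation: {}'.format(op_code))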
Example 5
 def map_batch_norm(cls, node):
     # Four weight blobs imply that learned scale and offset terms are present.
     scale_offset = len(node.data) == 4
     kwargs = {} if scale_offset else {'scale': False, 'bias': False}
     epsilon = node.parameters.eps
     kwargs['epsilon'] = epsilon
     cls._convert_output_shape(kwargs, node)
     return Node.create('BatchNorm', **kwargs)
Example 6
    def map_inner_product(cls, node):
        # TODO: Axis
        assert node.parameters.axis == 1
        # TODO: Unbiased
        # Target shape for the FullyConnected output: [-1, prod(trailing dims)].
        shape = TensorShape()
        dim = shape.dim.add()
        dim.size = -1
        dim = shape.dim.add()
        dim.size = 1
        for i in node.output_shape[1:]:
            dim.size *= i
        kwargs = {
            'use_bias': node.parameters.bias_term,
            'units': node.parameters.num_output,
            '_output_shapes': [shape]
        }

        # A Flatten layer is always inserted ahead of the FullyConnected node;
        # the shape check below was disabled.
        parent, _ = node.get_only_parent()
        ret = []

        # if parent.output_shape.height > 1 or parent.output_shape.width > 1:
        ret.append(cls._add_flatten_layer(parent))
        ret.append(Node.create('FullyConnected', **kwargs))
        return ret
Example 7
 def map_scale(cls, node):
     raise NotImplementedError
     # Unreachable: records the intended Scale -> BatchNorm mapping.
     # TODO: The gamma parameter has to be set (in node.data?) and this should work.
     # Also, mean should be set to 0, and var to 1, just to be safe.
     scale_value = float(node.parameters.filler.value)
     kwargs = {'scale': True, 'bias': False, 'gamma': scale_value, 'epsilon': 0}
     return Node.create('BatchNorm', **kwargs)
Example 8
 def map_convolution(cls, node):
     parent, _ = node.get_only_parent()
     kwargs = cls.get_kernel_params(node, parent.output_shape)
     kwargs['kernel_shape'] = [
         node.kernel_parameters.k_h, node.kernel_parameters.k_w,
         parent.output_shape.channels, node.parameters.num_output
     ]
     kwargs['use_bias'] = node.parameters.bias_term
     kwargs['group'] = node.parameters.group
     return Node.create('Conv', **kwargs)
Example 9
 def map_p_re_lu(cls, node):
     # gamma is left unset; Caffe's default PReLU slope is 0.25.
     kwargs = {}
     cls._convert_output_shape(kwargs, node)
     return Node.create('PRelu', **kwargs)
Example 10
    def map_deconvolution(cls, node):
        raise NotImplementedError()
        # Unreachable: retained as a sketch of the planned deconvolution mapping.
        parent, _ = node.get_only_parent()
        kwargs = cls.get_kernel_params(node, parent.output_shape)

        kwargs['kernel_shape'] = [
            node.kernel_parameters.k_h, node.kernel_parameters.k_w,
            parent.output_shape.channels, node.parameters.num_output
        ]
        kwargs['group'] = node.parameters.group
        return Node.create('deconv', **kwargs)
Example 11
 def map_eltwise(cls, node):
     operations = {0: 'Mul', 1: 'Add', 2: 'Max'}
     op_code = node.parameters.operation
     try:
         kwargs = {}
         cls._convert_output_shape(kwargs, node)
         return Node.create(operations[op_code], **kwargs)
     except KeyError:
         raise ConversionError(
             'Unknown elementwise operation: {}'.format(op_code))
Example 12
    def map_convolution(cls, node):
        kwargs, padding = cls.get_kernel_params(node)
        parent, _ = node.get_only_parent()
        kwargs['filter'] = [
            node.kernel_parameters.k_h, node.kernel_parameters.k_w,
            parent.output_shape.channels, node.parameters.num_output
        ]
        kwargs['use_bias'] = node.parameters.bias_term
        group = node.parameters.group
        if group != 1:
            kwargs['group'] = group

        if padding['paddings'] is not None:
            return [
                Node.create('Pad', **padding),
                Node.create('Convolution', **kwargs)
            ]
        else:
            return Node.create('Convolution', **kwargs)
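When get_kernel_params reports explicit paddings, the padding is emitted as a standalone Pad node ahead of the convolution instead of being folded into the convolution's own attributes, presumably because the target graph cannot express Caffe's padding scheme directly; otherwise the convolution node is emitted alone.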
Example 13
 def map_p_re_lu(cls, node):
     try:
         scale_value = float(node.parameters.filler.value)
         kwargs = {'gamma': scale_value}
     except (AttributeError, ValueError):
         # Fall back to Caffe's default PReLU slope if no filler value is set.
         kwargs = {'gamma': 0.25}
     cls._convert_output_shape(kwargs, node)
     return Node.create('PRelu', **kwargs)
Example 14
 def map_lrn(cls, node):
     params = node.parameters
     assert params.local_size % 2 == 1
     kwargs = {
         'size': int((params.local_size + 1) / 2),
         'alpha': params.alpha,
         'beta': params.beta,
         'k': params.k
     }
     cls._convert_output_shape(kwargs, node)
     return Node.create('LRN', **kwargs)
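Caffe's local_size is the full, odd width of the normalization window, while the emitted size is the half-width including the center; the assertion above guarantees the division is exact. A quick check with an illustrative value:

 local_size = 5                    # Caffe's full (odd) LRN window width
 size = int((local_size + 1) / 2)  # half-width including the center
 assert size == 3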
Example 15
    def _add_flatten_layer(cls, node):
        shape = TensorShape()
        dim = shape.dim.add()
        dim.size = -1

        dim = shape.dim.add()
        dim.size = 1
        for i in node.output_shape[1:]:
            dim.size *= i
        kwargs = {'_output_shapes': [shape]}
        return Node.create('Flatten', **kwargs)
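The emitted shape is [-1, prod(trailing dims)]: for a (N, C, H, W) = (32, 64, 7, 7) activation the Flatten target becomes [-1, 3136]. A sketch of that arithmetic with illustrative values:

    import functools
    import operator

    output_shape = (32, 64, 7, 7)  # illustrative (N, C, H, W)
    flat = functools.reduce(operator.mul, output_shape[1:], 1)
    assert flat == 64 * 7 * 7 == 3136  # Flatten target: [-1, 3136]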
Example 16
    def map_crop(cls, node):
        kwargs = {}
        cls._convert_output_shape(kwargs, node)
        offset = node.parameters.offset
        if offset:
            if len(offset) == 1:
                kwargs['border'] = [offset[0], 0, offset[0], 0]
            else:
                kwargs['border'] = [offset[0], 0, offset[1], 0]

        return Node.create('Crop', **kwargs)
Example 17
 def map_pooling(cls, node):
     parent, _ = node.get_only_parent()
     kwargs = cls.get_kernel_params(node, parent.output_shape)
     if node.parameters.pool == 0:
         kwargs['pooling_type'] = 'MAX'
     elif node.parameters.pool == 1:
         kwargs['pooling_type'] = 'AVG'
     else:
         # e.g. stochastic pooling (pool == 2)
         raise ConversionError('Unsupported pooling type.')
     cls._convert_output_shape(kwargs, node)
     return Node.create('Pool', **kwargs)
Example 18
    def map_deconvolution(cls, node):
        parent, _ = node.get_only_parent()
        kwargs = cls.get_kernel_params(node, parent.output_shape)

        kwargs['kernel_shape'] = [
            node.kernel_parameters.k_h, node.kernel_parameters.k_w,
            node.parameters.num_output, parent.output_shape.channels
        ]
        kwargs['use_bias'] = node.parameters.bias_term
        if node.parameters.dilation:
            dilation = node.parameters.dilation[0]
            if dilation != 1:
                kwargs['dilations'] = [1, dilation, dilation, 1]
        kwargs['group'] = node.parameters.group
        return Node.create('ConvTranspose', **kwargs)
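Compared with map_convolution in Example 8, kernel_shape here swaps its last two entries: [k_h, k_w, num_output, channels] rather than [k_h, k_w, channels, num_output], matching the transposed weight layout of a deconvolution.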
Example 19
    def map_pooling(cls, node):
        kwargs, padding = cls.get_kernel_params(node)
        if node.parameters.pool == 0:
            kwargs['pooling_type'] = 'MAX'
        elif node.parameters.pool == 1:
            kwargs['pooling_type'] = 'AVG'
        else:
            # e.g. stochastic pooling (pool == 2)
            raise ConversionError('Unsupported pooling type.')
        kwargs['window_shape'] = [
            1, node.kernel_parameters.k_h, node.kernel_parameters.k_w, 1
        ]
        cls._convert_output_shape(kwargs, node)

        if padding['paddings'] is not None:
            return [
                Node.create('Pad', **padding),
                Node.create('Pool', **kwargs)
            ]
        else:
            return Node.create('Pool', **kwargs)
Example 20
 def map_unpooling(cls, node):
     kwargs = {}
     kwargs['kernel_shape'] = [
         1, node.kernel_parameters.k_h, node.kernel_parameters.k_w, 1
     ]
     kwargs['pads'] = [
         0, node.kernel_parameters.p_h, node.kernel_parameters.p_w, 0
     ]
     kwargs['strides'] = [
         1, node.kernel_parameters.s_h, node.kernel_parameters.s_w, 1
     ]
     cls._convert_output_shape(kwargs, node)
     return Node.create('Unpool', **kwargs)
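The four-element kernel_shape, pads, and strides lists appear to follow the [1, H, W, 1] layout used by the pooling mappers above, with the batch and channel positions pinned to the identity values.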
Example 21
    def map_inner_product(cls, node):
        # TODO: Axis
        assert node.parameters.axis == 1
        # TODO: Unbiased
        kwargs = {'use_bias': node.parameters.bias_term, 'units': node.parameters.num_output}

        # A Flatten layer is always inserted ahead of the FullyConnected node;
        # the shape check below was disabled.
        parent, _ = node.get_only_parent()
        ret = []

        # if parent.output_shape.height > 1 or parent.output_shape.width > 1:
        ret.append(cls._add_flatten_layer(parent))
        ret.append(Node.create('FullyConnected', **kwargs))
        return ret
Example 22
    def map_data(cls, node):
        # TODO: We need to identify whether this is 4D image data, otherwise we shouldn't change the dimension order
        shape = TensorShape()
        dim = shape.dim.add()
        dim.size = -1
        for i in node.output_shape[2:]:
            dim = shape.dim.add()
            dim.size = i
        dim = shape.dim.add()
        dim.size = node.output_shape.channels

        kwargs = {'shape': shape}  # Ignore the dimension of batch size
        cls._convert_output_shape(kwargs, node)
        return Node.create('DataInput', **kwargs)
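The loop reorders Caffe's NCHW layout into height, width, channels with the batch dimension left free. The same reordering on a plain Python list (standing in for the protobuf TensorShape, with output_shape.channels replaced by index 1; the values are illustrative):

    output_shape = [32, 3, 224, 224]   # Caffe NCHW: (N, C, H, W)
    dims = [-1] + output_shape[2:] + [output_shape[1]]
    assert dims == [-1, 224, 224, 3]   # batch left free, channels last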
Example 23
 def map_relu(cls, node):
     kwargs = {}
     cls._convert_output_shape(kwargs, node)
     return Node.create('Relu', **kwargs)
Example 24
 def map_reshape(cls, node):
     kwargs = {'shape': list(node.output_shape)}
     cls._convert_output_shape(kwargs, node)
     return Node.create('Reshape', **kwargs)
Example 25
 def map_flatten(cls, node):
     return Node.create('Flatten')
Example 26
 def map_dropout(cls, node):
     kwargs = {'keep_prob': node.parameters.dropout_ratio}
     cls._convert_output_shape(kwargs, node)
     return Node.create('Dropout', **kwargs)
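One caveat: Caffe's dropout_ratio is the probability of dropping a unit, so if the consuming framework reads keep_prob as the probability of keeping one, the faithful value would be 1 - dropout_ratio; this example forwards the ratio unchanged.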
Example 27
 def map_abs_val(cls, node):
     return Node.create('Abs')
Example 28
 def map_sigmoid(cls, node):
     return Node.create('Sigmoid')
Example 29
 def map_tanh(cls, node):
     return Node.create('Tanh')
Example 30
 def map_softmax(cls, node):
     kwargs = {}
     cls._convert_output_shape(kwargs, node)
     return Node.create('Softmax', **kwargs)
Example 31
 def map_concat(cls, node):
     kwargs = {'axis': (2, 3, 1, 0)[node.parameters.axis]}
     cls._convert_output_shape(kwargs, node)
     return Node.create('Concat', **kwargs)
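The tuple acts as a lookup table from the Caffe concat axis to the axis on the emitted node; for the common channel-concatenation case (Caffe axis 1) it selects the channels-last axis 3. In miniature (the table name is hypothetical):

 AXIS_LOOKUP = (2, 3, 1, 0)  # index: Caffe axis, value: emitted axis
 assert AXIS_LOOKUP[1] == 3  # channel concat lands on the channels-last axis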
Example 32
 def map_batch_norm(cls, node):
     kwargs = {'scale': len(node.data) >= 3, 'bias': len(node.data) == 4}
     epsilon = node.parameters.eps
     kwargs['epsilon'] = epsilon
     cls._convert_output_shape(kwargs, node)
     return Node.create('BatchNorm', **kwargs)
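As in Example 5, the number of weight blobs in node.data determines which affine parameters exist: a third blob enables the learned scale and a fourth the bias.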
Example 33
 def map_softmax(cls, node):
     return Node.create('Softmax')