Example #1
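These snippets appear to come from the OpenVINO Model Optimizer sources. This first one is a unit test for a Caffe-style pooling front extractor: scalar kernel_size/stride/pad parameters are expected to be expanded into 4-D NCHW arrays, and pool=1 to select average pooling. A sketch of the layout_attrs() helper that every example here relies on follows the test.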
    def test_pooling_ext(self):
        params = {
            'kernel_size': 1,
            'stride': 2,
            'pad': 3,
            'pool': 1,
            'global_pooling': False,
            'ceil_mode': 0
        }
        node = PB({'pb': FakeProtoLayer(FakeMultiParam(params))})
        PoolingFrontExtractor.extract(node)
        res = node
        exp_res = {
            'window': np.array([1, 1, 1, 1], dtype=np.int64),
            'stride': np.array([1, 1, 2, 2], dtype=np.int64),
            'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]], dtype=np.int64),
            'pad_spatial_shape': np.array([[3, 3], [3, 3]], dtype=np.int64),
            'pool_method': 'avg',
            'exclude_pad': False,
            'infer': Pooling.infer,
            'global_pool': False,
            'output_spatial_shape': None,
            'pooling_convention': 'valid'
        }
        exp_res.update(layout_attrs())
        for i in exp_res.keys():
            if i in ('window', 'stride', 'pad', 'pad_spatial_shape',
                     'spatial_dims', 'batch_dims', 'channel_dims'):
                np.testing.assert_array_equal(res[i], exp_res[i])
            else:
                self.assertEqual(res[i], exp_res[i])
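Every example on this page merges the result of layout_attrs() into a node's attribute dictionary, but the helper itself is never shown. Below is a minimal sketch of what it plausibly returns for an NCHW graph; only the key names (spatial_dims, batch_dims, channel_dims) are confirmed by the assertions in the test above, so treat the exact values as an assumption rather than the actual Model Optimizer source.

import numpy as np

def layout_attrs():
    # Hypothetical sketch: axis bookkeeping for a 4-D NCHW tensor.
    return {
        'batch_dims': np.array([0], dtype=np.int64),      # N axis
        'channel_dims': np.array([1], dtype=np.int64),    # C axis
        'spatial_dims': np.array([2, 3], dtype=np.int64)  # H, W axes
    }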
Example #2
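A Caffe convolution extractor: it builds Conv2D/ConvND attributes from convolution_param, embeds the weights and biases, and records everything on the node.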
    def extract(cls, node):
        proto_layer, model_layer = node.pb, node.model_pb

        if not proto_layer:
            raise Error('Protobuf layer cannot be empty')

        conv_param = proto_layer.convolution_param
        conv_type = 'ConvND' if len(proto_layer.bottom) > 1 else 'Conv2D'

        params = conv_set_params(conv_param, conv_type)
        attrs = conv_create_attrs(params)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: node.group,
            'get_output_feature_dim': lambda node: node.output,
            'weights_index': 1 if conv_type == 'Conv2D' else 2
        })

        # Embed weights and biases as attributes.
        # They will be moved to separate nodes in a dedicated pass.
        attrs.update(
            weights_biases(conv_param.bias_term,
                           model_layer,
                           start_index=len(proto_layer.bottom),
                           proto=conv_param))
        attrs.update(layout_attrs())

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #3
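A unit test for Reverse.infer, checking that both the output shape and the reversed value are propagated to the downstream data node.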
    def test_reverse_infer(self):
        graph = build_graph(nodes_attributes,
                            [
                             ('node_1', 'node_1_data'),
                             ('node_1_data', 'reverse'),
                             ('node_2', 'node_2_data'),
                             ('node_2_data', 'reverse'),
                             ('reverse', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None, 'value': None},
                             # the input value is reconstructed here (an assumption):
                             # Reverse.infer needs it to produce the expected value below
                             'node_1_data': {'shape': np.array([1, 4]),
                                             'value': np.array([[1, 3, 227, 227]])},
                             # the second input supplies the reverse axis
                             'node_2_data': {'value': np.array([1])},
                             'reverse': {'stride': 2,
                                         **layout_attrs()}
                             })

        reverse_node = Node(graph, 'reverse')
        Reverse.infer(reverse_node)
        exp_shape = np.array([1, 4])
        exp_value = np.array([[227, 227, 3, 1]])
        res_shape = graph.node['node_3']['shape']
        res_value = graph.node['node_3']['value']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
        for i in range(0, len(exp_value[0])):
            self.assertEqual(exp_value[0][i], res_value[0][i])
Example #4
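A minimal extractor for a Caffe ShuffleChannel layer: it just collects shuffle_channel_param attributes plus the layout attributes.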
    def extract(cls, node):
        mapping_rule = collect_attributes(node.pb.shuffle_channel_param)
        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        ShuffleChannels.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #5
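A Caffe PriorBoxClustered extractor; note the fallback to a default variance of 0.1 when the prototxt omits one.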
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.prior_box_param

        variance = param.variance
        if len(variance) == 0:
            variance = [0.1]

        update_attrs = {
            'width': list(param.width),
            'height': list(param.height),
            'flip': int(param.flip),
            'clip': int(param.clip),
            'variance': list(variance),
            'img_size': param.img_size,
            'img_h': param.img_h,
            'img_w': param.img_w,
            'step': param.step,
            'step_h': param.step_h,
            'step_w': param.step_w,
            'offset': param.offset,
        }

        mapping_rule = merge_attrs(param, update_attrs)

        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        PriorBoxClusteredOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #6
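A Caffe Eltwise extractor. The operation codes follow Caffe's EltwiseParameter enum (PROD = 0, SUM = 1, MAX = 2), and the N-ary operation classes are chosen when the layer has more than two inputs.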
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.eltwise_param

        input_len = len(node.in_edges())

        eltwise_caffe_map = {
            0: EltwiseNMul if input_len > 2 else Mul,
            1: EltwiseNAdd if input_len > 2 else Add,
            2: EltwiseNMax if input_len > 2 else Maximum,
        }

        operation = int(param.operation)
        if operation not in eltwise_caffe_map:
            raise Exception(
                'Unsupported type of operation in Eltwise layer: ' + node.name)

        lin_op_class = eltwise_caffe_map[operation]

        mapping_rule = merge_attrs(param, {'coeff': np.array(param.coeff)})
        mapping_rule.update(layout_attrs())

        assert len(param.coeff) <= input_len

        lin_op_class.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #7
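A RegionYolo shape-inference test with a dynamic input dimension: with do_softmax=1, axes 0..1 are flattened, so [1, ?, 227, 227] becomes [?, 227, 227].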
    def test_region_infer_dynamic_flatten(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'region'), ('region', 'node_3'),
                               ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': None,
                    'value': None
                },
                'node_1': {
                    'shape': shape_array(
                        [1, dynamic_dimension_value, 227, 227])
                },
                'region': {
                    'end_axis': 1,
                    'axis': 0,
                    'do_softmax': 1,
                    **layout_attrs()
                }
            })
        graph.graph['layout'] = 'NCHW'
        reorg_node = Node(graph, 'region')
        RegionYoloOp.regionyolo_infer(reorg_node)
        exp_shape = shape_array([dynamic_dimension_value, 227, 227])
        res_shape = graph.node['node_3']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
Example #8
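A Caffe RegionYolo extractor that merges region_yolo_param with the axis/end_axis fields of flatten_param.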
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.region_yolo_param
        flatten_param = proto_layer.flatten_param
        axis = flatten_param.axis
        end_axis = flatten_param.end_axis
        coords = param.coords
        classes = param.classes
        num = param.num
        update_attrs = {
            'coords': coords,
            'classes': classes,
            'num': num,
            'do_softmax': int(param.do_softmax),
            'anchors': np.array(param.anchors),
            'mask': np.array(param.mask)
        }

        flatten_attrs = {
            'axis': axis,
            'end_axis': end_axis
        }

        mapping_rule = merge_attrs(param, update_attrs)

        mapping_rule.update(flatten_attrs)
        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        RegionYoloOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #9
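A RegionYolo inference test for the do_softmax=0 path: the channel dimension becomes (classes + coords + 1) * len(mask), i.e. (80 + 4 + 1) * 3 = 255 here.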
    def test_region_infer_do_softmax(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'region'), ('region', 'node_3'),
                               ('node_3', 'op_output')], {
                                   'node_3': {
                                       'shape': None,
                                       'value': None
                                   },
                                   'node_1': {
                                       'shape': np.array([1, 227, 227, 3])
                                   },
                                   'region': {
                                       'do_softmax': 0,
                                       'end_axis': -1,
                                       'axis': 1,
                                       'classes': 80,
                                       'coords': 4,
                                       'mask': np.array([6, 7, 8]),
                                       **layout_attrs()
                                   }
                               })

        graph.graph['layout'] = 'NHWC'
        reorg_node = Node(graph, 'region')
        RegionYoloOp.regionyolo_infer(reorg_node)
        exp_shape = np.array([1, 227, 227, (80 + 4 + 1) * 3])
        res_shape = graph.node['node_3']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
Example #10
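The do_softmax=1 counterpart of the previous test: axes 0..1 of [1, 3, 227, 227] are flattened into [3, 227, 227]; a sketch of that arithmetic follows the test.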
    def test_region_infer_flatten(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'region'), ('region', 'node_3'),
                               ('node_3', 'op_output')], {
                                   'node_3': {
                                       'shape': None,
                                       'value': None
                                   },
                                   'node_1': {
                                       'shape': np.array([1, 3, 227, 227])
                                   },
                                   'region': {
                                       'end_axis': 1,
                                       'axis': 0,
                                       'do_softmax': 1,
                                       **layout_attrs()
                                   }
                               })
        graph.graph['layout'] = 'NCHW'
        reorg_node = Node(graph, 'region')
        RegionYoloOp.regionyolo_infer(reorg_node)
        exp_shape = np.array([1 * 3, 227, 227])
        res_shape = graph.node['node_3']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
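To make the expected shape concrete, here is a small stand-alone sketch of the flatten arithmetic the two tests above rely on (flatten_shape is a hypothetical name, not a function from the code base):

import numpy as np

def flatten_shape(shape, axis, end_axis):
    # Collapse dimensions [axis .. end_axis] into a single one.
    shape = list(shape)
    merged = int(np.prod(shape[axis:end_axis + 1]))
    return np.array(shape[:axis] + [merged] + shape[end_axis + 1:],
                    dtype=np.int64)

# flatten_shape([1, 3, 227, 227], axis=0, end_axis=1) -> [3, 227, 227]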
Example #11
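A Kaldi pooling extractor: it reads the <PoolSize> token and then either <PoolStep> or <PoolStride> from the binary parameter stream, raising an error for anything else.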
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<PoolSize>')
        kernel = read_binary_integer32_token(pb)
        tag = find_next_tag(pb)
        if tag == '<PoolStep>':
            read_placeholder(pb, 1)
            stride = read_binary_integer32_token(pb)
            pool_step = stride
            pool_stride = read_token_value(pb, b'<PoolStride>')
        elif tag == '<PoolStride>':
            stride = 1
            pool_step = None
            read_placeholder(pb, 1)
            pool_stride = read_binary_integer32_token(pb)
        else:
            raise Error('Cannot extract parameters for {}'.format(node))

        mapping_rule = {
            'window': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'pool_stride': pool_stride,
            'pool_step': pool_step,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'pool_method': 'max',
        }
        mapping_rule.update(layout_attrs())
        Pooling.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #12
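A Caffe deconvolution extractor; it reuses convolution_param but forces the Deconvolution type and the Deconv2D op.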
    def extract(cls, node):
        proto_layer, model_layer = node.pb, node.model_pb

        if not proto_layer:
            raise Error('Protobuf layer cannot be empty')

        deconv_param = proto_layer.convolution_param

        params = conv_set_params(deconv_param, 'Deconv2D')
        attrs = conv_create_attrs(params)
        attrs.update({
            'type': 'Deconvolution',
            'op': 'Deconv2D',
            'get_group': lambda node: node.group,
            'get_output_feature_dim': lambda node: node.output,
            'input_feature_channel': 0,
            'output_feature_channel': 1,
        })

        # Embed weights and biases as attributes.
        # They will be moved to separate nodes in a dedicated pass.
        attrs.update(weights_biases(deconv_param.bias_term, model_layer))
        attrs.update(layout_attrs())

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #13
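An MXNet scale_shift extractor building ScaleShift node attributes; fix_gamma defaults to True.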
def scale_shift_ext(attrs):
    node_attrs = {
        'type': 'ScaleShift',
        'fix_gamma': attrs.bool("fix_gamma", True),
        'infer': batch_norm_4_infer
    }
    node_attrs.update(layout_attrs())
    return node_attrs
Example #14
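A companion MXNet helper for BatchNormalization attributes, with eps defaulting to 0.001 and fix_gamma to False.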
def batch_norm_ext(attrs):
    node_attrs = {
        'type': 'BatchNormalization',
        'eps': attrs.float('eps', 0.001),
        'infer': batch_norm_4_infer,
        'fix_gamma': attrs.bool('fix_gamma', False)
    }
    node_attrs.update(layout_attrs())
    return node_attrs
Example #15
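A later variant of the same helper, extended with a reverse_infer bypass through input port 0.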
def batch_norm_ext(attrs):
    node_attrs = {
        'type': 'BatchNormalization',
        'eps': attrs.float('eps', 0.001),
        'infer': batch_norm_4_infer,
        'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]),
        'fix_gamma': attrs.bool('fix_gamma', False)
    }
    node_attrs.update(layout_attrs())
    return node_attrs
Example #16
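A Kaldi convolution extractor: it reads the <PatchDim>/<PatchStep>/<PatchStride> tokens plus the weight matrix and bias vector, then validates that (patch_stride - kernel) is divisible by stride (so the patches tile the input exactly) and that the weight count matches the bias-derived output count.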
    def extract(cls, node: Node) -> bool:
        """
        Extract conv parameters from node.parameters.
        node.parameters is a file-descriptor-like object.
        :param node: Convolution node
        :return:
        """
        pb = node.parameters
        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        read_learning_info(pb)

        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to the `patch_stride` attribute of the Convolution layer. ' +
                refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' +
                        refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #17
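A Caffe pooling extractor covering global pooling, max/avg method selection, and the ceil/floor rounding choice; a sketch of the two rounding modes follows the code.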
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.pooling_param

        method = 'max'
        exclude_pad = True
        kernel = [0, 0]
        stride = [1, 1]
        padding = [0, 0]
        global_pooling = False

        if hasattr(param, 'global_pooling') and param.global_pooling:
            global_pooling = param.global_pooling
        else:
            kernel = get_spatial_attr(kernel, 'kernel_size', 'kernel', param)
            padding = get_spatial_attr(padding, 'pad', 'pad', param)
            stride = get_spatial_attr(stride, 'stride', 'stride', param)

        if param.pool == 0:
            method = 'max'
            exclude_pad = True
        elif param.pool == 1:
            method = 'avg'
            exclude_pad = False
        else:
            raise ValueError('Unknown Pooling Method!')

        pooling_convention = 'full'  # for Caffe, the rounding type defaults to ceil
        rt = 'ceil'

        if hasattr(param, 'ceil_mode') and not param.ceil_mode:
            # if the layer defines ceil_mode and it is False, use floor rounding for output shapes in partial_infer
            pooling_convention = 'valid'
            rt = 'floor'

        attrs = {
            'window': int64_array([1, 1, kernel[1], kernel[0]]),
            'stride': int64_array([1, 1, stride[1], stride[0]]),
            'pad': int64_array([[0, 0], [0, 0], [padding[1], padding[1]], [padding[0], padding[0]]]),
            'pad_spatial_shape': int64_array([[padding[1], padding[1]], [padding[0], padding[0]]]),
            'pool_method': method,
            'exclude_pad': exclude_pad,
            'global_pool': global_pooling,
            'output_spatial_shape': None,
            'rounding_type': rt
        }

        attrs.update(layout_attrs())
        attrs['pooling_convention'] = pooling_convention

        # update the attributes of the node
        Pooling.update_node_stat(node, attrs)
        return cls.enabled
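For context on rounding_type: Caffe computes each pooled spatial extent as ceil((input + 2 * pad - kernel) / stride) + 1, and the 'valid' convention above replaces ceil with floor. A tiny illustrative helper (pooled_extent is a hypothetical name) shows how the two choices diverge:

import math

def pooled_extent(input_size, pad, kernel, stride, rounding='ceil'):
    # 'ceil' mirrors Caffe's default; 'floor' matches the
    # pooling_convention == 'valid' branch above.
    rnd = math.ceil if rounding == 'ceil' else math.floor
    return int(rnd((input_size + 2 * pad - kernel) / stride)) + 1

# pooled_extent(224, 0, 3, 2, 'ceil')  == 112
# pooled_extent(224, 0, 3, 2, 'floor') == 111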
Example #18
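A one-attribute Caffe CTCGreedyDecoder extractor.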
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.ctc_decoder_param

        update_attrs = {
            'ctc_merge_repeated': int(param.ctc_merge_repeated)
        }

        mapping_rule = merge_attrs(param, update_attrs)

        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        CTCGreedyDecoderOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #19
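A Caffe ReorgYolo extractor that records only the stride.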
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.reorg_yolo_param

        stride = param.stride
        update_attrs = {
            'stride': stride,
        }
        mapping_rule = merge_attrs(param, update_attrs)

        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        ReorgYoloOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #20
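A Caffe PSROIPooling extractor picking up spatial_scale, output_dim, and group_size.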
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.psroi_pooling_param

        update_attrs = {
            'spatial_scale': param.spatial_scale,
            'output_dim': param.output_dim,
            'group_size': param.group_size,
        }

        mapping_rule = merge_attrs(param, update_attrs)

        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        PSROIPoolingOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #21
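An MXNet ROIPooling extractor; pooled_size arrives in (h, w) order, hence the index swap when filling pooled_w and pooled_h.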
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)

        spatial_scale = attrs.float("spatial_scale", None)
        pooled_size = attrs.tuple("pooled_size", int, (0, 0))
        data = {
            'type': 'ROIPooling',
            'spatial_scale': spatial_scale,
            'pooled_w': pooled_size[1],
            'pooled_h': pooled_size[0]
        }

        data.update(layout_attrs())

        # update the attributes of the node
        ROIPooling.update_node_stat(node, data)
        return cls.enabled
Example #22
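A Proposal shape-inference test: with a single output, the ROI blob shape is [batch * post_nms_topn, 5], five values per ROI (a batch index plus four box coordinates).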
    def test_proposal_infer_one_output(self):
        graph = build_graph(
            nodes_attributes, [('proposal_input', 'proposal'),
                               ('proposal', 'proposal_out_data_1'),
                               ('proposal_out_data_1', 'op_output')], {
                                   'proposal_input': {
                                       'shape': int64_array([1, 3, 227, 227])
                                   },
                                   'proposal': {
                                       'post_nms_topn': 2,
                                       **layout_attrs()
                                   }
                               })

        proposal_node = Node(graph, 'proposal')
        ProposalOp.proposal_infer(proposal_node)

        self.assertListEqual([1 * 2, 5],
                             list(graph.node['proposal_out_data_1']['shape']))
Example #23
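A Caffe PriorBox extractor; density, fixed_size, and fixed_ratio are optional in caffe.proto, so they are added only when actually present in the message's ListFields().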
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.prior_box_param

        variance = param.variance
        if len(variance) == 0:
            variance = [0.1]

        update_attrs = {
            'aspect_ratio': np.array(param.aspect_ratio),
            'min_size': np.array(param.min_size),
            'max_size': np.array(param.max_size),
            'flip': int(param.flip),
            'clip': int(param.clip),
            'variance': list(variance),
            'img_size': param.img_size,
            'img_h': param.img_h,
            'img_w': param.img_w,
            'step': param.step,
            'step_h': param.step_h,
            'step_w': param.step_w,
            'offset': param.offset,
        }

        # these params can be omitted in caffe.proto and in param as consequence,
        # so check if it is set or set to default
        fields = [field[0].name for field in param.ListFields()]
        if 'density' in fields:
            update_attrs['density'] = np.array(param.density)
        if 'fixed_size' in fields:
            update_attrs['fixed_size'] = np.array(param.fixed_size)
        if 'fixed_ratio' in fields:
            update_attrs['fixed_ratio'] = np.array(param.fixed_ratio)

        mapping_rule = merge_attrs(param, update_attrs)

        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        PriorBoxOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #24
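A Caffe PReLU extractor, including the optional filler parameters and a map from the variance_norm enum to symbolic names.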
    def extract(cls, node):
        proto_layer = node.pb
        pb_model = node.model_pb
        param = proto_layer.prelu_param

        update_attrs = {'channel_shared': int(param.channel_shared)}

        variance_norm_caffe_map = {
            0: 'caffe.FillerParameter.FAN_IN',
            1: 'caffe.FillerParameter.FAN_OUT',
            2: 'caffe.FillerParameter.AVERAGE'
        }

        if hasattr(param, 'filler'):
            update_attrs.update({
                'filler_type': param.filler.type,
                'filler_value': int(param.filler.value),
                'min': int(param.filler.min),
                'max': int(param.filler.max),
                'mean': int(param.filler.mean),
                'std': int(param.filler.std),
                'sparse': param.filler.sparse,
                'variance_norm': variance_norm_caffe_map[param.filler.variance_norm]
            })

        mapping_rule = merge_attrs(param, update_attrs)
        mapping_rule.update(weights_biases(False, pb_model))
        mapping_rule.update(layout_attrs())

        # update the attributes of the node
        PReLU.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #25
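A ReorgYolo inference test: with stride s, the op multiplies the channel count by s^2 and shrinks each spatial dimension by a factor of s, which is what the calculate_reorgyolo_output helper (not shown here) computes for the expected shape.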
    def test_reorgyolo_infer(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'reorg'), ('reorg', 'node_3'),
                               ('node_3', 'op_output')], {
                                   'node_3': {
                                       'shape': None,
                                       'value': None
                                   },
                                   'node_1': {
                                       'shape': np.array([1, 3, 227, 227]),
                                       'value': None
                                   },
                                   'reorg': {
                                       'stride': 2,
                                       **layout_attrs()
                                   }
                               })

        reorg_node = Node(graph, 'reorg')
        ReorgYoloOp.reorgyolo_infer(reorg_node)
        exp_shape = calculate_reorgyolo_output(np.array([1, 3, 227, 227]), 2)
        res_shape = graph.node['node_3']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])