Example #1
    def extract(cls, node):
        attrs = tf_create_attrs(node, 3, 4)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: 1,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([4, 3, 0, 1, 2]),
                                                            inv=int64_array([2, 3, 4, 1, 0]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
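
Note: these examples show only the extract() body. Below is a minimal sketch of the class that typically wraps such a body, assuming the OpenVINO Model Optimizer front-extractor pattern; the class name and import paths are illustrative and vary between releases. Inside the class body, __class__.op in the snippets above resolves to the op class attribute.

# Sketch of the surrounding extractor class; names and import paths are assumptions.
from mo.front.extractor import FrontExtractorOp   # assumed location of the base class
from mo.ops.convolution import Convolution        # assumed location of the Convolution op


class Conv3DFrontExtractor(FrontExtractorOp):      # hypothetical name
    op = 'Conv3D'    # the framework operation this extractor handles
    enabled = True   # registers the extractor with the framework loader

    @classmethod
    def extract(cls, node):
        attrs = {'op': cls.op}                     # build the full dict as in Example #1
        Convolution.update_node_stat(node, attrs)
        return cls.enabled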
Example #2
    def extract(cls, node):
        attr = get_mxnet_layer_attrs(node.symbol_dict)

        kernel = attr.tuple("kernel", int, None)
        stride = attr.tuple("stride", int,
                            tuple(np.ones(len(kernel), dtype=np.int64)))
        padding = attr.tuple("pad", int,
                             tuple(np.zeros(len(kernel), dtype=np.int64)))
        dilate = attr.tuple("dilate", int,
                            tuple(np.ones(len(kernel), dtype=np.int64)))
        group = attr.int("num_group", 1)
        output = attr.int("num_filter", None)
        bias_term = not attr.bool("no_bias", False)

        final_dilations = int64_array([1, 1, *dilate]) if dilate is not None else None

        node_attrs = {
            'op': __class__.op,
            'bias_addable': True,
            'bias_term': bias_term,
            'pad': int64_array([[0, 0], [0, 0],
                                *[[pad, pad] for pad in padding]]),
            'pad_spatial_shape': int64_array([[pad, pad] for pad in padding]),
            'dilation': final_dilations,
            'output_spatial_shape': None,
            'output_shape': None,
            'stride': int64_array([1, 1, *[s for s in stride]]),
            'group': group,
            'output': output,
            'kernel_spatial': int64_array([k for k in kernel]),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': None,
            'reshape_kernel': True,
            'spatial_dims': None,
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, node_attrs)
        return cls.enabled
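
As a quick illustration of how the MXNet tuples above expand into the NCHW-shaped attributes (values are made up; int64_array is assumed here to be a thin wrapper around np.array with dtype int64):

import numpy as np

def int64_array(x):
    return np.array(x, dtype=np.int64)   # stand-in for the helper used above (assumption)

kernel, stride, padding, dilate = (3, 3), (2, 2), (1, 1), (1, 1)
pad = int64_array([[0, 0], [0, 0], *[[p, p] for p in padding]])
# -> [[0 0] [0 0] [1 1] [1 1]]: no padding on N and C, one [before, after] pair per spatial dim
full_stride = int64_array([1, 1, *stride])     # -> [1 1 2 2]
full_dilation = int64_array([1, 1, *dilate])   # -> [1 1 1 1]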
Example #3
    def extract(cls, node):
        attrs = tf_create_attrs(node, 2, 2)
        attrs.update({
            'op': __class__.op,
            'kernel_spatial_idx': np.array([0, 1], dtype=np.int64),
            'get_group': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_output_feature_dim': lambda node: node.kernel_shape[-1] * node.kernel_shape[-2],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([2, 3, 0, 1]),
                                                            inv=int64_array([2, 3, 0, 1]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
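
The two lambdas above are consistent with a depthwise kernel laid out as [kH, kW, C_in, channel_multiplier] (the layout used by TensorFlow's DepthwiseConv2dNative); a small check with made-up numbers:

kernel_shape = [3, 3, 32, 2]     # 3x3 kernel, 32 input channels, channel multiplier 2 (made up)
output_feature_channel = 2       # the C_in axis in this layout

group = kernel_shape[output_feature_channel]                # 32: one group per input channel
output_feature_dim = kernel_shape[-1] * kernel_shape[-2]    # 2 * 32 = 64 output channels
assert (group, output_feature_dim) == (32, 64)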
Example #4
    def extract(cls, node):
        attrs = tf_create_attrs(node, 2, 3)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: node.group
                if 'group' in node and node.group is not None
                else node.in_node(0).shape[node.channel_dims] // node.kernel_shape[node.input_feature_channel],
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]),
                                                            inv=int64_array([2, 3, 1, 0]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
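
When the node carries no explicit group attribute, the get_group lambda above derives the group count from the shapes; a small check with made-up numbers:

in_channels = 64               # node.in_node(0).shape[node.channel_dims] (made up)
kernel_in_channels = 16        # node.kernel_shape[node.input_feature_channel] (made up)
group = in_channels // kernel_in_channels
assert group == 4              # i.e. a 4-group convolution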
Example #5
    def extract(cls, node: Node) -> bool:
        """
        Extract convolution parameters from node.parameters.
        node.parameters is a file-descriptor-like object.
        :param node: Convolution node
        :return: cls.enabled
        """
        pb = node.parameters
        read_learning_info(pb)

        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        appended_conv = False
        token = find_next_tag(pb)
        if token == '<AppendedConv>':
            appended_conv = True
            token = find_next_tag(pb)
        if token != '<FilterParams>':
            raise Error(
                'Cannot load token {} from the Kaldi model'.format(token) +
                refer_to_faq_msg(94))
        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to the `patch_stride` attribute of the Convolution layer. '
                + refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error(
                'Weights shape does not correspond to the `output` attribute of the Convolution layer. '
                + refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
            'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
            'dilation': int64_array([1, 1, 1, 1]),
            'kernel': int64_array([weights_shape[0], weights_shape[1] // kernel, 1, kernel]),
            'stride': int64_array([1, 1, 1, stride]),
            'kernel_spatial': int64_array([1, kernel]),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
            'appended_conv': appended_conv
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
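
The 'kernel' entry above re-expresses the flat Kaldi weight matrix as a 4D NCHW kernel; with illustrative (made-up) numbers:

weights_shape = (64, 33)   # 64 output channels, 33 weights per patch (made up)
kernel = 11                # <PatchDim>
kernel_4d = [weights_shape[0], weights_shape[1] // kernel, 1, kernel]
assert kernel_4d == [64, 3, 1, 11]   # [out channels, in channels, height 1, width <PatchDim>]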
Example #6
    def extract(cls, node):
        # Extract the pads attribute
        # If pads is not specified, the default is applied in the infer function
        pads = onnx_attr(node,
                         'pads',
                         'ints',
                         default=None,
                         dst_type=lambda x: int64_array(x))
        assert pads is None or len(pads) % 2 == 0
        final_pad = None
        if pads is not None:
            pads = pads.reshape([2, -1])
            pads = np.transpose(pads)
            final_pad = int64_array([[0, 0], [0, 0], *pads])

        # Extract the dilations attribute
        # If dilations is not specified, it defaults to 1 in the infer function
        dilations = onnx_attr(node,
                              'dilations',
                              'ints',
                              default=None,
                              dst_type=lambda x: int64_array(x))
        final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None

        # Extract the strides attribute
        # If strides is not specified, it defaults to 1 in the infer function
        strides = onnx_attr(node,
                            'strides',
                            'ints',
                            default=None,
                            dst_type=lambda x: int64_array(x))
        final_strides = int64_array([1, 1, *strides]) if strides is not None else None

        kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None)
        auto_pad = onnx_attr(node,
                             'auto_pad',
                             's',
                             default=None,
                             dst_type=get_onnx_autopad)
        group = onnx_attr(node,
                          'group',
                          'i',
                          default=1,
                          dst_type=lambda x: int64_array(x))

        attrs = {
            'op': __class__.op,
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,
            'pad': final_pad,
            'pad_spatial_shape': int64_array(pads) if pads is not None else None,
            'dilation': final_dilations,
            'output_spatial_shape': None,
            'output_shape': None,
            'stride': final_strides,
            'group': group,
            'output': None,
            'kernel_spatial': int64_array(kernel_shape) if kernel_shape is not None else None,
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': None,  # Will be calculated in infer function (np.array([2, 3]))
            'spatial_dims': None,        # Will be calculated in infer function
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW'
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
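
The pads handling above converts the ONNX layout [x1_begin, x2_begin, ..., x1_end, x2_end] into per-axis [begin, end] pairs; a worked example with made-up values:

import numpy as np

pads = np.array([1, 2, 3, 4], dtype=np.int64)   # ONNX order: h_begin, w_begin, h_end, w_end
pads = pads.reshape([2, -1])                     # [[1 2] [3 4]]
pads = np.transpose(pads)                        # [[1 3] [2 4]]: per-axis [begin, end]
final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)
# -> [[0 0] [0 0] [1 3] [2 4]]: no padding on N and C, then the H and W pads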
Example #7
    def extract(cls, node):
        pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array)
        auto_pad = onnx_attr(node,
                             'auto_pad',
                             's',
                             default=None,
                             dst_type=get_onnx_autopad)

        if pads is not None:
            if len(pads) % 2 != 0:
                raise Error(
                    'ConvTranspose node {} specifies pads = {} which has an odd number of elements. The model is not correct.',
                    node.soft_get('name'), pads)
            pads = pads.reshape([2, -1])
            pads = np.transpose(pads)

        final_pads = int64_array([[0, 0], [0, 0], *pads]) if pads is not None else None

        dilations = onnx_attr(node, 'dilations', 'ints', default=None)
        final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None

        strides = onnx_attr(node, 'strides', 'ints', default=None)
        final_strides = int64_array([1, 1, *strides]) if strides is not None else None

        kernel_shape = onnx_attr(node,
                                 'kernel_shape',
                                 'ints',
                                 dst_type=int64_array)

        if kernel_shape is None:
            raise Error(
                'ConvTranspose node {} does not have an explicitly defined kernel_shape. It is not supported.',
                node.soft_get('name'))

        output_padding = onnx_attr(node,
                                   'output_padding',
                                   'ints',
                                   default=None)
        final_output_padding = int64_array([0, 0, *output_padding]) if output_padding is not None else None

        output_shape = onnx_attr(node,
                                 'output_shape',
                                 'ints',
                                 default=None,
                                 dst_type=int64_array)

        attrs = {
            'type': 'Deconvolution',
            'op': 'Deconv2D',
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,  # will be deduced later; not really needed
            'pad': final_pads,
            'dilation': final_dilations,
            'output_spatial_shape': output_shape,
            'original_output_spatial_shape': output_shape,
            'output_shape': None,
            'output_padding': final_output_padding,
            'stride': final_strides,
            'group': onnx_attr(node, 'group', 'i', default=1),
            'output': None,
            'spatial_dims': None,  # Will be calculated in infer function
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'get_pad': ConvTransposeFrontExtractor.get_pad,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel] * node.group,
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
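
ONNX ConvTranspose weights are laid out as [C_in, C_out / group, kH, kW], which is why get_output_feature_dim above multiplies by group; a small check with a hypothetical stand-in node and made-up numbers:

class FakeNode:                        # hypothetical stand-in for a Model Optimizer Node
    kernel_shape = [64, 16, 3, 3]      # C_in=64, C_out/group=16, 3x3 kernel (made up)
    output_feature_channel = 1
    group = 4

get_output_feature_dim = lambda node: node.kernel_shape[node.output_feature_channel] * node.group
assert get_output_feature_dim(FakeNode()) == 64   # 16 * 4 actual output channels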