Example 1
def test_max_pool_normal():
    filt = PoolFilterDim(2, 2)
    stride = StrideDim(1)
    pad = PadDim(0)
    params = PoolingParameters("test",
                               filt=filt,
                               stride=stride,
                               padding=pad,
                               pool_type="max")
    input_ = np.arange(9).reshape([1, 3, 3])
    in_dims = Dim.named(c=1, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = max_pool(params, in_dims, out_dims[0], input_)
    assert np.array_equal(output_, [[[4, 5], [7, 8]]])
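
The expected result can be checked by hand: a 2x2 max pool with stride 1 and no padding over arange(9) arranged as a 3x3 map slides four windows, each keeping its maximum. A minimal pure-NumPy sketch of that arithmetic, independent of the framework under test:

import numpy as np

x = np.arange(9).reshape(3, 3)  # [[0 1 2] [3 4 5] [6 7 8]]
# 2x2 windows, stride 1, no padding: output is (3-2+1) x (3-2+1) = 2x2
out = np.array([[x[i:i + 2, j:j + 2].max() for j in range(2)]
                for i in range(2)])
assert np.array_equal(out, [[4, 5], [7, 8]])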
Example 2
def add_mean(G,
             tensors,
             name,
             subgraph,
             op_name,
             op,
             load_tensors=False,
             dequantize=False):
    check(op.InputsLength() == 2,
          "MEAN expects 2 inputs, got: " + str(op.InputsAsNumpy()))
    mean_dims = get_tensor(G.model, tensors, subgraph, op, 1, dequantize=False)
    if len(mean_dims) != 2 or mean_dims[0] != 1 or mean_dims[1] != 2:
        LOG.warning(
            "MEAN operator seen but can't convert to global average pool")
        return add_unconverted(G, name, subgraph, op_name, op, load_tensors,
                               dequantize)
    else:
        LOG.info("MEAN operator converted to global average pool")

    inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    check(inp['n'] == 1, "Multi batch not supported")

    return add_node(
        G,
        PoolingParameters(name,
                          filt=PoolFilterDim(inp['h'], inp['w']),
                          stride=StrideDim(1, 1),
                          padding=PadDim.valid(),
                          pool_type="average",
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']])))
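
The conversion is valid because a MEAN over axes [1, 2] of an NHWC tensor, exactly what the axes check above accepts, reduces the full h x w plane to one value per channel, the same result a global average pool produces. A quick NumPy check of that equivalence:

import numpy as np

x = np.random.rand(1, 4, 4, 8)               # NHWC
mean_out = x.mean(axis=(1, 2))               # MEAN over h and w: shape (1, 8)
gap_out = x.reshape(1, -1, 8).mean(axis=1)   # average over every h*w position
assert np.allclose(mean_out, gap_out)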
Example 3
def add_pool(G,
             tensors,
             name,
             subgraph,
             op_name,
             op,
             load_tensors=False,
             dequantize=False):
    pool_opts = Pool2DOptions.Pool2DOptions()
    pool_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)
    pad = get_tf_padding(pool_opts.Padding())
    pool_type = TF_POOL_OPS[op_name]

    inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    check(inp['n'] == 1, "Multi batch not supported")

    node = PoolingParameters(name,
                             filt=PoolFilterDim(pool_opts.FilterHeight(),
                                                pool_opts.FilterWidth()),
                             stride=StrideDim(pool_opts.StrideH(),
                                              pool_opts.StrideW()),
                             padding=pad,
                             pool_type=pool_type,
                             in_dims_hint=SparseList([['h', 'w', 'c']]),
                             out_dims_hint=SparseList([['h', 'w', 'c']]))

    return fuse_activation(G, pool_opts, name, node)
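
get_tf_padding maps the TFLite padding enum onto a PadDim. For reference, TensorFlow's documented conventions for the resulting output size are: SAME pads so that the output is ceil(in / stride), while VALID keeps only fully covered windows. A small sketch of those formulas (the function name here is illustrative, not from the source):

import math

def tf_pool_out_size(in_size, filt, stride, padding):
    # TensorFlow convention: SAME -> ceil(in / stride),
    # VALID -> ceil((in - filt + 1) / stride)
    if padding == "SAME":
        return math.ceil(in_size / stride)
    return math.ceil((in_size - filt + 1) / stride)

assert tf_pool_out_size(5, 2, 2, "SAME") == 3
assert tf_pool_out_size(5, 2, 2, "VALID") == 2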
Example 4
def test_max_pool_q():
    filt = PoolFilterDim(2, 2)
    stride = StrideDim(1)
    pad = PadDim(0)
    params = PoolingParameters("test",
                               filt=filt,
                               stride=stride,
                               padding=pad,
                               pool_type="max")
    in_q = QType(16, 0, True)
    qrec = QuantizationRecord([in_q], [in_q])
    input_ = in_q.quantize(np.arange(9)).reshape([1, 3, 3])
    in_dims = Dim.named(c=1, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = max_pool(params, in_dims, out_dims[0], input_)
    output_ = in_q.dequantize(output_)
    assert np.array_equal(output_, [[[4, 5], [7, 8]]])
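
QType(16, 0, True) reads as a signed 16-bit fixed-point type with 0 fractional bits, so quantization scales by 2^0 = 1 and small integers round-trip exactly; that is why this test expects the same output as the float version. A hedged sketch of that Q-format arithmetic (the constructor semantics are an assumption based on the Q-format naming, not taken from this file):

import numpy as np

# Assumed semantics: scale by 2**frac_bits on quantize, divide on dequantize.
def quantize(x, frac_bits, dtype=np.int16):
    return np.round(np.asarray(x) * (1 << frac_bits)).astype(dtype)

def dequantize(q, frac_bits):
    return q.astype(np.float64) / (1 << frac_bits)

q = quantize(np.arange(9), 0)   # frac_bits = 0: integers pass through unchanged
assert np.array_equal(dequantize(q, 0), np.arange(9))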
Example 5
    def pool(cls, node, pool_type=None, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_feature_shape = x_shape[2:]
        in_c = x_shape[1]

        kernel_shape = node.attrs["kernel_shape"]
        spatial_size = len(kernel_shape)
        x_rank = spatial_size + 2
        if spatial_size != 2:
            raise ValueError(
                f"{valid_name} with {x_rank}D input is not supported")

        h = x_shape[2]
        w = x_shape[3]

        strides = node.attrs.get("strides", [1] * spatial_size)
        stride_is_one = all(stride == 1 for stride in strides)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        if any(dilation > 1 for dilation in dilations):
            raise ValueError(valid_name + " with dilation not supported")
        # ceil_mode = bool(node.attrs.get("ceil_mode", 0))
        pad_dim = cls.calc_pad_dim(node, spatial_size)
        # Note: This needs to check dilation if it is added
        filter_matches_input = (all(
            k_dim >= (x_dim + pad) for k_dim, x_dim, pad in zip(
                kernel_shape, x_feature_shape, [pad_dim.h, pad_dim.w])))

        if filter_matches_input and stride_is_one:
            params = GlobalPoolParameters(valid_name,
                                          pool_type=pool_type,
                                          axis=[1, 2],
                                          keep_dims=True,
                                          in_dims_hint=[['c', 'h', 'w']],
                                          out_dims_hint=[['c', 'h', 'w']])
        else:
            params = PoolingParameters(
                valid_name,
                filt=PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                stride=StrideDim(strides[0], strides[1]),
                padding=pad_dim,
                pool_type=pool_type,
                in_dims_hint=[['c', 'h', 'w']],
                out_dims_hint=[['c', 'h', 'w']])

        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
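
The `filter_matches_input and stride_is_one` test above rests on a simple observation: a pooling window that covers the whole (padded) feature map with stride 1 emits exactly one value per channel, which is what a global pool computes, so the handler can substitute the cheaper GlobalPoolParameters node. A NumPy check of the equivalence for max pooling:

import numpy as np

x = np.random.rand(3, 4, 5)                 # CHW feature map
windowed = x.max(axis=(1, 2))               # one 4x5 window, stride 1
global_pool = x.reshape(3, -1).max(axis=1)  # explicit global max pool
assert np.allclose(windowed, global_pool)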
Example 6
def add_pool(out_graph, routes, idx, l):
    pool_type = DARKNET_POOL_TYPES[l['type']]
    node_name = "{}_{}".format(l['type'], idx)
    if pool_type == "average":
        # Darknet average pool averages entire channel
        # Pool_w and _h set to None indicates that they must be computed
        # when the network sizes are computed
        routes['in'][idx], routes['out'][idx] =\
            out_graph.add_operator(node_name,\
                PoolingParameters(None, PoolFilterDim(1), StrideDim(0), pool_type=pool_type))
    else:
        stride = get_int(l, 'stride', default=1)
        size = get_int(l, 'size', default=stride)
        padding = get_int(l, 'padding', default=size - 1)
        pad = split_pad(padding)
        routes['in'][idx], routes['out'][idx] = out_graph.add_operator(
            node_name,
            PoolingParameters(None, PoolFilterDim(size), StrideDim(stride),
                              pad, pool_type=pool_type))

    return True
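
Darknet's defaults chain together: size falls back to stride, and the total padding falls back to size - 1, which split_pad then distributes between the two sides of each spatial dimension. A hedged sketch of that split (the even/odd behaviour of split_pad is an assumption; only its call site appears in the source):

# Assumption: the total padding is split between the two sides,
# with any odd pixel going to the trailing edge.
def split_pad(padding):
    return padding // 2, padding - padding // 2

assert split_pad(2) == (1, 1)   # size=3 -> padding=2, split evenly
assert split_pad(3) == (1, 2)   # size=4 -> padding=3, extra pixel trails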
Example 7
    def pool2d(cls, node, pool_type=None, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        opts = kwargs['opts']
        node_opts = node.get_options(Pool2DOptions)

        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x = cls.remove_known_batch_dimension(G, x, node)
        x_shape = x[2].shape

        in_b, h, w, in_c = tuple(x_shape)

        filt_h = node_opts.FilterHeight()
        filt_w = node_opts.FilterWidth()
        stride_h = node_opts.StrideH()
        stride_w = node_opts.StrideW()

        pad = cls.get_tf_padding(node_opts.Padding())

        filter_matches_input = h == filt_h and w == filt_w
        stride_is_one = stride_h == 1 and stride_w == 1

        if filter_matches_input and stride_is_one:
            params = GlobalPoolParameters(node.name,
                                          pool_type=pool_type,
                                          axis=[0, 1],
                                          keep_dims=True,
                                          in_dims_hint=[['h', 'w', 'c']],
                                          out_dims_hint=[['h', 'w', 'c']])
        else:
            params = PoolingParameters(node.name,
                                       filt=PoolFilterDim(filt_h, filt_w),
                                       stride=StrideDim(stride_h, stride_w),
                                       padding=pad,
                                       pool_type=pool_type,
                                       in_dims_hint=[['h', 'w', 'c']],
                                       out_dims_hint=[['h', 'w', 'c']])

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params

    def pool(cls, node, pool_type=None, copy_qtype=False, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_feature_shape = x_shape[2:]
        input_rank = len(x_feature_shape)
        in_c = x_shape[1]

        kernel_shape = node.attrs["kernel_shape"]
        kernel_rank = len(kernel_shape)
        if input_rank != kernel_rank:
            raise ValueError(
                f'error in ONNX graph. {pool_type} pool {valid_name} '
                f'has a different input spatial rank {input_rank} to kernel rank {kernel_rank}'
            )
        spatial_size = kernel_rank
        if kernel_rank > 2:
            raise NotImplementedError(
                f'{pool_type} pool {valid_name} is a {kernel_rank}D pool '
                'which is not supported by NNTOOL')

        strides = node.attrs.get("strides", [1] * spatial_size)
        stride_is_one = all(stride == 1 for stride in strides)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        if any(dilation > 1 for dilation in dilations):
            raise ValueError(valid_name + " with dilation not supported")
        # ceil_mode = bool(node.attrs.get("ceil_mode", 0))
        pad_dim = cls.calc_pad_dim(node, 2)

        if spatial_size == 1:
            strides = [1] + strides
            dilations = [1] + dilations
            kernel_shape = [1] + kernel_shape
            h = 1
            w = x_shape[2]
            x_feature_shape = [1] + x_feature_shape
        else:
            h = x_shape[2]
            w = x_shape[3]

        if pad_dim.is_same:
            pad_dim.calculate_same(
                Dim.named_ordered(h=h, w=w),
                PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                StrideDim(strides[0], strides[1]))
        # Note: This needs to check dilation if it is added
        filter_matches_input = (all(
            k_dim >= (x_dim + pad) for k_dim, x_dim, pad in zip(
                kernel_shape, x_feature_shape, [pad_dim.h, pad_dim.w])))

        if filter_matches_input and stride_is_one:
            params = GlobalPoolingParameters(valid_name,
                                             pool_type=pool_type,
                                             axis=[1, 2],
                                             keep_dims=True,
                                             in_dims_hint=[['c', 'h', 'w']],
                                             out_dims_hint=[['c', 'h', 'w']])
        else:
            params = PoolingParameters(
                valid_name,
                filt=PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                stride=StrideDim(strides[0], strides[1]),
                padding=pad_dim,
                pool_type=pool_type,
                in_dims_hint=[['c', 'h', 'w']],
                out_dims_hint=[['c', 'h', 'w']])

        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims,
                                     x[3] if copy_qtype else None)
        return params
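
Note the axis difference between the two handlers: the TFLite version works on h, w, c hints and reduces over axis=[0, 1], while this ONNX version works on c, h, w hints and reduces over axis=[1, 2]; both name the same spatial dimensions. A quick NumPy illustration:

import numpy as np

x_hwc = np.random.rand(4, 5, 3)          # TFLite layout: h, w, c
x_chw = np.transpose(x_hwc, (2, 0, 1))   # ONNX layout: c, h, w
# The spatial axes are [0, 1] in HWC and [1, 2] in CHW; both reductions
# yield the same per-channel averages.
assert np.allclose(x_hwc.mean(axis=(0, 1)), x_chw.mean(axis=(1, 2)))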