Example #1
def two_conv_graph():
    G = NNGraph(name='two_conv_graph')
    ti = G.add_input(Dim.unnamed([10, 10, 2]))
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    to = G.add_output()
    G.add_edge(NNEdge(ti, n1))
    G.add_edge(NNEdge(n1, n2))
    G.add_edge(NNEdge(n2, to))
    G.add_dimensions()
    yield G
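A quick way to sanity-check this fixture: with zero padding and stride 1, each 3x3 convolution shrinks the spatial dims by 2, so the 10x10 input becomes 8x8 after node1 and 6x6 after node2. A minimal sketch of that arithmetic (plain Python, independent of nntool):

def conv_out_size(in_size, kernel, stride=1, pad=0):
    # standard convolution output-size formula
    return (in_size + 2 * pad - kernel) // stride + 1

assert conv_out_size(10, 3) == 8                      # after node1
assert conv_out_size(conv_out_size(10, 3), 3) == 6    # after node2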
Example #2
def add_pool(G,
             tensors,
             name,
             subgraph,
             op_name,
             op,
             load_tensors=False,
             dequantize=False):
    pool_opts = Pool2DOptions.Pool2DOptions()
    pool_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)
    pad = get_tf_padding(pool_opts.Padding())
    pool_type = TF_POOL_OPS[op_name]

    inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    check(inp['n'] == 1, "Multi batch not supported")

    node = PoolingParameters(name,
                             filt=PoolFilterDim(pool_opts.FilterHeight(),
                                                pool_opts.FilterWidth()),
                             stride=StrideDim(pool_opts.StrideH(),
                                              pool_opts.StrideW()),
                             padding=pad,
                             pool_type=pool_type,
                             in_dims_hint=SparseList([['h', 'w', 'c']]),
                             out_dims_hint=SparseList([['h', 'w', 'c']]))

    return fuse_activation(G, pool_opts, name, node)
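For context, get_tf_padding has to realise TensorFlow's SAME/VALID semantics. A hedged sketch of the SAME computation (the helper name and before/after split are assumptions; the formula is TensorFlow's documented one):

import math

def tf_same_pad(in_size, kernel, stride):
    # TF SAME: output size is ceil(in / stride); pad the difference,
    # placing any odd extra pixel at the end
    out_size = math.ceil(in_size / stride)
    total = max((out_size - 1) * stride + kernel - in_size, 0)
    return total // 2, total - total // 2

assert tf_same_pad(10, 3, 1) == (1, 1)
assert tf_same_pad(10, 3, 2) == (0, 1)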
Example #3
def test_conf2d_q2(caplog):
    caplog.set_level(logging.INFO)
    weights_q = QType(16, 1, True)
    weights = weights_q.quantize(np.full([1, 1, 2, 2], 1.0))
    filt = Conv2DFilterDim(2, 2, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.valid()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    in_q = QType(16, 0, True)
    calc_q = QType(weights_q.bits + in_q.bits, weights_q.q + in_q.q, True)
    qrec = FilterQuantizationRecord(in_qs=[in_q],
                                    out_qs=[in_q],
                                    weights_q=weights_q,
                                    acc_q=calc_q,
                                    calc_q=calc_q)
    input_ = in_q.quantize(np.full([1, 2, 2], 1.0))
    in_dims = Dim.named(c=1, h=2, w=2).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params,
                     in_dims,
                     out_dims[0],
                     input_,
                     weights,
                     None,
                     qrec=qrec)
    output_ = in_q.dequantize(output_)
    assert np.array_equal(output_, [[[4.]]])
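The expected 4.0 follows from the fixed-point formats. Assuming QType(bits, q, signed) is a plain Qm.n type whose quantize scales by 2**q, the all-ones input (q=0) stays 1, the all-ones weights (q=1) become 2, the 2x2 accumulation gives 8 in q=1, and dequantizing yields 4.0:

import numpy as np

def quantize(x, q):   # minimal Qm.n sketch; an assumption about QType
    return np.round(np.asarray(x) * (1 << q)).astype(np.int64)

inp = quantize(np.full(4, 1.0), 0)   # -> 1, 1, 1, 1
wts = quantize(np.full(4, 1.0), 1)   # -> 2, 2, 2, 2
acc = int((inp * wts).sum())         # 8, in q = 0 + 1
assert acc / (1 << 1) == 4.0         # dequantized result matches the test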
Example #4
def test_conf2d_normal():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    details = {}
    output_ = conv2d(params,
                     in_dims,
                     out_dims[0],
                     input_,
                     weights,
                     None,
                     details=details)
    # assert details['max_acc'] == 438.0 and details['min_acc'] == 258.0
    assert np.array_equal(output_, [[[258, 294], [402, 438]]])
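The expected matrix can be reproduced independently: it is the valid cross-correlation of the 4x4 ramp with the 3x3 ramp (conv2d here, as in most NN frameworks, does not flip the kernel):

import numpy as np

def xcorr2d_valid(x, k):
    kh, kw = k.shape
    return np.array([[np.sum(x[i:i + kh, j:j + kw] * k)
                      for j in range(x.shape[1] - kw + 1)]
                     for i in range(x.shape[0] - kh + 1)])

x = np.arange(16).reshape(4, 4)
k = np.arange(9).reshape(3, 3)
assert np.array_equal(xcorr2d_valid(x, k), [[258, 294], [402, 438]])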
Example #5
def add_mean(G,
             tensors,
             name,
             subgraph,
             op_name,
             op,
             load_tensors=False,
             dequantize=False):
    check(op.InputsLength() == 2,
          "Very odd " + str(op.InputsAsNumpy()))
    mean_dims = get_tensor(G.model, tensors, subgraph, op, 1, dequantize=False)
    if len(mean_dims) != 2 or mean_dims[0] != 1 or mean_dims[1] != 2:
        LOG.warning(
            "MEAN operator seen but can't convert to global average pool")
        return add_unconverted(G, name, subgraph, op_name, op, load_tensors,
                               dequantize)
    else:
        LOG.info("MEAN operator converted to global average pool")

    inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    check(inp['n'] == 1, "Multi batch not supported")

    return add_node(
        G,
        PoolingParameters(name,
                          filt=PoolFilterDim(inp['h'], inp['w']),
                          stride=StrideDim(1, 1),
                          padding=PadDim.valid(),
                          pool_type="average",
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']])))
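The conversion is sound because a MEAN over the two spatial axes of an HWC tensor is exactly a global average pool, one scalar per channel (the [1, 2] check above is matching those NHWC axes):

import numpy as np

x = np.arange(2 * 3 * 4, dtype=np.float64).reshape(2, 3, 4)  # h, w, c
gap = x.mean(axis=(0, 1))   # average each channel over h and w
assert gap.shape == (4,)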
Example #6
def test_conf2d_depth():
    # TF Lite depthwise convolution
    weights = np.arange(9).reshape([3, 3])
    weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
    filt = Conv2DFilterDim(3, 3, 2,
                           1).impose_order(["in_c", "h", "w", "out_c"])
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              groups=1,
                              multiplier=2,
                              tf_depthwise=True,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output1 = conv2d(params, in_dims, out_dims[0], input_, weights, None)
    assert np.array_equal(output1,
                          [[[258, 294], [402, 438]], [[258, 294], [402, 438]]])
    output2 = conv2d(params,
                     in_dims,
                     out_dims[0],
                     input_,
                     weights,
                     None,
                     allow_faster=False)
    assert np.array_equal(output1, output2)
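The weight construction relies on np.repeat interleaving copies along the flattened array, which lands both multiplier channels in the last axis of the [in_c, h, w, out_c] layout; the two channels are identical, which is why both outputs match the test_conf2d_normal result:

import numpy as np

w = np.repeat(np.arange(4).reshape(2, 2), 2).reshape(1, 2, 2, 2)
assert np.array_equal(w[0, :, :, 0], w[0, :, :, 1])   # multiplier copies agree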
Example #7
def actfusion_graph():
    G = NNGraph(name='actfusion_graph')
    ti1 = G.add_input(Dim.unnamed([10, 10, 2])).name
    ti2 = G.add_input(Dim.unnamed([10, 10, 2])).name
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    n1a = ReluActivationParameters("node1a")
    G.add_node(n1a)
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    n3 = MatrixAddParameters("node3")
    G.add_node(n3)
    n4 = ReluActivationParameters("node4")
    G.add_node(n4)
    to = G.add_output()
    G.add_edge(NNEdge(ti1, n1))
    G.add_edge(NNEdge(n1, n1a))
    G.add_edge(NNEdge(ti2, n2))
    G.add_edge(NNEdge(n1a, n3, to_idx=0))
    G.add_edge(NNEdge(n2, n3, to_idx=1))
    G.add_edge(NNEdge(n3, n4))
    G.add_edge(NNEdge(n4, to))
    G.add_dimensions()
    yield G
Example #8
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(TransposeConvOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[2]
        x_shape = x[2].shape
        in_b, in_h, in_w, in_c = tuple(x_shape)
        pout_shape = [
            dim if x_shape[idx] is not None else None
            for idx, dim in enumerate(cls.get_constant(inputs[0]))
        ]
        out_b, out_h, out_w, out_c = tuple(pout_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        stride_w = node_opts.StrideW()
        stride_h = node_opts.StrideH()
        # compute padding
        pad = node_opts.Padding()
        if pad == Padding.SAME:
            pad_h = ((in_h - 1) * stride_h + filt_h - out_h)
            pad_w = ((in_w - 1) * stride_w + filt_w - out_w)
            pad_top = pad_h // 2
            pad_left = pad_w // 2
            pad = PadDim(pad_top,
                         pad_h - pad_top,
                         pad_left,
                         pad_w - pad_left,
                         same_type='balanced_right')
        else:
            pad = PadDim(0)

        params = TransposeConv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(stride_h, stride_w),
            padding=pad,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy()],
            out_dims_hint=[['h', 'w', 'c']])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        pout_dims = ProvisionalDim(pout_shape)

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
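A worked instance of the SAME-padding arithmetic above, for an assumed stride-2, 3x3 filter upsampling 16 to 32: the total pad is (16 - 1) * 2 + 3 - 32 = 1, split as 0 before and 1 after, matching the 'balanced_right' split:

in_h, stride_h, filt_h, out_h = 16, 2, 3, 32
pad_h = (in_h - 1) * stride_h + filt_h - out_h   # 1
pad_top = pad_h // 2                             # 0
assert (pad_top, pad_h - pad_top) == (0, 1)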
Example #9
def add_convolution(out_graph, routes, idx, l):
    activation = get_str(l, 'activation', default="logistic")
    node_name = "{}_{}".format(l['type'], idx)
    routes['in'][idx] = node_name
    padding = l.get("padding")
    pad = l.get("pad")
    size = get_int(l, 'size', 1)
    groups = get_int(l, 'groups', 1)
    filters_c = get_int(l, 'filters', 1)
    stride = get_int(l, 'stride', 1)
    batch_normalize = get_int(l, 'batch_normalize', 0)
    flipped = get_int(l, 'flipped', 0)
    custom = {'batch_normalize': batch_normalize == 1, 'flipped': flipped == 1}

    assert 'binary' not in l, "Binary convolutions are not implemented"
    assert 'xnor' not in l, "XNOR convolutions are not implemented"
    assert 'dot' not in l, "dot is not implemented"

    # padding calculation as per Darknet code
    if pad is not None:
        padding = int(size / 2)
    if padding is None:
        padding = 0

    if activation is None:
        routes['in'][idx], routes['out'][idx] = out_graph.add_operator(
            node_name,
            Conv2DParameters(Conv2DFilterDim(size, size, filters_c),
                             StrideDim(stride), PadDim(padding),
                             groups=groups, custom=custom, has_bias=True))
    else:
        activation = DARKNET_ACTIVATION_TYPES[activation]
        routes['in'][idx], routes['out'][idx] = out_graph.add_operators(
            node_name,
            [
                Conv2DParameters(Conv2DFilterDim(size, size, filters_c),
                                 StrideDim(stride), PadDim(padding),
                                 groups=groups, custom=custom, has_bias=True),
                ActivationParameters(activation)
            ])

    return True
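The padding logic mirrors Darknet's convention, as the comment above notes: a truthy 'pad' key overrides any explicit 'padding' value with size // 2, which is SAME-like for odd kernels:

for size, expected in [(1, 0), (3, 1), (5, 2)]:
    assert int(size / 2) == expected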
Example #10
    def conv(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        # input N x C x H x W
        x = inputs[0]
        x_rank = len(x[2].shape)
        x_shape = x[2].shape
        spatial_size = x_rank - 2
        assert spatial_size <= 2, "only 1D and 2D convolutions supported"

        # M x C/group x kH x kW
        weights = cls.get_constant(inputs[1])
        out_c = weights.shape[0]
        group = node.attrs.get("group", 1)
        in_c = x_shape[1]
        filt_in_c = in_c // group
        # kernel is [kW] for 1D convs and [kH, kW] for 2D
        filt_h = 1 if spatial_size <= 1 else weights.shape[-2]
        filt_w = weights.shape[-1]
        h = 1 if spatial_size <= 1 else x_shape[2]
        w = 1 if spatial_size == 0 else (x_shape[2] if spatial_size == 1 else x_shape[3])

        filt_dim = Conv2DFilterDim(filt_h, filt_w,
                                   out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

        if len(inputs) > 2:
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([out_c])

        dilations = cls.pad_start_with(node.attrs.get("dilations", [1] * spatial_size), [1], 2)
        strides = cls.pad_start_with(node.attrs.get("strides", [1] * spatial_size), [1], 2)
        pad_dim = cls.calc_pad_dim(node, spatial_size)

        params = Conv2DParameters(valid_name,
                                  filt=filt_dim,
                                  stride=StrideDim(strides[0],
                                                   strides[1]),
                                  dilation=DilationDim(dilations[0],
                                                       dilations[1]),
                                  groups=group,
                                  padding=pad_dim,
                                  has_bias=True,
                                  in_dims_hint=SparseList([['c', 'h', 'w']]),
                                  out_dims_hint=SparseList([['c', 'h', 'w']]),
                                  constant_store=G.constant_store)
        params.weights = weights
        params.biases = biases
        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
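pad_start_with is presumably left-padding the attribute lists up to 2D form, so 1D attrs [s] become [1, s]. A hedged sketch of that behaviour, the helper name and semantics being assumptions inferred from its call sites:

def pad_start_with(vals, pad, length):
    # left-pad vals with copies of pad until it is at least length long
    return pad * max(length - len(vals), 0) + list(vals)

assert pad_start_with([2], [1], 2) == [1, 2]      # 1D stride lifted to 2D
assert pad_start_with([2, 3], [1], 2) == [2, 3]   # 2D attrs pass through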
Example #11
    def pool(cls, node, pool_type=None, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_feature_shape = x_shape[2::]
        in_c = x_shape[1]

        kernel_shape = node.attrs["kernel_shape"]
        spatial_size = len(kernel_shape)
        x_rank = spatial_size + 2
        if spatial_size != 2:
            raise ValueError(valid_name + " with {}D input".format(x_rank))

        h = x_shape[2]
        w = x_shape[3]

        strides = node.attrs.get("strides", [1] * spatial_size)
        stride_is_one = all(stride == 1 for stride in strides)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        if any(dilation > 1 for dilation in dilations):
            raise ValueError(valid_name + " with dilation not supported")
        # ceil_mode = bool(node.attrs.get("ceil_mode", 0))
        pad_dim = cls.calc_pad_dim(node, spatial_size)
        # Note: This needs to check dilation if it is added
        filter_matches_input = (all(
            k_dim >= (x_dim + pad) for k_dim, x_dim, pad in zip(
                kernel_shape, x_feature_shape, [pad_dim.h, pad_dim.w])))

        if filter_matches_input and stride_is_one:
            params = GlobalPoolParameters(valid_name,
                                          pool_type=pool_type,
                                          axis=[1, 2],
                                          keep_dims=True,
                                          in_dims_hint=[['c', 'h', 'w']],
                                          out_dims_hint=[['c', 'h', 'w']])
        else:
            params = PoolingParameters(
                valid_name,
                filt=PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                stride=StrideDim(strides[0], strides[1]),
                padding=pad_dim,
                pool_type=pool_type,
                in_dims_hint=[['c', 'h', 'w']],
                out_dims_hint=[['c', 'h', 'w']])

        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
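The GlobalPoolParameters branch exploits that a kernel covering the whole (padded) feature map at stride 1 degenerates into one reduction per channel; for average pooling that is just a mean over the spatial axes:

import numpy as np

x = np.arange(2 * 4 * 4, dtype=np.float64).reshape(2, 4, 4)  # c, h, w
out = x.mean(axis=(1, 2), keepdims=True)   # one value per channel
assert out.shape == (2, 1, 1)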
Example #12
def add_pool(out_graph, routes, idx, l):
    pool_type = DARKNET_POOL_TYPES[l['type']]
    node_name = "{}_{}".format(l['type'], idx)
    if pool_type == "average":
        # Darknet average pool averages entire channel
        # Pool_w and _h set to None indicates that they must be computed
        # when the network sizes are computed
        routes['in'][idx], routes['out'][idx] = out_graph.add_operator(
            node_name,
            PoolingParameters(None, PoolFilterDim(1), StrideDim(0),
                              pool_type=pool_type))
    else:
        stride = get_int(l, 'stride', default=1)
        size = get_int(l, 'size', default=stride)
        padding = get_int(l, 'padding', default=size - 1)
        pad = split_pad(padding)
        routes['in'][idx], routes['out'][idx] = out_graph.add_operator(
            node_name,
            PoolingParameters(PoolFilterDim(size), StrideDim(stride), pad,
                              pool_type=pool_type))

    return True
Example #13
def test_paddim():
    dim1 = PadDim(1)
    assert not dim1.is_same
    assert dim1.h == 2 and dim1.w == 2
    assert dim1.l == 1 and dim1.r == 1 and dim1.t == 1 and dim1.b == 1
    assert dim1.numpy_pad_shape(Dim.named_ordered(w=10, h=10)) == [(1, 1), (1, 1)]
    stride_dim = StrideDim(1)
    filt_dim = Conv2DFilterDim(5, 5, 1, 1)
    in_dim = Dim.named_ordered(c=1, h=20, w=20)
    dim1 = PadDim.same()
    dim1.calculate_same(in_dim, filt_dim, stride_dim)
    assert dim1.shape == [2, 2, 2, 2]
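The expected shape [2, 2, 2, 2] is the SAME-padding arithmetic for a 5x5 filter at stride 1: the total padding per axis is kernel - stride = 4, split evenly between the two sides:

kernel, stride = 5, 1
total = kernel - stride                       # 4
assert [total // 2, total - total // 2] == [2, 2]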
Example #14
    def pool2d(cls, node, pool_type=None, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        opts = kwargs['opts']
        node_opts = node.get_options(Pool2DOptions)

        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x = cls.remove_known_batch_dimension(G, x, node)
        x_shape = x[2].shape

        in_b, h, w, in_c = tuple(x_shape)

        filt_h = node_opts.FilterHeight()
        filt_w = node_opts.FilterWidth()
        stride_h = node_opts.StrideH()
        stride_w = node_opts.StrideW()

        pad = cls.get_tf_padding(node_opts.Padding())

        filter_matches_input = h == filt_h and w == filt_w
        stride_is_one = stride_h == 1 and stride_w == 1

        if filter_matches_input and stride_is_one:
            params = GlobalPoolParameters(node.name,
                                          pool_type=pool_type,
                                          axis=[0, 1],
                                          keep_dims=True,
                                          in_dims_hint=[['h', 'w', 'c']],
                                          out_dims_hint=[['h', 'w', 'c']])
        else:
            params = PoolingParameters(node.name,
                                       filt=PoolFilterDim(filt_h, filt_w),
                                       stride=StrideDim(stride_h, stride_w),
                                       padding=pad,
                                       pool_type=pool_type,
                                       in_dims_hint=[['h', 'w', 'c']],
                                       out_dims_hint=[['h', 'w', 'c']])

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #15
def test_max_pool_normal():
    filt = PoolFilterDim(2, 2)
    stride = StrideDim(1)
    pad = PadDim(0)
    params = PoolingParameters("test",
                               filt=filt,
                               stride=stride,
                               padding=pad,
                               pool_type="max")
    input_ = np.arange(9).reshape([1, 3, 3])
    in_dims = Dim.named(c=1, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = max_pool(params, in_dims, out_dims[0], input_)
    assert np.array_equal(output_, [[[4, 5], [7, 8]]])
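The expected values check out by hand: 2x2 max windows at stride 1 over the 3x3 ramp:

import numpy as np

x = np.arange(9).reshape(3, 3)
out = np.array([[x[i:i + 2, j:j + 2].max() for j in range(2)]
                for i in range(2)])
assert np.array_equal(out, [[4, 5], [7, 8]])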
Example #16
def add_convolution(G,
                    tensors,
                    name,
                    subgraph,
                    _,
                    op,
                    load_tensors=False,
                    dequantize=False):
    conv_opts = Conv2DOptions.Conv2DOptions()
    conv_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get filter dimensions
    filt = get_input_size(tensors, subgraph, op, 1, order=TF_LITE_FILTER_ORDER)
    filt = Conv2DFilterDim(filt['h'], filt['w'],
                           filt['out_c'], in_c=filt['in_c'])
    filt = filt.impose_order(TF_LITE_FILTER_ORDER)
    # compute padding
    pad = get_tf_padding(conv_opts.Padding())

    # does it have biases
    has_bias = op.InputsLength() > 2

    node = Conv2DParameters(name,
                            filt=filt,
                            stride=StrideDim(conv_opts.StrideH(),
                                             conv_opts.StrideW()),
                            padding=pad,
                            has_bias=has_bias,
                            in_dims_hint=SparseList([['h', 'w', 'c']]),
                            out_dims_hint=SparseList([['h', 'w', 'c']]),
                            constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model,
                                  tensors,
                                  subgraph,
                                  op,
                                  1,
                                  dequantize=dequantize)
        if has_bias:
            node.biases = get_tensor(G.model,
                                     tensors,
                                     subgraph,
                                     op,
                                     2,
                                     dequantize=dequantize)
    return fuse_activation(G, conv_opts, name, node)
Example #17
def test_conf2d_depth_q():
    calc_q = QType(32, 9, True)
    biases_q = acc_q = out_q = QType(16, 4, True)
    weights_q = QType(16, 4, True)
    in_q = QType(16, 5, True)
    # TF Lite depthwise convolution
    biases = np.full([2], 0.5)
    qbiases = biases_q.quantize(biases)
    weights = np.full([3, 3], 0.5)
    weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
    qweights = weights_q.quantize(weights)
    filt = Conv2DFilterDim(3, 3, 2,
                           1).impose_order(["in_c", "h", "w", "out_c"])
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              groups=1,
                              multiplier=2,
                              tf_depthwise=True,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    qrec = FilterQuantizationRecord(in_qs=[in_q],
                                    out_qs=[out_q],
                                    weights_q=weights_q,
                                    biases_q=biases_q,
                                    acc_q=acc_q,
                                    calc_q=calc_q)
    input_ = np.full([1, 4, 4], 2)
    qinput_ = in_q.quantize(input_)
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, biases)
    qoutput_ = conv2d(params,
                      in_dims,
                      out_dims[0],
                      qinput_,
                      qweights,
                      qbiases,
                      qrec=qrec)
    dqoutput_ = out_q.dequantize(qoutput_)
    assert np.array_equal(output_, dqoutput_)
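The exact-equality assert can only hold because every value involved is exactly representable in the chosen Q formats, so quantize, convolve, dequantize is lossless here. Assuming QType(bits, q) scales by 2**q:

import numpy as np

def q(x, frac):   # minimal fixed-point quantizer sketch
    return int(np.round(x * (1 << frac)))

assert q(0.5, 4) == 8    # weights/biases in Q16.4
assert q(2.0, 5) == 64   # input in Q16.5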
Example #18
def test_max_pool_q():
    filt = PoolFilterDim(2, 2)
    stride = StrideDim(1)
    pad = PadDim(0)
    params = PoolingParameters("test",
                               filt=filt,
                               stride=stride,
                               padding=pad,
                               pool_type="max")
    in_q = QType(16, 0, True)
    qrec = QuantizationRecord([in_q], [in_q])
    input_ = in_q.quantize(np.arange(9)).reshape([1, 3, 3])
    in_dims = Dim.named(c=1, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = max_pool(params, in_dims, out_dims[0], input_)
    output_ = in_q.dequantize(output_)
    assert np.array_equal(output_, [[[4, 5], [7, 8]]])
Example #19
    def __init__(self,
                 *args,
                 stride=None,
                 padding=None,
                 pad_type="zero",
                 **kwargs):
        if stride is None:
            stride = StrideDim(1, 1)
        if padding is None:
            padding = PadDim(0)

        super(FilterLikeParameters, self).__init__(*args, **kwargs)
        self.stride = stride
        self.padding = padding
        self.pad_type = pad_type
        self._ker_in_order = [['c', 'h', 'w']]
        self._ker_out_order = [['c', 'h', 'w']]
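The None-then-construct pattern for stride and padding avoids the shared-default pitfall: a StrideDim(1, 1) built in the signature would be a single object shared by every instance. The classic illustration with a list default:

def bad(acc=[]):          # default created once, shared across calls
    acc.append(1)
    return acc

def good(acc=None):       # fresh object per call, as in __init__ above
    acc = [] if acc is None else acc
    acc.append(1)
    return acc

assert bad() == [1] and bad() == [1, 1]
assert good() == [1] and good() == [1]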
Example #20
def test_conf2d_pad_dilate():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.same()
    dilation = DilationDim(2)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
    assert np.array_equal(output_, [[[266., 206.], [98., 66.]]])
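With dilation 2 the 3x3 kernel has an effective extent of (3 - 1) * 2 + 1 = 5, larger than the 4x4 input, so every output position taps padding, which is why the result looks so unlike the undilated case:

kernel, dilation = 3, 2
effective = (kernel - 1) * dilation + 1
assert effective == 5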
Example #21
def test_conf2d_pad():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.same()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None)
    assert np.array_equal(output_, [[[73, 121, 154, 103], [171, 258, 294, 186],
                                     [279, 402, 438, 270], [139, 187, 202, 113]]])
Example #22
def calc_shapes(node, spatial_size, input_size, kernel_shape):
    padding = expand_dim(node.attrs.get('pads', None), 4 - spatial_size * 2, 0)
    auto_pad = node.attrs.get('auto_pad', 'NOTSET')
    output_shape = expand_dim(node.attrs.get('output_shape', None), 2 - spatial_size, 1)
    output_padding = Dim2D(*expand_dim(node.attrs.get('output_padding', None), 2 - spatial_size, 0))
    dilations = DilationDim(*expand_dim(node.attrs.get('dilations', None), 2 - spatial_size, 1))
    strides = StrideDim(*expand_dim(node.attrs.get('strides', None), 2 - spatial_size, 1))
    if output_shape:
        total_padding = strides * (input_size - 1) + output_padding + ((kernel_shape - 1) * dilations + 1) - output_shape
        if auto_pad == 'SAME_UPPER':
            pad_start = total_padding // 2
            pad_end = total_padding - pad_start
        else:
            pad_end = total_padding // 2
            pad_start = total_padding - pad_end
        padding = PadDim(pad_start.h, pad_end.h, pad_start.w, pad_end.w)
    elif auto_pad == 'NOTSET':
        assert padding, 'pads not set and auto_pad is NOTSET'
        padding = PadDim(*padding)
    elif auto_pad == 'VALID':
        padding = PadDim.valid()
    return padding, dilations, strides, output_padding
Example #23
def test_conf2d_2_in_2_out_c():
    weights = np.arange(4).reshape([1, 2, 2])
    weights = np.append(weights, weights, axis=0)
    weights = np.append(weights, weights, axis=0)
    weights = weights.reshape([2, 2, 2, 2])
    filt = Conv2DFilterDim(2, 2, 2, 2)
    stride = StrideDim(1)
    pad = PadDim.valid()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(9).reshape([1, 3, 3])
    input_ = np.append(input_, input_, axis=0)
    in_dims = Dim.named(c=2, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
    assert np.array_equal(output_, [[[38., 50.], [74., 86.]],
                                    [[38., 50.], [74., 86.]]])
Example #24
    def conv(cls, node, quantized=False, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        # input N x C x H x W
        x = inputs[0]
        x_rank = len(x[2].shape)
        x_shape = x[2].shape

        if x_shape[0] is not None:
            real_in_shape = tuple(x_shape.copy())
            if x_shape[0] > 1:
                # support for multi batch is very limited
                batch = x_shape[0]
                logger.warning(
                    f"{valid_name} has a non 1 batch dimension of {batch} -"
                    " this is not supported by nntool or autotiler kernels")
            else:
                # if the batch is specified but is 1 then the input will be reshaped
                # and the output will have the batch dim set as unknown.
                batch = None
        else:
            real_in_shape = tuple(x_shape[1:])
            batch = None

        spatial_size = x_rank - 2
        assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

        # Input error checking
        undefined = []
        if x_shape[1] is None:
            # cope with swapped batch and channel due to bad initial reshape
            if x_shape[0] == 1:
                batch = None
                x_shape = [x_shape[1], x_shape[0]] + list(x_shape[2:])
                real_in_shape = x_shape[1:]
            else:
                undefined.append(f"input channel size of filter {valid_name} must be defined.")

        if not all(dim is not None for dim in x_shape[-spatial_size:]):
            undefined.append(f"input spatial size {x_shape} of filter {valid_name} must be defined.")
        if undefined:
            raise ValueError(f"{' '.join(undefined)}. You may need to override input dimensions.")

        # M x C/group x kH x kW
        weights_idx = 3 if quantized else 1
        weights_node = inputs[weights_idx][0]
        weights_node.name = f'{valid_name}_weights'
        weights = cls.get_constant(inputs[weights_idx])
        out_c = weights.shape[0]
        group = node.attrs.get("group", 1)
        in_c = x_shape[1]
        filt_in_c = in_c // group
        if in_c != weights.shape[1] * group:
            raise ValueError(f'node {valid_name} has incorrect input channel '
                             f'dimension {in_c} expecting {weights.shape[1] * group}')
        if spatial_size == 1:
            filt_w = weights.shape[-1]
            filt_h = h = 1
            w = x_shape[-1]
            # create a new constant node since we are changing the shape
            weights = np.reshape(weights, (out_c, filt_in_c, filt_h, filt_w))
            weights_node = ConstantInputParameters(f'{valid_name}_weights', value=weights,
                                                   dims=Dim.unnamed(
                                                       weights.shape))
            cls.record_constant_qrec(inputs[weights_idx], weights_node, **kwargs)
        else:
            filt_h = weights.shape[-2]
            filt_w = weights.shape[-1]
            h = x_shape[-2]
            w = x_shape[-1]

        conv_in_shape = (in_c, h, w)

        # h = 1 if spatial_size == 1 else (
        #     x_shape[-2] if x_shape[-2] is not None else 1)
        # w = x_shape[-1] if x_shape[-1] is not None else 1

        filt_dim = Conv2DFilterDim(filt_h, filt_w,
                                   out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

        biases_idx = 8 if quantized else 2
        if len(inputs) > biases_idx:
            biases_node = inputs[biases_idx][0]
            biases = cls.get_constant(inputs[biases_idx])
        else:
            biases = np.zeros([out_c], dtype=np.float32)
            biases_node = ConstantInputParameters(f'{valid_name}_biases', value=biases,
                                                  dims=Dim.unnamed(
                                                      biases.shape))

        dilations = cls.pad_start_with(node.attrs.get("dilations", []), [1], 2)
        strides = cls.pad_start_with(node.attrs.get("strides", []), [1], 2)
        pad_dim = cls.calc_pad_dim(node, 4)

        if batch is not None:
            in_hint = ['n', 'c', 'h', 'w']
            out_hint = ['n', 'c', 'h', 'w']
            in_dim = Dim.named_ordered(n=batch, c=in_c, h=h, w=w)
            ker_in_order = [
                ['n', 'c', 'h', 'w'],
                ['out_c', 'in_c', 'h', 'w'],
                ['out_c']]
            ker_out_order = [['n', 'c', 'h', 'w']]
        else:
            in_hint = ['c', 'h', 'w']
            out_hint = ['c', 'h', 'w']
            in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
            ker_in_order = [
                ['c', 'h', 'w'],
                ['out_c', 'in_c', 'h', 'w'],
                ['out_c']]
            ker_out_order = [['c', 'h', 'w']]
        params = Conv2DParameters(valid_name,
                                  filt=filt_dim,
                                  stride=StrideDim(strides[0],
                                                   strides[1]),
                                  dilation=DilationDim(dilations[0],
                                                       dilations[1]),
                                  batch=batch,
                                  groups=group,
                                  padding=pad_dim,
                                  ker_in_order=ker_in_order,
                                  ker_out_order=ker_out_order,
                                  has_bias=True,
                                  in_dims_hint=[in_hint,
                                                cls.ONNX_FILTER_ORDER, ['c']],
                                  out_dims_hint=[out_hint])

        if quantized:
            qrecs = kwargs['qrecs']
            x_zp = cls.get_constant(inputs[2])
            x_scale = cls.get_constant(inputs[1])
            x_qtype = QType(dtype=x_zp.dtype, scale=x_scale, zero_point=x_zp)
            w_zp = cls.get_constant(inputs[5])
            w_scale = cls.get_constant(inputs[4])
            weights_node.qtype = w_qtype = QType(
                dtype=w_zp.dtype, scale=w_scale,
                zero_point=w_zp, quantized_dimension=0 if len(w_scale) > 1 else None)
            o_zp = cls.get_constant(inputs[7])
            o_scale = cls.get_constant(inputs[6])
            o_qtype = QType(dtype=o_zp.dtype, scale=o_scale, zero_point=o_zp)
            biases_node.qtype = b_qtype = QType(
                dtype=biases.dtype, scale=w_scale*x_scale)
            qrecs[NodeId(params)] = QRec.scaled(
                in_qs=[x_qtype, w_qtype, b_qtype],
                out_qs=[o_qtype],
            )
        else:
            o_qtype = None

        w_dim = Dim.named_ordered(
            out_c=out_c, in_c=filt_in_c, h=filt_h, w=filt_w)
        b_dim = Dim.named_ordered(c=out_c)
        out_dims = params.get_output_size([in_dim, w_dim, b_dim])
        G.add_edge(NNEdge(from_node=weights_node,
                          to_node=params, from_idx=0, to_idx=1))
        G.add_edge(NNEdge(from_node=biases_node,
                          to_node=params, from_idx=0, to_idx=2))

        # check if input needs a reshape
        if conv_in_shape != real_in_shape:
            r1_params = ReshapeParameters(f'{valid_name}_reshape_in',
                                          old_shape=Dim.unnamed(real_in_shape),
                                          shape=Dim.unnamed(conv_in_shape))
            G.add_edge(
                NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
            G.add_edge(NNEdge(from_node=r1_params,
                              to_node=params, from_idx=0, to_idx=0))
        else:
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        # check if output needs a reshape
        if spatial_size == 1:
            if batch is not None:
                oned_out_shape = [batch, out_dims[0].c, out_dims[0].w]
                pout_dims = ProvisionalDim(oned_out_shape)
            else:
                oned_out_shape = [out_dims[0].c, out_dims[0].w]
                pout_dims = ProvisionalDim([None] + oned_out_shape)

            r2_params = ReshapeParameters(f'{valid_name}_reshape_out',
                                          old_shape=out_dims[0],
                                          shape=Dim.unnamed(oned_out_shape))
            G.add_edge(NNEdge(from_node=params,
                              to_node=r2_params, from_idx=0, to_idx=0))
            params = r2_params
        else:
            pout_dims = ProvisionalDim([batch] + out_dims[0].shape)

        all_nodes[node.output[0]] = (params, 0, pout_dims, o_qtype)
        return params
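The 1D path lifts a Conv1D to a Conv2D by inserting a unit H axis, reshaping the [M, C, kW] weights to [M, C, 1, kW] (the input gets the matching reshape further down):

import numpy as np

w1d = np.zeros((8, 4, 5))             # M, C, kW
w2d = np.reshape(w1d, (8, 4, 1, 5))   # M, C, 1, kW
assert w2d.shape == (8, 4, 1, 5)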
Example #25
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(),
                                 node_opts.DilationWFactor()),
            padding=pad,
            has_bias=True,
            in_dims_hint=SparseList([['h', 'w', 'c'],
                                     cls.TF_LITE_FILTER_ORDER.copy(),
                                     ['out_c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                       bias_node, node.output[0], opts)
        # if opts.get('load_dequantized'):
        #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
        #         node.input, bias_node.value)
        # else:
        #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(G, params, node.input, bias_node.value,
        #                                                                            node.output, opts)
        #     if qrec:
        #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
        #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #26
    def pool(cls, node, pool_type=None, copy_qtype=False, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_feature_shape = x_shape[2::]
        input_rank = len(x_feature_shape)
        in_c = x_shape[1]

        kernel_shape = node.attrs["kernel_shape"]
        kernel_rank = len(kernel_shape)
        if input_rank != kernel_rank:
            raise ValueError(
                f'error in ONNX graph. {pool_type} pool {valid_name} '
                f'has a different input spatial rank {input_rank} to kernel rank {kernel_rank}'
            )
        spatial_size = kernel_rank
        if kernel_rank > 2:
            raise NotImplementedError(
                f'{pool_type} pool {valid_name} is a {kernel_rank}D pool '
                'which is not supported by NNTOOL')

        strides = node.attrs.get("strides", [1] * spatial_size)
        stride_is_one = all(stride == 1 for stride in strides)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        if any(dilation > 1 for dilation in dilations):
            raise ValueError(valid_name + " with dilation not supported")
        # ceil_mode = bool(node.attrs.get("ceil_mode", 0))
        pad_dim = cls.calc_pad_dim(node, 2)

        if spatial_size == 1:
            strides = [1] + strides
            dilations = [1] + dilations
            kernel_shape = [1] + kernel_shape
            h = 1
            w = x_shape[2]
            x_feature_shape = [1] + x_feature_shape
        else:
            h = x_shape[2]
            w = x_shape[3]

        if pad_dim.is_same:
            pad_dim.calculate_same(
                Dim.named_ordered(h=h, w=w),
                PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                StrideDim(strides[0], strides[1]))
        # Note: This needs to check dilation if it is added
        filter_matches_input = (all(
            k_dim >= (x_dim + pad) for k_dim, x_dim, pad in zip(
                kernel_shape, x_feature_shape, [pad_dim.h, pad_dim.w])))

        if filter_matches_input and stride_is_one:
            params = GlobalPoolingParameters(valid_name,
                                             pool_type=pool_type,
                                             axis=[1, 2],
                                             keep_dims=True,
                                             in_dims_hint=[['c', 'h', 'w']],
                                             out_dims_hint=[['c', 'h', 'w']])
        else:
            params = PoolingParameters(
                valid_name,
                filt=PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                stride=StrideDim(strides[0], strides[1]),
                padding=pad_dim,
                pool_type=pool_type,
                in_dims_hint=[['c', 'h', 'w']],
                out_dims_hint=[['c', 'h', 'w']])

        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims,
                                     x[3] if copy_qtype else None)
        return params
Example #27
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        filt_tensor = node.input[1]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        filt_tensor.used = True
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        has_bias = len(inputs) > 2
        if has_bias:
            node.input[2].used = True

        params = Conv2DParameters(node.name,
                                  filt=filt_dim,
                                  stride=StrideDim(node_opts.StrideH(),
                                                   node_opts.StrideW()),
                                  dilation=DilationDim(
                                      node_opts.DilationHFactor(),
                                      node_opts.DilationWFactor()),
                                  padding=pad,
                                  has_bias=has_bias,
                                  in_dims_hint=SparseList([['h', 'w', 'c']]),
                                  out_dims_hint=SparseList([['h', 'w', 'c']]),
                                  constant_store=G.constant_store)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params, node.input)
        else:
            cls.load_filter_parameters(G, params, node.input, node.output,
                                       opts)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #28
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x = cls.remove_known_batch_dimension(G, x, node)
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check
        groups = in_c // filt_in_c
        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(),
                                 node_opts.DilationWFactor()),
            groups=groups,
            padding=pad,
            has_bias=True,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy(), ['out_c']],
            out_dims_hint=[['h', 'w', 'c']])
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G, params, params.filter.actual_shape,
                                       params.filter.get_order_idx('out_c'),
                                       node.input[0], weights_node, bias_node,
                                       node.output[0], opts)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([None] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        oparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (oparams, 0, pout_dims)
        return oparams
Example #29
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(DepthwiseConv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x = cls.remove_known_batch_dimension(G, x, node)
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

        # multiplier should match filter
        check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c,
              "invalid multiplier")

        groups = filt_dim.out_c // node_opts.DepthMultiplier()

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

        # TFLITE produces single channel input DW convolutions with the
        # multiplier equal to the number of out channels. This is just
        # a normal convolution and since we don't handle the channel
        # multiplier at present (but can) just convert them to normal
        # convolutions
        convert_to_conv = in_c == 1 and groups == 1

        if convert_to_conv:
            filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                has_bias=True,
                in_dims_hint=[['h', 'w', 'c'],
                              cls.TF_LITE_FILTER_ORDER.copy(), ['out_c']],
                out_dims_hint=[['h', 'w', 'c']])
        else:
            filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                groups=groups,
                multiplier=node_opts.DepthMultiplier(),
                has_bias=True,
                tf_depthwise=True,
                in_dims_hint=[['h', 'w', 'c'],
                              cls.TF_LITE_DW_FILTER_ORDER.copy(), ['out_c']],
                out_dims_hint=[['h', 'w', 'c']])

        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G,
                                       params,
                                       params.filter.actual_shape,
                                       params.filter.get_order_idx('out_c'),
                                       node.input[0],
                                       weights_node,
                                       bias_node,
                                       node.output[0],
                                       opts,
                                       dw_to_pw=convert_to_conv)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
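The convert_to_conv condition captures the degenerate case the comment describes: a depthwise convolution over a single input channel with groups == 1 is just an ordinary convolution whose out_c equals the multiplier:

in_c, multiplier = 1, 8
filt_out_c = multiplier * in_c       # the multiplier check above enforces this
groups = filt_out_c // multiplier    # 1
assert in_c == 1 and groups == 1     # -> convert_to_conv is True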
Example #30
def add_depthwise_convolution(G,
                              tensors,
                              name,
                              subgraph,
                              _,
                              op,
                              load_tensors=False,
                              dequantize=False):
    conv_opts = DepthwiseConv2DOptions.DepthwiseConv2DOptions()
    conv_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get filter dimensions
    inp = get_input_size(tensors, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    filt = get_input_size(tensors,
                          subgraph,
                          op,
                          1,
                          order=TF_LITE_DW_FILTER_ORDER)
    filt = Conv2DFilterDim(filt['h'], filt['w'],
                           filt['out_c'], in_c=1)

    # multiplier should match filter
    check(filt.out_c == conv_opts.DepthMultiplier() * inp['c'],
          "invalid multiplier")

    groups = filt.out_c // conv_opts.DepthMultiplier()

    # compute padding
    pad = get_tf_padding(conv_opts.Padding())

    # does it have biases
    has_bias = op.InputsLength() > 2

    # TFLITE produces single channel input DW convolutions with the
    # multiplier equal to the number of out channels. This is just
    # a normal convolution and since we don't handle the channel
    # multiplier at present (but can) just convert them to normal
    # convolutions
    convert_to_conv = inp['c'] == 1 and groups == 1

    if convert_to_conv:
        filt.impose_order(TF_LITE_FILTER_ORDER)
        node = Conv2DParameters(name,
                                filt=filt,
                                stride=StrideDim(conv_opts.StrideH(),
                                                 conv_opts.StrideW()),
                                padding=pad,
                                has_bias=has_bias,
                                in_dims_hint=SparseList([['h', 'w', 'c']]),
                                out_dims_hint=SparseList([['h', 'w', 'c']]),
                                constant_store=G.constant_store)
    else:
        filt.impose_order(TF_LITE_DW_FILTER_ORDER)
        node = Conv2DParameters(name,
                                filt=filt,
                                stride=StrideDim(conv_opts.StrideH(),
                                                 conv_opts.StrideW()),
                                padding=pad,
                                groups=groups,
                                multiplier=conv_opts.DepthMultiplier(),
                                has_bias=has_bias,
                                tf_depthwise=True,
                                in_dims_hint=SparseList([['h', 'w', 'c']]),
                                out_dims_hint=SparseList([['h', 'w', 'c']]),
                                constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model,
                                  tensors,
                                  subgraph,
                                  op,
                                  1,
                                  dequantize=dequantize)
        # If we've converted to a normal conv then change the weight order
        if convert_to_conv:
            node.weights = node.weights.transpose(TF_LITE_DW_FILTER_TRANSPOSE)
        if has_bias:
            node.biases = get_tensor(G.model,
                                     tensors,
                                     subgraph,
                                     op,
                                     2,
                                     dequantize=dequantize)

    return fuse_activation(G, conv_opts, name, node)
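When converting to a normal convolution the weights must move from the depthwise layout [1, h, w, out_c] to [out_c, h, w, 1]; assuming TF_LITE_DW_FILTER_TRANSPOSE is the permutation (3, 1, 2, 0), the reorder looks like:

import numpy as np

w = np.zeros((1, 3, 3, 8))                             # 1, h, w, out_c
assert w.transpose((3, 1, 2, 0)).shape == (8, 3, 3, 1)  # out_c, h, w, 1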