def leaky_relu_transpose_transform(X, axes):
    # type: (XLayer, List[int]) -> None
    """ Transform LeakyReLU layer with transpose according to provided axes """

    new_shape = TensorShape([X.shapes[i] for i in axes])
    X.shapes = new_shape
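
For reference, the permutation applied to the shapes above can be sketched with plain Python lists (`TensorShape` and `XLayer` are assumed to come from the surrounding library; `permute_shape` is only an illustration helper):

def permute_shape(shape, axes):
    # Reorder a shape list according to the given axes, as done above
    return [shape[i] for i in axes]

# NCHW -> NHWC permutation of a [1, 64, 32, 32] shape
print(permute_shape([1, 64, 32, 32], [0, 2, 3, 1]))  # [1, 32, 32, 64]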
Example 2
def global_pool2d(op_name,
                  input_layer,
                  pool_type,
                  layout,
                  **kwargs):
    # type: (str, XLayer, str, str) -> XLayer
    """
    Create a global 2D pooling parameters layer

    Arguments
    ---------
    op_name: str
        The name of this pooling layer operation
    pool_type: str
        Indicates which pooling operation to use (Max or Avg)
    layout: str
        The layout of the pooling layer input (`NCHW` or `NHWC`)
    input_layer: XLayer
        The input layer to this pooling layer
    """

    if pool_type not in ['Max', 'Avg']:
        raise NotImplementedError("Invalid pooling type: {}, can either be"
                                  " `Max` or `Avg`.".format(pool_type))

    # Input layer shapes are always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches, channels = input_layer.shapes[0], input_layer.shapes[1]

    strides = [1, 1]
    padding = [0, 0]
    pool_size = insize

    out_h, out_w = 1, 1

    attrs = kwargs
    attrs.update({
        'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
        'insize': insize,
        'outsize': [out_h, out_w],
        'data_layout': layout,
        'strides': strides,
        'kernel_size': pool_size,
        'pool_type': pool_type,
        # 'channels': [channels, channels]
    })
    out_shape = TensorShape([batches, channels, out_h, out_w]
                            if layout == 'NCHW'
                            else [batches, out_h, out_w, channels])

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Pooling'],
        shapes=out_shape,
        sizes=out_shape.get_size(),
        attrs=attrs,
        layer=[op_name],
        tops=[],
        bottoms=[input_layer.name],
        targets=[])

    return X
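
The XLayer built above only describes the operation; as a reference, the numerical effect of global 2D pooling on NCHW input can be sketched with NumPy (`global_pool2d_ref` is an illustration helper, not part of the library):

import numpy as np

def global_pool2d_ref(x, pool_type='Avg'):
    # x has shape (N, C, H, W); output has shape (N, C, 1, 1)
    if pool_type == 'Avg':
        return x.mean(axis=(2, 3), keepdims=True)
    return x.max(axis=(2, 3), keepdims=True)

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
print(global_pool2d_ref(x, 'Avg'))  # [[[[7.5]]]]
print(global_pool2d_ref(x, 'Max'))  # [[[[15.]]]]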
Example 3
def pool2d(op_name,
           input_layer,
           pool_type,
           pool_size,
           strides,
           padding,
           layout,
           ceil_mode=False,
           count_include_pad=False,
           **kwargs):
    # type: (str, XLayer, str, List[int], List[int], List[int], str, bool,
    #        bool) -> XLayer
    """
    Create a pooling parameters layer

    Arguments
    ---------
    op_name: str
        The name of this pooling layer operation
    pool_type: str
        Indicates which pooling operation to use (Max or Avg)
    pool_size: List[int]
        The size of the pooling window
    strides: List[int]
        The pooling operation strides
    padding: List[int]
        The padding to be added before pooling, can be of length 1, 2 or 4
    layout: str
        The layout of the pooling layer input (`NCHW` or `NHWC`)
    ceil_mode: bool
        Whether to use ceiling or floor rounding while pooling
    count_include_pad: bool
        Whether to include padding to compute average
        (only for average pooling)
    input_layer: XLayer
        The input layer to this pooling layer
    """
    if layout not in ['NCHW', 'NHWC']:
        raise ValueError("Unsupported layout: {}, supported layouts are"
                         "NCHW and NHWC".format(layout))

    if pool_type not in ['Max', 'Avg']:
        raise NotImplementedError("Invalid pooling type: {}, can either be"
                                  " `Max` or `Avg`.".format(pool_type))

    def valid(x, k, p1, p2, s): return math.floor((x+p1+p2-k)/s) + 1

    def full(x, k, p1, p2, s): return math.ceil((x+p1+p2-k)/s) + 1

    # TODO: this is very similar to the NNVM operators -> merge
    if len(padding) == 4:
        # top bottom left right = h_before h_after w_before w_after
        full_paddings = \
            [[0, 0], [0, 0], [padding[0], padding[2]],
             [padding[1], padding[3]]]
    elif len(padding) == 2:
        full_paddings = \
            [[0, 0], [0, 0], [padding[0], padding[0]],
             [padding[1], padding[1]]]
    elif len(padding) == 1:
        full_paddings = [[0, 0], [0, 0], [padding[0], padding[0]],
                         [padding[0], padding[0]]]
    else:
        raise ValueError("Invalid padding size passed by Relay operator,"
                         " sizes of 1, 2 and 4 are supported but not {}"
                         .format(len(padding)))

    # if full_paddings[2][0] != full_paddings[2][1] \
    #         or full_paddings[3][0] != full_paddings[3][1]:
    #     warnings.warn("[WARNING] Asymmetric padding for layer: {}. "
    #                   "Padding will be symmetrized for running on FPGA."
    #                   .format(op_name))

    padding = [min(full_paddings[2][0], full_paddings[2][1]),
               min(full_paddings[3][0], full_paddings[3][1])]

    if layout == 'NCHW':
        insize = [input_layer.shapes[2], input_layer.shapes[3]]
        batches, channels = input_layer.shapes[0], input_layer.shapes[1]
    else:
        # NHWC
        insize = [input_layer.shapes[1], input_layer.shapes[2]]
        batches, channels = input_layer.shapes[0], input_layer.shapes[3]
        full_paddings = [full_paddings[i] for i in [0, 2, 3, 1]]

    calc_func = full if ceil_mode else valid

    outsize = [
        calc_func(insize[1], pool_size[1], full_paddings[3][0],
                  full_paddings[3][1], strides[1]),
        calc_func(insize[0], pool_size[0], full_paddings[2][0],
                  full_paddings[2][1], strides[0])
    ]

    attrs = kwargs
    attrs.update({
        'type': pool_type,
        'padding': full_paddings,
        'strides': strides,  # HW
        'kernel_size': pool_size,  # HW
        'insize': insize,  # HW
        'outsize': [outsize[1], outsize[0]],  # HW
        'data_layout': layout,
        'pool_type': pool_type,
        # 'channels': [channels, channels]
    })
    if pool_type == 'Avg':
        attrs['count_include_pad'] = count_include_pad

    out_h, out_w = outsize[1], outsize[0]
    out_shape = TensorShape([batches, channels, out_h, out_w]
                            if layout == 'NCHW'
                            else [batches, out_h, out_w, channels])

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Pooling'],
        shapes=out_shape,
        sizes=out_shape.get_size(),
        attrs=attrs,
        layer=[op_name],
        tops=[],
        bottoms=[input_layer.name],
        targets=[]
    )

    return X
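
The `valid` and `full` helpers above implement the usual floor/ceil output-size formulas; a self-contained check (`pool_out_size` is an illustration helper combining both):

import math

def pool_out_size(x, k, p1, p2, s, ceil_mode=False):
    # Same formula as the `valid` (floor) / `full` (ceil) helpers in pool2d
    rnd = math.ceil if ceil_mode else math.floor
    return rnd((x + p1 + p2 - k) / s) + 1

# 7x7 input, 3x3 window, stride 2, no padding: both modes give 3
print(pool_out_size(7, 3, 0, 0, 2), pool_out_size(7, 3, 0, 0, 2, True))  # 3 3
# 8x8 input, 3x3 window, stride 2: floor gives 3, ceil gives 4
print(pool_out_size(8, 3, 0, 0, 2), pool_out_size(8, 3, 0, 0, 2, True))  # 3 4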
Example 4
def conv2d_transpose(op_name,
                     input_layer,
                     weights_layer,
                     kernel_size,
                     strides,
                     padding_hw,
                     dilation,
                     groups,
                     channels,
                     data_layout,
                     kernel_layout,
                     **kwargs):
    # type: (str, XLayer, XLayer, List[int], List[int], List[int],
    #        List[int], int, int, str, str) -> XLayer
    """
    Create a conv2d transpose parameters layer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d transpose layer kernel (`OIHW`, `HWIO`,
        `IOHW` or `OHWI`)
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    """
    bottoms = [input_layer.name]

    logger.debug("-- Conv2DTranspose Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2DTranspose W shape: {}"
                 .format(weights_layer.data[0].shape))

    # Convert kernel to 'OIHW' layout
    if kernel_layout == 'OIHW':
        W = weights_layer.data[0]
    elif kernel_layout == 'HWIO':
        W = np.transpose(weights_layer.data[0], (3, 2, 0, 1))
    elif kernel_layout == 'IOHW':
        W = np.transpose(weights_layer.data[0], (1, 0, 2, 3))
    elif kernel_layout == 'OHWI':
        W = np.transpose(weights_layer.data[0], (0, 3, 1, 2))
    else:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be one of `OIHW`"
                                  ", `HWIO`, `IOHW` or `OHWI`."
                                  .format(kernel_layout, op_name))

    assert len(padding_hw) in [2, 4]
    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " but got: {}".format(len(padding_hw)))

    # W is now in OIHW shape
    in_ch, out_ch = W.shape[1], W.shape[0]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # Shape
    # Input layer is always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches = input_layer.shapes[0]
    logger.debug("{} {}".format(input_layer.shapes, in_ch))
    assert input_layer.shapes[1] == in_ch

    if padding_hw[0] == (kernel_size[0] - strides[0]) / 2 and\
            padding_hw[1] == (kernel_size[1] - strides[1]) / 2:
        padding_type = 'SAME'
    elif padding_hw[0] == 0 and padding_hw[1] == 0:
        padding_type = 'VALID'
    else:
        raise NotImplementedError("Unsupported padding for Conv2DTranspose"
                                  " Only Tensorflow padding 'SAME' and 'VALID'"
                                  " are supported but got: {} which does not"
                                  " translate to 'SAME' == [{}, {}] or 'VALID'"
                                  " == [0, 0]"
                                  .format(padding_hw,
                                          (kernel_size[0] - strides[0]) / 2,
                                          (kernel_size[1] - strides[1]) / 2))

    if padding_type == 'SAME':
        out_h = insize[0] * strides[0]
        out_w = insize[1] * strides[1]
    elif padding_type == 'VALID':
        out_h = (insize[0] - 1) * strides[0] + kernel_size[0]
        out_w = (insize[1] - 1) * strides[1] + kernel_size[1]

    out_shape = TensorShape([batches, out_ch, out_h, out_w])

    padding = [[0, 0], [0, 0], [pad_ht, pad_hb], [pad_wl, pad_wr]]
    padding = [padding['NCHW'.index(i)] for i in data_layout]

    attrs = kwargs
    attrs.update({
        'padding': padding,
        'data_layout': data_layout,
        'kernel_layout': 'OIHW',
        'shape': out_shape.tolist(),
        'kernel_size': kernel_size,
        'strides': strides,
        'groups': groups,
        'dilation': dilation,
        'channels': [in_ch, out_ch]
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Conv2DTranspose'],
        shapes=out_shape,
        sizes=out_shape.get_size(),  # [int(out_ch * out_h * out_w)],
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[])

    return X
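
The output sizes computed above follow the standard transposed-convolution rules for TensorFlow-style 'SAME' and 'VALID' padding; a standalone check (`conv2d_transpose_out_size` is an illustration helper):

def conv2d_transpose_out_size(insize, kernel, stride, padding_type):
    # Same rules as used in conv2d_transpose above
    if padding_type == 'SAME':
        return insize * stride
    return (insize - 1) * stride + kernel  # 'VALID'

# 4x4 input, 2x2 kernel, stride 2
print(conv2d_transpose_out_size(4, 2, 2, 'SAME'))   # 8
print(conv2d_transpose_out_size(4, 2, 2, 'VALID'))  # 8
# 4x4 input, 3x3 kernel, stride 2
print(conv2d_transpose_out_size(4, 3, 2, 'VALID'))  # 9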
Example 5
def conv2d(op_name,
           input_layer,
           weights_layer,
           kernel_size,
           strides,
           padding_hw,
           dilation,
           groups,
           channels,
           data_layout,
           kernel_layout,
           **kwargs):
    # type: (str, XLayer, XLayer, List[int], List[int], List[int], List[int],
    #        int, int, str, str) -> XLayer
    """
    Create a conv2d XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel, any permutation of `OIHW`
        (e.g. `OIHW`, `HWIO` or `OHWI`)
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    """

    assert 'Constant' in weights_layer.type

    assert len(kernel_size) == 2
    assert len(dilation) == 2
    assert len(strides) == 2
    assert len(padding_hw) in [2, 4]

    layout_idx = tuple([data_layout.index(e) for e in 'NCHW'])
    layout_idx_transpose = tuple(["NCHW".index(e) for e in data_layout])
    B_idx, C_idx, H_idx, W_idx = layout_idx

    bottoms = [input_layer.name]

    logger.debug("-- Conv2D Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2D W shape: {}".format(weights_layer.data[0].shape))

    if len(kernel_layout) != 4 or \
            sorted(kernel_layout) != ['H', 'I', 'O', 'W']:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be a permutation"
                                  " of `OIHW`"
                                  .format(kernel_layout, op_name))
    transpose_axes = tuple([kernel_layout.index(e) for e in 'OIHW'])
    W = np.transpose(weights_layer.data[0], transpose_axes)

    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " but got: {}".format(len(padding_hw)))

    # W is now in OIHW shape
    in_ch, out_ch = W.shape[1], W.shape[0]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # Determine the input spatial size according to the data layout
    insize = [input_layer.shapes[H_idx], input_layer.shapes[W_idx]]
    batches = input_layer.shapes[0]
    logger.debug("-- in shape: {}".format(input_layer.shapes))
    assert input_layer.shapes[C_idx] == in_ch * groups

    logger.debug("-- padding (t,b,l,r): {}"
                 .format((pad_ht, pad_hb, pad_wl, pad_wr)))

    # TODO dilation
    out_h = \
        int((insize[0] + pad_ht + pad_hb - kernel_size[0]) / strides[0] + 1)
    out_w = \
        int((insize[1] + pad_wl + pad_wr - kernel_size[1]) / strides[1] + 1)

    out_shape = TensorShape(
        [[batches, out_ch, out_h, out_w][i] for i in layout_idx_transpose])

    padding_hh = [pad_ht, pad_hb]
    padding_ww = [pad_wl, pad_wr]

    if data_layout == 'NCHW':
        granular_padding = [[0, 0], [0, 0], padding_hh, padding_ww]
    else:
        granular_padding = [[0, 0], padding_hh, padding_ww, [0, 0]]

    logger.debug("-- out shape: {}".format(out_shape))

    attrs = kwargs
    attrs.update({
        'padding': granular_padding,
        'data_layout': data_layout,
        'kernel_layout': 'OIHW',
        'shape': out_shape.tolist(),
        'kernel_size': kernel_size,
        'strides': strides,
        'groups': groups,
        'dilation': dilation,
        'channels': [in_ch, out_ch]
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Convolution'],
        shapes=out_shape,
        sizes=out_shape.get_size(),  # [int(out_ch * out_h * out_w)],
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[])

    return X
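
Two pieces of the function above can be checked in isolation with NumPy: the kernel permutation to `OIHW` and the output-size formula (dilation is not yet accounted for, as the TODO notes). `conv_out_size` below is an illustration helper:

import numpy as np

# Kernel layout permutation to OIHW for an HWIO kernel, as done above
kernel_layout = 'HWIO'
W_hwio = np.zeros((3, 3, 16, 32), dtype=np.float32)  # (H, W, I, O)
axes = tuple(kernel_layout.index(e) for e in 'OIHW')
print(np.transpose(W_hwio, axes).shape)  # (32, 16, 3, 3), i.e. OIHW

def conv_out_size(insize, kernel, pad_before, pad_after, stride):
    # Output spatial size formula used above (dilation ignored)
    return int((insize + pad_before + pad_after - kernel) / stride + 1)

print(conv_out_size(32, 3, 1, 1, 1))  # 32 ('same'-style padding)
print(conv_out_size(32, 3, 0, 0, 2))  # 15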
Example 6
    def test_batch_norm(self):
        M = np.array([0.5, 1.2], dtype=np.float32)
        V = np.array([0.1, 0.05], dtype=np.float32)
        G = np.array([2.0, 1.0], dtype=np.float32)
        B = np.array([1., -1.0], dtype=np.float32)

        layers = [
            InputLayer(name='input',
                       shape=TensorShape([1, 2, 1, 1]),
                       dtype='float32',
                       inputs=['input'],
                       input_shapes=[TensorShape([1, 2, 1, 1])],
                       subgraph=None),
            ConstantLayer(name='mean',
                          shape=TensorShape([2]),
                          dtype='float32',
                          inputs=[],
                          input_shapes=[],
                          subgraph=None,
                          value=M),
            ConstantLayer(name='var',
                          shape=TensorShape([2]),
                          dtype='float32',
                          inputs=[],
                          input_shapes=[],
                          subgraph=None,
                          value=V),
            ConstantLayer(name='gamma',
                          shape=TensorShape([2]),
                          dtype='float32',
                          inputs=[],
                          input_shapes=[],
                          subgraph=None,
                          value=G),
            ConstantLayer(name='beta',
                          shape=TensorShape([2]),
                          dtype='float32',
                          inputs=[],
                          input_shapes=[],
                          subgraph=None,
                          value=B),
            BatchNormLayer(name='bn',
                           shape=TensorShape([1, 2, 1, 1]),
                           dtype='float32',
                           inputs=['input', 'mean', 'var', 'gamma', 'beta'],
                           input_shapes=[
                               TensorShape([1, 2, 1, 1]),
                               TensorShape([2]),
                               TensorShape([2]),
                               TensorShape([2]),
                               TensorShape([2])
                           ],
                           subgraph=None,
                           attrs={'axis': 1},
                           mean=None,
                           variance=None,
                           gamma=None,
                           beta=None,
                           variance_epsilon=0.0000001)
        ]

        inputs = {'input': np.ones((1, 2, 1, 1), dtype=np.float32)}

        for layer in layers:
            inpts = [inputs[name] for name in layer.inputs]
            outpt = layer.forward_exec(inpts)

            inputs[layer.name] = outpt

        expected_outpt = np.reshape(G, (1, 2, 1, 1)) *\
            (np.ones((1, 2, 1, 1), dtype=np.float32) -
             np.reshape(M, (1, 2, 1, 1))) /\
            np.reshape(np.sqrt(V + 0.0000001), (1, 2, 1, 1)) +\
            np.reshape(B, (1, 2, 1, 1))

        np.testing.assert_array_almost_equal(outpt, expected_outpt)
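
The expected output corresponds to the plain batch-norm formula gamma * (x - mean) / sqrt(var + eps) + beta, which can be reproduced directly with NumPy:

import numpy as np

M = np.array([0.5, 1.2], dtype=np.float32)
V = np.array([0.1, 0.05], dtype=np.float32)
G = np.array([2.0, 1.0], dtype=np.float32)
B = np.array([1., -1.0], dtype=np.float32)
eps = 1e-7

x = np.ones((1, 2, 1, 1), dtype=np.float32)
bcast = lambda a: a.reshape(1, 2, 1, 1)  # broadcast per-channel params over NCHW
out = bcast(G) * (x - bcast(M)) / np.sqrt(bcast(V) + eps) + bcast(B)
print(out.reshape(-1))  # approximately [ 4.1623 -1.8944]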
Example 7
    def test_dense_layer(self):

        W = np.array([[1., 3., 0., -7.], [2., -4., 6., 8.]], dtype=np.float32)
        B = np.array([-1., -1.], dtype=np.float32)

        layers = [
            InputLayer(name='input',
                       shape=TensorShape([1, 4]),
                       dtype='float32',
                       inputs=['input'],
                       input_shapes=[TensorShape([1, 4])],
                       subgraph=None),
            ConstantLayer(name='dense1_weights',
                          shape=TensorShape([2, 4]),
                          dtype='float32',
                          inputs=[],
                          input_shapes=[],
                          subgraph=None,
                          value=W),
            ConstantLayer(name='dense1_biases',
                          shape=TensorShape([2]),
                          dtype='float32',
                          inputs=[],
                          input_shapes=[],
                          subgraph=None,
                          value=B),
            DenseLayer(name='dense1',
                       shape=TensorShape([1, 2]),
                       dtype='float32',
                       inputs=['input', 'dense1_weights', 'dense1_biases'],
                       input_shapes=[
                           TensorShape([1, 4]),
                           TensorShape([2, 4]),
                           TensorShape([2])
                       ],
                       subgraph=None,
                       data_layout='NC',
                       weights=W,
                       kernel_layout='OI',
                       biases=B,
                       use_relu=False),
            OutputLayer(name='output',
                        xtype='Output',
                        shape=TensorShape([1, 2]),
                        dtype='float32',
                        inputs=['dense1'],
                        input_shapes=[TensorShape([1, 2])],
                        data=[],
                        subgraph=None,
                        attrs={}),
        ]

        inputs = {'input': np.ones((1, 4), dtype=np.float32)}

        for layer in layers:
            inpts = [inputs[name] for name in layer.inputs]
            outpt = layer.forward_exec(inpts)

            inputs[layer.name] = outpt

        expected_outpt = np.array([[-4.0, 11.]], dtype=np.float32)

        np.testing.assert_array_almost_equal(outpt, expected_outpt)
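
The expected value is a plain matrix product with the `OI`-layout weights, x @ W.T + B:

import numpy as np

W = np.array([[1., 3., 0., -7.], [2., -4., 6., 8.]], dtype=np.float32)  # OI layout
B = np.array([-1., -1.], dtype=np.float32)
x = np.ones((1, 4), dtype=np.float32)

print(x @ W.T + B)  # [[-4. 11.]]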
Example 8
    def test_quantized_conv_layer(self):
        quant_params = {
            "bw_layer_in": 8,
            "bw_layer_out": 8,
            "bw_params": 8,
            "name": "conv2d0",
            "postscale_shift": [22, 24],
            "prescale_shift": [0, 0],
            "scale": [16584, 22112],
            "sf_layer_in": 1.220472440944882,
            "sf_layer_out": 7.291338582677166,
            "sf_params": [0.023622047156095505, 0.007874015718698502],
            "th_layer_in": 155.0,
            "th_layer_out": 926.0,
            "th_params": [3.0, 1.0]
        }
        layers = [
            InputLayer(name='input',
                       shape=TensorShape([1, 1, 4, 4]),
                       dtype='float32',
                       inputs=['input'],
                       input_shapes=[TensorShape([1, 1, 4, 4])],
                       subgraph=None),
            QuantizeLayer(name='input_quant',
                          shape=TensorShape([1, 1, 4, 4]),
                          dtype='int8',
                          inputs=['input'],
                          input_shapes=[TensorShape([1, 1, 4, 4])],
                          subgraph=None,
                          input_types=['float32'],
                          threshold=[quant_params['th_layer_in']],
                          axis=1,
                          bitwidth=8),
            InputLayer(name='kernel',
                       shape=TensorShape([2, 1, 2, 2]),
                       dtype='float32',
                       inputs=['kernel'],
                       input_shapes=[TensorShape([2, 1, 2, 2])],
                       subgraph=None),
            QuantizeLayer(name='kernel_quant',
                          shape=TensorShape([2, 1, 2, 2]),
                          dtype='int8',
                          inputs=['kernel'],
                          input_shapes=[TensorShape([2, 1, 2, 2])],
                          subgraph=None,
                          input_types=['float32'],
                          threshold=quant_params['th_params'],
                          axis=0,
                          bitwidth=8),
            InputLayer(name='bias',
                       shape=TensorShape([2]),
                       dtype='float32',
                       inputs=['bias'],
                       input_shapes=[TensorShape([2])],
                       subgraph=None),
            QuantizeBiasLayer(name='bias_quant',
                              shape=TensorShape([2]),
                              dtype='int32',
                              inputs=['bias'],
                              input_shapes=[TensorShape([2])],
                              subgraph=None,
                              input_types=['float32'],
                              threshold_bias=quant_params['th_params'],
                              threshold_ext=quant_params['th_layer_in'],
                              bitwidth=8,
                              do_rounding=True),
            ConvLayer(name='conv1',
                      shape=TensorShape([1, 2, 3, 3]),
                      dtype='float32',
                      inputs=['input_quant', 'kernel_quant', 'bias_quant'],
                      input_shapes=[
                          TensorShape([1, 1, 4, 4]),
                          TensorShape([2, 1, 2, 2]),
                          TensorShape([2])
                      ],
                      subgraph=None,
                      attrs={'data_layout': 'NCHW'},
                      kernel=None,
                      kernel_layout='OIHW',
                      kernel_groups=1,
                      biases=None,
                      paddings=[[0, 0], [0, 0], [0, 0], [0, 0]],
                      strides=[1, 1, 1, 1],
                      dilations=[1, 1, 1, 1]),
            QuantizeInterLayer(name='conv1_quant',
                               shape=TensorShape([1, 2, 3, 3]),
                               dtype='int8',
                               inputs=['conv1'],
                               input_shapes=[TensorShape([1, 2, 3, 3])],
                               subgraph=None,
                               prescale_shift=quant_params['prescale_shift'],
                               scale=quant_params['scale'],
                               postscale_shift=quant_params['postscale_shift'],
                               axis=1,
                               bitwidth=8),
            UnQuantizeLayer(name='output',
                            shape=TensorShape([1, 2, 3, 3]),
                            dtype='float32',
                            inputs=['conv1_quant'],
                            input_shapes=[TensorShape([1, 2, 3, 3])],
                            subgraph=None,
                            input_types=['int8'],
                            threshold=[quant_params['th_layer_out']],
                            axis=0,
                            bitwidth=8)
        ]

        inputs = {
            'input':
            np.reshape(
                np.array([[10, 10, 0, 40], [50, 10, 0, 80], [30, 50, 10, 0],
                          [10, 90, 30, 40]]), (1, 1, 4, 4)),
            'kernel':
            np.reshape(
                np.array([[[1, 2], [3, 0]], [[1, 1], [0, 1]]],
                         dtype=np.float32), (2, 1, 2, 2)),
            'bias':
            np.array([0., 0.])
        }

        for layer in layers:

            # print("-----------------------")
            # print("Run layer: {}".format(layer.name))

            inpts = [inputs[name] for name in layer.inputs]
            outpt = layer.forward_exec(inpts)

            # print("Output:", outpt.shape, outpt)

            inputs[layer.name] = outpt

        expected_outpt = np.array([[[[174.99213, 36.45669, 80.20473],
                                     [153.1181, 153.1181, 189.57481],
                                     [153.1181, 335.40158, 94.78741]],
                                    [[29.165354, 7.2913384, 116.661415],
                                     [109.37008, 21.874016, 80.20473],
                                     [167.70079, 87.49606, 51.03937]]]],
                                  dtype=np.float32)

        np.testing.assert_array_almost_equal(outpt, expected_outpt, decimal=4)
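
The scale factors in `quant_params` are consistent with symmetric 8-bit quantization, i.e. sf = threshold / (2**(bitwidth - 1) - 1) = threshold / 127 (the small differences in `sf_params` come from float32 rounding); a quick check:

bitwidth = 8
max_int = 2 ** (bitwidth - 1) - 1  # 127

print(155.0 / max_int)                 # ~1.2204724  (sf_layer_in)
print(926.0 / max_int)                 # ~7.2913386  (sf_layer_out)
print([3.0 / max_int, 1.0 / max_int])  # ~[0.0236220, 0.0078740]  (sf_params)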