Example #1
    def test_conv2d_transpose(self):
        tf.compat.v1.reset_default_graph()
        K = np.reshape(np.array([[[1, 1, 1], [1, 2, 1], [1, 1, 1]]],
                                dtype=np.float32),
                       (1, 1, 3, 3))
        B = np.array([1], dtype=np.float32)

        X = xlayer.XLayer(
            name='tconv',
            type=['TransposeConv2D'],
            shapes=[1, 1, 6, 6],
            sizes=[25],
            bottoms=['input'],
            tops=[],
            data=xlayer.ConvData(K, B),
            attrs={
                'data_layout': 'NCHW',
                'kernel_layout': 'OIHW',
                'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'strides': [2, 2],
                'dilation': [1, 1],
                'groups': 1
            },
            targets=[]
        )

        input_shapes = {
            'input': TensorShape([1, 1, 3, 3])
        }
        data = np.reshape(np.array([[1, 2, 3],
                                    [4, 5, 6],
                                    [7, 8, 9]], dtype=np.float32),
                          (1, 1, 3, 3))
        inputs = {
            'input': data
        }
        params = {
            'tconv_kernel': K,
            'tconv_biases': B
        }
        layers = base.get_conv2d_transpose_layer(
            Conv2DTransposeLayer, ConstantLayer)(X, input_shapes, params)
        assert(len(layers) == 3)

        inputs.update(params)
        for layer in layers:
            inpts = [inputs[name] for name in layer.inputs]
            outpt = layer.forward_exec(inpts)
            # Store the output so downstream layers can look it up by name
            inputs[layer.name] = outpt

        expected_outpt = np.array([1], dtype=np.float32) +\
            np.array([[[[1, 1, 3, 2, 5, 3],
                        [1, 2, 3, 4, 5, 6],
                        [5, 5, 12, 7, 16, 9],
                        [4, 8, 9, 10, 11, 12],
                        [11, 11, 24, 13, 28, 15],
                        [7, 14, 15, 16, 17, 18]]]],
                     dtype=np.float32)

        np.testing.assert_array_almost_equal(outpt, expected_outpt)
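For reference, the expected values above can be reproduced with plain NumPy by viewing the strided transposed convolution as a scatter-add: each input pixel adds its kernel-weighted copy to the output at a stride-2 offset, and the result is cropped to the declared [1, 1, 6, 6] output shape. This is a minimal sketch, not part of the test suite; conv2d_transpose_ref is a hypothetical helper.

import numpy as np

def conv2d_transpose_ref(x, k, stride=2, out_size=6):
    # Scatter-add reference: input pixel x[i, j] contributes x[i, j] * k to the
    # output patch whose top-left corner is (i * stride, j * stride).
    h, w = x.shape
    kh, kw = k.shape
    full = np.zeros(((h - 1) * stride + kh, (w - 1) * stride + kw), dtype=x.dtype)
    for i in range(h):
        for j in range(w):
            full[i * stride:i * stride + kh, j * stride:j * stride + kw] += x[i, j] * k
    # Crop to the declared output spatial size (6x6 here)
    return full[:out_size, :out_size]

x = np.arange(1, 10, dtype=np.float32).reshape(3, 3)
k = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]], dtype=np.float32)
print(conv2d_transpose_ref(x, k) + 1.0)  # bias of 1 added; matches expected_outpt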
Example #2
def merge_batchnorm_into_conv(xgraph, bottom_Xs, X, top_Xs, **kwargs):
    # type: (XGraph, List[XLayer], XLayer, List[XLayer]) -> bool
    """
    Try to merge a batch normalization layer into the preceding Convolution
    layer(s).

    Conv' = Conv + BN
          = Gamma*((Wx+B) - Mu)/Sigma + Beta
          = Gamma*(W/Sigma)x + Gamma*(B - Mu)/Sigma + Beta
    with Sigma = sqrt(sigma_square + epsilon).
    """
    changes = False
    if 'BatchNorm' in X.type:
        if not all(['Convolution' in b_X.type for b_X in bottom_Xs]):
            # Batch norm can only be merged with preceding Convolution
            return changes
        changes = True

        if not isinstance(X.data, xlayer.BatchData):
            raise ValueError("Invalid batch normalization data type: {}, "
                             " should be of type: xlayer.BatchData".format(
                                 type(X.data)))

        for bottom_X in bottom_Xs:
            if not isinstance(bottom_X.data, xlayer.ConvData):
                raise ValueError(
                    "Invalid convolution parameters data type:"
                    " {}, should be of type: xlayer.ConvData".format(
                        type(bottom_X.data)))

            # Weights should have layout: OIHW at this point
            conv_weights = bottom_X.data.weights
            conv_biases = bottom_X.data.biases
            bn_mu, bn_sigma_square = X.data.mu, X.data.sigma_square
            bn_gamma, bn_beta = X.data.gamma, X.data.beta
            shape = (conv_weights.shape[0], 1, 1, 1)
            epsilon = X.attrs['epsilon']

            assert (conv_weights.shape[0] == conv_biases.shape[0] ==
                    bn_mu.shape[0] == bn_sigma_square.shape[0])
            conv_weights = bn_gamma.reshape(shape) *\
                (conv_weights /
                 np.sqrt(bn_sigma_square.reshape(shape) + epsilon))
            conv_biases = bn_gamma * \
                ((conv_biases - bn_mu) / np.sqrt(bn_sigma_square + epsilon)) +\
                bn_beta

            bottom_X.data = xlayer.ConvData(conv_weights, conv_biases)
            bottom_X.layer = bottom_X.layer[:] + [X.name]
            # bottom_X = bottom_X._replace(
            #     data=xlayer.ConvData(conv_weights, conv_biases),
            #     layer=bottom_X.layer[:] + [X.name]
            # )
            # xgraph.update(bottom_X)

        # Remove the batch norm node
        xgraph.remove(X.name)

    return changes
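As a numeric sanity check of the folding formulas in the docstring, the sketch below (illustrative only, not part of the library) verifies per channel that applying batch normalization to a convolution output equals the convolution evaluated with the folded weights W' = gamma*W/sqrt(sigma_square + eps) and biases B' = gamma*(B - mu)/sqrt(sigma_square + eps) + beta. A 1x1 convolution is assumed, so the convolution reduces to a per-channel multiply.

import numpy as np

np.random.seed(0)
x = np.random.randn(4).astype(np.float32)        # one activation per channel
W = np.random.randn(4).astype(np.float32)        # 1x1 conv weight per channel
B = np.random.randn(4).astype(np.float32)
gamma, beta = np.random.randn(4), np.random.randn(4)
mu, sigma_square = np.random.randn(4), np.abs(np.random.randn(4))
eps = 1e-5

# BatchNorm applied to the convolution output
bn_of_conv = gamma * ((W * x + B) - mu) / np.sqrt(sigma_square + eps) + beta

# Convolution evaluated with the folded parameters
W_new = gamma * (W / np.sqrt(sigma_square + eps))
B_new = gamma * ((B - mu) / np.sqrt(sigma_square + eps)) + beta
folded = W_new * x + B_new

np.testing.assert_allclose(bn_of_conv, folded, rtol=1e-5, atol=1e-5)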
Example #3
    def test_conv2d(self):
        tf.compat.v1.reset_default_graph()
        K = np.reshape(
            np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32),
            (2, 1, 2, 2))
        B = np.array([0, 0], dtype=np.float32)

        X = xlayer.XLayer(name='test_conv2d',
                          type=['Convolution'],
                          shapes=[1, 2, 3, 3],
                          sizes=[18],
                          bottoms=['input'],
                          tops=[],
                          data=xlayer.ConvData(K, B),
                          attrs={
                              'data_layout': 'NCHW',
                              'kernel_layout': 'OIHW',
                              'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                              'strides': [1, 1],
                              'dilation': [1, 1],
                              'groups': 1
                          },
                          targets=[])

        input_shapes = {'input': TensorShape([1, 1, 4, 4])}
        inputs = {'input': np.ones((1, 1, 4, 4), dtype=np.float32)}
        params = {
            'test_conv2d_kernel':
            np.reshape(
                np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                         dtype=np.float32), (2, 1, 2, 2)),
            'test_conv2d_biases':
            np.array([0, 0], dtype=np.float32)
        }
        layers = base.get_conv2d_layer(ConvLayer,
                                       ConstantLayer)(X, input_shapes, params)
        assert (len(layers) == 3)

        inputs.update(params)
        for layer in layers:

            # print("-----------------------")
            # print("Run layer: {}".format(layer.name))

            inpts = [inputs[name] for name in layer.inputs]
            outpt = layer.forward_exec(inpts)

            # print("Output:", outpt.shape, outpt)

            inputs[layer.name] = outpt

        expected_outpt = np.array([[[[10., 10., 10.], [10., 10., 10.],
                                     [10., 10., 10.]],
                                    [[26., 26., 26.], [26., 26., 26.],
                                     [26., 26., 26.]]]])

        np.testing.assert_array_equal(outpt, expected_outpt)
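The expected values are easy to check by hand (an aside, not part of the test): the input is all ones, so every valid 2x2 window just sums the corresponding kernel, i.e. 1+2+3+4 = 10 for output channel 0 and 5+6+7+8 = 26 for channel 1, replicated across the 3x3 output.

import numpy as np

K = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32)
per_channel = K.reshape(2, -1).sum(axis=1)                    # -> [10., 26.]
expected = np.broadcast_to(per_channel.reshape(1, 2, 1, 1), (1, 2, 3, 3))
print(expected)  # matches expected_outpt above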
Example #4
def merge_bias(xgraph, bottom_Xs, X, top_Xs, **kwargs):
    # type: (XGraph, List[XLayer], XLayer, List[XLayer]) -> bool
    """
    Try to merge a bias addition layer into the preceding Convolution,
    Conv2DTranspose or Dense layer.
    """
    changes = False

    # Eltwise operation with bias add
    if 'BiasAdd' in X.type or ('Eltwise' in X.type and X.data != []):
        if len(bottom_Xs) != 1:
            raise ValueError("Impossible to merge bias layer. Bias layer"
                             " must always be preceded by exactly one layer,"
                             " but found: {}.".format(len(bottom_Xs)))

        bottom_X = bottom_Xs[0]
        if bottom_X.type[0] in ['Convolution', 'Dense', 'Conv2DTranspose']:
            bias = bottom_X.data.biases + X.data[0]
            # if bottom_X.bias and X.data is not None else X.data
            changes = True

            # TODO: remove Relay specific code in core functionality
            if 'relay_id' in bottom_X.attrs and 'relay_id' in X.attrs:
                bottom_X.attrs['relay_id'] += X.attrs['relay_id']

            bottom_X.data = xlayer.ConvData(bottom_X.data.weights, bias)
            bottom_X.layer = bottom_X.layer[:] + [X.name]
            # bottom_X = bottom_X._replace(
            #     data=xlayer.ConvData(bottom_X.data.weights, bias),
            #     layer=bottom_X.layer[:] + [X.name]
            # )
            # xgraph.update(bottom_X)

            # Remove the bias addition node
            xgraph.remove(X.name)

    return changes
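The fold above is valid because per-channel bias addition is associative: adding an extra bias vector after a convolution (or dense layer) is the same as summing it into the layer's own biases. A minimal NumPy sketch of that identity, with made-up shapes:

import numpy as np

np.random.seed(0)
y = np.random.randn(1, 4, 8, 8).astype(np.float32)   # stand-in pre-bias conv output
conv_biases = np.random.randn(4).astype(np.float32)
extra_bias = np.random.randn(4).astype(np.float32)   # the BiasAdd/Eltwise constant

before = (y + conv_biases.reshape(1, 4, 1, 1)) + extra_bias.reshape(1, 4, 1, 1)
after = y + (conv_biases + extra_bias).reshape(1, 4, 1, 1)
np.testing.assert_allclose(before, after, rtol=1e-5, atol=1e-5)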
Example #5
def merge_scale_into_conv_bn(xgraph, bottom_Xs, X, top_Xs, **kwargs):
    # type: (XGraph, List[XLayer], XLayer, List[XLayer]) -> bool
    """
    Try to merge a scaling layer into the preceding Convolution or BatchNorm
    layer(s).

    Conv' = Conv + Scale : (Wx+B)*gamma + beta = (W*gamma)x + (B*gamma + beta)
    BN'   = BN + Scale   :
        (bn_gamma*(x - mu)/sigma + bn_beta)*gamma + beta
        = (bn_gamma*gamma)*(x - mu)/sigma + (bn_beta*gamma + beta)
    """
    changes = False
    if 'Scale' in X.type:
        # TODO: Scale + Scale
        if not all(
            [b_X.type[0] in ['Convolution', 'BatchNorm']
             for b_X in bottom_Xs]):
            # Scaling can only be merged into Convolution and BatchNorm
            return changes

        changes = True

        if not isinstance(X.data, xlayer.ScaleData):
            raise ValueError("Invalid batch normalization data type: {}, "
                             " should be of type: xlayer.ScaleData".format(
                                 type(X.data)))

        for bottom_X in bottom_Xs:

            if bottom_X.type[0] == 'Convolution':
                if not isinstance(bottom_X.data, xlayer.ConvData):
                    raise ValueError("Invalid convolution parameters data"
                                     " type: {}, should be of type:"
                                     " xlayer.ConvData".format(
                                         type(bottom_X.data)))

                # Weights should have layout: OIHW at this point
                conv_weights = bottom_X.data.weights
                conv_biases = bottom_X.data.biases
                gamma, beta = X.data.gamma, X.data.beta
                shape = (conv_weights.shape[0], 1, 1, 1)

                assert (conv_weights.shape[0] == conv_biases.shape[0] ==
                        gamma.shape[0] == beta.shape[0])
                conv_weights = conv_weights * gamma.reshape(shape)
                conv_biases = conv_biases * gamma + beta

                # bottom_X = bottom_X._replace(
                #     data=xlayer.ConvData(conv_weights, conv_biases),
                #     layer=bottom_X.layer[:] + [X.name]
                # )

                bottom_X.data = xlayer.ConvData(conv_weights, conv_biases)
                bottom_X.layer = bottom_X.layer[:] + [X.name]

            elif bottom_X.type[0] == 'BatchNorm':
                if not isinstance(bottom_X.data, xlayer.BatchData):
                    raise ValueError(
                        "Invalid BatchNorm parameters data type:"
                        " {}, should be of type: xlayer.BatchData".format(
                            type(bottom_X.data)))

                if not isinstance(X.data, xlayer.ScaleData):
                    raise ValueError(
                        "Invalid scaling layer data type: {},"
                        " should be of type: xlayer.ScaleData".format(
                            type(X.data)))

                # Fold the scale's gamma/beta into the BatchNorm's gamma/beta
                gamma, beta = X.data.gamma, X.data.beta
                bn_mu, bn_sigma_square = \
                    bottom_X.data.mu, bottom_X.data.sigma_square
                bn_gamma, bn_beta = \
                    bottom_X.data.gamma, bottom_X.data.beta

                epsilon = bottom_X.attrs['epsilon']

                assert (bn_mu.shape[0] == bn_sigma_square.shape[0] ==
                        bn_gamma.shape[0] == bn_beta.shape[0] == gamma.shape[0]
                        == beta.shape[0])

                new_gamma = gamma * bn_gamma
                new_beta = gamma * bn_beta + beta

                bottom_X.data = xlayer.BatchData(bn_mu, bn_sigma_square,
                                                 new_gamma, new_beta)
                bottom_X.layer = bottom_X.layer[:] + [X.name]

        # Remove the Scale node
        xgraph.remove(X.name)

    return changes
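For the BatchNorm branch, the layer stays a BatchNorm and only gamma and beta are rewritten. A small per-channel NumPy sketch (illustrative only) checking that new_gamma = gamma * bn_gamma and new_beta = gamma * bn_beta + beta reproduce the scaled batch-norm output:

import numpy as np

np.random.seed(1)
x = np.random.randn(4).astype(np.float32)
mu, sigma_square = np.random.randn(4), np.abs(np.random.randn(4))
bn_gamma, bn_beta = np.random.randn(4), np.random.randn(4)
gamma, beta = np.random.randn(4), np.random.randn(4)
eps = 1e-5

# Scale applied after the original BatchNorm
bn = bn_gamma * (x - mu) / np.sqrt(sigma_square + eps) + bn_beta
scaled = gamma * bn + beta

# BatchNorm with the folded gamma/beta
new_gamma = gamma * bn_gamma
new_beta = gamma * bn_beta + beta
folded = new_gamma * (x - mu) / np.sqrt(sigma_square + eps) + new_beta

np.testing.assert_allclose(scaled, folded, rtol=1e-5, atol=1e-5)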