Code Example #1
File: classification.py  Project: zwsong/nnabla
def mnist_lenet_prediction(image, test=False):
    """
    Construct LeNet for MNIST.
    """
    image /= 255.0
    c1 = PF.convolution(image, 16, (5, 5), name='conv1')
    c1 = F.relu(F.max_pooling(c1, (2, 2)), inplace=True)
    c2 = PF.convolution(c1, 16, (5, 5), name='conv2')
    c2 = F.relu(F.max_pooling(c2, (2, 2)), inplace=True)
    c3 = F.relu(PF.affine(c2, 50, name='fc3'), inplace=True)
    c4 = PF.affine(c3, 10, name='fc4')
    return c4
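A minimal usage sketch, assuming MNIST-shaped input, a batch size of 64, and that the usual nnabla imports (nnabla.functions as F, nnabla.parametric_functions as PF) are in scope for the function above:

import numpy as np
import nnabla as nn

# Build the inference graph for a batch of MNIST images (assumed shape).
image = nn.Variable((64, 1, 28, 28))
pred = mnist_lenet_prediction(image, test=True)  # logits of shape (64, 10)

# Feed random data and run a forward pass.
image.d = np.random.rand(64, 1, 28, 28).astype(np.float32)
pred.forward()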
Code Example #2
File: cnn_model_005_001.py  Project: kzky/works
def cnn_model_003(ctx, x, act=F.relu, test=False):
    with nn.context_scope(ctx):
        # Convblock0
        h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 32 -> 16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)

        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 16 -> 8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)

        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)

        # Convblock 3
        h = F.average_pooling(h, (6, 6))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
        return h
Code Example #3
File: cnn_model_050.py  Project: kzky/works
def cnn_model_003(ctx, x, act=F.elu, do=True, test=False):
    with nn.context_scope(ctx):
        # Convblock0
        h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 32 -> 16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)

        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 16 -> 8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)

        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        h_branch = h

        # Convblock 3
        h = conv_unit(h_branch, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
        h = F.average_pooling(h, (6, 6))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        pred = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))

        # Uncertainty
        u0 = conv_unit(h_branch, "u0", 10, k=1, s=1, p=0, act=act, test=test)
        u0 = F.average_pooling(u0, (6, 6))
        with nn.parameter_scope("u0bn"):
            u0 = PF.batch_normalization(u0, batch_stat=not test)
            log_var = F.reshape(u0, (u0.shape[0], np.prod(u0.shape[1:])))

        # Uncertainty for uncertainty
        u1 = conv_unit(h_branch, "u1", 10, k=1, s=1, p=0, act=act, test=test)
        u1 = F.average_pooling(u1, (6, 6))
        with nn.parameter_scope("u1bn"):
            u1 = PF.batch_normalization(u1, batch_stat=not test)
            log_s = F.reshape(u1, (u1.shape[0], np.prod(u1.shape[1:])))

        return pred, log_var, log_s
Code Example #4
File: cnn_model_051.py  Project: kzky/works
def cnn_model_003(ctx, h, act=F.elu, do=True, test=False):
    with nn.context_scope(ctx):
        if not test:
            b, c, s, s = h.shape
            h = F.image_augmentation(h, (c, s, s),
                                     min_scale=1.0, max_scale=1.5,
                                     angle=0.5, aspect_ratio=1.3, distortion=0.2,
                                     flip_lr=True)
        # Convblock0
        h = conv_unit(h, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 32 -> 16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)

        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 16 -> 8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)

        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        u = h

        # Convblock 3
        h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
        h = F.average_pooling(h, (6, 6))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        pred = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))

        # Uncertainty
        u = conv_unit(u, "u0", 10, k=1, s=1, p=0, act=act, test=test)
        u = F.average_pooling(u, (6, 6))
        with nn.parameter_scope("u0bn"):
            u = PF.batch_normalization(u, batch_stat=not test)
            log_var = F.reshape(u, (u.shape[0], np.prod(u.shape[1:])))

        return pred, log_var
Code Example #5
File: cnn_model_060.py  Project: kzky/works
    def res_unit(x, scope_name, dn=False, test=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
            return h
Code Example #6
File: cnn_model_051.py  Project: kzky/works
def cnn_model_003(ctx, x, act=F.elu, do=True, test=False):
    with nn.context_scope(ctx):
        # Convblock0
        h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 28 -> 14
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)

        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 14 -> 7
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)

        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 7 -> 5
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        u = h

        # Convblock 3
        h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
        h = F.average_pooling(h, (5, 5))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        pred = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))

        # Uncertainty
        u = conv_unit(u, "u0", 10, k=1, s=1, p=0, act=act, test=test)
        u = F.average_pooling(u, (5, 5))
        with nn.parameter_scope("u0bn"):
            u = PF.batch_normalization(u, batch_stat=not test)
            log_var = F.reshape(u, (u.shape[0], np.prod(u.shape[1:])))

        return pred, log_var
Code Example #7
    def __call__(self, x):
        '''
        Defines a ResNet-like network according to the configuration specified.

        Args:
            x:
                A Variable object which has a shape with a format
                `NCHW` if `channel_last=False` else `NHWC`.

        Returns:
            * An output `Variable` of classification layer
            * Intermediate `Variable` outputs from input and output of each
              cell

        '''

        logger.debug(x.shape)

        # First convolution
        axes = [get_channel_axis(self.channel_last)]
        with nn.parameter_scope("conv1"):
            r = pf_convolution(x,
                               64, (7, 7),
                               stride=(2, 2),
                               channel_last=self.channel_last)
            r = PF.fused_batch_normalization(r,
                                             axes=axes,
                                             batch_stat=not self.test)
            mp_opts = dict(
                ignore_border=False) if self.max_pooling_ceil_border else dict(
                    pad=(1, 1))
            r = F.max_pooling(r, (3, 3), (2, 2),
                              channel_last=self.channel_last,
                              **mp_opts)
        hidden = {}
        hidden['r0'] = r
        logger.debug(r.shape)

        # Create cells each of which consists of blocks repeatedly applied
        cell_configs = self.get_cell_configurations(self.num_layers)
        for i, (counts, ochannels, strides) in enumerate(zip(*cell_configs)):
            with nn.parameter_scope("res{}".format(i + 1)):
                r = self.cell(r, ochannels, counts, (strides, ) * 2)
            hidden['r{}'.format(i + 1)] = r
            logger.debug(r.shape)

        # Global average pooling
        pool_shape = get_spatial_shape(r.shape, self.channel_last)
        r = F.average_pooling(r, pool_shape, channel_last=self.channel_last)

        # Final classification layer
        with nn.parameter_scope("fc"):
            r = pf_affine(r, self.num_classes, channel_last=self.channel_last)
        return r, hidden
Code Example #8
 def net(h):
     import nnabla.functions as F
     import nnabla.parametric_functions as PF
     h = PF.convolution(h, 3, (3, 3), name="conv1")
     h = PF.batch_normalization(h, name="bn1")
     h = F.relu(h)
     h = F.max_pooling(h, (2, 2))
     h = PF.convolution(h, 3, (3, 3), name="conv2")
     h = PF.batch_normalization(h, name="bn2")
     pred = F.relu(h)
     return pred
Code Example #9
def zero(x, output_filter, scope,
         input_node_id, is_reduced, test, is_search):
    """
        Zero operation, i.e. all elements become 0.
    """
    if is_reduced and input_node_id < 2:
        h = F.max_pooling(x, kernel=(1, 1), stride=(2, 2))  # downsampling
        h = F.mul_scalar(h, 0)
    else:
        h = F.mul_scalar(x, 0)
    return h
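A quick shape check for the zero operation above, on a toy variable (the shapes here are assumptions chosen for illustration):

import nnabla as nn
import nnabla.functions as F

x = nn.Variable((1, 8, 16, 16))
# Reduced cell, early input node: downsample spatially, then zero out.
z_down = F.mul_scalar(F.max_pooling(x, kernel=(1, 1), stride=(2, 2)), 0)  # (1, 8, 8, 8)
# Otherwise the zeros keep the input shape.
z_same = F.mul_scalar(x, 0)  # (1, 8, 16, 16)
print(z_down.shape, z_same.shape)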
Code Example #10
def lenet(image, test=False):
    h = PF.convolution(image,
                       16, (5, 5), (1, 1),
                       with_bias=False,
                       name='conv1')
    h = PF.batch_normalization(h, batch_stat=not test, name='conv1-bn')
    h = F.max_pooling(h, (2, 2))
    h = F.relu(h)

    h = PF.convolution(h, 16, (5, 5), (1, 1), with_bias=True, name='conv2')
    h = PF.batch_normalization(h, batch_stat=not test, name='conv2-bn')
    h = F.max_pooling(h, (2, 2))
    h = F.relu(h)

    h = PF.affine(h, 10, with_bias=False, name='fc1')
    h = PF.batch_normalization(h, batch_stat=not test, name='fc1-bn')
    h = F.relu(h)

    pred = PF.affine(h, 10, with_bias=True, name='fc2')
    return pred
Code Example #11
File: models.py  Project: kzky/reproductions
def mnist_lenet_prediction_slim(image, scope="slim", rrate=0.75, test=False):
    """
    Construct LeNet for MNIST.
    """
    with nn.parameter_scope(scope):
        image /= 255.0
        c1 = PF.convolution(image, 16, (5, 5), name='conv1')
        c1 = F.relu(F.max_pooling(c1, (2, 2)), inplace=True)
        c2 = PF.convolution(c1, 16, (5, 5), name='conv2')
        c2 = F.relu(F.max_pooling(c2, (2, 2)), inplace=True)

        # SVD applied
        inmaps = np.prod(c2.shape[1:])  # c * h * w
        outmaps0 = 50  # original outmaps
        outmaps1 = reduce_maps(inmaps, outmaps0, rrate)
        d0 = F.relu(PF.affine(c2, outmaps1, name='fc-d0'), inplace=True)
        d1 = F.relu(PF.affine(d0, outmaps0, name='fc-d1'), inplace=True)

        c4 = PF.affine(d1, 10, name='fc4')
    return c4
Code Example #12
File: model_resnet.py  Project: sony/nnabla-examples
def resnet_imagenet(x,
                    num_classes,
                    num_layers,
                    shortcut_type,
                    test,
                    tiny=False):
    """
    Args:
        x : Variable
        num_classes : Number of classes of outputs
        num_layers : Number of layers of ResNet chosen from (18, 34, 50, 101, 152)
        shortcut_type : 'c', 'b', ''
            'c' : Use Convolution anytime
            'b' : Use Convolution if numbers of channels of input
                  and output mismatch.
            '' : Use Identity mapping if channels match, otherwise zero padding.
        test : Construct net for testing.
        tiny (bool): Tiny imagenet mode. Input image must be (3, 56, 56).
    """
    layers = {
        18: ((2, 2, 2, 2), basicblock, 1),
        34: ((3, 4, 6, 3), basicblock, 1),
        50: ((3, 4, 6, 3), bottleneck, 4),
        101: ((3, 4, 23, 3), bottleneck, 4),
        152: ((3, 8, 36, 3), bottleneck, 4)
    }

    counts, block, ocoef = layers[num_layers]
    logger.debug(x.shape)
    with nn.parameter_scope("conv1"):
        stride = (1, 1) if tiny else (2, 2)
        r = PF.convolution(x,
                           64, (7, 7),
                           pad=(3, 3),
                           stride=stride,
                           with_bias=False)
        r = F.relu(PF.batch_normalization(r, batch_stat=not test))
        r = F.max_pooling(r, (3, 3), stride, pad=(1, 1))
    hidden = {}
    hidden['r0'] = r
    ochannels = [64, 128, 256, 512]
    strides = [1, 2, 2, 2]
    logger.debug(r.shape)
    for i in range(4):
        with nn.parameter_scope("res{}".format(i + 1)):
            r = layer(r, block, ochannels[i] * ocoef, counts[i],
                      (strides[i], strides[i]), shortcut_type, test)
        hidden['r{}'.format(i + 1)] = r
        logger.debug(r.shape)
    r = F.average_pooling(r, r.shape[-2:])
    with nn.parameter_scope("fc"):
        r = PF.affine(r, num_classes)
    logger.debug(r.shape)
    return r, hidden
Code Example #13
File: models.py  Project: sony/nnabla-examples
 def down_block(input, output_channels=64, stride=1, scope='down_block'):
     with nn.parameter_scope(scope):
         net = conv2d(input,
                      output_channels, (3, 3), (stride, stride),
                      name='conv_1')
         net = F.leaky_relu(net, 0.2)
         net = conv2d(net,
                      output_channels, (3, 3), (stride, stride),
                      name='conv_2')
         net = F.leaky_relu(net, 0.2)
         net = F.max_pooling(net, (2, 2), channel_last=True)
     return net
Code Example #14
def cnn_dni(image, y=None, maps=128, ncls=10, test=False):
    with nn.parameter_scope("ref"):
        image /= 255.
        h = act_bn_conv(image, maps, test, name="conv0")
        h = F.max_pooling(h, (2, 2))  # 28x28 -> 14x14
        h = act_bn_conv(h, maps, act=None, test=test, name="conv1")

    # decoupled here
    h_d, h_copy, x_input, g_label = decouple(h)
    g_pred = cnn_gradient_synthesizer(x_input, y, test)
    h_d.grad = g_pred.data

    h = F.relu(h)  # decouple after non-linearity
    h = F.max_pooling(h, (2, 2))  # 14x14 -> 7x7
    with nn.parameter_scope("ref"):
        h = act_bn_conv(h, maps, test, name="conv2")
        h = F.average_pooling(h, (7, 7))  # 7x7 -> 1x1
        pred = PF.affine(h, ncls, name="fc")
        pred.persistent = True

    return h_d, h_copy, pred, g_pred, g_label
Code Example #15
File: resnets.py  Project: aswifi/nnabla
def small_bn_rm_resnet(image, test=False, w_bias=False, name='bn-rm-graph-ref'):
    h = image
    h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
                       with_bias=w_bias, name='first-conv')
    h = F.relu(h)
    h = F.max_pooling(h, (2, 2))
    h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb1')
    h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb2')
    h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb3')
    h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb4')
    h = F.average_pooling(h, (2, 2))
    pred = PF.affine(h, 10, name='bn-rm-fc')
    return pred
Code Example #16
def mnist_resnet_prediction(image, test=False, aug=None):
    """
    Construct ResNet for MNIST.
    """
    image /= 255.0
    image = augmentation(image, test, aug)

    def bn(x):
        return PF.batch_normalization(x, batch_stat=not test)

    def res_unit(x, scope):
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.elu(bn(PF.convolution(x, C // 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.elu(
                    bn(PF.convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.convolution(h, C, (1, 1), with_bias=False))
        return F.elu(F.add2(h, x, inplace=True))
    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.elu(
            bn(PF.convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv5 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = PF.affine(pl, 10)
    return y
Code Example #17
def mnist_binary_connect_resnet_prediction(image, test=False):
    """
    Construct ResNet for MNIST (BinaryNet version).
    """
    def bn(x):
        return PF.batch_normalization(x, batch_stat=not test)

    def res_unit(x, scope):
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.elu(bn(PF.binary_connect_convolution(
                    x, C // 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.elu(
                    bn(PF.binary_connect_convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.binary_connect_convolution(
                    h, C, (1, 1), with_bias=False))
        return F.elu(x + h)
    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.elu(
            bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv5 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = bn(PF.binary_connect_affine(pl, 10))
    return y
Code Example #18
def fpq_lenet(image,
              test=False,
              n=8,
              delta=2e-4,
              name="fixed-point-graph-ref"):
    with nn.parameter_scope(name):
        h = PF.fixed_point_quantized_convolution(image,
                                                 16, (5, 5), (1, 1),
                                                 with_bias=False,
                                                 delta_w=delta,
                                                 name='conv1')
        h = PF.batch_normalization(h, batch_stat=not test, name='conv1-bn')
        h = F.max_pooling(h, (2, 2))
        h = F.fixed_point_quantize(h, n=n, delta=delta, sign=False)

        h = PF.fixed_point_quantized_convolution(h,
                                                 16, (5, 5), (1, 1),
                                                 with_bias=True,
                                                 delta_w=delta,
                                                 name='conv2')
        h = PF.batch_normalization(h, batch_stat=not test, name='conv2-bn')
        h = F.max_pooling(h, (2, 2))
        h = F.fixed_point_quantize(h, n=n, delta=delta, sign=False)

        h = PF.fixed_point_quantized_affine(h,
                                            10,
                                            with_bias=False,
                                            delta_w=delta,
                                            name='fc1')
        h = PF.batch_normalization(h, batch_stat=not test, name='fc1-bn')
        h = F.fixed_point_quantize(h, n=n, delta=delta, sign=False)

        pred = PF.fixed_point_quantized_affine(h,
                                               10,
                                               with_bias=True,
                                               delta_w=delta,
                                               name='fc2')
    return pred
Code Example #19
File: lpips.py  Project: sony/nnabla-examples
def get_alex_feat(input_var):
    """
        Exactly the same architecture as the one used for LPIPS.
        This is a slightly modified version (nnabla's pretrained models cannot be used here).
    """
    assert input_var.shape[1] == 3
    act1 = F.relu(
        PF.convolution(input_var,
                       outmaps=64,
                       kernel=(11, 11),
                       pad=(2, 2),
                       stride=(4, 4),
                       name="conv0"), True)
    act2 = F.relu(
        PF.convolution(F.max_pooling(act1, kernel=(3, 3), stride=(2, 2)),
                       outmaps=192,
                       kernel=(5, 5),
                       pad=(2, 2),
                       name="conv3"), True)
    act3 = F.relu(
        PF.convolution(F.max_pooling(act2, kernel=(3, 3), stride=(2, 2)),
                       outmaps=384,
                       kernel=(3, 3),
                       pad=(1, 1),
                       name="conv6"), True)
    act4 = F.relu(
        PF.convolution(act3,
                       outmaps=256,
                       kernel=(3, 3),
                       pad=(1, 1),
                       name="conv8"), True)
    act5 = F.relu(
        PF.convolution(act4,
                       outmaps=256,
                       kernel=(3, 3),
                       pad=(1, 1),
                       name="conv10"), True)
    return [act1, act2, act3, act4, act5]
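A minimal sketch of calling the feature extractor above; the input resolution is an assumption, and parameters are freshly initialized here rather than loaded from pretrained AlexNet weights as LPIPS requires:

import nnabla as nn

input_var = nn.Variable((1, 3, 64, 64))  # RGB input of assumed size
feats = get_alex_feat(input_var)         # list of the 5 intermediate activations
assert len(feats) == 5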
Code Example #20
File: classification.py  Project: zwsong/nnabla
def mnist_resnet_prediction(image, test=False):
    """
    Construct ResNet for MNIST.
    """
    image /= 255.0

    def bn(x):
        return PF.batch_normalization(x, batch_stat=not test)

    def res_unit(x, scope):
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.elu(bn(PF.convolution(x, C // 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.elu(
                    bn(PF.convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.convolution(h, C, (1, 1), with_bias=False))
        return F.elu(F.add2(h, x, inplace=True))
    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.elu(
            bn(PF.convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv5 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = PF.affine(pl, 10)
    return y
Code Example #21
File: resnets.py  Project: aswifi/nnabla
def small_clbn_folding_resnet(image, test=False):
    h = image
    h /= 255.0
    h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=True,
                       with_bias=True, name='first-clbn-conv')
    h = F.relu(h)
    h = F.max_pooling(h, (2, 2), channel_last=True)
    h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb1')
    h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb2')
    h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb3')
    h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb4')
    h = F.average_pooling(h, (2, 2), channel_last=True)
    pred = PF.affine(h, 10, name='clbn-fc')
    return pred
Code Example #22
    def res_unit(x, scope_name, rng, dn=False, test=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                w_init = UniformInitializer(calc_uniform_lim_glorot(
                    C, C // 2, kernel=(1, 1)),
                                            rng=rng)
                h = PF.convolution(x,
                                   C // 2,
                                   kernel=(1, 1),
                                   pad=(0, 0),
                                   w_init=w_init,
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                w_init = UniformInitializer(calc_uniform_lim_glorot(
                    C // 2, C // 2, kernel=(3, 3)),
                                            rng=rng)
                h = PF.convolution(h,
                                   C // 2,
                                   kernel=(3, 3),
                                   pad=(1, 1),
                                   w_init=w_init,
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                w_init = UniformInitializer(calc_uniform_lim_glorot(
                    C // 2, C, kernel=(1, 1)),
                                            rng=rng)
                h = PF.convolution(h,
                                   C,
                                   kernel=(1, 1),
                                   pad=(0, 0),
                                   w_init=w_init,
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))

            return h
Code Example #23
def mnist_inq_lenet_prediction(image, num_bits=3,
                               inq_iterations=(5000, 6000, 7000, 8000, 9000),
                               selection_algorithm='largest_abs', test=False):
    """
    Construct LeNet for MNIST (INQ Version).
    """
    image /= 255.0
    c1 = PF.inq_convolution(image, 16, (5, 5), name='conv1', num_bits=num_bits,
                            inq_iterations=inq_iterations,
                            selection_algorithm=selection_algorithm)
    c1 = F.relu(F.max_pooling(c1, (2, 2)), inplace=True)
    c2 = PF.inq_convolution(c1, 16, (5, 5), name='conv2', num_bits=num_bits,
                            inq_iterations=inq_iterations,
                            selection_algorithm=selection_algorithm)
    c2 = F.relu(F.max_pooling(c2, (2, 2)), inplace=True)
    c3 = F.relu(PF.inq_affine(c2, 50, name='fc3', num_bits=num_bits,
                              inq_iterations=inq_iterations,
                              selection_algorithm=selection_algorithm),
                inplace=True)
    c4 = PF.inq_affine(c3, 10, name='fc4', num_bits=num_bits,
                       inq_iterations=inq_iterations,
                       selection_algorithm=selection_algorithm)
    return c4
Code Example #24
File: lenets.py  Project: sony/nnabla
def bn_opp_lenet(image, test=False, channel_last=False, w_bias=False):
    axes = get_channel_axes(image, channel_last)
    h = PF.batch_normalization(image,
                               axes=axes,
                               batch_stat=not test,
                               name='conv1-bn')
    h = PF.convolution(h,
                       16, (5, 5), (1, 1),
                       with_bias=w_bias,
                       channel_last=channel_last,
                       name='conv1')
    h = F.max_pooling(h, (2, 2))
    h = F.relu(h)

    axes = get_channel_axes(h, channel_last)
    h = PF.batch_normalization(h,
                               axes=axes,
                               batch_stat=not test,
                               name='conv2-bn')
    h = PF.convolution(h,
                       16, (5, 5), (1, 1),
                       with_bias=w_bias,
                       channel_last=channel_last,
                       name='conv2')
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = F.relu(h)

    axes = get_channel_axes(h, channel_last)
    h = PF.batch_normalization(h,
                               axes=axes,
                               batch_stat=not test,
                               name='fc1-bn')
    h = PF.affine(h, 10, with_bias=True, name='fc1')
    h = F.relu(h)

    pred = PF.affine(h, 10, with_bias=True, name='fc2')
    return pred
Code Example #25
def small_bn_resnet(image,
                    test=False,
                    w_bias=False,
                    channel_last=False,
                    name='bn-graph-ref'):
    axes = get_channel_axes(channel_last)

    h = image
    h /= 255.0
    h = PF.convolution(h,
                       16,
                       kernel=(3, 3),
                       pad=(1, 1),
                       channel_last=channel_last,
                       with_bias=w_bias,
                       name='first-conv')
    h = PF.batch_normalization(h,
                               axes=axes,
                               batch_stat=not test,
                               name='first-bn')
    h = F.relu(h)
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = bn_resblock(h,
                    maps=16,
                    test=test,
                    w_bias=w_bias,
                    channel_last=channel_last,
                    name='cb1')
    h = bn_resblock(h,
                    maps=16,
                    test=test,
                    w_bias=w_bias,
                    channel_last=channel_last,
                    name='cb2')
    h = bn_resblock(h,
                    maps=16,
                    test=test,
                    w_bias=w_bias,
                    channel_last=channel_last,
                    name='cb3')
    h = bn_resblock(h,
                    maps=16,
                    test=test,
                    w_bias=w_bias,
                    channel_last=channel_last,
                    name='cb4')
    h = F.average_pooling(h, (2, 2), channel_last=channel_last)
    pred = PF.affine(h, 10, name='fc')
    return pred
Code Example #26
def bn_linear_lenet(image, test=False, name="bn-linear-graph-ref"):
    with nn.parameter_scope(name):
        h = PF.convolution(image,
                           16, (5, 5), (1, 1),
                           with_bias=False,
                           name='conv1')
        a, b = create_scale_bias(1, h.shape[1])
        h = a * h + b
        h = F.max_pooling(h, (2, 2))
        h = F.relu(h)

        h = PF.convolution(h, 16, (5, 5), (1, 1), with_bias=True, name='conv2')
        a, b = create_scale_bias(2, h.shape[1])
        h = a * h + b
        h = F.max_pooling(h, (2, 2))
        h = F.relu(h)

        h = PF.affine(h, 10, with_bias=False, name='fc1')
        a, b = create_scale_bias(4, h.shape[1], 2)
        h = a * h + b
        h = F.relu(h)

        pred = PF.affine(h, 10, with_bias=True, name='fc2')
    return pred
Code Example #27
File: resnets.py  Project: aswifi/nnabla
def small_bsf_resnet(image, w_bias=False, name='bn-graph-ref'):
    h = image
    h /= 255.0
    h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
                       with_bias=w_bias, name='first-conv')
    h = PF.batch_normalization(h, batch_stat=False, name='first-bn-bsf')
    h = F.relu(h)
    h = F.max_pooling(h, (2, 2))
    h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb1')
    h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb2')
    h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb3')
    h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb4')
    h = F.average_pooling(h, (2, 2))
    pred = PF.affine(h, 10, name='fc')
    return pred
Code Example #28
File: cnn_model_059.py  Project: kzky/works
def cnn_model_003(ctx, scope, x, act=F.relu, test=False):
    with nn.context_scope(ctx):
        with nn.parameter_scope(scope):
            # Convblock0
            h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
            h = F.max_pooling(h, (2, 2))  # 32 -> 16
            with nn.parameter_scope("bn0"):
                h = PF.batch_normalization(h, batch_stat=not test)
            if not test:
                h = F.dropout(h)

            # Convblock 1
            h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
            h = F.max_pooling(h, (2, 2))  # 16 -> 8
            with nn.parameter_scope("bn1"):
                h = PF.batch_normalization(h, batch_stat=not test)
            if not test:
                h = F.dropout(h)

            # Convblock 2
            h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act,
                          test=test)  # 8 -> 6
            h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
            h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
            h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)

            # Convblock 3
            h = F.average_pooling(h, (6, 6))
            with nn.parameter_scope("bn2"):
                h = PF.batch_normalization(h, batch_stat=not test)
            h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
        return h
Code Example #29
File: cnn_model_024.py  Project: kzky/works
def stochastic_res_unit(x, scope_name, act=F.relu, dn=False, test=False):
    if not test:
        flag = np.random.randint(2)
        if flag:
            h = res_block(x, scope_name, act=act, dn=dn, test=test)
            h = F.add2(h, x)
        else:
            h = x
    else:
        h = res_block(x, scope_name, act=act, dn=dn, test=test)
        h = F.add2(h, x)
    h = act(h)
    # Maxpooling
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    return h
Code Example #30
File: resnets.py  Project: aswifi/nnabla
def small_clbn_self_folding_resnet(image, name='clbn-self-folding-graph-ref'):
    h = image
    h /= 255.0
    h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=True,
                       with_bias=False, name='first-conv')
    a, b = create_scale_bias(1, h.shape[3], axes=[3])
    h = a * h + b
    h = F.relu(h)
    h = F.max_pooling(h, (2, 2), channel_last=True)
    h = clbn_self_folding_resblock(h, 2, maps=16, name='cb1')
    h = clbn_self_folding_resblock(h, 3, maps=16, name='cb2')
    h = clbn_self_folding_resblock(h, 4, maps=16, name='cb3')
    h = clbn_self_folding_resblock(h, 5, maps=16, name='cb4')
    h = F.average_pooling(h, (2, 2), channel_last=True)
    pred = PF.affine(h, 10, name='fc')
    return pred
Code Example #31
def max_pool_3x3(x, output_filter, scope,
                 input_node_id, is_reduced, test, is_search):
    """
        max pooling (with no spatial downsampling).
    """
    if is_reduced and input_node_id < 2:
        stride = (2, 2)
    else:
        stride = (1, 1)

    h = F.max_pooling(x, kernel=(3, 3), stride=stride, pad=(1, 1))
    with nn.parameter_scope(scope + "bn"):
        h = PF.batch_normalization(h, batch_stat=not test,
                                   fix_parameters=is_search)

    return h
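A shape check for the two stride settings used above (the input shape is an assumption):

import nnabla as nn
import nnabla.functions as F

x = nn.Variable((1, 16, 32, 32))
same = F.max_pooling(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1))  # (1, 16, 32, 32)
down = F.max_pooling(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1))  # (1, 16, 16, 16)
print(same.shape, down.shape)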
Code Example #32
File: resnets.py  Project: aswifi/nnabla
def small_cl_resnet(image, test=False):
    h = image
    h /= 255.0
    h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=True,
                       with_bias=False, name='first-cl-conv')
    h = PF.batch_normalization(
        h, axes=[3], batch_stat=not test, name='first-cl-bn')
    h = F.relu(h)
    h = F.max_pooling(h, (2, 2), channel_last=True)
    h = cl_resblock(h, maps=16, test=test, name='cl-cb1')
    h = cl_resblock(h, maps=16, test=test, name='cl-cb2')
    h = cl_resblock(h, maps=16, test=test, name='cl-cb3')
    h = cl_resblock(h, maps=16, test=test, name='cl-cb4')
    h = F.average_pooling(h, (2, 2), channel_last=True)
    pred = PF.affine(h, 10, name='cl-fc')
    return pred
Code Example #33
File: model.py  Project: sony/nnabla-examples
 def convblock(x, nmaps, layer_idx, with_bias, with_bn=False):
     h = x
     scopenames = ["conv{}".format(_) for _ in layer_idx]
     for scopename in scopenames:
         with nn.parameter_scope(scopename):
             if scopename not in ["conv1", "conv13"] and scopename == scopenames[-1]:
                 nmaps *= 2
             h = PF.convolution(h, nmaps, kernel=(3, 3), pad=(
                 1, 1), with_bias=with_bias, fix_parameters=finetune)
             if with_bn:
                 h = PF.batch_normalization(
                     h, batch_stat=not test, fix_parameters=finetune)
         h = F.relu(h)
         if len(scopenames) != 1 and scopename == scopenames[-2]:
             h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
     return h
Code Example #34
def _make_tree_level1(x,
                      children,
                      block,
                      ochannels,
                      level,
                      test,
                      level_root=False,
                      stride=1,
                      channel_last=False):
    axes = 3 if channel_last else 1
    ichannels = x.shape[axes]
    bottom = F.max_pooling(x,
                           kernel=(stride, stride),
                           stride=(stride, stride),
                           channel_last=channel_last) if stride > 1 else x
    if ichannels != ochannels:
        residual = pf_convolution(bottom,
                                  ochannels, (1, 1),
                                  stride=(1, 1),
                                  pad=None,
                                  with_bias=False,
                                  channel_last=channel_last)
        residual = PF.batch_normalization(residual,
                                          axes=[axes],
                                          batch_stat=not test)
    else:
        residual = bottom
    with nn.parameter_scope('block1'):
        b1 = block(x,
                   residual,
                   ochannels,
                   stride,
                   test,
                   channel_last=channel_last)
    with nn.parameter_scope('block2'):
        b2 = block(b1, b1, ochannels, 1, test, channel_last=channel_last)
    _children = [bottom, b2] if level_root else [b2]
    if children:
        _children += children
    x = root(b1,
             _children,
             ochannels,
             test,
             kernel_size=1,
             channel_last=channel_last)
    return x, bottom
Code Example #35
File: cnn_model_021.py  Project: kzky/works
def res_unit(x, scope_name, act=F.relu, dn=False, test=False):
    C = x.shape[1]

    with nn.parameter_scope(scope_name):
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x,
                               C // 2,
                               kernel=(1, 1),
                               pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
            h = act(h)
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv2"):
            h = PF.convolution(h,
                               C // 2,
                               kernel=(3, 3),
                               pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
            h = act(h)
        # Conv -> BN
        with nn.parameter_scope("conv3"):
            h = PF.convolution(h,
                               C,
                               kernel=(1, 1),
                               pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
    # Residual -> Relu
    if not test:
        h = F.dropout(h)
    with nn.parameter_scope(scope_name):
        h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
    h = F.add2(h, x)
    h = act(h)

    # Maxpooling
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))

    return h
Code Example #36
def conv4(x, test=False):
    '''
    Embedding function
        This network is a typical embedding network for the one-shot learning benchmark task.
        Args:
            x (~nnabla.Variable) : input images.
            test (boolean) : whether test or training
        Returns:
            h (~nnabla.Variable): embedding vector.

    '''
    h = x
    for i in range(4):
        h = PF.convolution(h, 64, [3, 3], pad=[1, 1], name='conv' + str(i))
        h = PF.batch_normalization(h, batch_stat=not test, name='bn' + str(i))
        h = F.relu(h)
        h = F.max_pooling(h, [2, 2])
    h = F.reshape(h, [h.shape[0], np.prod(h.shape[1:])])
    return h
Code Example #37
    def hg_module(n, x):
        with nn.parameter_scope(f"{n - 1}.0.0"):
            up1 = ops[n - 1][0](x)
        low1 = F.max_pooling(x, kernel=(2, 2), stride=(2, 2))
        with nn.parameter_scope(f"{n - 1}.1.0"):
            low1 = ops[n - 1][1](low1)

        if n > 1:
            low2 = hg_module(n - 1, low1)
        else:
            with nn.parameter_scope(f"{n - 1}.3.0"):
                low2 = ops[n - 1][3](low1)
        with nn.parameter_scope(f"{n - 1}.2.0"):
            low3 = ops[n - 1][2](low2)

        up2 = F.interpolate(low3, scale=(2, 2), mode="nearest")

        out = up1 + up2
        return out
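For reference, a minimal sketch (shapes are assumptions) of the pooling/upsampling symmetry the hourglass module above relies on:

import nnabla as nn
import nnabla.functions as F

x = nn.Variable((1, 256, 64, 64))
low = F.max_pooling(x, kernel=(2, 2), stride=(2, 2))   # (1, 256, 32, 32)
up = F.interpolate(low, scale=(2, 2), mode="nearest")  # (1, 256, 64, 64), matches x for the skip connection
print(up.shape)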
Code Example #38
File: cnn_model_025.py  Project: kzky/works
def cnn_model_003_with_cross_attention(ctx, x_list, act=F.relu, test=False):
    """With attention before pooling
    """
    with nn.context_scope(ctx):
        # Convblock0
        h0_list = []
        for x in x_list:
            h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
            h0_list.append(h)

        # Cross attention
        ca0 = attention(h0_list[0], h0_list[1], h0_list[1], 
                        div_dim=True, softmax=True)
        ca1 = attention(h0_list[1], h0_list[0], h0_list[0], 
                        div_dim=True, softmax=True)

        # Maxpooling, Batchnorm, Dropout
        h0_list = []
        for h in [ca0, ca1]:
            h = F.max_pooling(h, (2, 2))  # 32 -> 16
            with nn.parameter_scope("bn0"):
                h = PF.batch_normalization(h, batch_stat=not test)
            if not test:
                h = F.dropout(h)
            h0_list.append(h)

        # Convblock 1
        h1_list = []
        for h in h0_list:
            h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
            h1_list.append(h)

        # Cross attention
        ca0 = attention(h1_list[0], h1_list[1], h1_list[1], 
                        div_dim=True, softmax=True)
        ca1 = attention(h1_list[1], h1_list[0], h1_list[0], 
                        div_dim=True, softmax=True)
            
        # Maxpooling, Batchnorm, Dropout
        h1_list = []
        for h in [ca0, ca1]:
            h = F.max_pooling(h, (2, 2))  # 16 -> 8
            with nn.parameter_scope("bn1"):
                h = PF.batch_normalization(h, batch_stat=not test)
            if not test:
                h = F.dropout(h)
            h1_list.append(h)

        # Convblock 2
        h2_list = []
        for h in h1_list:
            h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
            h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
            h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
            h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
            h2_list.append(h)

        # Cross attention
        ca0 = attention(h2_list[0], h2_list[1], h2_list[1], 
                        div_dim=True, softmax=True)
        ca1 = attention(h2_list[1], h2_list[0], h2_list[0], 
                        div_dim=True, softmax=True)

        # Convblock 3
        h3_list = []
        for h in [ca0, ca1]:
            h = F.average_pooling(h, (6, 6))
            with nn.parameter_scope("bn2"):
                h = PF.batch_normalization(h, batch_stat=not test)
            h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
            h3_list.append(h)
        return h3_list