コード例 #1
0
ファイル: nonlocal_net.py プロジェクト: sony/nnabla-examples
def layer4_1(x):
    """Two reflect-padded 3x3 conv -> instance-norm -> PReLU stages
    (256 then 64 maps), followed by 2x nearest-neighbor upsampling.

    Parameter names ('layer4_1.*') match the pretrained checkpoint layout.
    """
    h = F.pad(x, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 256, kernel=(3, 3), stride=(1, 1),
                       name='layer4_1.1')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    h = PF.prelu(h, name='layer4_1.3')

    h = F.pad(h, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 64, kernel=(3, 3), stride=(1, 1),
                       name='layer4_1.5')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    h = PF.prelu(h, name='layer4_1.7')

    # Double the spatial resolution before returning.
    return F.interpolate(h, scale=(2, 2), mode='nearest',
                         align_corners=False)
コード例 #2
0
ファイル: nonlocal_net.py プロジェクト: sony/nnabla-examples
def layer3_1(x):
    """Two reflect-padded 3x3 conv -> instance-norm -> PReLU stages
    (128 then 64 maps); spatial size is preserved (pad 1, stride 1).

    Parameter names ('layer3_1.*') match the pretrained checkpoint layout.
    """
    h = F.pad(x, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 128, kernel=(3, 3), stride=(1, 1),
                       name='layer3_1.1')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    h = PF.prelu(h, name='layer3_1.3')

    h = F.pad(h, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 64, kernel=(3, 3), stride=(1, 1),
                       name='layer3_1.5')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    return PF.prelu(h, name='layer3_1.7')
コード例 #3
0
ファイル: nonlocal_net.py プロジェクト: sony/nnabla-examples
def res_block(x, out_ch, name):
    """Residual block: two reflect-padded 3x3 conv + instance-norm stages,
    a skip connection, and PReLU activations, all scoped under `name`.

    Args:
        x: input variable; its channel count must equal `out_ch` for the
           residual addition to broadcast correctly.
        out_ch: number of output maps for both convolutions.
        name: parameter scope under which all weights are registered.

    Returns:
        Output variable with the same shape as `x`.
    """
    with nn.parameter_scope(name):
        residual = x
        out = F.pad(x, (1, 1, 1, 1), 'reflect')
        out = PF.convolution(
            out, out_ch, kernel=(
                3, 3), stride=(
                1, 1), name='conv1')
        out = F.instance_normalization(
            out, gamma=None, beta=None, channel_axis=1)
        # NOTE(review): this PF.prelu and the one after the residual add are
        # both unnamed, so within this scope they appear to resolve to the
        # same 'prelu/slope' parameter (shared slope). Confirm this sharing
        # is intended before renaming either call — renaming would change
        # the checkpoint's parameter layout.
        out = PF.prelu(out)
        out = F.pad(out, (1, 1, 1, 1), 'reflect')
        out = PF.convolution(
            out, out_ch, kernel=(
                3, 3), stride=(
                1, 1), name='conv2')
        out = F.instance_normalization(
            out, gamma=None, beta=None, channel_axis=1)
        # Skip connection, then the final activation.
        out += residual
        out = PF.prelu(out)
    return out
コード例 #4
0
def test_pf_prelu_execution(g_rng, inshape, base_axis, shared, slope_init,
                            fix_parameters):
    """Run PF.prelu forward/backward and validate its node and parameters."""
    # A shared slope is a scalar; otherwise one slope per element of the
    # base_axis dimension.
    expected_shape = tuple() if shared else (inshape[base_axis], )
    slope_init = process_param_init(slope_init, expected_shape, g_rng)

    # Only pass non-default arguments through, so defaults are exercised too.
    kw = {}
    insert_if_not_none(kw, 'slope_init', slope_init)
    insert_if_not_default(kw, 'base_axis', base_axis, 1)
    insert_if_not_default(kw, 'shared', shared, True)
    insert_if_not_default(kw, 'fix_parameters', fix_parameters, False)

    x = nn.Variable.from_numpy_array(g_rng.randn(*inshape))

    # Execution must complete without error in both directions.
    out = PF.prelu(x, **kw)
    out.forward()
    out.backward()

    # Check values
    # TODO

    # The created function node and its recorded arguments.
    parent = out.parent
    assert parent.info.type_name == 'PReLU'
    assert parent.info.args['base_axis'] == base_axis

    # Exactly one parameter (the slope) should have been registered.
    assert parent.inputs[0] == x
    assert len(parent.inputs) == 2
    assert len(nn.get_parameters()) == 1
    slope = nn.get_parameters()['prelu/slope']
    assert slope.shape == expected_shape
    assert slope.need_grad
    # fix_parameters disables gradient flow into the slope input only.
    assert parent.inputs[1].need_grad == (not fix_parameters)
    if isinstance(slope_init, np.ndarray):
        assert np.allclose(slope_init, slope.d)
コード例 #5
0
 def af(x):
     """Activation function: apply a parametric ReLU to ``x``."""
     y = PF.prelu(x)
     return y
コード例 #6
0
ファイル: segan.py プロジェクト: wangtao201919/SEGAN
 def af(x, name=None):
     """Activation function: parametric ReLU with parameters scoped by ``name``."""
     out = PF.prelu(x, name=name)
     return out
コード例 #7
0
    def network(self, x, test=False):
        """CNN classifier: repeated conv -> batch-norm -> PReLU stages with
        max pooling down from 256x256 to 8x8, then global average pooling,
        a batch-normalized affine head and a sigmoid output.

        Args:
            x: input variable of shape 3x256x256 (per the original comments).
            test: when True, batch normalization runs in inference mode.

        Returns:
            Sigmoid output of shape (batch, 1).

        All parameter names are preserved exactly from the generated
        original so existing checkpoints keep loading.
        """
        def _stage(h, maps, conv_name, bn_name, act_name):
            # One conv(3x3, stride 1) -> BN -> PReLU stanza; factored out of
            # the nine copy-pasted repetitions in the generated original.
            h = PF.convolution(h, maps, (3, 3), (1, 1), name=conv_name)
            h = PF.batch_normalization(h, (1, ), 0.9, 0.0001, not test,
                                       name=bn_name)
            return PF.prelu(h, 1, False, name=act_name)

        # 3,256,256 -> 16,256,256
        h = _stage(x, 16, 'Convolution_5', 'BatchNormalization_9', 'PReLU_8')
        h = _stage(h, 16, 'Convolution_6', 'BatchNormalization_5', 'PReLU_7')
        h = _stage(h, 16, 'Convolution_4', 'BatchNormalization_2', 'PReLU_6')
        # -> 16,128,128
        h = F.max_pooling(h, (2, 2), (2, 2), False)
        # -> 32,128,128
        h = _stage(h, 32, 'Convolution_2', 'BatchNormalization_4', 'PReLU_5')
        # -> 32,64,64
        h = F.max_pooling(h, (2, 2), (2, 2), False)
        # -> 64,64,64
        h = _stage(h, 64, 'Convolution_3', 'BatchNormalization', 'PReLU_4')
        # -> 64,32,32
        h = F.max_pooling(h, (2, 2), (2, 2), False)
        # -> 128,32,32
        h = _stage(h, 128, 'Convolution_7', 'BatchNormalization_7', 'PReLU_3')
        # -> 128,16,16
        h = F.max_pooling(h, (2, 2), (2, 2), False)
        # -> 256,16,16
        h = _stage(h, 256, 'Convolution_8', 'BatchNormalization_10', 'PReLU_2')
        # -> 256,8,8
        h = F.max_pooling(h, (2, 2), (2, 2), False)
        # -> 512,8,8
        h = _stage(h, 512, 'Convolution', 'BatchNormalization_8', 'PReLU')

        # Global average pooling -> 512,1,1
        h = F.average_pooling(h, (8, 8), (8, 8))
        h = PF.batch_normalization(h, (1, ), 0.9, 0.0001, not test,
                                   name='BatchNormalization_6')
        h = PF.prelu(h, 1, False, name='PReLU_9')
        # Classification head -> 1
        h = PF.affine(h, (1, ), name='Affine')
        h = PF.batch_normalization(h, (1, ), 0.9, 0.0001, not test,
                                   name='BatchNormalization_3')
        # y'
        return F.sigmoid(h)