Example #1
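All of the snippets on this page assume NNabla's usual module aliases, which the original extracts do not show. A minimal import header, stated here as an assumption, would look like this:

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I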
def resblock(x, dim_out, w_init=None, epsilon=1e-05):
    assert dim_out == x.shape[1], "The number of input / output channels must match."
    h = PF.convolution(x, dim_out, kernel=(3, 3), pad=(
        1, 1), with_bias=False, w_init=w_init, name="1st")
    h = PF.instance_normalization(h, eps=epsilon, name="1st")
    h = F.relu(h, inplace=True)
    h = PF.convolution(h, dim_out, kernel=(3, 3), pad=(
        1, 1), with_bias=False, w_init=w_init, name="2nd")
    h = PF.instance_normalization(h, eps=epsilon, name="2nd")
    return x + h
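A minimal usage sketch for the block above (the input shape and scope name are illustrative assumptions, not from the original repository). Because both convolutions use 3x3 kernels with padding (1, 1), the block preserves the input shape, which is what makes the residual sum x + h well defined:

# Hypothetical call; any NCHW input whose channel count equals dim_out works.
x = nn.Variable((8, 64, 32, 32))
with nn.parameter_scope("resblock_demo"):
    y = resblock(x, dim_out=64)
print(y.shape)  # (8, 64, 32, 32): same shape as the input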
Example #2
def downblock(x, out_features, norm=False, kernel_size=4, pool=False, sn=False, test=False):
    out = x

    if sn:
        def apply_w(w): return PF.spectral_norm(w, dim=0, test=test)
    else:
        apply_w = None

    inmaps, outmaps = out.shape[1], out_features
    k_w = I.calc_normal_std_he_forward(
        inmaps, outmaps, kernel=(kernel_size, kernel_size)) / np.sqrt(2.)
    k_b = I.calc_normal_std_he_forward(inmaps, outmaps) / np.sqrt(2.)
    w_init = I.UniformInitializer((-k_w, k_w))
    b_init = I.UniformInitializer((-k_b, k_b))

    out = PF.convolution(out, out_features,
                         kernel=(kernel_size, kernel_size), pad=(0, 0),
                         stride=(1, 1), w_init=w_init, b_init=b_init,
                         apply_w=apply_w)

    if norm:
        out = PF.instance_normalization(out)

    out = F.leaky_relu(out, 0.2, inplace=True)

    if pool:
        out = F.average_pooling(out, kernel=(2, 2))

    return out
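A hedged usage sketch (the shapes below are assumptions for illustration). The convolution uses a 4x4 kernel with no padding and stride 1, so each spatial dimension shrinks by kernel_size - 1; enabling pool would additionally halve it with 2x2 average pooling:

# Hypothetical call with normalization enabled and pooling disabled.
x = nn.Variable((1, 3, 64, 64))
with nn.parameter_scope("down_demo"):
    y = downblock(x, out_features=32, norm=True)
print(y.shape)  # (1, 32, 61, 61): 64 - (4 - 1) = 61 per spatial dimension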
Example #3
def resblock(x, n=256, test=False, norm_type="batch_norm"):
    r = x
    r = F.pad(r, (1, 1, 1, 1), 'reflect')
    with nn.parameter_scope('block1'):
        r = PF.convolution(r, n, (3, 3), with_bias=False)
        if norm_type == "instance_norm":
            r = PF.instance_normalization(r, eps=1e-05)
        else:
            r = PF.batch_normalization(r, batch_stat=not test)
        r = F.relu(r)

    r = F.pad(r, (1, 1, 1, 1), 'reflect')
    with nn.parameter_scope('block2'):
        r = PF.convolution(r, n, (3, 3), with_bias=False)
        if norm_type == "instance_norm":
            r = PF.instance_normalization(r, eps=1e-05)
        else:
            r = PF.batch_normalization(r, batch_stat=not test)
    return x + r
Example #4
def generator(x, c, conv_dim=64, c_dim=5, num_downsample=2, num_upsample=2, repeat_num=6, w_init=None, epsilon=1e-05):
    assert len(c.shape) == 4
    c = F.tile(c, (1, 1) + x.shape[2:])
    concat_input = F.concatenate(x, c, axis=1)

    h = PF.convolution(concat_input, conv_dim, kernel=(7, 7), pad=(
        3, 3), stride=(1, 1), with_bias=False, w_init=w_init, name="init_conv")
    h = PF.instance_normalization(h, eps=epsilon, name="init_inst_norm")
    h = F.relu(h, inplace=True)

    # Down-sampling layers.
    curr_dim = conv_dim
    for i in range(num_downsample):
        h = PF.convolution(h, curr_dim*2, kernel=(4, 4), pad=(1, 1), stride=(2, 2),
                           with_bias=False, w_init=w_init, name="downsample_{}".format(i))
        h = PF.instance_normalization(
            h, eps=epsilon, name="downsample_{}".format(i))
        h = F.relu(h, inplace=True)
        curr_dim = curr_dim * 2

    # Bottleneck layers.
    for i in range(repeat_num):
        with nn.parameter_scope("bottleneck_{}".format(i)):
            h = resblock(h, dim_out=curr_dim)

    # Up-sampling layers.
    for i in range(num_upsample):
        h = PF.deconvolution(h, curr_dim//2, kernel=(4, 4), pad=(1, 1), stride=(
            2, 2), w_init=w_init, with_bias=False, name="upsample_{}".format(i))
        h = PF.instance_normalization(
            h, eps=epsilon, name="upsample_{}".format(i))
        h = F.relu(h, inplace=True)
        curr_dim = curr_dim // 2

    h = PF.convolution(h, 3, kernel=(7, 7), pad=(3, 3), stride=(
        1, 1), with_bias=False, w_init=w_init, name="last_conv")
    h = F.tanh(h)
    return h
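A hedged end-to-end sketch of the generator above, assuming the resblock from Example #1 is in scope (batch size, image resolution, and c_dim are illustrative). The label tensor c of shape (N, c_dim, 1, 1) is tiled to the image resolution and concatenated with x; the two stride-2 downsampling convolutions are undone by the two stride-2 deconvolutions, so the output resolution matches the input:

# Hypothetical call; 128x128 RGB images and 5 domain labels are assumptions.
x = nn.Variable((4, 3, 128, 128))
c = nn.Variable((4, 5, 1, 1))
with nn.parameter_scope("generator_demo"):
    out = generator(x, c, conv_dim=64, c_dim=5)
print(out.shape)  # (4, 3, 128, 128)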
Example #5
    def residual_block(self, x, o_channels):
        pad_width = get_symmetric_padwidth(1, channel_last=self.channel_last)

        with nn.parameter_scope("residual_1"):
            h = F.pad(x, pad_width=pad_width, mode=self.padding_type)
            h = PF.convolution(h, o_channels, (3, 3), **self.conv_opts)
            h = self.instance_norm_relu(h)

        with nn.parameter_scope("residual_2"):
            h = F.pad(h, pad_width=pad_width, mode=self.padding_type)
            h = PF.convolution(h, o_channels, (3, 3), **self.conv_opts)
            h = PF.instance_normalization(h, **self.norm_opts)

        return x + h
Example #6
def convblock(x,
              n=64,
              k=(3, 3),
              s=(2, 2),
              p=(1, 1),
              test=False,
              norm_type="batch_norm"):
    x = PF.convolution(x, n, k, pad=p, stride=s, with_bias=False)
    if norm_type == "instance_norm":
        x = PF.instance_normalization(x, eps=1e-05)
    else:
        x = PF.batch_normalization(x, batch_stat=not test)
    x = F.relu(x)
    return x
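A small illustrative call (shapes are assumptions). With the default 3x3 kernel, stride (2, 2), and padding (1, 1), each spatial dimension is halved:

# Hypothetical call using instance normalization instead of batch normalization.
x = nn.Variable((1, 3, 64, 64))
with nn.parameter_scope("conv_demo"):
    y = convblock(x, n=64, test=True, norm_type="instance_norm")
print(y.shape)  # (1, 64, 32, 32)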
Example #7
def test_pf_instance_normalization(g_rng, inshape, batch_axis, channel_axis,
                                   output_stat, fix_parameters, param_init):
    from nnabla.normalization_functions import _force_list, _get_axes_excluding

    def ref_instance_normalization(x, beta, gamma, channel_axis, batch_axis,
                                   eps, output_stat):

        ignore_axes = _force_list(batch_axis) + [
            channel_axis,
        ]

        axes = tuple(_get_axes_excluding(len(x.shape), ignore_axes))

        x_mean = x.mean(axis=axes, keepdims=True)
        x_std = x.std(axis=axes, keepdims=True)

        if output_stat:
            return (x - x_mean) / (x_std + eps) * gamma + beta, x_mean, x_std

        return (x - x_mean) / (x_std + eps) * gamma + beta

    eps = 1e-5

    p_shape = tuple(
        [inshape[i] if i == channel_axis else 1 for i in range(len(inshape))])

    x_npy = g_rng.randn(*inshape)

    if param_init:
        beta_init = np.ones(p_shape)
        gamma_init = np.ones(p_shape) * 2
        param_init = dict(beta=beta_init, gamma=gamma_init)
    else:
        beta_init = np.zeros(p_shape)
        gamma_init = np.ones(p_shape)

    x = nn.Variable.from_numpy_array(x_npy)

    kw = {}
    insert_if_not_default(kw, 'channel_axis', channel_axis, 1)
    insert_if_not_default(kw, 'batch_axis', batch_axis, 0)
    insert_if_not_default(kw, 'eps', eps, 1e-5)
    insert_if_not_default(kw, 'output_stat', output_stat, False)
    insert_if_not_default(kw, 'fix_parameters', fix_parameters, False)
    insert_if_not_none(kw, 'param_init', param_init)

    # Check creation
    y = PF.instance_normalization(x, **kw)
    y = _force_list(y)  # just to simplify after execution

    # Check parameter values before execution
    h = y[0]
    b = h.parent.inputs[1]
    g = h.parent.inputs[0].parent.inputs[1]
    assert np.allclose(b.d, beta_init)
    assert np.allclose(g.d, gamma_init)

    # Check execution
    forward_backward_all(*y)

    # Check values
    ref = ref_instance_normalization(x_npy, beta_init, gamma_init,
                                     channel_axis, batch_axis, eps,
                                     output_stat)
    if not output_stat:
        ref = [ref]

    for i in range(len(ref)):
        assert np.allclose(y[i].d, ref[i], atol=1e-2, rtol=1e-5)

    # Check created parameters
    assert len(nn.get_parameters()) == 2
    assert len(nn.get_parameters(grad_only=False)) == 2
    beta, gamma = [
        nn.get_parameters()['instance_normalization/' + name]
        for name in ['beta', 'gamma']
    ]
    assert beta.shape == p_shape
    assert gamma.shape == p_shape

    assert beta.need_grad
    assert gamma.need_grad

    b = h.parent.inputs[1]
    g = h.parent.inputs[0].parent.inputs[1]
    assert b.need_grad == (not fix_parameters)
    assert g.need_grad == (not fix_parameters)
Example #8
    def instance_norm_lrelu(self, x, alpha=0.2):
        norm = PF.instance_normalization(x, no_scale=True, no_bias=True)
        return F.leaky_relu(norm, alpha=alpha, inplace=True)
Example #9
    def instance_norm_relu(self, x):
        # return F.relu(PF.layer_normalization(x, **self.norm_opts))
        return F.relu(PF.instance_normalization(x, **self.norm_opts))