Example #1
 def conv2d(self,
            conv_input,
            out_channels,
            kernel_size,
            stride,
            bias=True,
            name='',
            dilation=1,
            pad=0):
     '''
     Define 2D-Convolution Layer
     '''
     if self.init_method == 'xavier':
         sigma = I.calc_normal_std_glorot(conv_input.shape[1],
                                          out_channels,
                                          kernel=(kernel_size, kernel_size))
         w_init = I.NormalInitializer(sigma)
     elif self.init_method == 'normal':
         w_init = I.NormalInitializer(sigma=0.01)
     else:
         w_init = None
     conv_out = PF.convolution(conv_input,
                               out_channels,
                               kernel=(kernel_size, kernel_size),
                               stride=(stride, stride),
                               with_bias=bias,
                               dilation=(dilation, dilation),
                               pad=(pad, pad),
                               name=name,
                               w_init=w_init)
     conv_out.apply(recompute=self.recompute)
     return conv_out
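A minimal usage sketch for the method above (the names cfg and 'c1' are illustrative only). conv2d is assumed to be reachable as a plain function, and nn, PF and I to be imported as in the rest of the file; only init_method and recompute are read from the host object.

from types import SimpleNamespace

import nnabla as nn

# Hypothetical stand-in for `self`: only `init_method` and `recompute` are used.
cfg = SimpleNamespace(init_method='xavier', recompute=False)

x = nn.Variable((4, 3, 32, 32))
# 3x3 kernel, stride 1, pad 1 keeps the spatial size: y.shape == (4, 16, 32, 32).
y = conv2d(cfg, x, out_channels=16, kernel_size=3, stride=1, pad=1, name='c1')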
Example #2
def weight_init_fn(shape,
                   gain=1,
                   use_wscale=True,
                   lrmul=1,
                   weight_var='affine',
                   return_init=False):
    """
    Weight initialization function taken from the original StyleGANv2 code
    """
    fan_in = np.prod(
        shape[:-1])  # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
    he_std = gain / np.sqrt(fan_in)  # He init

    # Equalized learning rate and custom learning rate multiplier.
    if use_wscale:
        init_std = 1.0 / lrmul
        runtime_coef = he_std * lrmul
    else:
        init_std = he_std / lrmul
        runtime_coef = lrmul

    init = I.NormalInitializer(sigma=init_std)
    if return_init:
        return init

    return nn.parameter.get_parameter_or_create(
        name=f'{weight_var}/W', shape=shape,
        initializer=init), nn.parameter.get_parameter_or_create(
            name=f'{weight_var}/b', shape=(shape[-1], ))
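A short sketch of the two modes above, assuming weight_init_fn, numpy and nnabla (nn) are importable: with return_init=True only the initializer is returned, otherwise the W and b parameters are created under the current parameter scope (the scope name "g_block0" is illustrative).

import nnabla as nn

# Mode 1: only build the NormalInitializer for a 3x3 conv mapping 64 -> 128 maps.
init = weight_init_fn((3, 3, 64, 128), return_init=True)

# Mode 2: create the parameters themselves ('affine/W' and 'affine/b' with the
# default weight_var) inside an explicit scope.
with nn.parameter_scope("g_block0"):
    W, b = weight_init_fn((3, 3, 64, 128))
print(W.shape, b.shape)  # (3, 3, 64, 128) (128,)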
Example #3
 def __init__(self, padding_type="reflect", channel_last=False):
     self.padding_type = padding_type
     self.channel_last = channel_last
     # currently deconv does not support channel last.
     self.conv_opts = dict(w_init=I.NormalInitializer(0.02))
     # don't use adaptive parameters (scale and bias)
     self.norm_opts = dict(no_scale=True, no_bias=True)
    def __call__(self, x):
        # First conv
        h = self.conv_bn_act(x, int(self.maps0 * self.depth_mul),
                             stride=(2, 2), act="hswish", name="first-conv")

        # Inverted residual blocks
        for i, elm in enumerate(self.settings):
            maps, kernel, stride, ef, act, se = elm
            maps = round(maps * self.depth_mul)
            name = "mbconv-{:03d}".format(i)
            h = self.inverted_residual(
                h, maps, kernel, stride, ef, act, se, name=name)

        # Conv -> Avepool -> Conv
        h = self.conv_bn_act(h, int(self.maps1 * self.depth_mul), (1, 1), act="hswish",
                             name="last-conv-1")
        pool_shape = get_spatial_shape(x.shape, self.channel_last)
        h = F.average_pooling(h, pool_shape, channel_last=self.channel_last)
        h = self.conv_act(h, int(self.maps2 * self.depth_mul), (1, 1), act="hswish",
                          name="last-conv-2")

        # Classifier
        if not self.test:
            h = F.dropout(h, 0.2)
        h = PF.affine(h, self.num_classes,
                      w_init=I.NormalInitializer(0.01), name="linear")

        return h, {}
Example #5
def pf_affine(r, num_classes=1000, channel_last=False):
    r = PF.convolution(r,
                       num_classes, (1, 1),
                       channel_last=channel_last,
                       w_init=I.NormalInitializer(sigma=0.01, rng=RNG),
                       name='fc')
    return F.reshape(r, (r.shape[0], -1), inplace=False)
    def __call__(self, x):
        h = self.conv_bn_relu(x, 32, stride=(2, 2), name="first-conv")
        h = self.depthwise_separable_conv(
            h, 64, stride=(1, 1), name="conv-ds-1")
        h = self.depthwise_separable_conv(
            h, 128, stride=(2, 2), name="conv-ds-2")
        h = self.depthwise_separable_conv(
            h, 128, stride=(1, 1), name="conv-ds-3")
        h = self.depthwise_separable_conv(
            h, 256, stride=(2, 2), name="conv-ds-4")
        h = self.depthwise_separable_conv(
            h, 256, stride=(1, 1), name="conv-ds-5")
        h = self.depthwise_separable_conv(
            h, 512, stride=(2, 2), name="conv-ds-6")
        h = self.depthwise_separable_conv(
            h, 512, stride=(1, 1), name="conv-ds-7")
        h = self.depthwise_separable_conv(
            h, 512, stride=(1, 1), name="conv-ds-8")
        h = self.depthwise_separable_conv(
            h, 512, stride=(1, 1), name="conv-ds-9")
        h = self.depthwise_separable_conv(
            h, 512, stride=(1, 1), name="conv-ds-10")
        h = self.depthwise_separable_conv(
            h, 512, stride=(1, 1), name="conv-ds-11")
        h = self.depthwise_separable_conv(
            h, 1024, stride=(2, 2), name="conv-ds-12")
        h = self.depthwise_separable_conv(
            h, 1024, stride=(1, 1), name="conv-ds-13")

        pool_shape = get_spatial_shape(x.shape, self.channel_last)
        h = F.average_pooling(h, pool_shape, channel_last=self.channel_last)
        h = PF.affine(h, self.num_classes,
                      w_init=I.NormalInitializer(0.01), name="linear")
        return h, {}
    def __call__(self, x):
        # First conv
        h = self.conv_bn_relu6(x, int(self.init_maps * self.depth_mul),
                               stride=(2, 2), name="first-conv")

        # Inverted residual blocks
        for i, elm in enumerate(self.settings):
            t, c, n, s = elm
            # TODO: where to multiply depth_mul
            c = round(c * self.depth_mul)
            mbconv_s = partial(self.inverted_residual,
                               maps=c, stride=(s, s), ef=t)
            mbconv_1 = partial(self.inverted_residual,
                               maps=c, stride=(1, 1), ef=t)
            for j in range(n):
                name = "mbconv-{:02d}-{:02d}".format(i, j)
                h = mbconv_s(h, name=name) if j == 0 else mbconv_1(
                    h, name=name)
        # Last conv
        h = self.conv_bn_relu6(h, int(1280 * self.depth_mul),
                               kernel=(1, 1), name="last-conv")

        # Classifier
        if not self.test:
            h = F.dropout(h, 0.2)
        pool_shape = get_spatial_shape(x.shape, self.channel_last)
        h = F.average_pooling(h, pool_shape, channel_last=self.channel_last)
        h = PF.affine(h, self.num_classes,
                      w_init=I.NormalInitializer(0.01), name="linear")

        return h, {}
Example #8
def decode(input_feature, output_nc, n_downsampling, ngf, norm_layer,
           use_bias):
    h = input_feature
    w_init = I.NormalInitializer(sigma=0.02, rng=None)

    for i in range(n_downsampling):
        with nn.parameter_scope("dec_downsampling_{}".format(i)):
            mult = 2**(n_downsampling - i)
            h = PF.deconvolution(h,
                                 int(ngf * mult / 2),
                                 kernel=(4, 4),
                                 stride=(2, 2),
                                 pad=(1, 1),
                                 w_init=w_init,
                                 with_bias=use_bias)
            # kernel changed 3 -> 4 to make the output fit the desired size.
            h = norm_layer(h)
            h = F.relu(h)

    h = F.pad(h, (3, 3, 3, 3), 'reflect')
    h = PF.convolution(h,
                       output_nc,
                       kernel=(7, 7),
                       w_init=w_init,
                       with_bias=use_bias,
                       name="dec_last_conv")
    h = F.tanh(h)

    return h
Example #9
def truncated_normal(w_shape, mean, std):
    """
    Numpy truncated normal.
    """
    # Draw 4 candidate samples per element and keep the first one that lies
    # within (-2, 2) standard deviations, then rescale by std and shift by mean.
    init = I.NormalInitializer()
    tmp = init(w_shape + (4,))
    valid = np.logical_and(np.less(tmp, 2), np.greater(tmp, -2))
    ind = np.argmax(valid, axis=-1)
    ind1 = np.expand_dims(ind, -1)
    trunc_norm = np.take_along_axis(tmp, ind1, axis=-1).squeeze(-1)
    trunc_norm = trunc_norm * std + mean
    return trunc_norm
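A quick sanity-check sketch for the helper above (numpy as np and nnabla.initializer as I are assumed imported, as in the function itself).

# Draw a (3, 3, 16, 32) weight tensor; for each element the first of 4 candidate
# samples inside (-2, 2) standard deviations is kept, then rescaled and shifted.
w = truncated_normal((3, 3, 16, 32), mean=0.0, std=0.02)
print(w.shape)  # (3, 3, 16, 32)
print(np.abs(w / 0.02).max() < 2.0)  # True unless all 4 candidates were out of range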
Example #10
def pf_convolution(x, ochannels, kernel, stride=(1, 1), channel_last=False):
    axes = [3 if channel_last else 1]
    ichannels = x.shape[axes[0]]
    init = I.NormalInitializer(sigma=I.calc_normal_std_he_backward(
        ichannels, ochannels, kernel=kernel),
                               rng=RNG)
    pad = tuple([int((k - 1) // 2) for k in kernel])
    return PF.convolution(x,
                          ochannels,
                          kernel,
                          stride=stride,
                          pad=pad,
                          with_bias=False,
                          channel_last=channel_last,
                          w_init=init)
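A usage sketch for the wrapper above. RNG is a module-level numpy RandomState in the original repository; here it is created explicitly (the seed is arbitrary) so the snippet stands alone.

import numpy as np
import nnabla as nn

RNG = np.random.RandomState(313)

x = nn.Variable((1, 3, 224, 224))
# 'same'-style padding is derived from the kernel, so stride 2 simply halves the
# spatial size: h.shape == (1, 64, 112, 112).
h = pf_convolution(x, 64, (3, 3), stride=(2, 2))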
Example #11
    def __call__(self, x):
        depth_coef = self.net_setting["depth_coef"]
        width_coef = self.net_setting["width_coef"]
        resolution = self.net_setting["resolution"]
        p = self.net_setting["p"]
        assert get_spatial_shape(x.shape, self.channel_last) == [resolution, resolution], \
            "(x.shape = {}, resolution = {})".format(x.shape, resolution)

        # First conv
        maps = self.round_filters(32, width_coef)
        h = self.conv_bn(x, maps, stride=(2, 2), name="first-conv")

        # Inverted residual blocks
        for i, elm in enumerate(self.mbc_settings):
            t, c, k, n, s = elm
            c = self.round_filters(c, width_coef)
            n = int(np.ceil(n * depth_coef))
            mbconv_s = partial(self.inverted_residual,
                               maps=c,
                               kernel=(k, k),
                               stride=(s, s),
                               ef=t)
            mbconv_1 = partial(self.inverted_residual,
                               maps=c,
                               kernel=(k, k),
                               stride=(1, 1),
                               ef=t)
            for j in range(n):
                name = "mbconv-{:02d}-{:02d}".format(i, j)
                h = mbconv_s(h, name=name) if j == 0 else mbconv_1(h,
                                                                   name=name)
        # Last conv
        maps = self.round_filters(1280, width_coef)
        h = self.conv_bn_swish(h, maps, kernel=(1, 1), name="last-conv")

        # Classifier
        if not self.test:
            h = F.dropout(h, p)
        pool_shape = get_spatial_shape(x.shape, self.channel_last)
        h = F.average_pooling(h, pool_shape, channel_last=self.channel_last)
        h = PF.affine(h,
                      self.num_classes,
                      w_init=I.NormalInitializer(0.01),
                      name="linear")

        return h, {}
Example #12
def conv(x, planes, kernel, pad, stride, dilation, with_bias):
    inchannels = x.shape[1]
    outchannels = planes
    s = I.calc_normal_std_he_backward(inchannels, outchannels, kernel)
    w_init = I.NormalInitializer(s)
    if dilation[0] > 1:
        pad2 = dilation
    else:
        pad2 = pad
    h = PF.convolution(x,
                       planes,
                       kernel=kernel,
                       pad=pad2,
                       stride=stride,
                       dilation=dilation,
                       with_bias=with_bias,
                       w_init=w_init)

    return h
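A short sketch of the dilation handling above (nn assumed imported; I and PF are already required by the function): when dilation exceeds 1, the padding is taken from the dilation, so a 3x3 kernel still preserves the spatial size.

import nnabla as nn

x = nn.Variable((2, 64, 32, 32))
# dilation (2, 2) forces pad2 = (2, 2); the effective kernel extent is 5, so the
# output stays (2, 128, 32, 32).
h = conv(x, 128, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
         dilation=(2, 2), with_bias=False)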
Example #13
def resnetblock(x, dim, padding_type, norm_layer, use_dropout, use_bias):
    assert dim == x.shape[
        1], "The number of input / output channels must match."
    h = x

    p = 0
    if padding_type == 'reflect':
        h = F.pad(h, (1, 1, 1, 1), 'reflect')
    elif padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError(
            'padding {} is not implemented'.format(padding_type))
    w_init = I.NormalInitializer(sigma=0.02, rng=None)

    h = PF.convolution(h,
                       dim,
                       kernel=(3, 3),
                       pad=(p, p),
                       w_init=w_init,
                       with_bias=use_bias,
                       name="1st")
    h = norm_layer(h, name="1st")
    h = F.relu(h)

    if use_dropout:
        h = F.dropout(h, 0.5)

    if padding_type == 'reflect':
        h = F.pad(h, (1, 1, 1, 1), 'reflect')

    h = PF.convolution(h,
                       dim,
                       kernel=(3, 3),
                       pad=(p, p),
                       w_init=w_init,
                       with_bias=use_bias,
                       name="2nd")
    h = norm_layer(h, name="2nd")

    out = F.add2(x, h)

    return out
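A minimal sketch of calling the block above. norm_layer stands in for whatever normalization wrapper the repository defines; PF.batch_normalization is used here purely as a placeholder that accepts a name keyword, and F/I are assumed imported where resnetblock is defined.

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((1, 64, 128, 128))
h = resnetblock(x, 64, padding_type='reflect',
                norm_layer=PF.batch_normalization,
                use_dropout=False, use_bias=True)
# The residual output keeps the input shape: (1, 64, 128, 128).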
Example #14
def feature_extractor(input_variable, input_nc, ndf, n_layers, kw, padw,
                      norm_layer, use_bias):
    w_init = I.NormalInitializer(sigma=0.02, rng=None)
    x = input_variable

    with nn.parameter_scope("feature_extractor_init_conv"):

        def apply_w(w):
            return PF.spectral_norm(w, dim=0)

        h = PF.convolution(x,
                           ndf,
                           kernel=(kw, kw),
                           stride=(2, 2),
                           pad=(padw, padw),
                           w_init=w_init,
                           apply_w=apply_w)
        h = F.leaky_relu(h, alpha=0.2)

    nf_mult = 1
    nf_mult_prev = 1

    for n in range(1, n_layers):
        nf_mult_prev = nf_mult
        nf_mult = min(2**n, 8)
        with nn.parameter_scope("feature_extractor_stage_{}".format(n)):

            def apply_w(w):
                return PF.spectral_norm(w, dim=0)

            h = PF.convolution(h,
                               ndf * nf_mult,
                               kernel=(kw, kw),
                               stride=(2, 2),
                               pad=(padw, padw),
                               w_init=w_init,
                               with_bias=use_bias,
                               apply_w=apply_w)
            h = norm_layer(h)
            h = F.leaky_relu(h, alpha=0.2)

    return h
Example #15
def encode(input_variable, input_nc, n_downsampling, ngf, norm_layer,
           use_dropout, n_blocks, padding_type, use_bias):
    """
    The encoder is used for both the image and the mask.
    """
    w_init = I.NormalInitializer(sigma=0.02, rng=None)

    x = input_variable
    h = F.pad(x, (3, 3, 3, 3), 'reflect')
    h = PF.convolution(h,
                       ngf,
                       kernel=(7, 7),
                       w_init=w_init,
                       with_bias=use_bias,
                       name="enc_initial_conv")
    h = norm_layer(h, name="enc_initial_norm")
    h = F.relu(h)

    for i in range(n_downsampling):
        with nn.parameter_scope("enc_downsampling_{}".format(i)):
            mult = 2**i
            h = PF.convolution(h,
                               ngf * mult * 2,
                               kernel=(3, 3),
                               stride=(2, 2),
                               pad=(1, 1),
                               w_init=w_init,
                               with_bias=use_bias)
            h = norm_layer(h)
            h = F.relu(h)

    mult = 2**n_downsampling
    for i in range(n_blocks):
        with nn.parameter_scope("resblock_{}".format(i)):
            h = resnetblock(h,
                            ngf * mult,
                            padding_type=padding_type,
                            norm_layer=norm_layer,
                            use_dropout=use_dropout,
                            use_bias=use_bias)

    return h
Example #16
def classifier(input_feature, ndf, n_layers, kw, padw, norm_layer,
               use_sigmoid):
    w_init = I.NormalInitializer(sigma=0.02, rng=None)
    h = input_feature
    nf_mult_prev = min(2**(n_layers - 1), 8)
    nf_mult = min(2**n_layers, 8)

    with nn.parameter_scope("dis_classifier_1"):

        def apply_w(w):
            return PF.spectral_norm(w, dim=0)

        h = PF.convolution(h,
                           ndf * nf_mult,
                           kernel=(kw, kw),
                           stride=(1, 1),
                           pad=(padw, padw),
                           w_init=w_init,
                           apply_w=apply_w)
        h = norm_layer(h)
        h = F.leaky_relu(h, alpha=0.2)

    # Use spectral normalization
    with nn.parameter_scope("dis_classifier_2"):

        def apply_w(w):
            return PF.spectral_norm(w, dim=0)

        h = PF.convolution(h,
                           1,
                           kernel=(kw, kw),
                           stride=(1, 1),
                           pad=(padw, padw),
                           w_init=w_init,
                           apply_w=apply_w)

    if use_sigmoid:
        h = F.sigmoid(h)
    return h
Example #17
def pf_convolution(x,
                   ochannels,
                   kernel,
                   stride=(1, 1),
                   group=1,
                   channel_last=False,
                   with_bias=False):
    axes = [get_channel_axis(channel_last)]
    ichannels = x.shape[axes[0]]
    init = I.NormalInitializer(sigma=I.calc_normal_std_he_forward(
        ichannels, ochannels, kernel=kernel),
                               rng=RNG)
    pad = tuple([int((k - 1) // 2) for k in kernel])
    return PF.convolution(x,
                          ochannels,
                          kernel,
                          stride=stride,
                          pad=pad,
                          group=group,
                          with_bias=with_bias,
                          channel_last=channel_last,
                          w_init=init)
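A sketch showing the group argument above used for a depthwise convolution. RNG is again assumed to be a module-level numpy RandomState, and get_channel_axis a small helper from the same repository; both are only needed inside the wrapper.

import numpy as np
import nnabla as nn

RNG = np.random.RandomState(313)

x = nn.Variable((1, 32, 56, 56))
# group == number of input channels gives one 3x3 filter per channel
# (a depthwise convolution); h.shape == (1, 32, 56, 56).
h = pf_convolution(x, 32, (3, 3), group=32)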
Example #18
    def __init__(self,
                 n_layers=4,
                 base_ndf=64,
                 n_scales=2,
                 use_sigmoid=False,
                 use_spectral_normalization=True):
        """
        PatchGAN discriminator.

        Args:
            n_layers:
        :param base_ndf:
        :param n_scales:
        :param use_sigmoid:
        """

        self.n_layers = n_layers
        self.base_ndf = base_ndf
        self.n_scales = n_scales
        self.use_sigmoid = use_sigmoid

        self.conv_opts = dict(w_init=I.NormalInitializer(0.02))
        if use_spectral_normalization:
            self.conv_opts["apply_w"] = spectral_norm_callback(dim=0)
Example #19
from __future__ import print_function
import nnabla as nn
import nnabla.functions as F
import nnabla.initializer as I
import numpy as np

import sys
import importlib
import time
from collections import namedtuple, OrderedDict
import csv

import pytest


Inspec = namedtuple("Inspec", ['shape', 'init', 'need_grad'])
Inspec.__new__.__defaults__ = (I.NormalInitializer(), True)
BenchmarkStat = namedtuple("Benchmark", ['mean_time', 'run_count'])


class Timer:

    """Timer.

    See :func:`Timer.lap()`.

    """

    def __init__(self):
        self.start = time.time()
        self.lap_time = self.start
    y = F.sink(*vv)
    y.forward()
    y.backward()


def check_none_arg(arg, val, none_case):
    if val is None:
        assert arg == none_case
        return
    assert arg == val


@pytest.mark.parametrize("inshape", [(8, 2, 2, 2), (16, 1, 8)])
@pytest.mark.parametrize("n_outmaps", [16, 32])
@pytest.mark.parametrize("base_axis", [1, 2])
@pytest.mark.parametrize("w_init", [None, I.NormalInitializer(), True])
@pytest.mark.parametrize("b_init", [None, I.ConstantInitializer(), True])
@pytest.mark.parametrize("with_bias", [False, True])
@pytest.mark.parametrize("fix_parameters", [False, True])
@pytest.mark.parametrize("rng", [None, True])
def test_pf_affine_execution(g_rng, inshape, n_outmaps, base_axis, w_init,
                             b_init, with_bias, fix_parameters, rng):

    w_shape = (int(np.prod(inshape[base_axis:])), n_outmaps)
    b_shape = (n_outmaps, )
    w_init = process_param_init(w_init, w_shape, g_rng)
    b_init = process_param_init(b_init, b_shape, g_rng)
    rng = process_rng(rng)

    kw = {}
    insert_if_not_none(kw, 'w_init', w_init)
def decoder(target_action,
            target_action_type,
            target_node_type,
            target_parent_rule,
            target_parent_index,
            query_embed,
            query_embed_mask,
            rule_num,
            token_num,
            node_type_num,
            embedding_size,
            node_type_embedding_size,
            state_size,
            hidden_size,
            previous_action_embed=None,
            initial_state=None,
            initial_cell=None,
            hist=None,
            dropout=0.0,
            train=True):
    """
    target_action: (batch_size, max_action_length, 3)
    target_action_type: (batch_size, max_action_length, 3)
    target_node_type: (batch_size, max_action_length)
    target_parent_rule: (batch_size, max_action_length)
    target_parent_index: (batch_size, max_action_length)
    """
    batch_size, max_action_length, _ = target_action.shape

    # Node type embedding
    with nn.parameter_scope("node_type_embedding"):
        target_node_type_embed = embedding(target_node_type,
                                           node_type_num,
                                           node_type_embedding_size,
                                           mask_zero=False,
                                           init=I.NormalInitializer(0.01))

    # Previous action embedding
    ## (batch_size, max_action_length)
    target_apply_rule, target_gen_token, target_copy_token = split(
        target_action, axis=2)
    with nn.parameter_scope("rule_embedding"):
        # (batch_size, max_action_length, embedding_size)
        target_apply_rule_embed = embedding(target_apply_rule,
                                            rule_num,
                                            embedding_size,
                                            mask_zero=False,
                                            init=I.NormalInitializer(0.01))
        target_apply_rule_embed = F.reshape(
            target_apply_rule_embed,
            (batch_size, max_action_length, 1, embedding_size))
    with nn.parameter_scope("token_embedding"):
        # (batch_size, max_action_length, embedding_size)
        target_gen_token_embed = embedding(target_gen_token,
                                           token_num,
                                           embedding_size,
                                           mask_zero=False,
                                           init=I.NormalInitializer(0.01))
        target_gen_token_embed = F.reshape(
            target_gen_token_embed,
            (batch_size, max_action_length, 1, embedding_size))
    target_copy_token = F.reshape(target_copy_token,
                                  (batch_size, max_action_length, 1, 1))
    target_copy_token = F.broadcast(
        target_copy_token, (batch_size, max_action_length, 1, embedding_size))
    target_copy_token *= 0
    # (batch_size, max_action_length, 3, embedding_size)
    target_action_embed = concatenate(target_apply_rule_embed,
                                      target_gen_token_embed,
                                      target_copy_token,
                                      axis=2)
    target_action_type2 = F.reshape(target_action_type,
                                    (batch_size, max_action_length, 3, 1))
    target_action_type2 = F.broadcast(
        target_action_type2,
        (batch_size, max_action_length, 3, embedding_size))
    # (batch_size, max_action_length, 3, embedding_size)
    target_action_embed = target_action_embed * target_action_type2
    # (batch_size, max_action_length, embedding_size)
    target_action_embed = F.sum(target_action_embed, axis=2)

    # Shift action
    if previous_action_embed is None:
        previous_action_embed = nn.Variable((batch_size, 1, embedding_size),
                                            need_grad=False)
        previous_action_embed.data.zero()
    # (batch_size, max_action_length + 1, embedding_size)
    target_action_embed = concatenate(previous_action_embed,
                                      target_action_embed,
                                      axis=1)
    # (batch_size, max_action_length, embedding_size)
    target_action_embed = F.slice(
        target_action_embed,
        start=[0, 0, 0],
        stop=[batch_size, max_action_length, embedding_size])

    # Parent action embedding
    parent_rule_mask = 1 - F.equal_scalar(target_parent_rule,
                                          0)  # (batch_size, max_action_length)
    parent_rule_mask = F.reshape(parent_rule_mask,
                                 (batch_size, max_action_length, 1))
    parent_rule_mask = F.broadcast(
        parent_rule_mask, (batch_size, max_action_length, embedding_size))
    with nn.parameter_scope("rule_embedding"):
        target_parent_rule_embed = embedding(target_parent_rule,
                                             rule_num,
                                             embedding_size,
                                             mask_zero=False)
    target_parent_rule_embed = parent_rule_mask * target_parent_rule_embed

    # (batch_size, max_action_length, embedding_size * 2 + node_type_embedding_size)
    decoder_input = concatenate(target_action_embed,
                                target_node_type_embed,
                                target_parent_rule_embed,
                                axis=2)
    target_action_mask = 1 - F.equal_scalar(F.sum(
        target_action_type, axis=2), 0)  # (batch_size, max_action_length)
    with nn.parameter_scope("decoder"):
        decoder_hidden_states, decoder_cells, ctx_vectors, new_hist = cond_att_lstm(
            decoder_input,
            target_parent_index,
            target_action_mask,
            query_embed,
            query_embed_mask,
            state_size,
            hidden_size,
            initial_state=initial_state,
            initial_cell=initial_cell,
            hist=hist,
            dropout=dropout,
            train=train)
    return target_action_embed, decoder_hidden_states, decoder_cells, ctx_vectors, target_action_mask, new_hist
Example #22
 def affine_act(self, x, dims, name):
     c = x.shape[1]
     s = I.calc_normal_std_he_forward(c, dims)
     w_init = I.NormalInitializer(s, )
     return self.act(PF.affine(x, dims, w_init=w_init, name=name))
def Wave_U_Net(Noisy):
    ds_outputs = list()
    num_initial_filters = 24
    num_layers = 12
    filter_size = 15
    merge_filter_size = 5
    b = I.ConstantInitializer()
    w = I.NormalInitializer(sigma=0.02)

    ##     Sub-functions
    ## ---------------------------------

    #   Convolution
    def conv(x, output_ch, kernel=(15,), pad=(7,), stride=(1,), name=None):
        return PF.convolution(x, output_ch, kernel, pad=pad, stride=stride, w_init=w, b_init=b, name=name)

    #   Activation Function
    def af(x, alpha=0.2):
        return F.leaky_relu(x, alpha)

    #   Crop to a matching time length and concatenate along the channel axis
    def crop_and_concat(x1, x2):

        def crop(tensor, target_times):
            shape = tensor.shape[2]
            diff = shape - target_times
            if diff == 0:
                return tensor
            crop_start = diff // 2
            crop_end = diff - crop_start
            return F.slice(tensor, start=(0, 0, crop_start), stop=(tensor.shape[0], tensor.shape[1], shape - crop_end),
                           step=(1, 1, 1))

        x1 = crop(x1, x2.shape[2])
        return F.concatenate(x1, x2, axis=1)

    def downsampling_block(x, i):
        with nn.parameter_scope(('ds_block-%2d' % i)):
            ds = af(conv(x, (num_initial_filters + num_initial_filters * i), (filter_size,), (7,), name='conv'))
            ds_slice = F.slice(ds, start=(0, 0, 0), stop=ds.shape, step=(1, 1, 2))  # Decimate by factor of 2
            # ds_slice = F.average_pooling(ds, kernel=(1, 1,), stride=(1, 2,), pad=(0, 0,))
            return ds, ds_slice

    def upsampling_block(x, i):

        with nn.parameter_scope(('us_block-%2d' % i)):
            up = F.unpooling(af(x), (2,))
            cac_x = crop_and_concat(ds_outputs[-i - 1], up)
            us = af(conv(cac_x, num_initial_filters + num_initial_filters * (num_layers - i - 1), (merge_filter_size,),
                         (2,), name='conv'))
            return us

    with nn.parameter_scope('Wave-U-Net'):
        current_layer = Noisy
        ## downsampling block
        for i in range(num_layers):
            ds, current_layer = downsampling_block(current_layer, i)
            ds_outputs.append(ds)
        ## latent variable
        with nn.parameter_scope('latent_variable'):
            current_layer = af(conv(current_layer, num_initial_filters + num_initial_filters * num_layers))
        ## upsampling block
        for i in range(num_layers):
            current_layer = upsampling_block(current_layer, i)

        current_layer = crop_and_concat(Noisy, current_layer)

        ## output layer
        target_1 = F.tanh(conv(current_layer, 1, (1,), (0,), name='target_1'))
        target_2 = F.tanh(conv(current_layer, 1, (1,), (0,), name='target_2'))
        return target_1, target_2
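A usage sketch for the network above, assuming nn, F, PF and I are imported as in the rest of the file. A power-of-two length keeps every decimation/unpooling pair aligned, so no cropping is actually needed at the output.

import nnabla as nn

noisy = nn.Variable((4, 1, 16384))  # (batch, channel, time)
target_1, target_2 = Wave_U_Net(noisy)
# Both source estimates keep the input length: (4, 1, 16384).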
Example #24
def w_init(x, out_dims, gain=0.02, type="xavier"):
    if type == "xavier":
        return I.NormalInitializer(
            sigma=I.calc_normal_std_glorot(x.shape[1], out_dims) * gain)

    raise ValueError("unsupported init type: {}.".format(type))
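Since the helper above returns an initializer, it is typically passed straight to a parametric function. A minimal sketch (PF and nn assumed imported; the layer name "fc1" is illustrative):

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((8, 512))
h = PF.affine(x, 256, w_init=w_init(x, 256), name="fc1")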
Example #25
    def __init__(self):
        self.n_layers = 4
        self.base_fdim = 64

        self.conv_opts = dict(w_init=I.NormalInitializer(0.02))