Example No. 1
 def __init__(self,
              in_dim,
              hidden_dim,
              out_dim,
              num_mixture,
              auto_regressive=False):
     self.sampling_bias = 1
     self.IN_DIM = in_dim
     self.HIDDEN_DIM = hidden_dim
     self.OUT_DIM = out_dim
     self.NUM_MIXTURE = num_mixture
     self.AUTO_REGRESSIVE = auto_regressive
     if auto_regressive:
         self.future_out_dim = 1
     else:
         self.future_out_dim = out_dim
     super(RobotController, self).__init__(
         ln1_=L.LayerNormalization(),
         ln2_=L.LayerNormalization(),
         ln3_=L.LayerNormalization(),
         l1_=L.LSTM(in_dim + 4, hidden_dim),
         l2_=L.LSTM(hidden_dim + in_dim + 4, hidden_dim),
         l3_=L.LSTM(hidden_dim + in_dim + 4, hidden_dim),
         # FC1_=L.Linear(hidden_dim, self.future_out_dim)
         mixing_=L.Linear(3 * hidden_dim, num_mixture),
         mu_=L.Linear(3 * hidden_dim, num_mixture * self.future_out_dim),
         sigma_=L.Linear(3 * hidden_dim, num_mixture))
Example No. 2
 def __init__(self, n_units, h=8, dropout=0.1, nopad=False):
     n_inner_units = n_units * 2
     super(EncoderLayer, self).__init__(
         W_1=L.Linear(n_units, n_inner_units),
         W_2=L.Linear(n_inner_units, n_units),
         SelfAttention=AttentionLayer(n_units, h),
         LN_1=L.LayerNormalization(n_units),
         LN_2=L.LayerNormalization(n_units),
     )
     self.dropout = dropout
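The constructor above only registers the sublayers. A minimal post-norm forward pass for such an encoder layer could look like the sketch below, where F denotes chainer.functions as in the other snippets; the __call__ signature, the mask argument, and the call convention of AttentionLayer are assumptions rather than code from the source.

 def __call__(self, e, mask=None):
     # e is assumed to be 2-D, (batch * length, n_units), as expected by
     # L.Linear and L.LayerNormalization.
     sub = self.SelfAttention(e, mask)  # call convention assumed
     e = self.LN_1(e + F.dropout(sub, self.dropout))
     # Position-wise feed-forward sublayer with the same residual + norm pattern.
     sub = self.W_2(F.relu(self.W_1(e)))
     e = self.LN_2(e + F.dropout(sub, self.dropout))
     return e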
Example No. 3
def _create_ln(*args, **kwargs):
    flag = chainer.disable_experimental_feature_warning
    chainer.disable_experimental_feature_warning = True
    try:
        return links.LayerNormalization(*args, **kwargs)
    finally:
        chainer.disable_experimental_feature_warning = flag
Example No. 4
 def __init__(self, in_size, out_size, dropout):
     super(LSTM_normalization, self).__init__()
     initializer = chainer.initializers.Normal()
     with self.init_scope():
         self.lstm = L.NStepLSTM(1, in_size, out_size, dropout)
         self.layer_norm = L.LayerNormalization(out_size,
                                                eps=1e-5,
                                                initial_gamma=initializer)
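A plausible forward pass for this wrapper is sketched below: L.NStepLSTM returns one (length, out_size) Variable per input sequence, and the layer normalization is applied to each of them, i.e. per timestep over the out_size features. The method name and return convention are assumptions.

 def forward(self, hx, cx, xs):
     # xs: list of (length_i, in_size) arrays; NStepLSTM handles variable lengths.
     hy, cy, ys = self.lstm(hx, cx, xs)
     # LayerNormalization treats the time axis as the batch axis here.
     ys = [self.layer_norm(y) for y in ys]
     return hy, cy, ys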
Example No. 5
    def __init__(self, ch=128):
        super(Discriminator, self).__init__()
        self.ch = ch
        self.up_sample_dim = (4, 4, 2)
        self.out_size = self.xp.prod(self.up_sample_dim)

        with self.init_scope():

            w = chainer.initializers.HeNormal(0.02)
            self.c1 = L.Convolution3D(3,
                                      ch // 8,
                                      ksize=4,
                                      stride=2,
                                      pad=1,
                                      initialW=w)
            self.c2 = L.Convolution3D(ch // 8,
                                      ch // 4,
                                      ksize=4,
                                      stride=2,
                                      pad=1,
                                      initialW=w)
            self.c3 = L.Convolution3D(ch // 4,
                                      ch // 2,
                                      ksize=4,
                                      stride=2,
                                      pad=1,
                                      initialW=w)
            self.c4 = L.Convolution3D(ch // 2,
                                      ch,
                                      ksize=4,
                                      stride=2,
                                      pad=1,
                                      initialW=w)
            self.c5 = L.Convolution3D(ch,
                                      1,
                                      ksize=3,
                                      stride=1,
                                      pad=1,
                                      initialW=w)
            self.l1 = L.Linear(self.out_size, 1, initialW=w)

            # all layers except the first and the last use layer normalization
            self.ln2 = L.LayerNormalization()
            self.ln3 = L.LayerNormalization()
            self.ln4 = L.LayerNormalization()
Example No. 6
    def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.LayerNormalization(bottom_width * bottom_width * ch)
            self.bn1 = L.LayerNormalization(ch // 2)
            self.bn2 = L.LayerNormalization(ch // 4)
            self.bn3 = L.LayerNormalization(ch // 8)
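Note that L.LayerNormalization expects 2-D input of shape (batch, units), so while self.bn0 can be applied directly to the output of l0, applying bn1 through bn3 to the 4-D deconvolution outputs requires flattening the spatial axes first. A hypothetical helper for that (not part of the source) could look as follows:

    def _layer_norm_2d(self, ln, x):
        # Apply a LayerNormalization link per spatial position of an NCHW map.
        n, c, h, w = x.shape
        y = F.reshape(F.transpose(x, (0, 2, 3, 1)), (n * h * w, c))
        y = ln(y)  # each position is normalized over its c channels
        return F.transpose(F.reshape(y, (n, h, w, c)), (0, 3, 1, 2))

It would then be used as, for example, h = F.relu(self._layer_norm_2d(self.bn1, self.dc1(h))).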
Example No. 7
    def __init__(self, n_channel, ratio=4):

        super(GCBlock, self).__init__()
        reduction_size = n_channel // ratio

        with self.init_scope():
            self.context = L.Convolution2D(n_channel, 1, ksize=1, nobias=True)
            self.down = L.Linear(n_channel, reduction_size)
            self.ln = L.LayerNormalization(reduction_size)
            self.up = L.Linear(reduction_size, n_channel)
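For context, a forward pass in the spirit of the GCNet global-context block might look like the sketch below: a softmax over the spatial positions pools a per-channel context vector, which then passes through the down → LayerNormalization → ReLU → up bottleneck and is broadcast-added back to the input. The method body is an illustration, not code from the source.

    def __call__(self, x):
        n, c, h, w = x.shape
        # Context modelling: softmax attention over the h * w spatial positions.
        attn = F.softmax(F.reshape(self.context(x), (n, 1, h * w)), axis=2)
        feat = F.reshape(x, (n, c, h * w))
        # Attention-weighted sum over positions gives an (n, c) context vector.
        ctx = F.sum(feat * F.broadcast_to(attn, feat.shape), axis=2)
        # Bottleneck transform with layer normalization, then broadcast add.
        t = self.up(F.relu(self.ln(self.down(ctx))))
        return x + F.broadcast_to(F.reshape(t, (n, c, 1, 1)), x.shape)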
Example No. 8
    def __init__(self, ch0, ch1,
                 nn='conv',
                 norm='bn',
                 activation=F.relu,
                 dropout=False,
                 noise=None,
                 w_init=None,
                 k_size=3,
                 normalize_input=False):

        self.norm = norm
        self.normalize_input = normalize_input
        self.activation = activation
        self.dropout = dropout
        self.noise = noise
        self.nn = nn
        layers = {}

        if w_init is None:
            w = chainer.initializers.GlorotNormal()
        else:
            w = w_init

        if nn == 'down_conv':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)

        elif nn == 'up_deconv':
            layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)

        elif nn == 'up_subpixel':
            pad = k_size//2
            layers['c'] = L.Convolution2D(ch0, ch1*4, k_size, 1, pad, initialW=w)

        elif nn=='conv' or nn=='up_unpooling':
            pad = k_size//2
            layers['c'] = L.Convolution2D(ch0, ch1, k_size, 1, pad, initialW=w)

        elif nn=='linear':
            layers['c'] = L.Linear(ch0, ch1, initialW=w)

        else:
            raise Exception("Cannot find method %s" % nn)

        if self.norm == 'bn':
            if self.noise:
                layers['n'] = L.BatchNormalization(ch1, use_gamma=False)
            else:
                layers['n'] = L.BatchNormalization(ch1)
        elif self.norm == 'ln':
            layers['n'] = L.LayerNormalization(ch1)

        super(NNBlock, self).__init__(**layers)
Example No. 9
 def __init__(self,
              idim,
              n_layers,
              n_units,
              e_units=2048,
              h=8,
              dropout=0.1):
     super(TransformerEncoder, self).__init__()
     with self.init_scope():
         self.linear_in = L.Linear(idim, n_units)
         self.lnorm_in = L.LayerNormalization(n_units)
         self.pos_enc = PositionalEncoding(n_units, dropout, 5000)
         self.n_layers = n_layers
         self.dropout = dropout
         for i in range(n_layers):
             setattr(self, '{}{:d}'.format("lnorm1_", i),
                     L.LayerNormalization(n_units))
             setattr(self, '{}{:d}'.format("self_att_", i),
                     MultiHeadSelfAttention(n_units, h))
             setattr(self, '{}{:d}'.format("lnorm2_", i),
                     L.LayerNormalization(n_units))
             setattr(self, '{}{:d}'.format("ff_", i),
                     PositionwiseFeedForward(n_units, e_units, dropout))
         self.lnorm_out = L.LayerNormalization(n_units)
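Because the per-layer links are registered with setattr, a forward pass would retrieve them with getattr. A pre-norm sketch is shown below; the __call__ signature, the mask handling, and the call conventions of MultiHeadSelfAttention, PositionwiseFeedForward and PositionalEncoding are all assumptions.

 def __call__(self, x, mask=None):
     e = self.pos_enc(self.lnorm_in(self.linear_in(x)))
     for i in range(self.n_layers):
         # Self-attention sublayer with pre-normalization and a residual connection.
         h = getattr(self, 'lnorm1_{:d}'.format(i))(e)
         e = e + F.dropout(getattr(self, 'self_att_{:d}'.format(i))(h, mask),
                           self.dropout)
         # Position-wise feed-forward sublayer, same pattern.
         h = getattr(self, 'lnorm2_{:d}'.format(i))(e)
         e = e + F.dropout(getattr(self, 'ff_{:d}'.format(i))(h), self.dropout)
     return self.lnorm_out(e)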
Example No. 10
 def __init__(self,
              hidden_channels=16,
              n_edge_types=5,
              activation=functions.relu):
     super(GNNFiLMUpdate, self).__init__()
     self.n_edge_types = n_edge_types
     self.activation = activation
     with self.init_scope():
         self.W_linear = GraphLinear(in_size=None,
                                     out_size=self.n_edge_types *
                                     hidden_channels,
                                     nobias=True)  # W_l in eq. (6)
         self.W_g = GraphLinear(in_size=None,
                                out_size=self.n_edge_types *
                                hidden_channels * 2,
                                nobias=True)  # g in eq. (6)
         self.norm_layer = links.LayerNormalization()  # l in eq. (6)
Example No. 11
    def __init__(self,
                 n_lstm_layers,
                 n_mid_units,
                 n_out,
                 win_size,
                 batch_size,
                 att_units_size,
                 frame_level=True,
                 dropout=0.5):
        super(RNN, self).__init__()
        ### actual number of lstm layers is 2*n_lstm_layers   ###

        initializer = chainer.initializers.Normal()

        self.batch_size = batch_size
        ######   local attention related   #####
        xp = cuda.cupy
        self.Zu_init = xp.zeros((batch_size, n_out), dtype=np.float32)
        self.pad_size = int((win_size - 1) / 2)

        self.pad_zero = xp.zeros((self.pad_size, n_mid_units),
                                 dtype=np.float32)
        self.pad_inf = xp.full((self.pad_size, 1), -1e20, dtype=np.float32)

        self.win_size = win_size
        self.att_size = 1 if frame_level else n_mid_units

        ########################################

        with self.init_scope():
            self.l1 = L.Linear(None, n_mid_units, initialW=initializer)
            self.encoder1 = L.NStepLSTM(1, n_mid_units, n_mid_units, dropout)
            self.encoder2 = L.NStepLSTM(1, n_mid_units, n_mid_units, dropout)
            self.encoder3 = L.NStepLSTM(1, n_mid_units, n_mid_units, dropout)

            self.lstm2 = L.NStepLSTM(n_lstm_layers - 3, n_mid_units,
                                     n_mid_units, dropout)
            self.attend = Additive_Attention(n_mid_units, win_size, batch_size,
                                             att_units_size)
            self.attend_ln = L.LayerNormalization(n_mid_units,
                                                  initial_gamma=initializer)

            self.output = L.Linear(n_mid_units * 2,
                                   n_out,
                                   initialW=initializer)
Example No. 12
    def __init__(self,
                 in_size,
                 n_units1,
                 n_units2,
                 n_out=1,
                 out_size=3,
                 train=True,
                 batch_norm=False,
                 layer_norm=False):
        super(LSTM, self).__init__(
            l1=L.Linear(in_size, n_units1),
            bn1=L.BatchNormalization(n_units1),
            l2=L.LSTM(n_units1, n_units2),
            ln1=L.LayerNormalization(n_units2),
            out=MultiOut(n_units2, n_out, out_size),
        )

        self.train = train
        self.target_num = n_out
        self.batch_norm = batch_norm
        self.layer_norm = layer_norm
Example No. 13
def build_mlp(n_out,
              n_units=256,
              layers=5,
              normalize=None,
              activation='leaky_relu',
              dropout=0.0):
    net = chainer.Sequential()
    nobias = normalize in ('BN', 'LN')

    for _ in range(layers):
        net.append(L.Linear(n_units, nobias=nobias))
        if normalize == 'BN':
            net.append(L.BatchNormalization(n_units))
        elif normalize == 'LN':
            net.append(L.LayerNormalization())
        net.append(mF.get_function(activation))
        net.append(partial(F.dropout, ratio=dropout))
    net.append(L.Linear(n_out))
    return net
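A minimal usage sketch for build_mlp, assuming the helper module mF used above is importable; the input size of 32 is arbitrary, since the first Linear link and the LayerNormalization links infer their sizes on the first forward pass:

import numpy as np
import chainer

mlp = build_mlp(n_out=10, n_units=256, layers=5, normalize='LN', dropout=0.1)
x = np.random.randn(8, 32).astype(np.float32)  # batch of 8 samples, 32 features
with chainer.using_config('train', True):
    y = mlp(x)  # Variable of shape (8, 10)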
Example No. 14
 def __init__(self, bottom_width=4, ch=512, wscale=0.02):
     w = chainer.initializers.Normal(wscale)
     super(Discriminator, self).__init__()
     with self.init_scope():
         self.c0_0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
         self.c0_1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
         self.c1_0 = L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w)
         self.c1_1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
         self.c2_0 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w)
         self.c2_1 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
         self.c3_0 = L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w)
         self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=w)
         self.bn0_1 = L.LayerNormalization(ch // 4)
         self.bn1_0 = L.LayerNormalization(ch // 4)
         self.bn1_1 = L.LayerNormalization(ch // 2)
         self.bn2_0 = L.LayerNormalization(ch // 2)
         self.bn2_1 = L.LayerNormalization(ch // 1)
         self.bn3_0 = L.LayerNormalization(ch // 1)
Example No. 15
 def __init__(self, sublayer, N):
     super().__init__()
     with self.init_scope():
         self.sub_layers = sublayer.repeat(N, mode='copy')
         self.norm = L.LayerNormalization(sublayer.size)
Example No. 16
 def __init__(self, layer, size, dropout_ratio=0.1):
     super().__init__()
     self.dropout_ratio = dropout_ratio
     with self.init_scope():
         self.layer = layer
         self.norm = L.LayerNormalization(size)
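This wrapper follows the usual residual sublayer pattern from Transformer implementations: normalize, apply the wrapped layer, drop out, and add the input back. A sketch of the corresponding forward pass (the __call__ signature is an assumption):

 def __call__(self, x, *args):
     # Pre-norm residual connection: x + dropout(layer(norm(x))).
     h = self.layer(self.norm(x), *args)
     return x + F.dropout(h, self.dropout_ratio)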
Example No. 17
    def init_enc_dec_attn(self, RNN_CONFIG):
        xp = cuda.cupy if self.gpuid >= 0 else np
        """
        Add encoder RNN layers
        if bi-rnn, then hidden units in each direction = hidden units / 2
        """
        self.rnn_enc = [
            "L{0:d}_enc".format(i) for i in range(RNN_CONFIG['enc_layers'])
        ]

        self.bi_rnn = RNN_CONFIG['bi_rnn']
        if RNN_CONFIG['bi_rnn']:
            enc_lstm_units = RNN_CONFIG['hidden_units'] // 2
            self.rnn_rev_enc = [
                "L{0:d}_rev_enc".format(i)
                for i in range(RNN_CONFIG['enc_layers'])
            ]
        else:
            enc_lstm_units = RNN_CONFIG['hidden_units']
            self.rnn_rev_enc = []

        # Add each layer
        self.rnn_ln = RNN_CONFIG['ln']
        for i, rnn_name in enumerate(self.rnn_enc + self.rnn_rev_enc):
            self.add_link(rnn_name, L.LSTM(None, enc_lstm_units))
            # Add layer normalization
            if RNN_CONFIG['ln']:
                self.add_link("{0:s}_ln".format(rnn_name),
                              L.LayerNormalization(enc_lstm_units))
        # end for enc rnn
        # Add linear projection with Batch Norm
        self.rnn_linear_proj = False
        if 'linear_proj' in RNN_CONFIG and RNN_CONFIG['linear_proj']:
            self.rnn_linear_proj = True
            proj_units = RNN_CONFIG['hidden_units']
            # self.add_link(f"enc_proj", L.Linear(proj_units, proj_units))
            # self.add_link(f"enc_proj_bn", L.BatchNormalization((proj_units)))
            for i in range(RNN_CONFIG['enc_layers'] - 1):
                self.add_link(f"enc_proj{i}", L.Linear(proj_units, proj_units))
                self.add_link(f"enc_proj{i}_bn",
                              L.BatchNormalization(proj_units))
        print(f"RNN linear projection layer: {self.rnn_linear_proj}")
        """
        Add attention layers
        """
        a_units = RNN_CONFIG['attn_units']
        self.add_link(
            "attn_Wa",
            L.Linear(RNN_CONFIG['hidden_units'], RNN_CONFIG['hidden_units']))
        self.n_attn = 1 if 'n_attn' not in RNN_CONFIG else RNN_CONFIG['n_attn']
        if self.n_attn > 1:
            print(f"# Attention layers: {self.n_attn}")

        feed_attn = self.cfg["rnn_config"].get('feed_attn', True)
        print(f"Feed attention to next decode step: {feed_attn}")
        for i in range(1, self.n_attn):
            self.add_link(
                f"attn_Wa{i}",
                L.Linear(RNN_CONFIG['hidden_units'],
                         RNN_CONFIG['hidden_units']))
        # Context layer = 1*h_units from enc + 1*h_units from dec
        # self.add_link("context", L.Linear(2*RNN_CONFIG['hidden_units'],
        #                                   a_units))
        self.add_link(
            "context",
            L.Linear((self.n_attn + 1) * RNN_CONFIG['hidden_units'], a_units))
        """
        Add decoder layers
        Embedding layer
        """
        e_units = RNN_CONFIG['embedding_units']
        self.add_link("embed_dec",
                      L.EmbedID(RNN_CONFIG["dec_vocab_size"], e_units))

        # Add decoder rnns
        self.rnn_dec = [
            "L{0:d}_dec".format(i) for i in range(RNN_CONFIG['dec_layers'])
        ]

        # decoder rnn input = emb + prev. context vector
        dec_lstm_units = RNN_CONFIG['hidden_units']
        for i, rnn_name in enumerate(self.rnn_dec):
            self.add_link(rnn_name, L.LSTM(None, dec_lstm_units))
            # Add layer normalization
            if RNN_CONFIG['ln']:
                self.add_link("{0:s}_ln".format(rnn_name),
                              L.LayerNormalization(RNN_CONFIG['hidden_units']))
        # end for
        """
        Add output layers
        """
        self.add_link("out", L.Linear(a_units, RNN_CONFIG["dec_vocab_size"]))
        # create masking array for pad id
        if self.gpuid >= 0:
            with cupy.cuda.Device(self.gpuid):
                self.mask_pad_id = xp.ones(RNN_CONFIG["dec_vocab_size"],
                                           dtype=xp.float32)
        else:
            self.mask_pad_id = xp.ones(RNN_CONFIG["dec_vocab_size"],
                                       dtype=xp.float32)
        # set PAD ID to 0, so as to not compute any loss for it
        self.mask_pad_id[0] = 0
Example No. 18
 def __init__(self, config):
     super(LayerNormalization3D, self).__init__()
     with self.init_scope():
         self.ln = L.LayerNormalization(config.unit_num)
Example No. 19
    def __init__(self):
        super(LayerNormalizationConv2D, self).__init__()

        with self.init_scope():
            self.norm = L.LayerNormalization()
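With the size argument omitted, the normalized dimension is fixed on the first forward pass. One way such a link could be applied to a 4-D convolution output is to flatten each sample and normalize jointly over channels and spatial positions; the forward method below is a sketch under that assumption, not the source's implementation.

    def __call__(self, x):
        n = x.shape[0]
        # Flatten each sample to a vector, normalize, restore the NCHW shape.
        y = self.norm(F.reshape(x, (n, -1)))
        return F.reshape(y, x.shape)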