Example #1
    def __init__(self, emb_dim, vocab_size, layer_dims, label_dim, z_dim):
        super(SequenceDecoder, self).__init__(
            rnn=Rnn(emb_dim, vocab_size, layer_dims, label_dim, suppress_output=False),
        )
        # one latent-to-hidden projection per RNN layer
        ls_zh = ChainList()
        for d in layer_dims:
            ls_zh.add_link(L.Linear(z_dim, d))
        self.add_link('ls_zh', ls_zh)
Example #2
class NNAutoEncoder():
    def __init__(self,
                 encoder,
                 decoder,
                 optimizer,
                 epoch=20,
                 batch_size=100,
                 log_path="",
                 export_path="",
                 gpu_flag=-1):
        self.encoder = encoder
        self.decoder = decoder
        self.optimizer = optimizer
        self.epoch = epoch
        self.batch_size = batch_size
        self.log_path = log_path
        self.export_path = export_path
        self.autoencoded = ChainList()
        self.gpu_flag = gpu_flag

    def fit(self, x_train):
        for layer in range(len(self.encoder)):
            # Creating model
            self.model = ChainList(self.encoder[layer].copy(),
                                   self.decoder[layer].copy())
            NNManager.forward = self.forward
            nn = NNManager(self.model,
                           self.optimizer,
                           F.mean_squared_error,
                           self.epoch,
                           self.batch_size,
                           self.log_path,
                           gpu_flag=self.gpu_flag)

            # Training
            x_data = self.encode(x_train, layer).data
            nn.fit(x_data, x_data)
            self.autoencoded.add_link(nn.model[0].copy())

        if self.export_path != "":
            if self.gpu_flag >= 0:
                self.autoencoded.to_cpu()
            # protocol -1 = highest available; close the file handle explicitly
            with open(self.export_path, 'wb') as f:
                pickle.dump(self.autoencoded, f, -1)
        return self

    def predict(self, x_test):
        raise NotImplementedError("Prediction for AutoEncoder is not implemented.")

    def encode(self, x, n):
        if n == 0:
            return Variable(x)
        else:
            h = self.encode(x, n - 1)
            return F.relu(self.autoencoded[n - 1](h))

    def forward(self, x):
        h = F.dropout(F.relu(self.model[0](x)))
        return F.dropout(F.relu(self.model[1](h)))
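A minimal usage sketch for the class above, under assumed layer sizes (784 -> 256 -> 64) and Chainer's Adam optimizer; NNManager comes from the same codebase and is not shown here, and the data is a placeholder:

import numpy as np
import chainer.links as L
import chainer.optimizers as O
from chainer import ChainList

# encoder[i] and decoder[i] are paired layer-wise by fit(), so each
# decoder link must invert the shape of its encoder counterpart.
encoder = ChainList(L.Linear(784, 256), L.Linear(256, 64))
decoder = ChainList(L.Linear(256, 784), L.Linear(64, 256))

x_train = np.random.rand(1000, 784).astype(np.float32)  # placeholder data
ae = NNAutoEncoder(encoder, decoder, O.Adam(), epoch=5, batch_size=128)
ae.fit(x_train)  # trains one (encoder, decoder) pair per layer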
Example #3
    def __init__(self, in_dim, hidden_dims, active):
        super(_Mlp, self).__init__()
        self.active = active

        ds = [in_dim] + hidden_dims
        ls = ChainList()
        for d_in, d_out in zip(ds, ds[1:]):
            l = L.Linear(d_in, d_out)
            ls.add_link(l)
        self.add_link('ls', ls)
Example #4
    def __init__(self, emb_dim, vocab_size, layer_dims, label_dim, z_dim):
        super(SequenceEncoder, self).__init__(
            rnn=Rnn(emb_dim, vocab_size, layer_dims, label_dim, suppress_output=True),
        )
        # one mean head and one log-variance head per RNN layer
        ls_mu = ChainList()
        ls_ln_var = ChainList()
        for d in layer_dims:
            ls_mu.add_link(L.Linear(d, z_dim))
            ls_ln_var.add_link(L.Linear(d, z_dim))
        self.add_link('ls_mu', ls_mu)
        self.add_link('ls_ln_var', ls_ln_var)
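The paired heads above parameterize a per-layer diagonal Gaussian over the latent code. A hedged sketch of how they would typically be consumed, where enc is a constructed SequenceEncoder and hs is a hypothetical list of per-layer hidden states from enc.rnn (F.gaussian is Chainer's reparameterized sampler):

import chainer.functions as F

# one mean/log-variance pair per layer, then one reparameterized sample each
mus = [l(h) for l, h in zip(enc.ls_mu, hs)]
ln_vars = [l(h) for l, h in zip(enc.ls_ln_var, hs)]
zs = [F.gaussian(mu, ln_var) for mu, ln_var in zip(mus, ln_vars)]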
Example #5
    def __init__(self, n_input, n_output, n_hidden=10, n_hidden_layers=1, link=L.LSTM):
        """

        :param n_input: number of inputs
        :param n_hidden: number of hidden units
        :param n_output: number of outputs
        :param n_hidden_layers: number of hidden layers
        :param link: used recurrent link (LSTM)

        """

        links = ChainList()
        if n_hidden_layers == 0:
            links.add_link(L.Linear(n_input, n_output))
        else:
            links.add_link(link(n_input, n_hidden))
            for i in range(n_hidden_layers - 1):
                links.add_link(link(n_hidden, n_hidden))
            links.add_link(L.Linear(n_hidden, n_output))

        self.n_input = n_input
        self.n_hidden = n_hidden
        self.n_output = n_output
        self.n_hidden_layers = n_hidden_layers
        self.monitor = []

        super(RNN, self).__init__(links)
Example #6
    def __init__(self, in_dim, hidden_dims, active):
        super(_Mlp, self).__init__()
        self.active = active

        ds = [in_dim] + hidden_dims
        ls = ChainList()
        bns = ChainList()
        for d_in, d_out in zip(ds, ds[1:]):
            l = L.Linear(d_in, d_out)
            bn = L.BatchNormalization(d_out)
            ls.add_link(l)
            bns.add_link(bn)
        self.add_link('ls', ls)
        self.add_link('bns', bns)
Example #7
    def __init__(self, n_input, n_output, n_hidden=10, n_hidden_layers=1, actfun=F.relu):
        """

        :param n_input: number of inputs
        :param n_output: number of outputs
        :param n_hidden: number of hidden units
        :param n_hidden_layers: number of hidden layers (1; standard MLP)
        :param actfun: used activation function (ReLU)
        """

        links = ChainList()
        if n_hidden_layers == 0:
            links.add_link(L.Linear(n_input, n_output))
        else:
            links.add_link(L.Linear(n_input, n_hidden))
            for i in range(n_hidden_layers - 1):
                links.add_link(L.Linear(n_hidden, n_hidden))
            links.add_link(L.Linear(n_hidden, n_output))

        self.n_input = n_input
        self.n_hidden = n_hidden
        self.n_output = n_output
        self.n_hidden_layers = n_hidden_layers
        self.actfun = actfun
        self.monitor = []

        super(MLP, self).__init__(links)
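A short usage sketch with assumed sizes; MLP's forward pass (__call__) is defined elsewhere in the source and not shown in this example:

import numpy as np

# hypothetical setup: 4 features in, 3 classes out, two hidden layers of 32 units
mlp = MLP(n_input=4, n_output=3, n_hidden=32, n_hidden_layers=2)
x = np.random.rand(8, 4).astype(np.float32)  # mini-batch of 8 samples
y = mlp(x)  # expected output shape: (8, 3)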
Example #8
    def __init__(self, ninput, nhidden, noutput, nlayer=2, actfun=F.relu):
        """

        :param ninput: number of inputs
        :param nhidden: number of hidden units
        :param noutput: number of outputs
        :param nlayer: number of weight matrices (2; standard MLP)
        :param actfun: used activation function (ReLU)
        """

        links = ChainList()
        if nlayer == 1:
            links.add_link(L.Linear(ninput, noutput))
        else:
            links.add_link(L.Linear(ninput, nhidden))
            for i in range(nlayer - 2):
                links.add_link(L.Linear(nhidden, nhidden))
            links.add_link(L.Linear(nhidden, noutput))

        self.ninput = ninput
        self.nhidden = nhidden
        self.noutput = noutput
        self.nlayer = nlayer
        self.actfun = actfun

        self.h = {}

        super(DeepNeuralNetwork, self).__init__(links)
Example #9
    def __init__(self, ninput, nhidden, noutput, nlayer=2, link=L.LSTM):
        """

        :param ninput: number of inputs
        :param nhidden: number of hidden units
        :param noutput: number of outputs
        :param nlayer: number of weight matrices (2 = standard RNN with one layer of hidden units)
        :param link: used recurrent link (LSTM)

        """

        links = ChainList()
        if nlayer == 1:
            links.add_link(L.Linear(ninput, noutput))
        else:
            links.add_link(link(ninput, nhidden))
            for i in range(nlayer - 2):
                links.add_link(link(nhidden, nhidden))
            links.add_link(L.Linear(nhidden, noutput))

        self.ninput = ninput
        self.nhidden = nhidden
        self.noutput = noutput
        self.nlayer = nlayer

        self.h = {}

        super(RecurrentNeuralNetwork, self).__init__(links)
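Because L.LSTM links are stateful, callers typically reset them between sequences. A sketch, assuming rnn is a constructed instance of the class above (its constructor registers the inner ChainList as child 0, so the layer links live at rnn[0]):

# reset every recurrent layer before feeding a new sequence
for link in rnn[0]:
    if hasattr(link, 'reset_state'):  # the final L.Linear has no state
        link.reset_state()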
Example #10
    def __init__(self, in_vocab_size, hidden_dim, layer_num, out_vocab_size, gru, bidirectional, pyramidal, dropout_ratio, src_vocab_size=None):
        super(AttentionalEncoderDecoder, self).__init__()

        if src_vocab_size is None:
            # use same vocabulary for source/target
            word_emb = L.EmbedID(in_vocab_size, hidden_dim, ignore_label=IGNORE_ID)
            self.add_link('word_emb', word_emb)
            self.word_emb_src = word_emb
            self.word_emb_trg = word_emb
        else:
            word_emb_src = L.EmbedID(src_vocab_size, hidden_dim, ignore_label=IGNORE_ID)
            word_emb_trg = L.EmbedID(in_vocab_size, hidden_dim, ignore_label=IGNORE_ID)
            self.add_link('word_emb_src', word_emb_src)
            self.add_link('word_emb_trg', word_emb_trg)

        rnns = ChainList()
        Rnn = GruRnn if gru else LstmRnn

        for i in range(layer_num):
            if bidirectional:
                rnn_f = Rnn(hidden_dim)
                rnn_b = Rnn(hidden_dim)
                rnn = BiRnn(rnn_f, rnn_b)
            else:
                rnn = Rnn(hidden_dim)
            rnns.add_link(rnn)
        multi_rnn = MultiLayerRnn(rnns, [hidden_dim] * layer_num, pyramidal, dropout_ratio)
        self.add_link('encoder', Encoder(self.word_emb_src, multi_rnn))
        self.add_link('decoder', AttentionalDecoder(self.word_emb_trg, hidden_dim, layer_num, out_vocab_size, gru, dropout_ratio))

        self.in_vocab_size = in_vocab_size
        self.hidden_dim = hidden_dim
        self.layer_num = layer_num
        self.out_vocab_size = out_vocab_size
        self.gru = gru
        self.bidirectional = bidirectional
        self.pyramidal = pyramidal
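A construction sketch with hypothetical hyperparameters: a shared 10k-word vocabulary (src_vocab_size=None), two bidirectional GRU layers, and no pyramidal subsampling:

model = AttentionalEncoderDecoder(
    in_vocab_size=10000,
    hidden_dim=256,
    layer_num=2,
    out_vocab_size=10000,
    gru=True,
    bidirectional=True,
    pyramidal=False,
    dropout_ratio=0.1)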
Example #11
    def __init__(self,
                 emb_dim,
                 vocab_size,
                 layer_dims,
                 feature_dim,
                 suppress_output,
                 eos_id=0):
        """
        Recurrent Neural Network with multiple layers.
        in_dim -> layers[0] -> ... -> layers[-1] -> out_dim (optional)

        :param int emb_dim: dimension of embeddings
        :param int vocab_size: size of vocabulary
        :param layer_dims: dimensions of hidden layers
        :param int feature_dim: dimesion of external feature
        :type layer_dims: list of int
        :param bool suppress_output: whether to suppress output
        :param int eos_id: ID of <BOS> and <EOS>
        """
        super(Rnn, self).__init__(emb=F.EmbedID(vocab_size, emb_dim))

        self.emb_dim = emb_dim
        self.vocab_size = vocab_size
        self.layer_dims = layer_dims
        self.feature_dim = feature_dim
        self.suppress_output = suppress_output
        self.eos_id = eos_id

        # add hidden layers
        ls_xh = ChainList()
        ls_hh = ChainList()
        ls_fh = ChainList()
        layer_dims = [emb_dim] + layer_dims
        for in_dim, out_dim in zip(layer_dims, layer_dims[1:]):
            ls_xh.add_link(F.Linear(in_dim, out_dim * 4))
            ls_hh.add_link(F.Linear(out_dim, out_dim * 4))
            ls_fh.add_link(F.Linear(feature_dim, out_dim * 4))
        self.add_link('ls_xh', ls_xh)
        self.add_link('ls_hh', ls_hh)
        self.add_link('ls_fh', ls_fh)

        if not suppress_output:
            # add output layer
            self.add_link('l_y', F.Linear(layer_dims[-1], self.vocab_size))
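The * 4 in the hidden projections matches the four gate blocks of an LSTM. A hedged sketch of the per-layer step they imply, where rnn is a constructed instance and c, h, x, and f are hypothetical cell state, hidden state, input, and feature vectors (the actual forward pass is not part of this example):

import chainer.functions as F

# layer i: sum the input, recurrent, and feature projections, then step the LSTM
lstm_in = rnn.ls_xh[i](x) + rnn.ls_hh[i](h) + rnn.ls_fh[i](f)
c, h = F.lstm(c, lstm_in)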
Example #12
    def __init__(self, n_input, n_output, n_hidden1=10, n_hidden2=10, n_hidden_layers=1, link=L.LSTM):
        """

        :param n_input: nchannels x height x width
        :param n_hidden: number of hidden units
        :param n_output: number of outputs
        :param n_hidden_layers: number of hidden layers
        :param link: used recurrent link (LSTM)

        """
        k = 3 # kernel size
        s = 1 # stride
        p = 1 # padding
        n_linear = n_hidden1 * np.prod(1 + (np.array(n_input[1:]) - k + 2*p)/s)
        links = ChainList()
        if n_hidden_layers == 0:
            links.add_link(L.Convolution2D(n_input[0], n_hidden1, k, s, p))
            links.add_link(L.Linear(n_linear, n_output))
        else:
            links.add_link(L.Convolution2D(n_input[0], n_hidden1, k, s, p))
            links.add_link(link(n_linear, n_hidden2))
            for i in range(n_hidden_layers - 1):
                links.add_link(link(n_hidden2, n_hidden2))
            links.add_link(L.Linear(n_hidden2, n_output))

        self.n_input = n_input
        self.n_hidden1 = n_hidden1
        self.n_hidden2 = n_hidden2

        self.n_output = n_output
        self.n_hidden_layers = n_hidden_layers
        self.monitor = []

        super(CRNN3, self).__init__(links)
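For intuition about n_linear, a worked example under the assumed input shape n_input = (3, 32, 32):

# per spatial dimension: 1 + (32 - 3 + 2*1) // 1 = 32, so the convolution
# preserves height and width and n_linear = n_hidden1 * 32 * 32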