Example #1
    def __init__(self, inplanes, scales=6, mingrid=1):
        super(TUM, self).__init__()

        self.scales = scales

        with self.init_scope():
            ecs = []
            for s in range(scales-1):
                if s == 0:
                    conv = Conv2DBNActiv(inplanes, 256, 3, 2, pad=1, nobias=True)
                elif s == scales-2 and mingrid == 1:
                    conv = Conv2DBNActiv(256, 256, 3, 2, nobias=True)
                else:
                    conv = Conv2DBNActiv(256, 256, 3, 2, pad=1, nobias=True)
                ecs.append(conv)
            self.ecs = ChainList(*ecs)

            dcs = []
            for s in range(scales):
                if s == scales-1:
                    conv = Conv2DBNActiv(inplanes, 256, 3, pad=1, nobias=True)
                else:
                    conv = Conv2DBNActiv(256, 256, 3, pad=1, nobias=True)
                dcs.append(conv)
            self.dcs = ChainList(*dcs)

            self.scs = ChainList(*[Conv2DBNActiv(256, 128, 1, nobias=True) for _ in range(scales)])
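The encoder ChainList above (self.ecs) is a stack of strided convolutions, each halving the spatial resolution. The project's own forward pass is not shown here; the following self-contained sketch, with made-up channel counts and input size, illustrates the usual pattern of running such a ChainList in order while keeping every intermediate feature map:

import numpy as np
import chainer.links as L
from chainer import ChainList

# five strided 3x3 convolutions, as a stand-in for the Conv2DBNActiv stack above
ecs = ChainList(*[L.Convolution2D(16, 16, 3, stride=2, pad=1) for _ in range(5)])

x = np.zeros((1, 16, 64, 64), dtype=np.float32)
feats = [x]
for conv in ecs:                      # ChainList is iterable in registration order
    feats.append(conv(feats[-1]))
# feats now holds 6 maps with sides 64, 32, 16, 8, 4 and 2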
Example #2
    def __init__(self, levels=8, scales=6, mingrid=1):
        super(MLFPN, self).__init__()

        self.levels = levels

        with self.init_scope():
            self.ffmv1 = FFMv1()
            self.tums = ChainList(*[TUM(768, scales, mingrid) if l == 0 else TUM(256, scales, mingrid) for l in range(levels)])
            self.ffmv2s = ChainList(*[FFMv2() for _ in range(levels-1)])
            self.sfam = SFAM(levels, scales)
Example #3
 def __init__(self, emb_dim, vocab_size, layer_dims, label_dim, z_dim):
     super(SequenceEncoder, self).__init__(rnn=Rnn(emb_dim,
                                                   vocab_size,
                                                   layer_dims,
                                                   label_dim,
                                                   suppress_output=True), )
     ls_mu = ChainList()
     ls_ln_var = ChainList()
     for d in layer_dims:
         ls_mu.add_link(L.Linear(d, z_dim))
         ls_ln_var.add_link(L.Linear(d, z_dim))
     self.add_link('ls_mu', ls_mu)
     self.add_link('ls_ln_var', ls_ln_var)
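The per-layer ls_mu and ls_ln_var heads produce the mean and log-variance of a Gaussian latent. What happens with them is not part of this snippet; a minimal, hypothetical sketch of the usual reparameterization step in Chainer looks like this:

import numpy as np
import chainer.functions as F
import chainer.links as L

l_mu = L.Linear(32, 16)                   # stand-ins for one (mu, ln_var) pair above
l_ln_var = L.Linear(32, 16)

h = np.zeros((1, 32), dtype=np.float32)   # a hidden state from the encoder RNN
z = F.gaussian(l_mu(h), l_ln_var(h))      # sample z ~ N(mu, exp(ln_var))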
Example #4
    def __init__(self, d, f, R):
        self.d = d
        self.f = f
        self.R = R
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])

        H = ChainList(*[
            ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
            for j in six.moves.range(5)
        ])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R)])
        self.model = Chain(H=H, W=W, g=g)
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
Example #5
    def __init__(self, in_dim, hidden_dims, active):
        super(_Mlp, self).__init__()
        self.active = active

        ds = [in_dim] + hidden_dims
        ls = ChainList()
        bns = ChainList()
        for d_in, d_out in zip(ds, ds[1:]):
            l = L.Linear(d_in, d_out)
            bn = L.BatchNormalization(d_out)
            ls.add_link(l)
            bns.add_link(bn)
        self.add_link('ls', ls)
        self.add_link('bns', bns)
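Only the constructor is shown above; the forward pass is left out. A minimal, self-contained sketch of the Linear → BatchNormalization → activation pattern that such paired ChainLists usually implement (hypothetical sizes, not the project's own code):

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import ChainList

ls = ChainList(L.Linear(4, 8), L.Linear(8, 8))
bns = ChainList(L.BatchNormalization(8), L.BatchNormalization(8))

def forward(x, active=F.relu):
    h = x
    for l, bn in zip(ls, bns):        # the two ChainLists are built in lockstep
        h = active(bn(l(h)))
    return h

y = forward(np.zeros((2, 4), dtype=np.float32))   # shape (2, 8)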
Example #6
    def __init__(self, n_input, n_output, n_hidden1=10, n_hidden2=10, n_hidden_layers=1, link=L.LSTM):
        """

        :param n_input: input shape as (n_channels, height, width)
        :param n_hidden1: number of convolution output channels
        :param n_hidden2: number of hidden units per recurrent layer
        :param n_output: number of outputs
        :param n_hidden_layers: number of hidden layers
        :param link: recurrent link to use (LSTM by default)

        """
        k = 3 # kernel size
        s = 1 # stride
        p = 1 # padding
        # flattened size of the convolution output that feeds the next layer
        n_linear = int(n_hidden1 * np.prod(1 + (np.array(n_input[1:]) - k + 2*p) // s))
        links = ChainList()
        if n_hidden_layers == 0:
            links.add_link(L.Convolution2D(n_input[0], n_hidden1, k, s, p))
            links.add_link(L.Linear(n_linear, n_output))
        else:
            links.add_link(L.Convolution2D(n_input[0], n_hidden1, k, s, p))
            links.add_link(link(n_linear, n_hidden2))
            for i in range(n_hidden_layers - 1):
                links.add_link(link(n_hidden2, n_hidden2))
            links.add_link(L.Linear(n_hidden2, n_output))

        self.n_input = n_input
        self.n_hidden1 = n_hidden1
        self.n_hidden2 = n_hidden2

        self.n_output = n_output
        self.n_hidden_layers = n_hidden_layers
        self.monitor = []

        super(CRNN3, self).__init__(links)
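With the defaults k=3, s=1, p=1, the convolution preserves the spatial size, so n_linear is simply n_hidden1 × height × width. A quick check with hypothetical values (not taken from the project):

import numpy as np

n_input, n_hidden1, k, s, p = (3, 32, 32), 10, 3, 1, 1
n_linear = int(n_hidden1 * np.prod(1 + (np.array(n_input[1:]) - k + 2*p) // s))
print(n_linear)   # 10 * 32 * 32 = 10240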
Example #7
    def __init__(self, n_input, n_output, n_hidden=10, n_hidden_layers=1, link=L.LSTM):
        """

        :param n_input: number of inputs
        :param n_output: number of outputs
        :param n_hidden: number of hidden units
        :param n_hidden_layers: number of hidden layers
        :param link: recurrent link to use (LSTM by default)

        """

        links = ChainList()
        if n_hidden_layers == 0:
            links.add_link(L.Linear(n_input, n_output))
        else:
            links.add_link(link(n_input, n_hidden))
            for i in range(n_hidden_layers - 1):
                links.add_link(link(n_hidden, n_hidden))
            links.add_link(L.Linear(n_hidden, n_output))

        self.n_input = n_input
        self.n_hidden = n_hidden
        self.n_output = n_output
        self.n_hidden_layers = n_hidden_layers
        self.monitor = []

        super(RNN, self).__init__(links)
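The L.LSTM links added to the ChainList are stateful: they keep their hidden and cell state across calls, so the list can be driven one time step at a time. The project's own forward pass is not shown; a minimal, hypothetical usage sketch:

import numpy as np
import chainer.links as L
from chainer import ChainList

links = ChainList(L.LSTM(4, 8), L.LSTM(8, 8), L.Linear(8, 2))

x = np.zeros((1, 4), dtype=np.float32)     # one time step, batch size 1
h = x
for layer in links:
    h = layer(h)                           # each L.LSTM updates its internal state

for layer in links:
    if hasattr(layer, 'reset_state'):      # clear recurrent state between sequences
        layer.reset_state()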
Example #8
 def __init__(self):
     super(QNet, self).__init__(_hidden_layers=ChainList(
         L.Linear(None, 64),
         L.Linear(None, 64),
         L.Linear(None, 32),
     ),
                                _output_layer=L.Linear(None, 2))
Example #9
    def __init__(self, ninput, nhidden, noutput, nlayer=2, link=L.LSTM):
        """

        :param ninput: number of inputs
        :param nhidden: number of hidden units
        :param noutput: number of outputs
        :param nlayer: number of weight matrices (2 = standard RNN with one layer of hidden units)
        :param link: recurrent link to use (LSTM by default)

        """

        links = ChainList()
        if nlayer == 1:
            links.add_link(L.Linear(ninput, noutput))
        else:
            links.add_link(link(ninput, nhidden))
            for i in range(nlayer - 2):
                links.add_link(link(nhidden, nhidden))
            links.add_link(L.Linear(nhidden, noutput))

        self.ninput = ninput
        self.nhidden = nhidden
        self.noutput = noutput
        self.nlayer = nlayer

        self.h = {}

        super(RecurrentNeuralNetwork, self).__init__(links)
Example #10
    def __init__(self, n_input, n_output, n_hidden=10, n_hidden_layers=1, actfun=F.relu):
        """

        :param n_input: number of inputs
        :param n_output: number of outputs
        :param n_hidden: number of hidden units
        :param n_hidden_layers: number of hidden layers (1; standard MLP)
        :param actfun: activation function to use (ReLU by default)
        """

        links = ChainList()
        if n_hidden_layers == 0:
            links.add_link(L.Linear(n_input, n_output))
        else:
            links.add_link(L.Linear(n_input, n_hidden))
            for i in range(n_hidden_layers - 1):
                links.add_link(L.Linear(n_hidden, n_hidden))
            links.add_link(L.Linear(n_hidden, n_output))

        self.n_input = n_input
        self.n_hidden = n_hidden
        self.n_output = n_output
        self.n_hidden_layers = n_hidden_layers
        self.actfun = actfun
        self.monitor = []

        super(MLP, self).__init__(links)
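As in the other examples, only the constructor appears here. A self-contained sketch (hypothetical sizes) of how a ChainList of Linear links is typically applied, with the activation on every layer except the last:

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import ChainList

links = ChainList(L.Linear(4, 10), L.Linear(10, 10), L.Linear(10, 3))

def forward(x, actfun=F.relu):
    h = x
    for i, layer in enumerate(links):
        h = layer(h)
        if i < len(links) - 1:         # no activation after the output layer
            h = actfun(h)
    return h

y = forward(np.zeros((2, 4), dtype=np.float32))   # shape (2, 3)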
Example #11
    def __init__(self, ninput, nhidden, noutput, nlayer=2, actfun=F.relu):
        """

        :param ninput: number of inputs
        :param nhidden: number of hidden units
        :param noutput: number of outputs
        :param nlayer: number of weight matrices (2; standard MLP)
        :param actfun: activation function to use (ReLU by default)
        """

        links = ChainList()
        if nlayer == 1:
            links.add_link(L.Linear(ninput, noutput))
        else:
            links.add_link(L.Linear(ninput, nhidden))
            for i in range(nlayer - 2):
                links.add_link(L.Linear(nhidden, nhidden))
            links.add_link(L.Linear(nhidden, noutput))

        self.ninput = ninput
        self.nhidden = nhidden
        self.noutput = noutput
        self.nlayer = nlayer
        self.actfun = actfun

        self.h = {}

        super(DeepNeuralNetwork, self).__init__(links)
Example #12
 def __init__(self):
     super(PolicyNet, self).__init__(hidden_layers=ChainList(
         L.Linear(None, 32),
         L.Linear(None, 32),
         L.Linear(None, 16),
     ),
                                     output_layer=L.Linear(None, 2))
Example #13
 def __init__(self, d, f, R, gpu):
     self.d = d
     self.f = f
     self.R = R
     self.gpu = gpu
     g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
     H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
     W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
     self.optimizer = optimizers.Adam()
     self.model = Chain(H=H, W=W, g=g)
     if gpu:
         self.model.to_gpu(0)
     self.optimizer.setup(self.model)
     self.to = [[] for i in six.moves.range(2)]
     self.atom_sid = [[] for i in six.moves.range(2)]
     self.anum = [[] for i in six.moves.range(2)]
Example #14
    def __init__(self, levels=8, scales=6, planes=1024):
        super(SFAM, self).__init__()

        self.levels = levels
        self.scales = scales

        with self.init_scope():
            self.ses = ChainList(*[SEBlock(planes) for _ in range(scales)])
Example #15
 def __init__(self, num_heads, model_dim, key_dim, value_dim):
     super().__init__()
     self.num_heads = num_heads
     self.model_dim = model_dim
     self.key_dim = key_dim
     self.value_dim = value_dim
     self.multi_head_dim = num_heads * value_dim
     self.scale = 1. / sqrt(key_dim)
     with self.init_scope():
         self.head_query_links = ChainList()
         self.head_key_links = ChainList()
         self.head_value_links = ChainList()
         for i in range(num_heads):
             self.head_query_links.append(L.Linear(model_dim, key_dim))
             self.head_key_links.append(L.Linear(model_dim, key_dim))
             self.head_value_links.append(L.Linear(model_dim, value_dim))
         self.output_link = L.Linear(self.multi_head_dim, model_dim)
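The constructor only registers one query/key/value projection per head; the attention computation itself is not shown. A minimal sketch of how a single head could be applied with the links above, scaled dot-product attention over a memory of shape (sequence length, model_dim); this is an illustration, not the project's forward pass:

import chainer.functions as F

def attend_one_head(query, memory, q_link, k_link, v_link, scale):
    # query: (1, model_dim), memory: (seq_len, model_dim)
    q = q_link(query)                             # (1, key_dim)
    k = k_link(memory)                            # (seq_len, key_dim)
    v = v_link(memory)                            # (seq_len, value_dim)
    scores = F.matmul(q, k, transb=True) * scale  # (1, seq_len)
    weights = F.softmax(scores, axis=1)
    return F.matmul(weights, v)                   # (1, value_dim)

# the per-head results would then be concatenated with F.concat(..., axis=1)
# and fed through the output projection (self.output_link above)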
Example #16
 def __init__(self, depth, num_heads, model_dim, ff_dim, p_drop):
     super().__init__()
     with self.init_scope():
         self.unit_links = ChainList()
         for i in range(depth):
             self.unit_links.append(
                 TransformerDecoderUnit(num_heads, model_dim, ff_dim,
                                        p_drop))
Example #17
 def __init__(self, num_molecules, rep_dim, max_degree, num_levels):
     super(Mol2Vec2, self).__init__()
     num_degree_type = max_degree + 1
     with self.init_scope():
         self.mol_embed_layer = L.EmbedID(num_molecules, rep_dim)
         self.atom_embed_layer = L.EmbedID(MAX_NUMBER_ATOM, rep_dim)
         self.edge_layer = L.Linear(rep_dim, rep_dim * MAX_EDGE_TYPE)
         self.out = L.Linear(rep_dim, MAX_ATOM_TYPE)
         self.H = ChainList(*[ChainList(
             *[L.Linear(rep_dim, rep_dim)
               for i in six.moves.range(num_degree_type)])
                              for j in six.moves.range(num_levels)])
     # representation dim of molecules, substructures and atoms
     self.rep_dim = rep_dim
     self.max_degree_type = num_degree_type
     self.num_mol = num_molecules
     self.n_levels = num_levels
Example #18
    def __init__(self, n_output, resolution, n_stack, n_dilateStack,
                 n_in_channel, n_skip_channel, useGPU):
        self.n_output = n_output
        firstConv = L.Convolution2D(None, n_in_channel, ksize=1)
        wn = []
        for s in range(n_stack):
            for d in range(n_dilateStack):
                wn.append(WaveBlock(n_in_channel, n_skip_channel, 2**d,
                                    useGPU))

        lastConv0 = L.Convolution2D(n_skip_channel, n_skip_channel, ksize=1)
        lastConv1 = L.Convolution2D(n_skip_channel, n_skip_channel, ksize=1)
        linear = [L.Linear(None, resolution) for i in range(n_output)]
        super(Wavenet, self).__init__(firstConv=firstConv,
                                      waveBlocks=ChainList(*wn),
                                      lastConv0=lastConv0,
                                      lastConv1=lastConv1,
                                      linear=ChainList(*linear))
Example #19
 def __init__(self):
     super(ValueNet, self).__init__(
             _hidden_layers = ChainList(
                                 L.Linear(None, 32),
                                 L.Linear(None, 32),
                                 L.Linear(None, 16),
                             ),
             _output_layer = L.Linear(None, 1)
         )
Example #20
    def __init__(self,
                 emb_dim,
                 vocab_size,
                 layer_dims,
                 feature_dim,
                 suppress_output,
                 eos_id=0):
        """
        Recurrent Neural Network with multiple layers.
        in_dim -> layers[0] -> ... -> layers[-1] -> out_dim (optional)

        :param int emb_dim: dimension of embeddings
        :param int vocab_size: size of vocabulary
        :param layer_dims: dimensions of hidden layers
        :type layer_dims: list of int
        :param int feature_dim: dimension of external feature
        :param bool suppress_output: whether to suppress output
        :param int eos_id: ID of <BOS> and <EOS>
        """
        super(Rnn, self).__init__(emb=L.EmbedID(vocab_size, emb_dim))

        self.emb_dim = emb_dim
        self.vocab_size = vocab_size
        self.layer_dims = layer_dims
        self.feature_dim = feature_dim
        self.suppress_output = suppress_output
        self.eos_id = eos_id

        # add hidden layer_dims
        ls_xh = ChainList()
        ls_hh = ChainList()
        ls_fh = ChainList()
        layer_dims = [emb_dim] + layer_dims
        for in_dim, out_dim in zip(layer_dims, layer_dims[1:]):
            ls_xh.add_link(L.Linear(in_dim, out_dim * 4))
            ls_hh.add_link(L.Linear(out_dim, out_dim * 4))
            ls_fh.add_link(L.Linear(feature_dim, out_dim * 4))
        self.add_link('ls_xh', ls_xh)
        self.add_link('ls_hh', ls_hh)
        self.add_link('ls_fh', ls_fh)

        if not suppress_output:
            # add output layer
            self.add_link('l_y', L.Linear(layer_dims[-1], self.vocab_size))
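Each of the three hidden projections above is out_dim * 4 wide because chainer.functions.lstm splits its second argument into the four LSTM gates (input, forget, cell update, output). A small, hypothetical sketch of how such projections are consumed:

import numpy as np
import chainer.functions as F
import chainer.links as L

out_dim = 8
l_xh = L.Linear(16, out_dim * 4)           # input-to-hidden projection
l_hh = L.Linear(out_dim, out_dim * 4)      # hidden-to-hidden projection

x = np.zeros((1, 16), dtype=np.float32)
h = np.zeros((1, out_dim), dtype=np.float32)
c = np.zeros((1, out_dim), dtype=np.float32)

c, h = F.lstm(c, l_xh(x) + l_hh(h))        # new cell state and hidden state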
Example #21
 def __init__(self, rnns, dims, pyramidal, dropout_ratio):
     super(MultiLayerRnn, self).__init__(rnns=rnns, )
     self.pyramidal = pyramidal
     if self.pyramidal:
         self.add_link('combine_twos', ChainList())
         for in_dim, out_dim in zip(dims, dims[1:]):
             combine_two = CombineTwo(in_dim, out_dim)
             self.combine_twos.add_link(combine_two)
         assert len(self.rnns) == len(self.combine_twos) + 1
     self.dropout_ratio = dropout_ratio
Example #22
 def __init__(self, emb_dim, vocab_size, layer_dims, label_dim, z_dim):
     super(SequenceDecoder, self).__init__(rnn=Rnn(emb_dim,
                                                   vocab_size,
                                                   layer_dims,
                                                   label_dim,
                                                   suppress_output=False), )
     ls_zh = ChainList()
     for d in layer_dims:
         ls_zh.add_link(L.Linear(z_dim, d))
     self.add_link('ls_zh', ls_zh)
Example #23
    def __init__(self, in_dim, hidden_dims, active):
        super(_Mlp, self).__init__()
        self.active = active

        ds = [in_dim] + hidden_dims
        ls = ChainList()
        for d_in, d_out in zip(ds, ds[1:]):
            l = L.Linear(d_in, d_out)
            ls.add_link(l)
        self.add_link('ls', ls)
Example #24
    def __init__(self):
        self.log = {
            ('test', 'accuracy'): (),
            ('test', 'loss'): (),
            ('training', 'accuracy'): (),
            ('training', 'loss'): ()
        }
        self.model = ChainList(_WaveNet(), _CRF())
        self.optimizer = optimizers.Adam(0.0002, 0.5)

        self.optimizer.setup(self.model)
Example #25
	def __init__(self, size, levels, first_channels, last_channels):
		super().__init__()
		in_channels = [first_channels] * levels
		out_channels = [last_channels] * levels
		for i in range(1, levels):
			channels = min(first_channels, last_channels * 2 ** i)
			in_channels[-i] = channels
			out_channels[-i - 1] = channels
		with self.init_scope():
			self.init = InitialSkipArchitecture(size, in_channels[0], out_channels[0])
			self.skips = ChainList(*[SkipArchitecture(size, i, o) for i, o in zip(in_channels[1:], out_channels[1:])])
Example #26
    def __init__(self):
        super(PolicyNet, self).__init__(
                _hidden_layers = ChainList(
                                    L.Linear(None, 32),
                                    L.Linear(None, 32),
                                    L.Linear(None, 16),
                                ),
                _output_layer = L.Linear(None, 2)
            )

        self._eps = 1e-5  # small constant to avoid sigma = 0
Example #27
	def __init__(self, probability=0.5):
		super().__init__()
		self.probability = probability
		with self.init_scope():
			self.manipulations = ChainList(*[
				Mirror(),
				Rotation(),
				Shift(),
				AffineTransformation(),
				ColorAffineTransformation(),
				AdditiveNoise(),
				Cutout()])
Example #28
 def __init__(self, word_emb, hidden_dim, layer_num, out_vocab_size, gru, dropout_ratio):
     super(AttentionalDecoder, self).__init__(
         softmax_linear=L.Linear(hidden_dim * 2, out_vocab_size),
         phi1_linear=L.Linear(hidden_dim, hidden_dim),    # TODO: make out dim adjustable
         phi2_linear=L.Linear(hidden_dim, hidden_dim),    # TODO: make out dim adjustable
         rnns=ChainList(),
     )
     self.hidden_dim = hidden_dim
     self.layer_num = layer_num
     self.out_vocab_size = out_vocab_size
     Rnn = L.GRU if gru else L.StatelessLSTM
     for i in range(layer_num):
         rnn = Rnn(hidden_dim, hidden_dim)
         self.rnns.add_link(rnn)
     self.word_emb = word_emb
     self.gru = gru
     self.dropout_ratio = dropout_ratio
Example #29
 def __init__(self,
              encoder,
              decoder,
              optimizer,
              epoch=20,
              batch_size=100,
              log_path="",
              export_path="",
              gpu_flag=-1):
     self.encoder = encoder
     self.decoder = decoder
     self.optimizer = optimizer
     self.epoch = epoch
     self.batch_size = batch_size
     self.log_path = log_path
     self.export_path = export_path
     self.autoencoded = ChainList()
     self.gpu_flag = gpu_flag
Example #30
    def __init__(self, in_vocab_size, hidden_dim, layer_num, out_vocab_size, gru, bidirectional, pyramidal, dropout_ratio, src_vocab_size=None):
        super(AttentionalEncoderDecoder, self).__init__()

        if src_vocab_size is None:
            # use same vocabulary for source/target
            word_emb = L.EmbedID(in_vocab_size, hidden_dim, ignore_label=IGNORE_ID)
            self.add_link('word_emb', word_emb)
            self.word_emb_src = word_emb
            self.word_emb_trg = word_emb
        else:
            word_emb_src = L.EmbedID(src_vocab_size, hidden_dim, ignore_label=IGNORE_ID)
            word_emb_trg = L.EmbedID(in_vocab_size, hidden_dim, ignore_label=IGNORE_ID)
            self.add_link('word_emb_src', word_emb_src)
            self.add_link('word_emb_trg', word_emb_trg)

        rnns = ChainList()
        Rnn = GruRnn if gru else LstmRnn

        for i in range(layer_num):
            if bidirectional:
                rnn_f = Rnn(hidden_dim)
                rnn_b = Rnn(hidden_dim)
                rnn = BiRnn(rnn_f, rnn_b)
            else:
                rnn = Rnn(hidden_dim)
            rnns.add_link(rnn)
        multi_rnn = MultiLayerRnn(rnns, [hidden_dim] * layer_num, pyramidal, dropout_ratio)
        self.add_link('encoder', Encoder(self.word_emb_src, multi_rnn))
        self.add_link('decoder', AttentionalDecoder(self.word_emb_trg, hidden_dim, layer_num, out_vocab_size, gru, dropout_ratio))

        self.in_vocab_size = in_vocab_size
        self.hidden_dim = hidden_dim
        self.layer_num = layer_num
        self.out_vocab_size = out_vocab_size
        self.gru = gru
        self.bidirectional = bidirectional
        self.pyramidal = pyramidal