def __init__(self, n_vocab, n_char, n_tag, args):
        feature_dim = args[
            'word_embedding_dim'] + 2 * args['char_embedding_dim']
        super(ModelBase,
              self).__init__(char_embed=L.EmbedID(n_char,
                                                  args['char_embedding_dim'],
                                                  ignore_label=-1),
                             bi_char=L.NStepBiLSTM(1,
                                                   args['char_embedding_dim'],
                                                   args['char_embedding_dim'],
                                                   0),
                             word_embed=L.EmbedID(n_vocab,
                                                  args['word_embedding_dim'],
                                                  ignore_label=-1),
                             bi_word=L.NStepBiLSTM(1, feature_dim,
                                                   int(feature_dim / 2), 0),
                             l=L.Linear(feature_dim, n_tag),
                             crf=L.CRF1d(n_tag))

        # Initialize hyperparameter values
        self.char_embedding_dim = args['char_embedding_dim']
        self.tag_embedding_dim = args['tag_embedding_dim']
        self.dropout_ratio = args['dropout_ratio']
        self.lr_param = args['lr_param']
        self.threshold = args['threshold']
        self.decay_rate = args['decay_rate']
        self.batch_size = args['batch_size']

        if args['mode'] == 'train':
            # Set the forget-gate biases (b1 and b5) of every BiLSTM layer to 1.0.
            for w in self.bi_char:
                w.b1.data[:] = 1.0
                w.b5.data[:] = 1.0
            for w in self.bi_word:
                w.b1.data[:] = 1.0
                w.b5.data[:] = 1.0
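
For context, here is a minimal standalone sketch (not part of the original class) of how an NStepBiLSTM built like bi_word above is typically invoked: the hidden and cell states are passed as None so they start at zero, and the input is a Python list of variable-length float32 sequences. The feature_dim value is an arbitrary assumption.

import numpy as np
import chainer.links as L

# Hypothetical standalone example; feature_dim is an arbitrary small value.
feature_dim = 8
bi_word = L.NStepBiLSTM(1, feature_dim, feature_dim // 2, 0)

# Two sequences of different lengths, each of shape (length, feature_dim).
xs = [np.random.randn(5, feature_dim).astype(np.float32),
      np.random.randn(3, feature_dim).astype(np.float32)]

# Passing None initializes the hidden and cell states to zeros.
hy, cy, ys = bi_word(None, None, xs)
# ys[i] has shape (len(xs[i]), 2 * (feature_dim // 2)): both directions concatenated.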
Example #2
    def __init__(
        self,
        n_speakers=4,
        dropout=0.25,
        in_size=513,
        hidden_size=256,
        n_layers=1,
        embedding_layers=1,
        embedding_size=20,
        dc_loss_ratio=0.5,
    ):
        """ BLSTM-based diarization model.

        Args:
          n_speakers (int): Number of speakers in recording
          dropout (float): dropout ratio
          in_size (int): Dimension of input feature vector
          hidden_size (int): Number of hidden units in LSTM
          n_layers (int): Number of LSTM layers after embedding
          embedding_layers (int): Number of LSTM layers for embedding
          embedding_size (int): Dimension of embedding vector
          dc_loss_ratio (float): mixing parameter for DPCL loss
        """
        super(BLSTMDiarization, self).__init__()
        with self.init_scope():
            self.bi_lstm1 = L.NStepBiLSTM(n_layers, hidden_size * 2,
                                          hidden_size, dropout)
            self.bi_lstm_emb = L.NStepBiLSTM(embedding_layers, in_size,
                                             hidden_size, dropout)
            self.linear1 = L.Linear(hidden_size * 2, n_speakers)
            self.linear2 = L.Linear(hidden_size * 2, embedding_size)
        self.dc_loss_ratio = dc_loss_ratio
        self.n_speakers = n_speakers
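
The constructor alone does not show how the two BiLSTMs are chained. Below is a hedged sketch of the likely data flow; the variable names, the sigmoid/normalize post-processing, and the single-recording batch are assumptions, not the original forward code.

import numpy as np
import chainer.functions as F
import chainer.links as L

# Assumed data flow: frame features -> embedding BiLSTM -> main BiLSTM
# -> per-frame speaker activities and deep-clustering embeddings.
in_size, hidden_size, embedding_size, n_speakers = 513, 256, 20, 4
bi_lstm_emb = L.NStepBiLSTM(1, in_size, hidden_size, 0.25)
bi_lstm1 = L.NStepBiLSTM(1, hidden_size * 2, hidden_size, 0.25)
linear1 = L.Linear(hidden_size * 2, n_speakers)
linear2 = L.Linear(hidden_size * 2, embedding_size)

xs = [np.random.randn(50, in_size).astype(np.float32)]  # one recording, 50 frames
_, _, emb = bi_lstm_emb(None, None, xs)                  # each element: (T, 2 * hidden_size)
_, _, ys = bi_lstm1(None, None, emb)
activity = [F.sigmoid(linear1(y)) for y in ys]           # per-frame speaker activities
dc_embed = [F.normalize(F.tanh(linear2(e))) for e in emb]  # unit-norm embeddings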
Example #3
 def __init__(self, n_cell, size_hidden, rate_dropout):
     super(ONT_BiLSTM, self).__init__()
     self.rate_dropout = rate_dropout
     with self.init_scope():
         self.rnn_a = L.NStepBiLSTM(n_cell, 300, size_hidden, rate_dropout)
         self.rnn_b = L.NStepBiLSTM(n_cell, 300, size_hidden, rate_dropout)
         self.l1 = L.Linear(size_hidden * 4, size_hidden * 4)
         self.l2 = L.Linear(size_hidden * 4, 4)
Example #4
 def __init__(self, hidden_dim):
     super(InferenceCompositionLayer, self).__init__()
     with self.init_scope():
         self.hypo_encoder = L.NStepBiLSTM(1,
                                           hidden_dim,
                                           int(hidden_dim / 8),
                                           dropout=0.3)
         self.premise_encoder = L.NStepBiLSTM(1,
                                              hidden_dim,
                                              int(hidden_dim / 8),
                                              dropout=0.3)
Example #5
 def __init__(self, n_vocab, embed_dim, hidden_dim):
     super(InputEncodingLayer, self).__init__()
     with self.init_scope():
         self.embed_mat = L.EmbedID(n_vocab, embed_dim)
         self.hypo_encoder = L.NStepBiLSTM(1,
                                           hidden_dim,
                                           int(hidden_dim / 2),
                                           dropout=0.3)
         self.premise_encoder = L.NStepBiLSTM(1,
                                              hidden_dim,
                                              int(hidden_dim / 2),
                                              dropout=0.3)
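
As an illustration of how two separate encoders like these are used on a sentence pair, here is a small hedged sketch; the assumption that the embedding dimension equals hidden_dim (so the embeddings feed the encoders directly) and the single-example batches are mine, not the original forward code.

import numpy as np
import chainer.links as L

# Assumes embed_dim == hidden_dim so the embeddings can feed the encoders directly.
n_vocab, hidden_dim = 100, 16
embed_mat = L.EmbedID(n_vocab, hidden_dim)
premise_encoder = L.NStepBiLSTM(1, hidden_dim, hidden_dim // 2, dropout=0.3)
hypo_encoder = L.NStepBiLSTM(1, hidden_dim, hidden_dim // 2, dropout=0.3)

premise = [embed_mat(np.array([1, 2, 3], np.int32))]    # one premise of 3 tokens
hypothesis = [embed_mat(np.array([4, 5], np.int32))]    # one hypothesis of 2 tokens
_, _, p_enc = premise_encoder(None, None, premise)      # each element: (length, hidden_dim)
_, _, h_enc = hypo_encoder(None, None, hypothesis)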
Example #6
 def __init__(self,
              n_layers=1,
              n_inputs=3,
              n_outputs=3,
              linear_num=786,
              cell_num=128,
              atten1=64,
              atten2=1,
              hidden1=64,
              dropout_rate=0.1):
     self.dropout_rate = dropout_rate
     super(SER, self).__init__()
     with self.init_scope():
         self.c1 = L.Convolution2D(n_inputs,
                                   128,
                                   ksize=(5, 3),
                                   stride=(1, 1),
                                   pad=(2, 1))
         self.c2 = L.Convolution2D(128,
                                   512,
                                   ksize=(5, 3),
                                   stride=(1, 1),
                                   pad=(2, 1))
         self.b1 = L.BatchNormalization(128)
         self.b2 = L.BatchNormalization(512)
         self.cl = L.Linear(None, linear_num)
         self.lstm = L.NStepBiLSTM(n_layers, linear_num, cell_num,
                                   dropout_rate)
         self.a1 = L.Linear(cell_num * 2, atten1)
         self.a2 = L.Linear(atten1, atten2)
         self.l3 = L.Linear(cell_num * 2, hidden1)
         self.b3 = L.BatchNormalization(hidden1)
         self.l4 = L.Linear(hidden1, n_outputs)
         self.af = F.leaky_relu
Example #7
    def __init__(self,
                 n_layers,
                 insize,
                 outsize,
                 initialW=None,
                 use_bi_lstm=False):
        super(EdgeRNN, self).__init__()
        self.n_layer = n_layers
        self.outsize = outsize
        if use_bi_lstm:
            assert outsize % 2 == 0, outsize

        if not initialW:
            initialW = initializers.HeNormal()

        with self.init_scope():
            self.fc1 = L.Linear(insize, 256, initialW=initialW)
            self.fc2 = L.Linear(256, 256, initialW=initialW)
            if use_bi_lstm:
                self.lstm3 = L.NStepBiLSTM(self.n_layer,
                                           256,
                                           outsize // 2,
                                           initialW=initialW,
                                           dropout=0.1)  #dropout = 0.0
            else:
                self.lstm3 = L.NStepLSTM(self.n_layer,
                                         256,
                                         outsize,
                                         initialW=initialW,
                                         dropout=0.1)
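
Because NStepLSTM/NStepBiLSTM consume a list of variable-length sequences, the fully connected layers above have to be applied per sequence before the recurrent layer. The sketch below illustrates that pattern under assumed sizes; it is not the original EdgeRNN forward pass.

import numpy as np
import chainer.functions as F
import chainer.links as L

# Assumed pattern: project each variable-length sequence, then run the BiLSTM over the list.
insize, outsize, n_layers = 10, 8, 1
fc1, fc2 = L.Linear(insize, 256), L.Linear(256, 256)
lstm3 = L.NStepBiLSTM(n_layers, 256, outsize // 2, dropout=0.1)

xs = [np.random.randn(7, insize).astype(np.float32),
      np.random.randn(4, insize).astype(np.float32)]
hs = [F.relu(fc2(F.relu(fc1(x)))) for x in xs]   # per-sequence projections, (T_i, 256)
_, _, ys = lstm3(None, None, hs)                  # ys[i]: (T_i, outsize)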
Example #8
 def __init__(self,
              n_layers,
              insize,
              outsize,
              initialW=None,
              use_bi_lstm=False):
     super(ConnLabelRNN, self).__init__()
     if not initialW:
         initialW = initializers.HeNormal()
     self.n_layer = n_layers
     with self.init_scope():
         if use_bi_lstm:
             self.lstm1 = L.NStepBiLSTM(self.n_layer,
                                        insize,
                                        512,
                                        initialW=initialW,
                                        dropout=0.1)
         else:
             self.lstm1 = L.NStepLSTM(self.n_layer,
                                      insize,
                                      1024,
                                      initialW=initialW,
                                      dropout=0.1)
         self.fc2 = L.Linear(1024, 512)
         self.fc3 = L.Linear(512, outsize)
Example #9
    def __init__(self,
                 n_layers,
                 insize,
                 outsize,
                 initialW=None,
                 use_bi_lstm=False):
        super(NodeRNN, self).__init__()
        if not initialW:
            initialW = initializers.HeNormal()
        self.n_layer = n_layers

        with self.init_scope():
            if use_bi_lstm:
                self.lstm = L.NStepBiLSTM(self.n_layer,
                                          1024,
                                          outsize // 2,
                                          initialW=initialW,
                                          dropout=0.1)  #dropout = 0.0
            else:
                self.lstm = L.NStepLSTM(self.n_layer,
                                        1024,
                                        outsize,
                                        initialW=initialW,
                                        dropout=0.1)
            self.fc1 = L.Linear(insize, 1024, initialW=initialW)
            self.fc2 = L.Linear(1024, 1024, initialW=initialW)
Example #10
    def __init__(self, n_word_types, n_trig_types, n_role_types,
                 n_entity_types, params):
        super(SearchBasedModel, self).__init__()
        with self.init_scope():
            self.dim_embed = params['dim_embed']
            self.dim_bilstm = params['dim_bilstm']
            self.dim_role_type = params['dim_role_type']
            self.dim_arg_type = params['dim_arg_type']
            self.dropout = params['dropout']
            self.threshold = params['threshold']
            self.n_best = params['n_best']
            self.margin = params['margin']
            self.action_dim = params['dim_action']
            self.max_pos = params['max_pos']
            self.dim_pos = params['dim_pos']

            self.arg_type_and_word_dim = self.dim_arg_type + (self.dim_bilstm *
                                                              2) + self.dim_pos

            self.relation_dim = self.arg_type_and_word_dim + self.dim_role_type + self.arg_type_and_word_dim
            self.hidden_dim = int(self.arg_type_and_word_dim / 2)

            embed_init = chainer.initializers.Normal()
            self.embed_positiontype = L.EmbedID(self.max_pos,
                                                self.dim_pos,
                                                initialW=embed_init,
                                                ignore_label=-1)
            self.embed_wordtype = L.EmbedID(n_word_types,
                                            self.dim_embed,
                                            initialW=embed_init,
                                            ignore_label=-1)
            self.embed_trigtype = L.EmbedID(n_trig_types,
                                            self.dim_arg_type,
                                            initialW=embed_init,
                                            ignore_label=-1)
            self.embed_roletype = L.EmbedID(n_role_types,
                                            self.dim_role_type,
                                            initialW=embed_init,
                                            ignore_label=-1)
            self.embed_enttype = L.EmbedID(n_entity_types,
                                           self.dim_arg_type,
                                           initialW=embed_init,
                                           ignore_label=-1)
            self.embed_action = L.EmbedID(self.action_dim,
                                          self.action_dim,
                                          initialW=embed_init,
                                          ignore_label=-1)

            self.bilstm = L.NStepBiLSTM(1, self.dim_embed, self.dim_bilstm, 0)

            self.linear_structure = L.Linear(
                None, self.relation_dim + self.action_dim)
            self.linear_buffer = L.Linear(None,
                                          self.relation_dim + self.action_dim)

            self.state_representation = L.Linear(None,
                                                 self.arg_type_and_word_dim)
            self.linear1 = L.Linear(None, self.hidden_dim)
            self.linear2 = L.Linear(None, self.hidden_dim)
            self.linear = L.Linear(None, 1)
Example #11
    def __init__(self,
                 vocab_size: int,
                 word_size: int,
                 hidden_size: int,
                 dropout: float = 0.5,
                 **kwargs):
        """Initialize the Relevant Sentences Detector model.

        Parameters
        ----------
        vocab_size : int
            The number of items in the vocabulary of the input.
        word_size : int
            The number of dimensions used to represent the vocabulary.
        hidden_size : int
            The number of dimensions used to represent the hidden state of the LSTM.
        dropout : float, optional
            The dropout used for the LSTM (default: 0.5).
        kwargs
            Optional keyword arguments passed to the super class.
        """
        super().__init__(**kwargs)
        with self.init_scope():
            self.embed = links.EmbedID(vocab_size, word_size)
            self.rnn = links.NStepBiLSTM(1, word_size, hidden_size, dropout)
            self.l_out = links.Linear(2)
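
A hedged sketch of how a detector like this could score sentences (the mean-pooling head and the single-token batches are assumptions; the original forward pass is not shown above). Note that links.Linear(2) leaves in_size unspecified, so it is inferred on the first call.

import numpy as np
import chainer.functions as F
import chainer.links as links

# Illustrative only: embed token IDs, run the BiLSTM, score each sentence
# from the mean of its hidden states.
vocab_size, word_size, hidden_size = 100, 16, 8
embed = links.EmbedID(vocab_size, word_size)
rnn = links.NStepBiLSTM(1, word_size, hidden_size, 0.5)
l_out = links.Linear(2)   # in_size inferred on first call

xs = [np.array([1, 2, 3], np.int32), np.array([4, 5], np.int32)]
exs = [embed(x) for x in xs]
_, _, ys = rnn(None, None, exs)
scores = l_out(F.stack([F.average(y, axis=0) for y in ys]))   # (n_sentences, 2)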
Example #12
    def __init__(self, inputDim, outDimPdf, outDimPho):
        super(LSTM, self).__init__()
        with self.init_scope():

            global args

            self.nl = L.NStepBiLSTM(args.layer, inputDim, args.hiddenNode,
                                    args.dropout)

            self.bn = L.BatchNormalization(args.hiddenNode * 2, 0.95)

            #for param in self.params():
            #    param.array[...] = np.random.uniform(-0.1, 0.1, param.shape)

            initializerW = None  #chainer.initializers.HeNormal()
            initializerBias = None  #chainer.initializers.Zero()

            self.ln1 = L.Linear(None,
                                outDimPdf,
                                initialW=initializerW,
                                initial_bias=initializerBias)
            self.ln2 = L.Linear(None,
                                outDimPho,
                                initialW=initializerW,
                                initial_bias=initializerBias)
Example #13
    def __init__(self, database, n_layers:int, in_size:int, out_size:int, initialW=None,
                 spatial_edge_model: SpatialEdgeMode = SpatialEdgeMode.all_edge,
                 recurrent_block_type: RecurrentType = RecurrentType.rnn, attn_heads=8, bi_lstm=False):
        super(SpaceTimeRNN, self).__init__()
        self.neg_pos_ratio = 3
        self.database = database
        self.spatial_edge_mode = spatial_edge_model
        self.out_size = out_size
        self.in_size = in_size
        self.frame_node_num = config.BOX_NUM[self.database]
        self.mid_size = 1024
        NodeRecurrentModule = AttentionBlock if recurrent_block_type == RecurrentType.attention_block else TemporalRNN
        if recurrent_block_type == RecurrentType.no_temporal:
            NodeRecurrentModule = PositionwiseFeedForwardLayer

        with self.init_scope():
            if not initialW:
                initialW = initializers.HeNormal()

            self.top = dict()
            for i in range(self.frame_node_num):
                if recurrent_block_type == RecurrentType.rnn:
                    self.add_link("Node_{}".format(i),
                                  TemporalRNN(n_layers, self.in_size, self.mid_size, use_bi_lstm=bi_lstm))
                else:
                    self.add_link("Node_{}".format(i),
                                  NodeRecurrentModule(n_layers, self.in_size, self.mid_size))
                self.top[str(i)] = getattr(self, "Node_{}".format(i))

            fc_in_len = self.mid_size
            if spatial_edge_model != SpatialEdgeMode.no_edge:
                self.space_lstm = L.NStepBiLSTM(n_layers, self.in_size, self.mid_size//2, dropout=0.1, initialW=initialW)
                fc_in_len = self.mid_size * 2

            self.fc = L.Linear(fc_in_len, self.out_size, initialW=initialW)
Example #14
    def __init__(self,
                 idim,
                 elayers,
                 cdim,
                 hdim,
                 subsample,
                 dropout,
                 typ="lstm"):
        super(BRNNP, self).__init__()
        with self.init_scope():
            for i in six.moves.range(elayers):
                if i == 0:
                    inputdim = idim
                else:
                    inputdim = hdim
                setattr(
                    self, "birnn%d" % i,
                    L.NStepBiLSTM(1, inputdim, cdim, dropout) if typ == "lstm"
                    else L.NStepBiGRU(1, inputdim, cdim, dropout))
                # bottleneck layer to merge
                setattr(self, "bt%d" % i, L.Linear(2 * cdim, hdim))

        self.elayers = elayers
        self.cdim = cdim
        self.subsample = subsample
        self.typ = typ
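
The layers registered above are usually applied in a loop, feeding each BiLSTM's projected output into the next. The sketch below is a simplification under that assumption; it omits the frame subsampling the real encoder performs between layers, and the sizes are made up.

import numpy as np
import chainer.functions as F
import chainer.links as L

# Simplified stacked encoder: BiLSTM -> linear bottleneck -> next BiLSTM.
idim, cdim, hdim, elayers, dropout = 40, 32, 16, 2, 0.0
birnns = [L.NStepBiLSTM(1, idim if i == 0 else hdim, cdim, dropout)
          for i in range(elayers)]
bts = [L.Linear(2 * cdim, hdim) for _ in range(elayers)]

xs = [np.random.randn(20, idim).astype(np.float32)]
for i in range(elayers):
    _, _, ys = birnns[i](None, None, xs)
    # (the original model also subsamples frames here according to `subsample`)
    xs = [F.tanh(bts[i](y)) for y in ys]   # project 2 * cdim back down to hdim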
Example #15
    def setUp(self):
        shape = (self.n_layer * 2, len(self.lengths), self.out_size)
        if self.hidden_none:
            self.h = self.c = numpy.zeros(shape, 'f')
        else:
            self.h = numpy.random.uniform(-1, 1, shape).astype('f')
            self.c = numpy.random.uniform(-1, 1, shape).astype('f')
        self.xs = [
            numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
            for l in self.lengths
        ]

        self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
        self.gc = numpy.random.uniform(-1, 1, shape).astype('f')
        self.gys = [
            numpy.random.uniform(-1, 1, (l, self.out_size * 2)).astype('f')
            for l in self.lengths
        ]
        self.rnn = links.NStepBiLSTM(self.n_layer, self.in_size, self.out_size,
                                     self.dropout)

        for layer in self.rnn:
            for p in layer.params():
                p.array[...] = numpy.random.uniform(-1, 1, p.shape)
        self.rnn.cleargrads()
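
A standalone version of the shape relationships this fixture encodes (an illustration with concrete values, not the original test body):

import numpy as np
import chainer.links as L

n_layer, in_size, out_size, dropout = 1, 3, 2, 0.0
lengths = [4, 2, 3]
rnn = L.NStepBiLSTM(n_layer, in_size, out_size, dropout)

# Hidden and cell states of a BiLSTM have shape (n_layer * 2, batch, out_size).
h = c = np.zeros((n_layer * 2, len(lengths), out_size), np.float32)
xs = [np.random.uniform(-1, 1, (l, in_size)).astype(np.float32) for l in lengths]

hy, cy, ys = rnn(h, c, xs)
assert hy.shape == h.shape
assert [y.shape for y in ys] == [(l, out_size * 2) for l in lengths]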
Example #16
 def __init__(self, n_vocab, embed, hidden, dropout, initialW):
     n_layers = 1
     super(WordEncoder, self).__init__()
     with self.init_scope():
         self.embed = L.EmbedID(n_vocab, embed, initialW=initialW)
         self.Nlstm = L.NStepBiLSTM(n_layers, embed, hidden, dropout)
     self.hidden = hidden
Example #17
    def __init__(self,
                 n_layers=consts.LAYERS,
                 n_in=consts.IN * 2,
                 n_units=consts.UNITS * 2,
                 n_tag=9,
                 dropout=consts.DROPOUT):
        super(RNN_TOP, self).__init__()

        # Register the layers that hold parameters
        with self.init_scope():
            self.xh = L.NStepBiLSTM(n_layers, n_in, n_units, dropout)
            self.h_100_self = L.Linear(n_units * 2, 100 * 2)
            self.h_1_self = L.Linear(100 * 2, 1)
            self.h_100_qyn = L.Linear(n_units * 2, 100 * 2)
            self.h_1_qyn = L.Linear(100 * 2, 1)
            self.h_100_qw = L.Linear(n_units * 2, 100 * 2)
            self.h_1_qw = L.Linear(100 * 2, 1)
            self.h_100_ayn = L.Linear(n_units * 2, 100 * 2)
            self.h_1_ayn = L.Linear(100 * 2, 1)
            self.h_100_aw = L.Linear(n_units * 2, 100 * 2)
            self.h_1_aw = L.Linear(100 * 2, 1)
            self.h_100_res = L.Linear(n_units * 2, 100 * 2)
            self.h_1_res = L.Linear(100 * 2, 1)
            self.h_100_fil = L.Linear(n_units * 2, 100 * 2)
            self.h_1_fil = L.Linear(100 * 2, 1)
            self.h_100_con = L.Linear(n_units * 2, 100 * 2)
            self.h_1_con = L.Linear(100 * 2, 1)
            self.h_100_req = L.Linear(n_units * 2, 100 * 2)
            self.h_1_req = L.Linear(100 * 2, 1)
Example #18
 def __init__(self, num_classes):
     super(Model, self).__init__()
     with self.init_scope():
         self.conv1 = L.ConvolutionND(ndim=1,
                                      in_channels=3,
                                      out_channels=48,
                                      ksize=5,
                                      stride=1,
                                      pad=2)
         self.batch2 = L.BatchNormalization(48)
         self.conv2 = L.ConvolutionND(ndim=1,
                                      in_channels=48,
                                      out_channels=64,
                                      ksize=5,
                                      stride=1,
                                      pad=2)
         self.batch3 = L.BatchNormalization(64)
         self.conv3 = L.ConvolutionND(ndim=1,
                                      in_channels=64,
                                      out_channels=96,
                                      ksize=3,
                                      stride=1,
                                      pad=1)
         self.lstm = L.NStepBiLSTM(n_layers=3,
                                   in_size=96,
                                   out_size=128,
                                   dropout=0.3)
         self.fc = L.Linear(in_size=256, out_size=num_classes)
Example #19
    def __init__(self,
                 n_vocab=None,
                 emb_dim=100,
                 hidden_dim=200,
                 init_emb=None,
                 add_dim=0,
                 use_dropout=0.33,
                 n_layers=1,
                 pos_dim=0,
                 n_pos=0):
        feature_dim = emb_dim + add_dim + pos_dim
        super(FastBiLSTM,
              self).__init__(word_embed=L.EmbedID(n_vocab,
                                                  emb_dim,
                                                  ignore_label=-1),
                             bi_lstm=L.NStepBiLSTM(n_layers=n_layers,
                                                   in_size=feature_dim,
                                                   out_size=hidden_dim,
                                                   dropout=use_dropout,
                                                   use_cudnn=True))
        if n_pos:
            pos_embed = L.EmbedID(n_pos, pos_dim, ignore_label=-1)
            self.add_link('pos_embed', pos_embed)

        self.n_pos = n_pos
        self.hidden_dim = hidden_dim
        self.train = True
        self.use_dropout = use_dropout
        self.n_layers = n_layers

        # Set the forget-gate bias to 1.0
        # (b1 and b5 are the forget-gate bias terms of each NStepBiLSTM layer).
        for w in self.bi_lstm:
            w.b1.data[:] = 1.0
            w.b5.data[:] = 1.0
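
For reference, a short sketch of where b1 and b5 live: iterating an NStepBiLSTM yields one child link per layer and direction, each holding parameters w0..w7 and b0..b7, and indices 1 and 5 correspond to the forget gate in Chainer's gate ordering (the comment style and printout here are mine).

import chainer.links as L

rnn = L.NStepBiLSTM(1, 4, 3, 0.0)
for layer in rnn:                     # one child per (layer, direction)
    names = sorted(name for name, _ in layer.namedparams())
    print(names)                      # ['/b0', ..., '/b7', '/w0', ..., '/w7']
    layer.b1.data[:] = 1.0            # forget-gate bias (input-to-hidden term)
    layer.b5.data[:] = 1.0            # forget-gate bias (hidden-to-hidden term)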
Example #20
    def __init__(self,
                 n_layers=consts.LAYERS,
                 n_in=consts.IN * 2,
                 n_units=consts.UNITS * 2,
                 dropout=consts.DROPOUT):
        super(RNN_CONNECT_AT, self).__init__()

        with self.init_scope():
            self.xh = L.NStepBiLSTM(n_layers, n_in, n_units, dropout)
            self.hy1 = L.Linear(n_units * 2, 100 * 2)

            self.h_100_self = L.Linear(n_units * 2, 100 * 2)
            self.h_100_qyn = L.Linear(n_units * 2, 100 * 2)
            self.h_100_qw = L.Linear(n_units * 2, 100 * 2)
            self.h_100_ayn = L.Linear(n_units * 2, 100 * 2)
            self.h_100_aw = L.Linear(n_units * 2, 100 * 2)
            self.h_100_res = L.Linear(n_units * 2, 100 * 2)
            self.h_100_fil = L.Linear(n_units * 2, 100 * 2)
            self.h_100_con = L.Linear(n_units * 2, 100 * 2)
            self.h_100_req = L.Linear(n_units * 2, 100 * 2)

            self.at = L.Linear(100 * 2, 1)
            #            self.at1 = L.Linear(100*2, 100)  # maybe 200->1 directly instead of 200->100->1 would be fine
            #            self.at2 = L.Linear(100, 1)

            self.out = L.Linear(100 * 2, 9)  # maybe 200->100->9 instead of going straight from 200->9 would be fine
Example #21
    def __init__(self, n_layers, n_source_vocab, trans_data, n_units,
                 v_eos_src, n_maxsize):
        super(Seq2Tree_Flatten, self).__init__()

        # for each nodetype, for each move, the result array.
        self.trans_data = trans_data
        self.embed_idx = []
        ns = 0

        def inc():
            nonlocal ns
            ns += 1
            return ns - 1

        self.embed_idx = [[[inc() for v in vs] for vs in moves]
                          for moves in self.trans_data]
        self.embed_root_idx = ns
        self.embed_y_size = ns + 1

        self.choicerange = []
        self.choice_idx = []
        self.is_trivial = []
        s = 0
        for d in self.trans_data:
            ist = len(d) <= 1
            self.is_trivial.append(ist)
            #if ist:
            #	self.choicerange.append(None)
            #	self.choice_idx.append([0])
            #	continue
            b = s
            s += len(d)
            self.choicerange.append((b, s))
            self.choice_idx.append(list(range(b, s)))
        #self.choice_num_sum = sum(list(map(lambda d: len(d),self.trans_data)))

        self.type_size = len(self.embed_idx)
        self.n_all_choice = sum(map(lambda x: len(x), self.trans_data))

        with self.init_scope():
            self.embed_x = L.EmbedID(n_source_vocab, n_units)
            #self.embed_y = L.EmbedID(self.embed_y_size, n_units) # maybe mergeable
            self.embed_y_0 = L.EmbedID(self.embed_y_size,
                                       n_units)  # maybe mergeable
            self.embed_y_1 = L.EmbedID(self.type_size,
                                       n_units)  # maybe mergeable

            self.encoder = L.NStepBiLSTM(n_layers, n_units, n_units, 0.1)
            self.decoder = L.NStepLSTM(n_layers, n_units * 2, n_units * 2, 0.1)
            self.Wc = L.Linear(n_units * 4, n_units)
            self.Ws = L.Linear(n_units, self.n_all_choice)

            #self.att = Attention(n_units)
            self.att = GlobalGeneralAttention(n_units)

        self.n_layers = n_layers
        self.n_units = n_units
        self.v_eos_src = v_eos_src
        self.n_maxsize = n_maxsize
        self.rootidx = len(trans_data) - 1
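
A hedged sketch of why the decoder above is sized n_units * 2: the bidirectional encoder emits 2 * n_units per step, and its forward/backward states can be concatenated to seed the unidirectional decoder. The wiring below is an assumption for illustration, not the original forward code, and the dummy decoder inputs are placeholders.

import numpy as np
import chainer.functions as F
import chainer.links as L

n_layers, n_units, n_vocab = 1, 16, 50
embed_x = L.EmbedID(n_vocab, n_units)
encoder = L.NStepBiLSTM(n_layers, n_units, n_units, 0.1)
decoder = L.NStepLSTM(n_layers, n_units * 2, n_units * 2, 0.1)

src = [embed_x(np.array([3, 7, 9], np.int32))]
hx, cx, enc_out = encoder(None, None, src)                 # hx: (2 * n_layers, 1, n_units)
h0 = F.expand_dims(F.concat([hx[0], hx[1]], axis=1), 0)    # (n_layers, 1, 2 * n_units)
c0 = F.expand_dims(F.concat([cx[0], cx[1]], axis=1), 0)
dec_in = [np.zeros((4, n_units * 2), np.float32)]          # dummy decoder inputs
_, _, dec_out = decoder(h0, c0, dec_in)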
Example #22
    def __init__(self, ch):
        super(Link_NStepBiLSTM, self).__init__(L.NStepBiLSTM(1, 1, 1, 0))
        # code.InteractiveConsole({'ch': ch}).interact()

        hd = ch.children().__next__()
        if not (hd.w0 is None):
            self.n_in = hd.w0.shape[1]
        else:
            self.n_in = None

        self.out_size = ch.out_size
        self.n_layers = ch.n_layers
        self.dropout = ch.dropout

        self.ws = []
        self.bs = []
        for i in range(self.n_layers * 2):
            ws = []
            bs = []
            for j in range(8):
                ws.append(
                    helper.make_tensor_value_info(('/%d/w%d' % (i, j)),
                                                  TensorProto.FLOAT, ["TODO"]))
                bs.append(
                    helper.make_tensor_value_info(('/%d/b%d' % (i, j)),
                                                  TensorProto.FLOAT, ["TODO"]))
            self.ws.append(ws)
            self.bs.append(bs)
Example #23
 def __init__(self, n_layers=consts.LAYERS, n_in=consts.IN*2, n_units=consts.UNITS*2, n_tag=9, dropout=consts.DROPOUT):
     super(RNN_FINETUNING, self).__init__()
     
     # Register the layers that hold parameters
     with self.init_scope():
         self.xh = L.NStepBiLSTM(n_layers, n_in, n_units, dropout)
         self.hy1 = L.Linear(n_units*2, 100*2)
         self.hy2 = L.Linear(100*2, n_tag)
Example #24
 def __init__(self, n_vocab=30000, n_units=200, n_layers=2, dropout=0.5):
     super(SentRepRNN, self).__init__()
     with self.init_scope():
         self.embed = L.EmbedID(n_vocab, n_units)  # word embedding (input is a single flat list)
         self.encoder = L.NStepBiLSTM(
             n_layers, n_units, n_units,
             dropout)  # unlike embedding, builds one sentence vector per sentence in the label
Example #25
 def __init__(self, vocab_size, embed_size, hidden_size, dropout):
     n_layers = 1
     super(Encoder, self).__init__()
     with self.init_scope():
         self.embed = L.EmbedID(vocab_size, embed_size)
         self.Nlstm = L.NStepBiLSTM(n_layers, embed_size, hidden_size,
                                    dropout)
     self.hidden_size = hidden_size
Example #26
 def __init__(self, input_size, hidden_size, num_layers, num_classes):
     super(BiRNN, self).__init__()
     self.hidden_size = hidden_size
     self.num_layers = num_layers
     with self.init_scope():
         self.nsteplstm = L.NStepBiLSTM(num_layers, input_size, hidden_size,
                                        dropout=0)  # chainer.links.NStepBiLSTM behaves much like torch.nn.LSTM(bidirectional=True)
         self.fc = L.Linear(2 * hidden_size, num_classes)
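
An assumed classification head for the model above: concatenate the final forward/backward hidden states and feed them to the linear layer (a sketch, not the original forward code; sizes are arbitrary).

import numpy as np
import chainer.functions as F
import chainer.links as L

input_size, hidden_size, num_layers, num_classes = 28, 16, 1, 10
nsteplstm = L.NStepBiLSTM(num_layers, input_size, hidden_size, dropout=0)
fc = L.Linear(2 * hidden_size, num_classes)

xs = [np.random.randn(28, input_size).astype(np.float32) for _ in range(3)]
hy, cy, ys = nsteplstm(None, None, xs)      # hy: (2 * num_layers, batch, hidden_size)
last = F.concat([hy[0], hy[1]], axis=1)     # (batch, 2 * hidden_size), with num_layers == 1
logits = fc(last)                           # (batch, num_classes)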
Example #27
 def __init__(self, vocab_size):
     super(LSTMChain, self).__init__()
     with self.init_scope():
         self.embed = L.EmbedID(vocab_size, 200)
         self.bi_lstm = L.NStepBiLSTM(1, 200, 300, 0.2)
         self.mid = L.Linear(600, 200)
         self.out = L.Linear(200, 11)
         self.dropout = 0.2
Example #28
    def __init__(self, n_word_types, n_trig_types, n_role_types,
                 n_entity_types, trigger_type2id, entity_type2id, DIM_EMBED,
                 DIM_EVENT, DIM_BILSTM, DIM_TRIG_TYPE, DIM_ROLE_TYPE,
                 DIM_ARG_TYPE, DIM_IO, DROPOUT, REPLACE_TYPE, GENERALISATION,
                 THRESHOLD):
        super(Loader, self).__init__()
        with self.init_scope():
            self.DIM_EMBED = DIM_EMBED
            self.DIM_EVENT = DIM_EVENT
            self.DIM_BILSTM = DIM_BILSTM
            self.DIM_TRIG_TYPE = DIM_TRIG_TYPE
            self.DIM_ROLE_TYPE = DIM_ROLE_TYPE
            self.DIM_ARG_TYPE = DIM_ARG_TYPE
            self.DIM_IO = DIM_IO
            self.DROPOUT = DROPOUT
            self.THRESHOLD = THRESHOLD
            self.GENERALISATION = GENERALISATION
            self.REPLACE_TYPE = REPLACE_TYPE

            self.DIM_TREE_LSTM_INPUT = self.DIM_TRIG_TYPE + (
                self.DIM_BILSTM *
                2) + self.DIM_ROLE_TYPE + self.DIM_TRIG_TYPE + (
                    self.DIM_BILSTM * 2) + self.DIM_IO
            self.DIM_ARG = self.DIM_TRIG_TYPE + (self.DIM_BILSTM * 2)

            self.id2triggertype = {v: k for k, v in trigger_type2id.items()}
            self.id2entitytype = {v: k for k, v in entity_type2id.items()}

            self.embed = L.EmbedID(n_word_types,
                                   self.DIM_EMBED,
                                   ignore_label=-1)
            self.bilstm = L.NStepBiLSTM(1, self.DIM_EMBED, self.DIM_BILSTM, 0)
            self.embed_trigtype = L.EmbedID(n_trig_types,
                                            self.DIM_TRIG_TYPE,
                                            ignore_label=-1)
            self.embed_roletype = L.EmbedID(n_role_types,
                                            self.DIM_ROLE_TYPE,
                                            ignore_label=-1)
            self.embed_argtype = L.EmbedID(n_entity_types,
                                           self.DIM_ARG_TYPE,
                                           ignore_label=-1)
            self.embed_io = L.EmbedID(2, self.DIM_IO, ignore_label=-1)

            self.treelstm = L.ChildSumTreeLSTM(self.DIM_TREE_LSTM_INPUT,
                                               self.DIM_EVENT)

            self.l1 = L.Linear(None, self.DIM_EVENT)
            self.y = L.Linear(None, self.DIM_EVENT)
            self.final = L.Linear(None, 1)  # event or non-event
            self.reducedEvent = L.Linear(None, self.DIM_ARG)

            self.len_type_and_arg = self.DIM_TRIG_TYPE + (self.DIM_BILSTM * 2)
            self.len_relation = self.DIM_TRIG_TYPE + (self.DIM_BILSTM * 2) + self.DIM_ROLE_TYPE + self.DIM_ARG_TYPE + \
                               (self.DIM_BILSTM * 2) + self.DIM_IO

            self.trigger_type2id = trigger_type2id
            self.entity_type2id = entity_type2id
Example #29
    def __init__(self, inSize, hidSize, outSize):
        # out size is usually class size
        super().__init__()

        bilstm = L.NStepBiLSTM(1, inSize, hidSize, dropout=0.3)
        linear = L.Linear(hidSize * 2, outSize)

        self.add_link('bilstm', bilstm)
        self.add_link('linear', linear)
Example #30
 def __init__(self, train=True):
     self.train = train
     super(Mynet, self).__init__()
     with self.init_scope():
         self.h = L.NStepBiLSTM(n_layers=1,
                                in_size=num_classes,
                                out_size=128,
                                dropout=0)
         self.out = L.Linear(None, num_classes)