Example #1
    def build_model(self):

        self.get_glove()
        if self.config['n_layers'] not in [1, 2]:
            raise ValueError("Only 1 or 2 layers are supported!")

        self.rnn_left_ = GRU(self.config['nHidden'], return_sequences=True,
                             dropout=self.config['dropout'],
                             recurrent_dropout=self.config['recurrent_dropout'],
                             trainable=True)
        self.rnn_right_ = GRU(self.config['nHidden'], return_sequences=True,
                              dropout=self.config['dropout'],
                              recurrent_dropout=self.config['recurrent_dropout'],
                              trainable=True)

        self.rnn_left_2 = GRU(self.config['nHidden'], return_sequences=True,
                              dropout=self.config['dropout'],
                              recurrent_dropout=self.config['recurrent_dropout'],
                              trainable=True)
        self.rnn_right_2 = GRU(self.config['nHidden'], return_sequences=True,
                               dropout=self.config['dropout'],
                               recurrent_dropout=self.config['recurrent_dropout'],
                               trainable=True)

        if self.config['n_layers'] == 2:
            self._rnn_left_ = GRU(self.config['nHidden'], return_sequences=True,
                                  dropout=self.config['dropout'],
                                  recurrent_dropout=self.config['recurrent_dropout'],
                                  trainable=True)
            self._rnn_right_ = GRU(self.config['nHidden'], return_sequences=True,
                                   dropout=self.config['dropout'],
                                   recurrent_dropout=self.config['recurrent_dropout'],
                                   trainable=True)

            self._rnn_left_2 = GRU(self.config['nHidden'], return_sequences=True,
                                   dropout=self.config['dropout'],
                                   recurrent_dropout=self.config['recurrent_dropout'],
                                   trainable=True)
            self._rnn_right_2 = GRU(self.config['nHidden'], return_sequences=True,
                                    dropout=self.config['dropout'],
                                    recurrent_dropout=self.config['recurrent_dropout'],
                                    trainable=True)

        self.stitch_layer = CrossStitch()
        if self.config['n_layers'] == 2:
            self.stitch_layer2 = CrossStitch()

        aux_inps, aux_outs = self.getAuxM()
        prim_inps, prim_outs = self.getPrimM()

        self.model = Model(prim_inps + aux_inps, prim_outs + aux_outs)
        Trainer.build_model(self)
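A note on CrossStitch, which this example (and Examples #10, #13, #15, #16, #17) instantiates but whose source is not shown on this page: cross-stitch units (Misra et al., 2016) linearly recombine the activations of parallel task networks through a small learnable mixing matrix. The sketch below is a guess at the interface, not the repository's implementation; it takes a list of same-shaped task tensors and returns the mixed list in the same order, matching how the examples unpack stitch_layer([...]).

from keras.layers import Layer


class CrossStitchSketch(Layer):  # hypothetical; not the repo's CrossStitch
    """Mix a list of same-shaped task tensors with a learnable
    n_tasks x n_tasks matrix, initialized to the identity so training
    starts from fully task-specific (unmixed) activations."""

    def build(self, input_shape):
        n = len(input_shape)  # one shape per task input
        self.kernel = self.add_weight(name='cross_stitch',
                                      shape=(n, n),
                                      initializer='identity',
                                      trainable=True)
        super(CrossStitchSketch, self).build(input_shape)

    def call(self, inputs):
        # out_i = sum_j kernel[i, j] * in_j
        outputs = []
        for i in range(len(inputs)):
            mixed = self.kernel[i, 0] * inputs[0]
            for j in range(1, len(inputs)):
                mixed = mixed + self.kernel[i, j] * inputs[j]
            outputs.append(mixed)
        return outputs

    def compute_output_shape(self, input_shape):
        return input_shape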
Example #2
    def build_model(self):
        self.get_glove()
        self.rnn_left_ = GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True)
        self.rnn_right_ = GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True)

        if self.config['n_layers'] == 2:
            self.rnn_left_2 = GRU(
                self.config['nHidden'],
                return_sequences=True,
                dropout=self.config['dropout'],
                recurrent_dropout=self.config['recurrent_dropout'],
                trainable=True)
            self.rnn_right_2 = GRU(
                self.config['nHidden'],
                return_sequences=True,
                dropout=self.config['dropout'],
                recurrent_dropout=self.config['recurrent_dropout'],
                trainable=True)

        aux_inps, aux_outs = self.getAuxM()
        prim_inps, prim_outs = self.getPrimM()
        self.model = Model(prim_inps + aux_inps, prim_outs + aux_outs)
        Trainer.build_model(self)
Example #3
    def build_model(self):

        config = self.config
        embed = Embedding(self.config['vocab_size'],
                          self.config['embed_dim'],
                          mask_zero=True)

        if self.config['mode'] == 'share':
            rnn_hi = (LSTM(self.config['nHidden'], return_sequences=True))
            rnn_enhi = (LSTM(self.config['nHidden'], return_sequences=True))
        elif self.config['mode'] == 'concat':
            rnn_en = (LSTM(self.config['nHidden'], return_sequences=True))
            rnn_hi = (LSTM(self.config['nHidden'], return_sequences=True))
            rnn_enhi = (LSTM(self.config['nHidden'], return_sequences=True))

        # hi

        inp_hi = Input((self.config['sent_len'], ))
        x = embed(inp_hi)
        # both modes run the Hindi input through rnn_hi
        if self.config['mode'] in ('share', 'concat'):
            x = rnn_hi(x)
        out_hi = TimeDistributed(
            Dense(config['n_class_hi'], activation='softmax'))(x)

        # en

        inp_en = Input((self.config['sent_len'], ))
        x = embed(inp_en)
        if self.config['mode'] == 'share':
            x = rnn_hi(x)
        if self.config['mode'] == 'concat':
            # the English task trains its own RNN in concat mode
            # (mirrors the es/en variant in Example #6)
            x = rnn_en(x)
        out_en = TimeDistributed(
            Dense(config['n_class_en'], activation='softmax'))(x)

        inp_enhi = Input((self.config['sent_len'], ))
        x = embed(inp_enhi)

        if self.config['mode'] == 'share':
            x_en = rnn_hi(x)
            x = x_en
            x = rnn_enhi(x)
        if self.config['mode'] == 'concat':
            x_en = rnn_en(x)
            x_hi = rnn_hi(x)
            x = Concatenate(-1)([x_en, x_hi])
            x = rnn_enhi(x)

        out_enhi = TimeDistributed(
            Dense(self.config['n_class_enhi'], activation='softmax'))(x)

        self.model = Model([inp_hi, inp_en, inp_enhi],
                           [out_hi, out_en, out_enhi])
        Trainer.build_model(self)
Example #4
    def build_model(self):

        self.get_glove()

        self.aux_w_l, self.aux_w_r = self.get_document_level_rnn_weights()
        self.rnn_left_aux_cell__ = GRUCell(64, trainable=False)
        self.rnn_right_aux_cell__ = GRUCell(64, trainable=False)

        aux_inps, aux_outs = self.getAuxM()
        prim_inps, prim_outs = self.getPrimM()

        self.model = Model(prim_inps + aux_inps, prim_outs + aux_outs)
        Trainer.build_model(self)
Example #5
    def build_model(self):

        config = self.config
        embed = Embedding(self.config['vocab_size'], self.config['embed_dim'])

        rnn_es = LSTM(self.config['nHidden'])
        rnn_en = LSTM(self.config['nHidden'])
        rnn_enes = LSTM(self.config['nHidden'])
        if self.config['mode'] == 'parallel':
            rnn_shared = LSTM(self.config['nHidden'], return_sequences=False)
        else:
            rnn_shared = LSTM(self.config['nHidden'], return_sequences=True)

        inp_en = Input((self.config['sent_len'], ))
        x_en = embed(inp_en)
        if self.config['mode'] == 'parallel':

            x_en_1 = rnn_en(x_en)
            x_en_2 = rnn_shared(x_en)
            x_en = Concatenate()([x_en_1, x_en_2])
        else:
            x_en = rnn_en(Concatenate()([x_en, rnn_shared(x_en)]))

        inp_es = Input((self.config['sent_len'], ))
        x_es = embed(inp_es)
        if self.config['mode'] == 'parallel':
            x_es_1 = rnn_es(x_es)
            x_es_2 = rnn_shared(x_es)
            x_es = Concatenate()([x_es_1, x_es_2])
        else:
            x_es = rnn_es(Concatenate()([x_es, rnn_shared(x_es)]))

        inp_enes = Input((self.config['sent_len'], ))
        x_enes = embed(inp_enes)
        if self.config['mode'] == 'parallel':
            x_enes_1 = rnn_enes(x_enes)
            x_enes_2 = rnn_shared(x_enes)
            x_enes = Concatenate()([x_enes_1, x_enes_2])
        else:
            x_enes = rnn_enes(Concatenate()([x_enes, rnn_shared(x_enes)]))

        out_enes = (Dense(self.config['n_class_enes'],
                          activation='softmax'))(x_enes)
        out_es = (Dense(self.config['n_class_es'], activation='softmax'))(x_es)
        out_en = (Dense(self.config['n_class_en'], activation='softmax'))(x_en)

        self.model = Model([inp_en, inp_es, inp_enes],
                           [out_en, out_es, out_enes])
        Trainer.build_model(self)
Example #6
    def build_model(self):

        config = self.config
        emb = Embedding(self.config['vocab_size'], self.config['embed_dim'])

        rnn_en = LSTM(self.config['nHidden'], return_sequences=True)
        if self.config['mode'] == 'concat':
            rnn_es = LSTM(self.config['nHidden'], return_sequences=True)
        rnn_enes = LSTM(self.config['nHidden'])

        inp_en = Input((self.config['sent_len'], ))
        x = emb(inp_en)
        # both modes run the English input through rnn_en
        x = rnn_en(x)
        x = Lambda(lambda x: x[:, -1])(x)
        out_en = (Dense(self.config['n_class_en'], activation='softmax'))(x)

        inp_es = Input((self.config['sent_len'], ))
        x = emb(inp_es)
        if self.config['mode'] == 'share':
            x = rnn_en(x)
            x = Lambda(lambda x: x[:, -1])(x)
        else:
            x = rnn_es(x)
            x = Lambda(lambda x: x[:, -1])(x)
        out_es = (Dense(self.config['n_class_es'], activation='softmax'))(x)

        inp_enes = Input((self.config['sent_len'], ))
        x = emb(inp_enes)
        if self.config['mode'] == 'share':
            x_es1 = rnn_en(x)
            x = x_es1
            x = rnn_enes(x)
        else:
            x_es1 = rnn_es(x)
            x_en1 = rnn_en(x)
            x = Concatenate(-1)([x_es1, x_en1])
            x = rnn_enes(x)

        out_enes = (Dense(self.config['n_class_enes'],
                          activation='softmax'))(x)

        self.model = Model([inp_en, inp_es, inp_enes],
                           [out_en, out_es, out_enes])
        Trainer.build_model(self)
Example #7
    def build_model(self):

        config = self.config
        embed = Embedding(self.config['vocab_size'],
                          self.config['embed_dim'],
                          mask_zero=True)

        rnn_hi = LSTM(self.config['nHidden'], return_sequences=True)
        rnn_en = LSTM(self.config['nHidden'], return_sequences=True)

        # en

        inp_en = Input((self.config['sent_len'], ))
        x = embed(inp_en)
        x = rnn_en(x)
        out_en = TimeDistributed(
            Dense(config['n_class_en'], activation='softmax'))(x)

        # hi

        inp_hi = Input((self.config['sent_len'], ))
        x = embed(inp_hi)
        x = rnn_hi(x)
        out_hi = TimeDistributed(
            Dense(config['n_class_hi'], activation='softmax'))(x)
        cell_combined = GiretTwoCell(rnn_hi.cell, rnn_en.cell,
                                     self.config['nHidden'])

        inp_enhi = Input((self.config['sent_len'], ))
        x = embed(inp_enhi)

        x_att = Bidirectional(LSTM(32, return_sequences=True))(x)
        x_att = TimeDistributed(Dense(3, activation='softmax'))(x_att)
        x_att = Lambda(lambda x: x[..., 1:])(x_att)  # keep 2 of the 3 attention channels

        x = Concatenate(-1)([x_att, x])

        x = RNN(cell_combined, return_sequences=True)(x)
        out_enhi = TimeDistributed(
            Dense(self.config['n_class_enhi'], activation='softmax'))(x)

        self.model = Model([inp_hi, inp_en, inp_enhi],
                           [out_hi, out_en, out_enhi])
        Trainer.build_model(self)
Example #8
    def build_model(self):

        config = self.config
        emb = Embedding(self.config['vocab_size'], self.config['embed_dim'])
        rnn_en = LSTM(self.config['nHidden'])
        rnn_es = LSTM(self.config['nHidden'])

        inp_en = Input((self.config['sent_len'], ))
        x_en = emb(inp_en)
        x_en = rnn_en(x_en)
        out_en = Dense(self.config['n_class_en'], activation='softmax')(x_en)

        inp_es = Input((self.config['sent_len'], ))
        x_es = emb(inp_es)
        x_es = rnn_es(x_es)
        out_es = Dense(self.config['n_class_es'], activation='softmax')(x_es)

        cell_en = rnn_en.cell
        cell_es = rnn_es.cell

        cell_combined = GiretTwoCell(cell_en, cell_es, self.config['nHidden'])

        inp_enes = Input((self.config['sent_len'], ))
        x = emb(inp_enes)

        x_att = Bidirectional(LSTM(32, return_sequences=True))(x)

        bider_last = Lambda(lambda x: x[:, -1, :])(x_att)

        x_att = TimeDistributed(Dense(3, activation='softmax'))(x_att)
        x_att = Lambda(lambda x: x[..., 1:])(x_att)

        x = Concatenate(-1)([x_att, x])

        x = RNN(cell_combined)(x)

        x = Concatenate()([bider_last, x])

        out_enes = Dense(self.config['n_class_enes'], activation='softmax')(x)

        self.model = Model([inp_en, inp_es, inp_enes],
                           [out_en, out_es, out_enes])
        Trainer.build_model(self)
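GiretTwoCell, used in Example #7 and here, is another repository-specific class whose source is not on this page. From its call sites, each timestep's input is the embedding with two attention scores concatenated in front (the x_att branch), and the cell wraps the two pretrained monolingual cells. A hedged sketch of a cell with that interface, assuming both wrapped cells are single-state cells such as GRUCell (and glossing over Keras-version details of trainable-weight tracking), might look like:

import keras.backend as K
from keras.layers import Layer


class GatedTwoCellSketch(Layer):  # hypothetical; not the actual GiretTwoCell
    """Per timestep: split off the first two input features as blend
    weights, run the remaining features through two wrapped cells, and
    mix the two outputs with those weights."""

    def __init__(self, cell_a, cell_b, units, **kwargs):
        super(GatedTwoCellSketch, self).__init__(**kwargs)
        self.cell_a = cell_a
        self.cell_b = cell_b
        self.units = units
        self.state_size = [units, units]  # one state per wrapped cell

    def build(self, input_shape):
        # the wrapped cells see the input minus the two gating features
        feat_shape = (input_shape[0], input_shape[-1] - 2)
        if not self.cell_a.built:
            self.cell_a.build(feat_shape)
        if not self.cell_b.built:
            self.cell_b.build(feat_shape)
        self.built = True

    def call(self, inputs, states):
        g = inputs[:, :2]  # the two scores prepended by Concatenate
        x = inputs[:, 2:]  # the embedded token features
        out_a, [s_a] = self.cell_a.call(x, [states[0]])
        out_b, [s_b] = self.cell_b.call(x, [states[1]])
        g_a = K.expand_dims(g[:, 0], -1)
        g_b = K.expand_dims(g[:, 1], -1)
        return g_a * out_a + g_b * out_b, [s_a, s_b]

Wrapped as RNN(GatedTwoCellSketch(rnn_en.cell, rnn_es.cell, nHidden))(x), this mirrors the RNN(cell_combined)(x) call in the example above.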
Example #9
    def build_model(self):

        config = self.config
        embed = Embedding(self.config['vocab_size'],
                          self.config['embed_dim'],
                          mask_zero=True)

        rnn = LSTM(self.config['nHidden'], return_sequences=True)
        if config['n_layers'] == 2:
            rnn2 = LSTM(self.config['nHidden'], return_sequences=True)

        # hi

        inp_hi = Input((self.config['sent_len'], ))
        x = embed(inp_hi)
        if config['n_layers'] == 2:
            x = rnn2(rnn(x))
        else:
            x = rnn(x)
        out_hi = TimeDistributed(
            Dense(config['n_class_hi'], activation='softmax'))(x)

        # en

        inp_en = Input((self.config['sent_len'], ))
        x = embed(inp_en)
        if config['n_layers'] == 2:
            x = rnn2(rnn(x))
        else:
            x = rnn(x)
        out_en = TimeDistributed(
            Dense(config['n_class_en'], activation='softmax'))(x)

        inp_enhi = Input((self.config['sent_len'], ))
        x = embed(inp_enhi)
        if config['n_layers'] == 2:
            x = rnn2(rnn(x))
        else:
            x = rnn(x)
        out_enhi = TimeDistributed(
            Dense(self.config['n_class_enhi'], activation='softmax'))(x)

        self.model = Model([inp_hi, inp_en, inp_enhi],
                           [out_hi, out_en, out_enhi])
        Trainer.build_model(self)
Example #10
    def build_model(self):

        assert self.config['sharing_scheme'] in ['parallel', 'stacked']

        self.get_glove()

        self.rnn_left_ = GRU(self.config['nHidden'], return_sequences=True,
                             dropout=self.config['dropout'],
                             recurrent_dropout=self.config['recurrent_dropout'],
                             trainable=True)
        self.rnn_right_ = GRU(self.config['nHidden'], return_sequences=True,
                              dropout=self.config['dropout'],
                              recurrent_dropout=self.config['recurrent_dropout'],
                              trainable=True)

        self.rnn_left_2 = GRU(self.config['nHidden'], return_sequences=True,
                              dropout=self.config['dropout'],
                              recurrent_dropout=self.config['recurrent_dropout'],
                              trainable=True)
        self.rnn_right_2 = GRU(self.config['nHidden'], return_sequences=True,
                               dropout=self.config['dropout'],
                               recurrent_dropout=self.config['recurrent_dropout'],
                               trainable=True)

        self.rnn_left_shared = GRU(self.config['nHidden'], return_sequences=True,
                                   dropout=self.config['dropout'],
                                   recurrent_dropout=self.config['recurrent_dropout'],
                                   trainable=True)
        self.rnn_right_shared = GRU(self.config['nHidden'], return_sequences=True,
                                    dropout=self.config['dropout'],
                                    recurrent_dropout=self.config['recurrent_dropout'],
                                    trainable=True)

        aux_inps, aux_outs = self.getAuxM()
        prim_inps, prim_outs = self.getPrimM()

        self.model = Model(prim_inps + aux_inps, prim_outs + aux_outs)
        Trainer.build_model(self)
Example #11
    def build_model(self):

        config = self.config
        emb = Embedding(self.config['vocab_size'], self.config['embed_dim'])

        if self.config['n_layers'] == 2:
            rnn = LSTM(self.config['nHidden'], return_sequences=True)
            rnn2 = LSTM(self.config['nHidden'])
        else:
            rnn = LSTM(self.config['nHidden'])

        inp_en = Input((self.config['sent_len'], ))
        x = emb(inp_en)
        if self.config['n_layers'] == 2:
            x = rnn2(rnn(x))
        else:
            x = rnn(x)
        out_en = Dense(self.config['n_class_en'], activation='softmax')(x)

        inp_es = Input((self.config['sent_len'], ))
        x = emb(inp_es)
        if self.config['n_layers'] == 2:
            x = rnn2(rnn(x))
        else:
            x = rnn(x)
        out_es = Dense(self.config['n_class_es'], activation='softmax')(x)

        inp_enes = Input((self.config['sent_len'], ))
        x = emb(inp_enes)
        if self.config['n_layers'] == 2:
            x = rnn2(rnn(x))
        else:
            x = rnn(x)
        out_enes = Dense(self.config['n_class_enes'], activation='softmax')(x)

        self.model = Model([inp_en, inp_es, inp_enes],
                           [out_en, out_es, out_enes])
        Trainer.build_model(self)
Example #12
    def build_model(self):

        gloveSize = 100
        vocabSize = 1193515

        maxSentenceL = self.config['maxSentenceL']
        rnn_type = self.config['rnn_type']
        nHidden = self.config['nHidden']
        opt_name = 'adam'
        dropout = self.config['dropout']
        recurrent_dropout = self.config['recurrent_dropout']
        lr = -1  # -1 keeps the Keras default learning rate

        if glov_gob[0] is None:
            gf = h5py.File("./data/glovePrepped.h5", "r")
            gloveVecs = np.array(gf['twitter_100_vecs'])
            glov_gob[0] = gloveVecs

        gloveVecs = glov_gob[0]

        embed = (Embedding(vocabSize,
                           gloveSize,
                           weights=[gloveVecs],
                           trainable=False))


        def getM():

            # choose the recurrent layer type
            if rnn_type == 'lstm':
                rnn = LSTM
            elif rnn_type == 'gru':
                rnn = GRU

            rnn_left_ = rnn(nHidden,
                            return_sequences=True,
                            dropout=dropout,
                            recurrent_dropout=recurrent_dropout,
                            trainable=True)
            rnn_right_ = rnn(nHidden,
                             return_sequences=True,
                             dropout=dropout,
                             recurrent_dropout=recurrent_dropout,
                             trainable=True)

            def getPrimModel():
                # returns a submodel with the shared weights and both RNNs

                left_i = Input((maxSentenceL, ))
                right_i = Input((maxSentenceL, ))

                left_x = left_i
                right_x = right_i

                left_x = embed(left_x)
                right_x = embed(right_x)

                left_x_1 = rnn_left_(left_x)
                right_x_1 = rnn_right_(right_x)

                # take the last timestep, since return_sequences=True
                left_x_1 = Lambda(lambda x: x[:, -1, :])(left_x_1)
                right_x_1 = Lambda(lambda x: x[:, -1, :])(right_x_1)

                x = Concatenate()([left_x_1, right_x_1])
                x = Dense(3)(x)
                out = Activation('softmax')(x)

                return left_i, right_i, out

            left_i_prim, right_i_prim, out_prim = getPrimModel()
            inp_aux = Input((maxSentenceL, ))  # dummy input; not actually used

            m = Model([left_i_prim, right_i_prim, inp_aux], [out_prim])

            if lr > 0:
                opt = getattr(keras.optimizers, opt_name)(lr=lr)
            else:
                opt = getattr(keras.optimizers, opt_name)()

            m.compile(opt, 'categorical_crossentropy', metrics=['accuracy'])

            return m

        self.model = getM()
        Trainer.build_model(self)
Example #13
    def build_model(self):

        self.get_glove()

        self.rnn_left_ = GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True)
        self.rnn_right_ = GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True)

        self.rnn_left_2 = (GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True))
        self.rnn_right_2 = (GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True))

        self._rnn_left_ = GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True)
        self._rnn_right_ = GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True)

        self._rnn_left_2 = (GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True))
        self._rnn_right_2 = (GRU(
            self.config['nHidden'],
            return_sequences=True,
            dropout=self.config['dropout'],
            recurrent_dropout=self.config['recurrent_dropout'],
            trainable=True))

        self.stitch_layer = CrossStitch()
        self.osel = OutPutSelector()

        aux_inps, aux_outs = self.getAuxM()
        prim_inps, prim_outs = self.getPrimM()

        self.model = Model(prim_inps + aux_inps, prim_outs + aux_outs)
        Trainer.build_model(self)
Example #14
    def build_model(self):

        gloveSize = 100
        vocabSize = 1193515

        maxSentenceL = self.config['maxSentenceL']
        rnn_type = self.config['rnn_type']
        nHidden = self.config['nHidden']
        opt_name = 'adam'
        dropout = self.config['dropout']
        recurrent_dropout = self.config['recurrent_dropout']
        lr = -1  # -1 keeps the Keras default learning rate

        # build the pretrained model and load its weights

        if rnn_type == 'lstm':
            rnn = LSTM
        elif rnn_type == 'gru':
            rnn = GRU

        inp = Input((maxSentenceL, ))  # left
        inp_x = inp

        embed = (Embedding(vocabSize, gloveSize, trainable=False))

        inp_x = embed(inp_x)

        inp_rev = Lambda(lambda x: K.reverse(x, axes=1))(inp_x)  # right

        rnn_left = rnn(nHidden,
                       return_sequences=True,
                       dropout=dropout,
                       recurrent_dropout=recurrent_dropout)
        rnn_right = rnn(nHidden,
                        return_sequences=True,
                        dropout=dropout,
                        recurrent_dropout=recurrent_dropout)

        left_x = rnn_left(inp_x)
        right_x = rnn_right(inp_rev)
        right_x = Lambda(lambda x: K.reverse(x, axes=1))(right_x)

        c_x = Concatenate(axis=-1)([left_x, right_x])

        c_x = GlobalAvgPool1D()(c_x)
        x = Dense(3)(c_x)
        out = Activation('softmax')(x)

        m = Model(inp, out)
        m.load_weights("./data/lr_lstm_glove_3.2ft_2_ep0.h5")

        def getM():

            # the pretrained weights are copied onto the new RNNs inside the submodels below
            if rnn_type == 'lstm':
                rnn = LSTM
            elif rnn_type == 'gru':
                rnn = GRU

            rnn_left_ = rnn(nHidden,
                            return_sequences=True,
                            dropout=dropout,
                            recurrent_dropout=recurrent_dropout,
                            trainable=False)
            rnn_right_ = rnn(nHidden,
                             return_sequences=True,
                             dropout=dropout,
                             recurrent_dropout=recurrent_dropout,
                             trainable=False)

            rnn_left_2 = rnn(nHidden,
                             return_sequences=True,
                             dropout=dropout,
                             recurrent_dropout=recurrent_dropout,
                             trainable=True)
            rnn_right_2 = rnn(nHidden,
                              return_sequences=True,
                              dropout=dropout,
                              recurrent_dropout=recurrent_dropout,
                              trainable=True)

            def getPrimModel():
                # returns a submodel with the shared weights and both RNNs

                left_i = Input((maxSentenceL, ))
                right_i = Input((maxSentenceL, ))

                left_x = left_i
                right_x = right_i

                left_x = embed(left_x)
                right_x = embed(right_x)

                left_x_1 = rnn_left_(left_x)
                right_x_1 = rnn_right_(right_x)

                rnn_left_.set_weights(rnn_left.get_weights())
                rnn_right_.set_weights(rnn_right.get_weights())

                # take the last timestep, since return_sequences=True
                left_x_1 = Lambda(lambda x: x[:, -1, :])(left_x_1)
                right_x_1 = Lambda(lambda x: x[:, -1, :])(right_x_1)
                right_x_1 = Lambda(lambda x: x[:, -1, :])(right_x_1)

                x = Concatenate()([left_x_1, right_x_1])
                x = Dense(3)(x)
                out = Activation('softmax')(x)

                return left_i, right_i, out

            def getAuxModel():

                inp = Input((maxSentenceL, ))  # left
                inp_x = inp
                inp_x = embed(inp_x)
                inp_rev = Lambda(lambda x: K.reverse(x, axes=1))(
                    inp_x)  # right

                left_x = rnn_left_(inp_x)
                right_x = rnn_right_(inp_rev)
                right_x = Lambda(lambda x: K.reverse(x, axes=1))(right_x)

                rnn_left_.set_weights(rnn_left.get_weights())
                rnn_right_.set_weights(rnn_right.get_weights())

                c_x = Concatenate(axis=-1)([left_x, right_x])
                c_x = GlobalAvgPool1D()(c_x)
                x = Dense(3)(c_x)
                out = Activation('softmax')(x)

                return inp, out

            left_i_prim, right_i_prim, out_prim = getPrimModel()
            inp_aux, out_aux = getAuxModel()

            m = Model([left_i_prim, right_i_prim, inp_aux],
                      [out_prim, out_aux])

            if lr > 0:
                opt = getattr(keras.optimizers, opt_name)(lr=lr)
            else:
                opt = getattr(keras.optimizers, opt_name)()

            m.compile(opt, 'categorical_crossentropy', metrics=['accuracy'])

            return m

        self.model = getM()
        Trainer.build_model(self)
Example #15
    def build_model(self):

        config = self.config

        embed = Embedding(self.config['vocab_size'], self.config['embed_dim'])

        if self.config['n_layers'] == 2:
            rnn_es = LSTM(self.config['nHidden'], return_sequences=True)
            rnn_en = LSTM(self.config['nHidden'], return_sequences=True)
            rnn_enes = LSTM(self.config['nHidden'], return_sequences=True)

            rnn_es2 = LSTM(self.config['nHidden'])
            rnn_en2 = LSTM(self.config['nHidden'])
            rnn_enes2 = LSTM(self.config['nHidden'])
        else:
            rnn_es = LSTM(self.config['nHidden'])
            rnn_en = LSTM(self.config['nHidden'])
            rnn_enes = LSTM(self.config['nHidden'])

        stitch_layer = CrossStitch()
        stitch_layer.supports_masking = True

        if self.config['n_layers'] == 2:
            stitch_layer2 = CrossStitch()
            stitch_layer2.supports_masking = True

        def cal_cs1l(inp):
            x = embed(inp)
            x_es = rnn_es(x)

            # en
            x = embed(inp)
            x_en = rnn_en(x)

            x = embed(inp)
            x_enes = rnn_enes(x)

            [x_es, x_en, x_enes] = stitch_layer([x_es, x_en, x_enes])

            return [x_es, x_en, x_enes]

        def cal_cs2l(inp):
            x = embed(inp)
            x_es = rnn_es(x)

            # en
            x = embed(inp)
            x_en = rnn_en(x)

            x = embed(inp)
            x_enes = rnn_enes(x)

            [x_es, x_en, x_enes] = stitch_layer([x_es, x_en, x_enes])

            x_es = rnn_es2(x_es)
            x_en = rnn_en2(x_en)
            x_enes = rnn_enes2(x_enes)

            [x_es, x_en, x_enes] = stitch_layer2([x_es, x_en, x_enes])

            return [x_es, x_en, x_enes]

        if self.config['n_layers'] == 2:
            cal_cs = cal_cs2l
        else:
            cal_cs = cal_cs1l

        inp_en = Input((self.config['sent_len'], ))
        inp_es = Input((self.config['sent_len'], ))
        inp_enes = Input((self.config['sent_len'], ))

        [x_es, _, _] = cal_cs(inp_es)
        [_, x_en, _] = cal_cs(inp_en)
        [_, _, x_enes] = cal_cs(inp_enes)

        out_enes = Dense(self.config['n_class_enes'], activation='softmax')(x_enes)
        out_es = Dense(self.config['n_class_es'], activation='softmax')(x_es)
        out_en = Dense(self.config['n_class_en'], activation='softmax')(x_en)

        self.model = Model([inp_en, inp_es, inp_enes],
                           [out_en, out_es, out_enes])
        Trainer.build_model(self)
Example #16
    def build_model(self):
        config = self.config
        embed = Embedding(self.config['vocab_size'],
                          self.config['embed_dim'],
                          mask_zero=True)

        rnn_hi = (LSTM(self.config['nHidden'], return_sequences=True))
        rnn_en = (LSTM(self.config['nHidden'], return_sequences=True))
        rnn_enhi = (LSTM(self.config['nHidden'], return_sequences=True))

        rnn_hi2 = (LSTM(self.config['nHidden'], return_sequences=True))
        rnn_en2 = (LSTM(self.config['nHidden'], return_sequences=True))
        rnn_enhi2 = (LSTM(self.config['nHidden'], return_sequences=True))

        stitch_layer = CrossStitch()
        stitch_layer.supports_masking = True

        osel = OutPutSelector()
        osel.supports_masking = True

        def desectOut(xx):
            # split the feature axis in half; use int() and floor division
            # so the slice indices are Python ints
            l = int(xx.shape[-1])
            return Lambda(lambda x: [x[..., :l // 2], x[..., l // 2:]])(xx)

        def cal_cs(inp):
            x = embed(inp)
            x_hi = rnn_hi(x)

            # en
            x = embed(inp)
            x_en = rnn_en(x)

            x = embed(inp)
            x_enhi = rnn_enhi(x)

            [x_hi1, x_hi2] = desectOut(x_hi)
            [x_en1, x_en2] = desectOut(x_en)
            [x_enhi1, x_enhi2] = desectOut(x_enhi)

            [x_hi1, x_en1, x_enhi1, x_hi2, x_en2, x_enhi2
             ] = stitch_layer([x_hi1, x_en1, x_enhi1, x_hi2, x_en2, x_enhi2])

            x_hi = Concatenate()([x_hi1, x_hi2])
            x_en = Concatenate()([x_en1, x_en2])
            x_enhi = Concatenate()([x_enhi1, x_enhi2])

            x_hi_p = x_hi
            x_en_p = x_en
            x_enhi_p = x_enhi

            x_hi = rnn_hi2(x_hi)
            x_en = rnn_en2(x_en)
            x_enhi = rnn_enhi2(x_enhi)

            [x_hi1, x_hi2] = desectOut(x_hi)
            [x_en1, x_en2] = desectOut(x_en)
            [x_enhi1, x_enhi2] = desectOut(x_enhi)

            [x_hi1, x_en1, x_enhi1, x_hi2, x_en2, x_enhi2
             ] = stitch_layer([x_hi1, x_en1, x_enhi1, x_hi2, x_en2, x_enhi2])

            x_hi = Concatenate()([x_hi1, x_hi2])
            x_en = Concatenate()([x_en1, x_en2])
            x_enhi = Concatenate()([x_enhi1, x_enhi2])

            x_hi = osel([x_hi, x_hi_p])
            x_en = osel([x_en, x_en_p])
            x_enhi = osel([x_enhi, x_enhi_p])

            return [x_hi, x_en, x_enhi]

        # hi
        inp_hi = Input((self.config['sent_len'], ))
        # en
        inp_en = Input((self.config['sent_len'], ))

        inp_enhi = Input((self.config['sent_len'], ))

        [x_hi, _, _] = cal_cs(inp_hi)
        [_, x_en, _] = cal_cs(inp_en)
        [_, _, x_enhi] = cal_cs(inp_enhi)

        out_enhi = TimeDistributed(
            Dense(self.config['n_class_enhi'], activation='softmax'))(x_enhi)
        out_hi = TimeDistributed(
            Dense(config['n_class_hi'], activation='softmax'))(x_hi)

        out_en = TimeDistributed(
            Dense(config['n_class_en'], activation='softmax'))(x_en)

        self.model = Model([inp_hi, inp_en, inp_enhi],
                           [out_hi, out_en, out_enhi])
        Trainer.build_model(self)
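OutPutSelector (also instantiated in Example #13) is likewise repository-specific. Its call sites here, e.g. osel([x_hi, x_hi_p]), pass the second-layer output together with the first-layer output, so it plausibly blends or selects between the two representations per task. A minimal hedged guess, using a single learnable sigmoid gate, is:

import keras.backend as K
from keras.layers import Layer


class OutputSelectorSketch(Layer):  # hypothetical; not the repo's OutPutSelector
    """Blend two same-shaped tensors with one learnable scalar gate:
    g * deep + (1 - g) * skip, where g = sigmoid(w) starts at 0.5."""

    def build(self, input_shape):
        self.gate_logit = self.add_weight(name='gate_logit',
                                          shape=(1,),
                                          initializer='zeros',
                                          trainable=True)
        super(OutputSelectorSketch, self).build(input_shape)

    def call(self, inputs):
        deep, skip = inputs
        g = K.sigmoid(self.gate_logit)
        return g * deep + (1.0 - g) * skip

    def compute_output_shape(self, input_shape):
        return input_shape[0]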
Example #17
    def build_model(self):

        config = self.config
        embed = Embedding(self.config['vocab_size'],
                          self.config['embed_dim'],
                          mask_zero=True)

        rnn_hi = (LSTM(self.config['nHidden'], return_sequences=True))
        rnn_en = (LSTM(self.config['nHidden'], return_sequences=True))
        rnn_enhi = (LSTM(self.config['nHidden'], return_sequences=True))

        if config['n_layers'] == 2:
            rnn_hi2 = (LSTM(self.config['nHidden'], return_sequences=True))
            rnn_en2 = (LSTM(self.config['nHidden'], return_sequences=True))
            rnn_enhi2 = (LSTM(self.config['nHidden'], return_sequences=True))

        stitch_layer = CrossStitch()
        stitch_layer.supports_masking = True

        if config['n_layers'] == 2:
            stitch_layer2 = CrossStitch()
            stitch_layer2.supports_masking = True

        def cal_cs_1l(inp):
            x = embed(inp)
            x_hi = rnn_hi(x)

            # en
            x = embed(inp)
            x_en = rnn_en(x)

            x = embed(inp)
            x_enhi = rnn_enhi(x)

            [x_hi, x_en, x_enhi] = stitch_layer([x_hi, x_en, x_enhi])

            return [x_hi, x_en, x_enhi]

        def cal_cs_2l(inp):
            x = embed(inp)
            x_hi = rnn_hi(x)

            # en
            x = embed(inp)
            x_en = rnn_en(x)

            x = embed(inp)
            x_enhi = rnn_enhi(x)

            [x_hi, x_en, x_enhi] = stitch_layer([x_hi, x_en, x_enhi])

            x_hi = rnn_hi2(x_hi)
            x_en = rnn_en2(x_en)
            x_enhi = rnn_enhi2(x_enhi)

            [x_hi, x_en, x_enhi] = stitch_layer2([x_hi, x_en, x_enhi])

            return [x_hi, x_en, x_enhi]

        if config['n_layers'] == 1:
            cal_cs = cal_cs_1l
        if config['n_layers'] == 2:
            cal_cs = cal_cs_2l

        # hi
        inp_hi = Input((self.config['sent_len'], ))
        # en
        inp_en = Input((self.config['sent_len'], ))

        inp_enhi = Input((self.config['sent_len'], ))

        [x_hi, _, _] = cal_cs(inp_hi)
        [_, x_en, _] = cal_cs(inp_en)
        [_, _, x_enhi] = cal_cs(inp_enhi)

        out_enhi = TimeDistributed(
            Dense(self.config['n_class_enhi'], activation='softmax'))(x_enhi)
        out_hi = TimeDistributed(
            Dense(config['n_class_hi'], activation='softmax'))(x_hi)

        out_en = TimeDistributed(
            Dense(config['n_class_en'], activation='softmax'))(x_en)

        self.model = Model([inp_hi, inp_en, inp_enhi],
                           [out_hi, out_en, out_enhi])
        Trainer.build_model(self)