Example #1
0
    def __init__(self,
                 embed_dim=128,
                 vocab_size=vocab_size,
                 learning_rate=0.001):
        """Build the TF graph: input placeholders, the model, and a session.

        Args:
            embed_dim: embedding dimension.
            vocab_size: vocabulary size (defaults to the module-level value).
            learning_rate: optimizer learning rate.
        """
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.learning_rate = learning_rate

        # Pretrained embedding matrix, cast to float32 for TF ops.
        self.embedding = buildEmbedding().astype("float32")
        print(self.embedding.shape)

        # Fixed sequence lengths expected by the feed pipeline.
        self.query_len = 30
        self.passage_len = 301
        self.ckpt_path = get_weight_path(self, base_weight_path)

        # Start from a clean graph so repeated construction doesn't collide.
        tf.reset_default_graph()

        def int_holder(length):
            # int32 placeholder of shape [batch] or [batch, length].
            shape = [None] if length is None else [None, length]
            return tf.placeholder(dtype=tf.int32, shape=shape)

        self.query_in = int_holder(self.query_len)
        self.passage_in = int_holder(self.passage_len)
        self.overlap_in = int_holder(self.passage_len)
        self.starts_in = int_holder(None)
        self.ends_in = int_holder(None)
        self.score_in = tf.placeholder(dtype=tf.float32, shape=[None])

        self.get_model(self.query_in, self.passage_in, self.overlap_in,
                       self.starts_in, self.ends_in, self.score_in)

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
Example #2
0
    def __init__(self):
        """Assemble a CNN tagger producing one sigmoid score per question position."""
        self.emb_dim = config.emb_dim
        self.weight_path = get_weight_path(self, config.base_weight_path)
        self.question_len = question_len

        # Model graph: embedding -> two conv/pool stages -> dense head.
        inp = Input((question_len, ))

        # Embedding layer seeded with the pretrained matrix.
        features = Embedding(input_dim=len(vocab),
                             output_dim=self.emb_dim,
                             weights=[embedding],
                             mask_zero=False)(inp)

        # Two identical convolution + max-pooling stages.
        for _ in range(2):
            features = Conv1D(200, 3, padding='same',
                              activation="tanh")(features)
            features = MaxPooling1D(2)(features)

        features = Flatten()(features)
        features = Dropout(0.5)(features)

        # One sigmoid unit per question position, L2-regularized.
        out = Dense(self.question_len,
                    activation='sigmoid',
                    kernel_regularizer=l2(0.001))(features)

        net = Model(inp, out)
        net.compile(optimizer='adadelta',
                    loss=seq_binary_entropy_loss,
                    metrics=['accuracy'])
        self.model = net
Example #3
0
    def __init__(self, samples_num=1000):
        """Build a shared-embedding BiLSTM+CNN DSSM over questions and predicates.

        Creates three functional models over shared layers:
          * ``self.model_1``   -- question encoder (int sequence -> 128-d vector).
          * ``self.model_2``   -- predicate encoder (int sequence -> 128-d vector).
          * ``self.sim_model`` -- cosine similarity between the two encodings.

        Args:
            samples_num: kept for interface compatibility; unused here.
        """
        self.weight_path = get_weight_path(self, config.base_weight_path)
        self.emb_dim = config.emb_dim
        self.question_len = 50
        # NOTE(review): self.predicate_len says 20, but the predicate input
        # below is built with shape=(5,) -- confirm which length is intended.
        self.predicate_len = 20

        # Shared layers: embedding, dropout, and max-over-time pooling.
        # (An unused sum-pooling Lambda was removed.)
        input_1 = Input(shape=(self.question_len, ), dtype='int32')
        emb = Embedding(input_dim=base_dssm.size,
                        output_dim=128,
                        weights=[embedding])
        dropout = Dropout(0.25)
        max_pool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                          output_shape=lambda x: (x[0], x[2]))

        # Question branch: forward/backward LSTM -> CNN -> max pool -> add.
        emb_1 = emb(input_1)
        lstm_f_q = LSTM(128, return_sequences=True, dropout=0.2)(emb_1)
        lstm_b_q = LSTM(128, return_sequences=True, dropout=0.2,
                        go_backwards=True)(emb_1)
        question_cnn_f = Conv1D(128, 3, padding='same')(lstm_f_q)
        question_cnn_b = Conv1D(128, 3, padding='same')(lstm_b_q)
        question_pool_f = max_pool(question_cnn_f)
        question_pool_b = max_pool(question_cnn_b)
        question_drop_f = dropout(question_pool_f)
        question_drop_b = dropout(question_pool_b)
        question_drop = add([question_drop_f, question_drop_b])
        question_out = Dense(128)(question_drop)

        # Predicate branch: same architecture on the shared embedding.
        input_2 = Input(shape=(5, ), dtype='int32')
        emb_2 = emb(input_2)
        lstm_f_p = LSTM(128, return_sequences=True, dropout=0.2)(emb_2)
        lstm_b_p = LSTM(128, return_sequences=True, dropout=0.2,
                        go_backwards=True)(emb_2)
        predicate_cnn_f = Conv1D(128, 3, padding='same')(lstm_f_p)
        predicate_cnn_b = Conv1D(128, 3, padding='same')(lstm_b_p)
        predicate_pool_f = max_pool(predicate_cnn_f)
        predicate_pool_b = max_pool(predicate_cnn_b)
        predicate_drop_f = dropout(predicate_pool_f)
        predicate_drop_b = dropout(predicate_pool_b)
        predicate_drop = add([predicate_drop_f, predicate_drop_b])
        predicate_out = Dense(128)(predicate_drop)

        # Cosine-similarity head over the two encoders.
        sim = Lambda(lambda x: base_dssm.cosine(x[0], x[1]),
                     output_shape=lambda x: (None, 1))([question_out,
                                                        predicate_out])
        sim_model = Model([input_1, input_2], sim)
        model_1 = Model(input_1, question_out)
        model_1.compile(optimizer='adam', loss='mse')
        model_2 = Model(input_2, predicate_out)
        model_2.compile(optimizer='adam', loss='mse')
        self.model_1 = model_1
        self.model_2 = model_2
        self.sim_model = sim_model
        self.build()
Example #4
0
    def __init__(self, samples_num=1000):
        """Build CNN encoders for questions and predicates plus a cosine head.

        Exposes ``self.model_1`` (question encoder), ``self.model_2``
        (predicate encoder) and ``self.sim_model`` (cosine similarity).

        Args:
            samples_num: kept for interface compatibility; unused here.
        """
        self.weight_path = get_weight_path(self, config.base_weight_path)
        self.emb_dim = config.emb_dim
        self.question_len = 50
        self.predicate_len = 20

        # Layers shared by both branches.
        shared_emb = Embedding(input_dim=size,
                               output_dim=self.emb_dim,
                               weights=[embedding])
        shared_drop = Dropout(0.25)
        # Sum over time, collapsing (batch, time, dim) -> (batch, dim).
        time_sum = Lambda(lambda x: K.sum(x, axis=1, keepdims=False),
                          output_shape=lambda x: (x[0], x[2]))

        # Question branch: conv -> pool -> conv -> sum-pool -> dropout -> dense.
        q_in = Input(shape=(self.question_len, ), dtype='int32')
        q = shared_emb(q_in)
        q = Conv1D(128, 5, padding='same', activation="relu")(q)
        q = MaxPooling1D(5)(q)
        q = Conv1D(128, 3, padding='same', activation='tanh')(q)
        q = time_sum(q)
        q = shared_drop(q)
        q_vec = Dense(128)(q)

        # Predicate branch: same pipeline shape with its own hyperparameters.
        p_in = Input(shape=(self.predicate_len, ), dtype='int32')
        p = shared_emb(p_in)
        p = Conv1D(128, 5, padding='same', activation="tanh")(p)
        p = MaxPooling1D(2)(p)
        p = Conv1D(128, 3, padding='same', activation="tanh")(p)
        p = time_sum(p)
        p = shared_drop(p)
        p_vec = Dense(128)(p)

        # Cosine similarity between the two encodings.
        sim_head = Lambda(lambda x: cosine(x[0], x[1]),
                          output_shape=lambda x: (None, 1))([q_vec, p_vec])
        self.sim_model = Model([q_in, p_in], sim_head)

        encoder_q = Model(q_in, q_vec)
        encoder_q.compile(optimizer='adam', loss='mse')
        encoder_p = Model(p_in, p_vec)
        encoder_p.compile(optimizer='adam', loss='mse')
        self.model_1 = encoder_q
        self.model_2 = encoder_p

        self.build()
Example #5
0
 def __init__(self,l2_scale=0.001):
     """Build a per-position tagging CNN over a question sequence.

     Args:
         l2_scale: L2 regularization strength for both conv layers.
     """
     self.emb_dim=config.emb_dim
     self.weight_path=get_weight_path(self,config.base_weight_path)

     # Graph: embedding -> tanh conv -> dropout -> sigmoid conv -> flatten.
     x_in=Input((question_len,))
     x=Embedding(input_dim=len(vocab),
                 output_dim=self.emb_dim,
                 weights=[embedding],
                 mask_zero=False)(x_in)
     x=Conv1D(128,5,padding='same',activation="tanh",
              kernel_regularizer=l2(l2_scale))(x)
     x=Dropout(0.3)(x)
     x=Conv1D(1,5,padding='same',activation="sigmoid",
              kernel_regularizer=l2(l2_scale))(x)
     out=Flatten()(x)

     net=Model(x_in,out)
     net.compile(optimizer=Adam(0.001),
                 loss=seq_binary_entropy_loss,
                 metrics=['accuracy'])
     self.model=net
Example #6
0
    def __init__(self, l2_scale=0.001):
        """Build a multi-width CNN tagger with one sigmoid score per position.

        Args:
            l2_scale: L2 regularization strength applied to every conv layer.
        """
        self.emb_dim = config.emb_dim
        self.weight_path = get_weight_path(self, config.base_weight_path)
        self.question_len = 20

        # Input + embedding seeded with the pretrained matrix.
        x_in = Input((self.question_len, ))
        x = Embedding(input_dim=len(embedding),
                      output_dim=self.emb_dim,
                      weights=[embedding])(x_in)
        x = Dropout(0.8)(x)

        def multi_conv(tensor, filters):
            # Parallel convs with kernel widths 3/5/7, concatenated channel-wise.
            branches = [
                Conv1D(filters,
                       width,
                       padding='same',
                       activation="relu",
                       kernel_regularizer=l2(l2_scale))(tensor)
                for width in [3, 5, 7]
            ]
            return concatenate(branches, axis=-1)

        x = multi_conv(x, 64)
        x = Dropout(0.6)(x)
        x = multi_conv(x, 96)
        x = Dropout(0.5)(x)

        # Per-position sigmoid score.
        x = Conv1D(1,
                   5,
                   padding='same',
                   activation="sigmoid",
                   kernel_regularizer=l2(l2_scale))(x)
        out = Flatten()(x)

        net = Model(x_in, out)
        net.compile(optimizer=Adam(0.001),
                    loss=seq_binary_entropy_loss,
                    metrics=['accuracy'])
        self.model = net
Example #7
0
    def __init__(self, samples_num=1000):
        """Build a gated question/predicate matching network.

        Exposes:
          * ``self.subject_model``   -- per-position subject tags from a question.
          * ``self.att_model``       -- mean gate activation per question position.
          * ``self.question_model``  -- question semantic encoder.
          * ``self.predicate_model`` -- predicate semantic encoder.
          * ``self.sim_model``       -- cosine similarity of the two encodings.
          * ``self.all_model``       -- joint (tags, similarity) model.

        Args:
            samples_num: kept for interface compatibility; unused here.
        """
        self.weight_path = get_weight_path(self, base_weight_path)
        self.emb_dim = 128
        self.question_len = question_len
        self.predicate_len = predicate_len

        # Shared layers: frozen pretrained embedding and sum-over-time pooling.
        # (Unused dropout/max-pool layers and a dead `att` assignment that was
        # overwritten before use have been removed.)
        emb = Embedding(input_dim=size,
                        output_dim=self.emb_dim,
                        weights=[embedding],
                        trainable=False)
        sum_pool = Lambda(lambda x: K.sum(x, axis=1, keepdims=False),
                          output_shape=lambda x: (x[0], x[2]))

        # --- Question branch ------------------------------------------------
        question_in = Input(shape=(self.question_len, ), dtype='int32')
        question_embeded = emb(question_in)
        question_bigru = Bidirectional(GRU(256,
                                           return_sequences=True,
                                           activation="tanh"),
                                       merge_mode="concat")(question_embeded)

        # Per-position gate in [0, 1]: splits the question into a subject part
        # (scaled by gate) and a predicate part (scaled by 1 - gate).
        gate = Conv1D(1, 3, padding="same",
                      activation="sigmoid")(question_bigru)
        question_subject = Lambda(lambda x: x[0] * x[1],
                                  output_shape=lambda x: x[0])(
                                      [question_bigru, gate])
        question_predicate = Lambda(lambda x: x[0] * (1 - x[1]),
                                    output_shape=lambda x: x[0])(
                                        [question_bigru, gate])

        # Multi-width convs over the subject part, then a per-position tagger.
        question_subject = [
            Conv1D(200, kernel_size=size, padding="same",
                   activation="relu")(question_subject) for size in [3, 4, 5]
        ]
        question_subject = concatenate(question_subject)
        # padding unified to lowercase "same" (Keras normalizes case, so this
        # is behavior-identical, but it now matches the rest of the file).
        question_tags = Conv1D(filters=1,
                               kernel_size=3,
                               activation="sigmoid",
                               padding="same",
                               kernel_regularizer=l2(0.001))(question_subject)
        question_tags = Flatten()(question_tags)

        # Question-predicate semantic encoding (deep path).
        question_predicate = Conv1D(self.emb_dim,
                                    3,
                                    padding='same',
                                    activation='linear')(question_predicate)
        question_pool = sum_pool(question_predicate)

        # Attention output: mean gate value per position.
        att = Lambda(lambda x: K.mean(x, axis=-1),
                     output_shape=lambda x: [x[0], x[1]])(gate)

        # Shallow (embedding-level) predicate signal, gated the same way.
        question_shallow = Lambda(lambda x: x[0] * (1 - x[1]),
                                  output_shape=lambda x: x[0])(
                                      [question_embeded, gate])
        question_sum = sum_pool(question_shallow)

        # --- Predicate branch -----------------------------------------------
        predicate_in = Input(shape=(self.predicate_len, ), dtype='int32')
        predicate_embeded = emb(predicate_in)
        predicate_bigru = Bidirectional(GRU(256,
                                            return_sequences=True,
                                            activation='tanh'),
                                        merge_mode="concat")(predicate_embeded)

        predicate_cnn = Conv1D(self.emb_dim,
                               3,
                               padding='same',
                               activation='linear')(predicate_bigru)
        predicate_pool = sum_pool(predicate_cnn)
        predicate_sum = sum_pool(predicate_embeded)

        # Gated fusion of deep (RNN+CNN) and shallow (embedding) encodings.
        question_out = GatedLayer(self.emb_dim, 256, activation="linear")(
            [question_pool, question_sum])
        predicate_out = GatedLayer(self.emb_dim, 256, activation='linear')(
            [predicate_pool, predicate_sum])

        # --- Models ---------------------------------------------------------
        subject_model = Model(inputs=question_in, outputs=question_tags)
        subject_model.compile(optimizer="adam", loss="mse")
        self.subject_model = subject_model

        att_model = Model(inputs=question_in, outputs=att)
        att_model.compile(optimizer="adam", loss="mse")
        self.att_model = att_model

        sim = Lambda(lambda x: cosine(x[0], x[1]),
                     output_shape=lambda x: (None, 1))([question_out,
                                                        predicate_out])
        sim_model = Model([question_in, predicate_in], sim)
        question_model = Model(question_in, question_out)
        question_model.compile(optimizer='adam', loss='mse')
        predicate_model = Model(predicate_in, predicate_out)
        predicate_model.compile(optimizer='adam', loss='mse')
        self.question_model = question_model
        self.predicate_model = predicate_model
        self.sim_model = sim_model

        self.all_model = Model(inputs=[question_in, predicate_in],
                               outputs=[question_tags, sim])
        self.all_model.compile(optimizer="adam", loss="mse")
        self.build()