Code example #1
# Imports assumed by this example (MMoE comes from the external
# keras-mmoe project; the rest is standard Keras):
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.initializers import VarianceScaling
from keras import metrics
from mmoe import MMoE

def main1():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation_moe()
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    #print('Training label shape = {}'.format(len(train_label)))

    # Set up the input layer
    input_layer = Input(shape=(num_features, ))

    # Set up MMoE layer
    mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=2)(input_layer)

    output_layers = []

    output_info = ['y0', 'y1']

    # Build tower layer from MMoE layer
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=8,
                            activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=1,
                             name=output_info[index],
                             activation='linear',
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model
    model = Model(inputs=[input_layer], outputs=output_layers)
    learning_rates = [1e-4, 1e-3, 1e-2]  # candidate rates; only the first is used here
    adam_optimizer = Adam(learning_rate=learning_rates[0])
    model.compile(loss={
        'y0': 'mean_squared_error',
        'y1': 'mean_squared_error'
    },
                  optimizer=adam_optimizer,
                  metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100)
    return model
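
# The example above assumes a data_preparation_moe() helper that is not shown
# here. A minimal synthetic stub (an assumption, not the original data
# pipeline) with two linear regression targets could look like:
import numpy as np

def data_preparation_moe(n_train=2000, n_val=500, n_test=500, num_features=100):
    # Shared ground-truth weights so all splits come from the same two tasks.
    rng = np.random.default_rng(0)
    w0 = rng.normal(size=num_features)
    w1 = rng.normal(size=num_features)

    def make_split(n):
        x = rng.normal(size=(n, num_features)).astype('float32')
        labels = {'y0': x @ w0 + rng.normal(scale=0.1, size=n),
                  'y1': x @ w1 + rng.normal(scale=0.1, size=n)}
        return x, labels

    train_x, train_y = make_split(n_train)
    val_x, val_y = make_split(n_val)
    test_x, test_y = make_split(n_test)
    return train_x, train_y, val_x, val_y, test_x, test_y

# With this stub in place the example runs end to end:
# model = main1()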
Code example #2
# Training
from tensorflow.keras.layers import (Input, Dense, LSTM, Bidirectional,
                                     TimeDistributed)
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping  # early-stopping callback

# right_idx3 (the sequence length) is assumed to be defined earlier.

xInput = Input(batch_shape=(None, right_idx3, 256))
xBiLstm = Bidirectional(LSTM(240, return_sequences=True),
                        merge_mode='concat')(xInput)
xOutput = TimeDistributed(Dense(1, activation='sigmoid'))(xBiLstm)
# A cost is produced at every timestep, and the error is propagated to the next step.

model1 = Model(xInput, xOutput)
model1.compile(loss='binary_crossentropy',
               optimizer='rmsprop',
               metrics=['accuracy'])
model1.summary()

early_stopping = EarlyStopping(monitor='val_loss', patience=3)  # define the early-stopping callback

########## 3-gram
# Cross-validation: KFold
from sklearn.model_selection import KFold

# Accuracy, Precision, Recall, F1-Score
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score

# Confusion Matrix, ROC Curve
from sklearn.metrics import confusion_matrix, roc_auc_score
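
# The snippet above defines model1 and early_stopping but never trains or
# evaluates. A hedged sketch of how the pieces might be wired together;
# x_train/y_train/x_test/y_test are hypothetical arrays of shape
# (n, right_idx3, 256) and (n, right_idx3, 1) respectively.
history = model1.fit(x_train, y_train,
                     validation_split=0.1,
                     epochs=50,
                     batch_size=64,
                     callbacks=[early_stopping])

# Flatten the per-step sigmoid outputs and threshold at 0.5 before feeding
# the sklearn metrics imported above. (KFold would wrap this fit/evaluate
# block to obtain cross-validated scores.)
y_prob = model1.predict(x_test).ravel()
y_pred = (y_prob > 0.5).astype(int)
y_true = y_test.ravel()
print('Accuracy :', accuracy_score(y_true, y_pred))
print('Precision:', precision_score(y_true, y_pred))
print('Recall   :', recall_score(y_true, y_pred))
print('F1-Score :', f1_score(y_true, y_pred))
print('ROC-AUC  :', roc_auc_score(y_true, y_prob))
print(confusion_matrix(y_true, y_pred))
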
Code example #3
# Imports assumed by this example (GenericModel and the role_based_word_embedding /
# target_word_hidden helpers come from the surrounding project):
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Dense, Lambda, PReLU
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import glorot_uniform

class NNRF(GenericModel):
    """Non-incremental model role-filler

    """
    def __init__(self,
                 n_word_vocab=50001,
                 n_role_vocab=7,
                 n_factors_emb=256,
                 n_factors_cls=512,
                 n_hidden=256,
                 word_vocabulary={},
                 role_vocabulary={},
                 unk_word_id=50000,
                 unk_role_id=7,
                 missing_word_id=50001,
                 using_dropout=False,
                 dropout_rate=0.3,
                 optimizer='adagrad',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy']):
        super(NNRF, self).__init__(n_word_vocab, n_role_vocab, n_factors_emb,
                                   n_hidden, word_vocabulary, role_vocabulary,
                                   unk_word_id, unk_role_id, missing_word_id,
                                   using_dropout, dropout_rate, optimizer,
                                   loss, metrics)

        # minus 1 here because one of the roles is the target role
        self.input_length = n_role_vocab - 1

        # each input is a fixed window over the frame set; each word corresponds to one role
        input_words = Input(
            shape=(self.input_length, ), dtype=tf.uint32,
            name='input_words')  # Switched dtype to tf specific (team1-change)
        input_roles = Input(
            shape=(self.input_length, ), dtype=tf.uint32,
            name='input_roles')  # Switched dtype to tf specific (team1-change)
        target_role = Input(
            shape=(1, ), dtype=tf.uint32,
            name='target_role')  # Switched dtype to tf specific (team1-change)

        # role based embedding layer
        embedding_layer = role_based_word_embedding(
            input_words, input_roles, n_word_vocab, n_role_vocab,
            glorot_uniform(), missing_word_id, self.input_length,
            n_factors_emb, True, using_dropout, dropout_rate)

        # sum along the input_length axis;
        # obtains the event embedding, shape (batch_size, n_factors_emb)
        event_embedding = Lambda(
            lambda x: K.sum(x, axis=1),
            name='event_embedding',
            output_shape=(n_factors_emb, ))(embedding_layer)

        # fully connected layer, output shape is (batch_size, n_hidden)
        hidden = Dense(n_hidden,
                       activation='linear',
                       input_shape=(n_factors_emb, ),
                       name='projected_event_embedding')(event_embedding)

        # non-linearity; PReLU slopes initialized to 1
        non_linearity = PReLU(alpha_initializer='ones',
                              name='context_embedding')(hidden)

        # hidden layer
        hidden_layer2 = target_word_hidden(non_linearity,
                                           target_role,
                                           n_word_vocab,
                                           n_role_vocab,
                                           glorot_uniform(),
                                           n_factors_cls,
                                           n_hidden,
                                           using_dropout=using_dropout,
                                           dropout_rate=dropout_rate)

        # softmax output layer
        output_layer = Dense(n_word_vocab,
                             activation='softmax',
                             input_shape=(n_factors_cls, ),
                             name='softmax_word_output')(hidden_layer2)

        self.model = Model(inputs=[input_words, input_roles, target_role],
                           outputs=[output_layer])

        self.model.compile(optimizer, loss, metrics)

    def set_0_bias(self):
        """ A hack that sets the output bias to 0.
            Per Ottokar's advice in the paper, the output bias needs to be 0
            during *evaluation* in order to replicate the best performance
            reported in the paper.
        """
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        word_output_bias = np.zeros(self.n_word_vocab)
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, word_output_bias])

        return word_output_weights[1]

    def set_bias(self, bias):
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, bias])

        return bias

    # Deprecated temporarily
    def train(self,
              i_w,
              i_r,
              t_w,
              t_r,
              t_w_c,
              t_r_c,
              batch_size=256,
              epochs=100,
              validation_split=0.05,
              verbose=0):
        train_result = self.model.fit([i_w, i_r, t_r], t_w_c,
                                      batch_size=batch_size, epochs=epochs,
                                      validation_split=validation_split,
                                      verbose=verbose)
        return train_result

    def test(self,
             i_w,
             i_r,
             t_w,
             t_r,
             t_w_c,
             t_r_c,
             batch_size=256,
             verbose=0):
        test_result = self.model.evaluate([i_w, i_r, t_r], t_w_c, batch_size,
                                          verbose)
        return test_result

    def train_on_batch(self, i_w, i_r, t_w, t_r, t_w_c, t_r_c):
        train_result = self.model.train_on_batch([i_w, i_r, t_r], t_w_c)
        return train_result

    def test_on_batch(self,
                      i_w,
                      i_r,
                      t_w,
                      t_r,
                      t_w_c,
                      t_r_c,
                      sample_weight=None):
        test_result = self.model.test_on_batch([i_w, i_r, t_r], t_w_c,
                                               sample_weight)
        return test_result

    def predict(self, i_w, i_r, t_r, batch_size=1, verbose=0):
        """ Return the output from softmax layer. """
        predict_result = self.model.predict([i_w, i_r, t_r], batch_size,
                                            verbose)
        return predict_result

    def summary(self):
        self.model.summary()

    def predict_class(self, i_w, i_r, t_r, batch_size=1, verbose=0):
        """ Return predicted target word from prediction. """
        predict_result = self.predict(i_w, i_r, t_r, batch_size, verbose)
        return np.argmax(predict_result, axis=1)

    def p_words(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output scores given target words. """
        predict_result = self.predict(i_w, i_r, t_r, batch_size, verbose)
        return predict_result[range(batch_size), list(t_w)]

    def top_words(self, i_w, i_r, t_r, topN=20, batch_size=1, verbose=0):
        """ Return top N target words given context. """
        predict_result = self.predict(i_w, i_r, t_r, batch_size, verbose)
        rank_list = np.argsort(predict_result, axis=1)
        return [r[-topN:][::-1] for r in rank_list]

    def list_top_words(self, i_w, i_r, t_r, topN=20, batch_size=1, verbose=0):
        """ Return a list of decoded top N target words.
            (Only for reference, can be removed.)
        """
        top_words_lists = self.top_words(i_w, i_r, t_r, topN, batch_size,
                                         verbose)
        print(type(top_words_lists))  # Updated to python3 syntax (team1-change)
        result = []
        for i in range(batch_size):
            top_words_list = top_words_lists[i]
            result.append([self.word_decoder[w] for w in top_words_list])
        return result
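
# Hedged usage sketch for NNRF (assumes the project supplies GenericModel,
# the embedding/hidden helpers, and a word_decoder mapping; the dummy ids
# below simply follow the constructor defaults).
nnrf = NNRF(n_word_vocab=50001, n_role_vocab=7)
nnrf.summary()

# One dummy batch: 6 (= n_role_vocab - 1) context word/role ids plus a target role.
i_w = np.random.randint(0, 50000, size=(4, 6))
i_r = np.tile(np.arange(6), (4, 1))
t_r = np.random.randint(0, 7, size=(4, 1))
probs = nnrf.predict(i_w, i_r, t_r, batch_size=4)
print(probs.shape)  # expected: (4, 50001)
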
Code example #4
# Imports assumed by this excerpt; encoder_inputs / encoder_states and
# vocab_size come from an encoder defined earlier (a sketch follows below),
# and encoder_input_data / decoder_input_data / decoder_output_data are
# assumed to be prepared beforehand.
import tensorflow as tf
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
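
# Hedged sketch of the encoder side this excerpt depends on (assumptions:
# vocab_size is defined earlier; the 200-unit sizes mirror the decoder below).
encoder_inputs = Input(shape=(None, ))
encoder_embedding = Embedding(vocab_size, 200, mask_zero=True)(encoder_inputs)
_, state_h, state_c = LSTM(200, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]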
decoder_inputs = Input(shape=(None, ))
decoder_embedding = Embedding(vocab_size, 200, mask_zero=True)(decoder_inputs)
decoder_lstm = LSTM(200, return_state=True, return_sequences=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding,
                                     initial_state=encoder_states)
decoder_dense = Dense(vocab_size, activation=tf.keras.activations.softmax)
output = decoder_dense(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], output)
model.compile(optimizer=optimizers.RMSprop(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Reference links: RMSprop <https://keras.io/zh/optimizers/#rmsprop>
# categorical_crossentropy <https://keras.io/zh/backend/#categorical_crossentropy>

model.summary()

# Train and save the model
model.fit([encoder_input_data, decoder_input_data],
          decoder_output_data,
          batch_size=50,
          epochs=150)
model.save('model.h5')


# Interactive inference (human-machine interaction)
def make_inference_models():

    encoder_model = tf.keras.models.Model(encoder_inputs, encoder_states)

    decoder_state_input_h = tf.keras.layers.Input(shape=(200, ))
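
    # --- Hedged completion: the excerpt cuts off here. A standard Keras
    # seq2seq inference setup (an assumption, not the original author's
    # code) would continue roughly as follows. ---
    decoder_state_input_c = tf.keras.layers.Input(shape=(200, ))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]

    # Reuse the trained decoder layers, but feed the states explicitly.
    dec_outputs, state_h, state_c = decoder_lstm(
        decoder_embedding, initial_state=decoder_states_inputs)
    dec_outputs = decoder_dense(dec_outputs)

    decoder_model = tf.keras.models.Model(
        [decoder_inputs] + decoder_states_inputs,
        [dec_outputs, state_h, state_c])

    return encoder_model, decoder_model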
Code example #5
# Same assumed imports as Code example #3, plus the project-specific
# factored_embedding and target_role_hidden helpers.
class MTRFv4(GenericModel):
    """Multi-task non-incremental role-filler

    """
    def __init__(self,
                 n_word_vocab=50001,
                 n_role_vocab=7,
                 n_factors_emb=300,
                 n_hidden=300,
                 word_vocabulary=None,
                 role_vocabulary=None,
                 unk_word_id=50000,
                 unk_role_id=7,
                 missing_word_id=50001,
                 using_dropout=False,
                 dropout_rate=0.3,
                 optimizer='adagrad',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy'],
                 loss_weights=[1., 1.]):
        super(MTRFv4, self).__init__(n_word_vocab, n_role_vocab, n_factors_emb,
                                     n_hidden, word_vocabulary,
                                     role_vocabulary, unk_word_id, unk_role_id,
                                     missing_word_id, using_dropout,
                                     dropout_rate, optimizer, loss, metrics)

        # minus 1 here because one of the roles is the target role
        input_length = n_role_vocab - 1

        n_factors_cls = n_hidden

        # each input is a fixed window over the frame set; each word corresponds to one role
        input_words = Input(
            shape=(input_length, ), dtype=tf.uint32,
            name='input_words')  # Switched dtype to tf specific (team1-change)
        input_roles = Input(
            shape=(input_length, ), dtype=tf.uint32,
            name='input_roles')  # Switched dtype to tf specific (team1-change)
        target_word = Input(
            shape=(1, ), dtype=tf.uint32,
            name='target_word')  # Switched dtype to tf specific (team1-change)
        target_role = Input(
            shape=(1, ), dtype=tf.uint32,
            name='target_role')  # Switched dtype to tf specific (team1-change)

        # role based embedding layer
        embedding_layer = factored_embedding(input_words, input_roles,
                                             n_word_vocab, n_role_vocab,
                                             glorot_uniform(), missing_word_id,
                                             input_length, n_factors_emb,
                                             n_hidden, True, using_dropout,
                                             dropout_rate)

        # non-linearity; PReLU slopes initialized to 1
        non_linearity = PReLU(alpha_initializer='ones')(embedding_layer)

        # mean along the input_length axis;
        # obtains the context embedding, shape (batch_size, n_hidden)
        context_embedding = Lambda(lambda x: K.mean(x, axis=1),
                                   name='context_embedding',
                                   output_shape=(n_hidden, ))(non_linearity)

        # target word hidden layer
        tw_hidden = target_word_hidden(context_embedding,
                                       target_role,
                                       n_word_vocab,
                                       n_role_vocab,
                                       glorot_uniform(),
                                       n_hidden,
                                       n_hidden,
                                       using_dropout=using_dropout,
                                       dropout_rate=dropout_rate)

        # target role hidden layer
        tr_hidden = target_role_hidden(context_embedding,
                                       target_word,
                                       n_word_vocab,
                                       n_role_vocab,
                                       glorot_uniform(),
                                       n_hidden,
                                       n_hidden,
                                       using_dropout=using_dropout,
                                       dropout_rate=dropout_rate)

        # softmax output layer over target words
        target_word_output = Dense(n_word_vocab,
                                   activation='softmax',
                                   input_shape=(n_hidden, ),
                                   name='softmax_word_output')(tw_hidden)

        # softmax output layer over target roles
        target_role_output = Dense(n_role_vocab,
                                   activation='softmax',
                                   input_shape=(n_hidden, ),
                                   name='softmax_role_output')(tr_hidden)

        self.model = Model(
            inputs=[input_words, input_roles, target_word, target_role],
            outputs=[target_word_output, target_role_output])

        self.model.compile(optimizer, loss, metrics, loss_weights)

    def set_0_bias(self):
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        word_output_bias = np.zeros(self.n_word_vocab)
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, word_output_bias])

        role_output_weights = self.model.get_layer(
            "softmax_role_output").get_weights()
        role_output_kernel = role_output_weights[0]
        role_output_bias = np.zeros(self.n_role_vocab)
        self.model.get_layer("softmax_role_output").set_weights(
            [role_output_kernel, role_output_bias])

        return word_output_weights[1], role_output_weights[1]

    def set_bias(self, bias):
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, bias[0]])

        role_output_weights = self.model.get_layer(
            "softmax_role_output").get_weights()
        role_output_kernel = role_output_weights[0]
        self.model.get_layer("softmax_role_output").set_weights(
            [role_output_kernel, bias[1]])

        return bias

    # Train and test
    # Deprecated temporarily
    def train(self,
              i_w,
              i_r,
              t_w,
              t_r,
              t_w_c,
              t_r_c,
              batch_size=256,
              epochs=100,
              validation_split=0.05,
              verbose=0):
        train_result = self.model.fit([i_w, i_r, t_w, t_r], [t_w_c, t_r_c],
                                      batch_size=batch_size, epochs=epochs,
                                      validation_split=validation_split,
                                      verbose=verbose)
        return train_result

    def test(self,
             i_w,
             i_r,
             t_w,
             t_r,
             t_w_c,
             t_r_c,
             batch_size=256,
             verbose=0):
        test_result = self.model.evaluate([i_w, i_r, t_w, t_r], [t_w_c, t_r_c],
                                          batch_size, verbose)
        return test_result

    def train_on_batch(self, i_w, i_r, t_w, t_r, t_w_c, t_r_c):
        train_result = self.model.train_on_batch([i_w, i_r, t_w, t_r],
                                                 [t_w_c, t_r_c])
        return train_result

    def test_on_batch(self,
                      i_w,
                      i_r,
                      t_w,
                      t_r,
                      t_w_c,
                      t_r_c,
                      sample_weight=None):
        test_result = self.model.test_on_batch([i_w, i_r, t_w, t_r],
                                               [t_w_c, t_r_c], sample_weight)
        return test_result

    def predict(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output from softmax layer. """
        predict_result = self.model.predict([i_w, i_r, t_w, t_r], batch_size,
                                            verbose)
        return predict_result

    def predict_word(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return predicted target word from prediction. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return np.argmax(predict_result[0], axis=1)

    def predict_role(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return predicted target role from prediction. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return np.argmax(predict_result[1], axis=1)

    def p_words(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output scores given target words. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return predict_result[0][range(batch_size), list(t_w)]

    def p_roles(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output scores given target roles. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return predict_result[1][range(batch_size), list(t_r)]

    def top_words(self, i_w, i_r, t_w, t_r, topN=20, batch_size=1, verbose=0):
        """ Return top N target words given context. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size,
                                      verbose)[0]
        # NOTE: ranks only the first example in the batch; the commented line
        # below is the per-example variant (cf. NNRF.top_words).
        rank_list = np.argsort(predict_result, axis=1)[0]
        return rank_list[-topN:][::-1]
        # return [r[-topN:][::-1] for r in rank_list]

    # TODO
    def list_top_words(self, i_w, i_r, t_w, t_r, topN=20, batch_size=1,
                       verbose=0):
        """ Return a list of decoded top N target words.
            (Only for reference, can be removed.)
        """
        # top_words() needs t_w as well; np.atleast_2d copes with it
        # returning a single row (see the NOTE in top_words above).
        top_words_lists = np.atleast_2d(
            self.top_words(i_w, i_r, t_w, t_r, topN, batch_size, verbose))
        print(type(top_words_lists))  # Updated to python3 syntax (team1-change)
        result = []
        for i in range(batch_size):
            top_words_list = top_words_lists[i]
            result.append([self.word_decoder[w] for w in top_words_list])
        return result

    def summary(self):
        self.model.summary()
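
# Hedged usage sketch of the evaluation-time bias hack described in
# set_0_bias: zero both output biases before evaluating, then restore them.
# The dummy batch below is purely illustrative; real evaluation data would
# be used, and GenericModel is assumed to set n_word_vocab / n_role_vocab.
mtrf = MTRFv4(n_word_vocab=50001, n_role_vocab=7)
b = 4
i_w = np.random.randint(0, 50000, size=(b, 6))
i_r = np.tile(np.arange(6), (b, 1))
t_w = np.random.randint(0, 50000, size=(b, 1))
t_r = np.random.randint(0, 7, size=(b, 1))
saved_biases = mtrf.set_0_bias()              # returns (word_bias, role_bias)
word_probs, role_probs = mtrf.predict(i_w, i_r, t_w, t_r, batch_size=b)
mtrf.set_bias(saved_biases)                   # restore the trained biases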