def eval_GS(model_name,
            experiment_name,
            eval_file_name,
            model=None,
            print_result=True,
            verb_baseline=False):
    MODEL_NAME = experiment_name
    eval_file = os.path.join(EVAL_PATH, eval_file_name)
    result_file = os.path.join(MODEL_PATH, MODEL_NAME + '_' + eval_file_name)

    if model:
        net = model
    else:
        description = model_builder.load_description(MODEL_PATH, MODEL_NAME)
        net = model_builder.build_model(model_name, description)
        net.load(MODEL_PATH, MODEL_NAME, description)

    sent_layer = 'context_embedding'

    sent_model = Model(inputs=net.model.input,
                       outputs=net.model.get_layer(sent_layer).output)

    # if print_result:
    #     sent_model.summary()

    n_input_length = len(net.role_vocabulary) - 1

    print(net.role_vocabulary)

    scores = []
    similarities = []
    original_sim_f = []
    similarities_f = []
    lo_similarities = []
    hi_similarities = []
    records = []

    print("Embedding: " + experiment_name)
    print("=" * 60)
    print("\n")
    print("sentence1\tsentence2\taverage_score\tembedding_cosine")
    print("-" * 60)

    with open(eval_file, 'r') as f, \
        open(result_file, 'w') as f_out:

        first = True
        for line in f:
            # skip header
            if first:
                first = False
                continue

            s = line.split()
            sentence = " ".join(s[1:5])
            score = float(s[5])
            hilo = s[6].upper()

            # verb subject object landmark
            # A1 - object; A0 - subject
            V1, A0, A1, V2 = sentence.split()

            V1 = wnl.lemmatize(V1, wn.VERB)
            A0 = wnl.lemmatize(A0, wn.NOUN)
            A1 = wnl.lemmatize(A1, wn.NOUN)
            V2 = wnl.lemmatize(V2, wn.VERB)

            V1_i = net.word_vocabulary.get(V1, net.unk_word_id)
            A0_i = net.word_vocabulary.get(A0, net.unk_word_id)
            A1_i = net.word_vocabulary.get(A1, net.unk_word_id)
            V2_i = net.word_vocabulary.get(V2, net.unk_word_id)

            # if net.unk_word_id in (V1_i, A0_i, A1_i, V2_i):
            #     print('OOV:', A0, A1, V1, V2)

            V_ri = net.role_vocabulary['V']
            A0_ri = net.role_vocabulary['A0']
            A1_ri = net.role_vocabulary['A1']

            sent1_x = {r: net.missing_word_id for r in net.role_vocabulary.values()}
            sent2_x = {r: net.missing_word_id for r in net.role_vocabulary.values()}

            # remove one role slot so each input has exactly n_input_length entries
            sent1_x.pop(n_input_length)
            sent2_x.pop(n_input_length)

            sent1_x[V_ri] = V1_i
            sent2_x[V_ri] = V2_i

            if not verb_baseline:
                sent1_x[A0_ri] = A0_i
                sent1_x[A1_ri] = A1_i
                sent2_x[A0_ri] = A0_i
                sent2_x[A1_ri] = A1_i

            zeroA = np.array([0])  # dummy target input (unused by context_embedding)

            s1_w = np.array(list(sent1_x.values())).reshape((1, n_input_length))
            s1_r = np.array(list(sent1_x.keys())).reshape((1, n_input_length))
            s2_w = np.array(list(sent2_x.values())).reshape((1, n_input_length))
            s2_r = np.array(list(sent2_x.keys())).reshape((1, n_input_length))

            if re.search('NNRF', model_name):
                sent1_emb = sent_model.predict([s1_w, s1_r, zeroA])
                sent2_emb = sent_model.predict([s2_w, s2_r, zeroA])
            else:
                sent1_emb = sent_model.predict([s1_w, s1_r, zeroA, zeroA])
                sent2_emb = sent_model.predict([s2_w, s2_r, zeroA, zeroA])

            # Baseline
            #sent1_emb = V1_i
            #sent2_emb = V2_i
            # Compositional
            # sent1_emb = V1_i + A0_i + A1_i
            # sent2_emb = V2_i + A0_i + A1_i
            #sent1_emb = V1_i * A0_i * A1_i
            #sent2_emb = V2_i * A0_i * A1_i

            # convert cosine distance to similarity
            similarity = 1.0 - cosine(sent1_emb, sent2_emb)

            if hilo == "HIGH":
                hi_similarities.append(similarity)
            elif hilo == "LOW":
                lo_similarities.append(similarity)
            else:
                raise Exception("Unknown hilo value %s" % hilo)

            if (V1, A0, A1, V2) not in records:
                records.append((V1, A0, A1, V2))
                # print "\"%s %s %s\"\t\"%s %s %s\"\t%.2f\t%.2f \n" % (A0, V1, A1, A0, V2, A1, score, similarity)

            scores.append(score)
            similarities.append(similarity)

            f_out.write("\"%s %s %s\"\t\"%s %s %s\"\t %.2f \t %.2f \n" %
                        (A0, V1, A1, A0, V2, A1, score, similarity))

    print("-" * 60)

    correlation, pvalue = spearmanr(scores, similarities)

    if print_result:
        print("Total number of samples: %d" % len(scores))
        print("Spearman correlation: %.4f; 2-tailed p-value: %.10f" %
              (correlation, pvalue))
        print("High: %.2f; Low: %.2f" %
              (np.mean(hi_similarities), np.mean(lo_similarities)))

        # import pylab
        # pylab.scatter(scores, similarities)
        # pylab.show()

    return correlation
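
# A minimal usage sketch (hypothetical names: 'NNRF' as the architecture,
# 'nnrf_run1' as the experiment under MODEL_PATH, 'GS2013.txt' under EVAL_PATH;
# substitute your own identifiers):
if __name__ == '__main__':
    rho = eval_GS('NNRF', 'nnrf_run1', 'GS2013.txt')
    rho_verb = eval_GS('NNRF', 'nnrf_run1', 'GS2013.txt', verb_baseline=True)
    print("full model: %.4f  verb-only baseline: %.4f" % (rho, rho_verb))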
Example #2
class NNRF(GenericModel):
    """Non-incremental model role-filler

    """
    def __init__(self,
                 n_word_vocab=50001,
                 n_role_vocab=7,
                 n_factors_emb=256,
                 n_factors_cls=512,
                 n_hidden=256,
                 word_vocabulary={},
                 role_vocabulary={},
                 unk_word_id=50000,
                 unk_role_id=7,
                 missing_word_id=50001,
                 using_dropout=False,
                 dropout_rate=0.3,
                 optimizer='adagrad',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy']):
        super(NNRF, self).__init__(n_word_vocab, n_role_vocab, n_factors_emb,
                                   n_hidden, word_vocabulary, role_vocabulary,
                                   unk_word_id, unk_role_id, missing_word_id,
                                   using_dropout, dropout_rate, optimizer,
                                   loss, metrics)

        # minus 1 because one of the roles is the target role
        self.input_length = n_role_vocab - 1

        # each input is a fixed window over the frame; each word corresponds to one role
        input_words = Input(shape=(self.input_length, ), dtype=tf.uint32,
                            name='input_words')
        input_roles = Input(shape=(self.input_length, ), dtype=tf.uint32,
                            name='input_roles')
        target_role = Input(shape=(1, ), dtype=tf.uint32, name='target_role')

        # role based embedding layer
        embedding_layer = role_based_word_embedding(
            input_words, input_roles, n_word_vocab, n_role_vocab,
            glorot_uniform(), missing_word_id, self.input_length,
            n_factors_emb, True, using_dropout, dropout_rate)

        # sum over the input_length axis;
        # event embedding, shape (batch_size, n_factors_emb)
        event_embedding = Lambda(
            lambda x: K.sum(x, axis=1),
            name='event_embedding',
            output_shape=(n_factors_emb, ))(embedding_layer)

        # fully connected layer; output shape is (batch_size, n_hidden)
        hidden = Dense(n_hidden,
                       activation='linear',
                       input_shape=(n_factors_emb, ),
                       name='projected_event_embedding')(event_embedding)

        # non-linearity; PReLU alphas initialized to ones
        non_linearity = PReLU(alpha_initializer='ones',
                              name='context_embedding')(hidden)

        # hidden layer
        hidden_layer2 = target_word_hidden(non_linearity,
                                           target_role,
                                           n_word_vocab,
                                           n_role_vocab,
                                           glorot_uniform(),
                                           n_factors_cls,
                                           n_hidden,
                                           using_dropout=using_dropout,
                                           dropout_rate=dropout_rate)

        # softmax output layer
        output_layer = Dense(n_word_vocab,
                             activation='softmax',
                             input_shape=(n_factors_cls, ),
                             name='softmax_word_output')(hidden_layer2)

        self.model = Model(inputs=[input_words, input_roles, target_role],
                           outputs=[output_layer])

        self.model.compile(optimizer, loss, metrics)

    def set_0_bias(self):
        """ This function is used as a hack that set output bias to 0.
            According to Ottokar's advice in the paper, during the *evaluation*, the output bias needs to be 0 
            in order to replicate the best performance reported in the paper.
        """
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        word_output_bias = np.zeros(self.n_word_vocab)
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, word_output_bias])

        return word_output_weights[1]

    def set_bias(self, bias):
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, bias])

        return bias
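
    # Intended evaluation pattern for the two bias helpers above (a sketch;
    # 'net', eval_GS and its arguments are placeholders, not part of this class):
    #
    #     saved_bias = net.set_0_bias()   # zero the output bias for evaluation
    #     rho = eval_GS('NNRF', 'nnrf_run1', 'GS2013.txt', model=net)
    #     net.set_bias(saved_bias)        # restore the bias afterwards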

    # Deprecated temporarily
    def train(self,
              i_w,
              i_r,
              t_w,
              t_r,
              t_w_c,
              t_r_c,
              batch_size=256,
              epochs=100,
              validation_split=0.05,
              verbose=0):
        train_result = self.model.fit([i_w, i_r, t_r], t_w_c, batch_size,
                                      epochs, validation_split, verbose)
        return train_result

    def test(self,
             i_w,
             i_r,
             t_w,
             t_r,
             t_w_c,
             t_r_c,
             batch_size=256,
             verbose=0):
        test_result = self.model.evaluate([i_w, i_r, t_r], t_w_c, batch_size,
                                          verbose)
        return test_result

    def train_on_batch(self, i_w, i_r, t_w, t_r, t_w_c, t_r_c):
        train_result = self.model.train_on_batch([i_w, i_r, t_r], t_w_c)
        return train_result

    def test_on_batch(self,
                      i_w,
                      i_r,
                      t_w,
                      t_r,
                      t_w_c,
                      t_r_c,
                      sample_weight=None):
        test_result = self.model.test_on_batch([i_w, i_r, t_r], t_w_c,
                                               sample_weight)
        return test_result

    def predict(self, i_w, i_r, t_r, batch_size=1, verbose=0):
        """ Return the output from softmax layer. """
        predict_result = self.model.predict([i_w, i_r, t_r], batch_size,
                                            verbose)
        return predict_result

    def summary(self):
        self.model.summary()

    def predict_class(self, i_w, i_r, t_r, batch_size=1, verbose=0):
        """ Return predicted target word from prediction. """
        predict_result = self.predict(i_w, i_r, t_r, batch_size, verbose)
        return np.argmax(predict_result, axis=1)

    def p_words(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output scores given target words. """
        predict_result = self.predict(i_w, i_r, t_r, batch_size, verbose)
        return predict_result[range(batch_size), list(t_w)]

    def top_words(self, i_w, i_r, t_r, topN=20, batch_size=1, verbose=0):
        """ Return top N target words given context. """
        predict_result = self.predict(i_w, i_r, t_r, batch_size, verbose)
        rank_list = np.argsort(predict_result, axis=1)
        return [r[-topN:][::-1] for r in rank_list]

    def list_top_words(self, i_w, i_r, t_r, topN=20, batch_size=1, verbose=0):
        """ Return a list of decoded top N target words.
            (Only for reference, can be removed.)
        """
        top_words_lists = self.top_words(i_w, i_r, t_r, topN, batch_size,
                                         verbose)
        result = []
        for i in range(batch_size):
            top_words_list = top_words_lists[i]
            result.append([self.word_decoder[w] for w in top_words_list])
        return result
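
# A smoke-test sketch for NNRF, assuming GenericModel and this project's
# embedding helpers are importable; the tiny vocabulary sizes below are
# illustrative, not the paper's settings.
if __name__ == '__main__':
    net = NNRF(n_word_vocab=101, n_role_vocab=7,
               unk_word_id=100, missing_word_id=101)
    net.summary()
    batch = 1
    i_w = np.full((batch, net.input_length), 101, dtype='int32')  # all missing words
    i_r = np.tile(np.arange(net.input_length), (batch, 1))        # one id per role
    t_r = np.zeros((batch, 1), dtype='int32')                     # target role id
    probs = net.predict(i_w, i_r, t_r)                            # (batch, n_word_vocab)
    print(probs.shape)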
Example #3
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score

kf = KFold(n_splits=5, shuffle=True, random_state=None)  # shuffled 5-fold split
K = 1

for train, validation in kf.split(test_gcc6_2_32_onehot_x,
                                  test_gcc6_2_32_onehot_y):
    print('======Training stage======')
    model1.fit(test_gcc6_2_32_onehot_x[train],
               test_gcc6_2_32_onehot_y[train],
               epochs=4,
               batch_size=32,
               callbacks=[early_stopping])
    # k_accuracy = '%.4f' %(model1.evaluate(data_10000x[validation], data_10000y[validation])[1])

    # 12. Predict on the validation split of this fold
    k_pr = model1.predict(test_gcc6_2_32_onehot_x[validation])

    # compare predictions against ground truth (for the evaluation metrics below)
    pred = np.round(np.array(k_pr).flatten().tolist())
    y_test = np.array(test_gcc6_2_32_onehot_y[validation]).flatten().tolist()

    # 13. Evaluation metrics
    k_accuracy = float(accuracy_score(y_test, pred))
    k_recall = float(recall_score(y_test, pred))
    k_precision = float(precision_score(y_test, pred))
    k_f1_score = float(f1_score(y_test, pred))
    # k_cm = float(confusion_matrix(y_test, pred))

    print(K)  # fold counter
    K = K + 1
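
# A sketch for summarizing the per-fold metrics above across all folds
# (hypothetical: collect each fold's values in a list inside the loop, e.g.
# fold_scores.append((k_accuracy, k_recall, k_precision, k_f1_score))):
def summarize_folds(fold_scores):
    """Print mean +/- std of (accuracy, recall, precision, f1) across folds."""
    arr = np.asarray(fold_scores, dtype=float)
    for name, mean, std in zip(('accuracy', 'recall', 'precision', 'f1'),
                               arr.mean(axis=0), arr.std(axis=0)):
        print('%s: %.4f +/- %.4f' % (name, mean, std))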
Example #4
class MTRFv4(GenericModel):
    """Multi-task non-incremental role-filler

    """
    def __init__(self,
                 n_word_vocab=50001,
                 n_role_vocab=7,
                 n_factors_emb=300,
                 n_hidden=300,
                 word_vocabulary=None,
                 role_vocabulary=None,
                 unk_word_id=50000,
                 unk_role_id=7,
                 missing_word_id=50001,
                 using_dropout=False,
                 dropout_rate=0.3,
                 optimizer='adagrad',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy'],
                 loss_weights=[1., 1.]):
        super(MTRFv4, self).__init__(n_word_vocab, n_role_vocab, n_factors_emb,
                                     n_hidden, word_vocabulary,
                                     role_vocabulary, unk_word_id, unk_role_id,
                                     missing_word_id, using_dropout,
                                     dropout_rate, optimizer, loss, metrics)

        # minus 1 because one of the roles is the target role
        input_length = n_role_vocab - 1

        n_factors_cls = n_hidden

        # each input is a fixed window over the frame; each word corresponds to one role
        input_words = Input(shape=(input_length, ), dtype=tf.uint32,
                            name='input_words')
        input_roles = Input(shape=(input_length, ), dtype=tf.uint32,
                            name='input_roles')
        target_word = Input(shape=(1, ), dtype=tf.uint32, name='target_word')
        target_role = Input(shape=(1, ), dtype=tf.uint32, name='target_role')

        # role based embedding layer
        embedding_layer = factored_embedding(input_words, input_roles,
                                             n_word_vocab, n_role_vocab,
                                             glorot_uniform(), missing_word_id,
                                             input_length, n_factors_emb,
                                             n_hidden, True, using_dropout,
                                             dropout_rate)

        # non-linearity; PReLU alphas initialized to ones
        non_linearity = PReLU(alpha_initializer='ones')(embedding_layer)

        # mean over the input_length axis;
        # context embedding, shape (batch_size, n_hidden)
        context_embedding = Lambda(lambda x: K.mean(x, axis=1),
                                   name='context_embedding',
                                   output_shape=(n_hidden, ))(non_linearity)

        # target word hidden layer
        tw_hidden = target_word_hidden(context_embedding,
                                       target_role,
                                       n_word_vocab,
                                       n_role_vocab,
                                       glorot_uniform(),
                                       n_hidden,
                                       n_hidden,
                                       using_dropout=using_dropout,
                                       dropout_rate=dropout_rate)

        # target role hidden layer
        tr_hidden = target_role_hidden(context_embedding,
                                       target_word,
                                       n_word_vocab,
                                       n_role_vocab,
                                       glorot_uniform(),
                                       n_hidden,
                                       n_hidden,
                                       using_dropout=using_dropout,
                                       dropout_rate=dropout_rate)

        # softmax output layer
        target_word_output = Dense(n_word_vocab,
                                   activation='softmax',
                                   input_shape=(n_hidden, ),
                                   name='softmax_word_output')(tw_hidden)

        # softmax output layer
        target_role_output = Dense(n_role_vocab,
                                   activation='softmax',
                                   input_shape=(n_hidden, ),
                                   name='softmax_role_output')(tr_hidden)

        self.model = Model(
            inputs=[input_words, input_roles, target_word, target_role],
            outputs=[target_word_output, target_role_output])

        self.model.compile(optimizer, loss, metrics, loss_weights)

    def set_0_bias(self):
        """ Zero both softmax output biases for evaluation (cf. NNRF.set_0_bias). """
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        word_output_bias = np.zeros(self.n_word_vocab)
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, word_output_bias])

        role_output_weights = self.model.get_layer(
            "softmax_role_output").get_weights()
        role_output_kernel = role_output_weights[0]
        role_output_bias = np.zeros(self.n_role_vocab)
        self.model.get_layer("softmax_role_output").set_weights(
            [role_output_kernel, role_output_bias])

        return word_output_weights[1], role_output_weights[1]

    def set_bias(self, bias):
        word_output_weights = self.model.get_layer(
            "softmax_word_output").get_weights()
        word_output_kernel = word_output_weights[0]
        self.model.get_layer("softmax_word_output").set_weights(
            [word_output_kernel, bias[0]])

        role_output_weights = self.model.get_layer(
            "softmax_role_output").get_weights()
        role_output_kernel = role_output_weights[0]
        self.model.get_layer("softmax_role_output").set_weights(
            [role_output_kernel, bias[1]])

        return bias

    # Train and test
    # Deprecated temporarily
    def train(self,
              i_w,
              i_r,
              t_w,
              t_r,
              t_w_c,
              t_r_c,
              batch_size=256,
              epochs=100,
              validation_split=0.05,
              verbose=0):
        train_result = self.model.fit([i_w, i_r, t_w, t_r], [t_w_c, t_r_c],
                                      batch_size, epochs, validation_split,
                                      verbose)
        return train_result

    def test(self,
             i_w,
             i_r,
             t_w,
             t_r,
             t_w_c,
             t_r_c,
             batch_size=256,
             verbose=0):
        test_result = self.model.evaluate([i_w, i_r, t_w, t_r], [t_w_c, t_r_c],
                                          batch_size, verbose)
        return test_result

    def train_on_batch(self, i_w, i_r, t_w, t_r, t_w_c, t_r_c):
        train_result = self.model.train_on_batch([i_w, i_r, t_w, t_r],
                                                 [t_w_c, t_r_c])
        return train_result

    def test_on_batch(self,
                      i_w,
                      i_r,
                      t_w,
                      t_r,
                      t_w_c,
                      t_r_c,
                      sample_weight=None):
        test_result = self.model.test_on_batch([i_w, i_r, t_w, t_r],
                                               [t_w_c, t_r_c], sample_weight)
        return test_result

    def predict(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output from softmax layer. """
        predict_result = self.model.predict([i_w, i_r, t_w, t_r], batch_size,
                                            verbose)
        return predict_result

    def predict_word(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return predicted target word from prediction. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return np.argmax(predict_result[0], axis=1)

    def predict_role(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return predicted target role from prediction. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return np.argmax(predict_result[1], axis=1)

    def p_words(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output scores given target words. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return predict_result[0][range(batch_size), list(t_w)]

    def p_roles(self, i_w, i_r, t_w, t_r, batch_size=1, verbose=0):
        """ Return the output scores given target roles. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size, verbose)
        return predict_result[1][range(batch_size), list(t_r)]

    def top_words(self, i_w, i_r, t_w, t_r, topN=20, batch_size=1, verbose=0):
        """ Return top N target words given context. """
        predict_result = self.predict(i_w, i_r, t_w, t_r, batch_size,
                                      verbose)[0]
        rank_list = np.argsort(predict_result, axis=1)[0]
        return rank_list[-topN:][::-1]
        # return [r[-topN:][::-1] for r in rank_list]

    # TODO: extend to batch_size > 1 (top_words above ranks only the first sample)
    def list_top_words(self, i_w, i_r, t_w, t_r, topN=20, batch_size=1, verbose=0):
        """ Return a list of decoded top N target words.
            (Only for reference, can be removed.)
        """
        top_word_ids = self.top_words(i_w, i_r, t_w, t_r, topN, batch_size,
                                      verbose)
        return [self.word_decoder[w] for w in top_word_ids]

    def summary(self):
        self.model.summary()
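
# A minimal multi-task usage sketch for MTRFv4 (illustrative sizes; assumes the
# project's factored_embedding and target_*_hidden helpers are importable):
if __name__ == '__main__':
    net = MTRFv4(n_word_vocab=101, n_role_vocab=7,
                 unk_word_id=100, missing_word_id=101)
    batch = 1
    n_inputs = 7 - 1                                      # n_role_vocab - 1
    i_w = np.full((batch, n_inputs), 101, dtype='int32')  # all missing words
    i_r = np.tile(np.arange(n_inputs), (batch, 1))        # one id per role
    t_w = np.zeros((batch, 1), dtype='int32')             # target word id
    t_r = np.zeros((batch, 1), dtype='int32')             # target role id
    p_word, p_role = net.predict(i_w, i_r, t_w, t_r)
    print(p_word.shape, p_role.shape)                     # (1, 101) (1, 7)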