def eval_test(self):
    """Evaluate link prediction during training and checkpoint the best models.

    For the "citeseerNew" app, runs LinkPredictEval twice — i == 0 for the
    generator and i == 1 for the discriminator — appends the median/softmax
    scores to the result file, and whenever a score ties or beats the best
    seen so far, saves the matching trainable variable and the embeddings.

    :return: None
    """
    tvars = tf.trainable_variables()
    tvars_vals = self.sess.run(tvars)

    def save_variable(name_fragment, file_index):
        # Persist every trainable variable whose name contains
        # name_fragment to the configured embedding file.
        for var, val in zip(tvars, tvars_vals):
            if name_fragment in var.name:
                np.savetxt(config.emb_filenames[file_index], val)

    results = []
    if config.app == "citeseerNew":
        for i in range(2):
            LPE = elp.LinkPredictEval(
                config.emb_filenames[i + 4], config.test_filename,
                config.test_neg_filename, config.n_node, config.n_embed,
                config.n_features, config.pretrain_feature_filename,
                config.emb_filenames[i + 6], config.modes[i])
            result = LPE.eval_link_prediction()
            # result layout (assumed from usage): result[1] is the
            # median-based score, result[2] the softmax-based score
            # -- TODO confirm against LinkPredictEval.
            results.append(config.modes[i] + ":" + "median--" +
                           str(result[0]) + "--" + str(result[1]) +
                           "   softmax--" + str(result[2]) + "\n")

            softmax_score = float(result[2])
            median_score = float(result[1])

            if i == 0:
                # Generator checkpoints.
                if softmax_score >= self.maxSoftmaxGen:
                    print(self.maxSoftmaxGen)  # log the previous best
                    save_variable("generator/Generator/Variable_1:0", 10)
                    self.maxSoftmaxGen = softmax_score
                    self.write_emb_to_txt(0, 2)
                if median_score >= self.maxMedianGen:
                    print(self.maxMedianGen)
                    save_variable("generator/Generator/Variable_1:0", 8)
                    self.maxMedianGen = median_score
                    self.write_emb_to_txt(0, 0)
            else:
                # Discriminator checkpoints.
                if softmax_score >= self.maxSoftmaxDis:
                    print(self.maxSoftmaxDis)
                    save_variable("discriminator/discriminator/Variable_1:0", 11)
                    self.maxSoftmaxDis = softmax_score
                    self.write_emb_to_txt(1, 3)
                if median_score >= self.maxMedianDis:
                    print(self.maxMedianDis)
                    save_variable("discriminator/discriminator/Variable_1:0", 9)
                    self.maxMedianDis = median_score
                    self.write_emb_to_txt(1, 1)

    with open(config.result_filename, mode="a+") as f:
        f.writelines(results)
# Example #2
# 0
    def eval_test(self):
        """Evaluate link prediction on the test set during training.

        Runs LinkPredictEval once per mode (two embedding files) and
        appends each formatted score line to the configured result file.

        :return: None
        """
        results = []
        if config.app == "link_prediction":
            for idx in range(2):
                evaluator = elp.LinkPredictEval(config.emb_filenames[idx],
                                                config.test_filename,
                                                config.test_neg_filename,
                                                config.n_node,
                                                config.n_embed)
                score = evaluator.eval_link_prediction()
                results.append("{}:{}\n".format(config.modes[idx], str(score)))

        with open(config.result_filename, mode="a+") as f:
            f.writelines(results)
# Example #3
# 0
def eval_test(config):
    """Evaluate link prediction and report both accuracy and macro-F1.

    First appends the LinkPredictEval score to ``config.result_filename``.
    Then scores every test edge (positives followed by negatives) by the
    inner product of its endpoint embeddings, predicts the top half (by
    median split) as positive, and appends the macro-F1 of that split to
    ``config.result_filename_f1``.

    :param config: experiment configuration object (filenames, sizes, model name)
    :return: None
    """
    results = []
    if config.app == "link_prediction":
        LPE = elp.LinkPredictEval(config.emb_filename, config.test_filename,
                                  config.test_neg_filename, config.n_node,
                                  config.n_embed)
        result = LPE.eval_link_prediction()
        results.append(config.model + ":" + str(result) + "\n")

    with open(config.result_filename, mode="a+") as f:
        f.writelines(results)

    # Positive edges first, then negatives — the true-label construction
    # below relies on this ordering.
    test_edges = utils.read_edges_from_file(config.test_filename)
    test_edges_neg = utils.read_edges_from_file(config.test_neg_filename)
    test_edges.extend(test_edges_neg)
    emd = utils.read_emd(config.emb_filename,
                         n_node=config.n_node,
                         n_embed=config.n_embed)
    # Score each edge by the dot product of its endpoint embeddings.
    score_res = [np.dot(emd[edge[0]], emd[edge[1]]) for edge in test_edges]
    test_label = np.array(score_res)
    # Median split: scores at or above the median are predicted positive.
    bar = np.median(test_label)
    test_label = np.where(test_label >= bar, 1.0, 0.0)
    # Ground truth: the first half of the list are the true positives.
    true_label = np.zeros(test_label.shape)
    true_label[0:len(true_label) // 2] = 1
    f1 = f1_score(true_label, test_label, average='macro')
    result = config.model + ":" + str(f1) + "\n"
    print(result)
    with open(config.result_filename_f1, mode="a+") as f:
        # f.write, not f.writelines: result is a single string, and
        # writelines would iterate it character by character.
        f.write(result)