コード例 #1
0
 def eval(self, data_sentence, data_pos, data_label, batch_size=64):
     """
     Evaluate model predictions over a dataset in mini-batches.

     Runs the prediction op batch by batch with all dropout keep
     probabilities forced to 1.0 (i.e. dropout disabled for evaluation),
     then scores the collected predictions with simple_evaluate.

     :param data_sentence: sentence features, sliceable, aligned with data_label
     :param data_pos: POS-tag features, aligned with data_label
     :param data_label: gold labels
     :param batch_size: number of examples per forward pass
     :return: (precision, recall, f-score) as returned by simple_evaluate
     """
     pred_labels = []
     # Ceil division over the dataset. The previous form,
     # int(len(data_label) / batch_size) + 1, scheduled one extra EMPTY
     # batch whenever len(data_label) was an exact multiple of
     # batch_size; that empty feed reached sess.run.
     nb_dev = (len(data_label) + batch_size - 1) // batch_size
     for i in range(nb_dev):
         sentence_feed = data_sentence[i * batch_size:(i + 1) * batch_size]
         pos_feed = data_pos[i * batch_size:(i + 1) * batch_size]
         label_feed = data_label[i * batch_size:(i + 1) * batch_size]
         feed_dict = {
             self.input_sentence_ph: sentence_feed,
             self.input_pos_ph: pos_feed,
             self.label_ph: label_feed,
             # keep probability 1.0 disables dropout during evaluation
             self.keep_prob_ph: 1.0,
             self.word_keep_prob_ph: 1.0,
             self.pos_keep_prob_ph: 1.0
         }
         pred_temp = self.sess.run(self.pred_op, feed_dict=feed_dict)
         pred_labels += list(pred_temp)
     # Truncate gold labels to the number of predictions actually produced
     # so the two sequences stay aligned for scoring.
     true_labels = data_label[:len(pred_labels)]
     p, r, f = simple_evaluate(true_labels, pred_labels)
     return p, r, f
コード例 #2
0
 def paint(self):
     """
     Select three distinct style models at random and apply each one
     to the final image, accumulating the results.

     Side effects: sets self.random_models and appends the three styled
     images to self.random_styled_images.
     """
     chosen = np.random.choice(self.style_models, size=3, replace=False)
     self.random_models = chosen
     # NOTE(review): the original iterated self.random_models[-3:], which
     # is the full 3-element selection — iteration order is identical.
     for style_name in self.random_models[-3:]:
         checkpoint_path = 'checkpoint/' + self.museum + '/' + style_name
         print(checkpoint_path)
         styled = simple_evaluate(self.final_image, checkpoint_path)
         self.random_styled_images.append(styled)
コード例 #3
0
ファイル: lstm.py プロジェクト: zjh-nudger/lstm_mlee
def simple_evaluate(f_pred, prepare_data, test_set):
    """
    Prepare a test set, run the prediction function on it, and score
    the predictions.

    :param f_pred: callable taking (inputs, mask) and returning predictions
    :param prepare_data: callable turning (test_set[0], test_set[1]) into
                         (inputs, mask, targets)
    :param test_set: raw test data; also passed to the scorer
    :return: (precision, recall, f_score, micro_avg) from
             evaluate.simple_evaluate
    """
    inputs, mask, _targets = prepare_data(test_set[0], test_set[1])
    predictions = f_pred(inputs, mask)
    # Delegate scoring to the project's evaluate module; its 4-tuple is
    # returned unchanged.
    return evaluate.simple_evaluate(predictions, test_set)
コード例 #4
0
def simple_evaluate(f_pred, prepare_data, test_set):
    """
    Prepare a test set, run a bidirectional prediction function on it,
    and score the predictions.

    :param f_pred: callable taking (forward_inputs, backward_inputs, mask)
                   and returning predictions
    :param prepare_data: callable turning (test_set[0], test_set[1]) into
                         (inputs, mask, targets)
    :param test_set: raw test data; also passed to the scorer
    :return: (precision, recall, f_score, micro_avg) from
             evaluate.simple_evaluate
    """
    x, mask, y = prepare_data(test_set[0], test_set[1])
    # Reversed copy of each sequence for the backward direction.
    # The original built this via xrange(len(x)), which is Python-2-only
    # (NameError on Python 3); iterating x directly is equivalent on both.
    x_1 = [seq[::-1] for seq in x]
    prediction = f_pred(x, x_1, mask)
    precision, recall, f_score, micro_avg = evaluate.simple_evaluate(prediction, test_set)
    return precision, recall, f_score, micro_avg