def evaluate(self):
    """Score the validation set batch-by-batch, write predictions to
    ./tmp/dev.pred, and return the MAP reported by ``eval_reranker``.

    Side effects: overwrites ./tmp/dev.pred and appends the MAP score
    to valid_map.txt.
    """
    print('evaluating...')
    # `with` guarantees the prediction file is closed even if a write fails.
    with open('./tmp/dev.pred', 'w') as f:
        for j in range(len(self.X_valid_batch)):
            y_pred = self.predict(self.X_valid_batch[j])
            for k in range(len(y_pred)):
                idx = j * self.batch_size + k
                # The last batch may be padded; skip entries past the
                # real validation examples.
                if idx >= len(self.X_valid):
                    break
                # Classes 1 and 2 are presumably both "relevant" labels,
                # so their probabilities are summed — TODO confirm.
                prob_of_true = y_pred[k, 1] + y_pred[k, 2]
                label = 'true' if prob_of_true > 0.5 else 'false'
                f.write("%s %s 0 %20.16f %s\n" % (
                    self.meta_valid[idx][0], self.meta_valid[idx][1],
                    prob_of_true, label))
    # Renamed from `map` to avoid shadowing the builtin.
    map_score = eval_reranker(
        res_fname='./data/eval/SemEval2016-Task3-CQA-QL-dev.xml.subtaskB.relevancy',
        pred_fname='./tmp/dev.pred')
    # Keep a running log of validation MAP across evaluations.
    with open('valid_map.txt', 'a') as f:
        f.write(str(map_score) + '\n')
    print('=========================================')
    return map_score
def evaluate(self):
    """Score the full validation set with the Keras model, write
    predictions to ./tmp/dev.pred, and return the MAP from
    ``eval_reranker``.

    Side effects: overwrites ./tmp/dev.pred and appends the MAP score
    to valid_map.txt.
    """
    print('evaluating...')
    y_pred = self.model.predict_proba(self.X_valid, batch_size=1)
    # `with` guarantees the prediction file is closed even if a write fails.
    with open('./tmp/dev.pred', 'w') as f:
        for i in range(len(self.meta_valid)):
            # Classes 1 and 2 are presumably both "relevant" labels,
            # so their probabilities are summed — TODO confirm.
            prob_of_true = y_pred[i][1] + y_pred[i][2]
            label = 'true' if prob_of_true > 0.5 else 'false'
            f.write("%s %s 0 %20.16f %s\n" % (
                self.meta_valid[i][0], self.meta_valid[i][1],
                prob_of_true, label))
    # Renamed from `map` to avoid shadowing the builtin.
    map_score = eval_reranker(
        res_fname='./data/eval/SemEval2016-Task3-CQA-QL-dev.xml.subtaskB.relevancy',
        pred_fname='./tmp/dev.pred')
    # Keep a running log of validation MAP across evaluations.
    with open('valid_map.txt', 'a') as f:
        f.write(str(map_score) + '\n')
    print('=========================================')
    return map_score
def eval(self, fname):
    """Run the reranker evaluation on an existing prediction file.

    :param fname: path to a prediction file in the format expected by
        ``eval_reranker``.
    :returns: the MAP score (also appended to valid_map.txt).
    """
    print('evaluating...')
    # Renamed from `map` to avoid shadowing the builtin.
    map_score = eval_reranker(
        res_fname='./data/eval/SemEval2016-Task3-CQA-QL-dev.xml.subtaskB.relevancy',
        pred_fname=fname)
    # `with` guarantees the log file is closed even if the write fails.
    with open('valid_map.txt', 'a') as f:
        f.write(str(map_score) + '\n')
    print('=========================================')
    # Return the score for consistency with the evaluate() methods;
    # existing callers that ignore the return value are unaffected.
    return map_score
def eval(self, fname):
    """Run the reranker evaluation on an existing prediction file.

    :param fname: path to a prediction file in the format expected by
        ``eval_reranker``.
    :returns: the MAP score (also appended to valid_map.txt).
    """
    print('evaluating...')
    # Renamed from `map` to avoid shadowing the builtin.
    map_score = eval_reranker(
        res_fname='./data/eval/SemEval2016-Task3-CQA-QL-dev.xml.subtaskB.relevancy',
        pred_fname=fname)
    # `with` guarantees the log file is closed even if the write fails.
    with open('valid_map.txt', 'a') as f:
        f.write(str(map_score) + '\n')
    print('=========================================')
    # Return the score for consistency with the evaluate() methods;
    # existing callers that ignore the return value are unaffected.
    return map_score
def evaluate(self):
    """Score the full validation set with the model's default batch
    size, write predictions to ./tmp/dev.pred, and return the MAP from
    ``eval_reranker``.

    Side effects: overwrites ./tmp/dev.pred and appends the MAP score
    to valid_map.txt.
    """
    print('evaluating...')
    y_pred = self.model.predict_proba(self.X_valid)
    # `with` guarantees the prediction file is closed even if a write fails.
    with open('./tmp/dev.pred', 'w') as f:
        for i in range(len(self.meta_valid)):
            # Classes 1 and 2 are presumably both "relevant" labels,
            # so their probabilities are summed — TODO confirm.
            prob_of_true = y_pred[i][1] + y_pred[i][2]
            label = 'true' if prob_of_true > 0.5 else 'false'
            f.write("%s %s 0 %20.16f %s\n" % (
                self.meta_valid[i][0], self.meta_valid[i][1],
                prob_of_true, label))
    # Renamed from `map` to avoid shadowing the builtin.
    map_score = eval_reranker(
        res_fname='./data/eval/SemEval2016-Task3-CQA-QL-dev.xml.subtaskB.relevancy',
        pred_fname='./tmp/dev.pred')
    # Keep a running log of validation MAP across evaluations.
    with open('valid_map.txt', 'a') as f:
        f.write(str(map_score) + '\n')
    print('=========================================')
    return map_score
def evaluate(self):
    """Score the validation set batch-by-batch, write predictions to
    ./tmp/dev.pred, and return the MAP reported by ``eval_reranker``.

    Side effects: overwrites ./tmp/dev.pred and appends the MAP score
    to valid_map.txt.
    """
    print('evaluating...')
    # `with` guarantees the prediction file is closed even if a write fails.
    with open('./tmp/dev.pred', 'w') as f:
        for j in range(len(self.X_valid_batch)):
            y_pred = self.predict(self.X_valid_batch[j])
            for k in range(len(y_pred)):
                idx = j * self.batch_size + k
                # The last batch may be padded; skip entries past the
                # real validation examples.
                if idx >= len(self.X_valid):
                    break
                # Classes 1 and 2 are presumably both "relevant" labels,
                # so their probabilities are summed — TODO confirm.
                prob_of_true = y_pred[k, 1] + y_pred[k, 2]
                label = 'true' if prob_of_true > 0.5 else 'false'
                f.write("%s %s 0 %20.16f %s\n" % (
                    self.meta_valid[idx][0], self.meta_valid[idx][1],
                    prob_of_true, label))
    # Renamed from `map` to avoid shadowing the builtin.
    map_score = eval_reranker(
        res_fname='./data/eval/SemEval2016-Task3-CQA-QL-dev.xml.subtaskB.relevancy',
        pred_fname='./tmp/dev.pred')
    # Keep a running log of validation MAP across evaluations.
    with open('valid_map.txt', 'a') as f:
        f.write(str(map_score) + '\n')
    print('=========================================')
    return map_score