Example #1
0
    def on_epoch_end(self, epoch, logs=None):
        """Compute and print binary-classification metrics on the validation set.

        Runs the model on the held-out pair inputs ("seq1", "seq2"),
        turns the soft outputs into hard class labels via argmax, and
        prints the confusion matrix, precision, recall, F1 and accuracy.

        Parameters
        ----------
        epoch : int
            Index of the epoch that just ended (unused; Keras callback API).
        logs : dict, optional
            Metric logs supplied by Keras (unused).
        """
        feed_data = {
            "seq1": self.validation_data[0],
            "seq2": self.validation_data[1]
        }
        predicts = self.model.predict(feed_data)  # [0]
        val_predict = np.argmax(predicts, axis=1)
        val_targ = np.argmax(self.validation_data[2], axis=1)

        # metrics print
        # Force a 2x2 matrix even when only one class appears this epoch;
        # without labels=, sklearn would return a 1x1 matrix and the 4-way
        # ravel() below would raise a ValueError.
        CM = metrics.confusion_matrix(val_targ, val_predict, labels=[0, 1])
        tn, fp, fn, tp = CM.ravel()
        # Guard divisions: precision is undefined when nothing was predicted
        # positive, recall when no positives exist in the targets.
        if tp + fp == 0:
            precision = 0
        else:
            precision = float(tp) / float(tp + fp)
        if fn + tp == 0:
            recall = 0
        else:
            recall = float(tp) / float(fn + tp)

        print("tp,fp,tn,fn", tp, fp, tn, fn)
        print(CM)
        print("precision,recall", precision, recall)
        # measure
        # F1 divides by (precision + recall); test that sum directly instead
        # of the original `precision * recall == 0`, which gave the same
        # result (either factor being 0 makes F1 equal 0) but hid the actual
        # divide-by-zero condition and disagreed with the sibling callback.
        if precision + recall == 0:
            f1 = 0
        else:
            f1 = 2 * precision * recall / (precision + recall)

        print("f1-score=", f1)

        acc = float(tp + tn) / float(tn + fp + fn + tp)

        print("accuracy=", acc)

        print()
        return
Example #2
0
    def on_epoch_end(self, epoch, logs=None):
        """Compute and print binary-classification metrics on the validation set.

        Feeds the held-out embedding inputs ("em1", "em2") to the model,
        derives hard labels via argmax, and prints the confusion matrix,
        precision, recall, F1 and accuracy.

        Parameters
        ----------
        epoch : int
            Index of the epoch that just ended (unused; Keras callback API).
        logs : dict, optional
            Metric logs supplied by Keras (unused).
        """
        feed_data = {"em1": self.validation_data[0], "em2": self.validation_data[1]}
        # feed_label={"label":self.validation_data[2],"Y":self.validation_data[3]}
        predicts = self.model.predict(feed_data)  # [0]
        # print(predicts)
        val_predict = np.argmax(predicts, axis=1)
        val_targ = np.argmax(self.validation_data[2], axis=1)

        # metrics print
        # Force a 2x2 matrix even when only one class appears this epoch;
        # without labels=, sklearn would return a 1x1 matrix and the 4-way
        # ravel() below would raise a ValueError.
        CM = metrics.confusion_matrix(val_targ, val_predict, labels=[0, 1])
        tn, fp, fn, tp = CM.ravel()
        # Guard divisions: precision is undefined when nothing was predicted
        # positive, recall when no positives exist in the targets.
        if tp + fp == 0:
            precision = 0
        else:
            precision = float(tp) / float(tp + fp)
        if fn + tp == 0:
            recall = 0
        else:
            recall = float(tp) / float(fn + tp)

        print("tp,fp,tn,fn", tp, fp, tn, fn)
        print(CM)
        print("precision,recall", precision, recall)
        # measure
        # F1 is undefined when precision + recall == 0; report 0 instead.
        if precision + recall == 0:
            f1 = 0
        else:
            f1 = 2 * precision * recall / (precision + recall)

        print("f1-score=", f1)

        acc = float(tp + tn) / float(tn + fp + fn + tp)

        print("accuracy=", acc)

        print()
        return
Example #3
0
# Debug/evaluation script: inspect model outputs and report binary metrics.
# Assumes `predictions` (model outputs) and `target_Val` (one-hot targets)
# are already defined by earlier code — TODO confirm against the caller.
from sklearn.metrics import classification_report
import sklearn.metrics as metrics

# Hard class labels from the first output head (argmax over class axis).
predictions_bool = np.argmax(predictions[0], axis=1)

print(len(predictions))
predictions = np.asarray(predictions)
# Threshold probabilities into boolean predictions.
y_pred = (predictions > 0.5)
print('target_Val shape')
print(target_Val.shape)
print('y_pred shape')
print(y_pred.shape)
print('predictions shape')
print(predictions.shape)
print(type(y_pred[0]))
print(type(y_pred[1]))
matrix = metrics.confusion_matrix(target_Val.argmax(axis=1), predictions[0].argmax(axis=1))
print('predictions')
print(predictions[0])
print(predictions[0].argmax(axis=1))
print('y_pred')
print(y_pred[0])
print(y_pred[0].argmax(axis=1))
print(matrix)
# Micro average over a single binary confusion matrix reduces to overall
# accuracy: correct (diagonal) over total.
micro = (matrix[0,0]+matrix[1,1])/(matrix[0,0]+matrix[1,1]+matrix[0,1]+matrix[1,0])
print('micro')
print(micro)
print('macro')
# Macro-averaged precision: mean of per-class precision, i.e. each diagonal
# entry divided by its COLUMN sum. The original second term divided by
# matrix[1,1]+matrix[1,0] (a row sum — class-1 recall), mixing precision
# and recall in one average; the denominator is now matrix[1,1]+matrix[0,1].
macro = (matrix[0,0]/(matrix[0,0]+matrix[1,0])+matrix[1,1]/(matrix[1,1]+matrix[0,1]))/2
print(macro)

print(classification_report(target_Val.argmax(axis=1), predictions_bool, digits=3))