import numpy as np
import tensorflow as tf


def RGB_test(x_test, x_adv, y_test, classifier, pred_net):
    # Prepare paired clean / adversarial test sets for the detector.
    x_test, x_adv, y_test = get_testing_data(x_test, x_adv, y_test, classifier)
    x_adv_shape = x_adv.shape[0]

    # Detector outputs: column 1 is the probability of the "clean" class.
    outputs_adv = pred_net(x_adv)
    outputs = pred_net(x_test)
    adv_pre = outputs_adv[:, 1]
    x_pre = outputs[:, 1]

    # Hard decisions: 1 = predicted clean, 0 = predicted adversarial.
    outputs_adv = tf.argmax(outputs_adv, 1)
    outputs = tf.argmax(outputs, 1)
    adv_score = tf.reduce_sum(outputs_adv)   # adversarial inputs missed by the detector
    x_score = tf.reduce_sum(outputs)         # clean inputs correctly accepted

    print('total_number:', x_adv_shape)
    print('x_score:', x_score.numpy())
    print('x_adv_score:', x_adv_shape - adv_score.numpy())
    print('P:', x_score.numpy() / (x_score.numpy() + adv_score.numpy()))
    print('R:', x_score.numpy() / x_adv_shape)

    # ROC of the detector's clean-probability scores on adversarial vs. clean inputs.
    fpr, tpr, auc_score = compute_roc(adv_pre, x_pre)
    print('auc:', auc_score)
    concat = np.vstack((fpr, tpr))
    return concat
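# The compute_roc helper called above is not defined in this snippet. Below is a
# minimal sketch of what it is assumed to do, using scikit-learn: treat adversarial
# inputs as the negative class (0) and clean inputs as the positive class (1), score
# both with the detector's "clean" probability, and return the ROC curve plus its
# AUC. Only the name and signature come from the call site; the body is an
# assumption, not the project's actual implementation.
from sklearn.metrics import auc, roc_curve


def compute_roc(adv_pre, x_pre):
    adv_pre = np.asarray(adv_pre)   # detector's "clean" probability on adversarial inputs
    x_pre = np.asarray(x_pre)       # detector's "clean" probability on clean inputs
    scores = np.concatenate([adv_pre, x_pre])
    # Adversarial inputs are labelled 0, clean inputs 1.
    y_true = np.concatenate([np.zeros(adv_pre.shape[0]), np.ones(x_pre.shape[0])])
    fpr, tpr, _ = roc_curve(y_true, scores)
    return fpr, tpr, auc(fpr, tpr)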
print("Total Model Runtime: {}min, {:0.2f}sec".format( int(time_elapsed // 60), time_elapsed % 60)) probabilities = np.squeeze(np.array(probabilities)) predictions = np.argmax(probabilities, axis=1) labels = np.argmax(test_target, axis=1) test_accuracy = np.sum(np.equal(predictions, labels)) / labels.size print("Final Accuracy: {:0.4f}".format(test_accuracy)) # CNV [1, 0, 0 ,0] # DRUSEN [0, 1, 0 ,0] # DME [0, 0, 1 ,0] # NORMAL [0, 0, 0 ,1] LIST_OF_POS_IDX = [0] auc_0, se_0, sp_0, acc_0 = utils.compute_roc(probabilities, labels, LIST_OF_POS_IDX) print( "POS_IDX:{}, Final Model AUC: {:0.4f}, SE: {:0.4f}, SP: {:0.4f}, ACC: {:0.4f}" .format(LIST_OF_POS_IDX, auc_0, se_0, sp_0, acc_0)) LIST_OF_POS_IDX = [1] auc_1, se_1, sp_1, acc_1 = utils.compute_roc(probabilities, labels, LIST_OF_POS_IDX) print( "POS_IDX:{}, Final Model AUC: {:0.4f}, SE: {:0.4f}, SP: {:0.4f}, ACC: {:0.4f}" .format(LIST_OF_POS_IDX, auc_1, se_1, sp_1, acc_1)) LIST_OF_POS_IDX = [2] auc_2, se_2, sp_2, acc_2 = utils.compute_roc(probabilities, labels, LIST_OF_POS_IDX) print(