Example #1
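# Build the test inputs: load the image list at `path` and convert it to (X, y) arrays.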
def get_test(path):
    image_list = create_image_list(path=path, use_mr=False)
    X, y = get_input(image_list)
    return X, y
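
# Fine-tune on each cluster in sequence: the initial weights0 seed training on cluster1,
# and the resulting weights1 seed training on cluster2.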
# Train on cluster1
image_list1 = create_image_clusters(image_list, image_names, clusters, 1)
print("Distribution of classes (0,1,2): ", get_counts(image_list1))
model1, weights1 = train_model(feature_layers, classification_layers,
                               image_list1, nb_epoch, nb_classes, img_rows,
                               img_cols, weights0)

# Train on cluster2
image_list2 = create_image_clusters(image_list, image_names, clusters, 2)
print("Distribution of classes (0,1,2): ", get_counts(image_list2))
model_final, weights2 = train_model(feature_layers, classification_layers,
                                    image_list2, nb_epoch, nb_classes,
                                    img_rows, img_cols, weights1)

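# Persist the final model: architecture as JSON, weights as HDF5.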
json_string = model_final.to_json()
with open('cluster_model_architecture.json', 'w') as f:
    f.write(json_string)
model_final.save_weights('cluster_model_weights.h5')

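# Evaluate on a held-out scanner: class counts from the training list give priors that
# thresholding() uses to map predicted probabilities to class labels.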
image_list_test = create_image_list(path='/data/Test/ScannerA', use_mr=False)
print("Calculating distribution of train")
image_names, count_0, count_1, count_2 = test_cnn.get_names_count(image_list)
print("Creating X, y for test {}".format(len(image_list_test)))
X, y = get_input(image_list_test)
print("Predicting on test")
y_prob = test_cnn.predict_prob(model_final, X, y)
y_predicted = test_cnn.thresholding(y_prob, count_0 * 1. / y.size,
                                    count_1 * 1. / y.size,
                                    count_2 * 1. / y.size)
test_cnn.get_metrics(y, y_predicted)
Example #3
import pickle

import numpy as np
from sklearn.metrics import (precision_score, recall_score, accuracy_score,
                             confusion_matrix)


def get_metrics(y_true, y_predicted, labels=(0, 1, 2), method="macro"):  # defaults assumed
    precision = precision_score(y_true, y_predicted, labels=labels, average=method)
    recall = recall_score(y_true, y_predicted, labels=labels, average=method)
    accuracy = accuracy_score(y_true, y_predicted)
    print("Precision Score {}".format(precision))
    print("Recall Score {}".format(recall))
    print("Accuracy Score {}".format(accuracy))
    y_0 = y_predicted[y_true==0]
    y_1 = y_predicted[y_true==1]
    y_2 = y_predicted[y_true==2]
    print("Non-mitosis: True = {}, Predicted = {}".format((y_true == 0).sum(), (y_0 == 0).sum()))
    print("Mitosis: True = {}, Predicted = {}".format((y_true == 1).sum(), (y_1 == 1).sum()))
    print("Background: True = {}, Predicted = {}".format((y_true == 2).sum(), (y_2 == 2).sum()))
    print(confusion_matrix(y_true, y_predicted))

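# Entry point: reload the serialized CNN, rebuild the ScannerA test set, and report metrics.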
if __name__ == '__main__':
    print("Unpacking the model")
    model = unpack_model('../runs/gpu0/cnn1_model_architecture.json', '../runs/gpu0/cnn1_model_weights.h5')
    print("Unpickling the image list")
    with open('../runs/gpu1/image_list.pkl', 'rb') as f:
        image_list_train = pickle.load(f)
    print("Creating test image list")
    image_list_test = create_image_list(path='/data/Test/ScannerA', sample_size=[-1, -1, 0, 0, 0], use_mr=False)
    print("Calculating distribution of train")
    image_names, count_0, count_1, count_2 = get_names_count(image_list_train)
    print("Creating X, y for test {}".format(len(image_list_test)))
    X, y = get_input(image_list_test)
    print("Predicting on test")
    y_prob = predict_prob(model, X, y)
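    # Take the most probable class per sample; the prior-based thresholding below is a commented-out alternative.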
    y_predicted = np.argmax(y_prob, axis=1)
    #y_predicted = thresholding(y_prob, count_0*1./y.size, count_1*1./y.size, count_2*1./y.size)
    get_metrics(y, y_predicted, [0,1], "binary")