Example #1
0
# Collect the dataset, timing how long the load/split takes.
use_filtered = is_filtered()

start_time = timer()

print("Collecting Dataset...")

if use_filtered:
    # Shuffle-split into 0.8 train / 0.1 test / 0.1 validation (seeded for reproducibility).
    X_train, X_val, X_test, Y_train, Y_val, Y_test = get_dataset_reshaped(seed=100)
else:
    # Split with the same fixed indices used in the paper
    # (only valid for the unfiltered CullPDB6133 set).
    X_train, X_val, X_test, Y_train, Y_val, Y_test = get_resphaped_dataset_paper()

end_time = timer()
print("\n\nTime elapsed getting Dataset: " + f"{end_time - start_time:.2f}" + " s")

# Rebuild the CNN architecture and restore the trained weights.
net = model.CNN_model()

# Load the best checkpoint saved during training.
net.load_weights("NewModelConvConv-best.hdf5")

# Score the network on the held-out test split.
scores = net.evaluate(X_test, Y_test)
print(f"Loss: {scores[0]}, Accuracy: {scores[1]}, MAE: {scores[2]}")

# Evaluate the same network on the independent CB513 benchmark set.
cb_inputs, cb_labels = get_cb513()

cb_scores = net.evaluate(cb_inputs, cb_labels)
print(f"CB513 -- Loss: {cb_scores[0]}, Accuracy: {cb_scores[1]}, MAE: {cb_scores[2]}")


import numpy as np
from keras import optimizers, callbacks
from timeit import default_timer as timer
from dataset import get_dataset, split_with_shuffle, get_data_labels, split_like_paper, get_cb513
import model

# Build train/test/validation splits from the full dataset,
# shuffling with a fixed seed so the split is reproducible.
full_dataset = get_dataset()

train_split, test_split, val_split = split_with_shuffle(full_dataset, 100)

X_train, Y_train = get_data_labels(train_split)
X_test, Y_test = get_data_labels(test_split)
X_val, Y_val = get_data_labels(val_split)

# Rebuild the CNN and restore the trained weights.
net = model.CNN_model()

# Load the best checkpoint saved during training.
net.load_weights("Whole_CullPDB-best.hdf5")

# Report Q8 (8-state secondary structure) accuracy on the test split.
predictions = net.predict(X_test)

print(f"\n\nQ8 accuracy: {model.Q8_accuracy(Y_test, predictions)}\n\n")

# Repeat the Q8 accuracy measurement on the CB513 benchmark set.
cb513_inputs, cb513_labels = get_cb513()

predictions = net.predict(cb513_inputs)

print(f"\n\nQ8 accuracy on CB513: {model.Q8_accuracy(cb513_labels, predictions)}\n\n")