"""Visualize the layer activations of the most recently trained model."""
import os
import sys

import tensorflow as tf
from keras import backend

from model import accuracy, loss
from preprocess import get_test_train_data
from save_load import get_last_file_number, load
from utils import visualize_layers

if __name__ == '__main__':
    file = r"F:\Projects\python\self_driving_game\data\dataset_mini.pz"

    # Optional command-line argument: which checkpoint number to load.
    if len(sys.argv) <= 1:
        count = None
    else:
        count = int(sys.argv[1])

    # Load the model from the latest experiment folder.
    exp_folder = 'exp_' + '{0:03d}'.format(
        get_last_file_number(prefix='exp_', suffix=''))
    model = load(count, path=exp_folder)

    # Visualize the model on a small batch of test frames.
    x_train, x_test, y_train, y_test = get_test_train_data(file, 10, tanh=True)
    visualization_folder = exp_folder + '/visualization'
    if not os.path.exists(visualization_folder):
        os.makedirs(visualization_folder)
    visualize_layers(model, x_test, path=visualization_folder)
    print('Visualization done...!')
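# --- Hedged sketch: what utils.visualize_layers might look like ---
# The real utils.visualize_layers is not shown in this section; the sketch
# below is only an assumed implementation of the idea the script above relies
# on: feed a test frame through every layer and save the resulting feature
# maps as images under `path`. Everything beyond the (model, x_test, path)
# signature is an assumption.
import os

import matplotlib.pyplot as plt
from keras.models import Model


def visualize_layers(model, x_test, path='.'):
    # Build one sub-model per layer: network input -> that layer's output.
    for idx, layer in enumerate(model.layers):
        activation_model = Model(inputs=model.input, outputs=layer.output)
        activations = activation_model.predict(x_test[:1])  # one sample is enough
        if activations.ndim != 4:
            continue  # only plot 2D feature maps (batch, height, width, channels)
        n_filters = activations.shape[-1]
        cols = min(n_filters, 8)
        rows = (n_filters + cols - 1) // cols
        fig, axes = plt.subplots(rows, cols, squeeze=False,
                                 figsize=(2 * cols, 2 * rows))
        for f in range(n_filters):
            ax = axes[f // cols][f % cols]
            ax.imshow(activations[0, :, :, f], cmap='gray')
            ax.axis('off')
        fig.savefig(os.path.join(path,
                                 'layer_{:02d}_{}.png'.format(idx, layer.name)))
        plt.close(fig)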
# Train the selected model variants on the full dataset, logging each run to
# its own experiment folder.
file = os.environ['DATA_DIR'] + "/dataset_75p_gray.pz"
x_train, x_test, y_train, y_test = get_test_train_data(file, 80000, tanh=False)
# x_train, x_test, y_train, y_test = get_test_train_data(file, 1000, tanh=False)

learning_rate = 0.0005
models = ['relu_with_scaled_sigmoid']
drop_rates = [0.1]

for i in range(len(models)):
    for j in range(len(drop_rates)):
        # Redirect stdout to a per-run logfile inside a fresh experiment folder.
        outfolder = 'exp_' + '{0:03d}'.format(
            get_last_file_number(prefix='exp_', suffix='') + 1)
        os.makedirs(outfolder)
        outfile = outfolder + '/' + 'train_' + '{0:03d}'.format(
            get_last_file_number(path=outfolder) + 1) + '.log'
        print('Printing to logfile at', outfile)
        sys.stdout = open(outfile, 'w+')
        print('Title:',
              '{}_adam_{}_dropout_rate_{}'.format(models[i], learning_rate,
                                                  drop_rates[j]),
              '\n\n')

        # Build the requested model variant.
        if models[i] == 'relu':
            model = relu_model(learning_rate=learning_rate)
        elif models[i] == 'relu_with_scaled_sigmoid':
            model = relu_with_scaled_sigmoid_model(
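# --- Hedged sketch: a possible relu_with_scaled_sigmoid_model factory ---
# The real relu_with_scaled_sigmoid_model is not included in this excerpt.
# The sketch below only illustrates what the name suggests: ReLU hidden
# layers with a sigmoid output rescaled to [-1, 1]. Layer sizes, the input
# shape, the drop_rate handling and the compile settings are all assumptions.
from keras.layers import Conv2D, Dense, Dropout, Flatten, Lambda
from keras.models import Sequential
from keras.optimizers import Adam


def relu_with_scaled_sigmoid_model(learning_rate=0.0005, drop_rate=0.1,
                                   input_shape=(60, 80, 1)):
    model = Sequential()
    model.add(Conv2D(16, (5, 5), strides=2, activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(32, (5, 5), strides=2, activation='relu'))
    model.add(Flatten())
    model.add(Dropout(drop_rate))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Rescale the sigmoid output from [0, 1] to [-1, 1].
    model.add(Lambda(lambda x: 2.0 * x - 1.0))
    model.compile(optimizer=Adam(lr=learning_rate), loss='mse', metrics=['mae'])
    return model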
"""Evaluate a saved checkpoint on the mini dataset."""
import sys

from keras import backend

from model import accuracy, loss
from preprocess import get_test_train_data
from save_load import get_last_file_number, load


def evaluate_model(model, x_test, y_test):
    # Compute loss and accuracy on the held-out test set.
    y_pred = model.predict(x_test)
    y_pred = backend.cast(y_pred, 'float32')
    current_accuracy = accuracy(y_test, y_pred)
    current_loss = loss(y_test, y_pred)
    return [current_loss, current_accuracy]


if __name__ == '__main__':
    # file = r"F:\Projects\python\self_driving_game\data\dataset_mini.pz"
    file = r"data\dataset_mini.pz"

    # Optional command-line argument: which checkpoint number to load.
    if len(sys.argv) <= 1:
        count = None
    else:
        count = int(sys.argv[1])

    # Load the model from the latest experiment folder.
    exp_folder = 'experiments/comparison/activation_and_lrs/exp_' + '{0:03d}'.format(
        get_last_file_number(prefix='exp_', suffix=''))
    model = load(count, path=exp_folder)

    # Evaluate the model.
    x_train, x_test, y_train, y_test = get_test_train_data(file, 10000, tanh=True)
    print('x_test shape', x_test.shape)
    scores = evaluate_model(model, x_test, y_test)

    # Print scores.
    print('\n\n')
    print("Loss: ", backend.get_value(scores[0]))
    print("Accuracy: ", backend.get_value(scores[1]) * 100, "%")
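# --- Hedged sketch: possible shapes of model.accuracy and model.loss ---
# The metric functions imported from model.py are not shown in this section.
# Because evaluate_model wraps their results with backend.get_value(), they
# are presumably Keras-backend expressions. A plausible pair for regression
# style steering targets (everything below, including the 0.1 tolerance, is
# an assumption) could look like this:
from keras import backend


def loss(y_true, y_pred):
    # Mean squared error over all outputs.
    return backend.mean(backend.square(y_pred - y_true))


def accuracy(y_true, y_pred):
    # Fraction of predictions within a fixed tolerance of the target.
    close = backend.less_equal(backend.abs(y_pred - y_true), 0.1)
    return backend.mean(backend.cast(close, 'float32'))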
"""Evaluate a saved checkpoint on the full grayscale dataset."""
import os
import sys

import tensorflow as tf
from keras import backend

from model import accuracy, loss
from preprocess import get_test_train_data
from save_load import get_last_file_number, load


def evaluate_model(model, x_test, y_test):
    # Compute loss and accuracy on the held-out test set.
    y_pred = model.predict(x_test)
    y_pred = backend.cast(y_pred, 'float32')
    current_accuracy = accuracy(y_test, y_pred)
    current_loss = loss(y_test, y_pred)
    return [current_loss, current_accuracy]


if __name__ == '__main__':
    # file = r"F:\Projects\python\self_driving_game\data\dataset_mini.pz"
    file = os.environ['DATA_DIR'] + "/dataset_75p_gray.pz"

    # Optional command-line argument: which checkpoint number to load.
    if len(sys.argv) <= 1:
        count = None
    else:
        count = int(sys.argv[1])

    # Load the model from the latest experiment folder.
    exp_folder = 'experiments/comparison/activation_and_lrs/exp_' + '{0:03d}'.format(
        get_last_file_number(prefix='exp_', suffix=''))
    model = load(count, path=exp_folder)

    # Evaluate the model.
    x_train, x_test, y_train, y_test = get_test_train_data(file, 10000, tanh=True)
    print('x_test shape', x_test.shape)
    scores = evaluate_model(model, x_test, y_test)

    # Print scores.
    print('\n\n')
    print("Loss: ", backend.get_value(scores[0]))
    print("Accuracy: ", backend.get_value(scores[1]) * 100, "%")
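# --- Hedged sketch: possible save_load helpers used above ---
# save_load.get_last_file_number and save_load.load are not shown in this
# section. The scripts use them to pick the newest numbered experiment
# folder/checkpoint and to restore a saved model, so they might look roughly
# like this. The file layout, the 'model_NNN.h5' naming pattern and the
# custom_objects handling are assumptions, not the repository's actual code.
import os
import re

from keras.models import load_model

from model import accuracy, loss


def get_last_file_number(path='.', prefix='', suffix=''):
    # Highest N among directory entries named '<prefix>NNN<suffix>'; -1 if none.
    pattern = re.compile(re.escape(prefix) + r'(\d+)' + re.escape(suffix) + r'$')
    numbers = [int(match.group(1))
               for name in os.listdir(path)
               for match in [pattern.match(name)] if match]
    return max(numbers) if numbers else -1


def load(count=None, path='.'):
    # Restore checkpoint `count` (or the newest one) from the experiment folder,
    # passing the project's custom metrics so Keras can rebuild the model.
    if count is None:
        count = get_last_file_number(path=path, prefix='model_', suffix='.h5')
    filename = os.path.join(path, 'model_{:03d}.h5'.format(count))
    return load_model(filename, custom_objects={'accuracy': accuracy, 'loss': loss})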