def evaluate_model(training_file, test_file, epochs, augment, batch_size, model_params):
    """Train an estimator for `epochs` epochs, periodically reporting metrics.

    Every 10th epoch (and on the final epoch) the model is evaluated on both
    the training and test sets; the metrics from the most recent evaluation
    are returned.

    Args:
        training_file: path to the CSV/TFRecord file of training examples.
        test_file: path to the held-out evaluation file.
        epochs: number of single-pass training iterations to run.
        augment: whether to apply data augmentation during training.
        batch_size: minibatch size for both training and evaluation.
        model_params: dict of hyperparameters consumed by deep_model.estimator
            (learning_rate, dropout_rate, residual_blocks, filters,
            batch_norm, fc_layers).

    Returns:
        dict with keys 'training_loss', 'training_accuracy', 'test_loss',
        'test_accuracy' from the last evaluation performed.
    """
    print("Learning rate: {}, dropout rate: {}, {} residual blocks, {} filters, "
          "bn: {}, fc: {}, augmenting {}, batch_size: {}".format(
              model_params['learning_rate'], model_params['dropout_rate'],
              model_params['residual_blocks'], model_params['filters'],
              model_params['batch_norm'], model_params['fc_layers'],
              augment, batch_size))

    # Build the custom deep neural network classifier.
    classifier = deep_model.estimator(model_params)

    results = {}
    for epoch in range(epochs):
        # One pass over the training data (shuffle=True, 1 repeat).
        classifier.train(
            input_fn=lambda: deep_model.my_input_fn(
                training_file, True, 1, augment, batch_size))

        # Evaluate periodically and always on the final epoch.
        if epoch % 10 == 0 or epoch == (epochs - 1):
            print("Epoch: {}".format(epoch))
            results.update(_evaluate_split(
                classifier, training_file, batch_size, 'train', 'training'))
            results.update(_evaluate_split(
                classifier, test_file, batch_size, 'test', 'test'))

    return results


def _evaluate_split(classifier, data_file, batch_size, eval_name, key_prefix):
    """Evaluate `classifier` on one data split and return prefixed metrics.

    Args:
        classifier: the trained estimator.
        data_file: file to evaluate on.
        batch_size: minibatch size for evaluation.
        eval_name: name passed to classifier.evaluate (e.g. 'train', 'test').
        key_prefix: prefix for the returned metric keys
            (e.g. 'training' -> 'training_loss', 'training_accuracy').

    Returns:
        dict mapping '<key_prefix>_loss' and '<key_prefix>_accuracy' to the
        corresponding evaluation metrics.
    """
    # No shuffling, 4 repeats, no augmentation during evaluation.
    evaluate_result = classifier.evaluate(
        input_fn=lambda: deep_model.my_input_fn(
            data_file, False, 4, False, batch_size),
        name=eval_name)
    print("Evaluation results")
    for key in evaluate_result:
        print(" {}, was: {}".format(key, evaluate_result[key]))
    return {
        key_prefix + '_loss': evaluate_result['loss'],
        key_prefix + '_accuracy': evaluate_result['accuracy'],
    }
# NOTE(review): this chunk begins mid-statement — the opening of the
# parser.add_argument(...) call for the minibatch-size flag lies before
# this view; only its trailing arguments are visible here.
                    default=64, help="Minibatch size")
parser.add_argument('--params', '-p', default='params.json',
                    help="Parameters file")
parser.add_argument('--gamma', '-g', default=0.9, type=float,
                    help="Gamma, discount factor for future rewards")
args = parser.parse_args()

# Load hyperparameters (learning rate, architecture, etc.) from the JSON
# parameters file given on the command line.
with open(args.params, 'r') as f:
    params = json.load(f)

# Build the estimator from the loaded hyperparameters.
estimator = deep_model.estimator(params)

# Time the full training run, including loading the replay memory.
start = datetime.datetime.now()

# Load training examples (replay memory) from the input CSV.
# Augmentation is currently disabled — presumably intentional; confirm
# before re-enabling the commented-out call below.
replay_memory = training_data.training_data()
replay_memory.import_csv(args.input)
#replay_memory.augment()

train(estimator, replay_memory, args.gamma, args.iterations,
      args.minibatch_size)

end = datetime.datetime.now()
taken = end - start
print("took {}".format(taken))