parameters.momentum,
    parameters.clip_delta,
    parameters.freeze_interval,
    parameters.batch_size,
    parameters.update_rule,
    rng,
    neural_network=myNN)

# Greedy evaluation policy: epsilon is 0.00, so the agent always exploits
# (never explores) when it is being tested.
test_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.00)

# --- Instantiate agent ---
# The fourth argument is the history/memory length: the largest first
# dimension across all observation channels returned by
# env.inputDimensions() (assumes inputDimensions()[i][0] is the per-channel
# history size -- TODO confirm against the environment's API).
agent = NeuralAgent(
    env,
    qnetwork,
    parameters.replay_memory_size,
    max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
    parameters.batch_size,
    rng,
    test_policy=test_policy)

# --- Create unique filename for FindBestController ---
# NOTE(review): this cannot be the builtin hash() -- it takes no
# hash_name keyword. Presumably joblib.hash is imported under this name;
# verify the import at the top of the file.
h = hash(vars(parameters), hash_name="sha1")
fname = "PLE_" + h
print("The parameters hash is: {}".format(h))
print("The parameters are: {}".format(parameters))

# --- Bind controllers to the agent ---
# Before every training epoch (periodicity=1), we want to print a summary of
# the agent's epsilon, discount and learning rate as well as the training
# epoch number.
agent.attach(bc.VerboseController(
    evaluate_on='episode',
"display_screen": True,
    "force_fps": True,
    "fps": 30
})

# --- Instantiate qnetwork ---
# Q-network built from the RMSProp-style hyperparameters held in
# `parameters` (decay/epsilon/momentum), gradient clipping (clip_delta),
# and the target-network freeze interval.
qnetwork = MyQNetwork(
    env,
    parameters.rms_decay,
    parameters.rms_epsilon,
    parameters.momentum,
    parameters.clip_delta,
    parameters.freeze_interval,
    parameters.batch_size,
    parameters.network_type,
    parameters.update_rule,
    parameters.batch_accumulator,
    rng)

# --- Instantiate agent ---
# History length: the largest first dimension across all observation
# channels (assumes inputDimensions()[i][0] is the per-channel history
# size -- TODO confirm against the environment's API).
agent = ALEAgent(
    env,
    qnetwork,
    parameters.replay_memory_size,
    max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
    parameters.batch_size,
    rng)

# --- Create unique filename for FindBestController ---
# NOTE(review): this cannot be the builtin hash() -- it takes no
# hash_name keyword. Presumably joblib.hash is imported under this name;
# verify the import at the top of the file.
h = hash(vars(parameters), hash_name="sha1")
# NOTE(review): prefix says "PLE_" but the agent above is an ALEAgent --
# looks like a copy-paste leftover; confirm the intended prefix before
# relying on these filenames.
fname = "PLE_" + h
print("The parameters hash is: {}".format(h))
print("The parameters are: {}".format(parameters))

# --- Bind controllers to the agent ---
# Before every training epoch (periodicity=1), we want to print a summary of
# the agent's epsilon, discount and learning rate as well as the training
# epoch number.
# NOTE(review): this uses camelCase `evaluateOn` while sibling chunks use
# snake_case `evaluate_on` -- the keyword name depends on the installed
# library version; confirm which API this file targets.
agent.attach(bc.VerboseController(evaluateOn='epoch', periodicity=1))

# During training epochs, we want to train the agent after every
# [parameters.update_frequency] action it takes.
parameters.momentum,
    parameters.clip_delta,
    parameters.freeze_interval,
    parameters.batch_size,
    parameters.network_type,
    parameters.update_rule,
    parameters.batch_accumulator,
    rng,
)

# --- Instantiate agent ---
# History length: the largest first dimension across all observation
# channels (assumes inputDimensions()[i][0] is the per-channel history
# size -- TODO confirm against the environment's API).
agent = ALEAgent(
    env,
    qnetwork,
    parameters.replay_memory_size,
    max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
    parameters.batch_size,
    rng,
)

# --- Create unique filename for FindBestController ---
# NOTE(review): this cannot be the builtin hash() -- it takes no
# hash_name keyword. Presumably joblib.hash is imported under this name;
# verify the import at the top of the file.
h = hash(vars(parameters), hash_name="sha1")
# NOTE(review): prefix says "PLE_" but the agent above is an ALEAgent --
# possible copy-paste leftover; confirm the intended prefix.
fname = "PLE_" + h
print("The parameters hash is: {}".format(h))
print("The parameters are: {}".format(parameters))

# --- Bind controllers to the agent ---
# Before every training epoch (periodicity=1), we want to print a summary of
# the agent's epsilon, discount and learning rate as well as the training
# epoch number.
agent.attach(bc.VerboseController(evaluate_on="epoch", periodicity=1))