def evaluate(genome, config_file_path, driving_agent, normalise_obs,
             domain_params_in_obs, num_trials):
    """Evaluate a genome on the Plark sparse-reward environment.

    Builds the environment and the neural-network agent for the driving
    side, loads *genome* as the network weights, plays ``num_trials``
    episodes, and returns the mean episode reward.

    Args:
        genome: Flat sequence of network weights for the agent.
        config_file_path: Path to the game JSON config.
        driving_agent: Which side the genome controls, 'panther' or
            anything else for pelican.
        normalise_obs: Whether the env normalises observations.
        domain_params_in_obs: Whether domain parameters are appended to
            the observation vector.
        num_trials: Number of episodes to average over (must be > 0).

    Returns:
        A one-element list ``[mean_reward]`` (fitness-tuple convention
        used by evolutionary frameworks such as DEAP).
    """
    # Instantiate the environment
    env = PlarkEnvSparse(config_file_path=config_file_path,
                         image_based=False,
                         driving_agent=driving_agent,
                         normalise=normalise_obs,
                         domain_params_in_obs=domain_params_in_obs)

    # Network topology: a single linear layer sized to the observation.
    num_inputs = len(env._observation())
    num_hidden_layers = 0
    neurons_per_hidden_layer = 0

    # BUG FIX: the original tested the undefined name 'trained_agent',
    # which raised NameError; the parameter is 'driving_agent'.
    if driving_agent == 'panther':
        agent = PantherNN(num_inputs=num_inputs,
                          num_hidden_layers=num_hidden_layers,
                          neurons_per_hidden_layer=neurons_per_hidden_layer)
    else:
        agent = PelicanNN(num_inputs=num_inputs,
                          num_hidden_layers=num_hidden_layers,
                          neurons_per_hidden_layer=neurons_per_hidden_layer)

    agent.set_weights(genome)

    reward = 0

    for _ in range(num_trials):
        env.reset()

        obs = env._observation()
        trial_reward = 0
        while True:
            action = agent.getAction(obs)
            obs, r, done, info = env.step(action)
            trial_reward += r
            if done:
                break
        reward += trial_reward

    # Average reward across trials
    reward /= num_trials

    return [reward]
# --- Example #2 (snippet separator left over from the original paste; not code) ---
if __name__ == '__main__':

    # --- Environment configuration ---
    # Path to the game config; presumably resolved relative to the deployment
    # root — TODO confirm it exists on the target machine.
    config_file_path = '/Components/plark-game/plark_game/game_config/10x10/nn/nn_coevolution_balanced.json'
    normalise_obs = True

    # Instantiate dummy env and dummy agent.
    # This is needed to ascertain the number of weights required by the
    # optimisation procedure (the observation length fixes the input size).
    dummy_env = PlarkEnvSparse(config_file_path=config_file_path,
                               image_based=False,
                               driving_agent='panther',
                               normalise=normalise_obs)

    # --- Neural-net topology ---
    # Input size is taken from the panther-side observation vector; zero
    # hidden layers means a single linear layer.
    num_inputs = len(dummy_env._observation())
    num_hidden_layers = 0
    neurons_per_hidden_layer = 0

    panther_dummy_agent = PantherNN(
        num_inputs=num_inputs,
        num_hidden_layers=num_hidden_layers,
        neurons_per_hidden_layer=neurons_per_hidden_layer)
    # NOTE(review): 139 is a magic number — presumably the pelican-side
    # observation length, which differs from the panther's. It should be
    # derived from a pelican-driven env instead of hard-coded; TODO confirm.
    pelican_dummy_agent = PelicanNN(
        num_inputs=139,
        num_hidden_layers=num_hidden_layers,
        neurons_per_hidden_layer=neurons_per_hidden_layer)

    #num_panther_weights = panther_dummy_agent.get_num_weights()
    #num_pelican_weights = pelican_dummy_agent.get_num_weights()