def evaluate(genome, config_file_path, driving_agent, normalise_obs,
             domain_params_in_obs, num_trials):

    #Instantiate the env
    env = PlarkEnvSparse(config_file_path=config_file_path,
                         image_based=False,
                         driving_agent=driving_agent,
                         normalise=normalise_obs,
                         domain_params_in_obs=domain_params_in_obs)

    num_inputs = len(env._observation())
    num_hidden_layers = 0
    neurons_per_hidden_layer = 0
    if driving_agent == 'panther':
        agent = PantherNN(num_inputs=num_inputs,
                          num_hidden_layers=num_hidden_layers,
                          neurons_per_hidden_layer=neurons_per_hidden_layer)
    else:
        agent = PelicanNN(num_inputs=num_inputs,
                          num_hidden_layers=num_hidden_layers,
                          neurons_per_hidden_layer=neurons_per_hidden_layer)

    agent.set_weights(genome)

    reward = 0

    for i in range(num_trials):
        env.reset()

        obs = env._observation()
        trial_reward = 0
        while True:
            action = agent.getAction(obs)
            obs, r, done, info = env.step(action)
            trial_reward += r
            if done:
                break
        reward += trial_reward

    #Average trial reward
    reward /= num_trials

    #agent.save_agent(obs_normalise=normalise_obs, domain_params_in_obs=domain_params_in_obs)

    #print("Finished at step num:", step_num)
    #print("Reward:", reward)
    #print("Status:", info['status'])

    #save_video(genome, agent, env, max_num_steps, file_name='evo.mp4')
    #exit()

    return [reward]
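
A minimal sketch of how evaluate() might be driven, assuming evaluate() and a valid config_file_path are in scope and that num_weights matches the agent's get_num_weights() helper referenced in Example #2 (an illustration, not the project's actual training loop):

import random

def random_search(num_weights, num_generations=10, pop_size=20):
    #Sample flat weight vectors at random and keep the best-scoring
    #genome; evaluate() returns a one-element list, hence the [0]
    best_genome, best_reward = None, float('-inf')
    for _ in range(num_generations):
        for _ in range(pop_size):
            genome = [random.gauss(0.0, 1.0) for _ in range(num_weights)]
            reward = evaluate(genome, config_file_path, 'panther',
                              normalise_obs=True,
                              domain_params_in_obs=False,
                              num_trials=3)[0]
            if reward > best_reward:
                best_genome, best_reward = genome, reward
    return best_genome, best_reward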
Example #2
    #Instantiate a dummy env to infer the observation size
    dummy_env = PlarkEnvSparse(config_file_path=config_file_path,
                               image_based=False,
                               driving_agent='panther')

    num_inputs = len(dummy_env._observation())
    num_hidden_layers = 0
    neurons_per_hidden_layer = 0

    panther_dummy_agent = PantherNN(
        num_inputs=num_inputs,
        num_hidden_layers=num_hidden_layers,
        neurons_per_hidden_layer=neurons_per_hidden_layer)
    #I need to figure out how to get rid of the 139 magic number
    pelican_dummy_agent = PelicanNN(
        num_inputs=139,
        num_hidden_layers=num_hidden_layers,
        neurons_per_hidden_layer=neurons_per_hidden_layer)

    #num_panther_weights = panther_dummy_agent.get_num_weights()
    #num_pelican_weights = pelican_dummy_agent.get_num_weights()

    #Let's try instantiating dummy agents and setting them to compete
    #against each other
    dummy_env.reset()

    max_num_steps = 200

    reward = 0
    obs = dummy_env._observation()
    for step_num in range(max_num_steps):
        action = panther_dummy_agent.getAction(obs)
        print(action)
        obs, r, done, info = dummy_env.step(action)
        reward += r
        if done:
            break
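
One way to remove the 139 magic number flagged above, sketched under the assumption that a second env can be created with driving_agent='pelican' and queried with the same _observation() call used in Example #1:

pelican_env = PlarkEnvSparse(config_file_path=config_file_path,
                             image_based=False,
                             driving_agent='pelican')

#Measure the pelican's observation length instead of hardcoding 139
num_pelican_inputs = len(pelican_env._observation())
pelican_dummy_agent = PelicanNN(
    num_inputs=num_pelican_inputs,
    num_hidden_layers=num_hidden_layers,
    neurons_per_hidden_layer=neurons_per_hidden_layer)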
Example #3
    driving_agent = 'pelican'

    random_panther_start_position = True
    random_pelican_start_position = True

    env = PlarkEnvSparse(
        config_file_path=config_file_path,
        driving_agent=driving_agent,
        random_panther_start_position=random_panther_start_position,
        random_pelican_start_position=random_pelican_start_position)

    #This is the only difference from a normal environment - the most
    #recently created game has to be replaced with a rule-based game
    env.env.activeGames[-1] = create_rule_based_game(config_file_path)

    env.reset()

    reward = 0
    while True:
        _, r, done, info = env.step(None)
        reward += r
        if done:
            break

    print("Status:", info['status'])
    print("Reward:", reward)

    video_path = '/rule_v_rule.mp4'
    helper.make_video_plark_env(None, env, video_path, n_steps=200)
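
The rollout above generalises to a small helper for running several rule-vs-rule episodes; a sketch under Example #3's setup, with the added assumption that the rule-based game survives env.reset() (if it does not, the activeGames replacement would need to be re-applied before each episode):

def run_rule_based_episode(env):
    #One self-playing episode; step(None) works because both sides
    #are driven by the built-in rules
    env.reset()
    total_reward = 0
    while True:
        _, r, done, info = env.step(None)
        total_reward += r
        if done:
            break
    return total_reward, info['status']

rewards = [run_rule_based_episode(env)[0] for _ in range(10)]
print("Mean reward over 10 episodes:", sum(rewards) / len(rewards))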