def save_video(genome, agent, env, num_steps, file_name='evo_run.mp4'):
    # If a genome (flat weight vector) is provided, load it into the agent,
    # then roll the agent out in the environment and record a video.
    if genome is not None:
        agent.set_weights(genome)
    video_path = '/' + file_name
    helper.make_video_plark_env(agent, env, video_path, n_steps=num_steps)
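
A minimal calling sketch for save_video; the genome file, hyperparameter values and constructor arguments below are placeholders (reusing the PantherNN / PlarkEnvSparse signatures seen in the later examples), not taken from the snippets themselves.

import numpy as np

# --- Usage sketch (illustrative only) ----------------------------------------
config_file_path = 'game_config.json'    # placeholder: path to a Plark game config
num_inputs = 100                         # placeholder observation size
num_hidden_layers = 2
neurons_per_hidden_layer = 64

panther_agent = PantherNN(num_inputs=num_inputs,
                          num_hidden_layers=num_hidden_layers,
                          neurons_per_hidden_layer=neurons_per_hidden_layer)
env = PlarkEnvSparse(config_file_path=config_file_path,
                     driving_agent='panther')   # assumed valid, mirroring 'pelican' in Example #2

genome = np.load('best_genome.npy')      # assumed: flat weight vector from an evolutionary run
save_video(genome, panther_agent, env, num_steps=200, file_name='evo_run.mp4')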
Example #2
    driving_agent = 'pelican'

    random_panther_start_position = True
    random_pelican_start_position = True

    env = PlarkEnvSparse(
        config_file_path=config_file_path,
        driving_agent=driving_agent,
        random_panther_start_position=random_panther_start_position,
        random_pelican_start_position=random_pelican_start_position)

    # The only difference from a standard environment setup: replace the most
    # recently created game with a rule-based game.
    env.env.activeGames[-1] = create_rule_based_game(config_file_path)

    env.reset()

    reward = 0
    # No external action is needed: the rule-based game controls both agents.
    while True:
        _, r, done, info = env.step(None)
        reward += r
        if done:
            break

    print(info['status'])
    print("Reward:", reward)

    video_path = '/rule_v_rule.mp4'
    helper.make_video_plark_env(None, env, video_path, n_steps=200)
Example #3
    panther_dummy_agent = PantherNN(
        num_inputs=num_inputs,
        num_hidden_layers=num_hidden_layers,
        neurons_per_hidden_layer=neurons_per_hidden_layer)

    #pelican_dummy_agent = PelicanNN(file_dir_name='pelican_20210302_195211', game=game,
    #                                driving_agent=True)

    #Set agent
    #game.pelicanAgent = pelican_dummy_agent

    dummy_env.reset()
    '''
    max_num_steps = 1

    reward = 0
    obs = dummy_env._observation()
    for step_num in range(max_num_steps):
        action = panther_dummy_agent.getAction(obs)
        obs, r, done, info = dummy_env.step(action)
        reward += r
        if done:
            break
    '''

    video_path = '/load_evo_non_driving_new.mp4'
    helper.make_video_plark_env(panther_dummy_agent,
                                dummy_env,
                                video_path,
                                n_steps=1)
Example #4
    #                                num_hidden_layers=num_hidden_layers,
    #                                neurons_per_hidden_layer=neurons_per_hidden_layer)

    pelican_dummy_agent = PelicanNN(
        file_dir_name='pelican_20210309_100850_gen_2',
        game=game,
        driving_agent=True)

    #Set agent
    #game.pelicanAgent = pelican_dummy_agent

    dummy_env.reset()
    '''
    max_num_steps = 1

    reward = 0
    obs = dummy_env._observation()
    for step_num in range(max_num_steps):
        action = pelican_dummy_agent.getAction(obs)
        obs, r, done, info = dummy_env.step(action)
        reward += r
        if done:
            break
    '''

    video_path = '/load_evo_non_driving_new.mp4'
    helper.make_video_plark_env(pelican_dummy_agent,
                                dummy_env,
                                video_path,
                                n_steps=200)