Example 1
import env_doom
import libs.libs_agent.agent_dqn
import libs.libs_agent.agent
import libs.libs_rysy_python.rysy as rysy

env = env_doom.EnvDoom("deathmatch")
env.print_info()

network_path = "doom_deathmatch_network/"

verbose = False



#DQN hyperparameters
gamma = 0.99
replay_buffer_size  = 16384
epsilon_training    = 1.0
epsilon_testing     = 0.1
epsilon_decay       = 0.99999
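#assumption: epsilon is decayed multiplicatively once per step; if so, a decay
#of 0.99999 brings it from 1.0 down to about 0.1 after roughly
#ln(0.1)/ln(0.99999) ≈ 230000 steps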

#init DQN agent
agent = libs.libs_agent.agent_dqn.DQNAgent(env, network_path + "network_config.json", gamma, replay_buffer_size, epsilon_training, epsilon_testing, epsilon_decay)

#disabled block: load a trained network and switch the agent to best-action mode
'''
agent.load(network_path + "trained/")

agent.run_best_enable()
'''
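
The example above breaks off inside the disabled block. As a minimal sketch, assuming the same EnvDoom/DQNAgent calls used in the other examples on this page (agent.load, agent.run_best_enable, agent.main, env.render_state, env._print), an evaluation loop could look like the following; it is not the original continuation of the snippet:

#load the previously trained network and switch the agent to best-action mode
agent.load(network_path + "trained/")
agent.run_best_enable()

#run the trained agent and render the environment
while True:
    agent.main()

    env.render_state(0)

    if env.get_iterations() % 256 == 0:
        env._print()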
Example 2
import env_doom
import libs.libs_agent.agent_dqn
import libs.libs_rysy_python.rysy as rysy

#network_path = "network_basic/"
#env = env_doom.EnvDoom("basic")
network_path = "network_defend_the_line/"
env = env_doom.EnvDoom("defend_the_line")
#network_path = "network_deadly_corridor/"
#env = env_doom.EnvDoom("deadly_corridor")

env.print_info()

#DQN hyperparameters
gamma = 0.99
replay_buffer_size  = 2048
epsilon_training    = 1.0
epsilon_testing     = 0.1
epsilon_decay       = 0.99999

#init DQN agent
agent = libs.libs_agent.agent_dqn.DQNAgent(env, network_path)

agent.load(network_path)

#agent.run_best_enable()
#agent.kernel_visualisation(network_path + "kernel_visualisation/")
#agent.activity_visualisation(network_path + "activity_visualisation/")


#reset score
Example 3
import env_doom
import libs.libs_agent.agent
import time

#env = env_doom.EnvDoom("basic")
#env = env_doom.EnvDoom("health_gathering")
#env = env_doom.EnvDoom("defend_the_center")
#env = env_doom.EnvDoom("defend_the_line")
env = env_doom.EnvDoom("deadly_corridor")
#env = env_doom.EnvDoom("deathmatch")
env.print_info()

agent = libs.libs_agent.agent.Agent(env)

while True:
    agent.main()

    env.render_state(0)
    #if env.get_reward() != 0:
    #    print(env.get_iterations(), "reward = ", env.get_reward(), "\n\n")

    if env.get_iterations() % 256 == 0:
        env._print()
    #time.sleep(0.01)
Example 4
import env_doom
import libs.libs_agent.agent_dqn
import libs.libs_agent.agent
import libs.libs_rysy_python.rysy as rysy

network_path = "network_defend_the_center/"
env = env_doom.EnvDoom("defend_the_center")
env.print_info()

#init DQN agent
agent = libs.libs_agent.agent_dqn.DQNAgent(
    env, network_path + "network_config.json")

training_progress_log = rysy.Log(network_path + "progress_training.log")
testing_progress_log = rysy.Log(network_path + "progress_testing.log")

#process training
total_games_to_play = 12000
while env.get_games_count() < total_games_to_play:
    agent.main()

    if env.get_iterations() % 256 == 0:
        str_progress = str(env.get_iterations()) + " "
        str_progress += str(env.get_games_count()) + " "
        str_progress += str(env.get_score()) + " "
        str_progress += str(env.get_kill_count()) + " "
        str_progress += str(env.get_death_count()) + " "
        str_progress += str(env.get_game_kd_ratio()) + " "
        str_progress += str(env.get_kd_ratio()) + " "
        str_progress += "\n"
        training_progress_log.put_string(str_progress)
Example 5
import env_doom
import libs.libs_agent.agent_dqn
import libs.libs_agent.agent
import libs.libs_rysy_python.rysy as rysy

network_path = "network_health_gathering/"
env = env_doom.EnvDoom("health_gathering")
env.print_info()


#init DQN agent
agent = libs.libs_agent.agent_dqn.DQNAgent(env, network_path + "network_config.json")

training_progress_log = rysy.Log(network_path + "progress_training.log")
testing_progress_log = rysy.Log(network_path + "progress_testing.log")

#process training
total_games_to_play = 12000
while env.get_games_count() < total_games_to_play:
    agent.main()


    if env.get_iterations() % 256 == 0:
        str_progress = str(env.get_iterations()) + " "
        str_progress += str(env.get_games_count()) + " "
        str_progress += str(env.get_score()) + " "
        str_progress += str(env.get_kill_count()) + " "
        str_progress += str(env.get_death_count()) + " "
        str_progress += str(env.get_game_kd_ratio()) + " "
        str_progress += str(env.get_kd_ratio()) + " "
        str_progress += "\n"
        training_progress_log.put_string(str_progress)
Example 6
import env_doom
import libs.libs_agent.agent_dqn
import libs.libs_agent.agent
import libs.libs_rysy_python.rysy as rysy

network_path = "network_basic/"
env = env_doom.EnvDoom("basic")
env.print_info()

#init DQN agent
agent = libs.libs_agent.agent_dqn.DQNAgent(env, network_path)

training_progress_log = rysy.Log(network_path + "progress_training.log")
testing_progress_log = rysy.Log(network_path + "progress_testing.log")

#process training
total_games_to_play = 12000
while env.get_games_count() < total_games_to_play:
    agent.main()

    if env.get_iterations() % 256 == 0:
        str_progress = str(env.get_iterations()) + " "
        str_progress += str(env.get_games_count()) + " "
        str_progress += str(env.get_score()) + " "
        str_progress += str(env.get_kill_count()) + " "
        str_progress += str(env.get_death_count()) + " "
        str_progress += str(env.get_game_kd_ratio()) + " "
        str_progress += str(env.get_kd_ratio()) + " "
        str_progress += "\n"
        training_progress_log.put_string(str_progress)
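
The training examples above write one whitespace-separated row every 256 iterations: iterations, games played, score, kill count, death count, per-game K/D ratio and overall K/D ratio. As a minimal sketch of reading such a log back, assuming plain-text output from rysy.Log and hypothetical column names (only the column order comes from the loops above):

#parse progress_training.log into a list of dictionaries
#the column order follows the str_progress string built in the training loops
columns = ["iterations", "games", "score", "kills", "deaths", "game_kd", "kd"]

records = []
with open("network_basic/progress_training.log") as log_file:
    for line in log_file:
        values = line.split()
        if len(values) == len(columns):
            records.append(dict(zip(columns, (float(v) for v in values))))

#example usage: print the most recent score and overall K/D ratio
if records:
    print(records[-1]["score"], records[-1]["kd"])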