# Example #1
"""Setup script for a DQN agent on the GazeboTurtlebotMazeColor-v0 environment.

Creates the environment, loads/initializes run configuration, and builds the
Q-network, experience-replay buffer, and logging utilities.
"""
import os
import random
import sys
import time

import gym  # BUG FIX: gym was used below (gym.make) but never imported
from keras.regularizers import l2

from ExperienceReplay import ExperienceReplay
from Qnetwork import Qnetwork
from Utility import Config
from Utility import Utility

env = gym.make('GazeboTurtlebotMazeColor-v0')

# BUG FIX: env.reset is a method — it must be called, otherwise `observation`
# is a bound method object rather than the initial state.
observation = env.reset()

# Run configuration: output directory, training hyperparameters, persistence.
config = Config()
config.path = "./DQN_maze_target_v9"
if not os.path.exists(config.path):
    os.makedirs(config.path)
config.loadOldFile()
config.saveOldFile()
config.load_model = False          # start training from scratch
config.pre_train_step = 1000       # steps of random play before learning
config.epsilon_decay = 1.0 / 1000  # linear epsilon decay per step
config.gamma = 0.99                # discount factor

# Q-network sized from the environment's state/action spaces.
network = Qnetwork(env.num_state, env.num_action)
replay = ExperienceReplay(config.path)

# Utility logs per-episode rewards and step counts to files under config.path.
utility = Utility(config.path + config.reward_file,
                  config.path + config.step_file)
# Example #2
    def __init__(self):
        # Load the run configuration from the project's Config module.
        self.config = Config.Config()

        # NOTE(review): this binds the Logs *class* itself, not an instance —
        # there are no call parentheses. Possibly intentional (class used as a
        # namespace), but likely a missing `()`; confirm against Log.Logs usage.
        self.log = Log.Logs