# rendering the model
from ped_car_2 import PedestrianEnv
import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from keras.models import load_model
from My_DDQN import DDQN

env = PedestrianEnv()
observation = env.reset()
observation_space = len(observation)  # number of parameters in the state
action_space = 10                     # number of discrete velocities the pedestrian can take

agent = DDQN(observation_space, action_space)
agent.exploration_rate = 0            # act greedily while rendering
agent.model = load_model('ddqn_ped_Learner.h5')

# episode outcome counters
death_toll = 0
safe_chicken = 0
done_count = 0
count = 0

# recorded trajectories for rendering
Ped_Pos = []
Car_xPos = []
Car_yPos = []

d = env.d
W = env.W

env = PedestrianEnv()
episodes = 3
for e in range(episodes):
    state = env.reset()
    state = np.reshape(state, [1, observation_space])
# rendering the model
from ped_car_v11 import PedestrianEnv
import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from keras.models import load_model
from My_DDQN import DDQN

env = PedestrianEnv()
observation = env.reset()
observation_space = len(observation)  # number of parameters in the state
action_space = 8                      # number of discrete velocities the pedestrian can take

agent = DDQN(observation_space, action_space)
agent.exploration_rate = 0            # act greedily while rendering
agent.model = load_model('ddqn_ped_v11.h5')

# episode outcome counters
death_toll = 0
safe_chicken = 0
done_count = 0
count = 0

# recorded trajectories for rendering
Ped_Pos = []
Car_xPos = []
Car_yPos = []

d = env.d
W = env.W

env = PedestrianEnv()
episodes = 3
for e in range(episodes):
    state = env.reset()
    state = np.reshape(state, [1, observation_space])
# rendering the model
from ped_car_v11 import PedestrianEnv
import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from keras.models import load_model
from My_DDQN import DDQN

env = PedestrianEnv()
observation = env.reset(np.random.randint(1, 5))  # reset with a random initial condition index in [1, 4]
observation_space = len(observation)  # number of parameters in the state
action_space = 3                      # number of discrete velocities the pedestrian can take

agent = DDQN(observation_space, action_space)
agent.exploration_rate = 0            # act greedily while rendering
agent.model = load_model('ddqn_ped_APPROX.h5')

# episode outcome counters
death_toll = 0
safe_chicken = 0
done_count = 0
count = 0

# recorded trajectories for rendering
Ped_Pos = []
Car_xPos = []
Car_yPos = []

d = env.d
W = env.W

env = PedestrianEnv()
episodes = 100
for e in range(episodes):
    state = env.reset(np.random.randint(1, 5))
    state = np.reshape(state, [1, observation_space])
# rendering the model
from car_ped_2 import CarEnv
import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from keras.models import load_model
from My_DDQN import DDQN

env = CarEnv()
observation = env.reset()
observation_space = len(observation)  # number of parameters in the state
action_space = 10                     # number of discrete actions the car can take

agent = DDQN(observation_space, action_space)
agent.exploration_rate = 0            # act greedily while rendering
agent.model = load_model('ddqn_car_Learner_1.h5')

# episode outcome counters
death_toll = 0
safe_chicken = 0
safe_car = 0
done_count = 0
count = 0

# recorded trajectories for rendering
Ped_Pos = []
Car_xPos = []
Car_yPos = []

d = env.d
W = env.W

env = CarEnv()
episodes = 3
for e in range(episodes):
    state = env.reset()
    state = np.reshape(state, [1, observation_space])
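
# ----------------------------------------------------------------------------
# Hypothetical sketch only: the scripts above stop right after resetting the
# state, but the counters and position lists they initialise suggest a rollout
# and rendering loop roughly like the one below. It assumes a gym-style
# interface (agent.act(state) -> discrete action, env.step(action) ->
# (next_state, reward, done, info)) and environment attributes ped_pos, car_x,
# car_y; all of these names are assumptions and may differ from the actual
# ped_car_* / car_ped_* environments and the My_DDQN class.
# ----------------------------------------------------------------------------
for e in range(episodes):
    state = env.reset()
    state = np.reshape(state, [1, observation_space])
    done = False
    while not done:
        action = agent.act(state)                           # assumed greedy action from the loaded model
        next_state, reward, done, info = env.step(action)   # assumed gym-style step
        state = np.reshape(next_state, [1, observation_space])

        # record trajectories for rendering (attribute names are hypothetical)
        Ped_Pos.append(env.ped_pos)
        Car_xPos.append(env.car_x)
        Car_yPos.append(env.car_y)
    done_count += 1
    # death_toll / safe_chicken / safe_car would be updated here from whatever
    # outcome signal the environment exposes at the end of an episode.

# replay the recorded trajectories (axis limits from env.d and env.W are a guess,
# and the pedestrian is assumed to cross along x = 0)
fig, ax = plt.subplots()
ax.set_xlim(-d, d)
ax.set_ylim(-W, W)
ped_dot, = ax.plot([], [], 'go', label='pedestrian')
car_dot, = ax.plot([], [], 'rs', label='car')
ax.legend()

def animate(i):
    ped_dot.set_data([0], [Ped_Pos[i]])
    car_dot.set_data([Car_xPos[i]], [Car_yPos[i]])
    return ped_dot, car_dot

anim = FuncAnimation(fig, animate, frames=len(Ped_Pos), interval=50, blit=True)
plt.show()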