Example #1
import tensorflow as tf  # TF 1.x API, as implied by tf.Session()

from carla_env import CarlaEnv
# The Agent import path is an assumption; this excerpt does not show it.
from agent import Agent


def main():
    with tf.Session() as sess:
        while True:
            try:
                env = CarlaEnv()
                break
            except Exception as e:
                print(e)

        agent = Agent(sess=sess,
                      state_size=env.observation_space.shape[0],
                      action_size=env.action_space.shape[0])
        max_episodes = 1000
        max_steps = 1800

        for i in range(int(max_episodes)):

            state = env.reset()
            print(state.shape)
            ep_reward = 0
            ep_ave_max_q = 0
            # plt.clf()
            # if i:
            #     with open("ddpg_memory.pkl","wb") as hand:
            #         pickle.dump(replay_buffer,hand)
            #     actor.save_model()
            #     critic.save_model()
            #     print("Agent saved")

            for j in range(int(max_steps)):

                print("epoch: {}, step: {}".format(i, j))
                # env.render()

                # Added exploration noise
                # a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. + i))
                action = agent.get_action(state)
                # a = controller(s[0],s[1],s[3])
                # a = [a]
                next_state, reward, done, info = env.step(action)
                print("reward: {}".format(reward))

                agent.remember(state, action, reward, done, next_state)

                # Keep adding experience to the memory until
                # there are at least minibatch size samples
                agent.train()
                state = next_state  # advance to the next observation
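Example #1 leans on an Agent class whose definition is not shown. Below is a minimal sketch of the interface the loop assumes (get_action, remember, train); the internals here are placeholders, not the original implementation, which the commented-out lines suggest was a DDPG actor-critic.

import random
from collections import deque

import numpy as np


class Agent:
    """Sketch of the interface Example #1 assumes; internals are placeholders."""

    def __init__(self, sess, state_size, action_size,
                 buffer_size=100000, batch_size=128):
        self.sess = sess                      # TF session, kept for parity with the example
        self.state_size = state_size
        self.action_size = action_size
        self.batch_size = batch_size
        self.memory = deque(maxlen=buffer_size)

    def get_action(self, state):
        # Placeholder policy: uniform random action per dimension.
        return np.random.uniform(-1.0, 1.0, size=self.action_size)

    def remember(self, state, action, reward, done, next_state):
        # Same argument order as the call in Example #1.
        self.memory.append((state, action, reward, done, next_state))

    def train(self):
        # Train only once a full minibatch of experience is available.
        if len(self.memory) < self.batch_size:
            return
        batch = random.sample(self.memory, self.batch_size)
        # ... actor/critic updates on `batch` would go here ...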
Example #2
def main():
    np.random.seed(1337)

    env = CarlaEnv()
    for _ in range(EPISODES):
        SAVE_PATH.mkdir(exist_ok=True)

        collect_episode(env,
                        SAVE_PATH / ('%03d' % len(list(SAVE_PATH.glob('*')))))
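Examples #2 and #3 rely on EPISODES, SAVE_PATH, and collect_episode being defined elsewhere in the module. A sketch of plausible definitions follows; the values, the step call, and the .npy output format are all assumptions.

from pathlib import Path

import numpy as np

EPISODES = 10                 # assumed episode count
SAVE_PATH = Path('episodes')  # assumed output root


def collect_episode(env, episode_dir, max_steps=1000):
    # Roll out one episode and dump each observation to disk. The step
    # signature and return shape differ across the CarlaEnv variants on
    # this page; a gym-style (obs, reward, done, info) tuple is assumed.
    episode_dir.mkdir(parents=True, exist_ok=True)
    obs = env.reset()
    for step in range(max_steps):
        obs, reward, done, info = env.step(None)  # autopilot drives; action unused
        np.save(str(episode_dir / ('%06d' % step)), obs)
        if done:
            break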
Example #3
def main():
    np.random.seed(1337)

    for i in range(1, 8):
        with CarlaEnv(town='Town0%s' % i) as env:
            for episode in range(EPISODES):
                env.reset(n_vehicles=np.random.choice([50, 100, 200]),
                          n_pedestrians=np.random.choice([50, 100, 200]),
                          seed=np.random.randint(0, 256))
                env._player.set_autopilot(True)

                collect_episode(
                    env, SAVE_PATH / ('%03d' % len(list(SAVE_PATH.glob('*')))))
Example #4
import pygame

from carla_env import CarlaEnv
# gather_data is assumed to be defined elsewhere in this module.


def main(num_runs):
    env = None
    RENDER = True
    MAX_STEPS_PER_EPISODE = 300
    WARMING_UP_STEPS = 50
    WINDOW_SIZE = 5
    
    RETURN_SEQUENCE = False
    GATHER_DATA = True
    SAVE_INFO = True
    TRAINING = True
    TEST = False
    
    try:
        
        pygame.init()
        pygame.font.init()

        # create environment
        env = CarlaEnv(render_pygame=RENDER, warming_up_steps=WARMING_UP_STEPS, window_size=WINDOW_SIZE)
        
        max_steps_per_episode = MAX_STEPS_PER_EPISODE

        if GATHER_DATA:
            # num_runs is hard-coded to 10 here; main's num_runs argument
            # is not used by this branch
            gather_data(env, num_runs=10, max_steps_per_episode=max_steps_per_episode, save_info=SAVE_INFO)

        elif TRAINING:
            pass  # training loop not implemented in this example

        elif TEST:
            pass  # evaluation loop not implemented in this example

    finally:
        if env and env.world is not None:
            env.world.destroy()
            # env.world.destroy_all_actors()
            # env.sych_distroy()
            
            settings = env._carla_world.get_settings()
            settings.synchronous_mode = False
            env._carla_world.apply_settings(settings)
            print('\ndisabling synchronous mode.')

        pygame.quit()
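Examples #4 and #5 repeat the same pygame setup and CARLA teardown (destroy actors, switch the server back to asynchronous mode). One way to factor that pattern out, sketched as a context manager using only the calls the examples themselves make:

import contextlib

import pygame


@contextlib.contextmanager
def carla_session(make_env):
    env = None
    try:
        pygame.init()
        pygame.font.init()
        env = make_env()
        yield env
    finally:
        if env and env.world is not None:
            env.world.destroy()
            # Leave the server in asynchronous mode, as in the examples above.
            settings = env._carla_world.get_settings()
            settings.synchronous_mode = False
            env._carla_world.apply_settings(settings)
            print('\ndisabling synchronous mode.')
        pygame.quit()


# Usage:
# with carla_session(lambda: CarlaEnv(render_pygame=True)) as env:
#     state = env.reset()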
Example #5
import numpy as np
import pandas as pd
import pygame

from carla_env import CarlaEnv


def main(num_runs):
    env = None
    RENDER = True
    MAX_STEPS_PER_EPISODE = 300
    SAVE_INFO = True

    # Initialise the logs before the try block so the finally clause can
    # reference them even if environment creation fails.
    CAV_infos = []
    HDV_infos = []

    try:
        quit_flag = False
        pygame.init()
        pygame.font.init()

        # create environment
        env = CarlaEnv(render_pygame=RENDER)

        max_steps_per_episode = MAX_STEPS_PER_EPISODE

        clock = pygame.time.Clock()

        for episode in range(num_runs):

            state = env.reset()
            episode_reward = 0

            for timestep in range(max_steps_per_episode):
                clock.tick()
                env.world.tick(clock)

                # check quit
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        quit_flag = True

                rl_actions = np.random.choice(3, 2)  # two random discrete actions, each in {0, 1, 2}

                state, reward, done, _ = env.step(rl_actions)

                # print(state)
                cav_control = env.world.cav_controller.current_control

                # print("current control: ", cav_control)
                # print("carla control: ", env.world.CAV.get_control())
                # print()
                for veh_id, state_vals in state.items():

                    if veh_id == 'CAV':
                        CAV_info = [veh_id, episode, timestep] + state_vals + list(cav_control.values())
                        CAV_infos.append(CAV_info)
                    else:
                        hdv_info = [veh_id, episode, timestep] + state_vals
                        HDV_infos.append(hdv_info)

                episode_reward += reward

                if done:
                    break

                if quit_flag:
                    print("stopped mid-run by user ...")
                    return

            print("done at step:", timestep, "-- episode reward:", episode_reward)
            # time.sleep(0.01)
    finally:
        if SAVE_INFO:
            cav_info = pd.DataFrame(CAV_infos, columns=['veh_id', 'episode', 'episode_step', 'px', 'py', 'sx', 'sy', 'ax', 'ay', 'throttle', 'steer', 'brake'])
            cav_info.to_csv('./experience_data/cav_info.csv', index=False)

            hdv_info = pd.DataFrame(HDV_infos, columns=['veh_id', 'episode', 'episode_step', 'px', 'py', 'sx', 'sy', 'ax', 'ay'])
            hdv_info.to_csv('./experience_data/hdv_info.csv', index=False)

        if env and env.world is not None:
            env.world.destroy()
            # env.world.destroy_all_actors()
            # env.sych_distroy()
            print('\ndisabling synchronous mode.')
            settings = env._carla_world.get_settings()
            settings.synchronous_mode = False
            env._carla_world.apply_settings(settings)
        

        pygame.quit()
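The CSVs written above can be loaded straight back with pandas; the column names below mirror the DataFrames built in the finally block.

import pandas as pd

cav = pd.read_csv('./experience_data/cav_info.csv')
hdv = pd.read_csv('./experience_data/hdv_info.csv')

# e.g. episode lengths and mean per-episode throttle for the CAV
lengths = cav.groupby('episode')['episode_step'].max()
mean_throttle = cav.groupby('episode')['throttle'].mean()
print(lengths.head(), mean_throttle.head())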
Example #6
from tqdm import tqdm

from carla_env import CarlaEnv

SYNCMODE = True
NUM_PLAYERS = 1

if __name__ == "__main__":
    print("Run.py")
    if not SYNCMODE:
        env = CarlaEnv(sync=SYNCMODE, num_players=NUM_PLAYERS)
        print("Env activated")
        env.reset()
        for i in tqdm(range(10000)):
            # print("IN Step ", i)
            env.step()
            # time.sleep(0.2)
    else:
        with CarlaEnv(sync=SYNCMODE, num_players=NUM_PLAYERS) as env:
            print("Env activated")
            env.reset()
            for i in tqdm(range(10000)):
                # print("IN Step ", i)
                env.step()
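Examples #3 and #6 use CarlaEnv as a context manager, so the class presumably defines __enter__/__exit__. A sketch of the minimum that usage requires; the close() hook is a guess at the cleanup method.

class CarlaEnv:
    # Reduced to the two methods the `with CarlaEnv(...) as env:` form needs;
    # the real class also defines reset/step/etc.

    def close(self):
        # hypothetical cleanup hook: destroy actors, restore server settings
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False  # do not suppress exceptions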
Example #7
overwrite_experiment = args['overwrite_experiment']

directory_to_save = './_benchmarks_results/{}'.format(curr_town)
#if os.path.exists(directory_to_save):
#    if overwrite_experiment:
#        print("Removing {}".format(directory_to_save))
#        os.system("rm -rf {}".format(directory_to_save))
#    else:
#        print("ERROR: A directory called {} already exists.".format(directory_to_save))
#        print("Please make sure to move the contents as running this program will overwrite the contents of this directory.")
#        exit()

now = time.time()
print("Loading the Imitition Network and performing one simulation run for the baseline path..")
os.system("mkdir -p _benchmarks_results")
env = CarlaEnv(town=curr_town, task=curr_task, port=curr_port, save_images=False, gpu_num=curr_gpu, adversary_name=curr_adversary_name, intersection=curr_intersection)
print("Complete.")

baseSteer     = env.baseline_steer                   # get the steering angles for the baseline run
MAX_LEN       = int(len(env.baseline_steer)*.8)      # set maximum number of frames to 80 percent of baseline scenario
baseSteer     = baseSteer[:MAX_LEN]                  # subset steering angles to maximum number of allowed frames

    
def target(rot1, pos1, width1, length1):
    # specify our attack (in this case double black lines) as a dictionary
    # to pass to the CarlaEnv object.
    # TODO: get the num_para
   
    numbers = {}

    adversary_properties = ['rot','pos','length','width','r','g','b']
    adversary_name = "rectangle1"
Example #8
import sys
import time
import random
import pickle
sys.path.insert(0, '/home/wael/Desktop/golfcart/GEME6-CARLA/Carla_Gym/envs/')
from carla_env import CarlaEnv
import numpy as np

import matplotlib.pyplot as plt
from controller import Controller

# env = CarlaEnv()
while True:
    try:
        env = CarlaEnv()
        break
    except Exception as e:
        print(e)
max_episodes = 100
max_steps = 1800
control = Controller()

for i in range(int(max_episodes)):

    s = env.reset()

    counter = 0
    while True:
        if counter == 0:
            a = control.action(s[0], s[3], s[1], 0.001, controller_type="LQR")
Example #9
                                           summary_vars[1]:
                                           ep_ave_max_q / float(j)
                                       })

                writer.add_summary(summary_str, i)
                writer.flush()

                print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f}'.format(int(ep_reward), \
                        i, (ep_ave_max_q / float(j))))
                break
            last_info = info


while True:
    try:
        env = CarlaEnv()
        break
    except Exception as e:
        print(e)
with tf.Session() as sess:
    action_bound = env.action_space.high
    Actor = ActorNetwork(sess=sess,
                         state_dim=env.observation_space.shape[0],
                         action_dim=env.action_space.shape[0],
                         action_bound=action_bound,
                         learning_rate=0.003,
                         tau=.125,
                         batch_size=128)
    Critic = CriticNetwork(sess=sess,
                           state_dim=env.observation_space.shape[0],
                           action_dim=env.action_space.shape[0],
Example #10
directory_to_save = './_benchmarks_results/{}'.format(curr_town)
if os.path.exists(directory_to_save):
    if overwrite_experiment:
        print("Removing {}".format(directory_to_save))
        os.system("rm -rf {}".format(directory_to_save))
    else:
        print("ERROR: A directory called {} already exists.".format(directory_to_save))
        print("Please make sure to move the contents as running this program will overwrite the contents of this directory.")
        exit()


now = time.time()
print("Loading the Imitition Network and performing one simulation run for the baseline path..")
os.system("mkdir -p _benchmarks_results")
env = CarlaEnv(task=curr_task, town='Town01_nemesisA', scene=curr_scene,
               port=curr_port, save_images=False, gpu_num=curr_gpu)
print("Complete.")

baseSteer     = env.baseline_steer                   # get the steering angles for the baseline run
MAX_LEN       = int(len(env.baseline_steer)*.8)      # set maximum number of frames to 80 percent of baseline scenario
baseSteer     = baseSteer[:MAX_LEN]                  # subset steering angles to maximum number of allowed frames


def target(pos1, rot1, pos2=0, rot2=0, width=10, length=200, colorR=0, colorG=0, colorB=0):
    # specify our attack (in this case double black lines) as a dictionary to pass to the CarlaEnv object.
    dict_params = {
        # the first line
        0:{
            'pos': int(pos1),
            'rot': rot1,
        os.system("rm -rf {}".format(directory_to_save))
    else:
        print("ERROR: A directory called {} already exists.".format(directory_to_save))
        print("Please make sure to move the contents to a new location as running this program will overwrite the contents of this directory.")
        exit()

os.system("mkdir -p _benchmarks_results")
print("Loading the Imitation Network and performing one simulation run for the target path...")
env = CarlaEnv(task=target_task,
               town=curr_town,
               scene=target_scene,
               port=curr_port,
               save_images=False,
               gpu_num=curr_gpu)
print("Complete.")

targetSteer = env.get_steer()                        # get the steering angles for the target run
MAX_LEN     = int(len(env.get_steer())*.8)           # set maximum number of frames to 80 percent of target scenario
targetSteer = targetSteer[:MAX_LEN]                  # subset steering angles to maximum number of allowed frames

env.task = baseline_task
env.scene = baseline_scene
env.experiment_name = 'baseline'
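Examples #10 and #11 collect baseSteer and targetSteer traces, each clipped to 80 percent of its scenario, before reconfiguring the environment for a baseline run. The objective those traces feed is not shown in these excerpts; one plausible deviation measure, purely as an assumption, would be:

import numpy as np


def steering_deviation(base_steer, run_steer, max_len):
    # Mean absolute difference between two steering traces, clipped to
    # max_len frames as in the snippets above. Hypothetical metric, not
    # necessarily the one the original scripts optimise.
    a = np.asarray(base_steer[:max_len], dtype=float)
    b = np.asarray(run_steer[:max_len], dtype=float)
    n = min(len(a), len(b))
    return float(np.mean(np.abs(a[:n] - b[:n])))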