def set_system(stype, sim_config=None, portA='/dev/ttyACM0', portB='/dev/ttyACM1'):
    """Configures the system type on which the software is running.

    Since the low-level manipulation of GPIO, image capture, and system
    type depends on the hardware upon which the software is running, we
    set the system type here.

    :param stype: options: ["sim", "raspi", "jetson"]
    :type stype: str
    :param sim_config: configuration with which to start the simulator
    :type sim_config: sim.SimConfig
    :param portA: serial port for the first communicator channel
    :type portA: str
    :param portB: serial port for the second communicator channel
    :type portB: str
    """
    global system_type, comms, cam
    if stype not in ["sim", "raspi", "jetson"]:
        raise ValueError(f'Invalid system type: {stype}')
    system_type = stype
    if system_type == "sim":
        sim.start(sim_config)
    elif system_type == "raspi":
        comms = SerialCommunicator(portA=portA, portB=portB)
    elif system_type == "jetson":
        cam = JetsonCamera()
        comms = SerialCommunicator(portA=portA, portB=portB)
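# Hedged usage sketch for set_system above. The importing module name
# (`system`) is an assumption; sim.SimConfig("") mirrors the sim_remote.py
# entry point further below, and the serial ports fall back to the defaults.
import sim
import system

system.set_system("sim", sim_config=sim.SimConfig(""))  # simulator backend
# system.set_system("raspi")   # Raspberry Pi hardware, default serial ports
# system.set_system("oculus")  # raises ValueError: invalid system type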
def main():
    env = sim.start(is_remote_client=True, render=True)
    forward = sim.action(throttle=1)
    done = False
    while True:
        while not done:
            observation, reward, done, info = env.step(forward)
        env.reset()
        print('Episode finished')
        done = False
def run_path_follower(args, camera_rigs):
    """
    Runs the C++ PID-based path-follower agent, which drives using a
    reference spline in the center of the lane and speed annotations on
    tight turns. On new maps, a behavior-tree-based agent with
    communication between agents is used instead.

    Refer to https://github.com/deepdrive/deepdrive-sim/tree/b21e0a0bf8cec60538425fa41b5fc5ee28142556/Plugins/DeepDrivePlugin/Source/DeepDrivePlugin/Private/Simulation/Agent
    """
    done = False
    gym_env = None
    try:
        sim_args = get_sim_args_from_command_args(args)
        if sim_args is not None:
            sim_args = sim_args.to_dict()
        else:
            sim_args = {}
        cameras = camera_rigs
        if isinstance(camera_rigs[0], list):
            cameras = cameras[0]
        sim_args['cameras'] = cameras
        gym_env = sim.start(**sim_args)
        log.info('Path follower drive mode')
        episode_num = 0
        info = {}

        def should_stop(index, step_info):
            # Stop when the sim asks to close, or once the configured
            # episode budget is exhausted.
            if dbox(step_info).should_close:
                return True
            elif args.max_episodes:
                return index >= args.max_episodes
            else:
                return False

        while not should_stop(episode_num, info):
            episode_num += 1
            done = False
            while not done:
                action = sim.action(has_control=False)
                obz, reward, done, info = gym_env.step(action)
                if done:
                    gym_env.reset()
    except KeyboardInterrupt:
        log.info('keyboard interrupt detected, closing')
    except Exception as e:
        log.error('Error running agent. %s', e)
        print(traceback.format_exc())
    else:
        log.info('Last episode complete, closing')
    finally:
        if gym_env:
            gym_env.close()
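# The dbox(step_info).should_close check above reads a key that may be
# absent from the step info. A sketch of that behavior with python-box,
# assuming dbox behaves roughly like Box(..., default_box=True) — an
# assumption, not a confirmed definition from this codebase:
from box import Box

step_info = Box({'lap_number': 1}, default_box=True)
if not step_info.should_close:  # missing key -> empty Box, which is falsy
    print('keep driving, lap', step_info.lap_number)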
def main():
    # TODO: Add some asserts and get working on Jenkins
    env = sim.start(is_sync=True)
    forward = sim.action(throttle=1, steering=0, brake=0)
    i = 0
    while True:
        i += 1
        observation, reward, done, info = env.step(forward)
        if i % 10 == 0:
            env.reset()
def main():
    env = sim.start(
        # map can be canyons, kevindale, kevindale_bare, or jamestown
        map='kevindale_bare',
        # scenario_index can be 0 through 5 - scenario descriptions:
        # https://gist.github.com/crizCraig/855a5cc4bc96fc2765cb9bf5d6f953b4
        scenario_index=1)
    forward = sim.action(throttle=0.75, steering=0, brake=0)
    done = False
    while not done:
        observation, reward, done, info = env.step(forward)
    env.close()
def run(env_id, bootstrap_net_path, resume_dir=None, experiment=None,
        camera_rigs=None, render=False, fps=c.DEFAULT_FPS,
        should_record=False, is_discrete=False, agent_name=MOBILENET_V2_NAME,
        is_sync=True, driving_style=DrivingStyle.NORMAL,
        is_remote_client=False, eval_only=False):
    tf_config = tf.ConfigProto(
        allow_soft_placement=True,
        intra_op_parallelism_threads=1,
        inter_op_parallelism_threads=1,
        gpu_options=tf.GPUOptions(
            # Leave room for the game.
            # NOTE: Debugging Python, e.g. with PyCharm, can cause OOM
            # errors where normal running will not.
            per_process_gpu_memory_fraction=0.4,
            allow_growth=True),
    )
    g_1 = tf.Graph()
    with g_1.as_default():
        sess_1 = tf.Session(config=tf_config)
        with sess_1.as_default():
            dagger_gym_env = sim.start(
                experiment=experiment,
                env_id=env_id,
                cameras=camera_rigs,
                render=render,
                fps=fps,
                is_sync=is_sync,
                driving_style=driving_style,
                is_remote_client=is_remote_client,
                should_record=should_record,
                image_resize_dims=MOBILENET_V2_IMAGE_SHAPE,
                should_normalize_image=True)
            dagger_agent = Agent(
                sess_1,
                should_jitter_actions=False,
                net_path=bootstrap_net_path,
                output_last_hidden=True,
                net_name=MOBILENET_V2_NAME)
    g_2 = tf.Graph()
    with g_2.as_default():
        sess_2 = tf.Session(config=tf_config)
        with sess_2.as_default():
            # Wrap step so we get the pretrained layer activations
            # rather than pixels for our observation.
            bootstrap_gym_env = BootstrapRLGymEnv(
                dagger_gym_env, dagger_agent, driving_style)
            if c.SIMPLE_PPO:
                minibatch_steps = 16
                mlp_width = 5
            else:
                minibatch_steps = 80
                mlp_width = 64
            train(bootstrap_gym_env, seed=c.RNG_SEED, sess=sess_2,
                  is_discrete=is_discrete, minibatch_steps=minibatch_steps,
                  mlp_width=mlp_width, eval_only=eval_only)
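# Why two graphs: the frozen DAgger/MobileNet agent and the PPO learner live
# in separate tf.Graph/tf.Session pairs, so each graph's variables and
# initializers stay isolated. A minimal, self-contained sketch of the same
# TF 1.x pattern (all names below are illustrative, not from this codebase):
import numpy as np
import tensorflow as tf

g_frozen, g_learner = tf.Graph(), tf.Graph()

with g_frozen.as_default():
    x = tf.placeholder(tf.float32, [None, 4], name='features')
    frozen_out = tf.layers.dense(x, 2, name='frozen_head')
    sess_frozen = tf.Session(graph=g_frozen)
    sess_frozen.run(tf.global_variables_initializer())

with g_learner.as_default():
    h = tf.placeholder(tf.float32, [None, 2], name='activations')
    logits = tf.layers.dense(h, 2, name='policy_head')
    sess_learner = tf.Session(graph=g_learner)
    sess_learner.run(tf.global_variables_initializer())

# Activations cross between graphs as plain numpy arrays, never as tensors:
acts = sess_frozen.run(frozen_out, {x: np.zeros((1, 4), np.float32)})
out = sess_learner.run(logits, {h: acts})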
#!/usr/bin/env python3
"""
File: sim_remote.py
Author: Binit Shah
Last Modified: Binit on 2/18
"""

import sim

if __name__ == "__main__":
    c = sim.SimConfig("")
    sim.start(c)
def start_env():
    # Relies on sim_args, cameras, use_sim_start_command_first_lap, and
    # image_resize_dims being bound in the enclosing scope.
    sim_args.cameras = cameras
    sim_args.use_sim_start_command = use_sim_start_command_first_lap
    sim_args.image_resize_dims = image_resize_dims
    return sim.start(**sim_args.to_dict())