def __init__(self):
     """Set up the Duckietown simulator and bridge it into a ROS node.

     Creates the gym environment, primes it with a reset/render, then wires
     up a ROS image publisher and a timer that republishes camera frames at
     the simulator's frame rate.
     """
     # Initialize Robot Simulation (fixed seed, no randomization/distortion)
     self.env = DuckietownEnv( seed=1, map_name = 'udem1', draw_curve = False, draw_bbox = False, distortion = False, domain_rand = False)
     self.env.reset()
     self.env.render()
     # Wheel command [velocity, steering]; starts stationary.
     # self.action = np.array([0.44, 0.0])
     self.action = np.array([0.0, 0.0])
     
     # Converts OpenCV/NumPy images into ROS Image messages.
     self.bridge = CvBridge()
     
     # Initialize ROS Node
     self.node_name = rospy.get_name()
     rospy.loginfo("[%s] Initializing......" %(self.node_name))
     
     # Setup parameters: publishing rate defaults to the simulator frame rate.
     self.framerate = self.setupParam("~framerate",self.env.unwrapped.frame_rate)
     
     # Setup Publisher
     self.pub_img= rospy.Publisher("image_raw",Image,queue_size=1)
     self.has_published = False
     
     # Setup Subscriber (disabled; velocity commands are not consumed here)
     #self.sub_cmd = rospy.Subscriber("/cmd_vel", Twist, self.cbCmd, queue_size=1)
     
     
     # Timer fires cbTimer once per frame period to publish the camera image.
     self.timer_img_low = rospy.Timer(rospy.Duration.from_sec(1.0/self.framerate),self.cbTimer)
     rospy.loginfo("[%s] Initialized." %(self.node_name))
Example #2
0
def learning_system(algorithm, seed):
    """Build the (env, teacher, learner) triple for one imitation-learning run.

    `algorithm` names the experiment (used only for the storage location);
    `seed` is threaded through TensorFlow, the simulator and the policy so a
    run is reproducible.
    """
    from gym_duckietown.envs import DuckietownEnv
    import tensorflow as tf

    # Fix TF's global RNG before any graph/weights are created.
    tf.set_random_seed(seed=seed)

    # Low-resolution camera keeps the network input small.
    simulator = DuckietownEnv(
        seed=seed,
        map_name=MAP_NAME,
        max_steps=HORIZON,
        domain_rand=False,
        camera_width=160,
        camera_height=120,
    )

    # Where this run's artifacts live, keyed by algorithm and seed.
    storage = experimental_entry(algorithm=algorithm, seed=seed)

    learner = NeuralNetworkPolicy(
        parametrization=MonteCarloDropoutResnetOneRegression(seed=seed, samples=SAMPLES),
        optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),
        storage_location=storage,
        batch_size=BATCH_SIZE,
        epochs=EPOCHS,
        seed=seed,
    )

    # Pure-pursuit expert that provides the demonstrations.
    teacher = UAPurePursuitPolicy(env=simulator)

    return simulator, teacher, learner
Example #3
0
def launch_and_wrap_duckieenv(env_config, seed=13, default_env_id=0):
    """Construct a DuckietownEnv for one rllib worker and wrap it.

    Args:
        env_config: rllib-style env config; `worker_index` (if present) picks
            the per-worker map, and 'robot_speed' / 'training_map' are read.
        seed: simulator seed.
        default_env_id: fallback env id when the config has no worker_index.

    Returns:
        The wrapped environment produced by `wrap_env`.
    """
    try:
        env_id = env_config.worker_index  # config is passed by rllib
    except AttributeError as err:
        logger.warning(err)
        env_id = default_env_id

    robot_speed = env_config.get('robot_speed', DEFAULT_ROBOT_SPEED)
    # If random robot speed is specified, the robot speed key holds a dictionary
    # (isinstance instead of `type(...) is dict` — also accepts dict subclasses).
    if isinstance(robot_speed, dict) or robot_speed == 'default':
        robot_speed = DEFAULT_ROBOT_SPEED  # The initial robot speed won't be random

    print("our seed of the environment is now {}".format(seed))
    # The while loop and try block are necessary to prevent instant training crash from the
    # "Exception: Could not find a valid starting pose after 5000 attempts" in duckietown-gym-daffy 5.0.13
    spawn_successful = False
    while not spawn_successful:
        env = DuckietownEnv(map_name=resolve_multimap_name(
            env_config["training_map"], env_id),
                            domain_rand=False,
                            draw_bbox=False,
                            max_steps=1500,
                            seed=seed)
        # NOTE(review): debug-looking sanity check — fails for any configured
        # speed other than 1.2; confirm it should remain.
        assert robot_speed == 1.2
        spawn_successful = True
        # except Exception as e:
        #     seed += 1  # Otherwise it selects the same tile in the next attempt
        #     logger.error("{}; Retrying with new seed: {}".format(e, seed))
    logger.debug("Env init successful")
    env = wrap_env(env_config, env)
    return env
Example #4
0
def launch_env(map_name, randomize_maps_on_reset=False, domain_rand=False):
    """Create a DuckietownEnv on `map_name` with no step limit.

    Args:
        map_name: name of the map to load.
        randomize_maps_on_reset: pick a random map on each reset.
        domain_rand: enable domain randomization.

    Returns:
        The configured DuckietownEnv.

    Bug fix: `randomize_maps_on_reset` was accepted but hard-coded to False
    when building the environment; it is now forwarded to the constructor.
    """
    environment = DuckietownEnv(
        domain_rand=domain_rand,
        max_steps=math.inf,
        map_name=map_name,
        randomize_maps_on_reset=randomize_maps_on_reset,
    )
    return environment
Example #5
0
def launch_env(map_name="udem_spooky"):
    """Spin up a never-terminating Duckietown simulation on the given map."""
    import gym_duckietown
    from gym_duckietown.envs import DuckietownEnv

    # max_steps=inf: the episode only ends on an explicit reset.
    return DuckietownEnv(
        map_name=map_name,
        domain_rand=False,
        max_steps=math.inf,
        randomize_maps_on_reset=False,
    )
Example #6
0
 def __init__(self, path, **kwargs):
     """Dataset-collection environment: a DuckietownEnv driven by a
     pure-pursuit expert, recording observations and labels destined for
     `path`.

     Note: the attributes below deliberately override whatever `**kwargs`
     set during `DuckietownEnv.__init__`.
     """
     DuckietownEnv.__init__(self, **kwargs)
     # Expert policy used to generate the driving labels.
     self.expert = PurePursuitPolicy(self)
     self.max_steps = math.inf
     self.map_name = 'udem1'
     self.domain_rand = True
     self.draw_bbox = False
     self.full_transparency = True
     self.accept_start_angle_deg = 90
     # Number of (image, label) samples to collect.
     self.set_size = 1000
     self.path = path
     # Randomly perturbed controller gains so the expert's driving varies
     # slightly between collection runs (two RNG draws, order matters for
     # reproducibility).
     self.k_p = np.random.uniform(0.8, 1.2)
     self.k_d = np.random.uniform(0.8, 1.2)
     self.action_speed = 0.4
     # Pre-allocated buffers: one camera frame and a 3-vector label per sample.
     self.images = np.zeros(shape=(self.set_size,
                                   *self.observation_space.shape),
                            dtype=self.observation_space.dtype)
     self.labels = np.zeros(shape=(self.set_size, 3), dtype=np.float32)
Example #7
0
def launch_env(map):
    """Create a DuckietownEnv on the named map with no step limit.

    NOTE(review): the parameter shadows the builtin `map`; kept unchanged
    because the name is visible to keyword callers.
    """
    import gym_duckietown
    from gym_duckietown.envs import DuckietownEnv

    return DuckietownEnv(
        map_name=map,
        domain_rand=False,
        max_steps=math.inf,
    )
Example #8
0
    def __init__(self):
        """Create the simulator and the state shared with streaming clients.

        Sets up a distorted-camera Duckietown simulation plus the locks and
        endpoint registry used to stream camera frames.
        """
        # Initialize Robot Simulation
        # Other Maps: udem1, straight_road, small_loop, loop_empty, 4way, zigzag_dists, loop_obstacles
        # loop_pedestrians, loop_dyn_duckiebots, regress_4way_adam
        self.env = DuckietownEnv(seed=1,
                                 max_steps=5000,
                                 map_name='zigzag_dists',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=True)
        self.env.reset()
        self.env.render()
        # Wheel command [velocity, steering]; starts stationary.
        self.action = np.array([0.0, 0.0])
        self.framerate = self.env.unwrapped.frame_rate

        # Initialize the camera images and control streaming
        self._lock = threading.RLock()
        self._streaming = False

        # Frame stream handle and per-client endpoints, guarded by a
        # dedicated lock.
        self._framestream = None
        self._framestream_endpoints = dict()
        self._framestream_endpoints_lock = threading.RLock()
Example #9
0
def launch_env(map_name,
               randomize_maps_on_reset=False,
               domain_rand=False,
               frame_stacking=1):
    """Create a DuckietownEnv, optionally wrapped with frame stacking.

    Args:
        map_name: name of the map to load.
        randomize_maps_on_reset: pick a random map on each reset.
        domain_rand: enable domain randomization.
        frame_stacking: number of frames stacked per observation; 1 disables
            the FrameStack wrapper.

    Returns:
        The (possibly wrapped) environment.

    Bug fix: `randomize_maps_on_reset` was accepted but hard-coded to False
    when building the environment; it is now forwarded.
    """
    environment = DuckietownEnv(domain_rand=domain_rand,
                                max_steps=math.inf,
                                map_name=map_name,
                                randomize_maps_on_reset=randomize_maps_on_reset)

    # Keep a handle on the unwrapped tile lookup; the FrameStack wrapper
    # would otherwise hide it from downstream code.
    tile_lookup = environment._get_tile

    if frame_stacking != 1:
        environment = FrameStack(environment, frame_stacking)
        environment._get_tile = tile_lookup
        environment.reset()  # Force reset to get fake frame buffer

    return environment
Example #10
0
def get_integration_dataset(num_samples):
    """Drive a PD lane-follower in the simulator and collect samples.

    Runs `num_samples * period` steps, recording one (image, label) pair
    every `period` steps; the label is the distance to the lane edge in
    centimeters.

    Returns:
        A list of (image, label) pairs.
    """
    period = 10
    data = np.zeros(shape=(num_samples, 1, 480, 640, 3))
    labels = np.zeros(shape=(num_samples, 1))

    # PD gains and a constant forward speed for the scripted driver.
    k_p = 1
    k_d = 1
    speed = 0.2

    env = DuckietownEnv(domain_rand=False, draw_bbox=False)
    # One episode long enough to collect every sample.
    iterations = env.max_steps = num_samples * period
    env.reset()

    for step in range(iterations):
        percent = round(step * 100 / iterations, 2)
        print(f'\rsimulator running: {percent} %', end='\r')

        lane_pose = get_lane_pos(env)
        lateral_offset = lane_pose.dist
        heading_error = lane_pose.angle_rad

        # PD control on lateral offset and heading error.
        steering = k_p * lateral_offset + k_d * heading_error
        obs, _, done, _ = env.step(np.array([speed, steering]))
        obs = integration_preprocess(obs)

        if step % period == 0:
            sample = step // period
            data[sample, 0, :, :, :] = obs
            labels[sample][0] = lane_pose.dist_to_edge * 100

        if done:
            env.reset()

    return list(zip(data, labels))
Example #11
0
parser.add_argument('--domain-rand',
                    action='store_true',
                    help='enable domain randomization')
parser.add_argument('--frame-skip',
                    default=1,
                    type=int,
                    help='number of frames to skip')
parser.add_argument('--seed', default=1, type=int, help='seed')
args = parser.parse_args()

# Env names containing 'Duckietown' get the full simulator built from the CLI
# flags; anything else is resolved through gym's registry.
if args.env_name and args.env_name.find('Duckietown') != -1:
    env = DuckietownEnv(
        seed=args.seed,
        map_name=args.map_name,
        draw_curve=args.draw_curve,
        draw_bbox=args.draw_bbox,
        domain_rand=args.domain_rand,
        frame_skip=args.frame_skip,
        distortion=args.distortion,
    )
else:
    env = gym.make(args.env_name)

# Prime the simulation and open the viewer window.
env.reset()
env.render()
total_reward = 0


def _draw_pose(overlay, camera_params, tag_size, pose, z_sign=1):

    opoints = np.array([
Example #12
0
# `map_name` is the name of the map that will be loaded
#            use the value stored in `args` so we can change the name
#            of the map if ever needed
#
#            if you need an example, check out `simulator.py` in the
#            root of this repository or google the python `argparse` module
# `domain_rand` is the randomization setting that will distort the camera view
#               this is used when training models on a simulator, that are then
#               used on real-life robots
#
#               set it to False
# !!! YOUR CODE HERE
# NOTE(review): the seed is derived from the wall-clock time (HHMM as an
# integer), so runs are not reproducible — confirm this is intentional.
time_military_split = time.ctime().split()[3].split(':')
time_military = int(time_military_split[0] + time_military_split[1])
env = DuckietownEnv(seed=time_military,
                    map_name=args.map_name,
                    domain_rand=False)
# !!! ==============

# First we want to take a top-down "map" image of the entire map
# This serves as an example for how to implement the rest of this assignment,
# so PLEASE READ THIS CAREFULLY!
env.reset()
env.mapmode = True
mapimg = env.render_obs(top_down=True)
np.save(f'data/maps/{args.map_name}.npy', mapimg)
env.mapmode = False

# Now, using a for-loop, take N samples of the raw camera view and the
# ground-truth semantic segmentation outputs from the simulator
#
                        help='set the total episoded number',
                        type=int)
    parser.add_argument("--logfile", type=str, default=None)
    parser.add_argument("--downscale", action="store_true")
    parser.add_argument("--filter-bad-data",
                        action="store_true",
                        help="discard data when reward is decreasing")
    args = parser.parse_args()

    #! Start Env
    # No explicit env name: build the simulator directly from the CLI flags;
    # otherwise defer to gym's registry.
    if args.env_name is None:
        env = DuckietownEnv(
            map_name=args.map_name,
            max_steps=args.steps,
            draw_curve=args.draw_curve,
            draw_bbox=args.draw_bbox,
            domain_rand=args.domain_rand,
            distortion=args.distortion,
            accept_start_angle_deg=4,
            full_transparency=True,
        )
    else:
        env = gym.make(args.env_name)

    # Interactive driver that records demonstration data from a human.
    node = HumanDriver(env,
                       max_episodes=args.nb_episodes,
                       max_steps=args.steps,
                       log_file=args.logfile,
                       downscale=args.downscale,
                       playback=args.playback,
                       filter_bad_data=args.filter_bad_data)
Example #14
0
parser.add_argument('--dynamics_rand',
                    action='store_true',
                    help='enable dynamics randomization')
parser.add_argument('--frame-skip',
                    default=1,
                    type=int,
                    help='number of frames to skip')
parser.add_argument('--seed', default=1, type=int, help='seed')
args = parser.parse_args()

# Env names containing 'Duckietown' get the full simulator built from the CLI
# flags; anything else is resolved through gym's registry.
if args.env_name and args.env_name.find('Duckietown') != -1:
    env = DuckietownEnv(seed=args.seed,
                        map_name=args.map_name,
                        draw_curve=args.draw_curve,
                        draw_bbox=args.draw_bbox,
                        domain_rand=args.domain_rand,
                        frame_skip=args.frame_skip,
                        distortion=args.distortion,
                        camera_rand=args.camera_rand,
                        dynamics_rand=args.dynamics_rand)
else:
    env = gym.make(args.env_name)

# Prime the simulation and open the viewer window.
env.reset()
env.render()


@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
    """
    This handler processes keyboard commands that
Example #15
0
import random
from gym_duckietown.envs import DuckietownEnv
import matplotlib.pyplot as plt

parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default=None)
parser.add_argument('--map-name', default='4way')
parser.add_argument('--no-pause',
                    action='store_true',
                    help="don't pause on failure")
parser.add_argument('--mask-enable', action='store_true')
args = parser.parse_args()

# No explicit env name: build the simulator directly; otherwise defer to gym.
if args.env_name is None:
    env = DuckietownEnv(map_name=args.map_name,
                        domain_rand=False,
                        draw_bbox=False,
                        draw_curve=False)
else:
    env = gym.make(args.env_name)

obs = env.reset()
env.render()
# Optionally open an OpenCV window to visualize the red-line mask.
mask_enable = args.mask_enable
if mask_enable:
    cv2.startWindowThread()
    cv2.namedWindow('Red_Lines_Mask')

# Configurable wait time, in seconds, at intersections without a traffic light
TIME_TO_WAIT = 2

total_reward = 0
Example #16
0
import gym
from gym_duckietown.envs import DuckietownEnv

from train_imitation import Model
from utils import make_var

parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default='SimpleSim-v0')
parser.add_argument('--map-name', required=True)
parser.add_argument('--no-random', action='store_true', help='disable domain randomization')
parser.add_argument('--no-pause', action='store_true', help="don't pause on failure")
args = parser.parse_args()

# The default 'SimpleSim-v0' name selects the Duckietown simulator directly;
# anything else is resolved through gym's registry.
if args.env_name == 'SimpleSim-v0':
    env = DuckietownEnv(
        map_name = args.map_name,
        domain_rand = not args.no_random
    )
    #env.max_steps = math.inf
    env.max_steps = 500
else:
    env = gym.make(args.env_name)

obs = env.reset()
env.render()

# Frame-timing statistics, updated in the main loop below.
avg_frame_time = 0
max_frame_time = 0

def load_model():
    global model
    model = Model()
import time

# declare the arguments
parser = argparse.ArgumentParser()

# Do not change this
parser.add_argument('--max_steps', type=int, default=2000, help='max_steps')

# You should set them to different map name and seed accordingly
parser.add_argument('--map-name', default='map5')
parser.add_argument('--seed', type=int, default=11, help='random seed')
args = parser.parse_args()

env = DuckietownEnv(map_name=args.map_name,
                    domain_rand=False,
                    draw_bbox=False,
                    max_steps=args.max_steps,
                    seed=args.seed)

obs = env.reset()
env.render()

total_reward = 0

# Actions are loaded as rows of (speed, steering) from a CSV text file.
# please remove this line for your own policy
# actions = np.loadtxt('./map5_seed11.txt', delimiter=',')
actions = np.loadtxt('./test.txt', delimiter=',')

for (speed, steering) in actions:
    print(speed, steering)
Example #18
0
        return action


if __name__ == "__main__":
    # Example trajectory, as would be generated by path planning
    trajectory = [
        np.array([2., 1.]),
        np.array([2., 3.]),
        np.array([1., 3.]),
        np.array([1., 1.])
    ]

    env = DuckietownEnv(map_name='udem1',
                        user_tile_start=(1, 1),
                        domain_rand=False,
                        init_x=0.75,
                        init_z=0.75,
                        init_angle=0.)
    env.reset()
    env.render(top_down=True)

    # ======= DYNAMICS =========

    # Point-mass physics model
    # inputs: (n x dim_state), (n x dim_actions), float
    # output: next_states (n x dim_state)
    def simplified_dynamics(states, actions, dt):
        actions = tf.clip_by_value(actions, -1., 1.)
        next_states = [
            states + tf.stack([
                actions[:, 0] * tf.cos(states[:, 2]) * dt,
Example #19
0
import math
import numpy as np
import gym
from gym_duckietown.envs import DuckietownEnv

parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default=None)
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--no-pause',
                    action='store_true',
                    help="don't pause on failure")
args = parser.parse_args()

# No explicit env name: build the simulator directly; otherwise defer to gym.
if args.env_name is None:
    env = DuckietownEnv(map_name=args.map_name,
                        domain_rand=False,
                        draw_bbox=False)
else:
    env = gym.make(args.env_name)

obs = env.reset()
env.render()

total_reward = 0

while True:

    lane_pose = env.get_lane_pos2(env.cur_pos, env.cur_angle)
    distance_to_road_center = lane_pose.dist
    angle_from_straight_in_rads = lane_pose.angle_rad
from gym_duckietown.envs import DuckietownEnv
import cv2

# Parsing the input parameters
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default=None)
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--no-pause',
                    action='store_true',
                    help="don't pause on failure")
args = parser.parse_args()

# Creating the gym environment for the duckietown simulation
if args.env_name is None:
    env = DuckietownEnv(map_name=args.map_name,
                        domain_rand=False,
                        draw_bbox=False)
else:
    env = gym.make(args.env_name)

# Initial reset and render to start
obs = env.reset()
env.render()

# Variables for the control demonstration (wall-clock start, elapsed time,
# constant speed/steering commands, and a loop counter).
Start = time.time()
Elapsed = 0
Speed = 0.6
Steering = 2.0
i = 0
Example #21
0
import sys
import argparse
import pyglet
import numpy as np
import gym
sys.path.append('../')
import gym_duckietown
from gym_duckietown.envs import DuckietownEnv
from gym_duckietown.wrappers import UndistortWrapper
sys.path.append('../../Car Interface Weeks 2-3')
import controller

#Initialize environment and draw a lane curve as a trajectory to follow
env = DuckietownEnv(
    seed=1,
    map_name='loop_empty',
    draw_curve=True,
    domain_rand=False,
)

#Preliminary rendering of environment
env.reset()
env.render()
'''
This is the PID class to fill out. Figure out what constants you want to save and what past information you would like to save
for the integral and derivative terms.
'''


class PID:
    def __init__(self, proportional=0, integral=0, derivative=0):
        '''
Example #22
0
#!/usr/bin/env python3

import numpy as np
import os
from gym_duckietown.envs import DuckietownEnv
import pyglet
from pyglet.window import key
from neural_perception.util.pid_controller import PID, calculate_out_lim
from neural_perception.util.util import preprocess, get_lane_pos
import tensorflow as tf

env = DuckietownEnv(domain_rand=False, draw_bbox=False)

obs = env.reset()
obs = preprocess(obs)
env.render()
# NOTE(review): CUDA is hidden AFTER tensorflow is imported; TF may already
# have initialized the GPU by then — confirm the ordering is intended.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"


@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
    # Keyboard handler: BACKSPACE resets the simulation, ESC quits.
    global DEBUG
    global MANUAL_CONTROL

    if symbol == key.BACKSPACE:
        print("RESET")
        env.reset()
        env.render()
    elif symbol == key.ESCAPE:
        env.close()
        exit(0)
Example #23
0
def main():
    """ Main launcher that starts the gym thread when the command 'duckietown-start-gym' is invoked
    """

    # get parameters from environment (set during docker launch) otherwise take default
    map = os.getenv('DUCKIETOWN_MAP', DEFAULTS["map"])
    # NOTE(review): bool() of any non-empty string is True, so setting
    # DUCKIETOWN_DOMAIN_RAND to "False" still enables randomization — verify.
    domain_rand = bool(
        os.getenv('DUCKIETOWN_DOMAIN_RAND', DEFAULTS["domain_rand"]))
    max_steps = os.getenv('DUCKIETOWN_MAX_STEPS', DEFAULTS["max_steps"])

    # if a challenge is set, it overrides the map selection

    misc = {}  # init of info field for additional gym data

    challenge = os.getenv('DUCKIETOWN_CHALLENGE', "")
    if challenge in ["LF", "LFV"]:
        print("Launching challenge:", challenge)
        map = DEFAULTS["challenges"][challenge]
        misc["challenge"] = challenge

    print("Using map:", map)

    env = DuckietownEnv(
        map_name=map,
        # draw_curve = args.draw_curve,
        # draw_bbox = args.draw_bbox,
        max_steps=max_steps,
        domain_rand=domain_rand)
    obs = env.reset()

    publisher_socket = None
    command_socket, command_poll = make_pull_socket()

    print("Simulator listening to incoming connections...")

    # Server loop: poll for incoming messages and dispatch on their topic.
    while True:
        if has_pull_message(command_socket, command_poll):
            success, data = receive_data(command_socket)
            if not success:
                print(data)  # in error case, this will contain the err msg
                continue

            reward = 0  # in case it's just a ping, not a motor command, we are sending a 0 reward
            done = False  # same thing... just for gym-compatibility
            misc_ = {}  # same same

            # Topic 0: motor command -> step the simulation.
            if data["topic"] == 0:
                obs, reward, done, misc_ = env.step(data["msg"])
                if DEBUG:
                    print("challenge={}, step_count={}, reward={}, done={}".
                          format(challenge, env.unwrapped.step_count,
                                 np.around(reward, 3), done))
                if done:
                    env.reset()

            # Topic 1: ping / keep-alive.
            if data["topic"] == 1:
                print("received ping:", data)

            # Topic 2: explicit reset request.
            if data["topic"] == 2:
                obs = env.reset()

            # can only initialize socket after first listener is connected - weird ZMQ bug
            if publisher_socket is None:
                publisher_socket = make_pub_socket(for_images=True)

            # Steps and pings both publish the latest observation back.
            if data["topic"] in [0, 1]:
                misc.update(misc_)
                send_gym(publisher_socket, obs, reward, done, misc)
# NOTE(review): m0/m1 are defined earlier in this test script (not visible
# here); presumably two measurements being compared for closeness.
assert abs(m0 - m1) < 5

# Check that the observation shapes match
second_obs, _, _, _ = env.step(np.array([0.0, 0.0]))
assert first_obs.shape == env.observation_space.shape
assert second_obs.shape == env.observation_space.shape
assert first_obs.shape == second_obs.shape

# Try stepping a few times
for i in range(0, 10):
    obs, _, _, _ = env.step(np.array([0.1, 0.1]))

# Try loading each of the available map files
for map_file in os.listdir('gym_duckietown/maps'):
    map_name = map_file.split('.')[0]
    env = DuckietownEnv(map_name=map_name)
    env.reset()

# Test the multi-map environment
env = MultiMapEnv()
for i in range(0, 50):
    env.reset()

# Check that we do not spawn too close to obstacles
env = DuckietownEnv(map_name='loop_obstacles')
for i in range(0, 75):
    obs = env.reset()
    assert not env._collision(), "collision on spawn"
    env.step(np.array([0.05, 0]))
    assert not env._collision(), "collision after one step"
                    action='store_true',
                    help='draw collision detection bounding boxes')
parser.add_argument('--domain-rand',
                    action='store_true',
                    help='enable domain randomization')
parser.add_argument('--frame-skip',
                    default=1,
                    type=int,
                    help='number of frames to skip')
args = parser.parse_args()

# No explicit env name: build the simulator directly; otherwise defer to gym.
if args.env_name is None:
    env = DuckietownEnv(
        map_name=args.map_name,
        draw_curve=args.draw_curve,
        draw_bbox=args.draw_bbox,
        domain_rand=args.domain_rand,
        frame_skip=args.frame_skip,
        distortion=args.distortion,
    )
else:
    env = gym.make(args.env_name)

# Prime the simulation and open the viewer window.
env.reset()
env.render()


@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
    """
    This handler processes keyboard commands that
    control the simulation
Example #26
0
# can ask it to register all custom arguments
parser = argparse.ArgumentParser(parents=[parser])
# this feature is disabled for now since we need shared arguments
# but we can't add duplicate arguments
# modules.globals.setup_arguments_for(args.mode, parser)

parser.add_argument('--car_model', default='simple')
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--seed', default=1, type=int, help='seed')
args = parser.parse_args()

# Run the mode-specific setup hook before building the environment.
modules.globals.enable_fns[args.mode](args)

sys.path.append('duckietown-sim')
from gym_duckietown.envs import DuckietownEnv
env = DuckietownEnv(seed=args.seed, map_name=args.map_name, domain_rand=False)
modules.globals.env = env


def render():
    # Render the shared environment with the globally-configured settings.
    env.render(**modules.globals.render_config)


render()
modules.globals.render = render

# Start the car interface and expose it globally.
from importlib import import_module
from car_iface.Car import Car
car = Car(args.mode, args.car_model)
car.start()
modules.globals.car = car
Example #27
0
# Check that the observation shapes match
second_obs, _, _, _ = env.step([0.0, 0.0])
assert first_obs.shape == env.observation_space.shape
assert first_obs.shape == second_obs.shape

# Test the PyTorch observation wrapper
env = PyTorchObsWrapper(env)
first_obs = env.reset()
second_obs, _, _, _ = env.step([0, 0])
assert first_obs.shape == env.observation_space.shape
assert first_obs.shape == second_obs.shape

# Try loading each of the available map files
for map_file in os.listdir('gym_duckietown/maps'):
    map_name = map_file.split('.')[0]
    env = DuckietownEnv(map_name=map_name)
    env.reset()

# Test the multi-map environment
env = MultiMapEnv()
for i in range(0, 50):
    env.reset()

# Check that we do not spawn too close to obstacles
env = DuckietownEnv(map_name='loop_obstacles')
for i in range(0, 75):
    obs = env.reset()
    assert not env._collision(get_agent_corners(
        env.cur_pos, env.cur_angle)), "collision on spawn"
    env.step(np.array([0.05, 0]))
    assert not env._collision(get_agent_corners(
Example #28
0
    red_img = np.zeros((480, 640, 3), dtype=np.uint8)
    red_img[:, :, 0] = 255
    blend = cv2.addWeighted(obs, 0.5, red_img, 0.5, 0)
    return blend


if __name__ == '__main__':
    # Read the command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--env-name', default="Duckietown-udem1-v1")
    parser.add_argument('--map-name', default='free')
    args = parser.parse_args()
    # Environment definition
    if args.env_name and args.env_name.find('Duckietown') != -1:
        env = DuckietownEnv(
            map_name=args.map_name,
            domain_rand=False,
        )
    else:
        env = gym.make(args.env_name)
    # Reset the environment
    env.reset()
    # Initially there is no alert
    alert = False
    # Position of the duck on the map (fixed)
    duck_pos = np.array([2, 0, 2])
    # Constant that must be computed (camera focal length, presumably)
    f = 723.1875
    # f * dr (f is constant, dr is known)
    c = f * 0.08
    while True:
        # Captura la tecla que est谩 siendo apretada y almacena su valor en key
Example #29
0
                        action='store_true',
                        help='enable domain randomization')
    parser.add_argument('--frame-skip',
                        default=1,
                        type=int,
                        help='number of frames to skip')
    parser.add_argument('--seed', default=1, type=int, help='seed')
    args = parser.parse_args()

    # Environment definition
    if args.env_name and args.env_name.find('Duckietown') != -1:
        env = DuckietownEnv(
            seed=args.seed,
            map_name=args.map_name,
            draw_curve=args.draw_curve,
            draw_bbox=args.draw_bbox,
            domain_rand=args.domain_rand,
            frame_skip=args.frame_skip,
            distortion=args.distortion,
        )
    else:
        env = gym.make(args.env_name)

    # Reset the environment
    env.reset()

    # Auxiliary key so the robot starts out stationary
    _key = '6'
    while True:

        # Captura la tecla que está siendo apretada y almacena su valor en key
"""
Simple exercise to construct a controller that controls the simulated Duckiebot using pose & visualizing processed images
"""
import gym
from gym_duckietown.envs import DuckietownEnv
import cv2
import numpy as np
from get_controller_params import get_params

# Domain-randomized simulation on 'udem1'; alternative maps listed inline.
env = DuckietownEnv(
    map_name=
    "udem1",  # "zigzag_dists", "4way", "loop_empty","small_loop", "small_loop_cw", "regress_4way_adam"
    domain_rand=True,
    distortion=False,
    max_steps=3000,
    draw_curve=False,
    draw_bbox=False)
obs = env.reset()
env.render()

total_reward = 0

def auto_canny(image, sigma=0.33):
    # compute the median of the single channel pixel intensities
    v = np.median(image)

    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)