Example #1
    def __init__(self, node_name):
        # initialize the DTROS parent class
        #color = rospy.get_param('~color')
        super(MyROSWrapperNode, self).__init__(node_name=node_name,
                                               node_type=NodeType.GENERIC)
        # construct Publisher and Subscriber
        self.pub = rospy.Publisher('/fakebot/camera_node/image/compressed',
                                   CompressedImage,
                                   queue_size=10)
        self.sub = rospy.Subscriber('/fakebot/wheels_driver_node/wheels_cmd',
                                    WheelsCmdStamped, self.callback)
        #self.sub = rospy.Subscriber('chatter', String, self.callback)
        self.iter = 0

        # create bag files:
        # self.bag = bag

        self.env = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=True,
        )

        self.br = CvBridge()
        self.right_wheel = 0
        self.left_wheel = 0
Example #2
    def __init__(self, node_name):
        super(DuckiegymRosWrapperNode,
              self).__init__(node_name=node_name, node_type=NodeType.GENERIC)

        self.action = [0, 0]  # wheel commands for simulator
        self.camera_info = self.load_camera_info(
            "/data/config/calibrations/camera_intrinsic/default.yaml")

        self.bridge = CvBridge()

        vehicle_name = os.environ.get("VEHICLE_NAME")
        image_topic = "/" + vehicle_name + "/camera_node/image/compressed"
        self.pub_img = rospy.Publisher(image_topic, CompressedImage, queue_size=1)
        camera_info_topic = "/" + vehicle_name + "/camera_node/camera_info"
        self.pub_camera_info = rospy.Publisher(camera_info_topic, CameraInfo, queue_size=1)
        wheels_cmd_topic = "/" + vehicle_name + "/wheels_driver_node/wheels_cmd"
        self.sub_wheels_cmd = rospy.Subscriber(wheels_cmd_topic,
                                               WheelsCmdStamped,
                                               self.callback_wheels_cmd,
                                               queue_size=1)

        self.sim = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=False,
        )
Example #3
    def __init__(self,
                 gain=1.0,
                 trim=0.0,
                 radius=0.0318,
                 k=27.0,
                 limit=1.0,
                 line=None,
                 GUI=None,
                 **kwargs):
        Publisher.__init__(self)
        Simulator.__init__(self, **kwargs)
        logger.info('using DuckietownEnv')
        threading.Thread.__init__(self)
        self.action_space = spaces.Box(low=np.array([-1, -1]),
                                       high=np.array([1, 1]),
                                       dtype=np.float32)

        # Should be adjusted so that the effective speed of the robot is 0.2 m/s
        self.gain = gain

        self.line = line

        # Directional trim adjustment
        self.trim = trim

        self.register(GUI)

        # Wheel radius
        self.radius = radius

        # Motor constant
        self.k = k

        # Wheel velocity limit
        self.limit = limit
Example #4
    def __init__(self, node_name):
        # initialize the DTROS parent class
        super(ROSWrapper, self).__init__(node_name=node_name,
                                         node_type=NodeType.GENERIC)

        # construct rendered observation publisher
        self.camera_pub = rospy.Publisher('~/' + VEHICLE_NAME +
                                          '/camera_node/image/compressed',
                                          CompressedImage,
                                          queue_size=1)

        # Construct subscriber for wheel_cmd topic
        self.wheels_cmd_sub = rospy.Subscriber(
            '~/' + VEHICLE_NAME + '/wheels_driver_node/wheels_cmd',
            WheelsCmdStamped,
            self.wheels_cmd_cb,
            queue_size=1)

        # Initialize the simulator
        self.env = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=True,
        )
        # Create action variable
        self.action = [0.0, 0.0]
Example #5
    def __init__(self,
                 gain=1.0,
                 trim=0.0,
                 radius=0.0318,
                 k=27.0,
                 limit=1.0,
                 **kwargs):
        Simulator.__init__(self, **kwargs)

        self.action_space = spaces.Box(low=np.array([-1, -1]),
                                       high=np.array([1, 1]),
                                       dtype=np.float32)
Example #6
def launch_env(id=None):
    env = None
    if id is None:
        # Needed for Running on Sagemaker.
        import os
        #status = os.system("Xvfb :99 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &")
        os.environ['DISPLAY'] = ":99"

        # Launch the environment
        from gym_duckietown.simulator import Simulator
        env = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=True,
        )
    else:
        env = gym.make(id)

    return env
Example #7
def launch_env(id=None):
    env = None
    if id is None:
        from gym_duckietown.simulator import Simulator
        env = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=False,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=True,
        )
    else:
        env = gym.make(id)

    # Wrappers
    env = ResizeWrapper(env)
    env = NormalizeWrapper(env)
    env = ImgWrapper(env)  # to make the images from 160x120x3 into 3x160x120
    env = SteeringToWheelVelWrapper(env)
    env = ActionWrapper(env)
    #env = DtRewardWrapper(env)

    return env
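A minimal usage sketch for the wrapped environment, assuming the wrapper classes above (ResizeWrapper, NormalizeWrapper, ImgWrapper, SteeringToWheelVelWrapper, ActionWrapper) come from the repository's own wrappers module and behave as ordinary gym.Wrapper subclasses:

# Illustrative rollout with the wrapped environment returned by launch_env()
env = launch_env()

obs = env.reset()
# after ImgWrapper the observation is assumed to be channel-first, e.g. (3, H, W)
print("observation shape:", obs.shape)

for _ in range(10):
    action = env.action_space.sample()  # random action, mapped to wheel velocities by the wrappers
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()

env.close()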
Example #8
def launch_env(map_name='loop_empty'):
    from gym_duckietown.simulator import Simulator
    env = Simulator(
        seed=123,  # random seed
        map_name=map_name,
        max_steps=500001,  # we don't want the gym to reset itself
        domain_rand=0,
        camera_width=640,
        camera_height=480,
        distortion=True,
    )
    return env
Example #9
def init():
    env = Simulator(
        seed=123,  # random seed
        map_name="loop_empty",
        max_steps=500001,  # we don't want the gym to reset itself
        domain_rand=0,
        camera_width=640,
        camera_height=480,
        accept_start_angle_deg=4,  # start close to straight
        full_transparency=True,
        distortion=True,
    )
    return env
Example #10
    def step(self, simulator: Simulator):
        """
        Take a step, implemented as a PD controller
        """

        # Find the curve point closest to the agent, and the tangent at that point
        closest_point, closest_tangent = simulator.closest_curve_point(
            simulator.cur_pos, simulator.cur_angle)

        iterations = 0

        lookup_distance = self.follow_dist
        curve_point = None
        while iterations < self.max_iterations:
            # Project a point ahead along the curve tangent,
            # then find the closest point on the curve to that
            follow_point = closest_point + closest_tangent * lookup_distance
            curve_point, curve_tangent = simulator.closest_curve_point(
                follow_point, simulator.cur_angle)

            # If we have a valid point on the curve, stop
            if curve_point is not None:
                break

            iterations += 1
            lookup_distance *= 0.5

        # Compute a normalized vector to the curve point
        point_vec = curve_point - simulator.cur_pos
        point_vec /= np.linalg.norm(point_vec)

        # Direction combining the curve tangent and the vector to the curve point, normalized
        magic = (curve_tangent + point_vec) / np.linalg.norm(curve_tangent +
                                                             point_vec)
        e = np.dot(self.get_right_vec(simulator.cur_angle), magic)
        de = e - self.prev_e
        self.prev_e = e
        steering = self.P * e + self.D * de
        return np.clip(np.array([1 + steering, 1 - steering]), 0., 1.)
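A closed-loop sketch of how the step() method above could be used, assuming it belongs to a controller class (here given the hypothetical name PurePursuitController) whose constructor sets follow_dist, max_iterations, prev_e, P and D and which provides get_right_vec():

from gym_duckietown.simulator import Simulator

# Hypothetical driving loop: the controller reads the robot pose from the
# simulator and returns [left, right] wheel commands in [0, 1]
sim = Simulator(map_name="loop_empty", domain_rand=False)
controller = PurePursuitController()  # hypothetical wrapper around step() above

sim.reset()
for _ in range(200):
    wheel_cmds = controller.step(sim)
    obs, reward, done, info = sim.step(wheel_cmds)
    if done:
        sim.reset()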
Example #11
	def create_env(self):
		self.env = Simulator(
			seed=48304,
			map_name=self.map_name,
			max_steps=self.max_steps,
			domain_rand=0,
			camera_width=640,
			camera_height=480,
			accept_start_angle_deg=4,
			full_transparency=True,
			distortion=True
		)

		self.env.action_space.np_random.seed(48304)

		return self.env
Example #12
def launch_and_wrap_env(env_config, seed=3, default_env_id=0):
    try:
        env_id = env_config.worker_index  # config is passed by rllib
    except AttributeError as err:
        logger.warning(err)
        env_id = default_env_id

    robot_speed = env_config.get('robot_speed', DEFAULT_ROBOT_SPEED)
    # If random robot speed is specified, the robot speed key holds a dictionary
    if type(robot_speed) is dict or robot_speed == 'default':
        robot_speed = DEFAULT_ROBOT_SPEED  # The initial robot speed won't be random

    print("our seed of the environment is now {}".format(seed))
    # The while loop and try block are necessary to prevent instant training crash from the
    # "Exception: Could not find a valid starting pose after 5000 attempts" in duckietown-gym-daffy 5.0.13
    spawn_successful = False
    while not spawn_successful:
        env = Simulator(
            seed=seed,  # random seed
            map_name=resolve_multimap_name(env_config["training_map"], env_id),
            max_steps=env_config.get("episode_max_steps", 500),
            domain_rand=env_config["domain_rand"],
            # dynamics_rand=env_config["dynamics_rand"],
            # camera_rand=env_config["camera_rand"],
            camera_width=CAMERA_WIDTH,
            camera_height=CAMERA_HEIGHT,
            accept_start_angle_deg=env_config["accepted_start_angle_deg"],
            full_transparency=True,
            distortion=env_config["distortion"],
            frame_rate=env_config["simulation_framerate"],
            frame_skip=env_config["frame_skip"],
            robot_speed=robot_speed)
        # env = DuckietownEnv(
        #     map_name=resolve_multimap_name(env_config["training_map"], env_id),
        #     domain_rand=False,
        #     draw_bbox=False,
        #     max_steps=500001,
        #     seed=seed
        # )
        assert robot_speed == 1.2
        spawn_successful = True
        # except Exception as e:
        #     seed += 1  # Otherwise it selects the same tile in the next attempt
        #     logger.error("{}; Retrying with new seed: {}".format(e, seed))
    logger.debug("Env init successful")
    env = wrap_env(env_config, env)
    return env
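A sketch of the env_config dictionary read by launch_and_wrap_env(); the keys are taken from the accesses above, while the concrete values and the use of a plain dict instead of rllib's EnvContext are illustrative assumptions:

env_config = {
    "training_map": "loop_empty",      # resolved per worker by resolve_multimap_name()
    "episode_max_steps": 500,
    "domain_rand": False,
    "accepted_start_angle_deg": 4,
    "distortion": True,
    "simulation_framerate": 30,
    "frame_skip": 1,
    "robot_speed": "default",          # or a float / dict for randomized speed
}

# With a plain dict, env_config.worker_index raises AttributeError, which the
# function catches and replaces with default_env_id
env = launch_and_wrap_env(env_config, seed=3)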
Example #13
def launch_env(id=None):
    if id is None:
        from gym_duckietown.simulator import Simulator
        env = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=True,
        )
    else:
        env = gym.make(id)
    return env
Example #14
def launch_env(id=None):
    from gym_duckietown.simulator import Simulator
    env = Simulator(seed=123,
                    map_name="zigzag_dists",
                    max_steps=5000001,
                    domain_rand=True,
                    camera_width=640,
                    camera_height=480,
                    accept_start_angle_deg=4,
                    full_transparency=True,
                    distortion=True,
                    randomize_maps_on_reset=True,
                    draw_curve=False,
                    draw_bbox=False,
                    frame_skip=4)

    return env
Example #15
def launch_env(seed,map_name="loop_empty",id=None):
    env = None
    if id is None:
        # Launch the environment
        env = Simulator(
            seed=seed, # random seed
            map_name=map_name,
            max_steps=500001, # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4, # start close to straight
            full_transparency=True,
            distortion=True,
        )
    else:
        env = gym.make(id)

    return env
Example #16
def draw_map_gymd(map_name: str, output: AbsDirPath, style: str):
    try:
        from gym_duckietown.simulator import Simulator
    except ImportError:
        return

    sim = Simulator(
        map_name,
        enable_leds=True,
        domain_rand=False,
        num_tris_distractors=0,
        camera_width=640,
        camera_height=480,
        # distortion=True,
        color_ground=[0, 0.3, 0],  # green
        style=style,
    )

    sim.reset()

    logger.info("rendering obs")
    img = sim.render_obs()

    out = os.path.join(output, "cam.jpg")
    save_rgb_to_jpg(img, out)

    sim.cur_pos = [-100.0, -100.0, -100.0]
    from gym_duckietown.simulator import FrameBufferMemory

    td = FrameBufferMemory(width=1024, height=1024)
    # noinspection PyProtectedMember
    horiz = sim._render_img(
        width=td.width,
        height=td.height,
        multi_fbo=td.multi_fbo,
        final_fbo=td.final_fbo,
        img_array=td.img_array,
        top_down=True,
    )
    # img = sim.render("top_down")
    out = cast(FilePath, os.path.join(output, "top_down.jpg"))
    save_rgb_to_jpg(horiz, out)
Example #17
class DuckietownSimulator(DTROS):
    def __init__(self, node_name):
        # initialize the DTROS parent class
        super(DuckietownSimulator, self).__init__(node_name=node_name,
                                                  node_type=NodeType.GENERIC)
        # construct video capture, wheel-command subscriber and camera publisher
        self.cap = cv2.VideoCapture(2)
        self.sub = rospy.Subscriber('wheels_driver_node/wheels_cmd',
                                    WheelsCmdStamped, self.callback)
        self.pub = rospy.Publisher('camera_node/image/compressed',
                                   CompressedImage,
                                   queue_size=10)
        self._cvbr = CvBridge()
        rospy.loginfo("Started Duckietown Simulator ROS node...")

    def callback(self, msg):
        left_wheel = msg.vel_left
        right_wheel = msg.vel_right

        action = [left_wheel, right_wheel]
        observation, reward, done, misc = self._env.step(action)
        # self.env.render()
        if done:
            self._env.reset()
        observation = cv2.cvtColor(observation, cv2.COLOR_RGB2BGR)
        image_cmpr_msg = self._cvbr.cv2_to_compressed_imgmsg(observation)
        self.pub.publish(image_cmpr_msg)

    _env = Simulator(
        seed=123,  # random seed
        map_name="loop_empty",
        max_steps=500001,  # we don't want the gym to reset itself
        domain_rand=0,
        camera_width=640,
        camera_height=480,
        accept_start_angle_deg=4,  # start close to straight
        full_transparency=True,
        distortion=True)
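A hypothetical entry point for the callback-driven node above (the node name is an assumption):

if __name__ == "__main__":
    # the node only reacts to incoming wheel commands, so it just spins after construction
    node = DuckietownSimulator(node_name="duckietown_simulator_node")
    rospy.spin()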
Example #18
    def step(self, action):
        vel, angle = action

        # Distance between the wheels
        baseline = self.unwrapped.wheel_dist

        # assuming same motor constants k for both motors
        k_r = self.k
        k_l = self.k

        # adjusting k by gain and trim
        k_r_inv = (self.gain + self.trim) / k_r
        k_l_inv = (self.gain - self.trim) / k_l

        omega_r = (vel + 0.5 * angle * baseline) / self.radius
        omega_l = (vel - 0.5 * angle * baseline) / self.radius

        # conversion from motor rotation rate to duty cycle
        u_r = omega_r * k_r_inv
        u_l = omega_l * k_l_inv

        # limiting output to limit, which is 1.0 for the duckiebot
        u_r_limited = max(min(u_r, self.limit), -self.limit)
        u_l_limited = max(min(u_l, self.limit), -self.limit)

        vels = np.array([u_l_limited, u_r_limited])

        obs, reward, done, info = Simulator.step(self, vels)
        mine = {}
        mine['k'] = self.k
        mine['gain'] = self.gain
        mine['trim'] = self.trim
        mine['radius'] = self.radius
        mine['omega_r'] = omega_r
        mine['omega_l'] = omega_l
        info['DuckietownEnv'] = mine
        return obs, reward, done, info
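A worked numeric check of the conversion above, assuming the constructor defaults shown in the other examples (gain=1.0, trim=0.0, k=27.0, radius=0.0318, limit=1.0), an assumed wheel baseline of 0.102 m and a straight command of 0.2 m/s:

import numpy as np

gain, trim, k, radius, limit = 1.0, 0.0, 27.0, 0.0318, 1.0
baseline = 0.102                  # assumed wheel separation in metres
vel, angle = 0.2, 0.0             # forward velocity [m/s], no rotation

k_r_inv = (gain + trim) / k
k_l_inv = (gain - trim) / k
omega_r = (vel + 0.5 * angle * baseline) / radius  # ~6.29 rad/s
omega_l = (vel - 0.5 * angle * baseline) / radius
u_r = float(np.clip(omega_r * k_r_inv, -limit, limit))
u_l = float(np.clip(omega_l * k_l_inv, -limit, limit))
print(u_l, u_r)                   # both ~0.233, well inside the duty-cycle limit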
Example #19
import numpy as np
from ddpg import DDPG
from utils import seed
from args import get_ddpg_args_test
from gym_duckietown.simulator import Simulator
import torch
import cv2

env = Simulator(seed=123,
                map_name="zigzag_dists",
                max_steps=5000001,
                domain_rand=True,
                camera_width=640,
                camera_height=480,
                accept_start_angle_deg=4,
                full_transparency=True,
                distortion=True,
                randomize_maps_on_reset=True,
                draw_curve=False,
                draw_bbox=True,
                frame_skip=4,
                draw_DDPG_features=True)

# Set seeds
args = get_ddpg_args_test()
seed(args.seed)

state_dim = env.get_features().shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
Example #20
class DuckiegymRosWrapperNode(DTROS):
    """
    Run a gym-duckietown simulator, publish the simulator's camera image on a ROS topic,
    and map incoming wheel commands to simulator actions.
    """
    def __init__(self, node_name):
        super(DuckiegymRosWrapperNode,
              self).__init__(node_name=node_name, node_type=NodeType.GENERIC)

        self.action = [0, 0]  # wheel commands for simulator
        self.camera_info = self.load_camera_info(
            "/data/config/calibrations/camera_intrinsic/default.yaml")

        self.bridge = CvBridge()

        vehicle_name = os.environ.get("VEHICLE_NAME")
        image_topic = "/" + vehicle_name + "/camera_node/image/compressed"
        self.pub_img = rospy.Publisher(image_topic, CompressedImage, queue_size=1)
        camera_info_topic = "/" + vehicle_name + "/camera_node/camera_info"
        self.pub_camera_info = rospy.Publisher(camera_info_topic, CameraInfo, queue_size=1)
        wheels_cmd_topic = "/" + vehicle_name + "/wheels_driver_node/wheels_cmd"
        self.sub_wheels_cmd = rospy.Subscriber(wheels_cmd_topic,
                                               WheelsCmdStamped,
                                               self.callback_wheels_cmd,
                                               queue_size=1)

        self.sim = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=False,
        )

    def run_simulator(self):
        """
        Main loop of this node, runs simulation loop and camera publishers.
        """
        while not rospy.is_shutdown():
            observation, reward, done, misc = self.sim.step(self.action)
            self.publish_camera_info()
            self.publish_camera_image(observation)
            self.sim.render()
            if done:
                self.sim.reset()

    def callback_wheels_cmd(self, msg):
        """
        Update action variable (parameter for simulator) on callback.
        """
        self.action = [msg.vel_left, msg.vel_right]

    def publish_camera_image(self, observation):
        """
        Publish simulated camera frame as CompressedImage message.
        """
        img = self.bridge.cv2_to_compressed_imgmsg(cv2.cvtColor(
            observation, cv2.COLOR_RGB2BGR),
                                                   dst_format="jpg")
        img.header.stamp = rospy.Time.now()
        self.pub_img.publish(img)

    def publish_camera_info(self):
        """
        Publish calibration info about camera.
        """
        self.camera_info.header.stamp = rospy.Time.now()
        self.pub_camera_info.publish(self.camera_info)

    def load_camera_info(self, filename):
        """
        Load calibration info about camera from config file.
        """
        with open(filename, 'r') as stream:
            calib_data = yaml.safe_load(stream)
        cam_info = CameraInfo()
        cam_info.width = calib_data['image_width']
        cam_info.height = calib_data['image_height']
        cam_info.K = calib_data['camera_matrix']['data']
        cam_info.D = calib_data['distortion_coefficients']['data']
        cam_info.R = calib_data['rectification_matrix']['data']
        cam_info.P = calib_data['projection_matrix']['data']
        cam_info.distortion_model = calib_data['distortion_model']
        return cam_info
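A hypothetical entry point for starting the wrapper node above, following the usual DTROS pattern (the node name is an assumption):

if __name__ == "__main__":
    node = DuckiegymRosWrapperNode(node_name="duckiegym_ros_wrapper_node")
    node.run_simulator()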
Example #21
#!/usr/bin/env python3
import gym_duckietown
from gym_duckietown.simulator import Simulator
env = Simulator(
    seed=123,  # random seed
    map_name="loop_empty",
    max_steps=500001,  # we don't want the gym to reset itself
    domain_rand=0,
    camera_width=640,
    camera_height=480,
    accept_start_angle_deg=4,  # start close to straight
    full_transparency=True,
    distortion=True,
)
while True:
    action = [0.1, 0.1]
    observation, reward, done, misc = env.step(action)
    #print(observation)
    env.render()
    if done:
        env.reset()
Example #22
class ROSWrapper(DTROS):
    '''
    Class that wraps the Duckietown simulator behind the ROS interface of a fake
    robot simulated on the host. The node acts as the interface in two ways:
        - Subscribes to the wheels command topic and turns the commands into simulator actions
        - Publishes the observations rendered by the simulator to the camera image topic
    '''
    def __init__(self, node_name):
        # initialize the DTROS parent class
        super(ROSWrapper, self).__init__(node_name=node_name,
                                         node_type=NodeType.GENERIC)

        # construct rendered observation publisher
        self.camera_pub = rospy.Publisher('~/' + VEHICLE_NAME +
                                          '/camera_node/image/compressed',
                                          CompressedImage,
                                          queue_size=1)

        # Construct subscriber for wheel_cmd topic
        self.wheels_cmd_sub = rospy.Subscriber(
            '~/' + VEHICLE_NAME + '/wheels_driver_node/wheels_cmd',
            WheelsCmdStamped,
            self.wheels_cmd_cb,
            queue_size=1)

        # Initialize the simulator
        self.env = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=True,
        )
        # Create action variable
        self.action = [0.0, 0.0]

    def wheels_cmd_cb(self, msg):
        '''
        Callback that maps wheel commands into actions in the simulator.

            Creates the simulated robot actions based on the received command
            through the main topic in the duckietown infrastructure.

            Args:
                msg (WheelsCmdStamped): Velocity command
        '''

        vel_left = msg.vel_left
        vel_right = msg.vel_right

        self.action = [vel_left, vel_right]

    def run(self):
        '''
        Continuously run the wrapper to map ROS interface with simulator.

            This method runs the node's main loop: on each iteration the rendered
            image is retrieved from the simulator and published on the camera image topic,
            and the latest wheels command received on the wheels_cmd topic is translated
            into an action executed in the simulator. The loop runs until the node is terminated.

        '''

        while not rospy.is_shutdown():

            # Update the simulator
            # the action message is updated every time the wheels_cmd_cb is called
            # that is, every time a message arrives.
            observation, reward, done, misc = self.env.step(self.action)
            # Render the new state
            self.env.render()

            # Reset the action to stop, in case no more messages are received
            self.action = [0.0, 0.0]

            if done:
                # Reset the simulator when the robot goes out of the road.
                self.env.reset()

            # Translate the observation into a CompressedImage message to publish it
            bgr_obs = cv2.cvtColor(observation, cv2.COLOR_RGB2BGR)
            bridge = CvBridge()
            img_msg = bridge.cv2_to_compressed_imgmsg(cvim=bgr_obs,
                                                      dst_format="jpg")

            # Publish the image in the /image/compressed topic
            self.camera_pub.publish(img_msg)
Example #23
class MyROSWrapperNode(DTROS):
    def __init__(self, node_name):
        # initialize the DTROS parent class
        #color = rospy.get_param('~color')
        super(MyROSWrapperNode, self).__init__(node_name=node_name,
                                               node_type=NodeType.GENERIC)
        # construct Publisher and Subscriber
        self.pub = rospy.Publisher('/fakebot/camera_node/image/compressed',
                                   CompressedImage,
                                   queue_size=10)
        self.sub = rospy.Subscriber('/fakebot/wheels_driver_node/wheels_cmd',
                                    WheelsCmdStamped, self.callback)
        #self.sub = rospy.Subscriber('chatter', String, self.callback)
        self.iter = 0

        # create bag files:
        # self.bag = bag

        self.env = Simulator(
            seed=123,  # random seed
            map_name="loop_empty",
            max_steps=500001,  # we don't want the gym to reset itself
            domain_rand=0,
            camera_width=640,
            camera_height=480,
            accept_start_angle_deg=4,  # start close to straight
            full_transparency=True,
            distortion=True,
        )

        self.br = CvBridge()
        self.right_wheel = 0
        self.left_wheel = 0

    def callback(self, data):
        if data:
            # update wheel command to action
            self.right_wheel = data.vel_right
            self.left_wheel = data.vel_left

            action = [self.left_wheel, self.right_wheel]
            print("I got command:", action)
            observation, reward, done, misc = self.env.step(action)
            # env.render()

            # print("observation.shape:",observation.shape )
            # observation.shape: (480, 640, 3)
            # print("observation.type:", type(observation))
            # observation.type: <class 'numpy.ndarray'>

            # publish image to viewer
            # np_arr = np.fromstring(data.data, np.uint8)
            # img = cv2.imdecode(observation, cv2.IMREAD_COLOR)

            img = cv2.cvtColor(np.asarray(observation), cv2.COLOR_RGB2BGR)
            compressed_img_msg = self.br.cv2_to_compressed_imgmsg(
                img, dst_format='jpg')
            self.pub.publish(compressed_img_msg)

            if done:
                self.env.reset()
        else:
            rospy.loginfo("waiting for Publisher")
Example #24
    def move(self, velocities):
        obs, reward, done, info = Simulator.step(self, np.array(velocities))

        return obs  # , reward, done
Example #25
def make_scenario_main(args=None):
    setup_logging()
    parser = argparse.ArgumentParser()

    # parser.add_argument("--config", help="Configuration", required=True)
    parser.add_argument("-o",
                        "--output",
                        help="Destination directory",
                        required=True)
    parser.add_argument("-n",
                        "--num",
                        type=int,
                        help="Number of scenarios to generate",
                        required=True)
    parser.add_argument(
        "--styles",
        default="synthetic-F",
        help=
        "Draw preview in various styles, comma separated. (needs gym duckietown)",
    )

    parsed, rest = parser.parse_known_args(args=args)
    if parsed.styles == "all":
        styles = ["synthetic", "synthetic-F", "photos", "smooth"]
    else:
        styles = parsed.styles.split(",")
    for config in rest:
        basename = os.path.basename(config).split(".")[0]
        data = read_ustring_from_utf8_file(config)
        interpreted = yaml.load(data, Loader=yaml.Loader)
        n: int = parsed.num
        output: str = parsed.output
        params: ScenarioGenerationParam = object_from_ipce(
            interpreted, ScenarioGenerationParam, iedo=iedo)
        for i in range(n):
            scenario_name = f"{basename}-{i:03d}"
            yaml_str = _get_map_yaml(params.map_name)
            scenario = make_scenario(
                yaml_str=yaml_str,
                scenario_name=scenario_name,
                only_straight=params.only_straight,
                min_dist=params.min_dist,
                delta_y_m=params.delta_y_m,
                robots_npcs=params.robots_npcs,
                robots_parked=params.robots_parked,
                robots_pcs=params.robots_pcs,
                nduckies=params.nduckies,
                duckie_min_dist_from_other_duckie=params.
                duckie_min_dist_from_other_duckie,
                duckie_min_dist_from_robot=params.duckie_min_dist_from_robot,
                duckie_y_bounds=params.duckie_y_bounds,
                delta_theta_rad=np.deg2rad(params.theta_tol_deg),
                pc_robot_protocol=params.pc_robot_protocol,
                npc_robot_protocol=params.npc_robot_protocol,
                tree_density=params.tree_density,
                tree_min_dist=params.tree_min_dist,
            )

            # styles = ['smooth']
            for style in styles:
                try:
                    from gym_duckietown.simulator import Simulator
                except ImportError:
                    logger.warning(traceback.format_exc())
                    # noinspection PyUnusedLocal
                    Simulator = None
                else:
                    sim = Simulator(
                        "4way",
                        enable_leds=True,
                        domain_rand=False,
                        num_tris_distractors=0,
                        camera_width=640,
                        camera_height=480,
                        # distortion=True,
                        color_ground=[0, 0.3, 0],  # green
                        style=style,
                    )
                    logger.info("resetting")
                    sim.reset()
                    m = cast(
                        dw.MapFormat1,
                        yaml.load(scenario.environment, Loader=yaml.Loader))

                    if "objects" not in m:
                        m["objects"] = {}
                    obs: Dict[str, object] = m["objects"]

                    for robot_name, srobot in scenario.robots.items():
                        st = dw.SE2Transform.from_SE2(
                            pose_from_friendly(srobot.configuration.pose))

                        obs[robot_name] = dict(kind="duckiebot",
                                               pose=st.as_json_dict(),
                                               height=0.12,
                                               color=srobot.color)

                    for duckie_name, duckie in scenario.duckies.items():
                        st = dw.SE2Transform.from_SE2(
                            pose_from_friendly(duckie.pose))
                        obs[duckie_name] = dict(kind="duckie",
                                                pose=st.as_json_dict(),
                                                height=0.08,
                                                color=duckie.color)

                    sim._interpret_map(m)
                    sim.reset()
                    logger.info("rendering obs")
                    img = sim.render_obs()
                    out = os.path.join(output, scenario_name, style, "cam.png")
                    save_rgb_to_png(img, out)
                    out = os.path.join(output, scenario_name, style, "cam.jpg")
                    save_rgb_to_jpg(img, out)

                    sim.cur_pos = [-100.0, -100.0, -100.0]
                    from gym_duckietown.simulator import FrameBufferMemory

                    td = FrameBufferMemory(width=1024, height=1024)
                    horiz = sim._render_img(
                        width=td.width,
                        height=td.height,
                        multi_fbo=td.multi_fbo,
                        final_fbo=td.final_fbo,
                        img_array=td.img_array,
                        top_down=True,
                    )
                    # img = sim.render("top_down")
                    out = cast(
                        FilePath,
                        os.path.join(output, scenario_name, style,
                                     "top_down.jpg"))
                    save_rgb_to_jpg(horiz, out)
                    out = cast(
                        FilePath,
                        os.path.join(output, scenario_name, style,
                                     "top_down.png"))
                    save_rgb_to_png(horiz, out)

                    dw.Tile.style = style
                    dm = interpret_scenario(scenario)
                    output_dir = os.path.join(output, scenario_name, style)
                    dw.draw_static(dm, output_dir=output_dir)
                    export_gltf(dm, output_dir, background=False)

            scenario_struct = ipce_from_object(scenario, Scenario, ieso=ieso)
            scenario_yaml = yaml.dump(scenario_struct)
            filename = os.path.join(output, scenario_name, f"scenario.yaml")
            write_ustring_to_utf8_file(scenario_yaml, filename)
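Because parse_known_args(args=args) accepts an explicit argument list, the generator can also be driven programmatically; the paths and config file name below are placeholders:

make_scenario_main([
    "-o", "out/scenarios",        # destination directory
    "-n", "2",                    # number of scenarios per config file
    "--styles", "synthetic-F",    # preview style(s)
    "scenario_params.yaml",       # ScenarioGenerationParam config (placeholder)
])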
Example #26
parser.add_argument(
    '--show-observations',
    action='store_true',
    help='Show the cropped, downscaled observations, used as the policy input')
args = parser.parse_args()

if args.top_view:
    render_mode = 'top_down'
else:
    render_mode = 'human'

env = Simulator(
    seed=1234,
    map_name=args.map_name,
    domain_rand=args.domain_rand,
    dynamics_rand=args.domain_rand,
    camera_rand=args.domain_rand,
    distortion=args.distortion,
    accept_start_angle_deg=1,
    full_transparency=True,
    draw_curve=args.draw_curve,
    # user_tile_start=[2,1]
)
env = AIDOWrapper(env)
if args.show_observations:
    env = ClipImageWrapper(env, top_margin_divider=3)
    # env = ResizeWrapper(env, (84, 84))
# env = MotionBlurWrapper(env)
# env = RandomFrameRepeatingWrapper(env, {"frame_repeating": 0.33333})
# env = ObstacleSpawningWrapper(env, {'obstacles': {'duckie': {'density': 0.35,
#                                                              'static': True},
#                                                   'duckiebot': {'density': 0.25,
#                                                                 'static': True},