Example No. 1
        im.save('screen.png')

    if done1 or done2:
        print('done!')
        env1.reset()
        env2.reset()
        Car1.reset()
        env1.render()
        env2.render()

    env1.render()
    env2.render()

    # Save both observations as images (in a full script this import belongs
    # at module level rather than inside the update callback)
    from PIL import Image
    im1 = Image.fromarray(obs1)
    im2 = Image.fromarray(obs2)

    im1.save("img/car1/_" + str(i) + ".jpg")
    im2.save("img/car2/" + str(i) + ".jpg")
    i = i + 1


pyglet.clock.schedule_interval(update, 1.0 / env1.unwrapped.frame_rate)

# Enter main event loop
pyglet.app.run()

env1.close()
env2.close()
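This fragment starts partway through an update callback: env1, env2, Car1, obs1, obs2 and the frame counter i are all defined in the part of the script that is not shown. Below is a hedged sketch of the kind of setup the fragment assumes; the import path follows the gym_duckietown package layout, while the seeds, map name and the action used in update are placeholders rather than values from the source.

# Illustrative setup for the fragment above; names and values are assumptions.
import pyglet
from gym_duckietown.envs import DuckietownEnv

env1 = DuckietownEnv(seed=1, map_name='udem1')
env2 = DuckietownEnv(seed=2, map_name='udem1')
env1.reset()
env2.reset()
env1.render()
env2.render()

i = 0  # frame counter used in the saved image file names

def update(dt):
    # Step both simulators; the fragment above then saves the observations
    # and resets both environments when either episode ends.
    obs1, reward1, done1, info1 = env1.step([0.1, 0.0])
    obs2, reward2, done2, info2 = env2.step([0.1, 0.0])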
Example No. 2
        # Capture the key currently being pressed and store its value in key
        key = cv2.waitKey(30)
        # If the key is Esc, exit the loop and end the program
        if key == 27:
            break
        # Execute the action derived from the previous prediction (_key)
        action = mov_duckiebot(_key)
        # The step returns the observation (obs), the reward, and so on
        obs, reward, done, info = env.step(action)
        # obs is a 640 x 480 RGB image (an array of shape 480 x 640 x 3)
        dim = (160, 120)
        # Resize the image to the dimensions the model accepts
        resized = cv2.resize(obs, dim, interpolation=cv2.INTER_AREA)
        obs_ = np.expand_dims(resized, axis=0)
        # Run the prediction
        _key = model.predict(obs_)
        _key = np.argmax(_key[0])
        print(_key)
        # done means the Duckiebot hit an object or left the road
        if done:
            print('done!')

            # In that case, reset the simulator
            env.reset()

        # Show the simulator observation in a window named "patos"
        cv2.imshow("patos", cv2.cvtColor(obs, cv2.COLOR_RGB2BGR))

# Close the environment and end the program
env.close()
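The loop relies on two pieces defined outside this excerpt: the Keras-style model whose predict() output is arg-maxed into _key, and the helper mov_duckiebot() that turns that class index into an action. A minimal sketch of what such a helper might look like follows; the index-to-action mapping is an assumption for illustration, not the original function.

import numpy as np

def mov_duckiebot(predicted_class):
    """Hypothetical mapping from a predicted class index to a (speed, steering) action."""
    actions = {
        0: np.array([0.44, 0.0]),   # forward
        1: np.array([0.35, 1.0]),   # turn left
        2: np.array([0.35, -1.0]),  # turn right
        3: np.array([0.0, 0.0]),    # stop
    }
    return actions.get(predicted_class, np.array([0.0, 0.0]))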
Example No. 3
def main(duckie_env: DuckietownEnv, debug: bool):
    """
    Main loop that lets you drive the duckiebot with the keyboard and switches
    to visual servoing once the target bot is detected.
    Args:
        duckie_env: the environment in which our duckiebot evolves
        debug: if True, log messages at DEBUG level
    """
    duckie_env.reset()
    duckie_env.render()

    logger = logging.getLogger(__name__)
    if debug:
        logger.setLevel("DEBUG")
    else:
        logger.setLevel("INFO")

    pose_estimator = PoseEstimator(min_area=CIRCLE_MIN_AREA,
                                   min_dist_between_blobs=CIRCLE_MIN_DISTANCE,
                                   height=CIRCLE_PATTERN_HEIGHT,
                                   width=CIRCLE_PATTERN_WIDTH,
                                   target_distance=TARGET_DIST,
                                   camera_mode=CAMERA_MODE)

    # This is the object that computes the next command from the estimated pose
    trajectory = Trajectory()

    @duckie_env.unwrapped.window.event
    def on_key_press(symbol, modifier):
        """
        This handler processes keyboard commands that
        control the simulation

        Args:
            symbol: key pressed
            modifier: modifier keys active when the key was pressed
        """
        if symbol in [key.BACKSPACE, key.SLASH]:
            logger.info("RESET")
            trajectory.reset()
            duckie_env.reset()
            duckie_env.render()
        elif symbol == key.PAGEUP:
            duckie_env.unwrapped.cam_angle[0] = 0
        elif symbol == key.ESCAPE:
            duckie_env.close()
            sys.exit(0)

    # Register a keyboard handler
    key_handler = key.KeyStateHandler()
    duckie_env.unwrapped.window.push_handlers(key_handler)

    def update(dt: float):
        """
        This function is called at every frame to handle
        movement/stepping and redrawing

        Args:
            dt: change in time (in secs) since last update
        """
        action = np.array([0.0, 0.0])

        if key_handler[key.UP]:
            action += np.array([0.44, 0.0])
        if key_handler[key.DOWN]:
            action -= np.array([0.44, 0])
        if key_handler[key.LEFT]:
            action += np.array([0, 1])
        if key_handler[key.RIGHT]:
            action -= np.array([0, 1])
        # Speed boost
        if key_handler[key.LSHIFT]:
            action *= 1.5

        # TODO get observation before taking step
        # For now we: do nothing, get the image, compute the action, execute it,
        # which costs two env steps per action. It should be: get observation,
        # compute action, take step, repeat (one action per step).

        obs, reward, done, info = duckie_env.step(action)

        target_detected, estimated_pose = pose_estimator.get_pose(obs)

        # Only for debugging, slows things down considerably and is not necessary
        # if detect:
        #     cv2.drawChessboardCorners(obs,
        #                               (8, 3), centers, detection)
        #     im = Image.fromarray(obs)
        #     im.save("circle_grid.png")

        # Here we get the ground_truth to see the accuracy of our estimate
        # Note: This is in global frame, while the estimate is in robot frame.
        # Also, the exact distance from the center of the duck to the bumper is unknown so it is set approximately
        goal_position = np.array([2.5 - TARGET_DIST - BUMPER_TO_CENTER_DIST,
                                  0,
                                  2.5])  # Because duckie is at [2.5, 0, 2.5] in visual_servo.yaml env file
        goal_angle = 0  # Because duckie faces 0 angle in visual_servo.yaml env file
        cur_position = np.array(info["Simulator"]["cur_pos"])
        cur_angle_rad = info["Simulator"]["cur_angle"]
        cur_angle_deg = np.rad2deg(cur_angle_rad)
        if cur_angle_deg > 179:
            cur_angle_deg -= 360
        relative_position = goal_position - cur_position
        relative_angle = goal_angle - cur_angle_deg
        relative_pose = [relative_position, relative_angle]
        np.set_printoptions(precision=2)
        logger.debug(f"gt: {relative_pose}, estimate: {estimated_pose}")

        if target_detected:
            trajectory.update(estimated_pose)
        elif trajectory.is_initialized():
            trajectory.predict(dt)
        else:
            logger.warning("object not found, cannot compute initial trajectory")
            # TODO for now we can move the duckie with the arrows. Eventually we just want
            # to reset the environment, and maybe log the starting pose to plot where we detect or not.

        if trajectory.is_initialized():
            action = trajectory.get_commands()
            obs, reward, done, info = duckie_env.step(action)

        if key_handler[key.RETURN]:
            im = Image.fromarray(obs)
            im.save("screen.png")

        if done:
            logger.info("done!")
            duckie_env.reset()
            duckie_env.render()

        duckie_env.render()

    pyglet.clock.schedule_interval(update, 1.0 / duckie_env.unwrapped.frame_rate)

    # Enter main event loop
    pyglet.app.run()

    duckie_env.close()
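main() expects a ready-made DuckietownEnv; the entry point is not part of this excerpt. Below is a hedged sketch of how it could be invoked, using only DuckietownEnv keyword arguments that appear elsewhere on this page; the map name is a placeholder (the comments above suggest the original uses a dedicated visual_servo map configuration).

# Hypothetical entry point; arguments and the map name are illustrative.
import argparse
from gym_duckietown.envs import DuckietownEnv

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true", help="enable debug logging")
    args = parser.parse_args()

    env = DuckietownEnv(seed=1, map_name="udem1", domain_rand=False)
    main(duckie_env=env, debug=args.debug)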
Example No. 4
class DuckiebotSim(object):
    def __init__(self):
        # Initialize Robot Simulation
        self.env = DuckietownEnv(seed=1,
                                 map_name='udem1',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=False)
        self.env.reset()
        self.env.render()
        # self.action = np.array([0.44, 0.0])
        self.action = np.array([0.0, 0.0])

        # For image to ros
        self.bridge = CvBridge()

        # Initialize ROS Node
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        # Setup parameters
        self.framerate = self.setupParam("~framerate",
                                         self.env.unwrapped.frame_rate)

        # Setup Publisher
        self.pub_img = rospy.Publisher("image_raw", Image, queue_size=1)
        self.has_published = False

        # Setup Subscriber
        self.sub_cmd = rospy.Subscriber("/cmd_vel",
                                        Twist,
                                        self.cbCmd,
                                        queue_size=1)

        # Setup timer
        self.timer_img_low = rospy.Timer(
            rospy.Duration.from_sec(1.0 / self.framerate), self.cbTimer)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name,
                        value)  # Write to parameter server for transparency
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def cbTimer(self, event):
        if not rospy.is_shutdown():
            self.grabAndPublish(self.pub_img)

    def grabAndPublish(self, publisher):
        # Grab image from simulation and apply the action
        obs, reward, done, info = self.env.step(self.action)
        rospy.loginfo('[%s] step_count = %s, reward=%.3f' %
                      (self.node_name, self.env.unwrapped.step_count, reward))

        image = cv2.cvtColor(obs, cv2.COLOR_RGB2BGR)  # Simulator returns RGB; swap to BGR for cv2
        """
        ##show image
        cv2.namedWindow("Image")
        if (not image is None):
            cv2.imshow("Image",image)
        if cv2.waitKey(1)!=-1:     #Burak, needs to modify this line to work on your computer, THANKS!
            cv2.destroyAllWindows()
        """
        # Publish raw image
        image_msg = self.bridge.cv2_to_imgmsg(image)

        image_msg.header.stamp = rospy.Time.now()
        # Publish
        publisher.publish(image_msg)

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." % (self.node_name))
            self.has_published = True

        if done and (self.env.unwrapped.step_count != 1500):
            rospy.logwarn("[%s] DONE! RESETTING." % (self.node_name))
            self.env.reset()

        self.env.render()

    def cbCmd(self, cmd_msg):
        linear_vel = cmd_msg.linear.x  # Forward Velocity [-1 1]
        angular_vel = cmd_msg.angular.z  # Steering angle [-1 1]
        self.action = np.array([linear_vel, angular_vel])

    def onShutdown(self):
        rospy.loginfo("[%s] Shutdown." % (self.node_name))
        self.env.close()
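The __main__ section that launches this node is not included in the excerpt. A minimal launch sketch, assuming the usual rospy pattern (the node name is a placeholder); note that rospy.init_node must run before DuckiebotSim() is constructed, since the constructor calls rospy.get_name().

if __name__ == "__main__":
    rospy.init_node("duckiebot_sim", anonymous=False)
    duckiebot_sim = DuckiebotSim()
    rospy.on_shutdown(duckiebot_sim.onShutdown)
    rospy.spin()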
Example No. 5
class DuckiebotSim(object):
    def __init__(self):
        # Initialize Robot Simulation
        self.env = DuckietownEnv(seed=1,
                                 map_name='udem1',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=False,
                                 domain_rand=False)
        self.env.reset()
        self.env.render()
        # self.action = np.array([0.44, 0.0])
        self.action = np.array([0.0, 0.0])
        
        # For image to ros
        self.bridge = CvBridge()
        
        # Initialize ROS Node
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        # Setup parameters
        self.framerate = self.setupParam("~framerate",
                                         self.env.unwrapped.frame_rate)

        # Setup Publisher
        self.pub_img = rospy.Publisher("image_raw", Image, queue_size=1)
        self.has_published = False

        # Setup Subscriber
        #self.sub_cmd = rospy.Subscriber("/cmd_vel", Twist, self.cbCmd, queue_size=1)

        # Setup timer
        self.timer_img_low = rospy.Timer(
            rospy.Duration.from_sec(1.0 / self.framerate), self.cbTimer)
        rospy.loginfo("[%s] Initialized." % (self.node_name))
    
    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name,
                        value)  # Write to parameter server for transparency
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value
        
    def cbTimer(self, event):
        if not rospy.is_shutdown():
            self.grabAndPublish(self.pub_img)

    def grabAndPublish(self, publisher):
        self.action = self.basic_control()
        # Grab image from simulation and apply the action
        obs, reward, done, info = self.env.step(self.action)
        #rospy.loginfo('[%s] step_count = %s, reward=%.3f' % (self.node_name, self.env.unwrapped.step_count, reward))

        image = cv2.cvtColor(obs, cv2.COLOR_RGB2BGR)  # Simulator returns RGB; swap to BGR for cv2

        # Publish raw image
        image_msg = self.bridge.cv2_to_imgmsg(image)
                
        image_msg.header.stamp = rospy.Time.now()
        # Publish 
        publisher.publish(image_msg)

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." % (self.node_name))
            self.has_published = True

        # if done and (self.env.unwrapped.step_count != 1500):
        #     rospy.logwarn("[%s] DONE! RESETTING." % (self.node_name))
        #     self.env.reset()

        #self.env.render()

    def cbCmd(self, cmd_msg):
        linear_vel = cmd_msg.linear.x  # Forward Velocity [-1 1]
        angular_vel = cmd_msg.angular.z  # Steering angle [-1 1]
        self.action = np.array([linear_vel, angular_vel])

    def onShutdown(self):
        rospy.loginfo("[%s] Shutdown." % (self.node_name))
        self.env.close()
        
    def basic_control(self):
        # Simple proportional lane follower: steer against the lateral offset
        # from the lane center and against the heading error
        lane_pose = self.env.get_lane_pos2(self.env.cur_pos, self.env.cur_angle)
        distance_to_road_center = lane_pose.dist
        angle_from_straight_in_rads = lane_pose.angle_rad
        rospy.loginfo(" dist = %.2f" % (distance_to_road_center))
        rospy.loginfo("theta = %.2f" % (angle_from_straight_in_rads))

        k_p = 10
        k_d = 5

        speed = 0.2

        steering = k_p * distance_to_road_center + k_d * angle_from_straight_in_rads

        return [speed, steering]
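The controller in basic_control can also be exercised without the ROS wrapper. The sketch below reuses the get_lane_pos2 call, gains and speed from above; the map name and episode length are placeholders.

# Standalone sketch of the lane follower above (no ROS).
import numpy as np
from gym_duckietown.envs import DuckietownEnv

env = DuckietownEnv(seed=1, map_name='udem1', domain_rand=False)
env.reset()

for _ in range(500):
    lane_pose = env.get_lane_pos2(env.cur_pos, env.cur_angle)
    steering = 10 * lane_pose.dist + 5 * lane_pose.angle_rad
    obs, reward, done, info = env.step(np.array([0.2, steering]))
    env.render()
    if done:
        env.reset()

env.close()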
Example No. 6
class gymDuckiebotSimRRService(object):
    def __init__(self):
        # Initialize Robot Simulation
        # Other Maps: udem1, straight_road, small_loop, loop_empty, 4way, zigzag_dists, loop_obstacles
        # loop_pedestrians, loop_dyn_duckiebots, regress_4way_adam
        self.env = DuckietownEnv(seed=1,
                                 max_steps=5000,
                                 map_name='zigzag_dists',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=True)
        self.env.reset()
        self.env.render()
        self.action = np.array([0.0, 0.0])
        self.framerate = self.env.unwrapped.frame_rate

        # Initialize the camera images and control streaming
        self._lock = threading.RLock()
        self._streaming = False

        self._framestream = None
        self._framestream_endpoints = dict()
        self._framestream_endpoints_lock = threading.RLock()

    #Capture a frame, apply the action and return a CamImage structure to the client
    def CaptureFrame_n_Action(self):
        with self._lock:
            image = RRN.NewStructure("experimental.gymDuckiebotSim.CamImage")
            # Grab image from simulation and apply the action
            obs, reward, done, info = self.env.step(self.action)

            #if done:
            #    self.env.reset()

            frame = cv2.cvtColor(obs,
                                 cv2.COLOR_RGB2BGR)  # Correct color for cv2
            # frame = obs

            image.width = frame.shape[1]
            image.height = frame.shape[0]
            image.step = frame.shape[1] * 3
            image.data = frame.reshape(frame.size, order='C')

            return image

    #Start the thread that captures images and sends them through connected
    #FrameStream pipes
    def StartStreaming(self):
        if (self._streaming):
            raise Exception("Already streaming")
        self._streaming = True
        t = threading.Thread(target=self.frame_threadfunc)
        t.start()

    #Stop the streaming thread
    def StopStreaming(self):
        if (not self._streaming):
            raise Exception("Not streaming")
        self._streaming = False

    #FrameStream pipe member property getter and setter
    @property
    def FrameStream(self):
        return self._framestream

    @FrameStream.setter
    def FrameStream(self, value):
        self._framestream = value
        #Create the PipeBroadcaster and set backlog to 3 so packets
        #will be dropped if the transport is overloaded
        self._framestream_broadcaster = RR.PipeBroadcaster(value, 3)

    #Sends frames at ideally (self.framerate) fps; in practice the rate will be
    #lower because of Python overhead. This is for demonstration only.
    def frame_threadfunc(self):
        #Loop as long as we are streaming
        while (self._streaming):
            #Capture a frame
            try:
                frame = self.CaptureFrame_n_Action()
            except Exception:
                #TODO: notify the client that streaming has failed
                self._streaming = False
                return
            #Send the new frame to the broadcaster.  Use AsyncSendPacket
            #and a blank handler.  We really don't care when the send finishes
            #since we are using the "backlog" flow control in the broadcaster.
            self._framestream_broadcaster.AsyncSendPacket(frame, lambda: None)

            # Wait roughly one frame period before sending the next frame
            time.sleep(1.0 / self.framerate)

    def setAction(self, v, w):
        with self._lock:
            # v = Forward Velocity [-1 1]
            # w = Steering angle [-1 1]
            self.action = np.array([v, w])

    def Shutdown(self):
        print("Duckiebot Simulation RR Service Shutdown.")
        self._streaming = False
        self.env.close()
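On the client side, the CamImage structure produced by CaptureFrame_n_Action (and streamed through FrameStream) carries the frame as a flat uint8 buffer plus its dimensions, so reassembling it only needs numpy. Connecting to the service itself goes through Robot Raconteur and is not shown here; the helper name below is hypothetical.

import numpy as np

def camimage_to_frame(image):
    """Rebuild a (height, width, 3) BGR frame from the flat CamImage buffer."""
    return np.asarray(image.data, dtype=np.uint8).reshape(
        (image.height, image.width, 3))

# Example use, assuming `service` is an already-connected client object:
# cv2.imshow("duckiebot", camimage_to_frame(service.CaptureFrame_n_Action()))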
Example No. 7
class DuckiebotSim(object):
    def __init__(self):
        # Initialize Robot Simulation
        self.env = DuckietownEnv(seed=1,
                                 map_name='loop_sid',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=True,
                                 domain_rand=False,
                                 camera_width=640,
                                 camera_height=480,
                                 user_tile_start=(1, 3))
        self.env.reset()
        self.env.render()
        # self.action = np.array([0.44, 0.0])
        self.action = np.array([0.0, 0.0])

        # For image to ros
        self.bridge = CvBridge()

        # Initialize ROS Node
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        # Setup parameters
        self.framerate = self.setupParam("~framerate",
                                         self.env.unwrapped.frame_rate)

        # Setup Publisher
        #self.pub_img= rospy.Publisher("cv_camera/image_raw",Image,queue_size=1)
        #self.pub_cam = rospy.Publisher("cv_camera/camera_info", CameraInfo, queue_size=1)
        self.pub_img = rospy.Publisher("image_raw", Image, queue_size=1)
        self.pub_cam = rospy.Publisher("camera_info", CameraInfo, queue_size=1)
        self.has_published = False

        # Setup Subscriber
        self.sub_cmd = rospy.Subscriber("/cmd_vel",
                                        Twist,
                                        self.cbCmd,
                                        queue_size=1)

        # Setup timer
        self.timer_img_low = rospy.Timer(
            rospy.Duration.from_sec(1.0 / self.framerate), self.cbTimer)
        #self.timer_img_low = rospy.Timer(rospy.Duration.from_sec(1.0/10.0),self.cbTimer)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name,
                        value)  # Write to parameter server for transparency
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def cbTimer(self, event):
        if not rospy.is_shutdown():
            self.grabAndPublish(self.pub_img)

    def grabAndPublish(self, publisher):
        # Grab image from simulation and apply the action
        obs, reward, done, info = self.env.step(self.action)
        #rospy.loginfo('[%s] step_count = %s, reward=%.3f' % (self.node_name, self.env.unwrapped.step_count, reward))

        image = cv2.cvtColor(obs, cv2.COLOR_RGB2BGR)  # Simulator returns RGB; swap to BGR for cv2
        """
        ##show image
        cv2.namedWindow("Image")
        if (not image is None):
            cv2.imshow("Image",image)
        if cv2.waitKey(1)!=-1:     #Burak, needs to modify this line to work on your computer, THANKS!
            cv2.destroyAllWindows()
        """
        # Publish raw image
        image_msg = self.bridge.cv2_to_imgmsg(image, "bgr8")

        image_msg.header.stamp = rospy.Time.now()
        # Publish
        publisher.publish(image_msg)

        cam_msg = CameraInfo()
        cam_msg.height = 480
        cam_msg.width = 640
        #cam_msg.height = 240
        #cam_msg.width= 320
        cam_msg.header.stamp = image_msg.header.stamp
        cam_msg.header.frame_id = 'cam'

        cam_msg.distortion_model = 'plumb_bob'
        cam_msg.D = [
            -0.2, 0.0305, 0.0005859930422629722, -0.0006697840226199427, 0
        ]
        cam_msg.K = [
            305.5718893575089,
            0,
            303.0797142544728,
            0,
            308.8338858195428,
            231.8845403702499,
            0,
            0,
            1,
        ]
        cam_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        cam_msg.P = [
            220.2460277141687,
            0,
            301.8668918355899,
            0,
            0,
            238.6758484095299,
            227.0880056118307,
            0,
            0,
            0,
            1,
            0,
        ]

        self.pub_cam.publish(cam_msg)

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." % (self.node_name))
            self.has_published = True

        #if done and (self.env.unwrapped.step_count != 1500):
        #rospy.logwarn("[%s] DONE! RESETTING." %(self.node_name))
        #self.env.reset()

        self.env.render()

    def cbCmd(self, cmd_msg):
        linear_vel = cmd_msg.linear.x  # Forward Velocity [-1 1]
        angular_vel = cmd_msg.angular.z  # Steering angle [-1 1]
        self.action = np.array([linear_vel, angular_vel])

    def onShutdown(self):
        rospy.loginfo("[%s] Shutdown." % (self.node_name))
        self.env.close()
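To verify what this node publishes, a small viewer can subscribe to image_raw and convert each message back with cv_bridge, mirroring the bgr8 encoding used above. A minimal sketch, with the node and window names chosen for illustration:

import cv2
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

bridge = CvBridge()

def on_image(msg):
    # Convert the ROS Image message back to an OpenCV BGR frame
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    cv2.imshow("duckiebot_view", frame)
    cv2.waitKey(1)

if __name__ == "__main__":
    rospy.init_node("duckiebot_image_viewer")
    rospy.Subscriber("image_raw", Image, on_image, queue_size=1)
    rospy.spin()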