Example #1
if args.env_name and args.env_name.find('Duckietown') != -1:
    env = DuckietownEnv(
        seed=args.seed,
        map_name=args.map_name,
        draw_curve=args.draw_curve,
        draw_bbox=args.draw_bbox,
        domain_rand=args.domain_rand,
        frame_skip=args.frame_skip,
        distortion=args.distortion,
    )
else:
    env = gym.make(args.env_name)

env.reset()
env.render()
total_reward = 0


def _draw_pose(overlay, camera_params, tag_size, pose, z_sign=1):

    opoints = np.array([
        -1, -1, 0,
         1, -1, 0,
         1,  1, 0,
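# _draw_pose is truncated above; a minimal sketch of the same idea (assumptions:
# camera_params is (fx, fy, cx, cy) and pose is a 4x4 homogeneous matrix, as in the
# common apriltag demos), projecting the tag corners and drawing their outline:
import cv2
import numpy as np

def draw_tag_outline(overlay, camera_params, tag_size, pose):
    fx, fy, cx, cy = camera_params
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float64)
    # The four tag corners in the tag frame, scaled to the physical tag size
    corners = 0.5 * tag_size * np.array(
        [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]], dtype=np.float64)
    rvec, _ = cv2.Rodrigues(pose[:3, :3])
    tvec = pose[:3, 3]
    ipoints, _ = cv2.projectPoints(corners, rvec, tvec, K, np.zeros(5))
    ipoints = np.round(ipoints).astype(np.int32).reshape(-1, 1, 2)
    cv2.polylines(overlay, [ipoints], True, (0, 255, 0), 2)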
Example #2
parser.add_argument('--no-random', action='store_true', help='disable domain randomization')
parser.add_argument('--no-pause', action='store_true', help="don't pause on failure")
args = parser.parse_args()

if args.env_name == 'SimpleSim-v0':
    env = DuckietownEnv(
        map_name=args.map_name,
        domain_rand=not args.no_random
    )
    #env.max_steps = math.inf
    env.max_steps = 500
else:
    env = gym.make(args.env_name)

obs = env.reset()
env.render()

avg_frame_time = 0
max_frame_time = 0

def load_model():
    global model
    model = Model()

    try:
        state_dict = torch.load('trained_models/imitate.pt', map_location=lambda storage, loc: storage)
        model.load_state_dict(state_dict)
    except Exception as e:
        print('failed to load model:', e)

    model.eval()
Example #3
assert first_obs.shape == second_obs.shape

# Try stepping a few times
for i in range(0, 10):
    obs, _, _, _ = env.step(np.array([0.1, 0.1]))

# Try loading each of the available map files
for map_file in os.listdir('gym_duckietown/maps'):
    map_name = map_file.split('.')[0]
    env = DuckietownEnv(map_name=map_name)
    env.reset()

# Test the multi-map environment
env = MultiMapEnv()
for i in range(0, 50):
    env.reset()

# Check that we do not spawn too close to obstacles
env = DuckietownEnv(map_name='loop_obstacles')
for i in range(0, 75):
    obs = env.reset()
    assert not env._collision(), "collision on spawn"
    env.step(np.array([0.05, 0]))
    assert not env._collision(), "collision after one step"

# Test the draw_bbox mode
env = DuckietownEnv(map_name='udem1', draw_bbox=True)
env.render('rgb_array')
env = DuckietownEnv(map_name='loop_obstacles', draw_bbox=True)
env.render('rgb_array')
Example #4
    # Example trajectory, as would be generated by path planning
    trajectory = [
        np.array([2., 1.]),
        np.array([2., 3.]),
        np.array([1., 3.]),
        np.array([1., 1.])
    ]

    env = DuckietownEnv(map_name='udem1',
                        user_tile_start=(1, 1),
                        domain_rand=False,
                        init_x=0.75,
                        init_z=0.75,
                        init_angle=0.)
    env.reset()
    env.render(top_down=True)

    # ======= DYNAMICS =========

    # Point-mass physics model
    # inputs: (n x dim_state), (n x dim_actions), float
    # output: next_states (n x dim_state)
    def simplified_dynamics(states, actions, dt):
        actions = tf.clip_by_value(actions, -1., 1.)
        next_states = states + tf.stack([
            actions[:, 0] * tf.cos(states[:, 2]) * dt,
            actions[:, 0] * -tf.sin(states[:, 2]) * dt,
            actions[:, 1] * dt,
        ], axis=1)
        return next_states
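    # A minimal usage sketch (assumptions: TensorFlow 2 eager execution; states are
    # [x, z, theta] and actions are [velocity, steering], per the comments above):
    test_states = tf.constant([[2.0, 1.0, 0.0],
                               [2.0, 3.0, 1.57]], dtype=tf.float32)   # (n x dim_state)
    test_actions = tf.constant([[0.5, 0.0],
                                [0.3, 0.2]], dtype=tf.float32)        # (n x dim_actions)
    print(simplified_dynamics(test_states, test_actions, dt=0.1))     # (n x dim_state)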
Example #5
args = parser.parse_args()

env1 = DuckietownEnv(
    seed=args.seed,
    map_name=args.map_name,
    draw_curve=args.draw_curve,
    draw_bbox=args.draw_bbox,
    domain_rand=args.domain_rand,
    frame_skip=args.frame_skip,
    distortion=args.distortion,
)

env1.do_color_relabeling = False

env1.reset()
env1.render()

env2 = DuckietownEnv(
    seed=args.seed,
    map_name=args.map_name,
    draw_curve=args.draw_curve,
    draw_bbox=args.draw_bbox,
    domain_rand=args.domain_rand,
    frame_skip=args.frame_skip,
    distortion=args.distortion,
)

env2.do_color_relabeling = True

env2.reset()
env2.render()
Example #6
parser.add_argument('--env-name', default=None)
parser.add_argument('--map-name', default='straight_road')
parser.add_argument('--no-pause',
                    action='store_true',
                    help="don't pause on failure")
args = parser.parse_args()

if args.env_name is None:
    env = DuckietownEnv(map_name=args.map_name,
                        domain_rand=False,
                        draw_bbox=False)
else:
    env = gym.make(args.env_name)

obs = env.reset()
env.render()

total_reward = 0

while True:
    duck = 0
    lane_pose = env.get_lane_pos2(env.cur_pos, env.cur_angle)
    distance_to_road_center = lane_pose.dist
    angle_from_straight_in_rads = lane_pose.angle_rad

    img = env.render('rgb_array')
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    bnw = cv2.inRange(img, (0, 150, 170), (0, 220, 255))
    contours, hierarchy = cv2.findContours(bnw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if contours:
        contours = sorted(contours, key=cv2.contourArea, reverse=True)
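        # The snippet is truncated here; a plausible continuation (a sketch, not the
        # original logic, with an assumed area threshold): treat a sufficiently large
        # yellow blob as a detected duck.
        if cv2.contourArea(contours[0]) > 500:
            duck = 1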
Example #7
parser.add_argument('--backwards', action='store_true', help="drive backwards")
parser.add_argument('--render-human',
                    action='store_true',
                    help="render view from robot camera")
args = parser.parse_args()

if args.env_name is None:
    env = DuckietownEnv(map_name=args.map_name,
                        domain_rand=False,
                        draw_bbox=False,
                        max_steps=5000)
else:
    env = gym.make(args.env_name)

obs = env.reset()
env.render()

render_mode = 'human' if args.render_human else 'top_down'
route = np.array([(2.0, 0, 1.3), (1.5, 0, 1.3), (1.3, 0, 1.5), (1.3, 0, 2.0),
                  (1.3, 0, 3.0), (1.3, 0, 3.5), (1.5, 0, 3.7), (2.0, 0, 3.7),
                  (3.0, 0, 3.7), (3.15, 0, 3.85), (3.3, 0, 4.0), (3.3, 0, 5.0),
                  (3.15, 0, 5.15), (3.0, 0, 5.3),
                  (2.0, 0, 5.3), (1.85, 0, 5.15), (1.7, 0, 5.0), (1.7, 0, 4.0),
                  (1.85, 0, 3.85), (2.0, 0, 3.7), (3.0, 0, 3.7), (3.5, 0, 3.7),
                  (3.7, 0, 3.5), (3.7, 0, 3.0), (3.7, 0, 2.0), (3.7, 0, 1.5),
                  (3.5, 0, 1.3), (3.0, 0, 1.3)])
route = route * env.road_tile_size

pt_index = np.argmin([np.linalg.norm(pt - env.cur_pos) for pt in route])
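# A minimal waypoint-steering sketch toward a route point (assumption: gym-duckietown's
# heading vector is [cos(angle), 0, -sin(angle)]); not necessarily the script's
# original controller:
def steer_towards(env, target):
    to_target = target - env.cur_pos
    target_angle = np.arctan2(-to_target[2], to_target[0])
    angle_err = (target_angle - env.cur_angle + np.pi) % (2 * np.pi) - np.pi
    return np.array([0.3, 2.0 * angle_err])  # [velocity, steering]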

while True:
Example #8
predictions = []

errors = []
straight_tile_errors = []
curve_left_errors = []
curve_right_errors = []
three_way_errors = []
four_way_errors = []

crashes = 0

obs = env.reset()
obs = preprocess(obs)

if visual:
    env.render()

t0 = time.perf_counter()

for i in range(steps):

    percent = round(i * 100 / steps, 2)
    print(f'\rrunning: {percent} %', end='\r')

    omega = expert.predict()[1]

    omega_hat = model(obs)[0][0].numpy()

    omega_err = abs(omega_hat - omega)

    errors.append(omega_err)
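    # The evaluation loop is truncated here; a plausible continuation (a sketch,
    # assuming expert.predict() returns the full action and preprocess matches the
    # call made on the initial observation):
    obs, reward, done, info = env.step(expert.predict())
    obs = preprocess(obs)
    if done:
        crashes += 1
        obs = preprocess(env.reset())
    if visual:
        env.render()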
Example #9
def main(duckie_env: DuckietownEnv, debug: bool):
    """
    Main loop that allows to control duckiebot with keyboard and uses visual servo when bot is detected
    Args:
        duckie_env: the environment in which our duckiebot evolves
        debug: will log debug message if True
    """
    duckie_env.reset()
    duckie_env.render()

    logger = logging.getLogger(__name__)
    if debug:
        logger.setLevel("DEBUG")
    else:
        logger.setLevel("INFO")

    pose_estimator = PoseEstimator(min_area=CIRCLE_MIN_AREA,
                                   min_dist_between_blobs=CIRCLE_MIN_DISTANCE,
                                   height=CIRCLE_PATTERN_HEIGHT,
                                   width=CIRCLE_PATTERN_WIDTH,
                                   target_distance=TARGET_DIST,
                                   camera_mode=CAMERA_MODE)

    # This is the object that computes the next command from the estimated pose
    trajectory = Trajectory()

    @duckie_env.unwrapped.window.event
    def on_key_press(symbol, modifier):
        """
        This handler processes keyboard commands that
        control the simulation

        Args:
            symbol: key pressed
            modifier: modifier keys pressed
        """
        if symbol in [key.BACKSPACE, key.SLASH]:
            logger.info("RESET")
            trajectory.reset()
            duckie_env.reset()
            duckie_env.render()
        elif symbol == key.PAGEUP:
            duckie_env.unwrapped.cam_angle[0] = 0
        elif symbol == key.ESCAPE:
            duckie_env.close()
            sys.exit(0)

    # Register a keyboard handler
    key_handler = key.KeyStateHandler()
    duckie_env.unwrapped.window.push_handlers(key_handler)

    def update(dt: float):
        """
        This function is called at every frame to handle
        movement/stepping and redrawing

        Args:
            dt: change in time (in secs) since last update
        """
        action = np.array([0.0, 0.0])

        if key_handler[key.UP]:
            action += np.array([0.44, 0.0])
        if key_handler[key.DOWN]:
            action -= np.array([0.44, 0])
        if key_handler[key.LEFT]:
            action += np.array([0, 1])
        if key_handler[key.RIGHT]:
            action -= np.array([0, 1])
        # Speed boost
        if key_handler[key.LSHIFT]:
            action *= 1.5

        # TODO: get the observation before taking a step.
        # For now we do nothing, get the image, compute the action, and execute it, so
        # each action takes two steps. It should be: get observation, compute action,
        # take step, repeat (one action per step).

        obs, reward, done, info = duckie_env.step(action)

        target_detected, estimated_pose = pose_estimator.get_pose(obs)

        # Only for debugging, slows things down considerably and is not necessary
        # if detect:
        #     cv2.drawChessboardCorners(obs,
        #                               (8, 3), centers, detection)
        #     im = Image.fromarray(obs)
        #     im.save("circle_grid.png")

        # Here we get the ground truth to check the accuracy of our estimate.
        # Note: this is in the global frame, while the estimate is in the robot frame.
        # Also, the exact distance from the center of the duck to the bumper is unknown, so it is set approximately.
        goal_position = np.array([2.5 - TARGET_DIST - BUMPER_TO_CENTER_DIST,
                                  0,
                                  2.5])  # Because duckie is at [2.5, 0. 2.5] in visual_servo.yaml env file
        goal_angle = 0  # Because duckie faces 0 angle in visual_servo.yaml env file
        cur_position = np.array(info["Simulator"]["cur_pos"])
        cur_angle_rad = info["Simulator"]["cur_angle"]
        cur_angle_deg = np.rad2deg(cur_angle_rad)
        if cur_angle_deg > 179:
            cur_angle_deg -= 360
        relative_position = goal_position - cur_position
        relative_angle = goal_angle - cur_angle_deg
        relative_pose = [relative_position, relative_angle]
        np.set_printoptions(precision=2)
        logger.debug(f"gt: {relative_pose}, estimate: {estimated_pose}")

        if target_detected:
            trajectory.update(estimated_pose)
        elif trajectory.is_initialized():
            trajectory.predict(dt)
        else:
            logger.warning("object not found, cannot compute initial trajectory")
            # TODO: for now we can move the duckie with the arrows. Eventually we just want
            # to reset the environment, and maybe log the starting pose to plot where detection succeeds or fails.

        if trajectory.is_initialized():
            action = trajectory.get_commands()
            obs, reward, done, info = duckie_env.step(action)

        if key_handler[key.RETURN]:
            im = Image.fromarray(obs)
            im.save("screen.png")

        if done:
            logger.info("done!")
            duckie_env.reset()
            duckie_env.render()

        duckie_env.render()

    pyglet.clock.schedule_interval(update, 1.0 / duckie_env.unwrapped.frame_rate)

    # Enter main event loop
    pyglet.app.run()

    duckie_env.close()
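# A minimal entry point for the loop above (a sketch; the map name is an assumption
# based on the visual_servo.yaml file mentioned in the comments, and argument
# handling is omitted):
if __name__ == "__main__":
    env = DuckietownEnv(map_name="visual_servo", domain_rand=False)
    main(env, debug=False)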
Example #10
                        map_name=args.map_name,
                        draw_curve=args.draw_curve,
                        draw_bbox=args.draw_bbox,
                        domain_rand=args.domain_rand,
                        frame_skip=args.frame_skip,
                        distortion=args.distortion,
                        accept_start_angle_deg=90)
    env = DTDroneImageGenerator(env, drone_sim='random')
    env = DTLaneFollowingRewardWrapper(env)
    env = DTConstantVelWrapper(env)
    env = DTDistAngleObsWrapper(env)
else:
    env = gym.make(args.env_name)

env.reset()
env.render('drone')


@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
    """
    This handler processes keyboard commands that
    control the simulation
    """

    if symbol == key.BACKSPACE or symbol == key.SLASH:
        print('RESET')
        obs = env.reset()
        print('Reset obs: x: {}, theta: {}'.format(obs[0], obs[1]))
        env.render('drone')
    elif symbol == key.PAGEUP:
Example #11
class DuckiebotSim(object):
    def __init__(self):
        # Initialize Robot Simulation
        self.env = DuckietownEnv(seed=1,
                                 map_name='udem1',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=False)
        self.env.reset()
        self.env.render()
        # self.action = np.array([0.44, 0.0])
        self.action = np.array([0.0, 0.0])

        # For image to ros
        self.bridge = CvBridge()

        # Initialize ROS Node
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        # Setup parameters
        self.framerate = self.setupParam("~framerate",
                                         self.env.unwrapped.frame_rate)

        # Setup Publisher
        self.pub_img = rospy.Publisher("image_raw", Image, queue_size=1)
        self.has_published = False

        # Setup Subscriber
        self.sub_cmd = rospy.Subscriber("/cmd_vel",
                                        Twist,
                                        self.cbCmd,
                                        queue_size=1)

        # Setup timer
        self.timer_img_low = rospy.Timer(
            rospy.Duration.from_sec(1.0 / self.framerate), self.cbTimer)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name,
                        value)  # Write to parameter server for transparency
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def cbTimer(self, event):
        if not rospy.is_shutdown():
            self.grabAndPublish(self.pub_img)

    def grabAndPublish(self, publisher):
        # Grab image from simulation and apply the action
        obs, reward, done, info = self.env.step(self.action)
        rospy.loginfo('[%s] step_count = %s, reward=%.3f' %
                      (self.node_name, self.env.unwrapped.step_count, reward))

        image = cv2.cvtColor(obs, cv2.COLOR_BGR2RGB)  # Correct color for cv2
        """
        ##show image
        cv2.namedWindow("Image")
        if (not image is None):
            cv2.imshow("Image",image)
        if cv2.waitKey(1) != -1:  # Burak: this line may need to be modified to work on your computer
            cv2.destroyAllWindows()
        """
        # Publish raw image
        image_msg = self.bridge.cv2_to_imgmsg(image)

        image_msg.header.stamp = rospy.Time.now()
        # Publish
        publisher.publish(image_msg)

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." % (self.node_name))
            self.has_published = True

        if done and (self.env.unwrapped.step_count != 1500):
            rospy.logwarn("[%s] DONE! RESETTING." % (self.node_name))
            self.env.reset()

        self.env.render()

    def cbCmd(self, cmd_msg):
        linear_vel = cmd_msg.linear.x  # Forward Velocity [-1 1]
        angular_vel = cmd_msg.angular.z  # Steering angle [-1 1]
        self.action = np.array([linear_vel, angular_vel])

    def onShutdown(self):
        rospy.loginfo("[%s] Shutdown." % (self.node_name))
        self.env.close()
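# A typical entry point for this ROS node (a sketch; the node name and the use of
# rospy.on_shutdown are assumptions, not part of the snippet above):
if __name__ == '__main__':
    rospy.init_node('duckiebot_sim', anonymous=False)
    duckiebot_sim = DuckiebotSim()
    rospy.on_shutdown(duckiebot_sim.onShutdown)
    rospy.spin()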
Example #12
class DuckiebotSim(object):
    def __init__(self):
        # Initialize Robot Simulation
        self.env = DuckietownEnv(seed=1,
                                 map_name='udem1',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=False,
                                 domain_rand=False)
        self.env.reset()
        self.env.render()
        # self.action = np.array([0.44, 0.0])
        self.action = np.array([0.0, 0.0])
        
        # For image to ros
        self.bridge = CvBridge()
        
        # Initialize ROS Node
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" %(self.node_name))
        
        # Setup parameters
        self.framerate = self.setupParam("~framerate",self.env.unwrapped.frame_rate)
        
        # Setup Publisher
        self.pub_img= rospy.Publisher("image_raw",Image,queue_size=1)
        self.has_published = False
        
        # Setup Subscriber
        #self.sub_cmd = rospy.Subscriber("/cmd_vel", Twist, self.cbCmd, queue_size=1)
        
        
        # Setup timer
        self.timer_img_low = rospy.Timer(rospy.Duration.from_sec(1.0/self.framerate),self.cbTimer)
        rospy.loginfo("[%s] Initialized." %(self.node_name))
    
    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value)  # Write to parameter server for transparency
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value
        
    def cbTimer(self,event):
        if not rospy.is_shutdown():
            self.grabAndPublish(self.pub_img)
        
    def grabAndPublish(self,publisher):
        self.action = self.basic_control()
        # Grab image from simulation and apply the action
        obs, reward, done, info = self.env.step(self.action)
        #rospy.loginfo('[%s] step_count = %s, reward=%.3f' % (self.node_name, self.env.unwrapped.step_count, reward))
        
        image = cv2.cvtColor(obs, cv2.COLOR_BGR2RGB) # Correct color for cv2

        # Publish raw image
        image_msg = self.bridge.cv2_to_imgmsg(image)
                
        image_msg.header.stamp = rospy.Time.now()
        # Publish 
        publisher.publish(image_msg)

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." %(self.node_name))
            self.has_published = True
        
        # if done and (self.env.unwrapped.step_count != 1500):
        #     rospy.logwarn("[%s] DONE! RESETTING." %(self.node_name))
        #     self.env.reset()
            
        #self.env.render()
        
    def cbCmd(self,cmd_msg):
        linear_vel = cmd_msg.linear.x # Forward Velocity [-1 1]
        angular_vel = cmd_msg.angular.z # Steering angle [-1 1]
        self.action = np.array([linear_vel, angular_vel])
        
    def onShutdown(self):
        rospy.loginfo("[%s] Shutdown." %(self.node_name))
        self.env.close()
        
    def basic_control(self):
        lane_pose = self.env.get_lane_pos2(self.env.cur_pos, self.env.cur_angle)
        distance_to_road_center = lane_pose.dist 
        angle_from_straight_in_rads = lane_pose.angle_rad 
        rospy.loginfo(" dist = %.2f" %(distance_to_road_center))
        rospy.loginfo("theta = %.2f" %(angle_from_straight_in_rads))

        k_p = 10
        k_d = 5

        speed = 0.2

        steering = k_p*distance_to_road_center + k_d*angle_from_straight_in_rads

        return [speed, steering]
Example #13
done = False

printed_msg = False

if from_file:
    for (speed, steering) in actions:
        print(speed, steering)

        obs, reward, done, info = env.step([speed, steering])
        total_reward += reward

        print('Steps = %s, Timestep Reward=%.3f, Total Reward=%.3f' %
              (env.step_count, reward, total_reward))

        env.render()

        time.sleep(0.1)
else:
    try:
        while not done:
            rgbobs = env.render('rgb_array')
            # if env.step_count == len(loaded_actions):
            #     policy.turn_left_incoming = policy.turn_right_incoming = False
            #     policy.seeing_stop = True
            #     policy.remaining_steps_of_slow = 0

            if env.map_name == 'map5':
                if not printed_msg:
                    print('map5 predict function')
                    printed_msg = True
                action = policy.predict5(rgb_array=np.array(rgbobs),
                                         raw_obs=obs)
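            # The loop is truncated here; a plausible continuation (a sketch; a
            # generic `policy.predict` for the other maps is an assumption):
            else:
                action = policy.predict(rgb_array=np.array(rgbobs), raw_obs=obs)
            obs, reward, done, info = env.step(action)
            total_reward += reward
            env.render()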
Example #14
class gymDuckiebotSimRRService(object):
    def __init__(self):
        # Initialize Robot Simulation
        # Other Maps: udem1, straight_road, small_loop, loop_empty, 4way, zigzag_dists, loop_obstacles
        # loop_pedestrians, loop_dyn_duckiebots, regress_4way_adam
        self.env = DuckietownEnv(seed=1,
                                 max_steps=5000,
                                 map_name='zigzag_dists',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=True)
        self.env.reset()
        self.env.render()
        self.action = np.array([0.0, 0.0])
        self.framerate = self.env.unwrapped.frame_rate

        # Initialize the camera images and control streaming
        self._lock = threading.RLock()
        self._streaming = False

        self._framestream = None
        self._framestream_endpoints = dict()
        self._framestream_endpoints_lock = threading.RLock()

    #Capture a frame, apply the action and return a CamImage structure to the client
    def CaptureFrame_n_Action(self):
        with self._lock:
            image = RRN.NewStructure("experimental.gymDuckiebotSim.CamImage")
            # Grab image from simulation and apply the action
            obs, reward, done, info = self.env.step(self.action)

            #if done:
            #    self.env.reset()

            frame = cv2.cvtColor(obs,
                                 cv2.COLOR_RGB2BGR)  # Correct color for cv2
            # frame = obs

            image.width = frame.shape[1]
            image.height = frame.shape[0]
            image.step = frame.shape[1] * 3
            image.data = frame.reshape(frame.size, order='C')

            return image

    #Start the thread that captures images and sends them through connected
    #FrameStream pipes
    def StartStreaming(self):
        if (self._streaming):
            raise Exception("Already streaming")
        self._streaming = True
        t = threading.Thread(target=self.frame_threadfunc)
        t.start()

    #Stop the streaming thread
    def StopStreaming(self):
        if (not self._streaming):
            raise Exception("Not streaming")
        self._streaming = False

    #FrameStream pipe member property getter and setter
    @property
    def FrameStream(self):
        return self._framestream

    @FrameStream.setter
    def FrameStream(self, value):
        self._framestream = value
        #Create the PipeBroadcaster and set backlog to 3 so packets
        #will be dropped if the transport is overloaded
        self._framestream_broadcaster = RR.PipeBroadcaster(value, 3)

    #Function that will send a frame at ideally (self.framerate) fps, although in reality it
    #will be lower because Python is quite slow.  This is for
    #demonstration only...
    def frame_threadfunc(self):
        #Loop as long as we are streaming
        while (self._streaming):
            #Capture a frame
            try:
                frame = self.CaptureFrame_n_Action()
            except Exception:
                #TODO: notify the client that streaming has failed
                self._streaming = False
                return
            #Send the new frame to the broadcaster.  Use AsyncSendPacket
            #and a blank handler.  We really don't care when the send finishes
            #since we are using the "backlog" flow control in the broadcaster.
            self._framestream_broadcaster.AsyncSendPacket(frame, lambda: None)

            # Wait roughly one frame period before sending the next frame
            time.sleep(1.0 / self.framerate)

    def setAction(self, v, w):
        with self._lock:
            # v = Forward Velocity [-1 1]
            # w = Steering angle [-1 1]
            self.action = np.array([v, w])

    def Shutdown(self):
        print("Duckiebot Simulation RR Service Shutdown.")
        self._streaming = False
        self.env.close()
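# How a client might consume this service (a sketch; the connection URL and service
# name are assumptions, not taken from the snippet above):
from RobotRaconteur.Client import RRN
import numpy as np

obj = RRN.ConnectService('rr+tcp://localhost:52222?service=GymDuckiebotSim')
obj.setAction(0.4, 0.0)
image = obj.CaptureFrame_n_Action()
frame = np.asarray(image.data, dtype=np.uint8).reshape(image.height, image.width, 3)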
Example #15
class DuckiebotSim(object):
    def __init__(self):
        # Initialize Robot Simulation
        self.env = DuckietownEnv(seed=1,
                                 map_name='loop_sid',
                                 draw_curve=False,
                                 draw_bbox=False,
                                 distortion=True,
                                 domain_rand=False,
                                 camera_width=640,
                                 camera_height=480,
                                 user_tile_start=(1, 3))
        self.env.reset()
        self.env.render()
        # self.action = np.array([0.44, 0.0])
        self.action = np.array([0.0, 0.0])

        # For image to ros
        self.bridge = CvBridge()

        # Initialize ROS Node
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        # Setup parameters
        self.framerate = self.setupParam("~framerate",
                                         self.env.unwrapped.frame_rate)

        # Setup Publisher
        #self.pub_img= rospy.Publisher("cv_camera/image_raw",Image,queue_size=1)
        #self.pub_cam = rospy.Publisher("cv_camera/camera_info", CameraInfo, queue_size=1)
        self.pub_img = rospy.Publisher("image_raw", Image, queue_size=1)
        self.pub_cam = rospy.Publisher("camera_info", CameraInfo, queue_size=1)
        self.has_published = False

        # Setup Subscriber
        self.sub_cmd = rospy.Subscriber("/cmd_vel",
                                        Twist,
                                        self.cbCmd,
                                        queue_size=1)

        # Setup timer
        self.timer_img_low = rospy.Timer(
            rospy.Duration.from_sec(1.0 / self.framerate), self.cbTimer)
        #self.timer_img_low = rospy.Timer(rospy.Duration.from_sec(1.0/10.0),self.cbTimer)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name,
                        value)  # Write to parameter server for transparency
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def cbTimer(self, event):
        if not rospy.is_shutdown():
            self.grabAndPublish(self.pub_img)

    def grabAndPublish(self, publisher):
        # Grab image from simulation and apply the action
        obs, reward, done, info = self.env.step(self.action)
        #rospy.loginfo('[%s] step_count = %s, reward=%.3f' % (self.node_name, self.env.unwrapped.step_count, reward))

        image = cv2.cvtColor(obs, cv2.COLOR_BGR2RGB)  # Correct color for cv2
        """
        ##show image
        cv2.namedWindow("Image")
        if (not image is None):
            cv2.imshow("Image",image)
        if cv2.waitKey(1) != -1:  # Burak: this line may need to be modified to work on your computer
            cv2.destroyAllWindows()
        """
        # Publish raw image
        image_msg = self.bridge.cv2_to_imgmsg(image, "bgr8")

        image_msg.header.stamp = rospy.Time.now()
        # Publish
        publisher.publish(image_msg)

        cam_msg = CameraInfo()
        cam_msg.height = 480
        cam_msg.width = 640
        #cam_msg.height = 240
        #cam_msg.width= 320
        cam_msg.header.stamp = image_msg.header.stamp
        cam_msg.header.frame_id = 'cam'

        cam_msg.distortion_model = 'plumb_bob'
        cam_msg.D = [
            -0.2, 0.0305, 0.0005859930422629722, -0.0006697840226199427, 0
        ]
        cam_msg.K = [
            305.5718893575089, 0, 303.0797142544728,
            0, 308.8338858195428, 231.8845403702499,
            0, 0, 1,
        ]
        cam_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        cam_msg.P = [
            220.2460277141687, 0, 301.8668918355899, 0,
            0, 238.6758484095299, 227.0880056118307, 0,
            0, 0, 1, 0,
        ]

        self.pub_cam.publish(cam_msg)

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." % (self.node_name))
            self.has_published = True

        # if done and (self.env.unwrapped.step_count != 1500):
        #     rospy.logwarn("[%s] DONE! RESETTING." % (self.node_name))
        #     self.env.reset()

        self.env.render()

    def cbCmd(self, cmd_msg):
        linear_vel = cmd_msg.linear.x  # Forward Velocity [-1 1]
        angular_vel = cmd_msg.angular.z  # Steering angle [-1 1]
        self.action = np.array([linear_vel, angular_vel])

    def onShutdown(self):
        rospy.loginfo("[%s] Shutdown." % (self.node_name))
        self.env.close()
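# On the subscriber side, a received CameraInfo message can be turned into a
# projection model (a sketch using image_geometry; the subscriber itself is not
# part of the snippet above):
from image_geometry import PinholeCameraModel

cam_model = PinholeCameraModel()
cam_model.fromCameraInfo(cam_info_msg)  # cam_info_msg: a sensor_msgs/CameraInfo received on "camera_info"
ray = cam_model.projectPixelTo3dRay((320, 240))  # viewing ray through the image center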