Example #1
class ImageAvNode(object):
	def __init__(self):
		self.node_name = "Image Av"
		self.sub_image = rospy.Subscriber("/ernie/camera_node/image/compressed", CompressedImage,self.flipImageNP)
		self.pub_image = rospy.Publisher("~mirror_image/compressed", CompressedImage, queue_size=1)
		self.bridge = CvBridge()
	def flipImageNP(self,msg):
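		# decode the compressed JPEG, mirror it horizontally, and republish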
		np_array = np.fromstring(msg.data, np.uint8)
		image_np = cv2.imdecode(np_array, cv2.CV_LOAD_IMAGE_COLOR)		
		#flip_arr = np.fliplr(np_arr)
		imageflip_np=cv2.flip(image_np,1)
		flip_im = CompressedImage()
		flip_im.data = np.array(cv2.imencode('.jpg', imageflip_np)[1]).tostring()
		#flip_im.data = flip_arr.tostring()
		flip_im.header.stamp = msg.header.stamp
		self.pub_image.publish(flip_im)


	def flipImage(self,msg):
		# Decode from ROS Image message with OpenCV
		cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")

		#image_cv = cv2.imdecode(np.fromstring(msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
		hei_original = cv_image.shape[0]
		wid_original = cv_image.shape[1]
		#reverseimage = cv_image[:,:,-1]
		reverseimg = cv2.flip(cv_image, 1)
		image_msg_out = self.bridge.cv2_to_imgmsg(reverseimg, "bgr8")
		image_msg_out.header.stamp = msg.header.stamp
		self.pub_image.publish(image_msg_out)
Example #2
def callback(self, data):
	bridge = CvBridge()
	try:
		cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
	except CvBridgeError, e:
		# print won't work, need to use rospy.log_ format
		rospy.loginfo("Media failed to convert to OpenCV format")
Example #3
class Echo:
    def __init__(self):
        self.node_name = "BlobDetector"
        self.thread_lock = threading.Lock()
        self.sub_image = rospy.Subscriber("/camera/rgb/image_rect_color",\
                Image, self.cbImage, queue_size=1)
        self.pub_image = rospy.Publisher("/blob_image",\
                Image, queue_size=1)
        self.pub_data = rospy.Publisher("/blob_detections", BlobDetections, queue_size = 1)
        self.bridge = CvBridge()

        rospy.loginfo("[%s] Initialized." %(self.node_name))

    def cbImage(self,image_msg):
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()


    def processImage(self, image_msg):
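        # skip this frame if the previous one is still being processed (non-blocking acquire)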
        if not self.thread_lock.acquire(False):
            return
        image_cv = self.bridge.imgmsg_to_cv2(image_msg)
        msg, img = process(image_cv) 
        try:
            self.pub_image.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
            self.pub_data.publish(msg)
        except CvBridgeError as e:
            print(e)
        self.thread_lock.release()
Example #4
def run():
  global right_image
  global left_image

  # Init node
  rospy.init_node("stereo_vision");

  # Subscribe for two "eyes"
  rospy.Subscriber("/stereo/right/image_raw", Image, right_image_callback)
  rospy.Subscriber("/stereo/left/image_raw", Image, left_image_callback)

  stereo_publisher = rospy.Publisher("/vision/stereo", Image, queue_size=1000)

  # Action
  print "Start processing..."

  bridge = CvBridge()
  worker = StereoVision()

  # rate = rospy.Rate(10)

  while not rospy.is_shutdown():
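    # wait until both cameras have delivered at least one frame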

    if right_image is not None and left_image is not None:

      cv_right_image = bridge.imgmsg_to_cv2(right_image, desired_encoding="bgr8")

      cv_left_image = bridge.imgmsg_to_cv2(left_image, desired_encoding="bgr8")

      cv_stereo_image = worker.create_stereo_image( cv_right_image, cv_left_image )

      stereo_image = bridge.cv2_to_imgmsg(cv_stereo_image, encoding="bgr8")

      stereo_publisher.publish( stereo_image )
Example #5
	def convertData(self, bagfile):
		seq = 0
		bridge = CvBridge()
		while True:
			# Capture frame-by-frame
			ret, cvImage = self.capture.read()
			if not ret:
				break
			try:
				imageMsg = bridge.cv2_to_imgmsg(cvImage, "bgr8") # TODO: format spec as cmd option?
			except CvBridgeError, e:
				print e
				continue

			# creating ros message
			seq = seq + 1

			imageMsg.header.seq = seq
			# TODO: temporary hack, time sync/source is needed
			imageMsg.header.stamp =  rospy.Time.from_sec(time.time()) 

			# write message to bag file
			bagfile.write(self.topic, imageMsg, imageMsg.header.stamp)

			# this is not so important for conversion
			if self.showFrames == True:
				cv2.imshow('frame', cvImage)
				if cv2.waitKey(1) & 0xFF == ord('q'):
					break
Example #6
  def spin(self):
    time.sleep(1.0)
    started = time.time()
    counter = 0
    cvim = numpy.zeros((480, 640, 1), numpy.uint8)
    ball_xv = 10
    ball_yv = 10
    ball_x = 100
    ball_y = 100

    cvb = CvBridge()

    while not rospy.core.is_shutdown():
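      # redraw the bouncing ball on a cleared canvas each iteration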

      cvim.fill(0)
      cv2.circle(cvim, (ball_x, ball_y), 10, 255, -1)

      ball_x += ball_xv
      ball_y += ball_yv
      if ball_x in [10, 630]:
        ball_xv = -ball_xv
      if ball_y in [10, 470]:
        ball_yv = -ball_yv

      self.pub.publish(cvb.cv2_to_imgmsg(cvim))

      time.sleep(0.03)
Example #7
def shapeCallback(req):
	rosimg = req.im
	cvbridge = CvBridge()
	try:
		bw_img = cvbridge.imgmsg_to_cv2(rosimg, desired_encoding="passthrough")
	except CvBridgeError, e:
		print e
Example #8
def tachyon():
    rospy.init_node('tachyon')

    image_topic = rospy.get_param('~image', '/tachyon/image')
    image_pub = rospy.Publisher(image_topic, Image, queue_size=5)

    bridge = CvBridge()

    mode = rospy.get_param('~mode', 'mono8')
    config_file = rospy.get_param('~config', 'tachyon.yml')

    path = rospkg.RosPack().get_path('mashes_tachyon')
    config_filename = os.path.join(path, 'config', config_file)

    tachyon = Tachyon(config=config_filename)
    tachyon.connect()
    
    tachyon.calibrate(24)
    
    while not rospy.is_shutdown():
        try:
            frame, header = tachyon.read_frame()
            frame = tachyon.process_frame(frame)
            if mode == 'rgb8':
                frame = LUT_IRON[frame]
            else:
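                # right-shift drops the two low-order bits so the frame fits in 8 bits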
                frame = np.uint8(frame >> 2)
            image_msg = bridge.cv2_to_imgmsg(frame, encoding=mode)
            image_msg.header.stamp = rospy.Time.now()
            image_pub.publish(image_msg)
        except CvBridgeError, e:
            print e
Example #9
class PatchSaver(object):

    def __init__(self):
        rospy.init_node('patch_saver',log_level=rospy.DEBUG)

        self.patch_array_sub = rospy.Subscriber('patch_array', PatchArray,
                self.patch_array_callback, None, 1)

        self.debug_img_pub = rospy.Publisher('patch_array_debug_img', Image)

        self.bridge = CvBridge()

        self.path = rospy.get_param('~save_path')

    def patch_array_callback(self, PatchArray):
        rospy.loginfo("Patch Array Callback")
        num = np.random.random()
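        # randomly subsample: save ~10% of multi-patch arrays and ~5% of all arrays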
        if ((len(PatchArray.patch_array) > 1) and num > 0.9) or (num > 0.95):
            for idx, patch in enumerate(PatchArray.patch_array):
                image = np.asarray(self.bridge.imgmsg_to_cv2(patch.image,'bgr8'))
                mask = np.asarray(self.bridge.imgmsg_to_cv2(patch.mask,'mono8'))
                cv2.imwrite(self.path +
                        str(int(PatchArray.header.stamp.to_sec() * 1000))
                        + "_" + str(idx) + "_image.png", image)
                cv2.imwrite(self.path +
                        str(int(PatchArray.header.stamp.to_sec() * 1000))
                        + "_" + str(idx) + "_mask.png", mask)
Example #10
def send_camera_data(rgb_image, thresh_image):
    #global cv_depth_thresh
    bridge = CvBridge()
    cd = CameraData()
    cd.rgb_image = rgb_image
    cd.thresh_image = bridge.cv2_to_imgmsg(thresh_image, "passthrough")
    return cd
Example #11
def processFrame(image):

    global pub1, pub2
    conVerter = CvBridge()

    #Convert from a ROS image to a CV image
    try:
        image = conVerter.imgmsg_to_cv2(image, "bgr8")
    except CvBridgeError as e:
        print (e)

    result = bT.computeSpheres(image)
    msgBallCoords = ballcoords()
    msgModImage = Image()
    if result is None:
        return
    msgBallCoords.color= "RED"
    msgBallCoords.x = result[0]
    msgBallCoords.y = result[1]
    msgBallCoords.radius = result[2]

    # convert from a CV image to a ROS image

    resImage = result[3]

    #resImage = conVerter.cv2_to_imgmsg(resImage , "bgr8")
    msgModImage = conVerter.cv2_to_imgmsg(resImage , "bgr8")

    rospy.loginfo(msgBallCoords)
    pub1.publish(msgBallCoords)
    pub2.publish(msgModImage)
Example #12
def save_images(bagfile):
    if not os.path.isdir("video_images"):
        os.mkdir("video_images")
    if not os.path.isdir("depth_images"):
        os.mkdir("depth_images")
    bag = rosbag.Bag(bagfile)
    bridge = CvBridge()
    count = 0
    o = open('image_log.csv','w')
    o.write('im,secs,nsecs,extra\n')
    
    for topic, msg, t in bag.read_messages():        
        if topic == '/camera/rgb/image_rect_color':
            img = bridge.imgmsg_to_cv(msg, "bgr8")
            img_name = "video_images/img_%06d.png" %count
            o.write('im,%d,%d,\n' % (t.secs,t.nsecs))
            count += 1
            cv.SaveImage(img_name, img)

        elif topic == '/camera/depth_registered/image_rect':
            img = bridge.imgmsg_to_cv(msg, "passthrough")  # depth images are not bgr8; keep the native encoding
            img_name = "depth_images/img_%06d.png" %count
            #o.write('de,%d,%d,\n' % (t.secs,t.nsecs))
            #count += 1
            cv.SaveImage(img_name, img)
Example #13
def sendImg():
    global FPS
    global DEV
    global last_fpsGlobal
    rospy.init_node('camera_publisher_node', anonymous=True)
    image_pub = rospy.Publisher("/camera_publisher/output" + str(DEV) + "_" + str(FPS), Image, queue_size=10)
    bridge = CvBridge()
    cap = cv2.VideoCapture(DEV)
    cap.set(cv2.cv.CV_CAP_PROP_FPS, FPS)
    threading.Timer(1, printFPS).start()
    while not rospy.is_shutdown():
        start = datetime.now()
        # Capture frame-by-frame
        ret, frame = cap.read()
        fpsGlobal = 1000000 / (datetime.now() - start).microseconds
        last_fpsGlobal = (fpsGlobal * 0.1) + (last_fpsGlobal * 0.9)
        if GUI:
            #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            cv2.imshow("output" + str(DEV) + "_" + str(FPS), frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        try:
            image_pub.publish(bridge.cv2_to_imgmsg(frame, "bgr8"))
        except:
            pass
Example #14
    def get_img(self, bag):
        bridge = CvBridge()
        for topic, msg, t in bag.read_messages(topics=['/cv_camera/image_raw/']):
            img = bridge.imgmsg_to_cv2(msg, "bgr8")
            rects = self.getCircles(img, 35)
            for rect in rects:
                yield rect
Example #15
	def on_enter(self,userdata):
		bridge = CvBridge()
		cv_image = bridge.imgmsg_to_cv2(userdata.Image, desired_encoding="passthrough")
		self._filename = os.path.expanduser('~/picture_'+str(rospy.Time.now())+'.jpg')
		print 'Saving file to', self._filename
		cv2.imwrite(self._filename, cv_image)
		print 'Picture has been saved to the home folder'
Example #16
class ImageDisplay:

    def __init__(self, device_num=0, color=True):
        cv.NamedWindow("Display",1)
        
        self.bridge = CvBridge()
        self.color = color

        self.device_num = device_num
        sub_name = 'camera_' + str(self.device_num)
        self.image_sub = rospy.Subscriber(sub_name,Image,self.image_callback)
        node_name = 'image_display' + str(self.device_num)
        rospy.init_node(node_name, anonymous=True)

    def image_callback(self,data):
        try:
            if self.color:
                cv_image = self.bridge.imgmsg_to_cv(data, "rgb8")
            else:
                cv_image = self.bridge.imgmsg_to_cv(data, "mono8")
        except CvBridgeError, e:
            print e
            return

        cv.ShowImage("Display", cv_image)
        cv.WaitKey(3)
Example #17
class ImageReducer:
#    # Convert a ROS Image to the Numpy matrix used by cv2 functions
#    def rosimg2cv(self, ros_image):
#        # Convert from ROS Image to old OpenCV image
#        frame = self.cvbridge.imgmsg_to_cv(ros_image, ros_image.encoding)
#        # Convert from old OpenCV image to Numpy matrix
#        return np.array(frame, dtype=np.uint8) #TODO: find out actual dtype


    def __init__(self, topics, factor):
        self.cvbridge = CvBridge()
        self.topics = topics
        self.factor = factor

        for topic in topics:
            outTopic = "/debug" + topic
            callback = self.reducerCallback(topic, outTopic)
            rospy.Subscriber(topic, Image, callback)


    # Returns a callback for a particular Image topic which reduces the image
    # and publishes it
    def reducerCallback(self, imageTopic, outTopic):
        publisher = rospy.Publisher(outTopic, Image)
        factor = self.factor
        def callback(rosImage):
            try:
                cvimg = self.cvbridge.imgmsg_to_cv(rosImage, desired_encoding="passthrough")
                outimg = cv2.cv.CreateMat(int(cvimg.rows * factor), int(cvimg.cols * factor), cvimg.type)
                cv2.cv.Resize(cvimg, outimg)
                publisher.publish(self.cvbridge.cv_to_imgmsg(outimg, encoding="bgr8")) #TODO: figure out actual encoding
            except CvBridgeError, e:
                print e

        return callback
Example #18
    def test_target_image(self):
        """Tests posting telemetry data through client."""
        # Set up test data.
        url = "http://interop"
        client_args = (url, "testuser", "testpass", 1.0)
        target_id = 1

        width = 40
        height = 30
        nparr = np.random.randint(0, 256, (width, height, 3)).astype(np.uint8)

        bridge = CvBridge()
        ros_img = bridge.cv2_to_imgmsg(nparr)

        img = TargetImageSerializer.from_msg(ros_img)

        with InteroperabilityMockServer(url) as server:
            # Setup mock server.
            server.set_root_response()
            server.set_login_response()
            server.set_post_target_image_response(target_id)
            server.set_get_target_image_response(target_id, img, "image/png")
            server.set_delete_target_image_response(target_id)

            # Connect client.
            client = InteroperabilityClient(*client_args)
            client.wait_for_server()
            client.login()
            client.post_target_image(target_id, ros_img)
            client.get_target_image(target_id)
            client.delete_target_image(target_id)
Example #19
class image_converter:
  def __init__(self):
    rospy.init_node('image_converter', anonymous=True)
    self.image_pub = rospy.Publisher("analyzed_image", Image, queue_size=10)

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("image_raw", Image, self.callback)

    self.stabilizer = VideoStabilizer([[409, 250], [300, 762], [1123, 848], [1600, 238]])


  def callback(self, data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
      print(e)
      return

    #cv_image = self.analyze_image(cv_image)
    #self.showImage(cv_image)

    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError as e:
      print(e)


  def analyze_image(self, image):
    image = self.stabilizer.stabilize_frame(image)
    return image


  def showImage(self, image):
    cv2.imshow("Image window", image)
    cv2.waitKey(3)
Example #20
def buffer_data(bag, input_topic, compressed):
    image_buff = []
    time_buff  = []
    start_time = None
    depthData = []
    bridge     = CvBridge()
    #bag = rosbag.Bag(bagFile)
    #Buffer the images, timestamps from the rosbag
    for topic, msg, t in bag.read_messages(topics=[input_topic]):
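        # append this message's raw bytes to the running depth-data buffer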
        depthData+=msg.data
        if start_time is None:
            start_time = t

        #Get the image
        if not compressed:
            try:
                cv_image = bridge.imgmsg_to_cv2(msg, "32FC1")
            except CvBridgeError as e:
                print e
        else:
            nparr = np.fromstring(msg.data, np.uint8)
            cv_image = cv2.imdecode(nparr, cv2.CV_LOAD_IMAGE_COLOR)

        # normalize depth image 0 to 255
        depthImg = np.array(cv_image, dtype=np.float32)
        cv2.normalize(depthImg, depthImg, 0, 255, cv2.NORM_MINMAX)
        time_buff.append(t.to_sec() - start_time.to_sec())
        image_buff.append(depthImg)

    return image_buff, time_buff  
Example #21
    def test_numpy_types(self):
        import cv2
        import numpy as np
        bridge_ = CvBridge()
        self.assertRaises(TypeError, lambda: bridge_.cv2_to_imgmsg(1, "rgba8"))
        if hasattr(cv2, 'cv'):
            self.assertRaises(TypeError, lambda: bridge_.cv2_to_imgmsg(cv2.cv(), "rgba8"))
Example #22
def callback(data):

  bridge = CvBridge()
#  cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
  cv_image = bridge.imgmsg_to_cv2(data, "mono8")
  cv_image = cv2.flip(cv_image, 1)
  color_presentation(cv_image)
Example #23
	def __init__(self):
		rospy.init_node('GoForward', anonymous=False)

		rospy.loginfo("To stop TurtleBot CTRL + C")  
		rospy.on_shutdown(self.shutdown)
	
		###----------- IMAGE ----------###
		# turtlebot
		#self.image_sub = rospy.Subscriber("/camera/depth/image",Image, self.callback, queue_size = 1)
		# gazebo
		#self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image, self.callback, queue_size = 1)
		#self.image_sub = rospy.Subscriber("/camera/depth/image_raw",Image, self.callback, queue_size = 1)
		self.bridge = CvBridge()
		self.time = time.time()
		

		rospy.Subscriber('scan', LaserScan, self.laser_callback, queue_size = 1)
		
		#print scan
		scan = np.random.uniform(low=-10.0, high=10.0, size=(640,1))
		self.scanim = self.scan2im(scan)
		self.counter = 0

		self.r = rospy.Rate(10)
		
		self.move_cmd = Twist()
		#self.move_cmd.linear.x = 0.2
		#self.move_cmd.angular.z = 0

		self.bridge = CvBridge()
		
		while not rospy.is_shutdown():
			#self.cmd_vel.publish(self.move_cmd)
			self.r.sleep()
Example #24
    def test_encode_decode_cv2_compressed(self):
        import numpy as np
        # from: http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
        formats = ["jpg", "jpeg", "jpe", "png", "bmp", "dib", "ppm", "pgm", "pbm",
                   "jp2", "sr", "ras", "tif", "tiff"]  # this formats rviz is not support

        cvb_en = CvBridge()
        cvb_de = CvBridge()

        for w in range(100, 800, 100):
            for h in range(100, 800, 100):
                for f in formats:
                    for channels in ([], 1, 3):
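                        # channels == [] exercises a 2-D (h, w) image with no channel axis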
                        if channels == []:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w)))
                        else:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w, channels)))
                        compress_rosmsg = cvb_en.cv2_to_compressed_imgmsg(original, f)
                        newimg          = cvb_de.compressed_imgmsg_to_cv2(compress_rosmsg)
                        self.assert_(original.dtype == newimg.dtype)
                        if channels == 1:
                            # in that case, a gray image has a shape of size 2
                            self.assert_(original.shape[:2] == newimg.shape[:2])
                        else:
                            self.assert_(original.shape == newimg.shape)
                        self.assert_(len(original.tostring()) == len(newimg.tostring()))
Example #25
def face_detection(image):
    bridge = CvBridge()
    
    try:
        cv_image = bridge.imgmsg_to_cv2(image, "bgr8")
    except CvBridgeError as e:
        print e
        return
    
    new_size = (int(cv_image.shape[1]/2), int(cv_image.shape[0]/2))
    #cv_array = np.array(cv_image, dtype=np.uint8)
    gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    small_img = cv2.resize(gray, new_size, interpolation = cv2.INTER_LINEAR)
    small_img = cv2.equalizeHist(small_img)
    faces = faceCascade.detectMultiScale(
        small_img,
        scaleFactor=1.2,
        minNeighbors=2,
        minSize=(45, 45),
        flags=0
    )
    
    print len(faces)
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        #print "test"
        point1=(int(x*2), int(y*2))
        point2=(int((x+w)*2), int((y+h)*2))
        cv2.rectangle(cv_image, point1, point2, (0, 255, 0), 3, 8, 0)

    cv2.imshow("Image window", cv_image)
    cv2.waitKey(1)
Example #26
    def map_segmentation_action_client_(self):                      ######## this function is called
        mat = cv.fromarray(self.map_)
#         cv.ShowImage( "map_image", mat )
#         cv.WaitKey()   
        #creates a action client object     
        client = actionlib.SimpleActionClient( 'segment_map', autopnp_scenario.msg.MapSegmentationAction )               
        cv_image = CvBridge()
        #filling the goal msg format for map segmentation action server        
        goal = autopnp_scenario.msg.MapSegmentationGoal( input_map = cv_image.cv_to_imgmsg( mat , "mono8"), 
                                                    map_resolution = self.map_resolution_, 
                                                    map_origin_x = self.map_origin_x_ , 
                                                    map_origin_y = self.map_origin_y_,
                                                    return_format_in_pixel = True )        
        rospy.loginfo("waiting for the map segmentation action server to start.....")
        client.wait_for_server()        
        rospy.loginfo("map segmentation action server started, sending goal.....")        
        client.send_goal(goal)        
        finished_before_timeout = client.wait_for_result(rospy.Duration(30.0))        
        if finished_before_timeout:
            state = client.get_state()
            if state == 3:
                state = 'SUCCEEDED'
                rospy.loginfo("action finished: %s " % state)                
            else:
                rospy.loginfo("action finished: %s " % state)    
        else:
            rospy.loginfo("Action did not finish before the time out.")                
        return client.get_result()
Example #27
    def test_encode_decode_cv2(self):
        import cv2
        import numpy as np
        fmts = [cv2.CV_8U, cv2.CV_8S, cv2.CV_16U, cv2.CV_16S, cv2.CV_32S, cv2.CV_32F, cv2.CV_64F]

        cvb_en = CvBridge()
        cvb_de = CvBridge()

        for w in range(100, 800, 100):
            for h in range(100, 800, 100):
                for f in fmts:
                    for channels in ([], 1, 2, 3, 4, 5):
                        if channels == []:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w)))
                        else:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w, channels)))
                        rosmsg = cvb_en.cv2_to_imgmsg(original)
                        newimg = cvb_de.imgmsg_to_cv2(rosmsg)

                        self.assert_(original.dtype == newimg.dtype)
                        if channels == 1:
                            # in that case, a gray image has a shape of size 2
                            self.assert_(original.shape[:2] == newimg.shape[:2])
                        else:
                            self.assert_(original.shape == newimg.shape)
                        self.assert_(len(original.tostring()) == len(newimg.tostring()))
Example #28
class image_converter:

  def __init__(self):
    self.image_pub = rospy.Publisher("image_topic_2",Image)

    cv2.namedWindow("Image window", 1)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("image_topic",Image,self.callback)

  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
      print(e)
      return

    (rows,cols,channels) = cv_image.shape
    if cols > 60 and rows > 60 :
      cv2.circle(cv_image, (50,50), 10, 255)

    cv2.imshow("Image window", cv_image)
    cv2.waitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError as e:
      print(e)
Example #29
class WebcamBridge:
    """
    class for moving webcam images from opencv to ros
    """
    def __init__(self):
        self.image_pub = rospy.Publisher("/tennisballs", Image)
        # self.circ_ctr_pub = rospy.Publisher("/circ_ctr", )
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/image_raw", Image, self.callback)       
        self.size = None
        rospy.sleep(1)
                  
    def callback(self, data):
        """When the camera publishes a new frame, it sends through the cv 
        processing, and publishes a new output image
        """
        print('In WebcamBridge.callback')
        # cv.WaitKey(3)
        try:
            raw = self.bridge.imgmsg_to_cv(data, 'bgr8')
            found = self.find_tennisball(raw)
            msg = self.bridge.cv_to_imgmsg(found, 'bgr8')
            self.image_pub.publish(msg)
        except CvBridgeError, e:
            print e
Example #30
class circle_detect:

    def __init__(self):
        self.image_pub = rospy.Publisher("circle_detection", Image, queue_size = 10)

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/rgb_image", Image, self.callback)

    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        x0 = 320
        y0 = 240
        idx = None
        minC = 800
        numC = None

        img = cv2.medianBlur(cv_image,5)
        imgg = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        cimg = cv2.cvtColor(imgg, cv2.COLOR_GRAY2BGR)
        circles = cv2.HoughCircles(imgg,cv2.cv.CV_HOUGH_GRADIENT, 1.2, 100,  param1 = 200, param2 = 30, minRadius = 5, maxRadius = 20)
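        # circles is None when nothing is found, otherwise an array of (x, y, radius) triples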


        if circles is None:
            cv2.imshow("Image Windows", cv_image)
            rospy.loginfo('No circles detected')
        #circles = np.uint16(np.around(circles))  # .astype("int")     # convert the (x,y) coordinates and radius of the circles to integers
        #numC = len(circles[0,:])
        else:
            print circles.shape


        # rospy.loginfo('circles found, number of circles {}'.format(numC))

        # for i in circles:
        #     dist[i] = distance(x0,y0, i[0],i[1])
        #     if dist[i] < minC:
        #         minC = dist[i]
        #         idx = i

        #for i in circles[0,:]:
        # print idx
        # cv2.circle(cv_image, (idx[0],idx[1]),idx[2], (0,255,0),1)     # draw the outer cirlce
        # cv2.circle(cv_image, (idx[0],idx[1]), 2, (0,0,255), 3)      # draw the center of the circle

            for i in circles[0,:]:
                cv2.circle(cv_image, (i[0],i[1]),i[2], (0,255,0),1)     # draw the outer circle
                cv2.circle(cv_image, (i[0],i[1]), 2, (0,0,255), 3)      # draw the center of the circle

        cv2.imshow("Image Windows", cv_image)
        cv2.waitKey(3)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)
Example #31
class AllSensorBot(object):
    def __init__(self, 
                 use_lidar=False, use_camera=True, use_imu=False,
                 use_odom=False, use_joint_states=False):

        # velocity publisher
        self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)

        # image publisher (newly added)
        self.image_pub = rospy.Publisher('processed_image', Image, queue_size=10)

        # enemy direction publisher
        self.enemydir_pub = rospy.Publisher('enemy_direction', String,queue_size=1)

        # lidar scan subscriber
        if use_lidar:
            self.scan = LaserScan()
            self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)

        # camera subscriber
        # please uncomment if you use the camera
        if use_camera:
            # for convert image topic to opencv obj
#            self.img = None # this didn't work
            cols = 640
            rows = 480
            self.img = np.full((rows, cols, 3), 0, dtype=np.uint8)
            self.bridge = CvBridge()
            self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)

        # imu subscriber
        if use_imu:
            self.imu_sub = rospy.Subscriber('imu', Imu, self.imuCallback)

        # odom subscriber
        if use_odom:
            self.odom_sub = rospy.Subscriber('odom', Odometry, self.odomCallback)

        # joint_states subscriber
        if use_joint_states:
            self.odom_sub = rospy.Subscriber('joint_states', JointState, self.jointstateCallback)

    def strategy(self):
        '''
        calc Twist and publish cmd_vel topic
        '''
        r = rospy.Rate(1)

        while not rospy.is_shutdown():
            # update twist
            twist = Twist()
            twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0
            twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0

            # publish twist topic
            self.vel_pub.publish(twist)
            r.sleep()


    # lidar scan topic call back sample
    # update lidar scan state
    def lidarCallback(self, data):
        self.scan = data
        rospy.loginfo(self.scan)

    # camera image callback sample
    # convert image topic to opencv object and show
    def imageCallback(self, data):
        try:
            self.img = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)

        """ # dockerの人はコメントアウト
        cv2.imshow("Image window", self.img)
        cv2.waitKey(1)
        """

    # imu call back sample
    # update imu state
    def imuCallback(self, data):
        self.imu = data
        rospy.loginfo(self.imu)

    # odom call back sample
    # update odometry state
    def odomCallback(self, data):
        self.pose_x = data.pose.pose.position.x
        self.pose_y = data.pose.pose.position.y
        rospy.loginfo("odom pose_x: {}".format(self.pose_x))
        rospy.loginfo("odom pose_y: {}".format(self.pose_y))

    # jointstate call back sample
    # update joint state
    def jointstateCallback(self, data):
        self.wheel_rot_r = data.position[0]
        self.wheel_rot_l = data.position[1]
        rospy.loginfo("joint_state R: {}".format(self.wheel_rot_r))
        rospy.loginfo("joint_state L: {}".format(self.wheel_rot_l))
Example #32
    def __init__(self):
        self.image_pub = rospy.Publisher("/usb_cam/image_raw",
                                         Image,
                                         queue_size=1)

        self.bridge = CvBridge()
Example #33
import smach
import smach_ros

import time
import numpy as np

import cv2
from cv_bridge import CvBridge, CvBridgeError

from std_msgs.msg import String, Empty
from sensor_msgs.msg import Image, CompressedImage
from geometry_msgs.msg import Twist, Vector3, Pose, Vector3Stamped

################################################################################

bridge = CvBridge()

# Trackers
tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
tracker_type = tracker_types[2]
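# index 2 selects 'KCF', matching the cv2.TrackerKCF_create() call below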
tracker = cv2.TrackerKCF_create()

atraso = 1.5  # acceptable camera delay

xTela = 300
yTela = 200

Move_X = 0
Move_Y = 0

velRobot = 0.2
Example #35
class SemanticCloud:
    """
    Class for ros node to take in a color image (bgr) and do semantic segmentation on it to produce an image with semantic class colors (chair, desk etc.)
    Then produce point cloud based on depth information
    CNN: PSPNet (https://arxiv.org/abs/1612.01105) (with resnet50) pretrained on ADE20K, fine tuned on SUNRGBD or not
    """
    def __init__(self, gen_pcl=True):
        """
        Constructor
        \param gen_pcl (bool) whether generate point cloud, if set to true the node will subscribe to depth image
        """
        self.real_sense = rospy.get_param('/semantic_pcl/real_sense')
        self.imgCounter = 0
        self.imgSemanticCounter = 0
        #self.labels_pub  = rospy.Publisher("/semantic_pcl/labels", OverlayText, queue_size=1)
        self.labels_list = []
        #self.text = OverlayText()
        # Get image size
        self.img_width, self.img_height = rospy.get_param(
            '/camera/width'), rospy.get_param('/camera/height')
        self.throttle_rate = rospy.get_param('/semantic_pcl/throttle_rate')
        self.last_time = rospy.Time.now()
        # Set up CNN if using semantics
        print('Setting up CNN model...')
        # Set device
        #self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Get dataset
        self.dataset = rospy.get_param('/semantic_pcl/dataset')
        # Setup model
        model_name = 'vgg_unet'
        model_path = rospy.get_param('/semantic_pcl/model_path')
        model_json_path = rospy.get_param('/semantic_pcl/model_json_path')
        test_image_path_input = rospy.get_param(
            '/model_params/test_image_path_input')
        test_image_path_output = rospy.get_param(
            '/model_params/test_image_path_output')
        model_input_height = rospy.get_param(
            '/model_params/model_input_height')
        model_input_width = rospy.get_param('/model_params/model_input_width')
        model_output_height = rospy.get_param(
            '/model_params/model_output_height')
        model_output_width = rospy.get_param(
            '/model_params/model_output_width')
        model_n_classes = rospy.get_param('/model_params/model_n_classes')
        model_checkpoints_path = rospy.get_param(
            '/model_params/model_checkpoints_path')
        self.test_image_path_output_folder = rospy.get_param(
            '/model_params/test_image_path_output_folder')

        if self.dataset == 'kucarsRisk':
            self.n_classes = 7  # Semantic class number
            # load the model + weights
            # Recreate the exact same model, including its weights and the optimizer
            #self.new_model = model_from_json(model_path,custom_objects=None)
            #clear_session()
            self.new_model = load_model(model_path)  #,custom_objects=None)
            self.new_model.input_width = model_input_width
            self.new_model.input_height = model_input_height
            self.new_model.output_width = model_output_width
            self.new_model.output_height = model_output_height
            self.new_model.n_classes = model_n_classes
            # Show the model architecture
            self.new_model.summary()
            print("Loaded model from disk")

            self.new_model.compile(loss='categorical_crossentropy',
                                   optimizer='adadelta',
                                   metrics=['accuracy'])
            ##### One Image Prediction ####

            img = cv2.imread(test_image_path_input)
            predict(model=self.new_model,
                    inp=test_image_path_input,
                    out_fname=test_image_path_output)
            out = cv2.imread(test_image_path_output)
            out_rgb = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
            #plt.imshow(out)
            #plt.show()

        self.cmap = color_map(
            N=self.n_classes,
            normalized=True)  # Color map for semantic classes
        # Declare array containers
        # Set up ROS
        print('Setting up ROS...')
        self.bridge = CvBridge()  # CvBridge to transform ROS Image message to OpenCV image
        # Semantic image publisher
        self.sem_img_pub = rospy.Publisher(
            "/semantic_pcl/semantic_hazard_image", Image, queue_size=1)
        # Set up ros image subscriber
        # Set buff_size to average msg size to avoid accumulating delay
        if gen_pcl:
            # Point cloud frame id
            frame_id = rospy.get_param('/semantic_pcl/frame_id')
            # Camera intrinsic matrix
            fx = rospy.get_param('/camera/fx')
            fy = rospy.get_param('/camera/fy')
            cx = rospy.get_param('/camera/cx')
            cy = rospy.get_param('/camera/cy')
            intrinsic = np.matrix([[fx, 0, cx], [0, fy, cy], [0, 0, 1]],
                                  dtype=np.float32)
            self.pcl_pub = rospy.Publisher("/semantic_pcl/semantic_pcl",
                                           PointCloud2,
                                           queue_size=1)
            self.color_sub = message_filters.Subscriber(
                rospy.get_param('/semantic_pcl/color_image_topic'),
                Image,
                queue_size=1,
                buff_size=30 * 480 * 640)
            self.depth_sub = message_filters.Subscriber(
                rospy.get_param('/semantic_pcl/depth_image_topic'),
                Image,
                queue_size=1,
                buff_size=40 * 480 * 640
            )  # increase buffer size to avoid delay (despite queue_size = 1)
            self.ts = message_filters.ApproximateTimeSynchronizer(
                [self.color_sub, self.depth_sub], queue_size=1, slop=0.3
            )  # Take in one color image and one depth image with a limited time gap between message time stamps
            self.ts.registerCallback(self.color_depth_callback)
            #self.cloud_generator = ColorPclGenerator(intrinsic, self.img_width,self.img_height, frame_id , self.point_type)
            self.cloud_generator = ColorPclSemanticGenerator(
                intrinsic, self.img_width, self.img_height, frame_id)
        else:
            print("No Cloud generation")
            self.image_sub = rospy.Subscriber(
                rospy.get_param('/semantic_pcl/color_image_topic'),
                Image,
                self.color_callback,
                queue_size=1,
                buff_size=30 * 480 * 640)

        semantic_colored_labels_srv = rospy.Service(
            'get_semantic_colored_labels', GetSemanticColoredLabels,
            self.get_semantic_colored_labels)
        print('Ready.')

    def get_semantic_colored_labels(self, GetSemanticColoredLabels):

        print("Get Semantic Colored Labels Service called")
        ret = GetSemanticColoredLabelsResponse()
        scls = SemanticColoredLabels()
        scl = SemanticColoredLabel()

        for i in range(0, self.n_classes):
            scl = SemanticColoredLabel()
            label = labels[i]
            scl.label = label
            print(self.cmap[i, 0])
            print(self.cmap[i, 1])
            print(self.cmap[i, 2])
            scl.color_r = self.cmap[i, 0]
            scl.color_g = self.cmap[i, 1]
            scl.color_b = self.cmap[i, 2]
            scls.semantic_colored_labels.append(scl)
        print(scls)
        ret = scls
        return ret
        '''
        label = labels[0]; 
        scl.label = label
        scl.color_r =  np.uint8(107.0) #self.cmap[i,0]
        scl.color_g =  np.uint8(194.0) #self.cmap[i,1]
        scl.color_b =  np.uint8(216.0) #self.cmap[i,2]
        scls.semantic_colored_labels.append(scl)
        scl = SemanticColoredLabel()
        label = labels[1]; 
        scl.label = label
        scl.color_r = np.uint8(122.0) #self.cmap[i,0]
        scl.color_g = np.uint8(77.0) #self.cmap[i,1]
        scl.color_b = np.uint8(200.0) #self.cmap[i,2]
        scls.semantic_colored_labels.append(scl)        
        scl = SemanticColoredLabel()
        label = labels[2]; 
        scl.label = label
        scl.color_r = np.uint8(103.0) #self.cmap[i,0]
        scl.color_g = np.uint8(130.0) #self.cmap[i,1]
        scl.color_b = np.uint8(66.0) #self.cmap[i,2]
        scls.semantic_colored_labels.append(scl)      
        scl = SemanticColoredLabel()
        label = labels[3]; 
        scl.label = label
        scl.color_r = np.uint8(158.0) #self.cmap[i,0]
        scl.color_g = np.uint8(193.0) #self.cmap[i,1]
        scl.color_b = np.uint8(72.0) #self.cmap[i,2]
        scls.semantic_colored_labels.append(scl)      
        scl = SemanticColoredLabel()
        label = labels[4]; 
        scl.label = label
        scl.color_r = np.uint8(129.0) #self.cmap[i,0]
        scl.color_g = np.uint8(232.0) #self.cmap[i,1]
        scl.color_b = np.uint8(149.0) #self.cmap[i,2]
        scls.semantic_colored_labels.append(scl)      
	ret = scls
        scl = SemanticColoredLabel()
        label = labels[5]; 
        scl.label = label
        scl.color_r = np.uint8(251.0) #self.cmap[i,0]
        scl.color_g = np.uint8(232.0) #self.cmap[i,1]
        scl.color_b = np.uint8(64.0) #self.cmap[i,2]
        scls.semantic_colored_labels.append(scl)      
        scl = SemanticColoredLabel()
        label = labels[6]; 
        scl.label = label
        scl.color_r = np.uint8(79.0) #self.cmap[i,0]
        scl.color_g = np.uint8(230.0) #self.cmap[i,1]
        scl.color_b = np.uint8(207.0) #self.cmap[i,2]
        scls.semantic_colored_labels.append(scl)      
	print(scls)
        ret = scls
        return ret
        '''

    def get_label(self, pred_label):
        #print(" ============= ")
        unique_labels = np.unique(pred_label)
        count = 0

        self.text.width = 200
        self.text.height = 800
        self.text.left = 10
        self.text.top = 10 + 20 * count
        self.text.text_size = 12
        self.text.line_width = 2
        self.text.font = "DejaVu Sans Mono"
        self.text.fg_color = ColorRGBA(25 / 255.0, 1.0, 240.0 / 255.0, 1.0)
        self.text.bg_color = ColorRGBA(255, 255, 255, 0.5)
        for color_index in unique_labels:
            label = ''
            label = labels[color_index]
            count += 1
            if not label in self.labels_list:
                self.labels_list.append(label)
                self.text.text += """<span style="color: rgb(%d,%d,%d);">%s</span>
                """ % (self.cmap[color_index, 0], self.cmap[color_index, 1],
                       self.cmap[color_index, 2], label.capitalize())
            #print("Published Label Name with index:" + str(color_index) + " is:" + label)
        self.labels_pub.publish(self.text)

    def color_callback(self, color_img_ros):
        """
        Callback function for color image, do semantic segmentation and show the decoded image. For test purposes.
        \param color_img_ros (sensor_msgs.Image) input ros color image message
        """

        print('callback')
        try:
            color_img = self.bridge.imgmsg_to_cv2(
                color_img_ros, "bgr8")  # Convert ros msg to numpy array
        except CvBridgeError as e:
            print(e)
            return

        # Do semantic segmentation
        seg = predict(model=self.new_model, inp=color_img)
        seg = seg.astype(np.uint8)
        #print (seg.shape)
        #print (color_img.shape)

        # Do semantic segmentation
        '''
        class_probs = self.predict(color_img)
        confidence, label = class_probs.max(1)
        confidence, label = confidence.squeeze(0).numpy(), label.squeeze(0).numpy()
        label = resize(label, (self.img_height, self.img_width), order = 0, mode = 'reflect', preserve_range = True) # order = 0, nearest neighbour
        label = label.astype(np.int)

        # Add semantic class colors
        decoded = decode_segmap(label, self.n_classes, self.cmap)        # Show input image and decoded image
        confidence = resize(confidence, (self.img_height, self.img_width),  mode = 'reflect', preserve_range = True)
        '''

        cv2.imshow('Camera image', color_img)
        cv2.imshow('seg', seg)
        #cv2.imshow('confidence', confidence)
        #cv2.imshow('Semantic segmentation', decoded)
        cv2.waitKey(3)

    def color_depth_callback(self, color_img_ros, depth_img_ros):
        """
        Callback function to produce point cloud registered with semantic class color based on input color image and depth image
        \param color_img_ros (sensor_msgs.Image) the input color image (bgr8)
        \param depth_img_ros (sensor_msgs.Image) the input depth image (registered to the color image frame) (float32) values are in meters
        """
        tic = rospy.Time.now()
        diff = tic - self.last_time
        if diff.to_sec() < self.throttle_rate:
            return

        self.last_time = rospy.Time.now()
        # Convert ros Image message to numpy array
        try:
            color_img = self.bridge.imgmsg_to_cv2(color_img_ros, "bgr8")
            depth_img = self.bridge.imgmsg_to_cv2(depth_img_ros, "32FC1")
        except CvBridgeError as e:
            print(e)
            return
        # Resize depth
        if depth_img.shape[0] != self.img_height or depth_img.shape[1] != self.img_width:
            depth_img = resize(
                depth_img, (self.img_height, self.img_width),
                order=0,
                mode='reflect',
                preserve_range=True)  # order = 0, nearest neighbour
            depth_img = depth_img.astype(np.float32)
            # realsense camera gives depth measurements in mm
            if self.real_sense:
                depth_img = depth_img / 1000.0

        ################## Save all the Images for debugging #####################
        '''	
	filename = self.test_image_path_output_folder + "image" + str(self.imgCounter) + ".png" 
	print (filename)
	cv2.imwrite(filename, color_img) 
	self.imgCounter = self.imgCounter + 1 ; 
	'''
        ##########################################################################

        semantic_color = predict(model=self.new_model, inp=color_img)

        ################## Save all the Images for debugging #####################
        '''
	filename = self.test_image_path_output_folder + "semanticimage" + str(self.imgSemanticCounter) + ".png" 
	print (filename)
	cv2.imwrite(filename, semantic_color) 
	self.imgSemanticCounter = self.imgSemanticCounter + 1 ; 
	'''
        ##########################################################################
        #label_d = semantic_color.max(1)
        #label_d = resize(label_d, (self.img_height, self.img_width), order = 0, mode = 'reflect', preserve_range = True) # order = 0, nearest neighbour
        #label_d = label_d.astype(np.uint8)
        semantic_color = semantic_color.astype(np.uint8)
        #decoded = decode_segmap(label_d, self.n_classes, self.cmap)        # Show input image and decoded image
        # Publish semantic image
        if self.sem_img_pub.get_num_connections() > 0:
            try:
                semantic_color_msg = self.bridge.cv2_to_imgmsg(semantic_color,
                                                               encoding="bgr8")
                #rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                self.sem_img_pub.publish(semantic_color_msg)
            except CvBridgeError as e:
                print(e)

        cloud_ros = self.cloud_generator.generate_cloud_semantic(
            color_img, depth_img, semantic_color, color_img_ros.header.stamp)
        #Publish point cloud
        #self.get_label(pred_labels)
        self.pcl_pub.publish(cloud_ros)

Example #36
class Camera(object):
    def __init__(self):
        self.node_name = rospy.get_name()
        # get ROS parameters
        self.resource = rospy.get_param('~rtsp_resource')
        self.new_resource = ""
        self.image_topic = rospy.get_param('~image_topic')
        # open RTSP stream
        self.cap = cv2.VideoCapture(self.resource)
        if not self.cap.isOpened():
            rospy.logerr("Error opening resource `%s`. Please check." %
                         self.resource)
            exit(1)
        rospy.loginfo("Resource successfully opened")

        # create publishers
        self.image_pub = rospy.Publisher(self.image_topic,
                                         CompressedImage,
                                         queue_size=1)
        self.camera = rospy.Subscriber("~camera_switch",
                                       String,
                                       self.camera_switch_cb,
                                       queue_size=1)

        # initialize ROS_CV_Bridge
        self.ros_cv_bridge = CvBridge()
        self.change = False
        self.is_shutdown = False

    def startCapturing(self):
        # initialize variables
        print "Correctly opened resource, starting to publish feed."
        rval, cv_image = self.cap.read()

        # process frames
        while rval:
            if self.change:
                rospy.loginfo("Change now")
                self.cap.release()
                self.resource = self.new_resource
                self.cap = cv2.VideoCapture(self.resource)
                self.change = False
                rospy.loginfo("Changed")
            # get new frame
            rval, cv_image = self.cap.read()
            # handle Ctrl-C
            key = cv2.waitKey(20)
            if rospy.is_shutdown() or self.is_shutdown or key == 27 or key == 1048603:
                break
            # convert CV image to ROS message
            image_msg = self.ros_cv_bridge.cv2_to_compressed_imgmsg(cv_image)
            image_msg.header.stamp = rospy.Time.now()
            self.image_pub.publish(image_msg)

    def camera_switch_cb(self, data):
        rospy.loginfo(data.data)
        self.new_resource = data.data
        self.change = True

    def onShutdown(self):
        rospy.loginfo("[%s] Closing ip camera." % (self.node_name))
        self.is_shutdown = True
        rospy.loginfo("[%s] Shutdown." % (self.node_name))
Example #37
class image_converter:

  def __init__(self):
    self.image_pub = rospy.Publisher("/camera/rgb/image_mono",Image)

    #cv2.namedWindow("Image window", 1)
    #cv2.namedWindow("Depth window" , 1)	

    self.c = 0
    self.x = 0
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.callback)
    self.depth_sub = rospy.Subscriber("/camera/depth_registered/image_raw", Image, self.depth_callback)
        

  def callback(self,data):
    global reddetect
    global targetpos
    self.c = self.c+1
    if self.c==5 :
	    try:
	      frame = self.bridge.imgmsg_to_cv2(data, "bgr8")
	    except CvBridgeError, e:
	      print e

	    (rows,cols,channels) = frame.shape
	    frame = np.array(frame, dtype=np.uint8)	
	    #blue mask
	    # Convert BGR to HSV
	    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

	    # define range of blue color in HSV
	    lower_blue = np.array([150,168,20])
	    upper_blue = np.array([179,255,255])

	    # Threshold the HSV image to get only blue colors
	    mask = cv2.inRange(hsv, lower_blue, upper_blue)

	    # Bitwise-AND mask and original image
	    #res = cv2.bitwise_and(frame,frame, mask= mask)
	    res = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((12, 12), np.uint8))
	    count = np.sum(res)/255
	    self.x = self.findCOM(res)

	    if (self.x>5 and self.x<595 and count>900):
		    reddetect = 1
		    targetpos = self.x
		    for i in range(self.x-5,self.x+5):
			for j in range(240-5,240+5):
				res[j][i] = 150
	    else:
		    reddetect = 0
            #rospy.loginfo("avg: "  + str(self.x) + "total: " + str(count))

	    # Bitwise-AND mask and original image
	    #res = cv2.bitwise_and(frame,frame, mask= mask)
	    #res = cv2.morphologyEx(res,cv2.MORPH_CLOSE,(5,5))

	    #cv2.imshow('frame',frame)
	    #cv2.imshow('mask',mask)
	    cv2.imshow('res',res)
	    #k = cv2.waitKey(5) & 0xFF

	    #cv2.imshow("Image window", frame)
	    cv2.waitKey(3)

	    try:
	      self.image_pub.publish(self.bridge.cv2_to_imgmsg(frame, "bgr8"))
	    except CvBridgeError as e:
	      print(e)
	    self.c = 0
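The findCOM helper called in the callback is not included in the snippet; a plausible implementation (an assumption, not the original) returns the x-coordinate of the mask's centre of mass using image moments:

  def findCOM(self, mask):
    # Column coordinate of the binary mask's centre of mass (0 if the mask is empty).
    M = cv2.moments(mask, binaryImage=True)
    if M["m00"] == 0:
      return 0
    return int(M["m10"] / M["m00"])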
Example No. 38
0
class Bridge(object):
    def __init__(self, conf, server):
        rospy.init_node('styx_server')
        self.server = server
        self.vel = 0.
        self.yaw = None
        self.angular_vel = 0.
        self.bridge = CvBridge()

        self.callbacks = {
            '/vehicle/steering_cmd': self.callback_steering,
            '/vehicle/throttle_cmd': self.callback_throttle,
            '/vehicle/brake_cmd': self.callback_brake,
        }

        self.subscribers = [
            rospy.Subscriber(e.topic, TYPE[e.type], self.callbacks[e.topic])
            for e in conf.subscribers
        ]

        self.publishers = {
            e.name: rospy.Publisher(e.topic, TYPE[e.type], queue_size=1)
            for e in conf.publishers
        }

    def create_light(self, x, y, z, yaw, state):
        light = TrafficLight()

        light.header = Header()
        light.header.stamp = rospy.Time.now()
        light.header.frame_id = '/world'

        light.pose = self.create_pose(x, y, z, yaw)
        light.state = state

        return light

    def create_pose(self, x, y, z, yaw=0.):
        pose = PoseStamped()

        pose.header = Header()
        pose.header.stamp = rospy.Time.now()
        pose.header.frame_id = '/world'

        pose.pose.position.x = x
        pose.pose.position.y = y
        pose.pose.position.z = z

        q = tf.transformations.quaternion_from_euler(0., 0.,
                                                     math.pi * yaw / 180.)
        pose.pose.orientation = Quaternion(*q)

        return pose

    def create_float(self, val):
        fl = Float()
        fl.data = val
        return fl

    def create_twist(self, velocity, angular):
        tw = TwistStamped()
        tw.twist.linear.x = velocity
        tw.twist.angular.z = angular
        return tw

    def create_steer(self, val):
        st = SteeringReport()
        st.steering_wheel_angle_cmd = val * math.pi / 180.
        st.enabled = True
        st.speed = self.vel
        return st

    def calc_angular(self, yaw):
        angular_vel = 0.
        if self.yaw is not None:
            angular_vel = (yaw - self.yaw) / (rospy.get_time() -
                                              self.prev_time)
        self.yaw = yaw
        self.prev_time = rospy.get_time()
        return angular_vel

    def create_point_cloud_message(self, pts):
        header = Header()
        header.stamp = rospy.Time.now()
        header.frame_id = '/world'
        cloud_message = pcl2.create_cloud_xyz32(header, pts)
        return cloud_message

    def broadcast_transform(self, name, position, orientation):
        br = tf.TransformBroadcaster()
        br.sendTransform(position, orientation, rospy.Time.now(), name,
                         "world")

    def publish_odometry(self, data):
        pose = self.create_pose(data['x'], data['y'], data['z'], data['yaw'])

        position = (data['x'], data['y'], data['z'])
        orientation = tf.transformations.quaternion_from_euler(
            0, 0, math.pi * data['yaw'] / 180.)
        self.broadcast_transform("base_link", position, orientation)

        self.publishers['current_pose'].publish(pose)
        self.vel = data['velocity'] * 0.44704
        self.angular = self.calc_angular(data['yaw'] * math.pi / 180.)
        self.publishers['current_velocity'].publish(
            self.create_twist(self.vel, self.angular))

    def publish_controls(self, data):
        steering, throttle, brake = data['steering_angle'], data[
            'throttle'], data['brake']
        self.publishers['steering_report'].publish(self.create_steer(steering))
        self.publishers['throttle_report'].publish(self.create_float(throttle))
        self.publishers['brake_report'].publish(self.create_float(brake))

    def publish_obstacles(self, data):
        for obs in data['obstacles']:
            pose = self.create_pose(obs[0], obs[1], obs[2])
            self.publishers['obstacle'].publish(pose)
        header = Header()
        header.stamp = rospy.Time.now()
        header.frame_id = '/world'
        cloud = pcl2.create_cloud_xyz32(header, data['obstacles'])
        self.publishers['obstacle_points'].publish(cloud)

    def publish_lidar(self, data):
        self.publishers['lidar'].publish(
            self.create_point_cloud_message(
                zip(data['lidar_x'], data['lidar_y'], data['lidar_z'])))

    def publish_traffic(self, data):
        x, y, z = data['light_pos_x'], data['light_pos_y'], data[
            'light_pos_z'],
        yaw = [
            math.atan2(dy, dx)
            for dx, dy in zip(data['light_pos_dx'], data['light_pos_dy'])
        ]
        status = data['light_state']

        lights = TrafficLightArray()
        header = Header()
        header.stamp = rospy.Time.now()
        header.frame_id = '/world'
        lights.lights = [
            self.create_light(*e) for e in zip(x, y, z, yaw, status)
        ]
        self.publishers['trafficlights'].publish(lights)

    def publish_dbw_status(self, data):
        self.publishers['dbw_status'].publish(Bool(data))

    def publish_camera(self, data):
        imgString = data["image"]
        image = PIL_Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)

        image_message = self.bridge.cv2_to_imgmsg(image_array, encoding="rgb8")
        self.publishers['image'].publish(image_message)

    def callback_steering(self, data):
        self.server(
            'steer',
            data={'steering_angle': str(data.steering_wheel_angle_cmd)})

    def callback_throttle(self, data):
        self.server('throttle', data={'throttle': str(data.pedal_cmd)})

    def callback_brake(self, data):
        rospy.loginfo('[Server.send] brake = %.2f', data.pedal_cmd)
        self.server('brake', data={'brake': str(data.pedal_cmd)})
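The constants in create_steer and publish_odometry are plain unit conversions: 0.44704 turns mph into m/s, and pi/180 turns degrees into radians. A small worked check (the input values are assumed for illustration):

import math

velocity_mph = 25.0                      # assumed simulator velocity
vel_ms = velocity_mph * 0.44704          # 11.176 m/s, as computed in publish_odometry
steer_deg = 15.0                         # assumed steering command
steer_rad = steer_deg * math.pi / 180.   # ~0.2618 rad, as computed in create_steer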
Example No. 39
0
  def __init__(self):

    self.image_pub = rospy.Publisher("image_topic_2",Image, queue_size = 10)

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/sensors/camera/infra1/image_rect_raw",Image,self.callback)
Example No. 40
0
class KinectHelper(object):
    def __init__(self, color_cam, range_cam, timestep):
        self.kinect_range = range_cam
        self.kinect_color = color_cam
        # Enable cameras
        self.kinect_range.enable(2 * timestep)
        self.kinect_color.enable(2 * timestep)
        # Init messages
        self.init_messages()
        # ROS publishers
        self.camera_rgb_pub = rospy.Publisher('rgb/image_raw',
                                              Image,
                                              queue_size=1)
        self.camera_info_rgb_pub = rospy.Publisher('rgb/camera_info',
                                                   CameraInfo,
                                                   queue_size=1)
        self.camera_depth_pub = rospy.Publisher('depth/image_raw',
                                                Image,
                                                queue_size=1)
        self.camera_info_depth_pub = rospy.Publisher('depth/camera_info',
                                                     CameraInfo,
                                                     queue_size=1)
        # OpenCV bridge
        self.bridge = CvBridge()

    def init_image(self, camera):
        msg = CameraInfo()
        msg.width = camera.getWidth()
        msg.height = camera.getHeight()
        f = camera.getWidth() / (2 * math.tan(camera.getFov() / 2))
        msg.K[0] = f
        msg.K[4] = f
        msg.K[2] = camera.getWidth() / 2
        msg.K[5] = camera.getHeight() / 2
        msg.K[8] = 1
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            f, 0.0,
            camera.getWidth() / 2, 0.0, 0.0, f,
            camera.getHeight() / 2, 0.0, 0.0, 0.0, 1.0, 0.0
        ]
        return msg

    def init_messages(self):
        self.info_rgb_msg = self.init_image(self.kinect_color)
        self.info_depth_msg = self.init_image(self.kinect_range)

    def broadcast_depth_image(self):
        time_now = rospy.Time.now()
        raw = self.kinect_range.getRangeImageArray()
        if raw is None: return
        img = np.transpose(np.array(raw, dtype=np.float32))
        image = self.bridge.cv2_to_imgmsg(img, "passthrough")
        image.header.stamp = time_now
        image.header.frame_id = "camera_depth_optical_frame"
        self.camera_depth_pub.publish(image)
        # Camera info
        self.info_depth_msg.header.stamp = time_now
        self.info_depth_msg.header.frame_id = "camera_depth_optical_frame"
        self.camera_info_depth_pub.publish(self.info_depth_msg)

    def broadcast_color_image(self):
        time_now = rospy.Time.now()
        raw = self.kinect_color.getImageArray()
        if raw is None: return
        img = np.transpose(np.array(raw, dtype=np.uint8), (1, 0, 2))
        image = self.bridge.cv2_to_imgmsg(img, "rgb8")
        image.header.stamp = time_now
        image.header.frame_id = "camera_rgb_optical_frame"
        self.camera_rgb_pub.publish(image)
        # Camera info
        self.info_rgb_msg.header.stamp = time_now
        self.info_rgb_msg.header.frame_id = "camera_rgb_optical_frame"
        self.camera_info_rgb_pub.publish(self.info_rgb_msg)
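init_image fills the camera matrix from the standard pinhole relation f = w / (2 * tan(FOV / 2)). A quick numeric check (the width and FOV values are assumed):

import math

width = 640     # image width in pixels (assumed)
fov = 1.047     # horizontal field of view in radians, ~60 degrees (assumed)
f = width / (2 * math.tan(fov / 2))
print(round(f, 1))  # ~554.3 px: the value stored in K[0], K[4] and P[0], P[5] above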
Example No. 41
0
import numpy as np
import cv2
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

CAMERA_WIDTH = 1280
CAMERA_HEIGHT = 720

BRIDGE = CvBridge()


def set_cam_params(cam_num):
    cap = cv2.VideoCapture(cam_num)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
    cap.set(cv2.CAP_PROP_FPS, 5)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
    return cap


left_image_pub = rospy.Publisher("/sj_camera/left_image_raw",
                                 Image,
                                 queue_size=1)
right_image_pub = rospy.Publisher("/sj_camera/right_image_raw",
                                  Image,
                                  queue_size=1)
down_image_pub = rospy.Publisher("/sj_camera/down_image_raw",
                                 Image,
                                 queue_size=1)
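The snippet declares three publishers but the capture loop itself is cut off; a minimal sketch of such a loop (the device indices are assumptions, rospy.init_node is assumed to have been called in the truncated part of the module, and the down camera would be handled analogously):

def publish_frames():
    left_cap = set_cam_params(0)    # assumed device index
    right_cap = set_cam_params(1)   # assumed device index
    rate = rospy.Rate(5)            # matches the CAP_PROP_FPS setting above
    while not rospy.is_shutdown():
        ok_left, left_frame = left_cap.read()
        ok_right, right_frame = right_cap.read()
        if ok_left:
            left_image_pub.publish(BRIDGE.cv2_to_imgmsg(left_frame, encoding="bgr8"))
        if ok_right:
            right_image_pub.publish(BRIDGE.cv2_to_imgmsg(right_frame, encoding="bgr8"))
        rate.sleep()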
Example No. 42
0
 def __init__(self):
     super(ImageContainer, self).__init__()
     self.Image = None
     self._bridge = CvBridge()
Example No. 43
0
    def __init__(self):
        ##USER PARAMETERS
        self.angle_setpoint = 90
        self.angle = 0
        self.fin_time = 0
        self.integral = 0
        self.prev_error = 0
        self.logs = [0, 0]
        ##Initiate variables
        self.leftLine = 0
        self.midLine = 0
        self.rightLine = 0
        self.distance = 0
        self.leftSpeed = 0
        self.rightSpeed = 0
        self.pan = 0
        self.tilt = 0
        self.bridge = CvBridge()

        #Setup Publishers
        self.motorPub = rospy.Publisher('motors', motors, queue_size=10)
        self.servoPub = rospy.Publisher('servos', servos, queue_size=10)
        self.blobpub = rospy.Publisher('imageProc', Image, queue_size=10)

        #Create Subscriber callbacks
        def lineCallback(data):
            self.leftLine = data.leftLine
            self.midLine = data.midLine
            self.rightLine = data.rightLine

        def distanceCallback(data):
            self.distance = data.distance

        def imageProcessing(data):
            # st_time = time.time()
            try:
                frame = self.bridge.imgmsg_to_cv2(
                    data, desired_encoding="passthrough")
            except CvBridgeError as e:
                print(e)

            ##Place image processing code here!
            frame, self.logs = imageProcessQueue(frame)
            print(self.logs)
            self.angle = self.logs[1]

            self.blobpub.publish(self.bridge.cv2_to_imgmsg(frame, "bgr8"))
            # print("Time diff: %f\n Steering angle: %f\n" % (((st_time - self.fin_time)*1000), self.angle))
            # print(logs)
            # self.fin_time = st_time

        def speed_callback(data):
            ## timing code
            st_time = time.time()
            # print("Time diff: %f\n" % ((st_time - self.fin_time)*1000))
            turn_speed_here = 0.3
            turn_speed = self.pid()
            self.leftSpeed = (0.2 + turn_speed) * 0.9
            self.rightSpeed = (0.2 - turn_speed) * 0.9
            self.fin_time = st_time
            if self.logs[0] == 0 or self.distance <= 0.4:
                self.leftSpeed = 0
                self.rightSpeed = 0
                self.integral = 0
                self.prev_error = 0

            # if only one lane is detected
            # if self.logs[0] == 1:
            # 	if self.logs[2][0] == 1:
            # 		self.leftSpeed = turn_speed_here
            # 		self.rightSpeed = -turn_speed_here
            # 		self.integral = 0
            # 		self.prev_error = 0
            # 	if self.logs[2][1] == 1:
            # 		self.leftSpeed = -turn_speed_here
            # 		self.rightSpeed = turn_speed_here
            # 		self.integral = 0
            # 		self.prev_error = 0

            ##Leave these lines at the end

            self.publishMotors()

        #Subscribe to topics
        rospy.Subscriber('raspicam_node/image', Image, imageProcessing)
        rospy.Subscriber('imageProc', Image, speed_callback)
        rospy.Subscriber('distance', distance, distanceCallback)

        # initialize
        rospy.init_node('core', anonymous=True)
        self.rate = rospy.Rate(10)
Example No. 44
0
class image_converter:

  def __init__(self):

    self.image_pub = rospy.Publisher("image_topic_2",Image, queue_size = 10)

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/sensors/camera/infra1/image_rect_raw",Image,self.callback)


  def callback(self,data):

    #convert the ros image to a cv image
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
      print(e)

    # the intrinsic_parameters from the CameraInfo topic
    intrinsic_parameters = np.float32([[383.7944641113281, 0.0, 322.3056945800781],[0.0, 383.7944641113281, 241.67051696777344],[0.0, 0.0, 1.0]])

    # the distortion coefficients from the CameraInfo topic
    dist_coeffs = np.array([0.0, 0.0, 0.0, 0.0, 0.0])

    #convert the image to a grayscale image
    cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

    # hide larger white areas on the field by occluding them with black rectangles
    rows, cols = cv_image.shape
    cv_image = cv2.rectangle(cv_image, (0,0), (cols-1,106), (0,0,0), -1)
    cv_image = cv2.rectangle(cv_image, (0,rows-130), (cols-1,rows-1), (0,0,0), -1)
    cv_image = cv2.rectangle(cv_image, ((cols//2)-120,rows-230), ((cols//2) + 150,rows-1), (0,0,0), -1)

    #convert the image to binary
    binary_img =  cv2.threshold(cv_image, 240, 255, cv2.THRESH_BINARY)[1]

    # find contours in the binary image
    im2, contours, hierarchy = cv2.findContours(binary_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

    #index for the points to increment in the loop
    counter = 0
    #dict to store the 6 points
    points_on_ground = np.float32([[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]])
    points_on_car =  np.array([[0.5,0.2,0],[0.5,-0.2,0],[0.8,0.2,0],[0.8,-0.2,0],[1.1,0.2,0],[1.1,-0.2,0]])

    for c in contours:
       # calculate moments for each contour
       M = cv2.moments(c)

       # calculate x,y coordinate of center
       cX = int(M["m10"] / M["m00"])
       cY = int(M["m01"] / M["m00"])
       cv2.circle(binary_img, (cX, cY), 1, (0, 0, 0), -1)
       points_on_ground[counter] = [cX, cY]
       counter += 1

    #compute extrinsic parameters with solvePnP
    #ret, rvec, tvec = cv2.solvePnP(object_points, marker_points, camera_matrix, dist_coeff)
    ret, rvec, tvec = cv2.solvePnP(points_on_car, points_on_ground, intrinsic_parameters, dist_coeffs)

    # cv2.Rodrigues converts rvec into a 3x3 rotation matrix (the second return value is the Jacobian)
    rotation_matrix, wasanderes = cv2.Rodrigues(rvec)

    transformation_matrix = [[rotation_matrix[0][0], rotation_matrix[0][1], rotation_matrix[0][2],tvec[0][0]],
                   [rotation_matrix[1][0], rotation_matrix[1][1], rotation_matrix[1][2],tvec[1][0]],
                   [rotation_matrix[2][0], rotation_matrix[2][1], rotation_matrix[2][2],tvec[2][0]],
                   [0.0, 0.0, 0.0, 1.0]]

    #print("transformation matrix: ",transformation_matrix)
    #print("\n\n\n\n\n\n")

    inverse_transformation = np.linalg.inv(transformation_matrix)


    print("\n******************************** Output solvePnP **********************************\n")
    print("rvec: \n",rvec)
    #print("inverse transformation: ",inverse_transformation)
    print("\n\n")

    print("tvec: \n",tvec)
    #print("inverse transformation: ",inverse_transformation)
    print("\n\n")

    print("\n******************************* Transofmation Matrix an Inverse ***********************************\n")
    print("Transformation Matrix: \n",transformation_matrix)
    #print("inverse transformation: ",inverse_transformation)
    print("\n\n")

    print("Inverse Transformation Matrix: \n",inverse_transformation)
    #print("inverse transformation: ",inverse_transformation)
    print("\n\n")

    print("\n************************ Check Inverse Matrix with a Point P ***************************************\n")
    print("Point P for check inverse: ",[points_on_ground[0][0],points_on_ground[0][1],0,1])
    pointtransform = np.dot(transformation_matrix,[points_on_ground[0][0],points_on_ground[0][1],0,1])
    print("\n\n")

    print("Dot Product (P * Transformation Matrix) = D :",pointtransform)
    print("\n\n")
    inversetransformp = np.dot(inverse_transformation, pointtransform)

    print("Dot Product (D * Inverse Transformation Matrix) = P ",inversetransformp)

    '''
    transformation_matrix:

    [0.016633132510232462, -0.99773441110706473,    -0.065187297809735922,  0.03167831,
    -0.52853957600128143,   0.046568970831419976,   -0.84763037201134284,   0.25024891,
     0.84874569542548017,   0.048552815042389619,   -0.52656753474571261,   0.09192414,
     0.0,                    0.0,                    0.0,                   1.0        ]
    '''

    cv2.imshow("Image window", binary_img)
    cv2.waitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError as e:
      print(e)
Example No. 45
0
class RandomBot():
    def __init__(self, bot_name="NoName"):
        # bot name
        self.name = bot_name
        #wall distance
        self.near_wall_range = 0.2  # [m]
        # lidar scan
        self.scan = []
        # target_id
        self.id_list = np.zeros(1)
        # position
        self.pose_x = 0
        self.pose_y = 0
        self.th = 0
        # speed [m/s]
        self.speed = 0.1

        self.enemy_dist = []
        self.enemy_list = []

        self.bridge = CvBridge()

        # publisher
        self.vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        # subscriber
        self.lidar_sub = rospy.Subscriber('scan', LaserScan,
                                          self.LidarCallback)
        self.id_sub = rospy.Subscriber('target_id', MarkerArray,
                                       self.IdCallback)
        self.pose_sub = rospy.Subscriber('odom', Odometry, self.PoseCallback)
        self.image_sub = rospy.Subscriber('image_raw', Image,
                                          self.ImageCallback)
        #actionlib
        self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)

        self.twist = Twist()
        self.twist.linear.x = 0
        self.twist.linear.y = 0.
        self.twist.linear.z = 0.
        self.twist.angular.x = 0.
        self.twist.angular.y = 0.
        self.twist.angular.z = 0.
        #self.enemy_detector = EnemyDetector()

        self.image_state = 0

    def LidarCallback(self, data):
        scan = data.ranges
        self.scan = scan
        #print(len(self.scan),self.scan[0],self.scan[280])
        self.near_wall = self.NearWall(scan)

    def NearWall(self, scan):
        if not len(scan) == 360:
            return False
        forword_scan = scan[:20] + scan[-20:]
        back_scan = scan[160:200]
        # drop too small value ex) 0.0
        forword_scan = [x for x in forword_scan if x > 0.1]
        back_scan = [x for x in back_scan if x > 0.1]
        if min(forword_scan) < self.near_wall_range:
            return 1
        elif min(back_scan) < self.near_wall_range:
            return 2
        return False

    def IdCallback(self, data):
        id = data.markers[0].id
        if self.id_list[-1] != id:
            self.id_list = np.append(self.id_list, id)
        else:
            pass

    def PoseCallback(self, data):
        '''
        pose topic from amcl localizer
        update robot twist
        '''
        pose_x = data.pose.pose.position.x
        self.pose_x = pose_x
        pose_y = data.pose.pose.position.y
        self.pose_y = pose_y
        quaternion = data.pose.pose.orientation
        rpy = tf.transformations.euler_from_quaternion(
            (quaternion.x, quaternion.y, quaternion.z, quaternion.w))
        th = rpy[2]
        self.th = th
        #print(self.pose_x,self.pose_y,self.th)

    def ImageCallback(self, image):
        try:
            frame = self.bridge.imgmsg_to_cv2(image, "bgr8")
        except CvBridgeError as e:
            print(e)
        input_image = np.array(frame, dtype=np.uint8)

        rects = self.process_image(input_image)
        #print(rects)
        if rects != []:
            if rects[0][0] + rects[0][2] / 2 < 220:
                self.image_state = 3
            elif rects[0][0] + rects[0][2] / 2 > 260:
                self.image_state = 4
            else:
                self.image_state = 5
        else:
            self.image_state = 6
        cv2.waitKey(1)
Example No. 46
0
class DroneStabilization:

    image_sub = None

    def __init__(self):

        rospy.Subscriber("/state_machine/state", String, self.state_callback)
        self.pub_condition = rospy.Publisher(
            "/state_machine/window/find_condition", String, queue_size=10)
        rospy.init_node('window', anonymous=True)
        self.yolo = YOLO()

        self.count_stable = 0

    def setup_window(self):

        self.image_pub = rospy.Publisher("image_topic_2", Image, queue_size=10)
        self.vel_pub = rospy.Publisher('controle/velocity',
                                       Vector3D,
                                       queue_size=10)

        self.bridge = CvBridge()

        # self.stab = camStab()
        self.detect = detect_window()
        self.vec_vel = Vector3D()
        self.vec_vel.x = 0
        self.vec_vel.y = 0
        self.vec_vel.z = 0
        self.pixel_movement = [0, 0]

        self.frame = 0
        self.count_callbacks = 0
        self.opt_flow = camStab()
        self.image_sub = rospy.Subscriber("/bebop/image_raw",
                                          Image,
                                          self.callback,
                                          queue_size=None)

        rospy.loginfo("setup ok")
        # self.pub_condition.publish("ok")

    def state_callback(self, data):
        if (data.data == "find_window"):
            rospy.loginfo(" WINDOW !!!")
            self.setup_window()
            # condition!!
        elif (self.image_sub != None):
            rospy.loginfo(" end! ")
            cv2.destroyAllWindows()
            self.image_sub.unregister()
            self.image_sub = None

    def callback(self, data):
        # if self.count_callbacks < 10:
        #     self.count_callbacks += 1
        #     return
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        (rows, cols, channels) = cv_image.shape
        if not (cols > 60 and rows > 60):  # return if the image has an invalid shape
            return
        # self.detect.update(cv_image)

        if self.frame == 0:

            pil_image = PIL.Image.fromarray(cv_image)
            r_image, out_boxes, out_score = self.yolo.detect_image(pil_image)
            # window detected with min precision of (defined on yolo file)
            if out_score.shape[0] > 0:
                rospy.loginfo("detect")
                m_box = out_boxes.mean(0)

                self.mean_box_cord = m_box[:]
                self.image_h = r_image.shape[0]
                self.image_w = r_image.shape[1]
                self.image_center = np.array(
                    [self.image_w / 2, self.image_h / 2])

                box_margin = 30
                max_score_index = np.argmax(out_score)
                box_lt = (max(
                    int(out_boxes[max_score_index][1]) - box_margin,
                    0), max(
                        int(out_boxes[max_score_index][0]) - box_margin, 0))
                box_rb = (min(
                    int(out_boxes[max_score_index][3]) + box_margin,
                    self.image_w),
                          min(
                              int(out_boxes[max_score_index][2]) + box_margin,
                              self.image_h))

                img = cv2.rectangle(r_image, box_lt, box_rb, (0, 220, 200), 2)

                self.opt_flow.resetFeatures(
                    cv_image,
                    interval=[box_lt[1], box_rb[1], box_lt[0], box_rb[0]],
                    get_corners_only=True)
                # print(init_feature + np.array(box_lt))
                # self.opt_flow.set_p0(init_feature + np.array(box_lt))
                # self.opt_flow.set_initial_img(cv_image)

                self.frame += 1

                cv2.imshow("window", img)
            else:
                rospy.loginfo("no windows on view, trying again")
                # self.vel_pub.publish(self.vec_vel)
                cv2.imshow("window", r_image)
        else:
            self.opt_flow.update(cv_image)
            print(self.opt_flow.get_square_shape())
            self.adjust_position(self.opt_flow.get_square_center(),
                                 self.opt_flow.get_square_shape())

        k = cv2.waitKey(30) & 0xff
        if k == 27:
            exit()
        elif k == ord('r'):
            print("r pressed ")
            self.frame = 0
        elif k == ord('p'):
            print("through_window ")
            self.pub_condition.publish("ok")

        try:
            # self.vel_pub.publish(self.vec_vel)
            # print(self.vec_vel.x)
            # self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
            pass
        except CvBridgeError as e:
            print(e)
        self.count_callbacks = 0

    def adjust_position(self,
                        window_center_pos,
                        window_shape,
                        ideal_window_image_rate=0.60):
        delta = self.image_center - window_center_pos
        # rospy.loginfo(delta)
        self.vec_vel.x = 0
        self.vec_vel.y = 0
        self.vec_vel.z = 0
        if abs(delta[0]) > 5 or abs(delta[1]) > 5:
            self.vec_vel.y = np.sign(delta[0]) * min(
                np.abs(delta[0]) / 20, 0.2)
            self.vec_vel.z = np.sign(delta[1]) * min(
                np.abs(delta[1]) / 20, 0.2)
            self.count_stable = 0

        rate = window_shape[1] / self.image_h
        delta_rate = ideal_window_image_rate - rate
        # (x,y) goes back
        if abs(delta_rate) > 0.02 and abs(delta[0]) < 20 and abs(
                delta[1]) < 20:
            self.vec_vel.x = delta_rate * 2
            self.count_stable = 0

        self.vel_pub.publish(self.vec_vel)
        self.count_stable += 1
        if self.count_stable > 3:
            rospy.loginfo("GOOD!!")
            self.pub_condition.publish("ok")
Example No. 47
0
    def __init__(self):
        # type: () -> None
        """
        Initiate VisualCompassHandler

        return: None
        """
        # Init ROS package
        rospack = rospkg.RosPack()
        self.package_path = rospack.get_path('bitbots_visual_compass')

        rospy.init_node('bitbots_visual_compass_setup')
        rospy.loginfo('Initializing visual compass setup')

        self.bridge = CvBridge()

        self.config = {}
        self.image_msg = None
        self.compass = None
        self.hostname = socket.gethostname()

        # TODO: docs
        self.base_frame = 'base_footprint'
        self.camera_frame = 'camera_optical_frame'
        self.tf_buffer = tf2.Buffer(cache_time=rospy.Duration(50))
        self.listener = tf2.TransformListener(self.tf_buffer)

        self.pub_head_mode = rospy.Publisher('head_mode',
                                             HeadMode,
                                             queue_size=1)

        # Register VisualCompassConfig server for dynamic reconfigure and set callback
        Server(VisualCompassConfig, self.dynamic_reconfigure_callback)

        rospy.logwarn("------------------------------------------------")
        rospy.logwarn("||                 WARNING                    ||")
        rospy.logwarn("||Please remove the LAN cable from the Robot, ||")
        rospy.logwarn("||after pressing 'YES' you have 10 Seconds    ||")
        rospy.logwarn("||until the head moves OVER the LAN port!!!   ||")
        rospy.logwarn("------------------------------------------------\n\n")

        try:
            input = raw_input
        except NameError:
            pass

        accept = input("Do you REALLY want to start? (YES/n)")

        if accept == "YES":
            rospy.logwarn("REMOVE THE LAN CABLE NOW!!!!!")

            rospy.sleep(10)

            head_mode = HeadMode()
            head_mode.headMode = 10
            self.pub_head_mode.publish(head_mode)
            rospy.loginfo("Head mode has been set!")

            rospy.spin()
        else:
            rospy.signal_shutdown(
                "You aborted the process! Shuting down correctly.")
Example No. 48
0
class autonomy(object):
    def __init__(self):
        ##USER PARAMETERS
        self.angle_setpoint = 90
        self.angle = 0
        self.fin_time = 0
        self.integral = 0
        self.prev_error = 0
        self.logs = [0, 0]
        ##Initiate variables
        self.leftLine = 0
        self.midLine = 0
        self.rightLine = 0
        self.distance = 0
        self.leftSpeed = 0
        self.rightSpeed = 0
        self.pan = 0
        self.tilt = 0
        self.bridge = CvBridge()

        #Setup Publishers
        self.motorPub = rospy.Publisher('motors', motors, queue_size=10)
        self.servoPub = rospy.Publisher('servos', servos, queue_size=10)
        self.blobpub = rospy.Publisher('imageProc', Image, queue_size=10)

        #Create Subscriber callbacks
        def lineCallback(data):
            self.leftLine = data.leftLine
            self.midLine = data.midLine
            self.rightLine = data.rightLine

        def distanceCallback(data):
            self.distance = data.distance

        def imageProcessing(data):
            # st_time = time.time()
            try:
                frame = self.bridge.imgmsg_to_cv2(
                    data, desired_encoding="passthrough")
            except CvBridgeError as e:
                print(e)

            ##Place image processing code here!
            frame, self.logs = imageProcessQueue(frame)
            print(self.logs)
            self.angle = self.logs[1]

            self.blobpub.publish(self.bridge.cv2_to_imgmsg(frame, "bgr8"))
            # print("Time diff: %f\n Steering angle: %f\n" % (((st_time - self.fin_time)*1000), self.angle))
            # print(logs)
            # self.fin_time = st_time

        def speed_callback(data):
            ## timing code
            st_time = time.time()
            # print("Time diff: %f\n" % ((st_time - self.fin_time)*1000))
            turn_speed_here = 0.3
            turn_speed = self.pid()
            self.leftSpeed = (0.2 + turn_speed) * 0.9
            self.rightSpeed = (0.2 - turn_speed) * 0.9
            self.fin_time = st_time
            if self.logs[0] == 0 or self.distance <= 0.4:
                self.leftSpeed = 0
                self.rightSpeed = 0
                self.integral = 0
                self.prev_error = 0

            # if only one lane is detected
            # if self.logs[0] == 1:
            # 	if self.logs[2][0] == 1:
            # 		self.leftSpeed = turn_speed_here
            # 		self.rightSpeed = -turn_speed_here
            # 		self.integral = 0
            # 		self.prev_error = 0
            # 	if self.logs[2][1] == 1:
            # 		self.leftSpeed = -turn_speed_here
            # 		self.rightSpeed = turn_speed_here
            # 		self.integral = 0
            # 		self.prev_error = 0

            ##Leave these lines at the end

            self.publishMotors()

        #Subscribe to topics
        rospy.Subscriber('raspicam_node/image', Image, imageProcessing)
        rospy.Subscriber('imageProc', Image, speed_callback)
        rospy.Subscriber('distance', distance, distanceCallback)

        # initialize
        rospy.init_node('core', anonymous=True)
        self.rate = rospy.Rate(10)

    def publishMotors(self):
        motorMsg = motors()
        motorMsg.leftSpeed = self.leftSpeed
        motorMsg.rightSpeed = self.rightSpeed
        # rospy.loginfo(motorMsg)
        self.motorPub.publish(motorMsg)

    def publishServo(self):
        servoMsg = servos()
        servoMsg.pan = self.pan
        servoMsg.tilt = self.tilt
        # rospy.loginfo(servoMsg)
        self.servoPub.publish(servoMsg)

    def runner(self):

        while not rospy.is_shutdown():

            ## timing code
            # st_time = time.time()
            # # print("Time diff: %f\n" % ((st_time - self.fin_time)*1000))
            # turn_speed = self.pid()
            # self.leftSpeed = 0.18 + turn_speed
            # self.rightSpeed = 0.18 - turn_speed
            # self.fin_time = st_time
            # if self.logs[0] == 0:
            # 	self.leftSpeed = 0
            # 	self.rightSpeed = 0
            # 	self.integral = 0
            # ##Leave these lines at the end
            # self.publishMotors()
            # self.publishServo()
            self.rate.sleep()

    def pid(self):
        kp = 0.9
        ki = 0
        kd = 0.5

        # if np.sum(self.logs[2]) == 1:
        # 	kp=0.9
        # 	ki= 0.001
        # 	kd=0
        # integral clamping
        if self.integral > 2:
            self.integral = 2
        elif self.integral < -2:
            self.integral = -2

        # pid control
        error = (self.angle - self.angle_setpoint) / (self.angle_setpoint)
        self.integral += (error / 10)
        derivative = (error - self.prev_error) * 10
        speed = kp * error + ki * self.integral + kd * derivative
        self.prev_error = error
        speed /= 3
        # speed clamping
        if speed > 0.2:
            speed = 0.2
        elif speed < -0.2:
            speed = -0.2
        # print("Speed: ", speed/10000)

        if self.logs[0] == 1:
            if self.logs[2][0] == 1:
                speed = abs(speed) / 1.1
            if self.logs[2][1] == 1:
                speed = -abs(speed) / 1.1
        print(speed)
        return (speed)
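    # Worked example for pid() above (illustrative numbers): with angle = 120,
    # angle_setpoint = 90, integral = 0 and prev_error = 0:
    #   error      = (120 - 90) / 90             ~= 0.333
    #   derivative = (0.333 - 0) * 10            ~= 3.333
    #   speed      = (0.9*0.333 + 0.5*3.333) / 3 ~= 0.655  -> clamped to 0.2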

    def move(self):
        ## copying.....................................................
        ## ...............................................................
        # Calculate the error in the x and y directions
        max_speed = 0.2
        min_speed = 0.05
        kp_f = 0.8
        kd_f = 40

        # A fiducial was detected since last iteration of this loop
        if self.found:

            forward_error = self.arucoZ - 0.8
            lateral_error = self.arucoX

            # Calculate the amount of turning needed towards the fiducial
            # atan2 works for any point on a circle (as opposed to atan)
            # angular_error = math.degrees(math.atan2(self.arucoX, self.arucoZ))
            if abs(lateral_error) > self.lateral_error_thres:
                self.stopCar()
                self.wait(0.5)
                self.fix_angle()

            # Set the forward speed based distance
            linSpeed = kp_f * forward_error + kd_f * (forward_error -
                                                      self.previous_error) / 10
            if linSpeed <= -max_speed:
                self.forward(-max_speed)
            elif linSpeed > -max_speed and linSpeed < -min_speed:
                self.forward(linSpeed)
            elif linSpeed >= max_speed:
                self.forward(max_speed)
            elif linSpeed > min_speed and linSpeed < max_speed:
                self.forward(linSpeed)
            else:
                self.stopCar()

            print "Errors: forward %f lateral %f speed %f" % \
              (forward_error, lateral_error, linSpeed)
            self.previous_error = forward_error

        else:
            self.stopCar()

        self.rate.sleep()

    def fix_angle(self):

        forward_error = self.arucoZ - 0.8
        lateral_error = self.arucoX

        while abs(lateral_error) > self.lateral_error_thres:
            print "Errors: forward %f lateral %f " % (forward_error,
                                                      lateral_error)
            if lateral_error > self.lateral_error_thres:
                self.turnRight(0.2)
            elif lateral_error < -self.lateral_error_thres:
                self.turnLeft(0.2)
            else:
                print("Done Alignment................................")
                self.stopCar()
                self.wait(1)
                break  # alignment done, leave the loop

            self.rate.sleep()
            lateral_error = self.arucoX

    def forward(self, speed):
        self.leftSpeed = speed
        self.rightSpeed = speed
        self.publishMotors()

    def turnLeft(self, speed):
        self.leftSpeed = -speed
        self.rightSpeed = speed
        self.publishMotors()

    def turnRight(self, speed):
        self.leftSpeed = speed
        self.rightSpeed = -speed
        self.publishMotors()

    def stopCar(self):
        self.leftSpeed = 0
        self.rightSpeed = 0
        self.publishMotors()

    def wait(self, dur):
        start = time.time()
        while True:
            if time.time() - start > dur:
                break
        self.stopCar()
Example No. 49
0
class image_converter:
    def __init__(self):
        #############  # for testing
        self.image_pub = rospy.Publisher("/control/", Image, queue_size=1)
        #############
        self.error_pub = rospy.Publisher("/error", Float32, queue_size=1)
        rate = rospy.Rate(100)

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/color/image_raw",
                                          Image,
                                          self.callback,
                                          queue_size=1,
                                          buff_size=2**24)
        rate.sleep()

    def callback(self, data):
        curve = 1
        array = []
        help_array = []
        try:
            image1 = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        #gray
        image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)

        height = image1.shape[0]
        width = image1.shape[1]

        #get coordinates for ransac
        for y in range(200, height - 1):
            for x in range(100, width - 200):

                rgb = image1[y, x]

                if (rgb >= (200)):
                    help_array.append((x, y))
            # take the middle point of the white line
            if (help_array != []):
                array.append(help_array[(len(help_array) // 2) - 1])
            help_array = []
        print(len(array))

        #drive curve if line is left and not in front of the car
        for i in range(0, len(array)):
            if (array[i][0] > 200):
                curve = 0

        if curve == 1:
            self.error_pub.publish(-4)
            rospy.sleep(5)
        else:
            #begin of ransac
            n = 60
            d = 1
            t1 = len(array) // 4
            set_final = []
            set_tmp = []
            a_final = (0, 0)
            b_final = (0, 0)
            while (n > 0):
                set_tmp = []
                a = random.choice(array)
                b = random.choice(array)
                while (b == a):
                    b = random.choice(array)
                for i in range(0, len(array)):
                    a_np = np.asarray(a)
                    b_np = np.asarray(b)
                    c = np.asarray(array[i])
                    d1 = np.linalg.norm(np.cross(
                        b_np - a_np, a_np - c)) / np.linalg.norm(b_np - a_np)
                    if (d1 < d):
                        set_tmp.append(array[i])
                if (len(set_tmp) >= t1 and len(set_tmp) > len(set_final)):
                    a_final = a
                    b_final = b
                    set_final = set_tmp
                n -= 1

    ####################  # for testing only
            print(a_final)
            print(b_final)
            cv2.line(image1, a_final, b_final, (0), 3)
            cv2.imwrite('img_lines.png', image1)
            #####################
            m1 = (b_final[1] - a_final[1]) / float(b_final[0] - a_final[0])
            b1 = a_final[1] - m1 * a_final[0]
            print "\n"
            print "m: "
            print m1
            print "b: "
            print b1
            print "\n"

            row = 240
            column = 320

            intersection = int((row - b1) / m1)
            #intersection
            image1[row, intersection] = 150

            error = intersection - column

            print(error)
            #publish error
            self.error_pub.publish(error)

            try:
                self.image_pub.publish(
                    self.bridge.cv2_to_imgmsg(image1, "mono8"))
            except CvBridgeError as e:
                print(e)
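The inlier test inside the RANSAC loop is the perpendicular point-to-line distance |cross(b - a, a - c)| / |b - a|. A compact, vectorised restatement of the same test (an illustrative sketch, not the original code):

import numpy as np

def line_inliers(points, a, b, d=1.0):
    # Distance of every point c to the line through a and b, computed in one shot.
    pts = np.asarray(points, dtype=float)
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    dist = np.abs(np.cross(b - a, a - pts)) / np.linalg.norm(b - a)
    return pts[dist < d]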
Example No. 50
0
"""
Classes to handle Carla sensors
"""
import math
import numpy as np
import tf

from cv_bridge import CvBridge
from geometry_msgs.msg import TransformStamped
from sensor_msgs.msg import CameraInfo
from sensor_msgs.point_cloud2 import create_cloud_xyz32
from std_msgs.msg import Header

from carla.sensor import Camera, Lidar, Sensor
from carla_ros_bridge.transforms import carla_transform_to_ros_transform

cv_bridge = CvBridge(
)  # global cv bridge to convert image between opencv and ros


class SensorHandler(object):
    """
    Generic Sensor Handler

    A sensor handler computes the ROS message associated with a Carla sensor's data, and the transform associated
    with the sensor.
    These messages are passed to a *process_msg_fun* which will take care of publishing them.
    """
    def __init__(self,
                 name,
                 params=None,
                 carla_sensor_class=Sensor,
                 carla_settings=None,
Example No. 51
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.has_image = False
        self.lights = []

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.stop_wp_mapping = {}
        self.tl_initialized = False
        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic
        light in 3D map space and helps you acquire an accurate ground truth
        data source for the traffic light classifier by sending the current
        color state of all traffic lights in the simulator. When testing on
        the vehicle, the color state will not be available. You'll need to rely
        on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights',
                                TrafficLightArray,
                                self.traffic_cb,
                                queue_size=1)
        sub6 = rospy.Subscriber('/image_color',
                                Image,
                                self.image_cb,
                                queue_size=1,
                                buff_size=2**24)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.tl_initialized = True
        rospy.logwarn("TLClassifier initialized")
        # self.listener = tf.TransformListener()

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1,
                                                      latch=True)
        self.debug_image_pub = rospy.Publisher('/debug_image',
                                               Image,
                                               queue_size=1,
                                               latch=True)

    def spin(self):
        rate = rospy.Rate(5)
        while not rospy.is_shutdown():
            rate.sleep()
            self.publish_tl()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        if self.waypoints is None:
            self.waypoints = waypoints
        if len(self.waypoints.waypoints) != len(waypoints.waypoints):
            rospy.logwarn("Map changed!")
            self.stop_wp_mapping = {}
            self.waypoints = waypoints

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes
           the index of the waypoint closest to the red light's stop line
           to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg

    def publish_tl(self):
        if not self.tl_initialized:
            rospy.logwarn("Waiting for TLClassifier")
            return
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        # DONE(see--): implement
        dists = [
            dist_fn(x, wp.pose.pose.position.x, y, wp.pose.pose.position.y)
            for wp in self.waypoints.waypoints
        ]
        min_index = min(range(len(dists)), key=dists.__getitem__)
        return min_index

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in
                 styx_msgs/TrafficLight)

        """
        if not self.has_image:
            rospy.logwarn("TLDetector: No image")
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        # Get classification
        tl_state, debug_image = self.light_classifier.get_classification(
            cv_image)
        self.debug_image_pub.publish(
            self.bridge.cv2_to_imgmsg(debug_image, encoding="rgb8"))

        # tl_state = TrafficLight.GREEN
        return tl_state

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closes to the upcoming stop line for a
                 traffic light (-1 if none exists)
            int: ID of traffic light color (specified
                 in styx_msgs/TrafficLight)

        """
        light = None
        car_wp = None
        # List of positions that correspond to the line to stop in front
        # of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose and self.waypoints:
            min_index_diff = 100000
            car_wp = self.get_closest_waypoint(self.pose.pose.position.x,
                                               self.pose.pose.position.y)
            for tl_pos in stop_line_positions:
                tl_key = str(tl_pos[0]) + str(tl_pos[1])
                if tl_key not in self.stop_wp_mapping:
                    stop_wp = self.get_closest_waypoint(tl_pos[0], tl_pos[1])
                    self.stop_wp_mapping[tl_key] = stop_wp
                else:
                    stop_wp = self.stop_wp_mapping[tl_key]
                tl_wp_index = min(stop_wp + STOP_TL_OFFSET,
                                  len(self.waypoints.waypoints) - 1)
                index_diff = tl_wp_index - car_wp

                # check that traffic light is in front: index_diff >= 0
                if index_diff < min_index_diff \
                   and index_diff >= 0:
                    light = tl_wp_index
                    min_index_diff = index_diff

        elif self.pose is None:
            rospy.logwarn_throttle(2, "TLDetector: No pose")
        elif self.waypoints is None:
            rospy.logwarn_throttle(2, "TLDetector: No wps")

        # TODO find the closest visible traffic light (if one exists)
        if light is not None:
            state = self.get_light_state(light)
            return light, state

        # self.waypoints = None
        return -1, TrafficLight.UNKNOWN
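A minimal entry point for the detector above (the main guard is an assumption; the snippet does not include one):

if __name__ == '__main__':
    try:
        TLDetector().spin()  # __init__ already calls rospy.init_node('tl_detector')
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic light detector node.')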
Example No. 52
0
class VisualCompassSetup():
    # type: () -> None
    """
    TODO docs

    Subscribes to the raw image.
    Sets the head behavior to a special head mode, in which it scans for image features above the field boundary.
    The head behavior sends a trigger message whenever it reaches predefined points. When this node is triggered, the current image features are saved in our feature map.
    Afterwards the map is saved on the robot, and the robot handler can download it and share it with the other robots.
    """
    def __init__(self):
        # type: () -> None
        """
        Initiate VisualCompassHandler

        return: None
        """
        # Init ROS package
        rospack = rospkg.RosPack()
        self.package_path = rospack.get_path('bitbots_visual_compass')

        rospy.init_node('bitbots_visual_compass_setup')
        rospy.loginfo('Initializing visual compass setup')

        self.bridge = CvBridge()

        self.config = {}
        self.image_msg = None
        self.compass = None
        self.hostname = socket.gethostname()

        # TODO: docs
        self.base_frame = 'base_footprint'
        self.camera_frame = 'camera_optical_frame'
        self.tf_buffer = tf2.Buffer(cache_time=rospy.Duration(50))
        self.listener = tf2.TransformListener(self.tf_buffer)

        self.pub_head_mode = rospy.Publisher('head_mode',
                                             HeadMode,
                                             queue_size=1)

        # Register VisualCompassConfig server for dynamic reconfigure and set callback
        Server(VisualCompassConfig, self.dynamic_reconfigure_callback)

        rospy.logwarn("------------------------------------------------")
        rospy.logwarn("||                 WARNING                    ||")
        rospy.logwarn("||Please remove the LAN cable from the Robot, ||")
        rospy.logwarn("||after pressing 'YES' you have 10 Seconds    ||")
        rospy.logwarn("||until the head moves OVER the LAN port!!!   ||")
        rospy.logwarn("------------------------------------------------\n\n")

        try:
            input = raw_input
        except NameError:
            pass

        accept = input("Do you REALLY want to start? (YES/n)")

        if accept == "YES":
            rospy.logwarn("REMOVE THE LAN CABLE NOW!!!!!")

            rospy.sleep(10)

            head_mode = HeadMode()
            head_mode.headMode = 10
            self.pub_head_mode.publish(head_mode)
            rospy.loginfo("Head mode has been set!")

            rospy.spin()
        else:
            rospy.signal_shutdown(
                "You aborted the process! Shuting down correctly.")

    def dynamic_reconfigure_callback(self, config, level):
        # type: (dict, TODO) -> None
        """
        TODO docs
        """
        self.compass = VisualCompass(config)

        if self.changed_config_param(config, 'compass_type') or \
            self.changed_config_param(config, 'compass_matcher') or \
            self.changed_config_param(config, 'compass_multiple_map_image_count'):

            self.feature_map_images_count = 0
            self.processed_set_all_feature_map_images = False

            rospy.loginfo(
                'Loaded configuration: compass type: %(type)s | matcher type: %(matcher)s | map images: %(feature_map_count)d'
                % {
                    'type': config['compass_type'],
                    'matcher': config['compass_matcher'],
                    'feature_map_count':
                    config['compass_multiple_map_image_count']
                })

        # Subscribe to Image-message
        if self.changed_config_param(config, 'img_msg_topic') or \
            self.changed_config_param(config, 'img_msg_queue_size'):
            if hasattr(self, 'sub_image_msg'):
                self.sub_image_msg.unregister()
            self.sub_image_msg = rospy.Subscriber(
                config['img_msg_topic'],
                Image,
                self.image_callback,
                queue_size=config['img_msg_queue_size'],
                tcp_nodelay=True,
                buff_size=60000000)
            # https://github.com/ros/ros_comm/issues/536

        # Register message server to call set truth callback
        if self.changed_config_param(config, 'feature_map_trigger_topic') or \
            self.changed_config_param(config, 'feature_map_trigger_queue_size'):
            if hasattr(self, 'sub_trigger_set_feature_map'):
                self.sub_trigger_set_feature_map.unregister()
            self.sub_trigger_set_feature_map = rospy.Subscriber(
                config['feature_map_trigger_topic'],
                Header,
                self.set_truth_callback,
                queue_size=config['feature_map_trigger_queue_size'])

        self.config = config

        self.check_image_count()

        return self.config

    def set_truth_callback(self, request):
        if self.image_msg:
            # TODO: check timestamps

            orientation = self.tf_buffer.lookup_transform(
                self.base_frame,
                self.camera_frame,
                self.image_msg.header.stamp,
                timeout=rospy.Duration(0.5)).transform.rotation
            yaw_angle = (euler_from_quaternion(
                (orientation.x, orientation.y, orientation.z,
                 orientation.w))[2] + 0.5 * math.pi) % (2 * math.pi)

            image = self.bridge.imgmsg_to_cv2(self.image_msg, 'bgr8')

            self.compass.set_truth(yaw_angle, image)
            self.feature_map_images_count += 1
            self.check_image_count()

        else:
            rospy.logwarn('No image received yet.')

    def image_callback(self, image_msg):
        # type: (Image) -> None
        """
        TODO docs
        """
        # Drops old images
        # TODO: fix
        # image_age = rospy.get_rostime() - image_msg.header.stamp
        # if image_age.to_sec() > 0.1:
        #     print("Visual Compass: Dropped Image-message")  # TODO debug printer
        #     return

        self.image_msg = image_msg

    def check_image_count(self):
        # type: () -> None
        """
        TODO docs
        """
        config_feature_map_images_count = self.config[
            'compass_multiple_map_image_count']
        if self.feature_map_images_count != config_feature_map_images_count:
            rospy.loginfo(
                'Visual compass: %(var)d of %(config)d map images set. More images are needed.'
                % {
                    'var': self.feature_map_images_count,
                    'config': config_feature_map_images_count
                })
            self.processed_set_all_feature_map_images = False
        else:
            if not self.processed_set_all_feature_map_images:
                rospy.loginfo(
                    'Visual compass: All map images have been processed.')
                self.save_feature_map(self.config['feature_map_file_path'])
            self.processed_set_all_feature_map_images = True

    def save_feature_map(self, feature_map_file_path):
        # type: (str) -> None
        """
        Serializes the current feature map (keypoints, descriptors and
        metadata) to a pickle file and shuts down the setup node.
        """
        converter = KeyPointConverter()

        # get keypoints and mean feature count per image
        features = self.compass.get_feature_map()

        mean_feature_count = self.compass.get_mean_feature_count()

        # convert keypoints to basic values
        keypoints = features[0]
        keypoint_values = [converter.key_point2values(kp) for kp in keypoints]

        descriptors = features[1]

        meta = {
            'field': self.config['feature_map_field'],
            'date': datetime.now(),
            'device': self.hostname,
            'compass_type': self.config['compass_type'],
            'compass_matcher': self.config['compass_matcher'],
            'compass_multiple_map_image_count':
                self.config['compass_multiple_map_image_count'],
            'keypoint_count': len(keypoint_values),
            'descriptor_count': len(descriptors),
            'mean_feature_count': mean_feature_count,
        }

        dump_features = {
            'keypoint_values': keypoint_values,
            'descriptors': descriptors,
            'meta': meta
        }

        # generate file path
        file_path = self.package_path + feature_map_file_path
        # warn if the file already exists
        if path.isfile(file_path):
            rospy.logwarn(
                'Map file at %(path)s ALREADY EXISTS and will be overwritten.'
                % {'path': file_path})
        # save keypoints in pickle file
        with open(file_path, 'wb') as f:
            pickle.dump(dump_features, f)
        info_str = "\n\t-----------------------------------------------------------------------------------------------------------------\n" + \
        "\tSaved map file at: %(path)s\n" % {'path': file_path} + \
        "\tRUN the following command on your system (NOT THE ROBOT) to save the map file in your current directory:\n" + \
        "\n\tscp bitbots@%(host)s:%(path)s .\n" % {'path': file_path, 'host': self.hostname} + \
        "\t-----------------------------------------------------------------------------------------------------------------"
        rospy.loginfo(info_str)

        # shutdown setup process
        rospy.signal_shutdown('Visual compass setup finished cleanly.')

    def changed_config_param(self, config, param_name):
        # type: (dict, str) -> bool
        """
        Checks whether the given parameter is new or has changed since the
        last reconfigure.
        """
        return param_name not in self.config or \
            config[param_name] != self.config[param_name]
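# A minimal sketch of reading a saved feature map back in. It assumes the
# pickle layout produced by save_feature_map above; the file path and the
# (x, y, size, angle, response, octave, class_id) field order assumed for
# KeyPointConverter.key_point2values are illustrative guesses, not confirmed API.
import pickle

import cv2

with open('visual_compass_map.pickle', 'rb') as f:  # path is illustrative
    dump = pickle.load(f)

print(dump['meta']['compass_type'], dump['meta']['keypoint_count'])

# rebuild cv2.KeyPoint objects from their basic values
keypoints = [
    cv2.KeyPoint(x, y, size, angle, response, int(octave), int(class_id))
    for (x, y, size, angle, response, octave, class_id)
    in dump['keypoint_values']
]
descriptors = dump['descriptors']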
Example No. 53
class RecognizeEmotionSM(ActionSMBase):
    def __init__(self, timeout=120.0,
                 image_topic='/cam3d/rgb/image_raw',
                 emotion_model_path=None,
                 labels=None,
                 image_size=(0, 0, 0),
                 max_recovery_attempts=1):
        super(RecognizeEmotionSM, self).__init__('RecognizeEmotion', [], max_recovery_attempts)
        self.timeout = timeout
        self.labels = labels
        self.image_size = image_size
        self.image_publisher = rospy.Publisher(image_topic, Image, queue_size=1)
        self.emotion_model_path = emotion_model_path
        self.bridge = CvBridge()
        self.emotion_model = None
        self.computation_graph = None

    def init(self):
        try:
            rospy.loginfo('[recognize_emotion] Loading model %s', self.emotion_model_path)
            self.emotion_model = load_model(self.emotion_model_path)

            # the following two lines are necessary for avoiding
            # https://github.com/keras-team/keras/issues/2397
            self.emotion_model._make_predict_function()
            self.computation_graph = tf.get_default_graph()
        except Exception:
            rospy.logerr('[recognize_emotion] Model %s could not be loaded', self.emotion_model_path)
        return FTSMTransitions.INITIALISED

    def running(self):
        number_of_faces = self.goal.number_of_faces
        bounding_boxes = self.goal.bounding_boxes
        emotions = []

        rospy.loginfo('[recognize_emotion] Recognizing emotions')
        rgb_image = self.__ros2cv(self.goal.image)
        gray_image = self.__rgb2gray(rgb_image)
        for face in bounding_boxes:
            x, y, w, h = face.bounding_box_coordinates
            face = gray_image[y: (y + h), x: (x + w)]
            face = cv2.resize(face, self.image_size[0:2])
            face = np.expand_dims(face, 0)
            face = np.expand_dims(face, -1)
            face = self.__preprocess_image(face)
            predicted_emotion = self.__predict_emotion(face)
            emotions.append(predicted_emotion)

            # cv2.rectangle draws on rgb_image in place
            cv2.rectangle(rgb_image, (x, y), (x + w, y + h),
                          (0, 0, 255), 2)
            cv2.putText(rgb_image, predicted_emotion, (x, y - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0),
                        1, cv2.LINE_AA)
        output_ros_image = self.bridge.cv2_to_imgmsg(rgb_image, 'bgr8')
        self.image_publisher.publish(output_ros_image)

        self.result = self.set_result(True, emotions)
        return FTSMTransitions.DONE

    def set_result(self, success, emotions):
        result = RecognizeEmotionResult()
        result.success = success
        result.emotions = emotions
        return result

    def __preprocess_image(self, image):
        image = image / 255.0
        return image

    def __predict_emotion(self, face):
        label = -1
        with self.computation_graph.as_default():
            class_predictions = self.emotion_model.predict(face)
            label = self.labels[np.argmax(class_predictions)]
        return label

    def __ros2cv(self, ros_image):
        cv_image = self.bridge.imgmsg_to_cv2(ros_image, 'bgr8')
        return np.array(cv_image, dtype=np.uint8)

    def __rgb2gray(self, image):
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return gray_image
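# Quick shape check of the expand_dims pipeline used in running() above:
# the model is fed NHWC batches of single-channel faces. The 64x64 crop
# size here is only an example, not the configured image_size.
import numpy as np

face = np.zeros((64, 64), dtype=np.float32)  # one cropped grayscale face
face = np.expand_dims(face, 0)               # batch axis   -> (1, 64, 64)
face = np.expand_dims(face, -1)              # channel axis -> (1, 64, 64, 1)
print(face.shape)                            # (1, 64, 64, 1)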
Example No. 54
import os

import cv2
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

if __name__ == '__main__':
    rgb_image_root = '../data/rgb_image_0048'
    file_list = []
    tmp_list = os.listdir(rgb_image_root)
    tmp_list.sort()
    for f in tmp_list:
        cur_file = os.path.join(rgb_image_root, f)
        file_list.append(cur_file)

    rospy.init_node('pub_rgb_image')
    pub_velo = rospy.Publisher("rgb_image", Image, queue_size=1)
    rate = rospy.Rate(0.2)
    bridge = CvBridge()

    pc_num_counter = 0
    while not rospy.is_shutdown():
        rospy.loginfo(file_list[pc_num_counter])

        # pc_data = np.fromfile(file_list[pc_num_counter], dtype=np.float32).reshape(-1, 4)
        cv_image = cv2.imread(file_list[pc_num_counter])
        image_message = bridge.cv2_to_imgmsg(cv_image, encoding="passthrough")
        pub_velo.publish(image_message)
        # print(pc_data[0,0,:])
        #pc_data = pc_data[:, :, :4]
        #pc_data = np.fromfile(file_list[pc_num_counter], dtype=np.float32).reshape(-1, 4)

        pc_num_counter = pc_num_counter + 1
        if pc_num_counter >= len(file_list):
            pc_num_counter = 0  # loop back to the first image

        rate.sleep()
Example No. 55
    def processSegments(self, segment_list_msg):
        if not self.active:
            return
        t_start = rospy.get_time()

        if self.use_propagation:
            self.propagateBelief()
            self.t_last_update = rospy.get_time()

        # initialize measurement likelihood
        measurement_likelihood = np.zeros(self.d.shape)

        for segment in segment_list_msg.segments:
            if segment.color != segment.WHITE and segment.color != segment.YELLOW:
                continue
            if segment.points[0].x < 0 or segment.points[1].x < 0:
                continue

            d_i, phi_i, l_i = self.generateVote(segment)
            if d_i > self.d_max or d_i < self.d_min or phi_i < self.phi_min or phi_i > self.phi_max:
                continue
            if self.use_max_segment_dist and (l_i > self.max_segment_dist):
                continue

            i = int(floor((d_i - self.d_min) / self.delta_d))
            j = int(floor((phi_i - self.phi_min) / self.delta_phi))

            if self.use_distance_weighting:
                dist_weight = self.dwa * l_i**3 + self.dwb * l_i**2 + self.dwc * l_i + self.zero_val
                if dist_weight < 0:
                    continue
                measurement_likelihood[i, j] += dist_weight
            else:
                measurement_likelihood[i, j] += 1.0 / l_i

        if np.linalg.norm(measurement_likelihood) == 0:
            return
        measurement_likelihood = measurement_likelihood / np.sum(
            measurement_likelihood)

        if self.use_propagation:
            self.updateBelief(measurement_likelihood)
        else:
            self.beliefRV = measurement_likelihood

        # TODO entropy test:
        #print self.beliefRV.argmax()

        maxids = np.unravel_index(self.beliefRV.argmax(), self.beliefRV.shape)
        # rospy.loginfo('maxids: %s' % maxids)
        self.lanePose.header.stamp = segment_list_msg.header.stamp
        self.lanePose.d = self.d_min + maxids[0] * self.delta_d
        self.lanePose.phi = self.phi_min + maxids[1] * self.delta_phi
        self.lanePose.status = self.lanePose.NORMAL

        # publish the belief image
        bridge = CvBridge()
        belief_img = bridge.cv2_to_imgmsg(
            (255 * self.beliefRV).astype('uint8'), "mono8")
        belief_img.header.stamp = segment_list_msg.header.stamp

        max_val = self.beliefRV.max()
        self.lanePose.in_lane = (
            max_val > self.min_max
            and len(segment_list_msg.segments) > self.min_segs
            and np.linalg.norm(measurement_likelihood) != 0)
        self.pub_lane_pose.publish(self.lanePose)
        self.pub_belief_img.publish(belief_img)

        # print "time to process segments:"
        # print rospy.get_time() - t_start

        # Publish in_lane (the entropy-based test below is commented out)
        in_lane_msg = BoolStamped()
        in_lane_msg.header.stamp = segment_list_msg.header.stamp
        in_lane_msg.data = self.lanePose.in_lane
        # ent = entropy(self.beliefRV)
        # if (ent < self.max_entropy):
        #     in_lane_msg.data = True
        # else:
        #     in_lane_msg.data = False
        self.pub_in_lane.publish(in_lane_msg)
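# The vote accumulation above is a plain 2D histogram update over a
# (d, phi) grid. A standalone illustration with made-up grid bounds
# (these are not the node's actual parameters):
import numpy as np
from math import floor

d_min, d_max, delta_d = -0.3, 0.3, 0.02
phi_min, phi_max, delta_phi = -1.5, 1.5, 0.1

grid = np.zeros((int((d_max - d_min) / delta_d),
                 int((phi_max - phi_min) / delta_phi)))

d_i, phi_i, l_i = 0.05, 0.2, 0.5               # one segment's vote
i = int(floor((d_i - d_min) / delta_d))        # row: lateral offset bin
j = int(floor((phi_i - phi_min) / delta_phi))  # column: heading bin
grid[i, j] += 1.0 / l_i                        # closer segments weigh more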
Example No. 56
    def __init__(self):
        self.image_pub = rospy.Publisher("/eyes/right", Image)

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera2/usb_cam2/image_raw", Image,
                                          self.callback)
Example No. 57
haar_flags = 0
display = True

if __name__ == '__main__':
    opencv_dir = '/usr/share/opencv/haarcascades/'

    face_cascade = cv2.CascadeClassifier(opencv_dir +
                                         'haarcascade_frontalface_default.xml')
    if face_cascade.empty():
        print "Could not find face cascade"
        sys.exit(-1)
    eye_cascade = cv2.CascadeClassifier(opencv_dir + 'haarcascade_eye.xml')
    if eye_cascade.empty():
        print "Could not find eye cascade"
        sys.exit(-1)
    br = CvBridge()
    rospy.init_node('facedetect')
    display = rospy.get_param("~display", True)

    def detect_and_draw(imgmsg):
        img = br.imgmsg_to_cv2(imgmsg, "bgr8")
        # allocate temporary images
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 3)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                              (0, 255, 0), 2)
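    # The excerpt ends before the node is wired up; a typical continuation
    # subscribes the callback and spins. The topic name is an assumption,
    # and sensor_msgs.msg.Image must be imported for this to run.
    rospy.Subscriber('image', Image, detect_and_draw, queue_size=1)
    rospy.spin()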
Example No. 58
import argparse
from copy import deepcopy

import cv2
import numpy as np
import rosbag
from cv_bridge import CvBridge


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--in_bag_file", help="input ROS bag")
    parser.add_argument("--out_bag_file", help="output ROS bag")
    parser.add_argument("--image_topic", help="image topic")
    parser.add_argument("--cam_info_topic", help="camera info topic")
    parser.add_argument("--desired_width", type=int)
    parser.add_argument("--desired_height", type=int)

    args = parser.parse_args()
    in_bag = rosbag.Bag(args.in_bag_file, "r")
    bridge = CvBridge()
    count = 0

    orig_width = None
    orig_height = None
    desired_width = args.desired_width
    desired_height = args.desired_height

    # figure out orig_width and orig_height first
    for topic, msg, t in in_bag.read_messages(args.cam_info_topic):
        orig_width = msg.width
        orig_height = msg.height
        break

    # scaling factors for camera_info in x and y directions
    s_x = desired_width / float(orig_width)
    s_y = desired_height / float(orig_height)

    with rosbag.Bag(args.out_bag_file, 'w') as outbag:
        for topic, msg, t in in_bag.read_messages():
            # resize camera_info msgs' K and P accordingly
            if (topic == args.cam_info_topic):
                orig_K = msg.K
                orig_P = msg.P
                new_msg = deepcopy(msg)
                new_msg.width = desired_width
                new_msg.height = desired_height
                new_msg.K = [s_x*orig_K[0], orig_K[1], s_x*orig_K[2],\
                         orig_K[3], s_y*orig_K[4], s_y*orig_K[5],\
                         orig_K[6], orig_K[7], orig_K[8]]
                new_msg.P = [s_x*orig_P[0], orig_P[1], s_x*orig_P[2], orig_P[3],\
                        orig_P[4], s_y*orig_P[5], s_y*orig_P[6], orig_P[7],\
                        orig_P[8], orig_P[9], orig_P[10], orig_P[11]]
                # print new_msg
                outbag.write(topic, new_msg, t)

            # resize images accordingly
            elif (topic == args.image_topic):
                # read bayered image as a long vector
                bayer_img = np.frombuffer(msg.data, dtype='uint8')
                # resize bayered image to 2D
                bayer_img = np.reshape(bayer_img, (orig_height, orig_width, 1))
                # debayer the raw image
                color_img = cv2.cvtColor(bayer_img, cv2.COLOR_BAYER_BG2BGR)
                # resize the debayered image
                color_img = cv2.resize(color_img,
                                       (desired_width, desired_height))
                # cv2.imshow("color_img", color_img)
                # cv2.waitKey(1)
                # convert back to an image message
                new_msg = bridge.cv2_to_imgmsg(color_img, encoding='bgr8')
                outbag.write(topic, new_msg, t)

            else:
                outbag.write(topic, msg, t)
        in_bag.close()
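# The excerpt omits the usual entry point. One would typically finish the
# script as below and run it along the lines of (the script name is
# hypothetical):
#   python resize_bag.py --in_bag_file in.bag --out_bag_file out.bag \
#       --image_topic /camera/image_raw --cam_info_topic /camera/camera_info \
#       --desired_width 640 --desired_height 480
if __name__ == '__main__':
    main()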
class YellowLineFollower:

  SPEED = 0.6
  QUEUE_SIZE = 1
  WHEELBASE = 0.33

  def __init__(self):
    self.bridge_object = CvBridge()
    self.image_sub = rospy.Subscriber('/camera/color/image_raw', 
                                      Image, self.camera_callback)
    # self.drive_pub = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/input/teleop',
    #                                    AckermannDriveStamped, queue_size= self.QUEUE_SIZE)
    self.drive_pub = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
                                       AckermannDriveStamped, queue_size= self.QUEUE_SIZE)
    # /vesc/low_level/ackermann_cmd_mux/output 
    self.last_angle = 0.0  # 0.0 (not None) so the first zero-error frame still publishes a valid angle
    self.test1_pub = rospy.Publisher('/test/res_raw', Image, queue_size = self.QUEUE_SIZE)
    self.test2_pub = rospy.Publisher('/test/hls_raw', Image, queue_size = self.QUEUE_SIZE)
    self.test3_pub = rospy.Publisher('/test/mask_raw', Image, queue_size = self.QUEUE_SIZE)

  def convert_angular_velocity_to_steering_angle(self, angular_velocity):
      if angular_velocity == 0:
        return 0
      return math.atan(angular_velocity * (self.WHEELBASE/self.SPEED))
    
  def camera_callback(self, data):

    try:
        cv_image = self.bridge_object.imgmsg_to_cv2(data, desired_encoding='bgr8')
    except CvBridgeError as e:
        print(e)
        return

    height, width, channels = cv_image.shape   
    top_trunc = int(height/2)
    mid_width = width/2
    margin = width/4
    left_bound = int(mid_width - margin)
    right_bound = int(mid_width + margin)
    crop_img = cv_image[top_trunc: ,0:width] 
    # image = self.bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')
    # hls = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)#.astype(np.float)

    # lower_yellow = np.array([ 0, 105,  175])
    # upper_yellow = np.array([29, 255, 255])
    # mask = cv2.inRange(hls, lower_yellow, upper_yellow)
    # threshold in RGB, kept as uint8 (cv2.inRange and cv2_to_imgmsg expect it)
    rgb = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)
    lower_yellow = np.array([171, 135, 82])
    upper_yellow = np.array([255, 255, 125])
    mask = cv2.inRange(rgb, lower_yellow, upper_yellow)
    
    
    # rows_to_watch = 100
    # top_trunc = int(height/2)
    # bot_trunc = int(top_trunc + rows_to_watch)
    # crop_img = cv_image[top_trunc:bot_trunc, 0:width]
    m = cv2.moments(mask, False)
    try:
        cx, cy = m['m10']/m['m00'], m['m01']/m['m00']
    except ZeroDivisionError:
        cx, cy = width / 2, height / 2
    res = cv2.bitwise_and(crop_img, crop_img, mask = mask)
    cv2.circle(res, (int(cx), int(cy)), 10, (0, 0, 255), -1)
#    cv2.imshow("Original", cv_image)
#    cv2.imshow("HSV", hls)
#    cv2.imshow("MASK", mask)
#    cv2.imshow("RES", res)
#    cv2.waitKey(0)
    error_x = cx - width/2
    angular_z = -error_x/100
    if angular_z == 0:
        steering_angle = self.last_angle
    else:
        steering_angle = self.convert_angular_velocity_to_steering_angle(angular_z)
        self.last_angle = steering_angle
    drive_msg = AckermannDriveStamped()
    drive_msg.header.stamp = data.header.stamp
    drive_msg.drive.speed = self.SPEED
    steering_angle = np.clip(steering_angle, -0.4, 0.4)
    drive_msg.drive.steering_angle = steering_angle
    self.drive_pub.publish(drive_msg)
    self.test2_pub.publish(self.bridge_object.cv2_to_imgmsg(rgb))
    self.test3_pub.publish(self.bridge_object.cv2_to_imgmsg(mask))
    self.test1_pub.publish(self.bridge_object.cv2_to_imgmsg(res))
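# Sanity check of the bicycle-model conversion used above, in isolation
# (the angular velocity is an arbitrary illustrative value):
import math

WHEELBASE = 0.33  # metres, as in the class constants
SPEED = 0.6       # m/s

# steering angle delta = atan(omega * L / v)
print(math.atan(1.0 * (WHEELBASE / SPEED)))  # ~0.503 rad for omega = 1.0 rad/s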
Example No. 60
class image_feature:

    def __init__(self):
        '''Initialize ros publisher, ros subscriber'''

        # topic where we publish
        self.image_pub = rospy.Publisher("/color_detection/image",
            Image, queue_size=10)
        self.bridge = CvBridge()
        #topic where the coordinates go
        self.cam_pose_pub = rospy.Publisher("/color_detection/cam_point", Point, queue_size=1)
        self.cam_pose = Point()
        # subscribed Topic
        self.subscriber = rospy.Subscriber("/uav_camera/image_raw_down", Image, self.callback,  queue_size=1)
        if VERBOSE:
            print "subscribed to /uav_camera/image_raw_down"


    def callback(self, ros_data):
        '''Callback function of subscribed topic.
        Here images get converted and features detected'''
        if VERBOSE:
            print 'received image with encoding: "%s"' % ros_data.encoding

        ### Set boundaries ###
        lower_range = np.array([0, 255, 255], dtype=np.uint8) #The object color is 255,153,0
        upper_range = np.array([0, 0, 255], dtype=np.uint8)

        lower_blue = np.array([100, 150, 0])
        upper_blue = np.array([140, 255, 255])

        yellow_lr = np.array([20, 100, 100], dtype=np.uint8)
        yellow_ur = np.array([30, 255, 255], dtype=np.uint8)

        try:
            cv_image = self.bridge.imgmsg_to_cv2(ros_data, "bgr8")
        except CvBridgeError as e:
            print(e)

        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        output = cv2.bitwise_and(cv_image, cv_image, mask=mask)
        image, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        center = Point()
        if len(contours) > 0:
            c = max(contours, key=cv2.contourArea)
            x, y, w, h = cv2.boundingRect(c)

            M = cv2.moments(c)
            center.x = float(M["m10"] / M["m00"])
            center.y = float(M["m01"] / M["m00"])
            self.find_relative_pose(center.x, center.y, w, h)
            cv2.rectangle(cv_image, (x, y),(x+w, y+h), (0, 255, 255), 2)
        else:
            self.cam_pose = Point()
            self.cam_pose.x = float("inf")
            self.cam_pose.y = float("inf")
            self.cam_pose.z = float("inf")
            self.cam_pose_pub.publish(self.cam_pose)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)

    def find_relative_pose(self, x, y, w, h):
        quad_3d = np.float32([[-0.0895, -0.0895, 0], [0.0895, -0.0895, 0], [0.0895, 0.0895, 0], [-0.0895, 0.0895, 0]])
        quad = np.float32([[x - w / 2, y - h / 2], [x - w / 2, y + h / 2], [x + w / 2, y + h / 2], [x + w / 2, y - h / 2]])
        
        K = np.float64([[1004.603218,    0       , 640.5],
                        [   0       , 1004.603218, 360.5],
                        [   0.0     ,    0.0     ,   1.0]])

        dist_coef = np.zeros(4)
        _ret, rvec, tvec = cv2.solvePnP(quad_3d, quad, K, dist_coef)
        rmat = cv2.Rodrigues(rvec)[0]
        cameraTranslatevector = -np.matrix(rmat).T * np.matrix(tvec)

        T0 = np.zeros((4, 4))
        T0[:3, :3] = rmat
        T0[:4, 3] = [0, 0, 0, 1]
        T0[:3, 3] = np.transpose(cameraTranslatevector)

        p0 = np.array([-0.0895/2, -0.0895/2, 0, 1])
        z0 = np.dot(T0, p0)

        self.cam_pose.x = z0.item(0)
        self.cam_pose.y = z0.item(1)
        self.cam_pose.z = z0.item(2)
        self.cam_pose_pub.publish(self.cam_pose)
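# The -R^T * t step in find_relative_pose expresses the camera origin in the
# object frame. A standalone check of that identity with an arbitrary,
# purely illustrative pose:
import cv2
import numpy as np

rvec = np.array([0.0, 0.0, 0.3])         # arbitrary axis-angle rotation
tvec = np.array([[0.1], [0.2], [1.5]])   # arbitrary translation

rmat = cv2.Rodrigues(rvec)[0]
cam_in_obj = -rmat.T.dot(tvec)           # camera origin in the object frame

# mapping that point back through [R|t] must return the origin
print(np.allclose(rmat.dot(cam_in_obj) + tvec, 0))  # True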