Example #1
class ImageDisplay:

    def __init__(self, device_num=0, color=True):
        cv.NamedWindow("Display",1)
        
        self.bridge = CvBridge()
        self.color = color

        self.device_num = device_num
        sub_name = 'camera_' + str(self.device_num)
        self.image_sub = rospy.Subscriber(sub_name,Image,self.image_callback)
        node_name = 'image_display' + str(self.device_num)
        rospy.init_node(node_name, anonymous=True)

    def image_callback(self,data):
        try:
            if self.color:
                cv_image = self.bridge.imgmsg_to_cv(data, "rgb8")
            else:
                cv_image = self.bridge.imgmsg_to_cv(data, "mono8")
        except CvBridgeError, e:
            print e
            return

        cv.ShowImage("Display", cv_image)
        cv.WaitKey(3)
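A minimal sketch of running this node; the class already calls rospy.init_node in its constructor, so it only needs to be instantiated and spun (the device number is an assumption):

if __name__ == '__main__':
    display = ImageDisplay(device_num=0, color=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()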
Example #2
File: view.py  Project: aplyer/kitti
class Viewer(object):
  def __init__(self):
    self.bridge = CvBridge()
    self.sub_00 = message_filters.Subscriber("image_00/image_raw",Image)
    self.sub_01 = message_filters.Subscriber("image_01/image_raw",Image)
    self.sub_02 = message_filters.Subscriber("image_02/image_raw",Image)
    self.sub_03 = message_filters.Subscriber("image_03/image_raw",Image)
    tmp = [self.sub_00, self.sub_01, self.sub_02, self.sub_03]
    self.ts = message_filters.TimeSynchronizer(tmp,10)
    self.ts.registerCallback(self.callback)
    cv.NamedWindow("Camera 00", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("Camera 01", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("Camera 02", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("Camera 03", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("Anaglyf col", cv.CV_WINDOW_NORMAL)
    
  def callback(self,I0,I1,I2,I3):
    try:
      cv_i0 = self.bridge.imgmsg_to_cv(I0, "mono8")
      cv_i1 = self.bridge.imgmsg_to_cv(I1, "mono8")
      cv_i2 = self.bridge.imgmsg_to_cv(I2, "bgr8")
      cv_i3 = self.bridge.imgmsg_to_cv(I3, "bgr8")
    except CvBridgeError, e:
      print e
      return
    cv.ShowImage("Camera 00", cv_i0)
    cv.ShowImage("Camera 01", cv_i1)
    cv.ShowImage("Camera 02", cv_i2)
    cv.ShowImage("Camera 03", cv_i3)
    merge=cv.CreateImage(cv.GetSize(cv_i2),8,3)
    makeMagic(cv_i3,cv_i2,merge)
    cv.ShowImage("Anaglyf col", merge)
    cv.WaitKey(3)
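makeMagic() is not defined in this snippet. A minimal sketch of what it might look like, assuming it composes a red/cyan anaglyph by taking the red channel from one camera and green/blue from the other (the channel assignment is a guess):

def makeMagic(left, right, out):
    size = cv.GetSize(left)
    lb, lg, lr = [cv.CreateImage(size, 8, 1) for _ in range(3)]
    rb, rg, rr = [cv.CreateImage(size, 8, 1) for _ in range(3)]
    cv.Split(left, lb, lg, lr, None)
    cv.Split(right, rb, rg, rr, None)
    # red channel from one eye, green and blue from the other
    cv.Merge(rb, rg, lr, None, out)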
def save_images(bagfile):
    if not os.path.isdir("video_images"):
        os.mkdir("video_images")
    if not os.path.isdir("depth_images"):
        os.mkdir("depth_images")
    bag = rosbag.Bag(bagfile)
    bridge = CvBridge()
    count = 0
    o = open('image_log.csv','w')
    o.write('im,secs,nsecs,extra\n')
    
    for topic, msg, t in bag.read_messages():        
        if topic == '/camera/rgb/image_rect_color':
            img = bridge.imgmsg_to_cv(msg, "bgr8")
            img_name = "video_images/img_%06d.png" %count
            o.write('im,%d,%d,\n' % (t.secs,t.nsecs))
            count += 1
            cv.SaveImage(img_name, img)

        elif topic == '/camera/depth_registered/image_rect':
            # note: this depth topic is typically 32FC1; "bgr8" assumes the
            # bag stores a re-encoded 3-channel depth image
            img = bridge.imgmsg_to_cv(msg, "bgr8")
            img_name = "depth_images/img_%06d.png" %count
            #o.write('de,%d,%d,\n' % (t.secs,t.nsecs))
            #count += 1
            cv.SaveImage(img_name, img)
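A sketch of a command-line driver for save_images (the argument handling is an assumption):

if __name__ == '__main__':
    import sys
    if len(sys.argv) != 2:
        print 'usage: save_images.py <bagfile>'
        sys.exit(1)
    save_images(sys.argv[1])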
class PersonExtracter():
    def __init__(self, image_topic, labels_topic):

        self.bridge = CvBridge()

        # Synchronized subscribers
        sub_rgb = message_filters.Subscriber(image_topic, Image)
        sub_labels = message_filters.Subscriber(labels_topic, Image)

        ts = message_filters.TimeSynchronizer([sub_rgb, sub_labels], 1)
        ts.registerCallback(self.callback)

    def callback(self, image, labels):
        # Prep the images
        try:
            image_cv = self.bridge.imgmsg_to_cv(image)  # to CvMat
            labels_cv = self.bridge.imgmsg_to_cv(labels)

            image_np = numpy.asarray(image_cv)  # to numpy
            labels_np = numpy.asarray(labels_cv)
        except CvBridgeError, e:
            print e
            return

        # Apply the mask
        mask = numpy.logical_not(labels_np)  # "True" where there are NOT people
        for channel in range(3):  # R, G, and B
            image_np[:, :, channel] *= mask

        # in case we need to publish it
        try:
            image = self.bridge.cv2_to_imgmsg(image_np)
        except CvBridgeError, e:
            print e
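A minimal sketch of wiring this class into a node; both topic names are placeholders:

if __name__ == '__main__':
    rospy.init_node('person_extracter')
    PersonExtracter('/camera/rgb/image_rect_color', '/camera/person_labels')
    rospy.spin()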
Example #5
def handle_detector_cb(req):
	vws = read_vws(req.geo_vw_sets)

	bridge = CvBridge()
	try:
		cv_image = bridge.imgmsg_to_cv(req.image, "bgr8")
		cv_mask  = bridge.imgmsg_to_cv(req.mask, "bgr8")
	except CvBridgeError, e:
		print e
class anaglyph:

	def __init__(self):
		self.pub = rospy.Publisher('anaglyph_img', Image)
		self.sub_depth = rospy.Subscriber('/kinect/depth/image_raw', Image, self.cb_depth)
		self.sub_rgb = rospy.Subscriber('/kinect/rgb/image_raw', Image, self.cb_rgb)

		self.bridge = CvBridge()

		self.img_out = cv.CreateMat(480, 640, cv.CV_8UC3)
		self.img_depth = cv.CreateMat(480, 640, cv.CV_8UC1)
		self.img_r = cv.CreateMat(480, 640, cv.CV_8UC1)
		self.img_g = cv.CreateMat(480, 640, cv.CV_8UC1)
		self.img_b = cv.CreateMat(480, 640, cv.CV_8UC1)
		self.img_rgb = cv.CreateMat(480, 640, cv.CV_8UC3)

		self.r_offset = cv.CreateMat(480, 640, cv.CV_8UC1)
		self.g_offset = cv.CreateMat(480, 640, cv.CV_8UC1)
		self.b_offset = cv.CreateMat(480, 640, cv.CV_8UC1)


	def cb_depth(self, data):
		self.img_depth = self.bridge.imgmsg_to_cv(data)

	def cb_rgb(self, data):
		rgb = self.bridge.imgmsg_to_cv(data)
		cv.MixChannels([rgb], [self.img_b, self.img_g, self.img_r], [(0,0), (1,1), (2,2)])	

		self.generate_output()

	def generate_output(self):
		cv.Copy(self.img_r, self.r_offset)
		cv.Copy(self.img_b, self.b_offset)
		cv.Copy(self.img_g, self.g_offset)
		
		for i in range (self.img_r.height-1):
			for j in range (self.img_r.width-1):
				if self.img_depth[i,j] < 255 and self.img_depth[i,j] >= 0:
					disp = int(round(self.img_depth[i,j]/5))
					print disp
					if j-disp > 0 and j-disp < self.img_r.width-1:

						self.r_offset[i,j] = self.img_r[i, j-disp]
						#self.r_offset[i,j-disp] = self.img_r[i, j]
						#self.b_offset[i,j-disp] = self.img_b[i, j]
						self.b_offset[i,j] = self.img_b[i, j-disp]
					if j+disp > 0 and j+disp < self.img_b.width-1:	
						#self.g_offset[i,j+disp] = self.img_g[i, j]
						self.g_offset[i,j] = self.img_g[i, j+disp]

		cv.MixChannels([self.r_offset, self.g_offset, self.b_offset], [self.img_out], [(0,0), (1,1), (2,2)])
		#cv.MixChannels([r_offset, self.img_g, self.img_b], [self.img_out], [(0,0), (1,1), (2,2)])
		img = self.bridge.cv_to_imgmsg(self.img_out)
		self.pub.publish(img)	
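A minimal sketch of running this class as a node, assuming the imports shown in the other snippets:

if __name__ == '__main__':
    rospy.init_node('anaglyph')
    anaglyph()
    rospy.spin()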
Example #7
class ROSStereoListener:
    def __init__(self, topics, rate=30.0, name='stereo_listener'):
        self.listener = ru.GenericListener(name, [Image, Image], topics, rate)
        self.lbridge = CvBridge()
        self.rbridge = CvBridge()

    def next(self):
        lros, rros =  self.listener.read(allow_duplication=False, willing_to_wait=True, warn=False, quiet=True)
        lcv = cv.CloneMat(self.lbridge.imgmsg_to_cv(lros, 'bgr8'))
        rcv = cv.CloneMat(self.rbridge.imgmsg_to_cv(rros, 'bgr8'))
        return lcv, rcv
Example #8
class HoleFiller():
    def __init__(self, topic_base, topic_mask, topic_out):
        self.bridge = CvBridge()
        self.image_out = Image()
        self.have_mask = False
        self.need_to_publish = False
        rospy.Subscriber(topic_base, Image, self.base_callback)
        rospy.Subscriber(topic_mask, Image, self.mask_callback)
        self.pub = rospy.Publisher(topic_out, Image)

        self.publish()

    def mask_callback(self, mask):
        image_cv = self.bridge.imgmsg_to_cv(mask, 'bgr8')
        image_cv2 = numpy.array(image_cv, dtype=numpy.uint8)
        image_cv2 = image_cv2[:, :, 0]
        
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (50, 50))
        image_filled = cv2.morphologyEx(image_cv2, cv2.MORPH_CLOSE, kernel, borderValue=255)

        image_filled = image_filled.astype(bool)
        image_filled = numpy.logical_not(image_filled)
        self.mask = image_filled

        self.have_mask = True


    def base_callback(self, image_in):
        if self.have_mask:
            image_cv = self.bridge.imgmsg_to_cv(image_in, 'bgr8')
            image_cv2 = numpy.array(image_cv, dtype=numpy.uint8)
            
            # Filter
            for dim in range(3): image_cv2[:, :, dim] *= self.mask    # take out filtered pixels
            image_cv2[:, :, 1] += numpy.logical_not(self.mask)*255  # turn 'em green
            
            image_filled_cv = cv.fromarray(image_cv2)
            self.image_out = self.bridge.cv_to_imgmsg(image_filled_cv, 'bgr8')
            self.need_to_publish = True

    def publish(self):
        r = rospy.Rate(5)
        while not rospy.is_shutdown():
            if self.need_to_publish:
                self.pub.publish(self.image_out)
                self.need_to_publish = False
                
            r.sleep()
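Because __init__ ends by calling publish(), which loops until shutdown, constructing the object is enough to run the node. A sketch with placeholder topic names:

if __name__ == '__main__':
    rospy.init_node('hole_filler')
    HoleFiller('/camera/rgb/image_color', '/camera/mask', '/camera/image_filled')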
Example #9
class main_node:
    def __init__(self):
        rospy.init_node('openCVodometry')

        self.cv_window_name = "OpenCV Odometry"

        cv.NamedWindow(self.cv_window_name, 1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_color",\
        ImageMsg, self.callback)

        root = Tk()
        app = TKWindow(master=root)
        try:
            app.mainloop()
            root.destroy()
        except KeyboardInterrupt:
            print "Shutting node."
            cv.DestroyAllWindows()

    def callback(self, data):
        try:
          cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
        except CvBridgeError, e:
          print e
          return

        cv.ShowImage(self.cv_window_name, cv_image)
        cv.WaitKey(3)
Example #10
class ImageDisplay:

    def __init__(self):
        self.image_sub = rospy.Subscriber("UndistortedImage", Image, self.image_callback)

        cv.NamedWindow("Display",1)
        self.bridge = CvBridge()

        self.color_max = 255
        self.font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,0.5,0.5)
        self.images_initialized = False

    def initialize_images(self,cv_image):
        self.im_display = cv.CreateImage(cv.GetSize(cv_image),cv.IPL_DEPTH_8U,3)
        self.images_initialized = True

    def image_callback(self,data):
        # Convert ROS image to OpenCV image
        try:
          cv_image = cv.GetImage(self.bridge.imgmsg_to_cv(data, "passthrough"))
        except CvBridgeError, e:
          print e
          return

        if not self.images_initialized:
            self.initialize_images(cv_image)

        cv.CvtColor(cv_image,self.im_display,cv.CV_GRAY2RGB)

        cv.ShowImage("Display", self.im_display)
        cv.WaitKey(3)
Example #11
class DetectObjects():
    def __init__(self):
        self.bridge = CvBridge()

        self.fix_count = 0

        self.angle = None

        # init camera
        rospy.Subscriber("/camera/rgb/image_rect_color", Image, self.cb_image)

        
    def cb_image(self, msg):
        try:
            frame = self.bridge.imgmsg_to_cv(msg, "bgr8")
            frame = np.array(frame, dtype=np.uint8)
            self.process_image(frame)
            key = cv.WaitKey(5)
            #print key
            if key == ord('r'):
                # restart
                rospy.sleep(3)

        except CvBridgeError, e:
            print e
class ardroneTracker:

  def __init__(self, tracker):
    self.point_pub = rospy.Publisher("/ardrone_tracker/found_point", Point )

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/ardrone/front/image_raw",Image,self.callback)
    self.image_pub = rospy.Publisher( "/ardrone_tracker/image", Image )

    self.tracker = tracker

  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
    except CvBridgeError, e:
      print e
      return
  
    numpy_image = np.asarray( cv_image )
    trackData = self.tracker.track( numpy_image )
    if trackData:
      x,y,z = trackData[0],trackData[1],trackData[2]

      point = Point( x,y,z )
      self.point_pub.publish( point )
    else:
      self.point_pub.publish( Point( 0, 0, -1 ) )

    try:
      vis = self.tracker.get_vis()
      image_message = self.bridge.cv_to_imgmsg(cv2.cv.fromarray( vis ), encoding="passthrough")
      self.image_pub.publish( image_message )
    except CvBridgeError, e:
      print e
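The tracker object passed to the constructor only needs to expose track(image), returning an (x, y, z) triple or a falsy value, and get_vis(), returning a display image. A do-nothing sketch of that interface (purely an assumption about the expected duck type):

class DummyTracker(object):
    def __init__(self):
        self.vis = None

    def track(self, numpy_image):
        self.vis = numpy_image  # keep the frame for get_vis()
        return None             # nothing found

    def get_vis(self):
        return self.vis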
Example #13
class roskoki:

  def __init__(self):
    self.image_pub = rospy.Publisher("image_topic_2",Image)

    cv.NamedWindow("Image window", 1)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("image_topic",Image,self.callback)

  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
    except CvBridgeError, e:
      print e
      return

    koki = PyKoki()

    params = CameraParams(Point2Df( cv_image.width/2, cv_image.height/2 ),
                          Point2Df(571, 571),
                          Point2Di( cv_image.width, cv_image.height ))

    print koki.find_markers( cv_image, 0.1, params )

    cv.ShowImage("Image window", cv_image)
    cv.WaitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError, e:
      print e
class ObserveDartboard(threading.Thread):

  BGsample = 30 # number of frames to gather BG samples (average) from at start of capture
  
   
  def __init__(self, camSource):
    threading.Thread.__init__(self)    
#    self.capture = cv2.VideoCapture(0)
#    self.ret, self.frame = self.capture.read()
#    width = self.capture.get(3)
#    height = self.capture.get(4)
#    print 'size = ' + str(width) + ' x ' + str(height)  
    self.bridge = CvBridge()
    self.frame = 0

  def receive_image_from_camera_callback(self, data):
    try:
      cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
      self.frame = cv_image
     
    except CvBridgeError, e:
      print e
      
#    cv.ShowImage("Image window", cv_image)
#    cv.WaitKey(3)
    print time.time()
class SkeletonSketcher():
    def __init__(self, image_topic, skeletons_topic):
        self.got_skeletons = False
        self.skeleton_array = SkeletonArray()
        self.bridge = CvBridge()
        rospy.Subscriber(skeletons_topic, SkeletonArray, self.skeletons_callback)
        rospy.Subscriber(image_topic, Image, self.image_callback)

    def skeletons_callback(self, skeleton_array):
        self.skeleton_array = skeleton_array
        self.got_skeletons = True
        rospy.loginfo('Got some skeletons!')

    def image_callback(self, image):
        """ Draws joints on image as dots. """
        skeleton_array = self.skeleton_array   # shorthand
        if self.got_skeletons:
            image_cv = self.bridge.imgmsg_to_cv(image)
            for s in range(len(skeleton_array.skeletons)):
                for j in range(len(skeleton_array.skeletons[s].joints)):
                    u = int(skeleton_array.skeletons[s].joints[j].uv[0])
                    v = int(skeleton_array.skeletons[s].joints[j].uv[1])
                    x = skeleton_array.skeletons[s].joints[j].xyz[0]
                    y = skeleton_array.skeletons[s].joints[j].xyz[1]
                    z = skeleton_array.skeletons[s].joints[j].xyz[2]
                    cv.Circle(image_cv, (u, v), 4, (0, 0, 255), -1)
                    print x, y, z, '-', u, v

            print ''
            cv.ShowImage('Image with Skeletons', image_cv)
            cv.WaitKey(5)
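A minimal sketch of wiring this class into a node (topic names are placeholders):

if __name__ == '__main__':
    rospy.init_node('skeleton_sketcher')
    SkeletonSketcher('/camera/rgb/image_color', '/skeletons')
    rospy.spin()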
Example #16
class ImageReducer:
#    # Convert a ROS Image to the Numpy matrix used by cv2 functions
#    def rosimg2cv(self, ros_image):
#        # Convert from ROS Image to old OpenCV image
#        frame = self.cvbridge.imgmsg_to_cv(ros_image, ros_image.encoding)
#        # Convert from old OpenCV image to Numpy matrix
#        return np.array(frame, dtype=np.uint8) #TODO: find out actual dtype


    def __init__(self, topics, factor):
        self.cvbridge = CvBridge()
        self.topics = topics
        self.factor = factor

        for topic in topics:
            outTopic = "/debug" + topic
            callback = self.reducerCallback(topic, outTopic)
            rospy.Subscriber(topic, Image, callback)


    # Returns a callback for a particular Image topic which reduces the image
    # and publishes it
    def reducerCallback(self, imageTopic, outTopic):
        publisher = rospy.Publisher(outTopic, Image)
        factor = self.factor
        def callback(rosImage):
            try:
                cvimg = self.cvbridge.imgmsg_to_cv(rosImage, desired_encoding="passthrough")
                outimg = cv2.cv.CreateMat(int(cvimg.rows * factor), int(cvimg.cols * factor), cvimg.type)
                cv2.cv.Resize(cvimg, outimg)
                publisher.publish(self.cvbridge.cv_to_imgmsg(outimg, encoding="bgr8")) #TODO: figure out actual encoding
            except CvBridgeError, e:
                print e

        return callback
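A sketch of launching the reducer so that each subscribed topic is republished at half size under a /debug prefix (topic list and factor are assumptions):

if __name__ == '__main__':
    rospy.init_node('image_reducer')
    ImageReducer(['/camera/rgb/image_color'], 0.5)
    rospy.spin()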
Example #17
class image_converter:

  def __init__(self):
    self.image_pub = rospy.Publisher("image_topic_2",Image)

    cv.NamedWindow("Image window", 1)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/camera/image_raw",Image,self.callback)

  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
    except CvBridgeError, e:
      print e
      return

    (cols,rows) = cv.GetSize(cv_image)
    if cols > 60 and rows > 60 :
      cv.Circle(cv_image, (50,50), 10, 255)

    cv.ShowImage("Image window", cv_image)
    cv.WaitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError, e:
      print e
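The main() that usually accompanies this tutorial-style class is along these lines (assuming sys is imported):

def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)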
Example #18
class DetectObjects():
    def __init__(self):
        self.bridge = CvBridge()
        self.fix_count = 0
        self.angle = None
        self.object_type = None

#        self.detector  = cv2.FastFeatureDetector(16, True) #cv2.SURF()
        
        # init camera
        rospy.Subscriber("/camera/rgb/image_rect_color", Image, self.cb_image)
        self.img_pub = rospy.Publisher("/vision/image", Image)
        self.pose_pub = rospy.Publisher("/vision/detected_object", PoseStampedLabeled)
        
    def cb_image(self, msg):
        try:
            frame = self.bridge.imgmsg_to_cv(msg, "bgr8")
            frame = np.array(frame, dtype=np.uint8)
            self.process_image(frame)
            key = cv.WaitKey(5)
            #print key
            if key == ord('r'):
                # restart
                rospy.sleep(3)

            if key >= 48 and key <= 57:
                self.object_type = OBJECTS[key - 48]
            else:
                self.object_type = None

        except CvBridgeError, e:
            print e
Example #19
class imageAnalyzer(object):
    
  def __init__(self):
    self.bridge = CvBridge()
    
    # Estimated circle center point publisher
    self.circleCenterPublisher = rospy.Publisher("est_ccPos",Pose2D) 
    
  def callback(self,data):
    try:
        img = self.bridge.imgmsg_to_cv(data, "mono8")
    except CvBridgeError, e:
        print e
        return

    moments = cv.Moments(img)
    
    data = Pose2D()
    m00 = cv.GetSpatialMoment(moments, 0, 0)
    data.x = cv.GetSpatialMoment(moments, 1, 0) / m00
    data.y = cv.GetSpatialMoment(moments, 0, 1) / m00
    data.theta = 0.0
    
    self.circleCenterPublisher.publish(data)
    
    print data.x / 500
    print data.y / 500
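Nothing in this snippet actually subscribes the callback; a sketch of the missing wiring, with a placeholder topic name:

if __name__ == '__main__':
    rospy.init_node('image_analyzer')
    analyzer = imageAnalyzer()
    rospy.Subscriber('/camera/image_mono', Image, analyzer.callback)
    rospy.spin()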
Example #20
class SnapshotFilter:

    ##   The constructor
    #    @param self The object pointer
    def __init__(self):
        input_topic = rospy.get_param("~input","defaultFilterInput")
        output_topic = rospy.get_param("~output","defaultFilterOutput")
        self.get_extended_params()
        self.name = rospy.get_name()
        self.bridge = CvBridge()
        self.input_topic = input_topic
        self.output_topic = output_topic
        self.output_pub = rospy.Publisher(self.output_topic,Snapshot)
        self.input_sub = rospy.Subscriber(self.input_topic,Snapshot,self.handle_input)
    
    def get_extended_params(self):
        #Do nothing
        return
    
    #Takes a snapshot as input, returns a filtered snapshot as output
    def handle_input(self,snapshot):
        try:
            input_cv_image = self.bridge.imgmsg_to_cv(snapshot.image, "bgr8")
            copy_input_cv_image = cv.CreateImage((input_cv_image.width,input_cv_image.height),8,3)
            cv.Copy(input_cv_image,copy_input_cv_image)
        except CvBridgeError, e:
            print e
            return
        output_cv_image = self.image_filter(cv_image=input_cv_image,info=snapshot.info,copy=copy_input_cv_image)
        output_info = snapshot.info
        try:
            output_image = self.bridge.cv_to_imgmsg(output_cv_image, "bgr8")
        except CvBridgeError, e:
            print e
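handle_input() calls self.image_filter(), which is not defined here, so the class is meant to be subclassed. A pass-through sketch of such a subclass (an assumption about the intended extension point):

class IdentityFilter(SnapshotFilter):
    def get_extended_params(self):
        # no extra parameters for this filter
        return

    def image_filter(self, cv_image, info, copy=None):
        # a real filter would draw on or transform cv_image here
        return cv_image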
Example #21
class Image_Converter:

  def __init__(self, camera=None):
  
    if camera is None:
        camera = ''
    self.image_source = camera + "/image_raw"
    self.pub_name = camera + "/image_float"
    self.bridge = CvBridge()  
    
    #self.np_image_pub = rospy.Publisher(self.pub_name, numpy_msg(Uint16s))
    self.float_image_pub = rospy.Publisher(self.pub_name, FloatArray)
    rospy.init_node('image_converter', anonymous=True)
    
    self.image_sub = rospy.Subscriber(self.image_source,Image,self.image_callback)
    
    

  def image_callback(self,data):
  
    try:
        cv_image = self.bridge.imgmsg_to_cv(data, "mono8")
        np_image =  numpy.asarray(cv_image, dtype=numpy.float32)
        msg = FloatArray()
        msg.shape = [cv_image.height, cv_image.width]
        msg.data = [i for i in np_image.reshape(cv_image.height*cv_image.width, 1)]
        self.float_image_pub.publish(msg)
          
    except CvBridgeError, e:
      print e
Example #22
class StitchCapture(CaptureInterface):
    """
    Bag capture is not thread-safe.
    """     
    def start(self, pano_id, goal):
        """
        set up a new pano.
        """
        self.bridge = CvBridge()
        self._bag_capture = BagCapture()
        self._bag_capture.start(pano_id, goal)
        self.img_pub = rospy.Publisher('~stitch',Image)
        

    def __call__(self, image, camera_info):
        if self._bag_capture.n_captures < 1:
            self.setupCamera(camera_info)
            self.setupStitch()
        try:
            cv_image = self.bridge.imgmsg_to_cv(image, "rgb8")
                           
            pano_image = pcv.convertCvMat2Mat(cv_image)
            self.stitcher.addNewImage(pano_image)
        except CvBridgeError, e:
            print e
        #capture to bag for back up and later stitching        
        self._bag_capture(image,camera_info)
class image_shower:

	def __init__(self,argv,show):
		self.show = show
		
		self.image_sub_topic = "/prosilica/image_color";
		self.points_sub_topic = "/camera/depth_registered/points";
		
		cv.NamedWindow(window_name, 1)
		self.bridge = CvBridge()
		# publisher used in image_callback below; the topic name is borrowed
		# from the companion data_saver class and may need adjusting
		self.image_pub = rospy.Publisher("/google_goggles/input_image",Image)
		
		self.image_sub = rospy.Subscriber(self.image_sub_topic,Image,self.image_callback)
		
		self.image = None
		
		self.interval = rospy.Duration(3)
		self.last_image_time = None
	def image_callback(self,data):
		#print 'img'
		if self.last_image_time is None or (data.header.stamp - self.last_image_time > self.interval):
			stamp = data.header.stamp.to_time()
			self.image = data
			self.last_image_time = data.header.stamp
			print "got image  at %s  with size (%d,%d)" % (time.strftime("%Y_%m_%d_T%H_%M_%S",time.localtime(stamp)),self.image.width,self.image.height)
			self.image_pub.publish(self.image)
		if self.show:
			try:
				cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
				cv.ShowImage(window_name, cv_image)
				cv.WaitKey(3)
			except CvBridgeError, e:
				print e
Example #24
class WebcamBridge:
    """
    class for moving webcam images from opencv to ros
    """
    def __init__(self):
        self.image_pub = rospy.Publisher("/tennisballs", Image)
        # self.circ_ctr_pub = rospy.Publisher("/circ_ctr", )
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/image_raw", Image, self.callback)       
        self.size = None
        rospy.sleep(1)
                  
    def callback(self, data):
        """When the camera publishes a new frame, it sends through the cv 
        processing, and publishes a new output image
        """
        print('In WebcamBridge.callback')
        # cv.WaitKey(3)
        try:
            raw = self.bridge.imgmsg_to_cv(data, 'bgr8')
            found = self.find_tennisball(raw)
            msg = self.bridge.cv_to_imgmsg(found, 'bgr8')
            self.image_pub.publish(msg)
        except CvBridgeError, e:
            print e
Example #25
    def process_image(self, msg):
        print 'pi'
        bridge = CvBridge()
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            # The depth image is a single-channel float32 image
            depth_image = bridge.imgmsg_to_cv(msg, "32FC1")
            
            floor_distance = 0.25
            mid_row_idx = depth_image.height//2

            d = float('inf')  # nearest depth seen so far
            idx = 0
            for x in xrange(depth_image.width):
                for y in xrange(mid_row_idx-2, mid_row_idx + 2):
                    tmp_depth = depth_image[y, x]
                    # NaN != NaN, so this comparison also skips invalid pixels
                    if tmp_depth == tmp_depth and tmp_depth < d:
                        d = tmp_depth
                        idx = x


            # print depth_image.width, depth_image.height, depth_image[depth_image.height//2, idx]
            theta = self.angle_calculation(depth_image.width, idx)
            # print theta
            self.twist = self.evaluate_twist(theta, d)
        except CvBridgeError, e:
            print e
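angle_calculation() and evaluate_twist() are not shown. A sketch of the former, as a method of the same class, under a pinhole-camera assumption (the 57-degree horizontal field of view is a guess, not from the snippet):

    def angle_calculation(self, image_width, column, h_fov_deg=57.0):
        import math
        # focal length in pixels from the assumed horizontal field of view
        f = (image_width / 2.0) / math.tan(math.radians(h_fov_deg) / 2.0)
        # angle of the column off the optical axis (radians, positive right)
        return math.atan2(column - image_width / 2.0, f)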
    def __init__(self, hmi_callback, im_server, image):
        self.hmi_callback = hmi_callback
        self.im_server = im_server
        # convert goal image to opencv
        self.frame_id = image.header.frame_id
        cv_bridge = CvBridge()
        self.image = cv_bridge.imgmsg_to_cv(image)

        rospy.loginfo("created BoundingBox with frame %s" % image.header.frame_id)
        
        # pixels-per-m in published markers
        self.ppm = rospy.get_param("~ppm", 100)

        # starting coordinates of ROI; need to be floats!
        # (This is arbitrary - just needs to be on top of the image)
        self.x1 = 0.25*self.image.cols/self.ppm
        self.x2 = 0.75*self.image.cols/self.ppm
        self.y1 = 0.25*self.image.rows/self.ppm
        self.y2 = 0.75*self.image.rows/self.ppm

        # used to store the result once it's been computed
        self.result = None

        self.add_image()
        self.add_bounding_box()
Example #27
def convert(filename):
    '''
    Creates directory (if there are images to be saved) for filename and
    save any images that it finds in this bag file from the spec'd
    topic.

    Args:
        filename (str): Full system path of bag file.
    '''
    save_dir = filename.split('.bag')[-2] + '-images/'
    # print 'bag file:        ', filename
    # print 'saving images in:', save_dir

    # Use a CvBridge to convert ROS images to OpenCV images so they can
    # be saved.
    bridge = CvBridge()
    with rosbag.Bag(filename, 'r') as bag:
        idx = 1
        for topic, msg, t in bag.read_messages():
            if topic == "/head_mount_kinect/rgb/image_color":
                # Wait to create image dir until we know we have one.
                if not os.path.exists(save_dir):
                    print 'creating dir:    ', save_dir
                    os.makedirs(save_dir)
                image_name = save_dir + str(idx) + ".jpg"
                if not os.path.exists(image_name):
                    print 'saving:          ', image_name
                    cv_image = bridge.imgmsg_to_cv(msg, "bgr8")
                    cv.SaveImage(image_name, cv_image)
                else:
                    pass
                    # print 'skipping:        ', image_name
                idx += 1
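A sketch of a driver for convert(); the argument handling is an assumption:

if __name__ == '__main__':
    import sys
    for bag_path in sys.argv[1:]:
        convert(bag_path)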
Example #28
class ColorViewer():
    def __init__(self, topic):
        self.bridge = CvBridge()
        rospy.Subscriber(topic, Image, self.image_callback)
        self.pub = rospy.Publisher(topic + '_viewer', Image)

    def image_callback(self, image_in):
        """ Get image to which we're subscribed. """
        image_cv = self.bridge.imgmsg_to_cv(image_in, 'bgr8')
        image_cv2 = numpy.array(image_cv, dtype=numpy.uint8)
        image_hsv = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2HSV)
        image_hsv = cv2.blur(image_hsv, (3,3))

        h = [150, 480-150]  # max 480
        w = [200, 640-200]  # max 640

        cv2.rectangle(image_cv2, (w[0], h[0]), (w[1], h[1]), 255)
        
        selected = image_hsv[h[0]:h[1], w[0]:w[1], :]
        print selected.shape
        print numpy.amin(numpy.amin(selected, 0), 0)
        print numpy.mean(numpy.mean(selected, 0), 0)
        print numpy.amax(numpy.amax(selected, 0), 0)
        print ''

        # Convert back to ROS Image msg
        image_cv = cv.fromarray(image_cv2)
        image_out = self.bridge.cv_to_imgmsg(image_cv, 'bgr8')
        self.pub.publish(image_out)
Example #29
def imgproc(d):
	bridge = CvBridge()
	cvimg = bridge.imgmsg_to_cv(d,"bgr8")
	npimg = numpy.asarray(cvimg)

	cv2.imshow("Output",npimg)
	cv2.waitKey(1)
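A sketch of the subscriber wiring this callback needs, with a placeholder topic name and assuming the usual rospy/sensor_msgs imports:

if __name__ == '__main__':
    rospy.init_node('imgproc_viewer')
    rospy.Subscriber('/camera/image_raw', Image, imgproc)
    rospy.spin()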
class data_saver:

	def __init__(self,argv,show):
		self.show = show
		
		self.image_sub_topic = "/prosilica/image_color";
		self.points_sub_topic = "/camera/depth_registered/points";
		
		self.image_pub = rospy.Publisher("/google_goggles/input_image",Image)
		self.points_pub = rospy.Publisher("/google_goggles/input_points",PointCloud2)
		
		if self.show:
			cv.NamedWindow(window_name, 1)
		self.bridge = CvBridge()
		
		self.image_sub = rospy.Subscriber(self.image_sub_topic,Image,self.image_callback)
		self.points_sub = rospy.Subscriber(self.points_sub_topic,PointCloud2,self.points_callback)
		
		self.image = None
		self.points = None
		
		self.interval = rospy.Duration(3)
		self.last_image_time = None
		
		self.last_points_time = None
		
		self.name = "data"
		self.folder = "../data/bags"
		
		
		if len(argv) >= 1:
			self.name = argv[0]
			
		if len(argv) >= 2:
			self.interval = rospy.Duration(int(argv[1]))
		
		self.filename = self.folder + "/" + self.name + "_" + time.strftime("%Y_%m_%d_T%H_%M_%S",time.localtime(rospy.Time.now().to_time())) + ".bag";
		
		#self.bag = rosbag.Bag(self.filename, 'w')
		
		
		
		#print "Saving to %s" % self.filename
		print "prefix: %s, interval: %d" % (self.name,self.interval.to_sec())

	def image_callback(self,data):
		#print 'img'
		if self.last_image_time is None or (data.header.stamp - self.last_image_time > self.interval):
			stamp = data.header.stamp.to_time()
			self.image = data
			self.last_image_time = data.header.stamp
			print "got image  at %s  with size (%d,%d)" % (time.strftime("%Y_%m_%d_T%H_%M_%S",time.localtime(stamp)),self.image.width,self.image.height)
			self.image_pub.publish(self.image)
		if self.show:
			try:
				cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
				cv.ShowImage(window_name, cv_image)
				cv.WaitKey(3)
			except CvBridgeError, e:
				print e
Example #31
class TowerDetectViewerServer:
    # name of tower
    TOWER_LOWEST = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.TOWER_LOWEST
    TOWER_MIDDLE = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.TOWER_MIDDLE
    TOWER_HIGHEST = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.TOWER_HIGHEST
    PLATE_SMALL = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.PLATE_SMALL
    PLATE_MIDDLE = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.PLATE_MIDDLE
    PLATE_LARGE = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.PLATE_LARGE
    PLATE_HEIGHT_LOWEST = 0
    PLATE_HEIGHT_MIDDLE = 1
    PLATE_HEIGHT_HIGHEST = 2
    ROBOT0_BASE_FRAME_ID = "/R1/L0"
    ROBOT1_BASE_FRAME_ID = "/R2/L0"

    def __init__(self):
        # initialize the position of the tower
        self.tower_position = {
            self.TOWER_LOWEST: Point(),
            self.TOWER_MIDDLE: Point(),
            self.TOWER_HIGHEST: Point()
        }
        self.radius = rospy.get_param("radius", 0.075)
        self.circle0 = Drawer3DCircle("/image_marker", 1, "/cluster00",
                                      self.radius, [1, 0, 0])
        self.circle1 = Drawer3DCircle("/image_marker", 2, "/cluster01",
                                      self.radius, [0, 1, 0])
        self.circle2 = Drawer3DCircle("/image_marker", 3, "/cluster02",
                                      self.radius, [0, 0, 1])
        self.circles = [self.circle0, self.circle1, self.circle2]
        # bgr
        self.color_indices = [[0, 0, 255], [0, 255, 0], [255, 0, 0]]
        self.cluster_num = -1
        self.circle0.advertise()
        self.circle1.advertise()
        self.circle2.advertise()
        self.bridge = CvBridge()
        self.state = State("/browser/state")
        self.tf_listener = tf.TransformListener()
        self.browser_click_sub = rospy.Subscriber("/browser/click", Point,
                                                  self.clickCB)
        self.browser_message_pub = rospy.Publisher("/browser/message", String)
        self.image_sub = rospy.Subscriber("/image_marked", Image, self.imageCB)
        self.cluster_num_sub = rospy.Subscriber(
            "/pcl_nodelet/clustering/cluster_num", Int32Stamped,
            self.clusterNumCB)
        self.check_circle_srv = rospy.Service("/browser/check_circle",
                                              CheckCircle, self.checkCircleCB)
        self.pickup_srv = rospy.Service("/browser/pickup", TowerPickUp,
                                        self.pickupCB)
        self.state.updateState(State.INITIAL)

        # waiting for ik server
        if rospy.get_param("~wait_robot_move_command", False):
            rospy.loginfo("waiting for robot server")
            rospy.wait_for_service("/browser/tower_robot_move_command")
        self.robot_command = rospy.ServiceProxy(
            "/browser/tower_robot_move_command", TowerRobotMoveCommand)
        rospy.loginfo("connected to robot_move server")

        # initialize the position of the towers from TL
        self.updateTowerPosition(self.TOWER_LOWEST)
        self.updateTowerPosition(self.TOWER_MIDDLE)
        self.updateTowerPosition(self.TOWER_HIGHEST)
        self.S_TOWER = self.TOWER_MIDDLE
        self.G_TOWER = None
        self.I_TOWER = None

    def towerNameToFrameId(self, tower_name):
        if tower_name == self.TOWER_LOWEST:
            return "/cluster02"
        elif tower_name == self.TOWER_MIDDLE:
            return "/cluster01"
        elif tower_name == self.TOWER_HIGHEST:
            return "/cluster00"
        else:
            raise Exception("unknown tower: %d" % (tower_name))

    def resolveTowerName(self, tower_id):
        if tower_id == self.TOWER_LOWEST:
            return "TOWER_LOWEST"
        elif tower_id == self.TOWER_MIDDLE:
            return "TOWER_MIDDLE"
        elif tower_id == self.TOWER_HIGHEST:
            return "TOWER_HIGHEST"
        else:
            raise Exception("unknown tower: %d" % (tower_id))

    def resolvePlateName(self, plate_id):
        if plate_id == self.PLATE_SMALL:
            return "PLATE_SMALL"
        elif plate_id == self.PLATE_MIDDLE:
            return "PLATE_MIDDLE"
        elif plate_id == self.PLATE_LARGE:
            return "PLATE_LARGE"
        else:
            raise Exception("unknown plate id: %d" % (plate_id))

    def resolvePlateHeightOffset(self, height_id):
        """
        return the offset of z-axis of `height_id'
        """
        return 0.0

    def resolvePlateHeight(self, height_id):
        if height_id == self.PLATE_HEIGHT_LOWEST:
            return "lowest"
        elif height_id == self.PLATE_HEIGHT_MIDDLE:
            return "middle"
        elif height_id == self.PLATE_HEIGHT_HIGHEST:
            return "highest"
        else:
            raise Exception("unknown plate height: %d" % (height_id))

    def robotBaseFrameId(self, index):  #index is 0 or 1
        if index == 0:
            return self.ROBOT0_BASE_FRAME_ID
        elif index == 1:
            return self.ROBOT1_BASE_FRAME_ID
        else:
            raise Exception("unknown index: %d" % (index))

    def updateTowerPosition(self, tower_name):
        frame_id = self.towerNameToFrameId(tower_name)
        rospy.loginfo("resolving %s" % (frame_id))
        try:
            (trans,
             rot) = self.tf_listener.lookupTransform("/origin", frame_id,
                                                     rospy.Time(0))
            rospy.loginfo("%s => %s: (%f, %f, %f)" %
                          ("/origin", frame_id, trans[0], trans[1], trans[2]))
            self.tower_position[tower_name].x = trans[0]
            self.tower_position[tower_name].y = trans[1]
            self.tower_position[tower_name].z = trans[2]
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rospy.logerr("failed to lookup transform: %s => %s" %
                         ("/origin", frame_id))

    def clusterNumCB(self, msg):
        self.cluster_num = msg.data

    def moveRobot(self, plate, from_tower, to_tower, from_height, to_height):
        robot_index = 0  #use the 1st robot first
        robot_frame_id = self.robotBaseFrameId(robot_index)
        rospy.loginfo(
            "moving: %s from %s(%s) to %s(%s)" %
            (self.resolvePlateName(plate), self.resolveTowerName(from_tower),
             self.resolvePlateHeight(from_height),
             self.resolveTowerName(to_tower),
             self.resolvePlateHeight(to_height)))
        from_target_position = self.tower_position[from_tower]
        to_target_position = self.tower_position[to_tower]
        self.robot_command(
            jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.ROBOT1,
            plate, from_tower, to_tower,
            jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.OPTION_NONE)
        # self.robot_server1(Header(), from_target_position, 0)
        # self.robot_server1(Header(), to_target_position, 1)
    def runMain(self):
        # first of all, resolve tf and store the position of the tower
        # but, we don't need to update `S' tower's position.
        # update the tf value
        self.updateTowerPosition(self.I_TOWER)
        self.updateTowerPosition(self.G_TOWER)
        self.state.updateState(State.MOVE_LARGE_S_G)
        self.moveRobot(self.PLATE_LARGE, self.S_TOWER, self.G_TOWER,
                       self.PLATE_HEIGHT_HIGHEST, self.PLATE_HEIGHT_LOWEST)

        self.state.updateState(State.MOVE_MIDDLE_S_I)
        self.moveRobot(self.PLATE_MIDDLE, self.S_TOWER, self.I_TOWER,
                       self.PLATE_HEIGHT_MIDDLE, self.PLATE_HEIGHT_LOWEST)

        self.state.updateState(State.MOVE_LARGE_G_I)
        self.moveRobot(self.PLATE_LARGE, self.G_TOWER, self.I_TOWER,
                       self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_MIDDLE)

        self.state.updateState(State.MOVE_SMALL_S_G)
        self.moveRobot(self.PLATE_SMALL, self.S_TOWER, self.G_TOWER,
                       self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_LOWEST)

        self.state.updateState(State.MOVE_LARGE_I_S)
        self.moveRobot(self.PLATE_LARGE, self.I_TOWER, self.S_TOWER,
                       self.PLATE_HEIGHT_MIDDLE, self.PLATE_HEIGHT_LOWEST)

        self.state.updateState(State.MOVE_MIDDLE_I_G)
        self.moveRobot(self.PLATE_MIDDLE, self.I_TOWER, self.G_TOWER,
                       self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_MIDDLE)

        self.state.updateState(State.MOVE_LARGE_S_G)
        self.moveRobot(self.PLATE_LARGE, self.S_TOWER, self.G_TOWER,
                       self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_HIGHEST)

    def pickupCB(self, req):
        target_index = req.tower_index
        # first of all, resolving S, I and G name binding
        # S is the START tower
        # I is the INTERMEDIATE tower
        # G is the GOAL tower
        self.G_TOWER = req.tower_index
        # lookup I
        self.I_TOWER = (
            set([self.TOWER_LOWEST, self.TOWER_MIDDLE, self.TOWER_HIGHEST]) -
            set([self.G_TOWER, self.S_TOWER])).pop()

        # update the position of the tower
        self.state.updateState(State.MOVE_LARGE_S_G)
        self.state.publish()
        self.runMain()
        self.state.updateState(State.INITIAL)
        # update S
        self.S_TOWER = self.G_TOWER
        return TowerPickUpResponse()

    def checkCircleCB(self, req):
        (width, height) = cv.GetSize(self.cv_image)
        x = int(width * req.point.x)
        y = int(height * req.point.y)
        click_index = -1
        if self.checkColor(self.cv_image[y, x], self.color_indices[0]):
            click_index = self.TOWER_HIGHEST
        elif self.checkColor(self.cv_image[y, x], self.color_indices[1]):
            click_index = self.TOWER_MIDDLE
        elif self.checkColor(self.cv_image[y, x], self.color_indices[2]):
            click_index = self.TOWER_LOWEST
        if click_index == self.S_TOWER:
            msg = "the tower the user clicked equals to the start tower"
            rospy.logerr(msg)
            return CheckCircleResponse(False, click_index, msg)
        else:
            return CheckCircleResponse(click_index != -1, click_index, "")

    def checkColor(self, image_color, array_color):
        return (image_color[0] == array_color[0]
                and image_color[1] == array_color[1]
                and image_color[2] == array_color[2])

    def clickCB(self, msg):
        (width, height) = cv.GetSize(self.cv_image)
        # msg.x and msg.y is on a relative coordinate (u, v)
        x = int(width * msg.x)
        y = int(height * msg.y)
        output_str = str([x, y]) + " - " + str(self.cv_image[y, x])
        click_index = -1
        if self.checkColor(self.cv_image[y, x], self.color_indices[0]):
            output_str = output_str + " cluster00 clicked"
            click_index = self.TOWER_HIGHEST
        elif self.checkColor(self.cv_image[y, x], self.color_indices[1]):
            output_str = output_str + " cluster01 clicked"
            click_index = self.TOWER_MIDDLE
        elif self.checkColor(self.cv_image[y, x], self.color_indices[2]):
            output_str = output_str + " cluster02 clicked"
            click_index = self.TOWER_LOWEST
        self.browser_message_pub.publish(String(output_str))

    def imageCB(self, data):
        try:
            self.cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
        except CvBridgeError, e:
            print e
Example #32
class CalibrateRobotCamera(metaclass.AutoReloader):
    class CalibrationError(Exception):
        def __init__(self, value, action=None):
            self.value = value
            self.action = action

        def __str__(self):
            return repr(self.value) + '; action=' + str(self.action)

    def __init__(self,
                 image_raw='image_raw',
                 pattern=None,
                 KK=None,
                 kc=None,
                 timeout=4.0):
        self.image_raw = image_raw
        self.bridge = CvBridge()
        self.pattern = pattern
        px = self.pattern['corners_x'] * self.pattern['spacing_x']
        py = self.pattern['corners_y'] * self.pattern['spacing_y']
        self.pattern['type'] = """<KinBody name="calibration">
  <Body name="calibration">
    <Geom type="box">
      <extents>%f %f 0.001</extents>
      <translation>%f %f 0</translation>
      <diffusecolor>0 0.5 0</diffusecolor>
    </Geom>
    <Geom type="box">
      <extents>%f %f 0.002</extents>
      <translation>%f %f 0</translation>
      <diffusecolor>0 1 0</diffusecolor>
    </Geom>
  </Body>
</KinBody>
""" % (px * 0.5 + 2.0 * self.pattern['spacing_x'],
        py * 0.5 + 2.0 * self.pattern['spacing_y'], px * 0.5, py * 0.5,
        px * 0.5, py * 0.5, px * 0.5, py * 0.5)
        self.obstaclexml = """<KinBody name="obstacle">
  <Body name="obstacle">
    <Geom type="box">
      <extents>%f %f 0.001</extents>
      <translation>0 0 0.001</translation>
      <diffusecolor>1 0 0</diffusecolor>
    </Geom>
  </Body>
</KinBody>""" % (px + 0.1, py + 0.1)
        self.timeout = timeout
        self.cvKK = None if KK is None else cv.fromarray(KK)
        self.cvkc = None if kc is None else cv.fromarray(kc)

    def detect(self, cvimage):
        corners_x = self.pattern['corners_x']
        corners_y = self.pattern['corners_y']
        found, corners = cv.FindChessboardCorners(
            cvimage, (corners_x, corners_y), cv.CV_CALIB_CB_ADAPTIVE_THRESH)
        if found:
            board_corners = (corners[0], corners[corners_x - 1],
                             corners[(corners_y - 1) * corners_x],
                             corners[len(corners) - 1])
            #find the perimeter of the checkerboard
            perimeter = 0.0
            for i in range(len(board_corners)):
                next = (i + 1) % 4
                xdiff = board_corners[i][0] - board_corners[next][0]
                ydiff = board_corners[i][1] - board_corners[next][1]
                perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff)
            #estimate the square size in pixels
            square_size = perimeter / ((corners_x - 1 + corners_y - 1) * 2)
            radius = int(square_size * 0.5 + 0.5)
            corners = array(
                cv.FindCornerSubPix(
                    cvimage, corners, (radius, radius), (-1, -1),
                    (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.01)),
                float64)
            return corners
        else:
            return None

    def getObjectPoints(self):
        object_points = zeros(
            (self.pattern['corners_x'] * self.pattern['corners_y'], 3),
            float64)
        X, Y = meshgrid(
            arange(self.pattern['corners_x']) * self.pattern['spacing_x'],
            arange(self.pattern['corners_y']) * self.pattern['spacing_y'])
        object_points[:, 0] = X.flatten()
        object_points[:, 1] = Y.flatten()
        return object_points

    def calibrateIntrinsicCamera(self,
                                 all_object_points,
                                 all_corners,
                                 imagesize,
                                 usenonlinoptim=True,
                                 fixprincipalpoint=False,
                                 computegradients=False):
        pointCounts = vstack(
            [len(object_points) for object_points in all_object_points])
        cvKK = cv.CreateMat(3, 3, cv.CV_64F)
        cvkc = cv.CreateMat(5, 1, cv.CV_64F)
        cvrvecs = cv.CreateMat(len(pointCounts), 3, cv.CV_64F)
        cvtvecs = cv.CreateMat(len(pointCounts), 3, cv.CV_64F)
        flags = cv.CV_CALIB_FIX_PRINCIPAL_POINT if fixprincipalpoint else 0
        cv.CalibrateCamera2(cv.fromarray(vstack(all_object_points)),
                            cv.fromarray(vstack(all_corners)),
                            cv.fromarray(pointCounts), (1024, 768), cvKK, cvkc,
                            cvrvecs, cvtvecs, flags)
        rvecs = array(cvrvecs)
        tvecs = array(cvtvecs)
        KK = array(cvKK)
        kc = array(cvkc)
        Ts = []
        for i in range(len(pointCounts)):
            T = matrixFromAxisAngle(rvecs[i])
            T[0:3, 3] = tvecs[i]
            Ts.append(T)
        error = None
        if usenonlinoptim:
            # for some reason, the opencv solver is not doing as good of a job as it can... (it also uses levmarq)
            x0 = r_[KK[0, 0], KK[1, 1]]
            if not fixprincipalpoint:
                x0 = r_[x0, KK[0, 2], KK[1, 2]]
            x0 = r_[x0, kc[:, 0]]
            for i in range(len(pointCounts)):
                x0 = r_[x0, rvecs[i], tvecs[i]]
            N = pointCounts[0]
            cv_image_points = cv.CreateMat(N, 2, cv.CV_64F)
            cv_object_points = [cv.fromarray(x) for x in all_object_points]

            def errorfn(x):
                xoff = 2
                cvKK[0, 0] = x[0]
                cvKK[1, 1] = x[1]
                if not fixprincipalpoint:
                    cvKK[0, 2] = x[2]
                    cvKK[1, 2] = x[3]
                    xoff += 2
                for i in range(5):
                    cvkc[i, 0] = x[xoff + i]
                xoff += 5
                e = zeros(len(all_object_points) * N * 2)
                off = 0
                for i in range(len(all_object_points)):
                    for j in range(3):
                        cvrvecs[0, j] = x[xoff + 6 * i + j]
                        cvtvecs[0, j] = x[xoff + 6 * i + 3 + j]
                    cv.ProjectPoints2(cv_object_points[i], cvrvecs[0],
                                      cvtvecs[0], cvKK, cvkc, cv_image_points)
                    image_points = array(cv_image_points)
                    e[off:(off + len(image_points)
                           )] = all_corners[i][:, 0] - image_points[:, 0]
                    off += len(image_points)
                    e[off:(off + len(image_points)
                           )] = all_corners[i][:, 1] - image_points[:, 1]
                    off += len(image_points)
                #print 'rms: ',sqrt(sum(e**2))
                return e

            x, success = leastsq(errorfn, x0, maxfev=100000, epsfcn=1e-7)
            if not success:
                raise self.CalibrationError('failed to converge to answer')
            e = errorfn(x)
            abse = sqrt(sum(e**2))
            e2 = reshape(e, [len(all_object_points), 2 * N])**2
            error = mean(sqrt(e2[:, 0:N] + e2[:, N:]), 1)
            KK[0, 0] = x[0]
            KK[1, 1] = x[1]
            xoff = 2
            if not fixprincipalpoint:
                KK[0, 2] = x[2]
                KK[1, 2] = x[3]
                xoff += 2
            for i in range(5):
                kc[i, 0] = x[xoff + i]
            if computegradients:
                deltas = r_[0.01 * ones(2 if fixprincipalpoint else 4),
                            0.0001 * ones(5)]
                grad = []
                normalization = 1.0 / (len(all_object_points) * N * 2)
                for i, delta in enumerate(deltas):
                    x[i] += delta
                    e_p = errorfn(x)
                    abse_p = sqrt(sum(e_p**2))
                    x[i] -= 2 * delta
                    e_n = errorfn(x)
                    abse_n = sqrt(sum(e_n**2))
                    x[i] += delta
                    grad.append(
                        normalization * (abse_p + abse_n - 2 * abse) /
                        (delta**
                         2))  #sum((e_p-e)**2) + sum((e-e_n)**2))/(2.0*delta))
                return KK, kc, Ts, error, array(grad)
        return KK, kc, Ts, error

    def validateCalibrationData(self, all_object_points, all_corners):
        """Builds up linear equations using method of Zhang 1999 "Flexible Camera Calibration By Viewing a Plane From Unknown Orientations". The condition number of the linear equations can tell us how good the data is. The object data all needs to lie on the Z=0 plane (actually any plane would probably do?)
        """
        cvH = cv.fromarray(eye(3))
        B = zeros((6, 1))
        B[1, 0] = 1  # skewness is 0 constraint
        # for every observation, solve for the homography and build up a matrix to solve for the intrinsicparameters
        for corners, object_points in izip(all_corners, all_object_points):
            if not all(object_points[:, 2] == 0):
                raise ValueError(
                    'The object data all needs to lie on the Z=0 plane.')

            cv.FindHomography(cv.fromarray(object_points[:, 0:2]),
                              cv.fromarray(corners), cvH, 0)
            H = array(cvH)
            v = []
            for i, j in [(0, 1), (0, 0), (1, 1)]:
                v.append([
                    H[0, i] * H[0, j], H[0, i] * H[1, j] + H[1, i] * H[0, j],
                    H[1, i] * H[1, j], H[2, i] * H[0, j] + H[0, i] * H[2, j],
                    H[2, i] * H[1, j] + H[1, i] * H[2, j], H[2, i] * H[2, j]
                ])
            B = c_[B, array(v[0]), array(v[1]) - array(v[2])]
        U, s, Vh = linalg.svd(dot(B, transpose(B)))
        b = U[:, -1]
        A = array([[b[0], b[1], b[3]], [b[1], b[2], b[4]], [b[3], b[4], b[5]]])
        # A has to be positive definite
        Aeigs = linalg.eigvals(A)
        if not all(sign(Aeigs) > 0) and not all(sign(Aeigs) < 0):
            return None
        #print 'eigenvalues: ',sqrt(s)
        print 'condition is: ', sqrt(s[-2] / s[-1])
        if False:
            # compute the intrinsic parameters K = [alpha c u0; 0 beta v0; 0 0 1]
            b = U[:, -1]
            v0 = (b[1] * b[3] - b[0] * b[4]) / (b[0] * b[2] - b[1] * b[1])
            lm = b[5] - (b[3] * b[3] + v0 * (b[1] * b[3] - b[0] * b[4])) / b[0]
            alpha = sqrt(lm / b[0])
            beta = sqrt(lm * b[0] / (b[0] * b[2] - b[1] * b[1]))
            c = -b[1] * alpha * alpha * beta / lm
            u0 = c * v0 / alpha - b[3] * alpha * alpha / lm
            print alpha, beta, u0, v0
        return sqrt(s) / len(all_object_points)

    def waitForRobot(self, robot):
        # wait for robot to stop moving
        lastvalues = robot.GetJointValues()
        print 'waiting for robot to stop...'
        while True:
            time.sleep(0.5)  # sleep some time for better values difference
            newvalues = robot.GetJointValues()
            if sqrt(sum((lastvalues - newvalues)**2)) < 0.001:
                break
            lastvalues = newvalues
        time.sleep(0.5)  # stabilize camera images?

    def waitForPattern(self, timeout=None, waitforkey=False, robot=None):
        # wait for robot to stop moving
        if robot:
            self.waitForRobot(robot)
        if timeout is None:
            timeout = self.timeout
        timestart = rospy.get_rostime()
        donecond = threading.Condition()
        data = [None]

        def detcb(imagemsg):
            if imagemsg.header.stamp > timestart:
                cvimage = self.bridge.imgmsg_to_cv(imagemsg, 'mono8')
                corners = self.detect(cvimage)
                if corners is not None:
                    with donecond:
                        # get the pose with respect to the attached sensor tf frame
                        if self.cvKK is None or self.cvkc is None:
                            KK, kc, Ts, error = self.calibrateIntrinsicCamera(
                                [self.getObjectPoints()], [corners],
                                (cvimage.width, cvimage.height),
                                usenonlinoptim=False)
                            T = Ts[0]
                        else:
                            cvrvec = cv.CreateMat(1, 3, cv.CV_64F)
                            cvtvec = cv.CreateMat(1, 3, cv.CV_64F)
                            cv.FindExtrinsicCameraParams2(
                                cv.fromarray(self.getObjectPoints()),
                                cv.fromarray(corners), self.cvKK, self.cvkc,
                                cvrvec, cvtvec)
                            T = matrixFromAxisAngle(array(cvrvec)[0])
                            T[0:3, 3] = array(cvtvec)[0]
                        data[0] = {
                            'T': T,
                            'corners_raw': corners,
                            'image_raw': array(cvimage)
                        }
                        if 'type' in self.pattern:
                            data[0]['type'] = self.pattern['type']
                        donecond.notifyAll()

        image_sub = rospy.Subscriber(self.image_raw,
                                     sensor_msgs.msg.Image,
                                     detcb,
                                     queue_size=4)
        try:
            with donecond:
                donecond.wait(timeout)
                if data[0] is not None:
                    print 'found pattern'
        finally:
            image_sub.unregister()
        if waitforkey:
            cmd = raw_input('press any key to continue (q-quit)... ')
            if cmd == 'q':
                raise KeyboardInterrupt()
        return data[0]

    def waitForObjectDetectionMessage(self,
                                      timeout=None,
                                      waitforkey=False,
                                      robot=None):
        # wait for robot to stop moving
        if robot:
            self.waitForRobot(robot)
        if timeout is None:
            timeout = self.timeout
        timestart = rospy.get_rostime()
        donecond = threading.Condition()
        data = [None]

        def detcb(imagemsg, detmsg):
            if len(detmsg.objects) > 0 and detmsg.header.stamp > timestart:
                with donecond:
                    # get the pose with respect to the attached sensor tf frame
                    q = detmsg.objects[0].pose.orientation
                    t = detmsg.objects[0].pose.position
                    cvimage = self.bridge.imgmsg_to_cv(imagemsg)
                    data[0] = {
                        'T':
                        matrixFromPose([q.w, q.x, q.y, q.z, t.x, t.y, t.z]),
                        'type': detmsg.objects[0].type,
                        'image': array(cvimage)
                    }
                    donecond.notifyAll()

        image_sub = message_filters.Subscriber(self.image_raw,
                                               sensor_msgs.msg.Image)
        det_sub = message_filters.Subscriber(
            self.objectdetection, posedetection_msgs.msg.ObjectDetection)
        ts = message_filters.TimeSynchronizer([image_sub, det_sub], 10)
        ts.registerCallback(detcb)
        try:
            with donecond:
                donecond.wait(timeout)
        finally:
            del ts  # explicitly delete
        if waitforkey:
            cmd = raw_input('press any key to continue (q-quit)... ')
            if cmd == 'q':
                raise KeyboardInterrupt()
        return data[0]

    @staticmethod
    def moveRobot(robot, values):
        basemanip = BaseManipulation(robot)
        with robot:
            robot.SetActiveDOFs(arange(robot.GetDOF()))
            basemanip.MoveActiveJoints(values)
        while not robot.GetController().IsDone():
            time.sleep(0.01)

    def gatherCalibrationData(self,
                              robot,
                              sensorname,
                              waitforkey=False,
                              forcecalibintrinsic=False,
                              calibextrinsic=True,
                              maxconedist=0.1,
                              maxconeangle=0.6,
                              sensorrobot=None,
                              **kwargs):
        waitcond = lambda: self.waitForPattern(robot=robot,
                                               waitforkey=waitforkey)
        origvalues = None
        env = robot.GetEnv()
        obstacle = None
        observations = []
        try:
            if forcecalibintrinsic or (self.cvKK is None or self.cvkc is None):
                origvalues = robot.GetJointValues()
                averagedist = 0.03
                angledelta = 0.3
                while True:
                    observations += examples.calibrationviews.CalibrationViews(
                        robot=robot,
                        sensorrobot=sensorrobot,
                        sensorname=sensorname).computeAndMoveToObservations(
                            waitcond=waitcond,
                            usevisibility=False,
                            angledelta=angledelta,
                            maxconedist=maxconedist,
                            maxconeangle=maxconeangle,
                            averagedist=averagedist,
                            **kwargs)
                    pickle.dump(observations,
                                open('observations_initial.pp', 'w'))
                    try:
                        KKorig, kcorig, Tcamera, info = self.calibrateFromObservations(
                            observations, False, fixprincipalpoint=True)
                        break
                    except self.CalibrationError, e:
                        print 'cannot compute stable KK, attempting more views', e
                        self.moveRobot(robot, origvalues)
                        averagedist = 0.02
                        angledelta = 0.2
                print 'num observations is %d: ' % len(
                    observations), KKorig, kcorig
                self.cvKK = cv.fromarray(KKorig)
                self.cvkc = cv.fromarray(kcorig)
            if not calibextrinsic:
                return KKorig, kcorig, None, {'observations': observations}
            if origvalues is not None:
                print 'moving robot back to original values'
                self.moveRobot(robot, origvalues)

            # add an obstacle around the pattern
            if self.obstaclexml is not None:
                data = waitcond()
                obstacle = env.ReadKinBodyXMLData(self.obstaclexml)
                env.AddKinBody(obstacle, True)
                obstacle.SetTransform(
                    dot(robot.GetSensor(sensorname).GetTransform(), data['T']))
            newobservations, target = examples.calibrationviews.CalibrationViews.gatherCalibrationData(
                robot=robot,
                sensorrobot=sensorrobot,
                sensorname=sensorname,
                waitcond=waitcond,
                **kwargs)
            observations += newobservations
            KKorig, kcorig, Tcamera, info = self.calibrateFromObservations(
                observations,
                Tcameraestimate=array(
                    robot.GetSensor(sensorname).GetRelativeTransform(),
                    float64))
            info['observations'] = observations
            return KKorig, kcorig, Tcamera, info
        finally:
            # remove the temporary obstacle that was added around the pattern
            if obstacle is not None:
                env.Remove(obstacle)
class DetectRV20(object):
    def __init__(self):
        self.bridge = CvBridge()
        self.depth_frame = None
        rospy.Subscriber("/softkinetic_camera/depth/image_raw", Image,
                         self.cb_depth)

        while self.depth_frame is None:
            if rospy.is_shutdown():
                return
            rospy.logwarn('RV20 waiting for first depth frame')
            rospy.sleep(0.1)
        rospy.Service("/vision/check_rv20", CheckRV20, self.cb_start_check)

    def cb_start_check(self, req):
        res = CheckRV20Response()
        rate = rospy.Rate(20)

        R20 = 0
        V20 = 0
        rospy.sleep(1.0)
        for i in xrange(NUM_FRAMES):
            while self.depth_frame is None:
                if rospy.is_shutdown():
                    return res
                rate.sleep()
            depth_frame = self.bridge.imgmsg_to_cv(self.depth_frame, "32FC1")

            self.depth_frame = None
            depth_frame = np.array(depth_frame, dtype=np.float32)
            tmp_img = np.copy(depth_frame)
            cv2.rectangle(tmp_img, (crop_x[0], crop_y[0]),
                          (crop_x[1], crop_y[1]), (255, 255, 255), 2)
            cv2.imshow('full image', tmp_img)
            depth_frame = depth_frame[crop_y[0]:crop_y[1], crop_x[0]:crop_x[1]]
            num_circles = self.process_image(depth_frame)
            if num_circles > 0:
                R20 += 1
            else:
                V20 += 1
        print 'R20:', R20, 'V20:', V20
        print 'MIN_SEEN_CIRCLES: ' + str(MIN_SEEN_CIRCLES)
        if R20 > MIN_SEEN_CIRCLES:
            res.result = 'R20'
        else:
            res.result = 'V20'
        return res

    def cb_depth(self, msg):
        self.depth_frame = msg

    def process_image(self, depth_frame):

        filtered = self.process_depth(depth_frame)

        circles = cv2.HoughCircles(filtered,
                                   cv.CV_HOUGH_GRADIENT,
                                   1,
                                   20,
                                   param1=50,
                                   param2=30,
                                   minRadius=0,
                                   maxRadius=0)
        count = 0
        if circles is not None:
            circles = np.uint16(np.around(circles))
            if VISUALIZE:
                for i in circles[0, :]:
                    # draw the outer circle
                    cv2.circle(depth_frame, (i[0], i[1]), i[2], (0, 255, 0), 2)
                    # draw the center of the circle
                    cv2.circle(depth_frame, (i[0], i[1]), 2, (0, 0, 255), 3)
            count = len(circles[0, :])
        if VISUALIZE:
            cv2.imshow('depth', depth_frame)
            cv2.waitKey(5)
        return count

    def process_depth(self, depth_img):
        # Filter out NaN readings
        idx = np.isnan(depth_img)
        depth_img[idx] = 0
        # Clamp to [MIN, MAX], then convert to a uint8 image
        depth_img = filter_low_high(depth_img, MIN, MAX)
        depth_img = depth_img / MAX * 255
        depth_img = np.uint8(depth_img)
        depth_img = cv2.medianBlur(depth_img, 9)

        frame_filter = cv2.adaptiveThreshold(
            depth_img,
            255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            # cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            11,  # neighbourhood
            2)
        cv2.bitwise_not(frame_filter, frame_filter)
        kernel = np.ones((1, 1), 'uint8')
        frame_filter = cv2.dilate(frame_filter, kernel)

        return frame_filter
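filter_low_high, together with the module-level constants MIN, MAX, NUM_FRAMES, MIN_SEEN_CIRCLES, crop_x, crop_y and VISUALIZE, is defined outside this excerpt. A minimal sketch of filter_low_high, consistent with how it is used above (zeroing depth readings outside a [low, high] band), could be:

def filter_low_high(img, low, high):
    # Keep only depths inside the [low, high] band; everything else is
    # zeroed so the later threshold/Hough steps ignore it.
    out = np.copy(img)
    out[out < low] = 0
    out[out > high] = 0
    return out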
Example #34
    def detect_and_draw(imgmsg):

        global skp, sd
        global im_ref
        #print 'number of KeyPoint objects skp', len(skp)

        br = CvBridge()
        temp = br.imgmsg_to_cv(imgmsg, "bgr8")

        im_height = temp.height
        im_length = temp.width

        cv.ShowImage("view", temp)

        cv.WaitKey(10)
        template = np.asarray(temp)

        tkp = detector.detect(template)
        tkp, td = descriptor.compute(template, tkp)

        #print 'number of KeyPoint objects tkp', len(tkp)
        #print 'number of KeyPoint objects skp', len(skp)

        flann_params = dict(algorithm=1, trees=4)
        flann = cv2.flann_Index(sd, flann_params)
        idx, dist = flann.knnSearch(td, 1, params={})
        del flann

        dist = dist[:, 0] / 2500.0
        dist = dist.reshape(-1, ).tolist()
        idx = idx.reshape(-1).tolist()
        indices = range(len(dist))
        indices.sort(key=lambda i: dist[i])
        dist = [dist[i] for i in indices]
        idx = [idx[i] for i in indices]

        skp_final = []
        for i, dis in itertools.izip(idx, dist):
            if dis < threshold:
                skp_final.append(skp[i])
            else:
                break

        tkp_final = []
        for i, dis in itertools.izip(range(len(idx)), dist):
            if dis < threshold:
                tkp_final.append(tkp[indices[i]])
            else:
                break

        h1, w1 = im_ref.shape[:2]
        h2, w2 = template.shape[:2]
        nWidth = w1 + w2
        nHeight = max(h1, h2)
        hdif = (h1 - h2) / 2
        newimg = np.zeros((nHeight, nWidth, 3), np.uint8)
        newimg[hdif:hdif + h2, :w2] = template
        newimg[:h1, w2:w1 + w2] = im_ref


        #print 'number of KeyPoint objects in skp_final', len(skp_final)
        #print 'number of KeyPoint objects in tkp_final', len(tkp_final)

        for i in range(min(len(tkp_final), len(skp_final))):
            pt_a = (int(tkp_final[i].pt[0]), int(tkp_final[i].pt[1] + hdif))
            pt_b = (int(skp_final[i].pt[0] + w2), int(skp_final[i].pt[1]))

            cv2.circle(newimg, pt_a, int(tkp_final[i].size), (160, 32, 240), 1)
            cv2.circle(newimg, pt_b, int(skp_final[i].size), (160, 32, 240), 1)
            cv2.line(newimg, pt_a, pt_b, (255, 0, 0))

            cv.ShowImage("sift", cv.fromarray(newimg))

        kp_array = sift_keypoints_array()
        kp_array.skp = [sift_keypoint(*k.pt) for k in skp_final]
        kp_array.tkp = [sift_keypoint(*k.pt) for k in tkp_final]

        pk_pub.publish(kp_array)

        key = cv.WaitKey(10) & 0xFF

        if key == ord('d'):
            im_ref = template
            skp = tkp
            sd = td
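This callback leans on module-level globals (detector, descriptor, im_ref, skp, sd, threshold, pk_pub) that the excerpt does not show. A hedged setup sketch using the OpenCV 2.x feature API the code assumes (the file name and threshold value are placeholders):

import cv2
import rospy

detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("SIFT")
im_ref = cv2.imread('reference.jpg')         # hypothetical reference image
skp = detector.detect(im_ref)                # source keypoints
skp, sd = descriptor.compute(im_ref, skp)    # source descriptors
threshold = 0.6                              # assumed match-distance cutoff
pk_pub = rospy.Publisher('keypoints', sift_keypoints_array)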
Example #35
class ROSProxy(object):
	def __init__(self):
		self.mans = {}
		self.mods = {}
		self.bridge = CvBridge()
		self.imgCls = self.msgClassFromTypeString('sensor_msgs/Image')

	def __GetTopics(self):
		return [x[0] for x in rospy.get_published_topics()]
	topics = property(fget=__GetTopics)

	def __GetServices(self):
		return rosservice.get_service_list()
	services = property(fget=__GetServices)

	def typeStringFromTopic(self, topic):
		try:
			return [x[1] for x in rospy.get_published_topics() if x[0] == topic][0]
		except:
			return None

	def typeStringFromService(self, service):
		try:
			return rosservice.get_service_type(service)
		except:
			return None

	def __classFromTypeString(self, typeString, subname):
		basemodule, itype = typeString.split('/')

		if not (basemodule in self.mans):
			try:
				roslib.load_manifest(basemodule)
				self.mans[basemodule] = True
			except:
				return None

		modname = basemodule + '.' + subname + '._' + itype
		if not (modname in self.mods):
			try:
				mod = __import__(modname)
				self.mods[modname] = mod
			except:
				return None

		mod = self.mods[modname]

		return getattr(getattr(getattr(mod, subname), '_' + itype), itype)

	def msgClassFromTypeString(self, typeString):
		return self.__classFromTypeString(typeString, 'msg')

	def srvClassFromTypeString(self, typeString):
		return self.__classFromTypeString(typeString, 'srv')

	def classFromTopic(self, topic):
		return self.msgClassFromTypeString(self.typeStringFromTopic(topic))

	def classFromService(self, service):
		return self.srvClassFromTypeString(self.typeStringFromService(service))

	def callService(self, service, arguments, callback=False, wait=True):
		def defCallback(x):
			pass

		if callback == False:
			callback = defCallback

		if wait:
			try:
				rospy.wait_for_service(service)
			except:
				callback(None)
		try:
			function = rospy.ServiceProxy(service, self.classFromService(service))
			response = apply(function, arguments)
			callback(response)
		except:
			callback(None)

	def generalize(self, inst, encoding='jpg', width=160, height=120, qual=30):
		if isinstance(inst, self.imgCls):
			cvImg = self.bridge.imgmsg_to_cv(inst,"rgb8")
			size = cv.GetSize(cvImg)
			img = Image.fromstring("RGB", size, cvImg.tostring())
			if width != size[0] or height != size[1]:
				img = img.resize((width,height),Image.NEAREST)
			buf = StringIO.StringIO()
			if encoding == 'png':
				img.save(buf, 'PNG', optimize=1)
			else:
				img.save(buf, 'JPEG', quality=qual)
			data = buf.getvalue()
			buf.close()
			return {'uri':'data:image/' + encoding + ';base64,' + standard_b64encode(data)}
		elif hasattr(inst,'__slots__'):
			obj = {}
			for field in inst.__slots__:
				obj[field] = self.generalize(getattr(inst,field), encoding, width, height, qual)
			return obj
		elif isinstance(inst,tuple) or isinstance(inst,list):
			return [self.generalize(x, encoding, width, height, qual) for x in inst]
		else:
			return inst

	braces = re.compile(r'\[[^\]]*\]') 
	atomics = ['bool', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float32', 'float64', 'string']

	def specify(self, typeStr, obj):
		if isinstance(typeStr,list):
			lst = []
			for i in xrange(len(typeStr)):
				lst.append(self.specify(typeStr[i],obj[i]))
			return lst
		elif typeStr != self.braces.sub('',typeStr):
			return [self.specify(self.braces.sub('',typeStr), x) for x in obj]
		elif typeStr in self.atomics:
			if typeStr == 'string':
				return obj.encode('ascii','ignore')
			return obj
		elif typeStr == 'time' or typeStr == 'duration':
			inst = None
			if typeStr == 'time':
				inst = roslib.rostime.Time()
			else:
				inst = roslib.rostime.Duration()
			inst.nsecs = obj['nsecs']
			inst.secs = obj['secs']
			return inst
		else:
			cls = self.msgClassFromTypeString(typeStr)
			inst = cls()
			for i in xrange(len(cls._slot_types)):
				field = cls.__slots__[i]
				typ = cls._slot_types[i]
				if field in obj:
					setattr(inst,field,self.specify(typ,obj[field]))
			return inst
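A short usage sketch for ROSProxy's generalize/specify pair, which round-trips ROS messages through plain Python dictionaries (the topic and field values are illustrative only):

proxy = ROSProxy()
# message -> dict (sensor_msgs/Image fields become base64 data URIs)
cls = proxy.classFromTopic('/odom')
as_dict = proxy.generalize(cls())
# dict -> message instance
pt = proxy.specify('geometry_msgs/Point', {'x': 1.0, 'y': 2.0, 'z': 0.0})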
Example #36
class image_converter:

	def __init__(self):
		self.image_pub = rospy.Publisher('/image_circle',Image)

		self.win = "Image window"
		cv.NamedWindow(self.win, 1)
#		cv.MoveWindow(self.win, 25, 800)
#		cv.ResizeWindow(self.win, 160, 120)
		self.bridge = CvBridge()
		self.image_sub = rospy.Subscriber('/camera/rgb/image_raw',Image,self.callback)
		feature_name = 'surf'
		self.detector, self.matcher = init_feature(feature_name)
		if self.detector is not None:
			print 'using', feature_name
		else:
			print 'unknown feature:', feature_name
			sys.exit(1)

		self.eyeimg = cv2.imread('eye1.jpg', cv.CV_LOAD_IMAGE_COLOR)
		self.eye_kp, self.eye_desc = self.detector.detectAndCompute(self.eyeimg, None)

	def callback(self,data):
		try:
			cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
		except CvBridgeError, e:
			print e

#		cv2.imshow("eyeimg", self.eyeimg)
#		cv.ShowImage("cv_image", cv_image)
		cv_image = np.array(cv_image)
#		cv2.cvtColor(cv_image, cv.CV_BGR2GRAY)

		kp, desc = self.detector.detectAndCompute(cv_image, None)
		print 'eye - %d features, frame - %d features' % (len(self.eye_kp), len(kp))

#		def match_and_draw(win):
		def match_and_draw():
			print 'matching...'
			raw_matches = self.matcher.knnMatch(self.eye_desc, trainDescriptors = desc, k = 2) #2
			p1, p2, kp_pairs = filter_matches(self.eye_kp, kp, raw_matches)
			print 'found %d' % len(p1)
			if len(p1) >= 4:
				H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
				print '%d / %d	inliers/matched' % (np.sum(status), len(status))
				vis = explore_match(self.eyeimg, cv_image, kp_pairs, status, H)

				new_image = self.convert_image(vis)
				try:
					self.image_pub.publish(self.bridge.cv_to_imgmsg(new_image, "bgr8"))		
				except CvBridgeError, e:
					print e

#				vis = explore_match(win, self.eyeimg, cv_image, kp_pairs, status, H)
#				while True:
#					c = cv.WaitKey(7) % 0x100
#					if c == 27:
#						break
#				print 'Continue...'
			else:
				H, status = None, None
				print '%d matches found, not enough for homography estimation' % len(p1)
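filter_matches and explore_match come from OpenCV's find_obj.py sample; a minimal sketch of the ratio-test filtering that filter_matches performs (a paraphrase, not the sample's exact code):

def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Lowe's ratio test: accept a match only when its best neighbour is
    # clearly better than the second best.
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            mkp1.append(kp1[m[0].queryIdx])
            mkp2.append(kp2[m[0].trainIdx])
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    return p1, p2, zip(mkp1, mkp2)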
Example #37
class ROS2OpenCV2(object):
    def __init__(self, node_name):
        self.node_name = node_name

        rospy.init_node(node_name)
        rospy.loginfo("Starting node " + str(node_name))

        rospy.on_shutdown(self.cleanup)

        # A number of parameters to determine what gets displayed on the
        # screen. These can be overridden in the appropriate launch file
        self.show_text = rospy.get_param("~show_text", True)
        self.show_features = rospy.get_param("~show_features", True)
        self.show_boxes = rospy.get_param("~show_boxes", True)
        self.flip_image = rospy.get_param("~flip_image", False)
        self.feature_size = rospy.get_param("~feature_size", 1)

        # Initialize the Region of Interest and its publisher
        self.ROI = RegionOfInterest()
        self.roi_pub = rospy.Publisher("/roi", RegionOfInterest)

        # Initialize a number of global variables
        self.frame = None
        self.frame_size = None
        self.frame_width = None
        self.frame_height = None
        self.depth_image = None
        self.marker_image = None
        self.display_image = None
        self.grey = None
        self.prev_grey = None
        self.selected_point = None
        self.selection = None
        self.drag_start = None
        self.keystroke = None
        self.detect_box = None
        self.track_box = None
        self.display_box = None
        self.keep_marker_history = False
        self.night_mode = False
        self.auto_face_tracking = False
        self.cps = 0  # Cycles per second = number of processing loops per second.
        self.cps_values = list()
        self.cps_n_values = 20
        self.busy = False
        self.resize_window_width = 0
        self.resize_window_height = 0

        # Create the main display window
        self.cv_window_name = self.node_name
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        if self.resize_window_height > 0 and self.resize_window_width > 0:
            cv.ResizeWindow(self.cv_window_name, self.resize_window_width,
                            self.resize_window_height)
        else:
            cv.ResizeWindow(self.cv_window_name, 640, 480)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        # Set a call back on mouse clicks on the image window
        cv.SetMouseCallback(self.node_name, self.on_mouse_click, None)

        # Subscribe to the image and depth topics and set the appropriate callbacks
        # The image topic names can be remapped in the appropriate launch file
        self.image_sub = rospy.Subscriber("input_rgb_image", Image,
                                          self.image_callback)
        self.depth_sub = rospy.Subscriber("input_depth_image", Image,
                                          self.depth_callback)

    def on_mouse_click(self, event, x, y, flags, param):
        # This function allows the user to select a ROI using the mouse
        if self.frame is None:
            return

        if event == cv.CV_EVENT_LBUTTONDOWN and not self.drag_start:
            self.features = []
            self.track_box = None
            self.detect_box = None
            self.selected_point = (x, y)
            self.drag_start = (x, y)

        if event == cv.CV_EVENT_LBUTTONUP:
            self.drag_start = None
            self.classifier_initialized = False
            self.detect_box = self.selection

        if self.drag_start:
            xmin = max(0, min(x, self.drag_start[0]))
            ymin = max(0, min(y, self.drag_start[1]))
            xmax = min(self.frame_width, max(x, self.drag_start[0]))
            ymax = min(self.frame_height, max(y, self.drag_start[1]))
            self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)

    def image_callback(self, data):
        # Store the image header in a global variable
        self.image_header = data.header

        # Time this loop to get cycles per second
        start = time.time()

        # Convert the ROS image to OpenCV format using a cv_bridge helper function
        frame = self.convert_image(data)

        # Some webcams invert the image
        if self.flip_image:
            frame = cv2.flip(frame, 0)

        # Store the frame width and height in a pair of global variables
        if self.frame_width is None:
            self.frame_size = (frame.shape[1], frame.shape[0])
            self.frame_width, self.frame_height = self.frame_size

        # Create the marker image we will use for display purposes
        if self.marker_image is None:
            self.marker_image = np.zeros_like(frame)

        # Copy the current frame to the global image in case we need it elsewhere
        self.frame = frame.copy()

        # Reset the marker image if we're not displaying the history
        if not self.keep_marker_history:
            self.marker_image = np.zeros_like(self.marker_image)

        # Process the image to detect and track objects or features
        processed_image = self.process_image(frame)

        # If the result is a greyscale image, convert to 3-channel for display purposes
        #if processed_image.channels == 1:
        #    cv.CvtColor(processed_image, self.processed_image, cv.CV_GRAY2BGR)

        # Make a global copy
        self.processed_image = processed_image.copy()

        # Display the user-selection rectangle or point
        self.display_selection()

        # Night mode: only display the markers
        if self.night_mode:
            self.processed_image = np.zeros_like(self.processed_image)

        # Merge the processed image and the marker image
        self.display_image = cv2.bitwise_or(self.processed_image,
                                            self.marker_image)

        # If we have a track box, then display it.  The track box can be either a regular
        # cvRect or a rotated Rect.
        if self.track_box is not None and self.is_rect_nonzero(self.track_box):
            try:
                (center, size, angle) = self.track_box
                pt1 = (int(center[0] - size[0] / 2),
                       int(center[1] - size[1] / 2))
                pt2 = (int(center[0] + size[0] / 2),
                       int(center[1] + size[1] / 2))
                if self.show_boxes:
                    #cv.EllipseBox(cv.fromarray(self.display_image), self.track_box, cv.CV_RGB(50, 255, 50), self.feature_size)
                    cv2.rectangle(self.display_image, pt1, pt2,
                                  cv.RGB(50, 255, 50), self.feature_size, 8, 0)
            except:
                try:
                    x, y, w, h = self.track_box
                    size = w, h
                    center = x + w / 2, y + h / 2
                    pt1 = (int(center[0] - size[0] / 2),
                           int(center[1] - size[1] / 2))
                    pt2 = (int(center[0] + size[0] / 2),
                           int(center[1] + size[1] / 2))
                    cv2.rectangle(self.display_image, pt1, pt2,
                                  cv.RGB(50, 255, 50), self.feature_size, 8, 0)
                except:
                    pass

        # Otherwise, if we have a detect box then display it
        elif self.detect_box is not None and self.is_rect_nonzero(
                self.detect_box):
            (pt1_x, pt1_y, w, h) = self.detect_box
            if self.show_boxes:
                cv2.rectangle(self.display_image,
                              (pt1_x, pt1_y), (pt1_x + w, pt1_y + h),
                              cv.RGB(50, 255, 50), self.feature_size, 8, 0)

        # Publish the ROI
        self.publish_roi()

        # Handle keyboard events
        self.keystroke = cv.WaitKey(5)

        # Compute the time for this loop and estimate CPS as a running average
        end = time.time()
        duration = end - start
        fps = int(1.0 / duration)
        self.cps_values.append(fps)
        if len(self.cps_values) > self.cps_n_values:
            self.cps_values.pop(0)
        self.cps = int(sum(self.cps_values) / len(self.cps_values))

        # Display CPS and image resolution if asked to
        if self.show_text:
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.5
            """ Print cycles per second (CPS) and resolution (RES) at top of the image """
            if self.frame_size[0] >= 640:
                vstart = 25
                voffset = int(50 + self.frame_size[1] / 120.)
            elif self.frame_size[0] == 320:
                vstart = 15
                voffset = int(35 + self.frame_size[1] / 120.)
            else:
                vstart = 10
                voffset = int(20 + self.frame_size[1] / 120.)
            cv2.putText(self.display_image, "CPS: " + str(self.cps),
                        (10, vstart), font_face, font_scale,
                        cv.RGB(255, 255, 0))
            cv2.putText(
                self.display_image, "RES: " + str(self.frame_size[0]) + "X" +
                str(self.frame_size[1]), (10, voffset), font_face, font_scale,
                cv.RGB(255, 255, 0))

        # Update the image display
        cv2.imshow(self.node_name, self.display_image)

        # Process any keyboard commands
        if 32 <= self.keystroke and self.keystroke < 128:
            cc = chr(self.keystroke).lower()
            if cc == 'n':
                self.night_mode = not self.night_mode
            elif cc == 'f':
                self.show_features = not self.show_features
            elif cc == 'b':
                self.show_boxes = not self.show_boxes
            elif cc == 't':
                self.show_text = not self.show_text
            elif cc == 'q':
                # The user has pressed the q key, so exit
                rospy.signal_shutdown("User hit q key to quit.")

    def depth_callback(self, data):
        # Convert the ROS image to OpenCV format using a cv_bridge helper function
        depth_image = self.convert_depth_image(data)

        # Some webcams invert the image
        if self.flip_image:
            depth_image = cv2.flip(depth_image, 0)

        # Process the depth image
        processed_depth_image = self.process_depth_image(depth_image)

        # Make global copies
        self.depth_image = depth_image.copy()
        self.processed_depth_image = processed_depth_image.copy()

    def convert_image(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            cv_image = self.bridge.imgmsg_to_cv(ros_image, "bgr8")
            return np.array(cv_image, dtype=np.uint8)
        except CvBridgeError, e:
            print e
            if depth_image.header.stamp - rgb_image_color.header.stamp > rospy.Duration.from_sec(1/30.0):
                continue
            
            frame += 1
            if frame % float(args.nth) == 0:
                if args.skip > 0:
                    args.skip -= 1
                else:
                    # store messages
                    outbag.write("/camera/depth_registered/camera_info",depth_camera_info,t)
                    outbag.write("/camera/depth_registered/image",depth_image,t)
                    outbag.write("/camera/rgb/camera_info",rgb_camera_info,t)
                    outbag.write("/camera/rgb/image_color",rgb_image_color,t)

                    # generate monochrome image from color image
                    cv_rgb_image_mono = bridge.imgmsg_to_cv(rgb_image_color, "mono8")
                    rgb_image_mono = bridge.cv_to_imgmsg(cv_rgb_image_mono)
                    rgb_image_mono.header = rgb_image_color.header
                    outbag.write("/camera/rgb/image_mono",rgb_image_mono,t)
    
                    # generate depth and colored point cloud
                    cv_depth_image = bridge.imgmsg_to_cv(depth_image, "passthrough")
                    cv_rgb_image_color = bridge.imgmsg_to_cv(rgb_image_color, "bgr8")

                    centerX = depth_camera_info.K[2]
                    centerY = depth_camera_info.K[5]
                    depthFocalLength = depth_camera_info.K[0]
                    depth_points = sensor_msgs.msg.PointCloud2()
                    depth_points.header = depth_image.header
                    depth_points.width = depth_image.width
                    depth_points.height  = depth_image.height
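                    # The excerpt breaks off here; a hedged sketch of the usual
                    # continuation, assuming packed x/y/z float32 fields (the
                    # centerX/centerY/depthFocalLength variables above suggest a
                    # pinhole back-projection follows):
                    depth_points.fields = [
                        sensor_msgs.msg.PointField(
                            name='x', offset=0,
                            datatype=sensor_msgs.msg.PointField.FLOAT32, count=1),
                        sensor_msgs.msg.PointField(
                            name='y', offset=4,
                            datatype=sensor_msgs.msg.PointField.FLOAT32, count=1),
                        sensor_msgs.msg.PointField(
                            name='z', offset=8,
                            datatype=sensor_msgs.msg.PointField.FLOAT32, count=1),
                    ]
                    depth_points.point_step = 12
                    depth_points.row_step = depth_points.point_step * depth_points.width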
class bag2data:
    def __init__(self, filename):
        print filename
        try:
            self.bag = rosbag.Bag(filename, 'r')
        except rosbag.bag.ROSBagUnindexedException:
            # reopen in append mode so the bag can be reindexed in place
            self.bag = rosbag.Bag(filename, 'a', allow_unindexed=True)
            self.bag.reindex()
            print 'Bag reindexed'

        self.pub_img = rospy.Publisher('img_out', Image)
        self.img = None

        self.bridge = CvBridge()

    def view_depth(self):
        print 'start'
        for topic, msg, t in self.bag.read_messages():
            print t
            self.img = msg
            if self.img is not None:
                self.pub_img.publish(self.img)
        print 'end'

    def convert(self):

        #Make a directory in .../kinect_tools/output/TIME to put the images
        out_dir_ext = time.ctime()  #name output folder by time
        output_dir = working_dir + 'kinect_tools/output/' + out_dir_ext + '/'
        os.mkdir(output_dir)
        self.output_dir = output_dir

        topic_list = []
        topic_frame = [0]
        i_frame = 0

        #Go through all of the messages in the bag
        for topic, msg, t in self.bag.read_messages():
            i_num = 0
            img_num = None

            #Check which image it is (i.e. rgb or depth). Title the image differently depending on the topic. Output is .../kinect_tools/output/TIME/imgNUM_FRAME.jpg
            # NUM is the topic index and FRAME is the message number
            for i_topic in topic_list:
                if topic == i_topic:
                    img_num = i_num
                    i_frame = topic_frame[i_num]
                    break
                else:
                    i_num += 1

            if img_num is None:
                topic_list.append(topic)
                topic_frame.append(0)
                img_num = i_num
                i_frame = topic_frame[img_num]
                print topic_list

            try:
                self.img = np.array(self.bridge.imgmsg_to_cv(msg))
            except:
                print 'Received non-image message!'

            if self.img is not None:
                filename = output_dir + 'img' + str(img_num) + '_' + str(
                    topic_frame[img_num]) + '.jpg'
                print filename
                try:
                    if img_num == 1:
                        self.img = self.img * 60
                    cv.SaveImage(filename, self.img)
                except:
                    print 'error!!!!!!!!'
                self.pub_img.publish(self.bridge.cv_to_imgmsg(self.img))
                topic_frame[img_num] += 1
#				i_frame += 1
#				print 'Output frame: ' + str(i_frame-1)
#				print 'Output frame: ' + str(topic_frame[img_num])

#				time.sleep(1)

        print 'Done converting'

    def convert_3D(self):

        #Make a directory in .../kinect_tools/output/TIME to put the images
        out_dir_ext = time.ctime()  #name output folder by time
        output_dir = working_dir + 'kinect_tools/output/' + out_dir_ext + '/'
        os.mkdir(output_dir)
        self.output_dir = output_dir

        print "Output_dir: " + str(output_dir)

        msg_generator = self.bag.read_messages(
            topics=('/camera/depth/points2', '/camera/rgb/image_color'))

        color_index = 0
        depth_index = 0

        while 1:
            try:
                topic, msg, t = msg_generator.next()
            except StopIteration:
                print 'Done saving images'
                break
            print topic
            if topic == '/camera/depth/points2':
                self.save_depth_img(msg, depth_index)
                self.save_color_img(msg, color_index)
                depth_index += 1
            else:
                color_index += 1

    def save_depth_img(self, msg, img_num):
        pts = []
        for p in point_cloud.read_points(msg):
            pts.append(p[2])

        img = np.reshape(pts, (480, 640))
        max_px = np.nanmax(img)
        min_px = np.nanmin(img)
        scale = 255.0 / (max_px - min_px)
        filename = self.output_dir + 'img_depth_' + str(img_num) + '.jpg'
        cv.SaveImage(filename, cv.fromarray(np.uint8(np.nan_to_num(img - min_px) * scale)))

    def save_color_img(self, msg, img_num):
        # Each point's rgb field (p[3]) is a float32 whose bytes pack the
        # 8-bit R, G and B channels (this layout is assumed here); unpack
        # them to rebuild the color image.
        pts = []
        for p in point_cloud.read_points(msg):
            rgb = struct.unpack('I', struct.pack('f', p[3]))[0]
            pts.append((rgb & 0xFF, (rgb >> 8) & 0xFF, (rgb >> 16) & 0xFF))  # B, G, R

        img = np.reshape(np.array(pts, dtype=np.uint8), (480, 640, 3))
        filename = self.output_dir + 'img_color_' + str(img_num) + '.jpg'
        cv.SaveImage(filename, cv.fromarray(img))
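A hedged usage sketch for bag2data (the bag file name is a placeholder; rospy must be initialized for the img_out publisher):

if __name__ == '__main__':
    rospy.init_node('bag2data')
    converter = bag2data('kinect_session.bag')  # hypothetical bag file
    converter.convert()  # dump every image topic to numbered .jpg files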
Example #40
class pr2WideDepth:
    def __init__(self, targetFrame=None, visual=False, tfListener=None):
        self.cloudTime = time.time()
        self.readTime = time.time()
        self.pointCloud = None
        self.visual = visual
        self.targetFrame = targetFrame
        self.updateNumber = 0

        self.points3D = None

        if tfListener is None:
            self.transformer = tf.TransformListener()
        else:
            self.transformer = tfListener

        self.bridge = CvBridge()
        self.imageData = None
        self.spoonImageData = None

        # RGB Camera
        self.rgbCameraFrame = None
        self.cameraWidth = None
        self.cameraHeight = None
        self.pinholeCamera = None

        # Gripper
        self.lGripperPosition = None
        self.lGripperRotation = None
        self.lGripperTransposeMatrix = None
        self.lGripX = None
        self.lGripY = None
        self.micLocation = None
        self.grips = []
        # Spoon
        self.spoonX = None
        self.spoonY = None
        self.spoon = None

        self.targetTrans = None
        self.targetRot = None
        self.gripperTrans = None
        self.gripperRot = None

        self.cloudSub = rospy.Subscriber('/wide_stereo/points2', PointCloud2,
                                         self.cloudCallback)
        print 'Connected to depth'
        self.imageSub = rospy.Subscriber('/wide_stereo/right/image_color',
                                         Image, self.imageCallback)
        print 'Connected to image'
        self.cameraSub = rospy.Subscriber('/wide_stereo/right/camera_info',
                                          CameraInfo,
                                          self.cameraRGBInfoCallback)
        print 'Connected to camera info'

    def getAllRecentPoints(self):
        print 'Time between read calls:', time.time() - self.readTime
        startTime = time.time()

        self.transformer.waitForTransform(self.targetFrame,
                                          self.rgbCameraFrame, rospy.Time(0),
                                          rospy.Duration(5))
        try:
            self.targetTrans, self.targetRot = self.transformer.lookupTransform(
                self.targetFrame, self.rgbCameraFrame, rospy.Time(0))
        except tf.ExtrapolationException:
            print 'TF Target Error!'
            pass

        self.transformer.waitForTransform('/l_gripper_tool_frame',
                                          self.targetFrame, rospy.Time(0),
                                          rospy.Duration(5))
        try:
            self.gripperTrans, self.gripperRot = self.transformer.lookupTransform(
                '/l_gripper_tool_frame', self.targetFrame, rospy.Time(0))
        except tf.ExtrapolationException:
            print 'TF Gripper Error!'
            pass

        print 'Read computation time:', time.time() - startTime
        self.readTime = time.time()
        return self.points3D, self.imageData, self.micLocation, self.spoon, [self.targetTrans, self.targetRot], [self.gripperTrans, self.gripperRot], \
               [self.lGripX, self.lGripY], [self.spoonX, self.spoonY]

    def cancel(self):
        self.cloudSub.unregister()
        self.cameraSub.unregister()
        self.imageSub.unregister()

    def imageCallback(self, data):
        if self.lGripX is None:
            return

        try:
            image = self.bridge.imgmsg_to_cv(data)
            self.imageData = np.asarray(image[:, :])
        except CvBridgeError, e:
            print e
            return
Example #41
class image_converter:
    def __init__(self):
        self.image_pub = rospy.Publisher("image_converter1", Image)

        cv.NamedWindow("Image window", 1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("ardrone/image_raw", Image,
                                          self.callback)

    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
        except CvBridgeError, e:
            print e

        #(cols,rows) = cv.GetSize(cv_image)
        #if cols > 60 and rows > 60 :
        #  cv.Circle(cv_image, (50,50), 10, 255)

        #######################################
        #img1=cv.CreateImage((150,150),8,3)
        #cv.Rectangle(cv_image, (60,60),(70,90), (255,0,255))
        #sub1=cv.GetSubRect(cv_image,(60,60,150,150))
        #save1=cv.CloneMat(sub1)
        #sub2=cv.GetSubRect(img1,(0,0,150,150))
        #cv.Copy(save1,sub2)
        storage = cv.CreateMemStorage(0)
        img = cv.CreateImage(
            (cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]), 8, 3)
        img1 = cv.CreateImage(
            (cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]), 8, 3)
        #img=cv.CreateImage(cv.GetSize(cv_image),8,3)
        img_r = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_g = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_b = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_g1 = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_g2 = cv.CreateImage(cv.GetSize(img), 8, 1)

        img2 = cv.LoadImage("/home/petin/catkin_ws/src/vp_ardrone2/ris1.jpg",
                            cv.CV_LOAD_IMAGE_GRAYSCALE)

        sub1 = cv.GetSubRect(
            cv_image, (0, 0, cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]))
        save1 = cv.CloneMat(sub1)
        sub2 = cv.GetSubRect(
            img, (0, 0, cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]))
        cv.Copy(save1, sub2)

        #cv.CvtColor(img, img1, cv.CV_BGR2HSV)
        #cv.CvtPixToPlane(img1,img_h,img_s,img_v,None)
        #cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
        #cv.Smooth(gray,gray,cv.CV_GAUSSIAN,5,5)
        cv.Split(img, img_b, img_g, img_r, None)
        #
        #cv.ShowImage("Image window1", img)
        #cv.ShowImage("Image windowb", img_b)
        #cv.ShowImage("Image windowg", img_g)
        #cv.ShowImage("Image windowr", img_r)
        #
        cv.InRangeS(img_g, cv.Scalar(180), cv.Scalar(255), img_g1)
        #cv.InRangeS(img_s, cv.Scalar(135), cv.Scalar(255), img_s1);
        #cv.InRangeS(img_b, cv.Scalar(0), cv.Scalar(61), img_b1);
        #cv.Invert(img_g1,img_g2,cv.CV_SVD)
        cv.Smooth(img2, img2, cv.CV_GAUSSIAN, 9, 9)
        #
        cv.ShowImage("Image windowh1", img_g1)
        #cv.ShowImage("Image windowg1", img_h1)
        #cv.ShowImage("Image windowr1", img_r1)
        #cv.ShowImage("Image gray", gray)
        # search circle
        storage = cv.CreateMat(img2.width, 1, cv.CV_32FC3)
        cv.ShowImage("Image window1", img2)
        cv.HoughCircles(img2, storage, cv.CV_HOUGH_GRADIENT, 2, 100, 100, 50,
                        10, 400)
        #rospy.loginfo(storage.width)
        for i in xrange(storage.width - 1):
            radius = storage[i, 2]
            center = (storage[i, 0], storage[i, 1])
            rospy.loginfo('444')
            cv.Circle(cv_image, center, radius, (0, 0, 255), 3, 10, 200)
        #search_circle=cv.HoughCircles(img_g1,np.asarray(storage),3,10,150)
        #for res in search_circles:
        #   p = float (cv.GetSeqElem(res))
        #   pt = cv.Point( cv.Round( p[0] ), cv.Round( p[1] ) );
        #   cv.Circle( cv_image, pt, cv.Round( p[2] ), 255 );
        #
        #cv.And(img_g,img_r,img_a)
        #cv.And(img_a,img_b,img_a)
        #
        cv.ShowImage("Image window", cv_image)
        cv.WaitKey(3)

        try:
            self.image_pub.publish(self.bridge.cv_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError, e:
            print e
Example #42
class CalibCam(script):

	def Initialize(self):
		self.bridge = CvBridge()
		self.image_sub_r = rospy.Subscriber("/stereo/right/image_color",Image,self.callback_r)
		self.cv_image_r = cv.CreateImage((1,1), 1 , 3)
		self.image_sub_l = rospy.Subscriber("/stereo/left/image_color",Image,self.callback_l)
		self.cv_image_l = cv.CreateImage((1,1), 1 , 3)
		self.image_sub_3d = rospy.Subscriber("/cam3d/rgb/image_color",Image,self.callback_3d)
		self.cv_image_3d = cv.CreateImage((1,1), 1 , 3)
		self.sss.init("head")
		self.sss.init("torso")
		self.sss.init("sdh")
		self.image_number_offset = 1
		self.listener = tf.TransformListener()
		self.file_path = "./"

	def Run(self):
		print "start"
		seed()
		maxVal_pan = 0.125 #max: 0.125
		maxVal_tilt = 0.20 #max: 0.20
		nr_images = 12

#joint_names: ['torso_lower_neck_pan_joint', 'torso_lower_neck_tilt_joint', 'torso_upper_neck_pan_joint', 'torso_upper_neck_tilt_joint']
#lower right  positions: [-0.10956629365682602, -0.049855709075927734, -0.16463811695575714, -0.074878953397274017]


		# move components to initial position
		self.sss.move("head","back")
		#self.sss.move("head","front")
		#self.sss.move("arm","calib")
		self.sss.move("torso","home")
		#self.sss.move("sdh","home")

		self.sss.wait_for_input()
		#self.sss.move("sdh","calib")
		#self.sss.wait_for_input()

		for j in range(1,4):
			if (j==1):
				# close (~ 0.75m)
				print "Move checkerboard to a close distance and strike any key when ready."
				self.sss.wait_for_input()
				maxVal_pan = 0.125 #max: 0.125
				maxVal_tilt = 0.15 #max: 0.20
			elif (j==2):
				# intermediate (~1.20m)
				print "Move checkerboard to an intermediate distance and strike any key when ready."
				self.sss.wait_for_input()
				maxVal_pan = 0.125 #max: 0.125
				maxVal_tilt = 0.20 #max: 0.20
			elif (j==3):
				# far (~1.90m)
				print "Move checkerboard to a far distance and strike any key when ready."
				self.sss.wait_for_input()
				maxVal_pan = 0.125 #max: 0.125
				maxVal_tilt = 0.20 #max: 0.20

			# start calibration routine
			for i in range(1,nr_images+1):
				if i==1:
					r1 = maxVal_pan
					r2 = maxVal_tilt
				elif i==2:
					r1 = 0
					r2 = maxVal_tilt
				elif i==3:
					r1 = -maxVal_pan
					r2 = maxVal_tilt
				elif i==4:
					r1 = -maxVal_pan
					r2 = 0
				elif i==5:
					r1 = 0
					r2 = 0
				elif i==6:
					r1 = maxVal_pan
					r2 = 0
				elif i==7:
					r1 = maxVal_pan
					r2 = -maxVal_tilt
				elif i==8:
					r1 = 0
					r2 = -maxVal_tilt
				elif i==9:
					r1 = -maxVal_pan
					r2 = -maxVal_tilt
				else:
					r1 = (random()-0.5)*2*maxVal_pan
					r2 = (random()-0.5)*2*maxVal_tilt
				self.sss.move("torso",[[1./3.*r1,1./3.*r2,2./3.*r1,2./3.*r2]])
				self.sss.sleep(1)
				if not self.sss.parse:
					self.captureImage()

		self.sss.move("torso","home")
		print "Prepare checkerboard for further views and hit <Enter> for each image capture. Press any other key + <Enter> to finish.\n"

		key = self.sss.wait_for_input()
		if not self.sss.parse:
			while (key==''):
				self.captureImage()
				key = self.sss.wait_for_input()

		print "finished"

	def captureImage(self):
		try:
			(trans,rot) = self.listener.lookupTransform('/base_link', '/head_axis_link', rospy.Time(0))
			rpy = euler_from_quaternion(rot)
			cyaw = cos(rpy[2])
			syaw = sin(rpy[2])
			cpitch = cos(rpy[1])
			spitch = sin(rpy[1])
			croll = cos(rpy[0])
			sroll = sin(rpy[0])
			R11 = cyaw*cpitch
			R12 = cyaw*spitch*sroll-syaw*croll
			R13 = cyaw*spitch*croll+syaw*sroll
			R21 = syaw*cpitch
			R22 = syaw*spitch*sroll+cyaw*croll
			R23 = syaw*spitch*croll-cyaw*sroll
			R31 = -spitch
			R32 = cpitch*sroll
			R33 = cpitch*croll
			fout = open(self.file_path+'calpic'+str(self.image_number_offset)+'.coords','w')
			fout.write(str(R11)+' '+str(R12)+' '+str(R13)+' '+str(trans[0]*1000)+'\n'+str(R21)+' '+str(R22)+' '+str(R23)+' '+str(trans[1]*1000)+'\n'+str(R31)+' '+str(R32)+' '+str(R33)+' '+str(trans[2]*1000))
			fout.close()
		except (tf.LookupException, tf.ConnectivityException):
			print "tf exception"

		self.sss.sleep(1)
		cv.SaveImage(self.file_path+'right_'+str(self.image_number_offset)+'.png',self.cv_image_r)
		cv.SaveImage(self.file_path+'left_'+str(self.image_number_offset)+'.png',self.cv_image_l)
		cv.SaveImage(self.file_path+'cam3d_'+str(self.image_number_offset)+'.png',self.cv_image_3d)
		self.sss.sleep(1)
		self.image_number_offset = self.image_number_offset + 1
		
	def callback_r(self,data):
		try:
			self.cv_image_r = self.bridge.imgmsg_to_cv(data, "bgr8")
		except CvBridgeError, e:
			print e
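The hand-built R11..R33 entries above are the yaw-pitch-roll rotation matrix Rz(yaw)·Ry(pitch)·Rx(roll). If tf.transformations is available, the same 3x3 block can be obtained in one call (a sketch, not the script's own code):

from tf.transformations import euler_matrix
# rpy = (roll, pitch, yaw) as returned by euler_from_quaternion above
R = euler_matrix(rpy[0], rpy[1], rpy[2], 'sxyz')[0:3, 0:3]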
Example #43
class VisibilityCheckerNode():
    '''
    @summary: Checks checkerboard visibility on one or more cameras (Image message topics).

    The number of cameras is configurable via ROS parameters.
    After startup, call the "/image_capture/visibility_check" ROS service to test
    whether the checkerboard is visible in all cameras at once.
    '''

    def __init__(self):
        '''
        Initializes storage, gets parameters from parameter server and logs to rosinfo
        '''
        self.bridge = CvBridge()

        # set up Checkerboard and CheckerboardDetector
        self.board = Checkerboard((9, 6), 0.03)
        self.detector = CheckerboardDetector(self.board)
        rospy.init_node(NODE)
        self.counter = 0

        # Get params from ros parameter server or use default
        self.cams = rospy.get_param("~cameras")
        self.camera = [self.cams["reference"]["topic"]]
        if self.cams.has_key("further"):
            rospy.loginfo("FURTHER cameras specified")
            for cam in self.cams["further"]:
                self.camera.append(cam["topic"])
        else:
            rospy.logwarn("No FURTHER cameras specified")
        self.numCams = len(self.camera)

        # Init images
        self.image = []
        self.image_received = [False] * self.numCams
        for id in range(self.numCams):
            self.image.append(Image())

        # Subscribe to images
        self.imageSub = []
        for id in range(self.numCams):
            self.imageSub.append(rospy.Subscriber(
                self.camera[id], Image, self._imageCallback, id))

        # Wait for image messages
        for id in range(self.numCams):
            rospy.wait_for_message(self.camera[id], Image, 5)

    def _imageCallback(self, data, id):
        '''
        Copy image message to local storage

        @param data: Currently received image message
        @type  data: ROS Image() message
        @param id: Id of camera from which the image was received
        @type  id: integer
        '''
        #print "cb executed"
        self.image[id] = data
        self.image_received[id] = True

    def _checkHandle(self, req):
        '''
        Service handle for "Capture" Service
        Grabs images, converts them and saves them to files

        @param req: service request
        @type  req: CaptureRequest() message

        @return: CaptureResponse() message
        '''
        self.image_received = [False] * self.numCams
        visible = []
        while not all(self.image_received):
            print "waiting for images"
            rospy.sleep(1)
        print "=== ALL IMAGES RECEIVED ==="
        self.image_received = [False] * self.numCams
        for id in range(self.numCams):
            image = self.image[id]

            cvImage = self.bridge.imgmsg_to_cv(image, 'rgb8')
            img_raw = cv2util.cvmat2np(cvImage)
            try:
                self.detector.detect_image_points(img_raw, False, True)
                visible.append(True)
            except self.detector.NoPatternFoundException:
                rospy.logwarn("No cb found")
                visible.append(False)
            # grab image messages
        #print '%s checkerboards found --> return %s'%(sum(visible),all(visible))
        response = VisibleResponse()
        response.every = False
        response.master = False

        if all(visible):
            response.every = True
            response.master = True

        #elif visible[0]:
        #   response.master = True
        return response

    def run(self):
        # Start service
        srv = rospy.Service(
            '/image_capture/visibility_check', Visible, self._checkHandle)
        rospy.loginfo("service '/image_capture/visibility_check' started, waiting for requests...")
        rospy.spin()
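A hedged client-side sketch for calling the service registered above (it assumes the same Visible service type this node imports):

rospy.wait_for_service('/image_capture/visibility_check')
check = rospy.ServiceProxy('/image_capture/visibility_check', Visible)
resp = check()
print 'checkerboard visible in all cameras:', resp.every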
Example #44
class Ros2Python:
    def __init__(self,
                 objectDetector=None,
                 laneDetector=None,
                 grassDetector=None):
        # Subscribe to PointCloud2
        self.subImage = rospy.Subscriber("/left/image_rect_color",\
            Image, self.cbImage, queue_size=1)
        self.subPoints = rospy.Subscriber("/points2",\
            PointCloud2, self.cbPoints, queue_size=1)

        self.images = []
        self.mask = None
        self.imageStamps = []
        self.imageInd = 0

        self.bridge = CvBridge()

        self.showDetected = False

        self.obstacleCloud = np.array([])
        if objectDetector:
            self.objectDetector = objectDetector
        else:
            self.objectDetector = ObstacleDetector((240, 320))
            # We assume that the normal of the plane is pointing straight up
            # in the vision (-Y direction)
            self.objectDetector.setNormalConstraint(np.array([0., -1., -0.2]))
            # And allow the search to deviate only 30 degrees from that
            self.objectDetector.angleConstraint = 30.

        if laneDetector:
            self.laneDetector = laneDetector
        else:
            self.laneDetector = LaneDetector((240, 320))

        if grassDetector:
            self.grassDetector = grassDetector
        else:
            # Init with H,S,V thresholds
            self.grassDetector = GrassDetector(45, 35, 35)

        if VERBOSE:
            print "Subscribed to /left/image_rect_color"
            print "Subscribed to /points2"

    def run(self):
        rospy.init_node('Ros2Python', anonymous=True)
        try:
            rospy.spin()
        except KeyboardInterrupt:
            print "Shutting down liveObstacleDetection module"

    def cbPoints(self, point_msg):
        if VERBOSE:
            print 'Received points. Size: (', point_msg.width, ',', point_msg.height, ')'
        # Convert to numpy array (http://goo.gl/s6OGja)
        cloud = np.array(points2cloud_to_array(point_msg))

        # ObDetector only recalculates everything when the cloud is updated
        cloud = np.dstack((cloud['x'], cloud['y'], cloud['z']))

        pointT = point_msg.header.stamp.to_sec()

        #print 'Point Time:', pointT
        #print 'Image Times:', self.imageStamps
        image = None
        for i in range(len(self.imageStamps)):
            imageT = self.imageStamps[i]
            if imageT > pointT:
                image = self.images[i]
                break

        if image is None:
            image = self.images[i]
        self.images = self.images[i:]
        self.imageStamps = self.imageStamps[i:]

        self.objectDetector.updateImage(image)
        self.laneDetector.updateImage(image)
        self.grassDetector.updateImage(image)

        self.objectDetector.updatePoints(cloud)

        ##### Jared, these are the 2 most useful variables for you:
        ''' mask: is a numpy array the same size as the camera image. The
            elements in the mask are set to 1 if that pixel is part of the
            plane/ground, 2 if that pixel is an object, and 0 otherwise.
        '''

        self.mask = self.objectDetector.getPlaneObjectMask()
        self.grassDetector.updateMask(self.mask == 1)
        grassMask = np.logical_and(self.grassDetector.findGrass(),
                                   self.mask != 2)
        self.mask[grassMask] = 1

        model = self.objectDetector.model
        planeInd = model.coord2ind(np.array(np.where(self.mask == 1)).T)
        planeInd = planeInd.astype(int)
        success, coeff = model.optimiseModelCoefficients(\
            planeInd,self.objectDetector.coeff)
        success, newInd = model.selectWithinDistance(coeff, 0.15)
        self.objectDetector.coeff = coeff

        #print newInd.shape
        #print newInd
        self.mask[model.ind2coord(newInd)] = 1
        ''' obstacleCloud: a list of 2D points relative to the camera.
            The positive X-axis goes to the right of the camera.
            The positive Y-axis goes away from the camera.
            TODO: Double check this =S
        '''
        self.obstacleCloud = self.objectDetector.getObstacleCloud()

        # Do the lane detect also
        detectedLanes = self.laneDetector.findLines()
        self.detectedLanes = np.logical_and(detectedLanes, self.mask != 2)
        self.laneDetector.showImage()
        self.mask[self.detectedLanes] = 2

        self.maskall = self.mask

        if self.showDetected:
            #print 'Showing Detected'
            self.objectDetector.showDetected(self.mask)
            self.showDetected = False
            cv2.waitKey(10)
        #self.objectDetector.showDepth()
        # Show the plane and objects
        # im = self.objectDetector.image.copy()
        # im[grassMask] = [255,0,0]
        # im[mask==2] = [0,0,255]
        # cv2.imshow('DetectedObjects', im)

        # Show the lines
        #self.laneDetector.showImage()
        # cv2.imshow('DetectedLines', detectedLanes.astype(float)*255)

        # cv2.waitKey(15)
    def getMaskAll(self):
        return self.maskall

    def cbImage(self, img_msg):
        image = np.asarray(self.bridge.imgmsg_to_cv(img_msg))
        self.images.append(image)
        self.imageStamps.append(img_msg.header.stamp.to_sec())
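A short consumer sketch for the mask documented above (1 = ground/plane, 2 = obstacle or lane, 0 = unknown), assuming a running Ros2Python node whose cbPoints has fired at least once:

rospy.init_node('mask_consumer')
node = Ros2Python()
rospy.sleep(2.0)  # give cbPoints time to fire at least once
mask = node.getMaskAll()
print 'ground pixels:', (mask == 1).sum(), 'obstacle/lane pixels:', (mask == 2).sum()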
Example #45
class CalibCam(script):
    def Initialize(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/stereo/right/image_color", Image,
                                          self.callback)
        self.cv_image = cv.CreateImage((1, 1), 1, 3)
        self.sss.init("head")
        self.sss.init("torso")
        self.sss.init("sdh")

    def Run(self):
        print "start"
        seed()
        maxVal = 0.04
        file_path = "./"
        listener = tf.TransformListener()
        nr_images = 14

        # move components to initial position
        self.sss.move("head", "back")
        self.sss.move("arm", "calib")
        self.sss.move("torso", "home")
        self.sss.move("sdh", "home")

        self.sss.wait_for_input()
        self.sss.move("sdh", "calib")
        self.sss.wait_for_input()

        # start calibration routine
        for i in range(1, nr_images):
            if i == 1:
                r1 = maxVal
                r2 = maxVal
            elif i == 2:
                r1 = -maxVal
                r2 = maxVal
            elif i == 3:
                r1 = maxVal
                r2 = -maxVal
            elif i == 4:
                r1 = -maxVal
                r2 = -maxVal
            else:
                r1 = (random() - 0.5) * 2 * maxVal
                r2 = (random() - 0.5) * 2 * maxVal
            self.sss.move("torso", [[r1, r2, r1, r2]])
            self.sss.sleep(1)
            try:
                (trans,
                 rot) = listener.lookupTransform('/base_link',
                                                 '/head_color_camera_r_link',
                                                 rospy.Time(0))
                rpy = euler_from_quaternion(rot)
                cyaw = cos(rpy[2])
                syaw = sin(rpy[2])
                cpitch = cos(rpy[1])
                spitch = sin(rpy[1])
                croll = cos(rpy[0])
                sroll = sin(rpy[0])
                R11 = cyaw * cpitch
                R12 = cyaw * spitch * sroll - syaw * croll
                R13 = cyaw * spitch * croll + syaw * sroll
                R21 = syaw * cpitch
                R22 = syaw * spitch * sroll + cyaw * croll
                R23 = syaw * spitch * croll - cyaw * sroll
                R31 = -spitch
                R32 = cpitch * sroll
                R33 = cpitch * croll
                fout = open(file_path + 'calpic' + str(i) + '.coords', 'w')
                fout.write(
                    str(R11) + ' ' + str(R12) + ' ' + str(R13) + ' ' +
                    str(trans[0] * 1000) + '\n' + str(R21) + ' ' + str(R22) +
                    ' ' + str(R23) + ' ' + str(trans[1] * 1000) + '\n' +
                    str(R31) + ' ' + str(R32) + ' ' + str(R33) + ' ' +
                    str(trans[2] * 1000))
                fout.close()
            except (tf.LookupException, tf.ConnectivityException):
                print "tf exception"

            self.sss.sleep(1)
            cv.SaveImage(file_path + 'calpic' + str(i) + '.png', self.cv_image)
            self.sss.sleep(1)
        self.sss.move("torso", "home")
        print "finished"

    def callback(self, data):
        try:
            self.cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
        except CvBridgeError, e:
            print e
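The R11..R33 entries built by hand above form the standard roll-pitch-yaw (static xyz) rotation matrix, so tf.transformations can produce the same 3x4 [R | 1000*t] rows directly. A minimal sketch, with hypothetical rpy/trans values:

from tf import transformations

rpy = (0.1, -0.2, 0.3)     # hypothetical roll, pitch, yaw (rad)
trans = (0.5, 0.0, 1.2)    # hypothetical translation (m)
R = transformations.euler_matrix(rpy[0], rpy[1], rpy[2])[:3, :3]
for i in range(3):
    row = list(R[i]) + [trans[i] * 1000.0]   # translation in mm, as above
    print(' '.join(str(v) for v in row))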
Ejemplo n.º 46
0
def get_image_from_topic(rgb_topic=RGB_TOPIC):
    msg = rospy.wait_for_message(rgb_topic, Image, 4)
    bridge = CvBridge()
    img = bridge.imgmsg_to_cv(msg, 'bgr8')
    np_img = np.asarray(img)
    return np_img
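Hypothetical usage of the helper above; the topic name is an assumption, and a node must already be initialized for the subscription to work:

import rospy

rospy.init_node('grab_one_frame', anonymous=True)
frame = get_image_from_topic('/camera/rgb/image_rect_color')
print(frame.shape)   # e.g. (480, 640, 3) for a VGA bgr8 image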
Ejemplo n.º 47
0
class ObjectFollower():
    def __init__(self):
        rospy.init_node("object_follower")

        # Set the shutdown function (stop the robot)
        rospy.on_shutdown(self.shutdown)

        # How often should we update the robot's motion?
        self.rate = rospy.get_param("~rate", 10)

        r = rospy.Rate(self.rate)

        # Scale the ROI by this factor to avoid background distance values around the edges
        self.scale_roi = rospy.get_param("~scale_roi", 0.9)

        # The max linear speed in meters per second
        self.max_linear_speed = rospy.get_param("~max_linear_speed", 0.3)

        # The minimum linear speed in meters per second
        self.min_linear_speed = rospy.get_param("~min_linear_speed", 0.02)

        # The maximum rotation speed in radians per second
        self.max_rotation_speed = rospy.get_param("~max_rotation_speed", 2.0)

        # The minimum rotation speed in radians per second
        self.min_rotation_speed = rospy.get_param("~min_rotation_speed", 0.5)

        # The x threshold (% of image width) indicates how far off-center
        # the ROI needs to be in the x-direction before we react
        self.x_threshold = rospy.get_param("~x_threshold", 0.1)

        # How far away from the goal distance (in meters) before the robot reacts
        self.z_threshold = rospy.get_param("~z_threshold", 0.05)

        # The maximum distance a target can be from the robot for us to track
        self.max_z = rospy.get_param("~max_z", 1.6)

        # The minimum distance to respond to
        self.min_z = rospy.get_param("~min_z", 0.5)

        # The goal distance (in meters) to keep between the robot and the person
        self.goal_z = rospy.get_param("~goal_z", 0.75)

        # How much do we weight the goal distance (z) when making a movement
        self.z_scale = rospy.get_param("~z_scale", 0.5)

        # How much do we weight (left/right) of the person when making a movement
        self.x_scale = rospy.get_param("~x_scale", 2.0)

        # Slow down factor when stopping
        self.slow_down_factor = rospy.get_param("~slow_down_factor", 0.8)

        # Initialize the global ROI
        self.roi = RegionOfInterest()

        # Publisher to control the robot's movement
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)

        # Initialize the movement command
        self.move_cmd = Twist()

        # Get a lock for updating the self.move_cmd values
        self.lock = thread.allocate_lock()

        # We will get the image width and height from the camera_info topic
        self.image_width = 0
        self.image_height = 0

        # We need cv_bridge to convert the ROS depth image to an OpenCV array
        self.cv_bridge = CvBridge()
        self.depth_array = None

        # Set flag to indicate when the ROI stops updating
        self.target_visible = False

        # Wait for the camera_info topic to become available
        rospy.loginfo("Waiting for camera_info topic...")

        rospy.wait_for_message('camera_info', CameraInfo)

        # Subscribe to the camera_info topic to get the image width and height
        rospy.Subscriber('camera_info', CameraInfo, self.get_camera_info)

        # Wait until we actually have the camera data
        while self.image_width == 0 or self.image_height == 0:
            rospy.sleep(1)

        # Wait for the depth image to become available
        rospy.loginfo("Waiting for depth_image topic...")

        rospy.wait_for_message('depth_image', Image)

        # Subscribe to the depth image
        self.depth_subscriber = rospy.Subscriber("depth_image",
                                                 Image,
                                                 self.convert_depth_image,
                                                 queue_size=1)

        # Subscribe to the ROI topic and set the callback to update the robot's motion
        rospy.Subscriber('roi', RegionOfInterest, self.set_cmd_vel)

        # Wait until we have an ROI to follow
        rospy.loginfo("Waiting for an ROI to track...")

        rospy.wait_for_message('roi', RegionOfInterest)

        rospy.loginfo("ROI messages detected. Starting follower...")

        # Begin the tracking loop
        while not rospy.is_shutdown():
            # Acquire a lock while we're setting the robot speeds
            self.lock.acquire()

            try:
                if not self.target_visible:
                    # If the target is not visible, stop the robot smoothly
                    self.move_cmd.linear.x *= self.slow_down_factor
                    self.move_cmd.angular.z *= self.slow_down_factor
                else:
                    # Reset the flag to False by default
                    self.target_visible = False

                # Send the Twist command to the robot
                self.cmd_vel_pub.publish(self.move_cmd)

            finally:
                # Release the lock
                self.lock.release()

            # Sleep for 1/self.rate seconds
            r.sleep()

    def set_cmd_vel(self, msg):
        # Acquire a lock while we're setting the robot speeds
        self.lock.acquire()

        try:
            # If the ROI has a width or height of 0, we have lost the target
            if msg.width == 0 or msg.height == 0:
                self.target_visible = False
                return
            else:
                self.target_visible = True

            self.roi = msg

            # Compute the displacement of the ROI from the center of the image
            target_offset_x = msg.x_offset + msg.width / 2 - self.image_width / 2

            try:
                percent_offset_x = float(target_offset_x) / (
                    float(self.image_width) / 2.0)
            except:
                percent_offset_x = 0

            # Rotate the robot only if the displacement of the target exceeds the threshold
            if abs(percent_offset_x) > self.x_threshold:
                # Set the rotation speed proportional to the displacement of the target
                speed = percent_offset_x * self.x_scale
                self.move_cmd.angular.z = -copysign(
                    max(self.min_rotation_speed,
                        min(self.max_rotation_speed, abs(speed))), speed)
            else:
                self.move_cmd.angular.z = 0

            # Now compute the depth component

            # Initialize a few depth variables
            n_z = sum_z = mean_z = 0

            # Shrink the ROI slightly to avoid the target boundaries
            scaled_width = int(self.roi.width * self.scale_roi)
            scaled_height = int(self.roi.height * self.scale_roi)

            # Get the min/max x and y values from the scaled ROI
            min_x = int(self.roi.x_offset + self.roi.width *
                        (1.0 - self.scale_roi) / 2.0)
            max_x = min_x + scaled_width
            min_y = int(self.roi.y_offset + self.roi.height *
                        (1.0 - self.scale_roi) / 2.0)
            max_y = min_y + scaled_height

            # Get the average depth value over the ROI
            for x in range(min_x, max_x):
                for y in range(min_y, max_y):
                    try:
                        # Get a depth value in millimeters
                        z = self.depth_array[y, x]

                        # Convert to meters
                        z /= 1000.0

                    except:
                        # It seems to work best if we convert exceptions to 0
                        z = 0

                    # Check for values outside max range
                    if z > self.max_z:
                        continue

                    # Increment the sum and count
                    sum_z = sum_z + z
                    n_z += 1

            # Stop the robot's forward/backward motion by default
            linear_x = 0

            # If we have depth values...
            if n_z:
                mean_z = float(sum_z) / n_z

                # Don't let the mean fall below the minimum reliable range
                mean_z = max(self.min_z, mean_z)

                # Check the mean against the minimum range
                if mean_z > self.min_z:
                    # Check the max range and goal threshold
                    if mean_z < self.max_z and (abs(mean_z - self.goal_z) >
                                                self.z_threshold):
                        speed = (mean_z - self.goal_z) * self.z_scale
                        linear_x = copysign(
                            min(self.max_linear_speed,
                                max(self.min_linear_speed, abs(speed))), speed)

            if linear_x == 0:
                # Stop the robot smoothly
                self.move_cmd.linear.x *= self.slow_down_factor
            else:
                self.move_cmd.linear.x = linear_x

        finally:
            # Release the lock
            self.lock.release()

    def convert_depth_image(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            # The depth image is a single-channel float32 image
            depth_image = self.cv_bridge.imgmsg_to_cv(ros_image, "32FC1")
        except CvBridgeError, e:
            print e

        # Convert the depth image to a Numpy array
        self.depth_array = np.array(depth_image, dtype=np.float32)
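The per-pixel ROI loop in set_cmd_vel is O(width * height) in pure Python. A minimal numpy sketch (an assumption, not the original implementation) of the same clamped depth average; unlike the loop above, it also drops zero/NaN readings instead of averaging them in:

import numpy as np

def mean_roi_depth(depth_array, min_x, max_x, min_y, max_y, max_z=1.6):
    # depth_array: float32 depth image in millimeters, as produced by
    # convert_depth_image above.
    roi = depth_array[min_y:max_y, min_x:max_x] / 1000.0   # mm -> m
    valid = np.isfinite(roi) & (roi > 0) & (roi <= max_z)
    if not valid.any():
        return 0.0
    return float(roi[valid].mean())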
Ejemplo n.º 48
0
class ImageRenderer:
    def __init__(self, ns):
        self.lock = threading.Lock()
        self.image_time = rospy.Time(0)
        self.info_time = rospy.Time(0)
        self.image = None
        self.interval = 0
        self.features = None
        self.bridge = CvBridge()
        self.ns = ns
        self.max_interval = rospy.get_param('filter_intervals/min_duration')

        self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,
                                0.30,
                                1.5,
                                thickness=2)
        self.font1 = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,
                                 0.10,
                                 1,
                                 thickness=1)
        self.info_sub = rospy.Subscriber(ns + '/camera_info', CameraInfo,
                                         self.info_cb)
        self.image_sub = rospy.Subscriber(ns + '/image_throttle', Image,
                                          self.image_cb)
        self.interval_sub = rospy.Subscriber(ns + '/settled_interval',
                                             Interval, self.interval_cb)
        self.features_sub = rospy.Subscriber(ns + '/features',
                                             CalibrationPattern,
                                             self.features_cb)

    def info_cb(self, msg):
        with self.lock:
            self.info_time = rospy.Time.now()

    def image_cb(self, msg):
        with self.lock:
            self.image_time = rospy.Time.now()
            self.image = msg

    def interval_cb(self, msg):
        with self.lock:
            self.interval = (msg.end - msg.start).to_sec()

    def features_cb(self, msg):
        with self.lock:
            self.features = msg

    def render(self, window):
        with self.lock:
            if self.image and self.image_time + rospy.Duration(
                    8.0) > rospy.Time.now(
                    ) and self.info_time + rospy.Duration(
                        8.0) > rospy.Time.now():
                cv.Resize(self.bridge.imgmsg_to_cv(self.image, 'rgb8'), window)
                # render progress bar
                interval = min(1, (self.interval / self.max_interval))
                cv.Rectangle(
                    window,
                    (int(0.05 * window.width), int(window.height * 0.9)),
                    (int(interval * window.width * 0.9 + 0.05 * window.width),
                     int(window.height * 0.95)),
                    (0, interval * 255, (1 - interval) * 255),
                    thickness=-1)
                cv.Rectangle(
                    window,
                    (int(0.05 * window.width), int(window.height * 0.9)),
                    (int(window.width * 0.9 + 0.05 * window.width),
                     int(window.height * 0.95)),
                    (0, interval * 255, (1 - interval) * 255))
                cv.PutText(window, self.ns,
                           (int(window.width * .05), int(window.height * 0.1)),
                           self.font1, (0, 0, 255))
                if self.features and self.features.header.stamp + rospy.Duration(
                        4.0) > self.image.header.stamp:
                    w_scaling = float(window.width) / self.image.width
                    h_scaling = float(window.height) / self.image.height
                    if self.features.success:
                        corner_color = (0, 255, 0)
                        for cur_pt in self.features.image_points:
                            cv.Circle(window, (int(cur_pt.x * w_scaling),
                                               int(cur_pt.y * h_scaling)),
                                      int(w_scaling * 5), corner_color)
                    else:
                        window = add_text(window,
                                          ["Could not detect", "checkerboard"],
                                          False)
                else:
                    window = add_text(
                        window, ["Timed out waiting", "for checkerboard"],
                        False)

            else:
                # Generate random white noise (for fun)
                noise = numpy.random.rand(window.height, window.width) * 256
                numpy.asarray(window)[:, :, 0] = noise
                numpy.asarray(window)[:, :, 1] = noise
                numpy.asarray(window)[:, :, 2] = noise
                cv.PutText(window, self.ns,
                           (int(window.width * .05), int(window.height * .95)),
                           self.font, (0, 0, 255))
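A small standalone numpy sketch (an assumption) of the white-noise fallback in the else branch above: the same noise plane is written to all three channels, which is why the placeholder renders as grayscale static.

import numpy as np

noise = (np.random.rand(120, 160) * 256).astype(np.uint8)
frame = np.dstack([noise, noise, noise])   # identical noise in each channel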
Ejemplo n.º 49
0
class pipe_task:
    def __init__(self):
        self.cvbridge = CvBridge()

        self.cam_sub_ = rospy.Subscriber('camera/image_rect_color', Image,
                                         self.ImageCB)
        self.cam_info_sub_ = rospy.Subscriber('camera/camera_info', CameraInfo,
                                              self.CameraInfoCB)

        self.cameraInfo_initialized_ = False
        self.pipe_length_total_ = 0.305
        self.pipe_length_little_ = 0.0
        self.find_times_ = 0
        self.find_times_limit_ = 30

        self.pipePose_pub_ = rospy.Publisher('/pipe_pose', pipe_pose)

    def CameraInfoCB(self, info_msg):
        if not self.cameraInfo_initialized_:
            self.camera_info_ = info_msg
            self.cameraInfo_initialized_ = True

    def ImageCB(self, image_msg):
        if not self.cameraInfo_initialized_:
            return

        try:
            iplimg = self.cvbridge.imgmsg_to_cv(image_msg, image_msg.encoding)
            cvimg = np.array(iplimg, dtype=np.uint8)
        except CvBridgeError, e:
            rospy.logerr('cvbridge exception: ' + str(e))
            return

        hsv_image = cv2.cvtColor(cvimg, cv2.cv.CV_BGR2HSV)
        color_min = np.array([20, 10, 20], np.uint8)
        color_max = np.array([90, 50, 100], np.uint8)

        pipe_orange_threshold = cv2.inRange(hsv_image, color_min, color_max)

        opening_size = 1
        opening_element = cv2.getStructuringElement(
            cv2.MORPH_RECT, (2 * opening_size + 1, 2 * opening_size + 1))
        closing_size = 5
        closing_element = cv2.getStructuringElement(
            cv2.MORPH_RECT, (2 * closing_size + 1, 2 * closing_size + 1))

        pipe_orange_threshold = cv2.morphologyEx(pipe_orange_threshold,
                                                 cv2.MORPH_OPEN,
                                                 opening_element)
        pipe_orange_threshold = cv2.morphologyEx(pipe_orange_threshold,
                                                 cv2.MORPH_CLOSE,
                                                 closing_element)

        cv2.imshow('raw_threshold', pipe_orange_threshold)

        white_pixel_num = np.count_nonzero(pipe_orange_threshold)

        copy_for_contour = pipe_orange_threshold.copy()

        contours, _ = cv2.findContours(copy_for_contour, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        for i in range(1, len(contours)):
            cv2.line(pipe_orange_threshold, tuple(contours[i - 1][0][0]),
                     tuple(contours[i][0][0]), 255, 2)

        cv2.imshow('pipe_outer', pipe_orange_threshold)

        copy_for_contour2 = pipe_orange_threshold.copy()
        single_contour, _ = cv2.findContours(copy_for_contour2,
                                             cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)

        pipe_skeleton_pose = pipe_pose()
        #pipe_skeleton_pose.header = iplimg.header

        if white_pixel_num > 200:
            minArea_rectangle = cv2.minAreaRect(single_contour[0])
            color_rect = cv2.cvtColor(pipe_orange_threshold,
                                      cv2.cv.CV_GRAY2BGR)

            rect_points = cv2.cv.BoxPoints(minArea_rectangle)
            for j in range(4):
                pt1 = tuple(np.int32(rect_points[j]))
                pt2 = tuple(np.int32(rect_points[(j + 1) % 4]))
                cv2.line(color_rect, pt1, pt2, (255, 0, 0), 3, 8)

            pipe_skeleton_pose.detect_pipe, lowest_pt, second_lowest_pt = self.Calc_pose(
                rect_points, pipe_skeleton_pose)
            pt1 = tuple(np.int32(lowest_pt))
            pt2 = tuple(np.int32(second_lowest_pt))
            cv2.line(color_rect, pt1, pt2, (0, 0, 255), 3, 8)

            cv2.imshow('color_rect', color_rect)

        if pipe_skeleton_pose.detect_pipe:
            self.find_times_ += 1
        else:
            self.find_times_ -= 1

        pipe_skeleton_pose.detection_stable = self.find_times_ > self.find_times_limit_

        self.pipePose_pub_.publish(pipe_skeleton_pose)

        cv2.imshow('orange_threshold', pipe_orange_threshold)
        cv2.imshow('pipe_outer', pipe_orange_threshold)
        cv2.waitKey(2)
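A standalone sketch of the threshold -> open -> close pipeline used in ImageCB above (the input file name is hypothetical; the kernel sizes match opening_size=1 and closing_size=5):

import cv2
import numpy as np

img = cv2.imread('pipe.png')                    # hypothetical input image
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([20, 10, 20], np.uint8),
                   np.array([90, 50, 100], np.uint8))
open_k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
close_k = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, open_k)     # drop speckle noise
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, close_k)   # fill small holes
cv2.imwrite('pipe_mask.png', mask)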
Ejemplo n.º 50
0
def callback(data):
    global imagePub
    global markerPub

    #Convert image to CV image
    bridge = CvBridge()
    cv_image = bridge.imgmsg_to_cv(data, "mono8")

    # Convert the image to a Numpy array since most cv2 functions require Numpy arrays.
    searched = np.array(cv_image, dtype=np.uint8)

    #Create a copy for sobel comparison
    searchedCopy = copy.copy(searched)
    searchedCopy[searchedCopy == 255] = 0

    #Take Sobel Derivatives
    sobel_x = np.uint8(
        np.absolute(cv2.Sobel(searched, cv2.CV_16S, 1, 0, ksize=1)))
    sobel_y = np.uint8(
        np.absolute(cv2.Sobel(searched, cv2.CV_16S, 0, 1, ksize=1)))
    sobel_xy = cv2.addWeighted(sobel_x, 0.5, sobel_y, 0.5, 0)
    sobel_x_base = np.uint8(
        np.absolute(cv2.Sobel(searchedCopy, cv2.CV_16S, 1, 0, ksize=1)))
    sobel_y_base = np.uint8(
        np.absolute(cv2.Sobel(searchedCopy, cv2.CV_16S, 0, 1, ksize=1)))
    sobel_xy_base = cv2.addWeighted(sobel_x_base, 0.5, sobel_y_base, 0.5, 0)
    ret, sobel_xy_thres = cv2.threshold(sobel_xy, 0, 255, cv2.THRESH_BINARY)
    ret, sobel_xy_base_thres = cv2.threshold(sobel_xy_base, 0, 255,
                                             cv2.THRESH_BINARY)

    #Subtract Comparisons for frontiers only
    sobelCombined = sobel_xy_base_thres - sobel_xy_thres
    ret, sobelCombined_thresh = cv2.threshold(sobelCombined, 0, 255,
                                              cv2.THRESH_BINARY)

    #Dilate Combined
    dialate = cv2.dilate(sobelCombined_thresh, np.ones((3, 3), 'uint8'))

    #Find Contour Centroids
    dialateCopy = copy.copy(dialate)
    contours, hier = cv2.findContours(dialateCopy, cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)
    centroids = []
    colors = []
    for i in contours:
        if len(i) > 10:
            moments = cv2.moments(i)
            cx = int(moments['m10'] / moments['m00'])
            cy = int(moments['m01'] / moments['m00'])
            centroidPoint = Point()
            centroidColor = ColorRGBA()
            centroidPoint.x = cx / 20.0 - 20
            centroidPoint.y = cy / 20.0 - 32.8
            centroidPoint.z = 0
            #centroidColor = (0,0,1,1)
            centroidColor.r = 0
            centroidColor.g = 0
            centroidColor.b = 1
            centroidColor.a = 1
            centroids.append(centroidPoint)
            colors.append(centroidColor)

    #code.interact(local=locals())
    #Display Image in PLT Window
    '''plt.subplot(1,5,1),plt.imshow(searched,cmap='gray'),plt.title('Searched Space'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,2),plt.imshow(sobel_xy_thres,cmap='gray'),plt.title('Sobel All'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,3),plt.imshow(sobel_xy_base_thres,cmap='gray'),plt.title('Sobel All'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,4),plt.imshow(sobelCombined,cmap='gray'),plt.title('Frontiers Map'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,5),plt.imshow(dialate,cmap='gray'),plt.title('Dialate'),plt.xticks([]), plt.yticks([])
	plt.draw()
	plt.pause(0.001)'''

    #Convert the image back to mat format and publish as ROS image
    frontiers = cv.fromarray(dialate)
    imagePub.publish(bridge.cv_to_imgmsg(frontiers, "mono8"))

    #Publish centroids of frontiers as marker
    markerMsg = Marker()
    markerMsg.header.frame_id = "/map"
    markerMsg.header.stamp = rospy.Time.now()
    markerMsg.ns = ""
    markerMsg.id = 0
    markerMsg.type = 7  #Sphere List
    markerMsg.action = 0  #Add
    markerMsg.scale.x = 0.5
    markerMsg.scale.y = 0.5
    markerMsg.scale.z = 0.5
    markerMsg.points = centroids
    markerMsg.colors = colors
    markerPub.publish(markerMsg)
    print 'yes'
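The centroid conversion above hard-codes cx / 20.0 - 20 and cy / 20.0 - 32.8. A hedged sketch of the general cell-to-world form this implies, assuming a 0.05 m/cell grid (20 cells per meter) whose cell (0, 0) sits at world (-20.0, -32.8):

def cell_to_world(cx, cy, resolution=0.05, origin=(-20.0, -32.8)):
    # resolution: meters per grid cell; origin: world coords of cell (0, 0)
    return (cx * resolution + origin[0], cy * resolution + origin[1])

print(cell_to_world(400, 656))   # (0.0, 0.0) under these assumptions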
Ejemplo n.º 51
0
class HumanDetector(AbstractVisionModule):
    def __init__(self,
                 host,
                 port,
                 freq,
                 source="camera/depth/image_raw",
                 transform=[[0, 0, 0.63]]):
        super(HumanDetector, self).__init__(host, port)
        self.logger = logging.getLogger("Borg.Brain.Vision.HumanDetector")
        print "Human detection running"
        self.bridge = CvBridge()
        self.cv_image = None
        self.FPS_counter = 0
        self.FPS_time = time.time()
        self.camera_to_base_transform = transform
        self.source = source
        self.freq = freq

        #Parameters
        self.__body_line = 120
        self.__leg_line = 320
        self.__line_height = 10
        self.__max_depth = 2000
        self.__depth_search_limit = 1000
        self.__depth_step = 500
        self.__min_width_body = 60
        self.__max_width_body = 250
        self.__min_width_leg = 15
        self.__max_width_leg = 100
        self.__search_depth_leg = 500

        self.__hori_fov = 57.0  # Horizontal field of view
        self.__vert_fov = 43.0  # Vertical
        self.__hori_res = 640.0  # Horizontal resolution
        self.__vert_res = 480.0  # Vertical

        #self.vid_mem_reader = util.vidmemreader.VidMemReader([self.source])
        self.Operators = []
        rospy.Subscriber(source, Image, self.newImageCB)
        print "Subscribing to %s" % source

    def train(self):
        pass

    def run(self):
        """Start loop which simple sends a random fake observation every 5 secondes."""
        t = Ticker(self.freq)
        while True:
            t.tick()
            #Name, used to identify the observation in memory:
            Operators = self.Operators
            if Operators:
                self.add_property('name', 'HumanDetector')
                self.add_property('Operators', Operators)
                self.store_observation()

            self.update()

    def computeFPS(self):
        self.FPS_counter += 1
        if (time.time() - self.FPS_time > 1):
            print "FPS: ", self.FPS_counter
            self.FPS_time = time.time()
            self.FPS_counter = 0

    #Filter depth in the image
    def filterDepth(self, depth_image, minimum, maximum):
        depth_image[depth_image > maximum] = 0
        depth_image[depth_image < minimum] = 0
        depth_image[depth_image > minimum] = 50000
        return depth_image

    #Select image line from image
    def selectLine(self, image, line, height):
        return image[line:line + height, 0:640]

    #Remove noise from the image.
    #If there are 1 or more zeros in one column -> Entire column is set to zero
    def removeNoise(self, image):
        image[:, sum(image == 0) >= 1] = 0
        return image

    #Remove blobs from the list
    def removeBlobsFromList(self, Blobs, key, comparison, value):
        if Blobs is not None:
            if comparison == '<':
                return [x for x in Blobs if x[str(key)] > value]
            elif comparison == '>':
                return [x for x in Blobs if x[str(key)] < value]
            else:
                print "Wrong comparison"
        else:
            return []

    #Compute the depth value of the blob. Remove outliers
    def computeDepth(self, image_depth, x, size):
        temp = np.copy(image_depth[0:10, x:x + size])
        return np.mean(temp[abs(temp - np.mean(temp)) < np.std(temp)])

    #Sort the list with blobs
    def sortBlobs(self, blobs):
        return sorted(blobs, key=lambda k: k['width'], reverse=True)

    #Detect blobs in the image
    def detectBlobs(self, image, image_depth):
        detections = np.argwhere(image[0, :])
        if np.size(detections) > 5:
            blobs = []
            start_x = detections[0]
            for i in range(0, np.size(detections) - 5):
                if detections[i + 1] - detections[i] > 2:
                    blobs.append({
                        'x':
                        int(start_x),
                        'width':
                        int(detections[i] - start_x),
                        'depth':
                        self.computeDepth(image_depth, int(start_x),
                                          int(detections[i] - start_x))
                    })
                    start_x = detections[i + 1]

            blobs.append({
                'x':
                int(start_x),
                'width':
                int(detections[i + 1] - start_x),
                'depth':
                self.computeDepth(image_depth, int(start_x),
                                  int(detections[i + 1] - start_x))
            })

            return self.sortBlobs(blobs)
        return None

        #Return a list with the blobs

    def detectOperator(self, body, legs):
        if len(legs) >= 2:
            legs = self.sortBlobs(legs)
            if abs(legs[0]['width'] - legs[1]['width']) < 20:
                return True
            else:
                #print "Legs are different"
                return False

        else:
            #print "Not enough possible legs detected"
            return False

    def newImageCB(self, data):
        self.computeFPS()
        Operators = []
        #Create an array with depth information from the image
        temp = self.bridge.imgmsg_to_cv(data)
        image_temp = np.asarray(temp[:, :])

        depth = 500

        while depth < self.__max_depth:
            ##########################
            # Body Detection         #
            ##########################
            #Make a copy of the original image
            image = np.copy(image_temp)
            #Select a line at x = self.__body_line with a height of self.__line_height
            image_body = self.selectLine(image, self.__body_line,
                                         self.__line_height)
            #Create an array with the depth values
            image_body_depth = np.copy(image_body)
            #Filter on depth
            image_body = self.filterDepth(image_body, depth,
                                          depth + self.__depth_search_limit)
            #Remove noise from the image
            image_body = self.removeNoise(image_body)
            #Detect blobs in image
            body_blobs = self.detectBlobs(image_body, image_body_depth)

            #Remove small blobs
            body_blobs = self.removeBlobsFromList(body_blobs, 'width', '<',
                                                  self.__min_width_body)

            if depth >= 4000:
                body_blobs = self.removeBlobsFromList(body_blobs, 'width', '>',
                                                      150)
            else:
                body_blobs = self.removeBlobsFromList(body_blobs, 'width', '>',
                                                      self.__max_width_body)

            #If there are possible bodies detected
            if body_blobs is not None:
                #Make a copy of the original image
                #image = np.copy(image_temp)
                #Select a line for detecting legs
                image_legs = self.selectLine(image, self.__leg_line,
                                             self.__line_height)
                #Create an array with the depth values
                image_legs_depth = np.copy(image_legs)

                for body_blob in body_blobs:
                    #Minimum and maximum depth values for detecting legs
                    (min_depth, max_depth) = (body_blob['depth'] -
                                              self.__search_depth_leg,
                                              body_blob['depth'] +
                                              self.__search_depth_leg)
                    #Minimum and maximum position of legs
                    (min_leg_pos,
                     max_leg_pos) = (body_blob['x'] - 50,
                                     body_blob['x'] + body_blob['width'] + 50)
                    #Filter on depth
                    image_legs = self.filterDepth(image_legs, min_depth,
                                                  max_depth)
                    #Remove noise
                    image_legs = self.removeNoise(image_legs)

                    #Detect blobs in image
                    leg_blobs = self.detectBlobs(image_legs, image_legs_depth)

                    leg_blobs = self.removeBlobsFromList(
                        leg_blobs, 'x', '<', min_leg_pos)
                    leg_blobs = self.removeBlobsFromList(
                        leg_blobs, 'x', '>', max_leg_pos)
                    leg_blobs = self.removeBlobsFromList(
                        leg_blobs, 'width', '<', self.__min_width_leg)
                    leg_blobs = self.removeBlobsFromList(
                        leg_blobs, 'width', '>', self.__max_width_leg)

                    if self.detectOperator(body_blob, leg_blobs):
                        Operator = {
                            'x': (body_blob['x'] + (0.5 * body_blob['width'])),
                            'depth': body_blob['depth'],
                            'width': body_blob['width'],
                            'time': time.time()
                        }
                        Match = False

                        #Convert x, y values to real-world x, y values (in meters)
                        depth_dist = body_blob['depth'] / 1000

                        #Angle difference per pixel
                        hori_pp = self.__hori_fov / self.__hori_res
                        vert_pp = self.__vert_fov / self.__vert_res

                        #Locate x
                        x_from_center = self.__hori_res / 2.0 - body_blob['x']
                        x_alpha = (x_from_center * hori_pp) / 180.0 * math.pi
                        x_actual = depth_dist * math.sin(x_alpha)

                        #Locate y
                        y_from_center = self.__vert_res / 2.0 - self.__body_line
                        y_alpha = y_from_center * vert_pp / 180.0 * math.pi
                        y_actual = depth_dist * math.sin(y_alpha)

                        #Transform to base_link
                        Operator[
                            'y_actual'] = x_actual + self.camera_to_base_transform[
                                0][1]
                        Operator[
                            'z_actual'] = y_actual + self.camera_to_base_transform[
                                0][2]
                        Operator['x_actual'] = math.sqrt(
                            Operator['y_actual'] * Operator['y_actual'] +
                            depth_dist * depth_dist)

                        for i in range(0, len(Operators)):
                            if abs(Operators[i]['x'] -
                                   (body_blob['x'] +
                                    (0.5 * body_blob['width']))) < 30:
                                Match = True

                        if not Match:
                            Operators.append(Operator)

            depth += self.__depth_step

        #image_temp[self.__body_line:(self.__body_line + self.__line_height),0:640] = 10000
        #image_temp[self.__leg_line:(self.__leg_line + self.__line_height),0:640] = 10000
        #cv2.imshow("Human detector 1.0", image_temp)
        #cv2.waitKey(3)

        self.Operators = Operators
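A minimal standalone sketch (an assumption) of the pixel-to-meters conversion used in newImageCB above: a pixel's horizontal offset from the image center becomes an angle via the per-pixel field of view, then is projected with the measured depth.

import math

def pixel_to_lateral_m(x_pixel, depth_mm, fov_deg=57.0, res=640.0):
    per_pixel = fov_deg / res                        # degrees per pixel
    alpha = math.radians((res / 2.0 - x_pixel) * per_pixel)
    return (depth_mm / 1000.0) * math.sin(alpha)

print(pixel_to_lateral_m(100, 2000))   # about 0.67 m left of center at 2 m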
Ejemplo n.º 52
0
class anaglyph:
    def __init__(self):
        self.pub_img = rospy.Publisher('anaglyph_img', Image)
        self.sub_depth = rospy.Subscriber('/kinect/depth/image_raw',
                                          Image,
                                          self.subscribe_callback,
                                          queue_size=1)
        self.sub_rgb = rospy.Subscriber('/kinect/rgb/image_raw',
                                        Image,
                                        self.subscribe_callback,
                                        queue_size=1)

        self.bridge = CvBridge()

        self.img_anaglyph = np.zeros((480, 640, 3), dtype=np.uint8)
        self.img_rgb = np.zeros((480, 640, 3), dtype=np.uint8)

        self.depth = np.zeros((480, 640), dtype=np.uint8)
        self.img_depth = np.zeros((480, 640), dtype=np.uint8)
        #		self.r_offset  = np.zeros((480, 640), dtype=np.uint8)
        #		self.g_offset  = np.zeros((480, 640), dtype=np.uint8)
        #		self.b_offset  = np.zeros((480, 640), dtype=np.uint8)

        # Store offsets for green/magenta channels in one matrix
        self.offset_indices = np.zeros((480, 640, 2), dtype=np.uint16)

        self.index_basis_x = np.zeros((480, 640), dtype=np.uint16)
        self.index_basis_y = np.zeros((480, 640), dtype=np.uint16)

        # Store index base for x and y dimensions -- used to vectorize anaglyph operations
        for i in range(0, 640):
            self.index_basis_y[:, i] = i

        for i in range(0, 480):
            self.index_basis_x[i, :] = i

        self.processing = 0

        self.current_depth_time = time.time()
        self.current_rgb_time = time.time()

        self.timer_1 = time.time()
        self.timer_2 = time.time()

        self.rotation = np.array([[0.999979, 0.006497, -0.000801],
                                  [-0.006498, 0.999978, -0.001054],
                                  [0.000794, 0.001059, 0.999999]])

        #		self.transform = np.array([[526.6, 3.752, -201.2, 1.604e+04],
        #					[-3.215, 526.6, -14.29, 2.429e+04],
        #					[0.000794, 0.001059, -0.05648, 584.5]])

        print 'Done initializing'

    def subscribe_callback(self, data):
        # Assign incoming Kinect images to depth/rgb images and process data

        #Block data if it is currently being processed
        if self.processing == 0:

            #Incoming data:
            #Depth
            if data.encoding == 'mono8':
                self.img_depth = np.asarray(self.bridge.imgmsg_to_cv(data))
                self.current_depth_time = time.time()
            #RGB
            elif data.encoding == 'rgb8':
                self.img_rgb = np.asarray(self.bridge.imgmsg_to_cv(data))
                self.current_rgb_time = time.time()

            #If depth+rgb are both updated generate new anaglyph and publish output
            if np.abs(self.current_depth_time - self.current_rgb_time) < 0.1:
                self.generate_anaglyph()
                #				self.pub_img.publish( self.bridge.cv_to_imgmsg(self.img_depth))
                self.pub_img.publish(
                    self.bridge.cv_to_imgmsg(self.img_anaglyph))

    def generate_anaglyph(self):
        self.processing = 1
        timer = time.time()

        #		mask = (self.img_depth > 0) & (self.img_depth < 255)
        mask = np.logical_and((self.img_depth > 0), (self.img_depth < 255))
        #		self.depth = (5 - (self.img_depth*mask/4))*mask
        mod_depth = 2.5
        max_depth = (self.img_depth * mask).max() / mod_depth
        self.depth = (max_depth - (self.img_depth / mod_depth)) * mask
        #		self.depth = np.maximum( (max_depth -(self.img_depth/mod_depth)) , 0) * mask

        #		print 'Min depth: ' + str(self.depth.min())
        #		print 'Max depth: ' + str(self.depth.max())
        #		print 'Min depth: ' + str((self.img_depth*mask).min())
        #		print 'Max depth: ' + str((self.img_depth*mask).max())

        # Find length to prevent indices from being higher than the image resolution
        max_length = self.img_rgb.shape[1] - 1

        #Find offset green/magenta indices for new image (based on depth)
        #		Green
        self.offset_indices[:, :, 0] = np.maximum(
            np.minimum(self.index_basis_y + self.depth, max_length), 0)
        #		Magenta
        self.offset_indices[:, :, 1] = np.maximum(
            np.minimum(self.index_basis_y - self.depth, max_length), 0)

        #Do lookup based on offset indices
        self.img_anaglyph[:, :, 0] = self.img_rgb[self.index_basis_x,
                                                  self.offset_indices[:, :, 1],
                                                  2]  #red
        self.img_anaglyph[:, :, 2] = self.img_rgb[self.index_basis_x,
                                                  self.offset_indices[:, :, 1],
                                                  0]  #blue
        self.img_anaglyph[:, :, 1] = self.img_rgb[self.index_basis_x,
                                                  self.offset_indices[:, :, 0],
                                                  1]  #green
        #		self.r_offset = self.img_rgb[self.index_basis_x, self.offset_indices[:,:,1], 2] #red
        #		self.b_offset = self.img_rgb[self.index_basis_x, self.offset_indices[:,:,1], 0] #blue
        #		self.g_offset = self.img_rgb[self.index_basis_x, self.offset_indices[:,:,0], 1] #green

        print 'Framerate: ' + str(int(1 / (time.time() - timer)))

        #		cv.MixChannels([self.r_offset, self.g_offset, self.b_offset], [self.img_anaglyph], [(0,0), (1,1), (2,2)])

        self.processing = 0
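A toy sketch (assuming only numpy; the arrays are fake) of the fancy-indexing trick generate_anaglyph uses: each output pixel samples the source image at a column shifted by the local disparity, clamped to the image width.

import numpy as np

rgb = np.arange(12, dtype=np.uint8).reshape(3, 4)   # fake single-channel image
depth = np.array([[0, 1, 1, 0]] * 3)                # fake per-pixel disparity
rows, cols = np.indices(rgb.shape)
cols = np.clip(cols + depth, 0, rgb.shape[1] - 1)   # shift and clamp columns
print(rgb[rows, cols])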
Ejemplo n.º 53
0
class kinectDepth:
    def __init__(self, targetFrame=None, visual=False, tfListener=None):
        # ROS publisher for data points
        self.publisher = rospy.Publisher('visualization_marker', Marker)
        # List of features we are tracking
        self.clusterPoints = None
        self.nonClusterPoints = None

        self.dbscan = DBSCAN(eps=0.12, min_samples=10)
        self.cloudTime = time.time()
        self.pointCloud = None
        self.visual = visual
        self.targetFrame = targetFrame
        self.updateNumber = 0

        self.points3D = None

        if tfListener is None:
            self.transformer = tf.TransformListener()
        else:
            self.transformer = tfListener

        self.bridge = CvBridge()
        self.imageData = None

        # RGB Camera
        self.rgbCameraFrame = None
        self.cameraWidth = None
        self.cameraHeight = None
        self.pinholeCamera = None

        # Gripper
        self.lGripperPosition = None
        self.lGripperRotation = None
        self.lGripperTransposeMatrix = None
        self.lGripX = None
        self.lGripY = None
        self.micLocation = None
        self.grips = []
        # Spoon
        self.spoonX = None
        self.spoonY = None
        self.spoon = None

        self.targetTrans = None
        self.targetRot = None
        self.gripperTrans = None
        self.gripperRot = None

        self.cloudSub = rospy.Subscriber('/head_mount_kinect/depth_registered/points', PointCloud2, self.cloudCallback)
        print 'Connected to Kinect depth'
        # self.imageSub = rospy.Subscriber('/head_mount_kinect/rgb_lowres/image', Image, self.imageCallback)
        # print 'Connected to Kinect image'
        self.cameraSub = rospy.Subscriber('/head_mount_kinect/depth_lowres/camera_info', CameraInfo, self.cameraRGBInfoCallback)
        print 'Connected to Kinect camera info'

    def getAllRecentPoints(self):
        # print 'Time between read calls:', time.time() - self.cloudTime
        # startTime = time.time()

        self.transformer.waitForTransform(self.targetFrame, self.rgbCameraFrame, rospy.Time(0), rospy.Duration(5))
        try:
            self.targetTrans, self.targetRot = self.transformer.lookupTransform(self.targetFrame, self.rgbCameraFrame, rospy.Time(0))
        except tf.ExtrapolationException:
            print 'TF Target Error!'
            pass

        self.transformer.waitForTransform('/l_gripper_tool_frame', self.targetFrame, rospy.Time(0), rospy.Duration(5))
        try:
            self.gripperTrans, self.gripperRot = self.transformer.lookupTransform('/l_gripper_tool_frame', self.targetFrame, rospy.Time(0))
        except tf.ExtrapolationException:
            print 'TF Gripper Error!'
            pass

        # print 'Read computation time:', time.time() - startTime
        # self.cloudTime = time.time()
        return self.points3D, self.imageData, self.micLocation, self.spoon, [self.targetTrans, self.targetRot], [self.gripperTrans, self.gripperRot]


        # self.transformer.waitForTransform(self.targetFrame, self.rgbCameraFrame, rospy.Time(0), rospy.Duration(5))
        # try:
        #     targetTrans, targetRot = self.transformer.lookupTransform(self.targetFrame, self.rgbCameraFrame, rospy.Time(0))
        #     self.transMatrix = np.dot(tf.transformations.translation_matrix(targetTrans), tf.transformations.quaternion_matrix(targetRot))
        #     # print transMatrix
        # except tf.ExtrapolationException:
        #     print 'TF Error!'
        #     pass
        # points = np.c_[self.points3D, np.ones(len(self.points3D))]
        # values = np.dot(self.transMatrix, points.T).T[:, :3]
        # return values, np.dot(self.transMatrix, np.array([self.micLocation[0], self.micLocation[1], self.micLocation[2], 1.0]))[:3].tolist(), \
        #        np.dot(self.transMatrix, np.array([self.spoon[0], self.spoon[1], self.spoon[2], 1.0]))[:3].tolist()

    def cancel(self):
        self.publisher.unregister()
        self.cloudSub.unregister()
        self.cameraSub.unregister()
        # self.imageSub.unregister()

    def imageCallback(self, data):
        if self.lGripX is None:
            return

        try:
            image = self.bridge.imgmsg_to_cv(data)
            image = np.asarray(image[:,:])
        except CvBridgeError, e:
            print e
            return

        # Crop imageGray to bounding box size
        lowX, highX, lowY, highY = self.boundingBox()
        self.imageData = image[lowY:highY, lowX:highX, :]
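A minimal sketch mirroring the commented-out transform code in getAllRecentPoints above: applying a tf (translation, quaternion) pair to an (N, 3) point array. transform_points is a hypothetical helper name.

import numpy as np
import tf.transformations as tft

def transform_points(points3d, trans, rot):
    # points3d: (N, 3) array; trans: (x, y, z); rot: quaternion (x, y, z, w)
    T = np.dot(tft.translation_matrix(trans), tft.quaternion_matrix(rot))
    homog = np.c_[points3d, np.ones(len(points3d))]
    return np.dot(T, homog.T).T[:, :3]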
Ejemplo n.º 54
0
class DataCollector():
    '''
    @summary: Collects data for robot calibration.

    Subscribes to various topics needed (e.g. images, camera infos, joint angles) and
    provides a service. When service is called, a set of samples is recorded,
    processed (e.g. checkerboards are detected) and combined into a RobotMeasurement message
    which is published as /robot_measurement.
    '''
    def __init__(self):
        '''
        Set up subscribers, publishers and local storage
        '''
        rospy.init_node(NODE)
        print "==> %s started " % NODE

        checkerboard = rospy.get_param("~calibration_pattern")
        self.checkerboard_square_size = checkerboard["square_size"]
        self.checkerboard_pattern_size = (
            int(checkerboard["pattern_size"].split("x")[0]),
            int(checkerboard["pattern_size"].split("x")[1]))
        with open(rospy.get_param("~sensors_yaml"), 'r') as a:
            sensors_yaml = yaml.load(a.read())
        # self._get_transformation_links(sensors_yaml)
        self._create_transformation_callbacks(sensors_yaml)
        #self.listener = tf.TransformListener()

        # CvBridge
        self.bridge = CvBridge()

        # initialize private storage
        self._images = {}
        self._images_received = {}

        self.counter = 1

        #  init publisher / subscriber
        self._robot_measurement_pub = rospy.Publisher("/robot_measurement",
                                                      RobotMeasurement)

        # left camera
        rospy.Subscriber(
            rospy.get_param("~cameras")["reference"]["topic"], Image,
            self._callback_image,
            rospy.get_param("~cameras")["reference"]["name"])
        self._images[rospy.get_param("~cameras")["reference"]["name"]] = {}
        self._images_received[rospy.get_param("~cameras")["reference"]
                              ["name"]] = False
        for camera in rospy.get_param("~cameras")["further"]:
            rospy.Subscriber(camera["topic"], Image, self._callback_image,
                             camera["name"])
            self._images[camera["name"]] = {}
            self._images_received[camera["name"]] = False
        print "==> done with initialization"

    def _callback_image(self, image_raw, id):
        '''
        Callback function for left camera message filter
        '''
        # print "DEBUG: callback left"
        self._images[id]["image"] = image_raw
        # if self._left_received == False:
        # print "--> left sample received (this only prints once!)"
        self._images_received[id] = True

    def _create_transformation_callbacks(self, sensors_yaml):
        # kinematic chains

        self.transformations = {}
        self.transformations_received = {}
        for _chain in sensors_yaml["chains"]:
            rospy.Subscriber(_chain["topic"], JointTrajectoryControllerState,
                             self._callback_jointstate, _chain["chain_id"])
            self.transformations_received[_chain["chain_id"]] = False

    def _callback_jointstate(self, data, id):
        self.transformations[id] = data
        self.transformations[id].header.frame_id = id
        self.transformations_received[id] = True

    def run(self):
        '''
        Main method, starts service to provide capture functionality
        '''
        rospy.sleep(1)
        # Start service
        rospy.Service('/collect_data/capture', Capture, self._collect)
        rospy.loginfo(
            "service '/collect_data/capture' started, waiting for requests...")
        rospy.spin()

    def _collect(self, data):
        '''
        Executed on service call. Logs and calls _capture_and_pub
        '''
        rospy.loginfo("capturing sample %.2i" % self.counter)
        res = self._capture_and_pub("sample%.2i" % self.counter,
                                    CHECKERBOARD_NAME, CHECKERBOARD_CHAIN,
                                    self.checkerboard_pattern_size,
                                    self.checkerboard_square_size)
        self.counter += 1
        return CaptureResponse(res)

    def _capture_and_pub(self, sample_id, target_id, chain_id, pattern_size,
                         square_size):
        '''
        Main capturing function. Gets a set of recent messages for all needed topics.
        Processes messages and creates a RobotMeasurement message which is published.

        @param sample_id: Sample identifier (e.g. sample01)
        @type  sample_id: string

        @param target_id: Name of checkerboard (e.g. cb_9x6)
        @type  target_id: string

        @param chain_id: Name of dh chain to which checkerboard is attached (e.g. arm_chain)
        @type  chain_id: string

        @param pattern_size: Size of checkerboard pattern as defined by opencv (e.g. (9, 6))
        @type  pattern_size: tuple(x, y)

        @param square_size: Size of checkerboard squares (in m)
        @type  square_size: float
        '''
        # capture measurements

        # --------------------
        # create robot measurement msg and publish
        # -----------------
        robot_msg = RobotMeasurement()
        robot_msg.sample_id = sample_id
        robot_msg.target_id = target_id
        robot_msg.chain_id = chain_id

        # --------------------
        # receive images
        # -----------------
        for v in self._images_received:
            self._images_received[v] = False
        for v in self.transformations_received:
            self.transformations_received[v] = False
        print self._images_received

        start_time = rospy.Time.now()
        while (not all(self._images_received.values())
               or not all(self.transformations_received.values())):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still
            # missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                for name, v in self._images_received.iteritems():
                    if not v:
                        print "--> still waiting for sample from %s" % name
                for name, v in self.transformations_received.iteritems():
                    if not v:
                        print "--> still waiting for sample from %s" % name
                start_time = rospy.Time.now()
        print "got sample"
        #latest_left = self._left

        # set up checkerboard and checkerboard detector
        # ---------------------------------------------
        checkerboard = Checkerboard(pattern_size, square_size)
        checkerboard_detector = CheckerboardDetector(checkerboard)

        # detect cb
        # --------------
        for name, image in self._images.iteritems():
            image = image["image"]
            #print image.header
            cvImage = self.bridge.imgmsg_to_cv(image, "mono8")
            imagecv = cv2util.cvmat2np(cvImage)

            try:
                corners = checkerboard_detector.detect_image_points(
                    imagecv, is_grayscale=True)
            except:
                # cb not found
                rospy.logwarn("No calibration pattern found for: '%s'" % name)
                return False
            else:
                print "cb found: %s" % name
                img_points = []
                for (x, y) in corners.reshape(-1, 2):
                    img_points.append(ImagePoint(x, y))

            # create camera msg left
            # ----------------------
            cam_msg = CameraMeasurement()
            cam_msg.camera_id = name
            #print "crash before"
            cam_msg.header.stamp = image.header.stamp
            #print "crash after"
            #cam_msg.cam_info = image["camera_info"]
            cam_msg.image_points = img_points
            cam_msg.verbose = False
            robot_msg.M_cam.append(cam_msg)
            cam_msg.image = image

# print cam_msg.camera_id
#print cam_msg.header
# cam_msg.image_rect   = latest_left["image_rect"]
# cam_msg.features    = # Not implemented here

#----------------------
#DEBUG publish pic
#-----------------
#self._image_pub_left.publish(latest_left["image"])

#----------------------
# Fill robot_msg
#----------------------
        robot_msg.M_chain = self.transformations.values()

        self._robot_measurement_pub.publish(robot_msg)

        return True
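The Checkerboard/CheckerboardDetector classes used above come from an external module. A hedged standalone sketch of the same corner detection with plain OpenCV (the image path and pattern size are assumptions):

import cv2

gray = cv2.imread('sample.png', 0)    # load grayscale; hypothetical image
pattern_size = (9, 6)                 # inner corners per row, column
found, corners = cv2.findChessboardCorners(gray, pattern_size)
if found:
    print('checkerboard found: %d corners' % len(corners))
else:
    print('no calibration pattern found')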
Ejemplo n.º 55
0
class DataCollector():
    '''
    @summary: Collects data for robot calibration.
    
    Subscribes to various topics needed (e.g. images, camera infos, joint angles) and 
    provides a service. When service is called, a set of samples is recorded, 
    processed (e.g. checkerboards are detected) and combined into a RobotMeasurement message
    which is published as /robot_measurement.
    '''
    def __init__(self):
        '''
        Set up subscribers, publishers and local storage
        '''
        rospy.init_node(NODE)
        print "==> %s started " % NODE

        # get joint names for arm
        if rospy.has_param("arm_controller/joint_names"):  # real hardware
            self.arm_joint_names = rospy.get_param(
                "arm_controller/joint_names")
        elif rospy.has_param("arm_controller/joints"):  # simulation
            self.arm_joint_names = rospy.get_param("arm_controller/joints")
        else:
            print "Could not get joint names for arm from parameter server. exiting..."
            exit(-1)

        # get joint names for torso
        if rospy.has_param("torso_controller/joint_names"):  # real hardware
            self.torso_joint_names = rospy.get_param(
                "torso_controller/joint_names")
        elif rospy.has_param("torso_controller/joints"):  # simulation
            self.torso_joint_names = rospy.get_param("torso_controller/joints")
        else:
            print "Could not get joint names for torso from parameter server. exiting..."
            exit(-1)

        # CvBridge
        self.bridge = CvBridge()

        # initialize private storage
        self._arm_joint_msg_received = False
        self._arm_joint_msg = None
        self._torso_joint_msg_received = False
        self._torso_joint_msg = None
        self._left = {}
        self._left_received = False
        self._right = {}
        self._right_received = False
        self._kinect_rgb = {}
        self._kinect_rgb_received = False
        self.counter = 1

        #  init publisher / subscriber
        self._robot_measurement_pub = rospy.Publisher("/robot_measurement",
                                                      RobotMeasurement)
        self._image_pub_left = rospy.Publisher("/robot_measurement_image_left",
                                               Image)  #DEBUG
        self._image_pub_right = rospy.Publisher(
            "/robot_measurement_image_right", Image)  #DEBUG
        self._image_pub_kinect_rgb = rospy.Publisher(
            "/robot_measurement_image_kinect_rgb", Image)  #DEBUG
        self._sub_joint_states = rospy.Subscriber("/joint_states", JointState,
                                                  self._callback_joints)

        # left camera
        self._sub_left_info = message_filters.Subscriber(
            "/stereo/left/camera_info", CameraInfo)
        self._sub_left_image_color = message_filters.Subscriber(
            "/stereo/left/image_rect_color", Image)
        self._sub_left_image_rect = message_filters.Subscriber(
            "/stereo/left/image_rect", Image)
        self._sub_left = message_filters.TimeSynchronizer([
            self._sub_left_info, self._sub_left_image_color,
            self._sub_left_image_rect
        ], 15)
        self._sub_left.registerCallback(self._callback_left)

        # right camera
        self._sub_right_info = message_filters.Subscriber(
            "/stereo/right/camera_info", CameraInfo)
        self._sub_right_image_color = message_filters.Subscriber(
            "/stereo/right/image_rect_color", Image)
        self._sub_right_image_rect = message_filters.Subscriber(
            "/stereo/right/image_rect", Image)
        self._sub_right = message_filters.TimeSynchronizer([
            self._sub_right_info, self._sub_right_image_color,
            self._sub_right_image_rect
        ], 15)
        self._sub_right.registerCallback(self._callback_right)

        # kinect rgb
        self._sub_kinect_rgb_info = message_filters.Subscriber(
            "/cam3d/rgb/camera_info", CameraInfo)
        self._sub_kinect_rgb_image_color = message_filters.Subscriber(
            "/cam3d/rgb/image_color", Image)
        self._sub_kinect_rgb = message_filters.TimeSynchronizer(
            [self._sub_kinect_rgb_info, self._sub_kinect_rgb_image_color], 15)
        self._sub_kinect_rgb.registerCallback(self._callback_kinect_rgb)
        print "==> done with initialization"

    def _callback_left(self, camera_info, image_color, image_rect):
        '''
        Callback function for left camera message filter
        '''
        #print "DEBUG: callback left"
        self._left["camera_info"] = camera_info
        self._left["image_color"] = image_color
        self._left["image_rect"] = image_rect
        #        if self._left_received == False:
        #            print "--> left sample received (this only prints once!)"
        self._left_received = True

    def _callback_right(self, camera_info, image_color, image_rect):
        '''
        Callback function for right camera message filter
        '''
        #print "DEBUG: callback right"
        self._right["camera_info"] = camera_info
        self._right["image_color"] = image_color
        self._right["image_rect"] = image_rect
        #        if self._right_received == False:
        #            print "--> right sample received (this only prints once!)"
        self._right_received = True

    def _callback_kinect_rgb(self, camera_info, image_color):
        '''
        Callback function for kinect rgb message filter
        '''
        #print "DEBUG: callback kinect_rgb"
        self._kinect_rgb["camera_info"] = camera_info
        self._kinect_rgb["image_color"] = image_color
        #        if self._kinect_rgb_received == False:
        #            print "--> kinect sample received (this only prints once!)"
        self._kinect_rgb_received = True

    def _callback_joints(self, msg):
        '''
        Callback function for joint angles messages
        '''
        #print "DEBUG: callback joints"

        # torso
        if self.torso_joint_names[0] in msg.name:
            pos = []
            for name in self.torso_joint_names:
                pos.append(msg.position[msg.name.index(name)])

            # create JointState message
            joint_msg = JointState()
            joint_msg.header = msg.header
            joint_msg.name = self.torso_joint_names
            joint_msg.position = pos

            # save joint state msg
            self._torso_joint_msg = joint_msg
            #           if self._torso_joint_msg_received == False:
            #               print "--> torso joint state received (this only prints once!)"
            self._torso_joint_msg_received = True

        # arm
        if self.arm_joint_names[0] in msg.name:
            pos = []
            for name in self.arm_joint_names:
                pos.append(msg.position[msg.name.index(name)])

            # create JointState message
            joint_msg = JointState()
            joint_msg.header = msg.header
            joint_msg.name = self.arm_joint_names
            joint_msg.position = pos

            # save joint state msg
            self._arm_joint_msg = joint_msg
            #           if self._arm_joint_msg_received == False:
            #               print "--> arm joint state received (this only prints once!)"
            self._arm_joint_msg_received = True

    def run(self):
        '''
        Main method, starts service to provide capture functionality
        '''
        rospy.sleep(1)

        # Start service
        srv = rospy.Service('/collect_data/capture', Capture, self._collect)
        rospy.loginfo(
            "service '/collect_data/capture' started, waiting for requests...")
        rospy.spin()

    def _collect(self, data):
        '''
        Executed on service call. Logs and calls _capture_and_pub
        '''
        rospy.loginfo("capturing sample %.2i" % self.counter)
        res = self._capture_and_pub("sample%.2i" % self.counter,
                                    CHECKERBOARD_NAME, CHECKERBOARD_CHAIN,
                                    CHECKERBOARD_PATTERN_SIZE,
                                    CHECKERBOARD_SQUARE_SIZE)
        self.counter += 1
        return CaptureResponse(res)

    def _capture_and_pub(self, sample_id, target_id, chain_id, pattern_size,
                         square_size):
        '''
        Main capturing function. Gets a set of recent messages for all needed topics.
        Processes the messages and creates a RobotMeasurement message, which is published.
        
        @param sample_id: Sample identifier (e.g. sample01)
        @type  sample_id: string
        
        @param target_id: Name of checkerboard (e.g. cb_9x6)
        @type  target_id: string
        
        @param chain_id: Name of dh chain to which checkerboard is attached (e.g. arm_chain)
        @type  chain_id: string
        
        @param pattern_size: Size of checkerboard pattern as defined by opencv (e.g. (9, 6))
        @type  pattern_size: tuple(x, y)
        
        @param square_size: Size of checkerboard squares (in m)
        @type  square_size: float
        '''
        # capture measurements
        # --------------------
        self._left_received = False
        self._right_received = False
        self._kinect_rgb_received = False
        start_time = rospy.Time.now()
        while (not self._left_received or not self._right_received
               or not self._kinect_rgb_received):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                if not self._left_received:
                    print "--> still waiting for sample from left"
                if not self._right_received:
                    print "--> still waiting for sample from right"
                if not self._kinect_rgb_received:
                    print "--> still waiting for sample from kinect"
                start_time = rospy.Time.now()
        latest_left = self._left
        latest_right = self._right
        latest_kinect_rgb = self._kinect_rgb

        self._torso_joint_msg_received = False
        self._arm_joint_msg_received = False
        start_time = rospy.Time.now()
        while (not self._torso_joint_msg_received
               or not self._arm_joint_msg_received):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                if not self._torso_joint_msg_received:
                    print "--> still waiting for torso joint states"
                if not self._arm_joint_msg_received:
                    print "--> still waiting for srm joint states"
                start_time = rospy.Time.now()
        latest_torso = self._torso_joint_msg
        latest_arm = self._arm_joint_msg

        # set up checkerboard and checkerboard detector
        # ---------------------------------------------
        checkerboard = Checkerboard(pattern_size, square_size)
        checkerboard_detector = CheckerboardDetector(checkerboard)

        # detect cb left
        # --------------
        cvImage = self.bridge.imgmsg_to_cv(latest_left["image_rect"], "mono8")
        image = cv2util.cvmat2np(cvImage)

        corners = checkerboard_detector.detect_image_points(image,
                                                            is_grayscale=True)
        if corners is not None:
            print "cb found: left"
            img_points_left = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_left.append(ImagePoint(x, y))
        else:
            # cb not found
            return False

        # create camera msg left
        # ----------------------
        cam_msg_left = CameraMeasurement()
        cam_msg_left.camera_id = "left"
        cam_msg_left.header.stamp = latest_left["camera_info"].header.stamp
        cam_msg_left.cam_info = latest_left["camera_info"]
        cam_msg_left.image_points = img_points_left
        cam_msg_left.verbose = False
        #cam_msg_left.image       = latest_left["image_color"]
        #cam_msg_left.image_rect   = latest_left["image_rect"]
        #cam_msg_left.features    = # Not implemented here

        # detect cb right
        # --------------
        cvImage = self.bridge.imgmsg_to_cv(latest_right["image_rect"], "mono8")
        image = cv2util.cvmat2np(cvImage)

        corners = checkerboard_detector.detect_image_points(image,
                                                            is_grayscale=True)
        if corners is not None:
            print "cb found: right"
            img_points_right = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_right.append(ImagePoint(x, y))
        else:
            # cb not found
            return False

        # create camera msg right
        # -----------------------
        cam_msg_right = CameraMeasurement()
        cam_msg_right.camera_id = "right"
        cam_msg_right.header.stamp = latest_right["camera_info"].header.stamp
        cam_msg_right.cam_info = latest_right["camera_info"]
        cam_msg_right.image_points = img_points_right
        cam_msg_right.verbose = False
        #cam_msg_right.image        = latest_right["image_color"]
        #cam_msg_right.image_rect   = latest_right["image_rect"]
        #cam_msg_right.features    = # Not implemented here

        # detect cb kinect_rgb
        # --------------------
        cvImage = self.bridge.imgmsg_to_cv(latest_kinect_rgb["image_color"],
                                           "mono8")
        image = cv2util.cvmat2np(cvImage)

        corners = checkerboard_detector.detect_image_points(image,
                                                            is_grayscale=True)
        if corners is not None:
            print "cb found: kinect_rgb"
            img_points_kinect_rgb = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_kinect_rgb.append(ImagePoint(x, y))
        else:
            # cb not found
            return False

        # create camera msg kinect_rgb
        # ----------------------------
        cam_msg_kinect_rgb = CameraMeasurement()
        cam_msg_kinect_rgb.camera_id = "kinect_rgb"
        cam_msg_kinect_rgb.header.stamp = latest_kinect_rgb[
            "camera_info"].header.stamp
        cam_msg_kinect_rgb.cam_info = latest_kinect_rgb["camera_info"]
        cam_msg_kinect_rgb.image_points = img_points_kinect_rgb
        cam_msg_kinect_rgb.verbose = False
        #cam_msg_kinect_rgb.image       = latest_kinect_rgb["image_color"]
        #cam_msg_kinect_rgb.image_rect   = latest_kinect_rgb["image_rect"]
        #cam_msg_kinect_rgb.features    = # Not implemented here

        # create torso_chain msg
        # ----------------------
        torso_chain_msg = ChainMeasurement()
        torso_chain_msg.header = latest_torso.header
        torso_chain_msg.chain_id = "torso_chain"
        torso_chain_msg.chain_state = latest_torso

        # create arm_chain msg
        # --------------------
        arm_chain_msg = ChainMeasurement()
        arm_chain_msg.header = latest_arm.header
        arm_chain_msg.chain_id = "arm_chain"
        arm_chain_msg.chain_state = latest_arm

        # DEBUG publish pic
        # -----------------
        self._image_pub_left.publish(latest_left["image_color"])
        self._image_pub_right.publish(latest_right["image_color"])
        self._image_pub_kinect_rgb.publish(latest_kinect_rgb["image_color"])

        # create robot measurement msg and publish
        # -----------------
        robot_msg = RobotMeasurement()
        robot_msg.sample_id = sample_id
        robot_msg.target_id = target_id
        robot_msg.chain_id = chain_id
        robot_msg.M_cam = [cam_msg_left, cam_msg_right, cam_msg_kinect_rgb]
        robot_msg.M_chain = [torso_chain_msg, arm_chain_msg]
        self._robot_measurement_pub.publish(robot_msg)

        return True
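A minimal client for the capture service above, useful for driving the collector through a sequence of poses. The import path for the Capture service type is an assumption; it lives wherever the node's own srv definitions do:

import rospy
from calibration_srvs.srv import Capture  # hypothetical import path

rospy.init_node("capture_client")
rospy.wait_for_service("/collect_data/capture")
capture = rospy.ServiceProxy("/collect_data/capture", Capture)
for i in range(20):
    # move the robot to the next calibration pose here, then:
    response = capture()
    print "sample %d captured: %s" % (i + 1, response)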
Ejemplo n.º 56
0
    bag_file_name = arguments[0]
    if arguments_size > 1:
        topic_name = arguments[1]
    if arguments_size > 2:
        directory = arguments[2]

    # Open the bag file:
    bag = rosbag.Bag(bag_file_name)
    bridge = CvBridge()

    # Sweep through messages that match *topic_name*:
    index = 0
    previous_time = None
    for topic, msg, t in bag.read_messages(topic_name):
        # Extract *cv_image* in RGB8 mode:
        index += 1
        try:
            cv_image = bridge.imgmsg_to_cv(msg, desired_encoding="rgb8")
        except CvBridgeError, e:
            print e
            continue  # skip this message; cv_image would be unbound below

        # Save the image
        cv.SaveImage("{0}/Image{1:03d}.pnm".format(directory, index), cv_image)

        # Print out time changes:
        if previous_time is None:
            previous_time = t
        dt = t - previous_time
        previous_time = t
        print("[{0}]:{1}".format(index, dt))
Ejemplo n.º 57
0
class cvBridgeDemo():
    def __init__(self):
        self.node_name = "cv_bridge_demo"
        
        rospy.init_node(self.node_name)
        
        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)
        
        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.node_name
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        cv.MoveWindow(self.cv_window_name, 25, 75)
        
        self.target_visible = False
        self.tl = tf.TransformListener()

        self.xt = -1
        self.yt = -1
        self.w = -1
        self.i = -1
        self.j = -1
        self.k = -1
        self.yaw = -1
        self.roll = -1
        self.pitch = -1
        self.MarkId = -1
        # Create the cv_bridge object
        self.bridge = CvBridge()
        
        # Subscribe to the camera image and depth topics and set
        # the appropriate callbacks
        self.image_sub = rospy.Subscriber("/camera/rgb/image_color", Image, self.image_callback)
       
        self.ar_sub = rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.doSomething)

        self.is_visible = False

        rospy.loginfo("Waiting for image topics...")


    def doSomething(self, msg):
    	"""
        		Try
      	"""
        try:

            rospy.loginfo("trying")
            rospy.loginfo("tags detected: %d", len(msg.markers))

            for tag in msg.markers:
                rospy.loginfo("%f, %f, %f", tag.pose.pose.position.x,
                              tag.pose.pose.position.y, tag.pose.pose.position.z)

            marker = msg.markers[0]
            #rospy.loginfo(marker.id)
            if not self.is_visible:
                rospy.loginfo("marker detected!!")
                self.is_visible = True
                
                tX = marker.pose.pose.position.x
                tY = marker.pose.pose.position.y

                # prevent re-localising a target position we already recorded
                if sqrt((self.xt - tX)**2 + (self.yt - tY)**2) < 0.05:
                    rospy.loginfo("distance from prev: %f",
                                  sqrt((self.xt - tX)**2 + (self.yt - tY)**2))
                    return

                self.xt = marker.pose.pose.position.x
                self.yt = marker.pose.pose.position.y

                w = marker.pose.pose.orientation.w
                i = marker.pose.pose.orientation.x
                j = marker.pose.pose.orientation.y
                k = marker.pose.pose.orientation.z
                self.yaw = atan2(2*j*w - 2*i*k, 1 - 2*j*j - 2*k*k)*180/pi
                self.pitch = asin(2*i*j + 2*k*w)*180/pi
                self.roll = atan2(2*i*w - 2*j*k, 1 - 2*i*i - 2*k*k)*180/pi
                rospy.loginfo("position @ (%f, %f)", self.xt, self.yt)
                rospy.loginfo("yaw, pitch, roll @ (%f, %f, %f)", self.yaw, self.pitch, self.roll)

                if 70 < self.yaw < 110 and -10 < self.roll < 10:
                    dx = 0.5
                    dy = self.yt
                    # q = Quaternion(0, 0, 0.7071, 0.7071)
                    rospy.loginfo("!!tag facing along x-axis!!")
                elif -110 < self.yaw < -70 and -10 < self.roll < 10:
                    dx = -0.5
                    dy = self.yt
                    # q = Quaternion(0, 0, 0.7071, 0.7071)
                    rospy.loginfo("!!tag facing against x-axis!!")
                elif 70 < self.yaw < 110 and -110 < self.roll < -70:
                    dx = self.xt
                    dy = 0.5
                    # q = Quaternion(0, 0, 0.7071, 0.7071)
                    rospy.loginfo("!!tag facing along y-axis!!")
                elif 70 < self.yaw < 110 and 70 < self.roll < 110:
                    dx = self.xt
                    dy = -0.5
                    # q = Quaternion(0, 0, 0.7071, 0.7071)
                    rospy.loginfo("!!tag facing against y-axis!!")

                self.MarkId = marker.id

                if marker.id == 3:
                    # TODO: try rotating according to the tag
                    pass

        except:
            if self.is_visible:
                rospy.loginfo("marker not confirmed!!")
            self.is_visible = False

        return

    def image_callback(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            frame = self.bridge.imgmsg_to_cv(ros_image, "bgr8")
        except CvBridgeError, e:
            print e
            return  # no frame to process
        
        # Convert the image to a Numpy array since most cv2 functions
        # require Numpy arrays.
        frame = np.array(frame, dtype=np.uint8)
        
        # Process the frame using the process_image() function
        frame = self.process_image(frame)
        font_face = cv2.FONT_HERSHEY_PLAIN
        font_scale = 1
        display_image = frame.copy()
        cv2.putText(display_image, "Marker@:" , (10,20),font_face, font_scale, cv.RGB(0, 0, 255) )    
        cv2.putText(display_image, "x: " + str(self.xt) , (10,40),font_face, font_scale, cv.RGB(0, 255, 0) )    
        cv2.putText(display_image, "y: " + str(self.yt), (10,54),font_face, font_scale, cv.RGB(0, 255, 0) )    
        cv2.putText(display_image, "Ort: " + str(self.yaw) + ", " + str(self.pitch) + ", " + str(self.roll), (10,68),font_face, font_scale, cv.RGB(0, 255, 0) )    
        cv2.putText(display_image, "iD: " + str(self.MarkId), (10,82),font_face, font_scale, cv.RGB(0, 255, 0) )    
           
        # cv2.putText(display_image, "FPS: " + str(self.fps), (10, 20), font_face, font_scale, cv.RGB(255, 255, 0))
        # cv2.putText(display_image, "Keyboard commands:", (20, int(self.frame_size[0] * 0.6)), font_face, font_scale, cv.RGB(255, 255, 0))
        # cv2.putText(display_image, " ", (20, int(self.Markx * 0.65)), font_face, font_scale, cv.RGB(255, 255, 0))
        # cv2.putText(display_image, "space - toggle pause/play", (20, int(self.frame_size[0] * 0.72)), font_face, font_scale, cv.RGB(255, 255, 0))
        # cv2.putText(display_image, "     r - restart video from beginning", (20, int(self.frame_size[0] * 0.79)), font_face, font_scale, cv.RGB(255, 255, 0))
        # cv2.putText(display_image, "     t - hide/show this text", (20, int(self.frame_size[0] * 0.86)), font_face, font_scale, cv.RGB(255, 255, 0))
        # cv2.putText(display_image, "     q - quit the program", (20, int(self.frame_size[0] * 0.93)), font_face, font_scale, cv.RGB(255, 255, 0))
            
        # Merge the original image and the display image (text overlay)
        display_image = cv2.bitwise_or(frame, display_image)

                       
        # Display the image.
        cv2.imshow(self.node_name, display_image)
        
        # Process any keyboard commands
        self.keystroke = cv.WaitKey(5)
        if 32 <= self.keystroke < 128:
            cc = chr(self.keystroke).lower()
            if cc == 'q':
                # The user pressed the q key, so exit
                rospy.signal_shutdown("User hit q key to quit.")
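The hand-rolled quaternion-to-Euler conversion in doSomething is easy to get wrong, and the formulas above use a nonstandard axis convention. tf ships a tested converter; a sketch of the equivalent step inside the callback (the angle order and signs may differ from the formulas above, so the facing thresholds would need re-checking):

from math import degrees
from tf.transformations import euler_from_quaternion

q = marker.pose.pose.orientation
roll, pitch, yaw = euler_from_quaternion([q.x, q.y, q.z, q.w])  # radians
self.roll, self.pitch, self.yaw = degrees(roll), degrees(pitch), degrees(yaw)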
Ejemplo n.º 58
0
class video_processor:
    def __init__(self):
        self.sub = rospy.Subscriber('usb_cam/image_raw', Image, self.callback)
        self.pub = rospy.Publisher('heading', Twist)
        self.speed = float(1)
        self.bridge = CvBridge()
        cv.NamedWindow("Input Video")
        #cv.NamedWindow("Blur Video")
        #cv.NamedWindow("HSV Video")
        #cv.NamedWindow("Hue Video")
        #cv.NamedWindow("Saturation Video")
        #cv.NamedWindow("Value Video")
        cv.NamedWindow("Red-Orange Video")
        cv.NamedWindow("White Video")
        cv.NamedWindow("Red-Orange and White Video")
        #cv.WaitKey(0)

    def callback(self, image_in):
        try:
            input_image = self.bridge.imgmsg_to_cv(image_in, "bgr8")
        except CvBridgeError, e:
            print e
            return  # no frame to process
        cv.ShowImage("Input Video", input_image)

        blur_image = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC3)
        cv.Smooth(input_image, blur_image, cv.CV_BLUR, 10, 10)
        #cv.ShowImage("Blur Video", proc_image)
        proc_image = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC3)
        cv.CvtColor(blur_image, proc_image, cv.CV_BGR2HSV)
        #cv.ShowImage("HSV Video", proc_image)
        split_image = [
            cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1),
            cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1),
            cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1),
        ]
        cv.Split(proc_image, split_image[0], split_image[1], split_image[2], None)
        #hue = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        #sat = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        #val = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        #cv.Split(proc_image, hue,sat,val, None )
        #cv.ShowImage("Hue Video", hue)
        #cv.ShowImage("Saturation Video", sat)
        #cv.ShowImage("Value Video", val)

        thresh_0 = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        thresh_1 = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        thresh_2 = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        red_orange = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        cv.Threshold(split_image[1],thresh_0, 128,255,cv.CV_THRESH_BINARY) # > 50% saturation
        cv.Threshold(split_image[0],thresh_1, 220,255,cv.CV_THRESH_BINARY) # > Purple
        cv.Threshold(split_image[0],thresh_2, 10, 255,cv.CV_THRESH_BINARY_INV) # < Yellow-Orange
        cv.Add(thresh_1,thresh_2,red_orange)
        cv.And(red_orange,thresh_0,red_orange)
        cv.ShowImage("Red-Orange Video",red_orange)

        cv.CvtColor(blur_image, proc_image, cv.CV_BGR2HLS)
        cv.Split(proc_image, split_image[0], split_image[1],split_image[2], None )
        cv.Threshold(split_image[1],thresh_0, 204,255,cv.CV_THRESH_BINARY) # > 80% Lum
        cv.ShowImage("White Video",thresh_0)
        cv.Or(red_orange, thresh_0, thresh_0)
        cv.ShowImage("Red-Orange and White Video",thresh_0)
        cv.WaitKey(30)

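        # Accumulate a moment of the mask weighted by row index and by signed
        # horizontal offset from the image centre, then normalise it into an
        # angular velocity (see the vectorised NumPy sketch after this example).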
        ang_z = 0
        x = 0
        for i in range(input_image.rows):
            y = -(input_image.cols / 2)
            row = cv.GetRow(thresh_0,i)
            for j in row.tostring():
                ang_z = ang_z + (x * y *ord(j))
                y = y + 1
            x = x + 1
        ang_z = (ang_z * pi * 2 * 2 * 4 / 255 / input_image.rows / input_image.rows / input_image.cols / input_image.cols)
        p = Twist()
        p.linear.x = self.speed
        p.angular.z = ang_z
        self.pub.publish(p)
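The nested per-pixel loop above runs in pure Python and will struggle at camera rate. The same weighted moment can be computed in a single NumPy pass; a sketch, assuming mask holds the final threshold image as a 2-D uint8 array (e.g. via numpy.asarray(thresh_0)):

import numpy as np
from math import pi

def heading_from_mask(mask):
    rows, cols = mask.shape
    x = np.arange(rows, dtype=np.float64).reshape(-1, 1)              # row weight
    y = np.arange(cols, dtype=np.float64).reshape(1, -1) - cols // 2  # signed column offset
    ang_z = np.sum(x * y * (mask / 255.0))                            # /255 maps pixels to 0..1
    return ang_z * pi * 2 * 2 * 4 / (rows * rows * cols * cols)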
Ejemplo n.º 59
0
class HistAnalyzer:
    def __init__(self,
                 background_noise,
                 mask,
                 topic='/playpen_kinect/rgb/image_color'):
        rospy.Subscriber(topic, Image, self.get_img)
        self.check = rospy.Service("playpen_check_success_hist", Check,
                                   self.serv_success)
        self.train_empty = rospy.Service("playpen_train_hist", Train,
                                         self.serv_train)
        self.background_noise = background_noise
        self.h_bins = 30
        self.s_bins = 32
        self.h_ranges = [0, 180]
        self.s_ranges = [0, 255]
        self.ranges = [self.h_ranges, self.s_ranges]
        self.hist = None
        self.mask = mask
        self.avg_noise = None
        self.online_img = None
        self.bridge = CvBridge()
        size = cv.GetSize(background_noise[0])
        self.IavgF = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.IdiffF = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.IprevF = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.IhiF = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.IlowF = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Ilow1 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Ilow2 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Ilow3 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Ihi1 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Ihi2 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Ihi3 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(self.IavgF)
        cv.Zero(self.IdiffF)
        cv.Zero(self.IprevF)
        cv.Zero(self.IhiF)
        cv.Zero(self.IlowF)
        self.Icount = 0.00001
        self.Iscratch = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Iscratch2 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Igray1 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Igray2 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Igray3 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        self.Imaskt = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        cv.Zero(self.Iscratch)
        cv.Zero(self.Iscratch2)
        self.first = 1

    def accumulateBackground(self, img):
        cv.CvtScale(img, self.Iscratch, 1, 0)
        if (not self.first):
            cv.Acc(self.Iscratch, self.IavgF)
            cv.AbsDiff(self.Iscratch, self.IprevF, self.Iscratch2)
            cv.Acc(self.Iscratch2, self.IdiffF)
            self.Icount += 1.0
        self.first = 0
        cv.Copy(self.Iscratch, self.IprevF)

    def setHighThresh(self, thresh):
        cv.ConvertScale(self.IdiffF, self.Iscratch, thresh)
        cv.Add(self.Iscratch, self.IavgF, self.IhiF)
        #cv.Split(self.IhiF, self.Ihi1, self.Ihi2, self.Ihi3, None)

    def setLowThresh(self, thresh):
        cv.ConvertScale(self.IdiffF, self.Iscratch, thresh)
        cv.Sub(self.IavgF, self.Iscratch, self.IlowF)
        #cv.Split(self.IlowF, self.Ilow1, self.Ilow2, self.Ilow3, None)

    def createModelsfromStats(self):
        cv.ConvertScale(self.IavgF, self.IavgF, float(1.0 / self.Icount))
        cv.ConvertScale(self.IdiffF, self.IdiffF, float(1.0 / self.Icount))

        cv.AddS(self.IdiffF, cv.Scalar(1.0, 1.0, 1.0), self.IdiffF)
        self.setHighThresh(200.0)
        self.setLowThresh(200.0)

    def backgroundDiff(self, img, Imask):
        print "got into backgroundDiff"
        cv.CvtScale(img, self.Iscratch, 1, 0)
        #cv.Split(self.Iscratch, self.Igray1, self.Igray2, self.Igray3, None)
        #cv.InRange(self.Igray1, self.Ilow1, self.Ihi1, Imask)
        cv.InRange(self.Iscratch, self.IlowF, self.IhiF, Imask)

        # cv.InRange(self.Igray2, self.Ilow2, self.Ihi2, self.Imaskt)
        # cv.Or(Imask, self.Imaskt, Imask)

        # cv.InRange(self.Igray3, self.Ilow3, self.Ihi3, self.Imaskt)
        # cv.Or(Imask, self.Imaskt, Imask)

        cv.SubRS(Imask, 255, Imask)
        #cv.SaveImage('/home/mkillpack/Desktop/mask.png', Imask)
        #cv.Erode(Imask, Imask)
        return Imask

    def get_img(self, msg):
        try:
            self.online_img = self.bridge.imgmsg_to_cv(msg, "bgr8")
        except CvBridgeError, e:
            print e
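The IavgF/IdiffF machinery above implements the frame-averaging background model from the Learning OpenCV book: accumulate the mean frame and the mean absolute frame-to-frame difference, then flag as foreground any pixel that leaves the band mean +/- scale * difference. A minimal usage sketch under that reading (new_frame is a hypothetical live image with the size and type of the training frames):

analyzer = HistAnalyzer(background_noise, mask)
for frame in background_noise:        # learn the empty scene
    analyzer.accumulateBackground(frame)
analyzer.createModelsfromStats()      # freeze the averages into low/high bands

Imask = cv.CreateImage(cv.GetSize(background_noise[0]), cv.IPL_DEPTH_8U, 1)
foreground = analyzer.backgroundDiff(new_frame, Imask)  # 255 = outside the band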
Ejemplo n.º 60
0
    #Come up with an output file root name
    outfileroot = "./" + os.path.basename(bagfile).split('.')[0]

    #Load the yaml output of calling rosbag info on the input file.
    fileInfo = yaml.load(
        subprocess.Popen(['rosbag', 'info', '--yaml', bagfile],
                         stdout=subprocess.PIPE).communicate()[0])

    #Create a cv bridge to convert the image
    bridge = CvBridge()

    #Read the messages
    bag = rosbag.Bag(bagfile)
    totalMotion = 0  #Start at zero
    startTime = None
    with open(outfileroot + "_cup.csv", 'w') as outfile:
        outfile.write("StartTime,Motion\n")
        for topic, msg, t in bag.read_messages(topics=['/gscam/image_raw']):
            if startTime is None:
                startTime = t
            try:
                cvImage = bridge.imgmsg_to_cv(msg, "bgr8")
            except CvBridgeError, e:
                print e
                continue  # skip this message; cvImage would be unbound below
            timestamp = str((t - startTime).to_sec())
            location = getRedObject(cvImage, timestamp)
            #             motions = msg.move.states
            #             totalMotion += motions[1] #The Y axis, left or right on the arm
            outfile.write(timestamp + "," + str(location[0]) + "," +
                          str(location[1]) + "\n")
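getRedObject is not defined in this fragment. A hypothetical stand-in with the same shape, returning the centroid of red pixels via an HSV hue threshold and image moments, written against the same legacy cv API (the 170 hue cut-off is an assumption and ignores the hue wrap-around at 0):

import cv

def getRedObject(image, timestamp):
    # Hypothetical helper: centroid (x, y) of red pixels in a BGR image.
    hsv = cv.CreateMat(image.rows, image.cols, cv.CV_8UC3)
    cv.CvtColor(image, hsv, cv.CV_BGR2HSV)
    hue = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.Split(hsv, hue, None, None, None)
    red = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.Threshold(hue, red, 170, 255, cv.CV_THRESH_BINARY)
    m = cv.Moments(red, 1)                  # binary moments of the red mask
    area = cv.GetSpatialMoment(m, 0, 0)
    if area == 0:
        return (-1, -1)                     # no red pixels found
    return (cv.GetSpatialMoment(m, 1, 0) / area,
            cv.GetSpatialMoment(m, 0, 1) / area)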