Example #1
def handle_pkt(pkt=None):
    ts_begin_loc = pkt.find(begin_timestamp_marker)
    ts_end_loc = pkt.find(end_timestamp_marker)

    if ts_begin_loc == -1 or ts_end_loc == -1:
        #something's fishy, discard jpeg
        print "JPEG discarded, malformed data"
        return 
        
    ts = float(pkt[ts_begin_loc+len(begin_timestamp_marker):ts_end_loc])
	
#    if ts > last_ts:
#	last_ts = ts
#    else:
#	return

    pkt = pkt[ts_end_loc+len(end_timestamp_marker):]

    print "{} bytes of JPEG recvd".format(len(pkt))
    msg = CompressedImage()
    msg.header.stamp  = rospy.Time(tango_clock_offset + float(ts))
    msg.header.frame_id = camera_name
    msg.data = pkt
    msg.format = 'jpeg'
    pub_camera.publish(msg)
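handle_pkt() above relies on several module-level names that the snippet does not show (the timestamp markers, tango_clock_offset, camera_name and pub_camera). A minimal sketch of that surrounding setup, in which the marker strings, topic name and clock offset are placeholders rather than values from the original project:

# Hypothetical module-level setup assumed by handle_pkt(); all values are placeholders.
import rospy
from sensor_msgs.msg import CompressedImage

begin_timestamp_marker = '<TS>'   # placeholder delimiter before the timestamp
end_timestamp_marker = '</TS>'    # placeholder delimiter after the timestamp
camera_name = 'tango_camera'      # used as frame_id and topic prefix
tango_clock_offset = 0.0          # device-clock to ROS-time offset, in seconds

rospy.init_node('tango_image_server')
pub_camera = rospy.Publisher('/' + camera_name + '/image_raw/compressed',
                             CompressedImage, queue_size=10)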
Example #2
  def callback(self, ros_data):
    #### direct conversion to CV2 ####
    np_arr = np.fromstring(ros_data.data, np.uint8)
    image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    
    #(rows,cols,channels) = image_np.shape
    #image_np = image_np[0:64, 0:480] 

    #equ = cv2.equalizeHist(image_np)
    #res = np.hstack((image_np,equ))

    #### Feature detectors using CV2 #### 
    # "","Grid","Pyramid" + 
    # "FAST","GFTT","HARRIS","MSER","ORB","SIFT","STAR","SURF", "GridFAST"
    #method = "GridFAST"
    #feat_det = cv2.FeatureDetector_create(method)
    #time1 = time.time()

    # convert np image to grayscale
    #featPoints = feat_det.detect( cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY))
    #time2 = time.time()
    #if VERBOSE :
    #  print '%s detector found: %s points in: %s sec.'%(method, len(featPoints),time2-time1)

    #for featpoint in featPoints:
    #  x,y = featpoint.pt
    #  cv2.circle(image_np,(int(x),int(y)), 3, (0,0,255), -1)

    #### Create CompressedImage ####
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
    # Publish new image
    self.image_pub.publish(msg)
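A callback like this is normally registered on a compressed-image topic. A minimal sketch of the wiring it assumes, with placeholder topic names:

# Hypothetical node wiring for the callback above; topic names are placeholders.
import rospy
import numpy as np   # needed by the real callback body shown above
import cv2           # needed by the real callback body shown above
from sensor_msgs.msg import CompressedImage

class ImageFeature(object):
    def __init__(self):
        self.image_pub = rospy.Publisher("/output/image_raw/compressed",
                                         CompressedImage, queue_size=1)
        self.subscriber = rospy.Subscriber("/camera/image/compressed",
                                           CompressedImage, self.callback,
                                           queue_size=1)

    def callback(self, ros_data):
        pass  # body as shown in the example above

if __name__ == '__main__':
    rospy.init_node('image_feature', anonymous=True)
    ImageFeature()
    rospy.spin()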
Example #3
    def grabAndPublish(self,stream,publisher):
        # Grab image from stream
        stream.seek(0)
        img_data = stream.getvalue()

        if self.uncompress:
            # Publish raw image
            data = np.fromstring(img_data, dtype=np.uint8)
            image = cv2.imdecode(data, 1)
            image_msg = self.bridge.cv2_to_imgmsg(image)
        else:
            # Publish compressed image only
            image_msg = CompressedImage()
            image_msg.data = img_data
            image_msg.format = "jpeg"
            
        image_msg.header.stamp = rospy.Time.now()
        # Publish 
        publisher.publish(image_msg)
        # Clear stream
        stream.seek(0)
        stream.truncate()

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." %(self.node_name))
            self.has_published = True
Example #4
    def __init__(self):
        # Get the ~private namespace parameters from command line or launch file.
        self.UDP_IP = rospy.get_param('~UDP_IP', '192.168.50.37') #get_ip.get_local()
        #if self.UDP_IP != 'localhost':
        #    self.UDP_IP = int(self.UDP_IP)
        self.UDP_PORT = int(rospy.get_param('~UDP_PORT', '49155'))

        # Create a dynamic reconfigure server.
        #self.server = DynamicReconfigureServer(ConfigType, self.reconfigure)
        
        # create ros::Publisher to send Odometry messages
        cam_pub = rospy.Publisher('image_raw/compressed', CompressedImage)

        #crappy test
        t = True
        while not rospy.is_shutdown():
            #crappy test continued
            if t:
                print "Camera Running"
            t = False

            udp_string = self.receive_packet()
            jpg_string = self.parse_string(udp_string)

            cam_msg = CompressedImage()
            cam_msg.header.stamp = rospy.Time.now() # TODO: should come from robot time
            cam_msg.format = "jpeg"
            cam_msg.data = jpg_string

            cam_pub.publish(cam_msg)
Example #5
def grabFrame(event):
   msg = CompressedImage()
   msg.header.frame_id = frame_id
   msg.format = "jpeg"
   resp, msg.data = http.request("http://" + host + request)
   msg.header.stamp = rospy.Time.now()
   pub.publish(msg)
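grabFrame() is written as a timer callback and relies on module-level globals (http, host, request, frame_id, pub). A hedged sketch of that setup, assuming httplib2 and an Axis-style snapshot URL; the address and CGI path are assumptions:

# Hypothetical setup for grabFrame(); host, path and topic are placeholders.
import httplib2
import rospy
from sensor_msgs.msg import CompressedImage

host = '192.168.0.90'                  # camera address (placeholder)
request = '/axis-cgi/jpg/image.cgi'    # snapshot CGI path (assumption)
frame_id = 'camera'
http = httplib2.Http(timeout=5)

rospy.init_node('http_snapshot_node')
pub = rospy.Publisher('image_raw/compressed', CompressedImage, queue_size=1)

# Fire grabFrame() periodically; rospy.Timer passes a TimerEvent as `event`.
rospy.Timer(rospy.Duration(0.1), grabFrame)
rospy.spin()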
Example #6
def ros_compressed_from_numpygray(np_arr):
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    np_arr = 255*(1.0*np_arr/np.max(np_arr))
    msg.data = np.array(cv2.imencode('.jpg', np.array(np_arr, dtype = np.uint8))[1]).tostring()
    return msg
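A possible call site for ros_compressed_from_numpygray(): normalizing an arbitrary single-channel array (depth, intensity, etc.) before JPEG encoding. The topic name and the synthetic array are made up for illustration.

# Hypothetical usage of ros_compressed_from_numpygray(); topic and data are illustrative.
import numpy as np
import rospy
from sensor_msgs.msg import CompressedImage

rospy.init_node('gray_publisher')
pub = rospy.Publisher('gray/image_raw/compressed', CompressedImage, queue_size=1)
rospy.sleep(1.0)  # give subscribers a moment to connect

# Synthetic 12-bit-style grayscale frame; the helper rescales it to 0-255
# before encoding it as JPEG.
gray = np.random.randint(0, 4096, size=(480, 640)).astype(np.float32)
pub.publish(ros_compressed_from_numpygray(gray))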
Example #7
  def stream(self):
    url = 'http://%s:%s@%s/mjpg/video.mjpg' % (self.axis.username, self.axis.password, self.axis.hostname)
    #url = url + "?resolution=%dx%d" % (self.axis.width, self.axis.height)
    fp = urllib.urlopen(url)

    while True:
      boundary = fp.readline()

      header = {}
      while 1:
        line = fp.readline()
        if line == "\r\n": break
        line = line.strip()

        parts = line.split(": ", 1)
        header[parts[0]] = parts[1]

      content_length = int(header['Content-Length'])

      img = fp.read(content_length)
      line = fp.readline()
      
      msg = CompressedImage()
      msg.header.stamp = rospy.Time.now()
      msg.header.frame_id = self.axis_frame_id
      msg.format = "jpeg"
      msg.data = img

      self.axis.pub.publish(msg)

      """
    def grabAndPublish(self,stream,publisher):
        while not self.update_framerate and not self.is_shutdown and not rospy.is_shutdown(): 
            yield stream
            # Construct image_msg
            # Grab image from stream
            stamp = rospy.Time.now()
            stream.seek(0)
            stream_data = stream.getvalue()
            # Generate compressed image
            image_msg = CompressedImage()
            image_msg.format = "jpeg"
            image_msg.data = stream_data

            image_msg.header.stamp = stamp
            image_msg.header.frame_id = self.frame_id
            publisher.publish(image_msg)
                        
            # Clear stream
            stream.seek(0)
            stream.truncate()
            
            if not self.has_published:
                rospy.loginfo("[%s] Published the first image." %(self.node_name))
                self.has_published = True

            rospy.sleep(rospy.Duration.from_sec(0.001))
Example #9
 def publish(self, raw_img):
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = "jpeg"
     msg.data = np.array(cv2.imencode(".jpg", raw_img)[1]).tostring()
     # TODO: sensor_msgs compile error; publish the data as a String instead
     self.image_pub.publish(msg.data)
Example #10
 def sendTestImage(self):
     img_name = '0'+str(self.sent_image_count)+'_orig.'+self.test_image_format
     test_img = cv2.imread(self.test_image_path+img_name)
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = self.test_image_format
     msg.data = np.array(cv2.imencode('.'+self.test_image_format, test_img)[1]).tostring()
     self.pub_test_image.publish(msg)
Example #11
    def pubOriginal(self, msg):
        compressed_img_msg = CompressedImage()
        compressed_img_msg.format = "png"
        compressed_img_msg.data = np.array(cv2.imencode(".png", self.testImageOrig)[1]).tostring()

        # compressed_img_msg.header.stamp = msg.header.stamp
        # compressed_img_msg.header.frame_id = msg.header.frame_id
        self.pub_raw.publish(compressed_img_msg)
Example #12
 def pubImage(self):
     filename = '0'+str(self.num_sent)+'_orig.png'
     image = cv2.imread(self.im_dir+filename)
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = 'png'
     msg.data = np.array(cv2.imencode('.png', image)[1]).tostring()
     self.pub_fake_images.publish(msg)
     rospy.loginfo("[%s] Image %s published" %(self.node_name,filename))
Example #13
def publish_image(pub, image_handle):
    image = CompressedImage()
    image.data = image_handle.read()
    image.format = 'jpeg'
    image.header.seq = 0
    image.header.stamp.secs = math.floor(time.time())
    image.header.stamp.nsecs = 0
    image.header.frame_id = "0"
    pub.publish(image)
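publish_image() expects an already-open file handle whose read() returns raw JPEG bytes. A possible call site; the file path and topic are assumptions:

# Hypothetical call site for publish_image(); path and topic are placeholders.
import rospy
from sensor_msgs.msg import CompressedImage

rospy.init_node('still_image_publisher')
pub = rospy.Publisher('image_raw/compressed', CompressedImage, queue_size=1)
rospy.sleep(1.0)  # give subscribers a moment to connect

# Open in binary mode so .read() yields the raw JPEG bytes.
with open('/tmp/frame.jpg', 'rb') as image_handle:
    publish_image(pub, image_handle)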
Example #14
	def stream(self):
		"""
			Reads and processes the stream from the camera
		"""		 
		try:
			#If flag self.enable_auth is 'True' then use the user/password to access the camera. Otherwise use only self.url
			if self.enable_auth:
				req = urllib2.Request(self.url, None, {"Authorization": self.auth })
			else:
				req = urllib2.Request(self.url)
				
			fp = urllib2.urlopen(req, timeout = self.timeout)
			#fp = urllib2.urlopen(req)

			while self.run_camera and not rospy.is_shutdown():
				
				boundary = fp.readline()
				
				header = {}
				
				while not rospy.is_shutdown():
					line = fp.readline()
					#print 'read line %s'%line
					if line == "\r\n": break
					line = line.strip()
					#print line
					parts = line.split(": ", 1)
					header[parts[0]] = parts[1]

				content_length = int(header['Content-Length'])
				#print 'Length = %d'%content_length
				img = fp.read(content_length)
				line = fp.readline()
				
				msg = CompressedImage()
				msg.header.stamp = rospy.Time.now()
				msg.header.frame_id = self.axis_frame_id
				msg.format = "jpeg"
				msg.data = img
				# publish image
				self.compressed_image_publisher.publish(msg)
				
				cimsg = self.cinfo.getCameraInfo()
				cimsg.header.stamp = msg.header.stamp
				cimsg.header.frame_id = self.axis_frame_id
				# publish camera info
				self.caminfo_publisher.publish(cimsg)
				
				#print self.real_fps
				self.last_update = datetime.datetime.now()
				self.error_reading = False
				self.image_pub_freq.tick()
						
		except urllib2.URLError,e:
			self.error_reading = True
			self.error_reading_msg = e
			rospy.logerr('Axis:stream: Camera %s (%s:%d). Error: %s'%(self.camera_id, self.hostname, self.camera_number, e))
Example #15
 def cv2_to_compressed_ros(self, img):
     try:
         msg = CompressedImage()
         msg.header.stamp = rospy.Time.now()
         msg.format = "jpeg"
         msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()
         return msg
     except CvBridgeError as e:
         rospy.logerr(e)
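Most of the helpers in this collection JPEG-encode by hand with cv2.imencode; cv_bridge also provides cv2_to_compressed_imgmsg() (used in Example #55 below), which performs the same conversion. A hedged equivalent of the helper above written against that call:

# Sketch of the same conversion done via cv_bridge instead of a manual cv2.imencode.
import rospy
from cv_bridge import CvBridge, CvBridgeError

bridge = CvBridge()

def cv2_to_compressed_ros_via_bridge(img):
    try:
        msg = bridge.cv2_to_compressed_imgmsg(img)  # defaults to jpg encoding
        msg.header.stamp = rospy.Time.now()
        return msg
    except CvBridgeError as e:
        rospy.logerr(e)

The manual imencode route presumably persists in these snippets because it avoids the cv_bridge dependency for nodes that never touch raw Image messages.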
Example #16
	def pubOrig(self, args=None):
		image_msg_out = CompressedImage()
		image_msg_out.header.stamp = rospy.Time.now()
		image_msg_out.format = "png"
		image_msg_out.data = np.array(cv2.imencode('.png',
				self.original_image)[1]).tostring()

		self.pub_image.publish(image_msg_out)
		rospy.loginfo("Publishing original image")
Example #17
    def processImage(self, image_msg):
        image_cv = self.bridge.imgmsg_to_cv2(image_msg)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_cv)[1]).tostring()
        self.pub_image_comp.publish(msg)
Example #18
  def stream(self):
    url = 'http://%s/mjpg/video.mjpg' % self.axis.hostname
    url = url + "?fps=0&resolution=%dx%d" % (self.axis.width, self.axis.height)

    rospy.logdebug('opening ' + str(self.axis))

    # only try to authenticate if user/pass configured
    if self.axis.password != '' and self.axis.username != '':
      # create a password manager
      password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()

      # Add the username and password, use default realm.
      top_level_url = "http://" + self.axis.hostname
      password_mgr.add_password(None, top_level_url,
                                self.axis.username,
                                self.axis.password)
      handler = urllib2.HTTPBasicAuthHandler(password_mgr)

      # create "opener" (OpenerDirector instance)
      opener = urllib2.build_opener(handler)

      # ...and install it globally so it can be used with urlopen.
      urllib2.install_opener(opener)

    fp = urllib2.urlopen(url)

    while True:
      boundary = fp.readline()

      header = {}
      while 1:
        line = fp.readline()
        if line == "\r\n": break
        line = line.strip()

        parts = line.split(": ", 1)
        header[parts[0]] = parts[1]

      content_length = int(header['Content-Length'])

      img = fp.read(content_length)
      line = fp.readline()
      
      msg = CompressedImage()
      msg.header.stamp = rospy.Time.now()
      msg.header.frame_id = self.axis.frame_id
      msg.format = "jpeg"
      msg.data = img

      self.axis.pub.publish(msg)

      cimsg = self.axis.cinfo.getCameraInfo()
      cimsg.header.stamp = msg.header.stamp
      cimsg.header.frame_id = self.axis.frame_id
      cimsg.width = self.axis.width
      cimsg.height = self.axis.height
      self.axis.caminfo_pub.publish(cimsg)
Example #19
	def flipImageNP(self,msg):
		np_array = np.fromstring(msg.data, np.uint8)
		image_np = cv2.imdecode(np_array, cv2.CV_LOAD_IMAGE_COLOR)		
		#flip_arr = np.fliplr(np_arr)
		imageflip_np=cv2.flip(image_np,1)
		flip_im = CompressedImage()
		flip_im.data = np.array(cv2.imencode('.jpg', imageflip_np)[1]).tostring()
		#flip_im.data = flip_arr.tostring()
		flip_im.header.stamp = msg.header.stamp
		self.pub_image.publish(flip_im)
Example #20
  def callback(self,ros_data):
    np_arr = np.fromstring(ros_data.data, np.uint8)
    image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    image_np = cv2.flip(image_np, self.flip_code)

    #### Create CompressedImage ####
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
    self.image_pub.publish(msg)
Example #21
    def receive_image(self):   
        """
        Retrives image data coming from the rover and then
        transforms it to numpy and string data for further
        manupulation with OpenCV and Kivy and returns the final
        data
        """
        data = ''
        ldata = array.array('c')
        image_buffer = CompressedImage()
        image_buffer.format = "jpeg"
#        start = ''
        found_start = False
        found_end = False
        start_pos = 0 #current position in ldata
        #position of the end of frame in the current array of data
        end_pos = 0
        while (not found_end):
            data = self.video_socket.recv(self.max_tcp_buffer)
            if(data == ''):
                continue
            #check for the message start token 'MO_V'
            for i in range(0, len(data)-2):
                if (data[i:(i+4)] == 'MO_V'):
                    if not found_start:
                        found_start = True
                        start_pos = i
                        #crop the data only include stuff after the
                        #start token.
                        data = data[start_pos:len(data)]
                        break
                    elif not found_end:
                        found_end = True
                        end_pos = i
                        break
            #if you have found the start but not the end (in the
            #middle of a image frame)
            if (found_start and not found_end):
                #add the recent data to ldata
                ldata.extend(list(data))
            if found_end:
               ldata.extend(list(data[0:end_pos]))
            data = ''
        l_len = len(ldata)
        #### Create CompressedImage ####
        # convert to numpy to use with OpenCV, etc.
        image_buffer.header.stamp = rospy.Time.now()
        image_buffer.data = np.array(ldata[36:l_len]).tostring()

        return image_buffer
Example #22
	def __init__(self):
		self.node_name = "Virtual Mirror Nbuckman Tester"
		self.fake_pub_image = rospy.Publisher("/ernie/camera_node/image/compressed", CompressedImage,queue_size=1)
		img_file = rospy.get_param("~img_file","/home/noambuckman/test_images/01_horz.jpg")
		rospy.loginfo(img_file)
		im_cv = cv2.imread(img_file)
		img_str = cv2.imencode('.jpg', im_cv)[1].tostring()		
		pub_im=CompressedImage()
		pub_im.header.stamp = rospy.Time.now()
		pub_im.data = img_str
		#pub_im.data = np.array(cv2.imencode('.jpg', im_np)[1]).tostring()
		#flip_im.data = flip_arr.tostring()
		#flip_im.header.stamp = msg.header.stamp
		self.fake_pub_image.publish(pub_im)
Example #23
	def publish(self):
		img = cv2.imread("/home/qian/dev/catkin_ws/src/smart_ar/script/aurora.png", cv2.CV_LOAD_IMAGE_COLOR)
		M = cv2.getRotationMatrix2D((img.shape[1]/2,img.shape[0]/2),self.ga*100, 1.0 / self.dist)
		img = cv2.warpAffine(img,M,(img.shape[1], img.shape[0]))
		bridge = CvBridge()
		msg = CompressedImage()
		msg.format = "jpeg"
		msg.data = np.array(cv2.imencode(".jpg",img)[1]).tostring()
		msg.header.stamp = rospy.Time.now()
		msg.header.frame_id = 'image_compressed_publisher'
		msg.header.seq = self.seq
		self.seq += 1
		self.imgPublisher.publish(msg)
		print self.seq, 'image published'
Example #24
    def publishTestImage(self):
        original_img = cv2.imread(self.original_image_file, cv2.CV_LOAD_IMAGE_COLOR)

        #### Create CompressedImage ####
        out = CompressedImage()
        out.header.stamp = rospy.Time.now()
        out.format = "png"
        np_arr_out = cv2.imencode('.png', original_img)[1]
        out.data = np.array(np_arr_out).tostring()

        # Publish new image
        rospy.sleep(1)
        self.pub_mirror.publish(out)
        rospy.loginfo("[%s] published image"%self.node_name)
Example #25
File: debug.py Project: Snafu/ros
 def cbImage(self, img):
   # decode image
   np_arr = np.fromstring(img.data, np.uint8)
   color = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
   
   self.overlayInfo(color)
   
   #### Create CompressedImage ####
   msg = CompressedImage()
   msg.header.stamp = rospy.Time.now()
   msg.format = "jpeg"
   msg.data = np.array(cv2.imencode('.jpg', color)[1]).tostring()
   # Publish new image
   self.image_pub.publish(msg)
Example #26
def cropCallback(msg):
    time_stamp = msg.header.stamp
    img_bytes = BytesIO(msg.data)
    img = PIL.Image.open(img_bytes)
    
    resize = (80, 60)
    img = img.resize(resize, resample=PIL.Image.BILINEAR)
    np_img = np.asarray(img)
    
    msg = CompressedImage()
    msg.header.stamp = time_stamp
    msg.format = "png"
    msg.data = np.array(cv2.imencode('.png', np_img)[1]).tostring()
    publish_cropimg.publish(msg)
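cropCallback() depends on a module-level publish_cropimg publisher and on being registered as a subscriber. A sketch of that glue, with placeholder topic names:

# Hypothetical glue for cropCallback(); topic names are placeholders.
from io import BytesIO   # needed by cropCallback() above

import cv2               # needed by cropCallback() above
import numpy as np       # needed by cropCallback() above
import PIL.Image         # needed by cropCallback() above
import rospy
from sensor_msgs.msg import CompressedImage

rospy.init_node('crop_node')
publish_cropimg = rospy.Publisher('image_cropped/compressed', CompressedImage,
                                  queue_size=1)
rospy.Subscriber('image_raw/compressed', CompressedImage, cropCallback,
                 queue_size=1)
rospy.spin()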
Example #27
def cam_callback(msg):
	np_arr = np.fromstring(msg.data, np.uint8)
	
	# Decode 
	image_rgb = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)

	# Mirror image laterally
	image_rgb_rev =	image_rgb[:,::-1,:]
	
	msg = CompressedImage()
	msg.header.stamp = rospy.Time.now()
	msg.format = "jpeg"
	msg.data = np.array(cv2.imencode('.jpg', image_rgb_rev)[1]).tostring()
	publisher.publish(msg)
	rospy.loginfo("Re Published ")
Example #28
def set_scene(index):
    image = CompressedImage()
    # image.header.frame_id = '/odom_combined'
    # If you ever want to drive the base, you will want to use the line above, or /map or something.
    # Using /base_link means the robot will always stay exactly in the middle of the backdrop.
    image.header.frame_id = '/base_link'
    image.format = 'jpeg'
    try:
        scene = scenes[index]
    except TypeError:
        index = main_window.scene_comboBox.findText(index)
        scene = scenes[index]
    image.data = open(scene, 'rb').read()  # read the raw JPEG bytes
    image.header.stamp = rospy.Time.now()
    backdrop_publisher.publish(image)
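set_scene() assumes module-level scenes, backdrop_publisher and (for the string-index fallback) a Qt main_window. A stripped-down sketch of the non-GUI parts, with placeholder file paths and topic name:

# Hypothetical non-GUI setup for set_scene(); paths and topic are placeholders.
import rospy
from sensor_msgs.msg import CompressedImage

scenes = ['/tmp/backdrop_lab.jpg', '/tmp/backdrop_field.jpg']  # placeholder backdrops

rospy.init_node('backdrop_node')
backdrop_publisher = rospy.Publisher('backdrop/compressed', CompressedImage,
                                     queue_size=1, latch=True)

set_scene(0)  # an integer index takes the try branch; a string would need main_window
rospy.spin()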
Example #29
    def on_image(self, ros_data):
        compressed_in = np.fromstring(ros_data.data, np.uint8)
        image_in      = cv2.imdecode(compressed_in, cv2.CV_LOAD_IMAGE_COLOR)
        if self.prvs is None:
            self.prvs = cv2.resize(cv2.cvtColor(image_in,cv2.COLOR_BGR2GRAY), (width,height))
        else:
            next      = cv2.resize(cv2.cvtColor(image_in,cv2.COLOR_BGR2GRAY), (width,height))
            image_out = cv2.resize(image_in, (width,height))
            if self.should_be_controlled() : self.pub.publish(self.apply_control(next, image_out))
            self.prvs = next

            msg              = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format       = "jpeg"
            msg.data         = np.array(cv2.imencode('.jpg', image_out)[1]).tostring()
            self.ipub.publish(msg)
Example #30
    def _publish_image(self, header, image, timestamp):
        """Publish JPG image as a ROS message.

        :param header: The HTTP-like header read from the stream.
        :type header: dict
        :param image: The video frame in JPG.
        :type image: :py:obj:`basestring`
        :param timestamp: The time when the frame was captured (or its best estimation).
        :type timestamp: :py:class:`rospy.Time`
        """
        msg = CompressedImage()
        msg.header.stamp = timestamp
        msg.header.frame_id = self._axis._frame_id
        msg.format = "jpeg"
        msg.data = image
        self._axis._video_publisher.publish(msg)
Example #31
 def run(self):
     cap = cv2.VideoCapture(1)
     while (not rospy.is_shutdown()):
         ret, self.image_np = cap.read()
         t = time.time()
         self.hsv = cv2.cvtColor(self.image_np, cv2.COLOR_BGR2HSV)
         lower_blue = np.array([90, 30, 90])
         upper_blue = np.array([250, 230, 250])
         self.mask = cv2.inRange(self.hsv, lower_blue, upper_blue)
         self.mask = cv2.morphologyEx(self.mask, cv2.MORPH_OPEN, np.ones((5,5), np.uint8))
         M = cv2.moments(self.mask)
         try:
             cx = int(M["m10"]/M["m00"])
             cy = int(M["m01"]/M["m00"])
         except ZeroDivisionError:
             cx = 0
             cy = 0
         self.centroid = (cx, cy)
         cv2.circle(self.image_np, self.centroid, 5, (0, 0, 255), thickness=-1)
         msg = CompressedImage()
         msg.header.stamp = rospy.Time.now()
         msg.format = "jpeg"
         msg.data = np.array(cv2.imencode('.jpg', self.image_np)[1]).tostring()
         self.image_pub.publish(msg)
Example #32
def pub_record():
    while True:
        try:
            global startflag
            if startflag is True:
                global submsg
                if submsg != '':
                    #image000 = cv2.imread('/home/nvidia/workspace/src/detectAndRecog/src/yolo_surface/data/output.jpg')
                    global ipstream_dev1
                    if ipstream_dev1.isOpened():
                        _, img = ipstream_dev1.read()
                        global picId
                        cv2.imwrite(RecordPath + str(picId) + '.jpg', img)
                        msg = CompressedImage()
                        msg.data = np.array(cv2.imencode('.jpg',
                                                         img)[1]).tostring()
                        msg.header.frame_id = submsg + '/' + str(
                            picId) + '/' + str(subMultipId) + '-' + str(
                                subSingleId) + '-' + ResultLable
                        record_pub.publish(msg)
                        picId = int(picId) + 1

        except:
            pass
Example #33
 def __init__(self):
     ''' Initialize the server to enable the collection and publication of image and camera data. '''
     rospy.init_node('image_server')
     self.port = rospy.get_param('~port_number')
     self.camera_name = rospy.get_param('~camera_name')
     self.ios_clock_offset = 0
     self.bridge = CvBridge()
     self.clock_sub = rospy.Subscriber('/ios_clock', Float64,
                                       self.handle_ios_clock)
     self.pub_camera = rospy.Publisher('/' + self.camera_name +
                                       '/image_raw/compressed',
                                       CompressedImage,
                                       queue_size=10)
     self.pub_lowres = rospy.Publisher('/' + self.camera_name +
                                       '_lowres/image_raw/compressed',
                                       CompressedImage,
                                       queue_size=10)
     self.pub_camera_info = rospy.Publisher('/' + self.camera_name +
                                            '/camera_info',
                                            CameraInfo,
                                            queue_size=10)
     self.pub_lowres_info = rospy.Publisher('/' + self.camera_name +
                                            '_lowres/camera_info',
                                            CameraInfo,
                                            queue_size=10)
     self.cvmsg = CompressedImage()
     self.image_data = {}
     self.msg = CompressedImage()
     self.intrinsic_msg = CameraInfo()
     self.updated_intrinsics = None
     self.last_packet_timestamp = rospy.Time(0.0)
     UDP_IP = "0.0.0.0"
     self.sock = socket.socket(
         socket.AF_INET,  #Internet
         socket.SOCK_DGRAM)  #UDP
     self.sock.bind((UDP_IP, self.port))
Example #34
    def callback(self, ros_data):
        # '''Callback function of subscribed topic.
        # Here images get converted and features detected'''
        # if VERBOSE :
        #     print 'received image of type: "%s"' % ros_data.format
        #
        #### direct conversion to CV2 (needed below; otherwise image_np is undefined) ####
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # OpenCV >= 3.0
        #
        #
        #
        # cv2.imshow('cv_img', image_np)
        # cv2.waitKey(2)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        # Publish new image
        self.image_pub.publish(msg)
Example #35
    def main(self):
        print('start')
        cam = cv2.VideoCapture(0)
        cv2.namedWindow('publisher')
        cv2.moveWindow('publisher', 40, 30)
        while True:
            ret_val, cv_image = cam.read()
            #publising compressed image
            msg_raw_image = CompressedImage()
            msg_raw_image.header.stamp = rospy.Time.now()
            msg_raw_image.format = "jpeg"
            msg_raw_image.data = np.array(cv2.imencode(
                '.jpg', cv_image)[1]).tostring()
            self._pub1.publish(msg_raw_image)

            #publishing raw image
            self._pub2.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
            cv2.imshow('publisher', cv_image)
            key = cv2.waitKey(20) & 0xFF
            if key != 255:
                break
        cam.release()
        cv2.destroyAllWindows()
        exit()
Example #36
def pub_img():
    '''Callback function of subscribed topic.
    Here images get converted and features detected'''
    cap = cv2.VideoCapture(1)
    print('Initialized, started publishing')
    while (True):

        ret, frame = cap.read()

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', frame)[1]).tostring()
        # Publish new image
        pub = rospy.Publisher("/output/image_raw/compressed",
                              CompressedImage,
                              queue_size=1)
        r = rospy.Rate(10)
        try:
            pub.publish(msg)
            r.sleep()
        except:
            print('Could not publish')
Example #37
    def grab_and_publish(self, stream, publisher):
        # Can't change parameters during sequence so may have to break out
        while not self.needs_update and not rospy.is_shutdown():
            yield stream
            # Construct image_msg
            # Grab image from stream
            stamp = rospy.Time.now()
            stream.seek(0)
            stream_data = stream.getvalue()

            # Generate compressed image
            image_msg = CompressedImage()
            image_msg.format = "jpeg"
            image_msg.data = stream_data

            image_msg.header.stamp = stamp
            image_msg.header.frame_id = self.frame_id
            publisher.publish(image_msg)

            # Clear stream
            stream.seek(0)
            stream.truncate()

            rospy.sleep(0.001)  # 1ms
Example #38
    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        self.framerate_high = self.setupParam("~framerate_high", 30.0)
        self.framerate_low = self.setupParam("~framerate_low", 15.0)
        self.res_w = self.setupParam("~res_w", 640)
        self.res_h = self.setupParam("~res_h", 480)

        self.image_msg = CompressedImage()

        # Setup PiCamera

        self.camera = PiCamera()
        self.framerate = self.framerate_high  # default to high
        self.camera.framerate = self.framerate
        self.camera.resolution = (self.res_w, self.res_h)

        # For intrinsic calibration
        rospack = rospkg.RosPack()
        self.cali_file_folder = rospack.get_path('pi_camera') + "/config/"

        self.frame_id = rospy.get_namespace().strip(
            '/') + "/camera_optical_frame"

        self.has_published = False
        self.pub_img = rospy.Publisher("~image/compressed",
                                       CompressedImage,
                                       queue_size=1)
        self.sub_switch_high = rospy.Subscriber("~framerate_high_switch",
                                                Bool,
                                                self.cbSwitchHigh,
                                                queue_size=1)

        # Create service (for camera_calibration)
        self.srv_set_camera_info = rospy.Service("~set_camera_info",
                                                 SetCameraInfo,
                                                 self.cbSrvSetCameraInfo)

        self.stream = io.BytesIO()

        #self.camera.exposure_mode = 'off'
        # self.camera.awb_mode = 'off'

        self.is_shutdown = False
        self.update_framerate = False
        # Setup timer
        rospy.loginfo("[%s] Initialized." % (self.node_name))
Example #39
    def execute_cb(self, goal):
        rospy.loginfo("Goal Received!")

        start_time = rospy.Time.now()

        result = inference_server.msg.InferenceResult()
        if not goal.input_image.data:
            self.inference_output, num_detected, detected_classes, detected_scores, detected_boxes = object_detection.detect(
                self.inference_input)
        else:
            np_arr = np.fromstring(goal.input_image.data, np.uint8)
            goal_inference_input = cv2.imdecode(np_arr,
                                                cv2.CV_LOAD_IMAGE_COLOR)
            self.inference_output, num_detected, detected_classes, detected_scores, detected_boxes = object_detection.detect(
                goal_inference_input)

        self.inference_image = CompressedImage()
        self.inference_image.header.stamp = rospy.Time.now()
        self.inference_image.format = "jpeg"
        self.inference_image.data = np.array(
            cv2.imencode('.jpg', self.inference_output)[1]).tostring()
        self.image_pub.publish(self.inference_image)

        result.image = self.inference_image
        result.num_detections = num_detected
        result.classes = detected_classes
        result.scores = detected_scores
        for i in range(len(detected_boxes)):
            box = RegionOfInterest()
            box.y_offset = detected_boxes[i][0]
            box.height = (detected_boxes[i][2] - detected_boxes[i][0])
            box.x_offset = detected_boxes[i][1]
            box.width = (detected_boxes[i][3] - detected_boxes[i][1])
            box.do_rectify = True
            result.bounding_boxes.append(box)

        total_inference_time = rospy.Time.now() - start_time
        total_inference_time = total_inference_time.to_sec()
        rospy.loginfo(
            "The inference took {} seconds".format(total_inference_time))

        try:
            self._as.set_succeeded(result)
        except Exception as e:
            rospy.logerr(str(e))
            self._as.set_aborted(text=str(e))
Example #40
    def __init__(self):
        """OpenCV feature detectors with ros CompressedImage Topics in python.
        
        This example subscribes to a ros topic containing sensor_msgs 
        CompressedImage. It converts the CompressedImage into a numpy.ndarray, 
        then detects and marks features in that image. It finally displays 
        and publishes the new image - again as CompressedImage topic.
        """
        self.publisher = rospy.Publisher("/image_raw/compressed",
                                         CompressedImage,
                                         queue_size=4)
        self.msg = CompressedImage()

        rospy.loginfo('Setting camera level...')
        self.camera = cv2.VideoCapture(int(rospy.get_param('camera_port', 0)))

        self.takeImage(lightSettingSnaps=30)
Example #41
 def gen_and_send(self):
     ts = time.time()
     data = []
     w = 32
     nvecs = w * w
     for i in range(w):
         for j in range(w):
             v = (math.sin(i / 16.0 + ts / 3.0) +
                  math.cos(j / 16.0 + ts / 3.0)) * 0.1
             data += [int(v * self.SCALE_FACTOR)]
     rvl.Clear()
     rvl.plain = data
     rvl.CompressRVL(chan=0)
     msg = CompressedImage(header=Header(
         frame_id=self.FRAME_ID, stamp=self.get_clock().now().to_msg()),
                           format="RVL",
                           data=rvl.encoded)
     self.pub.publish(msg)
Example #42
    def __init__(self, _starttime):
        self.floorcam_subscriber = rospy.Subscriber('/pi_floorcam/image_raw/compressed',
                                                    CompressedImage, self.callback_floorcam)
        self.pub_box = rospy.Publisher(
        '/pi_floorcam_box/image_raw/compressed', CompressedImage, queue_size=3)

        #initialize
        self.np_arr = np.empty(0)
        self.image_cv = np.zeros((0,0,3), np.uint8)
        self.image_cv_box = np.zeros((0,0,3), np.uint8)
        self.image_cv_gray = np.zeros((0,0,1), np.uint8)
        self.msg_box = CompressedImage()
        self.msg_box.format = "jpeg"
        self.imageReceived = False
        self.r = rospy.Rate(10) #10Hz
        # self.save_mode = False
        self.isStarted = False
        self.average_box = 0.0
Example #43
 def publish_rvl(self):
     now = self.robot.getTime()
     if now - self.lastUpdate < (self.range.getSamplingPeriod() / 1000.0):
         return
     self.robot.step(self.timestep)
     rvl.Clear()
     # Remap to 16-bit integer
     raw = self.range.getRangeImage()
     if raw is None:
         self.get_logger().info("No range image", throttle_duration_sec=3)
         return
     rvl.plain = [int(65535 * px / self.MAX_RANGE) for px in raw]
     rvl.CompressRVL(chan=0)
     header = Header(frame_id=self.FRAME_ID,
                     stamp=Time(sec=int(now),
                                 nanosec=int((now - int(now)) * 1.0e+9)))
     msg = CompressedImage(header=header, format="RVL", data=rvl.encoded)
     self.pub.publish(msg)
     self.lastUpdate = self.robot.getTime()
Example #44
    def __init__(self, pipeline, image_pub):
        self.image_pub = image_pub

        self.pipeline = Gst.parse_launch(pipeline)
        if self.pipeline is None:
            raise PipelineException(
                'Failed to parse pipeline: {}'.format(pipeline))

        self.sink = self.pipeline.get_by_name('sink')
        if self.sink is None:
            raise PipelineException(
                'Could not find an element named "sink" in pipeline: {}'.
                format(pipeline))
        self.sink.set_property('emit-signals', True)
        self.sink.set_property('sync', False)
        self.sink.set_property('max-buffers', 2)
        self.sink.set_property('drop', True)
        self.sink.connect('new-sample', self._publish_sample, None)

        self.image_msg = CompressedImage()
Example #45
    def __init__(self,
                 calibration_object,
                 publisher_compressed_stream,
                 flip_img=False):
        super(GazeboStream,
              self).__init__(calibration_object, publisher_compressed_stream,
                             flip_img)

        self.__image_raw_sub = rospy.Subscriber('/gazebo_camera/image_raw',
                                                Image,
                                                self.__callback_sub_image_raw)
        self.__image_compressed_sub = rospy.Subscriber(
            '/gazebo_camera/image_raw/compressed', CompressedImage,
            self.__callback_sub_image_compressed)
        self.__bridge = CvBridge()
        self.__last_image_raw = None
        self.__last_image_compressed_msg = CompressedImage()

        self._running = True
        self._should_run = True
Example #46
    def __init__(self, publish_intrinsic_info, camera_info,
                 camera_info_intrinsics):
        self.done = False
        self.pub_img = rospy.Publisher("~image/compressed",
                                       CompressedImage,
                                       queue_size=1)

        self.pub_camera_info = rospy.Publisher("~camera_info",
                                               CameraInfo,
                                               queue_size=1)

        self.pub_camera_info_intrinsics = rospy.Publisher(
            "~camera_info_intrinsic_calibration", CameraInfo, queue_size=1)

        self.camera_info = camera_info
        self.camera_info_intrinsics = camera_info_intrinsics

        self.msg_img = CompressedImage()
        self.stream = io.BytesIO()
        self.publish_intrinsic_info = publish_intrinsic_info
Example #47
    def initCamera(self):

        self.get_logger().info('Initializing Camera ')

        self.image_msg = CompressedImage()

        #load parameter
        self.framerate = self.parameters["BasicParam"]["framerate"]
        self.res_width = self.parameters["BasicParam"]["res_width"]
        self.res_height = self.parameters["BasicParam"]["res_height"]
        self.vertical_flip = self.parameters["BasicParam"]["vertical_flip"]
        self.horizontal_flip = self.parameters["BasicParam"]["horizontal_flip"]

        #create Picamera object
        self.picamera = PiCamera()
        self.picamera.resolution = (self.res_width, self.res_height)
        self.picamera.vflip = self.vertical_flip
        self.picamera.hflip = self.horizontal_flip
        self.picamera.framerate = self.framerate
        self.compress_stream = io.BytesIO()
        self.get_logger().info('Initialized Camera ')
Example #48
def talker():
    pub = rospy.Publisher('cam_data', CompressedImage, queue_size=10)
    pub_enc = rospy.Publisher('cam_data_enc', CompressedImage, queue_size=10)
    rospy.init_node('drone_cam', anonymous=True)

    # rate = rospy.Rate(50) # 10hz
    # a = ""
    # for i in range(0,size):
    #     a = a + "a"
    # rospy.loginfo(sys.getsizeof(a))
    while not rospy.is_shutdown():
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.zeros((10, 10)).tostring()
        pub.publish(msg)
        msg_enc = CompressedImage()
        msg_enc.header.stamp = rospy.Time.now()
        msg_enc.format = "jpeg"
        msg_enc.data = encrypt(
            Padding.appendPadding(np.zeros((10, 10)).tostring(), mode='CMS'),
            key, AES.MODE_CBC, salt)
        pub_enc.publish(msg_enc)
Example #49
 def subscribe_robot_data(self):
     rospy.loginfo("Subscribing to robot data.")
     # Reset variables
     self.reset_variables()
     # Create image buffers
     self.raw_image_buffer = collections.deque(self.image_buffer_size*[Image()], self.image_buffer_size)
     self.compressed_image_buffer = collections.deque(self.image_buffer_size*[CompressedImage()], self.image_buffer_size)
     self.last_raw_image_time = rospy.Time.now()
     self.last_compressed_image_time = rospy.Time.now()
     # Create robot data subscribers
     self.robot_data_subscribers = []
     self.robot_data_subscribers.append(rospy.Subscriber(self.robot_mode_topic, RobotMode, self.robot_mode_callback))
     self.robot_data_subscribers.append(rospy.Subscriber(self.gps_fix_topic, NavSatFix, self.navsatfix_callback))
     self.robot_data_subscribers.append(rospy.Subscriber(self.local_pose_topic, Pose, self.pose_callback))
     self.robot_data_subscribers.append(rospy.Subscriber(self.map_topic, OccupancyGrid, self.map_callback))
     self.robot_data_subscribers.append(rospy.Subscriber(self.image_topic, Image, self.image_callback))
     self.robot_data_subscribers.append(rospy.Subscriber(self.image_compressed_topic, CompressedImage, self.compressed_image_callback))
     # If using tf, create tf listener for local position
     if self.use_tf:
         self.tfBuffer = tf2_ros.Buffer(rospy.Duration(self.send_pose_period))
         self.listener = tf2_ros.TransformListener(self.tfBuffer)
     pass
Example #50
    def pub_single_compressed_image(self):
        print "BUFFER 1"
        if self.cur_image_i < self.total_msg_count: # if not all the messages are sent yet.
            print "BUFFER 2"
            print self.cur_image_i
            view_output = next(self.view_generator) # view_output class : <class 'rosbag.bag.BagMessage'>, has return value (topic_name, msg, time_stamp of the message)

            # message generation
            msg = CompressedImage() # create a compressed image object to publish
            msg = view_output.message # message content of the view is what we want to publish
            rospy.loginfo("[{}] publishing image {}".format(self.node_name, self.cur_image_i))
            self.pub_compressed_image.publish(msg) # publish the message
            self.cur_image_i += 1
            print "BUFFER 3"
        else: # after sending all messages close the bag
            if self.send_out_all_images == False:
                rospy.loginfo('[{}] send all the messages'.format(self.node_name))
                self.input_rosbag.close()
                self.send_out_all_images = True
                self.send_status = rospy.set_param(self.pm_send_status,1)
            else:
                print("!!!!!!!!!! SHOULD NOT BE HERE 2 ")
Example #51
    def __init__(self):
        super().__init__('acapture_camera_node',
                         allow_undeclared_parameters=True,
                         automatically_declare_parameters_from_overrides=True)

        self.node_name = self.get_name()
        self.log = self.get_logger()

        self.cap = acapture.open(0)  #/dev/video0

        #self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        #self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        #self.cap.set(cv2.CAP_PROP_FPS, 5)

        self.log.info("successfully opened camera device!")

        # init ROS opencv bridge
        self.bridge = CvBridge()
        self.log.info("sucessfully created CvBridge.")

        self.pub_img = self.create_publisher(CompressedImage,
                                             "image/compressed", 1)
        self.image_msg = CompressedImage()
Example #52
    def __init__(self, main=None):
        self.main = main

        self.detected = False
        self.captured_image = CompressedImage()
        self.camera_info = CameraInfo()
        self.detection_data = AprilTagDetectionArray()

        self.best_score = -1
        self.best_idx = -1
        self.best_position = Point()

        # Register listener for detection.
        rospy.wait_for_message("tag_detections", AprilTagDetectionArray)
        rospy.Subscriber("tag_detections", AprilTagDetectionArray, self.get_detect_data)

        # Register listener for capture.
        rospy.wait_for_message("/camera/rgb/image_color/compressed", CompressedImage)
        rospy.Subscriber("/camera/rgb/image_color/compressed", CompressedImage, self.get_capture_data)

        # Register listener for camera info.
        rospy.wait_for_message("/camera/rgb/camera_info", CameraInfo)
        rospy.Subscriber("/camera/rgb/camera_info", CameraInfo, self.get_camera_info)
Example #53
    def callback(self, ros_data):
        '''Callback function of subscribed topic. 
        Here images get converted and encrypted'''

        # DEBUG:
        # print 'Received image of type: "%s"' % ros_data.format

        #### direct conversion to CV2 ####
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)

        font = cv2.FONT_HERSHEY_SIMPLEX
        image_np_with_text = cv2.copyMakeBorder(image_np, 0, 0, 0, 0,
                                                cv2.BORDER_REPLICATE)

        #cv2.putText(image_np_with_text,"{:.2f}".format(self.fps.partialfps()),(10,470), font, 1,(255,255,255),2)
        #cv2.imshow('Before_Encryption', image_np_with_text)
        #cv2.waitKey(2)
        #self.fps.update()

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()

        if (self.enc_mode == MODE_3DES):
            msg.data = self.encrypt_chunk_3DES(msg.data, self.key_3DES,
                                               self.iv_3DES)

        elif (self.enc_mode == MODE_AES):
            msg.data = b64encode(msg.data)
            plaintext_padded = self.AddPadding(msg.data, INTERRUPT, PAD,
                                               BLOCK_SIZE)
            encrypted = self.encrypt_chunk_AES(plaintext_padded)
            msg.data = b64encode(encrypted)

        # Publish new image
        self.image_pub.publish(msg)
Example #54
    def __init__(self, json_path, weights_path):

        self.img = CompressedImage()
        self.grayimg = None
        self.gray_resize = None
        self.img_size = (200, 200)
        self.img_sub = rospy.Subscriber(
            '/zed/zed_node/left/image_rect_color/compressed', CompressedImage,
            self.img_callback)

        # DroNet
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
        self.config.log_device_placement = True
        self.sess = tf.Session(config=self.config)
        self.setsess = set_session(self.sess)

        self.loaded_json = open(json_path, 'r')
        self.loaded_model = self.loaded_json.read()

        self.model = keras.models.model_from_json(self.loaded_model)
        self.model.load_weights(weights_path)
        self.isGet = False
        """
Example #55
def get_messages_filled_with_data(robot_name):
    array_msg = ArtifactArray()
    compressed_img_msg = CompressedImage()
    artifact_img_msg = ArtifactImg()

    array_msg.header.seq = 1
    array_msg.header.stamp = rospy.Time.now()
    array_msg.header.frame_id = ''
    array_msg.owner = robot_name

    id = 0

    for i in range(1, 20):
        id += 1
        array_msg.artifacts.append(make_message(id, i))

    cv_img = cv2.imread(ARTIFACT_IMG_PATH, cv2.IMREAD_COLOR)
    compressed_img_msg = CvBridge().cv2_to_compressed_imgmsg(cv_img)

    artifact_img_msg.header.stamp = rospy.Time.now()
    artifact_img_msg.artifact_img = compressed_img_msg
    artifact_img_msg.artifact_id = str(id)

    return array_msg, compressed_img_msg, artifact_img_msg
Example #56
    def __init__(self, filename, frame_id, camera_info_url, prefix):
        self.filename = filename
        self.frame_id = frame_id
        self.camera_info_url = camera_info_url
        
        self.msg = CompressedImage()        
        self.msg.header.frame_id = self.frame_id
        self.msg.format = "jpeg"
        im = cv2.imread(filename)
        self.width = im.shape[1]
        self.height = im.shape[0]
        with open(filename, 'rb') as f:
            self.msg.data = f.read()
        self.valid = True
        self.updated = True

        # generate a valid camera name based on the hostname
        self.cname = camera_info_manager.genCameraName(filename)
        self.cinfo = camera_info_manager.CameraInfoManager(cname = self.cname,
                                                   url = self.camera_info_url, namespace = prefix)
        self.cinfo.loadCameraInfo()         # required before getCameraInfo()
        self.st = None
        self.pub = rospy.Publisher(prefix + "/image_raw/compressed", CompressedImage, self)
        self.caminfo_pub = rospy.Publisher(prefix + "/camera_info", CameraInfo, self)
Example #57

detector = SignDetector((model_loc, model_det))


from sensor_msgs.msg import Image
from sensor_msgs.msg import CompressedImage
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError

sign_pub = rospy.Publisher("sign_name", String, queue_size=10)
img_pub = rospy.Publisher("result_image/compressed", CompressedImage, queue_size=10)
# img_pub = rospy.Publisher("result_image", Image, queue_size=1)

bridge = CvBridge()
msgImg = CompressedImage()
msgImg.format = "jpeg"

class ImageProcessor:
    def __init__(self):
        rospy.Subscriber("sign_image", Image, self.callback, queue_size=1)
        self.cv_image = None

    def callback(self, data):
        try:
            self.cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)


def print_fps(fps):
Example #58
    def callback(self, ros_data):
        aruco = cv2.aruco
        dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
        arucoMarkerLength = 0.096
        PI = 3.1415
        self.cameraMatrix = np.asarray([[255.64885718, 0., 163.44315533],
                                        [0., 255.85790733, 118.62161079],
                                        [0., 0., 1.]])
        self.distanceCoefficients = np.asarray(
            [0.20120402, -0.43881712, 0.00463485, 0.00622489, 0.12081623])
        '''Callback function of subscribed topic. 
        Here images get converted and features detected'''
        if VERBOSE:
            print 'received image of type: "%s"' % ros_data.format

        #### direct conversion to CV2 ####
        np_arr = np.fromstring(ros_data.data, np.uint8)
        # image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # OpenCV >= 3.0:

        # hsv_img = cv2.cvtColor(image_np, cv2.COLOR_BGR2HSV)

        # thresholded_img = cv2.inRange(hsv_img, (35, 80, 110), (60, 255,  255),)

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            image_np, dictionary)
        aruco.drawDetectedMarkers(image_np, corners, ids, (0, 255, 0))

        if len(corners) > 0:
            self.rvec, self.tvec, _ = aruco.estimatePoseSingleMarkers(
                corners[0], arucoMarkerLength, self.cameraMatrix,
                self.distanceCoefficients)
            image_np = aruco.drawAxis(image_np, self.cameraMatrix,
                                      self.distanceCoefficients, self.rvec,
                                      self.tvec, 0.1)
            # (roll_angle, pitch_angle, yaw_angle) =  self.rvec[0][0][0], self.rvec[0][0][1], self.rvec[0][0][2]
            # if pitch_angle < 0:
            #     roll_angle, pitch_angle, yaw_angle = -roll_angle, -pitch_angle, -yaw_angle
            # print (roll_angle, pitch_angle, yaw_angle)
            # print(self.tvec)
            _quat = tf.transformations.quaternion_from_euler(
                -PI / 2.0, -PI, PI / 2.0)
            # _quat = tf.transformations.quaternion_from_euler(-PI / 2.0 + (roll_angle + PI), -PI + pitch_angle, PI / 2.0 + yaw_angle)

            # _quat = (0., 0., 0., 1.)
            self._broadcaster.sendTransform(
                (self.tvec[0][0][2], self.tvec[0][0][0], self.tvec[0][0][1]),
                _quat, rospy.Time.now(), "ar", "camera_face")
            # self._broadcaster.sendTransform((-0, 0, 0), _quat, rospy.Time.now(), "base_link", "ar")
            self.servo[0] -= 90.0 - math.degrees(
                math.atan2(self.tvec[0][0][2], self.tvec[0][0][0]))
            self.servo[1] += 90.0 - math.degrees(
                math.atan2(self.tvec[0][0][2], self.tvec[0][0][1]))

        self.servo[1] = Constrain(self.servo[1], 110, 30)
        self.servo[0] = Constrain(self.servo[0], 135, 45)

        self._camera_pub.publish(
            str(self.servo[0]) + ", " + str(self.servo[1]) + ",")
        # print(self.servo)

        # cv2.imshow('cv_img', image_np)
        # cv2.waitKey(2)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        # Publish new image
        self.image_pub.publish(msg)
Example #59
	scld = cv2.resize(crop,None,fx=0.4,fy=1.2,interpolation=cv2.INTER_AREA)

	# 3. Color transform ---------------------------------------------------
	color = cv2.cvtColor(scld, cv2.COLOR_BGR2GRAY)
	hght, wdth = color.shape[:2]
	print(hght, wdth)
	
	# 4. Inverse Perspective Mapping ---------------------------------------
	pts1 = np.float32([[wdth/2-55,0],[wdth/2+55,0],[0,hght],[wdth,hght]])
	pts2 = np.float32([[0,0],[wdth,0],[wdth/2-40,hght],[wdth/2+40,hght]])
	
	M = cv2.getPerspectiveTransform(pts1,pts2)
	dst = cv2.warpPerspective(color,M,(wdth,hght))
	
	# Compress image to pub ------------------------------------------------
	cropImage = CompressedImage()
	cropImage.header.stamp = rospy.Time.now()
	cropImage.format = "jpeg"
	cropImage.data = np.array(cv2.imencode('.jpg', dst)[1]).tostring()
	pub.publish(cropImage)

	# Print stats ----------------------------------------------------------
	e2 = cv2.getTickCount()
	t = (e2 - e1)/cv2.getTickFrequency()

	print('frame time:'+str(t)+'-------------------------------block end')
        

def main():

    global pub
Example #60
import cv2
from subprocess import Popen, PIPE
from shlex import split

from util import RobotUtil

# ROS Libraries
import rospy
import roslib

from sensor_msgs.msg import CompressedImage

VERBOSE = True

# CompressedImage Message Setup
msg = CompressedImage()
msg.format = 'png'


def image_pub():

    # ROS Node setup
    if VERBOSE:
        print('Starting image_pub node...')

    client = RobotUtil.VideoStreamClient(VERBOSE=VERBOSE, BGR2RGB=True)
    client.start()

    pub = rospy.Publisher('/PI_CAM/image_raw/compressed', CompressedImage)
    rospy.init_node('image_pub', anonymous=True)
    rate = rospy.Rate(100)  # 100 Hz to prevent aliasing of 40 FPS feed