Example #1
    def callback_right_with_3D(self, ros_data):
        '''Callback function of the subscribed topic.
        Here images get converted and features are detected.'''
        self.right_header = ros_data.header.stamp.secs        
        
        #### direct conversion to CV2 ####
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, 1)

        # convert np image to grayscale
        gray_image = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)

        if self.right_header == self.left_header and self.group_4_recieved:
            s_t = time.clock()
            print self.right_header, self.left_header
            self.group_4_recieved = False

            left_gray = cv2.cvtColor(self.left_image, cv2.COLOR_BGR2GRAY)

            group_4_out = self.group_4
            
#            PointCloudPose()
#            group_4_out.header = self.group_4.header
#std_msgs/Int16 pose_id
#std_msgs/Int16 pose_id_max
#sensor_msgs/CompressedImage image_left
#sensor_msgs/CompressedImage image_right
#geometry_msgs/Pose spin_center_pose
#sensor_msgs/PointCloud2 carmine_pointcloud 
#geometry_msgs/Pose carmine_pose
#sensor_msgs/PointCloud2 bumblebee_pointcloud
#geometry_msgs/Pose bumblebee_pose_left
#geometry_msgs/Pose bumblebee_pose_right            
            
            

            msg = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format = "jpeg"
            msg.data = np.array(cv2.imencode('.jpg', left_gray)[1]).tostring()
            # Publish new image
            group_4_out.image_left = msg
            
            msg = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format = "jpeg"
            msg.data = np.array(cv2.imencode('.jpg', gray_image)[1]).tostring()
            # Publish new image
            group_4_out.image_right = msg
            
            #group_4_out.bumblebee_pointcloud = pcloud            
            self.pub_group.publish(group_4_out)

            print "save time", time.clock() - s_t       
    def grabAndPublish(self,stream,publisher):
        # Grab image from stream
        stream.seek(0)
        img_data = stream.getvalue()

        if self.uncompress:
            # Publish raw image
            data = np.fromstring(img_data, dtype=np.uint8)
            image = cv2.imdecode(data, 1)
            image_msg = self.bridge.cv2_to_imgmsg(image)
        else:
            # Publish compressed image only
            image_msg = CompressedImage()
            image_msg.data = img_data
            image_msg.format = "jpeg"
            
        image_msg.header.stamp = rospy.Time.now()
        # Publish 
        publisher.publish(image_msg)
        # Clear stream
        stream.seek(0)
        stream.truncate()

        if not self.has_published:
            rospy.loginfo("[%s] Published the first image." %(self.node_name))
            self.has_published = True
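
Most of the snippets in this collection follow the same round trip: decode the incoming CompressedImage with cv2.imdecode, work on the NumPy array, re-encode with cv2.imencode, and publish a fresh CompressedImage. Below is a minimal sketch of that pattern on its own; the node and topic names (compressed_relay, in_topic, out_topic) are placeholders and not taken from any of the projects above.

#!/usr/bin/env python
# Minimal sketch of the decode -> process -> re-encode round trip.
# Node and topic names are placeholders (assumptions).
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import CompressedImage

def relay_gray(msg, pub):
    # CompressedImage bytes -> NumPy buffer -> OpenCV image
    arr = np.frombuffer(msg.data, np.uint8)
    img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # OpenCV image -> JPEG bytes -> CompressedImage
    out = CompressedImage()
    out.header.stamp = rospy.Time.now()
    out.format = "jpeg"
    out.data = np.array(cv2.imencode('.jpg', gray)[1]).tobytes()
    pub.publish(out)

if __name__ == '__main__':
    rospy.init_node('compressed_relay')
    pub = rospy.Publisher('out_topic/compressed', CompressedImage, queue_size=1)
    rospy.Subscriber('in_topic/compressed', CompressedImage, relay_gray,
                     callback_args=pub, queue_size=1)
    rospy.spin()
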
Example #3
def handle_pkt(pkt=None):
    ts_begin_loc = pkt.find(begin_timestamp_marker)
    ts_end_loc = pkt.find(end_timestamp_marker)

    if ts_begin_loc == -1 or ts_end_loc == -1:
        #something's fishy, discard jpeg
        print "JPEG discarded, malformed data"
        return 
        
    ts = float(pkt[ts_begin_loc+len(begin_timestamp_marker):ts_end_loc])
	
#    if ts > last_ts:
#	last_ts = ts
#    else:
#	return

    pkt = pkt[ts_end_loc+len(end_timestamp_marker):]

    print "{} bytes of JPEG recvd".format(len(pkt))
    msg = CompressedImage()
    msg.header.stamp  = rospy.Time(tango_clock_offset + float(ts))
    msg.header.frame_id = camera_name
    msg.data = pkt
    msg.format = 'jpeg'
    pub_camera.publish(msg)
def grabFrame(event):
   msg = CompressedImage()
   msg.header.frame_id = frame_id
   msg.format = "jpeg"
   resp, msg.data = http.request("http://" + host + request)
   msg.header.stamp = rospy.Time.now()
   pub.publish(msg)
Example #5
 def publish(self, raw_img):
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = "jpeg"
     msg.data = np.array(cv2.imencode(".jpg", raw_img)[1]).tostring()
     # TODO compile sensor_msg error , use String instead
     self.image_pub.publish(msg.data)
  def callback(self, ros_data):
    #### direct conversion to CV2 ####
    np_arr = np.fromstring(ros_data.data, np.uint8)
    image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    
    #(rows,cols,channels) = image_np.shape
    #image_np = image_np[0:64, 0:480] 

    #equ = cv2.equalizeHist(image_np)
    #res = np.hstack((image_np,equ))

    #### Feature detectors using CV2 #### 
    # "","Grid","Pyramid" + 
    # "FAST","GFTT","HARRIS","MSER","ORB","SIFT","STAR","SURF", "GridFAST"
    #method = "GridFAST"
    #feat_det = cv2.FeatureDetector_create(method)
    #time1 = time.time()

    # convert np image to grayscale
    #featPoints = feat_det.detect( cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY))
    #time2 = time.time()
    #if VERBOSE :
    #  print '%s detector found: %s points in: %s sec.'%(method, len(featPoints),time2-time1)

    #for featpoint in featPoints:
    #  x,y = featpoint.pt
    #  cv2.circle(image_np,(int(x),int(y)), 3, (0,0,255), -1)

    #### Create CompressedImage ####
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
    # Publish new image
    self.image_pub.publish(msg)
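
The commented-out feature-detection block above uses cv2.FeatureDetector_create, which was removed in OpenCV 3. A rough equivalent with the current API, indented as if it replaced that block inside the callback, might look like the sketch below (a sketch only, assuming OpenCV >= 3 and the decoded BGR frame image_np):

    # Sketch only: feature detection with the OpenCV 3+ API.
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    detector = cv2.ORB_create()          # or cv2.FastFeatureDetector_create()
    keypoints = detector.detect(gray, None)
    for kp in keypoints:
        x, y = kp.pt
        cv2.circle(image_np, (int(x), int(y)), 3, (0, 0, 255), -1)
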
Example #7
def ros_compressed_from_numpygray(np_arr):
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    np_arr = 255*(1.0*np_arr/np.max(np_arr))
    msg.data = np.array(cv2.imencode('.jpg', np.array(np_arr, dtype = np.uint8))[1]).tostring()
    return msg
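
A short usage sketch for the helper above; the float32 array and the topic name are assumptions for illustration:

# Usage sketch (assumed names): normalize and compress a float grayscale array.
depth = np.random.rand(480, 640).astype(np.float32)   # placeholder data
depth_pub = rospy.Publisher('depth/compressed', CompressedImage, queue_size=1)
depth_pub.publish(ros_compressed_from_numpygray(depth))
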
Example #8
    def __init__(self):
        # Get the ~private namespace parameters from command line or launch file.
        self.UDP_IP = rospy.get_param('~UDP_IP', '192.168.50.37') #get_ip.get_local()
        #if self.UDP_IP != 'localhost':
        #    self.UDP_IP = int(self.UDP_IP)
        self.UDP_PORT = int(rospy.get_param('~UDP_PORT', '49155'))

        # Create a dynamic reconfigure server.
        #self.server = DynamicReconfigureServer(ConfigType, self.reconfigure)
        
        # create ros::Publisher to send CompressedImage messages
        cam_pub = rospy.Publisher('image_raw/compressed', CompressedImage)

        # print a startup message only once
        t = True
        while not rospy.is_shutdown():
            if t:
                print "Camera Running"
            t = False

            udp_string = self.receive_packet()
            jpg_string = self.parse_string(udp_string)

            cam_msg = CompressedImage()
            cam_msg.header.stamp = rospy.Time.now()  # TODO: should be from robot time
            cam_msg.format = "jpeg"
            cam_msg.data = jpg_string

            cam_pub.publish(cam_msg)
Example #9
  def stream(self):
    url = 'http://%s:%s@%s/mjpg/video.mjpg' % (self.axis.username, self.axis.password, self.axis.hostname)
    #url = url + "?resolution=%dx%d" % (self.axis.width, self.axis.height)
    fp = urllib.urlopen(url)

    while True:
      boundary = fp.readline()

      header = {}
      while 1:
        line = fp.readline()
        if line == "\r\n": break
        line = line.strip()

        parts = line.split(": ", 1)
        header[parts[0]] = parts[1]

      content_length = int(header['Content-Length'])

      img = fp.read(content_length)
      line = fp.readline()
      
      msg = CompressedImage()
      msg.header.stamp = rospy.Time.now()
      msg.header.frame_id = self.axis_frame_id
      msg.format = "jpeg"
      msg.data = img

      self.axis.pub.publish(msg)

      """
    def grabAndPublish(self,stream,publisher):
        while not self.update_framerate and not self.is_shutdown and not rospy.is_shutdown(): 
            yield stream
            # Construct image_msg
            # Grab image from stream
            stamp = rospy.Time.now()
            stream.seek(0)
            stream_data = stream.getvalue()
            # Generate compressed image
            image_msg = CompressedImage()
            image_msg.format = "jpeg"
            image_msg.data = stream_data

            image_msg.header.stamp = stamp
            image_msg.header.frame_id = self.frame_id
            publisher.publish(image_msg)
                        
            # Clear stream
            stream.seek(0)
            stream.truncate()
            
            if not self.has_published:
                rospy.loginfo("[%s] Published the first image." %(self.node_name))
                self.has_published = True

            rospy.sleep(rospy.Duration.from_sec(0.001))
 def sendTestImage(self):
     img_name = '0'+str(self.sent_image_count)+'_orig.'+self.test_image_format
     test_img = cv2.imread(self.test_image_path+img_name)
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = self.test_image_format
     msg.data = np.array(cv2.imencode('.'+self.test_image_format, test_img)[1]).tostring()
     self.pub_test_image.publish(msg)
    def pubOriginal(self, msg):
        compressed_img_msg = CompressedImage()
        compressed_img_msg.format = "png"
        compressed_img_msg.data = np.array(cv2.imencode(".png", self.testImageOrig)[1]).tostring()

        # compressed_img_msg.header.stamp = msg.header.stamp
        # compressed_img_msg.header.frame_id = msg.header.frame_id
        self.pub_raw.publish(compressed_img_msg)
Example #13
 def pubImage(self):
     filename = '0' + str(self.num_sent) + '_orig.png'
     image = cv2.imread(self.im_dir + filename)
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = 'png'
     msg.data = np.array(cv2.imencode('.png', image)[1]).tostring()
     self.pub_fake_images.publish(msg)
     rospy.loginfo("[%s] Image %s published" % (self.node_name, filename))
Example #14
	def stream(self):
		"""
			Reads and processes the stream from the camera
		"""		 
		try:
			#If flag self.enable_auth is 'True' then use the user/password to access the camera. Otherwise use only self.url
			if self.enable_auth:
				req = urllib2.Request(self.url, None, {"Authorization": self.auth })
			else:
				req = urllib2.Request(self.url)
				
			fp = urllib2.urlopen(req, timeout = self.timeout)
			#fp = urllib2.urlopen(req)

			while self.run_camera and not rospy.is_shutdown():
				
				boundary = fp.readline()
				
				header = {}
				
				while not rospy.is_shutdown():
					line = fp.readline()
					#print 'read line %s'%line
					if line == "\r\n": break
					line = line.strip()
					#print line
					parts = line.split(": ", 1)
					header[parts[0]] = parts[1]

				content_length = int(header['Content-Length'])
				#print 'Length = %d'%content_length
				img = fp.read(content_length)
				line = fp.readline()
				
				msg = CompressedImage()
				msg.header.stamp = rospy.Time.now()
				msg.header.frame_id = self.axis_frame_id
				msg.format = "jpeg"
				msg.data = img
				# publish image
				self.compressed_image_publisher.publish(msg)
				
				cimsg = self.cinfo.getCameraInfo()
				cimsg.header.stamp = msg.header.stamp
				cimsg.header.frame_id = self.axis_frame_id
				# publish camera info
				self.caminfo_publisher.publish(cimsg)
				
				#print self.real_fps
				self.last_update = datetime.datetime.now()
				self.error_reading = False
				self.image_pub_freq.tick()
						
		except urllib2.URLError,e:
			self.error_reading = True
			self.error_reading_msg = e
			rospy.logerr('Axis:stream: Camera %s (%s:%d). Error: %s'%(self.camera_id, self.hostname, self.camera_number, e))
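
This driver targets Python 2's urllib2; under Python 3 the equivalent request/open step (a sketch only, reusing the same self.url, self.auth, and self.timeout attributes) would be:

import urllib.request
req = urllib.request.Request(self.url, None, {"Authorization": self.auth})
fp = urllib.request.urlopen(req, timeout=self.timeout)
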
Example #15
    def processImage(self, image_msg):
        image_cv = self.bridge.imgmsg_to_cv2(image_msg)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_cv)[1]).tostring()
        self.pub_image_comp.publish(msg)
    def callback(self, ros_data):
        
        np_arr = np.frombuffer(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        
        # convert it to pil format 
        im = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        im_pil = Image.fromarray(im)
        # im_pil = im_pil.convert('RGB')
        
        transform = input_transform
        im_pil = transform(im_pil)
        # print(type(img))
        
        # Make prediction 
        im_pil = im_pil.cuda().unsqueeze(0)
        output = model.evaluate(im_pil)
        predict = torch.max(output, 1)[1].cpu().numpy() + 1
        # Get color palette for visualization
        mask = encoding.utils.get_mask_pallete(predict, 'minc_dataset')
        # print(type(mask))
        # print(mask)
        mask.save('deeplab_resnest101_rgb_202008042350.png')
        # mask.show()
        
        # time.sleep(5)

        # mask.close()

        img = np.asarray(mask)

        # pil2ros = cv2.cvtColor(pil2ros, cv2.COLOR_RGB2BGR)

        img3_shape = np.insert(img.shape,2,[3])
        img3 = np.zeros((img3_shape)).astype("uint8")
        img3[:,:,0] = img
        img3[:,:,1] = img
        img3[:,:,2] = img
        for i in range(24):
            img3 = np.where(img3==((i,i,i)),((mincpallete[i*3+2],mincpallete[i*3+1],mincpallete[i*3])),img3)
        img3 = np.array(img3, dtype=np.uint8)
        pil2ros = img3  # colorized mask used by the save/show/publish steps below
        cv2.imshow("window",img3)
        k = cv2.waitKey(0)


        cv2.imwrite('deeplabresnest101_gray_202008042350.png',pil2ros)
        # print(pil2ros.shape)
        
        # show the mask
        cv2.imshow("Image window", pil2ros)
        cv2.waitKey(3)

        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', pil2ros)[1]).tostring()
        self.image_pub.publish(msg)
Example #17
    def processImage(self, image_msg):
        image_cv = self.bridge.imgmsg_to_cv2(image_msg)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_cv)[1]).tostring()
        self.pub_image_comp.publish(msg)
    def callback(self, image):
        rectified_img = CompressedImage()
        rectified_img.header.stamp = image.header.stamp
        rectified_img.format = "jpeg"
        rectified_img.data = self.image_rectifier.process_image(
            rgb_from_ros(image))

        # publish new message
        self.publisher.publish(rectified_img)
Example #19
def img_publisher(img):
    publisher = rospy.Publisher("image", CompressedImage, queue_size=10)
    img_msg = CompressedImage()
    img_msg.header.stamp = rospy.Time().now()
    img_msg.format = 'jpeg'
    # img_msg.data = np.array(img,dtype = np.uint8).tostring()
    img_msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()

    publisher.publish(img_msg)
def publish_image(pub, image_handle):
    image = CompressedImage()
    image.data = image_handle.read()
    image.format = 'jpeg'
    image.header.seq = 0
    image.header.stamp.secs = math.floor(time.time())
    image.header.stamp.nsecs = 0
    image.header.frame_id = "0"
    pub.publish(image)
Example #21
 def cv2_to_compressed_ros(self, img):
     try:
         msg = CompressedImage()
         msg.header.stamp = rospy.Time.now()
         msg.format = "jpeg"
         msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()
         return msg
     except CvBridgeError as e:
         rospy.logerr(e)
Example #22
    def _publish_img(self, obs):
        # Publish the numpy array as a CompressedImage
        obs = cv2.cvtColor(obs, cv2.COLOR_BGR2RGB)
        img_msg = CompressedImage()
        img_msg.header.stamp = rospy.Time.now()
        img_msg.format = "jpeg"
        img_msg.data = np.array(cv2.imencode('.jpg', obs)[1]).tostring()

        self.cam_pub.publish(img_msg)
	def pubOrig(self, args=None):
		image_msg_out = CompressedImage()
		image_msg_out.header.stamp = rospy.Time.now()
		image_msg_out.format = "png"
		image_msg_out.data = np.array(cv2.imencode('.png',
				self.original_image)[1]).tostring()

		self.pub_image.publish(image_msg_out)
		rospy.loginfo("Publishing original image")
Example #24
  def stream(self):
    url = 'http://%s/mjpg/video.mjpg' % self.axis.hostname
    url = url + "?fps=0&resolution=%dx%d" % (self.axis.width, self.axis.height)

    rospy.logdebug('opening ' + str(self.axis))

    # only try to authenticate if user/pass configured
    if self.axis.password != '' and self.axis.username != '':
      # create a password manager
      password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()

      # Add the username and password, use default realm.
      top_level_url = "http://" + self.axis.hostname
      password_mgr.add_password(None, top_level_url,
                                self.axis.username,
                                self.axis.password)
      handler = urllib2.HTTPBasicAuthHandler(password_mgr)

      # create "opener" (OpenerDirector instance)
      opener = urllib2.build_opener(handler)

      # ...and install it globally so it can be used with urlopen.
      urllib2.install_opener(opener)

    fp = urllib2.urlopen(url)

    while True:
      boundary = fp.readline()

      header = {}
      while 1:
        line = fp.readline()
        if line == "\r\n": break
        line = line.strip()

        parts = line.split(": ", 1)
        header[parts[0]] = parts[1]

      content_length = int(header['Content-Length'])

      img = fp.read(content_length)
      line = fp.readline()
      
      msg = CompressedImage()
      msg.header.stamp = rospy.Time.now()
      msg.header.frame_id = self.axis.frame_id
      msg.format = "jpeg"
      msg.data = img

      self.axis.pub.publish(msg)

      cimsg = self.axis.cinfo.getCameraInfo()
      cimsg.header.stamp = msg.header.stamp
      cimsg.header.frame_id = self.axis.frame_id
      cimsg.width = self.axis.width
      cimsg.height = self.axis.height
      self.axis.caminfo_pub.publish(cimsg)
Example #25
 def cv2_to_compressed_ros(self, img):
     try:
         msg = CompressedImage()
         msg.header.stamp = rospy.Time.now()
         msg.format = "jpeg"
         msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()
         return msg
     except CvBridgeError as e:
         rospy.logerr(e)
 def pubImage(self):
     filename = '0'+str(self.num_sent)+'_orig.png'
     image = cv2.imread(self.im_dir+filename)
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = 'png'
     msg.data = np.array(cv2.imencode('.png', image)[1]).tostring()
     self.pub_fake_images.publish(msg)
     rospy.loginfo("[%s] Image %s published" %(self.node_name,filename))
    def callback(self, ros_data):
        '''Callback function of subscribed topic'''

        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # OpenCV >= 3.0:
        if self._resize > 0:
            frame = imutils.resize(image_np, width=self._resize)
        else:
            frame = image_np
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        rects = self._detector.detectMultiScale(gray,
                                                scaleFactor=1.1,
                                                minNeighbors=5,
                                                minSize=(30, 30),
                                                flags=cv2.CASCADE_SCALE_IMAGE)
        if self._VERBOSE:
            rospy.loginfo(f"face_finder: we found {len(rects)} faces")

        # compute the facial embeddings for each face bounding box
        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []
        matches = []

        for encoding in encodings:
            matches = face_recognition.compare_faces(self._data["encodings"],
                                                     encoding)

        name = "Unknown"
        if True in matches:
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}

            for i in matchedIdxs:
                name = self._data["names"][i]
                counts[name] = counts.get(name, 0) + 1

            name = max(counts, key=counts.get)

        names.append(name)

        if self._publishPic:
            for (top, right, bottom, left) in boxes:
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0),
                              2)
                y = top - 15 if top - 15 > 15 else top + 15
                cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 255, 0), 2)
            msg_raw_image = CompressedImage()
            msg_raw_image.header.stamp = rospy.Time.now()
            msg_raw_image.format = "jpeg"
            msg_raw_image.data = np.array(cv2.imencode('.jpg',
                                                       frame)[1]).tostring()
            self._publisher.publish(msg_raw_image)
Example #28
 def publish_image(self):
     """ publish images by publishing_names """
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = "jpeg"
     for str in self.publishing_names:
         if not str in self.images: continue
         msg.data = np.array(cv2.imencode('.jpg',
                                          self.images[str])[1]).tostring()
         self.publishers[str].publish(msg)
 def transform_image_and_repub(self, msg):
     img = cv2.imdecode(np.fromstring(msg.data, np.uint8), cv2.IMREAD_COLOR)
     img_rotated = np.flip(np.transpose(img, (1, 0, 2)),
                           0)[CHOP_TOP:-CHOP_BOT]
     repub_msg = CompressedImage()
     repub_msg.header = msg.header
     repub_msg.format = "jpeg"
     repub_msg.data = np.array(cv2.imencode('.jpg',
                                            img_rotated)[1]).tostring()
     self.image_repub.publish(repub_msg)
Example #30
def create_CI(image):
    '''
    Creates a CompressedImage message and sets its fields; it takes
    the OpenCV image to encode as its argument.
    '''
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', image)[1]).tostring()
    return msg
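
A possible way to wire create_CI to a publisher, sketched with assumed names (the node name, topic, camera index, and rate are placeholders):

rospy.init_node('ci_publisher')                 # assumed node name
pub = rospy.Publisher('camera/image/compressed', CompressedImage, queue_size=1)
cap = cv2.VideoCapture(0)                       # placeholder camera index
rate = rospy.Rate(10)                           # placeholder frame rate
while not rospy.is_shutdown():
    ok, frame = cap.read()
    if ok:
        pub.publish(create_CI(frame))
    rate.sleep()
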
Example #31
 def left_image_callback(self, data):
     """compress the images from pointgreycamera"""
     np_arr = np.fromstring(data.data, np.uint8)
     image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
     msg = CompressedImage()
     msg.header.stamp = data.header.stamp
     msg.format = "jpeg"
     msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
     # Publish new image
     self.image_pub.publish(msg)
Example #32
 def get_compressed_msg(self, image):
     """
     Get a compressed image; takes a cv2 image as input and returns a CompressedImage message
     :param image: cv2 image
     :return: sensor_msgs/CompressedImage
     """
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = "jpeg"
     msg.data = np.array(cv2.imencode('.jpg', image)[1]).tostring()
     return msg
Example #33
 def sendTestImage(self):
     img_name = '0' + str(
         self.sent_image_count) + '_orig.' + self.test_image_format
     test_img = cv2.imread(self.test_image_path + img_name)
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = self.test_image_format
     msg.data = np.array(
         cv2.imencode('.' + self.test_image_format,
                      test_img)[1]).tostring()
     self.pub_test_image.publish(msg)
Example #34
    def image_callback(self, ros_image):
        np_arr = np.frombuffer(ros_image.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        img = image_np.copy()

        # image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)

        assert image_np.shape == (240, 320, 3)
        assert image_np.dtype == np.uint8

        # cv2.imshow(self.win, image_np)
        # cv2.waitKey(2)

        ## image_np = image_np[:,:,::-1]
        image_np = np.concatenate([image_np.astype(np.float32) / 255.0, self.xx, self.yy], axis=2)
        image_np = np.expand_dims(image_np, axis=0)

        '''
        self.interpreter.set_tensor(self.tf_input_index, image_np)
        self.interpreter.invoke()

        labels = self.interpreter.get_tensor(self.tf_output_index)[0]
        '''
        labels = self.model.predict(image_np)[0]

        coords = geometry_msgs.msg.PoseArray()
        coords.header = ros_image.header
        if labels[4] > 0.2: # TODO fix threshold
            coords.poses.append(geometry_msgs.msg.Pose())
            coords.poses.append(geometry_msgs.msg.Pose())
            coords.poses[0].position.x = labels[0]
            coords.poses[0].position.y = labels[1]
            coords.poses[1].position.x = labels[2]
            coords.poses[1].position.y = labels[3]
        # else: leave coords empty if no handle detected
        self.coords_pub.publish(coords)

        if self.image_pub.get_num_connections() > 0:
            # print(output_data)

            if labels[4] > 0:
                cv2.circle(img, (denormalizeCoords(labels[0], 320), (denormalizeCoords(labels[1], 240))), 3, (0, 0, 255),
                           -1)
                cv2.circle(img, (denormalizeCoords(labels[2], 320), (denormalizeCoords(labels[3], 240))), 3, (0, 255, 0),
                           -1)
            else:
                cv2.circle(img, (img.shape[1] // 2, img.shape[0] // 2), 30, (0, 255, 255), 1)

            msg = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format = "jpeg"
            msg.data = np.array(cv2.imencode('.jpg', img)[1]).tobytes()
            # Publish new image
            self.image_pub.publish(msg)
Example #35
    def img_callback(self, img_msg):
        # Discard images if they are too old
        callback_start = rospy.Time.now()

        if (callback_start - img_msg.header.stamp) > self.max_delay:
            return
        else:
            rospy.loginfo(
                "Received image delayed by {}ns".format(callback_start -
                                                        img_msg.header.stamp))

        # Image conversion from msg -> cv2 (BGR) -> np (RGB)
        np_arr = np.fromstring(img_msg.data, np.uint8)
        np_img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        np_img = np_img[:, :, ::-1]

        # Crop, resize and rescale pixel values from [0, 1] range to [-1, +1] range
        if self.args.crop:
            np_img = np_img[:, 380:1100, :]

        np_img = resize(np_img, (224, 224))
        np_img = np.multiply(np.subtract(np_img, 0.5), 2.0)

        # Calculate some durations for evaluation
        prep_end = rospy.Time.now()
        prep_dur = prep_end - callback_start

        # Do the actual control value prediction based on the current image right now
        output = self.model.predict(np.expand_dims(np_img, axis=0))
        prediction = self.helper.postprocess_output(output)

        # Create the Twist message and fill the respective fields
        cmd = Twist()
        cmd.linear.x = self.args.x_vel
        cmd.angular.z = prediction[0]

        pred_end = rospy.Time.now()
        pred_dur = pred_end - prep_end
        rospy.loginfo(
            "Predicted angular.z: {} in {}ns after {}ns prep.".format(
                cmd.angular.z, pred_dur, prep_dur))

        # Send the created message to the roscore
        self.pub.publish(cmd)

        # Send the resized image to roscore
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"  # "bgr8; jpeg compressed bgr8"
        msg.data = np.array(cv2.imencode('.jpg',
                                         np_img[:, :, ::-1])[1]).tostring()

        # Publish new image
        self.img_pub.publish(msg)
Example #36
    def publish_img(self, image):

        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"

        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 80]
        result, encimg = cv2.imencode('.jpg', image, encode_param)
        msg.data = np.array(encimg).tostring()

        self.debug_img_pub.publish(msg)
 def loop(self):
     while not rospy.is_shutdown():
         if self.pub_img.get_num_connections() > 0:
             self.draw()
             msg = CompressedImage()
             msg.header.stamp = rospy.Time.now()
             msg.format = "jpeg"
             msg.data = np.array(cv2.imencode('.jpg',
                                              self.img)[1]).tostring()
             self.pub_img.publish(msg)
         rospy.sleep(0.1)
Example #38
  def callback(self,ros_data):
    np_arr = np.fromstring(ros_data.data, np.uint8)
    image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    image_np = cv2.flip(image_np, self.flip_code)

    #### Create CompressedImage ####
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
    self.image_pub.publish(msg)
    def callback(self, ros_data):
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
        image_np = cv2.flip(image_np, self.flip_code)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        self.image_pub.publish(msg)
def publish_image(image_np):
    #### Create CompressedImage ####
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
    logme = "image capture: len=%d, time=%s" % (len(
        msg.data), rospy.get_time())
    rospy.loginfo(logme)
    # Publish new image
    image_pub.publish(msg)
    def process_current_image(self):
        # No image data received
        if len(self.current_image.data) == 0:
            return

        # skip if no subscribers have requested detections
        if self.pub_box.get_num_connections() == 0 and self.pub_image.get_num_connections() == 0:
            return

        np_arr = np.fromstring(self.current_image.data, np.uint8)
        frame_ori = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

        frame = self.rotate_image(frame_ori, self.rotation_angle)

        orig = frame.copy()
        frame = Image.fromarray(frame)

        # make predictions on the input frame
        results = self.model.DetectWithImage(frame, threshold=self.threshold, keep_aspect_ratio=True, relative_coord=False)

        # loop over the results
        for r in results:
            # extract the bounding box
            box = r.bounding_box.flatten().astype("int")
            (startX, startY, endX, endY) = box
            
            if self.enable_labeling:
                label = self.labels[r.label_id]
                if label != self.tracked_object:
                    continue # skip current object
                y = startY - 15 if startY - 15 > 15 else startY + 15
                text = "{}: {:.2f}%".format(label, r.score * 100)
                cv2.putText(orig, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # draw the bounding box and label on the image
            cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)

            box_msg = Int16MultiArray()
            box_msg.data = [r.label_id, startX, startY, endX, endY]
            self.pub_box.publish(box_msg)
            break

        # skip if no subscribers are registered
        if self.pub_image.get_num_connections() == 0:
            return

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', orig)[1]).tostring()

        # Publish image with detections
        self.pub_image.publish(msg)
Example #42
def gpu_img_to_img_msg(img_gpu, width, height):
    m = CompressedImage()
    jetson.utils.cudaDeviceSynchronize()
    img_rgba = jetson.utils.cudaToNumpy(img_gpu, width, height,
                                        4).astype(np.uint8)

    img = cv2.cvtColor(img_rgba[:, :, 0:3], cv2.COLOR_BGR2RGB)
    m.data = np.array(cv2.imencode(".png", img)[1]).tostring()
    m.format = "png"

    return m
Example #43
 def publishImage(self, type="color"):
     response_image = self.client.simGetImage(CAMERA_NAME, IMAGE_TYPE)
     np_response_image = np.asarray(bytearray(response_image),
                                    dtype="uint8")
     # Publish to ROS
     msg = CompressedImage()
     msg.header.stamp = rospy.Time.now()
     msg.format = "jpeg"
     msg.data = np_response_image.tostring()
     # msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
     self.image_pub.publish(msg)
Example #44
    def publish_img(self, obs):
        img_msg = CompressedImage()
        time = rospy.get_rostime()

        img_msg.header.stamp.secs = time.secs
        img_msg.header.stamp.nsecs = time.nsecs

        img_msg.format = "jpeg"
        image = cv2.cvtColor(np.ascontiguousarray(obs), cv2.COLOR_BGR2RGB)
        img_msg.data = np.array(cv2.imencode('.jpg', image)[1]).tostring()

        self.pub_img.publish(img_msg)
Example #45
    def callback(self, ros_data):
        if VERBOSE:
            print "received image of type: %s" % ros_data.format

        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)

        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        self.image_pub.publish(msg)
Example #46
    def displayImageStream(self, packedReturn, compressedImage):
        outputImage = packedReturn['output']
        image = CompressedImage()
        image.header.seq = self.seq
        self.seq += 1
        image.header.stamp = rospy.Time.now()
        image.header.frame_id = "blab"
        image.format = "jpeg"

        image.data = np.array(cv2.imencode('.jpg', outputImage)[1]).tostring()

        self.image_publisher.publish(image)
Example #47
    def callback(self, img):

        #image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
        image_np = cv2.imdecode(img, cv2.IMREAD_COLOR)  # OpenCV >= 3.0:

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        # Publish new image
        self.image_pub.publish(msg)
Example #48
    def callback(self, msg):
        #### direct conversion to CV2 ####
        np_arr = np.fromstring(msg.data, np.uint8)
        cv_img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV >= 3.0:

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', cv_img)[1]).tostring()
        # Publish new image
        self.image_pub.publish(msg)
Example #49
 def timer_callback(self):
     if self.has_frame == True:
         # convert frame to image
         #resize_frame = cv2.resize(frame, (self.width, self.height))
         msg = CompressedImage()
         msg.header.stamp = self.get_clock().now().to_msg()
         msg.format = "jpeg"
         encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 60]
         # result, encimg = cv2.imencode('.jpg', img, encode_param)
         msg.data = np.array(cv2.imencode('.jpg', self.frame, encode_param)[1]).tobytes()
         self.pub.publish(msg)
         self.has_frame = False
    def publishTestImage(self):
        original_img = cv2.imread(self.original_image_file, cv2.CV_LOAD_IMAGE_COLOR)

        #### Create CompressedImage ####
        out = CompressedImage()
        out.header.stamp = rospy.Time.now()
        out.format = "png"
        np_arr_out = cv2.imencode('.png', original_img)[1]
        out.data = np.array(np_arr_out).tostring()

        # Publish new image
        rospy.sleep(1)
        self.pub_mirror.publish(out)
        rospy.loginfo("[%s] published image"%self.node_name)
	def publish(self):
		img = cv2.imread("/home/qian/dev/catkin_ws/src/smart_ar/script/aurora.png", cv2.CV_LOAD_IMAGE_COLOR)
		M = cv2.getRotationMatrix2D((img.shape[1]/2,img.shape[0]/2),self.ga*100, 1.0 / self.dist)
		img = cv2.warpAffine(img,M,(img.shape[1], img.shape[0]))
		bridge = CvBridge()
		msg = CompressedImage()
		msg.format = "jpeg"
		msg.data = np.array(cv2.imencode(".jpg",img)[1]).tostring()
		msg.header.stamp = rospy.Time.now()
		msg.header.frame_id = 'image_compressed_publisher'
		msg.header.seq = self.seq
		self.seq += 1
		self.imgPublisher.publish(msg)
		print self.seq, 'image published'
def cropCallback(msg):
    time_stamp = msg.header.stamp
    img_bytes = BytesIO(msg.data)
    img = PIL.Image.open(img_bytes)
    
    resize = (80, 60)
    img = img.resize(resize, resample=PIL.Image.BILINEAR)
    np_img = np.asarray(img)
    
    msg = CompressedImage()
    msg.header.stamp = time_stamp
    msg.format = "png"
    msg.data = np.array(cv2.imencode('.png', np_img)[1]).tostring()
    publish_cropimg.publish(msg)
Example #53
File: debug.py Project: Snafu/ros
 def cbImage(self, img):
   # decode image
   np_arr = np.fromstring(img.data, np.uint8)
   color = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
   
   self.overlayInfo(color)
   
   #### Create CompressedImage ####
   msg = CompressedImage()
   msg.header.stamp = rospy.Time.now()
   msg.format = "jpeg"
   msg.data = np.array(cv2.imencode('.jpg', color)[1]).tostring()
   # Publish new image
   self.image_pub.publish(msg)
Example #54
    def receive_image(self):   
        """
        Retrieves image data coming from the rover, transforms it to
        numpy and string data for further manipulation with OpenCV
        and Kivy, and returns the final data
        """
        data = ''
        ldata = array.array('c')
        image_buffer = CompressedImage()
        image_buffer.format = "jpeg"
#        start = ''
        found_start = False
        found_end = False
        start_pos = 0 #current position in ldata
        #position of the end of frame in the current array of data
        end_pos = 0
        while (not found_end):
            data = self.video_socket.recv(self.max_tcp_buffer)
            if(data == ''):
                continue
            #check for the message start token 'MO_V'
            for i in range(0, len(data)-2):
                if (data[i:(i+4)] == 'MO_V'):
                    if not found_start:
                        found_start = True
                        start_pos = i
                        #crop the data only include stuff after the
                        #start token.
                        data = data[start_pos:len(data)]
                        break
                    elif not found_end:
                        found_end = True
                        end_pos = i
                        break
            #if you have found the start but not the end (in the
            #middle of a image frame)
            if (found_start and not found_end):
                #add the recent data to ldata
                ldata.extend(list(data))
            if found_end:
               ldata.extend(list(data[0:end_pos]))
            data = ''
        l_len = len(ldata)
        #### Create CompressedImage ####
        # convert to numpy to use with OpenCV, etc.
        image_buffer.header.stamp = rospy.Time.now()
        image_buffer.data = np.array(ldata[36:l_len]).tostring()

        return image_buffer
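
The snippet does not show how receive_image is consumed; a minimal sketch of a caller (the method name, topic, and queue size are assumptions) could sit alongside it:

    def stream_to_ros(self):
        # Sketch only (assumed names): publish each frame returned by receive_image().
        video_pub = rospy.Publisher('rover/image/compressed', CompressedImage, queue_size=1)
        while not rospy.is_shutdown():
            video_pub.publish(self.receive_image())
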
Example #55
def cam_callback(msg):
	np_arr = np.fromstring(msg.data, np.uint8)
	
	# Decode 
	image_rgb = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)

	# Mirror image laterally
	image_rgb_rev =	image_rgb[:,::-1,:]
	
	msg = CompressedImage()
	msg.header.stamp = rospy.Time.now()
	msg.format = "jpeg"
	msg.data = np.array(cv2.imencode('.jpg', image_rgb_rev)[1]).tostring()
	publisher.publish(msg)
	rospy.loginfo("Re Published ")
Example #56
def set_scene(index):
    image = CompressedImage()
    # image.header.frame_id = '/odom_combined'
    # If you ever want to drive the base, you will want to use the line above, or /map or something.
    # Using /base_link means the robot will always stay exactly in the middle of the backdrop.
    image.header.frame_id = '/base_link'
    image.format = 'jpeg'
    try:
        scene = scenes[index]
    except TypeError:
        index = main_window.scene_comboBox.findText(index)
        scene = scenes[index]
    image.data = open(scene).read()
    image.header.stamp = rospy.Time.now()
    backdrop_publisher.publish(image)
    def _publish_image(self, header, image, timestamp):
        """Publish JPG image as a ROS message.

        :param header: The HTTP-like header read from the stream.
        :type header: dict
        :param image: The video frame in JPG.
        :type image: :py:obj:`basestring`
        :param timestamp: The time when the frame was captured (or its best estimation).
        :type timestamp: :py:class:`rospy.Time`
        """
        msg = CompressedImage()
        msg.header.stamp = timestamp
        msg.header.frame_id = self._axis._frame_id
        msg.format = "jpeg"
        msg.data = image
        self._axis._video_publisher.publish(msg)
Example #58
    def on_image(self, ros_data):
        compressed_in = np.fromstring(ros_data.data, np.uint8)
        image_in      = cv2.imdecode(compressed_in, cv2.CV_LOAD_IMAGE_COLOR)
        if self.prvs is None:
            self.prvs = cv2.resize(cv2.cvtColor(image_in,cv2.COLOR_BGR2GRAY), (width,height))
        else:
            next      = cv2.resize(cv2.cvtColor(image_in,cv2.COLOR_BGR2GRAY), (width,height))
            image_out = cv2.resize(image_in, (width,height))
            if self.should_be_controlled() : self.pub.publish(self.apply_control(next, image_out))
            self.prvs = next

            msg              = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format       = "jpeg"
            msg.data         = np.array(cv2.imencode('.jpg', image_out)[1]).tostring()
            self.ipub.publish(msg)
    def imageCallback(self, msg):
        #### direct conversion to CV2 ####
        # print 'image received'
        np_arr = np.fromstring(msg.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)

        flipCode = 1 if self.flip_direction == "horz" else 0
        image_np_out = cv2.flip(image_np, flipCode)

        #### Create CompressedImage ####
        out = CompressedImage()
        out.header.stamp = rospy.Time.now()
        out.format = "png"
        np_arr_out = cv2.imencode('.png', image_np_out)[1]
        out.data = np.array(np_arr_out).tostring()

        # Publish new image
        self.pub_mirror.publish(out)