Example #1
    def _publish_image(self, header, image, timestamp):
        """Publish JPG image as a ROS message.

        :param header: The HTTP-like header read from the stream.
        :type header: dict
        :param image: The video frame in JPG.
        :type image: :py:obj:`basestring`
        :param timestamp: The time when the frame was captured (or its best estimation).
        :type timestamp: :py:class:`rospy.Time`
        """
        msg = CompressedImage()
        msg.header.stamp = timestamp
        msg.header.frame_id = self._axis._frame_id
        msg.format = "jpeg"
        msg.data = image
        self._axis._video_publisher.publish(msg)
Example #2
    def callback(self, img):
        # img is assumed to be a sensor_msgs CompressedImage; restore the numpy
        # buffer that the commented-out line below refers to before decoding.
        np_arr = np.fromstring(img.data, np.uint8)
        #image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)  # OpenCV < 3.0
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # OpenCV >= 3.0

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        # Publish new image
        self.image_pub.publish(msg)
Example #3
    def callback(self, Img_data):

        perc_uppercut_ROI = 33  # define the Region Of Interest for the image (in percent, rectangular shape):
        # top 0% (remove nothing) --> bottom 100% (remove everything)

        # convert data from compressed form into compatible opencv form, and convert to GRAY scale
        np_arr = np.fromstring(Img_data.data, np.uint8)
        image_g = cv2.cvtColor(cv2.imdecode(np_arr, 1), cv2.COLOR_BGR2GRAY)

        # undistort image
        image_un = cv2.undistort(image_g, self.camera_matrix, self.dist_coeff)

        # filter GRAY scale image for blacks (black tape is used as lanes)
        image_g_filtered = cv2.inRange(image_un, 0, 50)

        # Canny algorithm as edge detector; the coefficients come from experience
        image_C = cv2.Canny(image_g_filtered, 40, 90)

        # probabilistic Hough transform (HoughLinesP) as line detector; the coefficients come from experience
        linesP = cv2.HoughLinesP(self.ROI(image_C, perc_uppercut_ROI), 1,
                                 np.pi / 180, 100, None, 50, 40)

        # convert image to BGR for plotting colored lines in gray scale image
        image_un = cv2.cvtColor(image_un, cv2.COLOR_GRAY2BGR)

        # loop for plotting all the lines detected
        if linesP is not None:
            for i in range(0, len(linesP)):
                l = linesP[i][0]
                cv2.line(image_un, (l[0], l[1]), (l[2], l[3]), (0, 100, 0), 3,
                         cv2.LINE_AA)

        # compute average lines for positive and negative slopes
        lines = self.Average_lines(linesP, image_un, perc_uppercut_ROI)

        # compute and publish mid line angle for automatic steering of the vehicle
        if len(lines) > 0:
            angle = self.Mid_line(lines, image_un)
            msg2 = steering_angle()
            msg2.ang = angle
            self.pub2.publish(msg2)

        # publish modified image
        msg1 = CompressedImage()
        msg1.format = "jpeg"
        msg1.data = np.array(cv2.imencode('.jpg', image_un)[1]).tostring()
        self.pub1.publish(msg1)
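The callback above calls a self.ROI helper that is not included in the snippet. A minimal sketch of what it might look like, assuming it simply blanks out the top perc_uppercut percent of the image before line detection (the name and behaviour are assumptions, not the original implementation):

    def ROI(self, image, perc_uppercut):
        """Hypothetical helper: mask out the top perc_uppercut % of the image."""
        masked = image.copy()
        cut_row = int(image.shape[0] * perc_uppercut / 100.0)
        masked[:cut_row, :] = 0  # zero the rows above the region of interest
        return masked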
Example #4
def camThread():

    width = 640
    height = 480
    #vidfps = 30

    cam = cv2.VideoCapture(0)
    cam.set(3, width)
    cam.set(4, height)
    cam.set(cv2.CAP_PROP_FPS, 15)
    #cam = cv2.VideoCapture(get_camerasrc(0), cv2.CAP_GSTREAMER)

    #cam.set(cv2.CAP_PROP_FPS, vidfps)
    #cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    #cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)

    #cam = cv2.VideoCapture('rkisp device=/dev/video1 io-mode=4 path-iqf=/etc/cam_iq/ov13850.xml ! video/x-raw,format=NV12,width=740 ,height=360,framerate=30/1 ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
    image_pub = rospy.Publisher("/output/image_raw/compressed",
                                CompressedImage,
                                queue_size=1)
    rospy.init_node('cam_stream', anonymous=True)
    rate = rospy.Rate(30)  # 30 Hz

    while not rospy.is_shutdown():
        try:
            ret, color_image = cam.read()
            if not ret:
                print("no image")
                continue
            msg = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format = "jpeg"
            msg.data = np.array(cv2.imencode('.jpg',
                                             color_image)[1]).tostring()
            # Publish new image
            image_pub.publish(msg)

        except (KeyboardInterrupt, SystemExit):
            print("Camera release")
            cam.release()
            break
        except Exception as e:
            # keep the specific handler above reachable; a broad BaseException
            # clause first would swallow KeyboardInterrupt/SystemExit
            print("Exiting")
            print(e)

        rate.sleep()
Example #5
    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        self.framerate_high = self.setupParam("~framerate_high", 30.0)
        self.framerate_low = self.setupParam("~framerate_low", 15.0)
        self.res_w = self.setupParam("~res_w", 640)
        self.res_h = self.setupParam("~res_h", 480)

        self.image_msg = CompressedImage()

        # Setup PiCamera

        self.camera = PiCamera()
        self.framerate = self.framerate_high  # default to high
        self.camera.framerate = self.framerate
        self.camera.resolution = (self.res_w, self.res_h)

        cam_config_root = '/data/config'
        self.cali_file_folder = cam_config_root + "/calibrations/camera_intrinsic/"

        self.frame_id = rospy.get_namespace().strip(
            '/') + "/camera_optical_frame"

        self.has_published = False
        self.pub_img = rospy.Publisher("~image/compressed",
                                       CompressedImage,
                                       queue_size=1)
        self.sub_switch_high = rospy.Subscriber("~framerate_high_switch",
                                                BoolStamped,
                                                self.cbSwitchHigh,
                                                queue_size=1)

        # Create service (for camera_calibration)
        self.srv_set_camera_info = rospy.Service("~set_camera_info",
                                                 SetCameraInfo,
                                                 self.cbSrvSetCameraInfo)

        self.stream = io.BytesIO()

        #self.camera.exposure_mode = 'off'
        # self.camera.awb_mode = 'off'

        self.is_shutdown = False
        self.update_framerate = False
        # Setup timer
        rospy.loginfo("[%s] Initialized." % (self.node_name))
Example #6
    def callback(self, ros_data):
        # global variables:
        global image_array        
        global label_array
        global steering_angle
        global esc_throttle
        global frame
        
        
        '''Callback function of subscribed topic.
        Here images get converted and features detected'''
        if VERBOSE:
            print 'received image of type: "%s"' % ros_data.format

        #### direct conversion to CV2 ####
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_GRAYSCALE)

        # received a new frame
        frame += 1
        
        # select middle of camera image
        resized_image = image_np[50:150, :]
        
        # 1-D vector of all the interesting pixels
        temp_array = resized_image.reshape(1, 28000).astype(np.float32)        
        
        # adds the temp_array to the image_array
        image_array = np.vstack((image_array, temp_array))
        
        # add row to label_array
        label_array = np.vstack((label_array, np.array((steering_angle, esc_throttle))))
        
        cv2.imshow('cv_img', resized_image)
        cv2.waitKey(2)
        
        if VERBOSE:
            print 'Steering angle is: ', steering_angle
            print 'Throttle is: ', esc_throttle

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        # Publish new image
        self.image_pub.publish(msg)
Example #7
def images(drone):
    rospy.loginfo("Image info thread")
    if drone:
        drone.set_camera_view(True)
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        compressed_images = cv2.imencode('.jpg', drone.get_image())
        msg.data = np.array(compressed_images[1]).tostring()
        front_image_pub.publish(msg)

        datos = nav_data(drone)
        print(datos[0])

    else:
        rospy.loginfo("No inicio")
Example #8
    def process(self):
        if isinstance(self.cv_img, int):  # no image received yet
            return
        self.lock = 1
        img = copy.copy(self.cv_img)
        self.lock = 0

        height, width = img.shape[:2]
        red_buoy_contours = self.get_filtered_contours(img, "RED_BUOY")
        green_buoy_contours = self.get_filtered_contours(img, "GREEN_BUOY")
        buoy_contours, GREEN_BUOY_MID, RED_BUOY_MID = [], [], []

        # check if the list is empty
        if red_buoy_contours:
            buoy_contours.extend(red_buoy_contours)
        if green_buoy_contours:
            buoy_contours.extend(green_buoy_contours)

        for (cnt, box, mean_color, contour_type) in buoy_contours:
            # plot box around contour
            x, y, w, h = box
            # find middle point of the buoy
            if contour_type == "RED_BUOY":
                RED_BUOY_MID = [x + w / 2, y + h / 2]
            if contour_type == "GREEN_BUOY":
                GREEN_BUOY_MID = [x + w / 2, y + h / 2]
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, contour_type, (x, y - 5), font, 1, mean_color, 2)
            cv2.rectangle(img, (x, y), (x + w, y + h), mean_color, 8)

        # find the middle point between the two buoys
        if not GREEN_BUOY_MID:
            middle_point = RED_BUOY_MID
        elif not RED_BUOY_MID:
            middle_point = GREEN_BUOY_MID
        else:
            middle_point = self.middle(GREEN_BUOY_MID, RED_BUOY_MID)

        #### Create CompressedImage ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()

        # Publish new image
        self.image_pub.publish(msg)
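get_filtered_contours is not shown above; a rough sketch of what such a helper might do, assuming a per-colour HSV threshold followed by contour extraction (the HSV ranges and area cutoff are placeholders, not the original values):

    def get_filtered_contours(self, img, contour_type):
        """Hypothetical helper: threshold img for one buoy colour and return
        (contour, bounding_box, mean_color, contour_type) tuples."""
        hsv_ranges = {
            "RED_BUOY": ((0, 120, 70), (10, 255, 255)),
            "GREEN_BUOY": ((40, 50, 50), (80, 255, 255)),
        }
        lower, upper = hsv_ranges[contour_type]
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array(lower), np.array(upper))
        # [-2] works across OpenCV 2.x/3.x/4.x return signatures
        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        results = []
        for cnt in contours:
            if cv2.contourArea(cnt) < 100:  # skip tiny blobs
                continue
            box = cv2.boundingRect(cnt)  # (x, y, w, h), as the caller expects
            mean_color = cv2.mean(img, mask=mask)[:3]
            results.append((cnt, box, mean_color, contour_type))
        return results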
Example #9
def camPublisher():
    # Create and initialize a publisher
    pub = rospy.Publisher('/cameraOutput', CompressedImage, queue_size=1)
    rospy.init_node('camera', anonymous=True)
    rate = rospy.Rate(10)  # 10 Hz
    # Create a CompressedImage message (reused across iterations)
    msg = CompressedImage()
    msg.format = "jpeg"
    while not rospy.is_shutdown():
        try:
            # refresh the timestamp for every frame, not just the first one
            msg.header.stamp = rospy.Time.now()
            msg.data = cameraCap()
            pub.publish(msg)
        except Exception:
            print("Error: " + str(sys.exc_info()[0]))
        rate.sleep()
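cameraCap() is not defined in this example; a plausible sketch, assuming a module-level cv2.VideoCapture and JPEG bytes as the return value:

# Hypothetical helper assumed by camPublisher(): grab one frame and return JPEG bytes
cap = cv2.VideoCapture(0)

def cameraCap():
    ret, frame = cap.read()
    if not ret:
        raise RuntimeError("camera read failed")
    # cv2.imencode returns (success, buffer); the buffer already holds the JPEG bytes
    return np.array(cv2.imencode('.jpg', frame)[1]).tostring()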
Example #10
def callback(ros_data):
    np_arr = np.fromstring(ros_data.data, np.uint8)
    img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
    bounding_boxes = [[332.0, 144.0, 409.0, 411.0]]
    for box in bounding_boxes:
        x, y, x1, y1 = box
        roi = img[int(y):int(y1), int(x):int(x1)]
    cv2.imshow('img', roi)
    cv2.waitKey(50)

    # Prepares pub
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', roi)[1]).tostring()
    # Publish new image
    image_pub.publish(msg)
Example #11
    def execute_cb(self, goal):
        rospy.loginfo("Goal Received!")

        start_time = rospy.Time.now()

        result = inference_server.msg.InferenceResult()
        if not goal.input_image.data:
            self.inference_output, num_detected, detected_classes, detected_scores, detected_boxes = object_detection.detect(
                self.inference_input)
        else:
            np_arr = np.fromstring(goal.input_image.data, np.uint8)
            goal_inference_input = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            self.inference_output, num_detected, detected_classes, detected_scores, detected_boxes = object_detection.detect(
                goal_inference_input)

        self.inference_image = CompressedImage()
        self.inference_image.header.stamp = rospy.Time.now()
        self.inference_image.format = "jpeg"
        self.inference_image.data = np.array(
            cv2.imencode('.jpg', self.inference_output)[1]).tostring()
        self.image_pub.publish(self.inference_image)

        result.image = self.inference_image
        result.num_detections = num_detected
        result.classes = detected_classes
        result.scores = detected_scores
        for i in range(len(detected_boxes)):
            box = RegionOfInterest()
            box.y_offset = detected_boxes[i][0]
            box.height = (detected_boxes[i][2] - detected_boxes[i][0])
            box.x_offset = detected_boxes[i][1]
            box.width = (detected_boxes[i][3] - detected_boxes[i][1])
            box.do_rectify = True
            result.bounding_boxes.append(box)

        total_inference_time = rospy.Time.now() - start_time
        total_inference_time = total_inference_time.to_sec()
        rospy.loginfo(
            "The inference took {} seconds".format(total_inference_time))

        try:
            self._as.set_succeeded(result)
        except Exception as e:
            rospy.logerr(str(e))
            self._as.set_aborted(text=str(e))
Example #12
    def publish(self):
        img = cv2.imread(
            "/home/qian/dev/catkin_ws/src/smart_ar/script/aurora.png",
            cv2.IMREAD_COLOR)
        M = cv2.getRotationMatrix2D((img.shape[1] / 2, img.shape[0] / 2),
                                    self.ga * 100, 1.0 / self.dist)
        img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
        bridge = CvBridge()
        msg = CompressedImage()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode(".jpg", img)[1]).tostring()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = 'image_compressed_publisher'
        msg.header.seq = self.seq
        self.seq += 1
        self.imgPublisher.publish(msg)
        print self.seq, 'image published'
Example #13
    def _receive_message(self, message):
        global my
        rospy.loginfo(rospy.get_caller_id() + " I heard a message of %s", self._message_type)
        rospy.loginfo(rospy.get_caller_id() + " Time from previous message %s", (rospy.get_time() - my))  # here it reports a unicode type
        my = rospy.get_time()
        try:
            msg = CompressedImage()
            msg.header.seq = message['header']['seq']
            msg.header.stamp.secs = message['header']['stamp']['secs']
            msg.header.stamp.nsecs = message['header']['stamp']['nsecs']
            msg.header.frame_id = message['header']['frame_id']  # type unicode
            msg.format = message['format']  # type unicode
            # Python 2 only; on Python 3 use base64.b64decode(message['data'])
            msg.data = message['data'].decode('base64')
            self._rosPub = rospy.Publisher(self._local_topic_name, CompressedImage, queue_size=10)
            self._rosPub.publish(msg)
        except Exception as e:
            print('Error: %s' % e)
Example #14
    def modifyImageAndPublish(self,
                              cv_image_orig,
                              misalignment=0,
                              publisherId=1):

        cv_image = cv_image_orig.copy()
        publisher = self.image_pub1 if publisherId == 1 else self.image_pub2
        compressedPublisher = self.image_pub1_compressed if publisherId == 1 else self.image_pub2_compressed

        #Modify Image
        if True:
            #########################
            #Add recording indicator#
            #########################
            overlay = cv_image.copy()

            overlay = cv2.putText(overlay, self.display_message,
                                  (0 + misalignment, 25), self.font,
                                  self.fontSize, self.color, self.thickness,
                                  cv2.LINE_AA)
            rect_color = (0, 255, 0) if self.turn_on_square else (0, 0, 255)
            cv2.rectangle(cv_image, (600 + misalignment, 0),
                          (640 + misalignment, 40), rect_color, -1)

            if self.show_score_rect:
                cv2.rectangle(cv_image, (560 + misalignment, 0),
                              (600 + misalignment, 40), self.score_rect_color,
                              -1)

            cv2.addWeighted(overlay, self.alpha, cv_image, 1 - self.alpha, 0,
                            cv_image)

        #Publish modified Image
        try:
            publisher.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)

        #### Create and Publish Compressed Image ####
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', cv_image)[1]).tostring()
        compressedPublisher.publish(msg)

        return cv_image
Example #15
    def __createMsg(self, boards, img):
        rects = []
        labels = []

        for rect, dst, label in boards:
            poly = Polygon()
            for p in rect:
                poly.points.append(Point32(x=p[0], y=p[1], z=0))
            rects.append(poly)
            labels.append(int(label))

        msg = suchartVisionMsg(rects=rects, labels=labels)
        com = CompressedImage()
        com.format = "jpeg"
        com.data = np.array(cv2.imencode(".jpg", img)[1]).tostring()
        msg.img = com
        return msg
Example #16
    def _publish_img(self, obs):
        """
        Publishes the image to the compressed_image topic, which triggers the lane following loop
        """

        # XXX: make this into a function (there were a few of these conversions around...)
        img_msg = CompressedImage()

        time = rospy.get_rostime()
        img_msg.header.stamp.secs = time.secs
        img_msg.header.stamp.nsecs = time.nsecs

        img_msg.format = "jpeg"
        contig = cv2.cvtColor(np.ascontiguousarray(obs), cv2.COLOR_BGR2RGB)
        img_msg.data = np.array(cv2.imencode(".jpg", contig)[1]).tostring()

        self.cam_pub.publish(img_msg)
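The XXX note above asks for this conversion to be pulled out into a function; one possible shape for such a helper (a sketch, with the name assumed and the BGR-to-RGB swap left to the caller):

def ndarray_to_compressed_image(img, stamp=None):
    """Hypothetical helper: wrap an image array into a sensor_msgs CompressedImage."""
    msg = CompressedImage()
    msg.header.stamp = stamp if stamp is not None else rospy.get_rostime()
    msg.format = "jpeg"
    contig = np.ascontiguousarray(img)  # imencode needs a contiguous array
    msg.data = np.array(cv2.imencode(".jpg", contig)[1]).tostring()
    return msg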
Example #17
    def callback(self, ros_data):
        #print 'Hello'
        # retrieve the video frame
        np_arr = np.fromstring(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

        time1 = time.time()
        # isolate a region of interest
        roi = image_np[10:90, 0:640]
        roiImg = np.array(roi)
        roi = cv.fromarray(roi)

        # remove noise
        #print type(roiImg)
        cv2.bitwise_not(roiImg, roiImg)
        erodeElmt = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        dilateElmt = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        roiImg = cv2.erode(roiImg, erodeElmt, 1)
        roiImg = cv2.dilate(roiImg, dilateElmt, 1)
        roiImg = cv2.cvtColor(roiImg, cv2.COLOR_BGR2GRAY)
        # find the contours of the line
        ret, thresh = cv2.threshold(roiImg, 127, 255, cv2.THRESH_BINARY)
        #print type(thresh)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        # find the centre of the region
        cnt = contours[0]
        M = cv2.moments(cnt)
        area = cv2.contourArea(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        time2 = time.time()
        if VERBOSE:
            print 'Measurement done in %s.' % (time2 - time1)

        cv.Circle(roi, (cx, cy), 2, (0, 255, 0), -1)
        #print cx
        #print cy
        #cv2.imshow('roi_img',roiImg)

        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
        self.image_pub.publish(msg)
Example #18
    def __init__(self):
        super().__init__('zumi_camera_publish')
        """************************************************************
        ** Initialise ROS publishers and subscribers
        ************************************************************"""
        qos = QoSProfile(depth=10)
        self.img_pub = self.create_publisher(Image, '/raw_data', qos)

        self.get_logger().info("Zumin on ")

        HOST = ''
        PORT = 8485

        # use TCP
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print('Socket created')

        # bind the server IP address and port number
        s.bind((HOST, PORT))
        print('Socket bind complete')
        # wait for client connections (accept up to 10 clients)
        s.listen(10)
        print('Socket now listening')

        # accept a connection: conn is the socket object, addr is the address bound to the socket
        conn, addr = s.accept()
        self.cvBridge = CvBridge()

        while True:
            length = self.recvall(conn, 16)
            stringData = self.recvall(conn, int(length))
            data = np.fromstring(stringData, dtype='uint8')

            image_np = cv2.imdecode(data, cv2.IMREAD_COLOR)
            image_np_180 = cv2.rotate(image_np, cv2.ROTATE_180)
            #cv2.imshow('ImageWindow',image_np_180)
            cv2.waitKey(1)

            #### Create CompressedImage (built here, but the raw Image below is what gets published) ####
            msg = CompressedImage()
            #msg.header.stamp = rospy.Time.now()
            msg.format = "jpeg"
            msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()
            # Publish new image
            self.img_pub.publish(
                self.cvBridge.cv2_to_imgmsg(image_np_180, "bgr8"))
Example #19
    def subscribe(self, message):
        file_path = get_latest_modified_file_path("/tmp/ros/camera/")

        header = Header()
        header.seq = self.sequence
        self.sequence += 1
        header.stamp.secs = int(os.path.getmtime(file_path))
        header.frame_id = "0"

        response = CompressedImage()
        response.format = "jpeg"
        with open(file_path, 'rb') as f:  # binary mode so the JPEG bytes are read unmodified
            response.data = f.read()
        response.header = header
        rospy.loginfo('PUBLISH JPG IMAGE')
        self.stream_topic.publish(response)
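get_latest_modified_file_path is not shown in this snippet; a minimal sketch of what it could be, assuming it returns the newest file in the given directory:

import glob
import os

def get_latest_modified_file_path(directory):
    """Hypothetical helper: return the most recently modified file in the directory."""
    paths = glob.glob(os.path.join(directory, '*'))
    return max(paths, key=os.path.getmtime)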
Example #20
    def grabAndPublish(self, stream, publisher):
        while True:  # TODO: not being triggered correctly when shutting down.
            if self.is_shutdown:
                rospy.loginfo("[%s] Stopping stream...." % (self.node_name))
                # raise StopIteration
                return

            yield stream
            # Construct image_msg
            # Grab image from stream
            stamp = rospy.Time.now()
            stream.seek(0)
            stream_data = stream.getvalue()
            if not self.decompress:
                # Generate compressed image
                image_msg = CompressedImage()
                image_msg.format = "jpeg"
                image_msg.data = stream_data
            else:
                # Generate raw image
                image_msg = Image()
                data = np.fromstring(stream_data,
                                     dtype=np.uint8)  # 320 by 240, ~90 Hz
                image = cv2.imdecode(
                    data, cv2.IMREAD_COLOR)  # drops to about 60 Hz
                image_msg = self.bridge.cv2_to_imgmsg(
                    image)  # drops to about 30 Hz

            image_msg.header.stamp = stamp
            image_msg.header.frame_id = self.frame_id
            publisher.publish(image_msg)

            # Clear stream
            stream.seek(0)
            stream.truncate()

            if self.publish_info:
                self.camera_info_msg.header.stamp = stamp
                self.pub_camera_info.publish(self.camera_info_msg)

            if not self.has_published:
                rospy.loginfo("[%s] Published the first image." %
                              (self.node_name))
                self.has_published = True

            rospy.sleep(rospy.Duration.from_sec(0.001))
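grabAndPublish is a generator that yields the stream back to its caller; in the PiCamera-based nodes it is presumably driven by capture_sequence, roughly like this (a sketch using this node's attributes, not the original call site):

    def startCapturing(self):
        # Hypothetical driver: PiCamera writes each JPEG frame into self.stream,
        # and grabAndPublish() publishes it and clears the stream before yielding again.
        self.camera.capture_sequence(
            self.grabAndPublish(self.stream, self.pub_img),
            format='jpeg',
            use_video_port=True)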
Example #21
    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        self.framerate_high = self.setupParam("~framerate_high", 15.0)
        self.framerate_low = self.setupParam("~framerate_low", 15.0)
        self.res_w = self.setupParam("~res_w", 640)
        self.res_h = self.setupParam("~res_h", 480)

        self.image_msg = CompressedImage()

        # Setup PiCamera

        self.camera = PiCamera()
        self.framerate = self.framerate_low  # default to low
        self.camera.framerate = self.framerate
        self.camera.resolution = (self.res_w, self.res_h)

        # For intrinsic calibration
        rospack = rospkg.RosPack()
        #self.cali_file_folder = get_duckiefleet_root() + "/calibrations/camera_intrinsic/"
        self.cali_file_folder = rospack.get_path(
            'pi_camera') + "/config/calibrations/camera_intrinsic/"

        self.frame_id = rospy.get_namespace().strip(
            '/') + "/camera_optical_frame"

        self.has_published = False
        self.pub_img = rospy.Publisher("~image/compressed",
                                       CompressedImage,
                                       queue_size=1)

        # Create service (for camera_calibration)
        self.srv_set_camera_info = rospy.Service("~set_camera_info",
                                                 SetCameraInfo,
                                                 self.cbSrvSetCameraInfo)

        self.stream = io.BytesIO()

        #self.camera.exposure_mode = 'off'
        # self.camera.awb_mode = 'off'

        self.is_shutdown = False
        self.update_framerate = False
        # Setup timer
        rospy.loginfo("[%s] Initialized." % (self.node_name))
Example #22
def talker():
    pub = rospy.Publisher("/cam_image",CompressedImage)
    rospy.init_node('cam_to_rostopic', anonymous=True)
    rate = rospy.Rate(10)
    cam = cv2.VideoCapture(0)

    while not rospy.is_shutdown():
        ret_val, img = cam.read()
        image_GRAY = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale copy (not published below)

        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()
        pub.publish(msg)
        rate.sleep()
Example #23
    def __init__(self):
        """OpenCV feature detectors with ros CompressedImage Topics in python.
        
        This example subscribes to a ros topic containing sensor_msgs 
        CompressedImage. It converts the CompressedImage into a numpy.ndarray, 
        then detects and marks features in that image. It finally displays 
        and publishes the new image - again as CompressedImage topic.
        """
        self.publisher = rospy.Publisher("/image_raw/compressed",
                                         CompressedImage,
                                         queue_size=4)
        self.msg = CompressedImage()

        rospy.loginfo('Setting camera level...')
        self.camera = cv2.VideoCapture(int(rospy.get_param('camera_port', 0)))

        self.takeImage(lightSettingSnaps=30)
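takeImage is not part of this snippet; given the lightSettingSnaps argument, it presumably grabs a number of throw-away frames so the camera's auto-exposure settles, then publishes the last one. A sketch under that assumption:

    def takeImage(self, lightSettingSnaps=30):
        """Hypothetical helper: let the camera auto-adjust, then publish one frame."""
        frame = None
        for _ in range(lightSettingSnaps):
            ret, frame = self.camera.read()  # discard early frames while exposure settles
        if frame is None:
            rospy.logwarn('No frame captured')
            return
        self.msg.header.stamp = rospy.Time.now()
        self.msg.format = "jpeg"
        self.msg.data = np.array(cv2.imencode('.jpg', frame)[1]).tostring()
        self.publisher.publish(self.msg)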
Example #24
    def visualize_cb(self, event):
        if (not self.visualize or self.img is None or self.header is None
                or self.bboxes is None or self.labels is None
                or self.scores is None):
            return

        with self.lock:
            vis_img = self.img.copy()
            header = copy.deepcopy(self.header)
            bboxes = self.bboxes.copy()
            labels = self.labels.copy()
            scores = self.scores.copy()

        # bbox
        cmap = matplotlib.cm.get_cmap('hsv')
        n = max(len(bboxes) - 1, 10)
        for i, (bbox, label, score) in enumerate(zip(bboxes, labels, scores)):
            rgba = np.array(cmap(1. * i / n))
            color = rgba[:3] * 255
            label_text = '{}, {:.2f}'.format(self.label_names[label], score)
            cv2.rectangle(vis_img, (bbox[1], bbox[0]), (bbox[3], bbox[2]),
                          color,
                          thickness=3)
            cv2.putText(vis_img,
                        label_text, (bbox[1], max(bbox[0] - 10, 0)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        color,
                        thickness=2)

        if self.pub_image.get_num_connections() > 0:
            vis_msg = self.bridge.cv2_to_imgmsg(vis_img, 'rgb8')
            # BUG: https://answers.ros.org/question/316362/sensor_msgsimage-generates-float-instead-of-int-with-python3/  # NOQA
            vis_msg.step = int(vis_msg.step)
            vis_msg.header = header
            self.pub_image.publish(vis_msg)
        if self.pub_image_compressed.get_num_connections() > 0:
            # publish compressed http://wiki.ros.org/rospy_tutorials/Tutorials/WritingImagePublisherSubscriber  # NOQA
            vis_compressed_msg = CompressedImage()
            vis_compressed_msg.header = header
            vis_compressed_msg.format = "jpeg"
            vis_img_rgb = cv2.cvtColor(vis_img, cv2.COLOR_BGR2RGB)
            vis_compressed_msg.data = np.array(
                cv2.imencode('.jpg', vis_img_rgb)[1]).tostring()
            self.pub_image_compressed.publish(vis_compressed_msg)
Example #25
    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        self.framerate_high = self.setupParam("~framerate_high", 30.0)
        self.framerate_low = self.setupParam("~framerate_low", 15.0)
        self.res_w = self.setupParam("~res_w", 640)
        self.res_h = self.setupParam("~res_h", 480)

        self.image_msg = CompressedImage()

        # Setup PiCamera
        self.camera = PiCamera()
        self.framerate = self.framerate_low  # default to low
        # self.framerate = self.framerate_high  # default to high
        self.camera.framerate = 10
        self.camera.resolution = (self.res_w, self.res_h)

        # For intrinsic calibration
        self.cali_file_folder = rospkg.RosPack().get_path(
            'pi_cam') + "/camera_info/calibrations/"

        self.frame_id = rospy.get_namespace().strip(
            '/') + "/camera_optical_frame"

        self.has_published = False
        self.pub_img = rospy.Publisher("/pi_cam/image/compressed",
                                       CompressedImage,
                                       queue_size=1)

        # self.pub_img = rospy.Publisher("/ubiquityrobot/camera_node/image/compressed", CompressedImage, queue_size=1)
        # self.pub_img = rospy.Publisher("~image/compressed", CompressedImage, queue_size=1)
        # self.sub_switch_high = rospy.Subscriber("~framerate_high_switch", BoolStamped, self.cbSwitchHigh, queue_size=1)

        # Create service (for camera_calibration)
        self.srv_set_camera_info = rospy.Service("~set_camera_info",
                                                 SetCameraInfo,
                                                 self.cbSrvSetCameraInfo)

        self.stream = io.BytesIO()

        self.is_shutdown = False
        self.update_framerate = False
        # Setup timer
        rospy.loginfo("[%s] Initialized." % (self.node_name))
Example #26
    def imageCallback(self, msg):
        #### direct conversion to CV2 ####
        # print 'image received'
        np_arr = np.fromstring(msg.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

        flipCode = 1 if self.flip_direction == "horz" else 0
        image_np_out = cv2.flip(image_np, flipCode)

        #### Create CompressedImage ####
        out = CompressedImage()
        out.header.stamp = rospy.Time.now()
        out.format = "png"
        np_arr_out = cv2.imencode('.png', image_np_out)[1]
        out.data = np.array(np_arr_out).tostring()

        # Publish new image
        self.pub_mirror.publish(out)
Example #27
def on_image_received(data, thisid):
    if time.time() - last_time[thisid] < 1.0 / target_hz:
        return

    last_time[thisid] = time.time()

    img = bridge.imgmsg_to_cv2(data, "rgb8")
    img = img[::-1, ::-1, :]

    img = cv2.resize(img, (768, 576))

    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()

    image_raws[thisid].publish(msg)
    print("with in " + str(thisid))
Example #28
def generate_msg_from_image(img, compression_quality=90):
    """
    Generate ROS CompressedImage message from an OpenCV Image
    :param img: OpenCV Image
    :param compression_quality: integer between 1 and 100. The higher it is, the less information will be lost,
    but the heavier the compressed image will be
    :return: success, msg
    """
    result, compressed_img = compress_image(img, quality=compression_quality)
    if not result:
        return False, None

    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpg"
    msg.data = compressed_img

    return True, msg
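compress_image is an external helper in this package; a compatible sketch, assuming OpenCV JPEG encoding at the requested quality (the exact signature is an assumption):

def compress_image(img, quality=90):
    """Hypothetical helper: JPEG-encode an OpenCV image at the given quality."""
    result, buf = cv2.imencode('.jpg', img,
                               [int(cv2.IMWRITE_JPEG_QUALITY), quality])
    if not result:
        return False, None
    return True, buf.tostring()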
Example #29
def publish_result(img, topic_name):
    """
        publish picture
    """
    #### Create CompressedImage ####
    if img is None:
        # fall back to a blank grayscale frame; uint8 so cv2.imencode accepts it
        img = np.zeros((200, 200), np.uint8)

    pub = rospy.Publisher(
        str(topic_name) + "/compressed", CompressedImage, queue_size=1)
    
    msg = CompressedImage()
    msg.header.stamp = rospy.Time.now()
    msg.format = "jpeg"
    msg.data = np.array(cv.imencode('.jpg', img)[1]).tostring()

    pub.publish(msg)
Example #30
 def __init__(self):
     self.node_name = "Virtual Mirror Nbuckman Tester"
     self.fake_pub_image = rospy.Publisher(
         "/ernie/camera_node/image/compressed",
         CompressedImage,
         queue_size=1)
     img_file = rospy.get_param(
         "~img_file", "/home/noambuckman/test_images/01_horz.jpg")
     rospy.loginfo(img_file)
     im_cv = cv2.imread(img_file)
     img_str = cv2.imencode('.jpg', im_cv)[1].tostring()
     pub_im = CompressedImage()
     pub_im.header.stamp = rospy.Time.now()  # the stamp belongs on header.stamp, not header
     pub_im.format = "jpeg"
     pub_im.data = img_str
     #pub_im.data = np.array(cv2.imencode('.jpg', im_np)[1]).tostring()
     #flip_im.data = flip_arr.tostring()
     #flip_im.header.stamp = msg.header.stamp
     self.fake_pub_image.publish(pub_im)