Example #1
0
def read_image(image_msg):
    """Decode a compressed ROS image message into a BGR image.

    Returns the decoded image on success; on a decoding failure the
    error is logged via rospy.logerr and None is returned.
    """
    try:
        return bgr_from_jpg(image_msg.data)
    except ValueError as err:
        rospy.logerr(err)
        return None
    def process_image(self, image_msg):
        """Decode an incoming compressed image, apply color balance and
        publish the corrected image.

        If decoding fails the error is logged and the frame is dropped.
        If color balancing yields no result (parameters not available),
        new parameters are requested instead of publishing.
        """
        if self.parametersChanged:
            self.log('Parameters changed.', 'info')
            self.refresh_parameters()
            self.parametersChanged = False

        # Hold the lock while decoding and storing the shared image.  The
        # context manager guarantees the lock is released on every exit
        # path; the original acquire()/release() pairing leaked the lock
        # if anything other than ValueError was raised after acquire().
        with self.image_lock:
            try:
                image = bgr_from_jpg(image_msg.data)
                self.image = image
            except ValueError as e:
                self.log('Anti_instagram cannot decode image: %s' % e)
                return

        color_balanced_image = self.ai.apply_color_balance(image,
                                   self.output_scale)

        if color_balanced_image is None:
            # Color-balance parameters are not ready yet; trigger a
            # (re)computation instead of publishing this frame.
            self.calculate_new_parameters(None)
            return

        corrected_image = self.bridge.cv2_to_compressed_imgmsg(
                              color_balanced_image)
        # Keep the source timestamp so downstream nodes can synchronize.
        corrected_image.header.stamp = image_msg.header.stamp
        self.corrected_image_publisher.publish(corrected_image)
    def processImage_(self, image_msg):
        """Detect colored lane markings in a compressed camera image and
        publish them as a SegmentList.

        Pipeline: decode the JPEG, resize/crop to the working resolution,
        run the line detector for white/yellow/red, normalize the pixel
        coordinates and publish.  When ``self.verbose`` is set, debug
        images (line overlay, edge map, color segmentation) are published
        as well.  Every stage is timed via TimeKeeper and the timings are
        logged intermittently.
        """

        self.stats.processed()

        # Periodically dump accumulated processing statistics, then reset.
        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = bgr_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        # Resize only when the frame does not already match the configured
        # working resolution (image_size is (height, width)).
        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        # Crop away the top rows (presumably sky/horizon -- confirm).
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # Set the image to be detected
        self.detector_used.setImage(image_cv)

        # Detect lines and normals

        #rospy.loginfo('segment_max_threshold: %s' % self.segment_max_threshold)

        white = self.detector_used.detectLines('white',
                                               self.segment_max_threshold)
        yellow = self.detector_used.detectLines('yellow',
                                                self.segment_max_threshold)
        red = self.detector_used.detectLines('red', self.segment_max_threshold)

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        # arr_cutoff restores the rows removed by the crop above; arr_ratio
        # maps pixel coordinates into [0, 1].  Component order appears to be
        # (x1, y1, x2, y2) -- confirm against the detector output.
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, white.normals,
                                  Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                  Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, red.normals,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(white.lines), len(yellow.lines), len(red.lines)))

        tk.completed('prepared')

        # Publish segmentList

        #rospy.loginfo('segmentList: %s' % (len(white.lines)+len(yellow.lines)+len(red.lines)))
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # print('line_detect_node: verbose is on!')

            # Draw lines and normals
            image_with_lines = np.copy(image_cv)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector_used.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
Example #4
0
    def cbNewImage(self, image_msg):
        """Apply the current color transformation to the latest camera
        image and publish the corrected image.

        Frames arriving while a previous callback is still running are
        dropped (non-blocking lock) so processing never backs up.
        """
        # Non-blocking acquire: skip this frame if we are still busy.
        if not self.thread_lock.acquire(False):
            return

        # try/finally guarantees the lock is released on every exit path.
        # The original code returned from the decode-error branch without
        # releasing, permanently blocking all subsequent frames.
        try:
            # memorize image
            self.image_msg = image_msg

            try:
                cv_image = bgr_from_jpg(image_msg.data)
            except ValueError as e:
                rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
                return

            # Downscale by scale_percent to reduce processing cost.
            scale_percent = self.scale_percent
            width = int(cv_image.shape[1] * scale_percent / 100)
            height = int(cv_image.shape[0] * scale_percent / 100)
            dim = (width, height)
            cv_image = cv2.resize(cv_image, dim,
                                  interpolation=cv2.INTER_NEAREST)
            # Start timing after the resize, matching the original code
            # (which discarded an earlier TimeKeeper created before decode).
            tk = TimeKeeper(image_msg)
            tk.completed('converted')

            if self.trafo_mode == "cb" or self.trafo_mode == "both":
                # apply color balance using latest thresholds
                colorBalanced_image_cv2 = self.ai.applyColorBalance(
                    img=cv_image, ThLow=self.ai.ThLow, ThHi=self.ai.ThHi)
                tk.completed('applyColorBalance')
            else:
                # pass input image through unchanged
                colorBalanced_image_cv2 = cv_image

            if self.trafo_mode == "lin" or self.trafo_mode == "both":
                # apply linear color transform using latest parameters
                corrected_image_cv2 = self.ai.applyTransform(
                    colorBalanced_image_cv2)
                tk.completed('applyTransform')
            else:
                # pass input image through unchanged
                corrected_image_cv2 = colorBalanced_image_cv2

            # store image to ros message
            self.corrected_image = self.bridge.cv2_to_compressed_imgmsg(
                corrected_image_cv2)  # , "bgr8")
            tk.completed('encode')

            self.corrected_image.header.stamp = image_msg.header.stamp  # for synchronization

            # publish image
            self.pub_image.publish(self.corrected_image)
            tk.completed('published')

            if self.verbose:
                rospy.loginfo('ai:\n' + tk.getall())
        finally:
            self.thread_lock.release()
    def duckie_detection(self, image_msg):
        """Detect orange blobs (duckies) in a compressed image, annotate
        the largest one, report its diameter over UDP and publish a debug
        image with the detected keypoints drawn.

        Decoding failures are logged and the frame is dropped.
        """
        # Decode from compressed image with OpenCV
        try:
            image_cv = bgr_from_jpg(image_msg.data)
        except ValueError as e:
            print('Could not decode image: %s' % e)
            return

        # Switch image from BGR colorspace to HSV
        hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)

        # Mask the orange hue range typical for duckies.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        orange_mask = cv2.inRange(hsv, (10, 100, 20), (20, 255, 255))

        # Erode to remove small spurious blobs ...
        erosion_image_cv = cv2.erode(orange_mask, kernel, iterations=3)

        # ... then dilate to restore the size of what survived.
        orange_dilate_cv = cv2.dilate(erosion_image_cv, kernel, iterations=1)

        # Detect blobs on the inverted mask.
        reversemask = 255 - orange_dilate_cv
        keypoints = self.detector.detect(reversemask)

        # Track the keypoint with the largest diameter: likely a duckie.
        largestBlob = 0
        duckPoint = None
        for idx, kp in enumerate(keypoints):
            x = int(kp.pt[0])
            y = int(kp.pt[1])
            # Bug fix: read this keypoint's size.  The original read
            # keypoints[0].size every iteration, so the comparison never
            # actually found the largest blob.
            diameter = int(kp.size)
            print("Value of X = ", x, "Value of Y = ", y, "Diameter is : ", diameter)
            if diameter > largestBlob:
                duckPoint = idx
                largestBlob = diameter

        # Guard against duckPoint staying None (all diameters were 0);
        # the original indexed keypoints[None] and crashed in that case.
        if duckPoint is not None:
            duckieX = int(keypoints[duckPoint].pt[0])
            duckieY = int(keypoints[duckPoint].pt[1])
            duckieD = int(keypoints[duckPoint].size)
            cv2.putText(image_cv, "Duckie", (duckieX - 20, duckieY - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

            # Handle sending UDP message to Server (diameter as a crude
            # distance proxy).
            message = str(duckieD)
            self.clientSocket.sendto(message.encode(),
                                     (self.serverName, self.serverPort))
            # returnMessage, serverAddress = self.clientSocket.recvfrom(2048)

        # Draw detected blobs as green circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of
        # the circle corresponds to the size of blob.
        im_with_keypoints = cv2.drawKeypoints(
            image_cv, keypoints, np.array([]), (0, 255, 0),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        keypoints_image_ros = self.bridge.cv2_to_imgmsg(im_with_keypoints, "bgr8")

        # Publish images
        self.publishImage(self.pub_duckie_detection, keypoints_image_ros)
    def processImage(self, event):
        """Timer callback: analyse the latest stored image and publish
        updated color-correction parameters.

        Depending on ``self.trafo_mode`` this computes color-balance
        thresholds ("cb"), a bounded linear color transform ("lin"), or
        both ("both"), publishing each as a ROS message.  The geometry-
        masked image used for the analysis is published as well.

        :param event: rospy timer event (unused by the body).
        """
        # processes image with either color balance, linear trafo or both

        # if we have seen an image:
        if self.image_msg is not None:
            tk = TimeKeeper(self.image_msg)

            try:
                cv_image = bgr_from_jpg(self.image_msg.data)
            except ValueError as e:
                rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
                return

            tk.completed('converted')

            # resize input image by the configured factor
            resized_img = cv2.resize(cv_image, (0, 0),
                                     fx=self.resize,
                                     fy=self.resize)
            tk.completed('resized')

            H, W, D = resized_img.shape

            # apply geometry
            if self.fancyGeom:
                # apply fancy geom: compute a region-of-interest mask
                # (processGeom2 presumably returns a 0/1 mask -- confirm)
                mask = self.mask = processGeom2(resized_img)
                self.mask255 = mask
                # NOTE(review): self.mask255 aliases mask, so this in-place
                # write also turns mask's 1s into 255s BEFORE geomImage is
                # computed below -- the multiply then scales pixels by 255,
                # not 1.  Looks unintended; verify against downstream use.
                self.mask255[self.mask255 == 1] = 255
                self.geomImage = np.expand_dims(mask, axis=-1) * resized_img

                tk.completed('fancyGeom')
                # self.geomImage = np.transpose(np.stack((mask, mask, mask), axis=2)) * resized_img
                # idx = (mask==1)
                # self.geomImage = np.zeros((H, W), np.uint8)
                # self.geomImage = resized_img[idx]
            else:
                # remove upper part of the image (keep the lower ~70%)
                self.geomImage = resized_img[int(H * 0.3):(H - 1), :, :]
                tk.completed('notFancyGeom')

            # apply color balance if required
            if self.trafo_mode == "cb" or self.trafo_mode == "both":
                # find color balance thresholds

                start_cb = time.time()
                self.ai.calculateColorBalanceThreshold(self.geomImage,
                                                       self.cb_percentage)
                end_cb = time.time()
                tk.completed('calculateColorBalanceThresholds')

                # store color balance thresholds to ros message
                self.transform_CB.th[0], self.transform_CB.th[
                    1], self.transform_CB.th[2] = self.ai.ThLow
                self.transform_CB.th[3], self.transform_CB.th[
                    4], self.transform_CB.th[5] = self.ai.ThHi
                # NOTE(review): th[3..5] are immediately overwritten with
                # 255, making the ThHi assignment above a dead store --
                # confirm whether pinning the high thresholds is intended.
                self.transform_CB.th[3] = 255
                self.transform_CB.th[4] = 255
                self.transform_CB.th[5] = 255

                # publish color balance thresholds
                self.pub_trafo_CB.publish(self.transform_CB)
                #    rospy.loginfo('ai: Color balance thresholds published.')

                tk.completed('colorBalance analysis')

            # apply linear trafo if required
            if self.trafo_mode == "lin" or self.trafo_mode == "both":
                # take in account the previous color balance
                if self.trafo_mode == "both":
                    # apply color balance
                    colorBalanced_image = self.ai.applyColorBalance(
                        self.geomImage, self.ai.ThLow, self.ai.ThHi)
                    rospy.loginfo(
                        'TRANSFORMATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                else:
                    # pass image without color balance trafo
                    colorBalanced_image = self.geomImage

                tk.completed('passed image to linear trafo')

                # not yet initialized: bootstrap with a larger iteration
                # budget (max_it_1) than the continuous mode below
                if not self.initialized:
                    # apply bounded trafo
                    start_lin = time.time()
                    mbool = self.ai.calculateBoundedTransform(
                        colorBalanced_image, self.max_it_1)
                    end_lin = time.time()
                    if mbool:
                        # store color transform to ros message
                        self.transform.s[0], self.transform.s[
                            1], self.transform.s[2] = self.ai.shift
                        self.transform.s[3], self.transform.s[
                            4], self.transform.s[5] = self.ai.scale

                        # publish color trafo
                        self.pub_trafo.publish(self.transform)
                        rospy.loginfo(
                            'ai: Initial trafo found and published! Switch to continuous mode.'
                        )
                    else:
                        rospy.loginfo(
                            'ai: average error too large. transform NOT updated.'
                        )

                # initialisation already done: continuous mode
                else:
                    # find color transform
                    start_lin2 = time.time()
                    mbool2 = self.ai.calculateBoundedTransform(
                        colorBalanced_image, self.max_it_2)
                    end_lin2 = time.time()
                    if mbool2:
                        tk.completed('calculateTransform')

                        # store color transform to ros message
                        self.transform.s[0], self.transform.s[
                            1], self.transform.s[2] = self.ai.shift
                        self.transform.s[3], self.transform.s[
                            4], self.transform.s[5] = self.ai.scale

                        # publish color trafo
                        self.pub_trafo.publish(self.transform)
                    else:
                        rospy.loginfo(
                            'ai: average error too large. transform NOT updated.'
                        )

            if self.fancyGeom:
                # publish the 0/255 geometry mask for debugging
                self.mask = self.bridge.cv2_to_imgmsg(self.mask255, "mono8")
                self.pub_mask.publish(self.mask)
                rospy.loginfo('published mask!')

            # publish the geometry-masked image actually analysed above
            geomImgMsg = self.bridge.cv2_to_imgmsg(self.geomImage, "bgr8")
            self.pub_geomImage.publish(geomImgMsg)

            #if self.verbose:
            #    rospy.loginfo('ai:\n' + tk.getall())
            # Timing bookkeeping below; values are computed but only used
            # by the commented-out prints.
            if self.trafo_mode == "cb" or self.trafo_mode == "both":
                cb_time = end_cb - start_cb
                #print('CB took: ' + str(cb_time))
            if self.trafo_mode == "lin" or self.trafo_mode == "both":
                if not self.initialized:
                    lin1_time = end_lin - start_lin
                #    print('Lin took: ' + str(lin1_time))
                else:
                    lin2_time = end_lin2 - start_lin2