def cbNewImage(self,image_msg):
        """Callback for a new compressed camera frame.

        Memorizes the message, and — when publishing is enabled — applies the
        AntiInstagram color transform and republishes the result as a JPEG
        CompressedImage on self.pub_image.
        """
        # memorize image
        self.image_msg = image_msg

        if self.image_pub_switch:
            tk = TimeKeeper(image_msg)
            # Decode the JPEG payload directly with OpenCV.
            # (np.fromstring is deprecated; frombuffer reads the same bytes.)
            cv_image = cv2.imdecode(np.frombuffer(image_msg.data, np.uint8), cv2.IMREAD_COLOR)

            corrected_image_cv2 = self.ai.applyTransform(cv_image)
            tk.completed('applyTransform')

            # Clamp to valid pixel range before re-encoding.
            corrected_image_cv2 = np.clip(corrected_image_cv2, 0, 255).astype(np.uint8)
            self.corrected_image = CompressedImage()
            self.corrected_image.format = "jpeg"
            # BUG FIX: the original line ended in a stray 'z' (SyntaxError) and
            # wrapped the whole (success_flag, buffer) tuple returned by
            # cv2.imencode; only element [1] (the encoded buffer) is the payload.
            self.corrected_image.data = np.array(cv2.imencode('.jpg', corrected_image_cv2)[1]).tostring()

            tk.completed('encode')

            self.pub_image.publish(self.corrected_image)

            tk.completed('published')

            if self.verbose:
                rospy.loginfo('ai:\n' + tk.getall())
# Example #2 (score: 0)
    def cbNewImage(self, image_msg):
        """Handle an incoming compressed frame: scale-and-shift it and republish.

        Stores the raw message, applies scaleandshift2 with the node's current
        scale/shift parameters, clips to uint8 range, and publishes the result
        as a bgr8 Image on self.pub_image.
        """
        print("new image received.")
        # keep the latest message around for other consumers
        self.image_msg = image_msg

        timing = TimeKeeper(image_msg)

        # Decode the JPEG payload into a BGR array.
        raw_frame = image_cv_from_jpg(self.image_msg.data)

        # Apply the current linear color transform.
        transformed = scaleandshift2(raw_frame, self.scale, self.shift)
        timing.completed('applyTransform')

        # Clamp into valid pixel range and convert back to a ROS Image.
        transformed = np.clip(transformed, 0, 255).astype(np.uint8)
        self.corrected_image = self.bridge.cv2_to_imgmsg(transformed, "bgr8")
        timing.completed('encode')

        self.pub_image.publish(self.corrected_image)
        timing.completed('published')

        if self.verbose:
            rospy.loginfo('ai:\n' + timing.getall())
# Example #3 (score: 0)
    def cbNewImage(self,image_msg):
        """Color-correct the incoming frame and republish it.

        Memorizes the message; when self.image_pub_switch is off this is a
        no-op. Otherwise applies the AntiInstagram transform and publishes the
        corrected frame as a bgr8 Image on self.pub_image.
        """
        self.image_msg = image_msg  # memorize image

        # Publishing disabled — nothing further to do.
        if not self.image_pub_switch:
            return

        keeper = TimeKeeper(image_msg)
        frame = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

        corrected = self.ai.applyTransform(frame)
        keeper.completed('applyTransform')

        # Clamp to the valid uint8 range before converting back to ROS.
        corrected = np.clip(corrected, 0, 255).astype(np.uint8)
        self.corrected_image = self.bridge.cv2_to_imgmsg(corrected, "bgr8")
        keeper.completed('encode')

        self.pub_image.publish(self.corrected_image)
        keeper.completed('published')

        if self.verbose:
            rospy.loginfo('ai:\n' + keeper.getall())
	def cbNewImage(self,image_msg):
		"""Color-correct the incoming frame and republish it as a bgr8 Image.

		NOTE(review): this method is tab-indented while sibling blocks in the
		file use spaces — a mixed-indentation hazard if merged into one module.
		"""
		# memorize image
		self.image_msg = image_msg

		if self.image_pub_switch:
			tk = TimeKeeper(image_msg)
			cv_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

			# AntiInstagram color transform, then clamp to valid uint8 range.
			corrected_image_cv2 = self.ai.applyTransform(cv_image)
			tk.completed('applyTransform')

			corrected_image_cv2 = np.clip(corrected_image_cv2, 0, 255).astype(np.uint8)
			self.corrected_image = self.bridge.cv2_to_imgmsg(corrected_image_cv2, "bgr8")

			tk.completed('encode')

			self.pub_image.publish(self.corrected_image)

			tk.completed('published')

			# Timing report only when verbose is enabled.
			if self.verbose:
				rospy.loginfo('ai:\n' + tk.getall())
# Example #5 (score: 0)
    def processImage_(self, image_msg):
        """Run the full line-detection pipeline on one compressed frame.

        Steps: decode JPEG -> resize/crop -> AntiInstagram color correction ->
        detect white/yellow/red lines via self.detector -> publish a
        SegmentList in normalized pixel coordinates on self.pub_lines.
        When self.verbose is set, also publishes debug images (line overlay,
        edge map, color segmentation).
        """
        self.stats.processed()

        # Periodic stats logging, throttled by intermittent_log_now().
        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        # image_size is (height, width): element [0] is compared against the
        # frame height below.
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        # Discard the top top_cutoff rows (above the road).
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        # Lines are (x1, y1, x2, y2): shift y by the crop offset, then scale
        # x by 1/width and y by 1/height.
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, white.normals,
                                  Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                  Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, red.normals,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(white.lines), len(yellow.lines), len(red.lines)))

        #self.loginfo("self.verbose %d" % self.verbose)

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
    def processImage_(self, image_msg):
        """Detect yellow lane segments in one compressed camera frame.

        Decodes the JPEG, resizes/crops it, color-corrects it (AntiInstagram),
        extracts yellow line segments via Canny edges + HSV masking +
        probabilistic Hough, computes their normals, and publishes them as a
        SegmentList on self.pub_lines. When self.verbose is set, also
        publishes a debug image with accepted segments drawn in.
        """
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize to the configured (height, width) and crop away the top rows.
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # HSV thresholds for the two colors of interest.
        hsv_blue1 = (100, 50, 50)
        hsv_blue2 = (150, 255, 255)
        hsv_yellow1 = (25, 50, 50)
        hsv_yellow2 = (45, 255, 255)

        # Edge map on the corrected image.
        gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 80, 200, apertureSize=3)

        # Color masks, dilated slightly so they overlap the edge pixels.
        hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        yellow = cv2.dilate(yellow, kernel)

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        blue = cv2.dilate(blue, kernel)

        # Probabilistic Hough restricted to yellow edge pixels.
        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10,
                                       np.empty(1), 3, 1)
        if lines_yellow is not None:
            # NOTE(review): indexing [0] assumes the (1, N, 4) layout of
            # OpenCV 2.4; OpenCV 3+ returns (N, 1, 4) — confirm cv2 version.
            lines_yellow = np.array(lines_yellow[0])
        else:
            lines_yellow = []

        self.blue = blue
        self.yellow = yellow

        # BUG FIX: initialize normals_yellow so the visualization below cannot
        # raise NameError when no yellow lines were detected.
        normals_yellow = []
        if len(lines_yellow) > 0:
            lines_yellow, normals_yellow = self.normals(lines_yellow, yellow)

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Add detected segments (with normals) to the list.
        if len(lines_yellow) > 0:
            segmentList.segments.extend(
                self.toSegmentMsg(lines_yellow, normals_yellow,
                                  Segment.YELLOW))

        self.intermittent_log('# segments:yellow %3d' % (len(lines_yellow)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            # BUG FIX: guard the drawing loop — the original called
            # np.hstack((lines_yellow, normals_yellow)) unconditionally, which
            # raised NameError whenever no yellow lines were detected.
            if len(lines_yellow) > 0:
                for x1, y1, x2, y2, norm_x, norm_y in np.hstack(
                        (lines_yellow, normals_yellow)):
                    x1 = int(x1)
                    x2 = int(x2)
                    y1 = int(y1)
                    y2 = int(y2)

                    # Segment midpoint, then probe points 3 px along +/- normal.
                    ox = int((x1 + x2) / 2)
                    oy = int((y1 + y2) / 2)

                    cx = (ox + 3 * norm_x).astype('int')
                    cy = (oy + 3 * norm_y).astype('int')

                    ccx = (ox - 3 * norm_x).astype('int')
                    ccy = (oy - 3 * norm_y).astype('int')

                    # Clamp probe points to the image area (presumably a
                    # 160x80 cropped frame — TODO confirm against
                    # image_size/top_cutoff).
                    if cx > 158:
                        cx = 158
                    elif cx < 1:
                        cx = 1
                    if ccx > 158:
                        ccx = 158
                    elif ccx < 1:
                        ccx = 1

                    if cy >= 79:
                        cy = 79
                    elif cy < 1:
                        cy = 1
                    if ccy >= 79:
                        ccy = 79
                    elif ccy < 1:
                        ccy = 1

                    # Keep only segments with blue on one side and yellow on
                    # the other.
                    if (blue[cy, cx] == 255 and yellow[ccy, ccx] == 255) or (
                            yellow[cy, cx] == 255 and blue[ccy, ccx] == 255):

                        cv2.line(image_with_lines, (x1, y1), (x2, y2),
                                 (0, 0, 255), 2)
                        cv2.circle(image_with_lines, (x1, y1), 2, (0, 255, 0))
                        cv2.circle(image_with_lines, (x2, y2), 2, (255, 0, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
    def processImage_(self, image_msg):
        """Detect yellow/blue lane segments in one compressed frame.

        Decodes, resizes/crops, color-corrects, extracts yellow and blue line
        segments (Canny + HSV masks + Hough), orients their normals using the
        blue mask, and publishes a SegmentList. Written for Python 2 (print
        statements) and the OpenCV-2.4 HoughLinesP return layout.
        """
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        # NOTE(review): self.detector results are never read in this variant —
        # detection is done inline below; this call may be vestigial.
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200, apertureSize=3)
        # NOTE(review): this Hough result (threshold 50) is overwritten below
        # (threshold 10) without being used — dead code.
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, np.empty(1), 3, 1)

        hsv_yellow1 = (25, 50, 50)
        hsv_yellow2 = (45, 255, 255)

        hsv_blue1 = (100, 90, 80)
        hsv_blue2 = (150, 255, 155)

        #change color space to HSV
        # NOTE(review): the masks use the *uncorrected* image_cv while the edge
        # map uses image_cv_corr — confirm this mix is intentional.
        hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)

        #find the color
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        yellow = cv2.dilate(yellow, kernel)
        self.bw_1 = yellow

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        blue = cv2.dilate(blue, kernel)
        self.bw_2 = blue

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        edge_color_blue = cv2.bitwise_and(blue, edges)

        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, np.empty(1), 3, 1)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10,
                                       np.empty(1), 3, 1)
        lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi / 180, 10,
                                     np.empty(1), 3, 1)

        if lines_yellow is not None:
            # [0] matches the OpenCV-2.4 (1, N, 4) layout.
            lines_yellow = np.array(lines_yellow[0])
            print "found yellow lines"

        else:
            lines_yellow = []
            print "no yellow lines"

        if lines_blue is not None:
            lines_blue = np.array(lines_blue[0])
            print "found blue lines"

        else:
            lines_blue = []
            print "no blue lines"

        # Hard-coded crop offset / frame size (40 rows cut, 160x120 frame) —
        # presumably must match top_cutoff and image_size; TODO confirm.
        arr_cutoff = np.array((0, 40, 0, 40))
        arr_ratio = np.array((1. / 120, 1. / 160, 1. / 120, 1. / 160))

        lines_1 = lines_yellow
        lines_2 = lines_blue

        normals = []
        centers = []
        # NOTE(review): all the yellow-line geometry below is gated on *blue*
        # detections (lines_2) — looks suspicious; verify intent.
        if len(lines_2) > 0:
            #find the normalized coordinates
            lines_normalized = ((lines_1 + arr_cutoff) * arr_ratio)

            #find the unit vector
            length = np.sum((lines_1[:, 0:2] - lines_1[:, 2:4])**2,
                            axis=1,
                            keepdims=True)**0.5
            dx = 1. * (lines_1[:, 3:4] - lines_1[:, 1:2]) / length
            dy = 1. * (lines_1[:, 0:1] - lines_1[:, 2:3]) / length

            #find the center point
            centers = np.hstack([(lines_1[:, 0:1] + lines_1[:, 2:3]) / 2,
                                 (lines_1[:, 1:2] + lines_1[:, 3:4]) / 2])

            #find the vectors' direction
            x3 = (centers[:, 0:1] - 4. * dx).astype('int')
            x3[x3 < 0] = 0
            x3[x3 >= 160] = 160 - 1

            y3 = (centers[:, 1:2] - 4. * dy).astype('int')
            y3[y3 < 0] = 0
            y3[y3 >= 120] = 120 - 1

            x4 = (centers[:, 0:1] + 4. * dx).astype('int')
            x4[x4 < 0] = 0
            x4[x4 >= 160] = 160 - 1

            y4 = (centers[:, 1:2] + 4. * dy).astype('int')
            y4[y4 < 0] = 0
            y4[y4 >= 120] = 120 - 1

            # Flip the normal sign where both probe points fall on the blue
            # mask (self.bw_2).
            flag_signs = (np.logical_and(
                self.bw_2[y3, x3] > 0,
                self.bw_2[y4, x4] > 0)).astype('int') * 2 - 1
            normals = np.hstack([dx, dy]) * flag_signs

            # Reorder endpoints so segment direction is consistent with the
            # normal orientation.
            flag = ((lines_1[:, 2] - lines_1[:, 0]) * normals[:, 1] -
                    (lines_1[:, 3] - lines_1[:, 1]) * normals[:, 0]) > 0
            for i in range(len(lines_1)):
                if flag[i]:
                    x1, y1, x2, y2 = lines_1[i, :]
                    lines_1[i, :] = [x2, y2, x1, y1]

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(lines_2) > 0:
            # NOTE(review): lines_normalized_blue is computed but never used;
            # the publish below sends lines_normalized (based on lines_1, the
            # yellow lines, normalized with the hard-coded ratios above) —
            # suspected bug, left as-is.
            lines_normalized_blue = ((lines_2 + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized, normals, centers, 0))

        # NOTE(review): the label says "white" but the count is blue lines.
        self.intermittent_log('# segments: white %3d ' % (len(lines_2)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            if len(lines_1) > 0:
                for x1, y1, x2, y2 in lines_1:
                    # Python 2 floor division on the midpoint coordinates.
                    cx = int(x1 + x2) / 2
                    cy = int(y1 + y2) / 2
                    if cx > 160:
                        cx = 160 - 1
                    elif cx < 0:
                        cx = 0
                    if cy > 120:
                        cy = 120 - 1
                    elif cy < 0:
                        cy = 0
                    # Keep segments whose midpoint has blue on one side and
                    # yellow on the other.
                    if (self.bw_2[cy, cx - 1] == 255
                            and self.bw_1[cy, cx + 1] == 255):
                        cv2.line(image_with_lines, (x1, y1), (x2, y2),
                                 (255, 255, 255))
                        cv2.circle(image_with_lines, (x1, y1), 1,
                                   (0, 255, 0))  #green circle
                        cv2.circle(image_with_lines, (x2, y2), 1,
                                   (255, 0, 0))  #red circle
                    if (self.bw_2[cy, cx + 1] == 255
                            and self.bw_1[cy, cx - 1] == 255):
                        cv2.line(image_with_lines, (x1, y1), (x2, y2),
                                 (255, 255, 255))
                        cv2.circle(image_with_lines, (x1, y1), 1,
                                   (0, 255, 0))  #green circle
                        cv2.circle(image_with_lines, (x2, y2), 1,
                                   (255, 0, 0))  #red circle
            #drawLines(image_with_lines, (lines_2), (255, 255, 255))
            #drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            #drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            #colorSegment = color_segment(self.bw_1, self.bw_2)
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
# Example #8 (score: 0)
    def cbNewImage(self, image_msg):
        """Apply the current anti-instagram transformation to the latest image
        and republish it as a CompressedImage.

        Frames arriving while a previous frame is still being processed are
        dropped (non-blocking lock acquire).
        """
        if not self.thread_lock.acquire(False):
            # Previous frame still in flight — drop this one.
            return

        # BUG FIX: the original leaked self.thread_lock on the early return in
        # the decode-failure path, deadlocking the callback after one bad
        # frame. The try/finally guarantees release on every exit path.
        try:
            # memorize image
            self.image_msg = image_msg

            try:
                cv_image = bgr_from_jpg(image_msg.data)
            except ValueError as e:
                rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
                return

            # Downscale by scale_percent before transforming.
            scale_percent = self.scale_percent
            width = int(cv_image.shape[1] * scale_percent / 100)
            height = int(cv_image.shape[0] * scale_percent / 100)
            dim = (width, height)
            cv_image = cv2.resize(cv_image, dim,
                                  interpolation=cv2.INTER_NEAREST)
            # (The original also created an identical TimeKeeper before the
            # decode and immediately discarded it here — removed as dead code.)
            tk = TimeKeeper(image_msg)
            tk.completed('converted')

            if self.trafo_mode == "cb" or self.trafo_mode == "both":
                # apply color balance using latest thresholds
                colorBalanced_image_cv2 = self.ai.applyColorBalance(
                    img=cv_image, ThLow=self.ai.ThLow, ThHi=self.ai.ThHi)
                tk.completed('applyColorBalance')
            else:
                # pass input image
                colorBalanced_image_cv2 = cv_image

            if self.trafo_mode == "lin" or self.trafo_mode == "both":
                # apply linear color Transform using latest parameters
                corrected_image_cv2 = self.ai.applyTransform(
                    colorBalanced_image_cv2)
                tk.completed('applyTransform')
            else:
                # pass input image
                corrected_image_cv2 = colorBalanced_image_cv2

            # store image to ros message
            self.corrected_image = self.bridge.cv2_to_compressed_imgmsg(
                corrected_image_cv2)  # , "bgr8")
            tk.completed('encode')

            self.corrected_image.header.stamp = image_msg.header.stamp  # for synchronization

            # publish image
            self.pub_image.publish(self.corrected_image)
            tk.completed('published')

            if self.verbose:
                rospy.loginfo('ai:\n' + tk.getall())
        finally:
            self.thread_lock.release()
    def processImage_(self, image_msg):
        """Detect yellow lane segments and stop the car when none are found.

        Same pipeline as the sibling variant (decode -> resize/crop ->
        AntiInstagram -> Canny + HSV masks + Hough -> SegmentList publish),
        plus: when no segments are found it publishes a False BoolStamped on
        self.pub_lane (once, gated by self.time_switch) and a zero
        Twist2DStamped on self.pub_car_cmd to stop the vehicle.
        """
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # set up parameter

        hsv_blue1 = (100,50,50)
        hsv_blue2 = (150,255,255)
        hsv_yellow1 = (25,50,50)
        hsv_yellow2 = (45,255,255)


        # Set the image to be detected
        gray = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 80, 200, apertureSize = 3)

        hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        yellow = cv2.dilate(yellow, kernel)

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        blue = cv2.dilate(blue, kernel)

        # Detect lines and normals

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi/180, 10, np.empty(1), 3, 1)
        if lines_yellow is not None:
            # [0] matches the OpenCV-2.4 (1, N, 4) HoughLinesP layout.
            lines_yellow = np.array(lines_yellow[0])

        else:
            lines_yellow = []

        #edge_color_blue = cv2.bitwise_and(blue, edges)
        #lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)
        #if lines_blue is not None:
            #lines_blue = np.array(lines_blue[0])

        #else:
            #lines_blue = []

        #print "***************** ",image_cv.shape," *********************"
        #bw_yellow = yellow
        #bw_blue = blue

        self.blue = blue
        self.yellow = yellow
        # NOTE(review): normals_yellow is only assigned here; when no yellow
        # lines are detected and self.verbose is set, the np.hstack in the
        # visualization below raises NameError — latent bug, left as-is.
        if len(lines_yellow) > 0:
            lines_yellow,normals_yellow = self.normals(lines_yellow,yellow)

        #if len(lines_blue) > 0:
            #lines_blue,normals_blue = self.normals(lines_blue,bw_blue)


        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList


        if len(lines_yellow) > 0:
            segmentList.segments.extend(self.toSegmentMsg(lines_yellow, normals_yellow, Segment.YELLOW))
        # No segments at all: signal lane loss once and command a full stop.
        if len(segmentList.segments) == 0:

                    if self.time_switch == False:
                        msgg = BoolStamped()
                        msgg.data = False
                        self.pub_lane.publish(msgg)
                        self.time_switch = True


                    car_control_msg = Twist2DStamped()
                    car_control_msg.v = 0.0
                    car_control_msg.omega = 0.0
                    self.pub_car_cmd.publish(car_control_msg)


        #if len(lines_blue) > 0:
            #segmentList.segments.extend(self.toSegmentMsg(lines_blue, normals_blue, Segment.YELLOW))


        self.intermittent_log('# segments:yellow %3d' % (len(lines_yellow)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines_yellow,normals_yellow)):
                x1 = int(x1)
                x2 = int(x2)
                y1 = int(y1)
                y2 = int(y2)

                # Segment midpoint, then probe points 3 px along +/- normal.
                ox= int((x1+x2)/2)
                oy= int((y1+y2)/2)

                cx = (ox+3*norm_x).astype('int')
                cy = (oy+3*norm_y).astype('int')

                ccx = (ox-3*norm_x).astype('int')
                ccy = (oy-3*norm_y).astype('int')

                # Clamp probe points to the image area (presumably a 160x80
                # cropped frame — TODO confirm against image_size/top_cutoff).
                if cx >158:
                    cx = 158
                elif cx <1:
                    cx = 1
                if ccx >158:
                    ccx = 158
                elif ccx <1:
                    ccx = 1

                if cy >=79:
                    cy = 79
                elif cy <1:
                    cy = 1
                if ccy >=79:
                    ccy = 79
                elif ccy <1:
                    ccy = 1


                # Keep only segments with blue on one side, yellow on the other.
                if (blue[cy, cx] == 255 and yellow[ccy,ccx] ==255) or (yellow[cy, cx] == 255 and blue[ccy,ccx] ==255):

                    cv2.line(image_with_lines, (x1,y1), (x2,y2), (0,0,255), 2)
                    cv2.circle(image_with_lines, (x1,y1), 2, (0,255,0))
                    cv2.circle(image_with_lines, (x2,y2), 2, (255,0,0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

#         if self.verbose:
            #colorSegment = color_segment(white.area, red.area, yellow.area)
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')


        self.intermittent_log(tk.getall())
# Example #10 (score: 0)
    def processImage_(self, image_msg):
        """Detect a green or blue stop line and publish a recovery signal.

        The frame is decoded from JPEG, resized to ``self.image_size``,
        cropped below ``self.top_cutoff`` and color-corrected, then
        thresholded in HSV for green and blue.  Depending on which color
        dominates a fixed horizontal band of the mask, the method blocks
        for a color-specific delay and then publishes a ``BoolStamped``
        (data=True) on ``self.pub_lane_recovery``.

        NOTE(review): the ``time.sleep`` calls block this callback for up
        to 7 seconds -- confirm this is intended for the node's threading
        model.

        :param image_msg: sensor_msgs/CompressedImage to process.
        """
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV; a corrupt JPEG is
        # logged and the frame is skipped.
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image.
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # Apply color correction (AntiInstagram) and clamp back to uint8.
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # HSV thresholds for the two stop-line colors.
        hsv_green1 = np.array([70, 100, 60])
        hsv_green2 = np.array([90, 255, 255])
        hsv_blue1 = np.array([90, 80, 50])
        hsv_blue2 = np.array([110, 255, 255])

        # Threshold the corrected frame in HSV space.
        hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
        green = cv2.inRange(hsv, hsv_green1, hsv_green2)
        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)

        # Dilate the masks to close small gaps in the detections.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        green = cv2.dilate(green, kernel)
        blue = cv2.dilate(blue, kernel)

        # Count mask pixels inside a fixed horizontal band.
        # NOTE(review): rows 90:120 are hard-coded; assumes the resized
        # frame layout -- confirm against self.image_size / top_cutoff.
        green_band = green[90:120, :]
        blue_band = blue[90:120, :]

        # The three original branches differed only in the printed label
        # and the blocking delay, so select those once and publish a
        # single BoolStamped afterwards.
        if (green_band == 255).sum() > 250:
            label, delay = "green line detected!", 4
        elif (blue_band == 255).sum() > 250:
            label, delay = "blue line detected!", 7
        else:
            label, delay = "only red line detected", 1

        print(label)
        time.sleep(delay)
        print(" %d sec finish" % delay)

        msgg = BoolStamped()
        msgg.data = True
        self.pub_lane_recovery.publish(msgg)

        tk.completed('prepared')

        # VISUALIZATION only below
        if self.verbose:
            tk.completed('drawn')

            # Publish the thresholded masks for debugging.
            image_msg_out_green = self.bridge.cv2_to_imgmsg(green, "mono8")
            image_msg_out_green.header.stamp = image_msg.header.stamp
            self.pub_image_green.publish(image_msg_out_green)

            image_msg_out_blue = self.bridge.cv2_to_imgmsg(blue, "mono8")
            image_msg_out_blue.header.stamp = image_msg.header.stamp
            self.pub_image_blue.publish(image_msg_out_blue)

            tk.completed('pub_image')

        self.intermittent_log(tk.getall())
    def processImage_(self, image_msg):
        """Detect yellow/blue line pairs via Canny + HoughLinesP and publish segments.

        Decodes the compressed frame, resizes/crops it, applies color
        correction, then extracts yellow and blue edge-aligned line
        segments.  Oriented normals are computed for the yellow lines and
        a SegmentList is published; when ``self.verbose`` is set, an
        annotated debug image is published as well.

        NOTE(review): several lines below look inconsistent
        (lines_1/lines_2 mixed, dead variables) -- flagged inline but left
        untouched.

        :param image_msg: sensor_msgs/CompressedImage to process.
        """
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        # Edge detection on the corrected frame.
        gray = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200, apertureSize = 3)
        # NOTE(review): this Hough result is discarded -- `lines` is
        # recomputed below with a different threshold.
        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50, np.empty(1), 3, 1)

        # HSV thresholds for the two target colors.
        hsv_yellow1 = (25,50,50)
        hsv_yellow2 = (45,255,255)

        hsv_blue1 = (100,90,80)
        hsv_blue2 = (150,255,155)

        #change color space to HSV
        # NOTE(review): thresholding uses the UNcorrected image_cv here,
        # while edges came from image_cv_corr -- confirm intentional.
        hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)

        #find the color
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))

        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        yellow = cv2.dilate(yellow, kernel)
        # Cache the masks on self for use by the flag/visualization code.
        self.bw_1=yellow

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        blue = cv2.dilate(blue, kernel) 
        self.bw_2=blue


        # Keep only edges that fall on each color mask.
        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        edge_color_blue   = cv2.bitwise_and(blue, edges)

        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 10, np.empty(1), 3, 1)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi/180, 10, np.empty(1), 3, 1)
        lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)


        # HoughLinesP returns None when nothing is found; normalize to
        # an array (found) or an empty list (not found).
        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])
            print "found yellow lines"
    
        else:
            lines_yellow = []
            print "no yellow lines"
    
        if lines_blue is not None:
            lines_blue = np.array(lines_blue[0])
            print "found blue lines"

        else:
            lines_blue= []
            print "no blue lines"


        # Hard-coded crop offset (40 px) and 160x120 normalization used
        # only by the lines_normalized computation below.
        arr_cutoff = np.array((0, 40, 0, 40))
        arr_ratio = np.array((1./120, 1./160, 1./120, 1./160))

        lines_1 = lines_yellow
        lines_2 = lines_blue

        normals = []
        centers = []
        # NOTE(review): the guard tests lines_2 (blue) but every
        # computation inside uses lines_1 (yellow); if lines_1 is empty
        # while lines_2 is not, the arithmetic below will fail.
        if len(lines_2)>0:
            #find the normalized coordinates
            lines_normalized = ((lines_1 + arr_cutoff) * arr_ratio)

            #find the unit vector
            length = np.sum((lines_1[:, 0:2] -lines_1[:, 2:4])**2, axis=1, keepdims=True)**0.5
            dx = 1.* (lines_1[:,3:4]-lines_1[:,1:2])/length
            dy = 1.* (lines_1[:,0:1]-lines_1[:,2:3])/length

            #find the center point
            centers = np.hstack([(lines_1[:,0:1]+lines_1[:,2:3])/2, (lines_1[:,1:2]+lines_1[:,3:4])/2])

       
            #find the vectors' direction
            # Probe points 4 px on either side of each center, clamped to
            # the 160x120 frame.
            x3 = (centers[:,0:1] - 4.*dx).astype('int')
            x3[x3<0]=0
            x3[x3>=160]=160-1

            y3 = (centers[:,1:2] - 4.*dy).astype('int')
            y3[y3<0]=0
            y3[y3>=120]=120-1

            x4 = (centers[:,0:1] + 4.*dx).astype('int')
            x4[x4<0]=0
            x4[x4>=160]=160-1

            y4 = (centers[:,1:2] + 4.*dy).astype('int')
            y4[y4<0]=0
            y4[y4>=120]=120-1
    
            # Flip the normal sign when both probe points land on the
            # blue mask (self.bw_2).
            flag_signs = (np.logical_and(self.bw_2[y3,x3]>0, self.bw_2[y4,x4]>0)).astype('int')*2-1
            normals = np.hstack([dx, dy]) * flag_signs
    
    
            # Reorder endpoints so each line agrees with its normal.
            flag = ((lines_1[:,2]-lines_1[:,0])*normals[:,1] - (lines_1[:,3]-lines_1[:,1])*normals[:,0])>0
            for i in range(len(lines_1)):
                if flag[i]:
                    x1,y1,x2,y2 = lines_1[i, :]
                    lines_1[i, :] = [x2,y2,x1,y1]


        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(lines_2) > 0:
            # NOTE(review): lines_normalized_blue is computed here but
            # never used -- the next line publishes the stale
            # lines_normalized (built from lines_1 with the hard-coded
            # 40-px/160x120 constants above).  Likely a bug; left as-is.
            lines_normalized_blue = ((lines_2 + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized, normals,centers, 0))

        # NOTE(review): the label says "white" but the count is of
        # lines_2 (blue).
        self.intermittent_log('# segments: white %3d ' % (len(lines_2)))
        
        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            if len(lines_1)>0:
                for x1,y1,x2,y2 in lines_1:    
                    # Clamp the midpoint to the frame before probing the
                    # cached masks one pixel to each side.
                    cx = int(x1+x2)/2
                    cy = int(y1+y2)/2
                    if cx >160:
                        cx = 160-1
                    elif cx<0:
                        cx=0
                    if cy >120:
                        cy = 120-1
                    elif cy<0:
                        cy=0
                    # Draw only when blue and yellow masks flank the
                    # midpoint (in either left/right order).
                    if(self.bw_2[cy,cx-1]==255 and self.bw_1[cy,cx+1]==255):
                        cv2.line(image_with_lines, (x1,y1), (x2,y2), (255,255,255))
                        cv2.circle(image_with_lines, (x1,y1), 1, (0,255,0)) #green circle
                        cv2.circle(image_with_lines, (x2,y2), 1, (255,0,0)) #red circle
                    if(self.bw_2[cy,cx+1]==255 and self.bw_1[cy,cx-1]==255):
                        cv2.line(image_with_lines, (x1,y1), (x2,y2), (255,255,255))
                        cv2.circle(image_with_lines, (x1,y1), 1, (0,255,0)) #green circle
                        cv2.circle(image_with_lines, (x2,y2), 1, (255,0,0)) #red circle
            #drawLines(image_with_lines, (lines_2), (255, 255, 255))
            #drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            #drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')


#         if self.verbose:
            #colorSegment = color_segment(self.bw_1, self.bw_2) 
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
    def processImage_(self, image_msg):
        """Run the line detector on one frame and publish the segments.

        The frame is decoded from JPEG, resized to ``self.image_size``,
        cropped below ``self.top_cutoff`` and color-corrected before
        white, yellow and red line segments are extracted and published
        as a SegmentList.  When ``self.verbose`` is set, annotated debug
        images (lines overlay, edge map, color segmentation) are
        published as well.

        :param image_msg: sensor_msgs/CompressedImage to process.
        """
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode the compressed frame; a corrupt JPEG is logged and skipped.
        try:
            frame = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Bring the frame to the working resolution, then crop off the top.
        target_h = self.image_size[0]
        target_w = self.image_size[1]
        actual_h, actual_w = frame.shape[0:2]
        if (target_h, target_w) != (actual_h, actual_w):
            frame = cv2.resize(frame, (target_w, target_h),
                               interpolation=cv2.INTER_NEAREST)
        frame = frame[self.top_cutoff:, :, :]

        tk.completed('resized')

        # Color correction (AntiInstagram), clamped back to uint8.
        corrected = cv2.convertScaleAbs(self.ai.applyTransform(frame))

        tk.completed('corrected')

        # Hand the corrected frame to the detector and query each color.
        self.detector.setImage(corrected)
        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')

        # Build the outgoing SegmentList, stamped like the input frame.
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Undo the crop offset and normalize pixel coordinates to [0, 1].
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1. / target_w, 1. / target_h,
                              1. / target_w, 1. / target_h))
        detections = ((white, Segment.WHITE),
                      (yellow, Segment.YELLOW),
                      (red, Segment.RED))
        for det, color_id in detections:
            if len(det.lines) > 0:
                normalized = (det.lines + arr_cutoff) * arr_ratio
                segmentList.segments.extend(
                    self.toSegmentMsg(normalized, det.normals, color_id))

        self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (
            len(white.lines), len(yellow.lines), len(red.lines)))

        tk.completed('prepared')

        # Publish the segment list.
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # Everything below is visualization only.
        if self.verbose:

            # Overlay the detected lines on a copy of the corrected frame.
            annotated = np.copy(corrected)
            for det, bgr in ((white, (0, 0, 0)),
                             (yellow, (255, 0, 0)),
                             (red, (0, 255, 0))):
                drawLines(annotated, det.lines, bgr)

            tk.completed('drawn')

            # Publish the annotated frame.
            image_msg_out = self.bridge.cv2_to_imgmsg(annotated, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            # Debug images: edge map and per-color segmentation.
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
# Exemple #13
# 0
    def processImage(self, image_msg):
        """Process one camera frame, guarded by a non-blocking lock.

        If another frame is already being processed the new message is
        dropped immediately.  The lock is released in a ``finally`` block:
        the original released it only on the success path, so any
        exception during processing left the lock held forever and
        silently disabled the node.

        :param image_msg: sensor_msgs/CompressedImage to process.
        """
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return

        try:
            self._process_image_locked(image_msg)
        finally:
            # Always release, even if decoding/detection raises.
            self.thread_lock.release()

    def _process_image_locked(self, image_msg):
        """Decode, detect line segments and publish results for one frame.

        Runs with ``self.thread_lock`` held by :meth:`processImage`.
        """
        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV.  A corrupt JPEG is
        # logged and the frame skipped, matching the sibling
        # processImage_ implementations (previously it would propagate).
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image.
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # Apply color correction (AntiInstagram) and clamp back to uint8.
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected.
        self.detector.setImage(image_cv_corr)

        # Detect lines, normals, centers and mask areas for each color.
        lines_white, normals_white, centers_white, area_white = self.detector.detectLines2(
            'white')
        lines_yellow, normals_yellow, centers_yellow, area_yellow = self.detector.detectLines2(
            'yellow')
        lines_red, normals_red, centers_red, area_red = self.detector.detectLines2(
            'red')

        tk.completed('detected')

        # SegmentList stamped like the input frame.
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList.
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(lines_white) > 0:
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, normals_white,
                                  Segment.WHITE))
        if len(lines_yellow) > 0:
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, normals_yellow,
                                  Segment.YELLOW))
        if len(lines_red) > 0:
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, normals_red,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(lines_white), len(lines_yellow), len(lines_red)))

        tk.completed('prepared')

        # Publish segmentList.
        self.pub_lines.publish(segmentList)
        tk.completed('pub_lines')

        # VISUALIZATION only below.

        # Draw lines on a copy of the corrected frame and publish it.
        image_with_lines = np.copy(image_cv_corr)
        drawLines(image_with_lines, lines_white, (0, 0, 0))
        drawLines(image_with_lines, lines_yellow, (255, 0, 0))
        drawLines(image_with_lines, lines_red, (0, 255, 0))

        tk.completed('drawn')

        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)

        tk.completed('pub_image')

        # Verbose-only debug images: edge map and color segmentation.
        if self.verbose:

            color_segment = self.detector.color_segment(
                area_white, area_red, area_yellow)

            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                color_segment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

        tk.completed('pub_edge')

        self.intermittent_log(tk.getall())