Code example #1
	def save_image(self, center_camera, left_camera, right_camera, img_id):
		center_image_cv = image_cv_from_jpg(center_camera.data)
		left_image_cv = image_cv_from_jpg(left_camera.data)
		right_image_cv = image_cv_from_jpg(right_camera.data)

		cv2.imwrite('{}/center-{}.jpg'.format(IMG_PATH, img_id), center_image_cv)
		cv2.imwrite('{}/left-{}.jpg'.format(IMG_PATH, img_id), left_image_cv)
		cv2.imwrite('{}/right-{}.jpg'.format(IMG_PATH, img_id), right_image_cv)
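
Note: image_cv_from_jpg itself does not appear in any of these snippets. A minimal sketch of what such a helper presumably does, assuming the standard OpenCV decode path (the actual Duckietown implementation may differ):

import cv2
import numpy as np

def image_cv_from_jpg(data):
    # Wrap the raw JPEG bytes of a CompressedImage and decode them to a BGR array.
    arr = np.frombuffer(data, dtype=np.uint8)
    image_cv = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    if image_cv is None:
        # Matches the ValueError that callers in these examples catch.
        raise ValueError('could not decode JPEG data')
    return image_cv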
Code example #2
    def cbNewImage(self, image_msg):
        # memorize image
        print("new image received.")
        self.image_msg = image_msg

        if True:
            tk = TimeKeeper(image_msg)
            # cv_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
            cv_image = image_cv_from_jpg(self.image_msg.data)

            corrected_image_cv2 = scaleandshift2(cv_image, self.scale,
                                                 self.shift)
            tk.completed('applyTransform')

            corrected_image_cv2 = np.clip(corrected_image_cv2, 0,
                                          255).astype(np.uint8)
            self.corrected_image = self.bridge.cv2_to_imgmsg(
                corrected_image_cv2, "bgr8")

            tk.completed('encode')

            self.pub_image.publish(self.corrected_image)

            tk.completed('published')

            if self.verbose:
                rospy.loginfo('ai:\n' + tk.getall())
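
scaleandshift2 is not defined in this snippet; judging from the per-channel shift/scale computed in code examples #5 and #6 and the clip to [0, 255] above, it is presumably a per-channel linear color correction. A hedged sketch:

import numpy as np

def scaleandshift2(image, scale, shift):
    # image: HxWx3 BGR array; scale/shift: three per-channel coefficients (assumed).
    # Returns a float array; the caller clips to [0, 255] and casts to uint8.
    return image * np.asarray(scale) + np.asarray(shift)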
Code example #3
 def cbImage(self, image_msg):
     try:
         image_cv = image_cv_from_jpg(image_msg.data)
     except ValueError as e:
         print('Could not decode image: %s' % e)
         return
     if not self.active:
         return
     thread = threading.Thread(target=self.run, args=(image_cv, ))
     thread.setDaemon(True)
     thread.start()
Code example #4
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')
Code example #5
    def processImage(self, msg):
        '''
        Inputs:
            msg - CompressedImage - uncorrected image from raspberry pi camera
            
        Uses the anti_instagram library to adjust msg so that it matches the
        color temperature of a duckietown reference image. Calculates the health
        of the node and publishes the color transform and the health state; the
        health value reflects how good the computed transformation is.
        '''

        rospy.loginfo('ai: Computing color transform...')
        tk = TimeKeeper(msg)

        #cv_image = self.bridge.imgmsg_to_cv2(msg,"bgr8")
        try:
            cv_image = image_cv_from_jpg(msg.data)
        except ValueError as e:
            rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
            return

        tk.completed('converted')

        self.ai.calculateTransform(cv_image)

        tk.completed('calculateTransform')

        # if health is much below the threshold value, do not update the color correction and log it.
        if self.ai.health <= 0.001:
            # health is not good

            rospy.loginfo("Health is not good")

        else:
            self.health.J1 = self.ai.health
            self.transform.s[0], self.transform.s[1], self.transform.s[
                2] = self.ai.shift
            self.transform.s[3], self.transform.s[4], self.transform.s[
                5] = self.ai.scale

            rospy.set_param('antiins_shift', self.ai.shift.tolist())
            rospy.set_param('antiins_scale', self.ai.scale.tolist())
            self.pub_health.publish(self.health)
            self.pub_transform.publish(self.transform)
            rospy.loginfo('ai: Color transform published.')
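
The transform message packs the shift into s[0:3] and the scale into s[3:6]. A subscriber would presumably unpack them and apply the same linear correction; a sketch in which only the s field layout is taken from the code above and everything else is assumed:

import numpy as np

def apply_published_transform(image, transform_msg):
    # Unpack following the layout used when publishing: s[0:3] = shift, s[3:6] = scale.
    shift = np.asarray(transform_msg.s[0:3])
    scale = np.asarray(transform_msg.s[3:6])
    corrected = image * scale + shift
    return np.clip(corrected, 0, 255).astype(np.uint8)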
Code example #6
	def processImage(self,msg):
		'''
		Inputs:
			msg - CompressedImage - uncorrected image from raspberry pi camera
		Uses the anti_instagram library to adjust msg so that it matches the
		color temperature of a duckietown reference image. Calculates the health
		of the node and publishes the color transform and the health state; the
		health value reflects how good the computed transformation is.
		'''

		rospy.loginfo('ai: Computing color transform...')
		tk = TimeKeeper(msg)

		#cv_image = self.bridge.imgmsg_to_cv2(msg,"bgr8")
		try:
			cv_image = image_cv_from_jpg(msg.data)
		except ValueError as e:
			rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
			return

		tk.completed('converted')

		self.ai.calculateTransform(cv_image)

		tk.completed('calculateTransform')


		# FIXME !!!
		if False:
			# health is not good

			rospy.loginfo("Health is not good")

		else:
			self.health.J1 = self.ai.health
			self.transform.s[0], self.transform.s[1], self.transform.s[2] = self.ai.shift
			self.transform.s[3], self.transform.s[4], self.transform.s[5] = self.ai.scale

			self.pub_health.publish(self.health)
			self.pub_transform.publish(self.transform)
			rospy.loginfo('ai: Color transform published.')
Code example #7
    def process_log(self, bag_in, bag_out):
        algo_db = get_easy_algo_db()
        line_detector = algo_db.create_instance(FAMILY_LINE_DETECTOR,
                                                self.line_detector)
        image_prep = algo_db.create_instance('image_prep', self.image_prep)

        vehicle = which_robot(bag_in)
        topic = '/%s/camera_node/image/compressed' % vehicle
        context = FakeContext()
        transform = None
        frame = 0
        for compressed_img_msg in d8n_bag_read_with_progress(bag_in, topic):

            with context.phase('decoding'):
                try:
                    image_cv = image_cv_from_jpg(compressed_img_msg.data)
                except ValueError as e:
                    msg = 'Could not decode image: %s' % e
                    raise_wrapped(ValueError, e, msg)

            segment_list = image_prep.process(context, image_cv, line_detector,
                                              transform)

            rendered = vs_fancy_display(image_prep.image_cv, segment_list)
            rendered = d8_image_zoom_linear(rendered, 2)
            log_name = 'log_name'
            time = 12
            rendered = add_duckietown_header(rendered, log_name, time, frame)
            out = d8n_image_msg_from_cv_image(
                rendered, "bgr8", same_timestamp_as=compressed_img_msg)

            # Write to the bag
            bag_out.write('processed', out)

            #             out = d8n_image_msg_from_cv_image(image_cv, "bgr8", same_timestamp_as=compressed_img_msg)
            bag_out.write('image', compressed_img_msg)

            frame += 1
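
process_log expects already-open bag handles. A hypothetical driver using the standard rosbag API (the file names and the node object holding process_log are assumptions):

import rosbag

bag_in = rosbag.Bag('input.bag')
bag_out = rosbag.Bag('output.bag', 'w')
try:
    node.process_log(bag_in, bag_out)  # 'node' is whatever object defines process_log
finally:
    bag_in.close()
    bag_out.close()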
Code example #8
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
        
        self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
                len(yellow.lines), len(red.lines)))
        
        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

#         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area) 
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')


        self.intermittent_log(tk.getall())
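
The arr_cutoff / arr_ratio step converts line endpoints from cropped-image pixels back to coordinates normalized by the full resized image: add top_cutoff to the y components, then divide each component by the image width or height. A worked example with assumed values image_size = (120, 160) and top_cutoff = 40:

import numpy as np

top_cutoff, H, W = 40, 120, 160          # assumed configuration
arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
arr_ratio = np.array((1. / W, 1. / H, 1. / W, 1. / H))

line = np.array((80, 20, 160, 80))       # (x1, y1, x2, y2) in the cropped image
print((line + arr_cutoff) * arr_ratio)   # -> [ 0.5  0.5  1.   1. ]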
Code example #9
def decode_cv_orig(data):
    return image_cv_from_jpg(data)
Code example #10
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            #self.intermittent_log(self.stats.info())
            self.stats.reset()

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        #dark count and tunnel check
        hist = cv2.calcHist([image_cv], [0], None, [5], [0, 5])
        if (hist[0] + hist[1]) > 150000 :
            self.dark_count = self.dark_count + 1
        else :
            self.dark_count = 0

        if 3 < self.dark_count < 9 :
            self.pub_signal.publish("TUNNEL")



        #calibration proc && image publish
        K_undistort = np.array(self.c['camera_matrix'])
        img_und = cv2.undistort(image_cv, np.array(self.c['camera_matrix']), np.array(self.c['dist_coefs']),
                                newCameraMatrix=K_undistort)
        img_und_p = self.bridge.cv2_to_imgmsg(img_und, "bgr8")
        #self.pub_image_cal.publish(img_und_p)

        # Resize and crop image and rotation
        hei_original, wid_original = img_und.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            img_und = cv2.resize(img_und, (self.image_size[1], self.image_size[0]),
                                 interpolation=cv2.INTER_NEAREST)
            img_origin = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                 interpolation=cv2.INTER_NEAREST)
        rotation_center = (self.image_size[1]/2, self.image_size[0]/2)
        rotation_mat = cv2.getRotationMatrix2D(rotation_center, 180, 1.0)
        img_und = cv2.warpAffine(img_und, rotation_mat, (self.image_size[1], self.image_size[0]))
        img_und = img_und[self.top_cutoff:,:,:]
        img_origin = cv2.warpAffine(img_origin, rotation_mat, (self.image_size[1], self.image_size[0]))
        img_origin2 = img_origin[:, :, :]
        img_origin = img_origin[:100,150:,:]
        image_cv_corr = cv2.convertScaleAbs(img_und)
        image_cv_corr_origin = cv2.convertScaleAbs(img_origin)
        image_cv_corr_origin2 = cv2.convertScaleAbs(img_origin2)

        ###############################################
        # Set the image to be detected
        self.detector.setImage(image_cv_corr)
        hei2_original, wid2_original = image_cv_corr.shape[0:2]

        # Detect lines and normals
        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')
        ################################################
        def detectLines2(self, color, hough_threshold, hough_min_line_length, hough_max_line_gap):
            bw, edge_color = self._colorFilter(color)

        #################################################
        self.detector2.setImage(image_cv_corr_origin)
        hei2_original_o, wid2_original_o = image_cv_corr_origin.shape[0:2]

        # Detect lines and normals
        #white_o = self.detector2.detectLines('white')
        #yellow_o = self.detector2.detectLines('yellow')
        #hough_threshold = 20
        #hough_min_line_length = 1
        #hough_max_line_gap = 1
        red_o = self.detector2.detectLines('red')

        #################################################3

        #################################################
        self.detector2.setImage(image_cv_corr_origin2)
        hei2_original_o2, wid2_original_o2 = image_cv_corr_origin2.shape[0:2]

        # Detect lines and normals
        #white_o = self.detector2.detectLines('white')
        yellow_o2 = self.detector2.detectLines('yellow')
        #hough_threshold = 20
        #hough_min_line_length = 1
        #hough_max_line_gap = 1
        red_o2 = self.detector2.detectLines('red')

        #################################################3

        if len(white.lines) > 0 or len(yellow.lines) > 0 or len(red.lines) > 0 :
            self.Linedisable = False

        if len(red_o.lines) > 0:
            print("red siganl detect !!!!!!!!!!!!!!!!!!!")
            self.pub_signal.publish("RED")

        #if len(yellow.lines) < 1 :
            #print("yellow ......... disable ")

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Yellow, White Line add segments to segmentList
        if len(white.lines) > 0:
            segmentList.segments.extend(self.toSegmentMsg(white.lines, white.normals, Segment.WHITE))
        if len(yellow.lines) > 0:
           segmentList.segments.extend(self.toSegmentMsg(yellow.lines, yellow.normals, Segment.YELLOW))
        #if len(red.lines) > 0:
        #   segmentList.segments.extend(self.toSegmentMsg(red.lines, red.normals, Segment.RED))
        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        drawLines(image_with_lines, white.lines, (0, 0, 0))
        drawLines(image_with_lines, yellow.lines, (255, 0, 0))

        image_with_lines_o = np.copy(image_cv_corr_origin)
        drawLines(image_with_lines_o, red_o.lines, (0, 255, 0))

        #image_with_lines_o2 = np.copy(image_cv_corr_origin2)
        #drawLines(image_with_lines_o, white.lines, (0, 0, 0))
        #drawLines(image_with_lines_o2, yellow_o2.lines, (255, 0, 0))
        #drawLines(image_with_lines_o2, red_o2.lines, (0, 255, 0))

        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image_with_line.publish(image_msg_out)

        # Publish the frame with lines
        image_msg_out_o = self.bridge.cv2_to_imgmsg(image_with_lines_o, "bgr8")
        self.pub_image_with_line_o.publish(image_msg_out_o)
Code example #11
    def computePose(self, image_msg):
        # if we haven't set the camera parameters, do nothing
        if (not self.camParamsSet):
            return
        
        # undistort image
        undistorted = image_cv_from_jpg(image_msg.data)
        undistorted = cv2.undistort(undistorted, self.K, self.D)

        # convert image to grayscale
        undistorted = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)

        # if we haven't processed the first image, do that
        if (self.firstImage):
            self.oldImage = undistorted
            self.oldFeatures = self.detector.detect(self.oldImage)
            self.oldFeatures = np.array([x.pt for x in self.oldFeatures], dtype=np.float32)
            self.firstImage = False
            return
        
        if (self.secondImage):
            self.newImage = undistorted

            self.trackFeatures()
            # F, mask = cv2.findFundamentalMat(self.newFeatures, self.oldFeatures)
            # _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.newFeatures, self.oldFeatures, focal=self.focal, pp=self.pp)
            # E, normOldPts, normNewPts, mask = self.findEssentialMat()
            self.cur_R, self.cur_t = self.recoverPose()

            self.newCloud = self.triangulatePoints(self.cur_R, self.cur_t)

            self.oldFeatures = self.newFeatures
            self.oldImage = self.newImage
            self.oldCloud = self.newCloud

            self.secondImage = False

            self.publishPose()
            return
            
        # process subsequent images
        self.newImage = undistorted

        self.trackFeatures()
        #F, mask = cv2.findFundamentalMat(self.newFeatures, self.oldFeatures)
        # _, R, t, mask = cv2.recoverPose(E, self.newFeatures, self.oldFeatures, focal=self.focal, pp=self.pp)
        #E = self.findEssentialMat(F)
        R, t = self.recoverPose()

        self.newCloud = self.triangulatePoints(R,t)
        self.scale = self.getRelativeScale()

        self.cur_t = self.cur_t + self.scale * self.cur_R * t
        self.cur_R = self.cur_R * R
        
        print('num features %d' % (self.newFeatures.shape[0]))
        if (self.newFeatures.shape[0] < self.minNumFeatures):
            self.newFeatures = self.detector.detect(self.newImage)
            self.newFeatures = np.array([x.pt for x in self.newFeatures], dtype=np.float32)

        self.oldFeatures = self.newFeatures
        self.oldImage = self.newImage.copy()
        self.oldCloud = self.newCloud
        #kpim = self.oldImage.copy()
        #for feature in self.oldFeatures:
            #print(feature)
         #   cv2.circle(kpim, (int(feature[0]), int(feature[1])), 3, (0,0,255))
        #cv2.imwrite('odometryimage%d.png' % (self.imindex), kpim)
        #self.imindex += 1
        self.publishPose()
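
A remark on the pose update above: self.cur_t + self.scale * self.cur_R * t and self.cur_R * R perform matrix products only if the operands are np.matrix; with plain ndarrays, * would be elementwise. An equivalent sketch with explicit dot products, under that assumption:

import numpy as np

def accumulate_pose(cur_R, cur_t, R, t, scale):
    # R: 3x3 relative rotation, t: 3x1 relative translation (assumed shapes).
    cur_t = cur_t + scale * cur_R.dot(t)  # translate within the current global frame
    cur_R = cur_R.dot(R)                  # compose rotations
    return cur_R, cur_t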
Code example #12
 def cbImage(self, msg):
     image_cv = image_cv_from_jpg(msg.data)
     self.pic = image_cv
Code example #13
    def on_received_image(self, context, image_msg):
        if not self.active:
            return

        self.intermittent_counter += 1

        with context.phase('decoding'):
            # Decode from compressed image with OpenCV
            try:
                image_cv = image_cv_from_jpg(image_msg.data)
            except ValueError as e:
                self.loginfo('Could not decode image: %s' % e)
                return

        with context.phase('resizing'):
            # Resize and crop image
            hei_original, wid_original = image_cv.shape[0:2]

            if self.config.img_size[0] != hei_original or self.config.img_size[
                    1] != wid_original:
                # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
                image_cv = cv2.resize(
                    image_cv,
                    (self.config.img_size[1], self.config.img_size[0]),
                    interpolation=cv2.INTER_NEAREST)
            image_cv = image_cv[self.config.top_cutoff:, :, :]

        with context.phase('correcting'):
            # apply color correction: AntiInstagram
            image_cv_corr = self.ai.applyTransform(image_cv)
            image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        with context.phase('detection'):
            # Set the image to be detected
            self.detector.setImage(image_cv_corr)

            # Detect lines and normals
            white = self.detector.detectLines('white')
            yellow = self.detector.detectLines('yellow')
            red = self.detector.detectLines('red')

        with context.phase('preparing-images'):
            # SegmentList constructor
            segmentList = SegmentList()
            segmentList.header.stamp = image_msg.header.stamp

            # Convert to normalized pixel coordinates, and add segments to segmentList
            top_cutoff = self.config.top_cutoff
            s0, s1 = self.config.img_size[0], self.config.img_size[1]

            arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
            arr_ratio = np.array((1. / s1, 1. / s0, 1. / s1, 1. / s0))
            if len(white.lines) > 0:
                lines_normalized_white = ((white.lines + arr_cutoff) *
                                          arr_ratio)
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_white, white.normals,
                                 Segment.WHITE))
            if len(yellow.lines) > 0:
                lines_normalized_yellow = ((yellow.lines + arr_cutoff) *
                                           arr_ratio)
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                 Segment.YELLOW))
            if len(red.lines) > 0:
                lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_red, red.normals,
                                 Segment.RED))

            self.intermittent_log(
                '# segments: white %3d yellow %3d red %3d' %
                (len(white.lines), len(yellow.lines), len(red.lines)))

        # Publish segmentList
        with context.phase('publishing'):
            self.publishers.segment_list.publish(segmentList)

        # VISUALIZATION only below

        if self.config.verbose:

            with context.phase('draw-lines'):
                # Draw lines and normals
                image_with_lines = np.copy(image_cv_corr)
                drawLines(image_with_lines, white.lines, (0, 0, 0))
                drawLines(image_with_lines, yellow.lines, (255, 0, 0))
                drawLines(image_with_lines, red.lines, (0, 255, 0))

            with context.phase('published-images'):
                # Publish the frame with lines
                out = d8n_image_msg_from_cv_image(image_with_lines,
                                                  "bgr8",
                                                  same_timestamp_as=image_msg)
                self.publishers.image_with_lines.publish(out)

            with context.phase('pub_edge/pub_segment'):
                out = d8n_image_msg_from_cv_image(self.detector.edges,
                                                  "mono8",
                                                  same_timestamp_as=image_msg)
                self.publishers.edge.publish(out)

                colorSegment = color_segment(white.area, red.area, yellow.area)
                out = d8n_image_msg_from_cv_image(colorSegment,
                                                  "bgr8",
                                                  same_timestamp_as=image_msg)
                self.publishers.color_segment.publish(out)

        if self.intermittent_log_now():
            self.info('stats from easy_node\n' +
                      indent(context.get_stats(), '> '))
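
context.phase(...) wraps each stage in a timing scope whose totals come back through context.get_stats(). A minimal sketch of such a context manager (hypothetical; the real FakeContext may differ):

import time
from contextlib import contextmanager

class SimpleContext(object):
    def __init__(self):
        self.stats = {}

    @contextmanager
    def phase(self, name):
        # Accumulate wall-clock time spent inside the 'with' block under 'name'.
        t0 = time.time()
        try:
            yield
        finally:
            self.stats[name] = self.stats.get(name, 0.0) + (time.time() - t0)

    def get_stats(self):
        return '\n'.join('%s: %.1f ms' % (k, v * 1000)
                         for k, v in sorted(self.stats.items()))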
Code example #14
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        gray = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200, apertureSize = 3)
        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50, np.empty(1), 3, 1)

        hsv_yellow1 = (25,50,50)
        hsv_yellow2 = (45,255,255)

        hsv_blue1 = (100,90,80)
        hsv_blue2 = (150,255,155)

        #change color space to HSV
        hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)

        #find the color
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))

        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        yellow = cv2.dilate(yellow, kernel)
        self.bw_1=yellow

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        blue = cv2.dilate(blue, kernel) 
        self.bw_2=blue


        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        edge_color_blue   = cv2.bitwise_and(blue, edges)

        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 10, np.empty(1), 3, 1)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi/180, 10, np.empty(1), 3, 1)
        lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)


        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])
            print("found yellow lines")
        else:
            lines_yellow = []
            print("no yellow lines")

        if lines_blue is not None:
            lines_blue = np.array(lines_blue[0])
            print("found blue lines")
        else:
            lines_blue = []
            print("no blue lines")


        arr_cutoff = np.array((0, 40, 0, 40))
        arr_ratio = np.array((1./120, 1./160, 1./120, 1./160))

        lines_1 = lines_yellow
        lines_2 = lines_blue

        normals = []
        centers = []
        if len(lines_2)>0:
            #find the normalized coordinates
            lines_normalized = ((lines_1 + arr_cutoff) * arr_ratio)

            #find the unit vector
            length = np.sum((lines_1[:, 0:2] -lines_1[:, 2:4])**2, axis=1, keepdims=True)**0.5
            dx = 1.* (lines_1[:,3:4]-lines_1[:,1:2])/length
            dy = 1.* (lines_1[:,0:1]-lines_1[:,2:3])/length

            #find the center point
            centers = np.hstack([(lines_1[:,0:1]+lines_1[:,2:3])/2, (lines_1[:,1:2]+lines_1[:,3:4])/2])

       
            #find the vectors' direction
            x3 = (centers[:,0:1] - 4.*dx).astype('int')
            x3[x3<0]=0
            x3[x3>=160]=160-1

            y3 = (centers[:,1:2] - 4.*dy).astype('int')
            y3[y3<0]=0
            y3[y3>=120]=120-1

            x4 = (centers[:,0:1] + 4.*dx).astype('int')
            x4[x4<0]=0
            x4[x4>=160]=160-1

            y4 = (centers[:,1:2] + 4.*dy).astype('int')
            y4[y4<0]=0
            y4[y4>=120]=120-1
    
            flag_signs = (np.logical_and(self.bw_2[y3,x3]>0, self.bw_2[y4,x4]>0)).astype('int')*2-1
            normals = np.hstack([dx, dy]) * flag_signs
    
    
            flag = ((lines_1[:,2]-lines_1[:,0])*normals[:,1] - (lines_1[:,3]-lines_1[:,1])*normals[:,0])>0
            for i in range(len(lines_1)):
                if flag[i]:
                    x1,y1,x2,y2 = lines_1[i, :]
                    lines_1[i, :] = [x2,y2,x1,y1]


        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(lines_2) > 0:
            lines_normalized_blue = ((lines_2 + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized, normals,centers, 0))

        self.intermittent_log('# segments: white %3d ' % (len(lines_2)))
        
        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            if len(lines_1)>0:
                for x1,y1,x2,y2 in lines_1:    
                    cx = int(x1+x2)/2
                    cy = int(y1+y2)/2
                    if cx >160:
                        cx = 160-1
                    elif cx<0:
                        cx=0
                    if cy >120:
                        cy = 120-1
                    elif cy<0:
                        cy=0
                    if(self.bw_2[cy,cx-1]==255 and self.bw_1[cy,cx+1]==255):
                        cv2.line(image_with_lines, (x1,y1), (x2,y2), (255,255,255))
                        cv2.circle(image_with_lines, (x1,y1), 1, (0,255,0)) #green circle
                        cv2.circle(image_with_lines, (x2,y2), 1, (255,0,0)) #red circle
                    if(self.bw_2[cy,cx+1]==255 and self.bw_1[cy,cx-1]==255):
                        cv2.line(image_with_lines, (x1,y1), (x2,y2), (255,255,255))
                        cv2.circle(image_with_lines, (x1,y1), 1, (0,255,0)) #green circle
                        cv2.circle(image_with_lines, (x2,y2), 1, (255,0,0)) #red circle
            #drawLines(image_with_lines, (lines_2), (255, 255, 255))
            #drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            #drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')


#         if self.verbose:
            #colorSegment = color_segment(self.bw_1, self.bw_2) 
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
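
One detail worth verifying in code example #14: for a segment from (x1, y1) to (x2, y2) of length L, the code sets dx = (y2 - y1)/L and dy = (x1 - x2)/L, which is indeed a unit vector perpendicular to the segment. A quick numeric check (illustrative values):

import numpy as np

x1, y1, x2, y2 = 10., 20., 40., 60.
L = np.hypot(x2 - x1, y2 - y1)
dx, dy = (y2 - y1) / L, (x1 - x2) / L
assert abs(dx * (x2 - x1) + dy * (y2 - y1)) < 1e-9  # perpendicular to the segment
assert abs(np.hypot(dx, dy) - 1.0) < 1e-9           # unit length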
Code example #15
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            #self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        #origin image publish
        image_cv_origin_p = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
        image_cv_origin_p.header.stamp = image_msg.header.stamp
        self.pub_image_origin.publish(image_cv_origin_p)

        #calibration proc && image publish

        K_undistort = np.array(self.c['camera_matrix'])
        img_und = cv2.undistort(image_cv,
                                np.array(self.c['camera_matrix']),
                                np.array(self.c['dist_coefs']),
                                newCameraMatrix=K_undistort)
        img_und_p = self.bridge.cv2_to_imgmsg(img_und, "bgr8")
        self.pub_image_cal.publish(img_und_p)

        # Resize and crop image and rotation
        hei_original, wid_original = img_und.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            img_und = cv2.resize(img_und,
                                 (self.image_size[1], self.image_size[0]),
                                 interpolation=cv2.INTER_NEAREST)

        rotation_center = (self.image_size[1] / 2, self.image_size[0] / 2)
        rotation_mat = cv2.getRotationMatrix2D(rotation_center, 180, 1.0)
        img_und = cv2.warpAffine(img_und, rotation_mat,
                                 (self.image_size[1], self.image_size[0]))

        img_und = img_und[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        # image_cv_corr = image_cv
        #image_cv_corr = self.ai.applyTransform(img_und)
        image_cv_corr = cv2.convertScaleAbs(img_und)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)
        hei2_original, wid2_original = image_cv_corr.shape[0:2]

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')
        #rospy.loginfo('after %.1f %.1f  ',hei2_original , wid2_original)
        #rospy.loginfo('after %d %d  (%.1f %.1f) (%.1f %.1f)',hei2_original , wid2_original, white.lines.x1)

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(white.lines) > 0:
            #lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            lines_normalized_white = white.lines
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, white.normals,
                                  Segment.WHITE))
            #rospy.loginfo('white detect ')
        if len(yellow.lines) > 0:
            #lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            lines_normalized_yellow = yellow.lines
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                  Segment.YELLOW))
            #rospy.loginfo('yellow detect ' )
        if len(red.lines) > 0:
            #lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            lines_normalized_red = red.lines
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, red.normals,
                                  Segment.RED))

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        #self.loginfo('send segmentList publish finish')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image_with_line.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')
Code example #16
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # set up parameter

        hsv_blue1 = (100, 50, 50)
        hsv_blue2 = (150, 255, 255)
        hsv_yellow1 = (25, 50, 50)
        hsv_yellow2 = (45, 255, 255)

        # Set the image to be detected
        gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 80, 200, apertureSize=3)

        hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        yellow = cv2.dilate(yellow, kernel)

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        blue = cv2.dilate(blue, kernel)

        # Detect lines and normals

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10,
                                       np.empty(1), 3, 1)
        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])

        else:
            lines_yellow = []

        #edge_color_blue = cv2.bitwise_and(blue, edges)
        #lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)
        #if lines_blue is not None:
        #lines_blue = np.array(lines_blue[0])

        #else:
        #lines_blue = []

#print "***************** ",image_cv.shape," *********************"
#bw_yellow = yellow
#bw_blue = blue

        self.blue = blue
        self.yellow = yellow
        normals_yellow = []  # ensure defined even when no yellow lines are found
        if len(lines_yellow) > 0:
            lines_yellow, normals_yellow = self.normals(lines_yellow, yellow)

        #if len(lines_blue) > 0:
        #lines_blue,normals_blue = self.normals(lines_blue,bw_blue)

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList

        if len(lines_yellow) > 0:
            segmentList.segments.extend(
                self.toSegmentMsg(lines_yellow, normals_yellow,
                                  Segment.YELLOW))

        #if len(lines_blue) > 0:
        #segmentList.segments.extend(self.toSegmentMsg(lines_blue, normals_blue, Segment.YELLOW))

        self.intermittent_log('# segments:yellow %3d' % (len(lines_yellow)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            for x1, y1, x2, y2, norm_x, norm_y in np.hstack(
                (lines_yellow, normals_yellow)):
                x1 = int(x1)
                x2 = int(x2)
                y1 = int(y1)
                y2 = int(y2)

                ox = int((x1 + x2) / 2)
                oy = int((y1 + y2) / 2)

                cx = (ox + 3 * norm_x).astype('int')
                cy = (oy + 3 * norm_y).astype('int')

                ccx = (ox - 3 * norm_x).astype('int')
                ccy = (oy - 3 * norm_y).astype('int')

                if cx > 158:
                    cx = 158
                elif cx < 1:
                    cx = 1
                if ccx > 158:
                    ccx = 158
                elif ccx < 1:
                    ccx = 1

                if cy >= 79:
                    cy = 79
                elif cy < 1:
                    cy = 1
                if ccy >= 79:
                    ccy = 79
                elif ccy < 1:
                    ccy = 1

                if (blue[cy, cx] == 255 and yellow[ccy, ccx] == 255) or (
                        yellow[cy, cx] == 255 and blue[ccy, ccx] == 255):

                    cv2.line(image_with_lines, (x1, y1), (x2, y2), (0, 0, 255),
                             2)
                    cv2.circle(image_with_lines, (x1, y1), 2, (0, 255, 0))
                    cv2.circle(image_with_lines, (x2, y2), 2, (255, 0, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            #colorSegment = color_segment(white.area, red.area, yellow.area)
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
Code example #17
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            #self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        #origin image publish
        image_cv_origin_p = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
        image_cv_origin_p.header.stamp = image_msg.header.stamp
        #self.pub_image_origin.publish(image_cv_origin_p)

        #calibration proc && image publish

        #K_undistort = np.array(self.c['camera_matrix'])
        #img_und = cv2.undistort(image_cv, np.array(self.c['camera_matrix']), np.array(self.c['dist_coefs']), newCameraMatrix=K_undistort)
        #img_und_p = self.bridge.cv2_to_imgmsg(img_und, "bgr8")
        #self.pub_image_cal.publish(img_und_p)

        # Resize and crop image and rotation
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)

        rotation_center = (self.image_size[1] / 2, self.image_size[0] / 2)
        rotation_mat = cv2.getRotationMatrix2D(rotation_center, 180, 1.0)
        image_cv = cv2.warpAffine(image_cv, rotation_mat,
                                  (self.image_size[1], self.image_size[0]))

        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        # image_cv_corr = image_cv
        #image_cv_corr = self.ai.applyTransform(img_und)
        image_cv_corr = cv2.convertScaleAbs(image_cv)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)
        hei2_original, wid2_original = image_cv_corr.shape[0:2]

        # Detect lines and normals

        white = self.detector.detectLines('white')
        #yellow = self.detector.detectLines('yellow')
        #red = self.detector.detectLines('red')
        #rospy.loginfo('after %.1f %.1f  ',hei2_original , wid2_original)
        #rospy.loginfo('after %d %d  (%.1f %.1f) (%.1f %.1f)',hei2_original , wid2_original, white.lines.x1)

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))

        rospy.loginfo('white.lines %d', len(white.lines))
        if len(white.lines) > 0:
            #lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            lines_normalized_white = white.lines
        #segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
            #rospy.loginfo('white detect ')
            #if len(yellow.lines) > 0:
            #lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            #lines_normalized_yellow = yellow.lines
            #segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
            #rospy.loginfo('yellow detect ' )
            #if len(red.lines) > 0:
            #lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            #lines_normalized_red = red.lines
            #segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))

            # Publish segmentList
        self.pub_lines.publish(segmentList)
        #self.loginfo('send segmentList publish finish')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            for i in xrange(len(white.lines)):
                #if i == 5 :
                #   break
                for x1, y1, x2, y2 in white.lines[i]:
                    cv2.line(image_with_lines, (x1, y1), (x2, y2), (0, 0, 255),
                             3)

            # arrange
            #i = 0
            px1 = 0
            py1 = 1
            px2 = 2
            py2 = 3
            pdeg = 4
            LM = [[0] * 5 for i in range(len(white.lines))]
            for i in xrange(len(white.lines)):
                for x1, y1, x2, y2 in white.lines[i]:
                    if x1 <= x2:
                        LM[i][px1] = x1
                        LM[i][py1] = y1
                        LM[i][px2] = x2
                        LM[i][py2] = y2
                    else:
                        LM[i][px1] = x2
                        LM[i][py1] = y2
                        LM[i][px2] = x1
                        LM[i][py2] = y1
            # add degree and check degree
            #i = 0
            pdegM = [[0] * 1 for i in range(len(LM))]
            for i in xrange(len(LM)):
                LM[i][pdeg] = ((LM[i][py2] - LM[i][py1]) * 100) / (LM[i][px2] -
                                                                   LM[i][px1])
                pdegM[i] = LM[i][pdeg]
                rospy.loginfo('(%d, %d) (%d, %d) %d ', LM[i][px1], LM[i][py1],
                              LM[i][px2], LM[i][py2], LM[i][pdeg])
            degM = np.mean(pdegM)
            rospy.loginfo('mean %d ,var  %d', np.mean(pdegM), np.var(pdegM))

            # Make Block
            i = 0
            j = 0
            BM = []
            BM.append(LM[0])
            #rospy.loginfo('BLOCK NUMBER1 %d ', len(BM));
            #BM.append(LM[1])
            #rospy.loginfo('BLOCK NUMBER2 %d ', len(BM));
            #BM.append(LM[2])
            #rospy.loginfo('BLOCK NUMBER3 %d ', len(BM));

            #for i in xrange(len(LM)) :
            #rospy.loginfo('////////////////////////////////////////// %d  ', len(BM))
            #for j in xrange(len(BM)):
            #if (BM[j][px1] <= LM[i][px1] and LM[i][px1] <= BM[j][px2]) or (BM[j][px1] <= LM[i][px2] and LM[i][px2] <= BM[j][px2]) or  (LM[i][px1] <= BM[j][px1] and BM[j][px1] <= LM[i][px2]) or (LM[i][px1] <= BM[j][px2] and BM[j][px2] <= LM[i][px2]) :
            #inBM = [ BM[j][px1], LM[i][px1]], [BM[j][px2], LM[i][px2] ]
            #BM[j][px1] = np.min(inBM)
            #BM[j][px2] = np.max(inBM)
            #rospy.loginfo('BLOCK NUMBER i%d  j%d  ( %d  %d ) ', i, j ,BM[j][px1], BM[j][px2]);
            #continue
            #BM.append(LM[i])
            #break

            px1M = [[LM[i][px1]] for i in range(len(LM))]
            px2M = [[LM[i][px2]] for i in range(len(LM))]
            xMix = np.min(px1M)
            XMax = np.max(px2M)
            rospy.loginfo('//// min %d max %d ', xMix, XMax)

            #overlap = False
            #for j in xrange(xMix,XMax+1):

            #for i in xrange(len(LM)) :
            #for k in xrange((LM[i][px1],LM[i][px2]+1)
            #k == j
            #overlap = True

            overlap = False
            for i in xrange(len(LM)):
                rospy.loginfo(
                    '////////////////////////////////////////// %d  ', len(BM))
                for j in xrange(len(BM)):
                    #while True :
                    overlap = False
                    for m in xrange(BM[j][px1], BM[j][px2] + 1):
                        for n in xrange(LM[i][px1], LM[i][px2] + 1):
                            if m == n:
                                overlap = True

                    if overlap:
                        rospy.loginfo('LM %d ,BM %d overlap happen', i, j)
                        inBM = [BM[j][px1],
                                LM[i][px1]], [BM[j][px2], LM[i][px2]]
                        BM[j][px1] = np.min(inBM)
                        BM[j][px2] = np.max(inBM)
                        break
                    BM.append(LM[i])
                    j = j + 1
                    break

            #rospy.loginfo('////////////////////////////////////////// %d  ', len(BM))

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image_with_line.publish(image_msg_out)

            tk.completed('pub_image')

        if self.verbose:
            #colorSegment = color_segment(white.area, red.area, yellow.area)
            colorSegment = color_segment(white.area, white.area, white.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')
Code example #18
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # set up parameter

        hsv_green1 = np.array([70,100,60])
        hsv_green2 = np.array([90,255,255])
        hsv_blue1 = np.array([90,80,50])
        hsv_blue2 = np.array([110,255,255])


        # Set the image to be detected
        hsv = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2HSV)
        green = cv2.inRange(hsv,hsv_green1,hsv_green2)
        blue = cv2.inRange(hsv,hsv_blue1,hsv_blue2)
    
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        green = cv2.dilate(green, kernel)
        blue = cv2.dilate(blue, kernel)

        x = green[90:120,:]
        y = blue[90:120,:]
        
        msgg = BoolStamped()


        if (x == 255).sum() > 250:
            print("green line detected!")
            time.sleep(4)
            print("4 sec finish")

            msgg.data = True
            self.pub_lane_recovery.publish(msgg)

        elif (y == 255).sum() > 250:
            print("blue line detected!")
            time.sleep(7)
            print("7 sec finish")

            msgg.data = True
            self.pub_lane_recovery.publish(msgg)

        else:
            print("only red line detected")
            time.sleep(1)
            print("1 sec finish")

            msgg.data = True
            self.pub_lane_recovery.publish(msgg)
       
        tk.completed('prepared')

        # VISUALIZATION only below
        
        if self.verbose:

            tk.completed('drawn')

            # Publish the frame with lines

            image_msg_out_green = self.bridge.cv2_to_imgmsg(green, "mono8")
            image_msg_out_green.header.stamp = image_msg.header.stamp
            self.pub_image_green.publish(image_msg_out_green)

            image_msg_out_blue = self.bridge.cv2_to_imgmsg(blue, "mono8")
            image_msg_out_blue.header.stamp = image_msg.header.stamp
            self.pub_image_blue.publish(image_msg_out_blue)

            tk.completed('pub_image')




        self.intermittent_log(tk.getall())
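The snippet above reduces each color mask to a single number by counting mask pixels inside a fixed horizontal band; a self-contained sketch of that test, reusing the green thresholds and band rows from the code above ('frame.jpg' is a hypothetical input):

# Sketch: count HSV-mask pixels in a horizontal band to detect a colored line.
import cv2
import numpy as np

frame = cv2.imread('frame.jpg')                   # hypothetical BGR frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([70, 100, 60]),  # green range from above
                   np.array([90, 255, 255]))
mask = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
band = mask[90:120, :]                            # rows 90..119 only
if (band == 255).sum() > 250:                     # enough hits -> line present
    print("green line detected!")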
Code example #19
def saveImage(image_msg):
    # dump each received frame to a numbered PNG in the current directory
    global index
    image = image_cv_from_jpg(image_msg.data)
    cv2.imwrite('./botimage%04d.png' % (index), image)
    index += 1
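To use saveImage as written, it would be registered as a rospy subscriber on a CompressedImage topic, roughly like this (the topic name is an assumption):

# Sketch: hook saveImage up to a camera topic (topic name assumed).
import rospy
from sensor_msgs.msg import CompressedImage

index = 0  # module-level counter consumed by saveImage's `global index`

rospy.init_node('image_saver', anonymous=True)
rospy.Subscriber('/camera_node/image/compressed', CompressedImage,
                 saveImage, queue_size=1)
rospy.spin()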
Code example #20
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # set up HSV threshold parameters for blue and yellow

        hsv_blue1 = (100,50,50)
        hsv_blue2 = (150,255,255)
        hsv_yellow1 = (25,50,50)
        hsv_yellow2 = (45,255,255)


        # Set the image to be detected
        gray = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 80, 200, apertureSize = 3)
    
        hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        yellow = cv2.dilate(yellow, kernel)

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        blue = cv2.dilate(blue, kernel)

        # Detect lines and normals

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi/180, 10, np.empty(1), 3, 1)
        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])
        else:
            lines_yellow = []

        #edge_color_blue = cv2.bitwise_and(blue, edges)
        #lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)
        #if lines_blue is not None:
            #lines_blue = np.array(lines_blue[0])

        #else:
            #lines_blue = []

        #print "***************** ", image_cv.shape, " *********************"
        #bw_yellow = yellow
        #bw_blue = blue

        self.blue = blue
        self.yellow = yellow
        if len(lines_yellow) > 0:
            lines_yellow,normals_yellow = self.normals(lines_yellow,yellow)

        #if len(lines_blue) > 0:
            #lines_blue,normals_blue = self.normals(lines_blue,bw_blue)


        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        
        
        if len(lines_yellow) > 0:
            segmentList.segments.extend(self.toSegmentMsg(lines_yellow, normals_yellow, Segment.YELLOW))
        if len(segmentList.segments) == 0:
            # no yellow segments found: signal lost lane once and stop the car
            if self.time_switch == False:
                msgg = BoolStamped()
                msgg.data = False
                self.pub_lane.publish(msgg)
                self.time_switch = True

            car_control_msg = Twist2DStamped()
            car_control_msg.v = 0.0
            car_control_msg.omega = 0.0
            self.pub_car_cmd.publish(car_control_msg)

        #if len(lines_blue) > 0:
            #segmentList.segments.extend(self.toSegmentMsg(lines_blue, normals_blue, Segment.YELLOW))


        self.intermittent_log('# segments:yellow %3d' % (len(lines_yellow)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            # normals_yellow only exists when yellow lines were found above
            for x1, y1, x2, y2, norm_x, norm_y in (np.hstack(
                    (lines_yellow, normals_yellow)) if len(lines_yellow) > 0 else []):
                x1 = int(x1) 
                x2 = int(x2)
                y1 = int(y1)
                y2 = int(y2)
        
                # probe points 3 px along +/- the segment normal
                ox = int((x1 + x2) / 2)
                oy = int((y1 + y2) / 2)

                cx = (ox + 3 * norm_x).astype('int')
                cy = (oy + 3 * norm_y).astype('int')

                ccx = (ox - 3 * norm_x).astype('int')
                ccy = (oy - 3 * norm_y).astype('int')
        
                # clamp probes to the cropped image bounds
                if cx >158:
                    cx = 158
                elif cx <1:
                    cx = 1
                if ccx >158:
                    ccx = 158
                elif ccx <1:
                    ccx = 1
 
                if cy >=79:
                    cy = 79
                elif cy <1:
                    cy = 1
                if ccy >=79:
                    ccy = 79
                elif ccy <1:
                    ccy = 1

        
                if (blue[cy, cx] == 255 and yellow[ccy,ccx] ==255) or (yellow[cy, cx] == 255 and blue[ccy,ccx] ==255):

                    cv2.line(image_with_lines, (x1,y1), (x2,y2), (0,0,255), 2)
                    cv2.circle(image_with_lines, (x1,y1), 2, (0,255,0))
                    cv2.circle(image_with_lines, (x2,y2), 2, (255,0,0))            

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

#         if self.verbose:
            #colorSegment = color_segment(white.area, red.area, yellow.area) 
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')


        self.intermittent_log(tk.getall())
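Example #20 calls a self.normals helper that is not shown in the snippet. Based on the inline computation in the next example, a sketch of what such a helper might look like (an assumption, not the original implementation; zero-length segments are not handled):

# Sketch of a normals(lines, mask)-style helper; the 3 px probe distance
# mirrors the drawing code above.
import numpy as np

def normals(lines, mask, probe=3):
    lines = np.asarray(lines, dtype='float64')
    # unit vector perpendicular to each segment (x1,y1,x2,y2)
    length = np.sum((lines[:, 0:2] - lines[:, 2:4]) ** 2,
                    axis=1, keepdims=True) ** 0.5
    dx = (lines[:, 3:4] - lines[:, 1:2]) / length
    dy = (lines[:, 0:1] - lines[:, 2:3]) / length
    # segment centers
    cx = (lines[:, 0:1] + lines[:, 2:3]) / 2
    cy = (lines[:, 1:2] + lines[:, 3:4]) / 2
    h, w = mask.shape
    # probe the mask a few pixels along +/- the normal, clipped to bounds
    x3 = np.clip(cx - probe * dx, 0, w - 1).astype('int')
    y3 = np.clip(cy - probe * dy, 0, h - 1).astype('int')
    x4 = np.clip(cx + probe * dx, 0, w - 1).astype('int')
    y4 = np.clip(cy + probe * dy, 0, h - 1).astype('int')
    # flip the normal where both probes land on the mask
    sign = (np.logical_and(mask[y3, x3] > 0,
                           mask[y4, x4] > 0)).astype('int') * 2 - 1
    return lines, np.hstack([dx, dy]) * sign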
Code example #21
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200, apertureSize=3)
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, np.empty(1), 3, 1)

        hsv_yellow1 = (25, 50, 50)
        hsv_yellow2 = (45, 255, 255)

        hsv_blue1 = (100, 90, 80)
        hsv_blue2 = (150, 255, 155)

        #change color space to HSV
        hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)

        #find the color
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        yellow = cv2.dilate(yellow, kernel)
        self.bw_1 = yellow

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        blue = cv2.dilate(blue, kernel)
        self.bw_2 = blue

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        edge_color_blue = cv2.bitwise_and(blue, edges)

        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, np.empty(1), 3, 1)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10,
                                       np.empty(1), 3, 1)
        lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi / 180, 10,
                                     np.empty(1), 3, 1)

        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])
            print "found yellow lines"
        else:
            lines_yellow = []
            print "no yellow lines"

        if lines_blue is not None:
            lines_blue = np.array(lines_blue[0])
            print "found blue lines"
        else:
            lines_blue = []
            print "no blue lines"

        # normalization constants for a 160x120 frame with a 40-pixel top
        # cutoff (note: the x/y order of the ratios here differs from the
        # normalization used further below)
        arr_cutoff = np.array((0, 40, 0, 40))
        arr_ratio = np.array((1. / 120, 1. / 160, 1. / 120, 1. / 160))

        lines_1 = lines_yellow
        lines_2 = lines_blue

        normals = []
        centers = []
        # note: the gate below tests the *blue* lines, but it is the yellow
        # lines (lines_1) that are normalized and processed
        if len(lines_2) > 0:
            #find the normalized coordinates
            lines_normalized = ((lines_1 + arr_cutoff) * arr_ratio)

            #find the unit vector
            length = np.sum((lines_1[:, 0:2] - lines_1[:, 2:4])**2,
                            axis=1,
                            keepdims=True)**0.5
            dx = 1. * (lines_1[:, 3:4] - lines_1[:, 1:2]) / length
            dy = 1. * (lines_1[:, 0:1] - lines_1[:, 2:3]) / length

            #find the center point
            centers = np.hstack([(lines_1[:, 0:1] + lines_1[:, 2:3]) / 2,
                                 (lines_1[:, 1:2] + lines_1[:, 3:4]) / 2])

            #find the vectors' direction
            x3 = (centers[:, 0:1] - 4. * dx).astype('int')
            x3[x3 < 0] = 0
            x3[x3 >= 160] = 160 - 1

            y3 = (centers[:, 1:2] - 4. * dy).astype('int')
            y3[y3 < 0] = 0
            y3[y3 >= 120] = 120 - 1

            x4 = (centers[:, 0:1] + 4. * dx).astype('int')
            x4[x4 < 0] = 0
            x4[x4 >= 160] = 160 - 1

            y4 = (centers[:, 1:2] + 4. * dy).astype('int')
            y4[y4 < 0] = 0
            y4[y4 >= 120] = 120 - 1

            # probe the blue mask on each side of each segment center: the
            # sign is +1 when both probes land on blue, -1 otherwise, and
            # orients the normal
            flag_signs = (np.logical_and(
                self.bw_2[y3, x3] > 0,
                self.bw_2[y4, x4] > 0)).astype('int') * 2 - 1
            normals = np.hstack([dx, dy]) * flag_signs

            # swap endpoints where segment direction and normal disagree, so
            # every segment ends up with a consistent orientation
            flag = ((lines_1[:, 2] - lines_1[:, 0]) * normals[:, 1] -
                    (lines_1[:, 3] - lines_1[:, 1]) * normals[:, 0]) > 0
            for i in range(len(lines_1)):
                if flag[i]:
                    x1, y1, x2, y2 = lines_1[i, :]
                    lines_1[i, :] = [x2, y2, x1, y1]

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(lines_2) > 0:
            # note: lines_normalized_blue is computed but unused; the message
            # is built from the yellow normalization computed earlier
            lines_normalized_blue = ((lines_2 + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized, normals, centers, 0))

        self.intermittent_log('# segments: blue %3d ' % (len(lines_2)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            if len(lines_1) > 0:
                for x1, y1, x2, y2 in lines_1:
                    # segment midpoint, clamped so the cx+/-1 probes below
                    # stay inside a 160x120 mask
                    cx = int((x1 + x2) / 2)
                    cy = int((y1 + y2) / 2)
                    if cx > 158:
                        cx = 158
                    elif cx < 1:
                        cx = 1
                    if cy > 119:
                        cy = 119
                    elif cy < 0:
                        cy = 0
                    if (self.bw_2[cy, cx - 1] == 255
                            and self.bw_1[cy, cx + 1] == 255):
                        cv2.line(image_with_lines, (x1, y1), (x2, y2),
                                 (255, 255, 255))
                        cv2.circle(image_with_lines, (x1, y1), 1,
                                   (0, 255, 0))  #green circle
                        cv2.circle(image_with_lines, (x2, y2), 1,
                                   (255, 0, 0))  #red circle
                    if (self.bw_2[cy, cx + 1] == 255
                            and self.bw_1[cy, cx - 1] == 255):
                        cv2.line(image_with_lines, (x1, y1), (x2, y2),
                                 (255, 255, 255))
                        cv2.circle(image_with_lines, (x1, y1), 1,
                                   (0, 255, 0))  #green circle
                        cv2.circle(image_with_lines, (x2, y2), 1,
                                   (255, 0, 0))  #red circle
            #drawLines(image_with_lines, (lines_2), (255, 255, 255))
            #drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            #drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            #colorSegment = color_segment(self.bw_1, self.bw_2)
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
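Several of these examples normalize segment endpoints with the same (lines + cutoff) * ratio pattern: add the crop offset back to the y coordinates, then scale everything into [0, 1] of the full frame. A short numpy check (geometry values are assumptions matching the constants above):

# Sketch: verify the endpoint normalization on one (x1,y1,x2,y2) segment.
import numpy as np

top_cutoff, W, H = 40, 160, 120
arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
arr_ratio = np.array((1. / W, 1. / H, 1. / W, 1. / H))
line = np.array((80, 20, 120, 40))          # endpoints in the cropped image
print((line + arr_cutoff) * arr_ratio)      # -> [0.5, 0.5, 0.75, 0.666...]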
Code example #22
    def processImage(self, msg):
        '''
        Inputs:
            msg - CompressedImage - uncorrected image from raspberry pi camera

        Uses the anti_instagram library to adjust msg so that it matches the
        color temperature of a Duckietown reference image. Calculates the
        health of the node and publishes the corrected image and the health
        state; the health value reflects how good the computed transform is.
        '''

        rospy.loginfo('ai: Computing color transform...')
        tk = TimeKeeper(msg)

        # cv_image = self.bridge.imgmsg_to_cv2(msg,"bgr8")
        try:
            cv_image = image_cv_from_jpg(msg.data)
        except ValueError as e:
            rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
            return

        tk.completed('converted')

        # apply KMeans
        self.KM.applyKM(cv_image, fancyGeom=self.fancyGeom)

        # get the indices of the matched centers
        idxBlack, idxRed, idxYellow, idxWhite = self.KM.determineColor(
            True, self.KM.trained_centers)

        # get centers with red
        trained_centers = np.array([
            self.KM.trained_centers[idxBlack], self.KM.trained_centers[idxRed],
            self.KM.trained_centers[idxYellow],
            self.KM.trained_centers[idxWhite]
        ])

        # get centers w/o red
        trained_centers_woRed = np.array([
            self.KM.trained_centers[idxBlack],
            self.KM.trained_centers[idxYellow],
            self.KM.trained_centers[idxWhite]
        ])

        # calculate transform with 4 centers
        T4 = calcTransform(4, trained_centers)
        T4.calcTransform()

        # calculate transform with 3 centers
        T3 = calcTransform(3, trained_centers_woRed)
        T3.calcTransform()

        # compare residuals
        # in practice, this is NOT a fair way to compare the residuals: 4 centers
        # will almost always win out, causing a serious red shift in any image
        # that has only 3 colors
        if T4.returnResidualNorm() >= T3.returnResidualNorm():
            self.shift = T4.shift
            self.scale = T4.scale
        else:
            self.shift = T3.shift
            self.scale = T3.scale

        # because of the bias above, unconditionally fall back to the 3-center
        # transform (the comparison result is effectively discarded)
        self.shift = T3.shift
        self.scale = T3.scale
        tk.completed('calculateTransform')

        # if health is much below the threshold value, do not update the color correction and log it.
        if self.ai_health <= 0.001:
            # health is not good

            rospy.loginfo("Health is not good")

        else:
            self.health.J1 = self.ai_health
            self.transform.s[0], self.transform.s[1], self.transform.s[
                2] = self.shift
            self.transform.s[3], self.transform.s[4], self.transform.s[
                5] = self.scale

            self.pub_health.publish(self.health)
            self.pub_transform.publish(self.transform)
            rospy.loginfo('ai: Color transform published.')
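The KM.applyKM step above clusters the frame's pixels into a handful of dominant colors before matching them to black/red/yellow/white; a minimal cv2.kmeans sketch of that idea (the cluster count and input file are assumptions, not the anti_instagram internals):

# Sketch: cluster image pixels into 4 dominant BGR colors with cv2.kmeans.
import cv2
import numpy as np

img = cv2.imread('frame.jpg')               # hypothetical input frame
data = img.reshape(-1, 3).astype(np.float32)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, centers = cv2.kmeans(data, 4, None, criteria, 10,
                                cv2.KMEANS_RANDOM_CENTERS)
print(centers)                              # 4 cluster centers (BGR)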
Code example #23
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            #self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        #origin image publish
        image_cv_origin_p = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
        image_cv_origin_p.header.stamp = image_msg.header.stamp
        #self.pub_image_origin.publish(image_cv_origin_p)

        #calibration proc && image publish

        K_undistort = np.array(self.c['camera_matrix'])
        img_und = cv2.undistort(image_cv,
                                np.array(self.c['camera_matrix']),
                                np.array(self.c['dist_coefs']),
                                newCameraMatrix=K_undistort)
        #img_und_p = self.bridge.cv2_to_imgmsg(img_und, "bgr8")
        #self.pub_image_cal.publish(img_und_p)

        # Resize and crop image and rotation
        hei_original, wid_original = image_cv.shape[0:2]
        #hei_original3, wid_original3 = img_und.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
            img_und = cv2.resize(img_und,
                                 (self.image_size[1], self.image_size[0]),
                                 interpolation=cv2.INTER_NEAREST)

        rotation_center = (self.image_size[1] / 2, self.image_size[0] / 2)
        rotation_mat = cv2.getRotationMatrix2D(rotation_center, 180, 1.0)
        image_cv = cv2.warpAffine(image_cv, rotation_mat,
                                  (self.image_size[1], self.image_size[0]))

        img_und = cv2.warpAffine(img_und, rotation_mat,
                                 (self.image_size[1], self.image_size[0]))

        image_cv3 = img_und[self.top_cutoff:, :, :]
        image_cv = image_cv[self.top_cutoff:, 50:, :]

        #image_cv3 = image_cv

        tk.completed('resized')

        # apply color correction: AntiInstagram
        # image_cv_corr = image_cv
        #image_cv_corr = self.ai.applyTransform(img_und)
        image_cv_corr = cv2.convertScaleAbs(image_cv)
        image_cv_corr3 = cv2.convertScaleAbs(image_cv3)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)
        hei2_original, wid2_original = image_cv_corr.shape[0:2]

        self.detector3.setImage(image_cv_corr3)
        hei2_original3, wid2_original3 = image_cv_corr3.shape[0:2]

        # Detect lines and normals

        white = self.detector.detectLines('white')

        hough_threshold = 20  #20, 10
        hough_min_line_length = 30  #10, 1
        hough_max_line_gap = 10  #30, 1
        white2 = self.detector.detectLines2('white', hough_threshold,
                                            hough_min_line_length,
                                            hough_max_line_gap)

        yellow2 = self.detector3.detectLines2('yellow', hough_threshold,
                                              hough_min_line_length,
                                              hough_max_line_gap)
        #red = self.detector.detectLines('red')
        #rospy.loginfo('after %.1f %.1f  ',hei2_original , wid2_original)
        #rospy.loginfo('after %d %d  (%.1f %.1f) (%.1f %.1f)',hei2_original , wid2_original, white.lines.x1)

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))

        #rospy.loginfo('white.lines %d', len(white.lines))
        if len(white.lines) > 0:
            #lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            lines_normalized_white = white.lines
            #segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
            #rospy.loginfo('white detect ')
            #if len(yellow.lines) > 0:
            #lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            #lines_normalized_yellow = yellow.lines
            #segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
            #rospy.loginfo('yellow detect ' )
            #if len(red.lines) > 0:
            #lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            #lines_normalized_red = red.lines
            #segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))

        # Publish segmentList
        #self.pub_lines.publish(segmentList)
        #self.loginfo('send segmentList publish finish')

        # VISUALIZATION only below

        if True:  # visualization below always runs in this snippet

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            image_with_lines3 = np.copy(image_cv_corr3)
            for i in xrange(len(white.lines)):
                #if i == 5 :
                #   break
                for x1, y1, x2, y2 in white.lines[i]:
                    cv2.line(image_with_lines, (x1, y1), (x2, y2), (0, 0, 255),
                             3)

            image_with_lines2 = np.copy(image_cv_corr)

            for i in xrange(len(white2.lines)):
                #if i == 5 :
                #   break
                for x1, y1, x2, y2 in white2.lines[i]:
                    cv2.line(image_with_lines2, (x1, y1), (x2, y2),
                             (0, 0, 255), 3)

            for i in xrange(len(yellow2.lines)):
                #if i == 5 :
                #   break
                for x1, y1, x2, y2 in yellow2.lines[i]:
                    cv2.line(image_with_lines3, (x1, y1), (x2, y2),
                             (255, 0, 0), 3)

            #print( len(white.lines) ,  len(white2.lines) )
            if len(white.lines) > 1 and len(white2.lines) == 0:
                self.dot_suspen = self.dot_suspen + 1
                print(" detect dot count ", len(white.lines),
                      len(white2.lines))
                self.pub_dot_state.publish("DOT_LINE")
            elif len(white.lines) > 0 and len(white2.lines) == 0:
                self.dot_suspen = self.dot_suspen  # hold the counter
            else:
                #if self.prev_line_state == "DOT_LINE" or self.prev_line_state == "DOT_STOP_LINE" :
                #    self.line_state = "DOT_STOP_LINE"
                self.dot_suspen = 0

            if self.dot_suspen >= 7:
                print(" detect dot !!!")
            #self.start_time = rospy.get_time()
            #msg = String()
            #msg.data = "PARKING"
            #if self.prev_line_state == "default" or self.prev_line_state == "DOT_LINE" :
            #    self.line_state = "DOT_LINE"
            #    self.pub_line_state.publish("DOT_LINE")
            #if self.prev_line_state == "DOT_STOP_LINE" or self.prev_line_state == "DOT_LINE2" :
            #    self.line_state = "DOT_LINE2"
            #    self.pub_line_state.publish("DOT_LINE2")

            #self.prev_line_state = self.line_state

            #default, DOT_LINE , DOT_STOP_LINE, DOT_LINE2

            # arrange
            #i = 0

            # indices into each LM row: endpoints (x1,y1,x2,y2) and slope
            px1 = 0
            py1 = 1
            px2 = 2
            py2 = 3
            pdeg = 4

            LM = [[0] * 5 for i in range(len(yellow2.lines))]
            for i in xrange(len(yellow2.lines)):
                for x1, y1, x2, y2 in yellow2.lines[i]:
                    if x1 <= x2:
                        LM[i][px1] = x1
                        LM[i][py1] = y1
                        LM[i][px2] = x2
                        LM[i][py2] = y2
                    else:
                        LM[i][px1] = x2
                        LM[i][py1] = y2
                        LM[i][px2] = x1
                        LM[i][py2] = y1
            # add degree and check degree
            #i = 0
            pdegM = [0] * len(LM)
            #pMY = [[0]*1 for i in range(len(LM))]
            for i in xrange(len(LM)):
                # slope in percent; guard against vertical segments (dx == 0)
                dx = LM[i][px2] - LM[i][px1]
                if dx != 0:
                    LM[i][pdeg] = ((LM[i][py2] - LM[i][py1]) * 100) / dx
                pdegM[i] = LM[i][pdeg]
            #rospy.loginfo('(%d, %d) (%d, %d) %d ',LM[i][px1], LM[i][py1], LM[i][px2], LM[i][py2], LM[i][pdeg] )
            if len(yellow2.lines) > 1:
                degM = np.mean(pdegM)
                #print( pdegM )
                rospy.loginfo('mean %d ,var  %d , len %d', np.mean(pdegM),
                              np.var(pdegM), len(yellow2.lines))
                self.pub_yellow_degree.publish(np.mean(pdegM))
            else:
                # sentinel value: fewer than two yellow segments detected
                self.pub_yellow_degree.publish(99887)

                # Make Block
                #i = 0
                #j = 0
                #BM = []
                #BM.append(LM[0])

                #rospy.loginfo('BLOCK NUMBER1 %d ', len(BM));
                #BM.append(LM[1])
                #rospy.loginfo('BLOCK NUMBER2 %d ', len(BM));
                #BM.append(LM[2])
                #rospy.loginfo('BLOCK NUMBER3 %d ', len(BM));

                #for i in xrange(len(LM)) :
            #rospy.loginfo('////////////////////////////////////////// %d  ', len(BM))
            #for j in xrange(len(BM)):
            #if (BM[j][px1] <= LM[i][px1] and LM[i][px1] <= BM[j][px2]) or (BM[j][px1] <= LM[i][px2] and LM[i][px2] <= BM[j][px2]) or  (LM[i][px1] <= BM[j][px1] and BM[j][px1] <= LM[i][px2]) or (LM[i][px1] <= BM[j][px2] and BM[j][px2] <= LM[i][px2]) :
            #inBM = [ BM[j][px1], LM[i][px1]], [BM[j][px2], LM[i][px2] ]
            #BM[j][px1] = np.min(inBM)
            #BM[j][px2] = np.max(inBM)
            #rospy.loginfo('BLOCK NUMBER i%d  j%d  ( %d  %d ) ', i, j ,BM[j][px1], BM[j][px2]);
            #continue
            #BM.append(LM[i])
            #break
            '''
            px1M = [LM[i][px1] for i in range(len(LM))]
            px2M = [LM[i][px2] for i in range(len(LM))]
            #px1M = [152, 107, 115, 114, 133 , 143, 152, 155,105]
            #px2M = [180, 118, 119, 121, 146, 149, 180, 182,123]
            Bpx1M = [] ; Bpx2M = []
            Bpx1M.append(px1M[0]); Bpx2M.append(px2M[0])
            #print ( px1M, px2M )
            #return
            overlap = False
            for i in xrange(len(px1M)) :
                #print ( '___________BM len ', len(Bpx1M) , "_LM i :" , i , "( " , px1M[i] , "," , px2M[i] ,  " ) " )
                j = 0
               while j < len(Bpx1M) :
                #print('///////////', px1M[i], px2M[i] , 'BM' ,Bpx1M , Bpx2M)
                overlap = False
                for m in xrange(Bpx1M[j],Bpx2M[j]) :
                    for n in xrange(px1M[i],px2M[i]) :
                        if m == n:
                           overlap = True
    
                if overlap == True :
                    #print( 'LM ', i,'BM ',j,'overlap happen')
                    inBM = [ px1M[i], px2M[i], Bpx1M[j], Bpx2M[j] ]
                    Bpx1M[j] = min(inBM)
                    Bpx2M[j] = max(inBM)
                    break
                else :
                    if j == (len(Bpx1M)-1) :
                       #print('add BM len1 ', px1M[ i ], px2M[ i ], Bpx1M, Bpx2M)
                       Bpx1M.append(px1M[i])
                       Bpx2M.append(px2M[i])
                       #print('add BM len2 ',px1M[i],px2M[i], Bpx1M, Bpx2M)
                       break
                j = j + 1
                #print ( 'commm')
    
            Bpx1M.sort()
                Bpx2M.sort()
                dBM = []
                dBM_b = []
    
                for i in range(len(Bpx1M)) :
                    dBM.append(Bpx2M[i] - Bpx1M[i])
                    dBM_b. append(Bpx2M[i] - Bpx1M[i])
    
                #print ('final BM len ',Bpx1M , Bpx2M, dBM )
            #print ('final BM len ',len(Bpx1M) , np.var(pdegM) )
    
            #rospy.loginfo('////////////////////////////////////////// %d  ', len(BM))
            '''
        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image_with_line.publish(image_msg_out)

        image_msg_out2 = self.bridge.cv2_to_imgmsg(image_with_lines2, "bgr8")
        image_msg_out2.header.stamp = image_msg.header.stamp
        self.pub_image_with_line2.publish(image_msg_out2)

        image_msg_out3 = self.bridge.cv2_to_imgmsg(image_with_lines3, "bgr8")
        image_msg_out3.header.stamp = image_msg.header.stamp
        self.pub_image_with_line3.publish(image_msg_out3)

        tk.completed('pub_image')

        if self.verbose:
            #colorSegment = color_segment(white.area, red.area, yellow.area)
            colorSegment = color_segment(white.area, white.area, white.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')
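The yellow-line "degree" published above is just the mean segment slope scaled by 100; a compact sketch with the vertical-segment guard made explicit (hypothetical helper):

# Sketch: mean and variance of segment slopes (x100), skipping verticals.
import numpy as np

def mean_degree(lines):
    degs = [(y2 - y1) * 100.0 / (x2 - x1)
            for x1, y1, x2, y2 in lines if x2 != x1]
    return (np.mean(degs), np.var(degs)) if degs else None

print(mean_degree([(0, 0, 10, 5), (0, 2, 20, 12)]))  # -> (50.0, 0.0)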
Code example #24
    def ArmImage(self, image_msg):
        if self.action == 1:
            return
        if time.time() - self.camera_time < 0.5:
            return
        self.camera_time = time.time()
        image_cv = image_cv_from_jpg(image_msg.data)
        img_data = np.copy(image_cv)
        hsv = cv2.cvtColor(img_data, cv2.COLOR_BGR2HSV)
        low_blue = np.array([90, 80, 80])
        high_blue = np.array([130, 255, 255])
        mask = cv2.inRange(hsv, low_blue, high_blue)
        # OpenCV 2.x findContours returns (contours, hierarchy)
        contours, hierarchy = cv2.findContours(mask, 1, 2)
        large_blue_area = 0
        large_i = 0
        for i in range(len(contours)):
            if cv2.contourArea(contours[i]) > large_blue_area:
                large_blue_area = cv2.contourArea(contours[i])
                large_i = i
        if large_blue_area > 300:
            moments = cv2.moments(contours[large_i])
            if moments['m00'] == 0:
                return
            cx = moments['m10'] / moments['m00']
            cy = moments['m01'] / moments['m00']
            rad = (cx - 320)**2 + (cy - 500)**2
            #print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            #print(rad)
            cx -= 320
            cy = 700 - cy
            #print(cx)
            #print(cy)
            t = cx / cy
            #print(math.atan(t))
            if cx > 0:
                self.r += 1
                self.l = 0
            else:
                self.r = 0
                self.l += 1
            if t <= 0.08 and t >= -0.08:
                self.m += 1
            else:
                self.m = 0

            if self.r > 3 and t > 0.08:
                self.r = 0
                cmd_msg = Twist2DStamped()
                cmd_msg.v = 0.5
                cmd_msg.omega = -15
                wheel_time = time.time()
                while ((time.time() - wheel_time) < 0.15 * t):
                    self.pub_joy_cmd.publish(cmd_msg)
                #time.sleep(0.3*t)
                cmd_msg.v = 0
                cmd_msg.omega = 0
                self.pub_joy_cmd.publish(cmd_msg)

            elif self.l > 3 and t < -0.08:
                self.l = 0
                cmd_msg = Twist2DStamped()
                cmd_msg.v = 0.5
                cmd_msg.omega = 15
                wheel_time = time.time()
                while ((time.time() - wheel_time) < -0.15 * t):
                    self.pub_joy_cmd.publish(cmd_msg)
                #time.sleep(-0.3*t)
                cmd_msg.v = 0
                cmd_msg.omega = 0
                self.pub_joy_cmd.publish(cmd_msg)
            elif self.m > 3:
                if rad > 1500:
                    dis = (rad - 1500)**0.5
                    print(dis)
                    cmd_msg = Twist2DStamped()
                    cmd_msg.v = 0.5
                    cmd_msg.omega = 0
                    wheel_time = time.time()
                    while ((time.time() - wheel_time) < 0.003 * dis):
                        self.pub_joy_cmd.publish(cmd_msg)
                    cmd_msg.v = 0
                    cmd_msg.omega = 0
                    self.pub_joy_cmd.publish(cmd_msg)
                    self.m = 0
        else:
            self.r = 0
            self.l = 0
            self.m = 0
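The arm-alignment callback above reduces to: threshold blue in HSV, take the largest contour, and steer on its centroid offset. The centroid step in isolation (OpenCV 2.x return convention, as in these snippets):

# Sketch: centroid of the largest blue blob via image moments.
import cv2
import numpy as np

def largest_blob_centroid(bgr):
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([90, 80, 80]), np.array([130, 255, 255]))
    contours, hierarchy = cv2.findContours(mask, 1, 2)
    if not contours:
        return None
    blob = max(contours, key=cv2.contourArea)
    m = cv2.moments(blob)
    if m['m00'] == 0:
        return None
    return m['m10'] / m['m00'], m['m01'] / m['m00']  # (cx, cy) in pixels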
Code example #25
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, white.normals,
                                  Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                  Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, red.normals,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(white.lines), len(yellow.lines), len(red.lines)))

        #self.loginfo("self.verbose %d" % self.verbose)

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
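drawLines is called above but not defined in the snippet; a plausible minimal version matching how it is called (an assumption, not the original helper):

# Sketch of a drawLines-style helper: draw each (x1,y1,x2,y2) segment in the
# given BGR color and mark its endpoints.
import cv2

def drawLines(image, lines, paint):
    if len(lines) > 0:
        for x1, y1, x2, y2 in lines:
            cv2.line(image, (int(x1), int(y1)), (int(x2), int(y2)), paint, 2)
            cv2.circle(image, (int(x1), int(y1)), 2, (0, 255, 0))
            cv2.circle(image, (int(x2), int(y2)), 2, (0, 0, 255))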
Code example #26
    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        image_cv = image_cv_from_jpg(image_msg.data)

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals
        lines_white, normals_white, centers_white, area_white = self.detector.detectLines2(
            'white')
        lines_yellow, normals_yellow, centers_yellow, area_yellow = self.detector.detectLines2(
            'yellow')
        lines_red, normals_red, centers_red, area_red = self.detector.detectLines2(
            'red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(lines_white) > 0:
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, normals_white,
                                  Segment.WHITE))
        if len(lines_yellow) > 0:
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, normals_yellow,
                                  Segment.YELLOW))
        if len(lines_red) > 0:
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, normals_red,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(lines_white), len(lines_yellow), len(lines_red)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('pub_lines')

        # VISUALIZATION only below

        # Publish the frame with lines

        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        drawLines(image_with_lines, lines_white, (0, 0, 0))
        drawLines(image_with_lines, lines_yellow, (255, 0, 0))
        drawLines(image_with_lines, lines_red, (0, 255, 0))
        #drawNormals2(image_with_lines, centers_white, normals_white, (0,0,0))
        #drawNormals2(image_with_lines, centers_yellow, normals_yellow, (255,0,0))
        #drawNormals2(image_with_lines, centers_red, normals_red, (0,255,0))

        tk.completed('drawn')

        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)

        tk.completed('pub_image')

        # Verbose
        if self.verbose:

            color_segment = self.detector.color_segment(
                area_white, area_red, area_yellow)

            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                color_segment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

        tk.completed('pub_edge')

        self.intermittent_log(tk.getall())

        # Release the thread lock
        self.thread_lock.release()
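The acquire(False) at the top of this callback is a standard non-blocking guard: while one frame is being processed, newer frames are dropped rather than queued, so the node never falls behind the camera. The pattern in isolation (process() is a hypothetical stand-in for the work above):

# Sketch: drop incoming work while the previous item is still in flight.
import threading

thread_lock = threading.Lock()

def callback(msg):
    if not thread_lock.acquire(False):  # non-blocking attempt
        return                          # previous frame still being processed
    try:
        process(msg)                    # hypothetical expensive work
    finally:
        thread_lock.release()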