def debugOutput(self, segment_list_msg, d_max, phi_max, timestamp_before_processing):
    """Create and publish debug artifacts for the lane filter.

    Args:
        segment_list_msg (:obj:`SegmentList`): message containing list of filtered segments
        d_max (:obj:`float`): best estimate for d
        phi_max (:obj:`float`): best estimate for phi
        timestamp_before_processing (:obj:`float`): timestamp dating from before the processing
    """
    if not self._debug:
        return

    # Track estimation latency (including curvature estimation) over a
    # sliding window of the last 20 samples.
    latency_duration = rospy.Time.now() - timestamp_before_processing
    latency_s = latency_duration.secs + latency_duration.nsecs / 1e9
    self.latencyArray.append(latency_s)
    if len(self.latencyArray) >= 20:
        self.latencyArray.pop(0)
    self.loginfo(f"Mean latency of Estimation:................. {np.mean(self.latencyArray)}")

    # Publish the segments that agree with the best (d, phi) estimate.
    inliers_msg = SegmentList()
    inliers_msg.header = segment_list_msg.header
    inliers_msg.segments = self.filter.get_inlier_segments(
        segment_list_msg.segments, d_max, phi_max)
    self.pub_seglist_filtered.publish(inliers_msg)

    # Render the belief histogram as a grayscale image and publish it.
    belief_img = self.bridge.cv2_to_imgmsg(
        np.array(255 * self.filter.belief).astype("uint8"), "mono8")
    belief_img.header.stamp = segment_list_msg.header.stamp
    self.pub_belief_img.publish(belief_img)
def processSegmentList(self, segmentList):
    """Return a copy of *segmentList* keeping only segments that pass all filters."""
    kept = SegmentList()
    kept.header = segmentList.header
    for seg in segmentList.segments:
        # Segments behind the robot are of no use for lane estimation.
        if seg.points[0].x < 0 or seg.points[1].x < 0:
            continue
        # Only WHITE and YELLOW lines are considered; RED is discarded for now.
        if seg.color not in (seg.WHITE, seg.YELLOW):
            print('#########################CODE RED!!')
            continue
        # Apply the fancier per-segment filters.
        d_i, phi_i, l_i, state = self.fancyFilters(seg)
        # state == 0 means the segment classification is UNKNOWN.
        if state == 0:
            continue
        # Reject estimates that fall outside the configured (d, phi) bounds.
        if d_i > self.d_max or d_i < self.d_min or phi_i < self.phi_min or phi_i > self.phi_max:
            continue
        kept.segments.append(seg)
    return kept
def lineseglist_cb(self, seglist_msg):
    """Project pixel-space line segments onto the ground plane and publish them.

    Each endpoint is mapped with :py:meth:`pixel_msg_to_ground_msg`. A debug
    image is additionally published when someone is subscribed to it.

    Args:
        seglist_msg (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in
            pixel space from unrectified images
    """
    if not self.camera_info_received:
        self.log("Waiting for a CameraInfo message", "warn")
        return

    projected = SegmentList()
    projected.header = seglist_msg.header
    for seg_in in seglist_msg.segments:
        seg_out = Segment()
        seg_out.points[0] = self.pixel_msg_to_ground_msg(seg_in.pixels_normalized[0])
        seg_out.points[1] = self.pixel_msg_to_ground_msg(seg_in.pixels_normalized[1])
        seg_out.color = seg_in.color
        # TODO what about normal and points
        projected.segments.append(seg_out)
    self.pub_lineseglist.publish(projected)

    if not self.first_processing_done:
        self.log("First projected segments published.")
        self.first_processing_done = True

    if self.pub_debug_img.get_num_connections() > 0:
        dbg_msg = self.bridge.cv2_to_compressed_imgmsg(self.debug_image(projected))
        dbg_msg.header = projected.header
        self.pub_debug_img.publish(dbg_msg)
def debugOutput(self, segment_list_msg, d_max, phi_max):
    """Publish debug messages: inlier segments and the measurement-likelihood image.

    Args:
        segment_list_msg (:obj:`SegmentList`): message containing list of filtered segments
        d_max (:obj:`float`): best estimate for d
        phi_max (:obj:`float`): best estimate for phi
    """
    if not self._debug:
        return

    # Segments that agree with the current best estimate.
    inliers_msg = SegmentList()
    inliers_msg.header = segment_list_msg.header
    inliers_msg.segments = self.filter.get_inlier_segments(
        segment_list_msg.segments, d_max, phi_max)
    self.pub_seglist_filtered.publish(inliers_msg)

    # Measurement likelihood, rendered as a grayscale image (skipped when the
    # filter cannot produce one).
    likelihood = self.filter.generate_measurement_likelihood(segment_list_msg.segments)
    if likelihood is not None:
        likelihood_img = self.bridge.cv2_to_imgmsg(
            np.array(255 * likelihood).astype("uint8"), "mono8")
        likelihood_img.header.stamp = segment_list_msg.header.stamp
        self.pub_ml_img.publish(likelihood_img)
def find_ground_coordinates(
    gpg: GroundProjectionGeometry, sl: SegmentList, skip_not_on_ground: bool = True
) -> SegmentList:
    """Return a new segment list whose ground coordinates have been filled in.

    When *skip_not_on_ground* is true, segments with an endpoint projecting
    behind (or too close to) the robot are dropped.
    """
    # Minimum forward distance for an endpoint to count as "on the ground".
    cutoff = 0.01
    out = SegmentList()
    out.header = sl.header
    for seg in sl.segments:
        ground0 = gpg.vector2ground(seg.pixels_normalized[0])
        ground1 = gpg.vector2ground(seg.pixels_normalized[1])
        if skip_not_on_ground and (ground0.x < cutoff or ground1.x < cutoff):
            continue
        # TODO what about normal and points
        out.segments.append(
            Segment(
                points=[ground0, ground1],
                pixels_normalized=[seg.pixels_normalized[0], seg.pixels_normalized[1]],
                color=seg.color,
            )
        )
    return out
def lineseglist_cb(self, seglist_msg):
    """Project each received segment to the ground frame and republish the list."""
    ground_list = SegmentList()
    ground_list.header = seglist_msg.header
    for seg_in in seglist_msg.segments:
        seg_out = Segment()
        seg_out.points[0] = self.gpg.vector2ground(seg_in.pixels_normalized[0])
        seg_out.points[1] = self.gpg.vector2ground(seg_in.pixels_normalized[1])
        seg_out.color = seg_in.color
        # TODO what about normal and points
        ground_list.segments.append(seg_out)
    self.pub_lineseglist_.publish(ground_list)
def storeSegments(self, in_segs):
    """Copy the incoming segments (z forced to 0), store the list, and refresh the map."""
    stored = SegmentList()
    stored.header = in_segs.header
    for src in in_segs.segments:
        dst = Segment()
        for idx in (0, 1):
            dst.points[idx].x = src.points[idx].x
            dst.points[idx].y = src.points[idx].y
            # Segments live on the ground plane, so z is always zero.
            dst.points[idx].z = 0.0
        dst.color = src.color
        stored.segments.append(dst)
    self.seg_list.append(stored)
    self.pubMap()
def get_segment_list_normalized(top_cutoff, shape, white, yellow, red):
    """Build a SegmentList in normalized pixel coordinates from color detections.

    Args:
        top_cutoff: number of rows cropped from the top of the image
        shape: (height, width) of the processed image
        white, yellow, red: detections exposing ``lines`` and ``normals``
    """
    seg_list = SegmentList()
    height, width = shape
    # Offset undoes the top crop; scale maps pixel coordinates into [0, 1].
    offset = np.array((0, top_cutoff, 0, top_cutoff))
    scale = np.array((1.0 / width, 1.0 / height, 1.0 / width, 1.0 / height))
    colored = (
        (white, Segment.WHITE),
        (yellow, Segment.YELLOW),
        (red, Segment.RED),
    )
    for detection, color in colored:
        if len(detection.lines) > 0:
            normalized = (detection.lines + offset) * scale
            seg_list.segments.extend(toSegmentMsg(normalized, detection.normals, color))
    return seg_list
def __init__(self):
    """Publish a single fake segment, built from ROS parameters, for testing."""
    # node_name = "Lane Filter Tester"
    pub_fake_segment_list = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
    # Give subscribers a moment to connect before the one-shot publish.
    rospy.sleep(1)

    seg = Segment()
    seg.points[0].x = rospy.get_param("~x1")
    seg.points[0].y = rospy.get_param("~y1")
    seg.points[1].x = rospy.get_param("~x2")
    seg.points[1].y = rospy.get_param("~y2")

    color = rospy.get_param("~color")
    color_map = {"white": seg.WHITE, "yellow": seg.YELLOW, "red": seg.RED}
    if color in color_map:
        seg.color = color_map[color]
    else:
        print("error no color specified")

    seg_list = SegmentList()
    seg_list.segments.append(seg)
    pub_fake_segment_list.publish(seg_list)
def fuzzy_segment_list_image_space(segment_list, n, intensity):
    """Return a SegmentList with *n* noisy copies of every input segment."""
    noisy = SegmentList()
    noisy.segments = [
        fuzzy_segment(segment, intensity)
        for segment in segment_list.segments
        for _ in range(n)
    ]
    return noisy
def rectify_segments(gpg, segment_list):
    """Return a SegmentList with every segment rectified via *gpg*."""
    rectified = [rectify_segment(gpg, seg) for seg in segment_list.segments]
    return SegmentList(segments=rectified)
def rectify_segments(rectify: Rectify, gpg: GroundProjectionGeometry,
                     segment_list: SegmentList) -> SegmentList:
    """Return a new SegmentList in which each segment has been rectified."""
    return SegmentList(
        segments=[rectify_segment(rectify, gpg, seg) for seg in segment_list.segments]
    )
def fuzzy_color(segment_list):
    """Duplicate every segment once per lane color (YELLOW and WHITE), adding a
    small amount of positional noise to each copy."""
    out = SegmentList()
    for original in segment_list.segments:
        for lane_color in (original.YELLOW, original.WHITE):
            noisy = fuzzy_segment(original, intensity=0.001)
            noisy.color = lane_color
            out.segments.append(noisy)
    return out
def __init__(self):
    """Wire up the visual-odometry node's topics and state."""
    self.node_name = "Visual Odometry Line"
    self.sub = rospy.Subscriber("~segment_list", SegmentList, self.processSegments)
    # Previous segment list, kept so consecutive frames can be compared.
    self.old_segment_list = SegmentList()
    self.pub_entropy = rospy.Publisher("~entropy", Float32, queue_size=1)
    self.pub_segments = rospy.Publisher("~segment_list_repeater", SegmentList, queue_size=1)
def processImage(self,image_msg):
    """Detect white/yellow/red line segments in a compressed image and publish them.

    NOTE(review): uses legacy APIs — ``np.fromstring`` is deprecated and
    ``cv2.CV_LOAD_IMAGE_COLOR`` only exists in OpenCV 2.x; presumably this node
    targets OpenCV 2 — confirm before upgrading the environment.
    """
    # Decode the compressed JPEG buffer straight into a BGR image.
    image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
    #image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

    # Resize and crop image
    hei_original = image_cv.shape[0]
    wid_original = image_cv.shape[1]
    if self.image_size[0]!=hei_original or self.image_size[1]!=wid_original:
        # cv2.resize takes (width, height), hence the swapped indices.
        image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]))
    # Drop the top rows (far field) before detection.
    image_cv = image_cv[self.top_cutoff:,:,:]

    # Set the image to be detected
    self.detector.setImage(image_cv)

    # Detect lines and normals
    lines_white, normals_white = self.detector.detectLines('white')
    lines_yellow, normals_yellow = self.detector.detectLines('yellow')
    lines_red, normals_red = self.detector.detectLines('red')

    # Draw lines and normals
    self.detector.drawLines(lines_white, (0,0,0))
    self.detector.drawLines(lines_yellow, (255,0,0))
    self.detector.drawLines(lines_red, (0,255,0))
    #self.detector.drawNormals(lines_white, normals_white)
    #self.detector.drawNormals(lines_yellow, normals_yellow)
    #self.detector.drawNormals(lines_red, normals_red)

    # Convert to normalized pixel coordinates, and add segments to segmentList
    segmentList = SegmentList()
    # Offsets undo the top crop; ratios map pixel coordinates into [0, 1].
    arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
    arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
    if len(lines_white)>0:
        rospy.loginfo("[LineDetectorNode] number of white lines = %s" %(len(lines_white)))
        lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, normals_white, Segment.WHITE))
    if len(lines_yellow)>0:
        rospy.loginfo("[LineDetectorNode] number of yellow lines = %s" %(len(lines_yellow)))
        lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, normals_yellow, Segment.YELLOW))
    if len(lines_red)>0:
        rospy.loginfo("[LineDetectorNode] number of red lines = %s" %(len(lines_red)))
        lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, normals_red, Segment.RED))

    # Publish segmentList
    self.pub_lines.publish(segmentList)

    # Publish the frame with lines
    image_msg = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
    self.pub_image.publish(image_msg)
def lineseglist_cb(self, seglist_msg):
    """Project incoming pixel-space segments to the ground frame and republish.

    Also derives the end-to-end delay of the message for diagnostics.
    """
    # Message stamps are (sec, nanosec) pairs; convert to float seconds.
    message_time = seglist_msg.header.stamp.sec + seglist_msg.header.stamp.nanosec*1e-9
    current_time = time.time()
    delay = current_time - message_time

    ground_list = SegmentList()
    ground_list.header = seglist_msg.header
    for seg_in in seglist_msg.segments:
        seg_out = Segment()
        seg_out.points[0] = self.gp.vector2ground(seg_in.pixels_normalized[0])
        seg_out.points[1] = self.gp.vector2ground(seg_in.pixels_normalized[1])
        seg_out.color = seg_in.color
        # TODO what about normal and points
        ground_list.segments.append(seg_out)
    self.pub_lineseglist_.publish(ground_list)
def predict_segments(sm, gpg):
    """
    Predicts what segments the robot would see.
    Assumes map is in FRAME_AXLE.
    """
    # Viewing frustum: ignore anything closer than 0.1 m or outside a 150-degree FOV.
    x_frustum = +0.1
    fov = np.deg2rad(150)
    res = []
    for segment in sm.segments:
        p1 = segment.points[0]
        p2 = segment.points[1]

        # If we are both in FRAME_AXLE
        if ((sm.points[p1].id_frame != FRAME_AXLE) or (sm.points[p2].id_frame != FRAME_AXLE)):
            msg = "Cannot deal with points not in frame FRAME_AXLE"
            raise NotImplementedError(msg)

        w1 = sm.points[p1].coords
        w2 = sm.points[p2].coords

        # Clip the segment to the viewing frustum; an empty result means it
        # is entirely out of view.
        coords_inside = clip_to_view([w1, w2], x_frustum, fov)
        if not coords_inside:
            continue
        w1 = coords_inside[0]
        w2 = coords_inside[1]
        point1 = Point(w1[0], w1[1], w1[2])
        point2 = Point(w2[0], w2[1], w2[2])

        # Project ground points into the image, then normalize pixel coords.
        pixel1 = gpg.ground2pixel(point1)
        pixel2 = gpg.ground2pixel(point2)
        normalized1 = gpg.pixel2vector(pixel1)
        normalized2 = gpg.pixel2vector(pixel2)

        pixels_normalized = [normalized1, normalized2]
        # The normal is not predicted; a zero vector is used as placeholder.
        normal = Vector2D(0, 0)
        points = [point1, point2]

        # color = segment_color_constant_from_string(segment.color)
        assert segment.color in [Segment.RED, Segment.YELLOW, Segment.WHITE]
        s = Segment(pixels_normalized=pixels_normalized, normal=normal, points=points, color=segment.color)
        res.append(s)

    return SegmentList(segments=res)
def processImage_(self, image_msg):
    """Run line detection on a compressed image and publish the segments.

    Debug visualizations (lines / edges / color segmentation) are published
    only when ``self.verbose`` is set. Most timing instrumentation in this
    variant has been commented out.
    """
    # self.stats.processed()
    #if self.intermittent_log_now():
    #self.intermittent_log(self.stats.info())
    #self.stats.reset()
    # tk = TimeKeeper(image_msg)
    self.intermittent_counter += 1

    try:
        image_cv = image_cv_from_jpg(image_msg.data)
    except ValueError as e:
        self.loginfo('Could not decode image: %s' % e)
        return
    # tk.completed('decoded')

    hei_original,wid_original = image_cv.shape[0:2]
    if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
        # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
        image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                              interpolation=cv2.INTER_NEAREST)
    # Crop away the top rows before detection.
    image_cv = image_cv[self.top_cutoff:,:,:]
    #tk.completed('resized')

    #image_cv_corr = self.ai.applyTransform(image_cv)
    #image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
    # AntiInstagram color correction is disabled here; only clamp to uint8.
    image_cv_corr = cv2.convertScaleAbs(image_cv)

    # Set the image to be detected
    self.detector.setImage(image_cv_corr)

    # Detect lines and normals
    white = self.detector.detectLines('white')
    yellow = self.detector.detectLines('yellow')
    red = self.detector.detectLines('red')
    # detectLines returns a tuple in this variant; keep only the first element.
    white = white[0]
    yellow = yellow[0]
    red = red[0]
    # tk.completed('detected')

    # SegmentList constructor
    segmentList = SegmentList()
    segmentList.header.stamp = image_msg.header.stamp

    # Convert to normalized pixel coordinates, and add segments to segmentList.
    # Offsets undo the top crop; ratios scale pixel coordinates into [0, 1].
    arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
    arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
    if len(white.lines) > 0:
        lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
    if len(yellow.lines) > 0:
        lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
    if len(red.lines) > 0:
        lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
    self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines), len(yellow.lines), len(red.lines)))
    #tk.completed('prepared')

    # Publish segmentList
    # FOR TESTING, write to file instead of ros publish
    #message_time = segmentList.header.stamp.sec + segmentList.header.stamp.nanosec*1e-9
    #fname = "/dev/shm/segment" + str(message_time)
    #open(fname, "wb").write(pickle.dumps(segmentList.segments, pickle.HIGHEST_PROTOCOL))
    #segmentList.segments = []
    self.pub_lines.publish(segmentList)
    #self.loginfo("published line segments")
    #tk.completed('--pub_lines--')

    # VISUALIZATION only below
    if self.verbose:
        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        drawLines(image_with_lines, white.lines, (0, 0, 0))
        drawLines(image_with_lines, yellow.lines, (255, 0, 0))
        drawLines(image_with_lines, red.lines, (0, 255, 0))
        #tk.completed('drawn')

        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        #tk.completed('pub_image')

        # if self.verbose:
        colorSegment = color_segment(white.area, red.area, yellow.area)
        edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
        colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
        self.pub_edge.publish(edge_msg_out)
        self.pub_colorSegment.publish(colorSegment_msg_out)
def processSegments(self,segment_list_msg):
    """One lane-filter cycle: predict with the latest velocity, update with
    the new segments, then publish inliers, lane pose, belief image,
    latency statistics and an in-lane flag for the FSM."""
    # Get actual timestamp for latency measurement
    timestamp_now = rospy.Time.now()

    if not self.active:
        return

    # Step 1: predict
    current_time = rospy.get_time()
    dt = current_time - self.t_last_update
    v = self.velocity.v
    w = self.velocity.omega
    self.filter.predict(dt=dt, v=v, w=w)
    self.t_last_update = current_time

    # Step 2: update
    self.filter.update(segment_list_msg.segments)

    # Step 3: build messages and publish things
    [d_max, phi_max] = self.filter.getEstimate()

    ## Inlier segments
    inlier_segments = self.filter.get_inlier_segments(segment_list_msg.segments, d_max, phi_max)
    inlier_segments_msg = SegmentList()
    inlier_segments_msg.header = segment_list_msg.header
    inlier_segments_msg.segments = inlier_segments
    self.pub_seglist_filtered.publish(inlier_segments_msg)

    ## Lane pose
    in_lane = self.filter.isInLane()
    lanePose = LanePose()
    lanePose.header.stamp = segment_list_msg.header.stamp
    lanePose.d = d_max
    lanePose.phi = phi_max
    lanePose.in_lane = in_lane
    lanePose.status = lanePose.NORMAL
    self.pub_lane_pose.publish(lanePose)

    ## Belief image
    # NOTE(review): CvBridge is re-instantiated on every callback, and if
    # self.mode is neither 'histogram' nor 'particle', belief_img is unbound
    # below and raises NameError — confirm mode is constrained to these two.
    bridge = CvBridge()
    if self.mode == 'histogram':
        belief_img = bridge.cv2_to_imgmsg(np.array(255 * self.filter.beliefArray).astype("uint8"), "mono8")
    elif self.mode == 'particle':
        belief_img = bridge.cv2_to_imgmsg(np.array(255 * self.filter.getBeliefArray()).astype("uint8"), "mono8")
    belief_img.header.stamp = segment_list_msg.header.stamp
    self.pub_belief_img.publish(belief_img)

    ## Latency of Estimation
    estimation_latency_stamp = rospy.Time.now() - timestamp_now
    estimation_latency = estimation_latency_stamp.secs + estimation_latency_stamp.nsecs/1e9
    # Sliding window of the last 20 latency samples.
    self.latencyArray.append(estimation_latency)
    if (len(self.latencyArray) >= 20):
        self.latencyArray.pop(0)

    # Separate Bool for the FSM
    in_lane_msg = BoolStamped()
    in_lane_msg.header.stamp = segment_list_msg.header.stamp
    in_lane_msg.data = True #TODO-TAL change with in_lane. Is this messqge useful since it is alwas true ?
    self.pub_in_lane.publish(in_lane_msg)
def processImage_(self, image_msg):
    """Full line-detection pipeline: decode, resize/crop, color-correct,
    detect white/yellow/red segments, publish them, and (in verbose mode)
    publish visualization images. Each stage is timed via ``tk``."""
    self.stats.processed()
    if self.intermittent_log_now():
        self.intermittent_log(self.stats.info())
        self.stats.reset()
    tk = TimeKeeper(image_msg)
    self.intermittent_counter += 1

    # Decode from compressed image with OpenCV
    try:
        image_cv = image_cv_from_jpg(image_msg.data)
    except ValueError as e:
        self.loginfo('Could not decode image: %s' % e)
        return
    tk.completed('decoded')

    # Resize and crop image
    hei_original, wid_original = image_cv.shape[0:2]
    if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
        # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
        image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                              interpolation=cv2.INTER_NEAREST)
    image_cv = image_cv[self.top_cutoff:, :, :]
    tk.completed('resized')

    # apply color correction: AntiInstagram
    image_cv_corr = self.ai.applyTransform(image_cv)
    image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
    tk.completed('corrected')

    # Set the image to be detected
    self.detector.setImage(image_cv_corr)

    # Detect lines and normals
    white = self.detector.detectLines('white')
    yellow = self.detector.detectLines('yellow')
    red = self.detector.detectLines('red')
    tk.completed('detected')

    # SegmentList constructor
    segmentList = SegmentList()
    segmentList.header.stamp = image_msg.header.stamp

    # Convert to normalized pixel coordinates, and add segments to segmentList.
    # Offsets undo the top crop; ratios scale pixel coordinates into [0, 1].
    arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
    arr_ratio = np.array(
        (1. / self.image_size[1], 1. / self.image_size[0],
         1. / self.image_size[1], 1. / self.image_size[0]))
    if len(white.lines) > 0:
        lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(
            self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
    if len(yellow.lines) > 0:
        lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(
            self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
    if len(red.lines) > 0:
        lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(
            self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
    self.intermittent_log(
        '# segments: white %3d yellow %3d red %3d' % (len(white.lines), len(yellow.lines), len(red.lines)))
    #self.loginfo("self.verbose %d" % self.verbose)
    tk.completed('prepared')

    # Publish segmentList
    self.pub_lines.publish(segmentList)
    tk.completed('--pub_lines--')

    # VISUALIZATION only below
    if self.verbose:
        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        drawLines(image_with_lines, white.lines, (0, 0, 0))
        drawLines(image_with_lines, yellow.lines, (255, 0, 0))
        drawLines(image_with_lines, red.lines, (0, 255, 0))
        tk.completed('drawn')

        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        tk.completed('pub_image')

        # if self.verbose:
        colorSegment = color_segment(white.area, red.area, yellow.area)
        edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
        colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
        self.pub_edge.publish(edge_msg_out)
        self.pub_colorSegment.publish(colorSegment_msg_out)
        tk.completed('pub_edge/pub_segment')

    self.intermittent_log(tk.getall())
def processImage_(self, image_msg):
    """Detect yellow lane segments via an inline HSV/Canny/Hough pipeline and
    publish them; the blue mask is kept for the verbose visualization, where
    segments are drawn only if blue and yellow lie on opposite sides."""
    self.stats.processed()
    if self.intermittent_log_now():
        self.intermittent_log(self.stats.info())
        self.stats.reset()
    tk = TimeKeeper(image_msg)
    self.intermittent_counter += 1

    # Decode from compressed image with OpenCV
    try:
        image_cv = image_cv_from_jpg(image_msg.data)
    except ValueError as e:
        self.loginfo('Could not decode image: %s' % e)
        return
    tk.completed('decoded')

    # Resize and crop image
    hei_original, wid_original = image_cv.shape[0:2]
    if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
        # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
        image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                              interpolation=cv2.INTER_NEAREST)
    image_cv = image_cv[self.top_cutoff:, :, :]
    tk.completed('resized')

    # apply color correction: AntiInstagram
    image_cv_corr = self.ai.applyTransform(image_cv)
    image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
    tk.completed('corrected')

    # set up parameter
    # HSV ranges for the blue and yellow markings.
    hsv_blue1 = (100, 50, 50)
    hsv_blue2 = (150, 255, 255)
    hsv_yellow1 = (25, 50, 50)
    hsv_yellow2 = (45, 255, 255)

    # Set the image to be detected
    gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 80, 200, apertureSize=3)

    hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
    # Dilate each color mask slightly so it overlaps the Canny edges.
    yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    yellow = cv2.dilate(yellow, kernel)
    blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    blue = cv2.dilate(blue, kernel)

    # Detect lines and normals
    edge_color_yellow = cv2.bitwise_and(yellow, edges)
    lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10, np.empty(1), 3, 1)
    if lines_yellow is not None:
        lines_yellow = np.array(lines_yellow[0])
    else:
        lines_yellow = []
    #edge_color_blue = cv2.bitwise_and(blue, edges)
    #lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)
    #if lines_blue is not None:
    #lines_blue = np.array(lines_blue[0])
    #else:
    #lines_blue = []

    #print "***************** ",image_cv.shape," *********************"
    #bw_yellow = yellow
    #bw_blue = blue
    # Keep the masks for later visualization / other callbacks.
    self.blue = blue
    self.yellow = yellow

    if len(lines_yellow) > 0:
        lines_yellow, normals_yellow = self.normals(lines_yellow, yellow)
    #if len(lines_blue) > 0:
    #lines_blue,normals_blue = self.normals(lines_blue,bw_blue)

    tk.completed('detected')

    # SegmentList constructor
    segmentList = SegmentList()
    segmentList.header.stamp = image_msg.header.stamp

    # Convert to normalized pixel coordinates, and add segments to segmentList
    if len(lines_yellow) > 0:
        segmentList.segments.extend(
            self.toSegmentMsg(lines_yellow, normals_yellow, Segment.YELLOW))
    #if len(lines_blue) > 0:
    #segmentList.segments.extend(self.toSegmentMsg(lines_blue, normals_blue, Segment.YELLOW))
    self.intermittent_log('# segments:yellow %3d' % (len(lines_yellow)))
    tk.completed('prepared')

    # Publish segmentList
    self.pub_lines.publish(segmentList)

    # VISUALIZATION only below
    if self.verbose:
        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines_yellow, normals_yellow)):
            x1 = int(x1)
            x2 = int(x2)
            y1 = int(y1)
            y2 = int(y2)
            # Probe one point on each side of the segment midpoint, offset
            # 3 px along the segment normal.
            ox = int((x1 + x2) / 2)
            oy = int((y1 + y2) / 2)
            cx = (ox + 3 * norm_x).astype('int')
            cy = (oy + 3 * norm_y).astype('int')
            ccx = (ox - 3 * norm_x).astype('int')
            ccy = (oy - 3 * norm_y).astype('int')
            # Clamp the probe points to the image bounds.
            if cx > 158:
                cx = 158
            elif cx < 1:
                cx = 1
            if ccx > 158:
                ccx = 158
            elif ccx < 1:
                ccx = 1
            if cy >= 79:
                cy = 79
            elif cy < 1:
                cy = 1
            if ccy >= 79:
                ccy = 79
            elif ccy < 1:
                ccy = 1
            # Draw only segments with blue on one side and yellow on the other.
            if (blue[cy, cx] == 255 and yellow[ccy, ccx] == 255) or (
                    yellow[cy, cx] == 255 and blue[ccy, ccx] == 255):
                cv2.line(image_with_lines, (x1, y1), (x2, y2), (0, 0, 255), 2)
                cv2.circle(image_with_lines, (x1, y1), 2, (0, 255, 0))
                cv2.circle(image_with_lines, (x2, y2), 2, (255, 0, 0))
        tk.completed('drawn')

        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        tk.completed('pub_image')

        # if self.verbose:
        #colorSegment = color_segment(white.area, red.area, yellow.area)
        #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
        #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
        #self.pub_edge.publish(edge_msg_out)
        #self.pub_colorSegment.publish(colorSegment_msg_out)
        tk.completed('pub_edge/pub_segment')

    self.intermittent_log(tk.getall())
def processSegments(self, segment_list_msg):
    """Run one predict/update cycle of the parking lane filter.

    Publishes the parking-detection flags unconditionally and, when
    ``self.active_mode`` is set, the filtered inlier segments, the lane pose
    (with curvature when enabled), the belief image, latency statistics and
    an in-lane flag for the FSM.

    Args:
        segment_list_msg (:obj:`SegmentList`): filtered line segments
    """
    if not self.active:
        return

    # Get actual timestamp for latency measurement
    timestamp_now = rospy.Time.now()

    # TODO-TAL double call to param server ... --> see TODO in the readme, not only is it never updated, but it is alwas 0
    # Step 0: get values from server
    # BUGFIX: compare with != instead of `is not`. Identity comparison on
    # numbers fetched from the param server is unreliable (it depends on
    # interpreter object caching), so the resolution update could fire on
    # every callback — or never — regardless of the actual value.
    if rospy.get_param('~curvature_res') != self.curvature_res:
        self.curvature_res = rospy.get_param('~curvature_res')
        self.filter.updateRangeArray(self.curvature_res)

    # Step 1: predict
    current_time = rospy.get_time()
    dt = current_time - self.t_last_update
    v = self.velocity.v
    w = self.velocity.omega
    self.filter.predict(dt=dt, v=v, w=w)
    self.t_last_update = current_time

    # Step 2: update
    self.filter.update(segment_list_msg.segments)

    # Parking flags are published regardless of the active mode.
    parking_detected = BoolStamped()
    parking_detected.header.stamp = segment_list_msg.header.stamp
    parking_detected.data = self.filter.parking_detected
    self.pub_parking_detection.publish(parking_detected)

    parking_on = BoolStamped()
    parking_on.header.stamp = segment_list_msg.header.stamp
    parking_on.data = True
    self.pub_parking_on.publish(parking_on)

    if self.active_mode:
        # Step 3: build messages and publish things
        [d_max, phi_max] = self.filter.getEstimate()

        inlier_segments = self.filter.get_inlier_segments(
            segment_list_msg.segments, d_max, phi_max)
        inlier_segments_msg = SegmentList()
        inlier_segments_msg.header = segment_list_msg.header
        inlier_segments_msg.segments = inlier_segments
        self.pub_seglist_filtered.publish(inlier_segments_msg)

        # build lane pose message to send
        lanePose = LanePose()
        lanePose.header.stamp = segment_list_msg.header.stamp
        lanePose.d = d_max[0]
        lanePose.phi = phi_max[0]
        # XXX: is it always NORMAL?
        lanePose.status = lanePose.NORMAL
        if self.curvature_res > 0:
            lanePose.curvature = self.filter.getCurvature(d_max[1:], phi_max[1:])
        self.pub_lane_pose.publish(lanePose)

        # TODO-TAL add a debug param to not publish the image !!
        # TODO-TAL also, the CvBridge is re instantiated every time...
        # publish the belief image
        bridge = CvBridge()
        belief_img = bridge.cv2_to_imgmsg(
            np.array(255 * self.filter.beliefArray[0]).astype("uint8"), "mono8")
        belief_img.header.stamp = segment_list_msg.header.stamp
        self.pub_belief_img.publish(belief_img)

        # Latency of Estimation including curvature estimation,
        # kept as a sliding window of the last 20 samples.
        estimation_latency_stamp = rospy.Time.now() - timestamp_now
        estimation_latency = estimation_latency_stamp.secs + estimation_latency_stamp.nsecs / 1e9
        self.latencyArray.append(estimation_latency)
        if len(self.latencyArray) >= 20:
            self.latencyArray.pop(0)

        # also publishing a separate Bool for the FSM
        in_lane_msg = BoolStamped()
        in_lane_msg.header.stamp = segment_list_msg.header.stamp
        in_lane_msg.data = self.filter.inLane
        if self.filter.inLane:
            self.pub_in_lane.publish(in_lane_msg)
def processSegments(self, segment_list_msg):
    """Lane filter update with rate-limited pose output.

    After predicting/updating the filter, the published (d, phi) are clamped
    so they cannot change faster than bounds derived from the elapsed time
    and the robot's velocity limits.

    Args:
        segment_list_msg (:obj:`SegmentList`): filtered line segments
    """
    # Get actual timestamp for latency measurement
    timestamp_now = rospy.Time.now()

    if not self.active:
        return

    # TODO-TAL double call to param server ... --> see TODO in the readme, not only is it never updated, but it is alwas 0
    # Step 0: get values from server
    # BUGFIX: compare with != instead of `is not`. Identity comparison on
    # numbers is unreliable in Python (it depends on interpreter object
    # caching), so this condition could be true on every callback — or
    # never — re-triggering updateRangeArray spuriously.
    if rospy.get_param('~curvature_res') != self.curvature_res:
        self.curvature_res = rospy.get_param('~curvature_res')
        self.filter.updateRangeArray(self.curvature_res)

    # Step 1: predict
    current_time = rospy.get_time()
    dt = current_time - self.t_last_update
    v = self.velocity.v
    w = self.velocity.omega

    # ==== HERE THE UPPER BOUNDS ========
    # Maximum allowed change in d / phi since the last published pose.
    ub_d = dt * self.omega_max * self.gain * 1000
    ub_phi = dt * self.v_ref * self.gain * 1000

    self.filter.predict(dt=dt, v=v, w=w)
    self.t_last_update = current_time

    # Step 2: update
    self.filter.update(segment_list_msg.segments, self.lane_offset)

    # Step 3: build messages and publish things
    [d_max, phi_max] = self.filter.getEstimate()

    inlier_segments = self.filter.get_inlier_segments(
        segment_list_msg.segments, d_max, phi_max)
    inlier_segments_msg = SegmentList()
    inlier_segments_msg.header = segment_list_msg.header
    inlier_segments_msg.segments = inlier_segments
    self.pub_seglist_filtered.publish(inlier_segments_msg)

    max_val = self.filter.getMax()
    in_lane = max_val > self.filter.min_max

    # build lane pose message to send
    lanePose = LanePose()
    lanePose.header.stamp = segment_list_msg.header.stamp
    lanePose.d = d_max[0]
    # NOTE(review): `if not self.last_d` also treats a legitimate 0.0 as
    # "unset"; presumably last_d is initialized to None — confirm.
    if not self.last_d:
        self.last_d = lanePose.d
    # Clamp d so it changes by at most ub_d per update.
    if lanePose.d - self.last_d > ub_d:
        lanePose.d = self.last_d + ub_d
        rospy.loginfo("d changed too much")
    if lanePose.d - self.last_d < -ub_d:
        lanePose.d = self.last_d - ub_d
        rospy.loginfo("d changed too much")

    lanePose.phi = phi_max[0]
    if not self.last_phi:
        self.last_phi = lanePose.phi
    # Clamp phi so it changes by at most ub_phi per update.
    if lanePose.phi - self.last_phi > ub_phi:
        lanePose.phi = self.last_phi + ub_phi
        rospy.loginfo("phi changed too much")
    if lanePose.phi - self.last_phi < -ub_phi:
        lanePose.phi = self.last_phi - ub_phi
        rospy.loginfo("phi changed too much")

    lanePose.in_lane = in_lane
    # XXX: is it always NORMAL?
    lanePose.status = lanePose.NORMAL
    if self.curvature_res > 0:
        lanePose.curvature = self.filter.getCurvature(d_max[1:], phi_max[1:])

    self.pub_lane_pose.publish(lanePose)

    # Save for next iteration <=========
    self.last_d = lanePose.d
    self.last_phi = lanePose.phi

    # publish the belief image
    bridge = CvBridge()
    belief_img = bridge.cv2_to_imgmsg(
        np.array(255 * self.filter.beliefArray[0]).astype("uint8"), "mono8")
    belief_img.header.stamp = segment_list_msg.header.stamp
    self.pub_belief_img.publish(belief_img)

    # Latency of Estimation including curvature estimation,
    # kept as a sliding window of the last 20 samples.
    estimation_latency_stamp = rospy.Time.now() - timestamp_now
    estimation_latency = estimation_latency_stamp.secs + estimation_latency_stamp.nsecs / 1e9
    self.latencyArray.append(estimation_latency)
    if len(self.latencyArray) >= 20:
        self.latencyArray.pop(0)

    # also publishing a separate Bool for the FSM
    in_lane_msg = BoolStamped()
    in_lane_msg.header.stamp = segment_list_msg.header.stamp
    in_lane_msg.data = True  #TODO-TAL change with in_lane. Is this messqge useful since it is alwas true ?
    self.pub_in_lane.publish(in_lane_msg)
def lines_cb(self, image):
    """Detect white and yellow lane line segments in a camera frame.

    Crops the bottom half of the image, colour-thresholds it in HSV, runs
    Canny + probabilistic Hough on each colour mask, normalizes the line
    endpoints and publishes them as a SegmentList.

    Args:
        image: compressed camera frame (decoded via cv_bridge as BGR8).
    """
    img = self.bridge.compressed_imgmsg_to_cv2(image, "bgr8")
    # Keep the bottom half of the 640x480 frame (the road surface).
    imgCropped = img[240:480, 0:640]
    hsvimg = cv.cvtColor(imgCropped, cv.COLOR_BGR2HSV)

    # Hand-tuned HSV thresholds for the yellow and white lane markings.
    yellow_lower = np.array([23, 157, 118])
    yellow_upper = np.array([179, 255, 255])
    white_lower = np.array([42, 8, 88])
    white_upper = np.array([122, 99, 255])

    # Offsets/ratios mapping cut-image pixels to normalized coordinates.
    # Fix: use float literals so the ratios cannot be truncated to zero by
    # integer division.
    arr_cutoff = np.array([0, 40, 0, 40])
    arr_ratio = np.array([1.0 / 160, 1.0 / 120, 1.0 / 160, 1.0 / 120])

    maskWhite = cv.inRange(hsvimg, white_lower, white_upper)
    maskYellow = cv.inRange(hsvimg, yellow_lower, yellow_upper)
    imgWhite = cv.bitwise_and(imgCropped, imgCropped, mask=maskWhite)
    imgYellow = cv.bitwise_and(imgCropped, imgCropped, mask=maskYellow)
    imgCanny_white = cv.Canny(imgWhite, 150, 200)
    imgCanny_yellow = cv.Canny(imgYellow, 150, 200)
    imgResized_white = cv.resize(imgCanny_white, (160, 120),
                                 interpolation=cv.INTER_NEAREST)
    imgResized_yellow = cv.resize(imgCanny_yellow, (160, 120),
                                  interpolation=cv.INTER_NEAREST)
    # Drop the top 40 rows of the downscaled edge images (above the horizon).
    imgCut_white = imgResized_white[40:120, 0:160]
    imgCut_yellow = imgResized_yellow[40:120, 0:160]

    lines_white = cv.HoughLinesP(imgCut_white, 1, (3.14159 / 180), 1)
    lines_yellow = cv.HoughLinesP(imgCut_yellow, 1, (3.14159 / 180), 1)

    segments = SegmentList()

    # Fix 1: HoughLinesP returns None when nothing is found -- guard before
    # iterating (the original called len(None)).
    # Fix 2: a fresh Segment is created per line. The original reused one
    # message object across the whole loop, so every entry in the published
    # list aliased the same Segment holding only the LAST line's endpoints.
    if lines_white is not None:
        for line in lines_white:
            x0, y0, x1, y1 = ((line + arr_cutoff) * arr_ratio)[0]
            white_seg = Segment()
            white_seg.color = 0
            white_seg.pixels_normalized[0].x = x0
            white_seg.pixels_normalized[0].y = y0
            white_seg.pixels_normalized[1].x = x1
            white_seg.pixels_normalized[1].y = y1
            segments.segments.append(white_seg)
    if lines_yellow is not None:
        for line in lines_yellow:
            x0, y0, x1, y1 = ((line + arr_cutoff) * arr_ratio)[0]
            yellow_seg = Segment()
            yellow_seg.color = 1
            yellow_seg.pixels_normalized[0].x = x0
            yellow_seg.pixels_normalized[0].y = y0
            yellow_seg.pixels_normalized[1].x = x1
            yellow_seg.pixels_normalized[1].y = y1
            segments.segments.append(yellow_seg)

    self.pub.publish(segments)
def callback_lab5(self, msg):
    """Detect white and yellow lane segments (lab 5 pipeline).

    Resizes/crops the frame, masks white and yellow in HSV, intersects the
    dilated masks with Canny edges, runs a probabilistic Hough transform per
    colour, publishes a debug overlay, and publishes the normalized segments.

    Args:
        msg: compressed camera frame (decoded via cv_bridge as BGR8).
    """
    # convert to a ROS image using the bridge
    cv_img = self.bridge.compressed_imgmsg_to_cv2(msg, "bgr8")

    # crop the image using the method provided in lab5 pdf
    image_size = (160, 120)  # (width, height), as cv2.resize expects
    offset = 40
    new_image = cv2.resize(cv_img, image_size, interpolation=cv2.INTER_NEAREST)
    cv_cropped = new_image[offset:, :]

    # convert colorspace from BGR to HSV
    hsv_img = cv2.cvtColor(cv_cropped, cv2.COLOR_BGR2HSV)
    # filter white lane
    white_filter = cv2.inRange(hsv_img, (0, 0, 225), (180, 40, 255))
    # filter yellow lane
    yellow_filter = cv2.inRange(hsv_img, (25, 80, 120), (75, 255, 255))

    # convert the cropped image to gray and perform Canny edge detection
    grey_img = cv2.cvtColor(cv_cropped, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(grey_img, 0, 255)

    # dilate the colour masks so thin lane markings survive the AND below
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    # a separate (no-op sized) kernel for the edge image
    kernel_for_edges = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 1))
    dilated_white = cv2.dilate(white_filter, kernel)
    dilated_yellow = cv2.dilate(yellow_filter, kernel)
    dilated_edges = cv2.dilate(edges, kernel_for_edges)

    # keep only edges that fall on the white / yellow masks
    and_white = cv2.bitwise_and(dilated_edges, dilated_white, mask=None)
    and_yellow = cv2.bitwise_and(dilated_edges, dilated_yellow, mask=None)

    # Probabilistic Hough transform per colour
    lines_white = cv2.HoughLinesP(and_white, 1, np.pi / 180, 5, None, 10, 5)
    lines_yellow = cv2.HoughLinesP(and_yellow, 1, np.pi / 180, 5, None, 2, 2)

    # draw blue lines for both colours and publish the combined overlay
    image_lines_white = self.output_lines(cv_cropped, lines_white)
    image_lines_combined = self.output_lines(image_lines_white, lines_yellow)
    ros_combined_output = self.bridge.cv2_to_imgmsg(image_lines_combined, "bgr8")
    self.pub_combined_img.publish(ros_combined_output)

    # Normalization: shift y by the crop offset, divide x by the image width
    # and y by the image height.
    # Fix: the original used 1/image_size[1] for x and 1/image_size[0] for y,
    # i.e. x/120 and y/160, because image_size here is (width, height) while
    # the formula was copied from nodes that store (height, width). Every
    # other detector in this file normalizes x/width, y/height.
    arr_cutoff = np.array([0, offset, 0, offset])
    arr_ratio = np.array([
        1. / image_size[0],
        1. / image_size[1],
        1. / image_size[0],
        1. / image_size[1]
    ])

    # stores the list of Segments and will be published to /segment_list
    resultList = SegmentList()

    for lines, color in ((lines_white, Segment.WHITE),
                         (lines_yellow, Segment.YELLOW)):
        # HoughLinesP returns None when no lines are detected
        if lines is None:
            continue
        # normalize the list of lines for this colour
        lines_normalized = (lines + arr_cutoff) * arr_ratio
        for line in lines_normalized:
            # create a single segment and store x0, y0, x1, y1
            seg = Segment()
            seg.color = color
            seg.pixels_normalized[0].x = line[0][0]
            seg.pixels_normalized[0].y = line[0][1]
            seg.pixels_normalized[1].x = line[0][2]
            seg.pixels_normalized[1].y = line[0][3]
            resultList.segments.append(seg)

    # publish the resulting list of segments (received by ground_detection_node)
    self.pub_seglist.publish(resultList)
def processImage_(self, image_msg):
    """Detect yellow and blue line segments in a compressed image and publish them.

    Pipeline: decode JPEG -> resize/crop -> anti-instagram colour correction ->
    Canny edges -> HSV colour masks -> per-colour Hough lines -> orient each
    yellow line so its normal points toward the blue mask -> publish a
    SegmentList (plus debug images when verbose).

    NOTE(review): this function uses Python-2 `print` statements.

    Args:
        image_msg: compressed image message (JPEG payload in .data).
    """
    self.stats.processed()
    if self.intermittent_log_now():
        self.intermittent_log(self.stats.info())
        self.stats.reset()
    tk = TimeKeeper(image_msg)
    self.intermittent_counter += 1
    # Decode from compressed image with OpenCV
    try:
        image_cv = image_cv_from_jpg(image_msg.data)
    except ValueError as e:
        self.loginfo('Could not decode image: %s' % e)
        return
    tk.completed('decoded')
    # Resize and crop image
    hei_original, wid_original = image_cv.shape[0:2]
    if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
        # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
        image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                              interpolation=cv2.INTER_NEAREST)
    # Crop away everything above the configured horizon row.
    image_cv = image_cv[self.top_cutoff:, :, :]
    tk.completed('resized')
    # apply color correction: AntiInstagram
    image_cv_corr = self.ai.applyTransform(image_cv)
    image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
    tk.completed('corrected')
    # Set the image to be detected
    self.detector.setImage(image_cv_corr)
    # Detect lines and normals
    gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 200, apertureSize=3)
    # NOTE(review): this Hough result is overwritten below without being used.
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, np.empty(1), 3, 1)
    # HSV thresholds for the yellow and blue masks.
    hsv_yellow1 = (25, 50, 50)
    hsv_yellow2 = (45, 255, 255)
    hsv_blue1 = (100, 90, 80)
    hsv_blue2 = (150, 255, 155)
    # change color space to HSV (on the uncorrected image)
    hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)
    # find the color
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
    yellow = cv2.dilate(yellow, kernel)
    self.bw_1 = yellow  # yellow mask, reused in the visualization below
    blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
    blue = cv2.dilate(blue, kernel)
    self.bw_2 = blue  # blue mask, used for normal orientation + visualization
    # Keep only edges lying on each colour mask.
    edge_color_yellow = cv2.bitwise_and(yellow, edges)
    edge_color_blue = cv2.bitwise_and(blue, edges)
    # NOTE(review): `lines` is recomputed here and again left unused.
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, np.empty(1), 3, 1)
    lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10,
                                   np.empty(1), 3, 1)
    lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi / 180, 10,
                                 np.empty(1), 3, 1)
    # HoughLinesP returns None when no lines are found.
    if lines_yellow is not None:
        lines_yellow = np.array(lines_yellow[0])
        print "found yellow lines"
    else:
        lines_yellow = []
        print "no yellow lines"
    if lines_blue is not None:
        lines_blue = np.array(lines_blue[0])
        print "found blue lines"
    else:
        lines_blue = []
        print "no blue lines"
    # Hard-coded cutoff/ratio for a 160x120 working image.
    # NOTE(review): the ratio order here divides x by 120 and y by 160 --
    # the opposite of the arr_ratio computed further below; confirm intent.
    arr_cutoff = np.array((0, 40, 0, 40))
    arr_ratio = np.array((1. / 120, 1. / 160, 1. / 120, 1. / 160))
    lines_1 = lines_yellow  # primary colour: yellow
    lines_2 = lines_blue    # secondary colour: blue (gates processing below)
    normals = []
    centers = []
    if len(lines_2) > 0:
        # find the normalized coordinates
        lines_normalized = ((lines_1 + arr_cutoff) * arr_ratio)
        # find the unit vector
        length = np.sum((lines_1[:, 0:2] - lines_1[:, 2:4])**2,
                        axis=1, keepdims=True)**0.5
        dx = 1. * (lines_1[:, 3:4] - lines_1[:, 1:2]) / length
        dy = 1. * (lines_1[:, 0:1] - lines_1[:, 2:3]) / length
        # find the center point
        centers = np.hstack([(lines_1[:, 0:1] + lines_1[:, 2:3]) / 2,
                             (lines_1[:, 1:2] + lines_1[:, 3:4]) / 2])
        # find the vectors' direction: probe 4px on either side of each
        # center along the normal, clamped to the 160x120 image bounds.
        x3 = (centers[:, 0:1] - 4. * dx).astype('int')
        x3[x3 < 0] = 0
        x3[x3 >= 160] = 160 - 1
        y3 = (centers[:, 1:2] - 4. * dy).astype('int')
        y3[y3 < 0] = 0
        y3[y3 >= 120] = 120 - 1
        x4 = (centers[:, 0:1] + 4. * dx).astype('int')
        x4[x4 < 0] = 0
        x4[x4 >= 160] = 160 - 1
        y4 = (centers[:, 1:2] + 4. * dy).astype('int')
        y4[y4 < 0] = 0
        y4[y4 >= 120] = 120 - 1
        # Flip the normal sign when both probe points land on the blue mask.
        flag_signs = (np.logical_and(self.bw_2[y3, x3] > 0,
                                     self.bw_2[y4, x4] > 0)).astype('int') * 2 - 1
        normals = np.hstack([dx, dy]) * flag_signs
        # Swap endpoints so the segment direction is consistent with its normal.
        flag = ((lines_1[:, 2] - lines_1[:, 0]) * normals[:, 1] -
                (lines_1[:, 3] - lines_1[:, 1]) * normals[:, 0]) > 0
        for i in range(len(lines_1)):
            if flag[i]:
                x1, y1, x2, y2 = lines_1[i, :]
                lines_1[i, :] = [x2, y2, x1, y1]
    tk.completed('detected')
    # SegmentList constructor
    segmentList = SegmentList()
    segmentList.header.stamp = image_msg.header.stamp
    # Convert to normalized pixel coordinates, and add segments to segmentList
    arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
    arr_ratio = np.array((1. / self.image_size[1], 1. / self.image_size[0],
                          1. / self.image_size[1], 1. / self.image_size[0]))
    if len(lines_2) > 0:
        # NOTE(review): lines_normalized_blue is computed but never used; the
        # call below passes the YELLOW lines_normalized while being gated on
        # blue lines being present -- likely a bug, but the intended colour
        # cannot be determined from here. Confirm before changing.
        lines_normalized_blue = ((lines_2 + arr_cutoff) * arr_ratio)
        segmentList.segments.extend(
            self.toSegmentMsg(lines_normalized, normals, centers, 0))
        self.intermittent_log('# segments: white %3d ' % (len(lines_2)))
    tk.completed('prepared')
    # Publish segmentList
    self.pub_lines.publish(segmentList)
    tk.completed('--pub_lines--')
    # VISUALIZATION only below
    if self.verbose:
        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        if len(lines_1) > 0:
            for x1, y1, x2, y2 in lines_1:
                # Clamp the midpoint to the 160x120 image bounds.
                cx = int(x1 + x2) / 2
                cy = int(y1 + y2) / 2
                if cx > 160:
                    cx = 160 - 1
                elif cx < 0:
                    cx = 0
                if cy > 120:
                    cy = 120 - 1
                elif cy < 0:
                    cy = 0
                # Draw only lines whose midpoint straddles the blue/yellow masks.
                if (self.bw_2[cy, cx - 1] == 255 and self.bw_1[cy, cx + 1] == 255):
                    cv2.line(image_with_lines, (x1, y1), (x2, y2), (255, 255, 255))
                    cv2.circle(image_with_lines, (x1, y1), 1, (0, 255, 0))  #green circle
                    cv2.circle(image_with_lines, (x2, y2), 1, (255, 0, 0))  #red circle
                if (self.bw_2[cy, cx + 1] == 255 and self.bw_1[cy, cx - 1] == 255):
                    cv2.line(image_with_lines, (x1, y1), (x2, y2), (255, 255, 255))
                    cv2.circle(image_with_lines, (x1, y1), 1, (0, 255, 0))  #green circle
                    cv2.circle(image_with_lines, (x2, y2), 1, (255, 0, 0))  #red circle
        #drawLines(image_with_lines, (lines_2), (255, 255, 255))
        #drawLines(image_with_lines, yellow.lines, (255, 0, 0))
        #drawLines(image_with_lines, red.lines, (0, 255, 0))
        tk.completed('drawn')
        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        tk.completed('pub_image')
    # if self.verbose:
    #colorSegment = color_segment(self.bw_1, self.bw_2)
    #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
    #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
    #self.pub_edge.publish(edge_msg_out)
    #self.pub_colorSegment.publish(colorSegment_msg_out)
    tk.completed('pub_edge/pub_segment')
    self.intermittent_log(tk.getall())
def processSegments(self, segment_list_msg):
    """Estimate the lane pose by fitting Gaussian processes to buffered
    white and yellow segment endpoints, and publish a LanePose message.

    The incoming segment endpoints are accumulated in odometry-propagated
    buffers, trimmed to a valid radius, fitted with one GP per lane boundary,
    and evaluated at a lookahead distance to derive (d, phi). A legacy
    histogram-filter pass still runs afterwards for its debug outputs.

    Args:
        segment_list_msg (:obj:`SegmentList`): ground-projected line segments.
    """
    # Get actual timestamp for latency measurement
    timestamp_now = rospy.Time.now()

    if not self.active:
        return

    # TODO-TAL double call to param server --> see TODO in the readme,
    # not only is it never updated, but it is always 0
    # Step 0: get values from server.
    # Fix: compare by value (`!=`), not identity (`is not`), and query the
    # param server once.
    curvature_res = rospy.get_param('~curvature_res')
    if curvature_res != self.curvature_res:
        self.curvature_res = curvature_res
        self.filter.updateRangeArray(self.curvature_res)

    # ########################################################
    # Parameters
    # ########################################################
    lane_width = 0.23  # [m]
    valid_rad = 0.5  # [m]
    lookahead = 0.1  # [m]
    n_test_points = 50
    max_buffer_size = 25

    # ########################################################
    # Process inputs
    # ########################################################
    # Generate an xy list from the data
    white_xy = []
    yellow_xy = []
    for segment in segment_list_msg.segments:
        # White points
        if segment.color == 0:
            white_xy.append([segment.points[0].x, segment.points[0].y])
        # Yellow points
        elif segment.color == 1:
            yellow_xy.append([segment.points[0].x, segment.points[0].y])
        else:
            print("Unrecognized segment color")

    # Turn into numpy arrays
    white_xy = np.array(white_xy)
    yellow_xy = np.array(yellow_xy)

    # ########################################################
    # Process buffer
    # ########################################################
    # Integrate the odometry
    current_time = rospy.get_time()
    dt = current_time - self.t_last_update
    self.t_last_update = current_time
    delta_x = self.velocity.v * dt
    delta_theta = self.velocity.omega * dt

    # Form the displacement of the robot over dt.
    # NOTE(review): this only translates the buffered points; they are never
    # rotated by delta_theta. Confirm this approximation is intended.
    delta_r = delta_x * np.array([-np.sin(delta_theta), np.cos(delta_theta)])

    # If buffers contain data, propagate it BACKWARD
    if len(self.buffer_white) > 0:
        self.buffer_white = self.buffer_white - delta_r
    if len(self.buffer_yellow) > 0:
        self.buffer_yellow = self.buffer_yellow - delta_r

    # If new observations were received, append them to the top of the list
    if len(white_xy) > 0:
        if len(self.buffer_white) > 0:
            self.buffer_white = np.vstack((white_xy, self.buffer_white))
        else:
            self.buffer_white = white_xy
    if len(yellow_xy) > 0:
        if len(self.buffer_yellow) > 0:
            self.buffer_yellow = np.vstack((yellow_xy, self.buffer_yellow))
        else:
            self.buffer_yellow = yellow_xy

    # ########################################################
    # Trim training points to within a valid radius
    # ########################################################
    valid_white_xy = []
    valid_yellow_xy = []

    # Select points within rad
    if len(self.buffer_white) > 0:
        tf_valid_white = self.computeRad(self.buffer_white) <= valid_rad
        valid_white_xy = self.buffer_white[tf_valid_white, :]
    if len(self.buffer_yellow) > 0:
        tf_valid_yellow = self.computeRad(self.buffer_yellow) <= valid_rad
        valid_yellow_xy = self.buffer_yellow[tf_valid_yellow, :]

    # ########################################################
    # Fit GPs
    # ########################################################
    # Set up a linspace for prediction
    if len(valid_white_xy) > 0:
        x_pred_white = np.linspace(
            0, np.minimum(valid_rad, np.amax(valid_white_xy[:, 0])),
            num=n_test_points + 1).reshape(-1, 1)
    else:
        x_pred_white = np.linspace(0, valid_rad,
                                   num=n_test_points + 1).reshape(-1, 1)
    if len(valid_yellow_xy) > 0:
        x_pred_yellow = np.linspace(
            0, np.minimum(valid_rad, np.amax(valid_yellow_xy[:, 0])),
            num=n_test_points + 1).reshape(-1, 1)
    else:
        x_pred_yellow = np.linspace(0, valid_rad,
                                    num=n_test_points + 1).reshape(-1, 1)

    # Start with the prior: lane boundaries at +/- half the lane width.
    y_pred_white = -lane_width / 2.0 * np.ones((len(x_pred_white), 1))
    y_pred_yellow = lane_width / 2.0 * np.ones((len(x_pred_yellow), 1))

    t_start = time.time()

    # White GP. Note that we're fitting y = f(x)
    gpWhite = []
    if len(valid_white_xy) > 2:
        x_train_white = valid_white_xy[:, 0].reshape(-1, 1)
        y_train_white = valid_white_xy[:, 1]
        # Kernel hyperparameters are fixed (optimizer=None), so the bounds
        # are not used during fitting.
        whiteKernel = C(0.01, (-lane_width, 0)) * RBF(5, (0.3, 0.4))
        # whiteKernel = C(1.0, (1e-3, 1e3)) * RBF(5, (1e-2, 1e2))
        gpWhite = GaussianProcessRegressor(kernel=whiteKernel, optimizer=None)
        gpWhite.fit(x_train_white, y_train_white)
        # Predict
        y_pred_white = gpWhite.predict(x_pred_white)

    # Yellow GP
    gpYellow = []
    if len(valid_yellow_xy) > 2:
        x_train_yellow = valid_yellow_xy[:, 0].reshape(-1, 1)
        y_train_yellow = valid_yellow_xy[:, 1]
        # yellowKernel = C(lane_width / 2.0, (0, lane_width)) * RBF(5, (0.3, 0.4))
        yellowKernel = C(0.01, (0, lane_width)) * RBF(5, (0.3, 0.4))
        gpYellow = GaussianProcessRegressor(kernel=yellowKernel, optimizer=None)
        gpYellow.fit(x_train_yellow, y_train_yellow)
        # Predict
        y_pred_yellow = gpYellow.predict(x_pred_yellow)

    t_end = time.time()
    # print("MODEL BUILDING TIME: ", t_end - t_start)

    # Make xy point arrays
    xy_gp_white = np.hstack((x_pred_white, y_pred_white.reshape(-1, 1)))
    xy_gp_yellow = np.hstack((x_pred_yellow, y_pred_yellow.reshape(-1, 1)))

    # Trim predicted values to valid radius
    tf_valid_white = self.computeRad(xy_gp_white) <= valid_rad
    tf_valid_yellow = self.computeRad(xy_gp_yellow) <= valid_rad
    valid_xy_gp_white = xy_gp_white[tf_valid_white, :]
    valid_xy_gp_yellow = xy_gp_yellow[tf_valid_yellow, :]

    # ########################################################
    # Display
    # ########################################################
    self.visualizeCurves(valid_xy_gp_white, valid_xy_gp_yellow)

    # ########################################################
    # Compute d and phi
    # ########################################################
    # Evaluate each GP at the lookahead distance and average to get d.
    # Start with prior
    y_white = -lane_width / 2
    y_yellow = lane_width / 2
    if gpWhite:
        y_white = gpWhite.predict(np.array([lookahead]).reshape(-1, 1))
    if gpYellow:
        y_yellow = gpYellow.predict(np.array([lookahead]).reshape(-1, 1))

    # Take the average, and negate
    disp = np.mean((y_white, y_yellow))
    d = -disp

    # Compute phi (small-angle approximation) from the lookahead distance and d
    L = np.sqrt(d**2 + lookahead**2)
    phi = disp / L

    print("D is: ", d)
    print("phi is: ", phi)

    # ########################################################
    # Publish the lanePose message
    # ########################################################
    lanePose = LanePose()
    lanePose.header.stamp = segment_list_msg.header.stamp
    lanePose.d = d
    lanePose.phi = phi
    lanePose.in_lane = True
    lanePose.status = lanePose.NORMAL
    lanePose.curvature = 0
    self.pub_lane_pose.publish(lanePose)

    # ########################################################
    # Save valid training points to buffer
    # ########################################################
    # Cap the buffer at max_buffer_size.
    # Fix: slice to [0:white_max] -- the original sliced to
    # [0:white_max - 1], which dropped one point and effectively capped the
    # buffers at max_buffer_size - 1.
    white_max = np.minimum(len(self.buffer_white), max_buffer_size)
    yellow_max = np.minimum(len(self.buffer_yellow), max_buffer_size)
    self.buffer_white = self.buffer_white[0:white_max]
    self.buffer_yellow = self.buffer_yellow[0:yellow_max]
    # print ("SIZE of white buffer: ", len(self.buffer_white))
    # print ("YELLOW of yellow buffer: ", len(self.buffer_yellow))

    # ########################################################
    # Legacy histogram-filter pipeline (pose publishing disabled; only the
    # inlier segments, belief image and FSM flag below are still published).
    # NOTE(review): dt here is near zero because t_last_update was already
    # refreshed earlier in this callback -- confirm this predict/update pass
    # is still wanted.
    # ########################################################
    # Step 1: predict
    current_time = rospy.get_time()
    dt = current_time - self.t_last_update
    v = self.velocity.v
    w = self.velocity.omega
    self.filter.predict(dt=dt, v=v, w=w)
    # self.t_last_update = current_time

    # Step 2: update
    self.filter.update(segment_list_msg.segments)

    # Step 3: build messages and publish things
    [d_max, phi_max] = self.filter.getEstimate()
    inlier_segments = self.filter.get_inlier_segments(
        segment_list_msg.segments, d_max, phi_max)
    inlier_segments_msg = SegmentList()
    inlier_segments_msg.header = segment_list_msg.header
    inlier_segments_msg.segments = inlier_segments
    self.pub_seglist_filtered.publish(inlier_segments_msg)

    max_val = self.filter.getMax()
    in_lane = max_val > self.filter.min_max

    # (The filter-based LanePose publishing was already disabled in the
    # original; the GP-based pose above is the one published.)

    # TODO-TAL add a debug param to not publish the image !!
    # TODO-TAL also, the CvBridge is re instantiated every time...
    # publish the belief image
    bridge = CvBridge()
    belief_img = bridge.cv2_to_imgmsg(
        np.array(255 * self.filter.beliefArray[0]).astype("uint8"), "mono8")
    belief_img.header.stamp = segment_list_msg.header.stamp
    self.pub_belief_img.publish(belief_img)

    # Latency of Estimation including curvature estimation
    # (rolling window of at most 20 samples)
    estimation_latency_stamp = rospy.Time.now() - timestamp_now
    estimation_latency = estimation_latency_stamp.secs + estimation_latency_stamp.nsecs / 1e9
    self.latencyArray.append(estimation_latency)
    if len(self.latencyArray) >= 20:
        self.latencyArray.pop(0)

    # also publishing a separate Bool for the FSM
    # TODO-TAL change with in_lane. Is this message useful since it is always true?
    in_lane_msg = BoolStamped()
    in_lane_msg.header.stamp = segment_list_msg.header.stamp
    in_lane_msg.data = True
    self.pub_in_lane.publish(in_lane_msg)
def cbImage(self, image_msg):
    """Detect white/yellow/red lines in the incoming frame and publish them
    as a normalized SegmentList (plus debug images when verbose)."""
    # self.sub_rect.unregister()
    image_cv = self.bridge.imgmsg_to_cv2(image_msg, desired_encoding="bgr8")

    # Bring the frame to the configured working resolution, then crop the sky.
    height_orig, width_orig = image_cv.shape[0:2]
    if self.image_size[0] != height_orig or self.image_size[1] != width_orig:
        # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
        image_cv = cv2.resize(image_cv,
                              (self.image_size[1], self.image_size[0]),
                              interpolation=cv2.INTER_NEAREST)
    image_cv = image_cv[self.top_cutoff:, :, :]

    # Run the detector once on the prepared image.
    self.detector_used.setImage(image_cv)
    white = self.detector_used.detectLines('white')
    yellow = self.detector_used.detectLines('yellow')
    red = self.detector_used.detectLines('red')

    segmentList = SegmentList()
    segmentList.header.stamp = image_msg.header.stamp

    # Map cropped-image pixel coordinates back to normalized full-image ones.
    arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
    arr_ratio = np.array((1. / self.image_size[1], 1. / self.image_size[0],
                          1. / self.image_size[1], 1. / self.image_size[0]))
    for detection, color_id in ((white, Segment.WHITE),
                                (yellow, Segment.YELLOW),
                                (red, Segment.RED)):
        if len(detection.lines) > 0:
            normalized = (detection.lines + arr_cutoff) * arr_ratio
            segmentList.segments.extend(
                self.toSegmentMsg(normalized, detection.normals, color_id))

    rospy.loginfo('# segments: white %3d yellow %3d red %3d' %
                  (len(white.lines), len(yellow.lines), len(red.lines)))

    # Publish segmentList
    self.pub_lines.publish(segmentList)

    if self.verbose:
        # print('line_detect_node: verbose is on!')
        # Draw lines and normals on a copy of the working image.
        overlay = np.copy(image_cv)
        drawLines(overlay, white.lines, (0, 0, 0))
        drawLines(overlay, yellow.lines, (255, 0, 0))
        drawLines(overlay, red.lines, (0, 255, 0))

        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(overlay, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)

        # Publish the edge map and the colour segmentation debug images.
        colorSegment = color_segment(white.area, red.area, yellow.area)
        edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector_used.edges,
                                                 "mono8")
        colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
        self.pub_edge.publish(edge_msg_out)
        self.pub_colorSegment.publish(colorSegment_msg_out)
def only_one_color(segment_list, color):
    """Return a new SegmentList keeping only the segments of the given color."""
    matching = []
    for seg in segment_list.segments:
        if seg.color == color:
            matching.append(seg)
    return SegmentList(segments=matching)
def on_received_image(self, context, image_msg):
    """Run the full line-detection pipeline on one frame.

    Phases (timed via the context): decode JPEG, resize/crop, colour-correct,
    detect white/yellow/red lines, build a normalized SegmentList, publish it,
    and optionally publish visualization images.
    """
    if not self.active:
        return
    self.intermittent_counter += 1

    with context.phase('decoding'):
        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

    with context.phase('resizing'):
        # Resize to the configured working size (if needed), then crop the sky.
        height_orig, width_orig = image_cv.shape[0:2]
        target_h = self.config.img_size[0]
        target_w = self.config.img_size[1]
        if target_h != height_orig or target_w != width_orig:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (target_w, target_h),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.config.top_cutoff:, :, :]

    with context.phase('correcting'):
        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

    with context.phase('detection'):
        # Run the detector once on the corrected image.
        self.detector.setImage(image_cv_corr)
        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

    with context.phase('preparing-images'):
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates and collect the segments.
        top_cutoff = self.config.top_cutoff
        s0 = self.config.img_size[0]
        s1 = self.config.img_size[1]
        arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
        arr_ratio = np.array((1. / s1, 1. / s0, 1. / s1, 1. / s0))
        for detection, color_id in ((white, Segment.WHITE),
                                    (yellow, Segment.YELLOW),
                                    (red, Segment.RED)):
            if len(detection.lines) > 0:
                normalized = (detection.lines + arr_cutoff) * arr_ratio
                segmentList.segments.extend(
                    toSegmentMsg(normalized, detection.normals, color_id))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(white.lines), len(yellow.lines), len(red.lines)))

    # Publish segmentList
    with context.phase('publishing'):
        self.publishers.segment_list.publish(segmentList)

    # VISUALIZATION only below
    if self.config.verbose:
        with context.phase('draw-lines'):
            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

        with context.phase('published-images'):
            # Publish the frame with lines
            out = d8n_image_msg_from_cv_image(image_with_lines, "bgr8",
                                              same_timestamp_as=image_msg)
            self.publishers.image_with_lines.publish(out)

        with context.phase('pub_edge/pub_segment'):
            out = d8n_image_msg_from_cv_image(self.detector.edges, "mono8",
                                              same_timestamp_as=image_msg)
            self.publishers.edge.publish(out)
            colorSegment = color_segment(white.area, red.area, yellow.area)
            out = d8n_image_msg_from_cv_image(colorSegment, "bgr8",
                                              same_timestamp_as=image_msg)
            self.publishers.color_segment.publish(out)

    if self.intermittent_log_now():
        self.info('stats from easy_node\n' + indent(context.get_stats(), '> '))