Example #1
    def __init__(self):
        #node_name = "Lane Filter Tester"
        pub_fake_segment_list = rospy.Publisher("~segment_list",
                                                SegmentList,
                                                queue_size=1)
        rospy.sleep(1)

        seg = Segment()
        seg.points[0].x = rospy.get_param("~x1")
        seg.points[0].y = rospy.get_param("~y1")
        seg.points[1].x = rospy.get_param("~x2")
        seg.points[1].y = rospy.get_param("~y2")
        color = rospy.get_param("~color")

        if color == "white":
            seg.color = seg.WHITE
        elif color == "yellow":
            seg.color = seg.YELLOW
        elif color == "red":
            seg.color = seg.RED
        else:
            print("error no color specified")
        seg_list = SegmentList()
        seg_list.segments.append(seg)
        pub_fake_segment_list.publish(seg_list)
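
A minimal sketch of how this tester might be started; the class name FakeSegmentPublisher and the node name are illustrative assumptions, not part of the snippet above:

    if __name__ == "__main__":
        rospy.init_node("fake_segment_publisher", anonymous=False)
        FakeSegmentPublisher()  # runs the __init__ above: builds and publishes one fake SegmentList
        rospy.spin()
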
    def toSegmentMsg(self, lines, normals, color):

        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))

        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):

            ox = int((x1 + x2) / 2)
            oy = int((y1 + y2) / 2)

            cx = (ox + 3 * norm_x).astype('int')
            cy = (oy + 3 * norm_y).astype('int')

            ccx = (ox - 3 * norm_x).astype('int')
            ccy = (oy - 3 * norm_y).astype('int')

            if cx > 158:
                cx = 158
            elif cx < 1:
                cx = 1
            if ccx > 158:
                ccx = 158
            elif ccx < 1:
                ccx = 1

            if cy >= 79:
                cy = 79
            elif cy < 1:
                cy = 1
            if ccy >= 79:
                ccy = 79
            elif ccy < 1:
                ccy = 1

            if (self.blue[cy, cx] == 255 and self.yellow[ccy, ccx] == 255) or (
                    self.yellow[cy, cx] == 255 and self.blue[ccy, ccx] == 255):

                [x1, y1, x2, y2] = ([x1, y1, x2, y2] + arr_cutoff) * arr_ratio

                segment = Segment()
                segment.color = color
                segment.pixels_normalized[0].x = x1
                segment.pixels_normalized[0].y = y1
                segment.pixels_normalized[1].x = x2
                segment.pixels_normalized[1].y = y2
                segment.normal.x = norm_x
                segment.normal.y = norm_y
                segmentMsgList.append(segment)

                if self.time_switch:
                    msgg = BoolStamped()
                    msgg.data = True
                    self.pub_lane.publish(msgg)
                    self.time_switch = False
                    self.count = 0

        return segmentMsgList
Example #3
    def _to_segment_msg(lines, normals, color):
        """
        Converts line detections to a list of Segment messages.

        Converts the resultant line segments and normals from the line detection to a list of Segment messages.

        Args:
            lines (:obj:`numpy array`): An ``Nx4`` array where each row represents a line.
            normals (:obj:`numpy array`): An ``Nx2`` array where each row represents the normal of a line.
            color (:obj:`str`): Color name string, should be one of the pre-defined in the Segment message definition.

        Returns:
            :obj:`list` of :obj:`duckietown_msgs.msg.Segment`: List of Segment messages

        """
        segment_msg_list = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
            segment_msg_list.append(segment)
        return segment_msg_list
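
A minimal usage sketch for _to_segment_msg; the sample arrays are made up, and the function is assumed here to be callable as a plain helper (e.g. a staticmethod):

    lines = np.array([[0.10, 0.20, 0.30, 0.40],
                      [0.50, 0.60, 0.70, 0.80]])  # Nx4: x1, y1, x2, y2 per detected line
    normals = np.array([[0.0, 1.0],
                        [1.0, 0.0]])              # Nx2: one normal per line
    segments = _to_segment_msg(lines, normals, Segment.WHITE)
    print(len(segments))  # 2, one Segment message per input row
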
    def lineseglist_cb(self, seglist_msg):
        """
        Projects a list of line segments on the ground reference frame point by point by
        calling :py:meth:`pixel_msg_to_ground_msg`. Then publishes the projected list of segments.

        Args:
            seglist_msg (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in pixel space from
            unrectified images

        """
        if self.camera_info_received:
            seglist_out = SegmentList()
            seglist_out.header = seglist_msg.header
            for received_segment in seglist_msg.segments:
                new_segment = Segment()
                new_segment.points[0] = self.pixel_msg_to_ground_msg(
                    received_segment.pixels_normalized[0])
                new_segment.points[1] = self.pixel_msg_to_ground_msg(
                    received_segment.pixels_normalized[1])
                new_segment.color = received_segment.color
                # TODO what about normal and points
                seglist_out.segments.append(new_segment)
            self.pub_lineseglist.publish(seglist_out)

            if not self.first_processing_done:
                self.log("First projected segments published.")
                self.first_processing_done = True

            if self.pub_debug_img.get_num_connections() > 0:
                debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(
                    self.debug_image(seglist_out))
                debug_image_msg.header = seglist_out.header
                self.pub_debug_img.publish(debug_image_msg)
        else:
            self.log("Waiting for a CameraInfo message", "warn")
Example #5
 def lineseglist_cb(self,seglist_msg):
     seglist_out = SegmentList()
     for received_segment in seglist_msg.segments:
         new_segment = Segment()
         new_segment.points[0] = self.gp.vector2ground(received_segment.pixels_normalized[0])
         new_segment.points[1] = self.gp.vector2ground(received_segment.pixels_normalized[1])
         new_segment.color = received_segment.color
         # TODO what about normal and points
         seglist_out.segments.append(new_segment)
     self.pub_lineseglist_.publish(seglist_out)
Example #6
def fuzzy_segment(s1, intensity):
    s2 = Segment()

    def noise():
        return np.random.randn() * intensity

    for i in range(2):
        s2.pixels_normalized[i].x = s1.pixels_normalized[i].x + noise()
        s2.pixels_normalized[i].y = s1.pixels_normalized[i].y + noise()
    s2.color = s1.color
    return s2
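
A minimal usage sketch of fuzzy_segment with made-up endpoint values and noise intensity:

    s1 = Segment()
    s1.color = Segment.WHITE
    s1.pixels_normalized[0].x, s1.pixels_normalized[0].y = 0.20, 0.50
    s1.pixels_normalized[1].x, s1.pixels_normalized[1].y = 0.40, 0.55
    s2 = fuzzy_segment(s1, intensity=0.01)
    # s2 keeps the color, but both endpoints are shifted by Gaussian noise with sigma 0.01
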
Example #7
def toSegmentMsg(lines, normals, color):
    segmentMsgList = []
    for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
        segment = Segment()
        segment.color = color
        segment.pixels_normalized[0].x = x1
        segment.pixels_normalized[0].y = y1
        segment.pixels_normalized[1].x = x2
        segment.pixels_normalized[1].y = y2
        segment.normal.x = norm_x
        segment.normal.y = norm_y
        segmentMsgList.append(segment)
    return segmentMsgList
 def toSegmentMsg(self, lines, normals, color):

     segmentMsgList = []
     for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
         segment = Segment()
         segment.color = color
         segment.pixels_normalized[0].x = x1
         segment.pixels_normalized[0].y = y1
         segment.pixels_normalized[1].x = x2
         segment.pixels_normalized[1].y = y2
         segment.normal.x = norm_x
         segment.normal.y = norm_y

         segmentMsgList.append(segment)
     return segmentMsgList
Example #9
 def storeSegments(self,in_segs):
     rec_seg_list = SegmentList()
     rec_seg_list.header = in_segs.header
     for in_seg in in_segs.segments:
         new_seg = Segment()
         new_seg.points[0].x = in_seg.points[0].x
         new_seg.points[0].y = in_seg.points[0].y
         new_seg.points[0].z = 0.0
         new_seg.points[1].x = in_seg.points[1].x
         new_seg.points[1].y = in_seg.points[1].y
         new_seg.points[1].z = 0.0
         new_seg.color = in_seg.color
         rec_seg_list.segments.append(new_seg)
     self.seg_list.append(rec_seg_list)
     self.pubMap()
    def pointArrays2Marker(self, *pointArrays):
        marker = Marker()
        marker.header.frame_id = self.veh_name
        marker.ns = self.veh_name + "/line_seg"
        marker.id = 0
        marker.action = Marker.ADD
        marker.lifetime = rospy.Duration.from_sec(5.0)
        marker.type = Marker.LINE_LIST
        marker.pose.orientation.w = 1.0
        marker.scale.x = 0.02
        for pointArray in pointArrays:
            arrayShape = np.shape(pointArray)
            print(pointArray)
            for lv1 in range(arrayShape[0] - 1):  # one line segment per consecutive pair of points
                seg = Segment()
                seg.points[0].x = pointArray[lv1][0]
                seg.points[0].y = pointArray[lv1][1]
                seg.points[1].x = pointArray[lv1 + 1][0]
                seg.points[1].y = pointArray[lv1 + 1][1]
                marker.points.append(seg.points[0])
                marker.points.append(seg.points[1])
                color = ColorRGBA(r=1.0, g=0.0, b=0.0, a=1.0)
                marker.colors.append(color)
                marker.colors.append(color)

        return marker
Example #11
    def visualizeCurves(self, *pointArrays):
        marker_array = MarkerArray()
        marker = Marker()
        marker.header.frame_id = self.veh_name
        marker.ns = self.veh_name + "/line_seg"
        marker.id = 0
        marker.action = Marker.ADD
        marker.lifetime = rospy.Duration.from_sec(5.0)
        marker.type = Marker.LINE_LIST
        marker.pose.orientation.w = 1.0
        marker.scale.x = 0.02
        for pointArray in pointArrays:
            for lv1 in range(len(pointArray) - 1):
                seg = Segment()
                seg.points[0].x = pointArray[lv1][0]
                seg.points[0].y = pointArray[lv1][1]
                seg.points[1].x = pointArray[lv1 + 1][0]
                seg.points[1].y = pointArray[lv1 + 1][1]
                marker.points.append(seg.points[0])
                marker.points.append(seg.points[1])
                color = ColorRGBA(r=1.0, g=0.0, b=0.0, a=1.0)
                marker.colors.append(color)
                marker.colors.append(color)

        # append the single LINE_LIST marker once, after all segments have been added
        marker_array.markers.append(marker)
        self.pub_lane.publish(marker_array)
Example #12
def find_ground_coordinates(
    gpg: GroundProjectionGeometry, sl: SegmentList, skip_not_on_ground: bool = True
) -> SegmentList:
    """
        Creates a new segment list with the ground coordinates set.

    """
    cutoff = 0.01
    sl2 = SegmentList()
    sl2.header = sl.header

    # Get ground truth of segmentList
    for s1 in sl.segments:

        g0 = gpg.vector2ground(s1.pixels_normalized[0])
        g1 = gpg.vector2ground(s1.pixels_normalized[1])
        if skip_not_on_ground:
            if g0.x < cutoff or g1.x < cutoff:
                continue

        points = [g0, g1]
        pixels_normalized = [s1.pixels_normalized[0], s1.pixels_normalized[1]]
        color = s1.color
        s2 = Segment(points=points, pixels_normalized=pixels_normalized, color=color)
        # TODO what about normal and points
        sl2.segments.append(s2)
    return sl2
Example #13
    def toSegmentMsg(self, lines, normals, color):

        segmentMsgList = []
        #segmentMsgList = [Segment() for i in range(lines.shape[0])]
        #k = 0
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
            #segmentMsgList[k] = segment
            #k += 1
        return segmentMsgList
Example #14
 def toSegmentMsg(self, lines, normals, color, wid, hei):

     segmentMsgList = []
     for u1, v1, u2, v2, norm_u, norm_v in np.hstack((lines, normals)):
         segment = Segment()
         segment.color = color
         segment.pixels[0].u = int(u1)
         segment.pixels[0].v = int(v1)
         segment.pixels[1].u = int(u2)
         segment.pixels[1].v = int(v2)
         segment.pixels_normalized[0].x = u1 / wid
         segment.pixels_normalized[0].y = v1 / hei
         segment.pixels_normalized[1].x = u2 / wid
         segment.pixels_normalized[1].y = v2 / hei
         segment.normal.x = norm_u
         segment.normal.y = norm_v

         segmentMsgList.append(segment)
     return segmentMsgList
    def toSegmentMsg(self, lines, normals, centers, color):
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            #if  self.bw_2[y4,x4]>0 :
            #   segment = Segment()
            #  segment.color = color
            # segment.pixels_normalized[0].x = x1
            #segment.pixels_normalized[0].y = y1
            #segment.pixels_normalized[1].x = x2
            #segment.pixels_normalized[1].y = y2
            #segment.normal.x = norm_x
            #segment.normal.y = norm_y
            segmentMsgList.append(segment)
        return segmentMsgList
Example #16
    def lineseglist_cb(self, seglist_msg):
        message_time = seglist_msg.header.stamp.sec + seglist_msg.header.stamp.nanosec*1e-9
        current_time = time.time()
        delay = current_time - message_time
        
        #fname = "/dev/shm/segment" + str(message_time) 
        #seglist_msg.segments = pickle.load(open(fname, "rb"))
        #os.unlink(fname)

        #print("message time: " + str(message_time))
        #print("current time: " + str(current_time))
        #print("delay: " + str(delay), "# segments: ", len(seglist_msg.segments))

        seglist_out = SegmentList()
        seglist_out.header = seglist_msg.header
        for received_segment in seglist_msg.segments:
            new_segment = Segment()
            new_segment.points[0] = self.gp.vector2ground(received_segment.pixels_normalized[0])
            new_segment.points[1] = self.gp.vector2ground(received_segment.pixels_normalized[1])
            new_segment.color = received_segment.color
            # TODO what about normal and points
            seglist_out.segments.append(new_segment)
        self.pub_lineseglist_.publish(seglist_out)
def predict_segments(sm, gpg):
    """
        Predicts what segments the robot would see.

        Assumes map is in FRAME_AXLE.
    """
    x_frustum = +0.1
    fov = np.deg2rad(150)
    res = []
    for segment in sm.segments:
        p1 = segment.points[0]
        p2 = segment.points[1]

        # If we are both in FRAME_AXLE
        if ((sm.points[p1].id_frame != FRAME_AXLE)
                or (sm.points[p2].id_frame != FRAME_AXLE)):
            msg = "Cannot deal with points not in frame FRAME_AXLE"
            raise NotImplementedError(msg)

        w1 = sm.points[p1].coords
        w2 = sm.points[p2].coords

        coords_inside = clip_to_view([w1, w2], x_frustum, fov)

        if not coords_inside:
            continue
        w1 = coords_inside[0]
        w2 = coords_inside[1]
        point1 = Point(w1[0], w1[1], w1[2])
        point2 = Point(w2[0], w2[1], w2[2])

        pixel1 = gpg.ground2pixel(point1)
        pixel2 = gpg.ground2pixel(point2)

        normalized1 = gpg.pixel2vector(pixel1)
        normalized2 = gpg.pixel2vector(pixel2)

        pixels_normalized = [normalized1, normalized2]
        normal = Vector2D(0, 0)
        points = [point1, point2]

        #         color = segment_color_constant_from_string(segment.color)
        assert segment.color in [Segment.RED, Segment.YELLOW, Segment.WHITE]
        s = Segment(pixels_normalized=pixels_normalized,
                    normal=normal,
                    points=points,
                    color=segment.color)
        res.append(s)

    return SegmentList(segments=res)
Example #18
def rectify_segment(rectify: Rectify, gpg: GroundProjectionGeometry,
                    s1: Segment) -> Segment:
    pixels_normalized = []

    for i in (0, 1):
        # normalized coordinates
        nc = s1.pixels_normalized[i]
        # get pixel coordinates
        pixels = gpg.vector2pixel(nc)
        # rectify
        pr = rectify.rectify_point(pixels)
        # recompute normalized coordinates
        # t = Pixel(pr[0], pr[1])
        v = gpg.pixel2vector(ImageSpaceResdepPoint(pr))
        pixels_normalized.append(v)

    s2 = Segment(color=s1.color, pixels_normalized=pixels_normalized)
    return s2
Example #19
def rectify_segment(gpg, s1):
    pixels_normalized = []

    for i in (0, 1):
        # normalized coordinates
        nc = s1.pixels_normalized[i]
        # get pixel coordinates
        pixels = gpg.vector2pixel(nc)
        uv = (pixels.u, pixels.v)
        # rectify
        pr = gpg.rectify_point(uv)
        # recompute normalized coordinates
        t = Pixel(pr[0], pr[1])
        v = gpg.pixel2vector(t)
        pixels_normalized.append(v)

    s2 = Segment(color=s1.color, pixels_normalized=pixels_normalized)
    return s2
    def toSegmentMsg(self, lines, normals, color):

        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))

        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):

            ox = int((x1 + x2) / 2)
            oy = int((y1 + y2) / 2)

            cx = (ox + 3 * norm_x).astype('int')
            cy = (oy + 3 * norm_y).astype('int')

            ccx = (ox - 3 * norm_x).astype('int')
            ccy = (oy - 3 * norm_y).astype('int')

            if cx > 158:
                cx = 158
            elif cx < 1:
                cx = 1
            if ccx > 158:
                ccx = 158
            elif ccx < 1:
                ccx = 1

            if cy >= 79:
                cy = 79
            elif cy < 1:
                cy = 1
            if ccy >= 79:
                ccy = 79
            elif ccy < 1:
                ccy = 1

            if (self.blue[cy, cx] == 255 and self.yellow[ccy, ccx] == 255) or (
                    self.yellow[cy, cx] == 255 and self.blue[ccy, ccx] == 255):

                [x1, y1, x2,
                 y2] = (([x1, y1, x2, y2] + arr_cutoff) * arr_ratio)

                segment = Segment()
                segment.color = color
                segment.pixels_normalized[0].x = x1
                segment.pixels_normalized[0].y = y1
                segment.pixels_normalized[1].x = x2
                segment.pixels_normalized[1].y = y2
                segment.normal.x = norm_x
                segment.normal.y = norm_y
                segmentMsgList.append(segment)

                if self.time_switch:
                    msgg = BoolStamped()
                    msgg.data = True
                    self.pub_lane.publish(msgg)
                    self.time_switch = False
                    self.count = 0

            else:
                self.count += 1

                if self.count > 5:
                    print "****************count = ", self.count, " *******************"
                    if self.time_switch == False:
                        msgg = BoolStamped()
                        msgg.data = False
                        self.pub_lane.publish(msgg)
                        self.time_switch = True

                car_control_msg = Twist2DStamped()
                car_control_msg.v = 0.0
                car_control_msg.omega = 0.0
                self.pub_car_cmd.publish(car_control_msg)

        return segmentMsgList
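
To make the probe-point geometry above concrete, a small worked example with made-up numbers for a single detected line:

    # made-up detection: a near-horizontal line, normal pointing mostly "down" in image coordinates
    x1, y1, x2, y2 = 40.0, 30.0, 60.0, 32.0
    norm_x, norm_y = 0.1, 0.995

    ox = int((x1 + x2) / 2)     # 50, midpoint column
    oy = int((y1 + y2) / 2)     # 31, midpoint row
    cx = int(ox + 3 * norm_x)   # 50, probe 3 px along the normal
    cy = int(oy + 3 * norm_y)   # 33
    ccx = int(ox - 3 * norm_x)  # 49, probe 3 px against the normal
    ccy = int(oy - 3 * norm_y)  # 28

The probes are then clamped to [1, 158] and [1, 79] and used to sample the self.blue and self.yellow masks on opposite sides of the line, as in the loop above.
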
Example #21
	def lines_cb(self, image):
		img = self.bridge.compressed_imgmsg_to_cv2(image, "bgr8")
		imgCropped = img[240:480, 0:640]
		hsvimg = cv.cvtColor(imgCropped, cv.COLOR_BGR2HSV)
		lower = np.array([23, 157, 118])
		upper = np.array([179, 255, 255])
		white_lower = np.array([42, 8, 88])
		white_upper = np.array([122, 99, 255])
		arr_cutoff = np.array([0, 40, 0, 40])
		arr_ratio = np.array([1.0 / 160, 1.0 / 120, 1.0 / 160, 1.0 / 120])
		a = []
		b = []
		c = SegmentList()
		
		maskWhite = cv.inRange(hsvimg, white_lower, white_upper)
		maskYellow = cv.inRange(hsvimg, lower, upper)
			
		imgWhite = cv.bitwise_and(imgCropped, imgCropped, mask=maskWhite)
		imgYellow = cv.bitwise_and(imgCropped, imgCropped, mask=maskYellow)
		imgCanny_white = cv.Canny(imgWhite, 150, 200)
		imgCanny_yellow = cv.Canny(imgYellow, 150, 200)        
		imgResized_white = cv.resize(imgCanny_white, (160, 120), interpolation=cv.INTER_NEAREST)
		imgResized_yellow = cv.resize(imgCanny_yellow, (160, 120), interpolation=cv.INTER_NEAREST)
		imgCut_white = imgResized_white[40:120, 0:160]
		imgCut_yellow = imgResized_yellow[40:120, 0:160]
		lines_white = cv.HoughLinesP(imgCut_white, 1, (3.14159 / 180), 1)
		lines_yellow = cv.HoughLinesP(imgCut_yellow, 1, (3.14159 / 180), 1)
		#rospy.logwarn(maskYellow)
		
		# HoughLinesP returns None when nothing is detected, so guard before iterating
		if lines_white is not None:
			for i in range(len(lines_white)):
				lines_normalized_white = (lines_white[i] + arr_cutoff) * arr_ratio
				a.append(lines_normalized_white)
		if lines_yellow is not None:
			for i in range(len(lines_yellow)):
				lines_normalized_yellow = (lines_yellow[i] + arr_cutoff) * arr_ratio
				b.append(lines_normalized_yellow)

		
		segments = SegmentList()
		for i in range(len(a)):
			lines_a = a[i][0]
			# build a fresh Segment per detection so list entries are not aliases of one object
			white_seg = Segment()
			white_seg.color = 0
			white_seg.pixels_normalized[0].x = lines_a[0]
			white_seg.pixels_normalized[0].y = lines_a[1]
			white_seg.pixels_normalized[1].x = lines_a[2]
			white_seg.pixels_normalized[1].y = lines_a[3]
			segments.segments.append(white_seg)

		for i in range(len(b)):
			lines_b = b[i][0]
			yellow_seg = Segment()
			yellow_seg.color = 1
			yellow_seg.pixels_normalized[0].x = lines_b[0]
			yellow_seg.pixels_normalized[0].y = lines_b[1]
			yellow_seg.pixels_normalized[1].x = lines_b[2]
			yellow_seg.pixels_normalized[1].y = lines_b[3]
			segments.segments.append(yellow_seg)

		#rospy.logwarn(lines_yellow)
		self.pub.publish(segments)
Example #22
    def callback_lab5(self, msg):

        # convert the incoming ROS CompressedImage to an OpenCV image using the bridge
        cv_img = self.bridge.compressed_imgmsg_to_cv2(msg, "bgr8")

        # crop the image using the method provided in lab5 pdf
        image_size = (160, 120)
        offset = 40
        new_image = cv2.resize(cv_img,
                               image_size,
                               interpolation=cv2.INTER_NEAREST)
        cv_cropped = new_image[offset:, :]

        # convert colorspace from BGR to HSV
        hsv_img = cv2.cvtColor(cv_cropped, cv2.COLOR_BGR2HSV)

        # filter white lane
        white_filter = cv2.inRange(hsv_img, (0, 0, 225), (180, 40, 255))

        # filter yellow lane
        yellow_filter = cv2.inRange(hsv_img, (25, 80, 120), (75, 255, 255))

        # convert the cropped image to gray
        grey_img = cv2.cvtColor(cv_cropped, cv2.COLOR_BGR2GRAY)

        # perform canny edge detection
        edges = cv2.Canny(grey_img, 0, 255)

        # need to dilate incoming white and yellow lane images before doing bitwise and
        # implement the dilate function provided in the lecture slides
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))

        # also create a unique kernel to dilate filtered edges
        kernel_for_edges = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 1))

        # dilate white and yellow filtered images
        dilated_white = cv2.dilate(white_filter, kernel)
        dilated_yellow = cv2.dilate(yellow_filter, kernel)

        # dilate the edges to account for lines that are too thin
        dilated_edges = cv2.dilate(edges, kernel_for_edges)

        # perform bitwise and for the white and yellow lines
        and_white = cv2.bitwise_and(dilated_edges, dilated_white, mask=None)
        and_yellow = cv2.bitwise_and(dilated_edges, dilated_yellow, mask=None)

        # apply Probabilistic Hough Transform on both white and yellow images' lines
        # white lines
        lines_white = cv2.HoughLinesP(and_white, 1, np.pi / 180, 5, None, 10,
                                      5)

        # yellow lines
        lines_yellow = cv2.HoughLinesP(and_yellow, 1, np.pi / 180, 5, None, 2,
                                       2)

        # draw blue line on the two images using function provided in homework9 pdf
        image_lines_white = self.output_lines(cv_cropped, lines_white)
        # merge the white and yellow line images by sending image_lines_white as argument to output_lines
        image_lines_combined = self.output_lines(image_lines_white,
                                                 lines_yellow)

        # prepare imgmsg for the combined white/yellow lane detection
        ros_combined_output = self.bridge.cv2_to_imgmsg(
            image_lines_combined, "bgr8")

        # publish the combined result of white/yellow lane detector image
        self.pub_combined_img.publish(ros_combined_output)

        # values for normalization as provided in lab5 PDF
        arr_cutoff = np.array([0, offset, 0, offset])
        arr_ratio = np.array([
            1. / image_size[1], 1. / image_size[0], 1. / image_size[1],
            1. / image_size[0]
        ])

        # stores the list of Segments and will be published to /segment_list
        resultList = SegmentList()

        #check to make sure white line is detected
        if (lines_white is not None):
            # normalize the list of white lines
            line_normalized_white = (lines_white + arr_cutoff) * arr_ratio
            w_list = []

            #iterate through each white line
            for i in range(len(line_normalized_white)):

                #create a single segment to store values
                w_seg = Segment()
                w_seg.color = Segment.WHITE

                #store values x0, y0, x1, y1 to this segment
                w_seg.pixels_normalized[0].x = line_normalized_white[i][0][0]
                w_seg.pixels_normalized[0].y = line_normalized_white[i][0][1]
                w_seg.pixels_normalized[1].x = line_normalized_white[i][0][2]
                w_seg.pixels_normalized[1].y = line_normalized_white[i][0][3]

                # add the segment to list
                w_list.append(w_seg)

            # add the resulting list to the resulting List
            resultList.segments.extend(w_list)

        #check to make sure yellow line is detected
        if (lines_yellow is not None):
            # normalize the list of yellow lines
            line_normalized_yellow = (lines_yellow + arr_cutoff) * arr_ratio
            y_list = []
            #iterate through each yellow line
            for i in range(len(line_normalized_yellow)):

                #create a single segment to store values
                y_seg = Segment()
                y_seg.color = Segment.YELLOW

                #store values x0, y0, x1, y1 to this segment
                y_seg.pixels_normalized[0].x = line_normalized_yellow[i][0][0]
                y_seg.pixels_normalized[0].y = line_normalized_yellow[i][0][1]
                y_seg.pixels_normalized[1].x = line_normalized_yellow[i][0][2]
                y_seg.pixels_normalized[1].y = line_normalized_yellow[i][0][3]

                # add this segment to the list of yellow segments
                y_list.append(y_seg)

            # finally, add the list to the resulting List
            resultList.segments.extend(y_list)

        # publish the resulting list of segments to a topic received by ground_detection_node
        self.pub_seglist.publish(resultList)
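
As a quick sanity check on the normalization above, one made-up Hough line mapped through (line + arr_cutoff) * arr_ratio:

    image_size = (160, 120)
    offset = 40
    arr_cutoff = np.array([0, offset, 0, offset])
    arr_ratio = np.array([1. / image_size[1], 1. / image_size[0],
                          1. / image_size[1], 1. / image_size[0]])

    line = np.array([80, 20, 120, 60])      # made-up x1, y1, x2, y2 in the cropped image
    print((line + arr_cutoff) * arr_ratio)  # approx. [0.667 0.375 1.    0.625]

The offset re-adds the 40 rows removed by the crop to the y values (indices 1 and 3) before scaling, so the normalized coordinates refer to the full resized image.
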
    def toSegmentMsg(self, lines, normals, color):
        # Q-learning over a 7x15 occupancy grid built from each detected line.
        # (Converted from MATLAB-style pseudocode; requires numpy as np and random.)
        qtable = np.zeros((7, 15, 3))  # Q-values for 3 actions: forward, left, right
        round1 = 0

        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color

            # rasterize the line onto the grid (indices clamped to stay in range)
            map_seg = np.zeros((7, 15))
            if x2 != x1:
                for i in range(int(round(x1 * 15)), int(round(x2 * 15))):
                    row = int(round(i * (y2 - y1) / (x2 - x1)))
                    map_seg[min(max(row, 0), 6), min(max(i, 0), 14)] = 1

            # find the edges (s, e) of the occupied span in the last row
            s, e = 0, 14
            t_pre = map_seg[6, 0]
            for i in range(15):
                t = map_seg[6, i]
                if t < t_pre:
                    s = i
                elif t > t_pre:
                    e = i
                t_pre = t

            goal_x = 6
            goal_y = (s + e) // 2

            # run up to 50 Q-learning episodes (round1 persists across lines)
            while round1 < 50:
                map_matrix = map_seg.copy()
                round1 += 1
                position_x = 2
                position_y = 8
                count = 0
                while not (position_x == goal_x and position_y == goal_y):
                    a = 0.9  # learning rate
                    b = 0.8  # discount factor
                    s, e = 0, 14
                    t_pre = map_seg[position_x, 0]
                    for i in range(15):
                        t = map_seg[position_x, i]
                        if t < t_pre:
                            s = i
                        elif t > t_pre:
                            e = i
                        t_pre = t

                    # reward favours staying centred between the span edges
                    dis = (position_y - s) - (e - position_y)
                    if dis <= 0:
                        reward = 5 * dis
                    else:
                        reward = -5 * dis
                    count += 1

                    # epsilon-greedy-like action choice (actions are 0-based here)
                    rand_action = random.randint(0, 2)
                    q_here = qtable[position_x, position_y, :]
                    max_index = int(np.argmax(q_here))
                    if q_here[rand_action] >= q_here[max_index]:
                        action = rand_action
                    else:
                        action = max_index
                    map_matrix[position_x, position_y] = count

                    pre_position_x = position_x
                    pre_position_y = position_y

                    if action == 0:    # move forward (next row)
                        position_x = pre_position_x + 1
                    elif action == 1:  # move left
                        position_y = pre_position_y - 1
                    elif action == 2:  # move right
                        position_y = pre_position_y + 1

                    # keep the agent on the 7x15 grid
                    position_x = min(max(position_x, 0), 6)
                    position_y = min(max(position_y, 0), 14)

                    # hitting the rasterized line is penalised and the move is undone
                    if map_seg[position_x, position_y] == 1:
                        position_x = pre_position_x
                        position_y = pre_position_y
                        reward = -100
                        b = 0

                    if position_x == goal_x and position_y == goal_y:
                        reward = 100
                        b = 0

                    max_qtable = np.max(qtable[position_x, position_y, :])

                    # standard Q-learning update
                    old_q = qtable[pre_position_x, pre_position_y, action]
                    new_q = old_q + a * (reward + b * max_qtable - old_q)
                    qtable[pre_position_x, pre_position_y, action] = new_q

                    # recover first and last visited cells as the new segment endpoints
                    for i in range(15):
                        for j in range(7):
                            if map_matrix[j, i] != 0:
                                x1 = i / 15.
                                y1 = j / 7.
                                break

                    for i in reversed(range(15)):
                        for j in reversed(range(7)):
                            if map_matrix[j, i] != 0:
                                x2 = i / 15.
                                y2 = j / 7.
                                break

            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList