Example 1
    def nextPathIndex(self):
        self.pathIndex += 1
        if self.pathIndex >= len(self.path):
            ## End of the path
            position = Point(self.pose[0], self.pose[1], 0)
            q = tf.transformations.quaternion_from_euler(0, 0, self.pose[2])
            orientation = Quaternion(q[0], q[1], q[2], q[3])
            finalPose = Pose(position, orientation)

            self.server.set_succeeded(FollowPathResult(finalPose))
            cmd = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))  ## Stop
            self.wheel_cmd_publisher.publish(cmd)
            self.done = True
            rospy.loginfo('Success')
            return

        targetPose = self.path[self.pathIndex]
        prevPose = self.path[self.pathIndex - 1]
        p1 = np.array([prevPose.position.x, prevPose.position.y])
        p2 = np.array([targetPose.position.x, targetPose.position.y])
        self.pathLine = Line(p1, p2)
        self.endLine = Line(p2, p2 + self.pathLine.normal())

        if self.pathLine.length() < EPSILON:
            self.nextPathIndex()
            return

        headingErr = self.getHeadingError()
        if abs(headingErr) > math.pi / 2:
            self.direction = -1
        else:
            self.direction = 1
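
Example 1 never shows the Line class it drives: pathLine needs length() and normal(), and endLine is the perpendicular through the segment's endpoint, presumably used to detect when the waypoint has been passed. A minimal sketch of that assumed interface (the class name and method names come from the snippet; the implementation is a guess):

import numpy as np

class Line:
    """Hypothetical two-point segment matching the calls in Example 1."""

    def __init__(self, p1, p2):
        self.p1 = np.asarray(p1, dtype=float)
        self.p2 = np.asarray(p2, dtype=float)

    def length(self):
        return float(np.linalg.norm(self.p2 - self.p1))

    def normal(self):
        # Unit vector perpendicular to the segment direction.
        d = (self.p2 - self.p1) / self.length()
        return np.array([-d[1], d[0]])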
Example 2
def smoothen_over_time(lane_lines):
    """
    Smooth the lane-line inference over a window of frames and return the averaged lines.
    """
    avg_left_lane = np.zeros((len(lane_lines), 4))
    avg_right_lane = np.zeros((len(lane_lines), 4))

    for t in range(0, len(lane_lines)):
        avg_left_lane[t] += lane_lines[t][0].get_coords()
        avg_right_lane[t] += lane_lines[t][1].get_coords()

    return Line(*np.mean(avg_left_lane, axis=0)), Line(
        *np.mean(avg_right_lane, axis=0))
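
smoothen_over_time assumes each buffered frame holds a (left, right) pair of Line objects whose get_coords() returns the four endpoint coordinates, and that a Line can be rebuilt from those four numbers. A minimal sketch of that assumed interface, which also matches the slope/bias attributes used in Examples 4 and 9:

import numpy as np

class Line:
    """Hypothetical segment built from two endpoints, as Examples 2, 4 and 9 assume."""

    def __init__(self, x1, y1, x2, y2):
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
        # Slope and intercept of y = slope * x + bias; the epsilon guards
        # against vertical segments.
        self.slope = (y2 - y1) / (x2 - x1 + np.finfo(float).eps)
        self.bias = y1 - self.slope * x1

    def get_coords(self):
        return np.array([self.x1, self.y1, self.x2, self.y2])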
Example 3
def findBookBoundaries(image):
    """
    Finds all lines that could be book boundaries using Canny Edge Detection and Hough Line Transform
    """
    img_grey = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    img_downsampled = cv.pyrDown(img_grey)  # Downsample - scale factor 2
    img_canny_edge = cv.Canny(img_downsampled, 50, 50)

    hough_lines = cv.HoughLines(image=img_canny_edge,
                                rho=1,
                                theta=np.pi / 180,
                                threshold=100)

    if hough_lines is None:
        return []

    boundary_lines = []
    for hough_line in hough_lines:
        rho = hough_line[0][0]
        theta = hough_line[0][1]

        # Keep only lines that are vertical or almost vertical
        if abs(theta) < np.pi / 20 or abs(theta) > 19 * np.pi / 20:
            boundary_lines.append(
                Line(theta, 2 * rho)
            )  # Rho is multiplied by 2 as the image used for detecting the lines is downsampled
    return boundary_lines
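
A possible way to drive Example 3 ('books.jpg' is a placeholder path, and Line(theta, rho) is assumed to be a polar-form line type):

import cv2 as cv

image = cv.imread('books.jpg')  # placeholder input image
for boundary in findBookBoundaries(image):
    print(boundary)  # one near-vertical polar-form Line per detected edge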
Example 4
def get_lane_lines(image, solid_lines=True):
    gray = grayscale(image)
    blur_img = gaussian_blur(gray, 5)

    edges = canny(blur_img, 255 * 1 / 3.0, 255)

    roi = region_of_interest(edges)

    detected_lines = hough_lines(img=roi,
                                 rho=2,
                                 theta=np.pi / 180,
                                 threshold=1,
                                 min_line_len=15,
                                 max_line_gap=5)

    detected_lines = [
        Line(l[0][0], l[0][1], l[0][2], l[0][3]) for l in detected_lines
    ]

    if solid_lines:
        candidate_lines = []

        for line in detected_lines:
            # Keep only lines whose absolute slope is between 0.5 and 1.2
            # (roughly 27 to 50 degrees), plausible for lane markings
            if 0.5 <= np.abs(line.slope) <= 1.2:
                candidate_lines.append(line)

        lane_lines = compute_lane_from_candidates(candidate_lines, gray.shape)
    else:
        lane_lines = detected_lines

    return lane_lines
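
get_lane_lines relies on a hough_lines helper that is not shown. Given the keyword arguments and how the result is unpacked ([[x1, y1, x2, y2]] rows), it is plausibly a thin wrapper over cv2.HoughLinesP, sketched here as an assumption:

import cv2
import numpy as np

def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    # Hypothetical wrapper: returns an array of [[x1, y1, x2, y2]] segments.
    return cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                           minLineLength=min_line_len, maxLineGap=max_line_gap)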
Example 5
    def init_line(self):
        """Update the line."""
        self.line = Line(self.width, self.height,
                         self.input_path)  # used for the algorithms in utils
        self.line.get_canvas(self.work_input_cv)
        self.tool_dist_etr.delete(0, tk.END)
        self.tool_dist_etr.insert(0, str(self.line.focal_length))
        self.tool_dist_lb3.config(text='1')
Example 6
    def findTarget(self):
        v_heading = np.array(
            [math.cos(self.robot_theta),
             math.sin(self.robot_theta)])
        v = np.array(
            [math.cos(self.pallet_theta),
             math.sin(self.pallet_theta)])
        if np.dot(v, v_heading) > 0:
            self.target_position = self.pallet_position - (v * ALIGN_DISTANCE)
            self.target_theta = self.pallet_theta
            self.target_line = Line(self.target_position,
                                    self.target_position - v)
        else:
            self.target_position = self.pallet_position + (v * ALIGN_DISTANCE)
            self.target_theta = self.pallet_theta + math.pi
            self.target_line = Line(self.target_position,
                                    self.target_position + v)
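
The sign of np.dot(v, v_heading) in Examples 6 and 8 tells whether the pallet's axis points within 90 degrees of the robot's heading; the target is then placed ALIGN_DISTANCE away on whichever side the robot can approach head-on. A small numeric check of that test (the angles here are made up for illustration):

import math
import numpy as np

robot_theta = 0.0                  # robot facing +x
pallet_theta = math.radians(150)   # pallet axis pointing mostly away
v_heading = np.array([math.cos(robot_theta), math.sin(robot_theta)])
v = np.array([math.cos(pallet_theta), math.sin(pallet_theta)])
print(np.dot(v, v_heading) > 0)    # False: the "+ v" branch is taken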
Example 7
    def get_all_next_slps(self, slp):
        all_next_slps = set()
        for i in range(len(slp)):
            for j in range(len(slp)):
                next_slps = [
                    slp.deepcopy() for _ in range(2 if self.monotone else 3)
                ]
                next_slps[0].add(Line(i, j, '*'))
                next_slps[1].add(Line(i, j, '+'))
                if not self.monotone:
                    next_slps[2].add(Line(i, j, '-'))
                # Filter out negative SLPs and SLPs whose new value duplicates
                # an earlier line's value
                next_slps = {
                    s for s in next_slps
                    if s.value > 0 and s.value not in s.values[:-1]
                }
                all_next_slps.update(next_slps)
        return all_next_slps
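
Example 7, together with the constructor shown in Example 12, suggests an SLP (straight-line program) type whose lines combine two earlier values with *, + or -, tracking the value history and the final value, and supporting deepcopy() and set membership. A minimal sketch of that assumed interface:

import copy

class Line:
    """One SLP instruction: combine the values at indices i and j with op."""

    def __init__(self, i, j, op):
        self.i, self.j, self.op = i, j, op

class SLP:
    OPS = {'*': lambda a, b: a * b,
           '+': lambda a, b: a + b,
           '-': lambda a, b: a - b}

    def __init__(self):
        self.lines = []
        self.values = [1]   # line 0 is the constant 1
        self.value = 1

    def __len__(self):
        return len(self.values)

    def deepcopy(self):
        return copy.deepcopy(self)

    def add(self, line):
        new = self.OPS[line.op](self.values[line.i], self.values[line.j])
        self.lines.append(line)
        self.values.append(new)
        self.value = new

    # Hashing on the value history lets get_all_next_slps deduplicate
    # programs inside a set.
    def __hash__(self):
        return hash(tuple(self.values))

    def __eq__(self, other):
        return self.values == other.values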
Example 8
    def findTarget(self):
        pos = self.targetPose.position
        self.pallet_position = np.array([pos.x, pos.y])
        q = self.targetPose.orientation
        euler = tf.transformations.euler_from_quaternion([q.x, q.y, q.z, q.w])
        self.pallet_theta = euler[2]

        v_heading = np.array(
            [math.cos(self.robot_theta),
             math.sin(self.robot_theta)])
        v = np.array(
            [math.cos(self.pallet_theta),
             math.sin(self.pallet_theta)])
        if np.dot(v, v_heading) > 0:
            self.target_position = self.pallet_position - (v * ALIGN_DISTANCE)
            self.target_theta = self.pallet_theta
            self.target_line = Line(self.target_position,
                                    self.target_position - v)
        else:
            self.target_position = self.pallet_position + (v * ALIGN_DISTANCE)
            self.target_theta = self.pallet_theta + math.pi
            self.target_line = Line(self.target_position,
                                    self.target_position + v)
Example 9
def compute_lane_from_candidates(lines, img_shape):
    """
    Separate the candidate lines into left and right lanes and reduce each
    side to a single median line. In image coordinates (y grows downward)
    the left lane has negative slope and the right lane positive slope.
    """
    pos_lines = [l for l in lines if l.slope > 0]
    neg_lines = [l for l in lines if l.slope < 0]

    # Left lane: median of the negative-slope candidates, drawn from the
    # left border (x = 0, y = bias) to its x-intercept on the top edge,
    # i.e. x = -bias / slope at y = 0
    neg_bias = np.median([l.bias for l in neg_lines]).astype(int)
    neg_slope = np.median([l.slope for l in neg_lines])
    x1, y1 = 0, neg_bias
    x2, y2 = -np.int32(np.round(neg_bias / neg_slope)), 0
    left_lane = Line(x1, y1, x2, y2)

    # Right lane: median of the positive-slope candidates, drawn from the
    # left border down to the bottom edge y = img_shape[0]
    right_bias = np.median([l.bias for l in pos_lines]).astype(int)
    right_slope = np.median([l.slope for l in pos_lines])
    x1, y1 = 0, right_bias
    x2, y2 = np.int32(np.round(
        (img_shape[0] - right_bias) / right_slope)), img_shape[0]
    right_lane = Line(x1, y1, x2, y2)

    return left_lane, right_lane
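
compute_lane_from_candidates silently assumes both slope signs are present: np.median of an empty list is nan, which then corrupts the integer casts above instead of raising. One defensive wrapper a caller might add (lanes_or_none is a hypothetical name):

def lanes_or_none(lines, img_shape):
    # Only compute lanes when both a positive- and a negative-slope
    # candidate exist; otherwise the medians would be nan.
    if any(l.slope > 0 for l in lines) and any(l.slope < 0 for l in lines):
        return compute_lane_from_candidates(lines, img_shape)
    return None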
Example 10
                         args=(monthly, f2_label.set)).start()
    except Exception as e:
        f2_label.set('')
        return


f2_lock = threading.Lock()
f2_btn = tk.StringVar(value='Convert')
tk.Button(window, textvariable=f2_btn, command=do_pivottable).pack()

f2_label = tk.StringVar()
tk.Label(window, textvariable=f2_label).pack()

ttk.Separator(window, orient='horizontal').pack(fill=tk.X)


# Author info
with Line(window) as l:
    tk.Label(l, text='Project URL:').pack(side='left')
    url = tk.Label(l, text='https://github.com/lshpku/rabbiter')
    url.pack(side='left')


def open_url(event):
    webbrowser.open('https://github.com/lshpku/rabbiter')


url.bind("<Button-1>", open_url)

window.mainloop()
Example 11
    ## Where start_second and end_second are integer values representing the start and end of the subclip
    ## You may also uncomment the following line for a subclip of the first 5 seconds
    ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
    video_fn = 'project_video.mp4'
    # video_fn = 'challenge_video.mp4'
    # video_fn = 'harder_challenge_video.mp4'
    video_output_path = os.path.join(video_output_dir, video_fn)
    # clip1 = VideoFileClip(os.path.join('../', video_fn)).subclip(0,2)
    clip1 = VideoFileClip(os.path.join('../', video_fn))
    white_clip = clip1.fl_image(
        process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(video_output_path, audio=False)


if __name__ == '__main__':
    lane_left, lane_right = Line(buffer_len=20), Line(buffer_len=20)
    frame_idx = -1
    nwindows = 9
    margin = 100
    minpix = 50
    thresh_gradx = (20, 100)
    thresh_grady = (20, 100)
    thresh_mag = (30, 100)
    thresh_dir = (0.7, 1.3)
    thresh_s_channel = (170, 255)
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
    ret, mtx, dist, rvecs, tvecs = calibrate(is_save=False)
    main()
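
The ym_per_pix and xm_per_pix constants above convert polynomial lane fits from pixel space to meters, typically to report lane curvature. A sketch of that conversion using the standard radius-of-curvature formula (curvature_meters is a hypothetical helper, not part of this project):

import numpy as np

def curvature_meters(xs, ys, ym_per_pix=30 / 720, xm_per_pix=3.7 / 700):
    # Refit the lane pixels in metric units, then evaluate
    # R = (1 + (2*A*y + B)^2)^(3/2) / |2*A| at the bottom of the image.
    fit = np.polyfit(np.asarray(ys) * ym_per_pix, np.asarray(xs) * xm_per_pix, 2)
    y_eval = np.max(ys) * ym_per_pix
    return (1 + (2 * fit[0] * y_eval + fit[1]) ** 2) ** 1.5 / abs(2 * fit[0])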
Example 12
    def __init__(self, minimized=False):
        self.minimized = minimized
        if not self.minimized:
            self.lines = [Line("DUMMY", "LINE", '+')]
        self.values = [1]
        self.value = 1
Example 13
for argument in range(2):
    if argument == 0:
        H, W, _ = im.shape
        lines = []
        with open(join(label_path, label_file)) as f:
            data = f.readlines()
            nums = len(data)
            stastic[nums] += 1
            for line in data:
                data1 = line.strip().split(',')
                if len(data1) <= 4:
                    continue
                data1 = [int(float(x)) for x in data1]
                # Skip degenerate lines whose endpoints coincide
                if data1[1] == data1[3] and data1[0] == data1[2]:
                    continue
                line = Line([data1[1], data1[0], data1[3], data1[2]])
                lines.append(line)

        annotation = LineAnnotation(size=[H, W], divisions=args.num_directions,
                                    lines=lines)
    else:
        im = cv2.flip(im, 1)
        filename = filename + '_flip'
        H, W, _ = im.shape
        lines = []
        with open(join(label_path, label_file)) as f:
            data = f.readlines()
            for line in data:
                data1 = line.strip().split(',')
                if len(data1) <= 4:
                    continue
                data1 = [int(float(x)) for x in data1]
Example 14
    for argument in range(2):
        if argument == 0:
            lines = []
            with open(join(label_path, label_file)) as f:
                data = f.readlines()[0].split(' ')
                nums = int(data[0])
                stastic[nums] += 1
                total_nums += nums
                
                if nums == 0:
                    print("Warning: image has no semantic line: %s" % filename)

                for i in range(nums):
                    y1, x1 = check(int(data[i*4+2]), int(data[i*4+1]), H, W)
                    y2, x2 = check(int(data[i*4+4]), int(data[i*4+3]), H, W)
                    line = Line([y1, x1, y2, x2])
                    angle = line.angle()
                    angle_stastic[int((angle / np.pi + 0.5) * 180)] += 1
                    total_lines += 1
                    line.rescale(scale_H, scale_W)
                    lines.append(line)
            
            annotation = LineAnnotation(size=[args.fixsize, args.fixsize], lines=lines)
        else:
            im = cv2.flip(im, 1)
            filename = filename + '_flip'
            lines = []
            with open(join(label_path, label_file)) as f:
                data = f.readlines()[0].split(' ')
                for i in range(int(data[0])):
                    y1, x1 = check(int(data[i*4+2]), W-1-int(data[i*4+1]), H, W)
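
Examples 13 and 14 both call a check() helper that is not shown; from its usage it appears to clamp a (y, x) point into the image bounds. A minimal sketch under that assumption:

def check(y, x, H, W):
    # Hypothetical: clamp the point into [0, H-1] x [0, W-1].
    return min(max(y, 0), H - 1), min(max(x, 0), W - 1)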
Example 15
def test_line_length():
    from utils import Line
    points = [0, 0, 3, 4]
    line = Line(points)
    length = line.get_length()
    assert length == 5.0
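
The test in Example 15 pins down one concrete Line variant: it is built from a flat [x1, y1, x2, y2] list and get_length() returns the Euclidean distance (5.0 for a 3-4-5 triangle). A minimal implementation that would satisfy the test, assuming utils.Line has no other obligations:

import math

class Line:
    def __init__(self, points):
        self.x1, self.y1, self.x2, self.y2 = points

    def get_length(self):
        return math.hypot(self.x2 - self.x1, self.y2 - self.y1)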
Example 16
from utils import Line
from moviepy.editor import VideoFileClip

if __name__ == '__main__':
    line = Line()
    raw_clip = VideoFileClip('project_video.mp4')
    processed_clip = raw_clip.fl_image(line.process_pipeline)
    processed_clip.write_videofile('processed_project_video.mp4', audio=False)
Example 17
    def find_initial_guess(self, coordinates, robots_pose):
        """Find the initial guess using a Monte Carlo-based approach.

        Arguments:
            coordinates (float[]): points coordinates
            robots_pose (float[][]): the pose of each robot
        """
        # Get the box size
        box_params = rospy.get_param('box')
        box_length = box_params['length']
        box_width = box_params['width']
        max_length = max(box_length, box_width)

        # Get info on the uncertainty area
        rospy.wait_for_service('uncertainty_area_get_info')
        info = rospy.ServiceProxy('uncertainty_area_get_info',
                                  GetUncertaintyAreaInfo)
        try:
            response = info(0)
        except rospy.ServiceException as e:
            rospy.logerr('uncertainty_area_get_info call failed: %s', e)
            return
        unc_area = BoxGeometry(2 * max_length, max_length,
                               [response.pose[0], response.pose[1]],
                               response.pose[2])
        # Robot that found the box
        discoverer_id = response.discoverer_id
        discoverer_pose = robots_pose[discoverer_id]

        # Evaluate the segment between the two other robots
        indexes = [0, 1, 2]
        indexes.remove(discoverer_id)
        poses = [robots_pose[i] for i in indexes]

        segment_between = Segment([poses[0][0], poses[0][1]],
                                  [poses[1][0], poses[1][1]])
        half_way = np.array(segment_between.point(0.5))
        segment_length = segment_between.length()

        # Find the length of the side on which the other robots are attached
        robot_radius = float(rospy.get_param('robot_radius'))
        effective_length = segment_length - 2 * robot_radius

        side_length = box_width
        if abs(effective_length - box_width) < abs(effective_length -
                                                   box_length):
            side_length = box_length

        # Find an uncertainty area for the center of the box
        direction = np.array(
            [np.cos(discoverer_pose[2]),
             np.sin(discoverer_pose[2])])
        line_half_way = Line(half_way.tolist(),
                             (half_way + direction).tolist())
        side0 = Line(unc_area.vertex(0), unc_area.vertex(1))
        intersection = line_half_way.intersect(side0)
        center_guess = (np.array(intersection) +
                        (side_length / 2) * direction).tolist()
        theta_guess = discoverer_pose[2]
        if side_length == box_width:
            theta_guess -= np.pi / 2

        # Estimate the initial guess
        min_value = float('inf')
        random.seed()
        theta = angle_normalization(theta_guess)
        for i in range(10000):
            x_c = random.gauss(center_guess[0], 0.01)
            y_c = random.gauss(center_guess[1], 0.01)

            estimated_state = [x_c, y_c, theta]

            new_value = self.__loss_function(estimated_state, coordinates)
            if new_value < min_value:
                min_value = new_value
                self._state = estimated_state

        print(self._state)
        print(center_guess)
        print(theta_guess)
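
angle_normalization in Example 17 is another undefined helper; the usual implementation wraps an angle into (-pi, pi], sketched here as an assumption:

import math

def angle_normalization(theta):
    # Hypothetical: wrap theta into (-pi, pi].
    return math.atan2(math.sin(theta), math.cos(theta))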