Code example #1
    def nextPathIndex(self):
        self.pathIndex += 1
        if (self.pathIndex >= len(self.path)):
            ## End of the path
            position = Point(self.pose[0], self.pose[1], 0)
            q = tf.transformations.quaternion_from_euler(0, 0, self.pose[2])
            orientation = Quaternion(q[0], q[1], q[2], q[3])
            finalPose = Pose(position, orientation)

            self.server.set_succeeded(FollowPathResult(finalPose))
            cmd = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))  ## Stop
            self.wheel_cmd_publisher.publish(cmd)
            self.done = True
            rospy.loginfo('Success')
            return

        targetPose = self.path[self.pathIndex]
        prevPose = self.path[self.pathIndex - 1]
        p1 = np.array([prevPose.position.x, prevPose.position.y])
        p2 = np.array([targetPose.position.x, targetPose.position.y])
        self.pathLine = Line(p1, p2)
        self.endLine = Line(p2, p2 + self.pathLine.normal())

        if (self.pathLine.length() < EPSILON):
            self.nextPathIndex()
            return

        headingErr = self.getHeadingError()
        if (abs(headingErr) > math.pi / 2):
            self.direction = -1
        else:
            self.direction = 1
Code example #2
    def init_line(self):
        """Refresh self.line."""
        self.line = Line(self.width, self.height,
                         self.input_path)  # used by the algorithms in utils
        self.line.get_canvas(self.work_input_cv)
        self.tool_dist_etr.delete(0, tk.END)
        self.tool_dist_etr.insert(0, str(self.line.focal_length))
        self.tool_dist_lb3.config(text='1')
Code example #3
def smoothen_over_time(lane_lines):
    """
    Smooth the lane line inference over a window of frames and returns the average lines.
    """
    avg_left_lane = np.zeros((len(lane_lines), 4))
    avg_right_lane = np.zeros((len(lane_lines), 4))

    for t in range(0, len(lane_lines)):
        avg_left_lane[t] += lane_lines[t][0].get_coords()
        avg_right_lane[t] += lane_lines[t][1].get_coords()

    return Line(*np.mean(avg_left_lane, axis=0)), Line(
        *np.mean(avg_right_lane, axis=0))
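A hedged usage sketch of how smoothen_over_time is typically driven: the lane_history buffer, its window size, and the frames iterable below are assumptions for illustration, not taken from the project.

from collections import deque

# Hypothetical rolling window holding the (left, right) lane pair for the last 10 frames.
lane_history = deque(maxlen=10)
for frame in frames:  # `frames` is an assumed iterable of video frames
    lane_history.append(get_lane_lines(frame, solid_lines=True))
    left_avg, right_avg = smoothen_over_time(lane_history)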
Code example #4
def get_lane_lines(image, solid_lines=True):
    gray = grayscale(image)
    blur_img = gaussian_blur(gray, 5)

    y, x = image.shape[:2]

    edges = canny(blur_img, 255 / 3.0, 255)  # run Canny on the blurred image, not the raw grayscale

    roi = region_of_interest(edges)

    detected_lines = hough_lines(img=roi,
                                 rho=2,
                                 theta=np.pi / 180,
                                 threshold=1,
                                 min_line_len=15,
                                 max_line_gap=5)

    detected_lines = [
        Line(l[0][0], l[0][1], l[0][2], l[0][3]) for l in detected_lines
    ]

    if solid_lines:
        candidate_lines = []

        for line in detected_lines:
            # keep only lines whose |slope| is between 0.5 and 1.2 (roughly 27 to 50 degrees)
            if 0.5 <= np.abs(line.slope) <= 1.2:
                candidate_lines.append(line)

        lane_lines = compute_lane_from_candidates(candidate_lines, gray.shape)
    else:
        lane_lines = detected_lines

    return lane_lines
Code example #5
def findBookBoundaries(image):
    """
    Finds all lines that could be book boundaries using Canny Edge Detection and Hough Line Transform
    """
    img_grey = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    img_downsampled = cv.pyrDown(img_grey)  # Downsample - scale factor 2
    img_canny_edge = cv.Canny(img_downsampled, 50, 50)

    hough_lines = cv.HoughLines(image=img_canny_edge,
                                rho=1,
                                theta=np.pi / 180,
                                threshold=100)

    if hough_lines is None:
        return []

    boundary_lines = []
    for hough_line in hough_lines:
        rho = hough_line[0][0]
        theta = hough_line[0][1]

        # Keep only lines that are vertical or almost vertical
        if abs(theta) < np.pi / 20 or abs(theta) > 19 * np.pi / 20:
            boundary_lines.append(
                Line(theta, 2 * rho)
            )  # Rho is multiplied by 2 as the image used for detecting the lines is downsampled
    return boundary_lines
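Line(theta, 2 * rho) above stores each boundary in Hough normal form. As a hedged sketch of turning such a line back into two drawable endpoints (hough_to_endpoints is a hypothetical helper, not part of this project), the standard OpenCV recipe is:

import numpy as np

def hough_to_endpoints(theta, rho, half_len=1000):
    # Foot of the perpendicular from the origin onto the line
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    # Step half_len pixels along the line direction (-sin, cos) in both directions
    p1 = (int(x0 - half_len * b), int(y0 + half_len * a))
    p2 = (int(x0 + half_len * b), int(y0 - half_len * a))
    return p1, p2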
Code example #6
    def findTarget(self):
        v_heading = np.array(
            [math.cos(self.robot_theta),
             math.sin(self.robot_theta)])
        v = np.array(
            [math.cos(self.pallet_theta),
             math.sin(self.pallet_theta)])
        if (np.dot(v, v_heading) > 0):
            self.target_position = self.pallet_position - (v * ALIGN_DISTANCE)
            self.target_theta = self.pallet_theta
            self.target_line = Line(self.target_position,
                                    self.target_position - v)
        else:
            self.target_position = self.pallet_position + (v * ALIGN_DISTANCE)
            self.target_theta = (self.pallet_theta + math.pi)
            self.target_line = Line(self.target_position,
                                    self.target_position + v)
Code example #7
    def get_all_next_slps(self, slp):
        all_next_slps = set()
        for i in range(len(slp)):
            for j in range(len(slp)):
                next_slps = [
                    slp.deepcopy() for _ in range(2 if self.monotone else 3)
                ]
                next_slps[0].add(Line(i, j, '*'))
                next_slps[1].add(Line(i, j, '+'))
                if not self.monotone:
                    next_slps[2].add(Line(i, j, '-'))
                # filter out negative slps and slps whose last line repeats an earlier value
                next_slps = {
                    s
                    for s in next_slps
                    if s.value > 0 and s.value not in s.values[:-1]
                }
                all_next_slps.update(next_slps)
        return all_next_slps
Code example #8
File: EBot.py  Project: kuiqejw/digital-world
    def is_aligned_to(self, target):
        dir_line = Line.from_points(self.back, self.front)

        is_front_closer = self.back.dist_to(target) > self.front.dist_to(
            target)

        print('is front closer: {}'.format(is_front_closer))

        print('\n\nDIRLINE CONTAINS TARGET: {}\n\n'.format(
            dir_line.contains(target)))
        return dir_line.contains(target) and is_front_closer
Code example #9
def compute_lane_from_candidates(lines, img_shape):
    """
      Left lane = positive gradients/slope
      Right Lane = negative gradients/slope
      Below we are separating the left and right lane
    """
    pos_line = [l for l in lines if l.slope > 0]
    neg_line = [l for l in lines if l.slope < 0]

    neg_bias = np.median([l.bias for l in neg_line]).astype(int)
    neg_slope = np.median([l.slope for l in neg_line])
    x1, y1 = 0, neg_bias
    x2, y2 = -np.int32(np.round(neg_bias / neg_slope)), 0
    left_lane = Line(x1, y1, x2, y2)

    right_bias = np.median([l.bias for l in pos_line]).astype(int)
    right_slope = np.median([l.slope for l in pos_line])
    x1, y1 = 0, right_bias
    x2, y2 = np.int32(np.round(
        (img_shape[0] - right_bias) / right_slope)), img_shape[0]
    right_lane = Line(x1, y1, x2, y2)

    return left_lane, right_lane
Code example #10
    def findTarget(self):
        pos = self.targetPose.position
        self.pallet_position = np.array([pos.x, pos.y])
        q = self.targetPose.orientation
        euler = tf.transformations.euler_from_quaternion([q.x, q.y, q.z, q.w])
        self.pallet_theta = euler[2]

        v_heading = np.array(
            [math.cos(self.robot_theta),
             math.sin(self.robot_theta)])
        v = np.array(
            [math.cos(self.pallet_theta),
             math.sin(self.pallet_theta)])
        if (np.dot(v, v_heading) > 0):
            self.target_position = self.pallet_position - (v * ALIGN_DISTANCE)
            self.target_theta = self.pallet_theta
            self.target_line = Line(self.target_position,
                                    self.target_position - v)
        else:
            self.target_position = self.pallet_position + (v * ALIGN_DISTANCE)
            self.target_theta = (self.pallet_theta + math.pi)
            self.target_line = Line(self.target_position,
                                    self.target_position + v)
Code example #11
File: box.py  Project: xEnVrE/cooperative_transport
    def find_initial_guess(self, coordinates, robots_pose):
        """Find the initial guess using a montecarlo-based approach.

        Argument:
            coordinates (float[]): points coordinatesn
        """
        # Get the box size
        box_params = rospy.get_param('box')
        box_length = box_params['length']
        box_width = box_params['width']
        max_length = max(box_length, box_width)

        # Get info on the uncertainty area
        rospy.wait_for_service('uncertainty_area_get_info')
        info = rospy.ServiceProxy('uncertainty_area_get_info',
                                  GetUncertaintyAreaInfo)
        try:
            response = info(0)
        except rospy.ServiceException as e:
            rospy.logerr('uncertainty_area_get_info call failed: %s', e)
            return
        unc_area = BoxGeometry(2 * max_length, max_length,
                               [response.pose[0], response.pose[1]],
                               response.pose[2])
        # Robot that found the box
        discoverer_id = response.discoverer_id
        discoverer_pose = robots_pose[discoverer_id]

        # Evaluate the segment between the two other robots
        indexes = [0, 1, 2]
        indexes.remove(discoverer_id)
        poses = [robots_pose[i] for i in indexes]

        segment_between = Segment([poses[0][0], poses[0][1]],
                                  [poses[1][0], poses[1][1]])
        half_way = np.array(segment_between.point(0.5))
        segment_length = segment_between.length()

        # Find the length of the side on which the other robots are attached
        robot_radius = float(rospy.get_param('robot_radius'))
        effective_length = segment_length - 2 * robot_radius

        side_length = box_width
        if abs(effective_length - box_width) < abs(effective_length -
                                                   box_length):
            side_length = box_length

        # Find an uncertainty area for the center of the box
        direction = np.array(
            [np.cos(discoverer_pose[2]),
             np.sin(discoverer_pose[2])])
        line_half_way = Line(half_way.tolist(),
                             (half_way + direction).tolist())
        side0 = Line(unc_area.vertex(0), unc_area.vertex(1))
        intersection = line_half_way.intersect(side0)
        center_guess = (np.array(intersection) +
                        (side_length / 2) * direction).tolist()
        theta_guess = discoverer_pose[2]
        if side_length == box_width:
            theta_guess -= np.pi / 2

        # Estimate the initial guess
        min_value = float('inf')
        random.seed()
        theta = angle_normalization(theta_guess)
        for _ in range(10000):
            x_c = random.gauss(center_guess[0], 0.01)
            y_c = random.gauss(center_guess[1], 0.01)

            estimated_state = [x_c, y_c, theta]

            new_value = self.__loss_function(estimated_state, coordinates)
            if new_value < min_value:
                min_value = new_value
                self._state = estimated_state

        print(estimated_state)
        print(center_guess)
        print(theta_guess)
Code example #12
from utils import Line
from moviepy.editor import VideoFileClip

if __name__ == '__main__':
    line = Line()
    raw_clip = VideoFileClip('project_video.mp4')
    processed_clip = raw_clip.fl_image(line.process_pipeline)
    processed_clip.write_videofile('processed_project_video.mp4', audio=False)
Code example #13
File: test_utils.py  Project: wdzhong/vision
def test_line_length():
    from utils import Line
    points = [0, 0, 3, 4]
    line = Line(points)
    length = line.get_length()
    assert length == 5.0
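For reference, a minimal sketch of a Line class that would satisfy this test. This is an assumption for illustration only; the actual utils.Line classes used across these projects carry much more state (slope, bias, buffers, and so on).

import math

class Line:
    def __init__(self, points):
        # points is a flat [x1, y1, x2, y2] list, as in the test above
        self.x1, self.y1, self.x2, self.y2 = points

    def get_length(self):
        # Euclidean distance between the two endpoints
        return math.hypot(self.x2 - self.x1, self.y2 - self.y1)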
Code example #14
                         args=(monthly, f2_label.set)).start()
    except Exception as e:
        f2_label.set('')
        return


f2_lock = threading.Lock()
f2_btn = tk.StringVar(value='转换')
tk.Button(window, textvariable=f2_btn, command=do_pivottable).pack()

f2_label = tk.StringVar()
tk.Label(window, textvariable=f2_label).pack()

ttk.Separator(window, orient='horizontal').pack(fill=tk.X)


# Author info
with Line(window) as l:
    tk.Label(l, text='项目地址:').pack(side='left')
    url = tk.Label(l, text='https://github.com/lshpku/rabbiter')
    url.pack(side='left')


def open_url(event):
    webbrowser.open('https://github.com/lshpku/rabbiter')


url.bind("<Button-1>", open_url)

window.mainloop()
Code example #15
    ## Where start_second and end_second are integer values representing the start and end of the subclip
    ## You may also uncomment the following line for a subclip of the first 5 seconds
    ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
    video_fn = 'project_video.mp4'
    # video_fn = 'challenge_video.mp4'
    # video_fn = 'harder_challenge_video.mp4'
    video_output_path = os.path.join(video_output_dir, video_fn)
    # clip1 = VideoFileClip(os.path.join('../', video_fn)).subclip(0,2)
    clip1 = VideoFileClip(os.path.join('../', video_fn))
    white_clip = clip1.fl_image(
        process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(video_output_path, audio=False)


if __name__ == '__main__':
    lane_left, lane_right = Line(buffer_len=20), Line(buffer_len=20)
    frame_idx = -1
    nwindows = 9
    margin = 100
    minpix = 50
    thresh_gradx = (20, 100)
    thresh_grady = (20, 100)
    thresh_mag = (30, 100)
    thresh_dir = (0.7, 1.3)
    thresh_s_channel = (170, 255)
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
    ret, mtx, dist, rvecs, tvecs = calibrate(is_save=False)
    main()
Code example #16
    def __init__(self, minimized=False):
        self.minimized = minimized
        if not self.minimized:
            self.lines = [Line("DUMMY", "LINE", '+')]
        self.values = [1]
        self.value = 1
Code example #17
    for argument in range(2):
        if argument == 0:
            lines = []
            with open(join(label_path, label_file)) as f:
                data = f.readlines()[0].split(' ')
                nums = int(data[0])
                stastic[nums] += 1
                total_nums += nums
                
                if int(nums) == 0:
                    print("Warning: image has no semantic line : %s" % (filename))

                for i in range(nums):
                    y1, x1 = check(int(data[i*4+2]), int(data[i*4+1]), H, W)
                    y2, x2 = check(int(data[i*4+4]), int(data[i*4+3]), H, W)
                    line = Line([y1, x1, y2, x2])
                    angle = line.angle()
                    angle_stastic[int((angle / np.pi + 0.5) * 180)] += 1
                    total_lines += 1
                    line.rescale(scale_H, scale_W)
                    lines.append(line)
            
            annotation = LineAnnotation(size=[args.fixsize, args.fixsize], lines=lines)
        else:
            im = cv2.flip(im, 1)
            filename = filename + '_flip'
            lines = []
            with open(join(label_path, label_file)) as f:
                data = f.readlines()[0].split(' ')
                for i in range(int(data[0])):
                    y1, x1 = check(int(data[i*4+2]), W-1-int(data[i*4+1]), H, W)
Code example #18
class Tkwindow:
    """
    窗口类,用于生成主窗口,拖动并显示图片,选择算法参数等功能
    """
    def __init__(self, root):
        self.root = root  # parent window
        self.width = 600  # canvas width
        self.height = 450  # canvas height
        self.input_path = None  # path of the input image
        self.depth = None  # depth-value matrix
        self.result = False  # True once a depth map has been generated and can be saved
        self.line = None  # Line instance used for the computations in utils
        self.algorithm = {
            'FCRN': 'NYU_FCRN.ckpt',
            'MiDaS': 'model.pt',
            'MegaDepth': 'best_generalization_net_G.pth',
            'monodepth2': 'mono+stereo_1024x320'
        }  # weight file for each algorithm
        self.cmaps = [
            ('Perceptually Uniform Sequential',
             ['viridis', 'plasma', 'inferno', 'magma', 'cividis']),
            ('Sequential', [
                'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
                'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu', 'GnBu',
                'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'
            ]),
            ('Sequential (2)', [
                'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
                'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
                'hot', 'afmhot', 'gist_heat', 'copper'
            ]),
            ('Diverging', [
                'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu', 'RdYlBu',
                'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic'
            ]), ('Cyclic', ['twilight', 'twilight_shifted', 'hsv']),
            ('Qualitative', [
                'Pastel1', 'Pastel2', 'Paired', 'Accent', 'Dark2', 'Set1',
                'Set2', 'Set3', 'tab10', 'tab20', 'tab20b', 'tab20c'
            ]),
            ('Miscellaneous', [
                'flag', 'prism', 'ocean', 'gist_earth', 'terrain',
                'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix',
                'brg', 'gist_rainbow', 'rainbow', 'jet', 'turbo',
                'nipy_spectral', 'gist_ncar'
            ])
        ]  # colormap options for the depth map

        # Menu bar
        self.menu = tk.Menu(root)
        root.config(menu=self.menu)

        # Tool bar
        self.tool = tk.Frame(root)

        self.tool_depth = tk.LabelFrame(self.tool,
                                        text='深度估计',
                                        labelanchor='s')  # depth estimation frame
        self.tool_depth_cbb1 = ttk.Combobox(self.tool_depth, state='readonly')
        self.tool_depth_cbb2 = ttk.Combobox(self.tool_depth, state='readonly')

        self.tool_dist = tk.LabelFrame(self.tool, text='距离测量',
                                       labelanchor='s')  # distance measurement frame
        self.tool_dist_etr = tk.Entry(self.tool_dist)  # focal-length entry
        self.tool_dist_lb3 = tk.Label(self.tool_dist, text='1')  # scale readout

        self.tool_dist_alter = tk.LabelFrame(self.tool,
                                             text='距离修改',
                                             labelanchor='s')  # distance correction frame

        self.tool_visual = tk.LabelFrame(self.tool,
                                         text='可视化效果',
                                         labelanchor='s')  # visualization frame
        self.tool_visual_cbb = ttk.Combobox(self.tool_visual)

        # Working area (main view)
        self.work = tk.Frame(root)
        self.work_output_cv = tk.Canvas(self.work,
                                        width=self.width,
                                        height=self.height,
                                        bg='white')  # depth-map canvas
        self.work_input_cv = tk.Canvas(self.work,
                                       width=self.width,
                                       height=self.height,
                                       bg='white')  # input-image canvas

        # Status bar
        self.status = tk.Frame(root)
        self.status_message_lb1_ord = tk.Label(self.status, text='')  # point A coordinates
        self.status_message_lb1_dep = tk.Label(self.status, text='')  # point A depth
        self.status_message_lb2_ord = tk.Label(self.status, text='')  # point B coordinates
        self.status_message_lb2_dep = tk.Label(self.status, text='')  # point B depth
        self.status_message_dis = tk.Label(self.status, text='')  # distance value

    def init_menu(self):
        """
        初始化菜单栏
        """
        menu_file = tk.Menu(self.menu)  # file menu
        menu_file.add_command(label='打开', command=self.open_input)  # open an image
        menu_file.add_command(label='保存', command=self.save_output)  # save the image
        self.menu.add_cascade(label='文件', menu=menu_file)

        menu_help = tk.Menu(self.menu)  # help menu
        menu_help.add_command(label='说明')
        self.menu.add_cascade(label='帮助', menu=menu_help)

    def init_toolbar(self):
        """
        初始化工具栏
        """
        self.tool.pack(anchor='w')

        # Depth estimation frame
        self.tool_depth.pack(side='left', fill='y')

        tool_depth_lb1 = tk.Label(self.tool_depth, text='算法')  # algorithm
        tool_depth_lb1.grid(row=1, column=1)
        self.tool_depth_cbb1.grid(row=1, column=2)
        self.tool_depth_cbb1.bind('<<ComboboxSelected>>',
                                  self.select_weight)  # bind the matching weights once an algorithm is selected
        self.tool_depth_cbb1['values'] = list(self.algorithm.keys())

        tool_depth_lb2 = tk.Label(self.tool_depth, text='权重')  # weights
        tool_depth_lb2.grid(row=2, column=1)
        self.tool_depth_cbb2.grid(row=2, column=2)
        tool_depth_bt = tk.Button(self.tool_depth,
                                  text='生成',
                                  command=self.show_output)
        tool_depth_bt.grid(row=1, column=3, rowspan=2)

        # Distance measurement frame
        self.tool_dist.pack(side='left', fill='y')

        tool_dist_lb1 = tk.Label(self.tool_dist, text='焦距')  # focal length
        tool_dist_lb1.grid(row=1, column=1)
        self.tool_dist_etr.grid(row=1, column=2)  # focal-length entry
        self.tool_dist_etr.bind('<KeyRelease>', lambda event: self.line.
                                update_focal(self.tool_dist_etr))  # check that the input is numeric
        tool_dist_lb10 = tk.Label(self.tool_dist, text='(mm)')
        tool_dist_lb10.grid(row=1, column=3)
        tool_dist_lb2 = tk.Label(self.tool_dist, text='比例尺')  # scale
        tool_dist_lb2.grid(row=2, column=1)
        self.tool_dist_lb3.grid(row=2, column=2)  # scale readout
        # tool_dist_etr = tk.Entry(self.tool_dist, text='1')
        # tool_dist_etr.grid(row=2, column=2)

        # Distance correction frame
        self.tool_dist_alter.pack(side='left', fill='y')  # correct the distance

        tool_dist_alter_lb1 = tk.Label(self.tool_dist_alter, text='修正为')
        tool_dist_alter_lb1.grid(row=1, column=1)
        tool_dist_alter_etr = tk.Entry(self.tool_dist_alter)  # entry for the corrected distance
        tool_dist_alter_etr.grid(row=1, column=2)
        tool_dist_alter_etr.bind(
            '<KeyRelease>',
            lambda event: check_num(tool_dist_alter_etr))  # check that the input is numeric
        tool_dist_alter_lb2 = tk.Label(self.tool_dist_alter, text='(mm)')
        tool_dist_alter_lb2.grid(row=1, column=3)
        tool_dist_bt = tk.Button(self.tool_dist_alter,
                                 text='修改',
                                 command=lambda: self.alter_dis(
                                     tool_dist_alter_etr.get()))  # confirm button
        tool_dist_bt.grid(row=2, column=2)

        # Visualization frame
        self.tool_visual.pack(side='left', fill='y')

        tool_visual_lb = tk.Label(self.tool_visual, text='颜色')  # color
        tool_visual_lb.grid(row=1, column=1)
        self.tool_visual_cbb.grid(row=1, column=2)
        self.tool_visual_cbb.bind('<<ComboboxSelected>>', self.visual)
        self.tool_visual_cbb.bind('<Return>', self.visual)
        cmaps_list = []
        for _, cmap_list in self.cmaps:
            cmaps_list = cmaps_list + cmap_list
        self.tool_visual_cbb['values'] = cmaps_list
        self.tool_visual_cbb.current(0)

        tool_visual_lb = tk.Label(self.tool_visual, text='方向')  # orientation
        tool_visual_lb.grid(row=2, column=1)
        tool_visual_br = tk.Button(self.tool_visual,
                                   text='顺时针转动',
                                   command=self.br)
        tool_visual_br.grid(row=2, column=2)

    def init_work(self):
        """
        初始化工作区域
        """
        self.work.pack()

        self.work_input_cv.create_text(self.width / 2,
                                       self.height / 2,
                                       text='拖拽图片到此处',
                                       fill='grey',
                                       anchor='center')
        self.work_input_cv.pack(side='left')

        # Click and drag the mouse to measure distance
        self.work_input_cv.bind('<Button-1>',
                                lambda event: self.line.click(event, self))
        self.work_input_cv.bind("<B1-Motion>",
                                lambda event: self.line.move(event, self))
        self.work_input_cv.bind("<ButtonRelease-1>",
                                lambda event: self.line.release(event, self))

        windnd.hook_dropfiles(self.work_input_cv,
                              func=self.drag_input)  # hook dropped files to the input canvas

        self.work_output_cv.pack(side='right')

    def init_status_bar(self):
        """
        初始化状态栏
        """
        self.status.pack(anchor='e')
        status_message_lb1 = tk.Label(self.status, text='A点:')  # point A info
        status_message_lb1.pack(side='left')
        status_message_lb11 = tk.Label(self.status, text='坐标:')
        status_message_lb11.pack(side='left')
        self.status_message_lb1_ord.pack(side='left')
        status_message_lb12 = tk.Label(self.status, text='深度:')
        status_message_lb12.pack(side='left')
        self.status_message_lb1_dep.pack(side='left')

        status_message_lb01 = tk.Label(self.status, text='  ')  # spacer
        status_message_lb01.pack(side='left')

        status_message_lb2 = tk.Label(self.status, text='B点:')  # point B info
        status_message_lb2.pack(side='left')
        status_message_lb21 = tk.Label(self.status, text='坐标:')
        status_message_lb21.pack(side='left')
        self.status_message_lb2_ord.pack(side='left')
        status_message_lb22 = tk.Label(self.status, text='深度:')
        status_message_lb22.pack(side='left')
        self.status_message_lb2_dep.pack(side='left')

        status_message_lb02 = tk.Label(self.status, text='  ')  # spacer
        status_message_lb02.pack(side='left')

        status_message_lb3 = tk.Label(self.status, text='距离:')  # distance
        status_message_lb3.pack(side='left')
        self.status_message_dis.pack(side='left')

    def drag_input(self, images):
        """
        将拖拽的图片加载到原图帆布中

        :param images: 拖拽获得的文件列表
        :return: None
        """
        self.input_path = images[0].decode()  # 获取拖拽文件列表中第一个文件的路径(str类型)
        self.init_line()  # 根据图片更新line
        self.show_image(self.input_path, self.work_input_cv)  # 将文件加载到原图帆布中
        return

    def init_line(self):
        """更新line"""
        self.line = Line(self.width, self.height,
                         self.input_path)  # 用于使用utils中的算法
        self.line.get_canvas(self.work_input_cv)
        self.tool_dist_etr.delete(0, tk.END)
        self.tool_dist_etr.insert(0, str(self.line.focal_length))
        self.tool_dist_lb3.config(text='1')

    def open_input(self):
        """
        打开图片并加载到帆布中
        """
        self.input_path = filedialog.askopenfilename(
            parent=self.root,  # 父窗口
            title='打开',  # 对话框标题
            initialdir='',  # 默认路径
            initialfile='',  # 默认文件名
            filetypes=[],  # 设置文件类型选项
            defaultextension='.jpg',  # 默认文件拓展名
            multiple=False)
        try:
            self.show_image(self.input_path, self.work_input_cv)  # 将文件加载到原图帆布中
        except:
            messagebox.showerror('错误', '无法识别的文件格式')
        return

    def show_image(self, image, canvas):
        """
        将图片加载到帆布中

        :param image: 要加载的文件路径
        :param canvas: 展示图片的帆布组件
        :return: None
        """
        image_open = Image.open(image)  # 加载图片
        image_resize = image_open.resize((self.width, self.height))  # 缩放图片
        image_tk = ImageTk.PhotoImage(image_resize)  # 利用PIL包将图片转化tkinter兼容的格式
        canvas.create_image(0, 0, anchor='nw', image=image_tk)  # 在canvas中显示图像
        canvas.image = image_tk  # 保留对图像对象的引用,使图像持续显示

        self.line.get_image_size(image_open)  # 存储图片信息,用于Line类

        return

    def select_weight(self, *args):
        """
        选择算法后自动绑定对应的权重选项(暂未实现权重选择功能)
        """
        self.tool_depth_cbb2['values'] = self.algorithm[
            self.tool_depth_cbb1.get()]
        self.tool_depth_cbb2.current(0)  # 设置初始权重选项
        return

    def show_output(self):
        """
        运行对应算法,返回深度值矩阵,将图片保存到根目录;再将图片加载到深度图帆布中;所有功能用一个函数搞定

        :return: None
        """
        self.depth = get_depth(self.tool_depth_cbb1.get(), self.input_path)
        self.line.get_depth(self.depth)  # 将深度矩阵传入Line类

        plt.imsave('pred.jpg', self.depth)  # 保存深度图片
        self.show_image('pred.jpg', self.work_output_cv)  # 将生成图加载到深度图帆布中
        self.result = True  # 深度图已生成,可以保存
        return

    def save_output(self):
        """
        保存生成深度图
        """
        if self.result:
            save_path = filedialog.asksaveasfilename(
                defaultextension='.jpg',  # 默认文件拓展名
                filetypes=[('JPG', '*.jpg')],  # 设置文件类型选项,目前仅支持jpg格式输出
                initialdir='',  # 默认路径
                initialfile='深度估计',  # 默认文件名
                parent=self.root,  # 父对话框
                title='保存')  # 对话框标题
            if save_path != '':  # 取消保存时返回空字符
                image = Image.open('pred.jpg')
                image.save(save_path)  # 一种比较“投机”的保存方式
        else:
            messagebox.showerror('错误', '未生成深度估计图')
        return

    def alter_dis(self, str_dis):
        """
        修正距离

        :param str_dis: 输入框中的输入的距离
        :return:
        """
        dis_para = self.line.alter_dis(str_dis)
        self.tool_dist_lb3.config(text=str(dis_para))
        return

    def visual(self, *args):
        """
        图片可视化效果
        """
        if self.result:
            try:
                plt.imsave('pred.jpg',
                           self.depth,
                           cmap=self.tool_visual_cbb.get())
                self.show_image('pred.jpg', self.work_output_cv)
            except:
                messagebox.showerror('错误', '指定的颜色映射不存在')
        else:
            messagebox.showerror('错误', '未生成深度估计图')
        return

    def br(self):
        '''
        Rotate images whose orientation is wrong.
        '''
        # Left side of the working area
        im = Image.open(self.input_path)
        out = im.transpose(Image.ROTATE_270)  # rotate by 270 degrees
        out.save(self.input_path)
        self.show_image(self.input_path, self.work_input_cv)  # reload into the input canvas
        # Right side of the working area
        im = Image.open('pred.jpg')
        out = im.transpose(Image.ROTATE_270)  # rotate by 270 degrees
        out.save('pred.jpg')
        self.show_image('pred.jpg', self.work_output_cv)
        return

    def __call__(self):
        self.init_menu()
        self.init_toolbar()
        self.init_work()
        self.init_status_bar()
Code example #20
    for argument in range(2):
        if argument == 0:
            H, W, _ = im.shape
            lines = []
            with open(join(label_path, label_file)) as f:
                data = f.readlines()
                nums = len(data)
                stastic[nums] += 1
                for line in data:
                    data1 = line.strip().split(',')
                    if len(data1) <= 4:
                        continue
                    data1 = [int(float(x)) for x in data1]
                    if data1[1] == data1[3] and data1[0] == data1[2]:
                        continue
                    line = Line([data1[1], data1[0], data1[3], data1[2]])
                    lines.append(line)

            annotation = LineAnnotation(size=[H, W], divisions=args.num_directions, lines=lines)
        else:
            im = cv2.flip(im, 1)
            filename = filename + '_flip'
            H, W, _ = im.shape
            lines = []
            with open(join(label_path, label_file)) as f:
                data = f.readlines()
                for line in data:
                    data1 = line.strip().split(',')
                    if len(data1) <= 4:
                        continue
                    data1 = [int(float(x)) for x in data1]
Code example #21
class PickupPalletServer:
    def __init__(self):
        self.wheel_cmd_publisher = rospy.Publisher(WHEEL_CMD_TOPIC,
                                                   Twist,
                                                   queue_size=1)
        self.lift_cmd_publisher = rospy.Publisher(LIFT_CONTROL_TOPIC,
                                                  Float64,
                                                  queue_size=1)
        self.tf_listener = tf.TransformListener()

        rospy.wait_for_service('gazebo/get_model_state')
        rospy.wait_for_service('gazebo/get_link_state')
        self.modelStateService = rospy.ServiceProxy('gazebo/get_model_state',
                                                    GetModelState)
        self.linkStateService = rospy.ServiceProxy('gazebo/get_link_state',
                                                   GetLinkState)

        self.server = actionlib.SimpleActionServer(ACTION_SERVER_NAME,
                                                   PickupPalletAction,
                                                   self.execute, False)
        self.server.start()
        print('Server started')

    def execute(self, goal):
        rate = rospy.Rate(PUBLISH_RATE)
        self.targetPallet = goal.palletName.data
        self.done = False
        self.state = CONTROL_STATE_ALIGN
        self.start_time = rospy.get_time()
        self.lookupTransfrom()
        self.findTarget()

        while not rospy.is_shutdown() and not self.done:
            if self.server.is_preempt_requested():
                rospy.loginfo('Pickup Canceled')
                self.server.set_preempted()
                break

            self.lookupTransfrom()

            if self.state == CONTROL_STATE_ALIGN:
                self.control_alignment()
            elif self.state == CONTROL_STATE_LIFT:
                self.control_lift()
            elif self.state == CONTROL_STATE_DONE:
                self.server.set_succeeded()
                break

            rate.sleep()

    def findTarget(self):
        v_heading = np.array(
            [math.cos(self.robot_theta),
             math.sin(self.robot_theta)])
        v = np.array(
            [math.cos(self.pallet_theta),
             math.sin(self.pallet_theta)])
        if (np.dot(v, v_heading) > 0):
            self.target_position = self.pallet_position - (v * ALIGN_DISTANCE)
            self.target_theta = self.pallet_theta
            self.target_line = Line(self.target_position,
                                    self.target_position - v)
        else:
            self.target_position = self.pallet_position + (v * ALIGN_DISTANCE)
            self.target_theta = (self.pallet_theta + math.pi)
            self.target_line = Line(self.target_position,
                                    self.target_position + v)

    def lookupTransfrom(self):
        try:
            (trans,
             rot) = self.tf_listener.lookupTransform(MAP_FRAME, ROBOT_FRAME,
                                                     rospy.Time(0))
            euler = tf.transformations.euler_from_quaternion(
                [rot[0], rot[1], rot[2], rot[3]])
            self.robot_position = np.array([trans[0], trans[1]])
            self.robot_theta = euler[2]

            modelState = self.modelStateService(model_name=self.targetPallet)
            modelPos = modelState.pose.position
            self.pallet_position = np.array([modelPos.x, modelPos.y])
            q = modelState.pose.orientation
            euler = tf.transformations.euler_from_quaternion(
                [q.x, q.y, q.z, q.w])
            self.pallet_theta = euler[2]
        except Exception as e:
            rospy.logwarn('Failed to lookup transform')
            print(e)
            return 0

    def control_alignment(self):
        v_pallet = self.pallet_position - self.robot_position
        v_target = self.target_position - self.robot_position
        v_heading = np.array(
            [math.cos(self.robot_theta),
             math.sin(self.robot_theta)])
        distance = np.linalg.norm(v_pallet)

        if distance < ALIGN_DISTANCE:
            #self.server.set_succeeded()
            rospy.loginfo('Aligned')
            self.state = CONTROL_STATE_LIFT
            return

        theta_target = math.atan2(v_target[1], v_target[0])

        lineDot = np.dot(self.target_line.normal(), v_pallet)
        lineDistance = self.target_line.distance(self.robot_position)
        lineErr = np.sign(lineDot) * lineDistance

        alignErr = self.target_theta - self.robot_theta
        if (alignErr > math.pi):
            alignErr -= 2 * math.pi
        elif (alignErr < -math.pi):
            alignErr += 2 * math.pi

        headingErr = theta_target - self.robot_theta
        if (headingErr > math.pi):
            headingErr -= 2 * math.pi
        elif (headingErr < -math.pi):
            headingErr += 2 * math.pi

        if lineDistance > 0.5:
            steering = (alignErr * 0.2) + (headingErr * 1.5) + (lineErr * 1.0)
        elif distance > 2.4:
            steering = (alignErr * 2.0) + (headingErr * 4.0) + (lineErr * 0.5)
        else:
            steering = (alignErr * 3.0) + (headingErr * 0.0) + (lineErr * 1.0)

        target_turn = max(min(steering, MAX_TURN), -MAX_TURN)

        rospy.loginfo(
            'HErr: %.2f, AErr: %.2f, PDist: %.2f, Steering: %.2f, Dist: %.2f',
            headingErr, alignErr, lineErr, steering, distance)

        target_speed = 0.2
        angular = (target_speed / LENGTH) * math.tan(target_turn)

        cmd = Twist()
        cmd.linear = Vector3(target_speed, 0, 0)
        cmd.angular = Vector3(0, 0, angular)
        self.wheel_cmd_publisher.publish(cmd)

    def control_lift(self):

        lift_link = self.linkStateService(link_name=LIFT_LINK_NAME)
        height = lift_link.link_state.pose.position.z

        if height < 0.5:
            self.lift_cmd_publisher.publish(0.5)
        else:
            self.lift_cmd_publisher.publish(0)
            rospy.loginfo('Lift is up')
            self.state = CONTROL_STATE_DONE
Code example #22
class FollowPathServer:
    def __init__(self):
        self.wheel_cmd_publisher = rospy.Publisher(WHEEL_CONTROL_TOPIC,
                                                   Twist,
                                                   queue_size=1)
        self.tf_listener = tf.TransformListener()

        self.pub_marker = rospy.Publisher('/path/marker',
                                          Marker,
                                          queue_size=10)

        self.server = actionlib.SimpleActionServer(ACTION_SERVER_NAME,
                                                   FollowPathAction,
                                                   self.execute, False)
        self.server.start()
        print('Server started')

    def execute(self, goal):
        self.done = False
        self.path = goal.poses
        self.pathIndex = 0
        self.updatePose()
        self.nextPathIndex()
        self.publishPathMarker(self.path, 99)

        rate = rospy.Rate(PUBLISH_RATE)
        while not rospy.is_shutdown() and not self.done:
            if self.server.is_preempt_requested():
                rospy.loginfo('Path Canceled')
                self.server.set_preempted()
                break

            self.updatePose()

            pos = np.array([self.pose[0], self.pose[1]])
            v1 = self.pathLine.p2 - pos

            pathDot = np.dot(self.pathLine.normal(), v1)
            endDot = np.dot(v1, self.pathLine.direction())

            endDistance = self.endLine.distance(pos)
            pathDistance = self.pathLine.distance(pos)
            pathErr = (-np.sign(pathDot)) * self.direction * pathDistance

            if (pathDistance > MAX_PATH_ERROR):
                self.server.set_aborted()
                rospy.loginfo('Too far off path')
                break

            if (endDot < 0):
                ## Aim for the next point on the path
                self.nextPathIndex()
                continue

            delta = self.getHeadingError()
            steering = delta * 3.0 + pathErr * 2.5
            target_turn = max(min(steering, MAX_TURN), -MAX_TURN)

            target_speed = 0.5 * self.direction
            angular = (target_speed / LENGTH) * math.tan(target_turn)

            cmd = Twist()
            cmd.linear = Vector3(target_speed, 0, 0)
            cmd.angular = Vector3(0, 0, angular)
            self.wheel_cmd_publisher.publish(cmd)

            rate.sleep()

        self.clearPathMarker(99)

    def updatePose(self):
        try:
            (trans,
             rot) = self.tf_listener.lookupTransform('/map', '/base_link',
                                                     rospy.Time(0))
            euler = tf.transformations.euler_from_quaternion(
                [rot[0], rot[1], rot[2], rot[3]])

            self.pose = [trans[0], trans[1], euler[2]]
        except Exception:
            rospy.logwarn('Failed to lookup transform')

    def nextPathIndex(self):
        self.pathIndex += 1
        if (self.pathIndex >= len(self.path)):
            ## End of the path
            position = Point(self.pose[0], self.pose[1], 0)
            q = tf.transformations.quaternion_from_euler(0, 0, self.pose[2])
            orientation = Quaternion(q[0], q[1], q[2], q[3])
            finalPose = Pose(position, orientation)

            self.server.set_succeeded(FollowPathResult(finalPose))
            cmd = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))  ## Stop
            self.wheel_cmd_publisher.publish(cmd)
            self.done = True
            rospy.loginfo('Success')
            return

        targetPose = self.path[self.pathIndex]
        prevPose = self.path[self.pathIndex - 1]
        p1 = np.array([prevPose.position.x, prevPose.position.y])
        p2 = np.array([targetPose.position.x, targetPose.position.y])
        self.pathLine = Line(p1, p2)
        self.endLine = Line(p2, p2 + self.pathLine.normal())

        if (self.pathLine.length() < EPSILON):
            self.nextPathIndex()
            return

        headingErr = self.getHeadingError()
        if (abs(headingErr) > math.pi / 2):
            self.direction = -1
        else:
            self.direction = 1

    def getHeadingError(self):
        targetHeading = self.pathLine.heading()

        currentHeading = self.pose[2]
        delta = targetHeading - currentHeading
        if (delta > math.pi):
            delta -= 2 * math.pi
        elif (delta < -math.pi):
            delta += 2 * math.pi

        return delta

    def publishPathMarker(self, path, id):
        m = Marker()
        m.header.frame_id = "map"
        m.type = Marker.LINE_STRIP
        m.action = Marker.ADD

        m.points = []
        for pose in path:
            p = Point(pose.position.x, pose.position.y, 0.1)
            m.points.append(p)

        m.scale = Vector3(0.1, 0.1, 0.1)
        m.color = ColorRGBA(0, 1, 0, 1)
        m.id = id
        self.pub_marker.publish(m)

    def clearPathMarker(self, id):
        m = Marker()
        m.action = Marker.DELETE
        m.id = id
        self.pub_marker.publish(m)
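Examples #21 and #22 both wrap angle differences into [-pi, pi] with repeated if/elif branches (alignErr, headingErr, getHeadingError). A hedged refactoring sketch; normalize_angle is a hypothetical helper, not part of these projects:

import math

def normalize_angle(delta):
    # Map any angle difference onto [-pi, pi)
    return (delta + math.pi) % (2 * math.pi) - math.pi

With it, getHeadingError reduces to return normalize_angle(self.pathLine.heading() - self.pose[2]).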