Example #1
0
def run():
    """Calibrate the pixel-distance -> real-distance mapping from test images.

    Pass 1: detect the AprilTag in each 'picture/1080p-<N>0.jpg' image,
    collect x = 1/pixel_distance samples and least-squares fit y = k*x + b
    against the known real distances (300..1500 in steps of 100).
    Pass 2: re-run detection using the fitted k and report the average
    relative error of consecutive calibrated distances.

    Relies on module-level names: Apriltag, cv2, np, tud, leastsq, error.
    """
    products = []  # dis * real_distance products, reported as an average
    xi = []        # 1/pixel-distance samples (least-squares x values)
    detector = Apriltag()
    detector.create_detector(debug=False, sigma=1.4, thresholding='canny')
    for index in range(3, 16):
        filename = 'picture/1080p-' + str(index) + '0' + '.jpg'
        frame = cv2.imread(filename)
        detections = detector.detect(frame)
        for detection in detections:
            dis = tud.get_pixel(detection.homography)
            xi.append(1 / dis)
            products.append(dis * index * 100)
            print('pixeldis:', dis)

    # Known ground-truth distances: 300, 400, ..., 1500 (one per image).
    yi = np.array(range(300, 1600, 100)) * 1.0
    p0 = [1.0, 1.0]
    # NOTE(review): the fit assumes exactly one detection per image so that
    # len(xi) == len(yi); images with 0 or >1 tags break this pairing.
    Para = leastsq(error, p0, args=(xi, yi))
    k, b = Para[0]
    print('y = ', k, 'x + ', b)
    print('average: ', np.average(products))

    caldis = []  # calibrated distances, one per detection
    errors = []  # relative step error between consecutive calibrated distances

    for index in range(3, 16):
        filename = 'picture/1080p-' + str(index) + '0' + '.jpg'
        frame = cv2.imread(filename)
        detections = detector.detect(frame)
        for detection in detections:
            dis = tud.get_distance(detection.homography, k)
            caldis.append(dis)
            print('dis:', dis)

    # Images are 100 units apart, so each consecutive difference should be
    # ~100; normalize to a relative step error. (Was a hard-coded range(12),
    # which assumed exactly 13 detections and could raise IndexError.)
    for i in range(len(caldis) - 1):
        errors.append((caldis[i + 1] - caldis[i]) / 100)
    arrayerror = np.average(errors)
    print('arrayerror  ', arrayerror)
Example #2
0
def robodock():
    """Spin the robot until a detected AprilTag is horizontally centered.

    Scans the module-level `frame` for tags; defaults to spinning right,
    then for each detection steers right/left based on the tag's horizontal
    pose offset (pose[2][0]) and stops once it is within +/-0.2.

    Relies on module-level names: frame, detector, camera_params, tag_size,
    cv2, tud, setspeedm1, setspeedm2, left, right, stop.
    """
    global dis
    while True:
        # NOTE(review): reset to 0 every iteration and never incremented —
        # the final print always shows 0; confirm intended use.
        robodock.k = 0
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)
        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))
        # Default action: spin right while searching for a tag.
        setspeedm1(800)
        setspeedm2(800)
        right()
        for i, detection in enumerate(detections):
            dis = tud.get_distance(detection.homography, 10000)
            print('Distance', dis)
            # Single pose estimate; the original made an extra call whose
            # result was discarded (redundant work, removed).
            pose, e0, e1 = detector.detection_pose(detection, camera_params,
                                                   tag_size)
            # pose[2][0] is the horizontal offset used for steering.
            print(pose[2][0])
            if pose[2][0] < -0.2:
                setspeedm1(800)
                setspeedm2(800)
                right()
            elif pose[2][0] > 0.2:
                setspeedm1(800)
                setspeedm2(800)
                left()
            else:
                # Tag centered: stop and fall out of the detection loop.
                stop()
                break
        print(robodock.k)
Example #3
0
def main():
    """Live AprilTag viewer: detect tags from the default camera, draw them.

    Opens camera 0 at 1920x1080, detects tags per frame, draws the pose
    quadrilateral (red) and the raw corner quadrilateral (green), prints each
    tag's estimated distance, and exits on ESC.

    Relies on module-level names: Apriltag, cv2, np, tud.
    """
    cap = cv2.VideoCapture(0)
    cap.set(3, 1920)  # property 3 = CAP_PROP_FRAME_WIDTH
    cap.set(4, 1080)  # property 4 = CAP_PROP_FRAME_HEIGHT
    fps = 24
    window = 'Camera'
    cv2.namedWindow(window)
    detector = Apriltag()
    detector.create_detector(debug=False)

    # Quad edges: consecutive corner index pairs (loop-invariant, hoisted).
    edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])

    while cap.grab():
        success, frame = cap.retrieve()
        if not success:
            break

        detections = detector.detect(frame)
        # Draw directly on the frame; with no detections it is shown as-is
        # (the original's if/else branches were identical).
        show = frame
        for detection in detections:
            point = tud.get_pose_point(detection.homography)
            # 122274 is the camera's calibration constant for get_distance.
            dis = tud.get_distance(detection.homography, 122274)
            for j in range(4):
                cv2.line(show, tuple(point[edges[j, 0]]),
                         tuple(point[edges[j, 1]]), (0, 0, 255), 2)
            data_point = np.int32(detection.corners)
            for j in range(4):
                cv2.line(show,
                         tuple(data_point[edges[j, 0]]),
                         tuple(data_point[edges[j, 1]]),
                         color=(0, 255, 0))
            print('dis:', dis)

        cv2.imshow(window, show)
        k = cv2.waitKey(1000 // int(fps))
        if k == 27:  # ESC
            break
    cap.release()
Example #4
0
 def __detector_id_dis(self):
     """For each buffered frame, record the nearest tag's (camera, id, dist).

     Converts each frame in self.frames to grayscale, detects AprilTags, and
     keeps only the closest detection per frame; frames with no detections
     are skipped. Clears self.frames afterwards.

     Returns:
         np.ndarray with one row [camera_index, tag_id, distance] per frame
         that had at least one detection (float dtype).
     """
     result = []
     # enumerate fixes the original's manual counter, which was NOT
     # incremented for frames with zero detections (the `continue` skipped
     # it), producing wrong camera indices after any empty frame.
     for cam, frame in enumerate(self.frames):
         gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
         detections = self.detector.detect(gray)
         if not detections:
             continue
         distances = np.zeros(len(detections))
         tagid = np.zeros(len(detections))
         for i, detection in enumerate(detections):
             # 121938.1 is the camera's calibration constant for get_distance.
             distances[i] = tud.get_distance(detection.homography, 121938.1)
             tagid[i] = detection.tag_id
         min_index = np.argmin(distances)
         result.append([cam, tagid[min_index], distances[min_index]])
     self.frames = []  # clear the frame buffer for the next batch
     return np.array(result)
Example #5
0
def robodock():
    """Dock the robot on an AprilTag: steer toward it, stop when close.

    Reads frames from the module-level camera `cam`, detects tags, steers
    left/right from the tag's horizontal pose offset (pose[2][0]), drives
    forward while the measured distance is >= 20, then stops and pauses.
    A dock counter is kept on robodock.k.

    Relies on module-level names: cam, detector, camera_params, tag_size,
    cv2, tud, time, setspeed, left, right, forward, stop.
    """
    while True:
        robodock.k = 0
        ret, frame = cam.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))
        setspeed(0)
        for i, detection in enumerate(detections):
            dis = tud.get_distance(detection.homography, 10000)
            print('Distance', dis)

            # Single pose estimate; the original made an extra call whose
            # result was discarded (redundant work, removed).
            pose, e0, e1 = detector.detection_pose(detection, camera_params,
                                                   tag_size)
            print(pose[2][0])
            if pose[2][0] < -0.1:
                left()
            elif pose[2][0] > 0.1:
                right()
                # NOTE(review): this approach/stop logic only runs while
                # steering right — confirm it shouldn't apply to every branch.
                if dis >= 20:
                    setspeed(850)
                    forward()
                else:
                    stop()
                    print("stop")
                    # Was `k = k + 1`: NameError, `k` was never bound; the
                    # function attribute robodock.k was clearly intended.
                    robodock.k += 1
                    time.sleep(2)
                    break
        print(robodock.k)
    # NOTE(review): fragment — the enclosing function/loop header and the
    # definitions of filename3, frame, frame1 and frame2 are outside this
    # view; the bare `continue` below proves this code runs inside a loop.
    frame3 = cv2.imread(filename3)
    # Grayscale conversion for each of the four camera images.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
    gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
    gray3 = cv2.cvtColor(frame3, cv2.COLOR_RGB2GRAY)
    window = 'Camera'
    cv2.namedWindow(window)

    # NOTE(review): detector is re-created on every pass through the loop —
    # presumably it could be hoisted; confirm against the missing header.
    detector = apriltag.Detector()

    # Run detection on all four images.
    detections = detector.detect(gray, return_image=False)
    detections1 = detector.detect(gray1, return_image=False)
    detections2 = detector.detect(gray2, return_image=False)
    detections3 = detector.detect(gray3, return_image=False)

    show = None
    # Skip this pass unless all four images produced at least one detection
    # (the sum < 4 test assumes at most one tag per image — TODO confirm).
    if (len(detections) + len(detections1) + len(detections2) +
            len(detections3) < 4):
        continue
    else:
        # Per-camera distance to the first detected tag; 122274.9 is the
        # calibration constant passed to get_distance.
        dis = tud.get_distance(detections[0].homography, 122274.9)
        dis1 = tud.get_distance(detections1[0].homography, 122274.9)
        dis2 = tud.get_distance(detections2[0].homography, 122274.9)
        dis3 = tud.get_distance(detections3[0].homography, 122274.9)

        # Solve the 3D coordinate from three of the distances ("sovle" is
        # the helper's actual spelling), then cross-check z with the fourth.
        x, y, z = tud.sovle_coord(dis, dis1, dis3)

        z = tud.verify_z(x, y, dis2)

        print(x, y, z)
Example #7
0
def DisplayandSolve():
    """Detect, track and localize AprilTags from a stereo camera pair.

    Pulls left/right frames from the module-level queues (ql, qr), rectifies
    them with the stereo calibration, detects AprilTags in the left image,
    tracks them with SORT, computes an SGBM disparity map, reprojects to 3D
    (left-camera frame), and appends the target's 3D coordinate to
    trajectory.txt. Press 'q' to exit.

    Relies on module-level names: q, ql, qr, Apriltag, Sort, cameraconfig,
    stereomoulds, cv2, np, tud, pickle.
    """
    print("Start Displaying and Solving")
    # Create the tag detector.
    detector = Apriltag()
    detector.create_detector(debug=False)
    # Create the SORT multi-object tracker.
    mot_tracker = Sort()
    # Load the cameras' intrinsic/extrinsic parameters.
    camerasconfig = cameraconfig.stereoCamera()

    # Quad edges: consecutive corner index pairs (loop-invariant, hoisted).
    edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])

    while True:
        if not q.empty():
            framel = ql.get()
            framer = qr.get()
            height, width = framel.shape[0:2]

            # Stereo rectification: mapping matrices for distortion/stereo
            # correction, plus the reprojection matrix Q used for 3D coords.
            map1x, map1y, map2x, map2y, Q = stereomoulds.getRectifyTransform(
                height, width, camerasconfig)
            iml_rectified, imr_rectified = stereomoulds.rectifyImage(
                framel, framer, map1x, map1y, map2x, map2y)

            detections = detector.detect(framel)
            objtracks = list()
            # Draw on the left image; `show = frame` in the original was a
            # NameError (no `frame` in scope — `framel` was intended).
            show = framel
            for detection in detections:
                point = tud.get_pose_point(detection.homography)
                dis = tud.get_distance(detection.homography, 122274)
                # Track record: top-left / bottom-right corners + index id.
                obj = [
                    point[0][0], point[0][1], point[2][0], point[2][1],
                    detections.index(detection)
                ]
                objtracks.append(obj)
                for j in range(4):
                    cv2.line(show, tuple(point[edges[j, 0]]),
                             tuple(point[edges[j, 1]]), (0, 0, 255), 2)

            cv2.imshow("frame1", show)
            # Update the tracked bounding boxes.
            track_bbs_ids = mot_tracker.update(objtracks)

            # Stereo matching on the already-rectified pair. (The original
            # recomputed the rectification maps here and called rectifyImage
            # on undefined names iml/imr — a redundant duplicate, removed.)
            iml_, imr_ = stereomoulds.preprocess(iml_rectified, imr_rectified)
            disp, _ = stereomoulds.disparity_SGBM(iml_, imr_)
            # SGBM disparities are scaled by 16; divide to get true values.
            disp = np.divide(disp.astype(np.float32), 16.)
            # Per-pixel 3D coordinates in the left-camera frame.
            points_3d = cv2.reprojectImageTo3D(disp, Q)

            # 3D coordinate of the tracked target's center.
            # NOTE(review): the original passed undefined names
            # track_bbs_idsl / track_bbs_idsr; only track_bbs_ids exists —
            # confirm the intended left/right track inputs to stereoto3d.
            points_3d = stereomoulds.stereoto3d(track_bbs_ids, track_bbs_ids,
                                                Q)

            # Append the coordinate to trajectory.txt. pickle requires a
            # binary-mode file (the original opened text mode and dumped the
            # undefined `point_3d`); `with` guarantees the file is closed.
            with open("trajectory.txt", "ab") as file:
                pickle.dump(points_3d, file)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break