Example 1
def get_camera_origin_in_world(thetax, thetay, thetaz, p0):
    # p0 is t, the extrinsic translation from solvePnP; the result is -R^T t, the camera origin in world coordinates
    x = copy.deepcopy(p0)
    x[0], x[1] = rotateByZ(x[0], x[1], -1 * thetaz)
    x[0], x[2] = rotateByY(x[0], x[2], -1 * thetay)
    x[1], x[2] = rotateByX(x[1], x[2], -1 * thetax)
    return -1 * x
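
The rotateByX / rotateByY / rotateByZ helpers are not shown in any of these examples, which also assume the shared imports copy, math, numpy as np, cv2, and matplotlib.pyplot as plt. A minimal sketch consistent with how the helpers are called, assuming angles in degrees and right-handed rotations about each axis (this reconstruction is an assumption, not the original implementation):

import math

def rotateByX(y, z, thetax):
    # rotate the (y, z) components about the X axis by thetax degrees
    rx = math.radians(thetax)
    return math.cos(rx) * y - math.sin(rx) * z, math.sin(rx) * y + math.cos(rx) * z

def rotateByY(x, z, thetay):
    # rotate the (x, z) components about the Y axis by thetay degrees
    ry = math.radians(thetay)
    return math.cos(ry) * x + math.sin(ry) * z, -math.sin(ry) * x + math.cos(ry) * z

def rotateByZ(x, y, thetaz):
    # rotate the (x, y) components about the Z axis by thetaz degrees
    rz = math.radians(thetaz)
    return math.cos(rz) * x - math.sin(rz) * y, math.sin(rz) * x + math.cos(rz) * y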
Example 2
def get_person_origin_in_world(thetax, thetay, thetaz, p0, cw):
    # p0: the person origin in camera coordinate
    # cw: the camera position in world coordinate
    # return: the person origin in world coordinate
    x = copy.deepcopy(p0)
    x[0], x[1] = rotateByZ(x[0], x[1], -thetaz)
    x[0], x[2] = rotateByY(x[0], x[2], -thetay)
    x[1], x[2] = rotateByX(x[1], x[2], -thetax)
    x += cw
    return x
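
Both helpers invert the extrinsic transform p_c = R * p_w + t, with R decomposed as Rz(thetaz) * Ry(thetay) * Rx(thetax): applying the three negated rotations in Z, Y, X order is exactly R^T, so Example 1 computes cw = -R^T t and this function computes p0w = R^T p0c + cw. A quick numerical cross-check, assuming SciPy is available and the rotateBy* convention sketched after Example 1:

import numpy as np
from scipy.spatial.transform import Rotation

# hypothetical angles and translation standing in for solvePnP output
thetax, thetay, thetaz = 10.0, -5.0, 30.0
t = np.array([1.0, 2.0, 3.0])

R = Rotation.from_euler('ZYX', [thetaz, thetay, thetax], degrees=True).as_matrix()
cw = -R.T @ t                                   # matches get_camera_origin_in_world
p0w = R.T @ np.array([0.5, 0.5, 2.0]) + cw      # matches get_person_origin_in_world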
Example 3
def main():
    # Create data
    #N = 60
    #g1 = (0.6 + 0.6 * np.random.rand(N), np.random.rand(N),0.4+0.1*np.random.rand(N))
    #g2 = (0.4+0.3 * np.random.rand(N), 0.5*np.random.rand(N),0.1*np.random.rand(N))
    #g3 = (0.3*np.random.rand(N),0.3*np.random.rand(N),0.3*np.random.rand(N))
    data = np.array([
        [0, 0, 0],  # P1
        [0, 150, 0],  # P2
        [200, 0, 0],  # P3
        [200, 150, 0]
    ])  # P4

    color = "red"  #, "green", "blue")

    # Create plot
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=color)
    p = [1, 2, 3]  #[[100,0,0],[0,100,0],[0,0,100]])
    x = copy.deepcopy(p)  #[None]*3
    #x[0], x[1], x[2] = rotateByVector(x[0],x[1],x[2],[0,0,1], 90)

    x[1], x[2] = rotateByX(x[1], x[2], 90)
    x[0], x[2] = rotateByY(x[0], x[2], -90)
    x[0], x[1] = rotateByZ(x[0], x[1], -90)
    print("new x:", x)

    ax.quiver(0, 0, 0, 100, 0, 0)
    ax.quiver(0, 0, 0, 0, 100, 0)
    ax.quiver(0, 0, 0, 0, 0, 100)

    ax.quiver(0, 0, 0, p[0], p[1], p[2])
    ax.quiver(0, 0, 0, x[0], x[1], x[2])

    ax.set_zlim(0.0, 300.0)
    ax.set_xlim(0.0, 300.0)
    ax.set_ylim(300.0, 0.0)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_box_aspect((1, 1, 1))  # set_aspect(1) is unsupported on 3D axes; needs matplotlib >= 3.3

    plt.title('realtime camera position and direction')
    plt.show()
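
The commented-out rotateByVector call hints at an axis-angle alternative to the three per-axis helpers. A minimal sketch via Rodrigues' rotation formula (the name and signature are taken from the comment; the body is an assumption):

import math
import numpy as np

def rotateByVector(x, y, z, axis, theta):
    # rotate the point (x, y, z) about the given axis by theta degrees
    p = np.array([x, y, z], dtype=float)
    k = np.asarray(axis, dtype=float)
    k = k / np.linalg.norm(k)
    th = math.radians(theta)
    out = (p * math.cos(th)
           + np.cross(k, p) * math.sin(th)
           + k * np.dot(k, p) * (1.0 - math.cos(th)))
    return out[0], out[1], out[2]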
Example 4
def main():
    cap = cv2.VideoCapture(0)
    #image = cv2.imread('img.jpg')
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    cv2.namedWindow("image", 0)
    cv2.resizeWindow("image", 640, 480)
    cv2.namedWindow("tmp", 0)
    cv2.resizeWindow("tmp", 640, 480)
    cv2.setMouseCallback('image', select_point)  # register the mouse callback for point selection

    #marker_3d = np.array([[0,0,0],[800,0,0],[0,800,0],[800,800,0], [0,0,0], [240,0,0], [0,170,0], [240, 170, 0]], dtype=np.float32).reshape(-1,1,3)

    marker_3d = np.array([[0, 0, 0], [150, 0, 0], [0, 200, 0], [150, 200, 0],
                          [0, 0, 0], [80, 0, 0], [0, 100, 0], [80, 100, 0]],
                         dtype=np.float32).reshape(-1, 1, 3)
    axis = np.float32([[30, 0, 0], [0, 30, 0], [0, 0, 30]]).reshape(-1, 3)
    mtx, dist = load_intrinsic_parameters('webcam_calibration_ouput.npz')
    while True:
        _, image = cap.read()
        if image is None:
            break
        # these four points are the ground plane
        for i in range(len(marker_2d[:4])):  #len(marker_2d)):
            #print('tracking: point %d =' % i, marker_2d[i])
            #ret = tracking(image, marker_2d[i])
            #marker_2d[i] = ret
            #cv2.circle(image, ret, 4, (255,0,0), -1)
            cv2.circle(image, marker_2d[i], 4, (255, 0, 0), -1)
        # these four points are the human upper-body plane
        for i in range(4, len(marker_2d)):
            cv2.circle(image, marker_2d[i], 4, (255, 0, 0), -1)

        if len(marker_2d) == 8:
            _, rvecs, tvecs, inliers = cv2.solvePnPRansac(
                marker_3d[:4],
                np.array(marker_2d[:4], dtype=np.float32).reshape(-1, 1, 2),
                mtx, dist)
            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
            tmp = copy.deepcopy(image)
            draw(tmp, marker_2d[0], imgpts)
            cv2.imshow('tmp', tmp)
            rotM, _ = cv2.Rodrigues(rvecs)
            r11 = rotM[0][0]
            r12 = rotM[0][1]
            r13 = rotM[0][2]
            r21 = rotM[1][0]
            r22 = rotM[1][1]
            r23 = rotM[1][2]
            r31 = rotM[2][0]
            r32 = rotM[2][1]
            r33 = rotM[2][2]
            thetaz = math.atan2(r21, r11) / math.pi * 180
            thetay = math.atan2(
                -1 * r31, math.sqrt(r32 * r32 + r33 * r33)) / math.pi * 180
            thetax = math.atan2(r32, r33) / math.pi * 180
            pc = tvecs.reshape(3, )
            # get the camera position in world coordinates
            cw = get_camera_origin_in_world(thetax, thetay, thetaz, pc)
            print("camera pos in world axis:", cw)

            _, rvecs, tvecs, inliers = cv2.solvePnPRansac(
                marker_3d[4:],
                np.array(marker_2d[4:], dtype=np.float32).reshape(-1, 1, 2),
                mtx, dist)
            p0c = tvecs.reshape(3, )
            # get the person origin in world coordinates
            p0w = get_person_origin_in_world(thetax, thetay, thetaz, p0c, cw)
            print("person pos in world axis:", p0w)
            rotM, _ = cv2.Rodrigues(rvecs)
            r11 = rotM[0][0]
            r12 = rotM[0][1]
            r13 = rotM[0][2]
            r21 = rotM[1][0]
            r22 = rotM[1][1]
            r23 = rotM[1][2]
            r31 = rotM[2][0]
            r32 = rotM[2][1]
            r33 = rotM[2][2]
            thetazp = math.atan2(r21, r11) / math.pi * 180
            thetayp = math.atan2(
                -1 * r31, math.sqrt(r32 * r32 + r33 * r33)) / math.pi * 180
            thetaxp = math.atan2(r32, r33) / math.pi * 180
            upper_body_in_world = []
            upper_body_in_world.append(p0w)
            # rotate every point in the person coordinate system (except the origin) forward by the person's Euler angles, then back by the camera's
            for p in marker_3d[5:]:
                tmp = copy.deepcopy(p).reshape(3, )
                print("before rotation:", tmp)
                tmp[1], tmp[2] = rotateByX(tmp[1], tmp[2], thetaxp)
                tmp[0], tmp[2] = rotateByY(tmp[0], tmp[2], thetayp)
                tmp[0], tmp[1] = rotateByZ(tmp[0], tmp[1], thetazp)
                tmp[0], tmp[1] = rotateByZ(tmp[0], tmp[1], -thetaz)
                tmp[0], tmp[2] = rotateByY(tmp[0], tmp[2], -thetay)
                tmp[1], tmp[2] = rotateByX(tmp[1], tmp[2], -thetax)
                print("after rotation:", tmp)
                piw = p0w + tmp
                upper_body_in_world.append(piw)
            for p in upper_body_in_world:
                ax.scatter([p[0]], [p[1]], [p[2]], c="red")

            # plot camera
            plot_camera(ax, cw, marker_3d[:4].squeeze(1))
            # plot normal vector
            _, human_norm_vec = get_plane(upper_body_in_world[0],
                                          upper_body_in_world[1],
                                          upper_body_in_world[2])
            tmp = angle_between_vectors(human_norm_vec, np.array([0, 0, 1]))
            print("angle=%d" % tmp)
            plot_arrow(ax, upper_body_in_world[0], human_norm_vec)
            # plot person plane
            #plot_person_plane(ax, upper_body_in_world[0], upper_body_in_world[1], upper_body_in_world[2])

        cv2.imshow('image', image)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
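
get_plane, angle_between_vectors, plot_camera, and plot_arrow are helpers defined outside these examples. Minimal sketches of the first two, assuming get_plane returns the plane offset and a unit normal, and angles are in degrees:

import math
import numpy as np

def get_plane(p1, p2, p3):
    # plane through three points: unit normal from the cross product,
    # and offset d such that normal . p + d = 0 for points p on the plane
    normal = np.cross(p2 - p1, p3 - p1)
    normal = normal / np.linalg.norm(normal)
    d = -np.dot(normal, p1)
    return d, normal

def angle_between_vectors(a, b):
    # angle between two vectors in degrees, with the cosine clipped for safety
    cos = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return math.degrees(math.acos(np.clip(cos, -1.0, 1.0)))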
Example 5
def main():
    cap = cv2.VideoCapture('20181205clip.avi')
    # alternative sources: '/data1/Project/Jail/给杨博士/法制行为录像/倒地.mp4', '倒地自制_clip.avi'
    #image = cv2.imread('screenshot.jpg')

    # Load openpose to detect human keypoints
    model = load_openpose_params()

    # Make figures ready for illustration
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    cv2.namedWindow("image", 0)
    cv2.resizeWindow("image", 640, 480)
    cv2.namedWindow("tmp", 0)
    cv2.resizeWindow("tmp", 640, 480)
    cv2.setMouseCallback('image', select_point)  # register the mouse callback for point selection
    
    # ground region: width = 7 tiles, height = 3.6 tiles, each tile = 40 cm
    # human golden-ratio proportions: upper body = 0.382; use 0.45 * 175 = 78, shoulder width = 36, waist width = 30
    #marker_3d = np.array([[0,0,0],[140,0,0],[0,280,0],[140,280,0], [0,0,0], [78,3,0], [0,36,0], [78, 33, 0]], dtype=np.float32).reshape(-1,1,3)
    marker_3d = np.array([[0,0,0],[80,0,0],[0,80,0],[80,80,0], [0,0,0], [65,3,0], [0,36,0], [65, 33, 0]], dtype=np.float32).reshape(-1,1,3)
    axis = np.float32([[30,0,0], [0,30,0], [0,0,30]]).reshape(-1,3)
    #mtx, dist = load_intrinsic_parameters('webcam_calibration_ouput.npz')
    mtx = load_intrinsic_K('hearingroom_intrinsic_K.npz')
    dist = np.zeros((1, 5), dtype=np.float64)  # np.float was removed from NumPy; use float64
    while True:
        _, image = cap.read()
        if image is None:
            break
        detect_results = openpose_keypoint(model, image)
        # these four points are the ground plane
        for i in range(len(ground_2d)):#len(marker_2d)):
            #print('tracking: point %d =' % i, marker_2d[i])
            #ret = tracking(image, marker_2d[i])
            #marker_2d[i] = ret
            cv2.circle(image, ground_2d[i], 4, (255,0,0), -1)
        # these four points are the human upper-body plane
        for i in range(len(detect_results)):
            if not detect_results[i]:
                continue
            for j in range(4):
                #print("detect:", detect_results[i][j])
                cv2.circle(image, detect_results[i][j], 4, (255,0,0), -1)
        
        if len(ground_2d) == 4:
            _, rvecs, tvecs, inliers = cv2.solvePnPRansac(marker_3d[:4], np.array(ground_2d, dtype=np.float32).reshape(-1,1,2), mtx, dist)
            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
            tmp = copy.deepcopy(image)
            draw(tmp, ground_2d[0], imgpts)
            cv2.imshow('tmp', tmp)
            rotM,_ = cv2.Rodrigues(rvecs)
            r11 = rotM[0][0]
            r12 = rotM[0][1]
            r13 = rotM[0][2]
            r21 = rotM[1][0]
            r22 = rotM[1][1]
            r23 = rotM[1][2]
            r31 = rotM[2][0]
            r32 = rotM[2][1]
            r33 = rotM[2][2]
            thetaz = math.atan2(r21, r11) / math.pi * 180
            thetay = math.atan2(-1 * r31, math.sqrt(r32*r32 + r33*r33)) / math.pi * 180
            thetax = math.atan2(r32, r33) / math.pi * 180
            pc = tvecs.reshape(3,)
            # get the camera position in world coordinates
            cw = get_camera_origin_in_world(thetax, thetay, thetaz, pc)
            #print("camera pos in world axis:", cw)
            
            upper_body_in_world = []
            for i in range(len(detect_results)):
                marker_2d = detect_results[i]
                if not marker_2d:
                    continue
                _, rvecs, tvecs, inliers = cv2.solvePnPRansac(marker_3d[4:], np.array(marker_2d, dtype=np.float32).reshape(-1,1,2), mtx, dist)
                p0c = tvecs.reshape(3,)
                # get the person origin in world coordinates
                p0w = get_person_origin_in_world(thetax, thetay, thetaz, p0c, cw)
                #print("person pos in world axis:", p0w)
                rotM,_ = cv2.Rodrigues(rvecs)
                r11 = rotM[0][0]
                r12 = rotM[0][1]
                r13 = rotM[0][2]
                r21 = rotM[1][0]
                r22 = rotM[1][1]
                r23 = rotM[1][2]
                r31 = rotM[2][0]
                r32 = rotM[2][1]
                r33 = rotM[2][2]
                thetazp = math.atan2(r21, r11) / math.pi * 180
                thetayp = math.atan2(-1 * r31, math.sqrt(r32*r32 + r33*r33)) / math.pi * 180
                thetaxp = math.atan2(r32, r33) / math.pi * 180
                upper_body_in_world.append([])
                upper_body_in_world[i].append(p0w)
                # rotate every point in the person coordinate system (except the origin) forward by the person's Euler angles, then back by the camera's
                for p in marker_3d[5:]:
                    tmp = copy.deepcopy(p).reshape(3,)
                    #print("before rotation:", tmp)
                    tmp[1], tmp[2] = rotateByX(tmp[1], tmp[2], thetaxp)
                    tmp[0], tmp[2] = rotateByY(tmp[0], tmp[2], thetayp)
                    tmp[0], tmp[1] = rotateByZ(tmp[0], tmp[1], thetazp)
                    tmp[0], tmp[1] = rotateByZ(tmp[0], tmp[1], -thetaz)
                    tmp[0], tmp[2] = rotateByY(tmp[0], tmp[2], -thetay)
                    tmp[1], tmp[2] = rotateByX(tmp[1], tmp[2], -thetax)
                    #print("after rotation:", tmp)
                    piw = p0w + tmp
                    upper_body_in_world[i].append(piw)
            for i in range(len(upper_body_in_world)):
                for p in upper_body_in_world[i]:
                    ax.scatter([p[0]], [p[1]], [p[2]], c="red")
        
            # plot camera
            plot_camera(ax, cw, ground_points=marker_3d[:4].squeeze(1))
            for i in range(len(upper_body_in_world)):
                # plot normal vector
                _, human_norm_vec = get_plane(upper_body_in_world[i][0], 
                                              upper_body_in_world[i][1],
                                              upper_body_in_world[i][2])
                tmp = 90 - angle_between_vectors(human_norm_vec, np.array([0,0,1]))
                print("person %d angle=%d" % (i, tmp))
                plot_arrow(ax, upper_body_in_world[i][0], human_norm_vec)
                # plot person plane
                #plot_person_plane(ax, upper_body_in_world[i][0], upper_body_in_world[i][1], upper_body_in_world[i][2])
            
        cv2.imshow('image', image)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
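
load_intrinsic_parameters and load_intrinsic_K read calibration results saved with np.savez. The key names below ('mtx', 'dist', 'K') are assumptions about the .npz layout, not confirmed by the examples:

import numpy as np

def load_intrinsic_parameters(path):
    # assumed layout: camera matrix under 'mtx', distortion coefficients under 'dist'
    data = np.load(path)
    return data['mtx'], data['dist']

def load_intrinsic_K(path):
    # assumed layout: 3x3 camera matrix under 'K'
    return np.load(path)['K']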