Example #1
    def get_shoulder_angles(self):

        # Find versors
        if self.joint_points3D is None or len(self.joint_points3D) == 0:
            return None, None

        p7 = self.joint_points3D[PyKinectV2.JointType_ShoulderRight]
        p4 = self.joint_points3D[PyKinectV2.JointType_ShoulderLeft]
        p13 = self.joint_points3D[PyKinectV2.JointType_SpineBase]

        # N1 versor
        normaN1 = math.sqrt((p7.x - p4.x)**2 + (p7.y - p4.y)**2 +
                            (p7.z - p4.z)**2)
        N1 = PyKinectV2._Joint()
        N1.x = ((p7.x - p4.x) / normaN1)
        N1.y = ((p7.y - p4.y) / normaN1)
        N1.z = ((p7.z - p4.z) / normaN1)

        # Tmp versor
        normaU = math.sqrt((p7.x - p13.x)**2 + (p7.y - p13.y)**2 +
                           (p7.z - p13.z)**2)
        U = PyKinectV2._Joint()
        U.x = ((p7.x - p13.x) / normaU)
        U.y = ((p7.y - p13.y) / normaU)
        U.z = ((p7.z - p13.z) / normaU)

        # N3 versor
        N3 = PyKinectV2._Joint()
        N3.x = N1.y * U.z - N1.z * U.y
        N3.y = N1.z * U.x - N1.x * U.z
        N3.z = N1.x * U.y - N1.y * U.x

        normaN3 = math.sqrt(N3.x**2 + N3.y**2 + N3.z**2)
        N3.x = N3.x / normaN3
        N3.y = N3.y / normaN3
        N3.z = N3.z / normaN3

        # N2 versor
        N2 = PyKinectV2._Joint()
        N2.x = N3.y * N1.z - N3.z * N1.y
        N2.y = N3.z * N1.x - N3.x * N1.z
        N2.z = N3.x * N1.y - N3.y * N1.x

        matrR = np.array([[N1.x, N1.y, N1.z], [N2.x, N2.y, N2.z],
                          [N3.x, N3.y, N3.z]])
        euler = self.rotationMatrixToEulerAngles(matrR)
        shoulder_pitch = euler[0] * 180 / np.pi
        shoulder_yaw = euler[1] * 180 / np.pi
        shoulder_roll = euler[2] * 180 / np.pi

        matrR = np.array([[N1.x, N1.y, N1.z, 0], [N2.x, N2.y, N2.z, 0],
                          [N3.x, N3.y, N3.z, 0], [0, 0, 0, 1]])
        quat = self.rotationMatrixToQuaternion(matrR)

        return {
            "roll": shoulder_roll,
            "pitch": shoulder_pitch,
            "yaw": shoulder_yaw
        }, quat
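
Example #1 calls two helpers that are not shown here, rotationMatrixToEulerAngles and rotationMatrixToQuaternion. For reference, a minimal sketch of the standard ZYX Euler decomposition that such a helper typically implements (an assumption, not necessarily this author's exact version):

    def rotationMatrixToEulerAngles(self, R):
        # Standard ZYX decomposition; returns [x, y, z] rotations in radians
        sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
        if sy > 1e-6:
            x = np.arctan2(R[2, 1], R[2, 2])
            y = np.arctan2(-R[2, 0], sy)
            z = np.arctan2(R[1, 0], R[0, 0])
        else:  # gimbal lock: cos(y) is close to zero
            x = np.arctan2(-R[1, 2], R[1, 1])
            y = np.arctan2(-R[2, 0], sy)
            z = 0.0
        return np.array([x, y, z])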
Example #2
 def read_raw_data(self, filename):
     fp = open("data/" + filename + ".csv", "r")
     self._frame_count = int(fp.readline())
     self._sample_count = int(fp.readline())
     self._gait_index = int(fp.readline())
     self._Bdata = np.zeros(self._frame_count, dtype=list)
     self._Fdata = np.zeros((self._frame_count, 4))
     self._time = np.zeros(self._frame_count)
     for i in range(self._frame_count):
         # Joint line: five values (x, y, z, tracking state, joint type) per joint
         data = fp.readline().split(',')
         t = [PyKinectV2._Joint() for _ in range(PyKinectV2.JointType_Count)]
         for j in range(0, PyKinectV2.JointType_Count * 5, 5):
             t[j // 5].Position.x = float(data[j + 0])
             t[j // 5].Position.y = float(data[j + 1])
             t[j // 5].Position.z = float(data[j + 2])
             t[j // 5].TrackingState = int(data[j + 3])
             t[j // 5].JointType = int(data[j + 4])
         self._Bdata[i] = t
         # Force line: four float channels
         data = fp.readline().split(',')
         self._Fdata[i] = [float(v) for v in data[:4]]
         # Timestamp line
         self._time[i] = float(fp.readline())
     fp.close()
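
The loader above implies a fixed record layout: three header lines (frame count, sample count, gait index), then three lines per frame (a joints line of JointType_Count * 5 comma-separated values, a four-value force line, and a timestamp line). A minimal sketch of a hypothetical write_raw_data counterpart that would emit the same layout:

 def write_raw_data(self, filename):
     # Hypothetical counterpart to read_raw_data above; mirrors its layout
     fp = open("data/" + filename + ".csv", "w")
     fp.write("%d\n%d\n%d\n" %
              (self._frame_count, self._sample_count, self._gait_index))
     for i in range(self._frame_count):
         fields = []
         for jt in self._Bdata[i]:
             fields += [str(jt.Position.x), str(jt.Position.y),
                        str(jt.Position.z), str(jt.TrackingState),
                        str(jt.JointType)]
         fp.write(','.join(fields) + '\n')
         fp.write(','.join(str(v) for v in self._Fdata[i]) + '\n')
         fp.write(str(self._time[i]) + '\n')
     fp.close()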
Example #3
 def getMakerXYZ(self, markercolumnid, markerrowid, markerdepth):
     if markerdepth > 0:
         point3d = self.kinect.mapper().MapDepthPointToCameraSpace(
             PyKinectV2._DepthSpacePoint(ctypes.c_float(markercolumnid), ctypes.c_float(markerrowid)),
             ctypes.c_ushort(markerdepth))
         return [point3d.x, point3d.y, point3d.z]
     else:
         return []
Example #4
def translate_body(x, y, z, joints):
    # Return a copy of the skeleton shifted by (x, y, z) in camera space
    res = [PyKinectV2._Joint() for _ in range(PyKinectV2.JointType_Count)]
    for i in range(PyKinectV2.JointType_Count):
        res[i].Position.x = joints[i].Position.x + x
        res[i].Position.y = joints[i].Position.y + y
        res[i].Position.z = joints[i].Position.z + z
        res[i].TrackingState = joints[i].TrackingState
        res[i].JointType = joints[i].JointType
    return res
Example #5
    def create_CoordinateMap(self):
        """ Function to create a point to point map of the spatial/pixel equivalences between the depth space, color space and
        camera space. This method requires the depth frame to assign a depth value to the color point.
        Returns:
            CoordinateMap: DataFrame with the x,y,z values of the depth frame; x,y equivalence between the depth space to camera space and
            real world values of x,y and z in meters
        """
        height, width = self.calib.s_height, self.calib.s_width
        x = numpy.arange(0, width)
        y = numpy.arange(0, height)
        xx, yy = numpy.meshgrid(x, y)
        xy_points = numpy.vstack([xx.ravel(), yy.ravel()]).T
        depth = self.kinect.get_frame()
        depth_x = []
        depth_y = []
        depth_z = []
        camera_x = []
        camera_y = []
        camera_z = []
        color_x = []
        color_y = []
        for i in range(len(xy_points)):
            x_point = xy_points[i, 0]
            y_point = xy_points[i, 1]
            z_point = depth[y_point][x_point]
            if z_point != 0:  # values that do not have depth information cannot be projected to the color space
                point = PyKinectV2._DepthSpacePoint(x_point, y_point)
                col = self.kinect.device._mapper.MapDepthPointToColorSpace(
                    point, z_point)
                cam = self.kinect.device._mapper.MapDepthPointToCameraSpace(
                    point, z_point)
                # the color camera and the depth sensor sit at different positions,
                # so their coverage is not identical, especially at the edges
                if col.y > 0:
                    depth_x.append(x_point)
                    depth_y.append(y_point)
                    depth_z.append(z_point)
                    camera_x.append(cam.x)
                    camera_y.append(cam.y)
                    camera_z.append(cam.z)
                    # TODO: correction offsets added because the depth-to-color
                    # mapping is not pixel-exact
                    color_x.append(int(col.x) + self._correction_x)
                    color_y.append(int(col.y) - self._correction_y)

        self.CoordinateMap = pd.DataFrame({
            'Depth_x': depth_x,
            'Depth_y': depth_y,
            'Depth_Z(mm)': depth_z,
            'Color_x': color_x,
            'Color_y': color_y,
            'Camera_x(m)': camera_x,
            'Camera_y(m)': camera_y,
            'Camera_z(m)': camera_z
        })

        return self.CoordinateMap
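
One way to use the resulting DataFrame is to look up, for a given depth pixel, its color-space pixel and camera-space position; a minimal usage sketch (the instance name projector is an assumption):

projector_map = projector.create_CoordinateMap()
# Rows for depth pixel (x=256, y=212), if it had valid depth
row = projector_map[(projector_map['Depth_x'] == 256) &
                    (projector_map['Depth_y'] == 212)]
print(row[['Color_x', 'Color_y', 'Camera_x(m)', 'Camera_y(m)', 'Camera_z(m)']])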
Example #6
 def acquireCameraSpace(self, depthpoints, depths):
     n = len(depthpoints)
     assert n == len(depths)
     points_cam = []
     for i in range(n):
         depthpoint = PyKinectV2._DepthSpacePoint()
         depthpoint.x = depthpoints[i][0]
         depthpoint.y = depthpoints[i][1]
         space_point = self._kinect.depth_to_camera(depthpoint, depths[i])
         points_cam.extend([space_point.x, space_point.y, space_point.z])
     # Build the flat [x0, y0, z0, x1, ...] array once, instead of
     # reallocating with np.append on every point
     return np.array(points_cam)
Example #7
def rotate_body(R, joints):
    # Return a copy of the skeleton with each joint position rotated by the
    # 3x3 matrix R
    res = [PyKinectV2._Joint() for _ in range(PyKinectV2.JointType_Count)]
    for i in range(PyKinectV2.JointType_Count):
        t = np.dot(
            R,
            np.array([
                joints[i].Position.x, joints[i].Position.y,
                joints[i].Position.z
            ]))
        res[i].Position.x = t[0]
        res[i].Position.y = t[1]
        res[i].Position.z = t[2]
        res[i].TrackingState = joints[i].TrackingState
        res[i].JointType = joints[i].JointType
    return res
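
Examples #4 and #7 compose naturally: rotate first, then translate. A short usage sketch, assuming joints is the joint array of a tracked body and rotating the skeleton 90 degrees about the vertical axis before shifting it one meter along z:

theta = np.pi / 2
# Rotation about the y (vertical) axis
R = np.array([[np.cos(theta), 0.0, np.sin(theta)],
              [0.0, 1.0, 0.0],
              [-np.sin(theta), 0.0, np.cos(theta)]])
moved = translate_body(0.0, 0.0, 1.0, rotate_body(R, joints))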
Example #8
    def getPointCloud(self):
        while True:
            # Wait for a fresh depth frame, then build the cloud once and stop
            if self.kinect.has_new_depth_frame():
                dframe = self.kinect.get_last_depth_frame()
                dwidth = self.kinect.depth_frame_desc.Width
                dheight = self.kinect.depth_frame_desc.Height

                pcd = []
                for i in range(dheight):
                    for j in range(dwidth):
                        depth = dframe[i * dwidth + j]
                        if depth > 0:
                            point3d = self.kinect.mapper().MapDepthPointToCameraSpace(
                                PyKinectV2._DepthSpacePoint(ctypes.c_float(j), ctypes.c_float(i)), ctypes.c_ushort(depth))
                            # pcd.append((point3d.x*1000.0, point3d.y*1000.0, point3d.z*1000.0))
                            pcd.append((point3d.x, point3d.y, point3d.z))
                break

        return pcd
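
getPointCloud returns a plain list of (x, y, z) tuples in meters; a short sketch of converting it to a NumPy array and dumping it to a text .xyz file (the instance name sensor is an assumption):

points = np.asarray(sensor.getPointCloud())  # shape (N, 3), meters
np.savetxt('cloud.xyz', points, fmt='%.4f')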
Example #9
    def __getXYZForPixel(self, dframe, px):
        """
        Convert a depth frame pixel to a 3D point

        :param dframe: depthframe from pykinectv2
        :param px: 1-by-2 list
        :return: 1-by-3 list [x, y, z] or None

        author: weibo
        date: 20180110
        """

        u, v = int(px[0]), int(px[1])
        z3d = dframe[v * self.__depth_width + u]

        if z3d <= 0:
            return None

        point3d = self._mapper.MapDepthPointToCameraSpace(
            PyKinectV2._DepthSpacePoint(ctypes.c_float(u), ctypes.c_float(v)), ctypes.c_ushort(z3d))

        return [point3d.x * 1000.0, point3d.y * 1000.0, point3d.z * 1000.0]
Example #10
The snippet below is a fragment of a larger capture loop: depthImageRGB, filterdDepthImage, depthImageData, cont and kinect are defined earlier in that loop.
            cont = sorted(cont, key=cv2.contourArea, reverse=True)
            # loop over the contours
            for c in cont:
                # approximate the contour
                per = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.1 * per, True)
                # check number of points of contour
                if len(approx) == 4:
                    cv2.drawContours(depthImageRGB, [approx], -1, (0, 255, 0),
                                     2)
                    mom = cv2.moments(approx)
                    if mom['m00'] == 0:  # degenerate contour, no centroid
                        continue
                    x = int(mom['m10'] / mom['m00'])
                    y = int(mom['m01'] / mom['m00'])
                    cv2.circle(depthImageRGB, (x, y), 2, (0, 255, 0), -1)
                    depthPoint = PyKinectV2._DepthSpacePoint(x, y)
                    cameraPoint = kinect._mapper.MapDepthPointToCameraSpace(
                        depthPoint, depthImageData[y, x])
                    print("X: ", cameraPoint.x, " Y: ", cameraPoint.y, " Z: ",
                          cameraPoint.z)
                    break

        # Draw frames using OpenCV; exit on ESC
        cv2.imshow('Depth image', depthImageRGB)
        cv2.imshow('Filtered depth image', filterdDepthImage)
        if cv2.waitKey(1) == 27:
            break
Example #11
 def myMapDepthPointToCameraSpace(self, kinect, pt_depth, depth_value):
     # Wrap the (column, row) pixel and raw depth into ctypes values and map
     # them to a 3D camera-space point
     pt_d = PyKinectV2._DepthSpacePoint(pt_depth[0], pt_depth[1])
     depth_v = ctypes.c_ushort(depth_value)
     camera_point = kinect._mapper.MapDepthPointToCameraSpace(pt_d, depth_v)
     return camera_point