Example No. 1
0
    def __init__(self, invert_camera=False):
        """
        Build the stereo-capture demo: OpenCV preview windows with tuning
        trackbars, two VideoCapture devices, a face detector, an Open3D
        visualizer, and the dataset/video output layout.

        :param invert_camera: if True, swap which physical device index
                              (0 or 1) is opened as the left/right camera.
        """
        # Names of the OpenCV preview windows.
        self.win_depth = "depth"
        self.win_left = "left_rectified"
        self.win_right = "right_rectified"
        self.win_disparity = "disparity"
        self.win_colormap = "colormap"
        # The depth window must exist before trackbars can attach to it.
        cv2.namedWindow(self.win_depth)
        # Stereo-matcher tuning sliders; the no-op lambda is the required callback.
        cv2.createTrackbar("num", self.win_depth, 2, 10, lambda x: None)
        cv2.createTrackbar("blockSize", self.win_depth, 5, 255, lambda x: None)
        # Register mouse callbacks on the windows that display results.
        self.ecv = image_processing.EventCv()
        self.ecv.add_mouse_event(self.win_depth)
        self.ecv.add_mouse_event(self.win_disparity)
        self.ecv.add_mouse_event(self.win_colormap)

        camera_height = camera_configs.camera_height
        camera_width = camera_configs.camera_width
        input_size = [camera_height, camera_width]  # NOTE(review): unused unless the pose model below is re-enabled
        # self.pose = openpose.OpenPose(model_path, input_size=input_size)
        if invert_camera:
            self.camera1 = cv2.VideoCapture(1)  # left camera1
            self.camera2 = cv2.VideoCapture(0)  # right camera2
        else:
            self.camera1 = cv2.VideoCapture(0)  # left camera1
            self.camera2 = cv2.VideoCapture(1)  # right camera2

        self.detector = UltraLightFaceDetector(model_path=None, network=None, )
        # NOTE(review): depth_height is taken from camera_configs.camera_height
        # while depth_width uses camera_configs.depth_width — confirm this
        # asymmetry is intentional and not a copy-paste slip.
        self.g = open3d_visual.Open3DVisual(camera_intrinsic=camera_configs.camera_intrinsic,
                                            depth_width=camera_configs.depth_width,
                                            depth_height=camera_configs.camera_height,
                                            depth_scale=camera_configs.depth_scale,
                                            clipping_distance_in_meters=camera_configs.clipping_distance_in_meters)
        self.g.show_origin_pcd()
        self.g.show_bone_line_pcd()
        self.g.show_image_pcd(True)

        # Dataset layout: dataset/<real_part|fake_part>/<class_name>/{color,depth,video}
        class_name = "ttt"
        real_part = True
        # real_part = False
        dataset_root = "dataset/"
        self.scale = 1
        self.prefix = "v"
        if real_part:
            self.snapshot_dir = os.path.join(dataset_root, "real_part", class_name)
        else:
            self.snapshot_dir = os.path.join(dataset_root, "fake_part", class_name)
        file_processing.create_dir(self.snapshot_dir, "color")
        file_processing.create_dir(self.snapshot_dir, "depth")
        # file_processing.create_dir(self.snapshot_dir, "ir")
        file_processing.create_dir(self.snapshot_dir, "video")
        # Timestamp keeps successive recordings from overwriting each other.
        video_name = file_processing.get_time()
        self.save_video = os.path.join(self.snapshot_dir, "video", "{}_{}.avi".format(class_name, video_name))
        # NOTE(review): os.path.join always yields a non-empty string here,
        # so this guard is effectively always True.
        if self.save_video:
            self.video_writer = self.get_video_writer(self.save_video,
                                                      width=640,
                                                      height=480,
                                                      fps=20)  # (424, 512, 4)
    def __init__(self, calibration_file, width=640, height=480):
        """
        Load stereo calibration, set up the Open3D point-cloud viewer, a
        face detector, the dataset directory layout, and one video writer
        per (left/right) camera stream.

        :param calibration_file: path to the stereo calibration data
                                 consumed by camera_params.
        :param width: frame width in pixels (also used for the writers).
        :param height: frame height in pixels (also used for the writers).
        """
        # Stereo coefficients; "K1" is used as the camera intrinsic below.
        self.config = camera_params.get_stereo_coefficients(
            calibration_file, width, height)
        self.pcd = open3d_visual.Open3DVisual(
            camera_intrinsic=self.config["K1"],
            depth_width=width,
            depth_height=height)

        self.detector = UltraLightFaceDetector(
            model_path=None,
            network=None,
        )

        # Dataset layout: dataset/<real_part|fake_part>/<class_name>/{color,depth,ir,video}
        class_name = "ttt"
        real_part = True
        # real_part = False
        save_root = "dataset/"
        self.scale = 1
        self.prefix = "v"
        if real_part:
            self.save_dir = os.path.join(save_root, "real_part", class_name)
        else:
            self.save_dir = os.path.join(save_root, "fake_part", class_name)
        file_processing.create_dir(self.save_dir, "color")
        file_processing.create_dir(self.save_dir, "depth")
        file_processing.create_dir(self.save_dir, "ir")
        file_processing.create_dir(self.save_dir, "video")
        # Timestamp keeps successive recordings from overwriting each other.
        video_name = file_processing.get_time()
        self.save_l_video = os.path.join(
            self.save_dir, "video",
            "left_{}_{}.avi".format(class_name, video_name))
        self.save_r_video = os.path.join(
            self.save_dir, "video",
            "right_{}_{}.avi".format(class_name, video_name))
        # NOTE(review): both paths are non-empty strings, so these guards
        # are effectively always True.
        if self.save_l_video:
            self.video_l_writer = self.get_video_writer(self.save_l_video,
                                                        width=width,
                                                        height=height,
                                                        fps=30)
        if self.save_r_video:
            self.video_r_writer = self.get_video_writer(self.save_r_video,
                                                        width=width,
                                                        height=height,
                                                        fps=30)

        self.pcd.show_image_pcd(True)
        self.pcd.show_origin_pcd(True)
        # NOTE(review): show_image_pcd(True) is called a second time here
        # (also two lines above) — confirm whether the repeat is intentional.
        self.pcd.show_image_pcd(True)
Example No. 3
0
    def __init__(self):
        """
        Initialize the Kinect v2 runtime with the body, color, infrared
        and depth streams enabled, cache each stream's frame dimensions,
        and prepare the face detector, the 3-D pose geometry helper, the
        dataset directories, and an output video writer.
        """
        # Kinect runtime object
        self.joint_count = PyKinectV2.JointType_Count  # 25
        self.kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Body
            | PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Infrared
            | PyKinectV2.FrameSourceTypes_Depth)
        # Per-stream frame sizes as reported by the device descriptors.
        self.depth_width, self.depth_height = self.kinect.depth_frame_desc.Width, self.kinect.depth_frame_desc.Height
        self.color_width, self.color_height = self.kinect.color_frame_desc.Width, self.kinect.color_frame_desc.Height
        self.ir_width, self.ir_height = self.kinect.infrared_frame_desc.Width, self.kinect.infrared_frame_desc.Height
        self.g = geometry_3d_pose.Geometry3DPose(kinect_config)

        self.detector = UltraLightFaceDetector(
            model_path=None,
            network=None,
        )
        # Dataset layout: dataset/<real_part|fake_part>/<class_name>/{color,depth,ir,video}
        class_name = "0"
        # real_part = True
        real_part = False
        dataset_root = "dataset/"
        # The video writer below uses depth frame size multiplied by this factor.
        self.scale = 2
        self.prefix = "v1"
        if real_part:
            self.snapshot_dir = os.path.join(dataset_root, "real_part",
                                             class_name)
        else:
            self.snapshot_dir = os.path.join(dataset_root, "fake_part",
                                             class_name)
        file_processing.create_dir(self.snapshot_dir, "color")
        file_processing.create_dir(self.snapshot_dir, "depth")
        file_processing.create_dir(self.snapshot_dir, "ir")
        file_processing.create_dir(self.snapshot_dir, "video")
        # Timestamp keeps successive recordings from overwriting each other.
        video_name = file_processing.get_time()
        self.save_video = os.path.join(
            self.snapshot_dir, "video",
            "{}_{}.avi".format(class_name, video_name))
        # NOTE(review): os.path.join yields a non-empty string, so this
        # guard is effectively always True.
        if self.save_video:
            self.video_writer = self.get_video_writer(
                self.save_video,
                width=self.depth_width * self.scale,
                height=self.depth_height * self.scale,
                fps=20)  # (424, 512, 4)
        :return:
        """
        video_cap = image_processing.get_video_capture(video_path)
        width, height, numFrames, fps = image_processing.get_video_info(
            video_cap)
        # freq = int(fps / detect_freq)
        count = 0
        while True:
            isSuccess, frame = video_cap.read()
            if not isSuccess:
                break
            if count % freq == 0:
                out_frame = self.do_something(frame)
                path = os.path.join(save_dir, "{:0=6d}.jpg".format(count))
                cv2.imwrite(path, out_frame)
            count += 1
        video_cap.release()

    def do_something(self, frame):
        """
        Per-frame processing hook; the default implementation returns the
        frame unchanged. Subclasses may override this to transform frames
        before they are written out.

        :param frame: the input video frame.
        :return: the processed frame (identity by default).
        """
        # The original body contained a redundant ``pass`` statement before
        # the return; removed.
        return frame


if __name__ == "__main__":
    # NOTE(review): the original bound this to the name ``time``, shadowing
    # the stdlib module, and never used it afterwards. The call is kept in
    # case file_processing.get_time() has side effects; only renamed.
    timestamp = file_processing.get_time()
    cvv = CVVideo()
    # Convert the frame images in image_dir into a single .avi video.
    image_dir = "/media/dm/dm/FaceRecognition/face_cpp/outputs"
    save_video = "/media/dm/dm/FaceRecognition/face_cpp/demo.avi"
    cvv.convert_images2video(image_dir, save_video)
    # cvv.convert_video2images(save_video, image_dir)