Example #1
0
File: test.py — Project: zoom1539/CenterHMR
    def webcam_run_local(self, video_file_path=None):
        '''
        Run the model on a local webcam (or a video file) and visualize
        each detected mesh with Open3D until the visualizer signals stop.

        20.9 FPS of forward prop. on 1070Ti

        Args:
            video_file_path: optional path to a video file; when None the
                default camera stream is used (handled by OpenCVCapture).
        '''
        print('run on local')
        # Demo-only dependencies, imported locally on purpose.
        # (Removed unused `import keyboard` and unused `Image_Reader`.)
        from utils.demo_utils import OpenCVCapture, Open3d_visualizer
        capture = OpenCVCapture(video_file_path)
        visualizer = Open3d_visualizer()

        # Warm-up: a few dummy forward passes so lazy CUDA initialization
        # does not pollute the timing below.
        for i in range(10):
            self.single_image_forward(np.zeros((512, 512, 3), dtype=np.uint8))
        counter = Time_counter(thresh=1)
        while True:
            frame = capture.read()
            if frame is None:  # dropped/unreadable frame: try again
                continue

            counter.start()
            with torch.no_grad():  # inference only, skip autograd
                outputs = self.single_image_forward(frame)
            counter.count()
            counter.fps()

            if outputs is not None and outputs['success_flag']:
                verts = outputs['verts'][0].cpu().numpy()
                # Scale/translate vertices into the visualizer's view space.
                verts = verts * 50 + np.array([0, 0, 100])
                break_flag = visualizer.run(verts, frame)
                if break_flag:
                    break
Example #2
0
    def process_video(self, video_file_path=None):
        '''
        Run inference on every frame of a video, save collected parameter
        results as an .npz next to the input (if enabled), and write a
        rendered result video.

        Args:
            video_file_path: path to the input .mp4 video.
        '''
        # Removed unused `import keyboard` (third-party, side-effectful).
        from utils.demo_utils import OpenCVCapture, frames2video
        capture = OpenCVCapture(video_file_path)
        video_length = int(capture.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        results, result_frames = {}, []
        for frame_id in range(video_length):
            print('Processing video {}/{}'.format(frame_id, video_length))
            frame = capture.read()
            if frame is None:  # decoder hiccup: skip unreadable frames
                continue
            with torch.no_grad():  # inference only
                outputs = self.single_image_forward(frame)
            vis_dict = {'image_org': outputs['meta_data']['image_org'].cpu()}
            # Key each frame's result by "<video basename>_<frame index>".
            single_batch_results = self.reorganize_results(
                outputs,
                [os.path.basename(video_file_path) + '_' + str(frame_id)],
                outputs['reorganize_idx'].cpu().numpy())
            results.update(single_batch_results)
            vis_eval_results = self.visualizer.visulize_result_onorg(
                outputs['verts'],
                outputs['verts_camed'],
                vis_dict,
                reorganize_idx=outputs['reorganize_idx'].cpu().numpy())
            result_frames.append(vis_eval_results[0])

        if self.save_dict_results:
            # Compute the target path once instead of twice.
            save_path = video_file_path.replace('.mp4', '_results.npz')
            print('Saving parameter results to {}'.format(save_path))
            np.savez(save_path, results=results)

        video_save_name = video_file_path.replace('.mp4', '_results.mp4')
        print('Writing results to {}'.format(video_save_name))
        frames2video(result_frames, video_save_name, fps=args.fps_save)
Example #3
0
 def image_put(self, q):
     """Producer loop: stream camera frames into queue *q* forever.

     When more than two frames are already buffered, the oldest one is
     discarded first so the consumer always sees near-live frames.
     """
     self.capture = OpenCVCapture()
     time.sleep(3)  # give the camera a moment to initialize
     while True:
         if q.qsize() > 2:
             q.get()  # drop the stalest buffered frame
         frame = self.capture.read()
         q.put(frame)
Example #4
0
File: test.py — Project: majunfu/ROMP
    def process_video(self, video_file_path=None):
        '''
        Run inference on every frame of a video and, depending on flags,
        save per-frame meshes, a parameter .npz, and a rendered result
        video into self.output_dir.

        Args:
            video_file_path: path to the input video file.
        '''
        # Removed unused `import keyboard` (third-party, side-effectful).
        from utils.demo_utils import OpenCVCapture, frames2video
        capture = OpenCVCapture(video_file_path)
        video_length = int(capture.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        video_basename = get_video_bn(video_file_path)
        print('Processing {}, saving to {}'.format(video_file_path,
                                                   self.output_dir))
        os.makedirs(self.output_dir, exist_ok=True)
        # Fall back to the video's own directory if the requested output
        # directory is still not usable.
        if not os.path.isdir(self.output_dir):
            self.output_dir = video_file_path.replace(
                os.path.basename(video_file_path), '')

        results, result_frames = {}, []
        for frame_id in range(video_length):
            print('Processing video {}/{}'.format(frame_id, video_length))
            frame = capture.read()
            with torch.no_grad():  # inference only
                outputs = self.single_image_forward(frame)
            vis_dict = {'image_org': outputs['meta_data']['image_org'].cpu()}
            img_paths = [str(frame_id)]  # single-image batch, keyed by frame index
            single_batch_results = self.reorganize_results(
                outputs, img_paths, outputs['reorganize_idx'].cpu().numpy())
            results.update(single_batch_results)
            vis_eval_results = self.visualizer.visulize_result_onorg(
                outputs['verts'],
                outputs['verts_camed'],
                vis_dict,
                reorganize_idx=outputs['reorganize_idx'].cpu().numpy())
            result_frames.append(vis_eval_results[0])
            outputs['meta_data']['imgpath'] = img_paths
            if self.save_mesh:
                save_meshes(outputs['reorganize_idx'].cpu().numpy(), outputs,
                            self.output_dir, self.smpl_faces)

        if self.save_dict_results:
            save_dict_path = os.path.join(self.output_dir,
                                          video_basename + '_results.npz')
            print('Saving parameter results to {}'.format(save_dict_path))
            np.savez(save_dict_path, results=results)

        if self.save_video_results:
            video_save_name = os.path.join(self.output_dir,
                                           video_basename + '_results.mp4')
            print('Writing results to {}'.format(video_save_name))
            frames2video(result_frames, video_save_name, fps=args.fps_save)
Example #5
0
 def process_video(self, video_file_path=None):
     '''
     Run inference on every frame of a video and write a rendered result
     video next to the input file.

     Args:
         video_file_path: path to the input .mp4 video.
     '''
     # Removed unused `import keyboard` (third-party, side-effectful).
     from utils.demo_utils import OpenCVCapture, frames2video
     capture = OpenCVCapture(video_file_path)
     video_length = int(capture.cap.get(cv2.CAP_PROP_FRAME_COUNT))
     result_frames = []
     for frame_id in range(video_length):
         print('Processing video {}/{}'.format(frame_id, video_length))
         frame = capture.read()
         with torch.no_grad():  # inference only
             # [:, :, ::-1] flips OpenCV's BGR channel order to RGB.
             outputs = self.single_image_forward(frame[:, :, ::-1])
         vis_dict = {'image_org': outputs['input_image'].cpu()}
         vis_eval_results = self.visualizer.visulize_result_onorg(
             outputs['verts'], outputs['verts_camed'], vis_dict,
             reorganize_idx=outputs['reorganize_idx'])
         result_frames.append(vis_eval_results[0])
     video_save_name = video_file_path.replace('.mp4', '_results.mp4')
     print('Writing results to {}'.format(video_save_name))
     frames2video(result_frames, video_save_name, fps=30)
Example #6
0
File: test.py — Project: human2b/CenterHMR
    def webcam_run_local(self):
        '''
        Run the model on the default local webcam and visualize each
        detected mesh with Open3D until the visualizer signals stop.
        '''
        # Removed unused `import keyboard` (third-party, side-effectful).
        from utils.demo_utils import OpenCVCapture, Open3d_visualizer
        capture = OpenCVCapture()
        visualizer = Open3d_visualizer()

        while True:
            frame = capture.read()
            if frame is None:  # dropped/unreadable frame: try again
                continue
            with torch.no_grad():  # inference only
                outputs = self.single_image_forward(frame)
            if outputs is not None:
                verts = outputs['verts'][0].cpu().numpy()
                # Scale/translate vertices into the visualizer's view space.
                verts = verts * 50 + np.array([0, 0, 100])
                break_flag = visualizer.run(verts, frame)
                if break_flag:
                    break
Example #7
0
File: test.py — Project: majunfu/ROMP
    def webcam_run_local(self, video_file_path=None):
        '''
        Run the model on a local webcam (or a video file) and visualize
        detections until the visualizer signals stop. Uses a textured
        vedo visualizer or Open3D depending on args.webcam_mesh_color.

        20.9 FPS of forward prop. on 1070Ti

        Args:
            video_file_path: optional path to a video file; when None the
                default camera stream is used (handled by OpenCVCapture).
        '''
        print('run on local')
        # Removed unused `import keyboard` and unused `Image_Reader`.
        if 'tex' in args.webcam_mesh_color:
            from utils.demo_utils import vedo_visualizer as Visualizer
        else:
            from utils.demo_utils import Open3d_visualizer as Visualizer
        from utils.demo_utils import OpenCVCapture
        capture = OpenCVCapture(video_file_path)
        visualizer = Visualizer()
        print('Initialization is done')  # fixed typo: was "down"

        # Warm-up: a few dummy forward passes so lazy CUDA initialization
        # does not pollute the timing below.
        for i in range(10):
            self.single_image_forward(np.zeros((512, 512, 3), dtype=np.uint8))
        counter = Time_counter(thresh=1)
        while True:
            frame = capture.read()
            if frame is None:  # dropped/unreadable frame: try again
                continue

            counter.start()
            with torch.no_grad():  # inference only
                outputs = self.single_image_forward(frame)
            counter.count()
            counter.fps()

            if outputs is not None and outputs['detection_flag']:
                if args.show_single:
                    # Show only the first detected person.
                    verts = outputs['verts'].cpu().numpy()
                    verts = verts * 50 + np.array([0, 0, 100])
                    break_flag = visualizer.run(verts[0], frame)
                else:
                    # Show all detected people at their camera positions.
                    verts = outputs['verts_camed'].cpu().numpy()
                    verts = verts * 50 + np.array([0, 0, 100])
                    break_flag = visualizer.run_multiperson(verts, frame)
                if break_flag:
                    break
Example #8
0
class Multiprocess(Base):
    """Three-process webcam demo pipeline: capture -> inference -> display.

    A capture process streams frames into a queue, an inference process
    runs the network on each frame, and a display process visualizes the
    results with Open3D. Construction starts the pipeline immediately.
    """

    def __init__(self):
        # Deliberately skips Base.__init__ here: each worker process does
        # its own heavy setup (see image_get). Blocks until workers join.
        self.run_single_camera()

    def set_up_model_pool(self):
        """Build a pool of self.model_number independent model instances."""
        self.model_pool = []
        for i in range(self.model_number):
            self.model_pool.append(Base())

    def single_image_forward(self, image):
        """Resize/pad one HxWx3 uint8 image to the network input size and
        run a test-mode forward pass, returning the raw network outputs."""
        image_size = image.shape[:2][::-1]  # (width, height)
        image_org = Image.fromarray(image)

        # Scale so the longer side equals input_size, rounded down to an
        # even number of pixels, then pad symmetrically to a square.
        # NOTE: np.int was removed in NumPy 1.24; the builtin int is the
        # documented drop-in replacement.
        resized_image_size = (float(self.input_size) / max(image_size) *
                              np.array(image_size) // 2 * 2).astype(
                                  int)[::-1]
        padding = tuple((self.input_size - resized_image_size)[::-1] // 2)
        transform = torchvision.transforms.Compose([
            # interpolation=3 corresponds to PIL bicubic resampling.
            torchvision.transforms.Resize(resized_image_size, interpolation=3),
            torchvision.transforms.Pad(padding,
                                       fill=0,
                                       padding_mode='constant'),
        ])
        image = torch.from_numpy(np.array(
            transform(image_org))).unsqueeze(0).cuda().contiguous().float()
        outputs, centermaps, heatmap_AEs, _, reorganize_idx = self.net_forward(
            None, self.generator, image, mode='test')
        return outputs

    def image_put(self, q):
        """Producer loop: continuously push camera frames into queue q,
        discarding the oldest frame when more than two are buffered."""
        self.capture = OpenCVCapture()
        time.sleep(3)  # give the camera a moment to initialize
        while True:
            if q.qsize() > 2:
                q.get()  # drop the stalest buffered frame
            q.put(self.capture.read())

    def image_get(self, q, q_vis):
        """Inference loop: build the model in this process, then forward
        each frame from q and push (frame, outputs) pairs into q_vis."""
        # Heavy setup happens here, inside the worker process, so CUDA
        # state is not shared across fork boundaries.
        super(Multiprocess, self).__init__()
        self.set_up_smplx()
        self._build_model()
        self.generator.eval()
        # Warm-up forwards so lazy CUDA initialization happens up front.
        for i in range(10):
            self.single_image_forward(np.zeros((512, 512, 3)).astype(np.uint8))
        while True:
            try:
                frame = q.get()
                with torch.no_grad():  # inference only
                    outputs = self.single_image_forward(frame)
                q_vis.put((frame, outputs))
            except Exception as error:
                # Any failure tears down the whole pipeline.
                print(error)
                self.endprocess()

    def show_results(self, q):
        '''
        Display loop: pop (frame, outputs) pairs from q and visualize them.

        17.5 FPS of entire process on 1080
        '''
        self.visualizer = Open3d_visualizer()
        self.counter = Time_counter(thresh=0.1)
        time.sleep(4)  # let upstream processes fill the queue first
        start_flag = 1
        while True:
            try:
                if start_flag:
                    # Start timing only once, before the first frame.
                    self.counter.start()
                frame, outputs = q.get()
                start_flag = 0
                break_flag = self.visualize(frame, outputs)
                self.counter.count()
                self.counter.fps()
                if break_flag:
                    self.endprocess()
            except Exception as error:
                # Best-effort display: log and keep going.
                print(error)
                #self.endprocess()

    def visualize(self, frame, outputs):
        """Render the first detected mesh over the frame; return the
        visualizer's break flag (True when the user asked to quit)."""
        verts = outputs['verts'][0].cpu().numpy()
        # Scale/translate vertices into the visualizer's view space.
        verts = verts * 50 + np.array([0, 0, 100])
        break_flag = self.visualizer.run(verts, frame)
        return break_flag

    def run_single_camera(self):
        """Spawn the capture, inference, and display processes wired
        together by two small bounded queues, and wait for them."""
        queue = mp.Queue(maxsize=3)
        queue_vis = mp.Queue(maxsize=3)
        self.processes = [
            mp.Process(target=self.image_put, args=(queue, )),
            mp.Process(target=self.image_get, args=(
                queue,
                queue_vis,
            )),
            mp.Process(target=self.show_results, args=(queue_vis, ))
        ]

        [process.start() for process in self.processes]
        [process.join() for process in self.processes]

    def endprocess(self):
        """Terminate and reap every pipeline process."""
        [process.terminate() for process in self.processes]
        [process.join() for process in self.processes]