def _process(self, videofile):
    """Job 1: run batched detection over *videofile*.

    Detects target types, timestamps, coordinates and their
    relationships, and hands the accumulated result to
    ``_after_precessing`` which writes a JSON file to the output
    directory.

    videofile: path of the video file to process.
    """
    print('开始任务1:%s' % videofile)
    stream = None
    writer = None
    final = self._defaultReturn(getId(videofile))
    try:
        stream, videoinfo = readVideo(videofile)
        outputfile = os.path.join(self.outputpath, os.path.basename(videofile))
        # NOTE(review): fps=30 // self.skip assumes a 30 fps source — confirm.
        writer = videoWriter(outputfile,
                             scale=(videoinfo['width'], videoinfo['height']),
                             fps=30 // self.skip)
        final['job2_video'] = outputfile  # job 2 re-reads this intermediate video
        frames = videoinfo['frames']
        queue, ts = [], []
        for t in tqdm(range(0, frames, self.skip)):
            stream.set(1, t)  # 1 == cv2.CAP_PROP_POS_FRAMES: seek to frame t
            retval, frame = stream.read()
            if not retval:
                break
            queue.append(frame)
            ts.append(t)
            if len(queue) == self.batchsize:
                self._handle(ts, queue, final, writer)
                queue = []
                ts = []
        if queue:  # flush the final partial batch
            self._handle(ts, queue, final, writer)
        print('任务1:%s完成' % videofile)
    except Exception as e:
        final['status'] = 'fail'
        final['error'] = 'At job1,' + str(e)
        if self.config['debug']:
            raise  # bare raise preserves the original traceback
    finally:
        if stream is not None:
            stream.release()
        if writer is not None:
            writer.release()
        # Always persist the result, even on failure (status == 'fail').
        self._after_precessing(final, videofile)
def _process(self, filename):
    """Job 2: attach face boxes/landmarks to the job-1 result.

    Loads the persisted job-1 result (via ``_defaultReturn``); when job 1
    succeeded, re-reads the intermediate video, runs MTCNN face detection
    on every frame, inserts the detections into the tracked objects for
    that frame, and persists the updated result.

    filename: path of the original video file.
    """
    print('开始任务2:%s' % filename)
    stream = None
    final = self._defaultReturn(filename)
    try:
        # Only proceed when job 1 produced a usable intermediate video.
        if final['status'] == 'success':
            videofile = final['job2_video']
            stream, videoinfo = readVideo(videofile)
            frames = videoinfo['frames']
            for t in tqdm(range(frames)):
                retval, frame = stream.read()
                if not retval:
                    break
                frame = frame[:, :, ::-1]  # BGR -> RGB for PIL / MTCNN
                faceboxes, landmarks = self.det.detect_faces(
                    Image.fromarray(frame),
                    self.config['mtcnn']['min_face_size'],
                    self.config['mtcnn']['thresholds'])
                objs_at_t = final['track']['objs'][t]
                self._insertFace(objs_at_t, faceboxes, landmarks)
            self._after_precessing(final, videofile, filename)
            print('任务2:%s完成' % videofile)
    except Exception as e:
        final['status'] = 'fail'
        final['error'] = 'At job2,' + str(e)
        if self.config['debug']:
            raise  # bare raise preserves the original traceback
    finally:
        if stream is not None:
            stream.release()
def _process(self, filename):
    """Job 3: run the batched per-face model over the job-2 result.

    When the previous jobs succeeded, crops every tracked object carrying
    a 'face_box' from the intermediate video, feeds the crops to
    ``_handle`` in batches of ``self.batchsize``, and persists the result.

    filename: path of the original video file.
    """
    print('开始任务3:%s' % filename)
    stream = None
    final = self._defaultReturn(filename)
    queue = []
    try:
        if final['status'] == 'success':
            videofile = final['job3_video']
            stream, videoinfo = readVideo(videofile)
            frames = videoinfo['frames']
            for t in tqdm(range(frames)):
                retval, frame = stream.read()
                if not retval:
                    break
                objs_at_t = final['track']['objs'][t]
                for obj in objs_at_t:
                    if 'face_box' in obj:
                        queue.append(self._get_input(frame, obj))
                    if len(queue) == self.batchsize:
                        self._handle(queue)
                        # Rebinding drops the old batch; explicit `del` is redundant.
                        queue = []
            if queue:  # flush the final partial batch
                self._handle(queue)
            self._after_precessing(final, videofile, filename)
            print('任务3:%s完成' % videofile)
    except Exception as e:
        final['status'] = 'fail'
        final['error'] = 'At job3,' + str(e)
        if self.config['debug']:
            raise  # bare raise preserves the original traceback
    finally:
        if stream is not None:
            stream.release()
        # NOTE(review): prints "完成" even when status == 'fail' — confirm intent.
        print('任务3:%s完成' % filename)
for i in range(cnt):
    # Each annotation line holds: x, y, width, height (space-separated).
    lines = fs.readline().strip().split(' ')
    x1, y1 = int(lines[0]), int(lines[1])
    # Convert width/height to the bottom-right corner.
    x2, y2 = x1 + int(lines[2]), y1 + int(lines[3])
    plot_one_box([x1, y1, x2, y2], I, None)
# NOTE(review): this loop and return belong to a function defined above
# this chunk — verify their indentation against the enclosing def.
return I


if __name__ == '__main__':
    # Ad-hoc smoke test: copy a 25-second window of a video to an AVI file.
    imgpath = '/home/zxk/AI/faceswap-GAN/videoplayback.mp4'
    # imgpath = '/home/zxk/AI/faceswap-GAN/videoplayback1.webm'
    out = '/home/zxk/AI/faceswap-GAN/1.avi'
    cap, info = readVideo(imgpath)
    writer = videoWriter(out, videoformat='XVID',
                         scale=(info['width'], info['height']), fps=30)
    import tqdm
    start = 2600  # first frame of the extracted window
    # start=5400
    end = 4440  # NOTE(review): unused — the loop length is 25 * 30 frames
    for i in tqdm.tqdm(range(start, start + 25 * 30)):
        cap.set(1, i)  # 1 == cv2.CAP_PROP_POS_FRAMES: seek to frame i
        _, frame = cap.read()
        # print(frame.shape)
        # print(frame)
        writer.write(frame)
        # NOTE(review): no writer.release() visible in this chunk — confirm
        # the file is finalized after the loop.
stopThreads()  # NOTE(review): tail of a function defined above this chunk — verify indentation


def stopThreads():
    """Ask the classifier thread to exit by raising the global stop flag."""
    global STOP_CLASSIFIER_THREAD
    STOP_CLASSIFIER_THREAD = True


def main(video, classifier):
    """Run YOLO classification in a background thread while playing *video*.

    classifier: passed through unchanged to classifyWithYOLO.
    """
    global STOP_CLASSIFIER_THREAD
    # The Streamlit checkbox value drives the stop flag on each rerun.
    STOP_CLASSIFIER_THREAD = st.checkbox("Stop recognition", value=False, key=None)
    # STOP_CLASSIFIER_THREAD = False
    try:
        classifierThread = threading.Thread(target=classifyWithYOLO,
                                            args=(classifier, ))
        classifierThread.start()
        runVideo(video)
    except Exception as e:
        # just in case to stop threads
        print(traceback.format_exc())
        stopThreads()


if __name__ == "__main__":
    video = readVideo('../videos/droga.mp4')
    main(video, None)
if __name__ == '__main__': device = torch.device('cpu') # detector = CCPD_YOLO_Detector(device=device) mtcnn = MTCNN( image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7], factor=0.709, prewhiten=True, select_largest=True, #True,boxes按照面积的大小降序排列 keep_all=True, device=device) cap, videoinfo = readVideo('data/laofoye_high.avi') print(videoinfo) cap_writer = videoWriter('hello.avi', scale=(videoinfo['width'], videoinfo['height']), fps=3) cnt = 0 rate = 10 buf = [] pos = 0 while True: retval, frame = cap.read() if not retval: break pos += 1 if pos % rate == 0: