from time import sleep

from PyQt5.QtCore import QThread   # assuming PyQt5; adjust the import for PySide
from func.facenet import FaceDet


class DETECT_Thread(QThread):
    """Worker thread that registers a face once a frame (`im`) and a name (`text`) are set."""

    def __init__(self):
        super().__init__()
        self.detector = FaceDet(register=True)
        self.running = True

    def run(self):
        global im, text
        while True:
            if not self.running:
                return
            if im is not None and text is not None:
                # Register the current frame under the given name, then clear the
                # name so the same face is not registered again on the next pass.
                self.detector.regist(im, text)
                text = None
            sleep(0.2)  # avoid busy-waiting, mirroring the recognition thread
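# Usage sketch (assumption, not from the original source): the registration thread
# communicates only through the module-level globals `im` and `text`, so a GUI
# handler in the same module just has to assign them. The names `register_thread`,
# `current_frame`, and `person_name` are hypothetical.

im = None
text = None

register_thread = DETECT_Thread()
register_thread.start()

def on_register_clicked(current_frame, person_name):
    """Hand the latest camera frame and the typed name to the registration thread."""
    global im, text
    im = current_frame    # BGR frame grabbed from the camera widget
    text = person_name    # the thread resets `text` to None once the face is stored

# Graceful shutdown, e.g. in the window's closeEvent():
#   register_thread.running = False
#   register_thread.wait()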
from time import sleep

from PyQt5.QtCore import QThread   # assuming PyQt5; adjust the import for PySide
from func.facenet import FaceDet


class DETECT_Thread(QThread):
    """Worker thread that runs detection, or detection plus recognition, on the latest frame."""

    def __init__(self):
        super().__init__()
        self.task = None            # 'rec', 'det', or None (idle)
        self.detector = FaceDet()
        self.running = True

    def run(self):
        global im, result
        while True:
            if not self.running:
                return
            if im is not None:
                if self.task == 'rec':
                    result = self.detector.detect_and_recognition(im)
                elif self.task == 'det':
                    result = self.detector.detect_only(im)
                else:
                    result = None
            sleep(0.2)
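# Usage sketch (assumption, not from the original source): the recognition thread
# reads the global `im` and publishes annotated frames into `result`, so a producer
# loop in the same module only has to keep `im` fresh and pick a task. The names
# `detect_thread` and `cap` are hypothetical; a webcam is used for illustration.

import cv2

im = None
result = None

detect_thread = DETECT_Thread()
detect_thread.task = 'rec'        # or 'det' for detection without recognition
detect_thread.start()

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    im = frame                    # publish the newest frame for the worker thread
    cv2.imshow('preview', result if result is not None else frame)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

detect_thread.running = False
detect_thread.wait()
cap.release()
cv2.destroyAllWindows()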
import numpy as np
import argparse
import sys
import cv2
import os

from func.facenet import FaceDet

if __name__ == '__main__':
    cap = cv2.VideoCapture('./src.flv')
    det = FaceDet()

    video_width = int(cap.get(3))    # cv2.CAP_PROP_FRAME_WIDTH
    video_height = int(cap.get(4))   # cv2.CAP_PROP_FRAME_HEIGHT
    fps = int(cap.get(5))            # cv2.CAP_PROP_FPS
    # fps = 15
    print(fps)

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')  # OpenCV 3.0
    videoWriter = cv2.VideoWriter('dst.mp4', fourcc, fps, (533, 300))

    while True:
        _, frame = cap.read()
        if frame is None:
            break
        frame = det.detect_and_recognition(frame)
        cv2.imshow('a', frame)
        # VideoWriter only accepts frames matching the size it was opened with,
        # so resize to (533, 300) before writing.
        videoWriter.write(cv2.resize(frame, (533, 300)))
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

    cap.release()
    videoWriter.release()
    cv2.destroyAllWindows()
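# Optional extension (assumption, not part of the original source): the script above
# hard-codes './src.flv', 'dst.mp4', and the 533x300 output size, and argparse is
# imported but unused. A minimal sketch of reading these from the command line; the
# script name 'video_demo.py' in the usage line is hypothetical.

import argparse

def parse_args():
    parser = argparse.ArgumentParser(
        description='Run face detection and recognition over a video file.')
    parser.add_argument('--input', default='./src.flv', help='path to the source video')
    parser.add_argument('--output', default='dst.mp4', help='path for the annotated output video')
    parser.add_argument('--width', type=int, default=533, help='output frame width')
    parser.add_argument('--height', type=int, default=300, help='output frame height')
    return parser.parse_args()

# Usage:
#   python video_demo.py --input ./src.flv --output dst.mp4 --width 533 --height 300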