Exemplo n.º 1
0
    def _apply_config(self, conf):
        """Copy recognition settings from *conf* onto this window."""
        self.threshold = conf.threshold
        # Fall back to the default model label when no net mode is configured.
        self.model_name = 'mobile face' if conf.net_mode is None else conf.net_mode
        self.net_mode = conf.net_mode
        self.use_mtcnn = bool(conf.use_mtcnn)
        self.camera_id = conf.video_source
        self.face_recognize = face_recognize(conf)

    def __init__(self):
        """Initialise the main window: configuration, face bank, and UI wiring."""
        super(AUFR, self).__init__()
        self.setupUi(self)
        try:
            # Preferred path: configuration from the user's settings dialog.
            self.setting = Setting(self)
            self._apply_config(self.setting.get_config())
        except Exception:
            # BUG FIX: original used a bare `except:` followed by a dead `pass`
            # and duplicated all the assignments. Fall back to a hard-coded
            # default config when settings cannot be loaded or applied.
            self._apply_config(get_config(net_mode='ir_se', use_mtcnn=True,
                                          threshold=1.25))

        self.targets, self.names = self.face_recognize.get_facebank()
        self.has_targ = len(self.targets) > 0
        # Variables
        # NOTE(review): this overrides the configured video source set above —
        # confirm the hard-coded file is intentional.
        self.camera_id = 'video.mp4'  # can also be a url of Video
        self.dataset_per_subject = 50
        self.ret = False
        self.trained_model = 0
        self.image = cv2.imread("icon/default.jpg", 1)
        self.modified_image = self.image.copy()
        self.reload()
        self.display()
        # Actions
        self.generate_dataset_btn.setCheckable(True)
        # self.train_model_btn.setCheckable(True)
        self.recognize_face_btn.setCheckable(True)
        # Menu
        self.about_menu = self.menu_bar.addAction("About")
        self.help_menu = self.menu_bar.addAction("Help")
        self.about_menu.triggered.connect(self.about_info)
        self.help_menu.triggered.connect(self.help_info)

        # Algorithms
        self.generate_dataset_btn.clicked.connect(self.pressedGendataButton)
        self.recognize_face_btn.clicked.connect(self.recognize)
        self.video_recording_btn.clicked.connect(self.save_video)
        self.adv_setting.clicked.connect(self.pressedSettingsButton)
        self.capture_btn.clicked.connect(self.captureDialog_show)

        # Ensure the face-bank directory exists (race-safe, no TOCTOU check).
        os.makedirs(FACE_BANK, exist_ok=True)

        self.createMenus()
Exemplo n.º 2
0
 def __init__(self, width=None, height=None):
     """Set up the ZMQ image hub and the per-sender bookkeeping state."""
     # Optional output dimensions; None keeps the incoming frame size.
     self.width, self.height = width, height
     # Hub that receives frames pushed by remote senders.
     self.image_hub = imagezmq.ImageHub()
     # Per-sender state maps (raw frame, processed frame, stop flag,
     # last-seen timestamp) and the worker threads driving them.
     self.frame_ori = {}
     self.frame_process = {}
     self.stop_process = {}
     self.last_active = {}
     self.lst_thread = []
     # Seconds of silence after which a sender is considered inactive.
     self.active_check_secon = 30
     # NOTE(review): `conf` is read from module scope — confirm it is defined
     # before this class is instantiated.
     self.face_reg = face_recognize(conf)
     self.targets, self.names = self.face_reg.load_facebank()
Exemplo n.º 3
0
def process_two_image(data):
    """Check whether the face in ``image_url_origin`` appears in ``image_url_detection``.

    *data* must carry the keys ``image_url_origin`` and ``image_url_detection``.
    Both files are downloaded into a scratch directory (always removed on exit)
    and every detected face in the detection image is compared against the
    reference embedding. Returns a JSON (records) string with one row per
    matched face: detection URL, bounding box (x1, y1, x2, y2) and result 1.
    """
    import os
    import shutil

    folder_name = str(uuid.uuid1())
    # BUG FIX: original shelled out with `subprocess.call('mkdir …', shell=True)`
    # and `rm -rf …` — direct stdlib calls avoid the shell entirely.
    os.makedirs(folder_name, exist_ok=True)
    try:
        image_path_origin = download_file_by_url(data['image_url_origin'],
                                                 folder_name)
        image_path_detection = download_file_by_url(data['image_url_detection'],
                                                    folder_name)

        from api import face_recognize
        conf = get_config()
        # Renamed from `face_recognize` — the original shadowed the class.
        recognizer = face_recognize(conf)
        # Embed the reference face once.
        recognizer._raw_load_single_face(image_path_origin)
        targets = recognizer.embeddings
        image = Image.open(image_path_detection)
        submiter = [['image_url', 'x1', 'y1', 'x2', 'y2', 'result']]
        try:
            bboxes, faces = recognizer.align_multi(image)
        except Exception:
            # Detector raises when no face is found; treat as "no matches".
            bboxes, faces = [], []
        if len(bboxes) > 0:
            bboxes = bboxes[:, :-1].astype(int)
            # Expand each box by one pixel on every side.
            bboxes = bboxes + [-1, -1, 1, 1]
            results, _score = recognizer.infer(faces, targets)
            for idx, match in enumerate(results):
                if match != -1:
                    # (A dead dict assignment from the original was removed —
                    # it was overwritten immediately without being used.)
                    submiter.append([data['image_url_detection'],
                                     bboxes[idx][0], bboxes[idx][1],
                                     bboxes[idx][2], bboxes[idx][3], 1])
    finally:
        # Always remove the download scratch directory, even on failure.
        shutil.rmtree(folder_name, ignore_errors=True)
    df = pd.DataFrame.from_records(submiter)
    # First record is the header row; promote it to column names.
    headers = df.iloc[0]
    df = pd.DataFrame(df.values[1:], columns=headers)
    df = df.sort_values(by=['result'], ascending=False)
    return df.to_json(orient='records')
Exemplo n.º 4
0
def process_images(image_path='', path=''):
    """Match faces in every image under *path* against the face at *image_path*.

    Returns a JSON (records) string with one row per scanned image: image
    file name, bounding box (x1, y1, x2, y2) and a result flag (1 = reference
    face found, 0 = not found).
    """
    from api import face_recognize
    conf = get_config()
    # Renamed from `face_recognize` — the original shadowed the class.
    recognizer = face_recognize(conf)
    # Embed the reference face once; reused for every candidate image.
    recognizer._raw_load_single_face(image_path)
    targets = recognizer.embeddings
    submiter = [['image', 'x1', 'y1', 'x2', 'y2', 'result']]
    list_file = glob.glob(path + '/*')
    # BUG FIX: guard against an empty directory before indexing list_file[0].
    # If *path* contains a single sub-directory, descend into it.
    if list_file and not os.path.isfile(list_file[0]):
        path = list_file[0]
        print(path)
    for img in tqdm(glob.glob(path + '/*')):
        # Default row: no face matched in this image.
        temp = [os.path.basename(img), 0, 0, 0, 0, 0]
        image = Image.open(img)
        try:
            bboxes, faces = recognizer.align_multi(image)
        except Exception:
            # Detector raises when no face is found; record as "no match".
            bboxes, faces = [], []
        if len(bboxes) > 0:
            bboxes = bboxes[:, :-1].astype(int)
            # Expand each box by one pixel on every side.
            bboxes = bboxes + [-1, -1, 1, 1]
            results, _score = recognizer.infer(faces, targets)
            for idx, match in enumerate(results):
                if match != -1:
                    # Keep the last matched box (original behaviour).
                    temp = [os.path.basename(img),
                            bboxes[idx][0], bboxes[idx][1],
                            bboxes[idx][2], bboxes[idx][3], 1]
        submiter.append(temp)
    df = pd.DataFrame.from_records(submiter)
    # First record is the header row; promote it to column names.
    headers = df.iloc[0]
    df = pd.DataFrame(df.values[1:], columns=headers)
    df = df.sort_values(by=['result'], ascending=False)
    return df.to_json(orient='records')
Exemplo n.º 5
0
@app.route('/video_feed/<camera_type>/<device_id>')
def video_feed(camera_type, device_id):
    """Video streaming route. Put this in the src attribute of an img tag."""
    # Camera implementation is selected by module name, e.g. camera_opencv.
    camera_stream = import_module('camera_' + camera_type).Camera
    if camera_type == 'opencv':
        # BUG FIX: original did `int(device)` with `device` undefined — the
        # resulting NameError was silently swallowed by a bare `except: pass`.
        try:
            device_id = int(device_id)
        except ValueError:
            # Non-numeric id: keep it as-is; int() below will surface the error.
            pass
        camera_stream.set_video_source(link_video)
    return Response(gen(camera_stream(int(device_id))),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

# Video source(s) to stream from (RTSP URL; credentials redacted in scrape).
link_video = ['rtsp://*****:*****@@10.0.20.226:554/profile2/media.smp']
# link_video = ['video.mp4']
# client = ObjectDetector()
# Build the recognizer once at import time and preload the known-faces bank.
conf = get_config(net_mode = 'mobi', threshold = 1.22, detect_id = 1)
# NOTE(review): this rebinding shadows the imported `face_recognize`
# class/factory — constructing another instance later would fail.
face_recognize = face_recognize(conf)
targets, names  = face_recognize.load_facebank()
lst_video = []
fps_init = FPS()

# HACK: hard-coded secret key; move to environment/config for production use.
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'

if __name__ == '__main__':

    # Same session setup repeated when run as a script (already set above).
    app.secret_key = 'super secret key'
    app.config['SESSION_TYPE'] = 'filesystem'
    # threaded=True allows multiple concurrent video streams; debug=True also
    # enables the Werkzeug reloader — not for production.
    app.run(host='0.0.0.0', port=5001, debug=True, threaded=True)