# NOTE(review): tail of an async frame-pacing loop; the enclosing
# `async def` was lost when this example was scraped, and the leading
# indentation of the first line was stripped. `loop`, `start_time` and
# `reciprocal_of_max_frame_rate` come from the missing context.
# Sleeps away the remainder of the frame period so the loop ticks at
# most once per `reciprocal_of_max_frame_rate` seconds.
restime = reciprocal_of_max_frame_rate - loop.time() + start_time
        if restime > 0:
            await asyncio.sleep(restime)

# =================== INIT ====================
# Camera endpoints to pull frames from. NOTE: despite the name this is a
# list; the name is kept because downstream code reads args.address_dict.
# address_dict = ['10.41.0.198', '10.41.0.199']
address_dict = ['10.41.0.231']
# Disable MXNet's cuDNN autotune so startup latency stays predictable.
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
frame_buffer_size = 10 * len(address_dict)
upstream_frame_queue = Queue(maxsize=frame_buffer_size)
suspicion_face_queue = Queue(maxsize=frame_buffer_size)
result_queue = Queue(maxsize=frame_buffer_size)

# =================== ARGS ====================
args = start_up_init()
args.address_dict = address_dict


def _run_coroutine(coro_fn, *coro_args):
    """Process entry point: build the coroutine inside the child process
    and drive it to completion on a fresh event loop.

    Defined at module level (instead of a lambda) so the Process target
    is picklable under the 'spawn' start method (the default on
    Windows/macOS); lambdas cannot be pickled and crash at .start().
    """
    asyncio.run(coro_fn(*coro_args))


# =================== Process On ====================
args.ip_address = '10.41.0.231'
frame_queue_231 = Queue(maxsize=frame_buffer_size)
Process(target=_run_coroutine,
        args=(detection_loop, args, frame_queue_231)).start()

# args.ip_address = '10.41.0.232'
# frame_queue_232 = Queue(maxsize=frame_buffer_size)
# Process(target=_run_coroutine,
#         args=(detection_loop, args, frame_queue_232)).start()

Process(target=_run_coroutine, args=(embedding_loop, args)).start()
Process(target=_run_coroutine, args=(camera_loop, args)).start()
# ---- Esempio n. 2 (example boundary) ----
# NOTE(review): the bare lines "Esempio n. 2" and "0" were web-scraping
# artifacts (syntax errors in Python); converted to this comment.
        # NOTE(review): body of an async capture loop; the enclosing
        # `async def` was lost in extraction, so `loop`, `camera_dict`,
        # `code_list`, `rate`, and `frame_queue` come from missing context.
        start_time = loop.time()
        # Reads and discards one frame from the leftover `camera` handle
        # (the loop variable from the INIT section below) -- presumably a
        # stale-buffer flush; TODO confirm intent, looks accidental.
        camera.read()[1]
        # pairs = [(ip, camera_dict[ip].frame(rows=672, cols=672)) for ip in addr_list]
        # pairs = [(str(code), camera_dict[code].read()[1]) for code in code_list]
        # Grab one frame per camera, resize to 672x672, and tag each frame
        # with its camera code (stringified) for downstream routing.
        pairs = [(str(code), cv2.resize(camera_dict[code].read()[1],
                                        (672, 672))) for code in code_list]
        frame_queue.put(pairs)
        #print(loop.time() - start_time, frame_queue.qsize())
        # Sleep for the remainder of the frame period to cap the capture rate.
        restime = rate - loop.time() + start_time
        if restime > 0:
            await asyncio.sleep(restime)


# =================== INIT ====================
# Disable MXNet's cuDNN autotune so startup latency stays predictable.
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
preload = start_up_init()
# preload.scales = [0.7]
# preload.usb_camera_code = [0]
preload.max_face_number = 30

print(preload.usb_camera_code)

# Open one capture handle per configured USB camera code.
camera_dict = {}
for code in preload.usb_camera_code:
    camera = cv2.VideoCapture(code)
    # camera = cv2.VideoCapture('./trump.mp4')  # use like this for video-file testing
    if not camera.isOpened():
        # cv2.VideoCapture fails silently (returns an unopened handle);
        # warn here instead of queueing None frames later.
        print('WARNING: could not open camera %r' % (code,))
    camera_dict[code] = camera

# =================== QUEUE ====================
frame_buffer_size = preload.queue_buffer_size
frame_queue = Queue(frame_buffer_size)
import sys
import numpy as np
import time
from termcolor import colored
import matplotlib.pyplot as plt
import pickle

from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn import metrics

from helper import read_pkl_model, start_up_init, get_dataset, get_image_paths_and_labels

# =================== ARGS ====================
# start_up_init(True) -- presumably the flag selects a training/offline
# argument profile; verify against helper.start_up_init.
args = start_up_init(True)

# =================== MODEL CLASS ====================
# ArcFace embedding model used to turn face crops into feature vectors.
arcface = face_model.FaceModel(args)

# =================== LOAD DATASET ====================
# Training split: image paths and labels read from ./Temp/train_data.
# dir_train is the target .npy path for computed embeddings -- unused in
# the lines visible here; presumably consumed further down the script.
dir_train = './Embedding/train.npy'
data_train = './Temp/train_data'
dataset_train = get_dataset(data_train)
paths_train, labels_train = get_image_paths_and_labels(dataset_train)

# "Safe" (known/whitelisted) split, loaded the same way.
dir_safe = './Embedding/safe.npy'
data_safe = './Temp/safe_data'
dataset_safe = get_dataset(data_safe)
paths_safe, labels_safe = get_image_paths_and_labels(dataset_safe)