# NOTE: the import paths below are assumptions based on the eyewitness package
# layout; ArcFaceClassifier comes from the surrounding project and its exact
# module path is not shown in this snippet.
import os

import arrow
import numpy as np
from bistiming import SimpleTimer
from eyewitness.image_id import ImageId
from eyewitness.image_utils import Image, resize_and_stack_image_objs


def train_arcface_model(face_detector, config, dataset_folder,
                        embedding_folder):
    objs = []
    register_image_bbox_objs = []
    registered_ids = []

    for folder in os.listdir(dataset_folder):
        if not folder.endswith('.txt'):
            for face in os.listdir(f"{dataset_folder}/{folder}"):
                raw_image_path = f"{dataset_folder}/{folder}/{face}"
                train_image_id = ImageId(channel='train_img',
                                         timestamp=arrow.now().timestamp,
                                         file_format=os.path.splitext(face)[1])
                train_image_obj = Image(train_image_id,
                                        raw_image_path=raw_image_path)

                try:
                    face_detection_result = face_detector.detect(
                        train_image_obj, label=folder)
                    # register only the images where exactly one face was detected
                    if len(face_detection_result.detected_objects) == 1:
                        detected_obj = face_detection_result.detected_objects[0]
                        register_image_bbox_objs.append(detected_obj)
                        registered_ids.append(detected_obj.label)
                        objs += train_image_obj.fetch_bbox_pil_objs(
                            [detected_obj])
                except Exception:
                    print(
                        f"[ERROR] The face detector failed; can't process the photo {raw_image_path}"
                    )

    objects_frame = resize_and_stack_image_objs((112, 112), objs)
    print("Object_frame shape:", objects_frame.shape)

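    # ArcFace expects channel-first input, so convert the stacked frames from NHWC to NCHW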
    objects_frame = np.transpose(objects_frame, (0, 3, 1, 2))
    with SimpleTimer("[INFO] Extracting embedding for our dataset"):
        arcface_classifier = ArcFaceClassifier(config,
                                               registered_ids,
                                               objects_frame=objects_frame)

    embedding_path = f"{embedding_folder}/faces.pkl"
    print(f"[INFO] Store face embedding to {embedding_path}")
    arcface_classifier.store_embedding_info(embedding_path)

    ids_path = f"{embedding_folder}/registered_ids.txt"
    print(f"[INFO] Store registered ids to {ids_path}")
    with open(ids_path, 'w') as filehandle:
        for registered_id in registered_ids:
            filehandle.write('%s\n' % registered_id)

    return arcface_classifier
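
# A minimal usage sketch (an assumption, not part of the original file): it
# reuses the MtcnnFaceDetector constructor shown in a later example, and
# `mtcnn_path` / `config` / the folder names stand in for project-specific
# values.
#
#   face_detector = MtcnnFaceDetector(mtcnn_path, ctx=mx.gpu(0))
#   arcface_classifier = train_arcface_model(face_detector, config,
#                                             dataset_folder='dataset',
#                                             embedding_folder='embedding')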
Example #2
    def recognize_person(self, img, face_detection_result):
        image_id = ImageId(channel='stream', timestamp=arrow.now().timestamp)
        image_obj = Image(image_id, pil_image_obj=img)

        detection_result = self.detect(image_obj,
                                       face_detection_result.detected_objects)
        persons = [(elt.label, elt.x1, elt.y1, elt.x2, elt.y2, elt.score)
                   for elt in detection_result.detected_objects]

        return (persons, detection_result.detected_objects)
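
# Hedged usage note (assumption): given `recognizer`, an instance of this
# classifier, a PIL image `img`, and a `face_detection_result` coming from a
# face detector such as MTCNN, the call returns (label, x1, y1, x2, y2, score)
# tuples alongside the raw detected objects:
#
#   persons, detected_objects = recognizer.recognize_person(img, face_detection_result)
#   for label, x1, y1, x2, y2, score in persons:
#       print(label, score)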
Example #3
    # if args.gpu >= 0:
    #     ctx = mx.gpu(args.gpu)
    # else:
    #     ctx = mx.cpu(0)
    model_name = 'MTCNN'
    with SimpleTimer("Loading model %s" % model_name):
        face_detector = MtcnnFaceDetector(args.mtcnn_path, ctx=mx.gpu(0))

    # Preprocess the image
    raw_image_path = 'demo/183club/test_imag1.jpg'

    # ImageId is used to standardize the image_id format
    train_image_id = ImageId(channel='demo',
                             timestamp=arrow.now().timestamp,
                             file_format='jpg')
    train_image_obj = Image(train_image_id, raw_image_path=raw_image_path)

    register_image_bbox_objs = [
        BoundedBoxObject(x1=1526,
                         y1=755,
                         x2=1730,
                         y2=1007,
                         label='Zoe',
                         score=1,
                         meta=''),
        #BoundedBoxObject(x1=946, y1=551, x2=1149, y2=784, label='Amine', score=1, meta=''),
        BoundedBoxObject(x1=1364,
                         y1=564,
                         x2=1492,
                         y2=720,
                         label='Lu',
                         score=1,
                         meta=''),
    ]
Example #4
    try:
        triton_client = tritongrpcclient.InferenceServerClient(
            url=FLAGS.url, verbose=FLAGS.verbose)
    except Exception as e:
        print("channel creation failed: " + str(e))
        sys.exit()

    model_name = "yolov4"

    # Infer
    inputs = []
    outputs = []
    # the deployed engine was built with a single NCHW input of shape (1, 3, 608, 608)
    inputs.append(tritongrpcclient.InferInput("data", [1, 3, 608, 608],
                                              "FP32"))

    # Initialize the data
    image_obj = Image("image_id", raw_image_path=FLAGS.img)
    ori_w, ori_h = image_obj.pil_image_obj.size
    image_frame, scale_ratio = preprocess(image_obj.pil_image_obj,
                                          input_image_shape=(608, 608))
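    # image_frame is expected to be a float32 array matching the declared
    # InferInput shape [1, 3, 608, 608]; scale_ratio and (ori_w, ori_h) can be
    # used afterwards to map detections back onto the original image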
    inputs[0].set_data_from_numpy(image_frame)

    outputs.append(tritongrpcclient.InferRequestedOutput("prob"))

    # Test with outputs
    results = triton_client.infer(model_name=model_name,
                                  inputs=inputs,
                                  outputs=outputs,
                                  headers={"test": "1"})

    statistics = triton_client.get_inference_statistics(model_name=model_name)
    print(statistics)
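
    # (assumption) the raw output tensor can be read back with InferResult.as_numpy;
    # decoding it into boxes and rescaling by scale_ratio depends on how this
    # particular yolov4 engine lays out its "prob" output
    prob = results.as_numpy("prob")
    print("prob tensor shape:", prob.shape)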
Example #5
import arrow

# ImageId's module path is assumed from the eyewitness package layout
from eyewitness.image_id import ImageId
from eyewitness.image_utils import ImageHandler, Image
from bistiming import SimpleTimer

from cv2_detector import CascadeClassifierPersonWrapper, HogPersonDetectorWrapper, MobileNetWrapper


def get_person_detector(model):
    if model == 'Cascade':
        detector = CascadeClassifierPersonWrapper()
    elif model == 'Hog':
        detector = HogPersonDetectorWrapper()
    elif model == 'MobileNet':
        detector = MobileNetWrapper()
    else:
        raise NotImplementedError('person detector %s is not implemented' % model)
    return detector


if __name__ == '__main__':
    model_name = 'MobileNet'
    with SimpleTimer("Loading model %s" % model_name):
        object_detector = get_person_detector(model_name)
    raw_image_path = 'demo/test_image.jpg'
    image_id = ImageId(channel='demo', timestamp=arrow.now().timestamp, file_format='jpg')
    image_obj = Image(image_id, raw_image_path=raw_image_path)
    with SimpleTimer("Predicting image with classifier"):
        detection_result = object_detector.detect(image_obj)
    print("detected %s objects" % len(detection_result.detected_objects))
    ImageHandler.draw_bbox(image_obj.pil_image_obj, detection_result.detected_objects)
    ImageHandler.save(image_obj.pil_image_obj, "detected_image/drawn_image.jpg")
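    # (optional extra, an assumption beyond the original script) detected_objects
    # are BoundedBoxObject tuples, so labels and scores can be listed directly
    for detected_obj in detection_result.detected_objects:
        print("  %s (score=%s)" % (detected_obj.label, detected_obj.score))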