Example 1
 def test_run_inference(self):
     for model in test_utils.get_model_list():
         print('Testing model:', model)
         engine = BasicEngine(test_utils.test_data_path(model))
         input_data = test_utils.generate_random_input(
             1, engine.required_input_array_size())
         latency, ret = engine.run_inference(input_data)
         self.assertEqual(ret.size, engine.total_output_array_size())
         # Check debugging functions.
         self.assertLess(math.fabs(engine.get_inference_time() - latency),
                         0.001)
         raw_output = engine.get_raw_output()
         self.assertEqual(ret.size, raw_output.size)
         for i in range(ret.size):
             if math.isnan(ret[i]) and math.isnan(raw_output[i]):
                 continue
             self.assertLess(math.fabs(ret[i] - raw_output[i]), 0.001)
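
For context, a minimal standalone sketch of the same BasicEngine calls outside the test harness. The model path is a placeholder, not one from the original test; any Edge TPU-compiled .tflite model works.

import numpy as np
from edgetpu.basic.basic_engine import BasicEngine

# Hypothetical model path.
engine = BasicEngine('model_edgetpu.tflite')
input_data = np.random.randint(
    0, 256, size=engine.required_input_array_size(), dtype=np.uint8)
# run_inference() returns (latency in ms, flat numpy array of outputs).
latency, result = engine.run_inference(input_data)
print('latency: %.2f ms, output size: %d' % (latency, result.size))
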
Example 2
import csv
import os

import numpy as np
from PIL import Image
from edgetpu.basic.basic_engine import BasicEngine


def main(user_id, output_file='training_data.txt'):
    # Initialize the FaceNet TFLite model.
    engine = BasicEngine("../src/models/facenet_edgetpu.tflite")
    # List of people (subdirectory folder names); "-1" selects everyone.
    people = os.listdir("image_data/") if user_id == "-1" else [str(user_id)]
    with open(output_file, 'a+') as f:
        writer = csv.writer(f)
        for person in people:
            image_names = [
                image for image in os.listdir("image_data/" + person)
            ]
            # Run inference on each image in the directory.
            for image_name in image_names:
                image = Image.open("image_data/" + person + '/' + image_name)
                print("\t->" + person + '/' + image_name)
                # Run inference; run_inference() returns (latency, raw output).
                _, embedding = engine.run_inference(np.array(image).flatten())
                # First entry is the label, the remaining 512 are the embedding.
                value = np.zeros(513).astype(object)
                value[0] = str(person).replace('_', ' ')
                value[1:] = embedding
                # Append the label/embedding pair for this image to the output file.
                writer.writerow(value)
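
Each row written above pairs one label with a 512-value FaceNet embedding. As a hedged sketch of the next step (the SVC parameters and the 'svm_model.pkl' output path are assumptions, not part of the original code), such a file could be read back to train the SVM classifier that Example 3 loads from disk:

import csv
import pickle

import numpy as np
from sklearn.svm import SVC

labels, embeddings = [], []
with open('training_data.txt') as f:
    for row in csv.reader(f):
        labels.append(row[0])
        embeddings.append(np.asarray(row[1:], dtype=np.float32))

# probability=True is required for the predict_proba() call in Example 3.
svm_clf = SVC(kernel='linear', probability=True)
svm_clf.fit(np.stack(embeddings), labels)
pickle.dump(svm_clf, open('svm_model.pkl', 'wb'))  # hypothetical output path
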
Example 3
def inference_thread(running, state, result_buffer, frame_buffer, args, identity_dict, current_identity):
    global IDLE, TRACK, RESET, FACE_RECOG_THRESHOLD, FACE_RECOG_THRESHOLD_A
    global od_engine, face_detector, facenet_engine, svm_clf
    # Initialize object detection engine.
    od_engine = DetectionEngine(args.od_model)
    print("device_path: ", od_engine.device_path())
    _, od_width, od_height, _ = od_engine.get_input_tensor_shape()
    print("od input dim: ", od_width, od_height)
    # Initialize the face detector using the OpenCV Haar cascade model.
    face_detector = FaceDetector(args.hc_model)
    # Initialize facenet engine.
    facenet_engine = BasicEngine(args.fn_model)
    # Load the sklearn support vector machine classifier from disk.
    svm_clf = pickle.load(open(args.svm_model, 'rb'))

    while running.value:
        # Busy-wait until the frame buffer has a frame.
        if frame_buffer.empty():
            continue
        frame = frame_buffer.get()
        tinf = time.perf_counter()

        if state.value == IDLE:
            # Reorder the frame from BGR to RGB.
            img = frame[:, :, ::-1]
            # Face detection (image preprocessing, downsampling).
            faces_coord = face_detector.detect(img, True)
            print("faces_coord: ", faces_coord)
            if faces_coord is not None:
                # Normalize the detected face image.
                face_image = np.array(normalize_faces(img, faces_coord))
                # FaceNet generates the face embedding.
                _, face_emb = facenet_engine.run_inference(face_image.flatten())
                face_emb = face_emb.reshape(1, -1)
                # Use the SVM to classify the identity from the face embedding.
                pred_prob = svm_clf.predict_proba(face_emb)
                best_class_index = np.argmax(pred_prob, axis=1)[0]
                best_class_prob = pred_prob[0, best_class_index]
                print("best_class_index: ",best_class_index)
                print("best_class_prob: ",best_class_prob)
                print("label", svm_clf.classes_[best_class_index])
                # Check threshold and verify identify is in the identifiy dictionary
                if best_class_prob > FACE_RECOG_THRESHOLD:
                    face_label = svm_clf.classes_[best_class_index]
                    if face_label in identity_dict:
                        print("\n=================================")
                        print("Identity found: ", face_label, " ",identity_dict[face_label],
                            " with Prob = ", best_class_prob)
                        print("=================================\n")
                        current_identity.value = identity_dict[face_label][0] # ID
                result_buffer.put(faces_coord)
        elif state.value == TRACK:
            # Convert the numpy array (BGR) to a PIL image in RGB format.
            img = Image.fromarray(frame[:, :, ::-1], 'RGB')
            # Run object detection inference.
            od_results = od_engine.detect_with_image(
                img, threshold=0.30, keep_aspect_ratio=True,
                relative_coord=False, top_k=10)
            # push result to buffer queue
            result_buffer.put(od_results)
        print(time.perf_counter() - tinf, "sec")
    print("[Finish] inference_thread")
Example 4
    ##### INVERT
    image = cv2.bitwise_not(image)

    ##### CONVERT TO BINARY (OTHER OPTIONS MAY MAKE MORE SENSE)
    _, image = cv2.threshold(image,180,255,cv2.THRESH_BINARY)

    ##### FLATTEN INPUT (TPU REQUIREMENT)
    input_data = image.flatten()

    ##### RUN ON TPU
    latency, results = engine.run_inference(input_data)

    ##### PLOT RESULTS
    mp.gca().cla()
    mp.bar(np.arange(10), results)
    mp.axis([-0.5,9.5,0,1])
    mp.xlabel('Number')
    mp.ylabel('Probability')
    mp.pause(0.001)

    ##### SHOW IMAGE THAT WAS FORWARDED TO TPU MODEL
    image = cv2.resize(frame, (560, 560))

    cv2.imshow('frame', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

##### RELEASE CAPTURE AND CLOSE DISPLAY
cap.release()
cv2.destroyAllWindows()
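
For context, a hedged sketch of the setup this fragment assumes: a webcam capture, a grayscale resize to the model's input size, and a BasicEngine wrapping a 10-class digit classifier (the bar chart above plots 10 class probabilities). The model path and the 28x28 input size are assumptions; the original setup code is not shown.

import cv2
import numpy as np
import matplotlib.pyplot as mp
from edgetpu.basic.basic_engine import BasicEngine

engine = BasicEngine('mnist_edgetpu.tflite')  # hypothetical model path
cap = cv2.VideoCapture(0)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    image = cv2.resize(image, (28, 28))  # assumed model input size
    # ... the INVERT / THRESHOLD / RUN ON TPU steps shown above follow here.
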