def gen():
    engine = BasicEngine(model)
    labels = read_label_file(label)
    cap = get_cap()

    # a = engine.get_num_of_output_tensors()
    # b = engine.total_output_array_size()
    # c = engine.get_output_tensor_size(0)
    # d = engine.required_input_array_size()

    # print(a, b, c, d)

    while True:
        _, frame = cap.read()
        input_val = cv2.resize(frame, (432, 368))
        input_val = input_val.flatten()
        ans = engine.RunInference(input_val)
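        # ans[0] is the inference time in ms, ans[1] is the flat 1-D output array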
        heat_map = ans[1].reshape([54, 46, 57])
        prop = heat_map[1, :, :]
        prop = np.multiply(prop, 255)
        # prop = cv2.resize(prop, (460, 640))
        _, buffer = cv2.imencode(".jpg", prop)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' +
               io.BytesIO(buffer).read() + b'\r\n')
Example #2
def coralQueue(inqueue, addr):
    context = zmq.Context()
    socket = context.socket(zmq.PAIR)
    socket.connect('tcp://%s' % addr)

    engine = BasicEngine(MODELPATH)

    fps = FPS()

    while True:
        obj = inqueue.get()
        if obj is None:
            socket.send_pyobj(None)  # blocking send: tell the receiver the stream has ended
            break


#        start_time = time.time()
        img, content = obj

        input_tensor = np.asarray(img).flatten()
        _, raw_result = engine.RunInference(input_tensor)
        bbox_result = []
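        # tensor_start_index (defined elsewhere) presumably holds the flat offsets of the four
        # SSD output tensors in raw_result: [box coordinates, class ids, scores, detection count]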
        num_candidates = raw_result[tensor_start_index[3]]
        for i in range(int(round(num_candidates))):
            score = raw_result[tensor_start_index[2] + i]
            if score > threshold:
                label_id = int(round(raw_result[tensor_start_index[1] + i]))
                if label_id in target_labels:
                    y1 = max(0.0, raw_result[tensor_start_index[0] + 4 * i])
                    x1 = max(0.0,
                             raw_result[tensor_start_index[0] + 4 * i + 1])
                    y2 = min(1.0,
                             raw_result[tensor_start_index[0] + 4 * i + 2])
                    x2 = min(1.0,
                             raw_result[tensor_start_index[0] + 4 * i + 3])

                    # Coordinates are ratios (0..1) of the image size.
                    bbox_result.append([x1, y1, x2, y2, score])
        bbox_result.sort(key=lambda x: -x[4])

        #        end_time = time.time()
        #        SDLogger.debug(f'Preprocess + Inference costs: {end_time-start_time:.3f}')

        try:
            socket.send_pyobj((content, bbox_result[:top_k]),
                              flags=zmq.NOBLOCK)
        except Exception as e:
            SDLogger.error('Error when sending the detection result: %s' % e)
        SDLogger.info(f'Detection FPS: {fps():.2f}')
Example #3
def main():

    log.basicConfig(format=" [ %(levelname)s] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)

    args = build_argparser().parse_args()

    #log.info("Loading the network files model")

    voice = sound_decode(args.input)

    infer_engine = BasicEngine(args.model)
    latency, results = infer_engine.RunInference(voice)
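    # latency is the inference time in ms; get_inference_time() below should report the same value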
    print(latency)
    print(infer_engine.get_inference_time())
    print(results)
Example #4
def deep_inferencer(results, frameBuffer, model, device):

    deep_engine = BasicEngine(model, device)
    print("Loaded Graphs!!! (Deeplab)")

    while True:

        if frameBuffer.empty():
            continue

        # Run inference.
        color_image = frameBuffer.get()
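        # Convert BGR (OpenCV order) to RGB, then flatten for the Edge TPU input tensor.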
        prepimg_deep = color_image[:, :, ::-1].copy()
        prepimg_deep = prepimg_deep.flatten()
        tinf = time.perf_counter()
        latency, result_deep = deep_engine.RunInference(prepimg_deep)
        print(time.perf_counter() - tinf, "sec (Deeplab)")
        results.put(result_deep)
Example #5
class DualNetworkEdgeTpu():
    """DualNetwork implementation for Google's EdgeTPU."""

    def __init__(self, save_file):
        self.engine = BasicEngine(save_file)
        self.board_size = go.N
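        # one policy output per board intersection plus one for the pass move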
        self.output_policy_size = self.board_size**2 + 1

        input_tensor_shape = self.engine.get_input_tensor_shape()
        expected_input_shape = [1, self.board_size, self.board_size, 17]
        if not np.array_equal(input_tensor_shape, expected_input_shape):
            raise RuntimeError(
                'Invalid input tensor shape {}. Expected: {}'.format(
                    input_tensor_shape, expected_input_shape))
        output_tensors_sizes = self.engine.get_all_output_tensors_sizes()
        expected_output_tensor_sizes = [self.output_policy_size, 1]
        if not np.array_equal(output_tensors_sizes,
                              expected_output_tensor_sizes):
            raise RuntimeError(
                'Invalid output tensor sizes {}. Expected: {}'.format(
                    output_tensors_sizes, expected_output_tensor_sizes))

    def run(self, position):
        """Runs inference on a single position."""
        probs, values = self.run_many([position])
        return probs[0], values[0]

    def run_many(self, positions):
        """Runs inference on a list of position."""
        processed = list(map(features_lib.extract_features, positions))
        probabilities = []
        values = []
        for state in processed:
            assert state.shape == (self.board_size, self.board_size,
                                   17), str(state.shape)
            result = self.engine.RunInference(state.flatten())
            # If needed you can get the raw inference time from the result object.
            # inference_time = result[0] # ms
            policy_output = result[1][0:self.output_policy_size]
            value_output = result[1][-1]
            probabilities.append(policy_output)
            values.append(value_output)
        return probabilities, values
Example #6
def main(user_id, output_file='training_data.txt'):
    # initialize the FaceNet TFLite model
    engine = BasicEngine("../src/models/facenet_edgetpu.tflite")
    # list of people (subdirectory folder names)
    people = [person for person in os.listdir("image_data/")
              ] if user_id == "-1" else [str(user_id)]
    with open(output_file, 'a+') as f:
        writer = csv.writer(f)
        for person in people:
            image_names = [
                image for image in os.listdir("image_data/" + person)
            ]
            # run inference on each image in the directory
            for image_name in image_names:
                image = Image.open("image_data/" + person + '/' + image_name)
                print("\t->" + person + '/' + image_name)
                # run inference
                engine.RunInference(np.array(image).flatten())
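                # CSV row layout: value[0] = person label, value[1:] = 512-D FaceNet embedding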
                value = np.zeros(513).astype(object)
                value[0] = str(person).replace('_', ' ')
                value[1:] = engine.get_raw_output()
                # append new label and face embedding pair of the image to the output file
                writer.writerow(value)
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model",
                        help="File path of Tflite model.",
                        required=True)
    parser.add_argument("--image",
                        help="File path of the image to be recognized.",
                        required=True)
    # parser.add_argument(
    #    '--label', help='File path of label file.', required=True)
    args = parser.parse_args()

    # Initialize colormap
    colormap = label_util.create_pascal_label_colormap()

    # Read image
    org_img = Image.open(args.image)
    im_width, im_height = org_img.size
    engine = BasicEngine(args.model)

    _, height, width, _ = engine.get_input_tensor_shape()
    img = org_img.resize((width, height), Image.NEAREST)
    input_tensor = np.asarray(img).flatten()

    latency, result = engine.RunInference(input_tensor)
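    # latency is the inference time in ms; result is a flat array of per-pixel class indices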

    seg_map = np.array(result, dtype=np.uint8)
    seg_map = np.reshape(seg_map, (width, height))

    seg_image = label_util.label_to_color_image(colormap, seg_map)
    seg_image = Image.fromarray(seg_image).resize((im_width, im_height),
                                                  Image.NEAREST)
    out_image = np.array(org_img) * 0.5 + np.array(seg_image) * 0.5

    pil_img = Image.fromarray(out_image.astype(np.uint8))
    pil_img.save(os.path.join(".", "save.png"))
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default=
        "models/train/test/tpu/mobilenet_v2_1.4_224/output_tflite_graph_edgetpu.tflite",
        help="Path of the inference model.")
    parser.add_argument("--usbcamno",
                        type=int,
                        default=0,
                        help="USB Camera number.")
    parser.add_argument("--usbcamfps",
                        type=int,
                        default=30,
                        help="USB Camera FPS.")
    args = parser.parse_args()

    camera_width = 320
    camera_height = 240

    fps = ""
    framecount = 0
    time1 = 0
    elapsedTime = 0

    h = 368
    w = 432

    new_w = int(camera_width * min(w / camera_width, h / camera_height))
    new_h = int(camera_height * min(w / camera_width, h / camera_height))

    threshold = 0.1
    nPoints = 18

    cap = cv2.VideoCapture(args.usbcamno)
    cap.set(cv2.CAP_PROP_FPS, args.usbcamfps)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    cv2.namedWindow("USB Camera", cv2.WINDOW_AUTOSIZE)

    # Initialize engine.
    engine = BasicEngine(args.model)

    # Run inference.
    while True:
        t1 = time.perf_counter()

        ret, color_image = cap.read()
        if not ret:
            break

        resized_image = cv2.resize(color_image, (new_w, new_h),
                                   interpolation=cv2.INTER_CUBIC)
        prepimg = resized_image[:, :, ::-1].copy()
        canvas = np.full((h, w, 3), 128)
        canvas2 = np.full((h, w, 3), 128)  # separate array: a chained assignment would alias the same buffer
        canvas[(h - new_h) // 2:(h - new_h) // 2 + new_h,
               (w - new_w) // 2:(w - new_w) // 2 + new_w, :] = prepimg
        canvas2[(h - new_h) // 2:(h - new_h) // 2 + new_h,
                (w - new_w) // 2:(w - new_w) // 2 + new_w, :] = resized_image

        prepimg = np.uint8(canvas).flatten()
        #prepimg = canvas.flatten()

        #tinf = time.perf_counter()
        #ans = engine.DetectWithImage(prepimg, threshold=0.5, keep_aspect_ratio=True, relative_coord=False, top_k=10)

        #print("engine.required_input_array_size()=", engine.required_input_array_size()) #476928
        #print("prepimg.flatten()=", len(prepimg)) #476928
        ans = engine.RunInference(prepimg)
        #print("len(ans)=", len(ans)) #2
        #print("ans[0]=", ans[0]) #3.071000099182129
        print("ans[1]=", ans[1])
        # e.g. [0.04705882 0.09411765 0.04705882 ... 0.07058824 0.23529412 0.        ]
        print("len(ans[1])=", len(ans[1]))  # 141588 = 1x46x54x57 or 1x57x46x54

        outputs = ans[1].reshape((1, 46, 54, 57)).transpose(
            (0, 3, 1, 2))  #(1, 57, 46, 54)
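        # The 57 channels are presumably the OpenPose COCO outputs:
        # 19 keypoint heatmaps (18 body parts + background) + 38 Part Affinity Field channels.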
        #outputs = outputs[np.newaxis, :, :, :]
        #outputs = ans[1].reshape((1, 57, 46, 54)) #(1, 57, 46, 54)

        #print(time.perf_counter() - tinf, "sec")
        #sys.exit(0)

        detected_keypoints = []
        keypoints_list = np.zeros((0, 3))
        keypoint_id = 0

        #print("outputs.shape()=", outputs.shape)
        #print("outputs.shape(outputs[0, 0, :, :])=", outputs[0, 0, :, :])

        for part in range(nPoints):
            probMap = outputs[0, part, :, :]
            probMap = cv2.resize(probMap, (w, h))  # (432, 368)
            keypoints = getKeypoints(probMap, threshold)
            keypoints_with_id = []

            for i in range(len(keypoints)):
                keypoints_with_id.append(keypoints[i] + (keypoint_id, ))
                keypoints_list = np.vstack([keypoints_list, keypoints[i]])
                keypoint_id += 1

            detected_keypoints.append(keypoints_with_id)

        #print("len(detected_keypoints)=", len(detected_keypoints))

        frameClone = np.uint8(canvas2.copy())
        for i in range(nPoints):
            #print("detected_keypoints[i]=", detected_keypoints[i])
            for j in range(len(detected_keypoints[i])):

                cv2.circle(frameClone, detected_keypoints[i][j][0:2], 5,
                           colors[i], -1, cv2.LINE_AA)

        valid_pairs, invalid_pairs = getValidPairs(outputs, w, h,
                                                   detected_keypoints)
        #print("valid_pairs, invalid_pairs=", valid_pairs, invalid_pairs)
        personwiseKeypoints = getPersonwiseKeypoints(valid_pairs,
                                                     invalid_pairs,
                                                     keypoints_list)

        print("personwiseKeypoints=", personwiseKeypoints)

        for i in range(17):
            for n in range(len(personwiseKeypoints)):
                index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]
                if -1 in index:
                    continue
                B = np.int32(keypoints_list[index.astype(int), 0])
                A = np.int32(keypoints_list[index.astype(int), 1])
                cv2.line(frameClone, (B[0], A[0]), (B[1], A[1]), colors[i], 3,
                         cv2.LINE_AA)

        cv2.putText(frameClone, fps, (w - 170, 15), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (38, 0, 255), 1, cv2.LINE_AA)

        cv2.imshow("USB Camera", frameClone)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # FPS calculation
        framecount += 1
        if framecount >= 15:
            fps = "(Playback) {:.1f} FPS".format(time1 / 15)
            framecount = 0
            time1 = 0
        t2 = time.perf_counter()
        elapsedTime = t2 - t1
        time1 += 1 / elapsedTime

    cap.release()
    cv2.destroyAllWindows()
Example #9
def inference_thread(running, state, result_buffer, frame_buffer, args, identity_dict, current_identity):
    global IDLE, TRACK, RESET, FACE_RECOG_THRESHOLD, FACE_RECOG_THRESHOLD_A
    global od_engine, face_detector, facenet_engine, svm_clf
    # Initialize object detection engine.
    od_engine = DetectionEngine(args.od_model)
    print("device_path: ", od_engine.device_path())
    _, od_width, od_height, _ = od_engine.get_input_tensor_shape()
    print("od input dim: ", od_width, od_height)
    # initialize the face detector using the OpenCV Haar cascade model
    face_detector = FaceDetector(args.hc_model)
    # Initialize facenet engine.
    facenet_engine = BasicEngine(args.fn_model)
    # load the sklearn support vector machine model from disk
    svm_clf = pickle.load(open(args.svm_model, 'rb'))

    while running.value:
        # check if the frame buffer has a frame, else busy waiting
        if frame_buffer.empty():
            continue
        frame = frame_buffer.get()
        tinf = time.perf_counter()

        if state.value == IDLE:
            fd_results = None
            # reorder image frame from BGR to RGB
            img = frame[:,:,::-1]
            # face detection
            faces_coord = face_detector.detect(img, True)
            # image preprocessing, downsampling
            print("faces_coord: ",faces_coord)
            if not isinstance(faces_coord, type(None)):
                # normalize face image
                face_image = np.array(normalize_faces(img ,faces_coord))
                # facenet to generate face embedding
                facenet_engine.RunInference(face_image.flatten())
                face_emb = facenet_engine.get_raw_output().reshape(1,-1)
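                # face_emb is the raw FaceNet embedding reshaped to (1, n_features) for the SVM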
                # use the SVM to classify identity from the face embedding
                pred_prob = svm_clf.predict_proba(face_emb)
                best_class_index = np.argmax(pred_prob, axis=1)[0]
                best_class_prob = pred_prob[0, best_class_index]
                print("best_class_index: ",best_class_index)
                print("best_class_prob: ",best_class_prob)
                print("label", svm_clf.classes_[best_class_index])
                # Check the threshold and verify the identity is in the identity dictionary
                if best_class_prob > FACE_RECOG_THRESHOLD:
                    face_label = svm_clf.classes_[best_class_index]
                    if face_label in identity_dict:
                        print("\n=================================")
                        print("Identity found: ", face_label, " ",identity_dict[face_label],
                            " with Prob = ", best_class_prob)
                        print("=================================\n")
                        current_identity.value = identity_dict[face_label][0] # ID
                result_buffer.put(faces_coord)
        elif state.value == TRACK:
            od_results = None
            # convert numpy array representation to PIL image with rgb format
            img = Image.fromarray(frame[:,:,::-1], 'RGB')
            # Run inference.
            od_results = od_engine.DetectWithImage(img, threshold=0.30, keep_aspect_ratio=True, relative_coord=False, top_k=10)
            # push result to buffer queue
            result_buffer.put(od_results)
        print(time.perf_counter() - tinf, "sec")
    print("[Finish] inference_thread")
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='File path of Tflite model.', required=True)
    parser.add_argument('--width', help='Resolution width.', default=640, type=int)
    parser.add_argument('--height', help='Resolution height.', default=480, type=int)
    parser.add_argument('--nano', help='Works with Jetson Nano and Pi Camera.', action='store_true')
    # parser.add_argument(
    #    '--label', help='File path of label file.', required=True)
    args = parser.parse_args()

    # Initialize window.
    cv2.namedWindow(
        WINDOW_NAME, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO
    )
    cv2.moveWindow(WINDOW_NAME, 100, 200)

    # Initialize colormap
    colormap = label_util.create_pascal_label_colormap()

    # Initialize engine.
    engine = BasicEngine(args.model)
    _, width, height, _ = engine.get_input_tensor_shape()

    if args.nano:
        GST_STR = 'nvarguscamerasrc \
            ! video/x-raw(memory:NVMM), width={0:d}, height={1:d}, format=(string)NV12, framerate=(fraction)30/1 \
            ! nvvidconv flip-method=2 !  video/x-raw, width=(int){2:d}, height=(int){3:d}, format=(string)BGRx \
            ! videoconvert \
            ! appsink'.format(args.width, args.height, args.width, args.height)
        cap = cv2.VideoCapture(GST_STR, cv2.CAP_GSTREAMER)

    else:
        cap = cv2.VideoCapture(0)
        cap.set(3, args.width)
        cap.set(4, args.height)

    while(cap.isOpened()):
        _, frame = cap.read()

        start_ms = time.time()

        # Create input tensor
        # camera resolution  => input tensor size (513, 513)
        input_buf = cv2.resize(frame, (width, height))
        input_buf = cv2.cvtColor(input_buf, cv2.COLOR_BGR2RGB)
        input_tensor = input_buf.flatten()

        # Run inference
        latency, result = engine.RunInference(input_tensor)

        # Create segmentation map
        seg_map = np.array(result, dtype=np.uint8)
        seg_map = np.reshape(seg_map, (width, height))
        seg_image = label_util.label_to_color_image(colormap, seg_map)

        # segmentation map resize 513, 513 => camera resolution
        seg_image = cv2.resize(seg_image, (args.width, args.height))
        im = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) // 2 + seg_image // 2
        im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)

        elapsed_ms = time.time() - start_ms

        # Calc fps.
        fps = 1 / elapsed_ms
        fps_text = '{0:.2f}ms, {1:.2f}fps'.format((elapsed_ms * 1000.0), fps)
        visual.draw_caption(im, (10, 30), fps_text)

        latency_text = 'RunInference latency: {0:.2f}ms'.format(latency)
        visual.draw_caption(im, (10, 60), latency_text)

        # Display image
        cv2.imshow(WINDOW_NAME, im)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('q'):
            break

        if not args.nano:
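            # presumably drops buffered frames so the next read returns a recent frame (USB camera only)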
            for i in range(10):
                ret, frame = cap.read()
                
    # When everything done, release the window
    cap.release()
    cv2.destroyAllWindows()
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", help="File path of Tflite model.", required=True)
    parser.add_argument("--width", help="Resolution width.", default=640)
    parser.add_argument("--height", help="Resolution height.", default=480)
    # parser.add_argument(
    #    '--label', help='File path of label file.', required=True)
    args = parser.parse_args()

    # Initialize window.
    cv2.namedWindow(
        WINDOW_NAME, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO
    )
    cv2.moveWindow(WINDOW_NAME, 100, 200)

    # Initialize colormap
    colormap = label_util.create_pascal_label_colormap()

    # Initialize engine.
    engine = BasicEngine(args.model)

    resolution_width = args.width
    resolution_height = args.height
    with picamera.PiCamera() as camera:

        camera.resolution = (resolution_width, resolution_height)
        camera.framerate = 30
        _, width, height, _ = engine.get_input_tensor_shape()
        rawCapture = PiRGBArray(camera)

        # allow the camera to warmup
        time.sleep(0.1)

        try:
            for frame in camera.capture_continuous(
                rawCapture, format="rgb", use_video_port=True
            ):
                start_ms = time.time()

                rawCapture.truncate(0)
                image = frame.array

                # Create input tensor
                # camera resolution (640, 480) => input tensor size (513, 513)
                input_buf = Image.fromarray(image)
                input_buf = input_buf.resize((width, height), Image.NEAREST)
                input_tensor = np.asarray(input_buf).flatten()

                # Run inference
                latency, result = engine.RunInference(input_tensor)

                # Create segmentation map
                seg_map = np.array(result, dtype=np.uint8)
                seg_map = np.reshape(seg_map, (width, height))
                seg_image = label_util.label_to_color_image(colormap, seg_map)
                # segmentation map resize 513, 513 => camera resolution(640, 480)
                seg_image = cv2.resize(seg_image, (resolution_width, resolution_height))
                out_image = image // 2 + seg_image // 2
                im = cv2.cvtColor(out_image, cv2.COLOR_RGB2BGR)  # display image

                elapsed_ms = time.time() - start_ms

                # Calc fps.
                fps = 1 / elapsed_ms
                fps_text = "{0:.2f}ms, {1:.2f}fps".format((elapsed_ms * 1000.0), fps)
                visual.draw_caption(im, (10, 30), fps_text)

                latency_text = "RunInference latency: {0:.2f}ms".format(latency)
                visual.draw_caption(im, (10, 60), latency_text)

                # Display image
                cv2.imshow(WINDOW_NAME, im)
                key = cv2.waitKey(10) & 0xFF
                if key == ord("q"):
                    break

        finally:
            camera.stop_preview()

    # When everything done, release the window
    cv2.destroyAllWindows()
Example #12
class InferenceEngine:
    """Base inferencing class that wraps Tensorflow Lite"""
    def __init__(self, model_path, output_shapes):
        """
        Initializes InferenceEngine. Creates interpreter, allocates tensors, and grabs input and output details.

        :param model_path: Path to Tensorflow Lite Model
        :param output_shapes: List of tuples, each tuple containing the shape of an output tensor
        """
        # load trained model
        if use_tpu:
            model_path = os.path.splitext(model_path)[0] + '_edgetpu.tflite'
            self.TPU_engine = BasicEngine(model_path)
        else:
            self.interpreter = tf.lite.Interpreter(model_path=model_path)
            self.interpreter.allocate_tensors()

            # Get input and output tensors
            self.input_details = self.interpreter.get_input_details()
            self.output_details = self.interpreter.get_output_details()
        self.output_shapes = output_shapes

    def get_input_shape(self):
        """
        Gets the shape of the input tensor of the model

        :return: Input shape as a tuple
        """
        if use_tpu:
            return self.TPU_engine.get_input_tensor_shape()
        else:
            return self.input_details[0]['shape']

    def invoke(self, input_tensor, lambda_preprocess=None):
        """
        Invokes the interpreter. Basically has the interpreter run the needed calculation to have the output
        tensors ready.

        :param input_tensor: Input Tensor (nD numpy array)
        :param lambda_preprocess: Lambda function to apply to input tensors after resizing
        :return: A list of output tensors (list of numpy arrays)
        """
        input_shape = self.get_input_shape()
        resized_tensor = cv2.resize(input_tensor,
                                    (input_shape[1], input_shape[2]))
        if lambda_preprocess:
            resized_tensor = lambda_preprocess(resized_tensor)

        output = []
        if use_tpu:
            resized_tensor = resized_tensor.flatten()
            _, raw_results = self.TPU_engine.RunInference(resized_tensor)
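            # The Edge TPU returns all output tensors concatenated into one flat array;
            # split it back into per-tensor slices and reshape each to its expected shape.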

            so_far = 0
            for index, shape in enumerate(self.output_shapes):
                output_size = self.TPU_engine.get_output_tensor_size(index)
                output.append(raw_results[so_far:so_far +
                                          output_size].reshape(shape))
                so_far += output_size
        else:
            self.interpreter.set_tensor(self.input_details[0]['index'],
                                        resized_tensor.reshape(input_shape))
            self.interpreter.invoke()

            for index, detail in enumerate(self.output_details):
                output.append(
                    self.interpreter.get_tensor(detail['index']).reshape(
                        self.output_shapes[index]))

        return output
Example #13
import time
from PIL import Image, ImageDraw, ImageFont
import numpy
from edgetpu.basic.basic_engine import BasicEngine

MODEL_NAME = "model_stride/model_stride_256x256x3_edgetpu.tflite"

### Load model and prepare TPU engine
engine = BasicEngine(MODEL_NAME)
width = engine.get_input_tensor_shape()[1]
height = engine.get_input_tensor_shape()[2]
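# Note: get_input_tensor_shape() returns [1, height, width, channels];
# the model here is square (256x256), so the index order does not matter.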

### prepare input tensor
img = Image.new('RGB', (width, height), (128, 128, 128))
draw = ImageDraw.Draw(img)
input_tensor = numpy.asarray(img).flatten()

### Run inference
start = time.time()
num_measurement = 10000
for i in range(num_measurement):
    _, raw_result = engine.RunInference(input_tensor)
    # time.sleep(2)
elapsed_time = time.time() - start
print("elapsed_time: {0} ".format(1000 * elapsed_time / num_measurement) +
      "[msec]")
Example #14
import numpy as np
import pandas as pd
# from edgetpu.classification.engine import ClassificationEngine
from edgetpu.basic.basic_engine import BasicEngine

from sklearn.datasets import load_iris

data = load_iris()
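# Scale the features by 10; they are later cast to uint8, presumably to better use
# the quantized model's uint8 input range.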
x_train = (data.data * 10).astype(np.float32)
y_train = pd.get_dummies(data.target).values.astype(np.float32)

eng = BasicEngine("converted_model_edgetpu.tflite")
print(eng)
print(eng.get_all_output_tensors_sizes(), eng.get_input_tensor_shape())

for i in range(150):
    # inp = np.array([1,2,3]).astype(np.uint8)
    inp = x_train[i].astype(np.uint8)
    # inp = [30,30,30,30]
    print(inp, x_train[i], y_train[i])
    print(eng.RunInference(inp)[1])