Ejemplo n.º 1
0
def main():
    """Benchmark a TFLite Edge TPU model's average inference time.

    Command-line flags:
        --model  path to the .tflite model (required)
        --image  path to the image to classify (required)
        --num    number of timed runs (default 10)

    A brand-new ClassificationEngine is built for every run and dropped
    afterwards, so each measurement starts from a fresh engine rather
    than a warmed-up one.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", help="File path of Tflite model.", required=True)
    parser.add_argument(
        "--image", help="File path of the image to be recognized.", required=True
    )
    parser.add_argument(
        "--num", help="Number of inference executions.", default=10, type=int
    )
    args = parser.parse_args()

    img = Image.open(args.image)
    timings = []

    for _ in range(args.num):
        # Fresh engine per run: classify once, record its inference
        # time, then discard the engine before the next iteration.
        engine = ClassificationEngine(args.model)
        engine.ClassifyWithImage(img, top_k=3)
        timings.append(engine.get_inference_time())
        del engine

    # Report the mean over all runs.
    print("Model inference time avg: {0:.4f}".format(statistics.mean(timings)))
Ejemplo n.º 2
0
def main():
    """Compare average inference times of two TFLite Edge TPU models.

    Command-line flags:
        --model1  path to the first .tflite model (required)
        --model2  path to the second .tflite model (required)
        --image   path to the image to classify (required)
        --num     number of timed runs per model (default 100)

    Each model is run ``--num + 1`` times on the same image; iteration 0
    is treated as a warm-up and excluded from the averages.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model1",
                        help="File path of Tflite model.",
                        required=True)
    parser.add_argument("--model2",
                        help="File path of Tflite model.",
                        required=True)
    parser.add_argument("--image",
                        help="File path of the image to be recognized.",
                        required=True)
    parser.add_argument("--num",
                        help="Number of inference executions.",
                        default=100,
                        type=int)
    args = parser.parse_args()

    # Initialize each engine once; both are reused across all runs.
    engine1 = ClassificationEngine(args.model1)
    engine2 = ClassificationEngine(args.model2)

    inference_time1 = []
    inference_time2 = []

    # Hoisted out of the loop: the image file is loop-invariant, so
    # re-opening it on every iteration was wasted I/O.
    img = Image.open(args.image)

    # BUG FIX: the original read `range(num + 1)`, but `num` is never
    # defined (NameError at runtime) -- the parsed value is `args.num`.
    for i in range(args.num + 1):
        engine1.ClassifyWithImage(img, top_k=3)
        engine2.ClassifyWithImage(img, top_k=3)

        # Skip i == 0: the first pass is a warm-up run.
        if i > 0:
            inference_time1.append(engine1.get_inference_time())
            inference_time2.append(engine2.get_inference_time())

    # Report the mean per model over the timed runs.
    print("Model1 inference time avg: {0:.4f}".format(
        statistics.mean(inference_time1)))
    print("Model2 inference time avg: {0:.4f}".format(
        statistics.mean(inference_time2)))
Ejemplo n.º 3
0
    # NOTE(review): this chunk starts mid-function -- `predictions` is
    # built above the visible region (appears to be a dict of lists;
    # confirm against the enclosing function).
    # Enumerate every labeled image and record its path, split, and
    # ground-truth label (1 for 'positive', 0 for 'negative').
    for split in ['train', 'validation', 'test']:
        for label in ['positive', 'negative']:
            data_paths = glob.glob('data/' + split + '/' + label + '/*.png')
            predictions['path'].extend(data_paths)
            predictions['set'].extend([split] * len(data_paths))
            predictions['truth'].extend([int(label == 'positive')] * len(data_paths))

    # Per-model inference timings, keyed by model directory name.
    inference_times = {}
    model_paths = glob.glob('models/*/model_edgetpu.tflite')

    for model_path in model_paths:
        classifier = ClassificationEngine(model_path)
        # Model name is the directory component, e.g. 'models/<name>/...'.
        model_name = model_path.split('/')[1]
        # First three underscore-separated tokens of the name encode the
        # input shape -- presumably H_W_C; unused in the visible code.
        input_shape = [int(dim) for dim in model_name.split('_')[:3]]
        predictions[model_name] = []
        inference_times[model_name] = []
        for path in predictions['path']:
            image = Image.open(path)
            # Set threshold to smaller than 0 to receive each prediction in range [0, 1]
            prediction = classifier.classify_with_image(image, threshold=-1)
            inference_time = classifier.get_inference_time()
            # Predictions are returned as [(label_id, confidence_score)]
            # NOTE(review): .astype suggests the score is a numpy scalar;
            # converted to float so json.dump can serialize it.
            predictions[model_name].append(prediction[0][1].astype(float))
            inference_times[model_name].append(inference_time)

    # Persist raw scores and timings for offline analysis.
    with open('results/predictions_edgetpu.json', 'w') as fp:
        json.dump(predictions, fp)

    with open('results/inference_times_edgetpu.json', 'w') as fp:
        json.dump(inference_times, fp)
Ejemplo n.º 4
0
    # NOTE(review): this chunk starts mid-function -- `camera`, `engine`,
    # `labels`, `cntr`, and `GPIO` are all set up outside the visible
    # region; the trailing else-branch may also continue past this view.
    # Loop flag; never cleared in the visible code, so the loop runs
    # until interrupted externally.
    p = True
    camera.start_preview()
    while p:
        # Button press pulls GPIO pin 23 low -- presumably a shutter
        # button wired active-low; confirm against the wiring setup.
        if GPIO.input(23) == GPIO.LOW:
            # Play a shutter sound ("kasha") via the jsay.sh TTS script.
            os.system("./jsay.sh \"かしゃ\"")
            #camera.capture('./image.jpg')
            # Capture to a numbered file so earlier shots are kept.
            name = "./image/image" + str(cntr) + ".jpg"
            camera.capture(name)
            cntr = cntr + 1
            img = Image.open(name)
            # Crop to a centered 440x720 region (left, top, right, bottom).
            img2 = img.crop((280, 0, 720, 720))

            # Time the whole classify call (wall clock) and also read the
            # engine's own reported inference time.
            start = time.time()
            inf = engine.classify_with_image(img2, top_k=3)
            eltime = time.time() - start
            iftime = engine.get_inference_time()
            print('num i = ', len(inf))
            if (len(inf) == 0):
                # No result above threshold: "I'm not sure" message.
                lab2 = u"よくわかりません"
            else:
                # Log every returned (label_id, score) candidate.
                for result in inf:
                    print('---------------------------')
                    print(labels[result[0]])
                    print('Score : ', result[1])
                print('elTime : ', eltime)
                print('ifTime : ', iftime)
                # Use the top candidate; hedge the spoken message when
                # its confidence is below 0.4.
                result = inf[0]
                if result[1] >= 0.4:
                    lab2 = u"これは" + labels[result[0]] + "です"
                else:
                    lab2 = u"これはたぶん" + labels[result[0]] + "かもしれません"
Ejemplo n.º 5
0
	# NOTE(review): this chunk starts mid-loop -- `frame`, `model`,
	# `classNames`, and `fps` come from outside the visible region.
	# Normalize frame width; keep an untouched copy for drawing on.
	frame = imutils.resize(frame, width=500)
	orig = frame.copy()
	# prepare the image for classification by converting from a NumPy
	# array to PIL image format
	frame = Image.fromarray(frame)

	# make predictions on the input frame
	results = model.classify_with_image(frame, top_k=1)

	# ensure at least one result was found
	if len(results) > 0:
		# draw the predicted class label and probability on the
		# output frame
		(classID, score) = results[0]
		# NOTE(review): /1000 implies get_inference_time() reports
		# milliseconds and the overlay shows seconds -- confirm units.
		text = "{}: {:.2f}% ({:.4f} sec)".format(classNames[classID],
			score * 100, model.get_inference_time() / 1000)
		cv2.putText(orig, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
			0.5, (0, 0, 255), 2)

	# show the output frame and wait for a key press
	cv2.imshow("Frame", orig)
	key = cv2.waitKey(1) & 0xFF

	
	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))