Example 1
def upload_file():
    coralReply = ''
    coralPercent = ''
    form = UploadForm()
    if form.validate_on_submit():
        filename = photos.save(form.photo.data)
        file_url = photos.url(filename)
        imageSpot = os.path.basename(file_url)
        # Prepare labels.
        labels = ReadLabelFile(myLabel)
        # Initialize engine.
        engine = ClassificationEngine(myModel)
        # Run inference.
        img = Image.open('myPhotos/' + imageSpot)
        for result in engine.ClassifyWithImage(img, top_k=3):
            print('---------------------------')
            print(labels[result[0]])
            print('Score : ', result[1])
            showResult = labels[result[0]]
            showPercent = str(int(round(result[1] * 100)))
            # NOTE: each iteration overwrites these, so the template ends up
            # showing the last (lowest-scoring) of the top-3 results.
            coralReply = showResult
            coralPercent = showPercent + '% confidence'
    else:
        file_url = None
        coralReply = ''
        coralPercent = ''
    return render_template('index.html',
                           form=form,
                           file_url=file_url,
                           coralReply=coralReply,
                           coralPercent=coralPercent)
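Several of these examples call a ReadLabelFile helper they never define. A minimal sketch, assuming label files with one "<numeric id> <label>" pair per line, as in the files shipped with the Edge TPU demo models:

# Minimal sketch of the ReadLabelFile helper the surrounding examples assume.
def ReadLabelFile(file_path):
    ret = {}
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            pair = line.strip().split(maxsplit=1)
            if len(pair) == 2:
                ret[int(pair[0])] = pair[1]
    return ret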
Example 2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", help="File path of Tflite model.", required=True)
    parser.add_argument(
        "--image", help="File path of the image to be recognized.", required=True
    )
    parser.add_argument(
        "--num", help="Number of inference executions.", default=10, type=int
    )
    args = parser.parse_args()

    inference_time = []
    img = Image.open(args.image)

    for i in range(args.num):
        # Initialize engine.
        engine = ClassificationEngine(args.model)

        # Run inference.
        result1 = engine.ClassifyWithImage(img, top_k=3)

        # Record the latency of the most recent inference, which
        # get_inference_time() reports in milliseconds.
        inference_time.append(engine.get_inference_time())

        # Delete the engine so each iteration constructs a fresh one.
        del engine

    # Print avg
    print("Model inference time avg: {0:.4f}".format(statistics.mean(inference_time)))
Example 3
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of Tflite model.',
                        required=True)
    parser.add_argument('--image', help='File path of the image file.', required=True)
    args = parser.parse_args()

    label_names = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]
    img = Image.open(args.image)
    #np_image = np.array(img)

    # Load Engine
    engine = ClassificationEngine(args.model)

    lap_time = time.time()

    # Run inference.
    for result in engine.ClassifyWithImage(img, top_k=3):
        print('---------------------------')
        print(label_names[result[0]])
        print('Score : ', result[1])

    previous_time = lap_time
    lap_time = time.time()
    # Note: this interval covers the single ClassifyWithImage call plus the
    # printing above, not just the raw inference.
    print("Elapsed time for the last inference: ", lap_time - previous_time)
Example 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of Tflite model.',
                        required=True)
    parser.add_argument('--label',
                        help='File path of label file.',
                        required=True)
    parser.add_argument('--picamera',
                        action='store_true',
                        help="Use PiCamera for image capture",
                        default=False)
    args = parser.parse_args()

    # Prepare labels.
    labels = ReadLabelFile(args.label) if args.label else None
    # Initialize engine.
    engine = ClassificationEngine(args.model)

    # Initialize video stream
    vs = VideoStream(usePiCamera=args.picamera, resolution=(640, 480)).start()
    time.sleep(1)

    fps = FPS().start()

    while True:
        try:
            # Read frame from video
            screenshot = vs.read()
            image = Image.fromarray(screenshot)

            # Perform inference and keep time
            start_time = time.time()
            results = engine.ClassifyWithImage(image, top_k=1)
            print(results)
            result = labels[results[0][0]] if results != [] else 'None'
            print(result)
            draw_image(image, result)

            if (cv2.waitKey(5) & 0xFF == ord('q')):
                fps.stop()
                break

            fps.update()
        except KeyboardInterrupt:
            fps.stop()
            break

    print("Elapsed time: " + str(fps.elapsed()))
    print("Approx FPS: :" + str(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
    time.sleep(2)
Example 5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model1",
                        help="File path of Tflite model.",
                        required=True)
    parser.add_argument("--model2",
                        help="File path of Tflite model.",
                        required=True)
    parser.add_argument("--image",
                        help="File path of the image to be recognized.",
                        required=True)
    parser.add_argument("--num",
                        help="Number of inference executions.",
                        default=100,
                        type=int)
    args = parser.parse_args()

    # Initialize engine.
    engine1 = ClassificationEngine(args.model1)
    engine2 = ClassificationEngine(args.model2)

    # Run inference.
    inference_time1 = []
    inference_time2 = []

    # One warm-up pass, then args.num timed passes.
    for i in range(args.num + 1):
        img = Image.open(args.image)
        result1 = engine1.ClassifyWithImage(img, top_k=3)
        result2 = engine2.ClassifyWithImage(img, top_k=3)

        # Get Inference time.
        if i > 0:
            inference_time1.append(engine1.get_inference_time())
            inference_time2.append(engine2.get_inference_time())

    # Avg
    print("Model1 inference time avg: {0:.4f}".format(
        statistics.mean(inference_time1)))
    print("Model2 inference time avg: {0:.4f}".format(
        statistics.mean(inference_time2)))
Example 6
class ClassifyEdgeTPU(ImageClassifier):
    def __init__(self):
        super().__init__()
        self.label_file = (config.download_directory + os.path.sep +
                           "imagenet_labels.txt")
        self.model_file = (config.download_directory + os.path.sep +
                           "mobilenet_v2_1.0_224_quant_edgetpu.tflite")

    def load_model(self, label_file=None, model_file=None):
        """Load a pretrained model"""

        # Prepared labels
        if label_file is not None:
            self.label_file = label_file
        self.labels = self.read_label_file(self.label_file)

        # Initialize TPU engine
        if model_file is not None:
            self.model_file = model_file

        from edgetpu.classification.engine import ClassificationEngine

        self.model = ClassificationEngine(self.model_file)

    def read_label_file(self, file_path):
        """Function to read labels from text files"""
        with open(file_path, 'r') as f:
            lines = f.readlines()
        ret = {}
        for line in lines:
            num = line[:4].strip()
            label = line[5:].strip().split(',')[0].lower()
            ret[int(num)] = label
        return ret

    def predict(self, image_a, top=5, score=False):
        # Pass top through so the parameter is actually honored.
        pred = self.model.ClassifyWithImage(image_a, top_k=top)
        if len(pred) == 0:
            p_label = 'other'
            p_score = 0.0
        else:
            p_n_label, p_score = pred[0]
            p_label = self.labels[p_n_label]
        if score:
            return p_label, p_score
        else:
            return p_label

    def predict_file(self, file_path, top=5):
        image_a = self.preprocess(file_path)
        p_label = self.predict(image_a, top=top)
        return p_label
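A hypothetical usage sketch for this class; the image path is a placeholder, and it assumes the label and model files referenced in __init__ exist under config.download_directory:

from PIL import Image

classifier = ClassifyEdgeTPU()
classifier.load_model()
# predict() takes a PIL image and returns the top label (plus score on request).
label, score = classifier.predict(Image.open('cat.jpg'), score=True)
print(label, score)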
Example 7
def main():

    # labelfile, model, and image_folder are assumed to be defined elsewhere;
    # image_folder should be a glob pattern such as 'images/*'.
    # Prepare labels.
    labels = ReadLabelFile(labelfile)
    # Initialize engine.
    engine = ClassificationEngine(model)
    # Run inference on every image matched by the image_folder pattern.
    for image in glob.glob(image_folder):
        img = Image.open(image)
        for result in engine.ClassifyWithImage(img, top_k=1):
            print('---------------------------')
            print(labels[result[0]])
            print('Score : ', result[1])
            print('-----END-------------------')
Example 8
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model', help='File path of Tflite model.', required=True)
  parser.add_argument('--label', help='File path of label file.', required=True)
  parser.add_argument(
      '--image', help='File path of the image to be recognized.', required=True)
  args = parser.parse_args()

  # Prepare labels.
  labels = dataset_utils.ReadLabelFile(args.label)
  # Initialize engine.
  engine = ClassificationEngine(args.model)
  # Run inference.
  img = Image.open(args.image)
  for result in engine.ClassifyWithImage(img, top_k=3):
    print('---------------------------')
    print(labels[result[0]])
    print('Score : ', result[1])
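A typical invocation of this script, with hypothetical file names modeled on the stock Coral demo assets:

python3 classify_image.py \
    --model mobilenet_v2_1.0_224_quant_edgetpu.tflite \
    --label imagenet_labels.txt \
    --image parrot.jpg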
Example 9
def frames():
        objectEngine = DetectionEngine(
            "models/edge-tpu/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
        )
        labels = Camera.ReadLabelFile("models/edge-tpu/coco_labels.txt")
        faceEngine = DetectionEngine(
            "models/edge-tpu/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite"
        )
        inceptionObjectEngine = ClassificationEngine(
            "models/edge-tpu/inception_v3_299_quant_edgetpu.tflite")
        inceptionLabels = Camera.ReadLabelFile(
            "models/edge-tpu/imagenet_labels.txt")

        width = 1280
        height = 720
        camera = PiCamera()
        camera.resolution = (width, height)
        camera.framerate = 30
        camera.vflip = True
        camera.hflip = True
        camera.meter_mode = "matrix"
        camera.awb_mode = "auto"
        camera.image_denoise = True
        camera.start_preview()
        try:
            while True:
                objectsInFrame = {}
                stream = io.BytesIO()
                camera.capture(stream, format='jpeg')
                stream.seek(0)
                img = Image.open(stream)
                draw = ImageDraw.Draw(img)

                for result in inceptionObjectEngine.ClassifyWithImage(
                        img, top_k=10):
                    score = result[1]
                    if score > 0.1:
                        label = inceptionLabels[result[0]]
                        objectsInFrame[label] = str(score)

                detectedObjects = objectEngine.DetectWithImage(
                    img,
                    threshold=0.05,
                    keep_aspect_ratio=True,
                    relative_coord=False,
                    top_k=10)
                for detection in detectedObjects:
                    confidence = detection.score
                    if confidence > .4:
                        label = labels[detection.label_id]
                        objectsInFrame[label] = str(confidence)
                        box = detection.bounding_box.flatten().tolist()
                        Camera.drawBoxAndLabel(draw, box, label, 'red')

                detectedFaces = faceEngine.DetectWithImage(
                    img,
                    threshold=0.05,
                    keep_aspect_ratio=True,
                    relative_coord=False,
                    top_k=10)
                for face in detectedFaces:
                    confidence = face.score
                    if confidence > .2:
                        box = face.bounding_box.flatten().tolist()
                        label = "face"
                        objectsInFrame[label] = str(confidence)
                        Camera.drawBoxAndLabel(draw, box, label, 'green')

                objectsInFrame = json.dumps(objectsInFrame)
                output = io.BytesIO()
                img.save(output, format='JPEG')
                yield (objectsInFrame, output.getvalue())
                stream.close()
        finally:
            camera.stop_preview()
Example 10
# load the input image (args, model, and labels are assumed to be set up
# by earlier code omitted from this snippet)
image = cv2.imread(args["image"])
image = imutils.resize(image, width=500)
orig = image.copy()

# prepare the image for classification by converting (1) it from BGR
# to RGB channel ordering and then (2) from a NumPy array to PIL
# image format
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)

# make predictions on the input image
print("[INFO] making predictions...")
start = time.time()
results = model.ClassifyWithImage(image, top_k=5)
end = time.time()
print("[INFO] classification took {:.4f} seconds...".format(end - start))

# loop over the results
for (i, (classID, score)) in enumerate(results):
    # check to see if this is the top result, and if so, draw the
    # label on the image
    if i == 0:
        text = "Label: {}, {:.2f}%".format(labels[classID], score * 100)
        cv2.putText(orig, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                    (0, 0, 255), 2)

    # display the classification result to the terminal
    print("{}. {}: {:.2f}%".format(i + 1, labels[classID], score * 100))
    print("load model costs %s sec" % (time.time() - start_time))

    dir_path = '/home/pi/Desktop/test-dataset/'
    os.chdir(dir_path)
    subdir_list = next(os.walk('.'))[1]
    print(subdir_list)

    acc = []
    for subdir in subdir_list:
        files = []
        count = 0
        sub_p = os.path.join(dir_path, subdir)
        for r, d, f in os.walk(sub_p):
            for file in f:
                if file[:2] != "._" and file[-3:] == "jpg":
                    files.append(os.path.join(r, file))
        #print(files[1:10])
        print('There are %s images in subdirectory %s' % (len(files), subdir))
        start_time = time.time()
        for i_path in files:
            img = Image.open(i_path)
            for result in engine.ClassifyWithImage(img, top_k=1):
                if labels[result[0]] == subdir:
                    count += 1
                    #print(labels[result[0]],count)
        acc.append(count)
        print("recognition of %s images costs %s sec" %
              (len(files), time.time() - start_time))
    for x in range(len(acc)):
        print(subdir_list[x], acc[x])
Example 12
    # (tail of a ReadLabelFile definition; see the sketch after Example 1)
    return ret


# Prepare labels
labels = ReadLabelFile("models/imagenet_labels.txt")
# Initialize engine
engine = ClassificationEngine(
    "models/mobilenet_v2_1.0_224_quant_edgetpu.tflite")
# Load image
img = Image.open("images/cat.jpg")

# Run once to make sure the class is right.
# The first time we classify, the model is sent to the Edge TPU,
# which is why this first prediction is so much slower than the rest.
print("warmup prediction")
prediction = engine.ClassifyWithImage(img, top_k=1)
prediction = prediction[0]
print(labels[prediction[0]])
print(prediction[1])
time.sleep(1)

print("starting now (Edge TPU)...")
s = time.time()
for i in range(250):
    result = engine.ClassifyWithImage(img, top_k=1)
    result = result[0]

e = time.time()
print('Time[s]  : ' + str(e - s))
print('FPS      : ' + str(1.0 / ((e - s) / 250.0)))
Example 13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of Tflite model.',
                        required=True)
    parser.add_argument('--label',
                        help='File path of label file.',
                        required=True)
    args = parser.parse_args()

    # Initialize Picamera and grab reference to the raw capture
    camera = PiCamera()
    camera.resolution = (IM_WIDTH, IM_HEIGHT)
    camera.framerate = 10
    rawCapture = PiRGBArray(camera, size=(IM_WIDTH, IM_HEIGHT))
    rawCapture.truncate(0)

    # Prepare labels.
    labels = ReadLabelFile(args.label)
    # Initialize engine.
    engine = ClassificationEngine(args.model)

    for frame1 in camera.capture_continuous(rawCapture,
                                            format="bgr",
                                            use_video_port=True):

        t1 = cv2.getTickCount()
        frame_rate_calc = 1
        freq = cv2.getTickFrequency()
        font = cv2.FONT_HERSHEY_SIMPLEX

        # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        frame = np.copy(frame1.array)
        frame.setflags(write=1)
        frame_expanded = np.expand_dims(frame, axis=0)

        # prepare the frame for classification by converting (1) it from
        # BGR to RGB channel ordering and then (2) from a NumPy array to
        # PIL image format
        frame_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_img = Image.fromarray(frame_img)

        # make predictions on the input frame
        start = time.time()
        results = engine.ClassifyWithImage(frame_img, top_k=1)
        end = time.time()

        # ensure at least one result was found
        if len(results) > 0:
            # draw the predicted class label, probability, and inference
            # time on the output frame
            (classID, score) = results[0]
            text = "{}: {:.2f}% ({:.4f} sec)".format(labels[classID],
                                                     score * 100, end - start)
            cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 0, 255), 2)
            # loop over the results
            #for r in results:
            #    # extract the bounding box and box and predicted class label
            #    box = r.bounding_box.flatten().astype("int")
            #    (startX, startY, endX, endY) = box
            #    label = labels[r.label_id]

            #    # draw the bounding box and label on the image
            #    cv2.rectangle(orig, (startX, startY), (endX, endY),
            #        (0, 255, 0), 2)
            #    y = startY - 15 if startY - 15 > 15 else startY + 15
            #    text = "{}: {:.2f}%".format(label, r.score * 100)
            #    cv2.putText(orig, text, (startX, y),
            #        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        #for result in engine.ClassifyWithImage(frame_img, top_k=3):
        #  print('---------------------------')
        #  print(labels[result[0]])
        #  print('Score : ', result[1])

        # Perform the actual detection by running the model with the image as input
        # (boxes, scores, classes, num) = sess.run(
        #   [detection_boxes, detection_scores, detection_classes, num_detections],
        #   feed_dict={image_tensor: frame_expanded})

        # Draw the results of the detection (aka 'visualize the results')
        # vis_util.visualize_boxes_and_labels_on_image_array(
        #   frame,
        #   np.squeeze(boxes),
        #   np.squeeze(classes).astype(np.int32),
        #   np.squeeze(scores),
        #   category_index,
        #   use_normalized_coordinates=True,
        #   line_thickness=8,
        #   min_score_thresh=0.40)

        # cv2.putText(frame, "FPS: {0:.2f}".format(frame_rate_calc), (30, 50), font, 1, (255, 255, 0), 2, cv2.LINE_AA)

        # All the results have been drawn on the frame, so it's time to display it.
        cv2.imshow('Object detector', frame)
        #
        t2 = cv2.getTickCount()
        time1 = (t2 - t1) / freq
        frame_rate_calc = 1 / time1

        # Press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break

        rawCapture.truncate(0)

    camera.close()
Example 14
def load_labels(filename):
    def split(line):
        return tuple(word.strip() for word in line.split(','))

    with open(filename, encoding='utf-8') as f:
        return tuple(split(line) for line in f)
    
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", required=True)
parser.add_argument("--input", required=True)
parser.add_argument("--label_path", required=True)
args = parser.parse_args()

engine = ClassificationEngine(args.model_path)

image = PIL.Image.open(args.input)

starttime = datetime.now()
result = engine.ClassifyWithImage(image, top_k=5)
endtime = datetime.now()
deltatime = endtime - starttime
print(str(deltatime.seconds) + "s, " + str(deltatime.microseconds / 1000) + "ms")

_CLASSES = load_labels(args.label_path)
classes = [('/'.join(_CLASSES[index]), prob) for index, prob in result]

for i, (label, score) in enumerate(classes):
    print('Result %d: %s (prob=%f)' % (i, label, score))
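load_labels expects one comma-separated list of synonyms per line. A small runnable illustration with a hypothetical two-line labels file:

# Hypothetical labels file illustrating the format load_labels expects.
with open('labels.txt', 'w', encoding='utf-8') as f:
    f.write('tench, Tinca tinca\ngoldfish, Carassius auratus\n')

print(load_labels('labels.txt'))
# -> (('tench', 'Tinca tinca'), ('goldfish', 'Carassius auratus'))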

Example 15
"""
Testing Edge TPU
"""
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image

# open an Image
img = Image.open('img/sample_poloshirt.jpg')

# init Edge TPU with the model
tpu = ClassificationEngine('/home/pi/model.tflite')

# do prediction
results = tpu.ClassifyWithImage(img, top_k=3)
print(results)
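The edgetpu Python library used throughout these examples has since been deprecated in favor of PyCoral. A rough PyCoral equivalent of this snippet (paths are placeholders):

from PIL import Image
from pycoral.adapters import classify, common
from pycoral.utils.edgetpu import make_interpreter

# Load the Edge TPU-compiled model.
interpreter = make_interpreter('/home/pi/model.tflite')
interpreter.allocate_tensors()

# Resize the image to the model's input size and run inference.
img = Image.open('img/sample_poloshirt.jpg').convert('RGB')
common.set_input(interpreter, img.resize(common.input_size(interpreter)))
interpreter.invoke()

# Each result is a Class(id, score) tuple.
print(classify.get_classes(interpreter, top_k=3))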

Example 16
while True:
	# grab the frame from the threaded video stream and resize it
	# to have a maximum width of 500 pixels
	frame = vs.read()
	frame = imutils.resize(frame, width=500)
	orig = frame.copy()

	# prepare the frame for classification by converting (1) it from
	# BGR to RGB channel ordering and then (2) from a NumPy array to
	# PIL image format
	frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
	frame = Image.fromarray(frame)

	# make predictions on the input frame
	start = time.time()
	results = model.ClassifyWithImage(frame, top_k=1)
	end = time.time()

	# ensure at least one result was found
	if len(results) > 0:
		# draw the predicted class label, probability, and inference
		# time on the output frame
		(classID, score) = results[0]
		text = "{}: {:.2f}% ({:.4f} sec)".format(labels[classID],
			score * 100, end - start)
		cv2.putText(orig, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
			0.5, (0, 0, 255), 2)

	# show the output frame and wait for a key press
	cv2.imshow("Frame", orig)
	key = cv2.waitKey(1) & 0xFF
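The snippet is truncated just after the key is read; a likely continuation, consistent with the 'q'-to-quit pattern in the other examples here (an assumption, since the original cuts off):

	# assumed continuation: quit on 'q', then release resources
	if key == ord("q"):
		break

cv2.destroyAllWindows()
vs.stop()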
Example 17
def main():
    cloudIotInstance = CloudIot()
    args = cloudIotInstance.parse_command_line_args()
    dicInstance = dict()
    listInstance = list()
    listOfResults = list()
    # Prepare labels.
    labels = ReadLabelFile(args.label)

    # Initialize engine. (The stock ClassificationEngine takes an optional
    # device path, not labels, as its second argument, and returns
    # (label_id, score) pairs rather than the triples indexed below, so this
    # snippet assumes a custom wrapper.)
    engine = ClassificationEngine(args.model, labels)

    # Run inference.
    imgArr = args.image.split(";")

    index = 1
    for imgStr in imgArr:
        img = Image.open(imgStr)

        for result in engine.ClassifyWithImage(img, top_k=3):
            dicItem = dict()
            dicItem['label'] = str(result[0])
            dicItem['type'] = result[1]
            dicItem['score'] = str(result[2])
            listInstance.append(dicItem)

        inferDict = dict()
        inferDict["results"] = listInstance
        inferDict["category"] = "Classification"
        inferDict["image"] = img.filename
        listOfResults.append(inferDict)
        index += 1

    listInstance.reverse()
    # Publish to the events or state topic based on the flag.

    sub_topic = 'events' if args.message_type == 'event' else 'state'
    mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)

    jwt_iat = datetime.datetime.utcnow()
    jwt_exp_mins = args.jwt_expires_minutes

    client = cloudIotInstance.get_client(args.project_id, args.cloud_region,
                                         args.registry_id, args.device_id,
                                         args.private_key_file, args.algorithm,
                                         args.ca_certs,
                                         args.mqtt_bridge_hostname,
                                         args.mqtt_bridge_port)

    for i in range(1, len(listOfResults) + 1):
        # Process network events.
        client.loop()

        # Wait if backoff is required.
        if cloudIotInstance.should_backoff:
            # If backoff time is too large, give up.
            if cloudIotInstance.minimum_backoff_time > cloudIotInstance.MAXIMUM_BACKOFF_TIME:
                print('Exceeded maximum backoff time. Giving up.')
                break

            # Otherwise, wait and connect again.
            delay = cloudIotInstance.minimum_backoff_time + random.randint(
                0, 1000) / 1000.0
            print('Waiting for {} before reconnecting.'.format(delay))
            time.sleep(delay)
            cloudIotInstance.minimum_backoff_time *= 2
            client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)

        payload = json.dumps(listOfResults.pop())
        print('Publishing message \'{}\''.format(payload))

        seconds_since_issue = (datetime.datetime.utcnow() - jwt_iat).seconds
        if seconds_since_issue > 60 * jwt_exp_mins:
            print('Refreshing token after {}s'.format(seconds_since_issue))
            jwt_iat = datetime.datetime.utcnow()
            client = cloudIotInstance.get_client(
                args.project_id, args.cloud_region, args.registry_id,
                args.device_id, args.private_key_file, args.algorithm,
                args.ca_certs, args.mqtt_bridge_hostname,
                args.mqtt_bridge_port)
        # [END iot_mqtt_jwt_refresh]
        # Publish "payload" to the MQTT topic. qos=1 means at least once
        # delivery. Cloud IoT Core also supports qos=0 for at most once
        # delivery.

        client.publish(mqtt_topic, payload, qos=1)

        # Send events every second. State should not be updated as often
        time.sleep(1 if args.message_type == 'event' else 5)
Example 18
		# extract the bounding box and predicted class label
		#print("r", r)
		box = r.bounding_box.flatten().astype("int")
		print("box", box)
		(startX, startY, endX, endY) = box
		#print("frame", frame)
		face_img, cropped = crop_face(framecvt, box, margin=0, size=face_size)
		#print(face_img)
		face_imgs[i,:,:,:] = face_img
		frame1 = Image.fromarray(face_img)
	#		label = labels[r.label_id]


		if len(face_imgs) > 0:
			start = time.time()
			results1 = model1.ClassifyWithImage(frame1)
			end = time.time()
			cv2.imshow("Cropped", face_img)
			if len(results1) > 0:
				(classID, score) = results1[0]
				print("-->", (labels[classID]))
				if labels[classID] == "with_mask":
					green = True
				elif labels[classID] == "without_mask":
					green = False
				print(green)
				text = "{}: {:.2f}% ({:.4f} sec)".format(labels[classID],
					score * 100, end - start)
				cv2.putText(orig, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
					1, (0, 0, 255), 2)
		# draw the bounding box and label on the image
Example 19
def main():
    try:
        # yyyy-mm-dd hh:mm:ss
        currenttime = strftime("%Y-%m-%d %H:%M:%S", gmtime())

        # Write text with weather values to the canvas
        inkydatetime = strftime("%d/%m %H:%M")

        # IoT Host Name
        host = os.uname()[1]

        # - start timing
        starttime = datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S')
        start = time.time()

        # Ip address
        ipaddress = minifiutil.IP_address()

        parser = argparse.ArgumentParser()
        parser.add_argument('--image',
                            help='File path of the image to be recognized.',
                            required=True)
        args = parser.parse_args()

        # Prepare labels.
        labels = ReadLabelFile('/opt/demo/canned_models/imagenet_labels.txt')

        # Initialize engine.
        engine = ClassificationEngine(
            '/opt/demo/canned_models/inception_v4_299_quant_edgetpu.tflite')

        # Run inference.
        img = Image.open(args.image)

        scores = {}
        kCount = 1

        # Iterate Inference Results
        for result in engine.ClassifyWithImage(img, top_k=5):
            scores['label_' + str(kCount)] = labels[result[0]]
            scores['score_' + str(kCount)] = "{:.2f}".format(result[1])
            kCount = kCount + 1

        # end of processing
        end = time.time()

        # Output JSON
        row = {}
        row.update(scores)
        uuid2 = '{0}_{1}'.format(strftime("%Y%m%d%H%M%S", gmtime()),
                                 uuid.uuid4())
        cpuTemp = int(float(minifiutil.getCPUtemperature()))
        usage = psutil.disk_usage("/")

        # Format Fields
        row['host'] = os.uname()[1]
        row['cputemp'] = str(round(cpuTemp, 2))
        row['ipaddress'] = str(ipaddress)
        row['endtime'] = '{0:.2f}'.format(end)
        row['runtime'] = '{0:.2f}'.format(end - start)
        row['systemtime'] = datetime.datetime.now().strftime(
            '%m/%d/%Y %H:%M:%S')
        row['starttime'] = str(starttime)
        row['diskfree'] = "{:.1f}".format(float(usage.free) / 1024 / 1024)
        row['memory'] = str(psutil.virtual_memory().percent)
        row['uuid'] = str(uuid2)
        row['imagename'] = str(os.path.basename(args.image))

        # Output JSON
        json_string = json.dumps(row)
        print(json_string)

        # Current Minute for Display
        currentminute = int(datetime.datetime.now().minute)

        # Once an hour update display
        if currentminute == 1:
            # Set up the display
            inky_display = InkyPHAT("red")
            inky_display.set_border(inky_display.BLACK)

            # Create a new canvas to draw on
            # 212x104
            img = Image.new("P", (inky_display.WIDTH, inky_display.HEIGHT))
            draw = ImageDraw.Draw(img)

            # Load the FredokaOne font
            font = ImageFont.truetype(FredokaOne, 18)

            # draw data
            draw.text((0, 0),
                      "{}".format(row['imagename']),
                      inky_display.RED,
                      font=font)
            draw.text((0, 22),
                      "{}".format(row['ipaddress']),
                      inky_display.RED,
                      font=font)
            draw.text((0, 44),
                      "{}".format(row['label_1']),
                      inky_display.RED,
                      font=font)
            draw.text((0, 66),
                      "{}".format(row['systemtime']),
                      inky_display.RED,
                      font=font)

            # Display the data on Inky pHAT
            inky_display.set_image(img)
            inky_display.show()

    except Exception:
        print("Failed to send.")
Example 20
class MotionSensor(Accessory):

    category = CATEGORY_SENSOR

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        serv_motion = self.add_preload_service('MotionSensor')
        self.char_detected = serv_motion.configure_char('MotionDetected')
        self.engine = ClassificationEngine("./models/classify.tflite")
        self.is_trained = retrain()
        self.labels = get_labels()
        self.is_running = True
        logging.info(self.setup_message())

    def run(self):
        while self.is_running:

            if app_state.last_state == "shutdown":
                self.is_running = False
                os.system('kill $PPID')

            if (app_state.last_state == "run") and self.is_trained:
                detection = False
                img = camera.returnPIL()
                output = self.engine.ClassifyWithImage(img)
                if output[0][0] == int(self.labels["detection"]):
                    detection = True
                    logging.info("detection triggered")
                self._detected(detection)

            if app_state.last_state == "retrain":
                logging.info("imprinting weights")
                self.is_trained = retrain()
                self.labels = get_labels()

                if self.is_trained:
                    self.engine = ClassificationEngine(
                        "./models/classify.tflite")
                    app_state.last_state = "run"
                    logging.info("finished imprinting")

                else:
                    app_state.last_state = "collect"
                    logging.warning(
                        "could not imprint weights. Please provide enough pictures"
                    )

            if app_state.last_state == "collect_background":
                camera.collect("background")
                app_state.last_state = "collect"

            if app_state.last_state == "collect_detection":
                camera.collect("detection")
                app_state.last_state = "collect"

    def _detected(self, val=False):
        self.char_detected.set_value(val)

    def stop(self):
        logging.info("shut down")
        super().stop()
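A hypothetical HAP-python driver setup for this accessory; the accessory name and port are placeholders:

import logging
from pyhap.accessory_driver import AccessoryDriver

logging.basicConfig(level=logging.INFO)

# Start the HomeKit accessory server and register the sensor.
driver = AccessoryDriver(port=51826)
driver.add_accessory(accessory=MotionSensor(driver, 'EdgeTPU Motion'))
driver.start()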