Example #1
from lobe import ImageModel


def predict_label_from_image_file(image_file, model: ImageModel):
    try:
        result = model.predict_from_file(path=image_file)
        # result.labels is a list of (label, confidence) pairs; return the top label
        return result.labels[0][0]
    except Exception as e:
        print(f"Problem predicting image from file: {e}")
    return ''
Example #2
from lobe import ImageModel


def predict_image_url(url, model: ImageModel, row):
    label, confidence = '', ''
    try:
        result = model.predict_from_url(url=url)
        # take the top (label, confidence) pair from the prediction
        label, confidence = result.labels[0]
    except Exception as e:
        print(f"Problem predicting image from url: {e}")
    return label, confidence, row
Example #3
import time
from lobe import ImageModel
# the generic Signature class used here previously was replaced by ImageClassificationSignature
from lobe.signature import ImageClassificationSignature
from app_settings import AppSettings

modelTier = "tier1"
_app_settings = AppSettings()

sigpath = _app_settings.get_SignaturePath(modelTier)
sig: ImageClassificationSignature = ImageClassificationSignature(sigpath)

model: ImageModel = ImageModel.load_from_signature(sig)
'''
The TFClassify class (TensorFlow Classifier) takes a TensorFlow model and lets you
pass multiple images to it via the addImage() or addImages() methods. It then
returns the predicted classifications of the images as a list of dicts:
{
    'image': '<image_path_sent_to_classifier>',
    'prediction': '<output_prediction_from_TensorFlow>'
}
'''


class TFClassify:
    ...  # class body truncated in this excerpt
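# A hypothetical usage sketch of the interface described in the docstring above.
# The class body is truncated here, so the retrieval method name (classify) is an
# assumption, not the project's confirmed API:
#
#   classifier = TFClassify(model)
#   classifier.addImages(['images/a.jpg', 'images/b.jpg'])
#   for item in classifier.classify():
#       print(item['image'], '->', item['prediction'])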
Example #4
#!/usr/bin/env python
from lobe import ImageModel

model = ImageModel.load('path/to/exported/model')

# Predict from an image file
result = model.predict_from_file('path/to/file.jpg')

# Predict from an image url
result = model.predict_from_url('http://path/to/file.jpg')

# Predict from Pillow image
from PIL import Image
img = Image.open('path/to/file.jpg')
result = model.predict(img)

# Print top prediction
print("Top prediction:", result.prediction)

# Print all classes with their confidences
for label, confidence in result.labels:
    print(f"{label}: {confidence*100:.2f}%")
Example #5
import os
import shutil
from concurrent.futures import ThreadPoolExecutor

from tqdm import tqdm
from lobe import ImageModel


def predict_folder(img_dir, model_dir, progress_hook=None):
    """
    Run your model on a directory of images. This will also go through any images in existing subdirectories.
    Move each image into a subdirectory structure based on the prediction -- the predicted label
    becomes the directory name where the image goes.

    :param img_dir: the filepath to your directory of images.
    :param model_dir: path to the Lobe TensorFlow SavedModel export.
    :param progress_hook: an optional function that will be run with progress_hook(currentProgress, totalProgress) when progress updates.
    """
    print(f"Predicting {img_dir}")
    img_dir = os.path.abspath(img_dir)
    if not os.path.isdir(img_dir):
        raise ValueError(
            f"Please specify a directory to images. Found {img_dir}")

    num_items = sum(len(files) for _, _, files in os.walk(img_dir))
    print(f"Predicting {num_items} items...")

    # load the model
    print("Loading model...")
    model = ImageModel.load(model_path=model_dir)
    print("Model loaded!")

    # iterate over the rows and predict the label
    curr_progress = 0
    no_labels = 0
    with tqdm(total=num_items) as pbar:
        with ThreadPoolExecutor() as executor:
            model_futures = []
            # make our prediction jobs (predict_label_from_image_file is defined in Example #1)
            for root, _, files in os.walk(img_dir):
                for filename in files:
                    image_file = os.path.abspath(os.path.join(root, filename))
                    model_futures.append(
                        (executor.submit(predict_label_from_image_file,
                                         image_file=image_file,
                                         model=model), image_file))

            for future, img_file in model_futures:
                label = future.result()
                if not label:
                    # no prediction came back; count it and leave the file in place
                    no_labels += 1
                filename = os.path.split(img_file)[-1]
                name, ext = os.path.splitext(filename)
                # the predicted label becomes the destination subdirectory name
                dest_dir = os.path.join(img_dir, label)
                os.makedirs(dest_dir, exist_ok=True)
                dest_file = os.path.abspath(os.path.join(dest_dir, filename))
                # only move labeled files whose destination differs from the source
                if label and dest_file != img_file:
                    try:
                        # rename the file if there is a conflict
                        rename_idx = 0
                        while os.path.exists(dest_file):
                            new_name = f'{name}_{rename_idx}{ext}'
                            dest_file = os.path.abspath(
                                os.path.join(dest_dir, new_name))
                            rename_idx += 1
                        shutil.move(img_file, dest_file)
                    except Exception as e:
                        print(f"Problem moving file: {e}")
                pbar.update(1)
                if progress_hook:
                    curr_progress += 1
                    progress_hook(curr_progress, num_items)
    print(f"Done! Number of images without predicted labels: {no_labels}")
Example #6
# Import Pi GPIO library LED class
from gpiozero import LED
from time import sleep

#Import Lobe python library
from lobe import ImageModel

# Define LEDs and GPIO pin numbers
# --> Change GPIO pins as needed
red_led = LED(17)
yellow_led = LED(27)
green_led = LED(22)

# Load Lobe TF model
# --> Change model path
model = ImageModel.load('/path/to/model')
# Run photo through Lobe TF model and get prediction results
# --> Change image path
result = model.predict_from_file('/path/to/image/image.jpg')


# Function that takes in a single string, label, and turns on corresponding LED
# --> Change label1, label2, and label3 to reflect your Lobe model labels.
#     Note: Labels are case sensitive!
def ledSelect(label):
    print(label)
    if label == "label1":
        yellow_led.on()
        sleep(5)
    if label == "label2":
        green_led.on()
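# Pass the model's top prediction into ledSelect(); result.prediction follows the
# lobe result API shown in Example #4. A sketch of the glue step, not necessarily
# the original script's exact flow:
ledSelect(result.prediction)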
Example #7
from time import sleep

from gpiozero import Button, LED
from picamera import PiCamera
from lobe import ImageModel

button = Button(21)

yellow_led = LED(17)
blue_led = LED(27)
green_led = LED(22)
red_led = LED(23)
pink_led = LED(24)
white_led = LED(10)

camera = PiCamera()

model = ImageModel.load('model/')


# Take Photo
def take_photo():
    sleep(2)
    print("Starting classify process")
    camera.start_preview(alpha=200)
    camera.rotation = 270
    camera.capture('images/predict.png')
    camera.stop_preview()
    sleep(1)

def ledOff():
    yellow_led.off()
    blue_led.off()
    green_led.off()
    red_led.off()
    pink_led.off()
    white_led.off()
Example #8
# Import Pi Camera library
from picamera import PiCamera
from time import sleep

#Import Lobe python library
from lobe import ImageModel

# Create a camera object
camera = PiCamera()

# Load Lobe TF model
# --> Change model path
model = ImageModel.load('/home/pi/model')

if __name__ == '__main__':
    # Start the camera preview, make slightly transparent to see any python output
    #   Note: preview only shows if you have a monitor connected directly to the Pi
    camera.start_preview(alpha=200)
    # Pi Foundation recommends waiting at least 2s for light adjustment
    sleep(5)
    # Optional image rotation for camera
    # --> Change or comment out as needed
    camera.rotation = 180
    #Input image file path here
    # --> Change image path as needed
    camera.capture('/home/pi/Documents/image.jpg')
    #Stop camera
    camera.stop_preview()

    # Run photo through Lobe TF model and get prediction results
    result = model.predict_from_file('/home/pi/Documents/image.jpg')
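    # Print the top label; result.prediction follows the lobe result API shown in Example #4
    print(f"Prediction: {result.prediction}")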
Example #9
# This excerpt assumes the full script's imports (os, io, time, picamera,
# PIL.Image, datetime) and its project helpers: jingle_keys, get_inputs,
# color_fill, the Input enum, the color constants, and the LABEL_* strings.
def main():
    global jingle_count
    # expand '~' explicitly so the model path resolves to the user's home directory
    model = ImageModel.load(os.path.expanduser('~/model'))

    # Check if there is a folder to keep the retraining data; if there isn't, make it
    if not os.path.exists('./retraining_data'):
        os.mkdir('./retraining_data')

    with picamera.PiCamera(resolution=(224, 224), framerate=30) as camera:
        stream = io.BytesIO()
        camera.start_preview()
        # Camera warm-up time
        time.sleep(2)
        label = ''
        while True:
            stream.seek(0)
            camera.annotate_text = None
            camera.capture(stream, format='jpeg')
            camera.annotate_text = label
            img = Image.open(stream)
            result = model.predict(img)
            label = result.prediction
            confidence = result.labels[0][1]
            camera.annotate_text = label
            print(f'\rLabel: {label} | Confidence: {confidence*100: .2f}%',
                  end='',
                  flush=True)

            # Check the current label and jingle the keys when cats are detected
            if label == LABEL_CAT:
                # Make Servo Jingle Keys
                jingle_keys()
            elif label == LABEL_MULTI_CAT:
                jingle_keys(True)
            elif label == LABEL_NOTHING:
                jingle_count = 0

            time.sleep(0.5)

            inputs = get_inputs()
            # Check if the joystick is pushed up
            if (Input.UP in inputs):
                color_fill(GREEN, 0)
                # Check if there is a folder for this label's retraining data; if there isn't, make it
                if not os.path.exists(f'./retraining_data/{label}'):
                    os.mkdir(f'./retraining_data/{label}')
                # Remove the text annotation
                camera.annotate_text = None

                # Save the current frame, named with the current timestamp
                camera.capture(
                    os.path.join(
                        f'./retraining_data/{label}',
                        f'{datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}.jpg'))

                color_fill(OFF, 0)

            # Check if the joystick is pushed down
            elif (Input.DOWN in inputs or Input.BUTTON in inputs):
                color_fill(RED, 0)
                # Remove the text annotation
                camera.annotate_text = None
                # Save the current frame to the top level retraining directory
                camera.capture(
                    os.path.join(
                        './retraining_data',
                        f'{datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}.jpg'))
                color_fill(OFF, 0)
Example #10
def main(check, interval, save_path, model_path):
    # Confirm that the provided save path and model paths are valid
    try:
        path_save = validate_path(save_path)
        path_model = validate_path(model_path)
    except Exception as err:
        print(err)
        exit()

    # Load Lobe model
    model = ImageModel.load(path_model)
    with picamera.PiCamera(resolution=(224, 224), framerate=30) as camera:

        # Start camera preview
        stream = io.BytesIO()
        camera.start_preview()

        # Camera warm-up time
        time.sleep(2)

        while True:

            # Start stream at the first byte
            stream.seek(0)

            time_stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')

            # Clear the last prediction text
            # and capture an image from the camera
            camera.annotate_text = None
            camera.capture(stream, format='jpeg')
            img = Image.open(stream)

            # Run inference on the image
            result = model.predict(img)
            label = result.prediction
            confidence = result.labels[0][1]

            # Add label text to camera preview
            camera.annotate_text = f"{label}\n{confidence}\n{time_stamp}"

            # if the image was predicted interesting, save it to the provided path
            # and wait the interval set for interesting images
            if label == Labels.INTERESTING:
                save_filename = path_save.joinpath(f"{time_stamp}.jpg")
                img.save(save_filename)
                time.sleep(interval)

            # if the image was predicted uninteresting, save it to a sub-directory
            # (for use to make the interesting/not-interesting model better)
            # and wait the interval set for uninteresting images
            elif label == Labels.NOT_INTERESTING:
                save_filename = path_save.joinpath('uninteresting').joinpath(f"un-{time_stamp}.jpg")
                img.save(save_filename)
                time.sleep(check)

            # if some other label is predicted, there's a problem with the model:
            # print something out and exit execution
            else:
                print(f"Unexpected result: {label}")
                exit()
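# The main(check, interval, save_path, model_path) variant above relies on a
# validate_path helper that is not shown in this excerpt. A minimal sketch of
# what it might look like (hypothetical; the original project's version may differ):
from pathlib import Path


def validate_path(path_string):
    # expand '~' and resolve to an absolute path, failing early if it doesn't exist
    path = Path(path_string).expanduser().resolve()
    if not path.exists():
        raise ValueError(f"Path does not exist: {path}")
    return path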
Example #11
import os
from csv import writer as csv_writer
from concurrent.futures import ThreadPoolExecutor

import pandas as pd
from tqdm import tqdm
from lobe import ImageModel


def predict_dataset(filepath, model_dir, url_col=None, progress_hook=None):
    """
    Given a file with urls to images, predict the given SavedModel on each image and write
    the label and confidence to a new *_predictions.csv file.

    :param filepath: path to a valid txt or csv file with image urls to download.
    :param model_dir: path to the Lobe TensorFlow SavedModel export.
    :param url_col: if this is a csv, the column header name for the urls to download.
    :param progress_hook: an optional function that will be run with progress_hook(currentProgress, totalProgress) when progress updates.
    """
    print(f"Predicting {filepath}")
    filepath = os.path.abspath(filepath)
    # _name_and_extension is a helper defined elsewhere in the original module;
    # it returns the file's base name and its lower-cased extension
    filename, ext = _name_and_extension(filepath)
    # read the file
    # if this is a .txt file, don't treat the first row as a header. Otherwise, use the first row for header column names.
    if ext != '.xlsx':
        csv = pd.read_csv(filepath, header=None if ext == '.txt' else 0)
    else:
        csv = pd.read_excel(filepath, header=0)
    if ext in ['.csv', '.xlsx'] and not url_col:
        raise ValueError("Please specify an image url column for the csv.")
    url_col_idx = 0
    if url_col:
        try:
            url_col_idx = list(csv.columns).index(url_col)
        except ValueError:
            raise ValueError(
                f"Image url column {url_col} not found in csv headers {csv.columns}"
            )

    num_items = len(csv)
    print(f"Predicting {num_items} items...")

    # load the model
    print("Loading model...")
    model = ImageModel.load(model_path=model_dir)
    print("Model loaded!")

    # create our output csv
    fname, ext = os.path.splitext(filepath)
    out_file = f"{fname}_predictions.csv"
    with open(out_file, 'w', encoding="utf-8", newline='') as f:
        # our header names from the pandas columns
        writer = csv_writer(f)
        writer.writerow([
            *[str(col) if not pd.isna(col) else '' for col in csv.columns],
            'label', 'confidence'
        ])

    # iterate over the rows and predict the label
    with tqdm(total=len(csv)) as pbar:
        with ThreadPoolExecutor() as executor:
            model_futures = []
            # make our prediction jobs (predict_image_url is defined in Example #2)
            for i, row in enumerate(csv.itertuples(index=False)):
                url = row[url_col_idx]
                model_futures.append(
                    executor.submit(predict_image_url,
                                    url=url,
                                    model=model,
                                    row=row))

            # write the results from the predict (this should go in order of the futures)
            for i, future in enumerate(model_futures):
                label, confidence, row = future.result()
                with open(out_file, 'a', encoding="utf-8", newline='') as f:
                    writer = csv_writer(f)
                    writer.writerow([
                        *[str(col) if not pd.isna(col) else '' for col in row],
                        label, confidence
                    ])
                pbar.update(1)
                if progress_hook:
                    progress_hook(i + 1, len(csv))
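# A minimal invocation sketch for predict_dataset -- the file path, model path,
# and url column name below are placeholders (assumptions), not values from the
# original project:
if __name__ == '__main__':
    predict_dataset('path/to/urls.csv', 'path/to/exported/model', url_col='image_url')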
Example #12
#!/usr/bin/python
from time import sleep
from lobe import ImageModel
import subprocess
import tflite_runtime.interpreter

# Learning mostly from https://blog.paperspace.com/tensorflow-lite-raspberry-pi/
# and https://github.com/microsoft/TrashClassifier.
# Took some code from my old project: https://github.com/aHagouel/MauiBot4000/blob/master/src/bot.py
# Shoutout to the Lobe team for helping debug my execution environment.

model_folder = "/home/pi/Development/hungry-hungry-fishos/utilities/model/"

# Load Lobe.ai TF model. Requires TF Lite & Lobe installation. Please check out the readme for more info.
model = ImageModel.load(model_folder)


def take_picture(
    file_path='/home/pi/Development/hungry-hungry-fishos/current_state/last_picture'
):
    command = "fswebcam -S 2 -r 980x540 --no-banner " + file_path + '.jpg'
    # fswebcam writes the capture straight to file_path; call() blocks until it finishes
    subprocess.call(command.split(), stdout=subprocess.PIPE)
    return file_path + '.jpg'


while True:
    photo_path = take_picture()
    # Run photo through Lobe TF model
    result = model.predict_from_file(photo_path)
    print(result)
    if (result == "Hungry"):
Example #13
import json
import time

import cv2
import paho.mqtt.client as mqtt
from PIL import Image
from lobe import ImageModel


def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))

def on_message(client, userdata, msg):
    print(msg.topic+" "+str(msg.payload))

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message

# connect to MQTT broker
client.connect("localhost", 1883, 60)

client.loop_start()

# --> Change to the path of your exported Lobe model
model_dir = 'path/to/exported/model'
model = ImageModel.load(model_dir)

# open the default webcam for capture
cap = cv2.VideoCapture(0)

while True:
    ret, image_np = cap.read()
    # OpenCV captures BGR frames; convert to RGB before handing them to the model
    image_rgb = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    im_pil = Image.fromarray(image_rgb)
    result = model.predict(im_pil)
    client.publish("birdcam/detections", json.dumps(result.__dict__), qos=0, retain=False)
    print(result.prediction)
    # comment the line below if you don't want the webcam preview
    cv2.imshow('object detection', image_np)
    time.sleep(1)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

cap.release()
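# A minimal subscriber sketch for the birdcam/detections topic published above,
# using the same paho-mqtt client API; the broker host/port are assumed to match
# the "localhost", 1883 used by the publisher:
import json
import paho.mqtt.client as mqtt


def on_detection(client, userdata, msg):
    detection = json.loads(msg.payload)
    print("Detection:", detection)


subscriber = mqtt.Client()
subscriber.on_message = on_detection
subscriber.connect("localhost", 1883, 60)
subscriber.subscribe("birdcam/detections")
subscriber.loop_forever()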
Example #14
from time import sleep

from gpiozero import PWMLED, Button, Servo
from picamera import PiCamera
from lobe import ImageModel

white_led = PWMLED(24)  # camera state LED

general_ir = Button(23)
recycling_ir = Button(22)

servo = Servo(19)
servo.value = 0  # initiate servo at its mid point

# count records the row number for Google Sheets writing
count = 133  # change to current position of last entry

camera = PiCamera()

# Load Lobe TF model
model = ImageModel.load('/home/pi/Lobe')


# run after button press
def get_data(count):
    white_led.blink(0.1, 0.1)
    sleep(1)
    print("Pressed")
    white_led.on()
    # Start the camera preview
    camera.start_preview(alpha=200)
    # wait 0.5s for light adjustment
    sleep(0.5)
    # image rotation for camera
    camera.rotation = 90
    # new image file path