コード例 #1
0
    def __init__(self):
        """Load the hard-hat classifier, recording whether CUDA is available."""
        if torch.cuda.is_available():
            device_type = 'cuda:0'
        else:
            device_type = 'cpu'

        # FIX: CPU was previously assigned only on the CUDA branch, so
        # uncommenting the `cpu=CPU` argument below would raise NameError on a
        # CPU-only machine. Derive it from the chosen device in both cases.
        CPU = device_type == 'cpu'

        model_path = '../../models/hardhat.pkl'
        self.model = load_learner(model_path)  # , cpu=CPU
コード例 #2
0
    def predict(self, filename: str, plant: str):
        """Download the model for *plant*, classify the image at *filename*,
        and return the predicted class name as a string."""
        print("-------------Model Downloading-------------")
        self.download_model(plant)
        print("-------------Model Downloaded-------------")

        # Load the freshly downloaded learner and run inference on the image.
        learner = load_learner(f"{plant}.pkl")
        label, _idx, _probs = learner.predict(PILImage.create(filename))
        return str(label)
コード例 #3
0
async def setup_learner():
    """Download the exported model file and load it into a fastai Learner.

    Re-raises RuntimeError with a friendlier explanation when the failure is
    the well-known fastai "CPU-only machine" incompatibility; any other
    RuntimeError propagates unchanged.
    """
    await download_file(model_file_download_url, model_file)
    try:
        return load_learner(model_file)
    except RuntimeError as e:
        # Guard clause: only the known CPU-only failure gets the custom message.
        if not (e.args and 'CPU-only machine' in e.args[0]):
            raise
        print(e)
        message = "\n\nThis model was trained with an old version of fastai and will not work in a CPU environment.\n\nPlease update the fastai library in your training environment and export your model again.\n\nSee instructions for 'Returning to work' at https://course.fast.ai."
        raise RuntimeError(message)
コード例 #4
0
from fastai.vision.all import load_learner, torch, PILImage
import streamlit as st
import os
import time

# Configure the browser tab (title "PETS") and render the page heading.
st.set_page_config("PETS", "🐶&🐱")
st.title("PETS 🐶&🐱")

# Load the exported pet classifier once at import time; reused by predict().
learn = load_learner('models/pets.pkl')


def predict(img):
    """Display *img*, run the pet classifier on it, and report the top class
    with its confidence percentage."""
    st.image(img, use_column_width=True)
    # Purely cosmetic delay so the spinner is visible to the user.
    with st.spinner('Wait for it...'):
        time.sleep(3)

    clas, clas_idx, probs = learn.predict(img)
    prob = round(torch.max(probs).item() * 100, 2)
    # FIX: corrected "proability" -> "probability" in the user-facing message.
    st.success(f"This is {clas} with probability of {prob}%.")


# Let the user either pick a bundled test image or upload their own.
option = st.radio('', ['Choose a test image', 'Choose your own image'])

if option == 'Choose a test image':
    # Offer every file under images/ as a selectable test sample.
    test_images = os.listdir('images/')
    test_image = st.selectbox('Please select a test image:', test_images)
    file_path = 'images/' + test_image
    img = PILImage.create(file_path)
    predict(img)

else:
コード例 #5
0
import json
import time
from io import BytesIO

import requests
import numpy as np
from fastai.vision.all import load_learner, PILImage

# Load the exported fastai model once per Lambda container (cold start),
# so warm invocations reuse it instead of reloading from disk.
learn = load_learner('export.pkl')

def lambda_handler(event, context):
    """Sample pure Lambda function

    Parameters
    ----------
    event: dict, required
        API Gateway Lambda Proxy Input Format

        Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format

    context: object, required
        Lambda Context runtime methods and attributes

        Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    ------
    API Gateway Lambda Proxy Output Format: dict

        Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
    """
コード例 #6
0
 def __init__(self):
     """Load the exported model and initialize helper state."""
     # Exported fastai learner used for predictions.
     self.model = load_learner('webapp/HWES/utils/export.pkl')
     # Math helper object — its role is not visible here; TODO confirm.
     self.math = Math()
     # Accumulator list, presumably populated elsewhere in the class.
     self.the_list = []
コード例 #7
0
ファイル: app.py プロジェクト: GemmyTheGeek/FoodyDudy
)
import glob
import streamlit as st
from PIL import Image
from random import shuffle
import urllib.request

# Static artwork shown in the app's sidebar and menu sections.
image1 = Image.open('sidebar1.jpg')
image2 = Image.open('menu1.jpg')
image3 = Image.open('menu2.jpg')

#MODEL_URL = "https://github.com/GemmyTheGeek/FoodyDudy/raw/main/resnet34-10.pkl"
# NOTE(review): the model weights are fetched over plain HTTP from a
# third-party host at import time — verify this URL is trusted and stable.
MODEL_URL = "http://freelyblog.com/resnet34-10.pkl"
urllib.request.urlretrieve(MODEL_URL, "model.pkl")
learn_inf = load_learner('model.pkl', cpu=True)

# Thai dish names — presumably index-aligned with the classifier's output
# classes; TODO confirm against the training label order.
thaimenu = [
    "แกงเขียวหวานไก่", "แกงเทโพ", "แกงเลียง", "แกงจืดเต้าหู้หมูสับ",
    "แกงจืดมะระยัดไส้", "แกงมัสมั่นไก่", "แกงส้มกุ้ง",
    "ไก่ผัดเม็ดมะม่วงหิมพานต์", "ไข่เจียว", "ไข่ดาว", "ไข่พะโล้", "ไข่ลูกเขย",
    "กล้วยบวชชี", "ก๋วยเตี๋ยวคั่วไก่", "กะหล่ำปลีผัดน้ำปลา", "กุ้งแม่น้ำเผา",
    "กุ้งอบวุ้นเส้น", "ขนมครก", "ข้าวเหนียวมะม่วง", "ข้าวขาหมู",
    "ข้าวคลุกกะปิ", "ข้าวซอยไก่", "ข้าวผัด", "ข้าวผัดกุ้ง", "ข้าวมันไก่",
    "ข้าวหมกไก่", "ต้มข่าไก่", "ต้มยำกุ้ง", "ทอดมัน", "ปอเปี๊ยะทอด",
    "ผักบุ้งไฟแดง", "ผัดไท", "ผัดกะเพรา", "ผัดซีอิ๊วเส้นใหญ่",
    "ผัดฟักทองใส่ไข่", "ผัดมะเขือยาวหมูสับ", "ผัดหอยลาย", "ฝอยทอง", "พะแนงไก่",
    "ยำถั่วพู", "ยำวุ้นเส้น", "ลาบหมู", "สังขยาฟักทอง", "สาคูไส้หมู", "ส้มตำ",
    "หมูปิ้ง", "หมูสะเต๊ะ", "ห่อหมก"
]
コード例 #8
0
def playground():
    """Streamlit "Playground" page.

    Lets the user upload a satellite image, runs the land-scenes model on it,
    collects thumbs-up/down feedback (with optional upload/deletion of the
    user's data), and offers a look at the model internals.

    FIX: corrected user-facing string typos only ("occurences",
    "archirtecture", "Bellow"/"bellow"); all logic is unchanged.
    """
    st.title("Playground")
    # Initial instructions
    init_info = st.empty()
    init_info.info(
        "ℹ️ Upload an image on the sidebar to run the model on it!\n"
        "Just be sure that it's satellite imagery, otherwise you're "
        "just going to get random outputs 🤷‍♂️")
    # Set the sidebar inputs
    st.sidebar.title("Inputs")
    # NOTE For the sake of time, we're just going to use the `Land scenes` model
    # model_type = st.sidebar.radio("Model", ["Deforestation", "Land scenes"])
    model_type = "Land scenes"
    input_file = st.sidebar.file_uploader(
        "Upload an image",
        type=["jpg", "jpeg", "png", "tif"],
        accept_multiple_files=False,
        help="Test our model on an image of your liking! "
        "But remember that this should only work for satellite imagery, "
        "ideally around 256x256 size.",
    )
    st.sidebar.markdown(
        "Made by [André Ferreira](https://andrecnf.com/) and [Karthik Bhaskar](https://www.kbhaskar.com/)."
    )
    # Set the model
    model = load_learner(
        "fsdl_deforestation_detection/modeling/resnet50-128.pkl")
    # Speed up model inference by deactivating gradients
    model.model.eval()
    torch.no_grad()
    if input_file is not None:
        # Load and display the uploaded image
        with st.spinner("Loading image..."):
            img = imread(input_file)
            # Check if it's a different image than the one before
            if input_file.name != session_state.image_name:
                session_state.image_name = input_file.name
                session_state.image_id = gen_image_id()
                session_state.ts = datetime.now()
                session_state.user_data_uploaded = False
                # Reset buttons
                session_state.user_feedback_positive = False
                session_state.user_feedback_negative = False
            fig = px.imshow(img)
            st.plotly_chart(fig)
            init_info.empty()
        # Run the model on the image
        output = run_model(model, img)
        st.subheader("Model output:")
        show_model_output(model_type, output)
        st.info(
            "ℹ️ Green labels represent categories that we don't associate with deforestation "
            "risk (e.g. natural occurrences or old structures), while red labels can serve as "
            "a potential deforestation signal (e.g. new constructions, empty patches in forests)."
        )
        # User feedback / data flywheel
        st.write("Did the model output match what you expected?")
        feedback_cols = st.beta_columns(2)
        with feedback_cols[0]:
            positive_btn = st.button("✅")
        with feedback_cols[1]:
            negative_btn = st.button("❌")
        if (positive_btn
                or session_state.user_feedback_positive) and not negative_btn:
            session_state.user_feedback_positive = True
            session_state.user_feedback_negative = False
            st.info("ℹ️ Thank you for your feedback! This can help us "
                    "improve our models 🙌")
            # Upload the image + prediction + feedback exactly once per image.
            if session_state.user_data_uploaded is False:
                upload_user_data(
                    session_state.user_id,
                    session_state.ts,
                    session_state.image_id,
                    img,
                    output[1],
                    session_state.user_feedback_positive,
                )
                session_state.user_data_uploaded = True
            if st.button("Delete my image and feedback data"):
                st.info("ℹ️ Alright, we deleted it. Just know that we had "
                        "high expectations that you could help us improve "
                        "deforestation detection models. We thought we "
                        "were friends 🙁")
        elif (negative_btn
              or session_state.user_feedback_negative) and not positive_btn:
            session_state.user_feedback_positive = False
            session_state.user_feedback_negative = True
            st.info("ℹ️ Thank you for your feedback! This can help us "
                    "improve our models 🙌\n"
                    "It would be even better if you could tell us "
                    "what makes you think the model failed. Mind "
                    "leaving a comment below?")
            if session_state.user_data_uploaded is False:
                # NOTE(review): this passes user_feedback_positive (False in
                # this branch) — confirm upload_user_data expects the positive
                # flag rather than the negative one.
                upload_user_data(
                    session_state.user_id,
                    session_state.ts,
                    session_state.image_id,
                    img,
                    output[1],
                    session_state.user_feedback_positive,
                )
                session_state.user_data_uploaded = True
            user_comment = st.empty()
            user_comment_txt = user_comment.text_input(
                label="Leave a comment on why the model failed.",
                max_chars=280,
            )
            if len(user_comment_txt) > 0:
                upload_user_comment(
                    session_state.user_id,
                    session_state.image_id,
                    user_comment_txt,
                )
            if st.button("Delete my image and feedback data"):
                st.info("ℹ️ Alright, we deleted it. Just know that we had "
                        "high expectations that you could help us improve "
                        "deforestation detection models. We thought we "
                        "were friends 🙁")
                delete_user_data(session_state.user_id, session_state.image_id)
        # Model interpretation
        with st.beta_expander("Peek inside the black box"):
            explain_cols = st.beta_columns(2)
            with explain_cols[0]:
                st.subheader("Model structure")
                st.info(
                    "ℹ️ Our model is largely based on the [ResNet](https://paperswithcode.com/method/resnet) "
                    "architecture, using a ResNet50 from [FastAI](https://docs.fast.ai/). "
                    "Below you can see the model's layer definition.")
                st.text(model.model)
            with explain_cols[1]:
                st.subheader("Output interpretation")
                # TODO Add the result of applying SHAP to the model in the current sample
                st.info(
                    "ℹ️ Given some difficulties with using [SHAP](https://github.com/slundberg/shap) "
                    "with [FastAI](https://docs.fast.ai/), we haven't implemented this yet. "
                    "Would you like to give it a try?")
コード例 #9
0
def overview():
    """Streamlit "Overview" page.

    Dataset-level exploration of the land-scenes model: performance metrics,
    label/prediction distributions, imagery statistics, and a per-sample model
    run shown alongside the sample's real labels.

    FIX: corrected user-facing string typos only ("occurences",
    "archirtecture", "Bellow"); all logic is unchanged.
    """
    st.title("Overview")
    # Initial instructions
    init_info = st.empty()
    # Set the sidebar inputs
    st.sidebar.title("Inputs")
    # NOTE For the sake of time, we're just going to use the `Land scenes` model
    # model_type = st.sidebar.radio("Model", ["Deforestation", "Land scenes"])
    model_type = "Land scenes"
    dataset_name = st.sidebar.radio("Dataset", ["Amazon", "Oil palm"])
    chosen_set = None
    if dataset_name == "Amazon":
        chosen_set = st.sidebar.radio("Set", ["Train", "Validation"])
        init_info.info(
            "ℹ️ You've selected the "
            "[Amazon dataset](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/), "
            "which is the one in which our models were trained on. As such, you can look at performance "
            "on either the train or validation set.")
        (
            bucket_name,
            img_path,
            labels_table,
            img_name_col,
            label_col,
        ) = set_paths(dataset_name, model_type)
    else:
        init_info.info(
            "ℹ️ You've selected the "
            "[oil palm dataset](https://www.kaggle.com/c/widsdatathon2019/), "
            "which can be seen as a test dataset, i.e. it wasn't used during training. "
            "While it should be somewhat similar to the Amazon dataset, it can be interesting "
            "to compare results on potentially out-of-domain data.")
        (
            bucket_name,
            img_path,
            labels_table,
            img_name_col,
            label_col,
        ) = set_paths(dataset_name, model_type)
    # Set the model
    model = load_learner(
        "fsdl_deforestation_detection/modeling/resnet50-128.pkl")
    # Speed up model inference by deactivating gradients
    model.model.eval()
    torch.no_grad()
    img_names = load_image_names(model, chosen_set, bucket_name, labels_table)
    sample_name = st.sidebar.selectbox(
        "Sample",
        img_names,
    )
    st.sidebar.markdown(
        "Made by [André Ferreira](https://andrecnf.com/) and [Karthik Bhaskar](https://www.kbhaskar.com/)."
    )
    # Load all the data (or some samples) from the selected database
    n_samples = 250
    imgs, labels = load_data(
        dataset_name,
        model_type,
        bucket_name,
        img_path,
        img_names,
        labels_table,
        img_name_col,
        n_samples=n_samples,
    )
    # Show some performance metrics
    # TODO Use all the set data to get the correct performance metrics
    st.header("Performance")
    metrics_cols = st.beta_columns(2)
    with st.spinner("Getting performance results..."):
        if dataset_name == "Amazon":
            # NOTE These are the metrics obtained for the validation set,
            # when training the model in Colab; ideally, this should still
            # be calculated dynamically in here, but it's proving to be
            # slow and impractical in the approach that we were taking
            acc = 0.956407
            fbeta = 0.926633
            # pred, acc, fbeta = get_performance_metrics(
            #     model, imgs, labels, dataset_name
            # )
            pred, _, _ = get_performance_metrics(model, imgs, labels,
                                                 dataset_name)
        else:
            pred, acc, fbeta = get_performance_metrics(model, imgs, labels,
                                                       dataset_name)
        acc, fbeta = 100 * acc, 100 * fbeta
        with metrics_cols[0]:
            fig = get_gauge_plot(acc, title="Accuracy")
            st.plotly_chart(fig, use_container_width=True)
        with metrics_cols[1]:
            fig = get_gauge_plot(fbeta, title="F2")
            st.plotly_chart(fig, use_container_width=True)
        if dataset_name == "Amazon":
            st.info(
                "ℹ️ These are the validation metrics [obtained when training the model](https://colab.research.google.com/github/karthikraja95/fsdl_deforestation_detection/blob/master/fsdl_deforestation_detection/experimental/FSDL_Final_Model.ipynb)."
            )
        else:
            st.info(
                "ℹ️ Showing performance metrics here by mapping the original labels to a "
                "binary, deforestation label. This should be somewhat relatable to the "
                "presence of oil palm plantations, which is the label in this dataset."
            )
    # Show number of samples
    fig = get_number_plot(len(img_names), title="Samples")
    st.plotly_chart(fig, use_container_width=True)
    # Show label analysis
    st.header("Label analysis")
    labels_cols = st.beta_columns(2)
    with labels_cols[0]:
        fig = get_hist_plot(
            labels,
            "labels",
            dataset_name,
            model_type,
            title="Labels distribution",
        )
        st.plotly_chart(fig, use_container_width=True)
    with labels_cols[1]:
        fig = get_hist_plot(
            pred,
            "predictions",
            dataset_name,
            model_type,
            title="Predicted labels distribution",
        )
        st.plotly_chart(fig, use_container_width=True)
    st.info(
        f"ℹ️ Using only a subset of {n_samples} samples, so as to make this plot practically fast."
    )
    # Show imagery analysis
    st.header("Imagery analysis")
    st.subheader("Image size")
    img_size_cols = st.beta_columns(3)
    with img_size_cols[0]:
        fig = get_number_plot(imgs.shape[1], title="Height")
        st.plotly_chart(fig, use_container_width=True)
    with img_size_cols[1]:
        fig = get_number_plot(imgs.shape[2], title="Width")
        st.plotly_chart(fig, use_container_width=True)
    with img_size_cols[2]:
        fig = get_number_plot(imgs.shape[3], title="Channels")
        st.plotly_chart(fig, use_container_width=True)
    fig = get_pixel_dist_plot(imgs)
    st.plotly_chart(fig, use_container_width=True)
    st.info(
        f"ℹ️ Using only a subset of {n_samples} samples, so as to make this plot practically fast."
    )
    # Show sample analysis
    st.header("Sample analysis")
    # Load and display the uploaded image
    with st.spinner("Loading image..."):
        img = load_image(bucket_name, img_path, sample_name)
        fig = px.imshow(img)
        st.plotly_chart(fig)
    # Run the model on the image
    output = run_model(model, img)
    st.subheader("Model output:")
    show_model_output(model_type, output)
    st.subheader("Real labels:")
    show_labels(dataset_name, sample_name, labels_table, img_name_col,
                label_col)
    st.info(
        "ℹ️ Green labels represent categories that we don't associate with deforestation "
        "risk (e.g. natural occurrences or old structures), while red labels can serve as "
        "a potential deforestation signal (e.g. new constructions, empty patches in forests)."
    )
    # Model interpretation
    with st.beta_expander("Peek inside the black box"):
        explain_cols = st.beta_columns(2)
        with explain_cols[0]:
            st.subheader("Model structure")
            st.info(
                "ℹ️ Our model is largely based on the [ResNet](https://paperswithcode.com/method/resnet) "
                "architecture, using a ResNet50 from [FastAI](https://docs.fast.ai/). "
                "Below you can see the model's layer definition.")
            st.text(model.model)
        with explain_cols[1]:
            st.subheader("Output interpretation")
            # TODO Add the result of applying SHAP to the model in the current sample
            st.info(
                "ℹ️ Given some difficulties with using [SHAP](https://github.com/slundberg/shap) "
                "with [FastAI](https://docs.fast.ai/), we haven't implemented this yet. "
                "Would you like to give it a try?")
コード例 #10
0
ファイル: main.py プロジェクト: HangCcZ/Senior-Design
import cv2
from fastai.vision.all import load_learner
import numpy as np
import dlib
import time

# Wall-clock start time for measuring overall processing duration.
start = time.time()
# dlib face detection: frontal-face detector plus 68-point landmark predictor.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    "/home/hang/PycharmProjects/MaskDetector/venv/models"
    "/shape_predictor_68_face_landmarks.dat")
# fastai classifier that scores faces into low/high risk classes.
learn_inf = load_learner(
    '/home/hang/PycharmProjects/MaskDetector/venv/models/risk_v2.pkl')
# Input video to process. NOTE(review): these absolute paths are
# machine-specific and will break elsewhere.
cap = cv2.VideoCapture(
    "/home/hang/PycharmProjects/MaskDetector/venv/Resources/Hang_video_Nov_1.mp4"
)

# OpenCV text-drawing parameters.
font = cv2.FONT_HERSHEY_SIMPLEX
# fontScale
fontScale = 0.5
# Line thickness in px
thickness = 1

# BGR overlay colors and running counts per risk class.
risk_color = {"low_risk": (0, 255, 0), "high_risk": (0, 0, 255)}
risk_counter = {"low_risk": 0, "high_risk": 0}

# Frame-sampling bookkeeping — presumably consumed by a processing loop
# beyond this excerpt; TODO confirm.
prev_time = -1
processed_frames = 0
sample_rate = 1
コード例 #11
0
ファイル: main.py プロジェクト: saitej123/TELEGRAM_CHATBOT
def load_model():
    """Load the exported fastai classifier into the module-level `model` global."""
    global model
    model = load_learner('CleanMessyModel.pkl')
    print('Model loaded')
コード例 #12
0
def predict(filename: str):
    """Run the bear classifier on the image at *filename* and return the
    predicted class name as a string."""
    # Load the exported learner from the working directory on every call.
    learner = load_learner(Path.cwd() / 'bear.pkl')
    label, _idx, _probs = learner.predict(PILImage.create(filename))
    return str(label)
コード例 #13
0
import streamlit as st
import numpy as np
from PIL import Image
from fastai.vision.all import load_learner, Path
# Page headings. NOTE(review): the title is rendered twice and the second one
# misspells "classifier" — likely unintentional, but these are user-visible
# strings so they are left untouched here; confirm with the author.
st.title("WELCOME CAT DOG CLASSIFIER")
st.title("cat dog classifer")
uploaded_file = st.file_uploader("Choose an image...", type="jpg")
#file upload
learn_inf = load_learner("exportt.pkl")  #load trained model
#classification

if uploaded_file is not None:
    #image transformation and prediction
    img = Image.open(uploaded_file)
    st.image(img, caption='Your Image.', use_column_width=True)
    # Convert the PIL image to a numpy array for the learner.
    image = np.asarray(img)
    label = learn_inf.predict(image)
    #label[0] accesses the actual label string
    #output display
    st.write("")
    st.write("Classifying...")
    #check for vowels in the names for correct grammar ("an" vs "a")
    if label[0][0] in "AEIOU":
        st.write("## This looks like an")
    else:
        st.write("## This looks like a")
    #our labels are in capital letters only; show them in Title Case
    st.title(label[0].lower().title())
コード例 #14
0
ファイル: app.py プロジェクト: CaoKha/plant_classifier
from flask import Flask, render_template, request, url_for
from fastai.vision.all import load_learner, PILImage
import re
# from flask_cors import CORS, cross_origin
import logging

app = Flask(__name__)

# Load the exported plant model once at startup; shared by all requests.
learn = load_learner('./models/model.pkl')


@app.route('/')
def homepage():
    """Serve the site's landing page."""
    return render_template('index.html')


def predict_single(img_file):
    """Classify one uploaded image and return [predicted_label, probability]."""
    image = PILImage.create(img_file)
    pred, pred_idx, probs = learn.predict(image)
    # Pair the label with its probability as a plain float.
    return [pred, probs[pred_idx].tolist()]


@app.route('/result', methods=['POST'])
def predict():
    if request.method == 'POST':
        my_prediction = predict_single(request.files['inputImage'])
        plant_class = str(my_prediction[0])
        plant_type = re.search(r'.+?(?=_)', plant_class).group()
        growth_stage = re.search(r'(?<=_)[\w+.-]+', plant_class).group()
        probability = str("{:.2f}".format(float(my_prediction[1]) * 100))
コード例 #15
0
ファイル: app.py プロジェクト: maxcljque/pokemon_classifier
from envparse import env

from starlette.applications import Starlette
from starlette.staticfiles import StaticFiles
from starlette.responses import JSONResponse
from starlette.templating import Jinja2Templates
import uvicorn
from fastai.vision.all import load_learner

# Jinja2 templates for HTML responses.
templates = Jinja2Templates(directory='templates')

# Starlette app with static assets served from ./statics at /static.
app = Starlette(debug=True)
app.mount('/static', StaticFiles(directory='statics'), name='static')

# Exported fastai classifier, loaded once at startup.
model = load_learner('pokemon_classifier.pkl')


@app.route('/')
async def homepage(request):
    """Render the index template for the root route."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)


@app.route('/classify', methods=['POST'])
async def classify(request):
    data = await request.form()
    bytes = await (data["file"].read())
    prediction, position, tensor = model.predict(bytes)
    return JSONResponse({
        "prediction": prediction,