def __init__(self, learner, input_type=None, output_type=None):
        self.learn = load_learner(learner)

        # get input type
        if input_type is None:
            if 'TextLearner' in str(type(self.learn)):
                self.input_type = 'text'
            else:
                self.input_type = 'image'
        else:
            self.input_type = input_type

        # get output type
        if output_type is None:
            loss_func = str(type(self.learn.loss_func))
            if 'CrossEntropy' in loss_func:
                self.output_type = 'class'
            elif 'BCEWithLogits' in loss_func:
                self.output_type = 'label'
            elif 'MSELoss' in loss_func:
                self.output_type = 'reg'
            else:
                self.output_type = None
                print('no output_type defined!')
        else:
            self.output_type = output_type

        print(
            f'learner initialized! input: {self.input_type}, output: {self.output_type}'
        )
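A minimal usage sketch for the wrapper above. The enclosing class is not shown in the snippet, so `InferenceWrapper` and the pickle path below are only hypothetical placeholders:

# Hypothetical usage; `InferenceWrapper` stands in for whatever class owns the
# __init__ above, and 'export.pkl' is a placeholder path to an exported Learner.
wrapper = InferenceWrapper('export.pkl')
print(wrapper.input_type, wrapper.output_type)  # e.g. 'image', 'class'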
Example #2
def experiment_2b_inference():
    # learn = cnn_learner(dls, resnet18, metrics=[accuracy])
    learn = load_learner("30_export.pkl")
    test_imgs = create_image_array_from_directory_path(
        "datasets/cat_dog/cat_dog_test_set/dogs")
    dl = learn.dls.test_dl(test_imgs)
    op = learn.get_preds(dl=dl)
    print(op)
Example #3
    def load(cls, path: str, reset_paths=True, verbose=True):
        from fastai.learner import load_learner
        model = super().load(path, reset_paths=reset_paths, verbose=verbose)
        if model._load_model:
            model.model = load_pkl.load_with_fn(
                f'{model.path}{model.model_internals_file_name}',
                lambda p: load_learner(p),
                verbose=verbose)
        model._load_model = None
        return model
Example #4
    def __call__(self, img):
        if not self.loaded:
            self.model = load_learner(self.model_path, cpu=False)
            self.loaded = True
        
        original_size = (img.shape[1], img.shape[0])

        mask = expand_mask(road_preds(self.model.predict(img)))
        masked_img = hide_non_road(img, mask)
        return masked_img
Example #5
async def setup_learner():
    await download_file(export_file_url, path / export_file_name)
    try:
        learn = load_learner(path / export_file_name)
        return learn
    except RuntimeError as e:
        if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:
            print(e)
            message = "\n\nThis model was trained with an old version of fastai and will not work in a CPU environment.\n\nPlease update the fastai library in your training environment and export your model again.\n\nSee instructions for 'Returning to work' at https://course.fast.ai."
            raise RuntimeError(message)
        else:
            raise
Example #6
class EmojiClassifier:
    def __init__(self):
        self.MODEL_PATH = "model.pkl"
        self.model_origin = self.MODEL_PATH
        if (MODEL_S3_PATH := os.environ.get("MODEL_S3_PATH")):
            self.MODEL_S3_PATH = MODEL_S3_PATH
            self.model_origin = self.MODEL_S3_PATH
            print(f"Fetching model from: {self.MODEL_S3_PATH}")
            client.download_file(
                os.getenv("SPACE_NAME"), self.MODEL_S3_PATH, self.MODEL_PATH
            )

        print(f"Loading model: {self.MODEL_PATH}")
        self.learn = load_learner(self.MODEL_PATH)
        self.labels = self.learn.dls.vocab[1]
Example #7
    def get_dog_details(self, img):
        """Use a pre-trained model to get dog details."""
        if os.path.exists(model_path):
            logger.info("Invoking Model:" + model_path)
            learn_inference = load_learner(model_path)
            logger.info("Running Predictor...")
            img = PILImage.create(img)
            pred, pred_idx, probs = learn_inference.predict(img)
            logger.info('Predicted Result:' + str(pred))
            return {
                'success': True,
                'pred': str(pred),
                # 'pred_idx': pred_idx,
                # 'probs': probs
            }
        else:
            logger.debug("Model not found:" + model_path)
            return {'success': False, 'message': 'Apologies. Model not found'}
Example #8
def from_pretrained_fastai(
    repo_id: str,
    revision: Optional[str] = None,
):
    """
    Load pretrained fastai model from the Hub or from a local directory.

    Args:
        repo_id (`str`):
            The location where the pickled fastai.Learner is. It can be either of the two:
                - Hosted on the Hugging Face Hub. E.g.: 'espejelomar/fatai-pet-breeds-classification' or 'distilgpt2'.
                  You can add a `revision` by appending `@` at the end of `repo_id`. E.g.: `dbmdz/bert-base-german-cased@main`.
                  Revision is the specific model version to use. Since we use a git-based system for storing models and other
                  artifacts on the Hugging Face Hub, it can be a branch name, a tag name, or a commit id.
                - Hosted locally. `repo_id` would be a directory containing the pickle and a pyproject.toml
                  indicating the fastai and fastcore versions used to build the `fastai.Learner`. E.g.: `./my_model_directory/`.
        revision (`str`, *optional*):
            Revision at which the repo's files are downloaded. See documentation of `snapshot_download`.

    Returns:
        The `fastai.Learner` model in the `repo_id` repo.
    """
    _check_fastai_fastcore_versions()

    # Load the `repo_id` repo.
    # `snapshot_download` returns the folder where the model was stored.
    # `cache_dir` will be the default '/root/.cache/huggingface/hub'
    if not os.path.isdir(repo_id):
        storage_folder = snapshot_download(
            repo_id=repo_id,
            revision=revision,
            library_name="fastai",
            library_version=get_fastai_version(),
        )
    else:
        storage_folder = repo_id

    _check_fastai_fastcore_pyproject_versions(storage_folder)

    from fastai.learner import load_learner

    return load_learner(os.path.join(storage_folder, "model.pkl"))
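A short usage sketch for the helper above; the directory path comes from the docstring example and is only illustrative:

# Usage sketch: load a Learner from a local export directory (placeholder path)
learner = from_pretrained_fastai("./my_model_directory/")
print(learner.dls.vocab)  # inspect the classes the Learner was trained on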
Example #9
def bart2(filename, number, y, str):

    from fastai.learner import load_learner
    import torch
    import pickle
    import datasets
    import re
    import pathlib
    # Workaround so a Learner pickled on Linux can be unpickled on Windows
    temp = pathlib.PosixPath
    pathlib.PosixPath = pathlib.WindowsPath

    inf_learn = load_learner(fname='./data/bart_pretrained_SAMsum.pkl')
    with open(
            "./static/text_files/{}".format(
                filename.replace('.txt', '_dr_sum_file.txt')), 'w') as f:
        f.write(str + "\n")
        num = 0
        s = ''
        llist = []
        for i in range(len(y)):
            s = s + y[i]
            if num == number:
                llist.append(s)
                num = 0
                s = ''
            num += 1
        ll = []
        for i in llist:
            tt = inf_learn.blurr_generate(i)
            a = tt.pop()
            a = a.strip()
            # a1 = re.sub(r'\([^)]*\)', '', a)
            ll.append(a)
            print(a)
        for i in ll:
            f.write(i + "\n")
    return
Example #10
def _load_model(path):
    from fastai.learner import load_learner

    return load_learner(os.path.abspath(path))
Example #11
    def __init__(self, model_path):
        self.model = load_learner(model_path)
Example #12
def load_model():

    return load_learner(current_folder / "instrument_classifier.pkl")
Example #13
def load_model():
    """Load model from file."""
    path = Path(os.path.realpath(__file__)).resolve().parent.parent
    path = path / "models/model.pkl"
    learner = load_learner(path, cpu=True)
    return learner
Example #14
def load_all(path='export',
             dls_fname='dls',
             model_fname='model',
             learner_fname='learner',
             device=None,
             pickle_module=pickle,
             verbose=False):

    if isinstance(device, int): device = torch.device('cuda', device)
    elif device is None: device = default_device()
    if device == 'cpu': cpu = True
    else: cpu = None

    path = Path(path)
    learn = load_learner(path / f'{learner_fname}.pkl',
                         cpu=cpu,
                         pickle_module=pickle_module)
    learn.load(f'{model_fname}', with_opt=True, device=device)

    if learn.dls_type == "MixedDataLoaders":
        from .data.mixed import MixedDataLoader, MixedDataLoaders
        dls_fnames = []
        _dls = []
        for i in range(learn.n_loaders[0]):
            _dl = []
            for j in range(learn.n_loaders[1]):
                l = torch.load(path / f'{dls_fname}_{i}_{j}.pth',
                               map_location=device,
                               pickle_module=pickle_module)
                l = l.new(num_workers=0)
                l.to(device)
                dls_fnames.append(f'{dls_fname}_{i}_{j}.pth')
                _dl.append(l)
            _dls.append(
                MixedDataLoader(*_dl,
                                path=learn.dls.path,
                                device=device,
                                shuffle=l.shuffle))
        learn.dls = MixedDataLoaders(*_dls, path=learn.dls.path, device=device)

    else:
        loaders = []
        dls_fnames = []
        for i in range(learn.n_loaders):
            dl = torch.load(path / f'{dls_fname}_{i}.pth',
                            map_location=device,
                            pickle_module=pickle_module)
            dl = dl.new(num_workers=0)
            dl.to(device)
            first(dl)
            loaders.append(dl)
            dls_fnames.append(f'{dls_fname}_{i}.pth')
        learn.dls = type(learn.dls)(*loaders,
                                    path=learn.dls.path,
                                    device=device)

    pv(f'Learner loaded:', verbose)
    pv(f"path          = '{path}'", verbose)
    pv(f"dls_fname     = '{dls_fnames}'", verbose)
    pv(f"model_fname   = '{model_fname}.pth'", verbose)
    pv(f"learner_fname = '{learner_fname}.pkl'", verbose)
    return learn
Example #15
    st.markdown(
        f"""
    <style>
    .reportview-container .main .block-container{{
        {max_width_str}
    }}
    </style>    
    """,
        unsafe_allow_html=True,
    )


_max_width_()

model_path = Path("bart_tldr.pkl")


# @st.cache(max_entries=3)
def load_model(path):
    return load_learner(path)


tech_doc = st.text_area(
    label="Man Page Entry",
    value="Pls enter technical document you want to summarize",
    height=500,
)

model = load_learner(fname=model_path)
st.markdown(f"### Summary:\n\n{model.blurr_summarize(tech_doc)[0]}")
Example #16
def load_model(path):
    return load_learner(path)
Example #17
    def __init__(self, model):
        self.inf = load_learner(model)
Example #18
def load_image(image):
    return Image.open(image)


def predict_img(img):
    '''
    Pass    PILImage object
    Return  prediction[str], prediction_idx[int], probability[tensor]
    '''
    if img is not None:
        return learner_inf.predict(img)


'## COVID-19 Mask Classifier'
"Here's the [GitHub](https://github.com/jacKlinc/covid_mask_classifier) repo"
'Upload a picture of someone'
learner_inf = load_learner('./covid_mask.pkl')

# Upload
pic = st.file_uploader("Upload Files")

"Click Classify to find out if they're wearing a mask"

probs = []
pred_idx = 1
pred = 'n/a'

# Display image
if pic is not None:
    img = load_image(pic)
    st.image(img)

Example #19
def predict_img(img):
    '''
    Pass    PILImage object
    Return  prediction[str], prediction_idx[int], probability[tensor]
    '''
    if img is not None:
        return learner_inf.predict(img)


'## German Character Recogniser'
"Here's the [GitHub](https://github.com/jacKlinc/german_char_recogniser) repo"
'Upload a picture of a vowel'

learner_inf = load_learner('./res/AEIOU_model.pkl')

# Upload
pic = st.file_uploader("Upload Image")

"Click Classify to find out which vowel it is"

probs = []
pred_idx = 1
pred = 'n/a'

# Display image
if pic is not None:
    img = load_image(pic)
    st.image(img)
Example #20
    def load_model(self, results_path="results", model_name=None, cpu=True):
        self.learn = load_learner(fname=os.path.join(results_path, model_name),
                                  cpu=cpu)
        # can unfreeze the model and fine_tune
        self.learn.unfreeze()
        return self
Example #21
from flask import Flask, request, redirect, render_template
from fastai.learner import load_learner
import pathlib
from fastai.vision.core import PILImage
import platform

application = Flask(__name__)
application.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024

# Workaround pytorch issue with models developed on linux being used on Windows
if platform.system() == 'Windows':
    pathlib.PosixPath = pathlib.WindowsPath

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

learn_inf = load_learner('export.pkl', cpu=True)


def allowed_file(filename):
    return '.' in filename and filename.rsplit(
        '.', 1)[1].lower() in ALLOWED_EXTENSIONS


@application.route("/upload-image/", methods=["GET", "POST"])
def upload_image():
    if request.method == "POST":
        if request.files:
            image = request.files["image"]
            if allowed_file(image.filename):
                pred, pred_idx, probs = learn_inf.predict(
                    PILImage.create(image))
Example #22
import fastai
from fastai.imports import *
from fastai.learner import load_learner
# from fastai.vision.widgets import *
from fastai.vision.core import PILImage
from flask import Flask, request, render_template
import os
import glob
from flask import send_file
path = Path()
print(path.ls(file_exts='.pkl'))
learn_inf = load_learner(path / 'export.pkl')

app = Flask(__name__)

#Defining the home page for the web service
# @app.route('/')
# def home():
#     return f"<h1>{predict}</h1>"

# UPLOAD_FOLDER = './upload'
UPLOAD_FOLDER = os.path.join('static', 'upload')

app = Flask(__name__, static_folder=os.path.abspath('static'))
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER


def clear_contents():
    files = glob.glob(os.path.join(UPLOAD_FOLDER, '*'))
    for f in files:
        os.remove(f)
Example #23
def get_mask_detection_learner():
    learner = load_learner(MASK_DETECTION_MODEL_PATH)

    return learner
Example #24
def get_model():
    if not os.path.isfile(MODEL_FILE):
        _ = download_file(f'{REPO_DIR}/models/{MODEL_FILE}')

    learn = load_learner(MODEL_FILE)
    return learn
Example #25
    def __init__(self, path):
        self.learn = load_learner(path)
        classes = self.learn.dls.vocab
Example #26
import numpy as np
import pandas as pd
from fastai.learner import load_learner
import plotly.express as px

import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

server = app.server

learn = load_learner('resnet50_v2.pkl')
classes = learn.dls.vocab

app.layout = html.Div([
    html.Div([
        html.H2('Airliners classifier'),
        html.H6([
            'Welcome to the aircraft photo classifier. This application is designed to recognize an airliner from an uploaded image.',
            html.Br(), 'Currently it supports the following planes:'
        ]),
        html.Div(str(classes))
    ],
             style={'textAlign': 'center'}),
    dcc.Upload(id='upload-image',
               children=html.Div(['Drag and Drop or ',
                                  html.A('Select Files')]),
Example #27
from io import BytesIO
from PIL import Image
import torch
import torchvision.transforms.functional as F
from fastai.learner import load_learner

import pathlib

# Workaround so a Learner pickled on Linux can be unpickled on Windows
temp = pathlib.PosixPath
pathlib.PosixPath = pathlib.WindowsPath

# folder_path = 'C:/Users/SHREYAS/Desktop/Fabric Project/Final/fabrixx/fabrix/models/export.pkl'
# folder_path_fault = 'C:/Users/SHREYAS/Desktop/Fabric Project/Final/fabrixx/fabrix/models/fault.pkl'
folder_path = 'fabrix/models/export.pkl'
folder_path_fault = 'fabrix/models/fault.pkl'

learn = load_learner(folder_path)
classes = ['Defective', 'Non Defective']

learn_fault = load_learner(folder_path_fault)
classes_fault = [
    'Foreign Bodies on Tissue', 'Holes and Cuts', 'Oil Stains', 'Thread Error'
]

# import numpy as np
# import traceback
# import pickle
# import pandas as pd

# with open('./fabrix/models/Fast_ai.pkl', 'rb') as f:
#    learn = pickle.load(f)
#    classes = learn.data.classes
Example #28
# %%
model_file_path = config.model_dir_path / "cats-vs-dogs.pth"

# %%
if session.is_interactive:
    learner.dls.show_batch()
    learner.show_results()

# %%
if session.is_interactive:
    learner.export(model_file_path)

# %%
if session.is_interactive:
    loaded_learner = load_learner(model_file_path)


# %%
class AnimalType(str, Enum):
    cat = "cat"
    dog = "dog"


# %%
def classify(image: PILImage, learner: Learner) -> Tuple[AnimalType, float]:
    with learner.no_bar():
        results = learner.predict(image)
    _, category, probabilities = results
    # class index 1 is treated as 'cat' (depends on the learner's vocab ordering)
    is_a_cat = category == 1
    animal_type = AnimalType.cat if is_a_cat else AnimalType.dog
Example #29
def predict_segmentation(path_to_model: str,
                         path_to_image: str,
                         outfile: str,
                         processing_dir: str = 'temp',
                         tile_size: int = 400,
                         tile_overlap: int = 100,
                         use_tta: bool = True):
    """Segment image into land cover classes with a pretrained models
    TODO save also information about label and class
    TODO add test-time augmentations"""
    if os.path.exists(processing_dir):
        print('Processing folder exists')
        return
    os.makedirs(processing_dir)
    print(
        f'Reading and tiling {path_to_image} to {tile_size}x{tile_size} tiles with overlap of {tile_overlap}px'
    )
    tiler = Tiler(outpath=processing_dir,
                  gridsize_x=int(tile_size),
                  gridsize_y=int(tile_size),
                  overlap=(int(tile_overlap), int(tile_overlap)))
    tiler.tile_raster(path_to_image)

    # Check whether is possible to use gpu
    cpu = not torch.cuda.is_available()

    # Loading pretrained model

    # PyTorch state dict TODO
    if path_to_model.endswith('.pth') or path_to_model.endswith('.pt'):
        print('Using PyTorch state dict not yet supported')
        print('Removing intermediate files')
        rmtree(processing_dir)
        return
    # fastai learn.export()
    elif path_to_model.endswith('.pkl'):
        learn = load_learner(path_to_model, cpu=cpu)
        test_files = get_image_files(f'{processing_dir}/raster_tiles')
        print('Starting prediction')
        os.makedirs(f'{processing_dir}/predicted_rasters')
        # Works with chunks of 300 patches
        for chunk in range(0, len(test_files), 300):
            test_dl = learn.dls.test_dl(test_files[chunk:chunk + 300],
                                        num_workers=0,
                                        bs=1)
            test_dl.set_base_transforms()
            if use_tta:
                batch_tfms = [Dihedral()]
                item_tfms = [ToTensor(), IntToFloatTensor()]
                preds = learn.tta(dl=test_dl, batch_tfms=batch_tfms)[0]
            else:
                preds = learn.get_preds(dl=test_dl,
                                        with_input=False,
                                        with_decoded=False)[0]

            print('Rasterizing predictions')
            for f, p in tqdm(zip(test_files[chunk:chunk + 300], preds)):
                #if len(p.shape) == 3: p = p[0]
                ds = gdal.Open(str(f))
                out_raster = gdal.GetDriverByName('gtiff').Create(
                    f'{processing_dir}/predicted_rasters/{f.stem}{f.suffix}',
                    ds.RasterXSize, ds.RasterYSize, p.shape[0], gdal.GDT_Int16)
                out_raster.SetProjection(ds.GetProjectionRef())
                out_raster.SetGeoTransform(ds.GetGeoTransform())
                np_pred = p.numpy()  #.argmax(axis=0)
                np_pred = np_pred.round(2)
                np_pred *= 100
                np_pred = np_pred.astype(np.int16)
                for c in range(p.shape[0]):
                    band = out_raster.GetRasterBand(c + 1).WriteArray(
                        np_pred[c])
                    band = None
                #band = out_raster.GetRasterBand(1).WriteArray(np_pred)
                out_raster = None
                ds = None

    print('Merging predictions')
    temp_full = f'{processing_dir}/full_raster.tif'
    untile_raster(f'{processing_dir}/predicted_rasters',
                  outfile=temp_full,
                  method='sum')

    print('Postprocessing predictions')

    raw_raster = gdal.Open(temp_full)
    processed_raster = gdal.GetDriverByName('gtiff').Create(
        outfile, raw_raster.RasterXSize, raw_raster.RasterYSize, 1,
        gdal.GDT_Int16)
    processed_raster.SetProjection(raw_raster.GetProjectionRef())
    processed_raster.SetGeoTransform(raw_raster.GetGeoTransform())
    raw_np = raw_raster.ReadAsArray()
    pred_np = raw_np.argmax(axis=0)
    band = processed_raster.GetRasterBand(1).WriteArray(pred_np)
    raw_raster = None
    band = None
    processed_raster = None

    print('Removing intermediate files')
    rmtree(processing_dir)
    return
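A usage sketch for the function above; the model, image, and output paths are placeholders:

# Usage sketch with placeholder paths
predict_segmentation(path_to_model='models/landcover.pkl',
                     path_to_image='data/area_of_interest.tif',
                     outfile='predictions/area_of_interest_pred.tif',
                     tile_size=400,
                     tile_overlap=100,
                     use_tta=False)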