Example #1
def get_databunch(datasetdir):
    import os

    import numpy as np
    from fastai.vision import DataBunch
    from fastai.vision import torch

    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader

    from KujuMNIST_dataset import KujuMNIST_DS

    trn_data = np.load(os.path.join(datasetdir, 'kmnist-train-imgs.npz'))
    trn_data = trn_data['arr_0'] / 255
    data_mean = trn_data.mean()
    data_std = trn_data.std()
    print(f'Mean: {data_mean}')
    print(f'Std: {data_std}')

    default_device = torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')

    transform_train = transforms.Compose([
        transforms.ToPILImage(),
        #transforms.RandomAffine(degrees=7, translate=(0.1, 0.1), scale=(0.95, 1.05)),
        transforms.ToTensor(),
        transforms.Normalize((data_mean, ), (data_std, )),
    ])

    transform_valid = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize((data_mean, ), (data_std, )),
    ])

    trn_ds = KujuMNIST_DS(datasetdir,
                          train_or_test='train',
                          download=False,
                          tfms=transform_train)
    val_ds = KujuMNIST_DS(datasetdir,
                          train_or_test='test',
                          download=False,
                          tfms=transform_valid)

    trn_dl = DataLoader(trn_ds,
                        batch_size=128,
                        shuffle=True,
                        num_workers=1,
                        pin_memory=True)
    val_dl = DataLoader(val_ds,
                        batch_size=128,
                        shuffle=False,  # the validation set does not need shuffling
                        num_workers=1,
                        pin_memory=True)
    databunch = DataBunch(path=datasetdir,
                          train_dl=trn_dl,
                          valid_dl=val_dl,
                          device=default_device)

    return databunch
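A minimal usage sketch, not part of the original example: it feeds the returned DataBunch into a fastai v1 Learner with a small, purely illustrative CNN for the 28x28 single-channel Kuzushiji-MNIST images. The 'data/kmnist' directory, the architecture, and the loss choice are assumptions.

import torch.nn as nn
from fastai.vision import Learner, accuracy

databunch = get_databunch('data/kmnist')  # hypothetical dataset directory

model = nn.Sequential(                    # illustrative architecture only
    nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(64 * 7 * 7, 10),
)

learn = Learner(databunch, model,
                loss_func=nn.CrossEntropyLoss(), metrics=accuracy)
learn.fit_one_cycle(1)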
Example #2
import os

import click
import torch
from fastai.vision import defaults, load_learner, open_image


def detect(path, model, img_cat_bin):
    '''
        Detect whether an image (or a group of images) shows a resistor.
    '''

    # setting cpu as default for inference
    defaults.device = torch.device('cpu')

    try:
        # load model
        learner = load_learner(path='.', file=model)

        # open image
        img = open_image(path)

    except FileNotFoundError as e:
        click.echo(e, err=True)
        return

    click.echo(f'Using model: {model}')
    click.echo('Starting prediction for:')

    # display image on the shell using imgcat from iterm2
    os.system(f'{img_cat_bin} {path}')

    # inference
    pred_class, pred_index, probs = learner.predict(img)

    click.echo(f'Data classes: {learner.data.classes}')
    click.echo(f'Prediction: {pred_class}')
    click.echo(f'Prediction index: {pred_index}')
    click.echo(f'Probabilities: {probs}')
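A sketch of how `detect` might be exposed as a click command; this is not part of the original snippet, and the option names and defaults are assumptions.

import click


@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--model', default='export.pkl',
              help='Exported fastai learner file.')
@click.option('--img-cat-bin', default='imgcat',
              help='Binary used to render the image in the terminal.')
def cli(path, model, img_cat_bin):
    """Classify PATH as resistor / not resistor."""
    detect(path, model, img_cat_bin)


if __name__ == '__main__':
    cli()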
Example #3
import time

import streamlit as st
import torch
from fastai.vision import load_learner


def pre_screen(img, display_img):
    """
    Performs inference and displays the target image with the result.

    Args:
        - img -> type:Image -> fastai wrapper for the pixel image.
        - display_img -> type:str -> location of the static image being analyzed.
    """
    with st.spinner('Wait for it...'):
        time.sleep(3)

    model = load_learner('models/', file="e-covidnet.pkl")

    if torch.cuda.is_available():
        model.model.load_state_dict(torch.load('models/e-covidnet.pth'))
    else:
        model.model.load_state_dict(
            torch.load('models/e-covidnet.pth',
                       map_location=torch.device("cpu")))

    prediction = model.predict(img)
    pred_class = prediction[0]
    pred_prob = round(torch.max(prediction[2]).item() * 100)

    if str(pred_class) == 'COVID-19':
        st.success("COVID-19 with the probability of " + str(pred_prob) + '%.')
        st.image(display_img, width=300)
    elif str(pred_class) == 'pneumonia':
        st.success("Viral Pneumonia with the probability of " +
                   str(pred_prob) + '%.')
        st.image(display_img, width=300)
    elif str(pred_class) == 'normal':
        st.success("Normal with the probability of " + str(pred_prob) + '%.')
        st.image(display_img, width=300)
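One possible Streamlit driver for `pre_screen`, not in the original example: it uses a file uploader and passes a PIL image as `display_img` (which `st.image` also accepts); the widget labels are placeholders.

import streamlit as st
from PIL import Image
from fastai.vision import open_image

uploaded = st.file_uploader('Upload a chest X-ray', type=['png', 'jpg', 'jpeg'])

if uploaded is not None:
    img = open_image(uploaded)        # fastai Image for inference
    uploaded.seek(0)                  # rewind so the raw file can be reopened
    display_img = Image.open(uploaded)
    pre_screen(img, display_img)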
Example #4
    def predict(self, img_bytes):
        # run inference on the CPU
        defaults.device = torch.device('cpu')

        learn = load_learner(self.path)
        img = open_image(img_bytes)
        pred_class = learn.predict(img)

        return self.classes[str(pred_class[0])]
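A sketch of the class this method could belong to; it is not from the original, and the class name, constructor arguments, and label mapping are assumptions.

import torch
from fastai.vision import defaults, load_learner, open_image


class ImageClassifier:
    def __init__(self, path, classes):
        self.path = path        # directory containing the exported learner
        self.classes = classes  # maps predicted category names to labels

    def predict(self, img_bytes):
        defaults.device = torch.device('cpu')

        learn = load_learner(self.path)
        img = open_image(img_bytes)
        pred_class = learn.predict(img)

        return self.classes[str(pred_class[0])]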
Example #5
    def predict(self, x):
        '''
        Input: x = block of input images, stored as a torch.Tensor of dimension
                   (batch_size x 3 x H x W), scaled between 0 and 1.
        Returns: a tuple containing:
            1. The final class predictions for each image (brick, ball, or cylinder)
               as a list of strings.
            2. Upper-left and lower-right bounding box coordinates (in pixels) for
               the brick, ball, or cylinder in each image, as a 2d numpy array of
               dimension batch_size x 4.
            3. Segmentation mask for each image, as a 3d numpy array of dimension
               (batch_size x H x W). Each value in each segmentation mask should be
               0, 1, 2, or 3, where 0=background, 1=brick, 2=ball, 3=cylinder.
        '''

        #Normalize input data using the same mean and std used in training:
        x_norm = normalize(x, torch.tensor(self.learn.data.stats[0]),
                           torch.tensor(self.learn.data.stats[1]))

        #Pass data into model:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        with torch.no_grad():
            yhat = self.learn.model(x_norm.to(device))
            yhat = yhat.detach().cpu()

        #Post-processing/parsing outputs, here's an example for classification only:
        class_prediction_indices = yhat.argmax(1)

        # For each image, pick the most frequent non-background class in the
        # predicted segmentation map (the most frequent class is assumed to be
        # the background, index 0)
        class_values = np.zeros(class_prediction_indices.shape[0],
                                dtype='int8')
        for i in range(class_prediction_indices.shape[0]):
            cls, counts = np.unique(class_prediction_indices[i],
                                    return_counts=True)
            counter = sorted(zip(cls, counts), key=lambda item: item[1],
                             reverse=True)
            if len(counter) == 1:
                class_values[i] = counter[0][0]
            else:
                class_values[i] = counter[1][0]

        #creating class label strings
        #print(class_values)
        class_label_strings = [self.class_names[i] for i in class_values]
        print(class_label_strings)

        #Compute bounding boxes from the non-background pixels of each
        #predicted segmentation mask:
        bboxes = np.zeros((len(class_prediction_indices), 4))
        for i in range(len(class_prediction_indices)):
            rows, cols = np.where(class_prediction_indices[i] != 0)
            bboxes[i, :] = np.array(
                [rows.min(), cols.min(),
                 rows.max(), cols.max()])

        class_prediction_indices = np.array(class_prediction_indices)
        return (class_label_strings, bboxes, class_prediction_indices)
Example #6
    def predict(self, x):
        '''
        Input: x = block of input images, stored as a torch.Tensor of dimension
                   (batch_size x 3 x H x W), scaled between 0 and 1.
        Returns: a tuple containing:
            1. The final class predictions for each image (brick, ball, or cylinder)
               as a list of strings.
            2. Upper-left and lower-right bounding box coordinates (in pixels) for
               the brick, ball, or cylinder in each image, as a 2d numpy array of
               dimension batch_size x 4.
            3. Segmentation mask for each image, as a 3d numpy array of dimension
               (batch_size x H x W). Each value in each segmentation mask should be
               0, 1, 2, or 3, where 0=background, 1=brick, 2=ball, 3=cylinder.
        '''

        #Normalize input data using the same mean and std used in training:
        x_norm = normalize(x, torch.tensor(self.learn.data.stats[0]),
                           torch.tensor(self.learn.data.stats[1]))

        #Pass data into model:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        with torch.no_grad():
            yhat = self.learn.model(x_norm.to(device))
            yhat = yhat.detach().cpu()

        #Post-processing/parsing outputs, here's an example for classification only:
        class_prediction_indices = yhat.argmax(dim=1)
        class_predictions = [
            self.learn.data.classes[i] for i in class_prediction_indices
        ]

        #Random Selection Placeholder Code for testing
        #class_predictions=[self.class_names[np.random.randint(3)] for i in range(x.shape[0])]

        #Scale randomly chosen bbox coords to image shape:
        bbox = np.random.rand(x.shape[0], 4)
        bbox[:, 0] *= x.shape[2]
        bbox[:, 2] *= x.shape[2]
        bbox[:, 1] *= x.shape[3]
        bbox[:, 3] *= x.shape[3]

        #Create random segmentation mask:
        mask = np.random.randint(low=0,
                                 high=4,
                                 size=(x.shape[0], x.shape[2], x.shape[3]))

        return (class_predictions, bbox, mask)
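A quick sanity check, not in the original, against the contract in the docstring; it assumes `detector` is an instance of the surrounding class and that the model accepts 256x256 inputs.

import torch

batch = torch.rand(4, 3, 256, 256)   # 4 images scaled to [0, 1]
labels, boxes, masks = detector.predict(batch)

assert len(labels) == 4              # one class string per image
assert boxes.shape == (4, 4)         # [row_min, col_min, row_max, col_max] per image
assert masks.shape == (4, 256, 256)  # per-pixel labels in {0, 1, 2, 3}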
Example #7
    def __init__(self, pkl_dir, pkl_file):
        defaults.device = torch.device('cpu')  # run on a CPU
        self.learn = load_learner(path=pkl_dir, file=pkl_file)
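The original example shows only the constructor; a fuller wrapper might look like this sketch. The `Predictor` class name and the `predict` method are assumptions, not from the source.

import torch
from fastai.vision import defaults, load_learner, open_image


class Predictor:
    def __init__(self, pkl_dir, pkl_file):
        defaults.device = torch.device('cpu')  # run on a CPU
        self.learn = load_learner(path=pkl_dir, file=pkl_file)

    def predict(self, img_path):
        """Return the predicted class name and its probability for one image."""
        img = open_image(img_path)
        pred_class, pred_idx, probs = self.learn.predict(img)
        return str(pred_class), probs[pred_idx].item()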
Example #8
# from fastai.vision import *
from fastai.vision import ImageList, imagenet_stats, cnn_learner, models
from fastai.vision import get_transforms, ResizeMethod, accuracy, torch
from fastai.vision import ClassificationInterpretation, doc
from fastai.metrics import error_rate
from fastai.imports import Path
import fastai

# from fastai import *
import warnings
import os

import pandas as pd

# change if GPU available (tested only on CPU)
fastai.torch_core.defaults.device = torch.device("cpu")
# defaults.device = torch.device('cpu')
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

warnings.filterwarnings("ignore",
                        category=UserWarning,
                        module="torch.nn.functional")

# https://github.com/dmlc/xgboost/issues/1715
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"

# training is very, very slow
os.environ["OMP_NUM_THREADS"] = "1"

## helpful way to initially get folders
# import split_folders
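A sketch of how the imports above are typically combined in fastai v1; it is not part of the original file, and `data_dir`, the split ratio, image size, and architecture are placeholders.

data_dir = Path("data")  # hypothetical folder with one sub-folder per class

data = (ImageList.from_folder(data_dir)
        .split_by_rand_pct(0.2)
        .label_from_folder()
        .transform(get_transforms(), size=224,
                   resize_method=ResizeMethod.SQUISH)
        .databunch(bs=16)
        .normalize(imagenet_stats))

learn = cnn_learner(data, models.resnet34, metrics=[accuracy, error_rate])
learn.fit_one_cycle(1)

interp = ClassificationInterpretation.from_learner(learn)
print(pd.DataFrame(interp.confusion_matrix(),
                   index=data.classes, columns=data.classes))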