def create_inference_model(checkpoint: str = None, model='resnet34', path='.'):
    """Build a fastai Learner for inference and optionally load a checkpoint.

    Args:
        checkpoint: name of a saved checkpoint to load via ``Learner.load``
            (resolved relative to ``path``), or ``None`` to skip loading.
        model: architecture name — one of ``'resnet34'``, ``'resnet18'``,
            ``'mobilenet_v2'`` — any other value is passed through to
            ``cnn_learner`` unchanged (matching the original behavior).
        path: directory assigned to ``learner.path``; checkpoints are looked
            up relative to it.

    Returns:
        A ``Learner`` configured for CPU inference (``pretrained=False``).
    """
    # Dict dispatch instead of an if/elif chain; unknown names fall through
    # unchanged, exactly as the original chain did.
    arch_by_name = {
        'resnet34': resnet34,
        'resnet18': resnet18,
        'mobilenet_v2': mobilenet_v2,
    }
    model = arch_by_name.get(model, model)

    # A two-item dummy dataset exists only so the DataBlock can build its
    # image -> binary-category pipeline; the pixel data is never used.
    inf_db = DataBlock(blocks=[ImageBlock, CategoryBlock],
                       get_x=ItemGetter(0),
                       get_y=ItemGetter(1))
    dummy_img = PILImage.create(np.zeros((415, 415, 3), dtype=np.uint8))
    source = [(dummy_img, False), (dummy_img, True)]
    inf_dls = inf_db.dataloaders(source)

    if model == mobilenet_v2:
        # mobilenet_v2 needs a custom head cut and parameter-group splitter.
        learner = cnn_learner(inf_dls, model, cut=-1,
                              splitter=_mobilenetv2_split, pretrained=False)
    else:
        learner = cnn_learner(inf_dls, model, pretrained=False)

    learner.path = Path(path)
    if checkpoint is not None:
        learner.load(checkpoint, with_opt=False, device='cpu')
    return learner
def fake_dataloaders(a=2, b=3, bs=16, n=10):
    """Return DataLoaders over synthetic linear data ``y = a*x + b + noise``.

    Produces ``bs * n`` points; each row is ``(x, y)`` with Gaussian noise
    of scale 0.1 added to the targets.
    """
    def make_points(num_batches):
        # Draw xs first, then the noise, to keep the RNG call order fixed.
        xs = torch.randn(bs * num_batches, 1)
        ys = a * xs + b + 0.1 * torch.randn(bs * num_batches, 1)
        return torch.cat((xs, ys), 1)

    return DataBlock().dataloaders(make_points(n))
def pack_models(path: str) -> None:
    """Build a minimal Learner (LinearModel + Loss) and save it to *path*
    through the FastAIModel wrapper."""
    net = LinearModel()
    criterion = Loss()
    block = DataBlock(get_items=get_items, get_y=np.sum)
    loaders = block.datasets(None).dataloaders()
    FastAIModel(Learner(loaders, net, criterion)).save(path)
def get_data(inputs, df_all=None, batch_tfms=None, item_tfms=None, verbose=False, autoencoder=False):
    """Build fastai DataLoaders from a dataframe of recorded frames.

    Args:
        inputs: source passed to ``get_dataframe`` when *df_all* is not given.
        df_all: pre-built dataframe; when ``None`` it is loaded from *inputs*.
        batch_tfms: optional batch-level transforms forwarded to the DataBlock.
        item_tfms: optional item-level transforms; defaults to a 128px squish
            resize when ``None``.
        verbose: when True, show a sample batch and print the batch shape.
        autoencoder: when True, targets are the images themselves
            (image -> image); otherwise a 2-output regression on
            ``user/angle`` and ``user/throttle``.

    Returns:
        fastai ``DataLoaders`` built from the dataframe.
    """
    if df_all is None:
        df_all = get_dataframe(inputs, verbose)

    tfms = [Resize(128, method="squish")] if item_tfms is None else item_tfms

    if autoencoder:
        # Reconstruction task: input and target are the same image column.
        blocks = (ImageBlock, ImageBlock)
        y_reader = ColReader("cam/image_array")
    else:
        # Regression task: predict steering angle and throttle.
        blocks = (ImageBlock, RegressionBlock(n_out=2))
        y_reader = ColReader(['user/angle', 'user/throttle'])

    pascal = DataBlock(blocks=blocks,
                       splitter=RandomSplitter(),
                       get_x=ColReader("cam/image_array"),
                       get_y=y_reader,
                       item_tfms=tfms,
                       batch_tfms=batch_tfms,
                       n_inp=1)
    dls = pascal.dataloaders(df_all)

    if verbose:
        dls.show_batch()
        # Fix: the original evaluated dls.one_batch()[0].shape and discarded
        # the result (a notebook leftover that is a no-op in a script);
        # print it so verbose mode actually reports the batch shape.
        print(dls.one_batch()[0].shape)
    return dls
# %% # %% mnist_dls_rgb = ImageDataLoaders.from_folder( mnist_dir, train="training", valid="testing", device=device, ) # %% mnist_block = DataBlock( blocks=(ImageBlock(cls=PILImageBW), CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(train_name="training", valid_name="testing"), get_y=parent_label, # batch_tfms=aug_transforms(mult=1.2, do_flip=False) ) # %% mnist_dls = mnist_block.dataloaders(mnist_dir) # %% mnist_dls.train.one_batch()[0].shape # %% mnist_dls.show_batch() # %% [markdown] #
import torch from fastai.data.block import DataBlock, CategoryBlock, get_image_files, GrandparentSplitter, parent_label from fastai.metrics import error_rate from fastai.vision.augment import Resize, RandomResizedCrop, aug_transforms from fastai.vision.data import ImageBlock from fastai.vision.all import cnn_learner, ClassificationInterpretation, load_learner, nn, partial, MixUp, xresnet50, \ accuracy, top_k_accuracy, Learner from torchvision.models import resnet18 import matplotlib.pyplot as plt from fastai.distributed import * # In[]: products = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(train_name="train", valid_name="validation"), get_y=parent_label, item_tfms=Resize(192)) products = products.new(item_tfms=RandomResizedCrop(168, min_scale=0.8), batch_tfms=aug_transforms()) project_path = Path("/home/yaro/Workspace/fastai/") dataset_path = project_path.joinpath("for_test") dls = products.dataloaders(dataset_path) gpu = None if torch.cuda.is_available(): if gpu is not None: torch.cuda.set_device(gpu) n_gpu = torch.cuda.device_count() else: