Example #1
def imagenette_loader(path):
    ''' Download and extract the Imagenette-320 (v2) dataset to `path` '''
    try:
        from fastai.vision.all import untar_data, URLs
    except ImportError:
        print('fastai not found')
        print('Please install fastai')
        print(
            'Command for installation: "conda install -c fastai -c pytorch fastai"'
        )
        return
    untar_data(URLs.IMAGENETTE_320, dest=path)
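# A minimal usage sketch for the loader above; the destination directory is a
# hypothetical choice, not part of the original example.
if __name__ == '__main__':
    imagenette_loader('./data/imagenette-320')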
Example #2
def main():
    # Parse command-line arguments
    args = parse_args()

    # Split data between training and testing
    splitter = GrandparentSplitter(train_name="training", valid_name="testing")

    # Prepare a DataBlock, a generic container for quickly building Datasets and DataLoaders
    mnist = DataBlock(
        blocks=(ImageBlock(PILImage), CategoryBlock),
        get_items=get_image_files,
        splitter=splitter,
        get_y=parent_label,
    )

    # Download and untar the MNIST dataset, then create DataLoaders from the DataBlock
    data = mnist.dataloaders(untar_data(URLs.MNIST), bs=256, num_workers=0)

    # Enable auto logging
    mlflow.fastai.autolog()

    # Create Learner model
    learn = cnn_learner(data, resnet18)

    # Start MLflow session
    with mlflow.start_run():
        # Train and fit with default or supplied command line arguments
        learn.fit_one_cycle(args.epochs, args.lr)
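# parse_args() is referenced above but not shown; a minimal argparse sketch that
# matches the args.epochs / args.lr usage. The flag names and default values are
# assumptions, not part of the original example.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description="Train an MNIST classifier with fastai and MLflow autologging")
    parser.add_argument("--epochs", type=int, default=5, help="number of training epochs (assumed default)")
    parser.add_argument("--lr", type=float, default=0.01, help="learning rate for fit_one_cycle (assumed default)")
    return parser.parse_args()

if __name__ == "__main__":
    main()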
Example #3
def main(epochs):
    Task.init(project_name="examples", task_name="fastai v2")

    path = untar_data(URLs.PETS)
    files = get_image_files(path / "images")

    dls = ImageDataLoaders.from_name_func(path,
                                          files,
                                          label_func,
                                          item_tfms=Resize(224),
                                          num_workers=0)
    dls.show_batch()
    learn = cnn_learner(dls, resnet34, metrics=error_rate)
    learn.fine_tune(epochs)
    learn.show_results()
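# label_func is used above but not defined in this snippet; a plausible definition
# for the Oxford-IIIT Pets filenames, matching the is_cat convention used in the
# later examples (cat breeds start with an uppercase letter). The epoch count in
# the entry point is an assumption.
def label_func(fname):
    return fname[0].isupper()

if __name__ == "__main__":
    main(epochs=3)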
Example #4
def create_dataloaders() -> DataLoaders:
    """
    Create the dataloaders for the cats vs. dogs dataset.
    """
    path = untar_data(URLs.PETS) / "images"
    dls = ImageDataLoaders.from_name_func(
        path,
        get_image_files(path),
        valid_pct=0.2,
        batch_size=8,
        seed=42,
        label_func=is_cat,
        item_tfms=Resize(224),
    )
    return dls
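# create_dataloaders() relies on an is_cat label function that is not shown in this
# snippet; the definition below follows the convention used by the other Pets
# examples in this collection. The architecture, metric, and epoch count in the
# usage sketch are assumptions.
from fastai.vision.all import cnn_learner, error_rate, resnet34

def is_cat(x):
    # Pets filenames for cat breeds start with an uppercase letter.
    return x[0].isupper()

dls = create_dataloaders()
learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(1)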
Example #5
import urllib.request

from fastai.vision.all import Learner
from fastai.vision.all import SimpleCNN
from fastai.vision.all import aug_transforms
from fastai.vision.all import untar_data
from fastai.vision.all import URLs

# TODO(crcrpar): Remove the three lines below once fastai becomes compatible with torchvision v0.9.
# Register a global custom opener to avoid HTTP Error 403: Forbidden when downloading MNIST.
# This is a temporary fix until torchvision v0.9 is released.
opener = urllib.request.build_opener()
opener.addheaders = [("User-agent", "Mozilla/5.0")]
urllib.request.install_opener(opener)

BATCHSIZE = 128
EPOCHS = 10

path = untar_data(URLs.MNIST_SAMPLE)


def objective(trial):
    # Data Augmentation
    apply_tfms = trial.suggest_categorical("apply_tfms", [True, False])
    if apply_tfms:
        # MNIST is a hand-written digit dataset, so horizontal and vertical flipping are
        # disabled. However, these flips can be important for datasets such as CIFAR or
        # ImageNet.
        tfms = aug_transforms(
            do_flip=False,
            flip_vert=False,
            max_rotate=trial.suggest_int("max_rotate", 0, 45),
            max_zoom=trial.suggest_float("max_zoom", 1, 2),
            p_affine=trial.suggest_float("p_affine", 0.1, 1.0, step=0.1),
        )
Example #6
# https://www.fast.ai/
# https://course.fast.ai/
# https://github.com/fastai/fastbook
# https://github.com/fastai/fastbook/blob/master/01_intro.ipynb


import fastai.vision.all as fa

path = fa.untar_data(fa.URLs.PETS)/"images"

def is_cat(x):
    return x[0].isupper()


# https://github.com/fastai/fastbook/blob/master/01_intro.ipynb
# continue from this page
# fa.ImageDataLoaders.from_name_func(
#     path, fa.get_image_files?
# )
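# A sketch of how the commented-out call above could be finished, following the
# pattern in the linked fastbook 01_intro notebook; the validation split, seed,
# and epoch count are assumptions.
dls = fa.ImageDataLoaders.from_name_func(
    path,
    fa.get_image_files(path),
    valid_pct=0.2,            # hold out 20% of the images for validation
    seed=42,                  # fixed seed so the split is reproducible
    label_func=is_cat,        # cat breeds have filenames starting with an uppercase letter
    item_tfms=fa.Resize(224),
)

learn = fa.cnn_learner(dls, fa.resnet34, metrics=fa.error_rate)
learn.fine_tune(1)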



Example #7
from fastai.vision.all import untar_data, URLs, ImageDataLoaders, get_image_files, Resize, error_rate, resnet34, \
    cnn_learner

from labml import lab, experiment
from labml.utils.fastai import LabMLFastAICallback

path = untar_data(
    URLs.PETS,
    dest=lab.get_data_path(),
    fname=lab.get_data_path() / URLs.path(URLs.PETS).name) / 'images'


def is_cat(x):
    return x[0].isupper()


dls = ImageDataLoaders.from_name_func(path,
                                      get_image_files(path),
                                      valid_pct=0.2,
                                      seed=42,
                                      label_func=is_cat,
                                      item_tfms=Resize(224))
# Train the model ⚡
learn = cnn_learner(dls,
                    resnet34,
                    metrics=error_rate,
                    cbs=LabMLFastAICallback())

with experiment.record(name='pets', exp_conf=learn.labml_configs()):
    learn.fine_tune(5)
Example #8
        ch_in = [3, 6, 12, 24]
        convs = [ConvLayer(c, c * 2, stride=2) for c in ch_in]
        convs += [AdaptiveAvgPool(), Flatten(), nn.Linear(48, emb_size)]
        self.convs = nn.Sequential(*convs)
        self.classifier = classifier

    def get_embs(self, x):
        return self.convs(x)

    def forward(self, x):
        x = self.get_embs(x)
        x = self.classifier(x)
        return x


dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST),
                                   train="training",
                                   valid="testing",
                                   num_workers=0)
learn = Learner(dls,
                SimpleConv(ArcFaceClassifier(3, 10)),
                metrics=accuracy,
                loss_func=arcface_loss)

# %%
learn.fit_one_cycle(5, 5e-3)

# %%


def get_embs(model, dl):
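    # The original snippet ends here; the rest of this body is a plausible
    # completion (a sketch, not the original implementation) that collects the
    # embeddings and labels for every batch via model.get_embs defined above.
    import torch  # local import, since this fragment does not show its imports
    model.eval()
    embs, labels = [], []
    with torch.no_grad():
        for xb, yb in dl:
            embs.append(model.get_embs(xb).cpu())
            labels.append(yb.cpu())
    return torch.cat(embs), torch.cat(labels)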