def objective(trial):
    # Data Augmentation
    apply_tfms = trial.suggest_categorical("apply_tfms", [True, False])
    if apply_tfms:
        # MNIST is a hand-written digit dataset, so horizontal and vertical flipping
        # are disabled. Both flips become useful augmentations when the dataset is
        # CIFAR or ImageNet.
        tfms = aug_transforms(
            do_flip=False,
            flip_vert=False,
            max_rotate=trial.suggest_int("max_rotate", 0, 45),
            max_zoom=trial.suggest_float("max_zoom", 1, 2),
            p_affine=trial.suggest_float("p_affine", 0.1, 1.0, step=0.1),
        )
    data = ImageDataLoaders.from_folder(
        path, bs=BATCHSIZE, batch_tfms=tfms if apply_tfms else None
    )

    n_layers = trial.suggest_int("n_layers", 2, 5)

    n_channels = [3]  # input channels
    for i in range(n_layers):
        out_channels = trial.suggest_int("n_channels_{}".format(i), 3, 32)
        n_channels.append(out_channels)
    n_channels.append(2)  # output classes

    model = SimpleCNN(n_channels)

    learn = Learner(
        data,
        model,
        metrics=[accuracy],
        # Alternatively, FastAIPruningCallback can be passed to the fit call via its cbs argument
        cbs=FastAIPruningCallback(trial),
    )

    # See https://forums.fast.ai/t/how-to-diable-progress-bar-completely/65249/3
    # for how to disable the progress bar and logging output.
    with learn.no_bar():
        with learn.no_logging():
            learn.fit(EPOCHS)

    return learn.validate()[-1]
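
# A hedged sketch of the study driver this objective assumes (the optuna and
# fastai imports, and `path`, are defined earlier in the full example). The
# pruner choice and trial count below are illustrative, not from the original.
if __name__ == "__main__":
    study = optuna.create_study(direction="maximize", pruner=optuna.pruners.MedianPruner())
    study.optimize(objective, n_trials=100)
    print(study.best_trial)
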
Example #2
    data = ImageDataLoaders.from_dsets(train_ds,
                                       valid_ds,
                                       bs=cfg.batch_size,
                                       num_workers=4 * 4,
                                       pin_memory=True)

    if torch.cuda.is_available():
        data.cuda()
        model.cuda()

    cbs = [SaveModelCallback(monitor='dice_th', comp=np.greater)]
    learn = Learner(data,
                    model,
                    metrics=cfg.metrics,
                    wd=cfg.weight_decay,
                    loss_func=cfg.loss_func,
                    opt_func=ranger,
                    cbs=cbs,
                    model_dir=f'models_{name}{cfg.cv_method}{fold}')
    if cfg.mixed_precision_training:
        learn.to_fp16()

    # make the learner use all available GPUs
    learn.model = torch.nn.DataParallel(learn.model)

    # only train the fold selected on the command line (this code is inside a fold loop)
    if fold != args.fold:
        continue

    # Fit
    learn.fit_one_cycle(cfg.epochs, lr_max=cfg.max_learning_rate)
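
    # A hedged follow-up sketch: this fragment sits inside a per-fold loop
    # (hence the `continue` above), and SaveModelCallback keeps the best
    # checkpoint (default fname 'model') under learn.model_dir, so the best
    # weights can be reloaded and evaluated after training. Illustrative,
    # not from the original code.
    learn.load('model')
    print(learn.validate())  # [valid_loss, *metrics]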
Example #3
import argparse

import torch
from fastai.vision.all import Learner, accuracy

from src.callbacks import *
# import dataloaders
from src.build_dataloader import dataloader_s224_bs16, dataloader_s512_bs4, dataloader_s312_bs12
# NOTE: the model instances (xse_resnet50, efficient_b6, tst_model) are assumed
# to be constructed elsewhere in the project; their imports were not part of
# this excerpt.

# Three callback configurations; keep exactly one uncommented for the run mode.
# original training:
# cbs = [ChooseTask, SaveModelCallback1(monitor='accuracy'), ReduceLROnPlateau(factor=3)]
# iterative pruning:
# cbs = [ChooseTask, LoadCkpt(ckpt_name='tst_bs16_origin', every_batch=True), Prune]
# test (currently active):
cbs = [ChooseTask, LoadCkpt(ckpt_name='tst_bs16_origin'), LoadMask(mask_name='mask_tst.pth'), CountParams]
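
# Hedged sketch (not in the original code): argparse is imported above but goes
# unused in this excerpt; a run-mode flag could switch between the three
# callback configurations instead of editing comments. The flag name is
# hypothetical.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'prune', 'test'], default='test')
args, _ = parser.parse_known_args()
if args.mode == 'train':
    cbs = [ChooseTask, SaveModelCallback1(monitor='accuracy'), ReduceLROnPlateau(factor=3)]
elif args.mode == 'prune':
    cbs = [ChooseTask, LoadCkpt(ckpt_name='tst_bs16_origin', every_batch=True), Prune]
# the default 'test' mode keeps the cbs defined above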

# build the learners, moving dataloaders and models to the GPU when one is available
to_device = (lambda m: m.cuda()) if torch.cuda.is_available() else (lambda m: m)

learner1 = Learner(to_device(dataloader_s512_bs4), to_device(xse_resnet50), metrics=accuracy, cbs=cbs)
learner2 = Learner(to_device(dataloader_s512_bs4), to_device(efficient_b6), metrics=accuracy, cbs=cbs)
learner3 = Learner(to_device(dataloader_s224_bs16), to_device(xse_resnet50), metrics=accuracy, cbs=cbs)
learner4 = Learner(to_device(dataloader_s224_bs16), to_device(efficient_b6), metrics=accuracy, cbs=cbs)
learner5 = Learner(to_device(dataloader_s312_bs12), to_device(xse_resnet50), metrics=accuracy, cbs=cbs)
learner6 = Learner(to_device(dataloader_s312_bs12), to_device(efficient_b6), metrics=accuracy, cbs=cbs)
learner7 = Learner(to_device(dataloader_s224_bs16), to_device(tst_model), metrics=accuracy, cbs=cbs)
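
# A hedged usage sketch for the "test" configuration above: with the checkpoint
# and pruning mask loaded by the callbacks, evaluation reduces to validate().
# The choice of learner7 (tst_model at 224px) is illustrative.
with learner7.no_bar():
    print(learner7.validate())  # [valid_loss, accuracy]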
Example #4

# The beginning of this example (imports, the body of SimpleConv, and the
# ArcFaceClassifier / arcface_loss definitions) was cut off. A minimal
# reconstruction of the class wrapper around the methods below:
class SimpleConv(Module):
    def __init__(self, classifier):
        self.convs = ...  # convolutional feature extractor (elided in the source)
        self.classifier = classifier

    def get_embs(self, x):
        return self.convs(x)

    def forward(self, x):
        x = self.get_embs(x)
        x = self.classifier(x)
        return x


dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST),
                                   train="training",
                                   valid="testing",
                                   num_workers=0)
learn = Learner(dls,
                SimpleConv(ArcFaceClassifier(3, 10)),
                metrics=accuracy,
                loss_func=arcface_loss)

# %%
learn.fit_one_cycle(5, 5e-3)

# %%


def get_embs(model, dl):
    """Collect embeddings and labels for every batch in `dl`."""
    embs = []
    ys = []
    for bx, by in tqdm(dl):
        with torch.no_grad():
            embs.append(model.get_embs(bx))
            ys.append(by)
    return torch.cat(embs), torch.cat(ys)
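
# %%
# Hedged usage sketch: extract validation-set embeddings with the helper above
# and L2-normalise them, as is common before cosine-similarity retrieval with
# ArcFace features. The flatten/normalise step is an assumption, not from the
# original code.
embs, ys = get_embs(learn.model.eval(), dls.valid)
embs = torch.nn.functional.normalize(embs.flatten(1), dim=1)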