Example #1
import pytorch_lightning as pl
import torchvision.transforms as T
from pytorch_cnn_trainer import dataset
# CNN is the project's LightningModule wrapper; its import path is not shown
# in the original snippet.


def test_lit_trainer():
    pl.seed_everything(seed=42)

    print("Creating Train and Validation Dataset")
    train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])
    valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])

    train_set, valid_set = dataset.create_cifar10_dataset(
        train_transforms, valid_transforms)
    print("Train and Validation Datasets Created")

    print("Creating DataLoaders")
    train_loader, valid_loader = dataset.create_loaders(train_set, valid_set)

    print("Train and Validation Dataloaders Created")
    print("Creating Model")

    print("Train and Validation Dataloaders Created")

    print("Creating Model")
    all_supported_models = [
        "resnet18",
        # "resnet34",
        # "resnet50",
        # "resnet101",
        # "resnet152",
        # "resnext50_32x4d",
        # "resnext101_32x8d",
        # "vgg11",
        # "vgg13",
        # "vgg16",
        # "vgg19",
        # "mobilenet",
        # "mnasnet0_5",
        # "mnasnet1_0",
    ]
    for model_name in all_supported_models:
        model = CNN(model_name, num_classes=10, pretrained=True)
        # print(model)
        trainer = pl.Trainer(fast_dev_run=True)
        trainer.fit(model, train_loader, valid_loader)
    return True
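trainer.fit only needs the model to be a LightningModule, and fast_dev_run=True runs a single batch through train and validation as a smoke test. A minimal sketch of what a wrapper like CNN has to implement (LitCNN is hypothetical, not the library's class, and it assumes a ResNet-style backbone with an fc head):

# Hypothetical sketch of a LightningModule like the CNN wrapper above.
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision


class LitCNN(pl.LightningModule):
    def __init__(self, model_name, num_classes, pretrained=True):
        super().__init__()
        backbone = getattr(torchvision.models, model_name)(pretrained=pretrained)
        backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)  # new head
        self.model = backbone
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        images, targets = batch
        return self.criterion(self(images), targets)

    def validation_step(self, batch, batch_idx):
        images, targets = batch
        self.log("val_loss", self.criterion(self(images), targets))

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)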
Example #2
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.cuda import amp
from torch.optim.swa_utils import SWALR
from pytorch_cnn_trainer import dataset, engine, model_factory

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def test_models():
    # print("Setting Seed for the run, seed = {}".format(SEED))
    # utils.seed_everything(SEED)
    # We don't need seeds for tests

    print("Creating Train and Validation Dataset")
    train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])
    valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])

    train_set, valid_set = dataset.create_cifar10_dataset(
        train_transforms, valid_transforms)
    print("Train and Validation Datasets Created")

    print("Creating DataLoaders")
    train_loader, valid_loader = dataset.create_loaders(train_set, valid_set)

    print("Train and Validation Dataloaders Created")
    print("Creating Model")

    all_supported_models = [
        "resnet18",
        # "resnet34",
        # "resnet50",
        # "resnet101",
        # "resnet152",
        # "resnext50_32x4d",
        # "resnext101_32x8d",
        # "vgg11",
        # "vgg13",
        # "vgg16",
        # "vgg19",
        # "mobilenet",
        # "mnasnet0_5",
        # "mnasnet1_0",
    ]

    for model_name in all_supported_models:
        model = model_factory.create_torchvision_model(
            model_name, num_classes=10, pretrained=False
        )  # pretrained weights aren't needed; the test only checks the forward pass

        if torch.cuda.is_available():
            print("Model Created. Moving it to CUDA")
        else:
            print("Model Created. Training on CPU only")
        model.to(device)
        optimizer = optim.Adam(model.parameters(), lr=1e-3)

        criterion = nn.CrossEntropyLoss()  # cross-entropy loss for classification

        # early_stopper = utils.EarlyStopping(
        #     patience=7, verbose=True, path=SAVE_PATH
        # )
        # We do not need early stopping either

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=300)

        swa_scheduler = SWALR(optimizer,
                              anneal_strategy="linear",
                              anneal_epochs=20,
                              swa_lr=0.05)
        swa_start = 2

        if torch.cuda.is_available():
            scaler = amp.GradScaler()

            train_metrics = engine.train_step(
                model,
                train_loader,
                criterion,
                device,
                optimizer,
                num_batches=10,
                fp16_scaler=scaler,
            )

            history2 = engine.fit(
                1,
                model,
                train_loader,
                valid_loader,
                criterion,
                device,
                optimizer,
                num_batches=10,
                grad_penalty=True,
                use_fp16=True,
            )

        train_metrics = engine.train_step(
            model,
            train_loader,
            criterion,
            device,
            optimizer,
            num_batches=10,
        )

        history = engine.sanity_fit(
            model,
            train_loader,
            valid_loader,
            criterion,
            device,
            num_batches=10,
            grad_penalty=True,
        )

        history2 = engine.fit(
            1,
            model,
            train_loader,
            valid_loader,
            criterion,
            device,
            optimizer,
            num_batches=10,
            grad_penalty=True,
        )

        history3 = engine.fit(
            3,
            model,
            train_loader,
            valid_loader,
            criterion,
            device,
            optimizer,
            scheduler=scheduler,
            num_batches=10,
            grad_penalty=True,
            swa_start=swa_start,
            swa_scheduler=swa_scheduler,
        )

    print("Done !!")
    return 1
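For reference, the swa_start and swa_scheduler arguments passed to engine.fit mirror the standard torch.optim.swa_utils recipe. A plain-PyTorch sketch of that loop (fit_with_swa and train_one_epoch are hypothetical stand-ins, not library code):

# Sketch of the SWA training recipe that engine.fit's swa_start/swa_scheduler
# arguments wrap, using the standard torch.optim.swa_utils API.
# train_one_epoch is a hypothetical stand-in for the inner training loop.
import torch
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn


def fit_with_swa(model, train_loader, criterion, device, optimizer,
                 epochs=5, swa_start=2):
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=300)
    swa_scheduler = SWALR(optimizer, anneal_strategy="linear",
                          anneal_epochs=20, swa_lr=0.05)
    swa_model = AveragedModel(model)  # keeps the running weight average
    for epoch in range(epochs):
        train_one_epoch(model, train_loader, criterion, device, optimizer)
        if epoch >= swa_start:
            swa_model.update_parameters(model)  # fold current weights into the average
            swa_scheduler.step()                # anneal towards the SWA learning rate
        else:
            scheduler.step()                    # regular schedule before SWA kicks in
    update_bn(train_loader, swa_model, device=device)  # refresh BN statistics
    return swa_model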
Example #3
import torch
from tqdm import tqdm
import torchvision.transforms as T
from pytorch_cnn_trainer import dataset
from pytorch_cnn_trainer import model_factory
import config
from pytorch_cnn_trainer import utils
from pytorch_cnn_trainer import engine

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if __name__ == "__main__":
    print(f"Setting Seed for the run, seed = {config.SEED}")
    utils.seed_everything(config.SEED)

    print("Creating Train and Validation Dataset")
    train_set, valid_set = dataset.create_cifar10_dataset(
        config.train_transforms, config.valid_transforms)
    print("Train and Validation Datasets Created")

    print("Creating DataLoaders")
    train_loader, valid_loader = dataset.create_loaders(
        train_set,
        valid_set,
        config.TRAIN_BATCH_SIZE,
        config.VALID_BATCH_SIZE,
        config.NUM_WORKERS,
    )

    print("Train and Validation Dataloaders Created")
    print("Creating Model")

    # model = model.create_timm_model(
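The config module read above is not shown. A plausible config.py carrying the attributes this script uses (the values are illustrative, not the example's actual settings):

# Assumed layout of config.py; only the attributes referenced above.
import torchvision.transforms as T

SEED = 42
TRAIN_BATCH_SIZE = 128
VALID_BATCH_SIZE = 128
NUM_WORKERS = 4

train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])
valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])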
Example #4
# Uses the same imports as Example #2, plus the project's plotter module
# for plot_results.
def test_plotting():
    print("Creating Train and Validation Dataset")
    train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])
    valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])

    train_set, valid_set = dataset.create_cifar10_dataset(
        train_transforms, valid_transforms)
    print("Train and Validation Datasets Created")

    print("Creating DataLoaders")
    train_loader, valid_loader = dataset.create_loaders(train_set, valid_set)

    print("Train and Validation Dataloaders Created")
    print("Creating Model")

    all_supported_models = [
        "resnet18",
        # "resnet34",
        # "resnet50",
        # "resnet101",
        # "resnet152",
        # "resnext50_32x4d",
        # "resnext101_32x8d",
        # "vgg11",
        # "vgg13",
        # "vgg16",
        # "vgg19",
        # "mobilenet",
        # "mnasnet0_5",
        # "mnasnet1_0",
    ]

    for model_name in all_supported_models:
        model = model_factory.create_torchvision_model(
            model_name, num_classes=10, pretrained=False
        )  # pretrained weights aren't needed; the test only checks the forward pass

        if torch.cuda.is_available():
            print("Model Created. Moving it to CUDA")
        else:
            print("Model Created. Training on CPU only")
        model.to(device)
        optimizer = optim.Adam(model.parameters(), lr=1e-3)

        criterion = nn.CrossEntropyLoss()  # cross-entropy loss for classification

        # early_stopper = utils.EarlyStopping(
        #     patience=7, verbose=True, path=SAVE_PATH
        # )
        # We do not need early stopping either

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=300)

        swa_scheduler = SWALR(optimizer,
                              anneal_strategy="linear",
                              anneal_epochs=20,
                              swa_lr=0.05)
        swa_start = 2
        epoch = 5

        history = engine.fit(epoch, model, train_loader, valid_loader,
                             criterion, device, optimizer)
        plotter.plot_results(history, train_metric='loss',
                             val_metric='top5_acc')
    return 1
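plot_results comes from the project's plotter module. If you only have the history object, a minimal matplotlib stand-in looks like this (the train_/val_ key layout of history is an assumption, not the library's documented format):

# Minimal stand-in for plotter.plot_results; assumes history maps
# "train_<metric>" / "val_<metric>" keys to per-epoch value lists.
import matplotlib.pyplot as plt


def plot_history(history, train_metric="loss", val_metric="top5_acc"):
    plt.plot(history[f"train_{train_metric}"], label=f"train {train_metric}")
    plt.plot(history[f"val_{val_metric}"], label=f"val {val_metric}")
    plt.xlabel("epoch")
    plt.ylabel("metric value")
    plt.legend()
    plt.show()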
Example #5
    # (The snippet starts mid-script; MODEL_NAME and NUM_CLASSES are defined
    # earlier in the original file and are used below.)
    IN_CHANNELS = 3
    PRETRAINED = True  # If True -> fine-tuning, else training from scratch
    EPOCHS = 5
    EARLY_STOPPING = True  # Enable early stopping on validation loss
    SAVE_PATH = f"{MODEL_NAME}.pt"
    SEED = 42

    # Train and validation Transforms which you would like
    train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])
    valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5, ), (0.5, ))])

    print(f"Setting Seed for the run, seed = {SEED}")
    utils.seed_everything(SEED)

    print("Creating Train and Validation Dataset")
    train_set, valid_set = dataset.create_cifar10_dataset(
        train_transforms, valid_transforms)
    print("Train and Validation Datasets Created")

    print("Creating DataLoaders")
    train_loader, valid_loader = dataset.create_loaders(train_set, valid_set)
    print("Train and Validation Dataloaders Created")

    print("Creating Model")
    model = model_factory.create_torchvision_model(MODEL_NAME,
                                                   num_classes=NUM_CLASSES,
                                                   pretrained=PRETRAINED)

    if torch.cuda.is_available():
        print("Model Created. Moving it to CUDA")
    else:
        print("Model Created. Training on CPU only")
Example #6
# Uses the same imports as Example #2, plus model_factory.create_timm_model,
# which requires the timm package.
def test_models():
    # print("Setting Seed for the run, seed = {}".format(SEED))
    # utils.seed_everything(SEED)
    # We don't need seeds for tests

    MODEL_NAME = "efficientnet_b0"  # For now a very small model

    print("Creating Train and Validation Dataset")
    train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])
    valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])

    # Test defaults
    train_set, valid_set = dataset.create_cifar10_dataset(
        train_transforms, valid_transforms
    )
    print("Train and Validation Datasets Created")

    print("Creating DataLoaders")
    train_loader, valid_loader = dataset.create_loaders(train_set, valid_set)

    print("Train and Validation Dataloaders Created")
    print("Creating Model")

    # The complete check would loop over every model in timm.list_models(),
    # but that would exceed the GitHub Actions time limits.
    model = model_factory.create_timm_model(
        MODEL_NAME,
        num_classes=10,
        in_channels=3,
        pretrained=False,
    )

    if torch.cuda.is_available():
        print("Model Created. Moving it to CUDA")
    else:
        print("Model Created. Training on CPU only")
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    criterion = nn.CrossEntropyLoss()  # cross-entropy loss for classification

    # early_stopper = utils.EarlyStopping(
    #     patience=7, verbose=True, path=SAVE_PATH
    # )
    # We do not need early stopping either

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=300)

    swa_scheduler = SWALR(
        optimizer, anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05
    )
    swa_start = 2

    if torch.cuda.is_available():
        scaler = amp.GradScaler()

        train_metrics = engine.train_step(
            model,
            train_loader,
            criterion,
            device,
            optimizer,
            num_batches=10,
            fp16_scaler=scaler,
        )

        history2 = engine.fit(
            1,
            model,
            train_loader,
            valid_loader,
            criterion,
            device,
            optimizer,
            num_batches=10,
            grad_penalty=True,
            use_fp16=True,
        )

    train_metrics = engine.train_step(
        model,
        train_loader,
        criterion,
        device,
        optimizer,
        num_batches=10,
    )

    history = engine.sanity_fit(
        model,
        train_loader,
        valid_loader,
        criterion,
        device,
        num_batches=10,
        grad_penalty=True,
    )

    history2 = engine.fit(
        1,
        model,
        train_loader,
        valid_loader,
        criterion,
        device,
        optimizer,
        num_batches=10,
        grad_penalty=True,
    )

    history3 = engine.fit(
        3,
        model,
        train_loader,
        valid_loader,
        criterion,
        device,
        optimizer,
        scheduler=scheduler,
        num_batches=10,
        grad_penalty=True,
        swa_start=swa_start,
        swa_scheduler=swa_scheduler,
    )

    print("Done !!")
    return 1
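Finally, the fp16_scaler and use_fp16 paths exercised in these tests follow the standard torch.cuda.amp pattern; a sketch of the canonical loop:

# Standard mixed-precision training step with torch.cuda.amp, the pattern
# behind the fp16_scaler / use_fp16 arguments above.
import torch
from torch.cuda import amp


def train_fp16(model, train_loader, criterion, device, optimizer):
    scaler = amp.GradScaler()
    for images, targets in train_loader:
        images, targets = images.to(device), targets.to(device)
        optimizer.zero_grad()
        with amp.autocast():                  # forward pass in mixed precision
            loss = criterion(model(images), targets)
        scaler.scale(loss).backward()         # scale loss to avoid fp16 underflow
        scaler.step(optimizer)                # unscales grads, then steps
        scaler.update()                       # adjust scale for next iteration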