Example 1
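All five examples share the same scaffolding, so a representative import header is sketched once here. torch, torchvision, albumentations, and wandb are real dependencies; hyperparams, fileutils, dataloader, basemodelclass, traintest, the resize_bg_train_rrs/resize_bg helpers, and the saved_model_path global are project-local and assumed to be importable alongside these scripts.

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import transforms
from torch.optim.lr_scheduler import (CyclicLR, OneCycleLR, StepLR,
                                      MultiStepLR, ReduceLROnPlateau)
# Pre-1.0 albumentations API: IAAFliplr, Cutout, and ToTensor were removed in 1.x
from albumentations import (Compose, PadIfNeeded, RandomCrop, IAAFliplr,
                            Cutout, Normalize)
from albumentations.pytorch import ToTensor
import wandb

# Project-local modules and globals (assumed, not standard packages):
import hyperparams
import fileutils
import dataloader
import basemodelclass
import traintest
from transform_helpers import resize_bg_train_rrs, resize_bg  # hypothetical module name
saved_model_path = None  # checkpoint path referenced by the execute_model calls below
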
def main():
    device = torch.device("cuda" if not hyperparams.
                          hyperparameter_defaults['no_cuda'] else "cpu")

    hyperparams.hyperparameter_defaults['run_name'] = fileutils.rand_run_name()
    trainloader, testloader = dataloader.get_train_test_dataloader_cifar10()

    print("Initializing datasets and dataloaders")
    model_new = basemodelclass.ResNet18(
        hyperparams.hyperparameter_defaults['dropout'])
    wandb_run_init = wandb.init(
        config=hyperparams.hyperparameter_defaults,
        project=hyperparams.hyperparameter_defaults['project'])
    wandb.watch_called = False
    config = wandb.config
    print(config)
    wandb.watch(model_new, log="all")

    optimizer = optim.SGD(model_new.parameters(),
                          lr=config.lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)

    # The loss is passed as a class (not an instance); traintest.execute_model is
    # assumed to instantiate it before use.
    criterion = nn.CrossEntropyLoss

    # Alternative schedulers tried during experimentation:
    #scheduler = CyclicLR(optimizer, base_lr=config.lr*0.01, max_lr=config.lr, mode='triangular', gamma=1., cycle_momentum=False, step_size_up=1000)
    #scheduler = StepLR(optimizer, step_size=config.sched_lr_step, gamma=config.sched_lr_gamma)
    #scheduler = MultiStepLR(optimizer, milestones=[10,20], gamma=config.sched_lr_gamma)
    #scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=2, verbose=True, threshold=0.0001)
    scheduler = traintest.MyOwnReduceLROnPlateau(optimizer,
                                                 mode='min',
                                                 factor=0.2,
                                                 patience=2,
                                                 verbose=True,
                                                 threshold=0.0001)
    #scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=config.factor, patience=4, verbose=True, threshold=config.lr_decay_threshold)

    final_model_path = traintest.execute_model(
        model_new,
        hyperparams.hyperparameter_defaults,
        trainloader,
        testloader,
        device,
        dataloader.classes,
        wandb=wandb,
        optimizer_in=optimizer,
        scheduler=scheduler,
        prev_saved_model=saved_model_path,  # assumed to be a module-level global
        criterion=criterion,
        save_best=True,
        lars_mode=False,
        batch_step=False)
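
With batch_step=False, the plateau scheduler above is meant to be stepped once per epoch on a validation metric. A minimal sketch of that driving loop, with train_one_epoch and evaluate as hypothetical stand-ins for whatever traintest.execute_model does internally:

# Sketch only: epoch-level stepping for a ReduceLROnPlateau-style scheduler.
loss_fn = criterion()  # the examples pass the loss class, so instantiate it first
for epoch in range(config.epochs):
    train_one_epoch(model_new, trainloader, optimizer, loss_fn, device)  # hypothetical helper
    val_loss = evaluate(model_new, testloader, loss_fn, device)          # hypothetical helper
    scheduler.step(val_loss)  # plateau schedulers step on the metric, once per epoch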
Example 2
def main():
    device = torch.device("cuda" if not hyperparams.
                          hyperparameter_defaults['no_cuda'] else "cpu")

    hyperparams.hyperparameter_defaults['run_name'] = fileutils.rand_run_name()
    transform_train = Compose([
        PadIfNeeded(min_height=40, min_width=40, always_apply=True,
                    p=1.0),  #value=(0,0,0), border_mode=0
        RandomCrop(height=32, width=32, p=1),
        #Flip(p=0.5),
        IAAFliplr(p=0.5),  # removed in albumentations >= 1.0; HorizontalFlip(p=0.5) is the equivalent
        Cutout(num_holes=1,
               max_h_size=8,
               max_w_size=8,
               always_apply=True,
               p=1,
               fill_value=[0.4914 * 255, 0.4826 * 255, 0.44653 * 255]),
        Normalize(
            mean=[0.4914, 0.4826, 0.44653],
            std=[0.24703, 0.24349, 0.26519],
        ),
        ToTensor()
    ])
    trainloader, testloader = dataloader.get_train_test_dataloader_cifar10(
        transform_train=transform_train)

    print("Initializing datasets and dataloaders")
    #model_new = basemodelclass.ResNet18(hyperparams.hyperparameter_defaults['dropout'])
    model_new = basemodelclass.S11ResNet()

    wandb_run_init = wandb.init(
        config=hyperparams.hyperparameter_defaults,
        project=hyperparams.hyperparameter_defaults['project'])
    wandb.watch_called = False
    config = wandb.config
    print(config)
    wandb.watch(model_new, log="all")

    #trainloader, testloader = dataloader.get_train_test_dataloader_cifar10()
    optimizer = optim.SGD(model_new.parameters(),
                          lr=config.lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)

    criterion = nn.CrossEntropyLoss  # passed as a class, as in Example 1
    cycle_momentum = config.cycle_momentum == "True"
    print("Momentum cycling set to {}".format(cycle_momentum))
    scheduler = OneCycleLR(optimizer,
                           config.ocp_max_lr,
                           epochs=config.epochs,
                           cycle_momentum=cycle_momentum,
                           steps_per_epoch=len(trainloader),
                           base_momentum=config.momentum,
                           max_momentum=0.95,
                           pct_start=0.208,
                           anneal_strategy=config.anneal_strategy,
                           div_factor=config.div_factor,
                           final_div_factor=config.final_div_factor)

    # Alternative schedulers tried during experimentation:
    #scheduler = CyclicLR(optimizer, base_lr=config.lr*0.01, max_lr=config.lr, mode='triangular', gamma=1., cycle_momentum=True, step_size_up=2000)
    #scheduler = StepLR(optimizer, step_size=config.sched_lr_step, gamma=config.sched_lr_gamma)
    #scheduler = MultiStepLR(optimizer, milestones=[10,20], gamma=config.sched_lr_gamma)
    #scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=2, verbose=True, threshold=0.0001)
    #scheduler = traintest.MyOwnReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=2, verbose=True, threshold=0.0001)
    #scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=config.factor, patience=4, verbose=True, threshold=config.lr_decay_threshold)

    final_model_path = traintest.execute_model(
        model_new,
        hyperparams.hyperparameter_defaults,
        trainloader,
        testloader,
        device,
        dataloader.classes,
        wandb=wandb,
        optimizer_in=optimizer,
        scheduler=scheduler,
        prev_saved_model=saved_model_path,
        criterion=criterion,
        save_best=True,
        lars_mode=False,
        batch_step=True)
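
A note on the augmentation pipeline above: albumentations transforms, unlike torchvision ones, are called with keyword arguments on numpy arrays and return a dict, so get_train_test_dataloader_cifar10 has to adapt transform_train before handing it to a torchvision dataset. A minimal sketch of such an adapter (hypothetical; the project's dataloader module presumably does something equivalent):

import numpy as np

class AlbumentationsAdapter:
    """Makes an albumentations Compose callable like a torchvision transform."""

    def __init__(self, aug):
        self.aug = aug

    def __call__(self, img):
        # albumentations expects a numpy HWC image and returns a dict
        return self.aug(image=np.array(img))['image']

# Usage: torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
#                                     transform=AlbumentationsAdapter(transform_train))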
Example 3
def main():
    device = torch.device("cuda" if not hyperparams.
                          hyperparameter_defaults['no_cuda'] else "cpu")

    hyperparams.hyperparameter_defaults['run_name'] = fileutils.rand_run_name()

    print("Initializing datasets and dataloaders")
    train_csv_file = "/content/drive/My Drive/EVA4/S2_Train.csv"
    test_csv_file = "/content/drive/My Drive/EVA4/S2_Test.csv"
    #model_new = basemodelclass.ResNet18(hyperparams.hyperparameter_defaults['dropout'], num_classes=200)
    #trainloader, testloader = dataloader.get_imagenet_loaders(train_path, test_path, transform_train=None, transform_test=None)
    transform_train = resize_bg_train_rrs(224, 224, [0.485, 0.456, 0.406],
                                          [0.229, 0.224, 0.225])
    transform_test = resize_bg(224, 224, [0.485, 0.456, 0.406],
                               [0.229, 0.224, 0.225])
    default_model = torch.hub.load('pytorch/vision:v0.6.0',
                                   'mobilenet_v2',
                                   pretrained=True)
    model_new = basemodelclass.MobileNetV2New(default_model, 4)
    #model_new = basemodelclass.MobileNetV24C(default_model, 4)
    updatable_params = model_new.unfreeze_core_layer(
        hyperparams.hyperparameter_defaults['unfreeze_layer'])

    trainset = dataloader.QDFDataSet(train_csv_file, transform=transform_train)
    trainloader = dataloader.get_dataloader(
        trainset,
        hyperparams.hyperparameter_defaults['batch_size'],
        shuffle=True,
        num_workers=4)
    testset = dataloader.QDFDataSet(test_csv_file, transform=transform_test)
    testloader = dataloader.get_dataloader(
        testset,
        hyperparams.hyperparameter_defaults['batch_size'],
        shuffle=False,
        num_workers=4)

    wandb_run_init = wandb.init(
        config=hyperparams.hyperparameter_defaults,
        project=hyperparams.hyperparameter_defaults['project'])
    wandb.watch_called = False
    config = wandb.config
    print(config)
    wandb.watch(model_new, log="all")

    #trainloader, testloader = dataloader.get_train_test_dataloader_cifar10()
    optimizer = optim.SGD(updatable_params,
                          lr=config.lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)

    #criterion = nn.CrossEntropyLoss
    criterion = nn.NLLLoss  # passed as a class, as in Example 1; NLLLoss expects log-probability outputs
    scheduler = None
    cycle_momentum = config.cycle_momentum == "True"
    print("Momentum cycling set to {}".format(cycle_momentum))
    if config.lr_policy == "clr":
        scheduler = CyclicLR(optimizer,
                             base_lr=config.lr * 0.01,
                             max_lr=config.lr,
                             mode='triangular',
                             gamma=1.,
                             cycle_momentum=True,
                             step_size_up=256)
    else:
        scheduler = OneCycleLR(optimizer,
                               config.ocp_max_lr,
                               epochs=config.epochs,
                               cycle_momentum=cycle_momentum,
                               steps_per_epoch=len(trainloader),
                               base_momentum=config.momentum,
                               max_momentum=0.95,
                               pct_start=config.split_pct,
                               anneal_strategy=config.anneal_strategy,
                               div_factor=config.div_factor,
                               final_div_factor=config.final_div_factor)
    local_classes = [
        'Large QuadCopters', 'Flying Birds', 'Winged Drones',
        'Small QuadCopters'
    ]
    final_model_path = traintest.execute_model(
        model_new,
        hyperparams.hyperparameter_defaults,
        trainloader,
        testloader,
        device,
        local_classes,
        wandb=wandb,
        optimizer_in=optimizer,
        scheduler=scheduler,
        prev_saved_model=saved_model_path,
        criterion=criterion,
        save_best=True,
        lars_mode=False,
        batch_step=True)
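
unfreeze_core_layer is project code, but the transfer-learning pattern it implies is standard: freeze the pretrained backbone, re-enable gradients from a chosen depth onward plus the classifier head, and hand only the trainable parameters to the optimizer (as done with updatable_params above). A hypothetical sketch against torchvision's mobilenet_v2 layout (model.features / model.classifier):

def unfreeze_from(model, layer_index):
    # Hypothetical stand-in for unfreeze_core_layer.
    for param in model.parameters():
        param.requires_grad = False  # freeze everything first
    for block in model.features[layer_index:]:
        for param in block.parameters():
            param.requires_grad = True  # unfreeze the top feature blocks
    for param in model.classifier.parameters():
        param.requires_grad = True  # the new head always trains
    return [p for p in model.parameters() if p.requires_grad]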
Example 4
def main():
    device = torch.device("cuda" if not hyperparams.
                          hyperparameter_defaults['no_cuda'] else "cpu")

    hyperparams.hyperparameter_defaults['run_name'] = fileutils.rand_run_name()
    print("Initializing datasets and dataloaders")
    train_path = "/content/t2/train"
    test_path = "/content/t2/val"
    #model_new = basemodelclass.ResNet18(hyperparams.hyperparameter_defaults['dropout'], num_classes=200)
    trainloader, testloader = dataloader.get_imagenet_loaders(
        train_path, test_path, transform_train=None, transform_test=None)
    model_new = basemodelclass.S11ResNet()

    wandb_run_init = wandb.init(
        config=hyperparams.hyperparameter_defaults,
        project=hyperparams.hyperparameter_defaults['project'])
    wandb.watch_called = False
    config = wandb.config
    print(config)
    wandb.watch(model_new, log="all")

    #trainloader, testloader = dataloader.get_train_test_dataloader_cifar10()
    optimizer = optim.SGD(model_new.parameters(),
                          lr=config.lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)

    criterion = nn.CrossEntropyLoss  # passed as a class, as in Example 1
    cycle_momentum = config.cycle_momentum == "True"
    print("Momentum cycling set to {}".format(cycle_momentum))
    if config.lr_policy == "clr":
        scheduler = CyclicLR(optimizer,
                             base_lr=config.lr * 0.01,
                             max_lr=config.lr,
                             mode='triangular',
                             gamma=1.,
                             cycle_momentum=True,
                             step_size_up=256)
    else:
        scheduler = OneCycleLR(optimizer,
                               config.ocp_max_lr,
                               epochs=config.epochs,
                               cycle_momentum=cycle_momentum,
                               steps_per_epoch=len(trainloader),
                               base_momentum=config.momentum,
                               max_momentum=0.95,
                               pct_start=config.split_pct,
                               anneal_strategy=config.anneal_strategy,
                               div_factor=config.div_factor,
                               final_div_factor=config.final_div_factor)

    final_model_path = traintest.execute_model(
        model_new,
        hyperparams.hyperparameter_defaults,
        trainloader,
        testloader,
        device,
        dataloader.classes,
        wandb=wandb,
        optimizer_in=optimizer,
        scheduler=scheduler,
        prev_saved_model=saved_model_path,
        criterion=criterion,
        save_best=True,
        lars_mode=False,
        batch_step=True)
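
CyclicLR and OneCycleLR are stepped once per optimizer step rather than once per epoch, which is why Examples 2-4 compute steps_per_epoch=len(trainloader) and pass batch_step=True. A sketch of the inner loop this implies (again a stand-in for what execute_model presumably does):

# Sketch only: batch-level stepping for CyclicLR/OneCycleLR.
loss_fn = criterion()
for images, labels in trainloader:
    images, labels = images.to(device), labels.to(device)
    optimizer.zero_grad()
    loss = loss_fn(model_new(images), labels)
    loss.backward()
    optimizer.step()
    scheduler.step()  # one scheduler step per batch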
Example 5
def main():
    device = torch.device("cuda" if not hyperparams.
                          hyperparameter_defaults['no_cuda'] else "cpu")

    fileutils.rand_run_name()  # return value unused here; the other examples assign it to hyperparameter_defaults['run_name']

    # Exploratory snippets kept from earlier runs:
    # print(len(trainloader))
    # dataiter = iter(trainloader)
    # images, labels = next(dataiter)
    # print(images.shape)
    # hyperparams.print_hyperparams()
    # fileutils.get_image_samples(trainloader, classes)
    # model_new = basemodelclass.CIFARModelDepthDilate().to(device)
    # summary(model_new, input_size=(3, 32, 32))

    print("Initializing datasets and dataloaders")

    torch.manual_seed(hyperparams.hyperparameter_defaults['seed'])
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    trainloader = dataloader.get_dataloader(
        trainset,
        hyperparams.hyperparameter_defaults['batch_size'],
        shuffle=True,
        num_workers=2)


    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    testloader = dataloader.get_dataloader(
        testset,
        hyperparams.hyperparameter_defaults['batch_size'],
        shuffle=False,
        num_workers=2)


    # In this variant the optimizer and the criterion are both passed as classes;
    # traintest.execute_model is assumed to instantiate them internally.
    optimizer = optim.SGD
    criterion = nn.CrossEntropyLoss
    #model_new = basemodelclass.CIFARModelBuilder()
    #model_new = basemodelclass.CIFARModelDepthDilate()
    model_new = basemodelclass.ResNet18(
        hyperparams.hyperparameter_defaults['dropout'])
    final_model_path = traintest.execute_model(
        model_new,
        hyperparams.hyperparameter_defaults,
        trainloader,
        testloader,
        device,
        dataloader.classes,
        optimizer=optimizer,
        prev_saved_model=saved_model_path,
        criterion=criterion,
        save_best=True)
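
Unlike Examples 1-4, this variant passes the optimizer and criterion as classes (optim.SGD, nn.CrossEntropyLoss) and uses the optimizer= keyword instead of optimizer_in=, so object construction presumably happens inside execute_model, roughly as in this hypothetical sketch:

# Hypothetical: how execute_model might construct the objects it receives as classes.
opt = optimizer(model_new.parameters(),
                lr=hyperparams.hyperparameter_defaults['lr'],
                momentum=hyperparams.hyperparameter_defaults['momentum'])
loss_fn = criterion()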