Example #1
# cast both networks' parameters to float64 for higher numerical precision
for param in autoencoder.parameters():
    param.data = param.data.double()
for param in classifier.parameters():
    param.data = param.data.double()

# checkpoint filenames: [<label>_]{autoencoder,classifier}_<epoch>[_robust].pt
prefix = f'{args.label}_' if args.label else ''
postfix = '_robust' if args.adversarial else ''

# load the saved weights onto the CPU regardless of the device they were saved from
autoencoder.load_state_dict(
    torch.load(path.join(args.models_dir,
                         prefix + f'autoencoder_{args.epoch}' + postfix + '.pt'),
               map_location='cpu'))
classifier.load_state_dict(
    torch.load(path.join(args.models_dir,
                         prefix + f'classifier_{args.epoch}' + postfix + '.pt'),
               map_location='cpu'))

# flatten the encoder into a single list of primitive layers,
# unwrapping nn.Sequential containers one level deep
encoder_layers = []
for layer in autoencoder.encoder_layers:
    if isinstance(layer, nn.Sequential):
        encoder_layers.extend(layer)
    else:
        encoder_layers.append(layer)

all_l_inf, all_times = [], []  # collected L-infinity values and per-sample times
ver, corr = 0, 0               # running counts: verified / correctly classified

oracle = DL2_Oracle(learning_rate=DL2_LR,
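
The flattened layer list is useful because a verifier typically walks the encoder one primitive layer at a time. Below is a minimal sketch of recomposing and sanity-checking such a flattened list; the flatten_layers helper mirrors the loop above, and the layer sizes are hypothetical placeholders, not taken from the snippet itself.

import torch
import torch.nn as nn

def flatten_layers(modules):
    # unwrap nn.Sequential containers one level deep, as in Example #1
    flat = []
    for layer in modules:
        if isinstance(layer, nn.Sequential):
            flat.extend(layer)
        else:
            flat.append(layer)
    return flat

encoder = nn.Sequential(
    nn.Sequential(nn.Linear(784, 128), nn.ReLU()),
    nn.Linear(128, 32),
).double()                         # float64, matching the casts above
layers = flatten_layers(list(encoder))
recomposed = nn.Sequential(*layers)
x = torch.randn(1, 784, dtype=torch.float64)
# the recomposed module must compute exactly the same function
assert torch.allclose(recomposed(x), encoder(x))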
Example #2
def main():
    args = parser.parse_args()

    # seed everything to ensure reproducible results across runs
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        # ask cudnn to avoid non-deterministic kernels
        torch.backends.cudnn.deterministic = True

    ###########################################################################
    # Model
    ###########################################################################
    global best_acc1
    # create model
    if args.arch == 'LogisticRegression':
        model = LogisticRegression(input_size=13, n_classes=args.classes)
    elif args.arch == 'NeuralNet':
        model = NeuralNet(input_size=13, hidden_size=[32, 16],
                          n_classes=args.classes)  # hidden_size=[64, 32]
    else:
        raise ValueError("unsupported architecture: {}".format(args.arch))

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
        torch.cuda.set_device(args.gpu)
        # note: cudnn autotuning speeds up training but can undermine the
        # determinism requested above
        torch.backends.cudnn.benchmark = True
        model = model.cuda(args.gpu)

    # print(model)
    if args.train_file:
        print(30 * '=')
        print(summary(model, input_size=(1, 13),
                      batch_size=args.batch_size, device='cpu'))
        print(30 * '=')

    ###########################################################################
    # save directory
    ###########################################################################
    save_dir = os.path.join(os.getcwd(), args.save_dir)
    save_dir += ('/arch[{}]_optim[{}]_lr[{}]_lrsch[{}]_batch[{}]_'
                 'WeightedSampling[{}]').format(args.arch,
                                                args.optim,
                                                args.lr,
                                                args.lr_scheduler,
                                                args.batch_size,
                                                args.weighted_sampling)
    if args.suffix:
        save_dir += '_{}'.format(args.suffix)

    os.makedirs(save_dir, exist_ok=True)

    ###########################################################################
    # Criterion and optimizer
    ###########################################################################
    # Initialise criterion and optimizer
    if args.gpu is not None:
        criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    else:
        criterion = nn.CrossEntropyLoss()

    # define optimizer
    print("=> using '{}' optimizer".format(args.optim))
    if args.optim == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else:  # default is adam
        optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                     betas=(0.9, 0.999), eps=1e-08,
                                     weight_decay=args.weight_decay,
                                     amsgrad=False)

    ###########################################################################
    # Resume training and load a checkpoint
    ###########################################################################
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    ###########################################################################
    # Data Augmentation
    ###########################################################################
    # TODO

    ###########################################################################
    # Learning rate scheduler
    ###########################################################################
    print("=> using '{}' initial learning rate (lr)".format(args.lr))
    # define learning rate scheduler; this keeps the raw option string unless
    # it is replaced by a scheduler object in one of the branches below
    scheduler = args.lr_scheduler
    if args.lr_scheduler == 'reduce':
        print("=> using '{}' lr_scheduler".format(args.lr_scheduler))
        # Reduce the learning rate when the monitored metric stops improving;
        # mode='max' because the metric passed to step() below is an accuracy.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                               mode='max',
                                                               factor=0.5,
                                                               patience=10)
    elif args.lr_scheduler == 'cyclic':
        print("=> using '{}' lr_scheduler".format(args.lr_scheduler))
        scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,
                                                      base_lr=0.00005,
                                                      max_lr=0.005)
    elif args.lr_scheduler == 'cosine':
        print("=> using '{}' lr_scheduler".format(args.lr_scheduler))
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=100,
                                                               eta_min=0,
                                                               last_epoch=-1)

    ###########################################################################
    # load train data
    ###########################################################################
    if args.train_file:
        train_dataset = HeartDiseaseDataset(csv=args.train_file, label_names=LABELS)
        if args.weighted_sampling:
            train_sampler = torch.utils.data.WeightedRandomSampler(train_dataset.sample_weights,
                                                                   len(train_dataset),
                                                                   replacement=True)
        else:
            train_sampler = None

        ###########################################################################
        # update criterion
        print('class_sample_count ', train_dataset.class_sample_count)
        print('class_probability ', train_dataset.class_probability)
        print('class_weights ', train_dataset.class_weights)
        print('sample_weights ', train_dataset.sample_weights)

        if args.weighted_loss:
            if args.gpu is not None:
                criterion = nn.CrossEntropyLoss(weight=train_dataset.class_weights).cuda(args.gpu)
            else:
                criterion = nn.CrossEntropyLoss(weight=train_dataset.class_weights)

        # DataLoader forbids combining shuffle with a custom sampler, hence the guard
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=(train_sampler is None),
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   sampler=train_sampler)

    ###########################################################################
    # load validation data
    ###########################################################################
    if args.valid_file:
        valid_dataset = HeartDiseaseDataset(csv=args.valid_file, label_names=LABELS)
        val_loader = torch.utils.data.DataLoader(valid_dataset,
                                                 batch_size=args.batch_size, shuffle=False,
                                                 num_workers=args.workers, pin_memory=True)

        if args.evaluate:
            # retrieve correct save path from saved model
            save_dir = os.path.split(args.resume)[0]
            validate(val_loader, model, criterion, save_dir, args)
            return

    ###########################################################################
    # Train the model
    ###########################################################################
    for epoch in range(args.start_epoch, args.epochs):
        # adjust_learning_rate(optimizer, epoch, args)
        print_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer,
              scheduler, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, save_dir, args)

        # update learning rate based on lr_scheduler
        # (a CyclicLR scheduler is presumably stepped per batch inside train())
        if args.lr_scheduler == 'reduce':
            scheduler.step(acc1)
        elif args.lr_scheduler == 'cosine':
            scheduler.step()

        # remember best acc@1 and save checkpoint
        is_best = acc1 >= best_acc1
        best_acc1 = max(acc1, best_acc1)

        print("Saving model [{}]...".format(save_dir))
        save_checkpoint({'epoch': epoch + 1,
                         'arch': args.arch,
                         'state_dict': model.state_dict(),
                         'best_acc1': best_acc1,
                         'optimizer': optimizer.state_dict(),
                         'criterion': criterion, },
                        is_best,
                        save_dir=save_dir)
        print(30 * '=')
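
Example #2 relies on a module-level parser and on helpers (HeartDiseaseDataset, train, validate, print_learning_rate, save_checkpoint, LABELS, best_acc1) defined elsewhere in the script. A minimal sketch of an argument parser consistent with the attributes main() reads, plus the usual entry point, follows; every default and choice list below is an illustrative assumption, not a value from the original.

import argparse

parser = argparse.ArgumentParser(description='Heart disease classifier training')
parser.add_argument('--arch', default='NeuralNet',
                    choices=['LogisticRegression', 'NeuralNet'])
parser.add_argument('--classes', type=int, default=2)
parser.add_argument('--train-file', default='')
parser.add_argument('--valid-file', default='')
parser.add_argument('--save-dir', default='checkpoints')
parser.add_argument('--suffix', default='')
parser.add_argument('--optim', default='adam', choices=['sgd', 'adam'])
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=1e-4)
parser.add_argument('--lr-scheduler', default='reduce',
                    choices=['reduce', 'cyclic', 'cosine', 'none'])
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--resume', default='')
parser.add_argument('--evaluate', action='store_true')
parser.add_argument('--weighted-sampling', action='store_true')
parser.add_argument('--weighted-loss', action='store_true')

best_acc1 = 0  # module-level, matching the `global best_acc1` in main()

if __name__ == '__main__':
    main()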