Example #1
        with open('output.bin', 'wb') as fp:
            pickle.dump((features, outputs, targets), fp)
            print('output dumped...')

    return total_top1 / total_num * 100, total_top5 / total_num * 100


if __name__ == '__main__':

    feature_dim = 128   # dimensionality of the projection head output
    batch_size = 16
    k = 200             # number of nearest neighbours in the kNN monitor
    temperature = 0.5   # softmax temperature for the kNN vote
    c = 10              # number of CIFAR10 classes
    memory_data = utils.CIFAR10Pair(root='data',
                                    train=True,
                                    transform=utils.test_transform,
                                    download=True)
    memory_loader = DataLoader(memory_data,
                               batch_size=batch_size,
                               shuffle=False)

    test_data = utils.CIFAR10Pair(root='data',
                                  train=False,
                                  transform=utils.test_transform,
                                  download=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

    device = torch.device("cuda")
    model = Model(feature_dim).to(device)
    model_path = 'results/model.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
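
The excerpt above stops at checkpoint loading; the two percentages it returns come from a weighted-kNN evaluation of encoder features against a bank built from memory_loader. Below is a minimal sketch of that protocol, assuming the model returns a (feature, out) pair as in the standard SimCLR reference code; knn_eval is a hypothetical name.

import torch

def knn_eval(net, memory_loader, test_loader, c, k=200, temperature=0.5):
    # Weighted kNN monitor (a sketch): classify each test feature by a
    # temperature-weighted vote of its k nearest training features.
    net.eval()
    total_top1, total_top5, total_num, feature_bank = 0.0, 0.0, 0, []
    with torch.no_grad():
        # build the feature bank from the (unshuffled) training set
        for data, _, target in memory_loader:
            feature, _ = net(data.cuda(non_blocking=True))
            feature_bank.append(feature)
        feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()  # [D, N]
        feature_labels = torch.tensor(memory_loader.dataset.targets,
                                      device=feature_bank.device)
        for data, _, target in test_loader:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            feature, _ = net(data)
            total_num += data.size(0)
            sim_matrix = torch.mm(feature, feature_bank)  # [B, N]
            sim_weight, sim_indices = sim_matrix.topk(k=k, dim=-1)
            sim_weight = (sim_weight / temperature).exp()
            sim_labels = torch.gather(feature_labels.expand(data.size(0), -1),
                                      dim=-1, index=sim_indices)
            # one-hot votes weighted by similarity, summed per class
            one_hot = torch.zeros(data.size(0) * k, c, device=sim_labels.device)
            one_hot = one_hot.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)
            pred_scores = torch.sum(one_hot.view(data.size(0), -1, c)
                                    * sim_weight.unsqueeze(dim=-1), dim=1)
            pred_labels = pred_scores.argsort(dim=-1, descending=True)
            total_top1 += (pred_labels[:, :1] == target.unsqueeze(-1)).any(dim=-1).float().sum().item()
            total_top5 += (pred_labels[:, :5] == target.unsqueeze(-1)).any(dim=-1).float().sum().item()
    return total_top1 / total_num * 100, total_top5 / total_num * 100
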
Example #2
def train():
    n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
    n_iters_all = n_iters_per_epoch * args.n_epochs
    epsilon = 1e-6

    model, criteria_x, criteria_u = set_model()
    lb_guessor = LabelGuessor(thresh=args.thr)
    ema = EMA(model, args.ema_alpha)

    wd_params, non_wd_params = [], []
    for param in model.parameters():
        if len(param.size()) == 1:
            non_wd_params.append(param)
        else:
            wd_params.append(param)
    param_list = [{'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
    optim = torch.optim.SGD(param_list, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)
    lr_schdlr = WarmupCosineLrScheduler(optim, max_iter=n_iters_all, warmup_iter=0)

    dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch, 
                                                         L=args.n_labeled, seed=args.seed)
    train_args = dict(
        model=model,
        criteria_x=criteria_x,
        criteria_u=criteria_u,
        optim=optim,
        lr_schdlr=lr_schdlr,
        ema=ema,
        dltrain_x=dltrain_x,
        dltrain_u=dltrain_u,
        dltrain_all=dltrain_all,
        lb_guessor=lb_guessor,
    )
    n_labeled = args.n_labeled // args.n_classes  # labeled examples per class
    best_acc, top1 = -1, -1
    results = {'top 1 acc': [], 'best_acc': []}
    
    b_schedule = [args.n_epochs // 2, 3 * args.n_epochs // 4]
    if args.boot_schedule == 1:
        step = args.n_epochs // 3
        b_schedule = [step, 2 * step]
    elif args.boot_schedule == 2:
        step = args.n_epochs // 4
        b_schedule = [step, 2 * step, 3 * step]
        
    for e in range(args.n_epochs):
        if args.bootstrap > 1 and (e in b_schedule):
            seed = 99
            n_labeled *= args.bootstrap
            name = sort_unlabeled(ema, n_labeled)
            print("Bootstrap at epoch ", e," Name = ",name)
            dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch, 
                                                                 L=10*n_labeled, seed=seed, name=name)
            train_args = dict(
                model=model,
                criteria_x=criteria_x,
                criteria_u=criteria_u,
                optim=optim,
                lr_schdlr=lr_schdlr,
                ema=ema,
                dltrain_x=dltrain_x,
                dltrain_u=dltrain_u,
                dltrain_all=dltrain_all,
                lb_guessor=lb_guessor,
            )

        model.train()
        train_one_epoch(**train_args)
        torch.cuda.empty_cache()

        if args.test == 0 or args.lam_clr < epsilon:
            top1 = evaluate(ema) * 100
        elif args.test == 1:
            memory_data = utils.CIFAR10Pair(root='dataset', train=True, transform=utils.test_transform, download=False)
            memory_data_loader = DataLoader(memory_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
            test_data = utils.CIFAR10Pair(root='dataset', train=False, transform=utils.test_transform, download=False)
            test_data_loader = DataLoader(test_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
            c = len(memory_data.classes)  # 10 for CIFAR10
            top1 = test(model, memory_data_loader, test_data_loader, c, e)
            
        best_acc = max(best_acc, top1)

        results['top 1 acc'].append('{:.4f}'.format(top1))
        results['best_acc'].append('{:.4f}'.format(best_acc))
        data_frame = pd.DataFrame(data=results)
        data_frame.to_csv(result_dir + '/' + save_name_pre + '.accuracy.csv', index_label='epoch')

        log_msg = [
            'epoch: {}'.format(e + 1),
            'top 1 acc: {:.4f}'.format(top1),
            'best_acc: {:.4f}'.format(best_acc)]
        print(', '.join(log_msg))
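
evaluate(ema) above scores an exponential-moving-average copy of the model rather than the raw weights. The EMA class itself is not shown in the excerpt; the following is a minimal sketch of what the call EMA(model, args.ema_alpha) implies (the update/apply_shadow method names are assumptions).

import torch

class EMA:
    # Exponential moving average of model parameters (a sketch; the
    # repository's actual EMA class may also track buffers and support
    # restoring the raw weights after evaluation).
    def __init__(self, model, alpha):
        self.model = model
        self.alpha = alpha
        # detached shadow copy of every parameter
        self.shadow = {name: p.detach().clone()
                       for name, p in model.named_parameters()}

    @torch.no_grad()
    def update(self):
        # shadow <- alpha * shadow + (1 - alpha) * current weights
        for name, p in self.model.named_parameters():
            self.shadow[name].mul_(self.alpha).add_(p.detach(), alpha=1 - self.alpha)

    @torch.no_grad()
    def apply_shadow(self):
        # copy the averaged weights into the model before evaluation
        for name, p in self.model.named_parameters():
            p.copy_(self.shadow[name])
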
Example #3
    # data prepare

    #train_data = utils.MNISTPair(root=args.DATA_PATH+"MNIST", train=True, transform=utils.train_transform, download=True)
    #train_data = utils.EMNISTPair(root=args.DATA_PATH+"EMINST", split="byclass", transform=utils.train_transform_mnist, download=True)
    #train_data = utils.CIFAR10Pair(root=args.DATA_PATH+"CIFAR10", train=True, transform=utils.train_transform_mnist, download=True)
    #train_data = utils.STL10Pair(root=args.DATA_PATH+"STL10", split="unlabeled", transform=utils.train_transform, download=True)
    #train_data = utils.SVHNPair(root=args.DATA_PATH+"SVHN", split="train", transform=utils.train_transform, download=True)
    #train_data = utils.CIFAR10_class_Pair(root=args.DATA_PATH+"CIFAR10", train=True, transform=utils.train_transform, download=True, client_class=0)
    train_data = utils.Imagenet32Pair(root=args.DATA_PATH + "Imagenet32",
                                      transform=utils.train_transform,
                                      subset="classes_mammals.txt")
    #train_data = utils.CelebAPair(root=args.DATA_PATH+"CelebA", split="test", transform=utils.train_transform, download=True)

    #memory_data = utils.MNISTPair(root=args.DATA_PATH+"MNIST", train=True, transform=utils.train_transform, download=True)
    memory_data = utils.CIFAR10Pair(root=args.DATA_PATH + "CIFAR10",
                                    train=True,
                                    transform=utils.train_transform,
                                    download=True)

    #test_data = utils.MNISTPair(root=args.DATA_PATH+"MNIST", train=False, transform=utils.train_transform, download=True)
    test_data = utils.CIFAR10Pair(root=args.DATA_PATH + "CIFAR10",
                                  train=False,
                                  transform=utils.test_transform,
                                  download=True)
    #test_data = utils.MNISTPair(root=args.DATA_PATH+"MNIST", train=False, transform=utils.test_transform, download=True)

    train_loader = DataLoader(train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=16,
                              pin_memory=True,
                              drop_last=True)
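
Every loader in these examples wraps a *Pair dataset, which none of the excerpts define. Below is a sketch of the usual pair-dataset idiom, shown for CIFAR10: the stochastic transform is applied twice, so each item yields two augmented views plus the label, matching the three-tuple unpacking assumed in the kNN sketch above.

from PIL import Image
from torchvision.datasets import CIFAR10

class CIFAR10Pair(CIFAR10):
    # CIFAR10 variant that returns two augmented views of each image
    # (a sketch of the pair-dataset pattern the loaders above rely on).
    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        # apply the (random) transform twice to get two independent views
        if self.transform is not None:
            pos_1 = self.transform(img)
            pos_2 = self.transform(img)
        else:
            pos_1 = pos_2 = img
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
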
Example #4
    else:
        os.makedirs(save_dir)

    if args.transform == 'default':
        transform = data_transforms.train_transform
    elif args.transform == 'dip':
        transform = data_transforms.DIP_transform_precomputed
    elif args.transform == 'gauss':
        transform = data_transforms.gaussian_blur_transform
    elif args.transform == 'randblur':
        transform = data_transforms.random_blur_transform
    else:
        raise ValueError('incorrect transform type')

    # data prepare
    train_data = utils.CIFAR10Pair(root='data', train=True, transform=transform, download=True)
    data_idxs = list(range(len(train_data)//2))  # TODO remove
    train_data = Subset(train_data, data_idxs)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True,
                              drop_last=True)
    memory_data = utils.CIFAR10Pair(root='data', train=True, transform=data_transforms.test_transform, download=True)
    memory_loader = DataLoader(memory_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
    test_data = utils.CIFAR10Pair(root='data', train=False, transform=data_transforms.test_transform, download=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)

    # model setup and optimizer config
    model = Model(feature_dim).to(device)
    flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32).to(device),))
    flops, params = clever_format([flops, params])
    print('# Model Params: {} FLOPs: {}'.format(params, flops))
    optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
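
The Adam optimizer above minimizes a contrastive objective that none of these excerpts show. Below is a sketch of the NT-Xent loss typically paired with this setup, assuming the model emits L2-normalized projections out_1 and out_2 for the two views of a batch.

import torch

def nt_xent_loss(out_1, out_2, temperature=0.5):
    # NT-Xent loss (a sketch): pull the two views of each image together
    # and push them away from the other 2B - 2 samples in the batch.
    # Assumes out_1 and out_2 are already L2-normalized, shape [B, D].
    batch_size = out_1.size(0)
    out = torch.cat([out_1, out_2], dim=0)  # [2B, D]
    # pairwise similarities, temperature-scaled
    sim = torch.exp(torch.mm(out, out.t().contiguous()) / temperature)  # [2B, 2B]
    # drop self-similarity on the diagonal
    mask = ~torch.eye(2 * batch_size, dtype=torch.bool, device=sim.device)
    denom = sim.masked_select(mask).view(2 * batch_size, -1).sum(dim=-1)
    # numerator: similarity between the two views of the same image
    pos = torch.exp(torch.sum(out_1 * out_2, dim=-1) / temperature)
    pos = torch.cat([pos, pos], dim=0)
    return (-torch.log(pos / denom)).mean()
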
Example #5
                        help='Temperature used in softmax')
    parser.add_argument('--batch_size',
                        default=512,
                        type=int,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epochs',
                        default=500,
                        type=int,
                        help='Number of sweeps over the dataset to train')

    args = parser.parse_args()
    batch_size, epochs = args.batch_size, args.epochs
    temperature = args.temperature

    train_data = utils.CIFAR10Pair(root='./data',
                                   train=True,
                                   transform=utils.train_transform,
                                   download=True)
    train_loader, valid_loader = utils.create_datasets(batch_size, train_data)
    # model setup and optimizer config

    result_dir = 'results_batch{}_2'.format(batch_size)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    results = {'train_sim': [], 'valid_sim': []}

    model = Model().cuda()
    optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
    least_loss = np.inf

    for epoch in range(1, epochs + 1):
        train_loss = train_val(model, train_loader, optimizer)
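
The excerpt cuts off inside the epoch loop. A plausible continuation under the names already defined (valid_loader, results, least_loss, result_dir) would track both losses and checkpoint on improvement; the validation call train_val(model, valid_loader, None) is an assumption about the function's signature.

        # hypothetical continuation: optimizer=None is assumed to make
        # train_val run a no-update evaluation pass
        valid_loss = train_val(model, valid_loader, None)
        results['train_sim'].append(train_loss)
        results['valid_sim'].append(valid_loss)
        # checkpoint whenever the validation similarity loss improves
        if valid_loss < least_loss:
            least_loss = valid_loss
            torch.save(model.state_dict(),
                       os.path.join(result_dir, 'model.pth'))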