Example #1
train_dataset = Dataset(params, 'train')
val_dataset = Dataset(params, 'val')
train_data = DataLoader(train_dataset,
                        batch_size=params.batch_size,
                        shuffle=True,
                        num_workers=1)  # num_workers: number of worker threads used to load data
valid_data = DataLoader(val_dataset,
                        batch_size=params.batch_size,
                        shuffle=False,
                        num_workers=1)

# build the model
ae = AutoEncoder(params.n_attr).cuda()
lat_dis = LatentDiscriminator(params.n_attr).cuda()
ptc_dis = PatchDiscriminator().cuda()
clf_dis = Classifier(params.n_attr).cuda()

# trainer / evaluator
trainer = Trainer(ae, lat_dis, ptc_dis, clf_dis, train_data, params)
evaluator = Evaluator(ae, lat_dis, ptc_dis, clf_dis, valid_data, params)

for n_epoch in range(params.n_epochs):

    logger.info('Starting epoch %i...' % n_epoch)

    for n_iter in range(0, params.epoch_size, params.batch_size):

        # latent discriminator training
        trainer.lat_dis_step()

        # patch discriminator training
        trainer.ptc_dis_step()
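
The snippet above constructs Dataset(params, 'train') without showing the class. A minimal sketch of such a dataset, under the assumption that each split is a pre-packed tensor file holding images and per-image attributes (the file layout, field names, and params.data_path are assumptions, not the example's actual code):

import torch
from torch.utils.data import Dataset as TorchDataset

class Dataset(TorchDataset):
    """
    Minimal sketch of the dataset assumed above: image tensors plus
    per-image attribute labels, pre-packed into one file per split.
    """

    def __init__(self, params, split):
        # hypothetical layout: one tensor file per split
        data = torch.load('%s/%s.pth' % (params.data_path, split))
        self.images = data['images']          # (N, C, H, W) float tensor
        self.attributes = data['attributes']  # (N, n_attr) float tensor

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        return self.images[index], self.attributes[index]
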
Example #2
assert os.path.isfile(params.eval_clf)
assert params.lambda_lat_dis == 0 or params.n_lat_dis > 0
assert params.lambda_ptc_dis == 0 or params.n_ptc_dis > 0
assert params.lambda_clf_dis == 0 or params.n_clf_dis > 0

# initialize experiment / load dataset
logger = initialize_exp(params)
data, attributes, data2, attributes2 = load_images(params)
train_data = DataSampler(data[0], attributes[0], data2, attributes2, params)
valid_data = DataSampler(data[1], attributes[1], None, None, params)

# build the model
ae = AutoEncoder(params).cuda()
lat_dis = LatentDiscriminator(params).cuda() if params.n_lat_dis else None
ptc_dis = PatchDiscriminator(params).cuda() if params.n_ptc_dis else None
clf_dis = Classifier(params).cuda() if params.n_clf_dis else None
eval_clf = torch.load(params.eval_clf).cuda().eval()

# trainer / evaluator
trainer = Trainer(ae, lat_dis, ptc_dis, clf_dis, train_data, params)
evaluator = Evaluator(ae, lat_dis, ptc_dis, clf_dis, eval_clf, valid_data, params)


for n_epoch in range(params.n_epochs):

    logger.info('Starting epoch %i...' % n_epoch)

    for n_iter in range(0, params.epoch_size, params.batch_size):
        
        # latent discriminator training
        for _ in range(params.n_lat_dis):
            trainer.lat_dis_step()
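
Each iteration runs the latent discriminator for params.n_lat_dis steps via trainer.lat_dis_step(). The trainer itself is not shown; below is a hedged sketch of what such a step usually does in this adversarial setup (the ae.encode call, the BCE criterion, and the function signature are assumptions, not the repository's exact code):

import torch
import torch.nn.functional as F

def lat_dis_step_sketch(ae, lat_dis, lat_dis_optimizer, batch_x, batch_y):
    """
    Illustrative latent-discriminator update: the discriminator learns to
    recover the attributes y from the encoder output while the encoder
    stays frozen.
    """
    ae.eval()
    lat_dis.train()
    with torch.no_grad():
        enc_output = ae.encode(batch_x)   # latent code, gradients blocked
    preds = lat_dis(enc_output)
    loss = F.binary_cross_entropy_with_logits(preds, batch_y)
    lat_dis_optimizer.zero_grad()
    loss.backward()
    lat_dis_optimizer.step()
    return loss.item()
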
Example #3
    train_set = WeizmannActionClassificationDataset(root='data',
                                                    train=True,
                                                    transform=trfs)
    test_set = WeizmannActionClassificationDataset(root='data',
                                                   train=False,
                                                   transform=trfs)

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(test_set, batch_size=1, shuffle=False)

    model = Classifier(in_c=in_c,
                       z_dim=z_dim,
                       h_dim=128,
                       n_act=n_act,
                       n_id=n_id)

    crit_act = nn.CrossEntropyLoss()
    crit_id = nn.CrossEntropyLoss()

    if args.use_cuda:
        model.cuda()
        crit_act.cuda()
        crit_id.cuda()

    opt = optim.Adam(model.parameters(), lr=lr)

    batch_timer = RunningAverageMeter()
    end = time.time()
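
The listing stops right before the training loop. A hedged sketch of one epoch for this two-head setup, assuming the model returns (action logits, identity logits) and each batch yields (frames, action label, identity label); both assumptions go beyond what the snippet shows:

def train_one_epoch(model, loader, crit_act, crit_id, opt, use_cuda):
    # illustrative epoch over the action/identity classifier defined above;
    # the (x, y_act, y_id) batch structure and two-head output are assumptions
    model.train()
    total_loss = 0.0
    for x, y_act, y_id in loader:
        if use_cuda:
            x, y_act, y_id = x.cuda(), y_act.cuda(), y_id.cuda()
        logits_act, logits_id = model(x)
        loss = crit_act(logits_act, y_act) + crit_id(logits_id, y_id)
        opt.zero_grad()
        loss.backward()
        opt.step()
        total_loss += loss.item()
    return total_loss / max(len(loader), 1)
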
Example #4
params.model_type = "classifier"

# check parameters
check_attr(params)
assert len(params.name.strip()) > 0
assert not params.reload or os.path.isfile(params.reload)

# initialize experiment / load dataset
logger = initialize_exp(params)
data, attributes = load_images(params)
train_data = DataSampler(data[0], attributes[0], params)
valid_data = DataSampler(data[1], attributes[1], params)
test_data = DataSampler(data[2], attributes[2], params)

# build the model / reload / optimizer
classifier = Classifier(params).cuda()
if params.reload:
    reload_model(classifier, params.reload,
                 ['img_sz', 'img_fm', 'init_fm', 'hid_dim', 'attr', 'n_attr'])
optimizer = get_optimizer(classifier, params.optimizer)


def save_model(name):
    """
    Save the model.
    """
    path = os.path.join(params.dump_path, '%s.pth' % name)
    logger.info('Saving the classifier to %s ...' % path)
    torch.save(classifier, path)

# best accuracy
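
The listing is cut off at the "best accuracy" bookkeeping. A hedged sketch of how save_model is typically combined with a validation metric to keep only the best checkpoint; evaluate_classifier is an assumed helper and params.n_epochs is assumed to exist, neither is shown in the snippet above:

# Hedged sketch: track the best validation accuracy across epochs and
# save the best / periodic checkpoints. evaluate_classifier() is an
# assumed helper, not part of the example above.
best_accu = -1.0
for n_epoch in range(params.n_epochs):
    logger.info('Starting epoch %i...' % n_epoch)
    # ... one pass over train_data updating `classifier` with `optimizer` ...
    accu = evaluate_classifier(classifier, valid_data, params)
    logger.info('Validation accuracy: %.5f' % accu)
    if accu > best_accu:
        best_accu = accu
        save_model('best')
    save_model('periodic-%i' % n_epoch)
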
Example #5
def main(args):

    ### Hyperparameters setting ###
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    main_epochs = args.epochs
    classifier_epochs = args.c_epochs
    T = args.temperature
    patience = args.patience
    num_classes = args.num_classes
    classifier_hidden_dim = args.c_dim
    projection_hidden_dim = args.p_dim
    in_dim = 512  # Constant as long as we use ResNet18

    # model definition
    f, g = resnet18_encoder().to(device), ProjectionHead(
        in_dim, projection_hidden_dim).to(device)

    if not args.test:
        ### Train SimCLR ###
        dataset = DataSetWrapper(args.batch_size,
                                 args.num_worker,
                                 args.valid_size,
                                 input_shape=(96, 96, 3))
        train_loader, valid_loader = dataset.get_data_loaders()

        criterion = NT_XentLoss(T)
        optimizer = torch.optim.Adam(list(f.parameters()) +
                                     list(g.parameters()),
                                     3e-4,
                                     weight_decay=1e-5)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=len(train_loader), eta_min=0, last_epoch=-1)

        train_losses, val_losses = train(main_epochs, patience, optimizer,
                                         scheduler, train_loader, valid_loader,
                                         f, g, criterion)

        plot_loss_curve(train_losses, val_losses, 'results/train_loss.png',
                        'results/val_loss.png')

    else:
        ### Test ###
        load_checkpoint(f, g, args.checkpoint)
        classifier = Classifier(in_dim, num_classes,
                                classifier_hidden_dim).to(device)

        data_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])

        if not os.path.exists('checkpoints/classifier.pt'):
            ### Train Classifier ###
            train_dataset = datasets.STL10('./data',
                                           split='train',
                                           download=True,
                                           transform=data_transform)
            train_loader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      num_workers=args.num_worker)

            criterion = nn.CrossEntropyLoss()

            if args.fine_tuning:
                params = list(f.parameters()) + list(classifier.parameters())
            else:
                params = classifier.parameters()

            optimizer = torch.optim.Adam(params, lr=1e-4)

            train_classifier(classifier_epochs, train_loader, f, classifier,
                             criterion, optimizer)
            save_checkpoint_classifier(classifier, 'checkpoints/classifier.pt')

        else:
            load_checkpoint_classifier(classifier, 'checkpoints/classifier.pt')

        ### Test ###
        test_dataset = datasets.STL10('./data',
                                      split='test',
                                      download=True,
                                      transform=data_transform)
        test_loader = DataLoader(test_dataset,
                                 batch_size=args.batch_size,
                                 num_workers=args.num_worker)

        accuracy = test(test_loader, f, classifier)
        print("Test Accuracy : %.4f" % (accuracy))
Example #6
def main(args):
    in_dim = 512
    projection_hidden_dim = 2048
    classifier_hidden_dim = 1024
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    data_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    if args.dataset == 'CIFAR-10':
        classifier_path = 'checkpoints/classifier_cifar10.pt'
        num_classes = 10
        train_dataset = datasets.CIFAR10('./data',
                                         train=True,
                                         download=True,
                                         transform=data_transform)
        test_dataset = datasets.CIFAR10('./data',
                                        train=False,
                                        download=True,
                                        transform=data_transform)
    elif args.dataset == 'CIFAR-100':
        classifier_path = 'checkpoints/classifier_cifar100.pt'
        num_classes = 100
        train_dataset = datasets.CIFAR100('./data',
                                          train=True,
                                          download=True,
                                          transform=data_transform)
        test_dataset = datasets.CIFAR100('./data',
                                         train=False,
                                         download=True,
                                         transform=data_transform)
    else:
        raise ValueError('Unsupported dataset: %s' % args.dataset)

    f, g = resnet18_encoder().to(device), ProjectionHead(
        in_dim, projection_hidden_dim).to(device)
    load_checkpoint(f, g, args.checkpoint)

    classifier = Classifier(in_dim, num_classes,
                            classifier_hidden_dim).to(device)

    if not os.path.exists(classifier_path):
        ### Train Classifier ###
        train_loader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_worker)

        criterion = nn.CrossEntropyLoss()

        if args.fine_tuning:
            params = list(f.parameters()) + list(classifier.parameters())
        else:
            params = classifier.parameters()

        optimizer = torch.optim.Adam(params, lr=1e-4)

        train_classifier(args.epochs, train_loader, f, classifier, criterion,
                         optimizer)
        save_checkpoint_classifier(classifier, classifier_path)

    else:
        load_checkpoint_classifier(classifier, classifier_path)

    ### Test ###
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             num_workers=args.num_worker)

    accuracy = test(test_loader, f, classifier)
    print("Test Accuracy : %.4f" % (accuracy))