Example #1
def load_model(checkpoints_folder, device):
    # `config` is expected to be a module-level dict holding the model kwargs.
    model = ResNetSimCLR(**config['model'])
    state_dict = torch.load(os.path.join(checkpoints_folder, 'model.pth'),
                            map_location=torch.device('cpu'))
    model.load_state_dict(state_dict)
    model = model.to(device)
    model.eval()  # inference mode: freeze batch-norm/dropout behavior
    return model
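
A minimal usage sketch, assuming a hypothetical config shape and checkpoint folder (neither comes from the original snippet):

import torch

config = {'model': {'base_model': 'resnet18', 'out_dim': 128}}  # hypothetical shape
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = load_model('./checkpoints', device)  # './checkpoints' is a placeholder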
Example #2
def encode(save_root, model_file, data_folder, model_name='ca', dataset_name='celeba', batch_size=64, device='cuda:0', out_dim=256):
    os.makedirs(save_root, exist_ok=True)
    os.makedirs(data_folder, exist_ok=True)

    if dataset_name == 'celeba':
        train_loader = DataLoader(datasets.CelebA(data_folder, split='train', download=True, transform=transforms.ToTensor()),
                                    batch_size=batch_size, shuffle=False)
        valid_loader = DataLoader(datasets.CelebA(data_folder, split='valid', download=True, transform=transforms.ToTensor()),
                                    batch_size=batch_size, shuffle=False)
    elif dataset_name == 'stanfordCars':
        t = transforms.Compose([
            transforms.Resize(512),
            transforms.CenterCrop(512),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.repeat(3,1,1) if x.shape[0] == 1 else x)
        ])
        train_data_dir = os.path.join(data_folder, 'cars_train/')
        train_annos = os.path.join(data_folder, 'devkit/cars_train_annos.mat')
        train_loader = DataLoader(CarsDataset(train_annos, train_data_dir, t), batch_size=batch_size, shuffle=False)
        valid_data_dir = os.path.join(data_folder, 'cars_test/')
        valid_annos = os.path.join(data_folder, 'devkit/cars_test_annos_withlabels.mat')
        valid_loader = DataLoader(CarsDataset(valid_annos, valid_data_dir, t), batch_size=batch_size, shuffle=False)
    elif dataset_name == 'compCars':
        t = transforms.Compose([
            transforms.Resize(512),
            transforms.CenterCrop(512),
            transforms.ToTensor()
        ])
        train_loader = DataLoader(CompCars(data_folder, True, t), batch_size=batch_size, shuffle=False)
        valid_loader = DataLoader(CompCars(data_folder, False, t), batch_size=batch_size, shuffle=False)
    else:
        raise ValueError(f'Unknown dataset: {dataset_name}')

    model = ResNetSimCLR('resnet50', out_dim)
    model.load_state_dict(torch.load(model_file, map_location=device))
    model = model.to(device)
    model.eval()

    print('Starting on training data')
    train_encodings = []
    with torch.no_grad():  # inference only; no autograd bookkeeping needed
        for x, _ in train_loader:
            x = x.to(device)
            h, _ = model(x)
            train_encodings.append(h.cpu())
    torch.save(torch.cat(train_encodings, dim=0), os.path.join(save_root, f'{dataset_name}-{model_name}model-train_encodings.pt'))

    print('Starting on validation data')
    valid_encodings = []
    with torch.no_grad():
        for x, _ in valid_loader:
            x = x.to(device)
            h, _ = model(x)
            if h.dim() == 1:  # a final batch of size one can come back squeezed
                h = h.unsqueeze(0)
            valid_encodings.append(h.cpu())
    torch.save(torch.cat(valid_encodings, dim=0), os.path.join(save_root, f'{dataset_name}-{model_name}model-valid_encodings.pt'))
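
A hedged invocation sketch; every path and the checkpoint file name below are placeholders, not values from the original:

encode(save_root='./encodings',
       model_file='./checkpoints/model.pth',  # hypothetical checkpoint path
       data_folder='./data',
       dataset_name='celeba')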
Example #3
def evaluation(checkpoints_folder, config, device):
    model = ResNetSimCLR(**config['model'])
    model.load_state_dict(
        torch.load(os.path.join(checkpoints_folder, 'model.pth'),
                   map_location=device))
    model = model.to(device)
    model.eval()  # inference mode for feature extraction

    train_set = torchvision.datasets.CIFAR10(
        root='../data/CIFAR10',
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]),
        train=True,
        download=True)
    test_set = torchvision.datasets.CIFAR10(root='../data/CIFAR10',
                                            transform=transforms.Compose([
                                                transforms.ToTensor(),
                                                transforms.Normalize(
                                                    mean=[0.485, 0.456, 0.406],
                                                    std=[0.229, 0.224, 0.225])
                                            ]),
                                            train=False,
                                            download=True)
    # Optional variant: carve out a 5% split with SubsetRandomSampler and
    # build the loaders with config['batch_size'] instead of the fixed
    # settings below.

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=48,
                                               drop_last=True,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=48,
                                              drop_last=True,
                                              shuffle=True)

    X_train_feature = []
    label_train = []

    with torch.no_grad():  # feature extraction only; gradients are not needed
        for x, y in train_loader:
            x = x.to(device)
            features, _ = model(x)
            X_train_feature.extend(features.cpu().numpy())
            label_train.extend(y.numpy())

    X_train_feature = np.array(X_train_feature)
    label_train = np.array(label_train)

    X_test_feature = []
    label_test = []
    with torch.no_grad():
        for x, y in test_loader:
            x = x.to(device)
            features, _ = model(x)
            X_test_feature.extend(features.cpu().numpy())
            label_test.extend(y.numpy())
    X_test_feature = np.array(X_test_feature)
    label_test = np.array(label_test)
    # Standardize features with statistics from the training split only.
    scaler = preprocessing.StandardScaler()
    scaler.fit(X_train_feature)
    linear_model_eval(scaler.transform(X_train_feature), label_train,
                      scaler.transform(X_test_feature), label_test)
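
linear_model_eval is not shown in this example. A minimal sketch of what such a helper typically does, assuming scikit-learn's LogisticRegression as the linear probe:

from sklearn.linear_model import LogisticRegression

def linear_model_eval(X_train, y_train, X_test, y_test):
    # Fit a logistic-regression probe on the frozen SimCLR features.
    clf = LogisticRegression(max_iter=1000)
    clf.fit(X_train, y_train)
    print('Linear probe test accuracy:', clf.score(X_test, y_test))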
Example #4
    def eval_frozen(self):

        train_loader, val_loader, num_classes = self.dataset.get_dataset_eval()

        model = ResNetSimCLR(**self.config["model"]).to(self.device)
        model = self._load_pre_trained_weights(model)
        model.to(self.device)
        model.eval()

        lineal_classifier = nn.Linear(model.l1.in_features, num_classes)
        lineal_classifier.to(self.device)

        optimizer = torch.optim.SGD(lineal_classifier.parameters(),
                                    lr=1e-3,
                                    weight_decay=float(
                                        self.config['weight_decay']))

        epochs = self.config['epochs']

        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[int(0.5 * epochs),
                        int(0.8 * epochs)],
            gamma=0.1,
            last_epoch=-1)

        criterion = nn.CrossEntropyLoss()
        best_acc = 0

        for epoch in range(epochs):
            print('=' * 20)
            print(f'Epoch: {epoch + 1} / {epochs}')

            top1 = 0
            running_loss = 0
            n = 0
            lineal_classifier.train()

            for idx, (img, lab) in enumerate(train_loader):

                B = img.size(0)

                img = img.to(self.device)
                lab = lab.to(self.device)

                optimizer.zero_grad()

                loss, top1_batch = self._step_eval_train(
                    model, lineal_classifier, img, lab)

                loss.backward()
                optimizer.step()

                top1 += top1_batch
                running_loss += loss.item() * B
                n += B

                print('Training {}/{} - Loss: {:.2f} - top 1: {:.2f}'.format(
                    idx + 1, len(train_loader), running_loss / n,
                    100 * top1 / n),
                      end='\r')

            print('\n')

            top1 = 0
            running_loss = 0
            n = 0
            lineal_classifier.eval()

            for idx, (img, lab) in enumerate(val_loader):

                B = img.size(0)

                img = img.to(self.device)
                lab = lab.to(self.device)

                loss, top1_batch = self._step_eval_eval(
                    model, lineal_classifier, img, lab)

                top1 += top1_batch
                running_loss += loss.item() * B
                n += B

                print('Val {}/{} - Loss: {:.2f} - top 1: {:.2f}'.format(
                    idx + 1, len(val_loader), running_loss / n,
                    100 * top1 / n),
                      end='\r')

            print('\n')
            val_acc = 100 * top1 / n
            if val_acc > best_acc:
                best_acc = val_acc

            print(f'Best ACC: {best_acc:.2f}')
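
_step_eval_train is defined elsewhere in the class. A plausible sketch under the frozen-encoder assumption (everything beyond the names used above is hypothetical):

import torch
import torch.nn as nn

def _step_eval_train(self, model, classifier, img, lab):
    # The encoder stays frozen, so features are extracted without autograd.
    with torch.no_grad():
        h, _ = model(img)
    logits = classifier(h)
    loss = nn.CrossEntropyLoss()(logits, lab)
    top1 = (logits.argmax(dim=1) == lab).sum().item()  # correct predictions in the batch
    return loss, top1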
Example #5
def main():
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1

    if args.mode == 'simclr':
        dataset = ContrastiveLearningDataset(args.data)
        train_dataset = dataset.get_dataset(args.dataset_name,
                                            args.n_views,
                                            train=True)
        model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)
        trainer_class = SimCLRTrainer
    elif args.mode == 'supervised':
        dataset = SupervisedLearningDataset(args.data)
        train_dataset = dataset.get_dataset(args.dataset_name,
                                            args.supervised_augments,
                                            train=True)
        model = ResNetSimCLR(base_model=args.arch,
                             out_dim=len(train_dataset.classes))
        trainer_class = SupervisedTrainer
    else:
        raise InvalidTrainingMode()

    if args.target_shuffle is not None:
        random.seed(args.target_shuffle)
        random.shuffle(train_dataset.targets)

    checkpoints = []
    for root, dirs, files in os.walk(
            os.path.join('experiments', args.experiment_group, 'wandb')):
        for file in files:
            if file == args.estimate_checkpoint:
                checkpoints += [os.path.join(root, file)]

    set_random_seed(args.seed)
    sample_indices = torch.randint(len(train_dataset),
                                   size=(args.batch_size *
                                         args.estimate_batches, ))

    #  It’s a no-op if the 'gpu_index' argument is a negative integer or None.
    estimated_prob, estimated_argmax = [], []
    with torch.cuda.device(args.gpu_index):
        for file in checkpoints:
            state = torch.load(file)
            model.load_state_dict(state['model'])
            model.eval()
            trainer = trainer_class(model=model,
                                    optimizer=None,
                                    scheduler=None,
                                    args=args)

            checkpoint_prob, checkpoint_argmax = [], []
            for i in range(args.estimate_batches):
                if args.fixed_augments:
                    set_random_seed(args.seed)

                if args.mode == 'simclr':
                    images = [[], []]
                    for index in sample_indices[i * args.batch_size:(i + 1) * args.batch_size]:
                        example = train_dataset[index][0]
                        images[0] += [example[0]]
                        images[1] += [example[1]]

                    images[0] = torch.stack(images[0], dim=0)
                    images[1] = torch.stack(images[1], dim=0)
                    labels = None
                elif args.mode == 'supervised':
                    images, labels = [], []
                    for index in sample_indices[i * args.batch_size:(i + 1) * args.batch_size]:
                        example = train_dataset[index]
                        images += [example[0]]
                        labels += [example[1]]

                    images = torch.stack(images, dim=0)
                    labels = torch.tensor(labels, dtype=torch.long)

                with torch.no_grad():
                    logits, labels = trainer.calculate_logits(images, labels)

                    prob = torch.softmax(logits,
                                         dim=1)[torch.arange(labels.shape[0]),
                                                labels]
                    checkpoint_prob += [prob.detach().cpu()]

                    argmax = (torch.argmax(logits,
                                           dim=1) == labels).to(torch.int)
                    checkpoint_argmax += [argmax.detach().cpu()]

            checkpoint_prob = torch.cat(checkpoint_prob, dim=0)
            estimated_prob += [checkpoint_prob]

            checkpoint_argmax = torch.cat(checkpoint_argmax, dim=0)
            estimated_argmax += [checkpoint_argmax]

    estimated_prob = torch.stack(estimated_prob, dim=0)
    estimated_argmax = torch.stack(estimated_argmax, dim=0)
    torch.save(
        {
            'indices': sample_indices,
            'prob': estimated_prob,
            'argmax': estimated_argmax
        }, os.path.join('experiments', args.experiment_group, args.out_file))
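
The saved tensors can be inspected afterwards; a hedged sketch (the group and file names are placeholders):

est = torch.load(os.path.join('experiments', 'my_group', 'estimates.pt'))
print(est['prob'].shape)                   # (num_checkpoints, batch_size * estimate_batches)
print(est['argmax'].float().mean(dim=0))   # per-example accuracy across checkpoints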
Example #6
        if train_gpu:
            xis = xis.cuda()
            xjs = xjs.cuda()

        optimizer.zero_grad()  # clear gradients left over from the previous step
        loss = step(xis, xjs)

        train_writer.add_scalar('train_loss', loss, global_step=n_iter)
        loss.backward()
        optimizer.step()
        n_iter += 1

    if epoch_counter % config['eval_every_n_epochs'] == 0:

        # validation steps
        with torch.no_grad():
            model.eval()

            valid_loss = 0.0
            for counter, ((xis, xjs), _) in enumerate(valid_loader):

                if train_gpu:
                    xis = xis.cuda()
                    xjs = xjs.cuda()
                loss = step(xis, xjs)
                valid_loss += loss.item()

            valid_loss /= (counter + 1)  # enumerate is zero-based

            if valid_loss < best_valid_loss:
                # save the model weights
                best_valid_loss = valid_loss
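
The excerpt ends before the actual save. A typical completion, with the output path as an assumption:

# 'checkpoints/best_model.pth' is a hypothetical location
torch.save(model.state_dict(), os.path.join('checkpoints', 'best_model.pth'))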
Example #7
def main():
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        print("Using GPU")
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1

    dataset = ContrastiveLearningDataset(args.data)

    train_dataset = dataset.get_dataset(args.dataset_name, args.n_views)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True,
                                               worker_init_fn=worker_init_fn)

    if args.dataset_name == "mnist":
        in_channels = 1
    else:
        in_channels = 3

    model = ResNetSimCLR(base_model=args.arch,
                         out_dim=args.out_dim,
                         in_channels=in_channels)

    if args.model_path is not None:
        checkpoint = torch.load(args.model_path, map_location=args.device)
        model.load_state_dict(checkpoint['state_dict'])

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=len(train_loader), eta_min=0, last_epoch=-1)

    head_dataset = HeadDataset(args.data)
    train_head_dataset = head_dataset.get_dataset(args.dataset_name,
                                                  train=True,
                                                  split="train")
    test_head_dataset = head_dataset.get_dataset(args.dataset_name,
                                                 train=False,
                                                 split="test")

    args.num_classes = head_dataset.get_num_classes(args.dataset_name)

    train_head_loader = torch.utils.data.DataLoader(train_head_dataset,
                                                    batch_size=args.batch_size,
                                                    shuffle=True,
                                                    num_workers=args.workers,
                                                    pin_memory=True,
                                                    drop_last=True)

    test_head_loader = torch.utils.data.DataLoader(test_head_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   drop_last=True)

    #  It’s a no-op if the 'gpu_index' argument is a negative integer or None.
    with torch.cuda.device(args.gpu_index):
        if not (args.head_only or args.tsne_only):
            simclr = SimCLR(model=model,
                            optimizer=optimizer,
                            scheduler=scheduler,
                            args=args)
            model = simclr.train(train_loader)
        model.eval()
        if not args.tsne_only:
            headsimclr = SimCLRHead(model=model, args=args)
            headsimclr.train(train_head_loader, test_head_loader)

        tsne_plot = TSNE_project(model, test_head_loader, args)
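
worker_init_fn is referenced but not defined in this example. A common seeding recipe, offered here as an assumption about its intent:

import random
import numpy as np
import torch

def worker_init_fn(worker_id):
    # Propagate each DataLoader worker's distinct torch seed to numpy/random.
    seed = torch.initial_seed() % 2**32
    np.random.seed(seed)
    random.seed(seed)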