Example #1
0
    # Choose the training objective: plain MSE on the predicted transform
    # parameters, or TransformedGridLoss (a loss measured on transformed
    # point grids) for the selected geometric model.
    if args.use_mse_loss:
        print('Using MSE loss...')
        loss = nn.MSELoss()
    else:
        print('Using grid loss...')
        loss = TransformedGridLoss(use_cuda=use_cuda,
                                   geometric_model=args.geometric_model)

    # Dataset and dataloader
    # Synthetic training data listed in train.csv; presumably each sample is
    # an image plus a random transform of the chosen family — TODO confirm.
    dataset = SynthDataset(geometric_model=args.geometric_model,
                           transform=NormalizeImageDict(['image']),
                           dataset_csv_file='train.csv',
                           **arg_groups['dataset'])

    dataloader = DataLoader(
        dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=4)  # don't change num_workers, as they copy the rnd seed

    # Held-out split from test.csv, built with the same normalization.
    dataset_test = SynthDataset(geometric_model=args.geometric_model,
                                transform=NormalizeImageDict(['image']),
                                dataset_csv_file='test.csv',
                                **arg_groups['dataset'])

    dataloader_test = DataLoader(dataset_test,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=4)

    # Square input size fed to the CNN (height == width == args.image_size).
    cnn_image_size = (args.image_size, args.image_size)

    # NOTE(review): this call continues beyond the visible fragment.
    pair_generation_tnf = SynthPairTnf(geometric_model=args.geometric_model,
Example #2
0
# Download validation dataset if needed
# Default to the bundled PF-PASCAL path; fetch the dataset on first use.
if args.eval_dataset_path == '' and args.eval_dataset == 'pf-pascal':
    args.eval_dataset_path = 'datasets/proposal-flow-pascal/'
if args.eval_dataset == 'pf-pascal' and not exists(args.eval_dataset_path):
    download_PF_pascal(args.eval_dataset_path)

# NOTE(review): presumably wraps batch tensors as (CUDA) Variables — confirm.
batch_tnf = BatchTensorToVars(use_cuda=use_cuda)

# PF-PASCAL test pairs, with both images normalized for the network.
dataset_test = PFPascalDataset(
    csv_file=os.path.join(args.eval_dataset_path, 'test_pairs_pf_pascal.csv'),
    dataset_path=args.eval_dataset_path,
    transform=NormalizeImageDict(['source_image', 'target_image']))

# shuffle=False so the metric is computed over a fixed, reproducible order.
dataloader_test = DataLoader(dataset_test,
                             batch_size=compute_metric_batch,
                             shuffle=False,
                             num_workers=4)

# Restore the trained checkpoint and switch to inference mode.
model.load_state_dict(torch.load("./trained_models/jim_net.pth.tar"))
model.eval()

# Evaluate PCK on the alignment module over the full test set.
print('Final test pck...')
compute_metric('pck', model.Alignment_Module, dataset_test, dataloader_test,
               batch_tnf, compute_metric_batch, True, True, True, args)

# Accumulators for the clustering / classification evaluation that follows.
true_labels = []
predicted_labels = []

saved_image_clusters = {}

Example #3
0
# filter training categories
# args.categories == 0 means "keep all"; otherwise keep only the pairs
# whose category id appears in args.categories.
if args.categories != 0:
    keep = np.zeros((len(dataset.set), 1))
    for i in range(len(dataset.set)):
        # Nonzero when this pair's category matches any requested category.
        keep[i] = np.sum(dataset.set[i] == args.categories)
    keep_idx = np.nonzero(keep)[0]
    # Filter the dataset fields in place so indices stay aligned.
    dataset.set = dataset.set[keep_idx]
    dataset.img_A_names = dataset.img_A_names[keep_idx]
    dataset.img_B_names = dataset.img_B_names[keep_idx]

# NOTE(review): presumably wraps batch tensors as (CUDA) Variables — confirm.
batch_tnf = BatchTensorToVars(use_cuda=use_cuda)

# dataloader
# Training loader shuffles; the evaluation loader keeps dataset order.
dataloader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=4)
dataloader_eval = DataLoader(dataset_eval,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=4)

# Checkpoint filename encodes the loss-weight hyperparameters.
checkpoint_name = os.path.join(
    args.result_model_dir,
    'inlier_{}_consis_{}_trans_{}.pth.tar'.format(args.w_inlier, args.w_consis,
                                                  args.w_trans))

# define epoch function
def process_epoch(mode,
                  epoch,
Example #4
0
    def process_epoch(mode,
                      epoch,
                      model,
                      loss_fn,
                      optimizer,
                      dataloader,
                      batch_preprocessing_fn,
                      use_cuda=True,
                      log_interval=50):
        """Run one epoch of the cluster / warm-start / alignment pipeline.

        Steps, in order:
          1. Extract clustering features for all source and then all
             target images (no gradients).
          2. Re-cluster the features with t-SNE + a GMM, re-aligning the
             cluster indices to the previous epoch's assignment.
          3. Warm-start the classifier head on the new one-hot clusters.
          4. Run the alignment step, optimizing ``loss_fn`` per batch.
          5. Report V-measure and (best-effort) accuracy metrics.

        Args:
            mode: 'train' enables backward passes and optimizer steps;
                any other value runs forward-only.
            epoch: current epoch index; all learning rates are held at 0
                while ``epoch <= 1``.
            model: network whose forward pass returns the cluster feature
                as its 6th output and which exposes ``classify``.
            loss_fn: alignment loss applied to a preprocessed batch.
            optimizer: param group 0 is the alignment module, groups 1-3
                the classifier; any further group is unsupported.
            dataloader: yields batches containing 'source_image',
                'target_image' and 'set' entries.
            batch_preprocessing_fn: converts a raw batch to model input.
            use_cuda: unused here (tensors are moved with ``.cuda()``
                directly); kept for interface compatibility.
            log_interval: number of batches between progress printouts.

        Returns:
            Mean alignment loss over the epoch.

        NOTE(review): relies on names from the enclosing scope
        (``warm_optimizer``, ``args``, ``cluster_class``, ``num_clusters``,
        ``eps``, ``clust_acc1``/``clust_acc2``, ``print_time``,
        ``align_cluster_index``, ``one_hot``, ``UnsupervisedDataset``).
        """
        # Classifier warm-up schedule: LR is zero during the first epoch.
        for param_group in warm_optimizer.param_groups:
            if epoch > 1:
                param_group['lr'] = args.classifier_lr
            else:
                param_group['lr'] = 0.

        # Main optimizer: group 0 = alignment LR, groups 1-3 = classifier LR.
        for i, param_group in enumerate(optimizer.param_groups):
            if epoch > 1:
                if i == 0:
                    param_group['lr'] = args.align_lr
                elif i <= 3:
                    param_group['lr'] = args.classifier_lr
                else:
                    raise NotImplementedError
            else:
                param_group['lr'] = 0.

        print_time("Started Feature Extraction Step: " + str(epoch))
        old_clusters = cluster_class.clusters
        # cluster step: two passes over the dataloader, first collecting
        # source images, then target images, so features / images / labels
        # stay index-aligned (all sources first, then all targets).
        cluster_features = []
        images = []
        true_labels = []
        with torch.no_grad():
            for batch_idx, batch in enumerate(dataloader):
                tnf_batch = batch_preprocessing_fn(batch)

                _, _, _, _, _, cluster_feature = model(tnf_batch)

                cluster_features.append(cluster_feature.cpu().detach().numpy())
                images.append(tnf_batch['source_image'].cpu().detach().numpy())

                # TODO make this an optional flag
                # only used for computing metrics
                true_labels.append(tnf_batch['set'].cpu().detach().numpy())

        with torch.no_grad():
            for batch_idx, batch in enumerate(dataloader):
                tnf_batch = batch_preprocessing_fn(batch)
                _, _, _, _, _, cluster_feature = model(tnf_batch)

                cluster_features.append(cluster_feature.cpu().detach().numpy())
                images.append(tnf_batch['target_image'].cpu().detach().numpy())

                # TODO make this an optional flag
                # only used for computing metrics
                true_labels.append(tnf_batch['set'].cpu().detach().numpy())

        cluster_features = np.concatenate(cluster_features)
        images = np.concatenate(images)
        true_labels = np.concatenate(true_labels)

        print_time("Started Clustering: " + str(epoch))

        # Embed features into 3-D with t-SNE, then fit a full-covariance GMM
        # and take its hard assignments as the new clustering.
        tsne = TSNE(n_components=3, n_jobs=40, init='pca', perplexity=25)
        features_tsne = tsne.fit_transform(cluster_features)

        gmm = GaussianMixture(n_components=num_clusters,
                              covariance_type='full')
        gmm.fit(features_tsne)
        clusters = gmm.predict(features_tsne)

        # Keep cluster indices stable across epochs by matching them against
        # the previous epoch's assignment (skipped on the first run).
        if len(old_clusters) != 0:
            aligned_clusters = align_cluster_index(old_clusters, clusters)
            clusters = aligned_clusters
        cluster_class.clusters = clusters

        probabilities = one_hot(clusters, num_clusters)

        print_time("Started Warm Start: " + str(epoch))
        # Warm-start the classifier head on (feature, one-hot cluster) pairs
        # for args.warmup_epochs mini-epochs.
        warm_start_tensor = TensorDataset(torch.Tensor(cluster_features),
                                          torch.Tensor(probabilities))
        warm_start_loader = DataLoader(warm_start_tensor,
                                       batch_size=args.batch_size,
                                       shuffle=True)

        loss_warm_start = 0.
        for i in range(args.warmup_epochs):
            for batch_idx, (batch, label) in enumerate(warm_start_loader):
                if mode == 'train':
                    optimizer.zero_grad()
                _, loss = model.classify(batch.cuda(),
                                         probabilities=label.cuda())
                loss_warm_start += loss.data.cpu().numpy()
                if mode == 'train':
                    loss.backward()
                    warm_optimizer.step()
        # eps guards against division by zero when the loader is empty.
        print_time("Warm start loss: " +
                   str(loss_warm_start /
                       (args.warmup_epochs * len(warm_start_loader) + eps)))

        print_time("Started Alignment: " + str(epoch))
        # alignment step
        unsupervised_dataset = UnsupervisedDataset(images, cluster_features,
                                                   clusters, probabilities,
                                                   num_clusters)
        unsupervised_dataloader = DataLoader(unsupervised_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True)
        epoch_loss = 0
        for batch_idx, batch in enumerate(unsupervised_dataloader):
            if mode == 'train':
                optimizer.zero_grad()
            tnf_batch = batch_preprocessing_fn(batch)
            loss = loss_fn(tnf_batch)
            loss_np = loss.data.cpu().numpy()
            epoch_loss += loss_np
            if mode == 'train':
                loss.backward()
                optimizer.step()
            else:
                # Drop the tensor reference in eval mode to release memory.
                loss = None
            if batch_idx % log_interval == 0:
                print(
                    mode.capitalize() +
                    ' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.6f}'.format(
                        epoch, batch_idx, len(unsupervised_dataloader), 100. *
                        batch_idx / len(unsupervised_dataloader), loss_np))

        epoch_loss /= len(unsupervised_dataloader)
        print_time(mode.capitalize() +
                   ' set: Average loss: {:.4f}'.format(epoch_loss))

        # evaluate classifier: predict a cluster for every collected image.
        cluster_accuracy_loader = DataLoader(torch.Tensor(images),
                                             batch_size=args.batch_size,
                                             shuffle=False)
        predicted_cluster = []

        with torch.no_grad():
            for batch_idx, batch in enumerate(cluster_accuracy_loader):
                preds = model.classify(batch.cuda(), processed=False)
                predicted_cluster.append(preds.cpu().detach().numpy())

        pred_clusters = np.concatenate(predicted_cluster).reshape(
            images.shape[0], num_clusters)
        pred_clusters = np.argmax(pred_clusters, axis=1)

        print_time(
            "V measure predictions: " +
            str(sklearn.metrics.v_measure_score(true_labels, pred_clusters)))
        print_time("V measure clusters: " +
                   str(sklearn.metrics.v_measure_score(true_labels, clusters)))

        # Best-effort accuracy: the -1 shift suggests labels are 1-based
        # while clusters are 0-based. Alignment can legitimately fail (e.g.
        # mismatched cluster/label counts), so the failure is reported, not
        # raised. Fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            aligned = align_cluster_index(true_labels - 1, pred_clusters)
            equality = np.equal(aligned, true_labels - 1)
            aligned_with_true = equality.sum() / len(true_labels)

            clust_acc1.append((epoch, aligned_with_true))
            print_time("Accuracy Predictions: " + str(aligned_with_true))
        except Exception:
            print_time("Failed to align predictions with labels")

        try:
            aligned = align_cluster_index(true_labels - 1, clusters)
            equality = np.equal(aligned, true_labels - 1)
            aligned_with_true = equality.sum() / len(true_labels)

            clust_acc2.append((epoch, aligned_with_true))
            print_time("Accuracy Clustering: " + str(aligned_with_true))
        except Exception:
            print_time("Failed to align clusters with labels")

        return epoch_loss