Example #1
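These listings omit their import statements. From the names they use, they appear to rely on at least the standard-library and PyTorch modules below; the project-local classes and helpers are only listed in a comment, since their module paths are not visible in the snippets and would be an assumption.

import math
import os
import random
import sys

import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
# SummaryWriter may come from tensorboardX or torch.utils.tensorboard, depending on the project
from tensorboardX import SummaryWriter

# project-local names used in the examples (module paths not shown in the snippets):
# ResPCPNet, ResMSPCPNet, PointcloudPatchDataset, SequentialPointcloudPatchSampler,
# SequentialShapeRandomPointcloudPatchSampler, compute_loss, get_data, get_output_format,
# check_path_existance, get_meaned_displacements, OUTLIERS, N_OUTPUT
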
def create_model(n_predicted_features, opt):
    # create model: single-scale network for one patch radius, multi-scale otherwise
    if len(opt.patch_radius) == 1:
        pcpnet = ResPCPNet(num_points=opt.points_per_patch,
                           output_dim=n_predicted_features,
                           use_point_stn=opt.use_point_stn,
                           use_feat_stn=opt.use_feat_stn,
                           sym_op=opt.sym_op,
                           point_tuple=opt.point_tuple)
    else:
        pcpnet = ResMSPCPNet(num_scales=len(opt.patch_radius),
                             num_points=opt.points_per_patch,
                             output_dim=n_predicted_features,
                             use_point_stn=opt.use_point_stn,
                             use_feat_stn=opt.use_feat_stn,
                             sym_op=opt.sym_op,
                             point_tuple=opt.point_tuple)
    return pcpnet
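
create_model only dispatches on the number of patch radii: a single radius builds the single-scale ResPCPNet, several radii the multi-scale ResMSPCPNet. A minimal usage sketch, assuming the options live in an argparse.Namespace; the option values are illustrative, not the project defaults.

import argparse

opt = argparse.Namespace(patch_radius=[0.05],  # one radius -> single-scale ResPCPNet
                         points_per_patch=500,
                         use_point_stn=True,
                         use_feat_stn=True,
                         sym_op='max',
                         point_tuple=1)

# e.g. 3 predicted features for an unoriented normal vector
pcpnet = create_model(n_predicted_features=3, opt=opt)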
Example #2
def train_pcpnet(opt):
    def green(x):
        return '\033[92m' + x + '\033[0m'

    def blue(x):
        return '\033[94m' + x + '\033[0m'

    log_dirname = os.path.join(opt.logdir, opt.name)
    params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
    model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
    desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))

    check_path_existance(log_dirname, model_filename, opt)

    target_features, output_target_ind, output_pred_ind, output_loss_weight, n_predicted_features = get_output_format(
        opt)

    pcpnet = ResPCPNet(num_points=opt.points_per_patch,
                       output_dim=n_predicted_features,
                       use_point_stn=opt.use_point_stn,
                       use_feat_stn=opt.use_feat_stn,
                       sym_op=opt.sym_op,
                       point_tuple=opt.point_tuple)

    if opt.refine != '':
        pcpnet.load_state_dict(torch.load(opt.refine))

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)
    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    # create train and test dataset loaders
    train_dataloader, train_datasampler, train_dataset = get_data(
        target_features, opt, train=True)
    test_dataloader, test_datasampler, test_dataset = get_data(target_features,
                                                               opt,
                                                               train=False)
    # keep the exact training shape names for later reference
    opt.train_shapes = train_dataset.shape_names
    opt.test_shapes = test_dataset.shape_names

    print(
        'training set: %d patches (in %d batches) - test set: %d patches (in %d batches)'
        % (len(train_datasampler), len(train_dataloader),
           len(test_datasampler), len(test_dataloader)))

    try:
        os.makedirs(opt.outdir)
    except OSError:
        pass

    train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
    test_writer = SummaryWriter(os.path.join(log_dirname, 'test'))

    optimizer = optim.SGD(pcpnet.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum)
    # milestones in number of optimizer iterations
    # (the milestone list is empty here, so the learning rate never decays)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[], gamma=0.1)
    pcpnet.cuda()

    total_train_batches = len(train_dataloader)
    total_test_batches = len(test_dataloader)

    # save parameters
    torch.save(opt, params_filename)

    # save description
    with open(desc_filename, 'w+') as text_file:
        print(opt.desc, file=text_file)

    criterion = torch.nn.L1Loss()
    for epoch in range(opt.nepoch):
        current_train_batch_index = -1
        train_completion = 0.0
        train_batches = enumerate(train_dataloader, 0)
        current_test_batch_index = -1
        test_completion = 0.0
        test_batches = enumerate(test_dataloader, 0)
        for current_train_batch_index, data in train_batches:
            # update learning rate
            scheduler.step(epoch * total_train_batches +
                           current_train_batch_index)
            # set to training mode
            pcpnet.train()

            # get training-set batch, convert to variables and upload to GPU
            points = data[0]
            target = data[1:-1]
            points = Variable(points)
            points = points.transpose(2, 1)
            points = points.cuda()

            target = tuple(Variable(t) for t in target)
            target = tuple(t.cuda() for t in target)

            # zero gradients
            optimizer.zero_grad()

            # forward pass
            pred, trans, _, _ = pcpnet(points)
            loss = compute_loss(pred=pred,
                                target=target,
                                outputs=opt.outputs,
                                output_pred_ind=output_pred_ind,
                                output_target_ind=output_target_ind,
                                output_loss_weight=output_loss_weight,
                                patch_rot=trans if opt.use_point_stn else None,
                                criterion=criterion)

            # backpropagate through entire network to compute gradients of loss w.r.t. parameters
            loss.backward()

            # parameter optimization step
            optimizer.step()

            train_completion = (current_train_batch_index +
                                1) / total_train_batches

            # print info and update log file
            print('[%s %d/%d: %d/%d] %s loss: %f' %
                  (opt.name, epoch, opt.nepoch, current_train_batch_index,
                   total_train_batches - 1, green('train'), loss.item()))
            train_writer.add_scalar('loss', loss.item(),
                                    (epoch + train_completion) *
                                    total_train_batches * opt.batchSize)

            while test_completion <= train_completion and current_test_batch_index + 1 < total_test_batches:

                # set to evaluation mode
                pcpnet.eval()

                current_test_batch_index, data = next(test_batches)

                # get test-set batch, convert to variables and upload to GPU
                # volatile means that autograd is turned off for everything that depends on the volatile variable
                # since we don't need autograd for inference (only for training)
                points = data[0]
                target = data[1:-1]

                points = Variable(points, volatile=True)
                points = points.transpose(2, 1)
                points = points.cuda()

                target = tuple(Variable(t, volatile=True) for t in target)
                target = tuple(t.cuda() for t in target)

                # forward pass
                pred, trans, _, _ = pcpnet(points)
                loss = compute_loss(
                    pred=pred,
                    target=target,
                    outputs=opt.outputs,
                    output_pred_ind=output_pred_ind,
                    output_target_ind=output_target_ind,
                    output_loss_weight=output_loss_weight,
                    patch_rot=trans if opt.use_point_stn else None,
                    criterion=criterion)

                test_completion = (current_test_batch_index +
                                   1) / total_test_batches

                # print info and update log file
                print('[%s %d: %d/%d] %s loss: %f' %
                      (opt.name, epoch, current_train_batch_index,
                       total_train_batches - 1, blue('test'), loss.item()))
                # print('min normal len: %f' % (pred.data.norm(2,1).min()))
                test_writer.add_scalar('loss', loss.item(),
                                       (epoch + test_completion) *
                                       total_train_batches * opt.batchSize)
        # save model, overwriting the old model
        if epoch % opt.saveinterval == 0 or epoch == opt.nepoch - 1:
            torch.save(pcpnet.state_dict(), model_filename)

        # save model in a separate file at epochs 0, 5, 10, 50, 100, 500, 1000, ..., at every 100th epoch, and at the final epoch
        if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))
                    ) == 0 or epoch % 100 == 0 or epoch == opt.nepoch - 1:
            torch.save(
                pcpnet.state_dict(),
                os.path.join(opt.outdir,
                             '%s_model_%d.pth' % (opt.name, epoch)))
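
The condition that saves a separate checkpoint near the end of the epoch loop is easier to read in isolation. Below is a standalone sketch (standard library only) that prints which epochs would get their own file; the helper name is made up for illustration.

import math

def is_checkpoint_epoch(epoch, nepoch):
    # mirrors the condition above: roughly epochs 0, 5, 10, 50, 100, 500, ...,
    # plus every 100th epoch and the final epoch
    return (epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))) == 0
            or epoch % 100 == 0
            or epoch == nepoch - 1)

print([e for e in range(300) if is_checkpoint_epoch(e, nepoch=300)])
# -> [0, 5, 10, 50, 100, 200, 299]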
Example #3
def eval_pcpnet(opt):
    # fix the random seeds for reproducibility
    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    model_filename = os.path.join(opt.modeldir, opt.model + "_model.pth")
    param_filename = os.path.join(opt.modeldir, opt.model + opt.parmpostfix)

    # load model and training parameters
    trainopt = torch.load(param_filename)
    trainopt.outputs = ['outliers']
    if opt.batchSize == 0:
        model_batchSize = trainopt.batchSize
    else:
        model_batchSize = opt.batchSize
    # get indices in targets and predictions corresponding to each output
    pred_dim = 0
    output_pred_ind = []
    for o in trainopt.outputs:
        if o in ['outliers']:
            output_pred_ind.append(pred_dim)
            pred_dim += 1
        else:
            raise ValueError('Unknown output: %s' % (o))

    patch_features = ['original']
    if OUTLIERS:
        patch_features.append('outliers')
    dataset = PointcloudPatchDataset(
        root=opt.indir,
        shapes_list_file=opt.dataset,
        patch_radius=trainopt.patch_radius,
        points_per_patch=trainopt.points_per_patch,
        patch_features=patch_features,
        seed=opt.seed,
        use_pca=trainopt.use_pca,
        center=trainopt.patch_center,
        point_tuple=trainopt.point_tuple,
        sparse_patches=opt.sparse_patches,
        cache_capacity=opt.cache_capacity)
    if opt.sampling == 'full':
        datasampler = SequentialPointcloudPatchSampler(dataset)
    elif opt.sampling == 'sequential_shapes_random_patches':
        datasampler = SequentialShapeRandomPointcloudPatchSampler(
            dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            sequential_shapes=True,
            identical_epochs=False)
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             sampler=datasampler,
                                             batch_size=model_batchSize,
                                             num_workers=int(opt.workers))

    regressor = ResPCPNet(num_points=trainopt.points_per_patch,
                          output_dim=pred_dim,
                          use_point_stn=trainopt.use_point_stn,
                          use_feat_stn=trainopt.use_feat_stn,
                          sym_op=trainopt.sym_op,
                          point_tuple=trainopt.point_tuple)
    regressor.load_state_dict(torch.load(model_filename))
    regressor.cuda()
    shape_ind = 0
    shape_patch_offset = 0
    if opt.sampling == 'full':
        shape_patch_count = dataset.shape_patch_count[shape_ind]
    elif opt.sampling == 'sequential_shapes_random_patches':
        shape_patch_count = min(opt.patches_per_shape,
                                dataset.shape_patch_count[shape_ind])
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
    shape_properties = torch.FloatTensor(shape_patch_count, 4).zero_()

    # append model name to output directory and create directory if necessary
    model_outdir = os.path.join(opt.outdir, opt.model)
    if not os.path.exists(model_outdir):
        os.makedirs(model_outdir)

    num_batch = len(dataloader)
    batch_enum = enumerate(dataloader, 0)
    for batchind, data in batch_enum:
        regressor.eval()
        # get batch, convert to variables and upload to GPU
        if OUTLIERS:
            points, originals, patch_radiuses, outliers, data_trans = data
        else:
            points, originals, patch_radiuses, data_trans = data
        points = Variable(points, volatile=True)
        points = points.transpose(2, 1)
        points = points.cuda()
        data_trans = data_trans.cuda()
        pred, trans, _, _ = regressor(points)
        # don't need to work with autograd variables anymore
        pred = pred.data
        if trans is not None:
            trans = trans.data
        # post-processing of the prediction
        for oi, o in enumerate(trainopt.outputs):
            if o == 'unoriented_normals' or o == 'oriented_normals':
                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3]

                if trainopt.use_point_stn:
                    # transform predictions with inverse transform
                    # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                             trans.transpose(2, 1)).squeeze(1)

                if trainopt.use_pca:
                    # transform predictions with inverse pca rotation (back to world space)
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                             data_trans.transpose(
                                                 2, 1)).squeeze(1)

                # normalize normals
                o_pred_len = torch.max(
                    torch.cuda.FloatTensor([sys.float_info.epsilon * 100]),
                    o_pred.norm(p=2, dim=1, keepdim=True))
                o_pred = o_pred / o_pred_len
                pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3] = o_pred
            elif o in ['clean_points']:

                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3]
                if trainopt.use_point_stn:
                    # transform predictions with inverse transform
                    # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                             trans.transpose(2, 1)).squeeze(1)

                if trainopt.use_pca:
                    # transform predictions with inverse pca rotation (back to world space)
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                             data_trans.transpose(
                                                 2, 1)).squeeze(1)
                n_points = patch_radiuses.shape[0]
                o_pred = torch.mul(
                    o_pred,
                    torch.t(patch_radiuses.expand(
                        3, n_points)).float().cuda()) + originals.cuda()
                pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3] = o_pred
            elif o in ['outliers']:
                # TODO check dimensions here
                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 1]
                outliers_value = o_pred.cpu()
                # binary outlier mask thresholded at 0.5 (not used further in this function)
                is_outlier = torch.ByteTensor(
                    [1 if x > 0.5 else 0 for x in o_pred])
                o_pred = originals.cuda()

                o_pred = torch.cat((o_pred, outliers_value.cuda()), 1)
                pred = o_pred

            elif o == 'max_curvature' or o == 'min_curvature':
                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 1]

                # undo patch size normalization:
                o_pred[:, :] = o_pred / dataset.patch_radius_absolute[
                    shape_ind][0]

            else:
                raise ValueError('Unsupported output type: %s' % (o))
        print('[%s %d/%d] shape %s' % (opt.model, batchind, num_batch - 1,
                                       dataset.shape_names[shape_ind]))

        batch_offset = 0
        while batch_offset < pred.size(0):

            shape_patches_remaining = shape_patch_count - shape_patch_offset
            batch_patches_remaining = pred.size(0) - batch_offset
            # append estimated patch properties batch to properties for the current shape on the CPU
            shape_properties[shape_patch_offset:shape_patch_offset + min(
                shape_patches_remaining, batch_patches_remaining), :] = pred[
                    batch_offset:batch_offset +
                    min(shape_patches_remaining, batch_patches_remaining), :]

            batch_offset = batch_offset + min(shape_patches_remaining,
                                              batch_patches_remaining)
            shape_patch_offset = shape_patch_offset + min(
                shape_patches_remaining, batch_patches_remaining)
            if shape_patches_remaining <= batch_patches_remaining:

                # save shape properties to disk
                prop_saved = [False] * len(trainopt.outputs)

                # save normals
                oi = [
                    i for i, o in enumerate(trainopt.outputs)
                    if o in ['unoriented_normals', 'oriented_normals']
                ]
                if len(oi) > 1:
                    raise ValueError('Duplicate normal output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                   output_pred_ind[oi] + 3]
                    np.savetxt(
                        os.path.join(
                            model_outdir,
                            dataset.shape_names[shape_ind] + '.normals'),
                        normal_prop.numpy())
                    prop_saved[oi] = True

                # save clean points
                oi = [
                    i for i, o in enumerate(trainopt.outputs)
                    if o in ['clean_points']
                ]
                if len(oi) > 1:
                    raise ValueError('Duplicate point output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                   output_pred_ind[oi] + 3]
                    np.savetxt(
                        os.path.join(model_outdir,
                                     dataset.shape_names[shape_ind] + '.xyz'),
                        normal_prop.numpy())
                    prop_saved[oi] = True

                # save outliers
                oi = [
                    i for i, o in enumerate(trainopt.outputs)
                    if o in ["outliers"]
                ]
                if len(oi) > 1:
                    raise ValueError('Duplicate point output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    outliers_prop = shape_properties[:, output_pred_ind[oi]:
                                                     output_pred_ind[oi] +
                                                     N_OUTPUT]
                    np.savetxt(
                        os.path.join(
                            model_outdir, 'outliers_value' + "_" +
                            dataset.shape_names[shape_ind] + '.info'),
                        outliers_prop.numpy())
                    prop_saved[oi] = True
                # save curvatures
                oi1 = [
                    i for i, o in enumerate(trainopt.outputs)
                    if o == 'max_curvature'
                ]
                oi2 = [
                    i for i, o in enumerate(trainopt.outputs)
                    if o == 'min_curvature'
                ]
                if len(oi1) > 1 or len(oi2) > 1:
                    raise ValueError(
                        'Duplicate minimum or maximum curvature output.')
                elif len(oi1) == 1 or len(oi2) == 1:
                    curv_prop = torch.FloatTensor(shape_properties.size(0),
                                                  2).zero_()
                    if len(oi1) == 1:
                        oi1 = oi1[0]
                        curv_prop[:, 0] = shape_properties[
                            :, output_pred_ind[oi1]:output_pred_ind[oi1] + 1]
                        prop_saved[oi1] = True
                    if len(oi2) == 1:
                        oi2 = oi2[0]
                        curv_prop[:, 1] = shape_properties[
                            :, output_pred_ind[oi2]:output_pred_ind[oi2] + 1]
                        prop_saved[oi2] = True
                    np.savetxt(
                        os.path.join(model_outdir,
                                     dataset.shape_names[shape_ind] + '.curv'),
                        curv_prop.numpy())

                if not all(prop_saved):
                    raise ValueError(
                        'Not all shape properties were saved, some of them seem to be unsupported.'
                    )

                # save point indices
                if opt.sampling != 'full':
                    np.savetxt(os.path.join(
                        model_outdir, dataset.shape_names[shape_ind] + '.idx'),
                               datasampler.shape_patch_inds[shape_ind],
                               fmt='%d')

                # start new shape
                if shape_ind + 1 < len(dataset.shape_names):
                    shape_patch_offset = 0
                    shape_ind = shape_ind + 1
                    if opt.sampling == 'full':
                        shape_patch_count = dataset.shape_patch_count[
                            shape_ind]
                    elif opt.sampling == 'sequential_shapes_random_patches':
                        shape_patch_count = len(
                            datasampler.shape_patch_inds[shape_ind])
                    else:
                        raise ValueError('Unknown sampling strategy: %s' %
                                         opt.sampling)
                    shape_properties = torch.FloatTensor(
                        shape_patch_count, N_OUTPUT).zero_()
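
Both evaluation examples undo the learned point-STN rotation (and, optionally, the PCA alignment) by multiplying the predictions with the transposed rotation matrices via torch.bmm; for a rotation matrix the transpose equals the inverse. A self-contained sketch of that step, with made-up shapes and names.

import math

import torch

def undo_rotation(pred, rot):
    # pred: (B, 3) row vectors, rot: (B, 3, 3) rotation matrices;
    # multiplying by the transpose reverses the rotation that produced pred
    return torch.bmm(pred.unsqueeze(1), rot.transpose(2, 1)).squeeze(1)

# illustrative check with a single rotation about the z axis
theta = math.radians(30.0)
rot = torch.tensor([[math.cos(theta), -math.sin(theta), 0.0],
                    [math.sin(theta), math.cos(theta), 0.0],
                    [0.0, 0.0, 1.0]]).unsqueeze(0)
vec = torch.tensor([[1.0, 0.0, 0.0]])
rotated = torch.bmm(vec.unsqueeze(1), rot).squeeze(1)
print(torch.allclose(undo_rotation(rotated, rot), vec, atol=1e-5))  # True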
Example #4
def eval_pcpnet(opt):
    # model name, used for the checkpoint filename and for naming outputs
    model_name = opt.model
    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    model_filename = os.path.join(opt.modeldir, opt.model + "_model.pth")
    param_filename = os.path.join(opt.modeldir, opt.model + opt.parmpostfix)

    # load model and training parameters
    trainopt = torch.load(param_filename)
    trainopt.outputs = ['clean_points']
    if opt.batchSize == 0:
        model_batchSize = trainopt.batchSize
    else:
        model_batchSize = opt.batchSize
    # get indices in targets and predictions corresponding to each output
    pred_dim = 0
    output_pred_ind = []
    for o in trainopt.outputs:
        if o in ['clean_points']:
            output_pred_ind.append(pred_dim)
            pred_dim += 3
        else:
            raise ValueError('Unknown output: %s' % (o))
    dataset = PointcloudPatchDataset(
        root=opt.outdir,
        shapes_list_file=opt.dataset,
        patch_radius=trainopt.patch_radius,
        points_per_patch=trainopt.points_per_patch,
        patch_features=['original'],
        seed=opt.seed,
        use_pca=trainopt.use_pca,
        center=trainopt.patch_center,
        point_tuple=trainopt.point_tuple,
        sparse_patches=opt.sparse_patches,
        cache_capacity=opt.cache_capacity,
        shape_names=[opt.shapename.format(i=opt.nrun - 1)])
    if opt.sampling == 'full':
        datasampler = SequentialPointcloudPatchSampler(dataset)
    elif opt.sampling == 'sequential_shapes_random_patches':
        datasampler = SequentialShapeRandomPointcloudPatchSampler(
            dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            sequential_shapes=True,
            identical_epochs=False)
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             sampler=datasampler,
                                             batch_size=model_batchSize,
                                             num_workers=int(opt.workers))

    regressor = ResPCPNet(num_points=trainopt.points_per_patch,
                          output_dim=pred_dim,
                          use_point_stn=trainopt.use_point_stn,
                          use_feat_stn=trainopt.use_feat_stn,
                          sym_op=trainopt.sym_op,
                          point_tuple=trainopt.point_tuple)
    regressor.load_state_dict(torch.load(model_filename))
    regressor.cuda()

    shape_ind = 0
    shape_patch_offset = 0
    if opt.sampling == 'full':
        shape_patch_count = dataset.shape_patch_count[shape_ind]
    elif opt.sampling == 'sequential_shapes_random_patches':
        shape_patch_count = min(opt.patches_per_shape,
                                dataset.shape_patch_count[shape_ind])
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
    shape_properties = torch.FloatTensor(shape_patch_count, pred_dim).zero_()

    # append model name to output directory and create directory if necessary
    model_outdir = os.path.join(opt.outdir, model_name)
    if not os.path.exists(model_outdir):
        os.makedirs(model_outdir)

    num_batch = len(dataloader)
    batch_enum = enumerate(dataloader, 0)

    regressor.eval()
    for batchind, data in batch_enum:

        # get batch, convert to variables and upload to GPU
        points, originals, patch_radiuses, data_trans = data
        points = Variable(points, volatile=True)
        points = points.transpose(2, 1)
        points = points.cuda()

        data_trans = data_trans.cuda()
        pred, trans, _, _ = regressor(points)
        pred = pred.data
        if trans is not None:
            trans = trans.data

        # post-processing of the prediction
        for oi, o in enumerate(trainopt.outputs):
            o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3]
            if trainopt.use_point_stn:
                # transform predictions with inverse transform
                # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                         trans.transpose(2, 1)).squeeze(1)
            if trainopt.use_pca:
                # transform predictions with inverse pca rotation (back to world space)
                o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                         data_trans.transpose(2, 1)).squeeze(1)
            n_points = patch_radiuses.shape[0]
            # new coordinates = old coordinates + displacement vector
            o_pred = torch.mul(
                o_pred,
                torch.t(patch_radiuses.expand(
                    3, n_points)).float().cuda()) + originals.cuda()
            pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3] = o_pred

        print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch - 1,
                                       dataset.shape_names[shape_ind]))

        batch_offset = 0
        while batch_offset < pred.size(0):

            shape_patches_remaining = shape_patch_count - shape_patch_offset
            batch_patches_remaining = pred.size(0) - batch_offset
            # append estimated patch properties batch to properties for the current shape on the CPU
            shape_properties[shape_patch_offset:shape_patch_offset + min(
                shape_patches_remaining, batch_patches_remaining), :] = pred[
                    batch_offset:batch_offset +
                    min(shape_patches_remaining, batch_patches_remaining), :]

            batch_offset = batch_offset + min(shape_patches_remaining,
                                              batch_patches_remaining)
            shape_patch_offset = shape_patch_offset + min(
                shape_patches_remaining, batch_patches_remaining)
            if shape_patches_remaining <= batch_patches_remaining:

                # save shape properties to disk
                prop_saved = [False] * len(trainopt.outputs)

                # save clean points
                oi = [
                    k for k, o in enumerate(trainopt.outputs)
                    if o in ['clean_points']
                ]
                if len(oi) > 1:
                    raise ValueError('Duplicate point output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                   output_pred_ind[oi] + 3]
                    # Compute mean displacements, inspired by Taubin smoothing
                    normal_prop = get_meaned_displacements(
                        dataset, normal_prop, opt.n_neighbours)
                    np.savetxt(
                        os.path.join(opt.outdir,
                                     opt.shapename.format(i=opt.nrun) +
                                     '.xyz'), normal_prop.numpy())
                    prop_saved[oi] = True

                if not all(prop_saved):
                    raise ValueError(
                        'Not all shape properties were saved, some of them seem to be unsupported.'
                    )
                # start new shape
                if shape_ind + 1 < len(dataset.shape_names):
                    shape_patch_offset = 0
                    shape_ind = shape_ind + 1
                    if opt.sampling == 'full':
                        shape_patch_count = dataset.shape_patch_count[
                            shape_ind]
                    elif opt.sampling == 'sequential_shapes_random_patches':
                        # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                        shape_patch_count = len(
                            datasampler.shape_patch_inds[shape_ind])
                    else:
                        raise ValueError('Unknown sampling strategy: %s' %
                                         opt.sampling)
                    shape_properties = torch.FloatTensor(
                        shape_patch_count, pred_dim).zero_()
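
The while-loop that follows each forward pass in both evaluation examples redistributes a batch of patch predictions over consecutive shapes. The same bookkeeping, reduced to plain Python lists, looks like this; it assumes the patches arrive in shape order and that the counts cover all patches, and the helper name is made up for illustration.

def split_batches_into_shapes(batches, shape_patch_counts):
    # batches: iterable of per-patch prediction lists, in shape order
    # shape_patch_counts: number of patches belonging to each shape
    shapes, shape_ind, current = [], 0, []
    for batch in batches:
        offset = 0
        while offset < len(batch):
            take = min(shape_patch_counts[shape_ind] - len(current),
                       len(batch) - offset)
            current.extend(batch[offset:offset + take])
            offset += take
            if len(current) == shape_patch_counts[shape_ind]:
                # shape complete: flush it and start collecting the next one
                shapes.append(current)
                current, shape_ind = [], shape_ind + 1
    return shapes

print(split_batches_into_shapes([[1, 2, 3], [4, 5, 6], [7]], [2, 4, 1]))
# -> [[1, 2], [3, 4, 5, 6], [7]]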