Example #1
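Evaluates a trained single-scale PCPNet over every patch of a dataset and saves a list of per-patch records (filename, expected and predicted normal, one-minus-cos loss, center point index) to <model_filename>.eval_data.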
import os
import random

import numpy as np
import torch
import torch.utils.data
from torch.autograd import Variable

# project-local imports (module paths are assumptions, adjust to your layout):
# from pcpnet import PCPNet
# from dataset import SebastianPatchDataset
# from train_pcpnet import compute_loss


def eval_pcpnet(opt):
    model_filename = opt.model_filename
    losses_filename = opt.model_filename + ".eval_data"

    target_features = []
    output_target_ind = []
    output_pred_ind = []
    output_loss_weight = []
    pred_dim = 0

    for o in opt.outputs:
        if o == 'oriented_normals' or o == 'unoriented_normals':
            if 'normal' not in target_features:
                target_features.append('normal')

            output_target_ind.append(target_features.index('normal'))
            output_pred_ind.append(pred_dim)
            output_loss_weight.append(1.0)
            pred_dim += 3
        else:
            raise ValueError('Unknown output: %s' % (o))

    if pred_dim <= 0:
        raise ValueError('Prediction is empty for the given outputs.')

    # create model
    if len(opt.patch_radius) == 1:
        pcpnet = PCPNet(num_points=opt.points_per_patch,
                        output_dim=pred_dim,
                        use_point_stn=opt.use_point_stn,
                        use_feat_stn=opt.use_feat_stn,
                        sym_op=opt.sym_op,
                        point_tuple=opt.point_tuple)
    else:
        raise ValueError("Sebastian only supports a single patch radius")

    if os.path.exists(model_filename):
        pcpnet.load_state_dict(torch.load(model_filename))
    else:
        raise ValueError("No model to load")

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    # create train and test dataset loaders
    train_dataset = SebastianPatchDataset(
        root=opt.datadir,
        shape_list_filename="",
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        patch_features=target_features,
        point_count_std=opt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        use_pca=opt.use_pca,
        center=opt.patch_center,
        point_tuple=opt.point_tuple,
        cache_capacity=opt.cache_capacity,
        output_eval_data=True)

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   shuffle=False,
                                                   batch_size=opt.batchSize,
                                                   num_workers=int(
                                                       opt.workers))

    # keep the exact training shape names for later reference
    opt.train_shapes = train_dataset.shape_names

    print('evaluation dataset: %d patches (in %d batches)' %
          (len(train_dataset), len(train_dataloader)))

    pcpnet.cuda()

    train_enum = enumerate(train_dataloader, 0)

    prediction_data = []

    for train_batchind, data in train_enum:

        # set to evaluation mode
        pcpnet.eval()

        # get testset batch, convert to variables and upload to GPU
        # volatile=True turns autograd off for everything that depends on
        # these variables; we don't need gradients for inference (only for training)
        points = data[0]
        target = data[1:-3]
        filenames = data[-2]
        point_indexes = data[-1]

        points = Variable(points, volatile=True)
        points = points.transpose(2, 1)
        points = points.cuda()

        target = tuple(Variable(t, volatile=True) for t in target)
        target = tuple(t.cuda() for t in target)

        # forward pass
        pred, trans, _, _ = pcpnet(points)

        losses = compute_loss(pred=pred,
                              target=target,
                              outputs=opt.outputs,
                              output_pred_ind=output_pred_ind,
                              output_target_ind=output_target_ind,
                              output_loss_weight=output_loss_weight,
                              patch_rot=trans if opt.use_point_stn else None,
                              normal_loss=opt.normal_loss)

        # map predictions back out of the STN frame; the QSTN yields a rotation,
        # so its transpose is its inverse (guarded, since trans may be None
        # when the point STN is disabled)
        if opt.use_point_stn:
            o_pred = torch.bmm(pred.unsqueeze(1),
                               trans.transpose(2, 1)).squeeze(1)
        else:
            o_pred = pred

        for i in range(o_pred.shape[0]):
            target_i = target[0][i, :].data.cpu().numpy()
            predicted_i = o_pred[i, :].data.cpu().numpy()
            target_i = target_i / np.linalg.norm(target_i)
            predicted_i = predicted_i / np.linalg.norm(predicted_i)

            my_loss = float(1.0 - np.abs(np.dot(target_i, predicted_i)))**2
            their_loss = float(losses[i].data.cpu())

            # print("filename: %s" % filenames[i])
            # print("  expected: %s" % str(target_i))
            # print("  predicted %s" % str(predicted_i))
            # print("  computed_loss1 %f" % their_loss)
            # print("  computed_loss2 %f" % my_loss)
            infodict = {
                'filename': filenames[i],
                'expected_normal': target_i,
                'predicted_normal': predicted_i,
                'one_minus_cos_loss': their_loss,
                'ctr_idx': point_indexes[i],
            }
            prediction_data.append(infodict)

    torch.save(prediction_data, losses_filename)
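
A minimal, hypothetical driver for this function, useful mainly as a reference for which options it reads. Every value below is a placeholder (the attribute names are exactly those accessed above), the project-local modules must be importable, and a CUDA device is required since the model is moved to the GPU.

from argparse import Namespace

# hypothetical invocation; all values are placeholders
opt = Namespace(
    model_filename='models/sebastian_model.pth',  # checkpoint to evaluate
    outputs=['unoriented_normals'],
    patch_radius=[0.05],   # exactly one radius is supported by this variant
    points_per_patch=500,
    use_point_stn=True,
    use_feat_stn=True,
    sym_op='max',
    point_tuple=1,
    seed=3627473,          # a value < 0 picks a random seed
    datadir='data/pclouds',
    patch_point_count_std=0,
    identical_epochs=False,
    use_pca=False,
    patch_center='point',
    cache_capacity=100,
    batchSize=64,
    workers=4,
    normal_loss='ms_euclidean')

eval_pcpnet(opt)  # writes the records to models/sebastian_model.pth.eval_data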
Example #2
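Evaluates one or more trained models (single- or multi-scale) over a point-cloud dataset and writes the post-processed per-shape predictions to disk as .normals, .curv, and optionally .idx files.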
import os
import random
import sys

import numpy as np
import torch
import torch.utils.data
from torch.autograd import Variable

# project-local imports (module paths are assumptions, adjust to your layout):
# from pcpnet import PCPNet, MSPCPNet
# from dataset import (PointcloudPatchDataset, SequentialPointcloudPatchSampler,
#                      SequentialShapeRandomPointcloudPatchSampler)


def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir,
                                      model_name + opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir,
                                      model_name + opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            elif o == 'max_curvature' or o == 'min_curvature':
                output_pred_ind.append(pred_dim)
                pred_dim += 1
            else:
                raise ValueError('Unknown output: %s' % (o))

        dataset = PointcloudPatchDataset(
            root=opt.indir,
            shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            patch_features=[],
            seed=opt.seed,
            use_pca=trainopt.use_pca,
            center=trainopt.patch_center,
            point_tuple=trainopt.point_tuple,
            sparse_patches=opt.sparse_patches,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 sampler=datasampler,
                                                 batch_size=model_batchSize,
                                                 num_workers=int(opt.workers))

        if len(trainopt.patch_radius) == 1:
            regressor = PCPNet(num_points=trainopt.points_per_patch,
                               output_dim=pred_dim,
                               use_point_stn=trainopt.use_point_stn,
                               use_feat_stn=trainopt.use_feat_stn,
                               sym_op=trainopt.sym_op,
                               point_tuple=trainopt.point_tuple)
        else:
            regressor = MSPCPNet(num_scales=len(trainopt.patch_radius),
                                 num_points=trainopt.points_per_patch,
                                 output_dim=pred_dim,
                                 use_point_stn=trainopt.use_point_stn,
                                 use_feat_stn=trainopt.use_feat_stn,
                                 sym_op=trainopt.sym_op,
                                 point_tuple=trainopt.point_tuple)

        regressor.load_state_dict(torch.load(model_filename))
        regressor.cuda()
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape,
                                    dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.FloatTensor(shape_patch_count,
                                             pred_dim).zero_()

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch, convert to variables and upload to GPU
            points, data_trans = data
            points = Variable(points, volatile=True)
            points = points.transpose(2, 1)
            points = points.cuda()

            data_trans = data_trans.cuda()

            pred, trans, _, _ = regressor(points)

            # don't need to work with autograd variables anymore
            pred = pred.data
            if trans is not None:
                trans = trans.data

            # post-processing of the prediction
            for oi, o in enumerate(trainopt.outputs):
                if o == 'unoriented_normals' or o == 'oriented_normals':
                    o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3]

                    if trainopt.use_point_stn:
                        # transform predictions with inverse transform
                        # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                        o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                                 trans.transpose(2,
                                                                 1)).squeeze(1)

                    if trainopt.use_pca:
                        # transform predictions with inverse pca rotation (back to world space)
                        o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                                 data_trans.transpose(
                                                     2, 1)).squeeze(1)

                    # normalize normals; assign in place so that `pred` (and the
                    # shape properties saved below) sees the normalized values
                    o_pred_len = torch.max(
                        torch.cuda.FloatTensor([sys.float_info.epsilon * 100]),
                        o_pred.norm(p=2, dim=1, keepdim=True))
                    o_pred[:, :] = o_pred / o_pred_len

                elif o == 'max_curvature' or o == 'min_curvature':
                    o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 1]

                    # undo patch size normalization:
                    o_pred[:, :] = o_pred / dataset.patch_radius_absolute[shape_ind][0]

                else:
                    raise ValueError('Unsupported output type: %s' % (o))

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch - 1,
                                           dataset.shape_names[shape_ind]))

            batch_offset = 0
            while batch_offset < pred.size(0):

                shape_patches_remaining = shape_patch_count - shape_patch_offset
                batch_patches_remaining = pred.size(0) - batch_offset
                patches_to_copy = min(shape_patches_remaining,
                                      batch_patches_remaining)

                # append estimated patch properties batch to the properties
                # of the current shape on the CPU
                shape_properties[shape_patch_offset:shape_patch_offset +
                                 patches_to_copy, :] = \
                    pred[batch_offset:batch_offset + patches_to_copy, :]

                batch_offset += patches_to_copy
                shape_patch_offset += patches_to_copy

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    prop_saved = [False] * len(trainopt.outputs)

                    # save normals
                    oi = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o in ['unoriented_normals', 'oriented_normals']
                    ]
                    if len(oi) > 1:
                        raise ValueError('Duplicate normal output.')
                    elif len(oi) == 1:
                        oi = oi[0]
                        normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                       output_pred_ind[oi] + 3]
                        np.savetxt(
                            os.path.join(
                                model_outdir,
                                dataset.shape_names[shape_ind] + '.normals'),
                            normal_prop.numpy())
                        prop_saved[oi] = True

                    # save curvatures
                    oi1 = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o == 'max_curvature'
                    ]
                    oi2 = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o == 'min_curvature'
                    ]
                    if len(oi1) > 1 or len(oi2) > 1:
                        raise ValueError(
                            'Duplicate minimum or maximum curvature output.')
                    elif len(oi1) == 1 or len(oi2) == 1:
                        curv_prop = torch.FloatTensor(shape_properties.size(0),
                                                      2).zero_()
                        if len(oi1) == 1:
                            oi1 = oi1[0]
                            curv_prop[:, 0] = shape_properties[
                                :, output_pred_ind[oi1]:output_pred_ind[oi1] + 1]
                            prop_saved[oi1] = True
                        if len(oi2) == 1:
                            oi2 = oi2[0]
                            curv_prop[:, 1] = shape_properties[
                                :, output_pred_ind[oi2]:output_pred_ind[oi2] + 1]
                            prop_saved[oi2] = True
                        np.savetxt(
                            os.path.join(
                                model_outdir,
                                dataset.shape_names[shape_ind] + '.curv'),
                            curv_prop.numpy())

                    if not all(prop_saved):
                        raise ValueError(
                            'Not all shape properties were saved, some of them seem to be unsupported.'
                        )

                    # save point indices
                    if opt.sampling != 'full':
                        np.savetxt(os.path.join(
                            model_outdir,
                            dataset.shape_names[shape_ind] + '.idx'),
                                   datasampler.shape_patch_inds[shape_ind],
                                   fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[
                                shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(
                                datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' %
                                             opt.sampling)
                        shape_properties = torch.FloatTensor(
                            shape_patch_count, pred_dim).zero_()
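
Both evaluation examples undo the point STN by multiplying predictions with trans.transpose(2, 1), relying on the QSTN producing a (near-)rotation whose transpose equals its inverse. A self-contained sketch of that identity (plain NumPy; any orthogonal matrix behaves the same way):

import numpy as np

# a random orthogonal 3x3 matrix from a QR decomposition; for any orthogonal
# matrix the transpose is the inverse, which is all the code above needs
rot, _ = np.linalg.qr(np.random.randn(3, 3))

normal = np.random.randn(3)
normal /= np.linalg.norm(normal)

# rotate into the STN's canonical frame, then back out with the transpose
roundtrip = rot.T @ (rot @ normal)
assert np.allclose(roundtrip, normal)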
Example #3
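The training loop: sets up outputs, model, and train/test datasets and loaders, logs train and test losses to TensorBoard, and checkpoints the model both continuously and at a sparse epoch schedule.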
import json
import math
import os
import random
import shutil
import sys

import torch
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter  # or torch.utils.tensorboard

# project-local imports (module paths are assumptions, adjust to your layout):
# from pcpnet import PCPNet, MSPCPNet
# from dataset import SebastianPatchDataset
# from losses import compute_loss


def train_pcpnet(opt):
    print("setting cuda device id to %d" % (opt.cuda_device_id))
    torch.cuda.set_device(opt.cuda_device_id)
    print("torch.cuda.device is %d" % torch.cuda.current_device())

    # colored console output
    green = lambda x: '\033[92m' + x + '\033[0m'
    blue = lambda x: '\033[94m' + x + '\033[0m'

    log_dirname = os.path.join(opt.logdir, opt.name)
    params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
    model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
    desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))
    params_json_filename = os.path.join(opt.outdir,
                                        "%s_params.json" % opt.name)
    with open(params_json_filename, 'w') as f:
        json.dump(vars(opt), f)

    if os.path.exists(log_dirname) or os.path.exists(model_filename):
        response = input(
            'A training run named "%s" already exists, overwrite? (y/n) ' %
            (opt.name))
        if response == 'y':
            if os.path.exists(log_dirname):
                shutil.rmtree(os.path.join(opt.logdir, opt.name))
        else:
            sys.exit()

    # get indices in targets and predictions corresponding to each output
    target_features = []
    output_target_ind = []
    output_pred_ind = []
    output_loss_weight = []
    pred_dim = 0
    for o in opt.outputs:
        if o == 'unoriented_normals' or o == 'oriented_normals':
            if 'normal' not in target_features:
                target_features.append('normal')

            output_target_ind.append(target_features.index('normal'))
            output_pred_ind.append(pred_dim)
            output_loss_weight.append(1.0)
            pred_dim += 3
        elif o == 'max_curvature' or o == 'min_curvature':
            if o not in target_features:
                target_features.append(o)

            output_target_ind.append(target_features.index(o))
            output_pred_ind.append(pred_dim)
            if o == 'max_curvature':
                output_loss_weight.append(0.7)
            else:
                output_loss_weight.append(0.3)
            pred_dim += 1
        else:
            raise ValueError('Unknown output: %s' % (o))

    if pred_dim <= 0:
        raise ValueError('Prediction is empty for the given outputs.')

    # create model
    if len(opt.patch_radius) == 1:
        pcpnet = PCPNet(num_points=opt.points_per_patch,
                        output_dim=pred_dim,
                        use_point_stn=opt.use_point_stn,
                        use_feat_stn=opt.use_feat_stn,
                        sym_op=opt.sym_op,
                        point_tuple=opt.point_tuple)
    else:
        # multi-scale (MSPCPNet) is deliberately not supported by this variant
        raise ValueError("Sebastian does not support MSPCPNet")

    if opt.refine != '':
        pcpnet.load_state_dict(torch.load(opt.refine))

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    # create train and test dataset loaders
    train_dataset = SebastianPatchDataset(
        root=opt.traindir,
        shape_list_filename=opt.trainset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        patch_features=target_features,
        point_count_std=opt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        use_pca=opt.use_pca,
        center=opt.patch_center,
        point_tuple=opt.point_tuple,
        cache_capacity=opt.cache_capacity)

    shuffle = False
    if opt.training_order == 'random':
        shuffle = True
        # train_datasampler = RandomPointcloudPatchSampler(
        #     train_dataset,
        #     patches_per_shape=opt.patches_per_shape,
        #     seed=opt.seed,
        #     identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        shuffle = False
        # train_datasampler = SequentialShapeRandomPointcloudPatchSampler(
        #     train_dataset,
        #     patches_per_shape=opt.patches_per_shape,
        #     seed=opt.seed,
        #     identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        shuffle=shuffle,
        # sampler=train_datasampler,
        batch_size=opt.batchSize,
        num_workers=int(opt.workers))

    test_dataset = SebastianPatchDataset(
        root=opt.testdir,
        shape_list_filename=opt.testset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        patch_features=target_features,
        point_count_std=opt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        use_pca=opt.use_pca,
        center=opt.patch_center,
        point_tuple=opt.point_tuple,
        cache_capacity=opt.cache_capacity)

    shuffle = False
    if opt.training_order == 'random':
        shuffle = True
        # test_datasampler = RandomPointcloudPatchSampler(
        #     test_dataset,
        #     patches_per_shape=opt.patches_per_shape,
        #     seed=opt.seed,
        #     identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        shuffle = False
        # test_datasampler = SequentialShapeRandomPointcloudPatchSampler(
        #     test_dataset,
        #     patches_per_shape=opt.patches_per_shape,
        #     seed=opt.seed,
        #     identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        shuffle=shuffle,
        # sampler=test_datasampler,
        batch_size=opt.batchSize,
        num_workers=int(opt.workers))

    # keep the exact training shape names for later reference
    opt.train_shapes = train_dataset.shape_names
    opt.test_shapes = test_dataset.shape_names

    print(
        'training set: %d patches (in %d batches) - test set: %d patches (in %d batches)'
        % (len(train_dataset), len(train_dataloader), len(test_dataset),
           len(test_dataloader)))

    try:
        os.makedirs(opt.outdir)
    except OSError:
        pass

    train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
    test_writer = SummaryWriter(os.path.join(log_dirname, 'test'))

    optimizer = optim.SGD(pcpnet.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum)
    scheduler = lr_scheduler.MultiStepLR(
        optimizer, milestones=[],
        gamma=0.1)  # milestones in number of optimizer iterations
    pcpnet.cuda()

    train_num_batch = len(train_dataloader)
    test_num_batch = len(test_dataloader)

    # save parameters
    torch.save(opt, params_filename)

    # save description
    with open(desc_filename, 'w+') as text_file:
        print(opt.desc, file=text_file)

    for epoch in range(opt.nepoch):

        train_batchind = -1
        train_fraction_done = 0.0
        train_enum = enumerate(train_dataloader, 0)

        test_batchind = -1
        test_fraction_done = 0.0
        test_enum = enumerate(test_dataloader, 0)

        for train_batchind, data in train_enum:

            # update learning rate
            scheduler.step(epoch * train_num_batch + train_batchind)

            # set to training mode
            pcpnet.train()

            # get trainingset batch, convert to variables and upload to GPU
            points = data[0]
            target = data[1:-1]

            points = Variable(points)
            points = points.transpose(2, 1)
            points = points.cuda()

            target = tuple(Variable(t) for t in target)
            target = tuple(t.cuda() for t in target)

            # zero gradients
            optimizer.zero_grad()

            # forward pass
            pred, trans, _, _ = pcpnet(points)

            loss = compute_loss(pred=pred,
                                target=target,
                                outputs=opt.outputs,
                                output_pred_ind=output_pred_ind,
                                output_target_ind=output_target_ind,
                                output_loss_weight=output_loss_weight,
                                patch_rot=trans if opt.use_point_stn else None,
                                normal_loss=opt.normal_loss)

            # backpropagate through entire network to compute gradients of loss w.r.t. parameters
            loss.backward()

            # parameter optimization step
            optimizer.step()

            train_fraction_done = (train_batchind + 1) / train_num_batch

            # print info and update log file
            print('[%s %d: %d/%d] %s loss: %f' %
                  (opt.name, epoch, train_batchind, train_num_batch - 1,
                   green('train'), loss.data[0]))
            # print('min normal len: %f' % (pred.data.norm(2,1).min()))
            train_writer.add_scalar('loss', loss.data[0],
                                    (epoch + train_fraction_done) *
                                    train_num_batch * opt.batchSize)

            while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:

                # set to evaluation mode
                pcpnet.eval()

                test_batchind, data = next(test_enum)

                # get testset batch, convert to variables and upload to GPU
                # volatile=True turns autograd off for everything that depends on
                # these variables; we don't need gradients for inference (only for training)
                points = data[0]
                target = data[1:-1]

                points = Variable(points, volatile=True)
                points = points.transpose(2, 1)
                points = points.cuda()

                target = tuple(Variable(t, volatile=True) for t in target)
                target = tuple(t.cuda() for t in target)

                # forward pass
                pred, trans, _, _ = pcpnet(points)

                loss = compute_loss(
                    pred=pred,
                    target=target,
                    outputs=opt.outputs,
                    output_pred_ind=output_pred_ind,
                    output_target_ind=output_target_ind,
                    output_loss_weight=output_loss_weight,
                    patch_rot=trans if opt.use_point_stn else None,
                    normal_loss=opt.normal_loss)

                test_fraction_done = (test_batchind + 1) / test_num_batch

                # print info and update log file
                print('[%s %d: %d/%d] %s loss: %f' %
                      (opt.name, epoch, train_batchind, train_num_batch - 1,
                       blue('test'), loss.data[0]))
                # print('min normal len: %f' % (pred.data.norm(2,1).min()))
                test_writer.add_scalar('loss', loss.data[0],
                                       (epoch + test_fraction_done) *
                                       train_num_batch * opt.batchSize)

        # save model, overwriting the old model
        if epoch % opt.saveinterval == 0 or epoch == opt.nepoch - 1:
            torch.save(pcpnet.state_dict(), model_filename)

        # save model in a separate file in epochs 0,5,10,50,100,500,1000, ...
        if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))
                    ) == 0 or epoch % 100 == 0 or epoch == opt.nepoch - 1:
            torch.save(
                pcpnet.state_dict(),
                os.path.join(opt.outdir,
                             '%s_model_%d.pth' % (opt.name, epoch)))
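
The snapshot condition in the last lines is compact; a standalone sketch (with a placeholder epoch count) makes the resulting schedule explicit:

import math

nepoch = 2000  # placeholder
snapshot_epochs = [
    epoch for epoch in range(nepoch)
    if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))) == 0
    or epoch % 100 == 0 or epoch == nepoch - 1
]
print(snapshot_epochs[:12])
# -> [0, 5, 10, 50, 100, 200, 300, 400, 500, 600, 700, 800]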