Example #1
def get_data_loaders(opt, trainopt, target_features):
    # create dataset loader
    if opt.batchSize == 0:
        model_batchSize = trainopt.batchSize
    else:
        model_batchSize = opt.batchSize

    test_dataset = PointcloudPatchDataset(
        root=opt.indir,
        shape_list_filename=opt.testset,
        patch_radius=trainopt.patch_radius,
        points_per_patch=trainopt.points_per_patch,
        patch_features=target_features,
        point_count_std=trainopt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=trainopt.identical_epochs,
        use_pca=trainopt.use_pca,
        center=trainopt.patch_center,
        point_tuple=trainopt.point_tuple,
        sparse_patches=opt.sparse_patches,
        cache_capacity=opt.cache_capacity,
        neighbor_search_method=trainopt.neighbor_search)
    if opt.sampling == 'full':
        test_datasampler = SequentialPointcloudPatchSampler(test_dataset)
    elif opt.sampling == 'sequential_shapes_random_patches':
        test_datasampler = SequentialShapeRandomPointcloudPatchSampler(
            test_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            sequential_shapes=True,
            identical_epochs=False)
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)

    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        sampler=test_datasampler,
        batch_size=model_batchSize,
        num_workers=int(opt.workers))
    return test_dataloader, test_dataset, test_datasampler
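
All of the examples in this listing feed a custom patch sampler into torch.utils.data.DataLoader. As a reference for that pattern, here is a minimal, self-contained sketch of a sequential patch sampler in the spirit of SequentialPointcloudPatchSampler; the ToyPatchDataset / ToySequentialPatchSampler names are illustrative stand-ins, not the repository's actual implementations.

import torch
from torch.utils.data import DataLoader, Dataset, Sampler

class ToyPatchDataset(Dataset):
    # stand-in for PointcloudPatchDataset: one item per patch, grouped by shape
    def __init__(self, shape_patch_count):
        self.shape_patch_count = shape_patch_count  # e.g. [100, 250] patches for two shapes

    def __len__(self):
        return sum(self.shape_patch_count)

    def __getitem__(self, idx):
        return torch.randn(3)  # dummy patch feature

class ToySequentialPatchSampler(Sampler):
    # visits every patch of every shape exactly once, in order
    def __init__(self, data_source):
        self.total_patch_count = sum(data_source.shape_patch_count)

    def __iter__(self):
        return iter(range(self.total_patch_count))

    def __len__(self):
        return self.total_patch_count

dataset = ToyPatchDataset([100, 250])
loader = DataLoader(dataset, sampler=ToySequentialPatchSampler(dataset), batch_size=64)
print(sum(batch.shape[0] for batch in loader))  # 350: every patch is seen once
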
Example #2
def get_data(target_features, opt, train=True):
    # create train and test dataset loaders
    if train:
        shapes_list_file = opt.trainset
    else:
        shapes_list_file = opt.testset

    dataset = PointcloudPatchDataset(root=opt.indir,
                                     shapes_list_file=shapes_list_file,
                                     patch_radius=opt.patch_radius,
                                     points_per_patch=opt.points_per_patch,
                                     patch_features=target_features,
                                     point_count_std=opt.patch_point_count_std,
                                     seed=opt.seed,
                                     identical_epochs=opt.identical_epochs,
                                     use_pca=opt.use_pca,
                                     center=opt.patch_center,
                                     point_tuple=opt.point_tuple,
                                     cache_capacity=opt.cache_capacity)
    if opt.training_order == 'random':
        datasampler = RandomPointcloudPatchSampler(
            dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        datasampler = SequentialShapeRandomPointcloudPatchSampler(
            dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))
    dataloader = torch.utils.data.DataLoader(dataset,
                                             sampler=datasampler,
                                             batch_size=opt.batchSize,
                                             num_workers=int(opt.workers))

    return dataloader, datasampler, dataset
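
get_data chooses between a fully random sampler and a shape-consecutive random sampler, both driven by patches_per_shape and seed. The helper below is a hypothetical, self-contained sketch of that index selection (at most patches_per_shape random patches per shape); it is only an analogy for what RandomPointcloudPatchSampler / SequentialShapeRandomPointcloudPatchSampler do internally.

import torch

def random_patch_indices(shape_patch_count, patches_per_shape, seed):
    # draw at most patches_per_shape random patch indices per shape
    # (indices are global, i.e. offset by the patch counts of earlier shapes)
    gen = torch.Generator().manual_seed(seed)
    indices, offset = [], 0
    for count in shape_patch_count:
        perm = torch.randperm(count, generator=gen)[:patches_per_shape]
        indices.extend((offset + perm).tolist())
        offset += count
    return indices

idx = random_patch_indices([100, 250], patches_per_shape=64, seed=3627473)
print(len(idx))  # 64 + 64 = 128 patch indices, grouped by shape
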
Example #3
def eval_Net(opt):

    logfile = open('log.txt', mode='at')
    now = time.localtime()
    logDate = "%04d-%02d-%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
    logfile.write("\n%s\n" % logDate)
    startTime = time.time()
    torch.cuda.set_device(opt.GPU_ID)

    # get a list of model names
    model_name = opt.model
    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    #model_filename = os.path.join(opt.modeldir, opt.model + "_model.pth")
    #param_filename = os.path.join(opt.modeldir, opt.model+opt.parmpostfix)
    model_filename = glob(os.path.join(opt.modeldir, opt.model) + "*" + opt.modelpostfix)[0]
    param_filename = glob(os.path.join(opt.modeldir, opt.model) + "*" + opt.parmpostfix)[0]
    
    # load model and training parameters
    trainopt = torch.load(param_filename)
    trainopt.outputs = ['clean_points']
    if opt.batchSize == 0:
        model_batchSize = trainopt.batchSize
    else:
        model_batchSize = opt.batchSize
    # get indices in targets and predictions corresponding to each output
    pred_dim = 0
    output_pred_ind = []
    for o in trainopt.outputs:
        if o in ['clean_points']:
            output_pred_ind.append(pred_dim)
            pred_dim += 3
        else:
            raise ValueError('Unknown output: %s' % (o))
    dataset = PointcloudPatchDataset(
        root=opt.outdir, shapes_list_file=opt.dataset,
        patch_radius=trainopt.patch_radius,
        points_per_patch=trainopt.points_per_patch,
        patch_features=['original'],
        seed=opt.seed,
        use_pca=trainopt.use_pca,
        center=trainopt.patch_center,
        point_tuple=trainopt.point_tuple,
        sparse_patches=opt.sparse_patches,
        cache_capacity=opt.cache_capacity,
        shape_names=[opt.shapename.format(i=opt.nrun - 1)],
        logfile=logfile)

    if opt.sampling == 'full':
        datasampler = SequentialPointcloudPatchSampler(dataset)
    elif opt.sampling == 'sequential_shapes_random_patches':
        datasampler = SequentialShapeRandomPointcloudPatchSampler(
            dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            sequential_shapes=True,
            identical_epochs=False)
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        sampler=datasampler,
        batch_size=model_batchSize,
        num_workers=int(opt.workers))

    regressor = ResNet(
        num_points=trainopt.points_per_patch,
        output_dim=pred_dim,
        use_point_stn=trainopt.use_point_stn,
        use_feat_stn=trainopt.use_feat_stn,
        sym_op=trainopt.sym_op,
        point_tuple=trainopt.point_tuple)
    regressor.load_state_dict(torch.load(model_filename))
    regressor.cuda()

    shape_ind = 0
    shape_patch_offset = 0
    if opt.sampling == 'full':
        shape_patch_count = dataset.shape_patch_count[shape_ind]
    elif opt.sampling == 'sequential_shapes_random_patches':
        shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)

    # modify to add pointID info
    shape_properties = torch.FloatTensor(shape_patch_count, pred_dim).zero_()

    # append model name to output directory and create directory if necessary
    dataInputTime = time.time() - startTime
    inputTimeSplit = divmod(dataInputTime, 60)
    print("##################################################")
    print("Data Input WorkingTime: %d min %d sec" % (inputTimeSplit[0], inputTimeSplit[1]))
    print("##################################################")


    num_batch = len(dataloader)
    batch_enum = enumerate(dataloader, 0)
    inferenceStart = time.time()

    print("batch_enum: ", end="")
    print(batch_enum)

    regressor.eval()
    for batchind, data in batch_enum:

        # get batch, convert to variables and upload to GPU
        points, originals, patch_radiuses, data_trans = data
        # volatile=True is the legacy (pre-PyTorch-0.4) way of disabling autograd during inference
        points = Variable(points, volatile=True)
        points = points.transpose(2, 1)
        points = points.cuda()

        data_trans = data_trans.cuda()
        pred, trans, _, _ = regressor(points)

        pred = pred.data
        if trans is not None:
            trans = trans.data

        # post-processing of the prediction
        tmp_pointID = None
        for oi, o in enumerate(trainopt.outputs):
            # modify to add pointID info
            o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3]

            if trainopt.use_point_stn:
                # transform predictions with inverse transform
                # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), trans.transpose(2, 1)).squeeze(1)
            if trainopt.use_pca:
                # transform predictions with inverse pca rotation (back to world space)
                o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), data_trans.transpose(2, 1)).squeeze(1)
            n_points = patch_radiuses.shape[0]
            # new coordinates are: old coordinates + displacement vector
            o_pred = torch.mul(o_pred, torch.t(patch_radiuses.expand(3, n_points)).float().cuda()) + originals.cuda()
            pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3] = o_pred

        print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch-1, dataset.shape_names[shape_ind]))

        batch_offset = 0
        while batch_offset < pred.size(0):

            shape_patches_remaining = shape_patch_count-shape_patch_offset
            batch_patches_remaining = pred.size(0)-batch_offset
            # append estimated patch properties batch to properties for the current shape on the CPU
            shape_properties[shape_patch_offset:shape_patch_offset+min(shape_patches_remaining, batch_patches_remaining), :] = pred[
                batch_offset:batch_offset+min(shape_patches_remaining, batch_patches_remaining), :]

            batch_offset = batch_offset + min(shape_patches_remaining, batch_patches_remaining)
            shape_patch_offset = shape_patch_offset + min(shape_patches_remaining, batch_patches_remaining)
            if shape_patches_remaining <= batch_patches_remaining:

                # save shape properties to disk
                prop_saved = [False]*len(trainopt.outputs)

                # save clean points
                oi = [k for k, o in enumerate(trainopt.outputs) if o in ['clean_points']]
                if len(oi) > 1:
                    raise ValueError('Duplicate point output.')
                elif len(oi) == 1:
                    oi = oi[0]

                    # modify to add pointID info
                    normal_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi] + 3]

                    # Compute mean displacements, inspired from Taubin smoothing
                    # modify to add pointID info
                    normal_prop = get_meaned_displacements(dataset, normal_prop, opt.n_neighbours)
                    pointID_filename = os.path.join(opt.outdir, opt.shapename.format(i=opt.nrun - 1) + '.xyz_pointID' + '.npy')
                    pointIDs = torch.tensor(np.load(pointID_filename).astype('float64'))

                    pts_ID_XYZ = torch.cat([pointIDs, normal_prop.type(torch.DoubleTensor)], dim=1)
                    np.savetxt(os.path.join(opt.outdir, opt.shapename.format(i = opt.nrun) + '.xyz'), pts_ID_XYZ.numpy())
                    prop_saved[oi] = True

                if not all(prop_saved):
                    raise ValueError('Not all shape properties were saved, some of them seem to be unsupported.')
                # start new shape
                if shape_ind + 1 < len(dataset.shape_names):
                    # log update
                    outlierInferenceSplit = divmod(time.time() - inferenceStart, 60)
                    logfile.write("Outlier Inference(shape: %s) duration: %d min %d sec\n" % (
                    dataset.shape_names[shape_ind], outlierInferenceSplit[0], outlierInferenceSplit[1]))
                    inferenceStart = time.time()
                    shape_patch_offset = 0
                    shape_ind = shape_ind + 1
                    if opt.sampling == 'full':
                        shape_patch_count = dataset.shape_patch_count[shape_ind]
                    elif opt.sampling == 'sequential_shapes_random_patches':
                        shape_patch_count = len(datasampler.shape_patch_inds[shape_ind])
                    else:
                        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
                    shape_properties = torch.FloatTensor(shape_patch_count, pred_dim).zero_()

        if batchind == num_batch-1:
            outlierInferenceSplit = divmod(time.time() - inferenceStart, 60)
            logfile.write("Outlier Inference(shape: %s) duration: %d min %d sec\n" % (dataset.shape_names[shape_ind], outlierInferenceSplit[0], outlierInferenceSplit[1]))

    totalTime = time.time() - startTime
    gpuInferencingTime = totalTime - dataInputTime
    totalTimeSplit = divmod(totalTime, 60)
    inferenceTimeSplit = divmod(gpuInferencingTime, 60)
    print("##################################################")
    print("Data Input WorkingTime: %d min %d sec" % (inputTimeSplit[0], inputTimeSplit[1]))
    print("GPU Inferencing WorkingTime: %d min %d sec" % (inferenceTimeSplit[0], inferenceTimeSplit[1]))
    print("Total Processing Time : %d min %d sec" % (totalTimeSplit[0], totalTimeSplit[1]))
    print("##################################################")

    logfile.write("\nTotal Processing Time : %d min %d sec\n" % (totalTimeSplit[0], totalTimeSplit[1]))
    logfile.write("\nProcess End\n")
    logfile.close()
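
eval_Net disables gradient tracking with the legacy Variable(points, volatile=True) idiom from pre-0.4 PyTorch. On current PyTorch the same inference loop is written with torch.no_grad(); a minimal sketch, using a toy stand-in network instead of the repository's ResNet:

import torch
import torch.nn as nn

# toy stand-in for the repository's ResNet regressor
regressor = nn.Sequential(nn.Conv1d(3, 16, 1), nn.AdaptiveMaxPool1d(1), nn.Flatten(), nn.Linear(16, 3))
regressor.eval()

points = torch.randn(8, 512, 3)   # a batch of patches as the dataloader would deliver it
points = points.transpose(2, 1)   # to (batch, 3, points_per_patch), the layout the network expects

with torch.no_grad():             # modern replacement for Variable(points, volatile=True)
    pred = regressor(points)
print(pred.shape)                 # torch.Size([8, 3])
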
Example #4
def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir,
                                      model_name + opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir,
                                      model_name + opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            elif o == 'max_curvature' or o == 'min_curvature':
                output_pred_ind.append(pred_dim)
                pred_dim += 1
            else:
                raise ValueError('Unknown output: %s' % (o))

        dataset = PointcloudPatchDataset(
            root=opt.indir,
            shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            patch_features=[],
            seed=opt.seed,
            use_pca=trainopt.use_pca,
            center=trainopt.patch_center,
            point_tuple=trainopt.point_tuple,
            sparse_patches=opt.sparse_patches,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 sampler=datasampler,
                                                 batch_size=model_batchSize,
                                                 num_workers=int(opt.workers))

        if len(trainopt.patch_radius) == 1:
            regressor = PCPNet(num_points=trainopt.points_per_patch,
                               output_dim=pred_dim,
                               use_point_stn=trainopt.use_point_stn,
                               use_feat_stn=trainopt.use_feat_stn,
                               sym_op=trainopt.sym_op,
                               point_tuple=trainopt.point_tuple)
        else:
            regressor = MSPCPNet(num_scales=len(trainopt.patch_radius),
                                 num_points=trainopt.points_per_patch,
                                 output_dim=pred_dim,
                                 use_point_stn=trainopt.use_point_stn,
                                 use_feat_stn=trainopt.use_feat_stn,
                                 sym_op=trainopt.sym_op,
                                 point_tuple=trainopt.point_tuple)

        regressor.load_state_dict(torch.load(model_filename))
        regressor.cuda()
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape,
                                    dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.FloatTensor(shape_patch_count,
                                             pred_dim).zero_()

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch, convert to variables and upload to GPU
            points, data_trans = data
            points = Variable(points, volatile=True)
            points = points.transpose(2, 1)
            points = points.cuda()

            data_trans = data_trans.cuda()

            pred, trans, _, _ = regressor(points)

            # don't need to work with autograd variables anymore
            pred = pred.data
            if trans is not None:
                trans = trans.data

            # post-processing of the prediction
            for oi, o in enumerate(trainopt.outputs):
                if o == 'unoriented_normals' or o == 'oriented_normals':
                    o_pred = pred[:,
                                  output_pred_ind[oi]:output_pred_ind[oi] + 3]

                    if trainopt.use_point_stn:
                        # transform predictions with inverse transform
                        # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                        o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                                 trans.transpose(2,
                                                                 1)).squeeze(1)

                    if trainopt.use_pca:
                        # transform predictions with inverse pca rotation (back to world space)
                        o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                                 data_trans.transpose(
                                                     2, 1)).squeeze(1)

                    # normalize normals
                    o_pred_len = torch.max(
                        torch.cuda.FloatTensor([sys.float_info.epsilon * 100]),
                        o_pred.norm(p=2, dim=1, keepdim=True))
                    o_pred = o_pred / o_pred_len

                elif o == 'max_curvature' or o == 'min_curvature':
                    o_pred = pred[:,
                                  output_pred_ind[oi]:output_pred_ind[oi] + 1]

                    # undo patch size normalization:
                    o_pred[:, :] = o_pred / dataset.patch_radius_absolute[
                        shape_ind][0]

                else:
                    raise ValueError('Unsupported output type: %s' % (o))

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch - 1,
                                           dataset.shape_names[shape_ind]))

            batch_offset = 0
            while batch_offset < pred.size(0):

                shape_patches_remaining = shape_patch_count - shape_patch_offset
                batch_patches_remaining = pred.size(0) - batch_offset

                # append estimated patch properties batch to properties for the current shape on the CPU
                shape_properties[shape_patch_offset:shape_patch_offset + min(
                    shape_patches_remaining, batch_patches_remaining
                ), :] = pred[
                    batch_offset:batch_offset +
                    min(shape_patches_remaining, batch_patches_remaining), :]

                batch_offset = batch_offset + min(shape_patches_remaining,
                                                  batch_patches_remaining)
                shape_patch_offset = shape_patch_offset + min(
                    shape_patches_remaining, batch_patches_remaining)

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    prop_saved = [False] * len(trainopt.outputs)

                    # save normals
                    oi = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o in ['unoriented_normals', 'oriented_normals']
                    ]
                    if len(oi) > 1:
                        raise ValueError('Duplicate normal output.')
                    elif len(oi) == 1:
                        oi = oi[0]
                        normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                       output_pred_ind[oi] + 3]
                        np.savetxt(
                            os.path.join(
                                model_outdir,
                                dataset.shape_names[shape_ind] + '.normals'),
                            normal_prop.numpy())
                        prop_saved[oi] = True

                    # save curvatures
                    oi1 = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o == 'max_curvature'
                    ]
                    oi2 = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o == 'min_curvature'
                    ]
                    if len(oi1) > 1 or len(oi2) > 1:
                        raise ValueError(
                            'Duplicate minimum or maximum curvature output.')
                    elif len(oi1) == 1 or len(oi2) == 1:
                        curv_prop = torch.FloatTensor(shape_properties.size(0),
                                                      2).zero_()
                        if len(oi1) == 1:
                            oi1 = oi1[0]
                            # single-column index so the 1-D slice assigns cleanly into curv_prop[:, 0]
                            curv_prop[:, 0] = shape_properties[:, output_pred_ind[oi1]]
                            prop_saved[oi1] = True
                        if len(oi2) == 1:
                            oi2 = oi2[0]
                            curv_prop[:, 1] = shape_properties[:, output_pred_ind[oi2]]
                            prop_saved[oi2] = True
                        np.savetxt(
                            os.path.join(
                                model_outdir,
                                dataset.shape_names[shape_ind] + '.curv'),
                            curv_prop.numpy())

                    if not all(prop_saved):
                        raise ValueError(
                            'Not all shape properties were saved, some of them seem to be unsupported.'
                        )

                    # save point indices
                    if opt.sampling != 'full':
                        np.savetxt(os.path.join(
                            model_outdir,
                            dataset.shape_names[shape_ind] + '.idx'),
                                   datasampler.shape_patch_inds[shape_ind],
                                   fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[
                                shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(
                                datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' %
                                             opt.sampling)
                        shape_properties = torch.FloatTensor(
                            shape_patch_count, pred_dim).zero_()
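
The normal post-processing above clamps the predicted normal's length to sys.float_info.epsilon * 100 before dividing, so near-degenerate predictions do not produce inf/nan normals. A standalone CPU sketch of the same idea (torch.clamp is used here in place of the torch.max call in the example, which is equivalent for this purpose):

import sys
import torch

def normalize_normals(o_pred, eps=sys.float_info.epsilon * 100):
    # scale each predicted normal to unit length, clamping tiny norms to eps
    lengths = torch.clamp(o_pred.norm(p=2, dim=1, keepdim=True), min=eps)
    return o_pred / lengths

pred = torch.tensor([[0.0, 0.0, 2.0], [1e-20, 0.0, 0.0]])
print(normalize_normals(pred))
# first row becomes a unit vector; the second stays finite instead of turning into inf/nan
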
Example #5
def eval_pcpnet(opt):

    if opt.distributed:
        torch.distributed.init_process_group(backend="nccl")
        # 2) configure the GPU used by each process
        local_rank = torch.distributed.get_rank()
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
        print('stage 2 passed')
    else:
        device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" %
                              opt.gpu_idx)

    opt.models = opt.models.split()
    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir,
                                      model_name + opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir,
                                      model_name + opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)
        trainopt.indir2 = '../data2'
        if opt.distributed and local_rank == 0:
            print(trainopt)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        # trainopt.outputs = {'unoriented_normals'}
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3

            else:
                raise ValueError('Unknown output: %s' % (o))

        if len(trainopt.points_per_patch) == 1:
            if trainopt.generate_points_dim == 1:
                regressor = WDSAC(
                    hyps=trainopt.hypotheses + 10,
                    inlier_params=trainopt.inlier_params,
                    patch_radius=trainopt.patch_radius,
                    decoder=trainopt.decoder,
                    use_mask=trainopt.use_mask,
                    dim_pts=trainopt.in_points_dim,
                    num_gpts=trainopt.generate_points_num,
                    dim_gpts=trainopt.generate_points_dim,
                    points_per_patch=trainopt.points_per_patch[0],
                    sym_op=trainopt.sym_op,
                    normal_loss=trainopt.normal_loss,
                    seed=trainopt.seed,
                    device=device,
                    use_point_stn=trainopt.use_point_stn,
                    use_feat_stn=trainopt.use_feat_stn)
            else:
                regressor = DSAC(hyps=trainopt.hypotheses + 10,
                                 inlier_params=trainopt.inlier_params,
                                 patch_radius=trainopt.patch_radius,
                                 decoder=trainopt.decoder,
                                 use_mask=trainopt.use_mask,
                                 dim_pts=trainopt.in_points_dim,
                                 num_gpts=trainopt.generate_points_num,
                                 dim_gpts=trainopt.generate_points_dim,
                                 points_per_patch=trainopt.points_per_patch[0],
                                 sym_op=trainopt.sym_op,
                                 normal_loss=trainopt.normal_loss,
                                 seed=trainopt.seed,
                                 device=device,
                                 use_point_stn=trainopt.use_point_stn,
                                 use_feat_stn=trainopt.use_feat_stn)
        else:
            regressor = MoEDSAC(hyps=trainopt.hypotheses + 10,
                                inlier_params=trainopt.inlier_params,
                                patch_radius=trainopt.patch_radius,
                                share_pts_stn=trainopt.share_pts_stn,
                                decoder=trainopt.decoder,
                                use_mask=trainopt.use_mask,
                                dim_pts=trainopt.in_points_dim,
                                num_gpts=trainopt.generate_points_num,
                                points_per_patch=trainopt.points_per_patch,
                                sym_op=trainopt.sym_op,
                                normal_loss=trainopt.normal_loss,
                                seed=trainopt.seed,
                                device=device,
                                use_point_stn=trainopt.use_point_stn,
                                use_feat_stn=trainopt.use_feat_stn)
            # if len(opt.expert_refine)>1:
            #     dsac.refine(opt.expert_refine)

        regressor.to(device)

        if opt.distributed:
            if torch.cuda.device_count() > 1:
                print("Let's use", torch.cuda.device_count(), "GPUs!")
            regressor = torch.nn.parallel.DistributedDataParallel(
                regressor, device_ids=[local_rank], output_device=local_rank)
            #print(local_rank)

        state_dict = torch.load(model_filename, map_location=device)
        # print(state_dict.keys())
        # print(regressor.state_dict().keys())
        regressor.load_state_dict(state_dict)
        print('stage 3 passed')

        regressor.eval()

        dataset = PointcloudPatchDataset(
            root=opt.indir,
            root_in=trainopt.indir2,
            shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            dim_pts=trainopt.in_points_dim,
            knn=trainopt.knn,
            #patch_features=[],
            seed=opt.seed,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)

        if opt.distributed:
            dataloader = torch.utils.data.DataLoader(
                dataset,
                sampler=DistributedSampler(datasampler),
                batch_size=model_batchSize,
                num_workers=int(opt.workers))
        else:
            dataloader = torch.utils.data.DataLoader(
                dataset,
                sampler=datasampler,
                batch_size=model_batchSize,
                num_workers=int(opt.workers))

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape,
                                    dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count,
                                       3,
                                       dtype=torch.float,
                                       device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            points, target, _, dist = data
            points = points.transpose(2, 1)
            points = points.to(device)
            target = target.to(device)
            dist = dist.to(device)

            with torch.no_grad():
                exp_loss, top_loss, pred, pts, _, _, _ = regressor(
                    points, target, dist)

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch - 1,
                                           dataset.shape_names[shape_ind]))

            batch_offset = 0
            while batch_offset < pred.size(0):

                shape_patches_remaining = shape_patch_count - shape_patch_offset
                batch_patches_remaining = pred.size(0) - batch_offset

                # append estimated patch properties batch to properties for the current shape
                shape_properties[shape_patch_offset:shape_patch_offset + min(
                    shape_patches_remaining, batch_patches_remaining
                ), :] = pred[
                    batch_offset:batch_offset +
                    min(shape_patches_remaining, batch_patches_remaining), :]

                batch_offset = batch_offset + min(shape_patches_remaining,
                                                  batch_patches_remaining)
                shape_patch_offset = shape_patch_offset + min(
                    shape_patches_remaining, batch_patches_remaining)

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    prop_saved = [False] * len(trainopt.outputs)

                    # save normals
                    oi = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o in ['unoriented_normals', 'oriented_normals']
                    ]
                    if len(oi) > 1:
                        raise ValueError('Duplicate normal output.')
                    elif len(oi) == 1:
                        oi = oi[0]
                        normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                       output_pred_ind[oi] + 3]

                        np.savetxt(
                            os.path.join(
                                model_outdir,
                                dataset.shape_names[shape_ind] + '.normals'),
                            normal_prop.cpu().numpy())
                        print('saved normals for ' +
                              dataset.shape_names[shape_ind])
                        prop_saved[oi] = True

                    # save curvatures

                    if not all(prop_saved):
                        raise ValueError(
                            'Not all shape properties were saved, some of them seem to be unsupported.'
                        )

                    # save point indices
                    if opt.sampling != 'full':
                        np.savetxt(os.path.join(
                            model_outdir,
                            dataset.shape_names[shape_ind] + '.idx'),
                                   datasampler.shape_patch_inds[shape_ind],
                                   fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[
                                shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(
                                datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' %
                                             opt.sampling)
                        shape_properties = shape_properties.new_zeros(
                            shape_patch_count, pred_dim)
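
This example and the training function in example #6 below follow the usual PyTorch DDP recipe: initialize the process group, pin each process to one GPU, wrap the model in DistributedDataParallel, and hand the DataLoader a DistributedSampler. A minimal self-contained sketch of that recipe follows; launching it with torchrun --nproc_per_node=<num_gpus> script.py is an assumption about how these scripts are started. Passing a patch sampler to DistributedSampler, as the example does, runs because DistributedSampler only uses the length of whatever it wraps and its yielded indices still index the dataset directly.

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

def main():
    dist.init_process_group(backend="nccl")   # one process per GPU
    local_rank = dist.get_rank()              # global rank; equals the local rank on a single node
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)

    model = torch.nn.Linear(3, 3).to(device)  # toy stand-in for the DSAC/WDSAC/MoEDSAC regressor
    model = DDP(model, device_ids=[local_rank], output_device=local_rank)

    dataset = TensorDataset(torch.randn(1024, 3))
    # DistributedSampler splits the index range across ranks
    loader = DataLoader(dataset, sampler=DistributedSampler(dataset), batch_size=64)

    model.eval()
    with torch.no_grad():
        for (batch,) in loader:
            _ = model(batch.to(device))

if __name__ == "__main__":
    main()
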
Example #6
def train(opt):
    # 1) configure the GPU used by each process
    local_rank = torch.distributed.get_rank()
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)
    print('stage 1 passed')

    # 2) initialization
    # colored console output
    green = lambda x: '\033[92m' + x + '\033[0m'
    blue = lambda x: '\033[94m' + x + '\033[0m'

    log_dirname = os.path.join(opt.logdir, opt.name)
    params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
    model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
    desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))

    if local_rank == 0:
        if os.path.exists(log_dirname) or os.path.exists(model_filename):
            if os.path.exists(log_dirname):
                shutil.rmtree(os.path.join(opt.logdir, opt.name))

    # if os.path.exists(log_dirname) or os.path.exists(model_filename):
    #     if os.path.exists(log_dirname):
    #         shutil.rmtree(os.path.join(opt.logdir, opt.name))
    print('stage 2 passed')

    # 3) build the model and move it to its GPU before wrapping it with DDP
    if len(opt.points_per_patch) == 1:
        if opt.generate_points_dim == 1:
            dsac = WDSAC(hyps=opt.hypotheses,
                         inlier_params=opt.inlier_params,
                         patch_radius=opt.patch_radius,
                         decoder=opt.decoder,
                         use_mask=opt.use_mask,
                         dim_pts=opt.in_points_dim,
                         num_gpts=opt.generate_points_num,
                         dim_gpts=opt.generate_points_dim,
                         points_per_patch=opt.points_per_patch[0],
                         sym_op=opt.sym_op,
                         normal_loss=opt.normal_loss,
                         seed=opt.seed,
                         device=device,
                         use_point_stn=opt.use_point_stn,
                         use_feat_stn=opt.use_feat_stn)
        else:
            dsac = DSAC(hyps=opt.hypotheses,
                        inlier_params=opt.inlier_params,
                        patch_radius=opt.patch_radius,
                        decoder=opt.decoder,
                        use_mask=opt.use_mask,
                        dim_pts=opt.in_points_dim,
                        num_gpts=opt.generate_points_num,
                        dim_gpts=opt.generate_points_dim,
                        points_per_patch=opt.points_per_patch[0],
                        sym_op=opt.sym_op,
                        normal_loss=opt.normal_loss,
                        seed=opt.seed,
                        device=device,
                        use_point_stn=opt.use_point_stn,
                        use_feat_stn=opt.use_feat_stn)
    else:
        dsac = MoEDSAC(hyps=opt.hypotheses,
                       inlier_params=opt.inlier_params,
                       patch_radius=opt.patch_radius,
                       share_pts_stn=opt.share_pts_stn,
                       decoder=opt.decoder,
                       use_mask=opt.use_mask,
                       dim_pts=opt.in_points_dim,
                       num_gpts=opt.generate_points_num,
                       points_per_patch=opt.points_per_patch,
                       sym_op=opt.sym_op,
                       normal_loss=opt.normal_loss,
                       seed=opt.seed,
                       device=device,
                       use_point_stn=opt.use_point_stn,
                       use_feat_stn=opt.use_feat_stn)

    dsac.to(device)

    if torch.cuda.device_count() > 1:
        if opt.refine != '':  # if the refine model is not from ddp
            dsac.load_state_dict(torch.load(opt.refine, map_location=device))
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # 3) wrap the model with DistributedDataParallel
        dsac = torch.nn.parallel.DistributedDataParallel(
            dsac, device_ids=[local_rank], output_device=local_rank)
        # if opt.refine != '': # if the refine model is not from ddp?
        #     dsac.load_state_dict( torch.load(opt.refine, map_location=device) )
    else:
        if opt.refine != '':
            dsac.load_state_dict(torch.load(opt.refine))

    print('stage 3 passed')

    # 4) use DistributedSampler for the dataloaders
    train_dataset = PointcloudPatchDataset(
        root=opt.indir,
        root_in=opt.indir2,
        shape_list_filename=opt.trainset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        dim_pts=opt.in_points_dim,
        knn=opt.knn,
        point_count_std=opt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        cache_capacity=opt.cache_capacity)

    train_datasampler = RandomPointcloudPatchSampler(
        train_dataset,
        patches_per_shape=opt.patches_per_shape,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs)

    train_dataloader = DataLoader(
        train_dataset,
        sampler=DistributedSampler(train_datasampler),
        batch_size=opt.batchSize,
        num_workers=int(opt.workers))

    test_dataset = PointcloudPatchDataset(
        root=opt.indir,
        root_in=opt.indir2,
        shape_list_filename=opt.testset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        dim_pts=opt.in_points_dim,
        knn=opt.knn,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        cache_capacity=opt.cache_capacity)

    test_datasampler = RandomPointcloudPatchSampler(
        test_dataset,
        patches_per_shape=opt.patches_per_shape,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs)

    test_dataloader = DataLoader(test_dataset,
                                 sampler=DistributedSampler(test_datasampler),
                                 batch_size=opt.batchSize,
                                 num_workers=int(opt.workers))

    # keep the exact training shape names for later reference
    opt.train_shapes = train_dataset.shape_names
    opt.test_shapes = test_dataset.shape_names

    print(
        'training set: %d patches (in %d batches) - test set: %d patches (in %d batches)'
        % (len(train_datasampler), len(train_dataloader),
           len(test_datasampler), len(test_dataloader)))

    try:
        os.makedirs(opt.outdir)
    except OSError:
        pass

    if local_rank == 0:
        train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
        test_writer = SummaryWriter(os.path.join(log_dirname, 'test'))

    lrate = opt.lr
    if opt.opti == 'SGD':
        optimizer = optim.SGD(dsac.parameters(),
                              lr=lrate,
                              momentum=opt.momentum)
    else:
        optimizer = optim.Adam(dsac.parameters(), lr=lrate)

    scheduler = lr_scheduler.MultiStepLR(
        optimizer, milestones=[1000],
        gamma=0.1)  # milestones in number of optimizer iterations

    train_num_batch = len(train_dataloader)
    test_num_batch = len(test_dataloader)
    # save parameters
    torch.save(opt, params_filename)

    # save description
    with open(desc_filename, 'w+') as text_file:
        print(opt.desc, file=text_file)

    for epoch in range(opt.nepoch):
        train_batchind = -1
        train_fraction_done = 0.0
        train_enum = enumerate(train_dataloader, 0)

        test_batchind = -1
        test_fraction_done = 0.0
        test_enum = enumerate(test_dataloader, 0)

        for train_batchind, data in train_enum:
            # set to training mode
            dsac.train()

            points = data[0]  # here points is a 64 x 512 x 3 tensor (batch x points per patch x xyz)
            target = data[1]
            mask = data[2]
            dist = data[3]

            points = points.transpose(2, 1)
            points = points.to(device)
            target = target.to(device)
            mask = mask.to(device)
            dist = dist.to(device)

            # zero gradients (without this, gradients would accumulate across batches)
            optimizer.zero_grad()

            exp_loss, top_loss, pred, pts, mask_p, patch_rot, _ = dsac(
                points, target, dist)
            loss = exp_loss.mean()

            # backpropagate through entire network to compute gradients of loss w.r.t. parameters
            loss.backward()

            # parameter optimization step
            optimizer.step()

            train_fraction_done = (train_batchind + 1) / train_num_batch

            # print info and update log file
            print('[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f' %
                  (opt.name, epoch, train_batchind, train_num_batch - 1,
                   green('train'), loss, exp_loss.mean().item(),
                   top_loss.mean()))
            if local_rank == 0:
                x1 = (epoch +
                      train_fraction_done) * train_num_batch * opt.batchSize
                train_writer.add_scalars(
                    'loss', {
                        'meanLoss': exp_loss.mean().item(),
                        'topLoss': top_loss.mean().item()
                    }, x1)

            while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:

                # set to evaluation mode
                dsac.eval()

                test_batchind, data = next(test_enum)

                points = data[0]  # here points is a 64 x 512 x 3 tensor (batch x points per patch x xyz)
                target = data[1]
                mask = data[2]
                dist = data[3]

                points = points.transpose(2, 1)
                points = points.to(device)
                target = target.to(device)
                mask = mask.to(device)
                dist = dist.to(device)

                # forward pass
                with torch.no_grad():
                    exp_loss, top_loss, pred, pts, mask_p, patch_rot, _ = dsac(
                        points, target, dist)

                loss = exp_loss.mean()

                test_fraction_done = (test_batchind + 1) / test_num_batch

                # print info and update log file
                print('[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f' %
                      (opt.name, epoch, train_batchind, train_num_batch - 1,
                       blue('test'), loss, exp_loss.mean().item(),
                       top_loss.mean()))
                if local_rank == 0:
                    x1 = (epoch +
                          test_fraction_done) * train_num_batch * opt.batchSize
                    # test_writer.add_scalar('loss', exp_loss.mean().item(), x1)
                    test_writer.add_scalars(
                        'loss', {
                            'meanLoss': exp_loss.mean().item(),
                            'topLoss': top_loss.mean().item()
                        }, x1)

        # update learning rate
        scheduler.step()
        # save model, overwriting the old model
        if local_rank == 0:
            if epoch % opt.saveinterval == 0 or epoch == opt.nepoch - 1:
                print('model saved')
                torch.save(dsac.state_dict(), model_filename)

        # save model in a separate file in epochs 0,5,10,50,100,500,1000, ...
        if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))
                    ) == 0 or epoch % 100 == 0 or epoch == opt.nepoch - 1:
            torch.save(
                dsac.state_dict(),
                os.path.join(opt.outdir,
                             '%s_model_%d.pth' % (opt.name, epoch)))
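
The last branch above saves extra snapshots whenever epoch % (5 * 10**floor(log10(max(2, epoch - 1)))) == 0, i.e. at epochs 0, 5, 10, 50, 100 and then every 100 epochs (plus the final epoch); note that, unlike the rolling save guarded by local_rank == 0, this branch runs on every DDP process. A tiny sketch that just evaluates the schedule:

import math

def is_snapshot_epoch(epoch, nepoch=2000):
    # mirrors the snapshot condition used in train() above
    base = 5 * 10 ** math.floor(math.log10(max(2, epoch - 1)))
    return epoch % base == 0 or epoch % 100 == 0 or epoch == nepoch - 1

print([e for e in range(0, 1001) if is_snapshot_epoch(e)][:12])
# -> [0, 5, 10, 50, 100, 200, 300, 400, 500, 600, 700, 800]
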
Example #7
File: draw.py  Project: bbaaii/DRNE19
def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" % opt.gpu_idx)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir, model_name+opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir, model_name+opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            elif o == 'max_curvature' or o == 'min_curvature':
                output_pred_ind.append(pred_dim)
                pred_dim += 1
            else:
                raise ValueError('Unknown output: %s' % (o))
        #print(trainopt.patch_radius)
        dataset = PointcloudPatchDataset(
            root=opt.indir, shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            #patch_features=[],
            seed=opt.seed,
            #use_pca=trainopt.use_pca,
            center=trainopt.patch_center,
            #point_tuple=trainopt.point_tuple,
            #sparse_patches=opt.sparse_patches,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            sampler=datasampler,
            batch_size=model_batchSize,
            num_workers=int(opt.workers))

        regressor = DSAC(
            trainopt.hypotheses,
            trainopt.inlierthreshold,
            trainopt.inlierbeta,
            trainopt.inlieralpha,
            trainopt.normal_loss,
            trainopt.seed,
            device,
            use_point_stn=trainopt.use_point_stn,
            use_feat_stn=trainopt.use_feat_stn,
            use_mask=trainopt.use_mask)

        regressor.load_state_dict(torch.load(model_filename))
        regressor.to(device)
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count, pred_dim, dtype=torch.float, device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            #index =[]
            points, data_trans, mask_t = data

            points = points.transpose(2, 1)
            points = points.to(device)

            data_trans = data_trans.to(device)
            mask_t = mask_t.to(device)

            with torch.no_grad():
                exp_loss, top_loss, normal, pts, mask = regressor(points, data_trans)
            viz = Visdom()
            assert viz.check_connection()
            # label vectors for the Visdom scatter plot: 1 = input patch points,
            # 2 = points generated by the regressor, 5 = origin marker
            # (Z is the variant used by the commented-out ground-truth plot below)
            Y = torch.zeros(512 + 32 + 1)
            Z = torch.zeros(512 + 1)
            Z[0:512] += 1
            Z[512] += 2
            Y[0:512] += 1
            Y[512:512 + 32] += 2
            Y[512 + 32] += 5
            
            print(top_loss.mean())
            for i in range(points.size(0)):
            #print("input",x[i].transpose(0,1)[0:100])
                # print("predict",i,normal[i])
                # print("ground truth",i,data_trans[i])
                # print("top_loss_loss",i,top_loss[i],"\n")
                # print(mask_t[i])
                # print(mask[i].view(-1))
                viz.scatter(
                    X=torch.cat((points[i].transpose(0, 1), pts[i], torch.zeros(1, 3).cuda()), 0),
                    Y=Y,
                    opts=dict(
                        title=str(i),
                        # 'legend': ['Men', 'Women'],
                        markersize=2,
                        # markercolor=np.random.randint(0, 255, (3, 3,)),
                    )
                )
            # #     # viz.scatter(
            # #     #     X=torch.mul(points[i].transpose(0,1),mask[i]),
                    
            # #     #     opts=dict(
            # #     #         title = str(i),
            # #     #     #'legend': ['Men', 'Women'],
            # #     #         markersize= 2,
            # #     #     #markercolor=np.random.randint(0, 255, (3, 3,)),
            # #     #     )
            # #     # )
            #     viz.scatter(
            #         X=torch.cat((torch.mul(points[i].transpose(0,1),mask_t[i].view(-1,1)),torch.zeros(1,3).cuda(2)),0),
            #         Y=Z,
            #         opts=dict(
            #             title = str(i)+"true",
            #         #'legend': ['Men', 'Women'],
            #             markersize= 2,
            #         #markercolor=np.random.randint(0, 255, (3, 3,)),
            #         )
            #     )

            #print("pts",i,pts[i])
                
            # # post-processing of the prediction
            # for oi, o in enumerate(trainopt.outputs):
            #     if o == 'unoriented_normals' or o == 'oriented_normals':
            #         o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3]

            #         if trainopt.use_point_stn:
            #             # transform predictions with inverse transform
            #             # since we know the transform to be a rotation (QSTN), the transpose is the inverse
            #             o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), trans.transpose(2, 1)).squeeze(dim=1)

            #         if trainopt.use_pca:
            #             # transform predictions with inverse pca rotation (back to world space)
            #             o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), data_trans.transpose(2, 1)).squeeze(dim=1)

            #         # normalize normals
            #         o_pred_len = torch.max(o_pred.new_tensor([sys.float_info.epsilon*100]), o_pred.norm(p=2, dim=1, keepdim=True))
            #         o_pred = o_pred / o_pred_len

            #     elif o == 'max_curvature' or o == 'min_curvature':
            #         o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+1]

            #         # undo patch size normalization:
            #         o_pred[:, :] = o_pred / dataset.patch_radius_absolute[shape_ind][0]

            #     else:
            #         raise ValueError('Unsupported output type: %s' % (o))

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch-1, dataset.shape_names[shape_ind]))

            batch_offset = 0
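            # Batches can straddle shape boundaries: copy only as many predictions as still fit
            # into the current shape's buffer, then start a fresh buffer for the next shape
            # (handled further down in this loop).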
            while batch_offset < normal.size(0):

                shape_patches_remaining = shape_patch_count-shape_patch_offset
                batch_patches_remaining = normal.size(0)-batch_offset

                # append estimated patch properties batch to properties for the current shape
                shape_properties[shape_patch_offset:shape_patch_offset+min(shape_patches_remaining, batch_patches_remaining), :] =  normal[
                    batch_offset:batch_offset+min(shape_patches_remaining, batch_patches_remaining), :]

                batch_offset = batch_offset + min(shape_patches_remaining, batch_patches_remaining)
                shape_patch_offset = shape_patch_offset + min(shape_patches_remaining, batch_patches_remaining)

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    # prop_saved = [False]*len(trainopt.outputs)

                    # # save normals
                    # oi = [i for i, o in enumerate(trainopt.outputs) if o in ['unoriented_normals', 'oriented_normals']]
                    # if len(oi) > 1:
                    #     raise ValueError('Duplicate normal output.')
                    # elif len(oi) == 1:
                    #     oi = oi[0]
                    #     normal_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
                    #     np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.normals'), normal_prop.cpu().numpy())
                    #     prop_saved[oi] = True

                    # # save curvatures
                    # oi1 = [i for i, o in enumerate(trainopt.outputs) if o == 'max_curvature']
                    # oi2 = [i for i, o in enumerate(trainopt.outputs) if o == 'min_curvature']
                    # if len(oi1) > 1 or len(oi2) > 1:
                    #     raise ValueError('Duplicate minimum or maximum curvature output.')
                    # elif len(oi1) == 1 or len(oi2) == 1:
                    #     curv_prop = shape_properties.new_zeros(shape_properties.size(0), 2)
                    #     if len(oi1) == 1:
                    #         oi1 = oi1[0]
                    #         curv_prop[:, 0] = shape_properties[:, output_pred_ind[oi1]]
                    #         prop_saved[oi1] = True
                    #     if len(oi2) == 1:
                    #         oi2 = oi2[0]
                    #         curv_prop[:, 1] = shape_properties[:, output_pred_ind[oi2]]
                    #         prop_saved[oi2] = True
                    #     np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.curv'), curv_prop.cpu().numpy())

                    # if not all(prop_saved):
                    #     raise ValueError('Not all shape properties were saved, some of them seem to be unsupported.')

                    # # save point indices
                    # if opt.sampling != 'full':
                    #     np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.idx'), datasampler.shape_patch_inds[shape_ind], fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
                        shape_properties = shape_properties.new_zeros(shape_patch_count, pred_dim)
Example #8
0
def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" %
                          opt.gpu_idx)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir,
                                      model_name + opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir,
                                      model_name + opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            elif o == 'max_curvature' or o == 'min_curvature':
                output_pred_ind.append(pred_dim)
                pred_dim += 1
            else:
                raise ValueError('Unknown output: %s' % (o))
        #print(trainopt.patch_radius)

        dataset = PointcloudPatchDataset(
            root='/Users/jinwei/GItHub/DRNE/data/pclouds',
            root_in=trainopt.indir2,
            shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            dim_pts=trainopt.in_points_dim,
            knn=trainopt.knn,
            point_count_std=trainopt.patch_point_count_std,
            seed=trainopt.seed,
            identical_epochs=trainopt.identical_epochs,
            cache_capacity=trainopt.cache_capacity)

        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 sampler=datasampler,
                                                 batch_size=model_batchSize,
                                                 num_workers=int(opt.workers))

        regressor = DSAC(hyps=trainopt.hypotheses,
                         inlier_params=trainopt.inlier_params,
                         patch_radius=trainopt.patch_radius,
                         decoder=trainopt.decoder,
                         use_mask=trainopt.use_mask,
                         dim_pts=trainopt.in_points_dim,
                         num_gpts=trainopt.generate_points_num,
                         dim_gpts=trainopt.generate_points_dim,
                         points_per_patch=trainopt.points_per_patch[0],
                         sym_op=trainopt.sym_op,
                         normal_loss=trainopt.normal_loss,
                         seed=trainopt.seed,
                         device=device,
                         use_point_stn=trainopt.use_point_stn,
                         use_feat_stn=trainopt.use_feat_stn)

        regressor.load_state_dict(
            torch.load(model_filename, map_location=torch.device('cpu')))
        regressor.to(device)
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape,
                                    dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count,
                                       pred_dim,
                                       dtype=torch.float,
                                       device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            points = data[0]  # at this point, points is a 64*512*3 tensor
            target = data[1]
            mask = data[2]
            dist = data[3]

            points = points.transpose(2, 1)
            points = points.to(device)
            target = target.to(device)
            mask = mask.to(device)
            dist = dist.to(device)

            with torch.no_grad():
                exp_loss, top_loss, pred, pts, mask_p, patch_rot, _ = regressor(
                    points, target, dist)

            print(top_loss.mean())
            # keep the epsilon on the same device as the predictions/targets to avoid a CPU/GPU mismatch
            pred_len = torch.max(
                pred.new_tensor([sys.float_info.epsilon * 100]),
                pred.norm(p=2, dim=1, keepdim=True))
            pred = pred / pred_len
            target_len = torch.max(
                target.new_tensor([sys.float_info.epsilon * 100]),
                target.norm(p=2, dim=2, keepdim=True))
            target = target / target_len

            # plot a patch
            x, y = torch.meshgrid(torch.tensor([-10.0, 10.0]),
                                  torch.tensor([-10.0, 10.0]))
            for i in range(points.size(0)):
                pred_xy = pred[i, 0:2] / (pred[i, 2] + 1e-10)
                pred_xy = pred_xy.to(device)
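                # the prediction is treated as a plane normal n = pred[i]; for a plane through
                # the patch origin, n_x*x + n_y*y + n_z*z = 0, i.e. z = -(n_x/n_z)*x - (n_y/n_z)*y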
                z = -(pred_xy[0] * x + pred_xy[1] * y)
                mlab.figure('patch_with_gpts',
                            fgcolor=(0, 0, 0),
                            bgcolor=(1, 1, 1))
                mlab.points3d(10 * points[i, 0, :],
                              10 * points[i, 1, :],
                              10 * points[i, 2, :],
                              color=(0.7, 0.7, 0.7),
                              scale_factor=0.3,
                              scale_mode='vector')
                mlab.points3d(10 * pts[i, :, 0],
                              10 * pts[i, :, 1],
                              10 * pts[i, :, 2],
                              color=(0.2, 0.2, 0.2),
                              scale_factor=0.7,
                              scale_mode='vector')
                mlab.quiver3d(0.0,
                              0.0,
                              0.0,
                              pred[i, 0],
                              pred[i, 1],
                              pred[i, 2],
                              line_width=3,
                              scale_factor=10,
                              color=(0, 1, 0))
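                # normals are treated as unoriented: flip the ground-truth arrow into the
                # hemisphere of the prediction so the two arrows are directly comparable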
                if (target[i, 0, :] - pred[i, :]).pow(2).sum() > (
                        target[i, 0, :] + pred[i, :]).pow(2).sum():
                    mlab.quiver3d(0.0,
                                  0.0,
                                  0.0,
                                  -target[i, 0, 0],
                                  -target[i, 0, 1],
                                  -target[i, 0, 2],
                                  line_width=3,
                                  scale_factor=10,
                                  color=(1, 0.0, 0.0))
                else:
                    mlab.quiver3d(0.0,
                                  0.0,
                                  0.0,
                                  target[i, 0, 0],
                                  target[i, 0, 1],
                                  target[i, 0, 2],
                                  line_width=3,
                                  scale_factor=10,
                                  color=(1, 0.0, 0.0))
                mlab.surf(x, y, z, opacity=0.3)
                mlab.show()
Example #9
0
def eval_pcpnet(opt):
    # get a list of model names
    model_name = opt.model
    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    model_filename = os.path.join(opt.modeldir, opt.model + "_model.pth")
    param_filename = os.path.join(opt.modeldir, opt.model + opt.parmpostfix)

    # load model and training parameters
    trainopt = torch.load(param_filename)
    trainopt.outputs = ['clean_points']
    if opt.batchSize == 0:
        model_batchSize = trainopt.batchSize
    else:
        model_batchSize = opt.batchSize
    # get indices in targets and predictions corresponding to each output
    pred_dim = 0
    output_pred_ind = []
    for o in trainopt.outputs:
        if o in ['clean_points']:
            output_pred_ind.append(pred_dim)
            pred_dim += 3
        else:
            raise ValueError('Unknown output: %s' % (o))
    dataset = PointcloudPatchDataset(
        root=opt.outdir,
        shapes_list_file=opt.dataset,
        patch_radius=trainopt.patch_radius,
        points_per_patch=trainopt.points_per_patch,
        patch_features=['original'],
        seed=opt.seed,
        use_pca=trainopt.use_pca,
        center=trainopt.patch_center,
        point_tuple=trainopt.point_tuple,
        sparse_patches=opt.sparse_patches,
        cache_capacity=opt.cache_capacity,
        shape_names=[opt.shapename.format(i=opt.nrun - 1)])
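    # reads the shape written by the previous run (i = nrun - 1); this run's cleaned points
    # are saved under i = nrun further below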
    if opt.sampling == 'full':
        datasampler = SequentialPointcloudPatchSampler(dataset)
    elif opt.sampling == 'sequential_shapes_random_patches':
        datasampler = SequentialShapeRandomPointcloudPatchSampler(
            dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            sequential_shapes=True,
            identical_epochs=False)
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             sampler=datasampler,
                                             batch_size=model_batchSize,
                                             num_workers=int(opt.workers))

    regressor = ResPCPNet(num_points=trainopt.points_per_patch,
                          output_dim=pred_dim,
                          use_point_stn=trainopt.use_point_stn,
                          use_feat_stn=trainopt.use_feat_stn,
                          sym_op=trainopt.sym_op,
                          point_tuple=trainopt.point_tuple)
    regressor.load_state_dict(torch.load(model_filename))
    regressor.cuda()

    shape_ind = 0
    shape_patch_offset = 0
    if opt.sampling == 'full':
        shape_patch_count = dataset.shape_patch_count[shape_ind]
    elif opt.sampling == 'sequential_shapes_random_patches':
        shape_patch_count = min(opt.patches_per_shape,
                                dataset.shape_patch_count[shape_ind])
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
    shape_properties = torch.FloatTensor(shape_patch_count, pred_dim).zero_()

    # append model name to output directory and create directory if necessary
    model_outdir = os.path.join(opt.outdir, model_name)
    if not os.path.exists(model_outdir):
        os.makedirs(model_outdir)

    num_batch = len(dataloader)
    batch_enum = enumerate(dataloader, 0)

    regressor.eval()
    for batchind, data in batch_enum:

        # get batch, convert to variables and upload to GPU
        points, originals, patch_radiuses, data_trans = data
        points = Variable(points, volatile=True)
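        # volatile=True turns off autograd for inference (pre-0.4 PyTorch API; newer code
        # would wrap the forward pass in torch.no_grad() instead)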
        points = points.transpose(2, 1)
        points = points.cuda()

        data_trans = data_trans.cuda()
        pred, trans, _, _ = regressor(points)
        pred = pred.data
        if trans is not None:
            trans = trans.data

        # post-processing of the prediction
        for oi, o in enumerate(trainopt.outputs):
            o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3]
            if trainopt.use_point_stn:
                # transform predictions with inverse transform
                # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                         trans.transpose(2, 1)).squeeze(1)
            if trainopt.use_pca:
                # transform predictions with inverse pca rotation (back to world space)
                o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1),
                                         data_trans.transpose(2, 1)).squeeze(1)
            n_points = patch_radiuses.shape[0]
            # new coordinates are: old coordinates + displacement vector
            o_pred = torch.mul(
                o_pred,
                torch.t(patch_radiuses.expand(
                    3, n_points)).float().cuda()) + originals.cuda()
            pred[:, output_pred_ind[oi]:output_pred_ind[oi] + 3] = o_pred

        print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch - 1,
                                       dataset.shape_names[shape_ind]))

        batch_offset = 0
        while batch_offset < pred.size(0):

            shape_patches_remaining = shape_patch_count - shape_patch_offset
            batch_patches_remaining = pred.size(0) - batch_offset
            # append estimated patch properties batch to properties for the current shape on the CPU
            shape_properties[shape_patch_offset:shape_patch_offset + min(
                shape_patches_remaining, batch_patches_remaining), :] = pred[
                    batch_offset:batch_offset +
                    min(shape_patches_remaining, batch_patches_remaining), :]

            batch_offset = batch_offset + min(shape_patches_remaining,
                                              batch_patches_remaining)
            shape_patch_offset = shape_patch_offset + min(
                shape_patches_remaining, batch_patches_remaining)
            if shape_patches_remaining <= batch_patches_remaining:

                # save shape properties to disk
                prop_saved = [False] * len(trainopt.outputs)

                # save clean points
                oi = [
                    k for k, o in enumerate(trainopt.outputs)
                    if o in ['clean_points']
                ]
                if len(oi) > 1:
                    raise ValueError('Duplicate point output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                   output_pred_ind[oi] + 3]
                    # Compute mean displacements, inspired from Taubin smoothing
                    normal_prop = get_meaned_displacements(
                        dataset, normal_prop, opt.n_neighbours)
                    np.savetxt(
                        os.path.join(opt.outdir,
                                     opt.shapename.format(i=opt.nrun) +
                                     '.xyz'), normal_prop.numpy())
                    prop_saved[oi] = True

                if not all(prop_saved):
                    raise ValueError(
                        'Not all shape properties were saved, some of them seem to be unsupported.'
                    )
                # start new shape
                if shape_ind + 1 < len(dataset.shape_names):
                    shape_patch_offset = 0
                    shape_ind = shape_ind + 1
                    if opt.sampling == 'full':
                        shape_patch_count = dataset.shape_patch_count[
                            shape_ind]
                    elif opt.sampling == 'sequential_shapes_random_patches':
                        # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                        shape_patch_count = len(
                            datasampler.shape_patch_inds[shape_ind])
                    else:
                        raise ValueError('Unknown sampling strategy: %s' %
                                         opt.sampling)
                    shape_properties = torch.FloatTensor(
                        shape_patch_count, pred_dim).zero_()
Example #10
0
def eval_Net(opt):
    logfile = open('ParallelTest_log.txt', mode='at')
    now = time.localtime()
    logDate = "%04d-%02d-%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
    logfile.write("\n%s\n" % logDate)

    startTime = time.time()

    torch.cuda.set_device(opt.GPU_ID)

    # get a list of model names
    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    #model_filename = os.path.join(opt.modeldir, opt.model + opt.modelpostfix)
    #param_filename = os.path.join(opt.modeldir, opt.model + opt.parmpostfix)
    model_filename = glob(os.path.join(opt.modeldir, opt.model)+ "*" + opt.modelpostfix)[0]
    param_filename = glob(os.path.join(opt.modeldir, opt.model)+ "*" + opt.parmpostfix)[0]
    
    # load model and training parameters
    trainopt = torch.load(param_filename)
    trainopt.outputs = ['outliers']
    if opt.batchSize == 0:
        model_batchSize = trainopt.batchSize
    else:
        model_batchSize = opt.batchSize
    print("batchSize: ", model_batchSize)
    
    # get indices in targets and predictions corresponding to each output
    pred_dim = 0
    output_pred_ind = []
    for o in trainopt.outputs:
        if o in ['outliers']:
            output_pred_ind.append(pred_dim)
            pred_dim += 1
        else:
            raise ValueError('Unknown output: %s' % (o))
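    # OUTLIERS and N_OUTPUT are assumed to be module-level constants defined elsewhere
    # (e.g. OUTLIERS = True and N_OUTPUT = 4: x, y, z plus the outlier score)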

    patch_features = ['original']
    if OUTLIERS:
        patch_features.append('outliers')
    dataset = PointcloudPatchDataset(
        root=opt.indir, shapes_list_file=opt.dataset,
        patch_radius=trainopt.patch_radius,
        points_per_patch=trainopt.points_per_patch,
        patch_features=patch_features,
        seed=opt.seed,
        use_pca=trainopt.use_pca,
        center=trainopt.patch_center,
        point_tuple=trainopt.point_tuple,
        sparse_patches=opt.sparse_patches,
        cache_capacity=opt.cache_capacity,
        logfile=logfile)
    if opt.sampling == 'full':
        datasampler = SequentialPointcloudPatchSampler(dataset)
    elif opt.sampling == 'sequential_shapes_random_patches':
        datasampler = SequentialShapeRandomPointcloudPatchSampler(
            dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            sequential_shapes=True,
            identical_epochs=False)
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)

    print("datasampler: ", len(datasampler))
    print("GPU count: %d" % torch.cuda.device_count())
    dataloader = torch.utils.data.DataLoader(
        dataset,
        sampler=datasampler,
        batch_size=model_batchSize,
        num_workers=int(opt.workers))

    regressor = ResNet(
        num_points=trainopt.points_per_patch,
        output_dim=pred_dim,
        use_point_stn=trainopt.use_point_stn,
        use_feat_stn=trainopt.use_feat_stn,
        sym_op=trainopt.sym_op,
        point_tuple=trainopt.point_tuple)
    regressor.load_state_dict(torch.load(model_filename))

    regressor.cuda()

    shape_ind = 0
    shape_patch_offset = 0
    if opt.sampling == 'full':
        print("shape_ind: ", end="")
        print(shape_ind)
        shape_patch_count = dataset.shape_patch_count[shape_ind]
    elif opt.sampling == 'sequential_shapes_random_patches':
        shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
    else:
        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)

    # modify to add pointID info
    shape_properties = torch.FloatTensor(shape_patch_count, 4).zero_()
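    # four columns per patch: the original x, y, z plus the predicted outlier score
    # (filled in by the 'outliers' branch of the post-processing below)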

    # append model name to output directory and create directory if necessary
    model_outdir = os.path.join(opt.outdir, opt.model)
    if not os.path.exists(model_outdir):
        os.makedirs(model_outdir)


    dataInputTime = time.time() - startTime
    inputTimeSplit = divmod(dataInputTime, 60)
    print("##################################################")
    print("Data Input WorkingTime: %d min %d sec" % (inputTimeSplit[0], inputTimeSplit[1]))
    print("##################################################")


    num_batch = len(dataloader)
    batch_enum = enumerate(dataloader, 0)
    inferenceStart = time.time()
    for batchind, data in batch_enum:

        regressor.eval()
        # get batch, convert to variables and upload to GPU
        if OUTLIERS:
            points, originals, patch_radiuses, outliers, data_trans = data
        else:
            points, originals, patch_radiuses, data_trans = data
        points = Variable(points, volatile=True)
        points = points.transpose(2, 1)

        points = points.cuda()
        data_trans = data_trans.cuda()
        pred, trans, _, _ = regressor(points)
        # don't need to work with autograd variables anymore
        pred = pred.data

        if trans is not None:
            trans = trans.data
        # post-processing of the prediction
        for oi, o in enumerate(trainopt.outputs):
            if o == 'unoriented_normals' or o == 'oriented_normals':
                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3]

                if trainopt.use_point_stn:
                    # transform predictions with inverse transform
                    # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), trans.transpose(2, 1)).squeeze(1)

                if trainopt.use_pca:
                    # transform predictions with inverse pca rotation (back to world space)
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), data_trans.transpose(2, 1)).squeeze(1)

                # normalize normals
                o_pred_len = torch.max(torch.cuda.FloatTensor([sys.float_info.epsilon*100]), o_pred.norm(p=2, dim=1, keepdim=True))
                o_pred = o_pred / o_pred_len
                pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3] = o_pred
            elif o in ['clean_points']:


                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
                if trainopt.use_point_stn:
                    # transform predictions with inverse transform
                    # since we know the transform to be a rotation (QSTN), the transpose is the inverse
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), trans.transpose(2, 1)).squeeze(1)

                if trainopt.use_pca:
                    # transform predictions with inverse pca rotation (back to world space)
                    o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), data_trans.transpose(2, 1)).squeeze(1)
                n_points = patch_radiuses.shape[0]
                o_pred = torch.mul(o_pred,torch.t(patch_radiuses.expand(3, n_points)).float().cuda()) + originals.cuda()
                pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3] = o_pred
            elif o in ['outliers']:
                # TODO check dimensions here
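                # the network predicts one outlier score per patch; pred is replaced by
                # [x, y, z, score] so it matches the 4-column shape_properties buffer
                # (is_outlier below thresholds the score at 0.5 but is not used further)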

                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+1]
                outliers_value = o_pred.cpu()
                is_outlier = torch.ByteTensor([1 if x > 0.5 else 0 for x in o_pred])
                o_pred = originals.cuda()

                o_pred = torch.cat((o_pred, outliers_value.cuda()), 1)
                pred = o_pred

            elif o == 'max_curvature' or o == 'min_curvature':
                o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+1]

                # undo patch size normalization:
                o_pred[:, :] = o_pred / dataset.patch_radius_absolute[shape_ind][0]

            else:
                raise ValueError('Unsupported output type: %s' % (o))
        print('[%s %d/%d] shape %s' % (opt.model, batchind, num_batch-1, dataset.shape_names[shape_ind]))

        batch_offset = 0
        while batch_offset < pred.size(0):

            shape_patches_remaining = shape_patch_count-shape_patch_offset
            batch_patches_remaining = pred.size(0)-batch_offset
            # append estimated patch properties batch to properties for the current shape on the CPU
            shape_properties[shape_patch_offset:shape_patch_offset+min(shape_patches_remaining, batch_patches_remaining), :] = pred[
                batch_offset:batch_offset+min(shape_patches_remaining, batch_patches_remaining), :]

            batch_offset = batch_offset + min(shape_patches_remaining, batch_patches_remaining)
            shape_patch_offset = shape_patch_offset + min(shape_patches_remaining, batch_patches_remaining)
            if shape_patches_remaining <= batch_patches_remaining:

                # save shape properties to disk
                prop_saved = [False]*len(trainopt.outputs)

                # save normals
                oi = [i for i, o in enumerate(trainopt.outputs) if o in ['unoriented_normals', 'oriented_normals']]
                if len(oi) > 1:
                    raise ValueError('Duplicate normal output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    normal_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
                    np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.normals'), normal_prop.numpy())
                    prop_saved[oi] = True

                # save clean points
                oi = [i for i, o in enumerate(trainopt.outputs) if o in ['clean_points']]
                if len(oi) > 1:
                    raise ValueError('Duplicate point output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    normal_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
                    np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.xyz'), normal_prop.numpy())
                    prop_saved[oi] = True

                # save outliers
                oi = [i for i, o in enumerate(trainopt.outputs) if o in ["outliers"]]
                if len(oi) > 1:
                    raise ValueError('Duplicate point output.')
                elif len(oi) == 1:
                    oi = oi[0]
                    outliers_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi]+N_OUTPUT]

                    # modify to add pointID info
                    pointID_filename = os.path.join(opt.indir, dataset.shape_names[shape_ind] + '.xyz_pointID' + '.npy')
                    pointIDs = torch.tensor(np.load(pointID_filename).astype('float64'))

                    pts_ID_XYZ = torch.cat([pointIDs, outliers_prop.type(torch.DoubleTensor)], dim=1)
                    np.savetxt(os.path.join(model_outdir,'outliers_value' +"_" + dataset.shape_names[shape_ind] + '.info'), pts_ID_XYZ.numpy())
                    prop_saved[oi] = True

                # save curvatures
                oi1 = [i for i, o in enumerate(trainopt.outputs) if o == 'max_curvature']
                oi2 = [i for i, o in enumerate(trainopt.outputs) if o == 'min_curvature']
                if len(oi1) > 1 or len(oi2) > 1:
                    raise ValueError('Duplicate minimum or maximum curvature output.')
                elif len(oi1) == 1 or len(oi2) == 1:
                    curv_prop = torch.FloatTensor(shape_properties.size(0), 2).zero_()
                    if len(oi1) == 1:
                        oi1 = oi1[0]
                        curv_prop[:, 0] = shape_properties[:, output_pred_ind[oi1]]
                        prop_saved[oi1] = True
                    if len(oi2) == 1:
                        oi2 = oi2[0]
                        curv_prop[:, 1] = shape_properties[:, output_pred_ind[oi2]]
                        prop_saved[oi2] = True
                    np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.curv'), curv_prop.numpy())

                if not all(prop_saved):
                    raise ValueError('Not all shape properties were saved, some of them seem to be unsupported.')

                # save point indices
                if opt.sampling != 'full':
                    np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.idx'), datasampler.shape_patch_inds[shape_ind], fmt='%d')

                # start new shape
                if shape_ind + 1 < len(dataset.shape_names):
                    #log update
                    outlierInferenceSplit = divmod(time.time() - inferenceStart, 60)
                    logfile.write("Outlier Inference(shape: %s) duration: %d min %d sec\n" % (dataset.shape_names[shape_ind], outlierInferenceSplit[0], outlierInferenceSplit[1]))
                    inferenceStart = time.time()

                    shape_patch_offset = 0
                    shape_ind = shape_ind + 1
                    if opt.sampling == 'full':
                        shape_patch_count = dataset.shape_patch_count[shape_ind]
                    elif opt.sampling == 'sequential_shapes_random_patches':
                        shape_patch_count = len(datasampler.shape_patch_inds[shape_ind])
                    else:
                        raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
                    shape_properties = torch.FloatTensor(shape_patch_count, N_OUTPUT).zero_()

        if batchind == num_batch-1:
            outlierInferenceSplit = divmod(time.time() - inferenceStart, 60)
            logfile.write("Outlier Inference(shape: %s) duration: %d min %d sec\n" % (dataset.shape_names[shape_ind], outlierInferenceSplit[0], outlierInferenceSplit[1]))

    totalTime = time.time() - startTime
    gpuInferencingTime = totalTime - dataInputTime
    totalTimeSplit = divmod(totalTime, 60)
    inferenceTimeSplit = divmod(gpuInferencingTime, 60)
    print("##################################################")
    print("Data Input WorkingTime: %d min %d sec" % (inputTimeSplit[0], inputTimeSplit[1]))
    print("GPU Inferencing WorkingTime: %d min %d sec" % (inferenceTimeSplit[0], inferenceTimeSplit[1]))
    print("Total Processing Time : %d min %d sec" % (totalTimeSplit[0], totalTimeSplit[1]))
    print("##################################################")

    logfile.write("\nTotal Processing Time : %d min %d sec\n" % (totalTimeSplit[0], totalTimeSplit[1]))
    logfile.write("\nProcess End\n")
    logfile.close()
Example #11
0
def train_pcpnet(opt):

    # colored console output
    green = lambda x: '\033[92m' + x + '\033[0m'
    blue = lambda x: '\033[94m' + x + '\033[0m'

    log_dirname = os.path.join(opt.logdir, opt.name)
    params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
    model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
    desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))

    if os.path.exists(log_dirname) or os.path.exists(model_filename):
        response = input(
            'A training run named "%s" already exists, overwrite? (y/n) ' %
            (opt.name))
        if response == 'y':
            if os.path.exists(log_dirname):
                shutil.rmtree(os.path.join(opt.logdir, opt.name))
        else:
            sys.exit()

    # get indices in targets and predictions corresponding to each output
    target_features = []
    output_target_ind = []
    output_pred_ind = []
    output_loss_weight = []
    pred_dim = 0
    for o in opt.outputs:
        if o == 'unoriented_normals' or o == 'oriented_normals':
            if 'normal' not in target_features:
                target_features.append('normal')

            output_target_ind.append(target_features.index('normal'))
            output_pred_ind.append(pred_dim)
            output_loss_weight.append(1.0)
            pred_dim += 3
        elif o == 'max_curvature' or o == 'min_curvature':
            if o not in target_features:
                target_features.append(o)

            output_target_ind.append(target_features.index(o))
            output_pred_ind.append(pred_dim)
            if o == 'max_curvature':
                output_loss_weight.append(0.7)
            else:
                output_loss_weight.append(0.3)
            pred_dim += 1
        else:
            raise ValueError('Unknown output: %s' % (o))

    if pred_dim <= 0:
        raise ValueError('Prediction is empty for the given outputs.')
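
    # For example, opt.outputs == ['unoriented_normals', 'max_curvature'] yields
    # output_pred_ind == [0, 3], output_loss_weight == [1.0, 0.7] and pred_dim == 4.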

    # create model
    if len(opt.patch_radius) == 1:
        pcpnet = PCPNet(num_points=opt.points_per_patch,
                        output_dim=pred_dim,
                        use_point_stn=opt.use_point_stn,
                        use_feat_stn=opt.use_feat_stn,
                        sym_op=opt.sym_op,
                        point_tuple=opt.point_tuple)
    else:
        pcpnet = MSPCPNet(num_scales=len(opt.patch_radius),
                          num_points=opt.points_per_patch,
                          output_dim=pred_dim,
                          use_point_stn=opt.use_point_stn,
                          use_feat_stn=opt.use_feat_stn,
                          sym_op=opt.sym_op,
                          point_tuple=opt.point_tuple)

    if opt.refine != '':
        pcpnet.load_state_dict(torch.load(opt.refine))

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    # create train and test dataset loaders
    train_dataset = PointcloudPatchDataset(
        root=opt.indir,
        shape_list_filename=opt.trainset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        patch_features=target_features,
        point_count_std=opt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        use_pca=opt.use_pca,
        center=opt.patch_center,
        point_tuple=opt.point_tuple,
        cache_capacity=opt.cache_capacity)
    if opt.training_order == 'random':
        train_datasampler = RandomPointcloudPatchSampler(
            train_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        train_datasampler = SequentialShapeRandomPointcloudPatchSampler(
            train_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   sampler=train_datasampler,
                                                   batch_size=opt.batchSize,
                                                   num_workers=int(
                                                       opt.workers))

    test_dataset = PointcloudPatchDataset(
        root=opt.indir,
        shape_list_filename=opt.testset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        patch_features=target_features,
        point_count_std=opt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        use_pca=opt.use_pca,
        center=opt.patch_center,
        point_tuple=opt.point_tuple,
        cache_capacity=opt.cache_capacity)
    if opt.training_order == 'random':
        test_datasampler = RandomPointcloudPatchSampler(
            test_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        test_datasampler = SequentialShapeRandomPointcloudPatchSampler(
            test_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  sampler=test_datasampler,
                                                  batch_size=opt.batchSize,
                                                  num_workers=int(opt.workers))

    # keep the exact training shape names for later reference
    opt.train_shapes = train_dataset.shape_names
    opt.test_shapes = test_dataset.shape_names

    print(
        'training set: %d patches (in %d batches) - test set: %d patches (in %d batches)'
        % (len(train_datasampler), len(train_dataloader),
           len(test_datasampler), len(test_dataloader)))

    try:
        os.makedirs(opt.outdir)
    except OSError:
        pass

    train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
    test_writer = SummaryWriter(os.path.join(log_dirname, 'test'))

    optimizer = optim.SGD(pcpnet.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum)
    scheduler = lr_scheduler.MultiStepLR(
        optimizer, milestones=[],
        gamma=0.1)  # milestones in number of optimizer iterations
    pcpnet.cuda()

    train_num_batch = len(train_dataloader)
    test_num_batch = len(test_dataloader)

    # save parameters
    torch.save(opt, params_filename)

    # save description
    with open(desc_filename, 'w+') as text_file:
        print(opt.desc, file=text_file)

    for epoch in range(opt.nepoch):

        train_batchind = -1
        train_fraction_done = 0.0
        train_enum = enumerate(train_dataloader, 0)

        test_batchind = -1
        test_fraction_done = 0.0
        test_enum = enumerate(test_dataloader, 0)

        for train_batchind, data in train_enum:

            # update learning rate
            scheduler.step(epoch * train_num_batch + train_batchind)

            # set to training mode
            pcpnet.train()

            # get trainingset batch, convert to variables and upload to GPU
            points = data[0]
            target = data[1:-1]
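            # data[0] is the point patch and data[1:-1] are the prediction targets; the last
            # element of data (presumably the patch transform) is not used during training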

            points = Variable(points)
            points = points.transpose(2, 1)
            points = points.cuda()

            target = tuple(Variable(t) for t in target)
            target = tuple(t.cuda() for t in target)

            # zero gradients
            optimizer.zero_grad()

            # forward pass
            pred, trans, _, _ = pcpnet(points)

            loss = compute_loss(pred=pred,
                                target=target,
                                outputs=opt.outputs,
                                output_pred_ind=output_pred_ind,
                                output_target_ind=output_target_ind,
                                output_loss_weight=output_loss_weight,
                                patch_rot=trans if opt.use_point_stn else None,
                                normal_loss=opt.normal_loss)

            # backpropagate through entire network to compute gradients of loss w.r.t. parameters
            loss.backward()

            # parameter optimization step
            optimizer.step()

            train_fraction_done = (train_batchind + 1) / train_num_batch

            # print info and update log file
            print('[%s %d: %d/%d] %s loss: %f' %
                  (opt.name, epoch, train_batchind, train_num_batch - 1,
                   green('train'), loss.data[0]))
            # print('min normal len: %f' % (pred.data.norm(2,1).min()))
            train_writer.add_scalar('loss', loss.data[0],
                                    (epoch + train_fraction_done) *
                                    train_num_batch * opt.batchSize)
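            # interleave test batches with training so that test progress (test_fraction_done)
            # roughly tracks training progress within the epoch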

            while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:

                # set to evaluation mode
                pcpnet.eval()

                test_batchind, data = next(test_enum)

                # get testset batch, convert to variables and upload to GPU
                # volatile means that autograd is turned off for everything that depends on the volatile variable
                # since we dont need autograd for inference (only for training)
                points = data[0]
                target = data[1:-1]

                points = Variable(points, volatile=True)
                points = points.transpose(2, 1)
                points = points.cuda()

                target = tuple(Variable(t, volatile=True) for t in target)
                target = tuple(t.cuda() for t in target)

                # forward pass
                pred, trans, _, _ = pcpnet(points)

                loss = compute_loss(
                    pred=pred,
                    target=target,
                    outputs=opt.outputs,
                    output_pred_ind=output_pred_ind,
                    output_target_ind=output_target_ind,
                    output_loss_weight=output_loss_weight,
                    patch_rot=trans if opt.use_point_stn else None,
                    normal_loss=opt.normal_loss)

                test_fraction_done = (test_batchind + 1) / test_num_batch

                # print info and update log file
                print('[%s %d: %d/%d] %s loss: %f' %
                      (opt.name, epoch, train_batchind, train_num_batch - 1,
                       blue('test'), loss.data[0]))
                # print('min normal len: %f' % (pred.data.norm(2,1).min()))
                test_writer.add_scalar('loss', loss.data[0],
                                       (epoch + test_fraction_done) *
                                       train_num_batch * opt.batchSize)

        # save model, overwriting the old model
        if epoch % opt.saveinterval == 0 or epoch == opt.nepoch - 1:
            torch.save(pcpnet.state_dict(), model_filename)

        # save model in a separate file in epochs 0,5,10,50,100,500,1000, ...
        if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))
                    ) == 0 or epoch % 100 == 0 or epoch == opt.nepoch - 1:
            torch.save(
                pcpnet.state_dict(),
                os.path.join(opt.outdir,
                             '%s_model_%d.pth' % (opt.name, epoch)))
Example #12
0
def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" % opt.gpu_idx)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir, model_name+opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir, model_name+opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            else:
                raise ValueError('Unknown output: %s' % (o))

        dataset = PointcloudPatchDataset(
            root=opt.indir, shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            # patch_features=[],
            seed=opt.seed,
            # use_pca=trainopt.use_pca,
            center=trainopt.patch_center,
            # point_tuple=trainopt.point_tuple,
            # sparse_patches=opt.sparse_patches,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            sampler=datasampler,
            batch_size=model_batchSize,
            num_workers=int(opt.workers))

        regressor = DSAC(
            trainopt.hypotheses,
            trainopt.inlierthreshold,
            trainopt.inlierbeta,
            trainopt.inlieralpha,
            trainopt.normal_loss,
            trainopt.seed,device,
            use_point_stn=trainopt.use_point_stn,
            use_feat_stn=trainopt.use_feat_stn,
            use_mask=trainopt.use_mask
        )

        regressor.load_state_dict(torch.load(model_filename))
        regressor.to(device)
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count, 3, dtype=torch.float, device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            points, data_trans, _ = data
            points = points.transpose(2, 1)
            points = points.to(device)

            data_trans = data_trans.to(device)

            with torch.no_grad():
                exp_loss, top_loss, pred, pts, _ = regressor(points, data_trans)

            # post-processing of the prediction
            # for oi, o in enumerate(trainopt.outputs):
            #     if o == 'unoriented_normals' or o == 'oriented_normals':
            #         o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3]

                    

                    

            #         # normalize normals
            #         #o_pred_len = torch.max(o_pred.new_tensor([sys.float_info.epsilon*100]), o_pred.norm(p=2, dim=1, keepdim=True))
            #         #o_pred = o_pred / o_pred_len

                

            #     else:
            #         raise ValueError('Unsupported output type: %s' % (o))

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch-1, dataset.shape_names[shape_ind]))
            

            batch_offset = 0
            while batch_offset < pred.size(0):

                shape_patches_remaining = shape_patch_count-shape_patch_offset
                batch_patches_remaining = pred.size(0)-batch_offset

                # append estimated patch properties batch to properties for the current shape
                shape_properties[shape_patch_offset:shape_patch_offset+min(shape_patches_remaining, batch_patches_remaining), :] = pred[
                    batch_offset:batch_offset+min(shape_patches_remaining, batch_patches_remaining), :]

                batch_offset = batch_offset + min(shape_patches_remaining, batch_patches_remaining)
                shape_patch_offset = shape_patch_offset + min(shape_patches_remaining, batch_patches_remaining)

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    prop_saved = [False]*len(trainopt.outputs)

                    # save normals
                    oi = [i for i, o in enumerate(trainopt.outputs) if o in ['unoriented_normals', 'oriented_normals']]
                    if len(oi) > 1:
                        raise ValueError('Duplicate normal output.')
                    elif len(oi) == 1:
                        oi = oi[0]
                        normal_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
                        
                        np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.normals'), normal_prop.cpu().numpy())
                        print('saved normals for ' + dataset.shape_names[shape_ind])
                        prop_saved[oi] = True

                    # save curvatures
                    

                    if not all(prop_saved):
                        raise ValueError('Not all shape properties were saved, some of them seem to be unsupported.')

                    # save point indices
                    if opt.sampling != 'full':
                        np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.idx'), datasampler.shape_patch_inds[shape_ind], fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
                        shape_properties = shape_properties.new_zeros(shape_patch_count, pred_dim)
Example #13
0
def train_dsacpnet(opt):

    # gpu init
    # multi_gpus = False
    # if ',' in opt.gpu_idx:
    #     gpu_ids = [int(id) for id in opt.gpu_idx.split(',')]
    #     multi_gpus = True
    # else:
    #     gpu_ids = [int(opt.gpu_idx)]

    # device = torch.device('cuda:{}'.format(gpu_ids[0]) if torch.cuda.is_available() else 'cpu')
    # If the line of code below is used here, it raises: RuntimeError: all tensors must be on devices[0]
    # By default the device is 0, so the device id has to be specified explicitly.
    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" %
                          opt.gpu_idx)

    #if multi_gpus:
    # net = DataParallel(net, device_ids=gpu_ids).to(device)
    # margin = DataParallel(margin, device_ids=gpu_ids).to(device)
    #else:
    #   net = net.to(device)
    #  margin = margin.to(device)
    #device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" % opt.gpu_idx)

    # colored console output
    green = lambda x: '\033[92m' + x + '\033[0m'
    blue = lambda x: '\033[94m' + x + '\033[0m'

    log_dirname = os.path.join(opt.logdir, opt.name)
    params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
    model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
    desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))

    if os.path.exists(log_dirname) or os.path.exists(model_filename):
        response = input(
            'A training run named "%s" already exists, overwrite? (y/n) ' %
            (opt.name))
        if response == 'y':
            if os.path.exists(log_dirname):
                shutil.rmtree(os.path.join(opt.logdir, opt.name))
        else:
            sys.exit()

    criterion = nn.BCEWithLogitsLoss()

    # resolve and apply the random seed before the network is constructed, so that DSAC
    # never receives the placeholder negative seed and weight initialization as well as the
    # patch samplers below are reproducible
    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    dsac = DSAC(opt.hypotheses,
                opt.inlierthreshold,
                opt.inlierbeta,
                opt.inlieralpha,
                opt.normal_loss,
                opt.seed,
                device,
                use_point_stn=opt.use_point_stn,
                use_feat_stn=opt.use_feat_stn,
                use_mask=opt.use_mask,
                points_num=opt.points_num,
                points_per_patch=opt.points_per_patch,
                sym_op=opt.sym_op)
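    # DSAC is presumably a differentiable-sample-consensus module in the spirit of Brachmann
    # et al.: candidate normals are sampled from the patch, scored with a soft inlier measure
    # controlled by inlierthreshold/inlierbeta/inlieralpha, and trained on the expected loss
    # over the hypothesis distribution (an inference from the constructor arguments; the
    # module itself is defined outside this snippet)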

    # create train and test dataset loaders
    train_dataset = PointcloudPatchDataset(
        root=opt.indir,
        shape_list_filename=opt.trainset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        center=opt.patch_center,
        cache_capacity=opt.cache_capacity)
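    # no explicit patch_features list is passed here; the dataset is assumed to yield
    # (points, target, mask) tuples, matching how batches are unpacked in the training loop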
    if opt.training_order == 'random':
        train_datasampler = RandomPointcloudPatchSampler(
            train_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        train_datasampler = SequentialShapeRandomPointcloudPatchSampler(
            train_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   sampler=train_datasampler,
                                                   batch_size=opt.batchSize,
                                                   num_workers=int(
                                                       opt.workers))

    test_dataset = PointcloudPatchDataset(
        root=opt.indir,
        shape_list_filename=opt.testset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        center=opt.patch_center,
        cache_capacity=opt.cache_capacity)
    if opt.training_order == 'random':
        test_datasampler = RandomPointcloudPatchSampler(
            test_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        test_datasampler = SequentialShapeRandomPointcloudPatchSampler(
            test_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  sampler=test_datasampler,
                                                  batch_size=opt.batchSize,
                                                  num_workers=int(opt.workers))
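    # note that the test split reuses the random patch samplers, so each epoch evaluates at
    # most patches_per_shape patches per test shape rather than every patch of every shape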

    # record the exact train/test shape names used, for later reference
    opt.train_shapes = train_dataset.shape_names
    opt.test_shapes = test_dataset.shape_names

    print(
        'training set: %d patches (in %d batches) - test set: %d patches (in %d batches)'
        % (len(train_datasampler), len(train_dataloader),
           len(test_datasampler), len(test_dataloader)))

    try:
        os.makedirs(opt.outdir)
    except OSError:
        pass

    train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
    test_writer = SummaryWriter(os.path.join(log_dirname, 'test'))

    optimizer = optim.SGD(dsac.parameters(), lr=opt.lr, momentum=opt.momentum)
    scheduler = lr_scheduler.MultiStepLR(
        optimizer, milestones=[],
        gamma=0.1)  # milestones in number of optimizer iterations
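    # with an empty milestones list the learning rate stays fixed at opt.lr; the scheduler is
    # still stepped once per optimizer iteration below (newer PyTorch versions warn that
    # passing an explicit step index to scheduler.step() is deprecated)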
    #dsacpnet= torch.nn.DataParallel(dsacpnet, device_ids=gpu_ids).to(device)
    #dsac= torch.nn.DataParallel(dsac, device_ids=gpu_ids).to(device)
    dsac.to(device)
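    # a minimal multi-GPU sketch (not used here; it assumes opt.gpu_idx could carry a
    # comma-separated list of device ids, which this script does not parse):
    #
    #     gpu_ids = [int(i) for i in str(opt.gpu_idx).split(',')]
    #     device = torch.device('cuda:%d' % gpu_ids[0])
    #     dsac = torch.nn.DataParallel(dsac, device_ids=gpu_ids).to(device)
    #
    # DataParallel scatters each batch over device_ids and gathers outputs on device_ids[0],
    # which is consistent with the devices[0] error noted at the top of this function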
    train_num_batch = len(train_dataloader)
    test_num_batch = len(test_dataloader)

    # save parameters
    torch.save(opt, params_filename)
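    # the full option namespace (including the shape lists recorded above) is written out so
    # that a later evaluation run can presumably reload the training configuration from the
    # %s_params.pth file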

    # save description
    with open(desc_filename, 'w+') as text_file:
        print(opt.desc, file=text_file)

    for epoch in range(opt.nepoch):

        train_batchind = -1
        train_fraction_done = 0.0
        train_enum = enumerate(train_dataloader, 0)

        test_batchind = -1
        test_fraction_done = 0.0
        test_enum = enumerate(test_dataloader, 0)

        for train_batchind, data in train_enum:

            # update learning rate
            scheduler.step(epoch * train_num_batch + train_batchind)

            # set to training mode
            dsac.train()

            points = data[0]  # points has shape (batch_size, points_per_patch, 3), e.g. 64 x 512 x 3
            target = data[1]
            mask = data[2]

            # move the coordinate dimension to the channel axis: (batch_size, 3, points_per_patch)
            points = points.transpose(2, 1)
            points = points.to(device)

            target = target.to(device)
            mask = mask.to(device)

            # zero gradients
            optimizer.zero_grad()

            exp_loss, top_loss, _, pts, mask_p = dsac(points, target)
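            # the values unpacked above are interpreted as follows (inferred from how they
            # are used below): exp_loss is the expected loss over sampled hypotheses,
            # top_loss the loss of the best-scoring hypothesis, pts an additional per-patch
            # output unused in this loop, and mask_p the per-point mask logits fed to the
            # BCE loss when opt.use_mask is set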

            if opt.use_mask:
                mask_p = mask_p.view(-1, opt.points_per_patch)
                mask_loss = criterion(mask_p, mask)
            else:
                mask_loss = 0
            exp_loss = exp_loss.mean()

            loss = exp_loss + mask_loss  # (+ chamfer_loss term currently disabled)
            # backpropagate through entire network to compute gradients of loss w.r.t. parameters
            loss.backward()

            # parameter optimization step
            optimizer.step()

            train_fraction_done = (train_batchind + 1) / train_num_batch

            # print info and update log file
            print(
                '[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f mask Loss:%f'
                % (opt.name, epoch, train_batchind, train_num_batch - 1,
                   green('train'), loss, exp_loss.mean().item(),
                   top_loss.mean(), mask_loss))
            train_writer.add_scalar('loss', exp_loss.item(),
                                    (epoch + train_fraction_done) *
                                    train_num_batch * opt.batchSize)
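            # (the x-axis value (epoch + fraction) * num_batches * batchSize roughly counts
            # the number of training patches seen so far)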

            while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:
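                # evaluation is interleaved with training: whenever training progress
                # overtakes test progress, one test batch is run so both loaders finish the
                # epoch together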

                # set to evaluation mode
                dsac.eval()

                test_batchind, data = next(test_enum)

                points = data[0]  # points has shape (batch_size, points_per_patch, 3), e.g. 64 x 512 x 3
                target = data[1]
                mask = data[2]

                points = points.transpose(2, 1)
                points = points.to(device)

                target = target.to(device)
                mask = mask.to(device)

                # forward pass
                with torch.no_grad():
                    exp_loss, top_loss, _, pts, mask_p = dsac(points, target)

                if opt.use_mask:
                    mask_p = mask_p.view(-1, opt.points_per_patch)
                    mask_loss = criterion(mask_p, mask)
                else:
                    mask_loss = 0
                exp_loss = exp_loss.mean()
                loss = exp_loss + mask_loss
                test_fraction_done = (test_batchind + 1) / test_num_batch

                # print info and update log file
                print(
                    '[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f mask Loss:%f '
                    % (opt.name, epoch, train_batchind, train_num_batch - 1,
                       blue('test'), loss, exp_loss.mean().item(),
                       top_loss.mean(), mask_loss))
                test_writer.add_scalar('loss',
                                       exp_loss.mean().item(),
                                       (epoch + test_fraction_done) *
                                       train_num_batch * opt.batchSize)

        # save model, overwriting the old model
        if epoch % opt.saveinterval == 0 or epoch == opt.nepoch - 1:
            torch.save(dsac.state_dict(), model_filename)

        # save model in a separate file in epochs 0,5,10,50,100,500,1000, ...
        if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))
                    ) == 0 or epoch % 100 == 0 or epoch == opt.nepoch - 1:
            torch.save(
                dsac.state_dict(),
                os.path.join(opt.outdir,
                             '%s_model_%d.pth' % (opt.name, epoch)))