Example 1
def prepare_model(option):
    loss = LineLoss(option.imagesize)
    dsac = DSAC(option.hypotheses, option.inlierthreshold, option.inlierbeta,
                option.inlieralpha, loss)
    point_nn = LineNN(option.capacity, option.receptivefield)
    if not option.cpu:
        point_nn = point_nn.cuda()
    point_nn.train()
    opt_point_nn = optim.Adam(point_nn.parameters(), lr=option.learningrate)
    lrs_point_nn = optim.lr_scheduler.StepLR(opt_point_nn,
                                             option.lrstep,
                                             gamma=0.5)
    return point_nn, dsac, opt_point_nn, lrs_point_nn
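A minimal usage sketch for prepare_model follows. It assumes a LineDataset with a sample_lines(batch_size) method and that the returned dsac object, called on the point predictions and ground-truth labels, yields an expected loss and a top-hypothesis loss, as in the line-fitting example further below; these signatures and the option.trainiterations/option.batchsize fields are assumptions, not part of the snippet above.

def train_point_nn(option):
    # hypothetical training loop built on prepare_model (LineDataset/DSAC APIs assumed)
    point_nn, dsac, opt_point_nn, lrs_point_nn = prepare_model(option)
    dataset = LineDataset(option.imagesize, option.imagesize)

    for iteration in range(option.trainiterations):
        inputs, labels = dataset.sample_lines(option.batchsize)  # assumed sampler API
        if not option.cpu:
            inputs = inputs.cuda()

        # predict candidate points, let DSAC score line hypotheses
        point_prediction = point_nn(inputs)
        exp_loss, top_loss = dsac(point_prediction, labels)

        opt_point_nn.zero_grad()
        exp_loss.backward()   # expected loss is differentiable w.r.t. the point predictions
        opt_point_nn.step()
        lrs_point_nn.step()   # StepLR: halves the learning rate every option.lrstep steps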
Example 2
def eval_pcpnet(opt):

    if opt.distributed:
        torch.distributed.init_process_group(backend="nccl")
        # 2) configure the GPU for this process
        local_rank = torch.distributed.get_rank()
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
        print('stage 2 passed')
    else:
        device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" %
                              opt.gpu_idx)

    opt.models = opt.models.split()
    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir,
                                      model_name + opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir,
                                      model_name + opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)
        trainopt.indir2 = '../data2'
        if opt.distributed and local_rank == 0:
            print(trainopt)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        # trainopt.outputs = {'unoriented_normals'}
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3

            else:
                raise ValueError('Unknown output: %s' % (o))

        if len(trainopt.points_per_patch) == 1:
            if trainopt.generate_points_dim == 1:
                regressor = WDSAC(
                    hyps=trainopt.hypotheses + 10,
                    inlier_params=trainopt.inlier_params,
                    patch_radius=trainopt.patch_radius,
                    decoder=trainopt.decoder,
                    use_mask=trainopt.use_mask,
                    dim_pts=trainopt.in_points_dim,
                    num_gpts=trainopt.generate_points_num,
                    dim_gpts=trainopt.generate_points_dim,
                    points_per_patch=trainopt.points_per_patch[0],
                    sym_op=trainopt.sym_op,
                    normal_loss=trainopt.normal_loss,
                    seed=trainopt.seed,
                    device=device,
                    use_point_stn=trainopt.use_point_stn,
                    use_feat_stn=trainopt.use_feat_stn)
            else:
                regressor = DSAC(hyps=trainopt.hypotheses + 10,
                                 inlier_params=trainopt.inlier_params,
                                 patch_radius=trainopt.patch_radius,
                                 decoder=trainopt.decoder,
                                 use_mask=trainopt.use_mask,
                                 dim_pts=trainopt.in_points_dim,
                                 num_gpts=trainopt.generate_points_num,
                                 dim_gpts=trainopt.generate_points_dim,
                                 points_per_patch=trainopt.points_per_patch[0],
                                 sym_op=trainopt.sym_op,
                                 normal_loss=trainopt.normal_loss,
                                 seed=trainopt.seed,
                                 device=device,
                                 use_point_stn=trainopt.use_point_stn,
                                 use_feat_stn=trainopt.use_feat_stn)
        else:
            regressor = MoEDSAC(hyps=trainopt.hypotheses + 10,
                                inlier_params=trainopt.inlier_params,
                                patch_radius=trainopt.patch_radius,
                                share_pts_stn=trainopt.share_pts_stn,
                                decoder=trainopt.decoder,
                                use_mask=trainopt.use_mask,
                                dim_pts=trainopt.in_points_dim,
                                num_gpts=trainopt.generate_points_num,
                                points_per_patch=trainopt.points_per_patch,
                                sym_op=trainopt.sym_op,
                                normal_loss=trainopt.normal_loss,
                                seed=trainopt.seed,
                                device=device,
                                use_point_stn=trainopt.use_point_stn,
                                use_feat_stn=trainopt.use_feat_stn)
            # if len(opt.expert_refine)>1:
            #     dsac.refine(opt.expert_refine)

        regressor.to(device)

        if opt.distributed:
            if torch.cuda.device_count() > 1:
                print("Let's use", torch.cuda.device_count(), "GPUs!")
            regressor = torch.nn.parallel.DistributedDataParallel(
                regressor, device_ids=[local_rank], output_device=local_rank)
            #print(local_rank)

        state_dict = torch.load(model_filename, map_location=device)
        # print(state_dict.keys())
        # print(regressor.state_dict().keys())
        regressor.load_state_dict(state_dict)
        print('stage 3 passed')

        regressor.eval()

        dataset = PointcloudPatchDataset(
            root=opt.indir,
            root_in=trainopt.indir2,
            shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            dim_pts=trainopt.in_points_dim,
            knn=trainopt.knn,
            #patch_features=[],
            seed=opt.seed,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)

        if opt.distributed:
            dataloader = torch.utils.data.DataLoader(
                dataset,
                sampler=DistributedSampler(datasampler),
                batch_size=model_batchSize,
                num_workers=int(opt.workers))
        else:
            dataloader = torch.utils.data.DataLoader(
                dataset,
                sampler=datasampler,
                batch_size=model_batchSize,
                num_workers=int(opt.workers))

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape,
                                    dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count,
                                       3,
                                       dtype=torch.float,
                                       device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            points, target, _, dist = data
            points = points.transpose(2, 1)
            points = points.to(device)
            target = target.to(device)
            dist = dist.to(device)

            with torch.no_grad():
                exp_loss, top_loss, pred, pts, _, _, _ = regressor(
                    points, target, dist)

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch - 1,
                                           dataset.shape_names[shape_ind]))

            batch_offset = 0
            while batch_offset < pred.size(0):

                shape_patches_remaining = shape_patch_count - shape_patch_offset
                batch_patches_remaining = pred.size(0) - batch_offset

                # append estimated patch properties batch to properties for the current shape
                shape_properties[shape_patch_offset:shape_patch_offset + min(
                    shape_patches_remaining, batch_patches_remaining
                ), :] = pred[
                    batch_offset:batch_offset +
                    min(shape_patches_remaining, batch_patches_remaining), :]

                batch_offset = batch_offset + min(shape_patches_remaining,
                                                  batch_patches_remaining)
                shape_patch_offset = shape_patch_offset + min(
                    shape_patches_remaining, batch_patches_remaining)

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    prop_saved = [False] * len(trainopt.outputs)

                    # save normals
                    oi = [
                        i for i, o in enumerate(trainopt.outputs)
                        if o in ['unoriented_normals', 'oriented_normals']
                    ]
                    if len(oi) > 1:
                        raise ValueError('Duplicate normal output.')
                    elif len(oi) == 1:
                        oi = oi[0]
                        normal_prop = shape_properties[:, output_pred_ind[oi]:
                                                       output_pred_ind[oi] + 3]

                        np.savetxt(
                            os.path.join(
                                model_outdir,
                                dataset.shape_names[shape_ind] + '.normals'),
                            normal_prop.cpu().numpy())
                        print('saved normals for ' +
                              dataset.shape_names[shape_ind])
                        prop_saved[oi] = True

                    # save curvatures

                    if not all(prop_saved):
                        raise ValueError(
                            'Not all shape properties were saved, some of them seem to be unsupported.'
                        )

                    # save point indices
                    if opt.sampling != 'full':
                        np.savetxt(os.path.join(
                            model_outdir,
                            dataset.shape_names[shape_ind] + '.idx'),
                                   datasampler.shape_patch_inds[shape_ind],
                                   fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[
                                shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(
                                datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' %
                                             opt.sampling)
                        shape_properties = shape_properties.new_zeros(
                            shape_patch_count, pred_dim)
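One practical detail in the snippet above: when a checkpoint was saved from a plain model but is loaded into the DistributedDataParallel wrapper (or the other way around), load_state_dict fails because DDP prefixes every parameter name with "module.". The hypothetical helper below sketches the usual key remapping; whether it is needed depends on how the checkpoint in model_filename was produced.

import torch

def load_checkpoint(regressor, model_filename, device):
    # remap keys if the checkpoint and the (possibly DDP-wrapped) model
    # disagree on the "module." prefix added by DistributedDataParallel
    state_dict = torch.load(model_filename, map_location=device)
    is_ddp = isinstance(regressor, torch.nn.parallel.DistributedDataParallel)
    has_prefix = any(k.startswith('module.') for k in state_dict)
    if is_ddp and not has_prefix:
        state_dict = {'module.' + k: v for k, v in state_dict.items()}
    elif not is_ddp and has_prefix:
        state_dict = {k[len('module.'):]: v for k, v in state_dict.items()}
    regressor.load_state_dict(state_dict)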
Example 3
def train(opt):
    # 1) configure the GPU for this process
    local_rank = torch.distributed.get_rank()
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)
    print('stage 1 passed')

    # 2) initialization
    # colored console output
    green = lambda x: '\033[92m' + x + '\033[0m'
    blue = lambda x: '\033[94m' + x + '\033[0m'

    log_dirname = os.path.join(opt.logdir, opt.name)
    params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
    model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
    desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))

    if local_rank == 0:
        if os.path.exists(log_dirname) or os.path.exists(model_filename):
            if os.path.exists(log_dirname):
                shutil.rmtree(os.path.join(opt.logdir, opt.name))

    # if os.path.exists(log_dirname) or os.path.exists(model_filename):
    #     if os.path.exists(log_dirname):
    #         shutil.rmtree(os.path.join(opt.logdir, opt.name))
    print('stage 2 passed')

    # 3) move the model to its GPU before wrapping it with DDP
    if len(opt.points_per_patch) == 1:
        if opt.generate_points_dim == 1:
            dsac = WDSAC(hyps=opt.hypotheses,
                         inlier_params=opt.inlier_params,
                         patch_radius=opt.patch_radius,
                         decoder=opt.decoder,
                         use_mask=opt.use_mask,
                         dim_pts=opt.in_points_dim,
                         num_gpts=opt.generate_points_num,
                         dim_gpts=opt.generate_points_dim,
                         points_per_patch=opt.points_per_patch[0],
                         sym_op=opt.sym_op,
                         normal_loss=opt.normal_loss,
                         seed=opt.seed,
                         device=device,
                         use_point_stn=opt.use_point_stn,
                         use_feat_stn=opt.use_feat_stn)
        else:
            dsac = DSAC(hyps=opt.hypotheses,
                        inlier_params=opt.inlier_params,
                        patch_radius=opt.patch_radius,
                        decoder=opt.decoder,
                        use_mask=opt.use_mask,
                        dim_pts=opt.in_points_dim,
                        num_gpts=opt.generate_points_num,
                        dim_gpts=opt.generate_points_dim,
                        points_per_patch=opt.points_per_patch[0],
                        sym_op=opt.sym_op,
                        normal_loss=opt.normal_loss,
                        seed=opt.seed,
                        device=device,
                        use_point_stn=opt.use_point_stn,
                        use_feat_stn=opt.use_feat_stn)
    else:
        dsac = MoEDSAC(hyps=opt.hypotheses,
                       inlier_params=opt.inlier_params,
                       patch_radius=opt.patch_radius,
                       share_pts_stn=opt.share_pts_stn,
                       decoder=opt.decoder,
                       use_mask=opt.use_mask,
                       dim_pts=opt.in_points_dim,
                       num_gpts=opt.generate_points_num,
                       points_per_patch=opt.points_per_patch,
                       sym_op=opt.sym_op,
                       normal_loss=opt.normal_loss,
                       seed=opt.seed,
                       device=device,
                       use_point_stn=opt.use_point_stn,
                       use_feat_stn=opt.use_feat_stn)

    dsac.to(device)

    if torch.cuda.device_count() > 1:
        if opt.refine != '':  # if the refine model is not from ddp
            dsac.load_state_dict(torch.load(opt.refine, map_location=device))
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # 3) wrap with DistributedDataParallel
        dsac = torch.nn.parallel.DistributedDataParallel(
            dsac, device_ids=[local_rank], output_device=local_rank)
        # if opt.refine != '': # if the refine model is not from ddp?
        #     dsac.load_state_dict( torch.load(opt.refine, map_location=device) )
    else:
        if opt.refine != '':
            dsac.load_state_dict(torch.load(opt.refine))

    print('stage 3 passed')

    # 4) use a DistributedSampler
    train_dataset = PointcloudPatchDataset(
        root=opt.indir,
        root_in=opt.indir2,
        shape_list_filename=opt.trainset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        dim_pts=opt.in_points_dim,
        knn=opt.knn,
        point_count_std=opt.patch_point_count_std,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        cache_capacity=opt.cache_capacity)

    train_datasampler = RandomPointcloudPatchSampler(
        train_dataset,
        patches_per_shape=opt.patches_per_shape,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs)

    train_dataloader = DataLoader(
        train_dataset,
        sampler=DistributedSampler(train_datasampler),
        batch_size=opt.batchSize,
        num_workers=int(opt.workers))

    test_dataset = PointcloudPatchDataset(
        root=opt.indir,
        root_in=opt.indir2,
        shape_list_filename=opt.testset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        dim_pts=opt.in_points_dim,
        knn=opt.knn,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        cache_capacity=opt.cache_capacity)

    test_datasampler = RandomPointcloudPatchSampler(
        test_dataset,
        patches_per_shape=opt.patches_per_shape,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs)

    test_dataloader = DataLoader(test_dataset,
                                 sampler=DistributedSampler(test_datasampler),
                                 batch_size=opt.batchSize,
                                 num_workers=int(opt.workers))

    # keep the exact training shape names for later reference
    opt.train_shapes = train_dataset.shape_names
    opt.test_shapes = test_dataset.shape_names

    print(
        'training set: %d patches (in %d batches) - test set: %d patches (in %d batches)'
        % (len(train_datasampler), len(train_dataloader),
           len(test_datasampler), len(test_dataloader)))

    try:
        os.makedirs(opt.outdir)
    except OSError:
        pass

    if local_rank == 0:
        train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
        test_writer = SummaryWriter(os.path.join(log_dirname, 'test'))

    lrate = opt.lr
    if opt.opti == 'SGD':
        optimizer = optim.SGD(dsac.parameters(),
                              lr=lrate,
                              momentum=opt.momentum)
    else:
        optimizer = optim.Adam(dsac.parameters(), lr=lrate)

    scheduler = lr_scheduler.MultiStepLR(
        optimizer, milestones=[1000],
        gamma=0.1)  # milestones in number of optimizer iterations

    train_num_batch = len(train_dataloader)
    test_num_batch = len(test_dataloader)
    # save parameters
    torch.save(opt, params_filename)

    # save description
    with open(desc_filename, 'w+') as text_file:
        print(opt.desc, file=text_file)

    for epoch in range(opt.nepoch):
        train_batchind = -1
        train_fraction_done = 0.0
        train_enum = enumerate(train_dataloader, 0)

        test_batchind = -1
        test_fraction_done = 0.0
        test_enum = enumerate(test_dataloader, 0)

        for train_batchind, data in train_enum:
            # set to training mode
            dsac.train()

            points = data[0]  # points has shape 64*512*3 here (batch x patch points x 3)
            target = data[1]
            mask = data[2]
            dist = data[3]

            points = points.transpose(2, 1)
            points = points.to(device)
            target = target.to(device)
            mask = mask.to(device)
            dist = dist.to(device)

            # zero gradients
            optimizer.zero_grad()

            exp_loss, top_loss, pred, pts, mask_p, patch_rot, _ = dsac(
                points, target, dist)
            loss = exp_loss.mean()

            # backpropagate through entire network to compute gradients of loss w.r.t. parameters
            loss.backward()

            # parameter optimization step
            optimizer.step()

            train_fraction_done = (train_batchind + 1) / train_num_batch

            # print info and update log file
            print('[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f' %
                  (opt.name, epoch, train_batchind, train_num_batch - 1,
                   green('train'), loss, exp_loss.mean().item(),
                   top_loss.mean()))
            if local_rank == 0:
                x1 = (epoch +
                      train_fraction_done) * train_num_batch * opt.batchSize
                train_writer.add_scalars(
                    'loss', {
                        'meanLoss': exp_loss.mean().item(),
                        'topLoss': top_loss.mean().item()
                    }, x1)

            while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:

                # set to evaluation mode
                dsac.eval()

                test_batchind, data = next(test_enum)

                points = data[0]  # points has shape 64*512*3 here (batch x patch points x 3)
                target = data[1]
                mask = data[2]
                dist = data[3]

                points = points.transpose(2, 1)
                points = points.to(device)
                target = target.to(device)
                mask = mask.to(device)
                dist = dist.to(device)

                # forward pass
                with torch.no_grad():
                    exp_loss, top_loss, pred, pts, mask_p, patch_rot, _ = dsac(
                        points, target, dist)

                loss = exp_loss.mean()

                test_fraction_done = (test_batchind + 1) / test_num_batch

                # print info and update log file
                print('[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f' %
                      (opt.name, epoch, train_batchind, train_num_batch - 1,
                       blue('test'), loss, exp_loss.mean().item(),
                       top_loss.mean()))
                if local_rank == 0:
                    x1 = (epoch +
                          test_fraction_done) * train_num_batch * opt.batchSize
                    # test_writer.add_scalar('loss', exp_loss.mean().item(), x1)
                    test_writer.add_scalars(
                        'loss', {
                            'meanLoss': exp_loss.mean().item(),
                            'topLoss': top_loss.mean().item()
                        }, x1)

        # update learning rate
        scheduler.step()
        # save model, overwriting the old model
        if local_rank == 0:
            if epoch % opt.saveinterval == 0 or epoch == opt.nepoch - 1:
                print('model saved')
                torch.save(dsac.state_dict(), model_filename)

        # save model in a separate file in epochs 0,5,10,50,100,500,1000, ...
        if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))
                    ) == 0 or epoch % 100 == 0 or epoch == opt.nepoch - 1:
            torch.save(
                dsac.state_dict(),
                os.path.join(opt.outdir,
                             '%s_model_%d.pth' % (opt.name, epoch)))
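Note that train above calls torch.distributed.get_rank() without initializing the process group, so the caller has to do that first. A minimal entry-point sketch is shown below; it assumes the script is launched with torchrun or torch.distributed.launch (which provide the environment variables read by the env:// init method) and that a parse_arguments helper builds opt, which is a hypothetical name.

import torch

if __name__ == '__main__':
    opt = parse_arguments()  # hypothetical: must provide the fields used in train()

    # initialize the default process group before train() queries the rank;
    # env:// reads MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE set by the launcher
    torch.distributed.init_process_group(backend='nccl', init_method='env://')

    train(opt)

    torch.distributed.destroy_process_group()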
Example 4
def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" % opt.gpu_idx)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir, model_name+opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir, model_name+opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            elif o == 'max_curvature' or o == 'min_curvature':
                output_pred_ind.append(pred_dim)
                pred_dim += 1
            else:
                raise ValueError('Unknown output: %s' % (o))
        #print(trainopt.patch_radius)
        dataset = PointcloudPatchDataset(
            root=opt.indir, shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            #patch_features=[],
            seed=opt.seed,
            #use_pca=trainopt.use_pca,
            center=trainopt.patch_center,
            #point_tuple=trainopt.point_tuple,
            #sparse_patches=opt.sparse_patches,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            sampler=datasampler,
            batch_size=model_batchSize,
            num_workers=int(opt.workers))

        regressor = DSAC(
            trainopt.hypotheses,
            trainopt.inlierthreshold,
            trainopt.inlierbeta,
            trainopt.inlieralpha,
            trainopt.normal_loss,
            trainopt.seed, device,
            use_point_stn=trainopt.use_point_stn,
            use_feat_stn=trainopt.use_feat_stn,
            use_mask=trainopt.use_mask
        )

        regressor.load_state_dict(torch.load(model_filename))
        regressor.to(device)
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count, pred_dim, dtype=torch.float, device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            #index =[]
            points, data_trans, mask_t = data

            points = points.transpose(2, 1)
            points = points.to(device)

            data_trans = data_trans.to(device)
            mask_t = mask_t.to(device)

            with torch.no_grad():
                exp_loss, top_loss, normal, pts, mask = regressor(points, data_trans)
            viz = Visdom()
            assert viz.check_connection()
            # scatter-plot labels: 1 = input points, 2 = generated points, 5 = origin marker
            Y = torch.zeros(512 + 32 + 1)
            Z = torch.zeros(512 + 1)
            Z[0:512] += 1
            Z[512] += 2
            Y[0:512] += 1
            Y[512:512 + 32] += 2
            Y[512 + 32] += 5

            print(top_loss.mean())
            for i in range(points.size(0)):
                # print("input", x[i].transpose(0,1)[0:100])
                # print("predict",i,normal[i])
                # print("ground truth",i,data_trans[i])
                # print("top_loss_loss",i,top_loss[i],"\n")
                # print(mask_t[i])
                # print(mask[i].view(-1))
                viz.scatter(
                    X=torch.cat((points[i].transpose(0,1),pts[i],torch.zeros(1,3).cuda()),0),
                    Y=Y,
                    opts=dict(
                        title = str(i),
                    #'legend': ['Men', 'Women'],
                        markersize= 2,
                    #markercolor=np.random.randint(0, 255, (3, 3,)),
                    )
                )
            # #     # viz.scatter(
            # #     #     X=torch.mul(points[i].transpose(0,1),mask[i]),
                    
            # #     #     opts=dict(
            # #     #         title = str(i),
            # #     #     #'legend': ['Men', 'Women'],
            # #     #         markersize= 2,
            # #     #     #markercolor=np.random.randint(0, 255, (3, 3,)),
            # #     #     )
            # #     # )
            #     viz.scatter(
            #         X=torch.cat((torch.mul(points[i].transpose(0,1),mask_t[i].view(-1,1)),torch.zeros(1,3).cuda(2)),0),
            #         Y=Z,
            #         opts=dict(
            #             title = str(i)+"true",
            #         #'legend': ['Men', 'Women'],
            #             markersize= 2,
            #         #markercolor=np.random.randint(0, 255, (3, 3,)),
            #         )
            #     )

            #print("pts",i,pts[i])
                
            # # post-processing of the prediction
            # for oi, o in enumerate(trainopt.outputs):
            #     if o == 'unoriented_normals' or o == 'oriented_normals':
            #         o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3]

            #         if trainopt.use_point_stn:
            #             # transform predictions with inverse transform
            #             # since we know the transform to be a rotation (QSTN), the transpose is the inverse
            #             o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), trans.transpose(2, 1)).squeeze(dim=1)

            #         if trainopt.use_pca:
            #             # transform predictions with inverse pca rotation (back to world space)
            #             o_pred[:, :] = torch.bmm(o_pred.unsqueeze(1), data_trans.transpose(2, 1)).squeeze(dim=1)

            #         # normalize normals
            #         o_pred_len = torch.max(o_pred.new_tensor([sys.float_info.epsilon*100]), o_pred.norm(p=2, dim=1, keepdim=True))
            #         o_pred = o_pred / o_pred_len

            #     elif o == 'max_curvature' or o == 'min_curvature':
            #         o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+1]

            #         # undo patch size normalization:
            #         o_pred[:, :] = o_pred / dataset.patch_radius_absolute[shape_ind][0]

            #     else:
            #         raise ValueError('Unsupported output type: %s' % (o))

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch-1, dataset.shape_names[shape_ind]))

            batch_offset = 0
            while batch_offset < normal.size(0):

                shape_patches_remaining = shape_patch_count-shape_patch_offset
                batch_patches_remaining = normal.size(0)-batch_offset

                # append estimated patch properties batch to properties for the current shape
                shape_properties[shape_patch_offset:shape_patch_offset+min(shape_patches_remaining, batch_patches_remaining), :] =  normal[
                    batch_offset:batch_offset+min(shape_patches_remaining, batch_patches_remaining), :]

                batch_offset = batch_offset + min(shape_patches_remaining, batch_patches_remaining)
                shape_patch_offset = shape_patch_offset + min(shape_patches_remaining, batch_patches_remaining)

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    # prop_saved = [False]*len(trainopt.outputs)

                    # # save normals
                    # oi = [i for i, o in enumerate(trainopt.outputs) if o in ['unoriented_normals', 'oriented_normals']]
                    # if len(oi) > 1:
                    #     raise ValueError('Duplicate normal output.')
                    # elif len(oi) == 1:
                    #     oi = oi[0]
                    #     normal_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
                    #     np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.normals'), normal_prop.cpu().numpy())
                    #     prop_saved[oi] = True

                    # # save curvatures
                    # oi1 = [i for i, o in enumerate(trainopt.outputs) if o == 'max_curvature']
                    # oi2 = [i for i, o in enumerate(trainopt.outputs) if o == 'min_curvature']
                    # if len(oi1) > 1 or len(oi2) > 1:
                    #     raise ValueError('Duplicate minimum or maximum curvature output.')
                    # elif len(oi1) == 1 or len(oi2) == 1:
                    #     curv_prop = shape_properties.new_zeros(shape_properties.size(0), 2)
                    #     if len(oi1) == 1:
                    #         oi1 = oi1[0]
                    #         curv_prop[:, 0] = shape_properties[:, output_pred_ind[oi1]]
                    #         prop_saved[oi1] = True
                    #     if len(oi2) == 1:
                    #         oi2 = oi2[0]
                    #         curv_prop[:, 1] = shape_properties[:, output_pred_ind[oi2]]
                    #         prop_saved[oi2] = True
                    #     np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.curv'), curv_prop.cpu().numpy())

                    # if not all(prop_saved):
                    #     raise ValueError('Not all shape properties were saved, some of them seem to be unsupported.')

                    # # save point indices
                    # if opt.sampling != 'full':
                    #     np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.idx'), datasampler.shape_patch_inds[shape_ind], fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
                        shape_properties = shape_properties.new_zeros(shape_patch_count, pred_dim)
Example 5
def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" %
                          opt.gpu_idx)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir,
                                      model_name + opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir,
                                      model_name + opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            elif o == 'max_curvature' or o == 'min_curvature':
                output_pred_ind.append(pred_dim)
                pred_dim += 1
            else:
                raise ValueError('Unknown output: %s' % (o))
        #print(trainopt.patch_radius)

        dataset = PointcloudPatchDataset(
            root='/Users/jinwei/GItHub/DRNE/data/pclouds',
            root_in=trainopt.indir2,
            shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            dim_pts=trainopt.in_points_dim,
            knn=trainopt.knn,
            point_count_std=trainopt.patch_point_count_std,
            seed=trainopt.seed,
            identical_epochs=trainopt.identical_epochs,
            cache_capacity=trainopt.cache_capacity)

        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 sampler=datasampler,
                                                 batch_size=model_batchSize,
                                                 num_workers=int(opt.workers))

        regressor = DSAC(hyps=trainopt.hypotheses,
                         inlier_params=trainopt.inlier_params,
                         patch_radius=trainopt.patch_radius,
                         decoder=trainopt.decoder,
                         use_mask=trainopt.use_mask,
                         dim_pts=trainopt.in_points_dim,
                         num_gpts=trainopt.generate_points_num,
                         dim_gpts=trainopt.generate_points_dim,
                         points_per_patch=trainopt.points_per_patch[0],
                         sym_op=trainopt.sym_op,
                         normal_loss=trainopt.normal_loss,
                         seed=trainopt.seed,
                         device=device,
                         use_point_stn=trainopt.use_point_stn,
                         use_feat_stn=trainopt.use_feat_stn)

        regressor.load_state_dict(
            torch.load(model_filename, map_location=torch.device('cpu')))
        regressor.to(device)
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape,
                                    dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count,
                                       pred_dim,
                                       dtype=torch.float,
                                       device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            points = data[0]  # points has shape 64*512*3 here (batch x patch points x 3)
            target = data[1]
            mask = data[2]
            dist = data[3]

            points = points.transpose(2, 1)
            points = points.to(device)
            target = target.to(device)
            mask = mask.to(device)
            dist = dist.to(device)

            with torch.no_grad():
                exp_loss, top_loss, pred, pts, mask_p, patch_rot, _ = regressor(
                    points, target, dist)

            print(top_loss.mean())
            pred_len = torch.max(
                torch.FloatTensor([sys.float_info.epsilon * 100]),
                pred.norm(p=2, dim=1, keepdim=True))
            pred = pred / pred_len
            target_len = torch.max(
                torch.FloatTensor([sys.float_info.epsilon * 100]),
                target.norm(p=2, dim=2, keepdim=True))
            target = target / target_len

            # plot a patch
            x, y = torch.meshgrid(torch.tensor([-10.0, 10.0]),
                                  torch.tensor([-10.0, 10.0]))
            for i in range(points.size(0)):
                pred_xy = pred[i, 0:2] / (pred[i, 2] + 1e-10)
                pred_xy = pred_xy.to(device)
                z = -(pred_xy[0] * x + pred_xy[1] * y)
                mlab.figure('patch_with_gpts',
                            fgcolor=(0, 0, 0),
                            bgcolor=(1, 1, 1))
                mlab.points3d(10 * points[i, 0, :],
                              10 * points[i, 1, :],
                              10 * points[i, 2, :],
                              color=(0.7, 0.7, 0.7),
                              scale_factor=0.3,
                              scale_mode='vector')
                mlab.points3d(10 * pts[i, :, 0],
                              10 * pts[i, :, 1],
                              10 * pts[i, :, 2],
                              color=(0.2, 0.2, 0.2),
                              scale_factor=0.7,
                              scale_mode='vector')
                mlab.quiver3d(0.0,
                              0.0,
                              0.0,
                              pred[i, 0],
                              pred[i, 1],
                              pred[i, 2],
                              line_width=3,
                              scale_factor=10,
                              color=(0, 1, 0))
                if (target[i, 0, :] - pred[i, :]).pow(2).sum() > (
                        target[i, 0, :] + pred[i, :]).pow(2).sum():
                    mlab.quiver3d(0.0,
                                  0.0,
                                  0.0,
                                  -target[i, 0, 0],
                                  -target[i, 0, 1],
                                  -target[i, 0, 2],
                                  line_width=3,
                                  scale_factor=10,
                                  color=(1, 0.0, 0.0))
                else:
                    mlab.quiver3d(0.0,
                                  0.0,
                                  0.0,
                                  target[i, 0, 0],
                                  target[i, 0, 1],
                                  target[i, 0, 2],
                                  line_width=3,
                                  scale_factor=10,
                                  color=(1, 0.0, 0.0))
                mlab.surf(x, y, z, opacity=0.3)
                mlab.show()
Example 6
parser.add_argument('--cpu', '-cpu', action='store_true',
	help='execute networks on CPU. Note that (RANSAC) line fitting anyway runs on CPU')

parser.add_argument('--session', '-sid', default='',
	help='custom session name appended to output files. Useful to separate different runs of the program')

opt = parser.parse_args()

if len(opt.session) > 0: opt.session = '_' + opt.session
sid = 'rf%d_c%d_h%d_t%.2f%s' % (opt.receptivefield, opt.capacity, opt.hypotheses, opt.inlierthreshold, opt.session)

# setup the training process
dataset = LineDataset(opt.imagesize, opt.imagesize)

loss = LineLoss(opt.imagesize)
dsac = DSAC(opt.hypotheses, opt.inlierthreshold, opt.inlierbeta, opt.inlieralpha, loss)

# we train two CNNs in parallel
# 1) a CNN that predicts points and is trained with DSAC -> PointNN (good idea)
point_nn = LineNN(opt.capacity, opt.receptivefield)
if not opt.cpu: point_nn = point_nn.cuda()
point_nn.train()
opt_point_nn = optim.Adam(point_nn.parameters(), lr=opt.learningrate)
lrs_point_nn = optim.lr_scheduler.StepLR(opt_point_nn, opt.lrstep, gamma=0.5)

# 2) a CNN that predicts the line parameters directly -> DirectNN (bad idea)
direct_nn = LineNN(opt.capacity, 0, True)
if not opt.cpu: direct_nn = direct_nn.cuda()
direct_nn.train()
opt_direct_nn = optim.Adam(direct_nn.parameters(), lr=opt.learningrate)
lrs_direct_nn = optim.lr_scheduler.StepLR(opt_direct_nn, opt.lrstep, gamma=0.5)
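Both StepLR schedulers above halve their optimizer's learning rate every opt.lrstep calls to step(). The following self-contained sketch, with a toy stand-in model and made-up numbers, only illustrates that decay; it is not part of the training script.

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)                      # toy stand-in for LineNN
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2500, gamma=0.5)

for iteration in range(7500):
    optimizer.step()                         # parameter update would happen here
    scheduler.step()                         # learning rate halves every 2500 steps
    if (iteration + 1) % 2500 == 0:
        print(iteration + 1, optimizer.param_groups[0]['lr'])
# expected output: 2500 0.0005 / 5000 0.00025 / 7500 0.000125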
Example 7
def eval_pcpnet(opt):

    opt.models = opt.models.split()

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" % opt.gpu_idx)

    for model_name in opt.models:

        print("Random Seed: %d" % (opt.seed))
        random.seed(opt.seed)
        torch.manual_seed(opt.seed)

        model_filename = os.path.join(opt.modeldir, model_name+opt.modelpostfix)
        param_filename = os.path.join(opt.modeldir, model_name+opt.parmpostfix)

        # load model and training parameters
        trainopt = torch.load(param_filename)

        if opt.batchSize == 0:
            model_batchSize = trainopt.batchSize
        else:
            model_batchSize = opt.batchSize

        # get indices in targets and predictions corresponding to each output
        pred_dim = 0
        output_pred_ind = []
        for o in trainopt.outputs:
            if o == 'unoriented_normals' or o == 'oriented_normals':
                output_pred_ind.append(pred_dim)
                pred_dim += 3
            
            else:
                raise ValueError('Unknown output: %s' % (o))

        dataset = PointcloudPatchDataset(
            root=opt.indir, shape_list_filename=opt.dataset,
            patch_radius=trainopt.patch_radius,
            points_per_patch=trainopt.points_per_patch,
            #patch_features=[],
            seed=opt.seed,
            #use_pca=trainopt.use_pca,
            center=trainopt.patch_center,
            #point_tuple=trainopt.point_tuple,
            #sparse_patches=opt.sparse_patches,
            cache_capacity=opt.cache_capacity)
        if opt.sampling == 'full':
            datasampler = SequentialPointcloudPatchSampler(dataset)
        elif opt.sampling == 'sequential_shapes_random_patches':
            datasampler = SequentialShapeRandomPointcloudPatchSampler(
                dataset,
                patches_per_shape=opt.patches_per_shape,
                seed=opt.seed,
                sequential_shapes=True,
                identical_epochs=False)
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            sampler=datasampler,
            batch_size=model_batchSize,
            num_workers=int(opt.workers))

        regressor = DSAC(
            trainopt.hypotheses,
            trainopt.inlierthreshold,
            trainopt.inlierbeta,
            trainopt.inlieralpha,
            trainopt.normal_loss,
            trainopt.seed,device,
            use_point_stn=trainopt.use_point_stn,
            use_feat_stn=trainopt.use_feat_stn,
            use_mask=trainopt.use_mask
        )

        regressor.load_state_dict(torch.load(model_filename))
        regressor.to(device)
        regressor.eval()

        shape_ind = 0
        shape_patch_offset = 0
        if opt.sampling == 'full':
            shape_patch_count = dataset.shape_patch_count[shape_ind]
        elif opt.sampling == 'sequential_shapes_random_patches':
            shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
        else:
            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
        shape_properties = torch.zeros(shape_patch_count, 3, dtype=torch.float, device=device)

        # append model name to output directory and create directory if necessary
        model_outdir = os.path.join(opt.outdir, model_name)
        if not os.path.exists(model_outdir):
            os.makedirs(model_outdir)

        num_batch = len(dataloader)
        batch_enum = enumerate(dataloader, 0)
        for batchind, data in batch_enum:

            # get batch and upload to GPU
            points, data_trans, _ = data
            points = points.transpose(2, 1)
            points = points.to(device)

            data_trans = data_trans.to(device)

            with torch.no_grad():
                exp_loss, top_loss, pred, pts, _ = regressor(points, data_trans)

            # post-processing of the prediction
            # for oi, o in enumerate(trainopt.outputs):
            #     if o == 'unoriented_normals' or o == 'oriented_normals':
            #         o_pred = pred[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
            #         # normalize normals
            #         #o_pred_len = torch.max(o_pred.new_tensor([sys.float_info.epsilon*100]), o_pred.norm(p=2, dim=1, keepdim=True))
            #         #o_pred = o_pred / o_pred_len
            #     else:
            #         raise ValueError('Unsupported output type: %s' % (o))

            print('[%s %d/%d] shape %s' % (model_name, batchind, num_batch-1, dataset.shape_names[shape_ind]))

            batch_offset = 0
            while batch_offset < pred.size(0):

                shape_patches_remaining = shape_patch_count-shape_patch_offset
                batch_patches_remaining = pred.size(0)-batch_offset

                # append estimated patch properties batch to properties for the current shape
                shape_properties[shape_patch_offset:shape_patch_offset+min(shape_patches_remaining, batch_patches_remaining), :] = pred[
                    batch_offset:batch_offset+min(shape_patches_remaining, batch_patches_remaining), :]

                batch_offset = batch_offset + min(shape_patches_remaining, batch_patches_remaining)
                shape_patch_offset = shape_patch_offset + min(shape_patches_remaining, batch_patches_remaining)

                if shape_patches_remaining <= batch_patches_remaining:

                    # save shape properties to disk
                    prop_saved = [False]*len(trainopt.outputs)

                    # save normals
                    oi = [i for i, o in enumerate(trainopt.outputs) if o in ['unoriented_normals', 'oriented_normals']]
                    if len(oi) > 1:
                        raise ValueError('Duplicate normal output.')
                    elif len(oi) == 1:
                        oi = oi[0]
                        normal_prop = shape_properties[:, output_pred_ind[oi]:output_pred_ind[oi]+3]
                        
                        np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.normals'), normal_prop.cpu().numpy())
                        print('saved normals for ' + dataset.shape_names[shape_ind])
                        prop_saved[oi] = True

                    # save curvatures
                    

                    if not all(prop_saved):
                        raise ValueError('Not all shape properties were saved, some of them seem to be unsupported.')

                    # save point indices
                    if opt.sampling != 'full':
                        np.savetxt(os.path.join(model_outdir, dataset.shape_names[shape_ind]+'.idx'), datasampler.shape_patch_inds[shape_ind], fmt='%d')

                    # start new shape
                    if shape_ind + 1 < len(dataset.shape_names):
                        shape_patch_offset = 0
                        shape_ind = shape_ind + 1
                        if opt.sampling == 'full':
                            shape_patch_count = dataset.shape_patch_count[shape_ind]
                        elif opt.sampling == 'sequential_shapes_random_patches':
                            # shape_patch_count = min(opt.patches_per_shape, dataset.shape_patch_count[shape_ind])
                            shape_patch_count = len(datasampler.shape_patch_inds[shape_ind])
                        else:
                            raise ValueError('Unknown sampling strategy: %s' % opt.sampling)
                        shape_properties = shape_properties.new_zeros(shape_patch_count, pred_dim)
Example 8
def train_dsacpnet(opt):

    # gpu init
    # multi_gpus = False
    # if ',' in opt.gpu_idx:
    #     gpu_ids = [int(id) for id in opt.gpu_idx.split(',')]
    #     multi_gpus = True
    # else:
    #     gpu_ids = [int(opt.gpu_idx)]

    # device = torch.device('cuda:{}'.format(gpu_ids[0]) if torch.cuda.is_available() else 'cpu')
    # if the line of code below is used here, it raises: RuntimeError: all tensors must be on devices[0]
    # by default the device is 0, so the device id has to be specified explicitly.
    device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" %
                          opt.gpu_idx)

    #if multi_gpus:
    # net = DataParallel(net, device_ids=gpu_ids).to(device)
    # margin = DataParallel(margin, device_ids=gpu_ids).to(device)
    #else:
    #   net = net.to(device)
    #  margin = margin.to(device)
    #device = torch.device("cpu" if opt.gpu_idx < 0 else "cuda:%d" % opt.gpu_idx)

    # colored console output
    green = lambda x: '\033[92m' + x + '\033[0m'
    blue = lambda x: '\033[94m' + x + '\033[0m'

    log_dirname = os.path.join(opt.logdir, opt.name)
    params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
    model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
    desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))

    if os.path.exists(log_dirname) or os.path.exists(model_filename):
        response = input(
            'A training run named "%s" already exists, overwrite? (y/n) ' %
            (opt.name))
        if response == 'y':
            if os.path.exists(log_dirname):
                shutil.rmtree(os.path.join(opt.logdir, opt.name))
        else:
            sys.exit()

    criterion = nn.BCEWithLogitsLoss()
    dsac = DSAC(opt.hypotheses,
                opt.inlierthreshold,
                opt.inlierbeta,
                opt.inlieralpha,
                opt.normal_loss,
                opt.seed,
                device,
                use_point_stn=opt.use_point_stn,
                use_feat_stn=opt.use_feat_stn,
                use_mask=opt.use_mask,
                points_num=opt.points_num,
                points_per_patch=opt.points_per_patch,
                sym_op=opt.sym_op)

    if opt.seed < 0:
        opt.seed = random.randint(1, 10000)

    print("Random Seed: %d" % (opt.seed))
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    # create train and test dataset loaders
    train_dataset = PointcloudPatchDataset(
        root=opt.indir,
        shape_list_filename=opt.trainset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        center=opt.patch_center,
        cache_capacity=opt.cache_capacity)
    if opt.training_order == 'random':
        train_datasampler = RandomPointcloudPatchSampler(
            train_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        train_datasampler = SequentialShapeRandomPointcloudPatchSampler(
            train_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   sampler=train_datasampler,
                                                   batch_size=opt.batchSize,
                                                   num_workers=int(
                                                       opt.workers))

    test_dataset = PointcloudPatchDataset(
        root=opt.indir,
        shape_list_filename=opt.testset,
        patch_radius=opt.patch_radius,
        points_per_patch=opt.points_per_patch,
        seed=opt.seed,
        identical_epochs=opt.identical_epochs,
        center=opt.patch_center,
        cache_capacity=opt.cache_capacity)
    if opt.training_order == 'random':
        test_datasampler = RandomPointcloudPatchSampler(
            test_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    elif opt.training_order == 'random_shape_consecutive':
        test_datasampler = SequentialShapeRandomPointcloudPatchSampler(
            test_dataset,
            patches_per_shape=opt.patches_per_shape,
            seed=opt.seed,
            identical_epochs=opt.identical_epochs)
    else:
        raise ValueError('Unknown training order: %s' % (opt.training_order))

    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  sampler=test_datasampler,
                                                  batch_size=opt.batchSize,
                                                  num_workers=int(opt.workers))

    # keep the exact training shape names for later reference
    opt.train_shapes = train_dataset.shape_names
    opt.test_shapes = test_dataset.shape_names

    print(
        'training set: %d patches (in %d batches) - test set: %d patches (in %d batches)'
        % (len(train_datasampler), len(train_dataloader),
           len(test_datasampler), len(test_dataloader)))

    try:
        os.makedirs(opt.outdir)
    except OSError:
        pass

    train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
    test_writer = SummaryWriter(os.path.join(log_dirname, 'test'))

    optimizer = optim.SGD(dsac.parameters(), lr=opt.lr, momentum=opt.momentum)
    scheduler = lr_scheduler.MultiStepLR(
        optimizer, milestones=[],
        gamma=0.1)  # milestones in number of optimizer iterations
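    # note: with an empty milestones list the MultiStepLR scheduler never actually decays the learning rate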
    #dsacpnet= torch.nn.DataParallel(dsacpnet, device_ids=gpu_ids).to(device)
    #dsac= torch.nn.DataParallel(dsac, device_ids=gpu_ids).to(device)
    dsac.to(device)
    train_num_batch = len(train_dataloader)
    test_num_batch = len(test_dataloader)

    # save parameters
    torch.save(opt, params_filename)

    # save description
    with open(desc_filename, 'w+') as text_file:
        print(opt.desc, file=text_file)

    for epoch in range(opt.nepoch):

        train_batchind = -1
        train_fraction_done = 0.0
        train_enum = enumerate(train_dataloader, 0)

        test_batchind = -1
        test_fraction_done = 0.0
        test_enum = enumerate(test_dataloader, 0)

        for train_batchind, data in train_enum:

            # update learning rate
            scheduler.step(epoch * train_num_batch + train_batchind)
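            # (passing an explicit step index to scheduler.step() is deprecated in recent PyTorch
            #  releases, where the scheduler is expected to be stepped without arguments)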

            # set to training mode
            dsac.train()

            points = data[0]  # points is a batchSize x 512 x 3 tensor at this point
            target = data[1]
            mask = data[2]

            points = points.transpose(2, 1)
            points = points.to(device)

            target = target.to(device)
            mask = mask.to(device)

            # zero gradients
            optimizer.zero_grad()

            # forward pass: exp_loss is the expected loss over the sampled hypotheses,
            # top_loss the loss of the highest-scoring hypothesis (DSAC outputs)
            exp_loss, top_loss, _, pts, mask_p = dsac(points, target)

            if opt.use_mask:
                mask_p = mask_p.view(-1, opt.points_per_patch)
                mask_loss = criterion(mask_p, mask)  #
            else:
                mask_loss = 0
            exp_loss = exp_loss.mean()

            loss = exp_loss + mask_loss  #+chamfer_loss
            # backpropagate through entire network to compute gradients of loss w.r.t. parameters
            loss.backward()

            # parameter optimization step
            optimizer.step()

            train_fraction_done = (train_batchind + 1) / train_num_batch

            # print info and update log file
            print(
                '[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f mask Loss:%f'
                % (opt.name, epoch, train_batchind, train_num_batch - 1,
                   green('train'), loss, exp_loss.mean().item(),
                   top_loss.mean(), mask_loss))
            train_writer.add_scalar('loss', exp_loss.item(),
                                    (epoch + train_fraction_done) *
                                    train_num_batch * opt.batchSize)

            while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:

                # set to evaluation mode
                dsac.eval()

                test_batchind, data = next(test_enum)

                points = data[0]  # points is a batchSize x 512 x 3 tensor at this point
                target = data[1]
                mask = data[2]

                points = points.transpose(2, 1)
                points = points.to(device)

                target = target.to(device)
                mask = mask.to(device)

                # forward pass
                with torch.no_grad():
                    exp_loss, top_loss, _, pts, mask_p = dsac(points, target)

                if opt.use_mask:
                    mask_p = mask_p.view(-1, opt.points_per_patch)
                    mask_loss = criterion(mask_p, mask)  #
                else:
                    mask_loss = 0
                loss = exp_loss + mask_loss
                test_fraction_done = (test_batchind + 1) / test_num_batch

                # print info and update log file
                print(
                    '[%s %d: %d/%d] %s tloss: %f loss: %f Top Loss:%f mask Loss:%f '
                    % (opt.name, epoch, train_batchind, train_num_batch - 1,
                       blue('test'), loss, exp_loss.mean().item(),
                       top_loss.mean(), mask_loss))
                test_writer.add_scalar('loss',
                                       exp_loss.mean().item(),
                                       (epoch + test_fraction_done) *
                                       train_num_batch * opt.batchSize)

        # save model, overwriting the old model
        if epoch % opt.saveinterval == 0 or epoch == opt.nepoch - 1:
            torch.save(dsac.state_dict(), model_filename)

        # save model in a separate file in epochs 0,5,10,50,100,500,1000, ...
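        # (the checkpoint interval grows with the epoch: every 5 epochs early on, then every 50,
        #  then every 500, plus every 100th epoch and the final epoch)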
        if epoch % (5 * 10**math.floor(math.log10(max(2, epoch - 1)))
                    ) == 0 or epoch % 100 == 0 or epoch == opt.nepoch - 1:
            torch.save(
                dsac.state_dict(),
                os.path.join(opt.outdir,
                             '%s_model_%d.pth' % (opt.name, epoch)))
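
A small, self-contained sketch of the train/test interleaving used in train_dsacpnet above: test batches are consumed only while the test progress fraction lags behind the training progress fraction, so evaluation is spread evenly over each epoch. The batch counts and the per-batch "work" are placeholders for illustration, not code from the repository.

train_num_batch, test_num_batch = 10, 4

test_iter = iter(range(test_num_batch))
test_batchind, test_fraction_done = -1, 0.0

for train_batchind in range(train_num_batch):
    # ... one training step would run here ...
    train_fraction_done = (train_batchind + 1) / train_num_batch

    # run just enough test batches to keep test progress level with train progress
    while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:
        test_batchind = next(test_iter)
        # ... one evaluation step would run here ...
        test_fraction_done = (test_batchind + 1) / test_num_batch
        print('train %.2f  test %.2f' % (train_fraction_done, test_fraction_done))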