예제 #1
0
def main(args):
    """Export predicted structure points for every ``*.off`` shape.

    Loads the trained ``Pointnet2StructurePointNet`` from
    ``args.model_fname``, runs each point cloud found in ``args.data_dir``
    through it, and writes the predicted structure points (colored via
    ``COLOR_LIST``) to ``args.output_dir`` as ``<shape>_stpts.off``.
    """
    model = Pointnet2StructurePointNet(
        num_structure_points=args.num_structure_points,
        input_channels=0,
        use_xyz=True)
    model.cuda()
    checkpoint_util.load_checkpoint(model_3d=model, filename=args.model_fname)
    # Inference mode: freezes batch-norm statistics / disables dropout.
    # The other evaluation entry points in this project call eval() too;
    # omitting it here left BN layers in training mode during inference.
    model.eval()

    os.makedirs(args.output_dir, exist_ok=True)

    fnames = glob.glob(os.path.join(args.data_dir, '*.off'))

    for fname in fnames:
        fname = os.path.basename(fname)
        pts = point_cloud_utils.read_points_off(
            os.path.join(args.data_dir, fname))

        batch_pts = torch.from_numpy(pts)[None, :, :].cuda()
        # Forward pass only — skip autograd bookkeeping.
        with torch.no_grad():
            structure_points = model(batch_pts)

        structure_points = structure_points[0].cpu().detach().numpy()
        outfname = os.path.join(args.output_dir, fname[:-4] + '_stpts.off')
        point_cloud_utils.write_points_off(
            outfname, structure_points,
            COLOR_LIST[:structure_points.shape[0], :])

    print('output saved to {0}'.format(args.output_dir))
예제 #2
0
def main(args):
    """Evaluate structure-point correspondence on the BHCP benchmark.

    Builds a single-sample test loader for ``args.category``, loads the
    trained model from ``args.model_fname``, runs
    ``bhcp_evaluate_one_epoch``, and prints the correspondence hit ratio
    for each distance threshold.
    """
    test_set = bhcp_dataloader.bhcp_dataloader(
        args.data_dir, args.category, is_pts_aligned=args.test_on_aligned)
    test_loader = DataLoader(
        test_set,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=False,
    )

    model = Pointnet2StructurePointNet(
        num_structure_points=args.num_structure_points,
        input_channels=0,
        use_xyz=True)
    model.cuda()
    checkpoint_util.load_checkpoint(model_3d=model, filename=args.model_fname)
    dis_ratios, dis_thresholds = bhcp_evaluate_one_epoch(
        model, test_loader, args.test_on_aligned)

    # BUGFIX: header previously read 'radio' instead of 'ratio'.
    print('distance threshold|ratio:')
    for i in range(dis_ratios.shape[0]):
        print(' {0}|{1}'.format(dis_thresholds[i], ("%.6f" % dis_ratios[i])),
              end='')
    print('\n')
예제 #3
0
def main(args):
    """Transfer query points from a source shape onto a target shape.

    Predicts structure points for both shapes, then uses
    ``compute_correspondence`` to express the (colored) query points
    relative to the source structure points and re-synthesize them from the
    target structure points.  The resulting corresponding points are written
    to ``args.out_corres_pts_fname``.
    """
    model = Pointnet2StructurePointNet(
        num_structure_points=args.num_structure_points,
        input_channels=0,
        use_xyz=True)
    model.cuda()
    checkpoint_util.load_checkpoint(model_3d=model, filename=args.model_fname)
    # Consistent with the other inference scripts: freeze BN / dropout.
    model.eval()

    src_shape_fname = args.src_shape_fname
    query_pts_fname = args.query_pts_fname
    tgt_shape_fname = args.tgt_shape_fname
    out_corres_pts_fname = args.out_corres_pts_fname

    src_pts = point_cloud_utils.read_points_off(src_shape_fname)
    tgt_pts = point_cloud_utils.read_points_off(tgt_shape_fname)
    query_pts, query_pts_colors = point_cloud_utils.read_points_off(
        query_pts_fname, read_color=True)

    batch_src_pts = torch.from_numpy(src_pts)[None, :, :].cuda()
    batch_tgt_pts = torch.from_numpy(tgt_pts)[None, :, :].cuda()
    query_pts = torch.from_numpy(query_pts)[:, :]

    if args.test_on_aligned is False:
        # Shapes are not pre-aligned: normalize their pose with a PCA
        # rotation before inference, keeping the rotation matrices so the
        # predictions can be mapped back afterwards.
        batch_src_pts, src_rot_mats, _ = d_utils.AddPCATransformsToBatchPoints(
            batch_src_pts, num_of_trans=1)
        batch_src_pts = batch_src_pts.squeeze(dim=0)
        src_rot_mat = src_rot_mats.squeeze(dim=0)[0]
        batch_tgt_pts, tgt_rot_mats, _ = d_utils.AddPCATransformsToBatchPoints(
            batch_tgt_pts, num_of_trans=1)
        batch_tgt_pts = batch_tgt_pts.squeeze(dim=0)
        tgt_rot_mat = tgt_rot_mats.squeeze(dim=0)[0]

    structure_points_src = model(batch_src_pts)[0]
    structure_points_tgt = model(batch_tgt_pts)[0]

    if args.test_on_aligned is False:
        # Rotate the predictions back into the original coordinate frames.
        # BUGFIX: this back-rotation previously ran unconditionally and
        # raised NameError (src_rot_mat/tgt_rot_mat undefined) whenever
        # the shapes were already aligned.
        structure_points_src = torch.mm(src_rot_mat.transpose(0, 1),
                                        structure_points_src.transpose(
                                            0, 1)).transpose(0, 1)
        structure_points_tgt = torch.mm(tgt_rot_mat.transpose(0, 1),
                                        structure_points_tgt.transpose(
                                            0, 1)).transpose(0, 1)

    corres_pts_in_tgt = compute_correspondence(structure_points_src, query_pts,
                                               structure_points_tgt)
    point_cloud_utils.write_points_off(out_corres_pts_fname, corres_pts_in_tgt,
                                       query_pts_colors)

    print('output saved to {0}'.format(out_corres_pts_fname))
def main(args):
    """Predict and save structure points for each ``*.off`` shape.

    Every shape in ``args.data_dir`` is run through the trained network and
    its predicted structure points are written to ``args.output_dir`` as
    ``<shape>_stpts.off``, colored from ``COLOR_LIST``.  When the inputs
    are not pre-aligned, each cloud is PCA-rotated before the forward pass
    and the prediction is rotated back afterwards.
    """
    model = Pointnet2StructurePointNet(
        num_structure_points=args.num_structure_points,
        input_channels=0,
        use_xyz=True)
    model.cuda()
    checkpoint_util.load_checkpoint(model_3d=model, filename=args.model_fname)
    model.eval()

    if os.path.exists(args.output_dir) is False:
        os.makedirs(args.output_dir)

    aligned = args.test_on_aligned

    for full_path in glob.glob(os.path.join(args.data_dir, '*.off')):
        base_name = os.path.basename(full_path)
        points = point_cloud_utils.read_points_off(
            os.path.join(args.data_dir, base_name))

        batch = torch.from_numpy(points)[None, :, :].cuda()

        rot_mats = None
        if aligned is False:
            # Normalize the pose with a PCA rotation before inference.
            batch, rot_mats, _ = d_utils.AddPCATransformsToBatchPoints(
                batch, num_of_trans=1)
            batch = batch.squeeze(dim=0).contiguous()
            rot_mats = rot_mats.squeeze(dim=0)

        pred = model(batch)

        if aligned is False:
            # Map the prediction back into the original coordinate frame.
            pred = torch.matmul(rot_mats.transpose(1, 2),
                                pred.transpose(1, 2)).transpose(1, 2)

        pred_np = pred[0].cpu().detach().numpy()
        out_path = os.path.join(args.output_dir,
                                base_name[:-4] + '_stpts.off')
        point_cloud_utils.write_points_off(out_path, pred_np,
                                           COLOR_LIST[:pred_np.shape[0], :])

    print('output saved to {0}'.format(args.output_dir))
def train(cmd_args):
    """Train the unsupervised structure-point network on BHCP data.

    Creates the log/checkpoint directories, builds the augmented data
    loader, model, optimizer and LR / BN-momentum schedulers, optionally
    resumes from a checkpoint, then runs ``train_one_epoch`` for every
    epoch, logging losses and periodically saving checkpoints.
    """
    os.makedirs(cmd_args.log_dir, exist_ok=True)

    checkpoints_dir = os.path.join(cmd_args.log_dir, "checkpoints")
    os.makedirs(checkpoints_dir, exist_ok=True)

    lr_clip = 1e-5   # floor for the decayed learning rate
    bnm_clip = 1e-2  # floor for the decayed batch-norm momentum
    trans = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudJitter()
    ])

    train_set = bhcp_dataloader.bhcp_dataloader(cmd_args.data_dir,
                                                cmd_args.category,
                                                transforms=trans,
                                                is_pts_aligned=False)
    train_loader = DataLoader(
        train_set,
        batch_size=cmd_args.batch_size,
        shuffle=True,
        num_workers=5,
        pin_memory=False,
    )
    model = Pointnet2StructurePointNet(
        num_structure_points=cmd_args.num_structure_points,
        input_channels=0,
        use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=cmd_args.lr,
                           weight_decay=cmd_args.weight_decay)

    # Exponential decay schedules, stepped once every ``decay_step``
    # processed samples and clipped from below so they never vanish.
    lr_lbmd = lambda it: max(
        cmd_args.lr_decay**
        (int(it * cmd_args.batch_size / cmd_args.decay_step)),
        lr_clip / cmd_args.lr,
    )
    bn_lbmd = lambda it: max(
        cmd_args.bn_momentum * cmd_args.bnm_decay**
        (int(it * cmd_args.batch_size / cmd_args.decay_step)),
        bnm_clip,
    )
    iters = -1
    criterions = {'ComputeLoss3d': ComputeLoss3d()}

    # Resume training state from a checkpoint when one is given.
    log_open_mode = 'w'
    start_epoch = 0
    if cmd_args.checkpoint is not None:
        fname = os.path.join(checkpoints_dir, cmd_args.checkpoint)
        start_epoch, iters = checkpoint_util.load_checkpoint(
            model_3d=model, optimizer=optimizer, filename=fname)
        start_epoch += 1
        log_open_mode = 'a'  # append to the existing log when resuming

    log = LogUtils(os.path.join(cmd_args.log_dir, 'logfile'), log_open_mode)

    log.write('train unsupervised structure points for bhcp\n')
    log.write_args(cmd_args)

    lr_scheduler = lr_sched.LambdaLR(optimizer,
                                     lr_lambda=lr_lbmd,
                                     last_epoch=iters)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model,
                                                 bn_lambda=bn_lbmd,
                                                 last_epoch=iters)

    iters = max(iters, 0)
    for epoch_i in range(start_epoch, cmd_args.max_epochs):
        log.write('\nepoch: {0} ###########################'.format(epoch_i))
        train_loss, train_cd_loss, train_consistent_loss, iters = train_one_epoch(
            model,
            optimizer,
            train_loader,
            lr_scheduler,
            bnm_scheduler,
            iters,
            criterions,
            num_of_trans=cmd_args.num_of_transform,
            num_inpts=cmd_args.num_inpts)
        log.write('\nave_train_loss:{0}, cd_loss:{1}, consis_loss:{2}'.format(
            ("%.8f" % train_loss), ("%.8f" % train_cd_loss),
            ("%.8f" % train_consistent_loss)))

        # Periodic numbered checkpoint.  BUGFIX: the modulo result was
        # previously compared with ``is 0`` (identity), which only works by
        # CPython's small-int caching accident; ``== 0`` is correct.
        if cmd_args.checkpoint_save_step != -1 and (
                epoch_i + 1) % cmd_args.checkpoint_save_step == 0:
            fname = os.path.join(checkpoints_dir,
                                 'checkpoint_{}'.format(epoch_i))
            checkpoint_util.save_checkpoint(filename=fname,
                                            model_3d=model,
                                            optimizer=optimizer,
                                            iters=iters,
                                            epoch=epoch_i)

        # Rolling 'model' checkpoint always holds the latest state.
        fname = os.path.join(checkpoints_dir, 'model')
        checkpoint_util.save_checkpoint(filename=fname,
                                        model_3d=model,
                                        optimizer=optimizer,
                                        iters=iters,
                                        epoch=epoch_i)
예제 #6
0
# Load the model
input_channel = 1
model = ftp_psp3(input_channel).cuda()
model = nn.DataParallel(model)  # For using multiple GPUs

# Define the optimizer
optimizer = optim.Adam(model.parameters(), lr=opt.lr)

# Load status from checkpoint.
# BUGFIX: ``iters`` was previously assigned only inside the checkpoint
# branch, so ``iters = max(iters, 0)`` below raised NameError when
# training from scratch (opt.checkpoint is None).
log_open_mode = 'w'
start_epoch = 0
iters = -1
if opt.checkpoint is not None:
    fname = os.path.join(checkpoints_dir, opt.checkpoint)
    start_epoch, iters = checkpoint_util.load_checkpoint(model_3d=model,
                                                         optimizer=optimizer,
                                                         filename=fname)
    start_epoch += 1
    log_open_mode = 'a'  # append to the existing log when resuming

log = LogUtils(os.path.join(opt.log_dir, 'logfile'), log_open_mode)
log.write('Supervised learning for phase map enhancement - Training\n')
log.write_args(opt)
lr_scheduler = lr_scd.StepLR(optimizer,
                             step_size=opt.decay_step,
                             gamma=opt.lr_decay)
iters = max(iters, 0)
reg = 1e-7  # regularization weight used by the training loop below
# Train the network on the training dataset
for epoch_num in range(start_epoch, opt.num_epochs):
    trainData = iter(trainLoader)
예제 #7
0
파일: test.py 프로젝트: VigneshS93/FTP_PSP
os.makedirs(opt.log_dir, exist_ok=True)

checkpoints_dir = os.path.join(opt.log_dir, "checkpoints")
os.makedirs(checkpoints_dir, exist_ok=True)

# Load the model
input_channel = 1
model = ftp_psp5(input_channel).cuda()
# model = nn.DataParallel(model) # For using multiple GPUs

# A trained checkpoint is mandatory for testing.
# BUGFIX: this guard previously came *after* load_checkpoint, so a missing
# checkpoint crashed inside the loader before the friendly message and
# sys.exit() could ever run.
if opt.checkpoint is None:
    print('Checkpoint is missing! Load the checkpoint to start the testing')
    sys.exit()

# Load trained weights from the checkpoint
log_open_mode = 'w'
checkpoint_util.load_checkpoint(model_3d=model, filename=opt.checkpoint)

log = LogUtils(os.path.join(opt.log_dir, 'logfile'), log_open_mode)
log.write('Supervised learning for phase map enhancement - Testing\n')
log.write_args(opt)

# Test the network using the trained model
testData = iter(testLoader)
ave_loss = 0
count = 0
for data in iter(testLoader):
    inp_PM, gt_PM, mask_PM, filename_PM = next(testData)