def main():

    single_channel = True

    print("开始进行测试")
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/checkpoint_NTG_resnet101.pth.tar'      # these two are the same
    ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar' # these two are the same
    # ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/best_checkpoint_voc2011_NTG_resnet101.pth.tar'
    test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'

    threshold = 3
    batch_size = 1
    # Load the model
    use_cuda = torch.cuda.is_available()

    vis = VisdomHelper(env_name='CAVE_test')
    ntg_model = createModel(ntg_checkpoint_path, use_cuda=use_cuda, single_channel=single_channel)

    print('Testing Harvard grid-point loss')
    dataloader, pair_generator = createDataloader(test_image_path, batch_size=batch_size, single_channel=single_channel, use_cuda=use_cuda)

    iterDataset(dataloader, pair_generator, ntg_model, vis, threshold=threshold, use_cuda=use_cuda)
    def __init__(self):
        print("开始进行测试")

        self.param_gpu_id = 0
        self.param_single_channel = True
        self.param_threshold = 3
        self.param_batch_size = 1
        self.param_use_cvpr = False
        self.param_use_cnn = True
        self.param_use_traditional = True
        self.param_use_combine = True
        self.param_save_mat = False
        # Load the model
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.param_gpu_id)
        self.use_cuda = torch.cuda.is_available()

        print(self.param_gpu_id, self.param_single_channel,
              self.param_threshold, self.param_batch_size)
        if self.param_single_channel:
            if self.use_cuda:
                param_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'
            else:
                param_checkpoint_path = '/Users/zale/project/myself/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'

        else:
            param_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'

        if self.param_use_cnn:
            self.ntg_model = createModel(
                param_checkpoint_path,
                use_cuda=self.use_cuda,
                single_channel=self.param_single_channel)
            self.ntg_model.eval()
        else:
            self.ntg_model = None

        if self.param_use_cvpr:
            self.cvpr_model = createCVPRModel(use_cuda=self.use_cuda)
            self.cvpr_model.eval()
        else:
            self.cvpr_model = None
Example #3
def main():

    single_channel = False
    print("开始进行测试")
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'
    #test_image_path = '/home/zlk/datasets/coco_test2017'
    # test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/Harvard'
    test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'

    threshold = 3
    batch_size = 1
    # Load the model
    use_cuda = torch.cuda.is_available()

    # vis = VisdomHelper(env_name='Harvard_test')
    vis = VisdomHelper(env_name='CAVE_test')

    ntg_model = createModel(ntg_checkpoint_path,
                            use_cuda=use_cuda,
                            single_channel=single_channel)
    cvpr_model = createCVPRModel(use_cuda=use_cuda)

    print('Testing Harvard grid-point loss')
    dataloader, pair_generator = createDataloader(
        test_image_path,
        batch_size,
        use_cuda=use_cuda,
        single_channel=single_channel)

    iterDataset(dataloader,
                pair_generator,
                ntg_model,
                cvpr_model,
                vis,
                threshold=threshold,
                use_cuda=use_cuda)
    if param_single_channel:
        param_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'
    else:
        param_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'

    param_test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'
    # param_test_image_path = '/home/zale/datasets/complete_ms_data_mat'
    # param_test_image_path = '/Users/zale/project/datasets/complete_ms_data_mat'

    # Load the model
    os.environ["CUDA_VISIBLE_DEVICES"] = str(param_gpu_id)
    use_cuda = torch.cuda.is_available()

    if param_use_cnn:
        ntg_model = createModel(param_checkpoint_path,
                                use_cuda=use_cuda,
                                single_channel=param_single_channel)
    else:
        ntg_model = None

    cvpr_model = createCVPRModel(use_cuda=use_cuda)

    print('Testing CAVE grid-point loss')
    dataloader, pair_generator = createDataloader(
        param_test_image_path,
        batch_size=param_batch_size,
        single_channel=param_single_channel,
        use_cuda=use_cuda)

    iterDataset(dataloader,
                pair_generator,
    def register_showVisdom(self):
        print("开始进行测试")

        param_gpu_id = 0
        param_single_channel = True
        param_threshold = 3
        param_batch_size = 1
        param_use_cvpr = True
        param_use_cnn = True
        param_use_traditional = True
        param_use_combine = True
        param_save_mat = False

        print(param_gpu_id, param_single_channel, param_threshold,
              param_batch_size)

        vis = VisdomHelper(env_name='CAVE_common', port=8098)

        if param_single_channel:
            param_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'
        else:
            param_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'

        param_test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'
        # param_test_image_path = '/home/zale/datasets/complete_ms_data_mat'
        # param_test_image_path = '/Users/zale/project/datasets/complete_ms_data_mat'

        # Load the model
        os.environ["CUDA_VISIBLE_DEVICES"] = str(param_gpu_id)
        use_cuda = torch.cuda.is_available()

        if param_use_cnn:
            ntg_model = createModel(param_checkpoint_path,
                                    use_cuda=use_cuda,
                                    single_channel=param_single_channel)
        else:
            ntg_model = None

        cvpr_model = createCVPRModel(use_cuda=use_cuda)

        source_image_path = '../datasets/row_data/multispectral/door2.jpg'
        target_image_path = '../datasets/row_data/multispectral/door1.jpg'

        source_image_raw = io.imread(source_image_path)
        target_image_raw = io.imread(target_image_path)

        source_image = source_image_raw[:, :, 0:1]
        target_image = target_image_raw[:, :, 2:3]

        source_image_var = preprocess_image(source_image,
                                            resize=True,
                                            use_cuda=use_cuda)
        target_image_var = preprocess_image(target_image,
                                            resize=True,
                                            use_cuda=use_cuda)

        batch = {
            'source_image': source_image_var,
            'target_image': target_image_var
        }

        ntg_model.eval()
        theta = ntg_model(batch)
        # theta_opencv = theta2param(theta.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
        # cnn_ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :], target_image_var[:, 2, :, :], theta_opencv)

        cnn_image_warped_batch = affine_transform_pytorch(
            source_image_var, theta)
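        # cnn_image_warped_batch now holds the source image warped toward the
        # target using the affine parameters predicted by the CNN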

        vis.showImageBatch(source_image_var,
                           normailze=True,
                           win='source_image_batch',
                           title='source_image_batch')
        vis.showImageBatch(target_image_var,
                           normailze=True,
                           win='target_image_batch',
                           title='target_image_batch')
        vis.showImageBatch(cnn_image_warped_batch,
                           normailze=True,
                           win='cnn_image_warped_batch',
                           title='cnn_image_warped_batch')
Example #6
def main():
    print("eval pf dataset")
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_20r_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/three_channel/checkpoint_NTG_resnet101.pth.tar'
    small_aff_ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/three_channel/coco2014_small_aff_checkpoint_NTG_resnet101.pth.tar'
    ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/best_checkpoint_voc2011_three_channel_paper_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011_paper_affine/best_checkpoint_voc2011_NTG_resnet101.pth.tar'

    #ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_30r_NTG_resnet101.pth.tar"
    # image_path = '../datasets/row_data/VOC/3
    # label_path = '../datasets/row_data/label_file/aff_param2.csv'
    #image_path = '../datasets/row_data/COCO/'
    #label_path = '../datasets/row_data/label_file/aff_param_coco.csv'

    pf_data_path = 'datasets/row_data/pf_data'

    batch_size = 128
    # Load the model
    use_cuda = torch.cuda.is_available()

    ntg_model = createModel(ntg_checkpoint_path, use_cuda=use_cuda)
    small_aff_ntg_model = createModel(small_aff_ntg_checkpoint_path,
                                      use_cuda=use_cuda)

    dataset = PFDataset(
        csv_file=os.path.join(pf_data_path, 'test_pairs_pf.csv'),
        training_image_path=pf_data_path,
        transform=NormalizeImageDict(['source_image', 'target_image']))

    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=4)

    batchTensorToVars = BatchTensorToVars(use_cuda=use_cuda)

    pt = PointTnf(use_cuda=use_cuda)
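    # PointTnf warps keypoint coordinates with a given transformation; it is used
    # below to apply the estimated affine parameters to the annotated points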

    print('Computing PCK...')
    total_correct_points_aff = 0
    ntg_total_correct_points_aff = 0
    cnn_ntg_total_correct_points_aff = 0
    total_correct_points_tps = 0
    total_correct_points_aff_tps = 0
    total_points = 0
    ntg_total_points = 0
    cnn_ntg_total_points = 0

    for i, batch in enumerate(dataloader):
        batch = batchTensorToVars(batch)
        source_im_size = batch['source_im_size']
        target_im_size = batch['target_im_size']

        source_points = batch['source_points']
        target_points = batch['target_points']

        source_image_batch = batch['source_image']
        target_image_batch = batch['target_image']

        # warp points with estimated transformations
        target_points_norm = PointsToUnitCoords(target_points, target_im_size)
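        # PointsToUnitCoords presumably maps pixel coordinates into the normalized
        # coordinate frame expected by the point transformer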

        theta_estimate_batch = ntg_model(batch)
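        # theta_estimate_batch: affine parameters predicted by the CNN
        # (interpreted as [B, 2, 3] matrices downstream)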

        #warped_image_batch = affine_transform_pytorch(source_image_batch, theta_estimate_batch)
        #batch['source_image'] = warped_image_batch
        #theta_estimate_batch = small_aff_ntg_model(batch)

        # Convert the PyTorch transform parameters to OpenCV transform parameters
        #theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)

        # P5: use the traditional NTG method to refine the CNN result
        #cnn_ntg_param_batch = estimate_param_batch(source_image_batch, target_image_batch, theta_opencv,itermax = 600)
        #theta_pytorch = param2theta(cnn_ntg_param_batch.view(-1, 2, 3),240,240,use_cuda=use_cuda)

        # theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
        # with torch.no_grad():
        #     ntg_param_batch = estimate_aff_param_iterator(source_image_batch[:, 0, :, :].unsqueeze(1),
        #                                                   target_image_batch[:, 0, :, :].unsqueeze(1),
        #                                                   None, use_cuda=use_cuda, itermax=600)
        #
        #     cnn_ntg_param_batch = estimate_aff_param_iterator(source_image_batch[:, 0, :, :].unsqueeze(1),
        #                                                       target_image_batch[:, 0, :, :].unsqueeze(1),
        #                                                       theta_opencv, use_cuda=use_cuda, itermax=600)
        #
        #     ntg_param_pytorch_batch = param2theta(ntg_param_batch,240, 240, use_cuda=use_cuda)
        #     cnn_ntg_param_pytorch_batch = param2theta(cnn_ntg_param_batch,240, 240, use_cuda=use_cuda)

        warped_points_aff_norm = pt.affPointTnf(theta_estimate_batch,
                                                target_points_norm)
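        # convert the warped points from normalized coordinates back to pixel
        # coordinates in the source image frame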
        warped_points_aff = PointsToPixelCoords(warped_points_aff_norm,
                                                source_im_size)

        # ntg_warped_points_aff_norm = pt.affPointTnf(ntg_param_pytorch_batch, target_points_norm)
        # ntg_warped_points_aff = PointsToPixelCoords(ntg_warped_points_aff_norm, source_im_size)
        #
        # cnn_ntg_warped_points_aff_norm = pt.affPointTnf(cnn_ntg_param_pytorch_batch, target_points_norm)
        # cnn_ntg_warped_points_aff = PointsToPixelCoords(cnn_ntg_warped_points_aff_norm, source_im_size)

        L_pck = batch['L_pck'].data
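        # L_pck: per-image reference length used to scale the keypoint error
        # threshold in the PCK metric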

        correct_points_aff, num_points = correct_keypoints(
            source_points.data, warped_points_aff.data, L_pck)
        # ntg_correct_points_aff, ntg_num_points = correct_keypoints(source_points.data,
        #                                                    ntg_warped_points_aff.data, L_pck)
        # cnn_ntg_correct_points_aff, cnn_ntg_num_points = correct_keypoints(source_points.data,
        #                                                    cnn_ntg_warped_points_aff.data, L_pck)

        total_correct_points_aff += correct_points_aff
        total_points += num_points

        # ntg_total_correct_points_aff += ntg_correct_points_aff
        # ntg_total_points += ntg_num_points
        #
        # cnn_ntg_total_correct_points_aff += cnn_ntg_correct_points_aff
        # cnn_ntg_total_points += cnn_ntg_num_points

        print('Batch: [{}/{} ({:.0f}%)]'.format(i, len(dataloader),
                                                100. * i / len(dataloader)))

    total_correct_points_aff = float(total_correct_points_aff)
    # ntg_total_correct_points_aff = float(ntg_total_correct_points_aff)
    # cnn_ntg_total_correct_points_aff = float(cnn_ntg_total_correct_points_aff)

    PCK_aff = total_correct_points_aff / total_points
    # ntg_PCK_aff=ntg_total_correct_points_aff/ntg_total_points
    # cnn_ntg_PCK_aff=cnn_ntg_total_correct_points_aff/cnn_ntg_total_points
    print('PCK affine:', PCK_aff)
    # print('ntg_PCK affine:',ntg_PCK_aff)
    # print('cnn_ntg_PCK affine:',cnn_ntg_PCK_aff)
    print('Done!')
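
# For reference: a minimal sketch of the PCK correctness check used above, assuming
# correct_keypoints compares warped points against ground-truth points within a
# threshold of alpha * L_pck (alpha = 0.1 is a common choice) and that point tensors
# are [B, 2, N] in pixel coordinates. This is an illustrative approximation, not
# necessarily the repository's exact implementation.
def correct_keypoints_sketch(gt_points, warped_points, L_pck, alpha=0.1):
    # per-point Euclidean distance, shape [B, N]
    point_distance = torch.pow(
        torch.sum(torch.pow(gt_points - warped_points, 2), dim=1), 0.5)
    # broadcast the per-image reference length to every point
    L_pck_matrix = L_pck.unsqueeze(1).expand_as(point_distance)
    correct = torch.le(point_distance, L_pck_matrix * alpha)
    # return (number of correct points, total number of points)
    return correct.sum().item(), correct.numel()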