Example #1
def main():

    print("开始进行测试")

    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    #ntg_checkpoint_path = "../trained_weight/output/checkpoint_NTG_resnet101.pth.tar"
    ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_NTG_resnet101.pth.tar"
    image_path = '/home/zlk/datasets/coco_test2017'

    use_custom_aff_param = True
    if use_custom_aff_param:
        label_path = '../datasets/row_data/label_file/coco_test2017_custom_param.csv'
    else:
        label_path = '../datasets/row_data/label_file/coco_test2017_paper_param.csv'

    threshold = 10

    batch_size = 164
    # Load the model
    use_cuda = torch.cuda.is_available()

    vis = VisdomHelper(env_name='DMN_test')

    ntg_model = createModel(ntg_checkpoint_path, use_cuda=use_cuda)
    dataloader, pair_generator = createDataloader(image_path,
                                                  label_path,
                                                  batch_size,
                                                  use_cuda=use_cuda)
    iterDataset(dataloader,
                pair_generator,
                ntg_model,
                vis,
                threshold=threshold,
                use_cuda=use_cuda)
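
The examples in this listing all call a createModel helper that is not shown here. As a reference, below is a minimal sketch of what it is assumed to do, based on the checkpoint-loading code in Example #11: build the CNNRegistration network, load the checkpoint on the CPU, remap legacy key names, and switch to eval mode. The import path and the exact signature are assumptions and may differ from the actual repository.

from collections import OrderedDict

import torch

# Assumed import path; the real module name in the repository may differ.
from model.cnn_registration_model import CNNRegistration


def createModel(checkpoint_path, use_cuda=True, single_channel=False):
    """Build CNNRegistration and load trained weights (sketch, not the repository's code)."""
    model = CNNRegistration(single_channel=single_channel, use_cuda=use_cuda)

    # Load the checkpoint onto the CPU first so the script also runs without a GPU.
    checkpoint = torch.load(checkpoint_path,
                            map_location=lambda storage, loc: storage)
    state_dict = OrderedDict(
        (k.replace('vgg', 'model'), v)
        for k, v in checkpoint['state_dict'].items())
    model.load_state_dict(state_dict)

    if use_cuda:
        model = model.cuda()
    model.eval()
    return model
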
Example #2
def main():

    single_channel = True

    print("开始进行测试")
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    #ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/checkpoint_NTG_resnet101.pth.tar'      # these two are identical
    ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar' # these two are identical
    # ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/best_checkpoint_voc2011_NTG_resnet101.pth.tar'
    test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'

    threshold = 3
    batch_size = 1
    # Load the model
    use_cuda = torch.cuda.is_available()

    vis = VisdomHelper(env_name='CAVE_test')
    ntg_model = createModel(ntg_checkpoint_path, use_cuda=use_cuda, single_channel=single_channel)

    print('Testing Harvard grid-point loss')
    dataloader, pair_generator = createDataloader(test_image_path, batch_size=batch_size,
                                                  single_channel=single_channel, use_cuda=use_cuda)

    iterDataset(dataloader, pair_generator, ntg_model, vis, threshold=threshold, use_cuda=use_cuda)
Example #3
def main():

    print("开始进行测试")

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    #ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/best_checkpoint_voc2011_NTG_resnet101.pth.tar"
    #ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_NTG_resnet101.pth.tar"
    #ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar"
    #ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_NTG_resnet101_distributed.pth.tar"
    #ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_20r_NTG_resnet101.pth.tar"
    #ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/three_channel/checkpoint_NTG_resnet101.pth.tar'
    ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_three_channel_paper_NTG_resnet101.pth.tar'

    nir_image_path = '/mnt/4T/zlk/datasets/mulitspectral/nirscene_total/nir_image'
    rgb_image_path = '/mnt/4T/zlk/datasets/mulitspectral/nirscene_total/rgb_image'

    use_custom_aff_param = True
    if use_custom_aff_param:
        #label_path = '../datasets/row_data/label_file/coco_test2017_n2000_custom_20r_param.csv'
        label_path = '../datasets/row_data/label_file/nir_rgb_custom_20r_param.csv'
    else:
        label_path = '../datasets/row_data/label_file/nir_rgb_paper_affine_param.csv'

    threshold = 10

    batch_size = 16
    # Load the model
    use_cuda = torch.cuda.is_available()

    vis = VisdomHelper(env_name='DMN_test')

    ntg_model = createModel(ntg_checkpoint_path, use_cuda=use_cuda)
    dataloader, pair_generator = createDataloader(nir_image_path,
                                                  rgb_image_path,
                                                  label_path,
                                                  batch_size,
                                                  use_cuda=use_cuda)
    iterDataset(dataloader,
                pair_generator,
                ntg_model,
                vis,
                threshold=threshold,
                use_cuda=use_cuda)
Example #4
def main():

    single_channel = False
    print("开始进行测试")
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'
    #test_image_path = '/home/zlk/datasets/coco_test2017'
    # test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/Harvard'
    test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'

    threshold = 3
    batch_size = 1
    # Load the model
    use_cuda = torch.cuda.is_available()

    # vis = VisdomHelper(env_name='Harvard_test')
    vis = VisdomHelper(env_name='CAVE_test')

    ntg_model = createModel(ntg_checkpoint_path,
                            use_cuda=use_cuda,
                            single_channel=single_channel)
    cvpr_model = createCVPRModel(use_cuda=use_cuda)

    print('Testing Harvard grid-point loss')
    dataloader, pair_generator = createDataloader(
        test_image_path,
        batch_size,
        use_cuda=use_cuda,
        single_channel=single_channel)

    iterDataset(dataloader,
                pair_generator,
                ntg_model,
                cvpr_model,
                vis,
                threshold=threshold,
                use_cuda=use_cuda)
    print("开始进行测试")

    param_gpu_id = 2
    param_single_channel = True
    param_threshold = 3
    param_batch_size = 1
    param_use_cvpr = True
    param_use_cnn = True
    param_use_traditional = True
    param_use_combine = True
    param_save_mat = False

    print(param_gpu_id, param_single_channel, param_threshold,
          param_batch_size)

    vis = VisdomHelper(env_name='CAVE_common', port=8098)

    if param_single_channel:
        param_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'
    else:
        param_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'

    param_test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'
    # param_test_image_path = '/home/zale/datasets/complete_ms_data_mat'
    # param_test_image_path = '/Users/zale/project/datasets/complete_ms_data_mat'

    # Load the model
    os.environ["CUDA_VISIBLE_DEVICES"] = str(param_gpu_id)
    use_cuda = torch.cuda.is_available()

    if param_use_cnn:
Example #6
def main(args):

    # checkpoint_path = "/home/zale/project/registration_cnn_ntg/trained_weight/voc2011_multi_gpu/checkpoint_voc2011_multi_gpu_paper_NTG_resnet101.pth.tar"
    # checkpoint_path = "/home/zale/project/registration_cnn_ntg/trained_weight/coco2017_multi_gpu/checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar"
    #args.training_image_path = '/home/zale/datasets/vocdata/VOC_train_2011/VOCdevkit/VOC2011/JPEGImages'
    # args.training_image_path = '/media/disk2/zale/datasets/coco2017/train2017'

    checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011_multi_gpu/checkpoint_voc2011_multi_gpu_three_channel_paper_origin_NTG_resnet101.pth.tar"
    args.training_image_path = '/home/zlk/datasets/vocdata/VOC_train_2011/VOCdevkit/VOC2011/JPEGImages'

    random_seed = 10021
    init_seeds(random_seed + random.randint(0, 10000))
    mixed_precision = True

    utils.init_distributed_mode(args)
    print(args)

    #device,local_rank = torch_util.select_device(multi_process =True,apex=mixed_precision)

    device = torch.device(args.device)
    use_cuda = True
    # Data loading code
    print("Loading data")
    RandomTnsDataset = RandomTnsData(args.training_image_path,
                                     cache_images=False,
                                     paper_affine_generator=True,
                                     transform=NormalizeImageDict(["image"]))
    # train_dataloader = DataLoader(RandomTnsDataset, batch_size=args.batch_size, shuffle=True, num_workers=4,
    #                               pin_memory=True)

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            RandomTnsDataset)
        # test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(RandomTnsDataset)
        # test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    # train_batch_sampler = torch.utils.data.BatchSampler(
    #     train_sampler, args.batch_size, drop_last=True)

    data_loader = DataLoader(RandomTnsDataset,
                             sampler=train_sampler,
                             num_workers=4,
                             shuffle=(train_sampler is None),
                             pin_memory=False,
                             batch_size=args.batch_size)

    # data_loader_test = torch.utils.data.DataLoader(
    #     dataset_test, batch_size=1,
    #     sampler=test_sampler, num_workers=args.workers,
    #     collate_fn=utils.collate_fn)

    print("Creating model")
    model = CNNRegistration(use_cuda=use_cuda)

    model.to(device)

    # Optimizer and scheduler
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params, lr=args.lr)

    # The NTG loss decreases very slowly once the learning rate falls below 1e-6, so the minimum is set to 1e-6
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=args.lr_max_iter, eta_min=1e-6)

    # if mixed_precision:
    #     model,optimizer = amp.initialize(model,optimizer,opt_level='O1',verbosity=0)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    minium_loss, saved_epoch = load_checkpoint(model_without_ddp, optimizer,
                                               lr_scheduler, checkpoint_path,
                                               args.rank)

    vis_env = "multi_gpu_rgb_train_paper_30"
    loss = NTGLoss()
    pair_generator = RandomTnsPair(use_cuda=use_cuda)
    gridGen = AffineGridGen()
    vis = VisdomHelper(env_name=vis_env)

    print('Starting training...')
    start_time = time.time()
    draw_test_loss = False
    log_interval = 20
    for epoch in range(saved_epoch, args.num_epochs):
        start_time = time.time()

        if args.distributed:
            train_sampler.set_epoch(epoch)

        train_loss = train(epoch,
                           model,
                           loss,
                           optimizer,
                           data_loader,
                           pair_generator,
                           gridGen,
                           vis,
                           use_cuda=use_cuda,
                           log_interval=log_interval,
                           lr_scheduler=lr_scheduler,
                           rank=args.rank)

        if draw_test_loss:
            #test_loss = test(model,loss,test_dataloader,pair_generator,gridGen,use_cuda=use_cuda)
            #vis.drawBothLoss(epoch,train_loss,test_loss,'loss_table')
            pass
        else:
            vis.drawLoss(epoch, train_loss)

        end_time = time.time()
        print("epoch:", str(end_time - start_time), '秒')

        is_best = train_loss < minium_loss
        minium_loss = min(train_loss, minium_loss)

        state_dict = model_without_ddp.state_dict()
        if is_main_process():
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'args': args,
                    # 'state_dict': model.state_dict(),
                    'state_dict': state_dict,
                    'minium_loss': minium_loss,
                    'model_loss': train_loss,
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                },
                is_best,
                checkpoint_path)
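
The training loop above relies on save_checkpoint and load_checkpoint helpers that are not part of this listing. Below is a minimal sketch of what save_checkpoint is assumed to do: persist the state dictionary and keep a copy of the best weights. The "_best" file-naming convention is an assumption, not necessarily what the repository uses.

import shutil

import torch


def save_checkpoint(state, is_best, file_path):
    """Save the latest training state; keep a separate copy of the best model so far (sketch only)."""
    torch.save(state, file_path)
    if is_best:
        # Hypothetical naming convention for the best checkpoint.
        shutil.copyfile(file_path, file_path.replace('.pth.tar', '_best.pth.tar'))
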
Example #7
def main():

    print("开始进行测试")

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    #ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/best_checkpoint_voc2011_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_three_channel_paper_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011/checkpoint_voc2011_20r_NTG_resnet101.pth.tar"
    # ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/three_channel/checkpoint_NTG_resnet101.pth.tar'
    ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_voc2011_multi_gpu_three_channel_paper_origin_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/checkpoint_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/mnt/4T/zlk/trained_weights/checkpoint_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/home/zale/project/registration_cnn_ntg/trained_weight/output/checkpoint_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/home/zale/project/registration_cnn_ntg/trained_weight/output/checkpoint_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'
    # ntg_checkpoint_path = "/home/zlk/project/registration_cnn_ntg/trained_weight/voc2011_paper_three/checkpoint_voc2011_paper_NTG_resnet101.pth.tar"
    #test_image_path = '/home/zlk/datasets/coco_test2017'
    test_image_path = '/home/zlk/datasets/coco_test2017_n2000'

    use_custom_aff_param = True
    print("use_custom_aff_param:", use_custom_aff_param)
    if use_custom_aff_param:
        label_path = '../datasets/row_data/label_file/coco_test2017_n2000_custom_20r_param.csv'
    else:
        label_path = '../datasets/row_data/label_file/coco_test2017_paper_param_n2000.csv'

    eval_kind = 1  # 1 = coco2017 test, 2 = NIR images

    print("label_path:", label_path)

    threshold = 3
    batch_size = 96
    # Load the model
    use_cuda = torch.cuda.is_available()

    vis = VisdomHelper(env_name='DMN_test')

    ntg_model = createModel(ntg_checkpoint_path,
                            single_channel=False,
                            use_cuda=use_cuda)
    cvpr_model = createCVPRModel(use_cuda=use_cuda)

    if eval_kind == 1:
        print('Testing coco2017 grid-point loss')
        dataloader, pair_generator = createDataloader(test_image_path,
                                                      label_path,
                                                      batch_size,
                                                      use_cuda=use_cuda)
    elif eval_kind == 2:
        print('Testing NIR image grid-point loss')
        nir_image_path = '/mnt/4T/zlk/datasets/mulitspectral/nirscene_total/nir_image'
        rgb_image_path = '/mnt/4T/zlk/datasets/mulitspectral/nirscene_total/rgb_image'
        label_path = '../datasets/row_data/label_file/nir_rgb_custom_20r_param.csv'
        dataloader, pair_generator = createNirDataloader(nir_image_path,
                                                         rgb_image_path,
                                                         label_path,
                                                         batch_size,
                                                         use_cuda=use_cuda)
    else:
        print("测试种类错误")
        return

    iterDataset(dataloader,
                pair_generator,
                ntg_model,
                cvpr_model,
                vis,
                threshold=threshold,
                use_cuda=use_cuda)
Example #8
    def register_showVisdom(self):
        print("开始进行测试")

        param_gpu_id = 0
        param_single_channel = True
        param_threshold = 3
        param_batch_size = 1
        param_use_cvpr = True
        param_use_cnn = True
        param_use_traditional = True
        param_use_combine = True
        param_save_mat = False

        print(param_gpu_id, param_single_channel, param_threshold,
              param_batch_size)

        vis = VisdomHelper(env_name='CAVE_common', port=8098)

        if param_single_channel:
            param_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'
        else:
            param_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'

        param_test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'
        # param_test_image_path = '/home/zale/datasets/complete_ms_data_mat'
        # param_test_image_path = '/Users/zale/project/datasets/complete_ms_data_mat'

        # Load the model
        os.environ["CUDA_VISIBLE_DEVICES"] = str(param_gpu_id)
        use_cuda = torch.cuda.is_available()

        if param_use_cnn:
            ntg_model = createModel(param_checkpoint_path,
                                    use_cuda=use_cuda,
                                    single_channel=param_single_channel)
        else:
            ntg_model = None

        cvpr_model = createCVPRModel(use_cuda=use_cuda)

        source_image_path = '../datasets/row_data/multispectral/door2.jpg'
        target_image_path = '../datasets/row_data/multispectral/door1.jpg'

        source_image_raw = io.imread(source_image_path)
        target_image_raw = io.imread(target_image_path)

        source_image = source_image_raw[:, :, 0:1]
        target_image = target_image_raw[:, :, 2:3]

        source_image_var = preprocess_image(source_image,
                                            resize=True,
                                            use_cuda=use_cuda)
        target_image_var = preprocess_image(target_image,
                                            resize=True,
                                            use_cuda=use_cuda)

        batch = {
            'source_image': source_image_var,
            'target_image': target_image_var
        }

        ntg_model.eval()
        theta = ntg_model(batch)
        # theta_opencv = theta2param(theta.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
        # cnn_ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :], target_image_var[:, 2, :, :], theta_opencv)

        cnn_image_warped_batch = affine_transform_pytorch(
            source_image_var, theta)

        vis.showImageBatch(source_image_var,
                           normailze=True,
                           win='source_image_batch',
                           title='source_image_batch')
        vis.showImageBatch(target_image_var,
                           normailze=True,
                           win='target_image_batch',
                           title='target_image_batch')
        vis.showImageBatch(cnn_image_warped_batch,
                           normailze=True,
                           win='cnn_image_warped_batch',
                           title='cnn_image_warped_batch')
Example #9
def start_train(training_path, test_image_path, load_from, out_path, vis_env,
                paper_affine_generator=False, random_seed=666, log_interval=100,
                multi_gpu=True, use_cuda=True):

    init_seeds(random_seed + random.randint(0, 10000))

    device, local_rank = torch_util.select_device(multi_process=multi_gpu, apex=mixed_precision)

    # args.batch_size = args.batch_size * torch.cuda.device_count()
    args.batch_size = 16
    args.lr_scheduler = True
    draw_test_loss = False
    print(args.batch_size)


    print("创建模型中")
    model = CNNRegistration(use_cuda=use_cuda)

    model = model.to(device)

    # Optimizer and scheduler
    optimizer = optim.Adam(model.FeatureRegression.parameters(), lr=args.lr)

    if args.lr_scheduler:
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=args.lr_max_iter,
                                                               eta_min=1e-7)
    else:
        scheduler = False

    print("加载权重")
    minium_loss,saved_epoch= load_checkpoint(model,optimizer,load_from,0)

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model,optimizer = amp.initialize(model,optimizer,opt_level='01',verbosity=0)

    if multi_gpu:
        model = nn.DataParallel(model)

    loss = NTGLoss()
    pair_generator = RandomTnsPair(use_cuda=use_cuda)
    gridGen = AffineGridGen()
    vis = VisdomHelper(env_name=vis_env)

    print("创建dataloader")
    RandomTnsDataset = RandomTnsData(training_path, cache_images=False,paper_affine_generator = paper_affine_generator,
                                     transform=NormalizeImageDict(["image"]))
    train_dataloader = DataLoader(RandomTnsDataset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)

    if draw_test_loss:
        testDataset = RandomTnsData(test_image_path, cache_images=False, paper_affine_generator=paper_affine_generator,
                                     transform=NormalizeImageDict(["image"]))
        test_dataloader = DataLoader(testDataset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=False)

    print('Starting training...')

    for epoch in range(saved_epoch, args.num_epochs):
        start_time = time.time()

        train_loss = train(epoch, model, loss, optimizer, train_dataloader, pair_generator, gridGen, vis,
                           use_cuda=use_cuda, log_interval=log_interval, scheduler=scheduler)

        if draw_test_loss:
            test_loss = test(model, loss, test_dataloader, pair_generator, gridGen, use_cuda=use_cuda)
            vis.drawBothLoss(epoch, train_loss, test_loss, 'loss_table')
        else:
            vis.drawLoss(epoch, train_loss)

        end_time = time.time()
        print("epoch:", str(end_time - start_time),'秒')

        is_best = train_loss < minium_loss
        minium_loss = min(train_loss, minium_loss)

        state_dict = model.module.state_dict() if multi_gpu else model.state_dict()
        save_checkpoint({
            'epoch': epoch + 1,
            'args': args,
            #'state_dict': model.state_dict(),
            'state_dict': state_dict,
            'minium_loss': minium_loss,
            'model_loss': train_loss,
            'optimizer': optimizer.state_dict(),
        }, is_best, out_path)
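
The companion load_checkpoint helper (called above as load_checkpoint(model, optimizer, load_from, 0)) is also not shown in this listing. Below is a minimal sketch under the assumption that it restores the saved state and returns the stored loss and epoch, using the same dictionary keys as the save_checkpoint call above; the real signature and behavior may differ.

import os

import torch


def load_checkpoint(model, optimizer, load_from, rank=0):
    """Restore model/optimizer state if a checkpoint file exists (sketch only)."""
    minium_loss, saved_epoch = float('inf'), 0
    if load_from and os.path.isfile(load_from):
        checkpoint = torch.load(load_from, map_location='cpu')
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        minium_loss = checkpoint.get('minium_loss', minium_loss)
        saved_epoch = checkpoint.get('epoch', saved_epoch)
        if rank == 0:
            print('Loaded checkpoint from', load_from)
    return minium_loss, saved_epoch
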
Example #10
def test_bar(vis):
    x_list = [i for i in range(10)]

    A_list = [i + 14 for i in range(10)]
    B_list = [0 + 15 for i in range(10)]
    C_list = [0 + 16 for i in range(10)]
    D_list = [0 + 11 for i in range(10)]

    # vis.getVisdom().bar(
    #     X=np.column_stack((A_list,B_list, C_list,D_list)),
    #     opts=dict(
    #         stacked=False,
    #         legend=['The Netherlands', 'France', 'United States','sdfsd']
    #     )
    # )
    vis.drawGridlossBar(x_list,
                        A_list,
                        B_list,
                        C_list,
                        D_list,
                        layout_title='Grid_loss_histogram')


if __name__ == '__main__':

    env = 'DMN_test'
    vis = VisdomHelper(env)

    test_bar(vis)
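
drawGridlossBar is a VisdomHelper method that is not shown in this listing. Judging from the commented-out vis.getVisdom().bar(...) call above, it presumably wraps visdom's grouped bar plot; here is a standalone sketch under that assumption (the legend labels are placeholders, and the real method signature may differ).

import numpy as np
import visdom


def draw_grid_loss_bar(vis, x_list, a_list, b_list, c_list, d_list,
                       layout_title='Grid_loss_histogram'):
    """Grouped (non-stacked) bar chart of four series, mirroring the commented-out call above."""
    vis.bar(
        X=np.column_stack((a_list, b_list, c_list, d_list)),
        opts=dict(stacked=False,
                  rownames=[str(x) for x in x_list],  # x-axis tick labels
                  legend=['A', 'B', 'C', 'D'],        # placeholder legend labels
                  title=layout_title))


# Usage with a raw visdom client (VisdomHelper presumably wraps one):
# viz = visdom.Visdom(env='DMN_test')
# draw_grid_loss_bar(viz, x_list, A_list, B_list, C_list, D_list)
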
Example #11
def register_images(source_image_path, target_image_path, use_cuda=True):

    env_name = 'compare_ntg_realize'
    vis = VisdomHelper(env_name)

    # Create the model
    ntg_model = CNNRegistration(single_channel=True, use_cuda=use_cuda)

    print("Loading trained model weights")
    print("ntg_checkpoint_path:", ntg_checkpoint_path)

    # Load all tensors onto the CPU (GPU ==> CPU)
    ntg_checkpoint = torch.load(ntg_checkpoint_path,
                                map_location=lambda storage, loc: storage)
    ntg_checkpoint['state_dict'] = OrderedDict([
        (k.replace('vgg', 'model'), v)
        for k, v in ntg_checkpoint['state_dict'].items()
    ])
    ntg_model.load_state_dict(ntg_checkpoint['state_dict'])

    source_image_raw = io.imread(source_image_path)

    target_image_raw = io.imread(target_image_path)

    source_image = source_image_raw
    target_image = target_image_raw

    source_image_var = preprocess_image(source_image,
                                        resize=True,
                                        use_cuda=use_cuda)
    target_image_var = preprocess_image(target_image,
                                        resize=True,
                                        use_cuda=use_cuda)

    # source_image_var = source_image_var[:,0,:,:][:,np.newaxis,:,:]
    # target_image_var = target_image_var[:,0,:,:][:,np.newaxis,:,:]

    batch = {
        'source_image': source_image_var,
        'target_image': target_image_var
    }

    affine_tnf = AffineTnf(use_cuda=use_cuda)

    ntg_model.eval()
    theta = ntg_model(batch)

    ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :],
                                           target_image_var[:, 2, :, :], None)
    ntg_image_warped_batch = affine_transform_opencv_2(source_image_var,
                                                       ntg_param_batch)

    theta_opencv = theta2param(theta.view(-1, 2, 3),
                               240,
                               240,
                               use_cuda=use_cuda)
    cnn_ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :],
                                               target_image_var[:, 2, :, :],
                                               theta_opencv)

    cnn_image_warped_batch = affine_transform_pytorch(source_image_var, theta)
    cnn_ntg_image_warped_batch = affine_transform_opencv_2(
        source_image_var, cnn_ntg_param_batch)

    cnn_ntg_param_multi_batch = estimate_aff_param_iterator(
        source_image_var[:, 0, :, :].unsqueeze(1),
        target_image_var[:, 0, :, :].unsqueeze(1),
        theta_opencv,
        use_cuda=use_cuda,
        itermax=800)

    cnn_ntg_image_warped_mulit_batch = affine_transform_opencv_2(
        source_image_var,
        cnn_ntg_param_multi_batch.detach().cpu().numpy())
    # cnn_ntg_image_warped_mulit_batch = affine_transform_opencv_2(source_image_var, theta_opencv.detach().cpu().numpy())

    vis.showImageBatch(source_image_var,
                       normailze=True,
                       win='source_image_batch',
                       title='source_image_batch')
    vis.showImageBatch(target_image_var,
                       normailze=True,
                       win='target_image_batch',
                       title='target_image_batch')
    vis.showImageBatch(cnn_image_warped_batch,
                       normailze=True,
                       win='cnn_image_warped_batch',
                       title='cnn_image_warped_batch')
    # Applying plain NTG directly across different channels may simply fail
    # vis.showImageBatch(ntg_image_warped_batch, normailze=True, win='warped_image_batch', title='warped_image_batch')
    vis.showImageBatch(cnn_ntg_image_warped_mulit_batch,
                       normailze=True,
                       win='cnn_ntg_param_multi_batch',
                       title='cnn_ntg_param_multi_batch')
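
preprocess_image is another helper the listing does not include. The sketch below shows roughly what it is assumed to do: convert a uint8 ndarray into a (1, C, H, W) float tensor and resize it to the 240x240 resolution used by the theta2param calls above. The real implementation in the repository may additionally apply dataset normalization.

import numpy as np
import torch
import torch.nn.functional as F


def preprocess_image(image, resize=True, use_cuda=True, out_size=(240, 240)):
    """uint8 HxW[xC] ndarray -> (1, C, H, W) float tensor in [0, 1] (sketch only)."""
    if image.ndim == 2:
        image = image[:, :, np.newaxis]
    tensor = torch.from_numpy(image.astype(np.float32) / 255.0)
    tensor = tensor.permute(2, 0, 1).unsqueeze(0)  # (1, C, H, W)
    if resize:
        tensor = F.interpolate(tensor, size=out_size,
                               mode='bilinear', align_corners=False)
    if use_cuda:
        tensor = tensor.cuda()
    return tensor
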
Example #12
from tnf_transform.img_process import NormalizeImageDict
from tnf_transform.transformation import affine_transform_opencv
from traditional_ntg.estimate_affine_param import estimate_param_batch
from util.pytorchTcv import param2theta
from visualization.train_visual import VisdomHelper

if __name__ == '__main__':

    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    print('Batch testing with the traditional NTG method')

    use_cuda = torch.cuda.is_available()

    env = "ntg_pytorch"
    vis = VisdomHelper(env)
    test_image_path = '/home/zlk/datasets/coco_test2017_n2000'
    label_path = 'datasets/row_data/label_file/coco_test2017_n2000_custom_20r_param.csv'

    threshold = 3
    batch_size = 164

    # dataset = TestDataset(test_image_path,label_path,transform=NormalizeImageDict(["image"]))
    dataset = TestDataset(test_image_path, label_path)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=4,
                            pin_memory=True)
    # pair_generator = NtgTestPair(use_cuda=use_cuda,output_size=(480, 640))
    pair_generator = NtgTestPair(use_cuda=use_cuda)
Example #13
    print("开始进行测试")

    param_gpu_id = 0
    param_single_channel = True
    param_threshold = 3
    param_batch_size = 1
    param_use_cvpr = True
    param_use_cnn = True
    param_use_traditional = True
    param_use_combine = True
    param_save_mat = False

    print(param_gpu_id, param_single_channel, param_threshold,
          param_batch_size)

    vis = VisdomHelper(env_name='CAVE_common')

    if param_single_channel:
        param_checkpoint_path = '/home/zlk/project/registration_cnn_ntg/trained_weight/output/voc2012_coco2014_NTG_resnet101.pth.tar'
    else:
        param_checkpoint_path = '/mnt/4T/zlk/trained_weights/best_checkpoint_coco2017_multi_gpu_paper30_NTG_resnet101.pth.tar'

    param_test_image_path = '/mnt/4T/zlk/datasets/mulitspectral/complete_ms_data_mat'
    # param_test_image_path = '/home/zale/datasets/complete_ms_data_mat'
    # param_test_image_path = '/Users/zale/project/datasets/complete_ms_data_mat'

    # Load the model
    os.environ["CUDA_VISIBLE_DEVICES"] = str(param_gpu_id)
    use_cuda = torch.cuda.is_available()

    if param_use_cnn: