def register_CNN(self, source_image_path, target_image_path):
    source_image_raw = io.imread(source_image_path)
    target_image_raw = io.imread(target_image_path)
    # testImage = io.imread(source_image_path)
    # testImage = cv2.resize(testImage, (240, 240))
    source_image_resize = cv2.resize(source_image_raw, (240, 240))
    target_image_resize = cv2.resize(target_image_raw, (240, 240))

    source_image = source_image_raw[:, :, 0:1]
    target_image = target_image_raw[:, :, 2:3]

    source_image_var = preprocess_image(source_image, resize=True, use_cuda=self.use_cuda)
    target_image_var = preprocess_image(target_image, resize=True, use_cuda=self.use_cuda)

    batch = {'source_image': source_image_var, 'target_image': target_image_var}

    if self.ntg_model is not None:
        theta = self.ntg_model(batch)
        opencv_theta = theta2param(theta.view(-1, 2, 3), 240, 240, use_cuda=self.use_cuda)
        cnn_image_warped_batch = single_affine_transform_opencv(
            source_image_resize, opencv_theta[0].detach().numpy())
        return cnn_image_warped_batch
    else:
        print('ntg_model is None')

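# The helpers theta2param / param2theta used throughout convert between a 2x3 affine
# expressed in PyTorch's normalized [-1, 1] grid coordinates and a 2x3 affine in OpenCV
# pixel coordinates. A minimal sketch of that coordinate change is given below; it is an
# illustration only, and the exact sign/direction conventions of the project's own helpers
# may differ.
import numpy as np


def normalized_affine_to_pixel_affine(theta_norm, w=240, h=240):
    """Rewrite a 2x3 affine acting on normalized coords as one acting on pixel coords.

    If N maps pixel coords to normalized coords (x_n = 2*x/(w-1) - 1), then in homogeneous
    form A_pixel = N^{-1} @ A_norm @ N.
    """
    A = np.vstack([np.asarray(theta_norm, dtype=np.float64).reshape(2, 3),
                   [0.0, 0.0, 1.0]])
    N = np.array([[2.0 / (w - 1), 0.0, -1.0],
                  [0.0, 2.0 / (h - 1), -1.0],
                  [0.0, 0.0, 1.0]])
    return (np.linalg.inv(N) @ A @ N)[:2, :]
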
def evaluate(theta_estimate_batch, theta_GT_batch, source_image_batch, target_image_batch, use_cuda=True):
    # Convert the PyTorch transform parameters to OpenCV transform parameters.
    theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)

    # P5: refine the CNN result with the traditional NTG method.
    ntg_param = estimate_param_batch(source_image_batch, target_image_batch, None, itermax=600)
    ntg_param_pytorch = param2theta(ntg_param, 240, 240, use_cuda=use_cuda)

    cnn_ntg_param_batch = estimate_param_batch(source_image_batch, target_image_batch, theta_opencv, itermax=800)
    cnn_ntg_param_pytorch_batch = param2theta(cnn_ntg_param_batch, 240, 240, use_cuda=use_cuda)

def visualize_spec_epoch_result(source_image_batch, target_image_batch, theta_GT_batch, theta_estimate_batch,
                                use_cuda=True):
    theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
    grid_loss = GridLoss(use_cuda=use_cuda)

    # Results of the traditional NTG method.
    iter_list = [800]
    # iter_list = [100, 200]
    # for i in range(len(iter_list)):
    #     start_time = time.time()
    #     ntg_param_opencv_batch = estimate_param_batch(source_image_batch, target_image_batch, theta_opencv,
    #                                                   iter_list[i])
    #     elpased1 = calculate_diff_time(start_time)
    #     start_time = time.time()
    #     # ntg_param_opencv_batch_traditional = estimate_param_batch(source_image_batch, target_image_batch, None,
    #     #                                                           iter_list[i])
    #     elpased2 = calculate_diff_time(start_time)
    #     print('NTG on', str(len(source_image_batch)) + ' image pairs:', 'with init:', str(elpased1),
    #           'without init:', str(elpased2))
    #     ntg_param_pytorch_batch = param2theta(ntg_param_opencv_batch, 240, 240, use_cuda=use_cuda)
    #     # ntg_param_pytorch_batch_traditional = param2theta(ntg_param_opencv_batch_traditional, 240, 240,
    #     #                                                   use_cuda=use_cuda)
    #     # print(str(iter_list[i]) + '' + str(grid_loss.compute_grid_loss(ntg_param_opencv_batch, theta_GT_batch)))
    #     grid_loss_batch.append(grid_loss.compute_grid_loss(ntg_param_pytorch_batch, theta_GT_batch).numpy().tolist())
    #     # grid_loss_triditional_batch.append(
    #     #     grid_loss.compute_grid_loss(ntg_param_pytorch_batch_traditional, theta_GT_batch).numpy())

    ntg_param_opencv_batch = estimate_param_batch(source_image_batch, target_image_batch, theta_opencv, iter_list[0])
    ntg_param_opencv_batch_traditional = estimate_param_batch(source_image_batch, target_image_batch, None,
                                                              iter_list[0])

    ntg_param_pytorch_batch = param2theta(ntg_param_opencv_batch, 240, 240, use_cuda=use_cuda)
    ntg_param_pytorch_batch_traditional = param2theta(ntg_param_opencv_batch_traditional, 240, 240, use_cuda=use_cuda)

    grid_loss_batch = grid_loss.compute_grid_loss(ntg_param_pytorch_batch, theta_GT_batch).numpy().tolist()
    grid_loss_traditional_batch = grid_loss.compute_grid_loss(ntg_param_pytorch_batch_traditional,
                                                              theta_GT_batch).numpy().tolist()

    return grid_loss_batch, grid_loss_traditional_batch

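# GridLoss.compute_grid_loss is assumed to measure the mean displacement of a regular grid
# of control points transformed by the estimated affine versus the ground-truth affine
# (both [B, 2, 3] in normalized coordinates). A minimal, hypothetical re-implementation for
# reference only; the project's own grid size and reduction may differ.
import torch


def grid_point_error(theta_est, theta_gt, grid_size=20):
    xs = torch.linspace(-1, 1, grid_size)
    gy, gx = torch.meshgrid(xs, xs, indexing='ij')
    # Regular grid as homogeneous points, shape [3, N].
    pts = torch.stack([gx, gy, torch.ones_like(gx)], dim=0).reshape(3, -1)
    pts = pts.to(theta_est.device, theta_est.dtype)
    p_est = torch.matmul(theta_est.view(-1, 2, 3), pts)  # [B, 2, N]
    p_gt = torch.matmul(theta_gt.view(-1, 2, 3), pts)    # [B, 2, N]
    # Per-sample mean Euclidean distance between corresponding grid points.
    return (p_est - p_gt).norm(dim=1).mean(dim=1)
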
def visualize_compare_result(source_image_batch, target_image_batch, theta_GT_batch, theta_estimate_batch,
                             use_cuda=True):
    # P2: ground-truth result.
    warped_image_GT_list = affine_transform_pytorch(source_image_batch, theta_GT_batch)

    # P3: result of CNN registration.
    warped_image_list = affine_transform_pytorch(source_image_batch, theta_estimate_batch)

    # P4: result of the traditional NTG method.
    ntg_param_batch = estimate_param_batch(source_image_batch, target_image_batch, None)
    ntg_image_warped_batch = affine_transform_opencv(source_image_batch, ntg_param_batch)

    # Convert the PyTorch transform parameters to OpenCV transform parameters.
    theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)

    # P5: refine the CNN result with the traditional NTG method.
    cnn_ntg_param_batch = estimate_param_batch(source_image_batch, target_image_batch, theta_opencv)
    cnn_ntg_image_warped_batch = affine_transform_opencv(source_image_batch, cnn_ntg_param_batch)

    # Convert back to PyTorch parameters and warp again, mainly to verify that the OpenCV
    # and PyTorch warps behave identically.
    # ntg_param_pytorch_batch = param2theta(ntg_param_batch, 240, 240, use_cuda=use_cuda)
    # ntg_image_warped_pytorch_batch = affine_transform_pytorch(source_image_list, ntg_param_pytorch_batch)

    # Visualize the results.
    plot_title = ['source_img', 'target_img', 'cnn_img', 'ntg_img', 'cnn_ntg_img']
    plot_batch_result(source_image_batch, target_image_batch, warped_image_list,
                      ntg_image_warped_batch, cnn_ntg_image_warped_batch, plot_title=plot_title)

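# affine_transform_opencv / single_affine_transform_opencv are project helpers; the
# underlying operation is assumed to be a plain cv2.warpAffine with a 2x3 pixel-space
# matrix, roughly as sketched here (interpolation flag is an assumption).
import cv2
import numpy as np


def warp_with_opencv(image, affine_2x3, size=(240, 240)):
    """Warp a single HxW(xC) image with a 2x3 pixel-coordinate affine (sketch)."""
    M = np.asarray(affine_2x3, dtype=np.float32).reshape(2, 3)
    return cv2.warpAffine(image, M, size, flags=cv2.INTER_LINEAR)
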
def register_CNN_NTG(self, source_image_path, target_image_path, itermax=800, custom_pyramid_level=-1):
    source_image_raw = io.imread(source_image_path)
    target_image_raw = io.imread(target_image_path)

    source_image_resize = cv2.resize(source_image_raw, (240, 240))
    target_image_resize = cv2.resize(target_image_raw, (240, 240))

    source_image = source_image_raw[:, :, 0:1]
    target_image = target_image_raw[:, :, 2:3]

    source_image_var = preprocess_image(source_image, resize=True, use_cuda=self.use_cuda)
    target_image_var = preprocess_image(target_image, resize=True, use_cuda=self.use_cuda)

    batch = {'source_image': source_image_var, 'target_image': target_image_var}

    if self.ntg_model is not None:
        theta = self.ntg_model(batch)
        theta_opencv = theta2param(theta.view(-1, 2, 3), 240, 240, use_cuda=self.use_cuda)
        cnn_ntg_param_batch = estimate_affine_param(
            target_image_resize[:, :, 0],
            source_image_resize[:, :, 2],
            theta_opencv[0].detach().numpy(),
            itermax=itermax,
            custom_pyramid_level=custom_pyramid_level)
        ntg_image_warped_batch = single_affine_transform_opencv(source_image_resize, cnn_ntg_param_batch)
        return ntg_image_warped_batch
    else:
        print('ntg_model is None')

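# Hypothetical usage of the two registration methods above. The wrapper class name and the
# file paths are illustrative only and are not taken from the repository:
#
#   reg = RegistrationWrapper(use_cuda=torch.cuda.is_available())   # hypothetical class
#   cnn_only = reg.register_CNN('nir_band.png', 'rgb_band.png')
#   cnn_refined = reg.register_CNN_NTG('nir_band.png', 'rgb_band.png', itermax=800)
#   cv2.imwrite('warped_cnn.png', cnn_only)
#   cv2.imwrite('warped_cnn_ntg.png', cnn_refined)
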
def iterDataset(dataloader, pair_generator, ntg_model, cvpr_model, vis, threshold=10, use_cuda=True,
                use_traditional=False, use_combine=False, save_mat=False, use_cvpr=False, use_cnn=False):
    '''
    Iterate over the batches in the dataset and process them.
    :param dataloader:
    :param pair_generator:
    :param ntg_model:
    :param use_cuda:
    :return:
    '''
    fn_grid_loss = GridLoss(use_cuda=use_cuda)

    grid_loss_cnn_list = []
    grid_loss_cvpr_list = []
    grid_loss_ntg_list = []
    grid_loss_comb_list = []

    mutual_info_cnn_list = []
    mutual_info_cvpr_list = []
    mutual_info_ntg_list = []
    mutual_info_comb_list = []

    ntg_loss_total = 0
    cnn_ntg_loss_total = 0

    normalize_func = NormalizeCAVEDict(["image"])

    for batch_idx, batch in enumerate(dataloader):
        # if batch_idx == 1:
        #     print('==1 break')
        #     break
        if batch_idx % 5 == 0:
            print('test batch: [{}/{} ({:.0f}%)]'.format(
                batch_idx, len(dataloader), 100. * batch_idx / len(dataloader)))

        pair_batch = pair_generator(batch)  # image[batch_size,1,w,h] theta_GT[batch_size,2,3]

        # raw_source_image_batch = normalize_func.scale_image_batch(pair_batch['raw_source_image_batch'])
        # raw_target_image_batch = normalize_func.scale_image_batch(pair_batch['raw_target_image_batch'])
        # raw_source_image_batch = pair_batch['raw_source_image_batch']
        # raw_target_image_batch = pair_batch['raw_target_image_batch']
        raw_source_image_batch = pair_batch['source_image']
        raw_target_image_batch = pair_batch['target_image']

        pair_batch['source_image'] = normalize_func.normalize_image_batch(pair_batch['source_image'])
        pair_batch['target_image'] = normalize_func.normalize_image_batch(pair_batch['target_image'])
        # pair_batch['source_image'] = normalize_func.scale_image_batch(pair_batch['source_image'])
        # pair_batch['target_image'] = normalize_func.scale_image_batch(pair_batch['target_image'])

        source_image_batch = pair_batch['source_image']
        target_image_batch = pair_batch['target_image']
        theta_GT_batch = pair_batch['theta_GT']
        name = pair_batch['name']
        print(name)
        # if name[0] != 'fake_and_real_tomatoes_ms.mat':
        #     continue

        if use_cnn:
            theta_estimate_batch = ntg_model(pair_batch)  # theta [batch_size,6]
            theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
            # The network's estimates at indices 1, 2, 3 and 5 have the opposite sign of the
            # ground truth because the source and target images are swapped in the pair generator.
            loss_cnn = fn_grid_loss.compute_grid_loss(theta_estimate_batch.detach(), theta_GT_batch)
            grid_loss_cnn_list.append(loss_cnn.detach().cpu().numpy())

        if use_cvpr:
            pair_batch['source_image'] = torch.cat(
                (source_image_batch, source_image_batch, source_image_batch), 1)
            pair_batch['target_image'] = torch.cat(
                (target_image_batch, target_image_batch, target_image_batch), 1)
            theta_cvpr_batch = cvpr_model(pair_batch)
            loss_cvpr = fn_grid_loss.compute_grid_loss(theta_cvpr_batch.detach(), theta_GT_batch)
            grid_loss_cvpr_list.append(loss_cvpr.detach().cpu().numpy())

        if use_traditional:
            with torch.no_grad():
                ntg_param_batch = estimate_aff_param_iterator(
                    source_image_batch[:, 0, :, :].unsqueeze(1),
                    target_image_batch[:, 0, :, :].unsqueeze(1),
                    None, use_cuda=use_cuda, itermax=800, normalize_func=normalize_func)
                ntg_param_pytorch_batch = param2theta(ntg_param_batch, 240, 240, use_cuda=use_cuda)
                loss_ntg = fn_grid_loss.compute_grid_loss(ntg_param_pytorch_batch.detach(), theta_GT_batch)
                # print(theta2param(ntg_param_pytorch_batch, 512, 512, False))
                # print(theta2param(theta_GT_batch, 512, 512, False))
                # print(loss_ntg)
                grid_loss_ntg_list.append(loss_ntg.detach().cpu().numpy())

        if use_combine:
            with torch.no_grad():
                # cnn_ntg_param_batch = estimate_aff_param_iterator(
                #     raw_source_image_batch[:, 0, :, :].unsqueeze(1),
                #     raw_target_image_batch[:, 0, :, :].unsqueeze(1),
                #     theta_opencv, use_cuda=use_cuda, itermax=600, normalize_func=normalize_func)
                cnn_ntg_param_batch = estimate_aff_param_iterator(
                    source_image_batch[:, 0, :, :].unsqueeze(1),
                    target_image_batch[:, 0, :, :].unsqueeze(1),
                    theta_opencv, use_cuda=use_cuda, itermax=600, normalize_func=normalize_func)
                cnn_ntg_param_pytorch_batch = param2theta(cnn_ntg_param_batch, 240, 240, use_cuda=use_cuda)
                loss_cnn_ntg = fn_grid_loss.compute_grid_loss(cnn_ntg_param_pytorch_batch.detach(), theta_GT_batch)
                grid_loss_comb_list.append(loss_cnn_ntg.detach().cpu().numpy())

        # source_image_batch = normalize_func.scale_image_batch(source_image_batch)
        # target_image_batch = normalize_func.scale_image_batch(target_image_batch)
        cnn_wraped_image = affine_transform_pytorch(source_image_batch, theta_estimate_batch)
        cvpr_wraped_image = affine_transform_pytorch(source_image_batch, theta_cvpr_batch)
        ntg_wraped_image = affine_transform_pytorch(source_image_batch, ntg_param_pytorch_batch)
        cnn_ntg_wraped_image = affine_transform_pytorch(source_image_batch, cnn_ntg_param_pytorch_batch)
        gt_image_batch = affine_transform_pytorch(source_image_batch, theta_GT_batch)

        # mutual_info_cnn_list.append(calculate_mutual_info_batch(cnn_wraped_image, gt_wraped_image))
        # mutual_info_cvpr_list.append(calculate_mutual_info_batch(cvpr_wraped_image, gt_wraped_image))
        # mutual_info_ntg_list.append(calculate_mutual_info_batch(ntg_wraped_image, gt_wraped_image))
        # mutual_info_comb_list.append(calculate_mutual_info_batch(cnn_ntg_wraped_image, gt_wraped_image))

        # normailze_visual = False
        vis.showImageBatch(source_image_batch, normailze=True, win='source_image_batch',
                           title='source_image_batch', start_index=14)
        vis.showImageBatch(target_image_batch, normailze=True, win='target_image_batch',
                           title='target_image_batch', start_index=14)
        vis.showImageBatch(ntg_wraped_image, normailze=True, win='ntg_wraped_image',
                           title='ntg_wraped_image', start_index=14)
        vis.showImageBatch(cvpr_wraped_image, normailze=True, win='cvpr_wraped_image', title='cvpr_wraped_image')
        vis.showImageBatch(cnn_wraped_image, normailze=True, win='cnn_wraped_image', title='cnn_wraped_image')
        vis.showImageBatch(cnn_ntg_wraped_image, normailze=True, win='cnn_ntg_wraped_image',
                           title='cnn_ntg_wraped_image')
        vis.showImageBatch(gt_image_batch, normailze=True, win='gt_image_batch', title='gt_image_batch')
        # print(image_name)

    # scio.savemat('mutual_info_cave_dict.mat', {'mutual_info_cnn_list': mutual_info_cnn_list,
    #                                            'mutual_info_cvpr_list': mutual_info_cvpr_list,
    #                                            'mutual_info_ntg_list': mutual_info_ntg_list,
    #                                            'mutual_info_comb_list': mutual_info_comb_list})

    grid_loss_cnn_array = np.array(grid_loss_cnn_list)
    grid_loss_ntg_array = np.array(grid_loss_ntg_list)
    grid_loss_comb_array = np.array(grid_loss_comb_list)
    grid_loss_cvpr_array = np.array(grid_loss_cvpr_list)

    # if use_cnn and save_mat:
    #     scio.savemat('exp_bigger/cnn_error.mat', {'cave_error_cnn': grid_loss_cnn_array})
    # if use_traditional and save_mat:
    #     scio.savemat('exp_bigger/ntg_error.mat', {'cave_error_ntg': grid_loss_ntg_array})
    # if use_combine and save_mat:
    #     scio.savemat('exp_bigger/cnn_ntg_error.mat', {'cave_error_cnn_ntg': grid_loss_comb_array})
    # scio.savemat('cave_grid_loss.mat', {'cave_cnn': grid_loss_cnn_array,
    #                                     'cave_ntg': grid_loss_ntg_array,
    #                                     'cave_cnn_ntg': grid_loss_comb_array,
    #                                     'cave_cvpr': grid_loss_cvpr_array})

    print("Grid-point losses above the threshold are excluded from the average")
    print('NTG grid-point loss')
    ntg_group_list = compute_average_grid_loss(grid_loss_ntg_list)
    print('CNN grid-point loss')
    cnn_group_list = compute_average_grid_loss(grid_loss_cnn_list)
    print('CNN+NTG grid-point loss')
    cnn_ntg_group_list = compute_average_grid_loss(grid_loss_comb_list)

    # x_list = [i for i in range(10)]
    # vis.drawGridlossGroup(x_list, ntg_group_list, cnn_group_list, cnn_ntg_group_list, cvpr_group_list,
    #                       layout_title="nir_result", win='nir_result')
    # vis.drawGridlossBar(x_list, ntg_group_list, cnn_group_list, cnn_ntg_group_list, cvpr_group_list,
    #                     layout_title="Grid_loss_histogram", win='Grid_loss_histogram')

    print("Computing correct-match rates")
    print('NTG correct rate')
    compute_correct_rate(grid_loss_ntg_list, threshold=threshold)
    print('CNN correct rate')
    compute_correct_rate(grid_loss_cnn_list, threshold=threshold)
    print('CNN+NTG correct rate')
    compute_correct_rate(grid_loss_comb_list, threshold=threshold)
    print('cnngeometric correct rate')
    compute_correct_rate(grid_loss_cvpr_list, threshold=threshold)

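# compute_average_grid_loss and compute_correct_rate are the project's summary helpers.
# Based on how they are used above, they presumably (a) average the per-sample grid losses
# while excluding values above a threshold and (b) report the fraction of samples whose
# grid loss falls below the threshold. A rough, hypothetical sketch that ignores the
# grouping behaviour suggested by the returned *_group_list values:
import numpy as np


def average_grid_loss_below(grid_loss_list, exclude_above=20):
    losses = np.concatenate([np.atleast_1d(np.asarray(l)) for l in grid_loss_list])
    kept = losses[losses < exclude_above]
    avg = kept.mean() if kept.size else float('nan')
    print('mean grid loss (below threshold):', avg)
    return avg


def correct_rate(grid_loss_list, threshold=10):
    losses = np.concatenate([np.atleast_1d(np.asarray(l)) for l in grid_loss_list])
    rate = float((losses < threshold).mean()) if losses.size else float('nan')
    print('correct rate:', rate)
    return rate
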
def iterDataset(dataloader, pair_generator, ntg_model, cvpr_model, vis, threshold=10, use_cuda=True):
    '''
    Iterate over the batches in the dataset and process them.
    :param dataloader:
    :param pair_generator:
    :param ntg_model:
    :param use_cuda:
    :return:
    '''
    fn_grid_loss = GridLoss(use_cuda=use_cuda)

    grid_loss_cnn_list = []
    grid_loss_cvpr_list = []
    grid_loss_ntg_list = []
    grid_loss_comb_list = []

    mutual_info_cnn_list = []
    mutual_info_cvpr_list = []
    mutual_info_ntg_list = []
    mutual_info_comb_list = []

    ntg_loss_total = 0
    cnn_ntg_loss_total = 0

    # iter_list = [100, 200, 300, 400, 500, 600]
    iter_list = [1, 10, 30, 50, 100, 200, 300, 400, 500, 600, 700, 800]
    print(iter_list)

    grid_loss_dict = {}
    grid_loss_traditional_dict = {}
    for i in range(len(iter_list)):
        dict_key = 'key' + str(iter_list[i])
        grid_loss_dict[dict_key] = []
        grid_loss_traditional_dict[dict_key] = []

    # batch {image.shape = }
    for batch_idx, batch in enumerate(dataloader):
        # print("batch_id", batch_idx, '/', len(dataloader))
        # if batch_idx == 1:
        #     break
        if batch_idx % 5 == 0:
            print('test batch: [{}/{} ({:.0f}%)]'.format(
                batch_idx, len(dataloader), 100. * batch_idx / len(dataloader)))

        pair_batch = pair_generator(batch)  # image[batch_size,1,w,h] theta_GT[batch_size,2,3]
        theta_estimate_batch = ntg_model(pair_batch)  # theta [batch_size,6]
        theta_cvpr_estimate_batch = cvpr_model(pair_batch)

        source_image_batch = pair_batch['source_image']
        target_image_batch = pair_batch['target_image']
        theta_GT_batch = pair_batch['theta_GT']
        image_name = pair_batch['name']

        # warped_image_batch = affine_transform_pytorch(source_image_batch, theta_estimate_batch)
        # gt_image_batch = affine_transform_pytorch(source_image_batch, theta_GT_batch)
        # cvpr_wraped_image = affine_transform_pytorch(source_image_batch, theta_cvpr_estimate_batch)
        # loss, g1xy, g2xy = loss_fn(target_image_batch, warped_image_batch)
        # print("one batch ntg:", loss.item())
        # ntg_loss_total += loss.item()

        # Show the CNN registration result.
        # print("showing images")
        # visualize_cnn_result(source_image_batch, target_image_batch, theta_estimate_batch, vis)
        # time.sleep(10)

        # Show the comparison result for one epoch.
        # visualize_compare_result(source_image_batch, target_image_batch, theta_GT_batch, theta_estimate_batch,
        #                          use_cuda=use_cuda)

        # Show line plots over multiple epochs.
        # visualize_iter_result(source_image_batch[:, 0, :, :].unsqueeze(1),
        #                       target_image_batch[:, 0, :, :].unsqueeze(1),
        #                       theta_GT_batch, theta_estimate_batch,
        #                       grid_loss_dict, grid_loss_traditional_dict, use_cuda=use_cuda)
        # continue

        ## Compute the grid-point registration error.
        # Convert the PyTorch transform parameters to OpenCV transform parameters.
        theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)

        # P5: refine the CNN result with the traditional NTG method.
        # ntg_param = estimate_param_batch(source_image_batch, target_image_batch, None, itermax=600)
        # ntg_param_pytorch = param2theta(ntg_param, 240, 240, use_cuda=use_cuda)

        # print('estimating with the parallel NTG solver')
        with torch.no_grad():
            ntg_param_batch = estimate_aff_param_iterator(
                source_image_batch[:, 0, :, :].unsqueeze(1),
                target_image_batch[:, 0, :, :].unsqueeze(1),
                None, use_cuda=use_cuda, itermax=900)
            cnn_ntg_param_batch = estimate_aff_param_iterator(
                source_image_batch[:, 0, :, :].unsqueeze(1),
                target_image_batch[:, 0, :, :].unsqueeze(1),
                theta_opencv, use_cuda=use_cuda, itermax=900)

            cnn_ntg_param_pytorch_batch = param2theta(cnn_ntg_param_batch, 240, 240, use_cuda=use_cuda)
            ntg_param_pytorch_batch = param2theta(ntg_param_batch, 240, 240, use_cuda=use_cuda)

        # cnn_ntg_wraped_image = affine_transform_pytorch(source_image_batch, cnn_ntg_param_pytorch_batch)
        # ntg_wraped_image = affine_transform_pytorch(source_image_batch, ntg_param_pytorch_batch)
        # combine_loss, _, _ = loss_fn(target_image_batch, cnn_ntg_wraped_image)
        # cnn_ntg_loss_total += combine_loss.item()

        # The network's estimates at indices 1, 2, 3 and 5 have the opposite sign of the
        # ground truth because the source and target images are swapped in the pair generator.
        loss_cvpr_2018 = fn_grid_loss.compute_grid_loss(theta_cvpr_estimate_batch, theta_GT_batch)
        loss_cnn = fn_grid_loss.compute_grid_loss(theta_estimate_batch.detach(), theta_GT_batch)
        loss_ntg = fn_grid_loss.compute_grid_loss(ntg_param_pytorch_batch.detach(), theta_GT_batch)
        loss_cnn_ntg = fn_grid_loss.compute_grid_loss(cnn_ntg_param_pytorch_batch.detach(), theta_GT_batch)

        grid_loss_ntg_list.append(loss_ntg.detach().cpu().numpy())
        grid_loss_cnn_list.append(loss_cnn.detach().cpu().numpy())
        grid_loss_comb_list.append(loss_cnn_ntg.detach().cpu().numpy())
        grid_loss_cvpr_list.append(loss_cvpr_2018.detach().cpu().numpy())

        # vis.showImageBatch(source_image_batch, normailze=True, win='source_image_batch', title='source_image_batch')
        # vis.showImageBatch(target_image_batch, normailze=True, win='target_image_batch', title='target_image_batch')
        # vis.showImageBatch(warped_image_batch, normailze=True, win='warped_image_batch', title='cnn')
        # vis.showImageBatch(cnn_ntg_wraped_image, normailze=True, win='cnn_ntg_wraped_image', title='ntg_pytorch')
        # vis.showImageBatch(gt_image_batch, normailze=True, win='gt_image_batch', title='gt_image_batch')

        # mutual_info_cnn_list.append(calculate_mutual_info_batch(warped_image_batch, gt_image_batch))
        # mutual_info_cvpr_list.append(calculate_mutual_info_batch(cvpr_wraped_image, gt_image_batch))
        # mutual_info_ntg_list.append(calculate_mutual_info_batch(ntg_wraped_image, gt_image_batch))
        # mutual_info_comb_list.append(calculate_mutual_info_batch(cnn_ntg_wraped_image, gt_image_batch))
        # print(image_name)

        # Show the grid-loss histogram for a specific epoch.
        # g_loss, g_trad_loss = visualize_spec_epoch_result(source_image_batch, target_image_batch, theta_GT_batch,
        #                                                   theta_estimate_batch, use_cuda=use_cuda)
        # grid_loss_hist.append(g_loss)
        # grid_loss_traditional_hist.append(g_trad_loss)
        # loss_cnn = grid_loss.compute_grid_loss(theta_estimate_batch, theta_GT_list)
        # loss_cnn_ntg = grid_loss.compute_grid_loss(cnn_ntg_param, theta_GT_list)

    # scio.savemat('grid_loss_dict800.mat', grid_loss_dict)
    # scio.savemat('grid_loss_traditional_dict800.mat', grid_loss_traditional_dict)
    # return
    # scio.savemat('mutual_info_dict.mat', {'mutual_info_cnn_list': mutual_info_cnn_list,
    #                                       'mutual_info_cvpr_list': mutual_info_cvpr_list,
    #                                       'mutual_info_ntg_list': mutual_info_ntg_list,
    #                                       'mutual_info_comb_list': mutual_info_comb_list})

    grid_loss_ntg_array = np.array(grid_loss_ntg_list)
    grid_loss_cnn_array = np.array(grid_loss_cnn_list)
    grid_loss_comb_array = np.array(grid_loss_comb_list)
    grid_loss_cvpr_array = np.array(grid_loss_cvpr_list)

    scio.savemat('grid_loss_voc2011_test_iter900.mat', {'grid_loss_ntg_array': grid_loss_ntg_array,
                                                        'grid_loss_cvpr_array': grid_loss_cvpr_array,
                                                        'grid_loss_cnn_array': grid_loss_cnn_array,
                                                        'grid_loss_comb_array': grid_loss_comb_array})

    print("Grid-point losses above the threshold are excluded from the average")
    print('NTG grid-point loss')
    ntg_group_list = compute_average_grid_loss(grid_loss_ntg_list)
    print('CNN grid-point loss')
    cnn_group_list = compute_average_grid_loss(grid_loss_cnn_list)
    print('CNN+NTG grid-point loss')
    cnn_ntg_group_list = compute_average_grid_loss(grid_loss_comb_list)
    print('CVPR grid-point loss')
    cvpr_group_list = compute_average_grid_loss(grid_loss_cvpr_list)

    x_list = [i for i in range(10)]
    # vis.drawGridlossGroup(x_list, ntg_group_list, cnn_group_list, cnn_ntg_group_list, cvpr_group_list,
    #                       layout_title="nir_result", win='nir_result')
    # vis.drawGridlossBar(x_list, ntg_group_list, cnn_group_list, cnn_ntg_group_list, cvpr_group_list,
    #                     layout_title="Grid_loss_histogram", win='Grid_loss_histogram')
    # vis.getVisdom().line(x_list, cnn_group_list)
    # vis.getVisdom().line(X=np.column_stack(x_list, x_list),
    #                      Y=np.column_stack(cnn_group_list, cnn_ntg_group_list))

    print("Mean NTG value of CNN", ntg_loss_total / len(dataloader))
    print("Mean NTG value of CNN+NTG", cnn_ntg_loss_total / len(dataloader))

    print("Computing correct-match rates")
    print('NTG correct rate')
    compute_correct_rate(grid_loss_ntg_list, threshold=threshold)
    print('CNN correct rate')
    compute_correct_rate(grid_loss_cnn_list, threshold=threshold)
    print('CNN+NTG correct rate')
    compute_correct_rate(grid_loss_comb_list, threshold=threshold)
    print('CVPR correct rate')
    compute_correct_rate(grid_loss_cvpr_list, threshold=threshold)

def visualize_iter_result(source_image_batch, target_image_batch, theta_GT_batch, theta_estimate_batch,
                          grid_loss_dict, grid_loss_traditional_dict, use_cuda=True):
    theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
    grid_loss = GridLoss(use_cuda=use_cuda)

    # Results of the traditional NTG method.
    # iter_list = [300, 600, 1000, 1500, 2000]
    # iter_list = [100, 200]

    # Normalized mutual information data.
    # matual_info_list = []
    # matual_info_traditional_list = []
    # matual_info_list_batch = []
    # matual_info_traditional_list_batch = []
    # grid_loss_batch = []
    # grid_loss_triditional_batch = []

    # iter_list = [100, 200, 300, 400, 500, 600]
    iter_list = [1, 10, 30, 50, 100, 200, 300, 400, 500, 600, 700, 800]

    # grid_loss_dict = {}
    # grid_loss_traditional_dict = {}
    # for i in range(len(iter_list)):
    #     dict_key = 'key' + str(iter_list[i])
    #     grid_loss_dict[dict_key] = []
    #     grid_loss_traditional_dict[dict_key] = []
    # result_batch = []

    for i in range(len(iter_list)):
        start_time = time.time()
        ntg_param_opencv_batch = estimate_aff_param_iterator(source_image_batch, target_image_batch, theta_opencv,
                                                             use_cuda=use_cuda, itermax=iter_list[i])
        elpased1 = calculate_diff_time(start_time)

        start_time = time.time()
        ntg_param_opencv_batch_traditional = estimate_aff_param_iterator(source_image_batch, target_image_batch, None,
                                                                         use_cuda=use_cuda, itermax=iter_list[i])
        elpased2 = calculate_diff_time(start_time)
        # print('NTG on', str(len(source_image_batch)) + ' image pairs:', 'with init:', str(elpased1),
        #       'without init:', str(elpased2))

        ntg_param_pytorch_batch = param2theta(ntg_param_opencv_batch, 240, 240, use_cuda=use_cuda)
        ntg_param_pytorch_batch_traditional = param2theta(ntg_param_opencv_batch_traditional, 240, 240,
                                                          use_cuda=use_cuda)

        # ntg_image_warped_batch = affine_transform_pytorch(source_image_batch, ntg_param_pytorch_batch)
        # ntg_image_warped_triditional_batch = affine_transform_pytorch(source_image_batch,
        #                                                               ntg_param_pytorch_batch_traditional)

        # Only plot the final result.
        # if i == len(iter_list) - 1:
        #     result_batch.append(ntg_image_warped_triditional_batch)
        #     result_batch.append(ntg_image_warped_batch)

        # print(str(iter_list[i]) + '' + str(grid_loss.compute_grid_loss(ntg_param_opencv_batch, theta_GT_batch)))
        # grid_loss_batch.append(grid_loss.compute_grid_loss(ntg_param_pytorch_batch, theta_GT_batch).numpy())
        # grid_loss_triditional_batch.append(grid_loss.compute_grid_loss(ntg_param_pytorch_batch_traditional,
        #                                                                theta_GT_batch).numpy())

        dict_key = 'key' + str(iter_list[i])
        grid_loss_dict[dict_key].append(
            grid_loss.compute_grid_loss(ntg_param_pytorch_batch, theta_GT_batch).detach().cpu().numpy())
        grid_loss_traditional_dict[dict_key].append(
            grid_loss.compute_grid_loss(ntg_param_pytorch_batch_traditional,
                                        theta_GT_batch).detach().cpu().numpy())

def register_images(source_image_path, target_image_path, use_cuda=True):
    env_name = 'compare_ntg_realize'
    vis = VisdomHelper(env_name)

    # Build the model.
    ntg_model = CNNRegistration(single_channel=True, use_cuda=use_cuda)

    print("Loading trained model weights")
    print("ntg_checkpoint_path:", ntg_checkpoint_path)

    # Load all tensors onto the CPU (GPU ==> CPU).
    ntg_checkpoint = torch.load(ntg_checkpoint_path, map_location=lambda storage, loc: storage)
    ntg_checkpoint['state_dict'] = OrderedDict(
        [(k.replace('vgg', 'model'), v) for k, v in ntg_checkpoint['state_dict'].items()])
    ntg_model.load_state_dict(ntg_checkpoint['state_dict'])

    source_image_raw = io.imread(source_image_path)
    target_image_raw = io.imread(target_image_path)

    source_image = source_image_raw
    target_image = target_image_raw

    source_image_var = preprocess_image(source_image, resize=True, use_cuda=use_cuda)
    target_image_var = preprocess_image(target_image, resize=True, use_cuda=use_cuda)
    # source_image_var = source_image_var[:, 0, :, :][:, np.newaxis, :, :]
    # target_image_var = target_image_var[:, 0, :, :][:, np.newaxis, :, :]

    batch = {'source_image': source_image_var, 'target_image': target_image_var}

    affine_tnf = AffineTnf(use_cuda=use_cuda)

    ntg_model.eval()
    theta = ntg_model(batch)

    ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :], target_image_var[:, 2, :, :], None)
    ntg_image_warped_batch = affine_transform_opencv_2(source_image_var, ntg_param_batch)

    theta_opencv = theta2param(theta.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
    cnn_ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :], target_image_var[:, 2, :, :],
                                               theta_opencv)

    cnn_image_warped_batch = affine_transform_pytorch(source_image_var, theta)
    cnn_ntg_image_warped_batch = affine_transform_opencv_2(source_image_var, cnn_ntg_param_batch)

    cnn_ntg_param_multi_batch = estimate_aff_param_iterator(source_image_var[:, 0, :, :].unsqueeze(1),
                                                            target_image_var[:, 0, :, :].unsqueeze(1),
                                                            theta_opencv, use_cuda=use_cuda, itermax=800)
    cnn_ntg_image_warped_mulit_batch = affine_transform_opencv_2(
        source_image_var, cnn_ntg_param_multi_batch.detach().cpu().numpy())
    # cnn_ntg_image_warped_mulit_batch = affine_transform_opencv_2(source_image_var,
    #                                                              theta_opencv.detach().cpu().numpy())

    vis.showImageBatch(source_image_var, normailze=True, win='source_image_batch', title='source_image_batch')
    vis.showImageBatch(target_image_var, normailze=True, win='target_image_batch', title='target_image_batch')
    vis.showImageBatch(cnn_image_warped_batch, normailze=True, win='cnn_image_warped_batch',
                       title='cnn_image_warped_batch')
    # Applying NTG directly across different channels may simply fail.
    # vis.showImageBatch(ntg_image_warped_batch, normailze=True, win='warped_image_batch', title='warped_image_batch')
    vis.showImageBatch(cnn_ntg_image_warped_mulit_batch, normailze=True,
                       win='cnn_ntg_param_multi_batch', title='cnn_ntg_param_multi_batch')

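# affine_transform_pytorch used above is presumably a thin wrapper around PyTorch's
# standard F.affine_grid / F.grid_sample pair; a minimal sketch follows. The align_corners
# choice is an assumption, not taken from the project.
import torch.nn.functional as F


def warp_with_pytorch(image_batch, theta_batch):
    """Warp a [B, C, H, W] batch with [B, 2, 3] normalized affine parameters (sketch)."""
    grid = F.affine_grid(theta_batch.view(-1, 2, 3), image_batch.size(), align_corners=True)
    return F.grid_sample(image_batch, grid, align_corners=True)
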
def iterDataset(dataloader, pair_generator, ntg_model, cvpr_model, vis, threshold=10, use_cuda=True):
    '''
    Iterate over the batches in the dataset and process them.
    :param dataloader:
    :param pair_generator:
    :param ntg_model:
    :param use_cuda:
    :return:
    '''
    fn_grid_loss = GridLoss(use_cuda=use_cuda)

    grid_loss_cnn_list = []
    grid_loss_cvpr_list = []
    grid_loss_ntg_list = []
    grid_loss_comb_list = []

    ntg_loss_total = 0
    cnn_ntg_loss_total = 0

    # batch {image.shape = }
    for batch_idx, batch in enumerate(dataloader):
        # print("batch_id", batch_idx, '/', len(dataloader))
        # if batch_idx == 15:
        #     break
        if batch_idx % 5 == 0:
            print('test batch: [{}/{} ({:.0f}%)]'.format(
                batch_idx, len(dataloader), 100. * batch_idx / len(dataloader)))

        pair_batch = pair_generator(batch)  # image[batch_size,1,w,h] theta_GT[batch_size,2,3]
        theta_estimate_batch = ntg_model(pair_batch)  # theta [batch_size,6]

        if cvpr_model is not None:
            theta_cvpr_estimate_batch = cvpr_model(pair_batch)

        source_image_batch = pair_batch['source_image']
        target_image_batch = pair_batch['target_image']
        theta_GT_batch = pair_batch['theta_GT']
        image_name = pair_batch['name']

        ## Compute the grid-point registration error.
        # Convert the PyTorch transform parameters to OpenCV transform parameters.
        theta_opencv = theta2param(theta_estimate_batch.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)

        # print('estimating with the parallel NTG solver')
        with torch.no_grad():
            ntg_param_batch = estimate_aff_param_iterator(
                source_image_batch[:, 0, :, :].unsqueeze(1),
                target_image_batch[:, 0, :, :].unsqueeze(1),
                None, use_cuda=use_cuda, itermax=600)
            cnn_ntg_param_batch = estimate_aff_param_iterator(
                source_image_batch[:, 0, :, :].unsqueeze(1),
                target_image_batch[:, 0, :, :].unsqueeze(1),
                theta_opencv, use_cuda=use_cuda, itermax=600)

            cnn_ntg_param_pytorch_batch = param2theta(cnn_ntg_param_batch, 240, 240, use_cuda=use_cuda)
            ntg_param_pytorch_batch = param2theta(ntg_param_batch, 240, 240, use_cuda=use_cuda)

        cnn_ntg_wraped_image = affine_transform_pytorch(source_image_batch, cnn_ntg_param_pytorch_batch)
        ntg_wraped_image = affine_transform_pytorch(source_image_batch, ntg_param_pytorch_batch)
        cnn_wraped_image = affine_transform_pytorch(source_image_batch, theta_estimate_batch)
        GT_image = affine_transform_pytorch(source_image_batch, theta_GT_batch)

        # loss_cvpr_2018 = fn_grid_loss.compute_grid_loss(theta_cvpr_estimate_batch, theta_GT_batch)
        loss_cnn = fn_grid_loss.compute_grid_loss(theta_estimate_batch.detach(), theta_GT_batch)
        loss_ntg = fn_grid_loss.compute_grid_loss(ntg_param_pytorch_batch.detach(), theta_GT_batch)
        loss_cnn_ntg = fn_grid_loss.compute_grid_loss(cnn_ntg_param_pytorch_batch.detach(), theta_GT_batch)

        vis.showHarvardBatch(source_image_batch, normailze=True, win='source_image_batch',
                             title='source_image_batch')
        vis.showHarvardBatch(target_image_batch, normailze=True, win='target_image_batch',
                             title='target_image_batch')
        vis.showHarvardBatch(ntg_wraped_image, normailze=True, win='ntg_wraped_image', title='ntg_wraped_image')
        vis.showHarvardBatch(cnn_wraped_image, normailze=True, win='cnn_wraped_image', title='cnn_wraped_image')
        vis.showHarvardBatch(cnn_ntg_wraped_image, normailze=True, win='cnn_ntg_wraped_image',
                             title='cnn_ntg_wraped_image')
        vis.showHarvardBatch(GT_image, normailze=True, win='GT_image', title='GT_image')

        grid_loss_ntg_list.append(loss_ntg.detach().cpu())
        grid_loss_cnn_list.append(loss_cnn.detach().cpu())
        grid_loss_comb_list.append(loss_cnn_ntg.detach().cpu())
        # grid_loss_cvpr_list.append(loss_cvpr_2018.detach().cpu())

    print("Grid-point losses above the threshold are excluded from the average")
    print('NTG grid-point loss')
    ntg_group_list = compute_average_grid_loss(grid_loss_ntg_list)
    print('CNN grid-point loss')
    cnn_group_list = compute_average_grid_loss(grid_loss_cnn_list)
    print('CNN+NTG grid-point loss')
    cnn_ntg_group_list = compute_average_grid_loss(grid_loss_comb_list)
    print('CVPR grid-point loss')
    # cvpr_group_list = compute_average_grid_loss(grid_loss_cvpr_list)

    x_list = [i for i in range(10)]
    # vis.drawGridlossBar(x_list, ntg_group_list, cnn_group_list, cnn_ntg_group_list, cvpr_group_list,
    #                     layout_title="Grid_loss_histogram", win='Grid_loss_histogram')

    print("Mean NTG value of CNN", ntg_loss_total / len(dataloader))
    print("Mean NTG value of CNN+NTG", cnn_ntg_loss_total / len(dataloader))

    print("Computing correct-match rates")
    print('NTG correct rate')
    compute_correct_rate(grid_loss_ntg_list, threshold=threshold)
    print('CNN correct rate')
    compute_correct_rate(grid_loss_cnn_list, threshold=threshold)
    print('CNN+NTG correct rate')
    compute_correct_rate(grid_loss_comb_list, threshold=threshold)