def __init__(self, single_channel=False, use_cuda=True, crop_factor=9 / 16,
             output_size=(240, 240), padding_factor=0.6):
    self.single_channel = single_channel
    self.use_cuda = use_cuda
    self.crop_factor = crop_factor
    self.padding_factor = padding_factor
    self.out_h, self.out_w = output_size
    self.rescalingTnf = AffineTnf(self.out_h, self.out_w, use_cuda=self.use_cuda)
    self.geometricTnf = AffineTnf(self.out_h, self.out_w, use_cuda=self.use_cuda)
def __init__(self, use_cuda=True, crop_factor=9 / 16, output_size=(240, 240),
             padding_factor=0.6):
    self.use_cuda = use_cuda
    self.crop_factor = crop_factor
    self.padding_factor = padding_factor
    self.out_h, self.out_w = output_size
    self.channel_choicelist = [0, 1, 2]
    self.rescalingTnf = AffineTnf(self.out_h, self.out_w, use_cuda=self.use_cuda)
    self.geometricTnf = AffineTnf(self.out_h, self.out_w, use_cuda=self.use_cuda)
def save_matlab_pic(image_data, theta_aff):
    image_batch = torch.from_numpy(image_data).transpose(1, 2).transpose(0, 1).unsqueeze(1).float()
    vis.showImageBatch(image_batch, win='image_batch', title='raw_image_batch', start_index=16)

    crop_factor = 9 / 16
    padding_factor = 0.6

    padding_image_batch = symmetricImagePad(image_batch, padding_factor=padding_factor)

    # Regions that fall outside the image after the transform are automatically set to 0
    affTnf = AffineTnf(240, 240, use_cuda=False)

    source_image_batch = affTnf(padding_image_batch, None, padding_factor, crop_factor)
    target_image_batch = affTnf(padding_image_batch, theta_aff, padding_factor, crop_factor)

    vis.showImageBatch(source_image_batch, win='source_image_batch', title='source_image_batch', start_index=16)
    vis.showImageBatch(target_image_batch, win='target_image_batch', title='target_image_batch', start_index=16)

    save_image_tensor(source_image_batch[16], 'mul_1s_s.png')
    save_image_tensor(target_image_batch[16], 'mul_1t_s.png')
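# A minimal usage sketch for save_matlab_pic (not part of the original code). It assumes the
# MATLAB export holds an (H, W, N) grayscale image stack plus one 2x3 affine per image; the
# file name 'multispectral_batch.mat' and the keys 'images' / 'theta' are hypothetical
# placeholders, not taken from this repository.
def demo_save_matlab_pic():
    import scipy.io as sio

    mat = sio.loadmat('multispectral_batch.mat')           # hypothetical file
    image_data = mat['images'].astype(np.float32)          # expected shape (H, W, N)
    theta_aff = torch.from_numpy(
        mat['theta'].astype(np.float32)).view(-1, 2, 3)    # one 2x3 affine per image
    save_matlab_pic(image_data, theta_aff)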
def compare_img_resize():
    img_path = '/Users/zale/project/myself/registration_cnn_ntg/datasets/row_data/multispectral/It.jpg'
    h, w = 600, 800

    opencv_start_time = time.time()
    img = cv2.imread(img_path)
    print('imread_time', calculate_diff_time(opencv_start_time))
    img = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)

    start_time = time.time()
    # img_t = img.transpose(2,0,1)
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
    img = np.ascontiguousarray(img, dtype=np.float32)  # uint8 to float32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    img = torch.from_numpy(img).unsqueeze(0)
    elapsed = calculate_diff_time(opencv_start_time)
    print('opencv time', img.shape, elapsed)

    torch_start_time = time.time()
    img2 = io.imread(img_path)
    print('torch_read_time', calculate_diff_time(torch_start_time))
    affineTnf = AffineTnf(h, w, use_cuda=False)
    image = torch.Tensor(img2.astype(np.float32))
    image = image.transpose(1, 2).transpose(0, 1)
    img2 = affineTnf(image.unsqueeze(0))
    elapsed = calculate_diff_time(torch_start_time)
    print('torch time,', img2.shape, elapsed)
def get_image_information(image_dir, image_name, label_path, vis):
    image_path = os.path.join(image_dir, image_name)
    image_np = io.imread(image_path)

    csv_data = read_csv_file(label_path)
    label_row_param = csv_data.loc[csv_data['image'] == image_name].values
    label_row_param = np.squeeze(label_row_param)

    if image_name != label_row_param[0]:
        raise ValueError("Image filename does not match the filename recorded in the label file")

    theta_aff = torch.from_numpy(
        label_row_param[1:].reshape(2, 3).astype(np.float32)).unsqueeze(0)

    image_batch = torch.from_numpy(image_np).transpose(1, 2).transpose(0, 1).unsqueeze(0).float()
    vis.showImageBatch(image_batch, win='image_batch', title='raw_image_batch')

    crop_factor = 9 / 16
    padding_factor = 0.6
    # crop_factor = 3
    # padding_factor = 0.9

    padding_image_batch = symmetricImagePad(image_batch, padding_factor=padding_factor)

    # Regions that fall outside the image after the transform are automatically set to 0
    affTnf = AffineTnf(446, 640, use_cuda=False)

    source_image_batch = affTnf(padding_image_batch, theta_aff, padding_factor, crop_factor)
    target_image_batch = affTnf(padding_image_batch, None, padding_factor, crop_factor)

    # inverse_theta_aff = inverse_theta(theta_aff, use_cuda=False)
    warped_image_batch = affTnf(target_image_batch, theta_aff, crop_factor=1, padding_factor=1)

    vis.showImageBatch(source_image_batch, win='source_image_batch', title='source_image_batch')
    vis.showImageBatch(target_image_batch, win='target_image_batch', title='target_image_batch')
    vis.showImageBatch(warped_image_batch, win='warped_image_batch', title='warped_image_batch')

    # save_image_tensor(image_batch, 'raw.jpg')
    save_image_tensor(source_image_batch, 'source.jpg')
    save_image_tensor(target_image_batch, 'target.jpg')
    save_image_tensor(warped_image_batch, 'warped2.jpg')
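# A minimal usage sketch for get_image_information (not part of the original code); the
# directory, image name, CSV path, and Visdom environment name below are hypothetical.
def demo_get_image_information():
    vis = VisdomHelper('inspect_affine_labels')              # hypothetical env name
    get_image_information(
        image_dir='datasets/row_data/multispectral',         # hypothetical paths
        image_name='It.jpg',
        label_path='datasets/row_data/label_file.csv',
        vis=vis)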
def __init__(self, use_cuda=True, output_size=(240, 240), crop_factor=9 / 16,
             padding_factor=0.6):
    self.use_cuda = use_cuda
    self.out_h, self.out_w = output_size
    self.crop_factor = crop_factor
    self.padding_factor = padding_factor
    self.affineTnf = AffineTnf(self.out_h, self.out_w, use_cuda=use_cuda)
def preprocess_image(image, resize=True, use_cuda=True):
    # image (240, 240, 3)
    # convert to torch Variable
    image = np.expand_dims(image.transpose((2, 0, 1)), 0)
    image_var = torch.Tensor(image.astype(np.float32) / 255.0)

    if use_cuda:
        image_var = image_var.cuda()

    # Resize image using bilinear sampling with identity affine tnf
    if resize:
        resizeTnf = AffineTnf(out_h=240, out_w=240, use_cuda=use_cuda)
        image_var = resizeTnf(image_var)

    # Normalize image
    image_var = normalize_image(image_var)

    return image_var
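# A minimal usage sketch for preprocess_image (not part of the original code); the image
# path is hypothetical and CUDA is disabled so the sketch also runs on CPU-only machines.
def demo_preprocess_image():
    image = io.imread('datasets/row_data/multispectral/It.jpg')     # HxWx3 uint8 image
    image_var = preprocess_image(image, resize=True, use_cuda=False)
    print(image_var.shape)                                          # expected (1, 3, 240, 240)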
def __init__(self, csv_file, training_image_path, output_size=(240, 240), transform=None):
    self.out_h, self.out_w = output_size
    self.train_data = pd.read_csv(csv_file)
    self.img_A_names = self.train_data.iloc[:, 0]
    self.img_B_names = self.train_data.iloc[:, 1]
    # .values replaces the deprecated DataFrame.as_matrix()
    self.point_A_coords = self.train_data.iloc[:, 2:22].values.astype('float')
    self.point_B_coords = self.train_data.iloc[:, 22:].values.astype('float')
    self.training_image_path = training_image_path
    self.transform = transform
    # no cuda, as the dataset is called from CPU threads in the dataloader and would produce a conflict
    self.affineTnf = AffineTnf(out_h=self.out_h, out_w=self.out_w, use_cuda=False)
def __init__(self, image_path, label_path, output_size=(480, 640), transform=None, use_cuda=False):
    '''
    :param image_path:
    :param label_path:
    :param output_size:
    :param normalize_range:
    :param use_cuda: using CUDA while reading data with multiple dataloader workers causes
                     synchronization problems and corrupted batches, so CUDA is not used here
    '''
    self.transform = transform
    self.image_path = image_path
    self.label_path = label_path
    self.image_list = os.listdir(self.image_path)
    self.out_h, self.out_w = output_size
    # DataFrame (table-like); if a column is set as the index, values no longer contains that column
    self.csv_data = read_csv_file(label_path)
    self.resizeTnf = AffineTnf(self.out_h, self.out_w, use_cuda=use_cuda)
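# A minimal usage sketch for the dataset constructor above (not part of the original code);
# the class name MultispectralDataset and the paths are hypothetical placeholders for
# whatever class this __init__ belongs to in the repository.
def demo_build_dataloader():
    from torch.utils.data import DataLoader

    dataset = MultispectralDataset(                     # hypothetical class name
        image_path='datasets/row_data/multispectral',   # hypothetical paths
        label_path='datasets/row_data/label_file.csv',
        output_size=(480, 640),
        use_cuda=False)                                 # keep CUDA off for multi-worker loading
    loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
    return loader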
def register_images(source_image_path, target_image_path, use_cuda=True):
    env_name = 'compare_ntg_realize'
    vis = VisdomHelper(env_name)

    # Create the model
    ntg_model = CNNRegistration(single_channel=True, use_cuda=use_cuda)

    print("Loading trained model weights")
    print("ntg_checkpoint_path:", ntg_checkpoint_path)

    # Load all tensors onto the CPU (GPU ==> CPU)
    ntg_checkpoint = torch.load(ntg_checkpoint_path, map_location=lambda storage, loc: storage)
    ntg_checkpoint['state_dict'] = OrderedDict(
        [(k.replace('vgg', 'model'), v) for k, v in ntg_checkpoint['state_dict'].items()])
    ntg_model.load_state_dict(ntg_checkpoint['state_dict'])

    source_image_raw = io.imread(source_image_path)
    target_image_raw = io.imread(target_image_path)

    source_image = source_image_raw
    target_image = target_image_raw

    source_image_var = preprocess_image(source_image, resize=True, use_cuda=use_cuda)
    target_image_var = preprocess_image(target_image, resize=True, use_cuda=use_cuda)

    # source_image_var = source_image_var[:,0,:,:][:,np.newaxis,:,:]
    # target_image_var = target_image_var[:,0,:,:][:,np.newaxis,:,:]

    batch = {'source_image': source_image_var, 'target_image': target_image_var}

    affine_tnf = AffineTnf(use_cuda=use_cuda)

    ntg_model.eval()
    theta = ntg_model(batch)

    ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :], target_image_var[:, 2, :, :], None)
    ntg_image_warped_batch = affine_transform_opencv_2(source_image_var, ntg_param_batch)

    theta_opencv = theta2param(theta.view(-1, 2, 3), 240, 240, use_cuda=use_cuda)
    cnn_ntg_param_batch = estimate_param_batch(source_image_var[:, 0, :, :], target_image_var[:, 2, :, :], theta_opencv)

    cnn_image_warped_batch = affine_transform_pytorch(source_image_var, theta)
    cnn_ntg_image_warped_batch = affine_transform_opencv_2(source_image_var, cnn_ntg_param_batch)

    cnn_ntg_param_multi_batch = estimate_aff_param_iterator(
        source_image_var[:, 0, :, :].unsqueeze(1),
        target_image_var[:, 0, :, :].unsqueeze(1),
        theta_opencv, use_cuda=use_cuda, itermax=800)

    cnn_ntg_image_warped_mulit_batch = affine_transform_opencv_2(
        source_image_var, cnn_ntg_param_multi_batch.detach().cpu().numpy())
    # cnn_ntg_image_warped_mulit_batch = affine_transform_opencv_2(source_image_var, theta_opencv.detach().cpu().numpy())

    vis.showImageBatch(source_image_var, normailze=True, win='source_image_batch', title='source_image_batch')
    vis.showImageBatch(target_image_var, normailze=True, win='target_image_batch', title='target_image_batch')
    vis.showImageBatch(cnn_image_warped_batch, normailze=True, win='cnn_image_warped_batch', title='cnn_image_warped_batch')
    # Running plain NTG directly across different channels can simply fail
    # vis.showImageBatch(ntg_image_warped_batch, normailze=True, win='warped_image_batch', title='warped_image_batch')
    vis.showImageBatch(cnn_ntg_image_warped_mulit_batch, normailze=True,
                       win='cnn_ntg_param_multi_batch', title='cnn_ntg_param_multi_batch')
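# A minimal usage sketch for register_images (not part of the original code); the image
# paths are hypothetical, and the module-level ntg_checkpoint_path must already point at a
# trained checkpoint before this is called.
def demo_register_images():
    register_images(
        source_image_path='datasets/row_data/multispectral/Is.jpg',   # hypothetical paths
        target_image_path='datasets/row_data/multispectral/It.jpg',
        use_cuda=torch.cuda.is_available())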