def check_transformation():
    """Smoke-test SliceDataset preprocessing on one sample.

    Loads the pdd_sliced dataset with test options, then for the first sample:
    prints the value ranges of the transformed (B) and original (B_original)
    slices, saves both as PNGs under plots/, counts equal pixels, and reports
    angular errors between the transformed and original data.

    Manual debugging aid only; has no return value.
    """
    args = [
        '--dataroot', '/data/mri/data/pdd_sliced',
        '--fineSize', '128',
        '--input_nc', '1',
        '--input_channels', '0',
        '--data_suffix', 'npy',
        '--T', '1',
    ]
    opt = TestOptions().parse(args)
    opt.same_hemisphere = True
    dataset = SliceDataset(opt)
    for i, d in enumerate(dataset):
        print('B:', d['B'].min(), d['B'].max(),
              'B_original:', d['B_original'].min(), d['B_original'].max())
        np_B = util.tensor2np(d['B'])
        np_B_original = util.tensor2np(d['B_original'])
        print('np_B:', np_B.min(), np_B.max(),
              'np_B_original:', np_B_original.min(), np_B_original.max())
        util.save_image(np_B, 'plots/%d_B_t.png' % i)
        util.save_image(np_B_original, 'plots/%d_B_real.png' % i)
        print('angular error')
        # Second term is the trivially-true self-comparison, printed as the
        # baseline total pixel count for the first term.
        print(np.equal(np_B, np_B_original).sum(), np.equal(np_B, np_B).sum())
        # Rescale from [0, 1] to [-1, 1] before computing angular errors.
        np_B = np_B * 2 - 1
        np_B_original = np_B_original * 2 - 1
        angular_errors(np_B, np_B)  # sanity check: error vs. itself should be ~0
        print('-----------------')
        angular_errors(np_B, np_B_original)
        if i == 0:
            break
def forward(self, in0, in1):
    """Return the per-sample L2 distance between ``in0`` and ``in1``.

    Args:
        in0, in1: (N, C, X, Y) tensors.

    Returns:
        Shape-(N,) tensor. For 'RGB' and 'Gray' this is the mean squared
        error over channel and spatial dims; for 'Lab' it is computed in
        Lab space via util.l2 (batch size 1 only).

    Raises:
        ValueError: if ``self.colorspace`` is not 'RGB', 'Gray', or 'Lab'.
    """
    if self.colorspace in ('RGB', 'Gray'):
        if self.colorspace == 'Gray':
            in0 = util.tensor2tensorGrayscale(in0)
            in1 = util.tensor2tensorGrayscale(in1)
        # Equivalent to the old nested mean/view chain: MSE over (C, X, Y).
        return torch.mean((in0 - in1) ** 2, dim=(1, 2, 3))
    if self.colorspace == 'Lab':
        assert in0.size()[0] == 1  # currently only supports batchSize 1
        value = util.l2(
            util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False)),
            util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False)),
            range=100.).astype('float')
        ret_var = Variable(torch.Tensor((value, )))
        if self.use_gpu:
            ret_var = ret_var.cuda()
        return ret_var
    # Previously an unknown colorspace silently returned None.
    raise ValueError('unsupported colorspace: %s' % self.colorspace)
def test(self):
    """Run the parent test step (which calls forward()) and return image
    quality metrics comparing the real target against the generated output.

    Returns:
        dict with 'PSNR' and 'SSIM' for fake_B vs. real_B.
    """
    super().test()  # forward() method is called by this
    target = util.tensor2np(self.real_B)
    prediction = util.tensor2np(self.fake_B.detach())
    is_multichannel = len(target.shape) == 3
    return {
        'PSNR': self.PSNR(target, prediction, 2.),
        'SSIM': self.SSIM(target, prediction,
                          multichannel=is_multichannel, data_range=2),
    }
def forward(self, in0, in1):
    """Return the DSSIM between ``in0`` and ``in1`` as a 1-element tensor.

    Only batch size 1 is supported. The metric is computed on CPU via numpy
    (util.dssim); the result is moved to GPU when ``self.use_gpu`` is set.

    Raises:
        ValueError: if ``self.colorspace`` is not 'RGB' or 'Lab'.
    """
    assert in0.size()[0] == 1  # currently only supports batchSize 1
    if self.colorspace == 'RGB':
        value = util.dssim(1. * util.tensor2im(in0.data),
                           1. * util.tensor2im(in1.data),
                           range=255.).astype('float')
    elif self.colorspace == 'Lab':
        value = util.dssim(
            util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False)),
            util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False)),
            range=100.).astype('float')
    else:
        # Previously an unknown colorspace fell through to a NameError on
        # `value`; fail explicitly instead.
        raise ValueError('unsupported colorspace: %s' % self.colorspace)
    ret_var = Variable(torch.Tensor((value, )))
    if self.use_gpu:
        ret_var = ret_var.cuda()
    return ret_var
def forward(self, in0, in1):
    """Return the DSSIM between ``in0`` and ``in1`` as a 1-element tensor.

    Only batch size 1 is supported. The metric is computed on CPU via numpy
    (util.dssim); the result is moved to GPU when ``self.use_gpu`` is set.

    Raises:
        ValueError: if ``self.colorspace`` is not 'RGB' or 'Lab'.
    """
    assert in0.size()[0] == 1  # currently only supports batchSize 1
    if self.colorspace == 'RGB':
        value = util.dssim(1. * util.tensor2im(in0.data),
                           1. * util.tensor2im(in1.data),
                           range=255.).astype('float')
    elif self.colorspace == 'Lab':
        value = util.dssim(
            util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False)),
            util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False)),
            range=100.).astype('float')
    else:
        # Previously an unknown colorspace fell through to a NameError on
        # `value`; fail explicitly instead.
        raise ValueError('unsupported colorspace: %s' % self.colorspace)
    ret_var = Variable(torch.Tensor((value, )))
    if self.use_gpu:
        ret_var = ret_var.cuda()
    return ret_var
def forward(self, in0, in1):
    """Return the L2 distance between ``in0`` and ``in1``.

    Only batch size 1 is supported. For 'RGB' this is the mean squared
    error over channel and spatial dims (shape (N,) tensor); for 'Lab' it
    is computed in Lab space via util.l2 as a 1-element tensor.

    Raises:
        ValueError: if ``self.colorspace`` is not 'RGB' or 'Lab'.
    """
    assert in0.size()[0] == 1  # currently only supports batchSize 1
    if self.colorspace == 'RGB':
        # Equivalent to the old nested mean/view chain: MSE over (C, X, Y).
        return torch.mean((in0 - in1) ** 2, dim=(1, 2, 3))
    if self.colorspace == 'Lab':
        value = util.l2(
            util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False)),
            util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False)),
            range=100.).astype('float')
        ret_var = Variable(torch.Tensor((value, )))
        if self.use_gpu:
            ret_var = ret_var.cuda()
        return ret_var
    # Previously an unknown colorspace fell through to a NameError on
    # `value`; fail explicitly instead.
    raise ValueError('unsupported colorspace: %s' % self.colorspace)
def get_current_numpy(self, idx):
    """Return the current real_A / fake_B / real_B images at index ``idx``
    as numpy arrays, keyed by name in an OrderedDict."""
    named_tensors = [
        ('real_A', self.real_A),
        ('fake_B', self.fake_B),
        ('real_B', self.real_B),
    ]
    return OrderedDict(
        (name, util.tensor2np(tensor.data, idx)) for name, tensor in named_tensors
    )
def pose2img(pose, colors, H=512, W=512):
    """Render a pose tensor to an H x W image.

    Converts the pose to numpy, maps joints to pixel coordinates, draws them
    with ``colors``, and flips the result vertically before returning it.
    """
    joints = tensor2np(pose)
    pixels = pose2pix(joints, H)
    canvas = joints2image(pixels, colors, H, W)
    return cv2.flip(canvas, 0)