Example #1
class MeshOptTest:
    def __init__(self,
                 lambda0=0,
                 cg_tol=1e-3,
                 cg_max_iter=1000,
                 n_newton_steps=1,
                 beta=0.7):
        self.shared_data_dir = '../../shared_data_midres'
        self.data_root_dir = '/data/zhenglin/poses_v3'
        self.res = 128
        self.lambda0 = lambda0
        self.cg_tol = cg_tol
        self.cg_max_iter = cg_max_iter
        self.n_newton_steps = n_newton_steps
        self.beta = beta
        self.pd_offset_dir = 'opt_test'
        self.gt_offset_dir = join(self.data_root_dir, 'midres_offset_npys')
        self.skin_vt_dir = join(self.data_root_dir, 'midres_skin_npys')
        self.cr_vt_dir = 'opt_test'
        self.out_obj_dir = 'opt_test'

        self.device = torch.device('cuda:0')
        self.dtype = torch.double

        self.offset_manager = OffsetManager(self.shared_data_dir,
                                            ctx={
                                                'data_root_dir':
                                                self.data_root_dir,
                                                'offset_img_size': self.res,
                                                'device': self.device
                                            })
        self.offset_io_manager = OffsetIOManager(
            res_ctx={
                'skin_dir': None,
                'shared_data_dir': self.shared_data_dir
            })

        tshirt_obj_path = os.path.join(self.shared_data_dir, 'flat_tshirt.obj')
        tshirt_obj = read_obj(tshirt_obj_path)
        self.rest_vts = torch.from_numpy(tshirt_obj.v).to(device=self.device,
                                                          dtype=self.dtype)
        self.fcs = torch.from_numpy(tshirt_obj.f).to(device=self.device,
                                                     dtype=torch.long)

        front_vt_ids = np.loadtxt(
            join(self.shared_data_dir, 'front_vertices.txt')).astype(np.int32)
        back_vt_ids = np.loadtxt(
            join(self.shared_data_dir, 'back_vertices.txt')).astype(np.int32)
        self.stepper = NewtonStepper(self.rest_vts,
                                     self.fcs,
                                     front_vt_ids=front_vt_ids,
                                     back_vt_ids=back_vt_ids,
                                     bdry_ids=None,
                                     lambda0=lambda0,
                                     cg_tol=self.cg_tol,
                                     cg_max_iter=self.cg_max_iter)
        self.system = self.stepper.system

        self.verbose = True
Example #2
class ImgOptTest:
    def __init__(self, lambda0=0, cg_tol=1e-3):
        self.shared_data_dir = '../../shared_data_midres'
        self.data_root_dir = '/data/zhenglin/poses_v3'
        self.res = 128
        self.lambda0 = lambda0
        self.cg_tol = cg_tol
        self.cg_max_iter = 1000
        self.skin_img_dir = join(self.data_root_dir,
                                 'midres_skin_imgs_{}'.format(self.res))
        self.pd_img_dir = 'opt_test'
        self.gt_img_dir = join(self.data_root_dir,
                               'midres_offset_imgs_{}'.format(128))
        self.cr_img_dir = 'opt_test'
        self.out_obj_dir = 'opt_test'

        self.l0 = get_l0(self.shared_data_dir, 128)

        self.device = torch.device('cuda:0')
        self.dtype = torch.double
        mask_path = join(self.shared_data_dir,
                         'offset_img_mask_no_pad_{}.npy'.format(self.res))
        self.mask = load_np_img(mask_path).to(self.device, dtype=self.dtype)

        self.offset_manager = OffsetManager(self.shared_data_dir,
                                            ctx={
                                                'data_root_dir':
                                                self.data_root_dir,
                                                'offset_img_size': self.res,
                                                'device': self.device
                                            })
        self.offset_io_manager = OffsetIOManager(
            res_ctx={
                'skin_dir': None,
                'shared_data_dir': self.shared_data_dir
            })

        self.front_mask = self.mask[0, :, :]
        self.front_sum = torch.sum(self.front_mask).item()
        self.back_mask = self.mask[1, :, :]
        self.back_sum = torch.sum(self.back_mask).item()
        self.front_stepper = NewtonStepper(self.front_mask,
                                           self.l0,
                                           lambda0=self.lambda0,
                                           cg_tol=self.cg_tol,
                                           cg_max_iter=self.cg_max_iter)
        self.back_stepper = NewtonStepper(self.back_mask,
                                          self.l0,
                                          lambda0=self.lambda0,
                                          cg_tol=self.cg_tol,
                                          cg_max_iter=self.cg_max_iter)

        self.verbose = True
Example #3
def get_managers(res_ctx):
    # `ctx` and `mode` are assumed to be defined in the enclosing module scope.
    managers = {'loss_manager': LossManager(ctx=ctx)}
    if mode == 'eval':
        managers['offset_manager'] = OffsetManager(
            shared_data_dir=res_ctx['shared_data_dir'], ctx=ctx)
        managers['offset_io_manager'] = OffsetIOManager(res_ctx=res_ctx,
                                                        ctx=ctx)
    return managers
Example #4
class ImgSampleTest:
	def __init__(self):
		self.shared_data_dir='../../shared_data'
		self.data_root_dir='/data/zhenglin/poses_v3'
		self.offset_img_size=128
		self.device=torch.device('cuda:0')
		self.dtype=torch.double
		self.res='lowres'
		self.offset_manager=OffsetManager(shared_data_dir=self.shared_data_dir,ctx={'data_root_dir':self.data_root_dir,'offset_img_size':self.offset_img_size,'device':self.device,'dtype':self.dtype})
		# self.img_sample_both_sides=ImgSampleBothSidesModule(self.offset_manager)
		self.img_sample_both_sides=ImgSample2ndOrderBothSidesModule(self.offset_manager)
		# self.pd_offset_img_dir='opt_test'
		self.pd_offset_img_dir='../../rundir/lowres/xyz/eval_test'
		self.gt_offset_dir=join(self.data_root_dir,'{}_offset_npys'.format(self.res))
		self.gt_offset_img_dir=join(self.data_root_dir,'{}_offset_imgs_{}'.format(self.res,self.offset_img_size))
		self.out_img_dir='opt_test/img_sample'
		if not isdir(self.out_img_dir):
			os.makedirs(self.out_img_dir)

		mask=np.load(join(self.shared_data_dir,'offset_img_mask_{}.npy'.format(self.offset_img_size)))
		# print('mask',np.unique(mask))
		self.png_mask=get_png_mask_both_sides(mask)
		self.mask_sum=np.sum(mask)
Example #5
def get_managers(res_ctx):
    # `ctx` and `mode` are assumed to be defined in the enclosing module scope.
    managers = {
        'offset_manager':
        OffsetManager(shared_data_dir=res_ctx['shared_data_dir'], ctx=ctx),
        'loss_manager':
        LossManager(ctx=ctx)
    }
    if mode == 'eval':
        managers['offset_io_manager'] = OffsetIOManager(res_ctx=res_ctx,
                                                        ctx=ctx)
    if ctx['use_cvxpy'] or ctx['calc_vt_loss']:
        managers['img_sample_module'] = ImgSampleBothSidesModule(
            managers['offset_manager'])
        # managers['img_sample_module']=ImgSample2ndOrderBothSidesModule(managers['offset_manager'])
    return managers
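
Both `get_managers` variants read `ctx` and `mode` from the enclosing module, so a caller only supplies `res_ctx`. The sketch below is a hypothetical invocation, not code from the original project: the `ctx` keys are inferred from the accesses visible in these snippets and may not cover everything the real constructors need.

import torch

# Hypothetical module-level context for get_managers; keys are inferred from
# the snippets above and may be incomplete.
ctx = {
    'data_root_dir': '/data/zhenglin/poses_v3',
    'offset_img_size': 128,
    'device': torch.device('cuda:0'),
    'dtype': torch.double,
    'use_cvxpy': False,
    'calc_vt_loss': True,
}
mode = 'eval'
res_ctx = {'shared_data_dir': '../../shared_data_midres'}

managers = get_managers(res_ctx)
offset_manager = managers['offset_manager']
# img_sample_module is only present when use_cvxpy or calc_vt_loss is set
img_sample_module = managers.get('img_sample_module')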
Example #6
class ImgOptTest:
    def __init__(self, lambda0=0, cg_tol=1e-3):
        self.shared_data_dir = '../../shared_data_midres'
        self.data_root_dir = '/data/zhenglin/poses_v3'
        self.res = 128
        self.lambda0 = lambda0
        self.cg_tol = cg_tol
        self.cg_max_iter = 1000
        self.skin_img_dir = join(self.data_root_dir,
                                 'midres_skin_imgs_{}'.format(self.res))
        self.pd_img_dir = 'opt_test'
        self.gt_img_dir = join(self.data_root_dir,
                               'midres_offset_imgs_{}'.format(128))
        self.cr_img_dir = 'opt_test'
        self.out_obj_dir = 'opt_test'

        self.l0 = get_l0(self.shared_data_dir, 128)

        self.device = torch.device('cuda:0')
        self.dtype = torch.double
        mask_path = join(self.shared_data_dir,
                         'offset_img_mask_no_pad_{}.npy'.format(self.res))
        self.mask = load_np_img(mask_path).to(self.device, dtype=self.dtype)

        self.offset_manager = OffsetManager(self.shared_data_dir,
                                            ctx={
                                                'data_root_dir':
                                                self.data_root_dir,
                                                'offset_img_size': self.res,
                                                'device': self.device
                                            })
        self.offset_io_manager = OffsetIOManager(
            res_ctx={
                'skin_dir': None,
                'shared_data_dir': self.shared_data_dir
            })

        self.front_mask = self.mask[0, :, :]
        self.front_sum = torch.sum(self.front_mask).item()
        self.back_mask = self.mask[1, :, :]
        self.back_sum = torch.sum(self.back_mask).item()
        self.front_stepper = NewtonStepper(self.front_mask,
                                           self.l0,
                                           lambda0=self.lambda0,
                                           cg_tol=self.cg_tol,
                                           cg_max_iter=self.cg_max_iter)
        self.back_stepper = NewtonStepper(self.back_mask,
                                          self.l0,
                                          lambda0=self.lambda0,
                                          cg_tol=self.cg_tol,
                                          cg_max_iter=self.cg_max_iter)

        self.verbose = True

    def get_err(self, front_gt_img, front_pd_img, back_gt_img, back_pd_img):
        front_pd_err = get_total_error(front_pd_img, front_gt_img,
                                       self.front_mask).item()
        back_pd_err = get_total_error(back_pd_img, back_gt_img,
                                      self.back_mask).item()
        pd_err = (front_pd_err + back_pd_err) / (self.front_sum +
                                                 self.back_sum)
        return pd_err

    def get_egy(self, front_pd_img, back_pd_img):
        front_system = self.front_stepper.system
        front_edge_egy = front_system.get_edge_energy(front_pd_img).item()
        front_bend_egy = front_system.get_bend_energy(front_pd_img).item()
        front_cross_egy = front_system.get_cross_energy(front_pd_img).item()
        back_system = self.back_stepper.system
        back_edge_egy = back_system.get_edge_energy(back_pd_img).item()
        back_bend_egy = back_system.get_bend_energy(back_pd_img).item()
        back_cross_egy = back_system.get_cross_energy(back_pd_img).item()
        edge_egy, bend_egy, cross_egy = front_edge_egy + back_edge_egy, front_bend_egy + back_bend_egy, front_cross_egy + back_cross_egy
        return front_system.edge_k * edge_egy + front_system.bend_k * bend_egy + front_system.cross_k * cross_egy

    def test(self, sample_id, save_obj=False, save_diff=False):
        pd_img_path = join(self.pd_img_dir,
                           'pd_img_{:08d}.npy.gz'.format(sample_id))
        pd_img = load_np_img(pd_img_path).to(self.device, dtype=self.dtype)
        gt_img_path = join(self.gt_img_dir,
                           'offset_img_{:08d}.npy'.format(sample_id))
        gt_img = load_np_img(gt_img_path).to(self.device, dtype=self.dtype)
        skin_img_path = join(self.skin_img_dir,
                             'skin_img_{:08d}.npy.gz'.format(sample_id))
        skin_img = load_np_img(skin_img_path).to(self.device, dtype=self.dtype)
        pd_img += skin_img
        gt_img += skin_img

        cr_img_path = join(self.cr_img_dir,
                           'cr_img_{:08d}.npy'.format(sample_id))

        front_pd_img = pd_img[:3, :, :]
        front_gt_img = gt_img[:3, :, :]
        back_pd_img = pd_img[3:, :, :]
        back_gt_img = gt_img[3:, :, :]
        front_cr_img = front_pd_img
        back_cr_img = back_pd_img

        start_time = time.time()
        with torch.no_grad():
            front_cr_img, front_iters = self.front_stepper.step(front_cr_img)
            back_cr_img, back_iters = self.back_stepper.step(back_cr_img)
        end_time = time.time()

        total_time = end_time - start_time

        pd_err = self.get_err(front_gt_img, front_pd_img, back_gt_img,
                              back_pd_img)
        cr_err = self.get_err(front_gt_img, front_cr_img, back_gt_img,
                              back_cr_img)

        pd_egy = self.get_egy(front_pd_img, back_pd_img)
        cr_egy = self.get_egy(front_cr_img, back_cr_img)

        if save_obj:
            cr_img = torch.cat([front_cr_img, back_cr_img], dim=0)
            cr_vts = self.offset_manager.get_offsets_from_offset_imgs_both_sides(
                cr_img.unsqueeze(0))
            obj_path = join(self.out_obj_dir,
                            'cr_{:08d}.obj'.format(sample_id))
            self.offset_io_manager.write_cloth(cr_vts[0].cpu().numpy(),
                                               obj_path)
            if save_diff:
                save_offset_img(join(self.out_obj_dir,
                                     'cr_{:08d}.png'.format(sample_id)),
                                cr_img - pd_img,
                                self.mask,
                                img_stats={
                                    'min': -1e-3,
                                    'max': 1e-3
                                })

        data = {
            'time': total_time,
            'iters_front': front_iters,
            'iters_back': back_iters,
            'err_cr': cr_err,
            'egy_cr': cr_egy
        }
        data['err_pd'] = pd_err
        data['egy_pd'] = pd_egy
        return data
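
Assuming `ImgOptTest` is importable from (or defined in) the current script, a small driver loop might look like the sketch below; the sample ids and hyper-parameter values are placeholders, not settings from the original experiments.

if __name__ == '__main__':
    opt_test = ImgOptTest(lambda0=1e-2, cg_tol=1e-4)
    for sample_id in (0, 1, 2):  # placeholder sample ids
        stats = opt_test.test(sample_id, save_obj=True, save_diff=False)
        print('sample {:08d}: err_pd={:.6g} err_cr={:.6g} '
              'egy_pd={:.6g} egy_cr={:.6g} time={:.3f}s'.format(
                  sample_id, stats['err_pd'], stats['err_cr'],
                  stats['egy_pd'], stats['egy_cr'], stats['time']))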
Example #7
class MeshOptTest:
    def __init__(self,
                 lambda0=0,
                 cg_tol=1e-3,
                 cg_max_iter=1000,
                 n_newton_steps=1,
                 beta=0.7):
        self.shared_data_dir = '../../shared_data_midres'
        self.data_root_dir = '/data/zhenglin/poses_v3'
        self.res = 128
        self.lambda0 = lambda0
        self.cg_tol = cg_tol
        self.cg_max_iter = cg_max_iter
        self.n_newton_steps = n_newton_steps
        self.beta = beta
        self.pd_offset_dir = 'opt_test'
        self.gt_offset_dir = join(self.data_root_dir, 'midres_offset_npys')
        self.skin_vt_dir = join(self.data_root_dir, 'midres_skin_npys')
        self.cr_vt_dir = 'opt_test'
        self.out_obj_dir = 'opt_test'

        self.device = torch.device('cuda:0')
        self.dtype = torch.double

        self.offset_manager = OffsetManager(self.shared_data_dir,
                                            ctx={
                                                'data_root_dir':
                                                self.data_root_dir,
                                                'offset_img_size': self.res,
                                                'device': self.device
                                            })
        self.offset_io_manager = OffsetIOManager(
            res_ctx={
                'skin_dir': None,
                'shared_data_dir': self.shared_data_dir
            })

        tshirt_obj_path = os.path.join(self.shared_data_dir, 'flat_tshirt.obj')
        tshirt_obj = read_obj(tshirt_obj_path)
        self.rest_vts = torch.from_numpy(tshirt_obj.v).to(device=self.device,
                                                          dtype=self.dtype)
        self.fcs = torch.from_numpy(tshirt_obj.f).to(device=self.device,
                                                     dtype=torch.long)

        front_vt_ids = np.loadtxt(
            join(self.shared_data_dir, 'front_vertices.txt')).astype(np.int32)
        back_vt_ids = np.loadtxt(
            join(self.shared_data_dir, 'back_vertices.txt')).astype(np.int32)
        self.stepper = NewtonStepper(self.rest_vts,
                                     self.fcs,
                                     front_vt_ids=front_vt_ids,
                                     back_vt_ids=back_vt_ids,
                                     bdry_ids=None,
                                     lambda0=lambda0,
                                     cg_tol=self.cg_tol,
                                     cg_max_iter=self.cg_max_iter)
        self.system = self.stepper.system

        self.verbose = True

    def get_err(self, gt_vt, pd_vt):
        err = torch.sum(
            (gt_vt - pd_vt)**2, dim=1, keepdim=True) * self.system.m
        return torch.sqrt(torch.sum(err) / torch.sum(self.system.m)).item()

    def get_egy(self, vt):
        return self.system.get_total_egy(vt).item()

    def test(self, sample_id, save_obj=False):
        skin_vt_path = join(self.skin_vt_dir,
                            'skin_{:08d}.npy'.format(sample_id))
        skin_vt = torch.from_numpy(np.load(skin_vt_path)).to(
            device=self.device, dtype=self.dtype)
        pd_img_path = join(self.pd_offset_dir,
                           'pd_img_{:08d}.npy.gz'.format(sample_id))
        pd_img = load_np_img(pd_img_path).to(self.device, dtype=self.dtype)
        pd_offset = self.offset_manager.get_offsets_from_offset_imgs_both_sides(
            pd_img.unsqueeze(0)).squeeze().to(dtype=self.dtype)
        pd_vt = pd_offset + skin_vt
        gt_offset_path = join(self.gt_offset_dir,
                              'offset_{:08d}.npy'.format(sample_id))
        gt_offset = torch.from_numpy(np.load(gt_offset_path)).to(
            self.device, dtype=self.dtype)
        gt_vt = gt_offset + skin_vt

        cr_vt = pd_vt
        start_time = time.time()
        with torch.no_grad():
            lambda0 = self.lambda0
            for i in range(self.n_newton_steps):
                self.stepper.system.lambda0 = lambda0
                cr_vt, cg_iters = self.stepper.step(cr_vt, vt0=pd_vt)
                lambda0 *= self.beta
        end_time = time.time()

        total_time = end_time - start_time

        pd_err = self.get_err(pd_vt, gt_vt)
        cr_err = self.get_err(cr_vt, gt_vt)

        gt_egy = self.get_egy(gt_vt)
        pd_egy = self.get_egy(pd_vt)
        cr_egy = self.get_egy(cr_vt)

        if save_obj:
            obj_path = join(self.out_obj_dir,
                            'cr_{:08d}.obj'.format(sample_id))
            self.offset_io_manager.write_cloth(cr_vt.cpu().numpy(), obj_path)

        data = {'time': total_time, 'iters': cg_iters}
        data['err_cr'] = cr_err
        data['err_pd'] = pd_err
        data['egy_gt'] = gt_egy
        data['egy_pd'] = pd_egy
        data['egy_cr'] = cr_egy
        return data
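
A corresponding driver for `MeshOptTest` could look like the following sketch; again, the sample id, `lambda0`, and Newton settings are illustrative placeholders only.

if __name__ == '__main__':
    opt_test = MeshOptTest(lambda0=1e-2, n_newton_steps=3, beta=0.7)
    stats = opt_test.test(0, save_obj=True)  # placeholder sample id
    print('err_pd={:.6g} err_cr={:.6g} egy_gt={:.6g} egy_cr={:.6g} '
          'cg_iters={} time={:.3f}s'.format(
              stats['err_pd'], stats['err_cr'], stats['egy_gt'],
              stats['egy_cr'], stats['iters'], stats['time']))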
Example #8
                  if not ctx['use_patches'] else ctx['crop_size'],
                  use_coord_conv=ctx['use_coord_conv'],
                  use_up_conv=ctx['use_up_conv'],
                  use_skip_link=ctx['use_skip_link'],
                  use_multi_layer_loss=ctx['use_multi_layer_loss'],
                  init_channels=ctx['init_channels'],
                  output_channels=ctx['output_channels'],
                  init_size=ctx['init_size'],
                  n_res_blocks=ctx['n_res_blocks'],
                  use_dropout=ctx['use_dropout'],
                  relu_type=ctx['relu'])

cp = torch.load(ctx['cp'], map_location='cuda:0')
net.load_state_dict(cp['state_dict'])

offset_manager = OffsetManager(
    shared_data_dir=ctx['res_ctx']['shared_data_dir'], ctx=ctx)

for sample in samples:
    index = '{:08d}'.format(int(sample))
    input_rotate = torch.from_numpy(
        np.load(
            join(ctx['data_root_dir'],
                 'rotation_matrices/rotation_mat_{}.npy'.format(
                     index)))).float()
    predict = net(input_rotate)
    pd_vt_offsets = offset_manager.get_offsets_from_offset_imgs_both_sides(
        predict)
    np.savetxt(join(ctx['eval_out_dir'], 'displace_{}.txt'.format(index)),
               pd_vt_offsets[0, :, :].detach().numpy())
    print('generated data {}'.format(index))
Example #9
class ImgSampleTest:
	def __init__(self):
		self.shared_data_dir='../../shared_data'
		self.data_root_dir='/data/zhenglin/poses_v3'
		self.offset_img_size=128
		self.device=torch.device('cuda:0')
		self.dtype=torch.double
		self.res='lowres'
		self.offset_manager=OffsetManager(shared_data_dir=self.shared_data_dir,ctx={'data_root_dir':self.data_root_dir,'offset_img_size':self.offset_img_size,'device':self.device,'dtype':self.dtype})
		# self.img_sample_both_sides=ImgSampleBothSidesModule(self.offset_manager)
		self.img_sample_both_sides=ImgSample2ndOrderBothSidesModule(self.offset_manager)
		# self.pd_offset_img_dir='opt_test'
		self.pd_offset_img_dir='../../rundir/lowres/xyz/eval_test'
		self.gt_offset_dir=join(self.data_root_dir,'{}_offset_npys'.format(self.res))
		self.gt_offset_img_dir=join(self.data_root_dir,'{}_offset_imgs_{}'.format(self.res,self.offset_img_size))
		self.out_img_dir='opt_test/img_sample'
		if not isdir(self.out_img_dir):
			os.makedirs(self.out_img_dir)

		mask=np.load(join(self.shared_data_dir,'offset_img_mask_{}.npy'.format(self.offset_img_size)))
		# print('mask',np.unique(mask))
		self.png_mask=get_png_mask_both_sides(mask)
		self.mask_sum=np.sum(mask)

	def img_test(self,sample_id):
		offset=np.load(join(self.gt_offset_dir,'offset_{:08d}.npy'.format(sample_id)))
		offsets=torch.from_numpy(offset).to(device=self.device,dtype=self.dtype).unsqueeze(0)
		offset_imgs=self.offset_manager.get_offset_imgs_from_offsets_both_sides(offsets)
		offset_img=offset_imgs[0].permute(1,2,0).cpu().numpy()
		with gzip.open(join('opt_test/img_sample','offset_img_{:08d}.npy.gz'.format(sample_id)),'wb') as f:
			np.save(file=f,arr=offset_img)
		save_png_img(join(self.out_img_dir,'ras_offset_img.png'),normalize_img(offset_img))

		with gzip.open(join(self.gt_offset_img_dir,'offset_img_{:08d}.npy.gz'.format(sample_id)),'rb') as f:
			gt_offset_img=np.load(file=f)
		save_png_img(join(self.out_img_dir,'gt_offset_img.png'),normalize_img(gt_offset_img))

		diff=offset_img-gt_offset_img
		print('diff:norm:',np.linalg.norm(diff),'max:',np.max(np.abs(diff)))
		# difference are on the boundary
		save_png_img(join(self.out_img_dir,'diff_offset_img.png'),normalize_img(offset_img-gt_offset_img))

		sample_offsets=self.offset_manager.get_offsets_from_offset_imgs_both_sides(offset_imgs)
		diff_offsets=sample_offsets-offsets
		print('diff_offset,norm:',torch.norm(diff_offsets).item(),'max:',torch.max(torch.abs(diff_offsets)).item(),'max_ratio:',torch.max(torch.norm(diff_offsets,dim=1)/torch.norm(offsets,dim=1)).item())
		# print('diff_offset:',torch.norm(diff_offsets[0],dim=1).detach().cpu().numpy().tolist())
		
		sample_offset_imgs=self.offset_manager.get_offset_imgs_from_offsets_both_sides(sample_offsets)
		diff2=sample_offset_imgs-offset_imgs
		print('diff2:norm:',torch.norm(diff2).item(),'max:',torch.max(torch.abs(diff2)).item())
		diff2_img=diff2[0].detach().permute(1,2,0).cpu().numpy()
		save_png_img(join(self.out_img_dir,'diff2_offset_img.png'),normalize_img(diff2_img))


	def test(self,sample_id):
		with gzip.open(join(self.pd_offset_img_dir,'pd_img_{:08d}.npy.gz'.format(sample_id))) as f:
			pd_offset_img=np.load(file=f)
		pd_offset_imgs=torch.from_numpy(pd_offset_img).permute(2,0,1).to(self.device,dtype=self.dtype).unsqueeze(0)
		print('pd_offset_imgs',pd_offset_imgs.size())
		gt_offset=np.load(join(self.gt_offset_dir,'offset_{:08d}.npy'.format(sample_id)))
		gt_offsets=torch.from_numpy(gt_offset).to(self.device,dtype=self.dtype).unsqueeze(0)
		loss_fn=torch.nn.MSELoss()

		pd_offset_imgs.requires_grad_(True)
		pd_offsets=self.offset_manager.get_offsets_from_offset_imgs_both_sides(pd_offset_imgs)
		loss=loss_fn(pd_offsets,gt_offsets)
		loss.backward()
		grad=pd_offset_imgs.grad[0].permute(1,2,0).cpu().numpy()
		print('dir grad,min',np.min(grad),'max',np.max(grad))
		save_png_img(join(self.out_img_dir,'dir_grad.png'),normalize_img(grad),self.png_mask)
		v=np.max(np.abs(grad))

		pd_offset_imgs.grad=None
		pd_offset_imgs.requires_grad_(True)
		pd_offsets=self.img_sample_both_sides(pd_offset_imgs)
		loss=loss_fn(pd_offsets,gt_offsets)
		loss.backward()
		grad=pd_offset_imgs.grad[0].permute(1,2,0).cpu().numpy()
		print('sample grad,min',np.min(grad),'max',np.max(grad))
		g=normalize_img(grad,v)
		print('g,min',np.min(g),'max',np.max(g))
		# save_png_img(join(self.out_img_dir,'sample_grad.png'),normalize_img(grad),self.png_mask)
		save_png_img(join(self.out_img_dir,'sample_grad.png'),normalize_img(grad),self.png_mask)

	def numeric_test(self,sample_id):
		pd_offset_imgs=torch.zeros((1,6,self.offset_img_size,self.offset_img_size),device=self.device,dtype=self.dtype)
		pd_offset_imgs.requires_grad_(True)
		pd_offsets=self.img_sample_both_sides(pd_offset_imgs)
		pd_offsets.requires_grad_(True)
		print('pd_offsets',np.unique(pd_offsets[0].detach().cpu().numpy()))
		gt_offsets=torch.ones_like(pd_offsets,device=self.device,dtype=self.dtype)
		loss_fn=torch.nn.MSELoss(reduction='sum')
		loss=loss_fn(pd_offsets,gt_offsets)
		# loss=torch.sum((gt_offsets-pd_offsets)**2)
		print('loss',loss.item())
		loss.backward()
		# offset_grad=pd_offsets.grad[0].cpu().numpy()
		# print('offset_grad',np.unique(offset_grad))
		img_grad=pd_offset_imgs.grad[0].permute(1,2,0).cpu().numpy()
		print('img_grad',np.unique(img_grad))
		offset_manager=self.img_sample_both_sides.offset_manager
		print('w min',torch.min(offset_manager.vt_ws_img).item(),'max',torch.max(offset_manager.vt_ws_img).item())
		front_wsum_img,back_wsum_img=torch.sum(offset_manager.vt_ws_img[:,:,:3],dim=2),torch.sum(offset_manager.vt_ws_img[:,:,3:],dim=2)
		print('front_ws',np.unique(front_wsum_img.cpu().numpy()))
		print('back_ws',np.unique(back_wsum_img.cpu().numpy()))
		print('mask',np.unique(offset_manager.mask.cpu().numpy()))

		save_png_img(join(self.out_img_dir,'numerics_grad.png'),normalize_img(img_grad),self.png_mask)

	def vt_test(self,sample_id):
		with gzip.open(join(self.gt_offset_img_dir,'offset_img_{:08d}.npy.gz'.format(sample_id))) as f:
			gt_offset_img=np.load(file=f)
		gt_offset_imgs=torch.from_numpy(gt_offset_img).permute(2,0,1).to(self.device,dtype=self.dtype).unsqueeze(0)
		gt_offset=np.load(join(self.gt_offset_dir,'offset_{:08d}.npy'.format(sample_id)))
		gt_offsets=torch.from_numpy(gt_offset).to(self.device,dtype=self.dtype).unsqueeze(0)

		ras_gt_offset_imgs=self.offset_manager.get_offset_imgs_from_offsets_both_sides(gt_offsets)
		sample_ras_offsets=self.offset_manager.get_offsets_from_offset_imgs_both_sides(ras_gt_offset_imgs)
		sample_offsets=self.offset_manager.get_offsets_from_offset_imgs_both_sides(gt_offset_imgs)

		print('sample error',torch.norm(sample_offsets-gt_offsets),'ras sample error',torch.norm(sample_ras_offsets-gt_offsets))

	def grad_test(self,sample_id):
		with gzip.open(join(self.pd_offset_img_dir,'{:08d}/pd_img_{:08d}.npy.gz'.format(sample_id,sample_id)),'rb') as f:
			pd_offset_img=np.load(file=f)
		pd_offset_imgs=torch.from_numpy(pd_offset_img).permute(2,0,1).to(device=self.device,dtype=self.dtype).unsqueeze(0)
		with gzip.open(join('opt_test/img_sample','offset_img_{:08d}.npy.gz'.format(sample_id))) as f:
			gt_offset_img=np.load(file=f)
		gt_offset_imgs=torch.from_numpy(gt_offset_img).permute(2,0,1).to(self.device,dtype=self.dtype).unsqueeze(0)
		mseloss=torch.nn.MSELoss(reduction='sum')
		pd_offset_imgs.requires_grad_(True)
		print('pd_offset_imgs',pd_offset_imgs.size())
		gt_loss=mseloss(pd_offset_imgs,gt_offset_imgs)/self.mask_sum
		gt_loss.backward()
		gt_grad=pd_offset_imgs.grad.clone()

		pd_offset_imgs.grad.zero_()
		pd_offset_imgs.requires_grad_(True)
		with open(join(self.gt_offset_dir,'offset_{:08d}.npy'.format(sample_id)),'rb') as f:
			gt_offset=np.load(file=f)
		n_vts=len(gt_offset)
		gt_offsets=torch.from_numpy(gt_offset).to(device=self.device,dtype=self.dtype).unsqueeze(0)
		pd_offsets=self.img_sample_both_sides(pd_offset_imgs)
		test_loss=mseloss(pd_offsets,gt_offsets)/n_vts
		print('gt_loss',gt_loss.item(),'test_loss',test_loss.item(),'n_vts',n_vts,'mask_sum',self.mask_sum)
		test_loss.backward()
		test_grad=pd_offset_imgs.grad

		diff_grad=test_grad-gt_grad
		diff_norm=torch.norm(diff_grad).item()
		diff_max=torch.max(diff_grad).item()
		print('norm1:',torch.norm(gt_grad).item(),'norm2:',torch.norm(test_grad).item())
		print('diff:norm',diff_norm,'max:',diff_max)
		diff_img=diff_grad[0].detach().permute(1,2,0).cpu().numpy()
		save_png_img(join(self.out_img_dir,'diff_grad2.png'),normalize_img(diff_img),self.png_mask)
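
A minimal entry point that runs the `ImgSampleTest` checks on a single sample could look like the sketch below; the sample id is a placeholder.

if __name__ == '__main__':
    sample_test = ImgSampleTest()
    sample_id = 15001  # placeholder id
    sample_test.img_test(sample_id)      # rasterized vs. stored ground-truth images
    sample_test.vt_test(sample_id)       # offsets -> images -> offsets round-trip error
    sample_test.numeric_test(sample_id)  # gradient sanity check on a zero image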
Example #10
# gt_path='/data/yxjin/poses_v3/lowres_texture_imgs_512/offset_img_00015001.npy.gz'
# gt_img=np.load(gzip.open(gt_path))

# arr_stats={'minval':np.full(6,np.min(gt_img)),'maxval':np.full(6,np.max(gt_img))}

# front_offset_img=np.transpose(offset_img[:2,:,:],(1,2,0))
# gt_front_offset_img=gt_img[:,:,:2]
# front_arr_stats={name:stats[:3] for name,stats in arr_stats.items()}
# gt_front_rgb_img=from_offset_img_to_rgb_img(gt_front_offset_img,front_mask,arr_stats=front_arr_stats)[:, 0:1024]
# front_rgb_img=from_offset_img_to_rgb_img(front_offset_img,front_mask,arr_stats=front_arr_stats)[:, 0:1024]

# back_offset_img=np.transpose(offset_img[2:4,:,:],(1,2,0))
# gt_back_offset_img=gt_img[:,:,2:4]
# back_arr_stats={name:stats[3:] for name,stats in arr_stats.items()}
# gt_back_rgb_img=from_offset_img_to_rgb_img(gt_back_offset_img,back_mask,arr_stats=back_arr_stats)[:, 0:1024]
# back_rgb_img=from_offset_img_to_rgb_img(back_offset_img,back_mask,arr_stats=back_arr_stats)[:, 0:1024]

# output_img=np.concatenate([front_rgb_img, back_rgb_img], axis=1)
# gt_output_img=np.concatenate([gt_front_rgb_img, gt_back_rgb_img], axis=1)
# output_img=np.concatenate([gt_output_img,output_img],axis=0)
# im = Image.fromarray(output_img)
# im.save("output_img.jpeg")

# convert back to vertex displacement
# (`ctx`, `net` and `input_rotate` are assumed to be defined earlier in the script)
offset_manager = OffsetManager(
    shared_data_dir=
    '/data/yxjin/PhysBAM/Private_Projects/cloth_texture/pixel_network/Learning/shared_data_highres',
    ctx=ctx)
predict = net(input_rotate)
pd_vt_offsets = offset_manager.get_offsets_from_offset_imgs_both_sides(predict)
np.savetxt('displace_test.txt', pd_vt_offsets[0, :, :].detach().numpy())