Example 1
    def __init__(self, src_V, src_F, tar_V, tar_F):
        """Set up a rigid deformation loss from a source mesh to a target.

        Registers (tar_V, tar_F) as a deformation template with the native
        pyDeform library, then normalizes the source vertices by that
        template and stores their rigidity information.
        """
        super(RigidLossLayer, self).__init__()

        # Handle returned by the native library; later pyDeform calls use
        # it (as a plain int) to identify this template.
        self.param_id = torch.tensor(
            pyDeform.InitializeDeformTemplate(tar_V, tar_F, 0, 64))

        pid = self.param_id.tolist()
        pyDeform.NormalizeByTemplate(src_V, pid)
        pyDeform.StoreRigidityInformation(src_V, src_F, pid)
Example 2
	def __init__(self, V1, F1, graph_V1, graph_E1,
		V2, F2, graph_V2, graph_E2,
		rigidity, d=torch.device('cpu')):
		"""Set up a symmetric graph deformation loss between two meshes.

		Registers both meshes as deformation templates, normalizes each
		deformation graph by its own template, stores the graph topology
		with the native library, and caches the squared rigidity weight.
		"""
		super(GraphLoss2Layer, self).__init__()

		# Record the requested device in the module-level global used
		# elsewhere in this file.
		global device
		device = d

		# Native-library handles identifying each registered template.
		self.param_id1 = torch.tensor(
			pyDeform.InitializeDeformTemplate(V1, F1, 0, 64))
		self.param_id2 = torch.tensor(
			pyDeform.InitializeDeformTemplate(V2, F2, 0, 64))

		id1 = self.param_id1.tolist()
		id2 = self.param_id2.tolist()
		pyDeform.NormalizeByTemplate(graph_V1, id1)
		pyDeform.NormalizeByTemplate(graph_V2, id2)
		pyDeform.StoreGraphInformation(graph_V1, graph_E1, id1)
		pyDeform.StoreGraphInformation(graph_V2, graph_E2, id2)

		# Squared rigidity coefficient, precomputed once.
		self.rigidity2 = torch.tensor(rigidity * rigidity)
Example 3
    def __init__(self,
                 src_V,
                 src_E,
                 tar_V,
                 tar_F,
                 rigidity,
                 d=torch.device('cpu')):
        """Set up a graph-based deformation loss toward (tar_V, tar_F).

        Registers the target mesh as a deformation template, normalizes the
        source graph vertices by it, stores the source graph topology, and
        caches the squared rigidity weight.
        """
        super(GraphLossLayer, self).__init__()

        # Record the requested device in the module-level global used
        # elsewhere in this file.
        global device
        device = d

        # Native-library handle identifying the registered template.
        self.param_id = torch.tensor(
            pyDeform.InitializeDeformTemplate(tar_V, tar_F, 0, 64))

        pid = self.param_id.tolist()
        pyDeform.NormalizeByTemplate(src_V, pid)
        pyDeform.StoreGraphInformation(src_V, src_E, pid)

        # Squared rigidity coefficient, precomputed once.
        self.rigidity2 = torch.tensor(rigidity * rigidity)
Example 4
def Finalize(src_V, src_F, src_E, src_to_graph, graph_V, rigidity, param_id):
	"""Transfer the deformed graph back onto the dense source mesh in place.

	Normalizes src_V by the template identified by param_id, runs the
	native linear solve that maps graph vertices to mesh vertices, then
	undoes the normalization. src_V is modified in place.
	"""
	pid = param_id.tolist()
	pyDeform.NormalizeByTemplate(src_V, pid)
	pyDeform.SolveLinear(src_V, src_F, src_E, src_to_graph, graph_V, rigidity)
	pyDeform.DenormalizeByTemplate(src_V, pid)
Example 5
# Graph-based non-rigid deformation driver (truncated excerpt).
output_path = args.output
rigidity = float(args.rigidity)
# Load both CAD meshes with their deformation graphs (vertices, faces,
# edges, dense-vertex-to-graph map, graph vertices, graph edges).
src_V, src_F, src_E, src_to_graph, graph_V, graph_E\
 = pyDeform.LoadCadMesh(source_path)

tar_V, tar_F, tar_E, tar_to_graph, graph_V_tar, graph_E_tar\
 = pyDeform.LoadCadMesh(reference_path)

# Forward term: deform the source graph toward the target template.
graph_deform = GraphLossLayer(graph_V, graph_E, tar_V, tar_F, rigidity)
param_id = graph_deform.param_id
# Reverse term: pulls target graph nodes toward the deformed source.
reverse_deform = ReverseLossLayer()

# The source graph vertices are the quantity being optimized.
graph_V = nn.Parameter(graph_V)
optimizer = optim.Adam([graph_V], lr=1e-3)

# Put the target graph in the same normalized frame as the source
# (GraphLossLayer already normalized graph_V by this template).
pyDeform.NormalizeByTemplate(graph_V_tar, param_id.tolist())
loss_src2tar, loss_tar2src = None, None
niter = 10000
prev_loss_src, prev_loss_tar = 1e30, 1e30
for it in range(0, niter):
    optimizer.zero_grad()
    loss_src2tar = graph_deform(graph_V, graph_E)
    loss_tar2src = reverse_deform(graph_V, graph_V_tar)
    # Normalize each term by its vertex count so both sides weigh equally.
    loss = loss_src2tar / graph_V.shape[0] + loss_tar2src / graph_V_tar.shape[0]
    loss.backward()
    optimizer.step()

    if it % 100 == 0:
        # RMS per-vertex losses, reported every 100 iterations.
        current_loss_src = np.sqrt(loss_src2tar.item() / graph_V.shape[0])
        current_loss_tar = np.sqrt(loss_tar2src.item() / graph_V_tar.shape[0])
        # NOTE(review): the excerpt is cut off here — the format arguments
        # of this print statement are missing from the captured source.
        print('iter=%d, loss_src2tar=%.6f loss_tar2src=%.6f' %
Example 6
        # Progress report: RMS per-vertex forward/backward losses for both
        # deformation directions. (Excerpt begins mid-loop; the enclosing
        # training loop is not visible here.)
        print(
            'iter=%d, loss1_forward=%.6f loss1_backward=%.6f loss2_forward=%.6f loss2_backward=%.6f'
            % (it, np.sqrt(loss1_forward.item() / GV1.shape[0]),
               np.sqrt(loss1_backward.item() / GV2.shape[0]),
               np.sqrt(loss2_forward.item() / GV2.shape[0]),
               np.sqrt(loss2_backward.item() / GV1.shape[0])))

        current_loss = loss.item()

# Optionally checkpoint the deformation network and optimizer state.
if save_path != '':
    torch.save({'func': func, 'optim': optimizer}, save_path)

# Deform the graph vertices with the trained network, then detach the
# result to a plain CPU tensor.
GV1_deformed = func.forward(GV1_device)
GV1_deformed = torch.from_numpy(GV1_deformed.data.cpu().numpy())
V1_copy = V1.clone()
#Finalize(V1_copy, F1, E1, V2G1, GV1_deformed, 1.0, param_id2)

# Normalize the dense source mesh into its template frame before
# running the network over it.
pyDeform.NormalizeByTemplate(V1_copy, param_id1.tolist())
V1_origin = V1_copy.clone()

#V1_copy = V1_copy.to(device)
# Run the network on CPU over the dense source vertices as well.
func.func = func.func.cpu()
V1_copy = func.forward(V1_copy)
V1_copy = torch.from_numpy(V1_copy.data.cpu().numpy())

# Identity correspondence: each dense vertex maps to itself.
src_to_src = torch.from_numpy(
    np.array([i for i in range(V1_origin.shape[0])]).astype('int32'))

pyDeform.SolveLinear(V1_origin, F1, E1, src_to_src, V1_copy, 1, 1)
# NOTE(review): normalization above used param_id1 but denormalization
# uses param_id2 — confirm this cross-template mapping is intentional.
pyDeform.DenormalizeByTemplate(V1_origin, param_id2.tolist())
pyDeform.SaveMesh(output_path, V1_origin, F1)
Example 7
import pyDeform

# Rigid deformation driver: optimize src mesh vertices toward tar mesh.
source_path = sys.argv[1]
reference_path = sys.argv[2]
output_path = sys.argv[3]
src_V, src_F = pyDeform.LoadMesh(source_path)
tar_V, tar_F = pyDeform.LoadMesh(reference_path)

# Forward rigidity loss plus reverse (target-to-source) coverage loss.
rigid_deform = RigidLossLayer(src_V, src_F, tar_V, tar_F)
reverse_deform = ReverseLossLayer()

param_id = rigid_deform.param_id
# The source vertices are the quantity being optimized.
src_V = nn.Parameter(src_V)

optimizer = optim.Adam([src_V], lr=1e-3)
# Put the target in the same normalized template frame as the source
# (RigidLossLayer already normalized src_V by this template).
pyDeform.NormalizeByTemplate(tar_V, param_id.tolist())
niter = 10000
prev_loss = 1e30
for it in range(0, niter):
    optimizer.zero_grad()
    loss_src2tar = rigid_deform(src_V, src_F)
    loss_tar2src = reverse_deform(src_V, tar_V)
    # The rigidity term is weighted 10x against the reverse term.
    loss = loss_src2tar * 10 + loss_tar2src
    loss.backward()
    optimizer.step()
    if it % 100 == 0:
        l = loss.item()
        print('iter=%d loss=%.6f loss_tar2src=%.6f' %
              (it, l, np.sqrt((loss_tar2src / tar_V.shape[0]).item())))
        # Early stop once the loss increases. NOTE(review): prev_loss is
        # never updated in the visible lines — excerpt likely truncated.
        if l > prev_loss:
            break