def optimization(ratio):
    lr0 = 0.0005
    lr = lr0
    folder_name = os.getcwd() + '/progress-b-1-%f/' % ratio
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)

    filename = os.getcwd() + '/../exp_bunny/setup/bunny_transient.mat'
    setup = scipy.io.loadmat(filename)
    gt_transient = np.array(setup['gt_transient'], dtype=np.double, order='C')
    gt_mesh = MESH()
    gt_mesh.v = igl.eigen.MatrixXd(np.array(setup['gt_v'], dtype=np.double))
    gt_mesh.f = igl.eigen.MatrixXd(np.array(setup['gt_f'],
                                            dtype=np.double)).castint()

    opt = OPT(20000)
    opt.max_distance_bin = gt_transient.shape[1]
    opt.smooth_weight = 0.001

    mesh = MESH()
    mesh_init_location = os.getcwd() + '/init/cnlos_bunny_threshold.obj'
    igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    opt.resolution = 64
    opt.smooth_ratio = ratio

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    old_v = np.array(mesh.v)
    rendering.compute_mesh_affinity(mesh)
    rendering.border_indicator(mesh)

    weight = rendering.create_weighting_function(gt_transient, opt.gamma)

    global_counter = 0
    old_v = np.array(mesh.v)
    old_f = np.array(mesh.f)

    global_counter, convergence_flag, l2_record = optimize_parameters.optimize_shape(
        mesh, gt_transient, weight, opt, 15, lr, gt_mesh, global_counter,
        folder_name)
    grad_v = np.array(mesh.v)

    rendering.el_topo_gradient(mesh, old_v)
    rendering.el_topo_remeshing(mesh, .5 / opt.resolution)
    eltopo_v = np.array(mesh.v)
    eltopo_f = np.array(mesh.f)

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    isotropic_v = np.array(mesh.v)
    isotropic_f = np.array(mesh.f)

    filename = folder_name + 'data.mat'
    scipy.io.savemat(filename,
                     mdict={
                         'old_v': old_v,
                         'old_f': old_f,
                         'grad_v': grad_v,
                         'eltopo_v': eltopo_v,
                         'eltopo_f': eltopo_f,
                         'isotropic_v': isotropic_v,
                         'isotropic_f': isotropic_f
                     })
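
# --- Hypothetical entry point (not part of the original script). A minimal
# sketch of how optimization(ratio) might be invoked from the command line,
# assuming the smoothness ratio is passed as the first argument; the actual
# repository may drive these functions differently.
if __name__ == '__main__':
    import sys
    # e.g. `python this_script.py 100` (hypothetical) runs with ratio = 100
    ratio = float(sys.argv[1]) if len(sys.argv) > 1 else 100.0
    optimization(ratio)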
def optimization(ratio):
    lr0 = 0.0001
    lr = lr0
    folder_name = os.getcwd() + '/progress-b-1-%f/' % ratio
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)

    # Simulated bunny: ground-truth transient and mesh are both available.
    filename = os.getcwd() + '/setup/bunny_transient_64_0.mat'
    setup = scipy.io.loadmat(filename)
    gt_transient = np.array(setup['gt_transient'], dtype=np.double, order='C')
    gt_mesh = MESH()
    gt_mesh.v = igl.eigen.MatrixXd(np.array(setup['gt_v'], dtype=np.double))
    gt_mesh.f = igl.eigen.MatrixXd(np.array(setup['gt_f'],
                                            dtype=np.double)).castint()

    opt = OPT(20000)
    opt.max_distance_bin = gt_transient.shape[1]
    opt.smooth_weight = 0.001

    # Initialization from the thresholded confocal NLOS reconstruction.
    mesh = MESH()
    mesh_init_location = os.getcwd() + '/init/cnlos_bunny_threshold_64.obj'
    igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    opt.resolution = 64
    opt.smooth_ratio = ratio

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    old_v = np.array(mesh.v)
    rendering.compute_mesh_affinity(mesh)
    rendering.border_indicator(mesh)

    weight = rendering.create_weighting_function(gt_transient, opt.gamma)

    # Backpropagate a dummy loss once so the .grad buffer is allocated;
    # optimize_alpha later writes renderer gradients into it directly.
    optimization_alpha = torch.Tensor([mesh.alpha])
    optimization_alpha.requires_grad_()
    optimizer_alpha = optim.Adam([optimization_alpha], lr=opt.alpha_lr)
    dummy_loss2 = optimization_alpha**2
    dummy_loss2.backward()

    global_counter = 0
    for t in range(3):
        if mesh.f.shape[0] > 250000:
            break
        # Alternate between optimizing alpha and the shape.
        global_counter, l2_record = optimize_parameters.optimize_alpha(
            mesh, gt_transient, weight, optimization_alpha, optimizer_alpha,
            opt, 50, global_counter)
        if t == 0:
            l2_0 = l2_record
        lr = (l2_record / l2_0) * lr0 * ((0.99)**(t / 50))
        print('new lr %f' % lr)
        old_v = np.array(mesh.v)
        global_counter, convergence_flag = optimize_parameters.optimize_shape(
            mesh, gt_transient, weight, opt, 15, lr, gt_mesh, global_counter)
        if convergence_flag:
            # Toggle the objective (testing_flag) on convergence.
            if opt.testing_flag == 1:
                opt.testing_flag = 0
            else:
                opt.testing_flag = 1
        rendering.el_topo_gradient(mesh, old_v)
        rendering.el_topo_remeshing(mesh, .5 / opt.resolution)
        rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
        rendering.compute_mesh_affinity(mesh)
        rendering.removeTriangle(mesh, opt)
        rendering.compute_mesh_affinity(mesh)
        rendering.border_indicator(mesh)
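
# --- Illustration only (not part of the original repository): the dummy
# backward pass above exists just to allocate the parameter's .grad buffer,
# so that a gradient computed outside autograd (here, by the renderer) can be
# written into .grad.data before optimizer.step(). A self-contained toy
# version of that pattern, with hypothetical names:
def _adam_external_grad_demo():
    """Toy sketch of stepping Adam with an externally supplied gradient."""
    import torch
    import torch.optim as optim
    x = torch.zeros(3, requires_grad=True)
    demo_optimizer = optim.Adam([x], lr=0.1)
    x.sum().backward()                            # allocates x.grad (all ones)
    x.grad.data = torch.tensor([1.0, -2.0, 0.5])  # inject an externally computed gradient
    demo_optimizer.step()                         # Adam update consumes the injected gradient
    return x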
def optimization(ratio):
    lr0 = 0.0001
    lr = lr0
    folder_name = os.getcwd() + '/progress-b-1-%f/' % ratio
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)

    # Measured data: only the transient is available, no ground-truth mesh.
    filename = os.getcwd() + '/transient.mat'
    setup = scipy.io.loadmat(filename)
    gt_transient = np.array(setup['transient'], dtype=np.double, order='C')

    opt = OPT(20000)
    opt.max_distance_bin = gt_transient.shape[1]
    opt.smooth_weight = 0.001
    opt.lighting = np.array(setup['lighting'], dtype=np.float32, order='C')
    # Sensor positions taken from the lighting grid (collocated scanning).
    opt.sensor = np.array(setup['lighting'], dtype=np.float32, order='C')
    opt.gt_mesh = False
    gt_mesh = MESH()

    mesh = MESH()
    mesh_init_location = os.getcwd() + '/cnlos_s_threshold.obj'
    igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    opt.resolution = 64
    opt.smooth_ratio = ratio

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    old_v = np.array(mesh.v)
    rendering.compute_mesh_affinity(mesh)
    rendering.border_indicator(mesh)

    weight = rendering.create_weighting_function(gt_transient, opt.gamma)
    mesh.albedo = optimize_parameters.initial_fitting_albedo(
        mesh, gt_transient, weight, opt)

    global_counter = 0
    l2 = np.empty(400)
    albedo = np.empty(400)
    resolution_cnt = 0
    for t in range(400):
        if mesh.f.shape[0] > 250000:
            break
        if t < 30 or resolution_cnt < 5:
            global_counter, l2_record = optimize_parameters.optimize_albedo(
                mesh, gt_transient, weight, opt, 50, global_counter,
                folder_name)
        if t == 0:
            l2_0 = l2_record
        # Rescale both learning rates by the relative loss; albedo_lr0 is
        # assumed to be the initial albedo learning rate stored on opt.
        opt.albedo_lr = (l2_record / l2_0) * opt.albedo_lr0 * ((0.99)**(t / 5))
        lr = (l2_record / l2_0) * lr0 * ((0.99)**(t / 5))
        print('new lr %f' % lr)
        old_v = np.array(mesh.v)
        global_counter, convergence_flag, l2_record = optimize_parameters.optimize_shape(
            mesh, gt_transient, weight, opt, 15, lr, gt_mesh, global_counter,
            folder_name)
        resolution_cnt += 1
        if resolution_cnt == 50:
            resolution_cnt = 0
            opt.resolution *= 1.5
            opt.sample_num *= 1.5
        if convergence_flag:
            if opt.testing_flag == 1:
                opt.testing_flag = 0
                opt.smooth_ratio = ratio / 10 + t / 100
                print('shading')
            else:
                opt.testing_flag = 1
                opt.smooth_ratio = ratio + t / 10
        rendering.el_topo_gradient(mesh, old_v)
        rendering.el_topo_remeshing(mesh, .5 / opt.resolution)
        rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
        rendering.compute_mesh_affinity(mesh)
        rendering.removeTriangle(mesh, opt)
        rendering.compute_mesh_affinity(mesh)
        rendering.border_indicator(mesh)

        filename = folder_name + '%05d.mat' % (t)
        scipy.io.savemat(filename,
                         mdict={
                             'v': mesh.v,
                             'f': mesh.f,
                             'albedo': mesh.albedo
                         })
        l2[t] = l2_record
        albedo[t] = mesh.albedo

    filename = folder_name + 'progress.mat'
    scipy.io.savemat(filename, mdict={'albedo': albedo, 'l2': l2})
def optimization(ratio):
    lr0 = 0.0001
    lr = lr0
    folder_name = os.getcwd() + '/progress-b-3-%f/' % ratio
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)

    test_num = 1
    filename = os.getcwd() + '/setup/bunny_transient_64_0_%d.mat' % test_num
    setup = scipy.io.loadmat(filename)
    gt_transient = np.array(setup['gt_transient'], dtype=np.double, order='C')
    gt_mesh = MESH()
    gt_mesh.v = igl.eigen.MatrixXd(np.array(setup['gt_v'], dtype=np.double))
    gt_mesh.f = igl.eigen.MatrixXd(np.array(setup['gt_f'],
                                            dtype=np.double)).castint()

    opt = OPT(20000)
    opt.max_distance_bin = gt_transient.shape[1]
    opt.smooth_weight = 0.001

    mesh = MESH()
    mesh_init_location = os.getcwd(
    ) + '/init/cnlos_bunny_threshold_64_0_%d.obj' % test_num
    igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    opt.resolution = 64
    opt.smooth_ratio = ratio

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    old_v = np.array(mesh.v)
    rendering.compute_mesh_affinity(mesh)
    rendering.border_indicator(mesh)

    weight = rendering.create_weighting_function(gt_transient, opt.gamma)

    global_counter = 0
    l2 = np.empty(500)
    alpha = np.empty(500)
    for t in range(500):
        if mesh.f.shape[0] > 250000:
            break
        global_counter, l2_record = optimize_parameters.optimize_alpha(
            mesh, gt_transient, weight, opt, 50, global_counter)
        if t == 0:
            l2_0 = l2_record
        lr = (l2_record / l2_0) * lr0 * ((0.99)**(t / 2))
        print('new lr %f' % lr)
        old_v = np.array(mesh.v)
        global_counter, convergence_flag, l2_record = optimize_parameters.optimize_shape(
            mesh, gt_transient, weight, opt, 15, lr, gt_mesh, global_counter)
        if convergence_flag:
            if opt.testing_flag == 1:
                opt.testing_flag = 0
                opt.smooth_ratio = ratio / 10 + t / 100
                print('shading')
            else:
                opt.testing_flag = 1
                opt.smooth_ratio = ratio + t / 10
        rendering.el_topo_gradient(mesh, old_v)
        rendering.el_topo_remeshing(mesh, .5 / opt.resolution)
        rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
        rendering.compute_mesh_affinity(mesh)
        rendering.removeTriangle(mesh, opt)
        rendering.compute_mesh_affinity(mesh)
        rendering.border_indicator(mesh)

        filename = folder_name + '%05d.mat' % (t)
        scipy.io.savemat(filename,
                         mdict={
                             'v': mesh.v,
                             'f': mesh.f,
                             'alpha': mesh.alpha
                         })
        l2[t] = l2_record
        alpha[t] = mesh.alpha

    filename = folder_name + 'progress.mat'
    scipy.io.savemat(filename, mdict={'alpha': alpha, 'l2': l2})
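
# --- Illustration only (not part of the original script): the shape learning
# rate above is rescaled every iteration by the current loss relative to the
# initial loss, with a slow exponential decay. A hypothetical standalone
# version of that schedule:
def _rescaled_lr(lr0, l2_current, l2_initial, t, decay=0.99, period=2.0):
    """Shrink the base rate as the transient loss drops, and decay it slowly
    over iterations (period matches the t/2 exponent used above)."""
    return (l2_current / l2_initial) * lr0 * decay**(t / period)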
def optimization(ratio):
    lr0 = 0.0001 / 3
    lr = lr0
    folder_name = os.getcwd() + '/progress-b-1-%f/' % ratio
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)

    resolution = 256
    batch = 16
    opt = OPT(20000, resolution)

    a = np.arange(opt.lighting.shape[0])
    b = np.split(a, batch)
    gt_transient = np.empty(
        (opt.resolution * opt.resolution, opt.max_distance_bin),
        dtype=np.double,
        order='C')
    for i, x in zip(range(len(b)), b):
        filename = os.getcwd() + '/setup/bunny_transient_%d_%d.mat' % (
            resolution, i)
        setup = scipy.io.loadmat(filename)
        gt_transient[x, :] = np.array(setup['gt_transient'],
                                      dtype=np.double,
                                      order='C')
    gt_mesh = MESH()
    gt_mesh.v = igl.eigen.MatrixXd(np.array(setup['gt_v'], dtype=np.double))
    gt_mesh.f = igl.eigen.MatrixXd(np.array(setup['gt_f'],
                                            dtype=np.double)).castint()

    opt.smooth_weight = 0.001

    mesh = MESH()
    mesh_init_location = os.getcwd(
    ) + '/init/cnlos_bunny_threshold_%d.obj' % resolution
    igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    opt.smooth_ratio = ratio

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    old_v = np.array(mesh.v)
    rendering.compute_mesh_affinity(mesh)
    rendering.border_indicator(mesh)

    optimization_v = torch.from_numpy(mesh.v[mesh.v_edge == 0, :])
    optimization_v.requires_grad_()
    optimization_v_edge = torch.from_numpy(mesh.v[mesh.v_edge == 1, :])
    optimization_v_edge.requires_grad_()

    weight = rendering.create_weighting_function(gt_transient, opt.gamma)

    optimizer = Adam_Modified([{
        'params': optimization_v
    }, {
        'params': optimization_v_edge,
        'lr': lr * opt.edge_lr_ratio
    }],
                              lr=lr)
    dummy_loss = torch.sum(optimization_v) + torch.sum(optimization_v_edge)
    dummy_loss.backward()

    scale_flag = False
    remesh_flag = False
    weight_flag = True
    run_count = 0
    l2_record = np.empty(opt.T)
    v2_record = np.empty(opt.T)
    l2_original_record = np.empty(opt.T)
    for t in range(opt.T):
        if remesh_flag:
            print('remesh')
            if mesh.f.shape[0] > 250000:
                l2_record = l2_record[range(t)]
                l2_original_record = l2_original_record[range(t)]
                v2_record = v2_record[range(t)]
                break
            eltopo_tic = time.time()
            rendering.el_topo_gradient(mesh, old_v)
            mesh.v = np.array(mesh.v, dtype=np.double, order='C')
            rendering.el_topo_remeshing(mesh, .5 / opt.resolution)
            rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
            rendering.compute_mesh_affinity(mesh)
            rendering.removeTriangle(mesh, opt)
            #rendering.keep_largest_connected_component(mesh)
            old_v = np.array(mesh.v)
            rendering.compute_mesh_affinity(mesh)
            rendering.border_indicator(mesh)
            optimization_v = torch.from_numpy(mesh.v[mesh.v_edge == 0, :])
            optimization_v.requires_grad_()
            optimization_v_edge = torch.from_numpy(mesh.v[mesh.v_edge == 1, :])
            optimization_v_edge.requires_grad_()
            weight = rendering.create_weighting_function(
                gt_transient, opt.gamma)
            optimizer = Adam_Modified([{
                'params': optimization_v
            }, {
                'params': optimization_v_edge,
                'lr': lr * opt.edge_lr_ratio
            }],
                                      lr=lr)
            dummy_loss = torch.sum(optimization_v) + torch.sum(
                optimization_v_edge)
            dummy_loss.backward()
            remesh_flag = False
            run_count = 0

        print('t = %d' % t)
        tic = time.time()
        transient = np.zeros(gt_transient.shape, dtype=np.double, order='C')
        grad = np.zeros(mesh.v.shape, dtype=np.double, order='C')
        tmp_opt = copy.deepcopy(opt)
        for i, x in zip(range(len(b)), b):
            tmp_opt.lighting = opt.lighting[x, :]
            tmp_opt.lighting_normal = opt.lighting_normal[x, :]
            transient[x, :], grad_tmp, length = rendering.inverseRendering(
                mesh, gt_transient[x, :], weight[x, :], tmp_opt)
            grad += grad_tmp
        grad /= len(b)

        smoothing_val, smoothing_grad = rendering.renderStreamedNormalSmoothing(
            mesh)
        l2, original_l2 = rendering.evaluate_loss_with_normal_smoothness(
            gt_transient, weight, transient, smoothing_val, mesh, opt)
        if weight_flag:
            opt.smooth_weight = original_l2 / smoothing_val / opt.smooth_ratio
            weight_flag = False
            print('new smooth weight %f' % opt.smooth_weight)
        if t > 0:
            lr = (original_l2 / l2_original_record[0]) * lr0 * (
                (0.99)**(t / 15))
            print('new lr %f' % lr)
        grad += opt.smooth_weight * smoothing_grad

        v2 = rendering.compute_v2(mesh.v, gt_mesh)
        print(
            '%05d update time: %8.8f L2 loss: %8.8f old_l2 loss: %8.8f v2: %8.8f'
            % (t, time.time() - tic, l2, original_l2, v2))
        l2_record[t] = l2
        l2_original_record[t] = original_l2
        v2_record[t] = v2

        filename = folder_name + '%05d.mat' % (t)
        scipy.io.savemat(filename,
                         mdict={
                             'v': mesh.v,
                             'f': mesh.f,
                             'transient': transient,
                             'l2': l2,
                             'l2_original': original_l2,
                             'grad': grad,
                             'smoothing_grad': smoothing_grad,
                             'sample_num': opt.sample_num
                         })

        run_count += 1
        if run_count > 2:
            if ((l2_original_record[t - 1] - original_l2) /
                    l2_original_record[t - 1]) < opt.loss_epsilon or (
                        (l2_record[t - 1] - l2) /
                        l2_record[t - 1]) < opt.loss_epsilon:
                if opt.testing_flag == 1:
                    opt.testing_flag = 0
                    opt.smooth_ratio = 10 + t / 100
                    print('shading based')
                else:
                    opt.testing_flag = 1
                    opt.resolution *= 1.5
                    opt.sample_num *= 1.5
                    opt.loss_epsilon /= 2
                    opt.smooth_ratio = ratio + t / 10
                    print('remesh %d' % opt.resolution)
                remesh_flag = True
                weight_flag = True
                #opt.gamma
                #weight = rendering.create_weighting_function(gt_transient, opt.gamma)
                #opt.smooth_ratio *= 1.5
                continue

        optimization_v.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 0, :]).float()
        optimization_v_edge.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 1, :]).float()
        optimizer.step()
        mesh.v[mesh.v_edge == 0, :] = np.array(optimization_v.data.numpy(),
                                               dtype=np.float32,
                                               order='C')
        mesh.v[mesh.v_edge == 1, :] = np.array(
            optimization_v_edge.data.numpy(), dtype=np.float32, order='C')
        if run_count == 15:
            remesh_flag = True

    filename = folder_name + 'loss_val.mat'
    scipy.io.savemat(filename,
                     mdict={
                         'l2': l2_record,
                         'l2_original_record': l2_original_record,
                         'v2_record': v2_record,
                         'weight': weight
                     })
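
# --- Note on the batching above (illustration, not part of the original
# script): np.split requires the number of scan positions to be exactly
# divisible by `batch`; otherwise it raises a ValueError, and np.array_split
# would be needed instead. For example:
#
#   >>> np.split(np.arange(6), 3)
#   [array([0, 1]), array([2, 3]), array([4, 5])]
#   >>> np.split(np.arange(7), 3)        # raises ValueError
#   >>> np.array_split(np.arange(7), 3)
#   [array([0, 1, 2]), array([3, 4, 5]), array([6])]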
exp_folder = os.getcwd() + '/weight_test2/'
if not os.path.isdir(exp_folder):
    os.mkdir(exp_folder)

#gamma_list = [-2,-0.5,0, 1, 2.5, 5, 7.5, 10]
gamma_list = [-2, -0.5, 0, 5, 10]
for gamma in gamma_list:
    print('%f' % (gamma))
    folder_name = exp_folder + 'progress_%f/' % (gamma)
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    #else:
    #    continue
    weight = rendering.create_weighting_function(gt_transient, gamma, 'i')

    mesh = MESH()
    # Initial guess: a flat 3x3 vertex grid at depth z, with all eight
    # triangles sharing the center vertex (index 8):
    #   3 --- 6 --- 2
    #   |     |     |
    #   7 --- 8 --- 5
    #   |     |     |
    #   0 --- 4 --- 1
    z = .38
    v = np.array([[-.25, -.25, z], [.25, -.25, z], [.25, .25, z],
                  [-.25, .25, z], [0, -.25, z], [.25, 0, z], [0, .25, z],
                  [-.25, 0, z], [0, 0, z]])
    f = np.array([[0, 8, 4], [0, 7, 8], [4, 8, 1], [1, 8, 5], [8, 2, 5],
                  [8, 6, 2], [7, 3, 8], [3, 6, 8]])
    mesh.v = np.array(v, dtype=np.float32, order='C')
    mesh.f = np.array(f, dtype=np.int32, order='C')
    #mesh_init_location = os.getcwd() + '/cnlos_bunny_threshold.obj'
    #igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    #mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    #mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    mesh_optimization = MESH()
    mesh_optimization.v = torch.from_numpy(mesh.v)
    mesh_optimization.v.requires_grad_()
def optimization(ratio):
    lr0 = 0.0001
    lr = lr0
    folder_name = os.getcwd() + '/progress-b-1-%f/' % ratio
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)

    opt = OPT(20000)

    # Measured transient: zero out the first 600 bins (direct bounce) and
    # flatten the scan grid to (resolution^2, 2048).
    filename = os.getcwd() + '/transient.mat'
    setup = scipy.io.loadmat(filename)
    gt_transient = np.array(setup['rect_data'], dtype=np.double, order='C')
    gt_transient[:, :, range(600)] = 0
    gt_transient = np.reshape(gt_transient,
                              (opt.resolution * opt.resolution, 2048))

    mesh = MESH()
    mesh_init_location = os.getcwd() + '/cnlos_s_threshold.obj'
    igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    old_v = np.array(mesh.v)
    rendering.compute_mesh_affinity(mesh)
    rendering.border_indicator(mesh)

    # Interior and border vertices are optimized with separate learning rates.
    optimization_v = torch.from_numpy(mesh.v[mesh.v_edge == 0, :])
    optimization_v.requires_grad_()
    optimization_v_edge = torch.from_numpy(mesh.v[mesh.v_edge == 1, :])
    optimization_v_edge.requires_grad_()

    weight = rendering.create_weighting_function(gt_transient, opt.gamma)

    optimizer = Adam_Modified([{
        'params': optimization_v
    }, {
        'params': optimization_v_edge,
        'lr': lr * opt.edge_lr_ratio
    }],
                              lr=lr)
    # Backpropagate a dummy loss once so .grad buffers exist; the renderer
    # gradients are written into .grad.data below before each step.
    dummy_loss = torch.sum(optimization_v) + torch.sum(optimization_v_edge)
    dummy_loss.backward()

    scale_flag = False
    remesh_flag = False
    weight_flag = True
    run_count = 0
    # No ground-truth mesh is available for measured data, so only the
    # transient losses are recorded (no vertex-error metric).
    l2_record = np.empty(opt.T)
    l2_original_record = np.empty(opt.T)
    for t in range(opt.T):
        if remesh_flag:
            print('remesh')
            if mesh.f.shape[0] > 250000:
                l2_record = l2_record[range(t)]
                l2_original_record = l2_original_record[range(t)]
                break
            eltopo_tic = time.time()
            rendering.el_topo_gradient(mesh, old_v)
            mesh.v = np.array(mesh.v, dtype=np.double, order='C')
            rendering.el_topo_remeshing(mesh, .5 / opt.resolution)
            rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
            rendering.removeTriangle(mesh, opt)
            #rendering.keep_largest_connected_component(mesh)
            old_v = np.array(mesh.v)
            rendering.compute_mesh_affinity(mesh)
            rendering.border_indicator(mesh)
            optimization_v = torch.from_numpy(mesh.v[mesh.v_edge == 0, :])
            optimization_v.requires_grad_()
            optimization_v_edge = torch.from_numpy(mesh.v[mesh.v_edge == 1, :])
            optimization_v_edge.requires_grad_()
            weight = rendering.create_weighting_function(
                gt_transient, opt.gamma)
            optimizer = Adam_Modified([{
                'params': optimization_v
            }, {
                'params': optimization_v_edge,
                'lr': lr * opt.edge_lr_ratio
            }],
                                      lr=lr)
            dummy_loss = torch.sum(optimization_v) + torch.sum(
                optimization_v_edge)
            dummy_loss.backward()
            remesh_flag = False
            run_count = 0

        print('t = %d' % t)
        tic = time.time()
        transient, grad, length = rendering.inverseRendering(
            mesh, gt_transient, weight, opt)
        smoothing_val, smoothing_grad = rendering.renderStreamedNormalSmoothing(
            mesh)
        l2, original_l2 = rendering.evaluate_loss_with_normal_smoothness(
            gt_transient, weight, transient, smoothing_val, mesh, opt)
        if weight_flag:
            opt.smooth_weight = original_l2 / smoothing_val / opt.smooth_ratio
            weight_flag = False
            print('new smooth weight %f' % opt.smooth_weight)
        if t > 0:
            lr = (original_l2 / l2_original_record[0]) * lr0 * (
                (0.99)**(t / 15))
            print('new lr %f' % lr)
        grad += opt.smooth_weight * smoothing_grad

        print('%05d update time: %8.8f L2 loss: %8.8f old_l2 loss: %8.8f' %
              (t, time.time() - tic, l2, original_l2))
        l2_record[t] = l2
        l2_original_record[t] = original_l2

        filename = folder_name + '%05d.mat' % (t)
        scipy.io.savemat(filename,
                         mdict={
                             'v': mesh.v,
                             'f': mesh.f,
                             'transient': transient,
                             'l2': l2,
                             'l2_original': original_l2,
                             'grad': grad,
                             'smoothing_grad': smoothing_grad,
                             'sample_num': opt.sample_num
                         })

        run_count += 1
        if run_count > 2:
            if ((l2_original_record[t - 1] - original_l2) /
                    l2_original_record[t - 1]) < opt.loss_epsilon or (
                        (l2_record[t - 1] - l2) /
                        l2_record[t - 1]) < opt.loss_epsilon:
                if opt.testing_flag == 1:
                    opt.testing_flag = 0
                    opt.smooth_ratio = 10 + t / 100
                    print('shading based')
                else:
                    opt.testing_flag = 1
                    opt.resolution *= 1.5
                    opt.sample_num *= 1.5
                    opt.loss_epsilon /= 2
                    opt.smooth_ratio = ratio + t / 10
                    print('remesh %d' % opt.resolution)
                remesh_flag = True
                weight_flag = True
                continue

        optimization_v.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 0, :]).float()
        optimization_v_edge.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 1, :]).float()
        optimizer.step()
        mesh.v[mesh.v_edge == 0, :] = np.array(optimization_v.data.numpy(),
                                               dtype=np.float32,
                                               order='C')
        mesh.v[mesh.v_edge == 1, :] = np.array(
            optimization_v_edge.data.numpy(), dtype=np.float32, order='C')
        if run_count == 15:
            remesh_flag = True

    filename = folder_name + 'loss_val.mat'
    scipy.io.savemat(filename,
                     mdict={
                         'l2': l2_record,
                         'l2_original_record': l2_original_record,
                         'weight': weight
                     })
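
# --- Hypothetical inspection snippet (not part of the original repository):
# the loss_val.mat written above can be examined offline, e.g. to plot the
# weighted and unweighted transient losses. Assumes matplotlib is available;
# the helper name is illustrative only.
def _plot_progress(folder_name):
    import scipy.io
    import matplotlib.pyplot as plt
    data = scipy.io.loadmat(folder_name + 'loss_val.mat')
    # loadmat returns 2-D arrays, so flatten before plotting
    plt.plot(data['l2'].ravel(), label='weighted L2')
    plt.plot(data['l2_original_record'].ravel(), label='unweighted L2')
    plt.xlabel('iteration')
    plt.ylabel('transient loss')
    plt.legend()
    plt.show()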