dummy_loss = torch.sum(mesh_optimization.v)
	dummy_loss.backward()

	l2_record = np.zeros(T)	
	v2_record = np.zeros(T)	
	l2_original_record = np.zeros(T)	
	run_count = 0

	for t in range(T):

	    print('t = %d'% t)    
	    tic = time.time()
	    transient, grad, pathlength = rendering.inverseRendering(mesh, gt_transient, weight, opt)	    
	    #grad += smooth_opt.weight*rendering.renderStreamedCurvatureGradient(mesh)
	    l2,  original_l2, intensity_loss = rendering.evaluate_loss_with_curvature(gt_transient, weight, transient, mesh, opt,  smooth_opt)  
	    v2 = rendering.compute_v2(mesh.v, gt_mesh)
	    print('%05d update time: %8.8f L2 loss: %8.8f  old_l2 loss: %8.8f v2: %8.8f'% (t, time.time() - tic, l2, original_l2, v2))
	    l2_record[t] = l2	
	    l2_original_record[t] = original_l2
	    v2_record[t] = v2
	    filename = folder_name + '%05d.mat'%(t)
	    scipy.io.savemat(filename, mdict={'v':mesh.v, 'f':mesh.f, 'transient':transient, 'l2':l2, 'l2_original':original_l2, 'grad': grad, 'w_width': opt.w_width, 'sample_num': opt.sample_num})

	    mesh_optimization.v.grad.data = torch.from_numpy(grad).float()
	    optimizer.step()
	    mesh.v = np.array(mesh_optimization.v.data.numpy(), dtype=np.float32, order='C')
	  
	    mesh_optimization.v.data = torch.from_numpy(mesh.v)
	    run_count += 1
	    if run_count > 2:
	        if ((l2_record[t-1] - l2)/l2_record[t-1])< loss_epsilon:
# Example #2
# 0
def optimization(ratio):
    """Transient-based shape optimization loop for the bunny scene.

    `ratio` controls the smoothness-to-data trade-off (stored in
    opt.smooth_ratio) and names the output folder.  Every iteration dumps
    progress (mesh, rendered transient, losses, gradients) to a .mat file
    under ./progress-b-1-<ratio>/.
    """
    # Base learning rate; `lr` is re-derived below whenever the smoothing
    # weight is refreshed (see the weight_flag handling in the loop).
    lr0 = 0.0001 / 3
    lr = lr0
    folder_name = os.getcwd() + '/progress-b-1-%f/' % ratio
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)

    resolution = 256
    batch = 16

    opt = OPT(20000, resolution)
    # Split the lighting indices into `batch` equal chunks; the ground-truth
    # transients were saved per chunk, and rendering below iterates the same
    # chunks.  np.split requires an even division — TODO confirm
    # opt.lighting.shape[0] % batch == 0 for this setup.
    a = np.arange(opt.lighting.shape[0])
    b = np.split(a, batch)

    gt_transient = np.empty(
        (opt.resolution * opt.resolution, opt.max_distance_bin),
        dtype=np.double,
        order='C')
    for i, x in zip(range(len(b)), b):
        filename = os.getcwd() + '/setup/bunny_transient_%d_%d.mat' % (
            resolution, i)
        setup = scipy.io.loadmat(filename)

        gt_transient[x, :] = np.array(setup['gt_transient'],
                                      dtype=np.double,
                                      order='C')

    # Ground-truth mesh (taken from the last chunk's .mat, which is assumed
    # to carry the same gt_v/gt_f as the others) — used only for the v2
    # vertex-error metric below.
    gt_mesh = MESH()
    gt_mesh.v = igl.eigen.MatrixXd(np.array(setup['gt_v'], dtype=np.double))
    gt_mesh.f = igl.eigen.MatrixXd(np.array(setup['gt_f'],
                                            dtype=np.double)).castint()

    opt.smooth_weight = 0.001
    mesh = MESH()

    # Initial mesh estimate produced by a confocal NLOS reconstruction.
    mesh_init_location = os.getcwd(
    ) + '/init/cnlos_bunny_threshold_%d.obj' % resolution
    igl.readOBJ(mesh_init_location, mesh.v, mesh.f)
    mesh.v = np.array(mesh.v, dtype=np.float32, order='C')
    mesh.f = np.array(mesh.f, dtype=np.int32, order='C')

    opt.smooth_ratio = ratio

    rendering.isotropic_remeshing(mesh, .5 / opt.resolution)
    old_v = np.array(mesh.v)  # snapshot for el_topo_gradient after a remesh
    rendering.compute_mesh_affinity(mesh)
    rendering.border_indicator(mesh)

    # Interior vertices (v_edge == 0) and border vertices (v_edge == 1) are
    # optimized as separate parameter groups.
    optimization_v = torch.from_numpy(mesh.v[mesh.v_edge == 0, :])
    optimization_v.requires_grad_()
    optimization_v_edge = torch.from_numpy(mesh.v[mesh.v_edge == 1, :])
    optimization_v_edge.requires_grad_()

    weight = rendering.create_weighting_function(gt_transient, opt.gamma)

    # Border vertices step with a scaled learning rate.
    optimizer = Adam_Modified([{
        'params': optimization_v
    }, {
        'params': optimization_v_edge,
        'lr': lr * opt.edge_lr_ratio
    }],
                              lr=lr)
    # Dummy backward pass so each parameter owns a .grad tensor that the
    # loop can overwrite in-place (gradients come from the renderer, not
    # from autograd).
    dummy_loss = torch.sum(optimization_v) + torch.sum(optimization_v_edge)
    dummy_loss.backward()
    scale_flag = False
    remesh_flag = False
    weight_flag = True
    run_count = 0

    l2_record = np.empty(opt.T)
    v2_record = np.empty(opt.T)
    l2_original_record = np.empty(opt.T)

    for t in range(opt.T):
        if remesh_flag:
            print('remesh')
            # Stop entirely once the mesh gets too dense; truncate the
            # records to the iterations actually run.
            if mesh.f.shape[0] > 250000:
                l2_record = l2_record[range(t)]
                l2_original_record = l2_original_record[range(t)]
                v2_record = v2_record[range(t)]
                break

            eltopo_tic = time.time()
            rendering.el_topo_gradient(mesh, old_v)
            mesh.v = np.array(mesh.v, dtype=np.double, order='C')
            rendering.el_topo_remeshing(mesh, .5 / opt.resolution)
            rendering.isotropic_remeshing(mesh, .5 / opt.resolution)

            rendering.compute_mesh_affinity(mesh)
            rendering.removeTriangle(mesh, opt)
            #rendering.keep_largest_connected_component(mesh)

            old_v = np.array(mesh.v)

            rendering.compute_mesh_affinity(mesh)
            rendering.border_indicator(mesh)
            # Vertex count changed: rebuild the parameter tensors and the
            # optimizer from scratch (mirrors the setup before the loop).
            optimization_v = torch.from_numpy(mesh.v[mesh.v_edge == 0, :])
            optimization_v.requires_grad_()
            optimization_v_edge = torch.from_numpy(mesh.v[mesh.v_edge == 1, :])
            optimization_v_edge.requires_grad_()

            weight = rendering.create_weighting_function(
                gt_transient, opt.gamma)

            optimizer = Adam_Modified([{
                'params': optimization_v
            }, {
                'params': optimization_v_edge,
                'lr': lr * opt.edge_lr_ratio
            }],
                                      lr=lr)
            dummy_loss = torch.sum(optimization_v) + torch.sum(
                optimization_v_edge)
            dummy_loss.backward()

            remesh_flag = False
            run_count = 0

        print('t = %d' % t)
        tic = time.time()

        # Render each lighting chunk separately and average the gradients.
        transient = np.zeros(gt_transient.shape, dtype=np.double, order='C')
        grad = np.zeros(mesh.v.shape, dtype=np.double, order='C')
        tmp_opt = copy.deepcopy(opt)

        for i, x in zip(range(len(b)), b):
            tmp_opt.lighting = opt.lighting[x, :]
            tmp_opt.lighting_normal = opt.lighting_normal[x, :]

            transient[x, :], grad_tmp, length = rendering.inverseRendering(
                mesh, gt_transient[x, :], weight[x, :], tmp_opt)
            grad += grad_tmp
        grad /= len(b)

        smoothing_val, smoothing_grad = rendering.renderStreamedNormalSmoothing(
            mesh)
        l2, original_l2 = rendering.evaluate_loss_with_normal_smoothness(
            gt_transient, weight, transient, smoothing_val, mesh, opt)

        # At t == 0 and after every remesh: rebalance smooth_weight so the
        # smoothness term equals original_l2 / smooth_ratio, and (for t > 0)
        # decay the learning rate relative to the initial loss.
        if weight_flag:
            opt.smooth_weight = original_l2 / smoothing_val / opt.smooth_ratio
            weight_flag = False
            print('new smooth weight %f' % opt.smooth_weight)
            if t > 0:
                lr = (original_l2 / l2_original_record[0]) * lr0 * (
                    (0.99)**(t / 15))
                print('new lr %f' % lr)

        grad += opt.smooth_weight * smoothing_grad
        v2 = rendering.compute_v2(mesh.v, gt_mesh)
        print(
            '%05d update time: %8.8f L2 loss: %8.8f  old_l2 loss: %8.8f v2: %8.8f'
            % (t, time.time() - tic, l2, original_l2, v2))
        l2_record[t] = l2
        l2_original_record[t] = original_l2
        v2_record[t] = v2
        filename = folder_name + '%05d.mat' % (t)
        scipy.io.savemat(filename,
                         mdict={
                             'v': mesh.v,
                             'f': mesh.f,
                             'transient': transient,
                             'l2': l2,
                             'l2_original': original_l2,
                             'grad': grad,
                             'smoothing_grad': smoothing_grad,
                             'sample_num': opt.sample_num
                         })

        run_count += 1

        # Convergence check: when the relative improvement of either loss
        # drops below loss_epsilon, toggle the optimization phase
        # (testing_flag), tighten the settings, and schedule a remesh —
        # skipping the gradient step for this iteration.
        if run_count > 2:
            if ((l2_original_record[t - 1] - original_l2) /
                    l2_original_record[t - 1]) < opt.loss_epsilon or (
                        (l2_record[t - 1] - l2) /
                        l2_record[t - 1]) < opt.loss_epsilon:
                if opt.testing_flag == 1:
                    opt.testing_flag = 0
                    opt.smooth_ratio = 10 + t / 100
                    print('shading based')
                else:
                    opt.testing_flag = 1
                    opt.resolution *= 1.5
                    opt.sample_num *= 1.5
                    opt.loss_epsilon /= 2
                    opt.smooth_ratio = ratio + t / 10
                    print('remesh %d' % opt.resolution)

                remesh_flag = True
                weight_flag = True
                #opt.gamma
                #weight = rendering.create_weighting_function(gt_transient, opt.gamma)
                #opt.smooth_ratio *= 1.5
                continue

        # Write the averaged renderer gradient into the pre-allocated .grad
        # tensors, take one Adam step, then copy the updated vertex
        # positions back into the mesh arrays.
        optimization_v.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 0, :]).float()
        optimization_v_edge.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 1, :]).float()
        optimizer.step()
        mesh.v[mesh.v_edge == 0, :] = np.array(optimization_v.data.numpy(),
                                               dtype=np.float32,
                                               order='C')
        mesh.v[mesh.v_edge == 1, :] = np.array(
            optimization_v_edge.data.numpy(), dtype=np.float32, order='C')

        # Periodic remesh regardless of convergence.
        if run_count == 15:
            remesh_flag = True

    # Final summary of the loss curves and the weighting function.
    filename = folder_name + 'loss_val.mat'
    scipy.io.savemat(filename,
                     mdict={
                         'l2': l2_record,
                         'l2_original_record': l2_original_record,
                         'v2_record': v2_record,
                         'weight': weight
                     })
def optimize_shape(mesh, gt_transient, weight, opt, T, lr, gt_mesh,
                   global_counter, folder):
    """Run up to ``T`` Adam steps on the mesh vertices against gt_transient.

    Interior vertices (``mesh.v_edge == 0``) and border vertices
    (``mesh.v_edge == 1``) form two optimizer parameter groups; the border
    group steps with ``lr * opt.edge_lr_ratio``.

    Returns:
        ``(global_counter, converged, original_l2)`` — the updated print
        counter, ``True`` when the relative decrease of either loss fell
        below ``opt.loss_epsilon`` before all ``T`` iterations ran, and the
        last data-term loss.
    """
    optimization_v = torch.from_numpy(mesh.v[mesh.v_edge == 0, :])
    optimization_v.requires_grad_()
    optimization_v_edge = torch.from_numpy(mesh.v[mesh.v_edge == 1, :])
    optimization_v_edge.requires_grad_()

    optimizer = Adam_Modified([{
        'params': optimization_v
    }, {
        'params': optimization_v_edge,
        'lr': lr * opt.edge_lr_ratio
    }],
                              lr=lr)
    # Dummy backward pass so each parameter owns a .grad tensor that the
    # loop can overwrite in-place (gradients come from the renderer, not
    # from autograd).
    dummy_loss = torch.sum(optimization_v) + torch.sum(optimization_v_edge)
    dummy_loss.backward()

    # BUG FIX: size the loss records with the actual loop bound T.  The
    # original used opt.T, which raised IndexError whenever T > opt.T.
    l2_record = np.empty(T)
    v2_record = np.empty(T)
    l2_original_record = np.empty(T)

    # Defined up-front so the final return is valid even when T == 0
    # (previously a NameError).
    original_l2 = 0.0

    for t in range(T):
        tic = time.time()
        transient, grad, length = rendering.inverseRendering(
            mesh, gt_transient, weight, opt)
        smoothing_val, smoothing_grad = rendering.renderStreamedNormalSmoothing(
            mesh)
        l2, original_l2 = rendering.evaluate_loss_with_normal_smoothness(
            gt_transient, weight, transient, smoothing_val, mesh, opt)
        if t == 0:
            # Balance the smoothness term to original_l2 / smooth_ratio.
            opt.smooth_weight = original_l2 / smoothing_val / opt.smooth_ratio
            print('smoothness weight %f' % opt.smooth_weight)

        grad += opt.smooth_weight * smoothing_grad
        if opt.gt_mesh:
            v2 = rendering.compute_v2(mesh.v, gt_mesh)
        else:
            v2 = 0
        #filename = folder + 'test_%05d.mat'%(global_counter)
        #scipy.io.savemat(filename, mdict={'v':mesh.v, 'f':mesh.f})
        print(
            '%05d update time: %8.8f L2 loss: %8.8f  old_l2 loss: %8.8f v2: %8.8f'
            % (global_counter, time.time() - tic, l2, original_l2, v2))
        global_counter += 1

        l2_record[t] = l2
        l2_original_record[t] = original_l2
        v2_record[t] = v2

        # Early stop: relative improvement of either loss below loss_epsilon.
        if t > 2:
            if (l2_original_record[t - 1] - original_l2
                ) / l2_original_record[t - 1] < opt.loss_epsilon:
                return global_counter, True, original_l2
            if (l2_record[t - 1] - l2) / l2_record[t - 1] < opt.loss_epsilon:
                return global_counter, True, original_l2

        # Copy the renderer gradient into the pre-allocated .grad tensors,
        # take one Adam step, and write the updated positions back into the
        # mesh arrays.
        optimization_v.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 0, :]).float()
        optimization_v_edge.grad.data = torch.from_numpy(
            grad[mesh.v_edge == 1, :]).float()
        optimizer.step()
        mesh.v[mesh.v_edge == 0, :] = np.array(optimization_v.data.numpy(),
                                               dtype=np.float32,
                                               order='C')
        mesh.v[mesh.v_edge == 1, :] = np.array(
            optimization_v_edge.data.numpy(), dtype=np.float32, order='C')

    return global_counter, False, original_l2