示例#1
0
    def test_gradient_backpropagation(self):
        """Verify the adjoint gradient of S12 w.r.t. the raw design weights.

        For single- and multi-frequency objectives, the adjoint gradient is
        backpropagated through ``mapping`` and its directional derivative
        along ``dp`` is compared to a finite-difference estimate.
        """
        print("*** TESTING BACKPROP FEATURES ***")
        for frequencies in [[fcen], [1 / 1.58, fcen, 1 / 1.53]]:
            ## filter/thresholding parameters
            filter_radius = 0.21985
            eta = 0.49093
            beta = 4.0698

            mapped_p = mapping(p, filter_radius, eta, beta)

            ## objective value and gradient from the adjoint solver
            adjsol_obj, adjsol_grad = adjoint_solver(
                mapped_p, MonitorObject.EIGENMODE, frequencies)

            ## pull the gradient back through the mapping (VJP w.r.t. p)
            vjp = tensor_jacobian_product(mapping, 0)
            if len(frequencies) > 1:
                # one VJP per frequency column
                bp_adjsol_grad = np.zeros(adjsol_grad.shape)
                for col in range(len(frequencies)):
                    bp_adjsol_grad[:, col] = vjp(p, filter_radius, eta, beta,
                                                 adjsol_grad[:, col])
            else:
                bp_adjsol_grad = vjp(p, filter_radius, eta, beta, adjsol_grad)

            ## unperturbed S12 via a conventional forward run
            S12_unperturbed = forward_simulation(
                mapped_p, MonitorObject.EIGENMODE, frequencies)

            ## the adjoint objective must match the forward simulation
            print(
                "S12 -- adjoint solver: {}, traditional simulation: {}".format(
                    adjsol_obj, S12_unperturbed))
            np.testing.assert_array_almost_equal(adjsol_obj, S12_unperturbed,
                                                 decimal=3)

            ## perturbed S12 for the finite-difference derivative
            S12_perturbed = forward_simulation(
                mapping(p + dp, filter_radius, eta, beta),
                MonitorObject.EIGENMODE, frequencies)

            ## directional derivative dp . grad vs. finite difference
            if bp_adjsol_grad.ndim < 2:
                bp_adjsol_grad = np.expand_dims(bp_adjsol_grad, axis=1)
            adj_scale = (dp[None, :] @ bp_adjsol_grad).flatten()
            fd_grad = S12_perturbed - S12_unperturbed
            print(
                "Directional derivative -- adjoint solver: {}, FD: {}".format(
                    adj_scale, fd_grad))
            np.testing.assert_array_almost_equal(adj_scale, fd_grad, decimal=5)
示例#2
0
    def test_offdiagonal(self):
        """Gradient test with an anisotropic (off-diagonal) material.

        Runs the adjoint solver with the ``sapphire`` medium, backpropagates
        through a conic filter, and checks the directional derivative along
        ``dp`` against a finite-difference estimate.
        """
        print("*** TESTING OFFDIAGONAL COMPONENTS ***")

        # conic smoothing filter applied to the flattened design weights
        def filt(x):
            return mpa.conic_filter(
                x.reshape((Nx, Ny)), 0.25, design_region_size.x,
                design_region_size.y, design_region_resolution).flatten()

        ## test the single frequency and multi frequency case
        for frequencies in [[fcen], [1 / 1.58, fcen, 1 / 1.53]]:
            ## objective value and gradient from the adjoint solver
            adjsol_obj, adjsol_grad = adjoint_solver(
                filt(p), MonitorObject.EIGENMODE, frequencies, sapphire)

            ## backpropagate the gradient through the filter (VJP w.r.t. x)
            vjp = tensor_jacobian_product(filt, 0)
            if len(frequencies) > 1:
                # one VJP per frequency column
                bp_adjsol_grad = np.zeros(adjsol_grad.shape)
                for col in range(len(frequencies)):
                    bp_adjsol_grad[:, col] = vjp(p, adjsol_grad[:, col])
            else:
                bp_adjsol_grad = vjp(p, adjsol_grad)

            ## unperturbed S12 via a conventional forward run
            S12_unperturbed = forward_simulation(
                filt(p), MonitorObject.EIGENMODE, frequencies, sapphire)

            ## the adjoint objective must match the forward simulation
            print(
                "S12 -- adjoint solver: {}, traditional simulation: {}".format(
                    adjsol_obj, S12_unperturbed))
            self.assertClose(adjsol_obj, S12_unperturbed, epsilon=1e-6)

            ## perturbed S12 for the finite-difference derivative
            S12_perturbed = forward_simulation(
                filt(p + dp), MonitorObject.EIGENMODE, frequencies, sapphire)

            ## compare directional derivative dp . grad to finite difference
            if bp_adjsol_grad.ndim < 2:
                bp_adjsol_grad = np.expand_dims(bp_adjsol_grad, axis=1)
            adj_scale = (dp[None, :] @ bp_adjsol_grad).flatten()
            fd_grad = S12_perturbed - S12_unperturbed
            print(
                "Directional derivative -- adjoint solver: {}, FD: {}".format(
                    adj_scale, fd_grad))
            # looser tolerance in single precision
            tol = 0.1 if mp.is_single_precision() else 0.04
            self.assertClose(adj_scale, fd_grad, epsilon=tol)
示例#3
0
    def test_gradient_backpropagation(self):
        """Check the adjoint gradient of the mapped design against finite differences."""
        ## filter/thresholding parameters
        filter_radius = 0.21985
        eta = 0.49093
        beta = 4.0698

        mapped_p = mapping(p, filter_radius, eta, beta)

        ## objective value and gradient from the adjoint solver
        adjsol_obj, adjsol_grad = adjoint_solver(mapped_p,
                                                 MonitorObject.EIGENMODE)

        ## backpropagate through the mapping via its VJP w.r.t. p
        bp_adjsol_grad = tensor_jacobian_product(mapping, 0)(
            p, filter_radius, eta, beta, adjsol_grad)

        ## unperturbed S12 from a conventional forward run
        S12_unperturbed = forward_simulation(mapped_p, MonitorObject.EIGENMODE)

        print("S12:, {:.6f}, {:.6f}".format(adjsol_obj, S12_unperturbed))
        self.assertAlmostEqual(adjsol_obj, S12_unperturbed, places=3)

        ## perturbed S12 for the finite-difference check
        S12_perturbed = forward_simulation(
            mapping(p + dp, filter_radius, eta, beta), MonitorObject.EIGENMODE)

        ## directional derivative dp . grad vs. finite difference
        directional = np.dot(dp, bp_adjsol_grad)
        fd = S12_perturbed - S12_unperturbed
        print("directional_derivative:, {:.6f}, {:.6f}".format(directional, fd))
        self.assertAlmostEqual(directional, fd, places=5)
示例#4
0
def test_tensor_jacobian_product():
    """tensor_jacobian_product(fun)(a, V) must equal V contracted with the full Jacobian."""
    def fun(a):
        return np.roll(np.sin(a), 1)

    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4)
    full_jac = jacobian(fun)(a)
    expected = np.tensordot(V, full_jac, axes=np.ndim(V))
    check_equivalent(expected, tensor_jacobian_product(fun)(a, V))
示例#5
0
def test_tensor_jacobian_product():
    """VJP of a function with an asymmetric Jacobian must match V^T J."""
    # np.roll makes the Jacobian asymmetric, catching transposed VJPs.
    def fun(a):
        return np.roll(np.sin(a), 1)

    a = npr.randn(5)
    V = npr.randn(5)
    full_jac = jacobian(fun)(a)
    expected = np.dot(V.T, full_jac)
    check_equivalent(expected, tensor_jacobian_product(fun)(a, V))
示例#6
0
def test_tensor_jacobian_product():
    """Vector-Jacobian product must agree with an explicit V^T J contraction."""
    # Rolling the output makes the Jacobian asymmetric, so a transposed
    # implementation would be detected.
    def fun(a):
        return np.roll(np.sin(a), 1)

    a = npr.randn(5)
    V = npr.randn(5)
    check_equivalent(np.dot(V.T, jacobian(fun)(a)),
                     tensor_jacobian_product(fun)(a, V))
示例#7
0
    def test_gradient_backpropagation(self):
        """Validate the adjoint gradient of S12 w.r.t. the raw design weights."""
        p = np.random.rand(Nx * Ny)

        ## filter/thresholding parameters
        filter_radius = 0.21985
        eta = 0.49093
        beta = 4.0698

        mapped_p = mapping(p, filter_radius, eta, beta)

        ## objective value and gradient from the adjoint solver
        adjsol_obj, adjsol_grad = adjoint_solver(mapped_p)

        ## backpropagate through the mapping (VJP w.r.t. argument 0, i.e. p)
        bp_adjsol_grad = tensor_jacobian_product(mapping, 0)(
            p, filter_radius, eta, beta, adjsol_grad)

        ## unperturbed S12 from a conventional forward run
        S12_unperturbed = forward_simulation(mapped_p)

        print("S12:, {:.6f}, {:.6f}".format(adjsol_obj, S12_unperturbed))
        self.assertAlmostEqual(adjsol_obj, S12_unperturbed, places=3)

        ## random epsilon perturbation for computing gradient via finite difference
        deps = 1e-5
        dp = deps * np.random.rand(Nx * Ny)

        ## perturbed S12
        S12_perturbed = forward_simulation(
            mapping(p + dp, filter_radius, eta, beta))

        ## directional derivative dp . grad vs. finite difference
        directional = np.dot(dp, bp_adjsol_grad)
        fd = S12_perturbed - S12_unperturbed
        print("directional_derivative:, {:.10f}, {:.10f}".format(
            directional, fd))
        self.assertAlmostEqual(directional, fd, places=6)
示例#8
0
def test_matrix_jacobian_product():
    """Matrix-valued VJP must equal the tensordot of V with the full Jacobian."""
    def fun(a):
        return np.roll(np.sin(a), 1)

    a = npr.randn(5, 4)
    V = npr.randn(5, 4)
    full_jac = jacobian(fun)(a)
    expected = np.tensordot(V, full_jac)
    check_equivalent(expected, tensor_jacobian_product(fun)(a, V))
示例#9
0
def test_tensor_jacobian_product():
    """Rank-3 VJP must equal contracting V over all of its axes with the Jacobian."""
    def fun(a):
        return np.roll(np.sin(a), 1)

    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4)
    full_jac = jacobian(fun)(a)
    # contract over every axis of V (np.ndim(V) leading axes of the Jacobian)
    expected = np.tensordot(V, full_jac, axes=np.ndim(V))
    check_equivalent(expected, tensor_jacobian_product(fun)(a, V))
示例#10
0
def test_matrix_jacobian_product():
    """VJP for a matrix input must match a direct tensordot against the Jacobian."""
    # np.roll keeps the Jacobian asymmetric so transposition bugs surface.
    def fun(a):
        return np.roll(np.sin(a), 1)

    a = npr.randn(5, 4)
    V = npr.randn(5, 4)
    check_equivalent(np.tensordot(V, jacobian(fun)(a)),
                     tensor_jacobian_product(fun)(a, V))