Example #1
    def test_parallel_optimization_with_grad(self):
        """
        Test a full least-squares optimization.
        """
        for ngroups in range(1, 4):
            for abs_step in [0, 1.0e-7]:
                # Only try rel_step=0 if abs_step is positive:
                rel_steps = [0, 1.0e-7]
                if abs_step == 0:
                    rel_steps = [1.0e-7]

                for rel_step in rel_steps:
                    for diff_method in ["forward", "centered"]:
                        logger.debug(f'ngroups={ngroups} abs_step={abs_step} ' \
                                     f'rel_step={rel_step} diff_method={diff_method}')
                        mpi = MpiPartition(ngroups=ngroups)
                        o = TestFunction3(mpi.comm_groups)
                        term1 = (o.f0, 0, 1)
                        term2 = (o.f1, 0, 1)
                        prob = LeastSquaresProblem([term1, term2], diff_method=diff_method,
                                                   abs_step=abs_step, rel_step=rel_step)
                        # Set initial condition different from 0,
                        # because otherwise abs_step=0 causes step
                        # size to be 0.
                        prob.x = [-0.1, 0.2]
                        least_squares_mpi_solve(prob, mpi)
                        self.assertAlmostEqual(prob.x[0], 1)
                        self.assertAlmostEqual(prob.x[1], 1)
Example #2
 def test_solve_with_finite_differences(self):
     """
     Minimize a function for which analytic derivatives are not
     provided. Provides test coverage for the finite-differencing
     options.
     """
     #for solver in [least_squares_serial_solve]:
     for solver in solvers:
         if solver == serial_solve:
             continue
         for abs_step in [0, 1.0e-7]:
             rel_steps = [0, 1.0e-7]
             if abs_step == 0:
                 rel_steps = [1.0e-7]
             for rel_step in rel_steps:
                 for diff_method in ["forward", "centered"]:
                     logger.debug(f'solver={solver} diff_method={diff_method} ' \
                                  f'abs_step={abs_step} rel_step={rel_step}')
                     b = Beale()
                     b.set_dofs([0.1, -0.2])
                     prob = LeastSquaresProblem([(b, 0, 1)],
                                                diff_method=diff_method,
                                                abs_step=abs_step,
                                                rel_step=rel_step)
                     #least_squares_serial_solve(prob, grad=True)
                     solver(prob, grad=True)
                     np.testing.assert_allclose(prob.x, [3, 0.5])
                     np.testing.assert_allclose(prob.f(), [0, 0, 0],
                                                atol=1e-10)
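For reference, the Beale objective used above can be written as three residuals whose squares sum to the classic Beale function, with the global minimum at (3, 0.5) that the test asserts. A minimal sketch under that assumption (the Beale class in the test suite may order or scale its residuals differently):

import numpy as np

def beale_residuals(x, y):
    # Three residuals; the sum of their squares is the Beale function.
    return np.array([1.5 - x + x * y,
                     2.25 - x + x * y**2,
                     2.625 - x + x * y**3])

print(beale_residuals(3.0, 0.5))  # -> [0. 0. 0.], matching prob.f() at the optimum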
Example #3
 def test_solve_quadratic_fixed_supplying_properties(self):
     """
     Same as test_solve_quadratic_fixed, except supplying @properties
     rather than functions as targets.
     """
     for solver in solvers:
         iden1 = Identity()
         iden2 = Identity()
         iden3 = Identity()
         iden1.x = 4
         iden2.x = 5
         iden3.x = 6
         iden1.names = ['x1']
         iden2.names = ['x2']
         iden3.names = ['x3']
         iden1.fixed = [True]
         iden3.fixed = [True]
         # Try a mix of explicit LeastSquaresTerms and lists
         term1 = [iden1, 'f', 1, 1]
         term2 = [iden2, 'f', 2, 1 / 4.]
         term3 = LeastSquaresTerm.from_sigma(Target(iden3, 'f'), 3, sigma=3)
         prob = LeastSquaresProblem([term1, term2, term3])
         solver(prob)
         self.assertAlmostEqual(prob.objective(), 10)
         self.assertAlmostEqual(iden1.x, 4)
         self.assertAlmostEqual(iden2.x, 2)
         self.assertAlmostEqual(iden3.x, 6)
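Comparing term3 here with term3 of Example #4 below suggests that LeastSquaresTerm.from_sigma(..., sigma=3) is equivalent to supplying a weight of 1/sigma**2 = 1/9: both problems reach the same final configuration and the same objective value of 10.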
Example #4
 def test_solve_quadratic_fixed(self):
     """
     Same as test_solve_quadratic, except with different weights, and with
     x and z fixed so that only y is optimized.
     """
     for solver in solvers:
         iden1 = Identity()
         iden2 = Identity()
         iden3 = Identity()
         iden1.x = 4
         iden2.x = 5
         iden3.x = 6
         iden1.names = ['x1']
         iden2.names = ['x2']
         iden3.names = ['x3']
         iden1.fixed = [True]
         iden3.fixed = [True]
         term1 = (iden1.J, 1, 1)
         term2 = (iden2.J, 2, 1 / 4.)
         term3 = (iden3.J, 3, 1 / 9.)
         prob = LeastSquaresProblem([term1, term2, term3])
         solver(prob)
         self.assertAlmostEqual(prob.objective(), 10)
         self.assertAlmostEqual(iden1.x, 4)
         self.assertAlmostEqual(iden2.x, 2)
         self.assertAlmostEqual(iden3.x, 6)
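The expected value of 10 can be checked by hand: x1 = 4 and x3 = 6 are fixed, so the optimizer only moves x2 to its target of 2, and the objective that remains is 1*(4 - 1)**2 + (1/4)*(2 - 2)**2 + (1/9)*(6 - 3)**2 = 9 + 0 + 1 = 10.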
Example #5
 def test_solve_quadratic_fixed_supplying_objects(self):
     """
     Same as test_solve_quadratic_fixed, except supplying objects
     rather than functions as targets.
     """
     for solver in solvers:
         iden1 = Identity()
         iden2 = Identity()
         iden3 = Identity()
         iden1.x = 4
         iden2.x = 5
         iden3.x = 6
         iden1.names = ['x1']
         iden2.names = ['x2']
         iden3.names = ['x3']
         iden1.fixed = [True]
         iden3.fixed = [True]
         term1 = [iden1, 1, 1]
         term2 = [iden2, 2, 1 / 4.]
         term3 = [iden3, 3, 1 / 9.]
         prob = LeastSquaresProblem([term1, term2, term3])
         solver(prob)
         self.assertAlmostEqual(prob.objective(), 10)
         self.assertAlmostEqual(iden1.x, 4)
         self.assertAlmostEqual(iden2.x, 2)
         self.assertAlmostEqual(iden3.x, 6)
Example #6
    def test_integrated_stellopt_scenarios_1dof_Garabedian(self):
        """
        This script implements the "1DOF_circularCrossSection_varyAxis_targetIota"
        example from
        https://github.com/landreman/stellopt_scenarios

        This example demonstrates optimizing a surface shape using the
        Garabedian representation instead of VMEC's RBC/ZBS representation.
        This optimization problem has one independent variable, the Garabedian
        Delta_{m=1, n=-1} coefficient, representing the helical excursion of
        the magnetic axis. The objective function is (iota - iota_target)^2,
        where iota is measured on the magnetic axis.

        Details of the optimum and a plot of the objective function landscape
        can be found here:
        https://github.com/landreman/stellopt_scenarios/tree/master/1DOF_circularCrossSection_varyAxis_targetIota
        """
        filename = os.path.join(TEST_DIR, '1DOF_Garabedian.sp')

        for mpol_ntor in [2, 4]:
            # Start with a default surface.
            equil = Spec(filename)
            equil.inputlist.mpol = mpol_ntor
            equil.inputlist.ntor = mpol_ntor

            # We will optimize in the space of Garabedian coefficients
            # rather than RBC/ZBS coefficients. To do this, we convert the
            # boundary to the Garabedian representation:
            surf = equil.boundary.to_Garabedian()
            equil.boundary = surf

            # SPEC parameters are all fixed by default, while surface
            # parameters are all non-fixed by default. You can choose
            # which parameters are optimized by setting their 'fixed'
            # attributes.
            surf.all_fixed()
            surf.set_fixed('Delta(1,-1)', False)

            # Use low resolution, for speed:
            equil.inputlist.lrad[0] = 4
            equil.inputlist.nppts = 100

            # Each Target is then equipped with a shift and weight, to become a
            # term in a least-squares objective function
            desired_iota = 0.41  # Sign was + for VMEC
            prob = LeastSquaresProblem([(equil.iota, desired_iota, 1)])

            # Check that the problem was set up correctly:
            self.assertEqual(len(prob.dofs.names), 1)
            self.assertEqual(prob.dofs.names[0][:11], 'Delta(1,-1)')
            np.testing.assert_allclose(prob.x, [0.1])
            self.assertEqual(prob.dofs.all_owners, [equil, surf])
            self.assertEqual(prob.dofs.dof_owners, [surf])

            # Solve the minimization problem:
            least_squares_serial_solve(prob)

            self.assertAlmostEqual(surf.get_Delta(1, -1), 0.08575, places=4)
            self.assertAlmostEqual(equil.iota(), desired_iota, places=5)
            self.assertLess(np.abs(prob.objective()), 1.0e-15)
Example #7
 def test_solve_quadratic_fixed_supplying_attributes(self):
     """
     Same as test_solve_quadratic_fixed, except supplying attributes
     rather than functions as targets.
     """
     for solver in solvers:
         iden1 = Identity()
         iden2 = Identity()
         iden3 = Identity()
         iden1.x = 4
         iden2.x = 5
         iden3.x = 6
         iden1.names = ['x1']
         iden2.names = ['x2']
         iden3.names = ['x3']
         iden1.fixed = [True]
         iden3.fixed = [True]
         # Try a mix of explicit LeastSquaresTerms and tuples
         term1 = LeastSquaresTerm(Target(iden1, 'x'), 1, 1)
         term2 = (iden2, 'x', 2, 1 / 4.)
         term3 = (iden3, 'x', 3, 1 / 9.)
         prob = LeastSquaresProblem([term1, term2, term3])
         solver(prob)
         self.assertAlmostEqual(prob.objective(), 10)
         self.assertAlmostEqual(iden1.x, 4)
         self.assertAlmostEqual(iden2.x, 2)
         self.assertAlmostEqual(iden3.x, 6)
Example #8
    def subtest_curve_length_optimisation(self, rotated):
        nquadrature = 100
        nfourier = 4
        nfp = 5
        curve = CurveRZFourier(nquadrature, nfourier, nfp, True)
        if rotated:
            curve = RotatedCurve(curve, 0.5, flip=False)

        # Initialize the Fourier amplitudes to some random values
        x0 = np.random.rand(curve.num_dofs()) - 0.5
        x0[0] = 3.0
        curve.set_dofs(x0)
        print('Initial curve dofs: ', curve.get_dofs())

        # Tell the curve object that the first Fourier mode is fixed, whereas
        # all the other dofs are not.
        curve.all_fixed(False)
        curve.fixed[0] = True

        # Presently in simsgeo, the length objective is a separate object
        # rather than a function of Curve itself.
        obj = make_optimizable(CurveLength(curve))

        # For now, we need to add this attribute to CurveLength. Eventually
        # this would hopefully be done in simsgeo, but for now I'll put it here.
        obj.depends_on = ['curve']

        print('Initial curve length: ', obj.J())

        # Each target function is then equipped with a shift and weight, to
        # become a term in a least-squares objective function.
        # A list of terms is combined to form a nonlinear-least-squares
        # problem.
        prob = LeastSquaresProblem([(obj, 0.0, 1.0)])

        # At the initial condition, get the Jacobian two ways: analytic
        # derivatives and finite differencing. The difference should be small.
        fd_jac = prob.dofs.fd_jac()
        jac = prob.dofs.jac()
        print('finite difference Jacobian:')
        print(fd_jac)
        print('Analytic Jacobian:')
        print(jac)
        print('Difference:')
        print(fd_jac - jac)
        assert np.allclose(fd_jac, jac, rtol=1e-4, atol=1e-4)

        # Solve the minimization problem:
        least_squares_serial_solve(prob, ftol=1e-10, xtol=1e-10, gtol=1e-10)

        print('At the optimum, x: ', prob.x)
        print(' Final curve dofs: ', curve.get_dofs())
        print(' Final curve length:    ', obj.J())
        print(' Expected final length: ', 2 * np.pi * x0[0])
        print(' objective function: ', prob.objective())
        assert abs(obj.J() - 2 * np.pi * x0[0]) < 1e-8
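The final assert can also be checked by hand: with the first Fourier mode (the mean radius) fixed at x0[0] = 3 and all other modes free, the shortest closed curve is the circle of radius 3, whose length is 2*pi*3, exactly the "expected final length" printed above.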
Example #9
 def test_exceptions(self):
     """
     Verify that exceptions are raised when invalid inputs are
     provided.
     """
     # Argument must be a list in which each element is a
     # LeastSquaresTerm or a 3- or 4-element tuple/list.
     with self.assertRaises(TypeError):
         prob = LeastSquaresProblem(7)
     with self.assertRaises(ValueError):
         prob = LeastSquaresProblem([])
     with self.assertRaises(TypeError):
         prob = LeastSquaresProblem([7, 1])
Example #10
 def test_solve_rosenbrock_using_vector(self):
     """
     Minimize the Rosenbrock function using a single vector-valued
     least-squares term.
     """
     for solver in solvers:
         for grad in [True, False]:
             r = Rosenbrock()
             prob = LeastSquaresProblem([(r.terms, 0, 1)])
             if solver == serial_solve:
                 if grad == True:
                     continue
                 else:
                     solver(prob, tol=1e-12)
             else:
                 solver(prob, grad=grad)
             self.assertAlmostEqual(prob.objective(), 0)
             v = r.get_dofs()
             self.assertAlmostEqual(v[0], 1)
             self.assertAlmostEqual(v[1], 1)
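The optimum checked here, (1, 1), is the minimum of the standard Rosenbrock function (a - x)**2 + b*(y - x**2)**2 with a = 1. A hedged sketch of a residual pair whose sum of squares reproduces it (the Rosenbrock class used in these tests may choose signs and scalings differently):

import numpy as np

def rosenbrock_terms(x, y, a=1.0, b=100.0):
    # Two residuals; sum of squares = (a - x)^2 + b*(y - x^2)^2, minimized at (a, a^2).
    return np.array([a - x, np.sqrt(b) * (y - x**2)])

print(rosenbrock_terms(1.0, 1.0))  # -> [0. 0.]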
Example #11
 def test_solve_quadratic(self):
     """
     Minimize f(x,y,z) = 1 * (x - 1) ^ 2 + 2 * (y - 2) ^ 2 + 3 * (z - 3) ^ 2.
     The optimum is at (x,y,z)=(1,2,3), and f=0 at this point.
     """
     for solver in solvers:
         iden1 = Identity()
         iden2 = Identity()
         iden3 = Identity()
         term1 = (iden1.J, 1, 1)
         term2 = (iden2.J, 2, 2)
         term3 = (iden3.J, 3, 3)
         prob = LeastSquaresProblem([term1, term2, term3])
         if solver == serial_solve:
             solver(prob, tol=1e-12)
         else:
             solver(prob)
         self.assertAlmostEqual(prob.objective(), 0)
         self.assertAlmostEqual(iden1.x, 1)
         self.assertAlmostEqual(iden2.x, 2)
         self.assertAlmostEqual(iden3.x, 3)
Example #12
 def test_parallel_optimization_without_grad(self):
     """
     Test a full least-squares optimization.
     """
     for ngroups in range(1, 4):
         mpi = MpiPartition(ngroups=ngroups)
         o = TestFunction3(mpi.comm_groups)
         term1 = (o.f0, 0, 1)
         term2 = (o.f1, 0, 1)
         prob = LeastSquaresProblem([term1, term2])
         least_squares_mpi_solve(prob, mpi, grad=False)
         self.assertAlmostEqual(prob.x[0], 1)
         self.assertAlmostEqual(prob.x[1], 1)
Example #13
 def test_solve_rosenbrock_using_scalars(self):
     """
     Minimize the Rosenbrock function using two separate least-squares
     terms.
     """
     for solver in solvers:
         for grad in [True, False]:
             r = Rosenbrock()
             term1 = (r.term1, 0, 1)
             term2 = (r.term2, 0, 1)
             prob = LeastSquaresProblem((term1, term2))
             if solver == serial_solve:
                 if grad == True:
                     continue
                 else:
                     solver(prob, tol=1e-12)
             else:
                 solver(prob, grad=grad)
             self.assertAlmostEqual(prob.objective(), 0)
             v = r.get_dofs()
             self.assertAlmostEqual(v[0], 1)
             self.assertAlmostEqual(v[1], 1)
Example #14
 def test_failures(self):
     """
     Verify that the expected residuals are returned in cases where the
     objective function evaluations fail.
     """
     o1 = Failer()
     r1 = Rosenbrock()
     fail_val = 1.0e6
     prob1 = LeastSquaresProblem([(r1.terms, 0, 1), (o1, 0, 1)],
                                 fail=fail_val)
     # First evaluation should not fail.
     f = prob1.f()
     print(f)
     np.testing.assert_allclose(f, [-1, 0, 1, 1, 1])
     # Second evaluation should fail.
     f = prob1.f()
     print(f)
     np.testing.assert_allclose(f, np.full(5, fail_val))
     # Third evaluation should not fail.
     f = prob1.f()
     print(f)
     np.testing.assert_allclose(f, [-1, 0, 1, 1, 1])
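The expected residual vectors imply that the Failer object returns three residuals equal to 1 on a successful evaluation and raises an exception on every second call, at which point the problem substitutes fail_val for the whole residual vector. A hypothetical stand-in with that behaviour (not the actual simsopt class):

import numpy as np

class FailerSketch:
    # Raises on every second evaluation; otherwise returns three residuals of 1.
    def __init__(self):
        self.nevals = 0

    def get_dofs(self):
        return np.array([])  # contributes no degrees of freedom

    def set_dofs(self, x):
        pass

    def J(self):
        self.nevals += 1
        if self.nevals % 2 == 0:  # fail on the 2nd, 4th, ... call
            raise RuntimeError("intentional failure")
        return np.full(3, 1.0)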
Example #15
 def test_outside_optimizer(self):
     """
     Verify that a least-squares problem can be passed to an outside
     optimization package to be solved, even when failures occur
     during evaluations.
     """
     ros = RosenbrockWithFailures(fail_interval=20)
     fail_val = 1.0e2
     prob = LeastSquaresProblem([(ros.terms, 0, 1)], fail=fail_val)
     # Just call the bare scipy.optimize.minimize function. This is
     # not really an "outside" package, but it is similar to how we
     # might want to call outside optimization libraries that are
     # completely separate from simsopt.
     result = minimize(prob.objective, prob.x)
     # Need a large tolerance, since the failures confuse the
     # optimizer
     np.testing.assert_allclose(result.x, [1, 1], atol=1e-2)
Example #16
    def test_vmec_failure(self):
        """
        Verify that failures of VMEC are correctly caught and represented
        by large values of the objective function.
        """
        for j in range(2):
            filename = os.path.join(TEST_DIR, 'input.li383_low_res')
            vmec = Vmec(filename)
            # Use the objective function from
            # stellopt_scenarios_2DOF_targetIotaAndVolume:
            if j == 0:
                prob = LeastSquaresProblem([(vmec.iota_axis, 0.41, 1),
                                            (vmec.volume, 0.15, 1)])
                fail_val = 1e12
            else:
                # Try a custom failure value
                fail_val = 2.0e30
                prob = LeastSquaresProblem([(vmec.iota_axis, 0.41, 1),
                                            (vmec.volume, 0.15, 1)],
                                           fail=fail_val)

            r00 = vmec.boundary.get_rc(0, 0)
            # The first evaluation should succeed.
            f = prob.f()
            print(f[0], f[1])
            correct_f = [-0.004577338528148067, 2.8313872701632925]
            # Don't worry too much about accuracy here.
            np.testing.assert_allclose(f, correct_f, rtol=0.1)

            # Now set a crazy boundary shape to make VMEC fail. This
            # boundary causes VMEC to hit the max number of iterations
            # without meeting ftol.
            vmec.boundary.set_rc(0, 0, 0.2)
            vmec.need_to_run_code = True
            f = prob.f()
            print(f)
            np.testing.assert_allclose(f, np.full(2, fail_val))

            # Restore a reasonable boundary shape. VMEC should work again.
            vmec.boundary.set_rc(0, 0, r00)
            vmec.need_to_run_code = True
            f = prob.f()
            print(f)
            np.testing.assert_allclose(f, correct_f, rtol=0.1)

            # Now set a self-intersecting boundary shape. This causes VMEC
            # to fail with "ARNORM OR AZNORM EQUAL ZERO IN BCOVAR" before
            # it even starts iterating.
            orig_mode = vmec.boundary.get_rc(1, 3)
            vmec.boundary.set_rc(1, 3, 0.5)
            vmec.need_to_run_code = True
            f = prob.f()
            print(f)
            np.testing.assert_allclose(f, np.full(2, fail_val))

            # Restore a reasonable boundary shape. VMEC should work again.
            vmec.boundary.set_rc(1, 3, orig_mode)
            vmec.need_to_run_code = True
            f = prob.f()
            print(f)
            np.testing.assert_allclose(f, correct_f, rtol=0.1)
Example #17
# iota = p / q
p = -2
q = 5
residue1 = Residue(spec, p, q)
residue2 = Residue(spec, p, q, theta=np.pi)

if mpi.group == 0:
    r1 = residue1.J()
    r2 = residue2.J()
if mpi.proc0_world:
    print("Initial residues:", r1, r2)
#exit(0)

# Define objective function
prob = LeastSquaresProblem([(vmec.aspect, 6, 1), (vmec.iota_axis, 0.385, 1),
                            (vmec.iota_edge, 0.415, 1), (qs, 0, 1),
                            (residue1, 0, 2), (residue2, 0, 2)])

# Check whether we're in the CI. If so, just do a single function
# evaluation rather than a real optimization.
ci = "CI" in os.environ and os.environ['CI'].lower() in ['1', 'true']
if ci:
    obj = prob.objective()
else:
    # Remove the max_nfev=1 in the next line to do a serious optimization:
    least_squares_mpi_solve(prob, mpi=mpi, grad=True, max_nfev=1)

if mpi.group == 0:
    r1 = residue1.J()
    r2 = residue2.J()
if mpi.proc0_world:
    print("Final residues:", r1, r2)
Example #18
    def test_integrated_stellopt_scenarios_2dof(self):
        """
        This script implements the "2DOF_vmecOnly_targetIotaAndVolume" example from
        https://github.com/landreman/stellopt_scenarios

        This optimization problem has two independent variables, representing
        the helical shape of the magnetic axis. The problem also has two
        objectives: the plasma volume and the rotational transform on the
        magnetic axis.

        The resolution in this example (i.e. ns, mpol, and ntor) is somewhat
        lower than in the stellopt_scenarios version of the example, just so
        this example runs fast.

        Details of the optimum and a plot of the objective function landscape
        can be found here:
        https://github.com/landreman/stellopt_scenarios/tree/master/2DOF_vmecOnly_targetIotaAndVolume
        """
        filename = os.path.join(TEST_DIR, '2DOF_targetIotaAndVolume.sp')

        # Initialize SPEC from an input file
        equil = Spec(filename)
        surf = equil.boundary

        # SPEC parameters are all fixed by default, while surface parameters are all non-fixed by default.
        # You can choose which parameters are optimized by setting their 'fixed' attributes.
        surf.all_fixed()
        surf.set_fixed('rc(1,1)', False)
        surf.set_fixed('zs(1,1)', False)

        # Each Target is then equipped with a shift and weight, to become a
        # term in a least-squares objective function.  A list of terms is
        # combined to form a nonlinear-least-squares problem.
        desired_volume = 0.15
        volume_weight = 1
        term1 = (equil.volume, desired_volume, volume_weight)

        desired_iota = -0.41
        iota_weight = 1
        term2 = (equil.iota, desired_iota, iota_weight)

        prob = LeastSquaresProblem([term1, term2])

        # Solve the minimization problem:
        least_squares_serial_solve(prob)

        # The tests here are based on values from the VMEC version in
        # https://github.com/landreman/stellopt_scenarios/tree/master/2DOF_vmecOnly_targetIotaAndVolume
        # Due to this and the fact that we don't yet have iota on axis from SPEC, the tolerances are wide.
        """
        assert np.abs(surf.get_rc(1, 1) - 0.0313066948) < 0.001
        assert np.abs(surf.get_zs(1, 1) - (-0.031232391)) < 0.001
        assert np.abs(equil.volume() - 0.178091) < 0.001
        assert np.abs(surf.volume()  - 0.178091) < 0.001
        assert np.abs(equil.iota() - (-0.4114567)) < 0.001
        assert (prob.objective() - 7.912501330E-04) < 0.2e-4
        """
        self.assertAlmostEqual(surf.get_rc(1, 1), 0.0313066948, places=3)
        self.assertAlmostEqual(surf.get_zs(1, 1), -0.031232391, places=3)
        self.assertAlmostEqual(equil.volume(), 0.178091, places=3)
        self.assertAlmostEqual(surf.volume(), 0.178091, places=3)
        self.assertAlmostEqual(equil.iota(), -0.4114567, places=3)
        self.assertAlmostEqual(prob.objective(), 7.912501330E-04, places=3)
Example #19
    def test_integrated_stellopt_scenarios_1dof(self):
        """
        This script implements the "1DOF_circularCrossSection_varyR0_targetVolume"
        example from
        https://github.com/landreman/stellopt_scenarios

        This optimization problem has one independent variable, representing
        the mean major radius. The problem also has one objective: the plasma
        volume. There is not actually any need to run an equilibrium code like
        SPEC since the objective function can be computed directly from the
        boundary shape. But this problem is a fast way to test the
        optimization infrastructure with SPEC.

        Details of the optimum and a plot of the objective function landscape
        can be found here:
        https://github.com/landreman/stellopt_scenarios/tree/master/1DOF_circularCrossSection_varyR0_targetVolume
        """
        for grad in [True, False]:
            # Start with a default surface.
            equil = Spec()
            surf = equil.boundary

            # Set the initial boundary shape. Here is one way to do it:
            surf.set('rc(0,0)', 1.0)
            # Here is another syntax that works:
            surf.set_rc(0, 1, 0.1)
            surf.set_zs(0, 1, 0.1)

            surf.set_rc(1, 0, 0.1)
            surf.set_zs(1, 0, 0.1)

            surf.set_rc(1, 1, 0)
            surf.set_zs(1, 1, 0)

            # SPEC parameters are all fixed by default, while surface
            # parameters are all non-fixed by default. You can choose
            # which parameters are optimized by setting their 'fixed'
            # attributes.
            surf.all_fixed()
            surf.set_fixed('rc(0,0)', False)

            # Turn off Poincare plots and use low resolution, for speed:
            equil.inputlist.nptrj[0] = 0
            equil.inputlist.lrad[0] = 2

            # Each Target is then equipped with a shift and weight, to become a
            # term in a least-squares objective function
            desired_volume = 0.15
            term1 = (equil.volume, desired_volume, 1)

            # A list of terms is combined to form a nonlinear-least-squares
            # problem.
            prob = LeastSquaresProblem([term1])

            # Check that the problem was set up correctly:
            self.assertEqual(len(prob.dofs.names), 1)
            self.assertEqual(prob.dofs.names[0][:7], 'rc(0,0)')
            np.testing.assert_allclose(prob.x, [1.0])
            self.assertEqual(prob.dofs.all_owners, [equil, surf])
            self.assertEqual(prob.dofs.dof_owners, [surf])

            # Solve the minimization problem:
            least_squares_serial_solve(prob, grad=grad)

            self.assertAlmostEqual(surf.get_rc(0, 0), 0.7599088773175, places=5)
            self.assertAlmostEqual(equil.volume(), 0.15, places=6)
            self.assertAlmostEqual(surf.volume(), 0.15, places=6)
            self.assertLess(np.abs(prob.objective()), 1.0e-15)
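The optimum asserted here can be derived analytically: the boundary is an axisymmetric torus of circular cross-section with minor radius a = 0.1, so its volume is V = 2*pi**2*R0*a**2. Setting V = 0.15 gives R0 = 0.15/(2*pi**2*0.01), which is approximately 0.7599088773, the value checked for rc(0,0).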
Example #20
    def test_stellopt_scenarios_1DOF_circularCrossSection_varyR0_targetVolume(self):
        """
        This script implements the "1DOF_circularCrossSection_varyR0_targetVolume"
        example from
        https://github.com/landreman/stellopt_scenarios

        This optimization problem has one independent variable, representing
        the mean major radius. The problem also has one objective: the plasma
        volume. There is not actually any need to run an equilibrium code like
        VMEC since the objective function can be computed directly from the
        boundary shape. But this problem is a fast way to test the
        optimization infrastructure with VMEC.

        Details of the optimum and a plot of the objective function landscape
        can be found here:
        https://github.com/landreman/stellopt_scenarios/tree/master/1DOF_circularCrossSection_varyR0_targetVolume
        """

        # logging.basicConfig(level=logging.DEBUG)
        # logger = logging.getLogger('[{}]'.format(MPI.COMM_WORLD.Get_rank()) + __name__)
        logger = logging.getLogger(__name__)

        for ngroups in range(1, 1 + MPI.COMM_WORLD.Get_size()):
            for grad in [False, True]:
                # In the next line, we can adjust how many groups the pool of MPI
                # processes is split into.
                mpi = MpiPartition(ngroups=ngroups)
                mpi.write()

                # Start with a default surface, which is axisymmetric with major
                # radius 1 and minor radius 0.1.
                equil = Vmec(mpi=mpi)
                surf = equil.boundary

                # Set the initial boundary shape. Here is one syntax:
                surf.set('rc(0,0)', 1.0)
                # Here is another syntax:
                surf.set_rc(0, 1, 0.1)
                surf.set_zs(0, 1, 0.1)

                surf.set_rc(1, 0, 0.1)
                surf.set_zs(1, 0, 0.1)

                # VMEC parameters are all fixed by default, while surface parameters are all non-fixed by default.
                # You can choose which parameters are optimized by setting their 'fixed' attributes.
                surf.all_fixed()
                surf.set_fixed('rc(0,0)', False)

                # Each Target is then equipped with a shift and weight, to become a
                # term in a least-squares objective function
                desired_volume = 0.15
                prob = LeastSquaresProblem([(equil.volume, desired_volume, 1)])

                # Solve the minimization problem. We can choose whether to use a
                # derivative-free or derivative-based algorithm.
                least_squares_mpi_solve(prob, mpi=mpi, grad=grad)

                # Make sure all procs call VMEC:
                objective = prob.objective()
                if mpi.proc0_world:
                    print("At the optimum,")
                    print(" rc(m=0,n=0) = ", surf.get_rc(0, 0))
                    print(" volume, according to VMEC    = ", equil.volume())
                    print(" volume, according to Surface = ", surf.volume())
                    print(" objective function = ", objective)

                assert np.abs(surf.get_rc(0, 0) - 0.7599088773175) < 1.0e-5
                assert np.abs(equil.volume() - 0.15) < 1.0e-6
                assert np.abs(surf.volume() - 0.15) < 1.0e-6
                assert prob.objective() < 1.0e-15
Example #21
    def test_fd_jac_eval_points(self):
        """
        Check that fd_jac_mpi is evaluating the residual functions at the
        expected locations.
        """
        for ngroups in range(1, 2):
            mpi = MpiPartition(ngroups)
            b = Beale()  # Any Optimizable object with 2 d.o.f.'s will do.

            # First examine 1-sided differences
            prob = LeastSquaresProblem([(b, 0, 1)], diff_method="forward",
                                       abs_step=1e-6, rel_step=1e-2)

            b.set_dofs([0, 0.2])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[0.0, 1e-6, 0],
                                   [0.2, 0.2, 0.202]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([0, 0])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[0.0, 1e-6, 0],
                                   [0.0, 0.0, 1e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([-3, -4])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[-3.0, -2.97, -3.0],
                                   [-4.0, -4.0, -3.96]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([3e-7, 4e-7])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[3e-7, 1.3e-6, 3.0e-7],
                                   [4e-7, 4.0e-7, 1.4e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            # Now examine centered differences
            prob = LeastSquaresProblem([(b, 0, 1)], diff_method="centered",
                                       abs_step=1e-6, rel_step=1e-2)

            b.set_dofs([0, 0.2])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[1e-6, -1e-6, 0, 0],
                                   [0.2, 0.2, 0.202, 0.198]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([0, 0])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[1e-6, -1e-6, 0, 0],
                                   [0, 0, 1e-6, -1e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([-3, -4])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[-2.97, -3.03, -3.00, -3.00],
                                   [-4.00, -4.00, -3.96, -4.04]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([3e-7, 4e-7])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[1.3e-6, -0.7e-6, 3.0e-7, 3.00e-7],
                                   [4.0e-7, 4.00e-7, 1.4e-6, -0.6e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)
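The xs_correct arrays are consistent with a per-variable step of max(abs_step, rel_step*abs(x)): forward differences evaluate the base point plus one shifted point per degree of freedom, while centered differences use a plus and a minus shift for each. A minimal sketch inferred from those expected values (not necessarily the exact logic of fd_jac_mpi):

import numpy as np

def fd_eval_points(x0, abs_step, rel_step, diff_method="forward"):
    # Columns are the points at which the residuals would be evaluated.
    x0 = np.asarray(x0, dtype=float)
    steps = np.maximum(abs_step, rel_step * np.abs(x0))  # step for each dof
    points = [] if diff_method == "centered" else [x0.copy()]
    for j, h in enumerate(steps):
        xp = x0.copy()
        xp[j] += h
        points.append(xp)
        if diff_method == "centered":
            xm = x0.copy()
            xm[j] -= h
            points.append(xm)
    return np.array(points).T

# Reproduces the first forward-difference case above:
print(fd_eval_points([0, 0.2], 1e-6, 1e-2))  # columns [0, 0.2], [1e-6, 0.2], [0, 0.202]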
Example #22
# Define some Target objects that depend on Parameter objects. In the
# future these functions would involve codes like VMEC, but for now we
# just use the functions f(x) = x.
iden1 = Identity()
iden2 = Identity()
iden3 = Identity()

# Parameters are all not fixed by default, meaning they will not be
# optimized.  You can choose to exclude any subset of the parameters
# from the space of independent variables by setting their 'fixed'
# property to True.
#iden1.fixed[0] = True
#iden2.fixed[0] = True
#iden3.fixed[0] = True

# Each Target is then equipped with a shift and weight, to become a
# term in a least-squares objective function
term1 = (iden1, 1, 1)
term2 = (iden2, 2, 2)
term3 = (iden3, 3, 3)

# A list of terms is combined to form a nonlinear-least-squares problem.
prob = LeastSquaresProblem([term1, term2, term3])

# Solve the minimization problem:
least_squares_serial_solve(prob)

print("An optimum was found at x=", iden1.x, ", y=", iden2.x, \
      ", z=", iden3.x)
print("The minimum value of the objective function is ", prob.objective())
Example #23
    def test_supply_tuples(self):
        """
        Test basic usage
        """
        # Objective function f(x) = ((x - 3) / 2) ** 2
        iden1 = Identity()
        term1 = (iden1.J, 3, 0.25)
        prob = LeastSquaresProblem([term1])
        self.assertAlmostEqual(prob.objective(), 2.25)
        self.assertAlmostEqual(prob.objective(), sum(t.f_out() for t in prob.terms))
        self.assertEqual(len(prob.dofs.f()), 1)
        self.assertAlmostEqual(prob.dofs.f()[0], 0)
        self.assertEqual(len(prob.f()), 1)
        self.assertAlmostEqual(prob.f()[0], -1.5)
        self.assertAlmostEqual(prob.objective_from_shifted_f(prob.f()), 2.25)
        self.assertAlmostEqual(prob.objective_from_unshifted_f(prob.dofs.f()), 2.25)
        iden1.set_dofs([10])
        self.assertAlmostEqual(prob.objective(), 12.25)
        self.assertAlmostEqual(prob.objective(), sum(t.f_out() for t in prob.terms))
        self.assertAlmostEqual(prob.objective_from_shifted_f(prob.f()), 12.25)
        self.assertAlmostEqual(prob.objective_from_unshifted_f(prob.dofs.f()), 12.25)
        self.assertAlmostEqual(prob.objective([0]), 2.25)
        self.assertAlmostEqual(prob.objective([10]), 12.25)
        self.assertEqual(prob.dofs.all_owners, [iden1])
        self.assertEqual(prob.dofs.dof_owners, [iden1])

        # Objective function
        # f(x,y) = ((x - 3) / 2) ** 2 + ((y + 4) / 5) ** 2
        iden2 = Identity()
        term2 = (iden2.J, -4, 0.04)
        prob = LeastSquaresProblem([term1, term2])
        self.assertAlmostEqual(prob.objective(), 12.89)
        self.assertAlmostEqual(prob.objective(), sum(t.f_out() for t in prob.terms))
        self.assertEqual(len(prob.f()), 2)
        self.assertAlmostEqual(prob.objective_from_shifted_f(prob.f()), 12.89)
        self.assertAlmostEqual(prob.objective_from_unshifted_f(prob.dofs.f()), 12.89)
        iden1.set_dofs([5])
        iden2.set_dofs([-7])
        self.assertAlmostEqual(prob.objective(), 1.36)
        self.assertAlmostEqual(prob.objective(), sum(t.f_out() for t in prob.terms))
        self.assertEqual(len(prob.f()), 2)
        self.assertAlmostEqual(prob.objective_from_shifted_f(prob.f()), 1.36)
        self.assertAlmostEqual(prob.objective_from_unshifted_f(prob.dofs.f()), 1.36)
        self.assertAlmostEqual(prob.objective([10, 0]), 12.89)
        self.assertAlmostEqual(prob.objective([5, -7]), 1.36)
        self.assertEqual(prob.dofs.dof_owners, [iden1, iden2])
        self.assertEqual(prob.dofs.all_owners, [iden1, iden2])
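The numeric values asserted above follow from each tuple contributing weight*(target - goal)**2: with x = 10 and y = 0 the objective is 0.25*(10 - 3)**2 + 0.04*(0 + 4)**2 = 12.25 + 0.64 = 12.89, and with x = 5, y = -7 it is 0.25*(5 - 3)**2 + 0.04*(-7 + 4)**2 = 1.0 + 0.36 = 1.36.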
Example #24
initial_r1 = residue1.J()
initial_r2 = residue2.J()
logging.info(f"Initial residues: {initial_r1}, {initial_r2}")
#exit(0)

# There is another island chain we'd like to control at iota = -12/11:
p = -12
q = 11
s_guess = -0.1

residue3 = Residue(s, p, q, s_guess=s_guess)
residue4 = Residue(s, p, q, s_guess=s_guess, theta=np.pi)

# Objective function is \sum_j residue_j ** 2
prob = LeastSquaresProblem([(residue1, 0, 1), (residue2, 0, 1),
                            (residue3, 0, 1), (residue4, 0, 1)])

# Solve the optimization problem:
least_squares_mpi_solve(prob, mpi=mpi, grad=True)

final_r1 = residue1.J()
final_r2 = residue2.J()
expected_solution = np.array([1.1076171888771095e-03, 4.5277618989828059e-04])
if mpi.proc0_world:
    logging.info(
        f"Final state vector: zs(6,1)={prob.x[0]}, zs(6,2)={prob.x[1]}")
    logging.info(f"Expected state vector: {expected_solution}")
    logging.info(
        f"Difference from expected solution: {prob.x - expected_solution}")
    logging.info(f"Final residues: {final_r1}, {final_r2}")
Example #25
# (The opening of this example is truncated; it presumably constructs the Vmec
# object from an input file. The directory expression below is an assumption.)
vmec = Vmec(os.path.join(os.path.dirname(__file__), 'inputs',
                         'input.nfp4_QH_warm_start'),
            mpi=mpi)

# Define parameter space:
surf = vmec.boundary
surf.all_fixed()
max_mode = 2
surf.fixed_range(mmin=0,
                 mmax=max_mode,
                 nmin=-max_mode,
                 nmax=max_mode,
                 fixed=False)
surf.set_fixed("rc(0,0)")  # Major radius

# Configure quasisymmetry objective:
qs = Quasisymmetry(
    Boozer(vmec),
    0.5,  # Radius to target
    1,
    1)  # (M, N) you want in |B|

# Define objective function
prob = LeastSquaresProblem([(vmec.aspect, 7, 1), (qs, 0, 1)],
                           rel_step=1e-3,
                           abs_step=1e-5)

# To keep this example fast, we stop after the first function
# evaluation. For a "real" optimization, remove the max_nfev
# parameter.
least_squares_mpi_solve(prob, mpi, grad=True, max_nfev=1)