Code example #1
File: test_mpi.py Project: zhisong/simsopt
    def test_ngroups_max(self):
        """
        Verify that all quantities make sense when ngroups >= nprocs_world
        and ngroups = None.
        """
        rank_world = MPI.COMM_WORLD.Get_rank()
        nprocs = MPI.COMM_WORLD.Get_size()

        for shift in range(-1, 3):
            if shift == -1:
                ngroups = None
            else:
                ngroups = nprocs + shift

            m = MpiPartition(ngroups=ngroups)

            self.assertEqual(m.ngroups, nprocs)

            self.assertEqual(m.rank_world, rank_world)
            self.assertEqual(m.rank_groups, 0)
            self.assertEqual(m.rank_leaders, rank_world)

            self.assertEqual(m.nprocs_world, nprocs)
            self.assertEqual(m.nprocs_groups, 1)
            self.assertEqual(m.nprocs_leaders, nprocs)

            self.assertEqual(m.proc0_world, rank_world == 0)
            self.assertTrue(m.proc0_groups)
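A minimal standalone sketch of the behavior exercised above (assuming simsopt and mpi4py are installed; run it under mpiexec with a few processes): requesting more groups than processes, or passing None, ends up with one process per group.

# sketch_ngroups_max.py -- illustrative only, not part of the simsopt test suite
from mpi4py import MPI
from simsopt.util.mpi import MpiPartition

nprocs = MPI.COMM_WORLD.Get_size()

for requested in [None, nprocs, nprocs + 2]:
    m = MpiPartition(ngroups=requested)
    if m.proc0_world:
        # ngroups is clamped to nprocs, so every group holds a single process
        # and every process is a group leader.
        print(f"requested={requested}  ngroups={m.ngroups}  "
              f"nprocs_groups={m.nprocs_groups}  nprocs_leaders={m.nprocs_leaders}")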
Code example #2
File: test_mpi.py Project: rogeriojorge/simsopt
    def test_parallel_optimization_with_grad(self):
        """
        Test a full least-squares optimization.
        """
        for ngroups in range(1, 4):
            for abs_step in [0, 1.0e-7]:
                # Only try rel_step=0 if abs_step is positive:
                rel_steps = [0, 1.0e-7]
                if abs_step == 0:
                    rel_steps = [1.0e-7]

                for rel_step in rel_steps:
                    for diff_method in ["forward", "centered"]:
                        logger.debug(f'ngroups={ngroups} abs_step={abs_step} ' \
                                     f'rel_step={rel_step} diff_method={diff_method}')
                        mpi = MpiPartition(ngroups=ngroups)
                        o = TestFunction3(mpi.comm_groups)
                        term1 = (o.f0, 0, 1)
                        term2 = (o.f1, 0, 1)
                        prob = LeastSquaresProblem([term1, term2], diff_method=diff_method,
                                                   abs_step=abs_step, rel_step=rel_step)
                        # Set initial condition different from 0,
                        # because otherwise abs_step=0 causes step
                        # size to be 0.
                        prob.x = [-0.1, 0.2]
                        least_squares_mpi_solve(prob, mpi)
                        self.assertAlmostEqual(prob.x[0], 1)
                        self.assertAlmostEqual(prob.x[1], 1)
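The comment about abs_step=0 reflects the finite-difference step rule implied by the evaluation points checked in test_fd_jac_eval_points further down: the step for each parameter appears to be max(abs_step, rel_step*|x|), so with abs_step=0 and an initial condition of 0 the step would collapse to zero. A small illustrative sketch of that rule (an inference from the test data, not simsopt's actual source):

import numpy as np

def fd_step(x, abs_step, rel_step):
    # Relative step with an absolute floor, as suggested by the xs_correct
    # arrays in test_fd_jac_eval_points below.
    return np.maximum(abs_step, rel_step * np.abs(x))

print(fd_step(np.array([0.0, 0.2]), abs_step=1e-6, rel_step=1e-2))  # [1.e-06 2.e-03]
print(fd_step(np.array([0.0, 0.0]), abs_step=0.0, rel_step=1e-7))   # [0. 0.] -- hence the nonzero x above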
Code example #3
File: test_mpi.py Project: zhisong/simsopt
    def test_ngroups_scan(self):
        """
        Verify that all quantities make sense when ngroups >= nprocs_world
        and ngroups = None.
        """
        rank_world = MPI.COMM_WORLD.Get_rank()
        nprocs = MPI.COMM_WORLD.Get_size()

        for ngroups in range(-1, nprocs + 3):
            m = MpiPartition(ngroups=ngroups)

            self.assertGreaterEqual(m.ngroups, 1)
            self.assertLessEqual(m.ngroups, nprocs)

            self.assertEqual(m.rank_world, rank_world)
            self.assertGreaterEqual(m.rank_groups, 0)
            self.assertLess(m.rank_groups, nprocs)

            self.assertEqual(m.nprocs_world, nprocs)
            self.assertGreaterEqual(m.nprocs_groups, 1)
            self.assertLessEqual(m.nprocs_groups, nprocs)

            self.assertEqual(m.proc0_world, rank_world == 0)

            if m.proc0_groups:
                self.assertGreaterEqual(m.rank_leaders, 0)
                self.assertLessEqual(m.rank_leaders, nprocs)
                self.assertGreaterEqual(m.nprocs_leaders, 1)
                self.assertLessEqual(m.nprocs_leaders, nprocs)
            else:
                self.assertEqual(m.rank_leaders, -1)
                self.assertEqual(m.nprocs_leaders, -1)

            # The sizes of the worker groups should be relatively
            # even, with a difference of no more than 1 between the
            # largest and the smallest.
            if m.proc0_world:
                group_sizes = np.zeros(nprocs, dtype='i')
                group_sizes[0] = m.nprocs_groups
                for j in range(1, nprocs):
                    group_sizes[j] = m.comm_world.recv(tag=j)
                print('group_sizes:', group_sizes)
                self.assertLessEqual(
                    np.max(group_sizes) - np.min(group_sizes), 1)
            else:
                m.comm_world.send(m.nprocs_groups, 0, tag=m.rank_world)
        m.write()
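The "difference of no more than 1" property asserted above is exactly what an even split of nprocs processes over ngroups groups gives; a short sketch of that arithmetic (illustrative, not simsopt's own partitioning code):

def even_group_sizes(nprocs, ngroups):
    # Distribute nprocs processes over ngroups groups as evenly as possible;
    # the first `remainder` groups receive one extra process.
    base, remainder = divmod(nprocs, ngroups)
    return [base + 1 if j < remainder else base for j in range(ngroups)]

sizes = even_group_sizes(10, 4)
print(sizes)                    # [3, 3, 2, 2]
print(max(sizes) - min(sizes))  # <= 1, matching the assertion in the test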
Code example #4
File: test_mpi.py Project: zhisong/simsopt
    def test_ngroups1(self):
        """
        Verify that all quantities make sense when ngroups = 1.
        """
        rank_world = MPI.COMM_WORLD.Get_rank()
        nprocs = MPI.COMM_WORLD.Get_size()
        m = MpiPartition(ngroups=1)

        self.assertEqual(m.ngroups, 1)

        self.assertEqual(m.rank_world, rank_world)
        self.assertEqual(m.rank_groups, rank_world)
        self.assertEqual(m.rank_leaders, 0 if rank_world == 0 else -1)

        self.assertEqual(m.nprocs_world, nprocs)
        self.assertEqual(m.nprocs_groups, nprocs)
        self.assertEqual(m.nprocs_leaders, 1 if rank_world == 0 else -1)

        self.assertEqual(m.proc0_world, rank_world == 0)
        self.assertEqual(m.proc0_groups, rank_world == 0)
        m.write()
Code example #5
File: test_mpi.py Project: rogeriojorge/simsopt
    def test_parallel_optimization_without_grad(self):
        """
        Test a full least-squares optimization.
        """
        for ngroups in range(1, 4):
            mpi = MpiPartition(ngroups=ngroups)
            o = TestFunction3(mpi.comm_groups)
            term1 = (o.f0, 0, 1)
            term2 = (o.f1, 0, 1)
            prob = LeastSquaresProblem([term1, term2])
            least_squares_mpi_solve(prob, mpi, grad=False)
            self.assertAlmostEqual(prob.x[0], 1)
            self.assertAlmostEqual(prob.x[1], 1)
Code example #6
File: test_mpi.py Project: rogeriojorge/simsopt
    def test_fd_jac_abs_rel_steps(self):
        """
        Confirm that the parallel finite difference gradient gives nearly
        the same result regardless of whether absolute or relative
        steps are used.
        """
        rtol = 1e-6
        atol = 1e-6
        for ngroups in range(1, 4):
            for abs_step in [0, 1.0e-7]:
                # Only try rel_step=0 if abs_step is positive:
                rel_steps = [0, 1.0e-7]
                if abs_step == 0:
                    rel_steps = [1.0e-7]

                for rel_step in rel_steps:
                    for diff_method in ["forward", "centered"]:
                        logger.debug(f'ngroups={ngroups} abs_step={abs_step} ' \
                                     f'rel_step={rel_step} diff_method={diff_method}')
                        mpi = MpiPartition(ngroups=ngroups)
                        o = TestFunction1()
                        d = Dofs([o], diff_method=diff_method,
                                 abs_step=abs_step, rel_step=rel_step)
                        logger.debug('About to do worker loop 1')
                        jac, xs, evals = fd_jac_mpi(d, mpi)
                        jac_reference = np.array([[5.865175337071982e-01, -6.010834789627051e-01, 2.250910093037906e-01]])
                        if mpi.proc0_world:
                            np.testing.assert_allclose(jac, jac_reference, rtol=rtol, atol=atol)
                        # While we're at it, also test the serial FD Jacobian:
                        jac = d.fd_jac()
                        np.testing.assert_allclose(jac, jac_reference, rtol=rtol, atol=atol)

                        # Now try a case with different nparams and nfuncs.
                        o = TestFunction2()
                        d = Dofs([o.f0, o.f1, o.f2, o.f3], diff_method=diff_method,
                                 abs_step=abs_step, rel_step=rel_step)
                        logger.debug('About to do worker loop 2')
                        jac, xs, evals = fd_jac_mpi(d, mpi)
                        jac_reference = np.array([[8.657714037352271e-01, -8.872725151820582e-01],
                                                  [2.353410674116319e+00, -2.411856754314101e+00],
                                                  [6.397233469623842e+00, -6.556106388888594e+00],
                                                  [1.738948351093228e+01, -1.782134486205678e+01]])
                        if mpi.proc0_world:
                            np.testing.assert_allclose(jac, jac_reference, rtol=rtol, atol=atol)
                        # While we're at it, also test the serial FD Jacobian:
                        jac = d.fd_jac()
                        np.testing.assert_allclose(jac, jac_reference, rtol=rtol, atol=atol)
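The tolerances of 1e-6 here are much looser than the 1e-13 used in test_fd_jac below, because this test compares Jacobians computed with different step choices and differencing schemes, and those legitimately differ at the truncation-error level: forward differences carry an O(h) error while centered differences carry O(h^2). A quick standalone illustration on a known derivative (not tied to simsopt):

import numpy as np

f, x0, h = np.sin, 1.0, 1.0e-7
exact = np.cos(x0)

forward = (f(x0 + h) - f(x0)) / h
centered = (f(x0 + h) - f(x0 - h)) / (2 * h)

print(abs(forward - exact))   # about h/2 * |f''(x0)|, a few times 1e-8
print(abs(centered - exact))  # far smaller; dominated by roundoff at this h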
Code example #7
    def test_parallel_optimization(self):
        """
        Test a full least-squares optimization.
        """
        rank_world = MPI.COMM_WORLD.Get_rank()
        logger.info(f"rank world is {rank_world}")
        nprocs = MPI.COMM_WORLD.Get_size()
        for ngroups in range(1, 4):
            #for grad in [True, False]:
            mpi = MpiPartition(ngroups=ngroups)
            o = TestFunction3(mpi.comm_groups)
            term1 = (o.f0, 0, 1)
            term2 = (o.f1, 0, 1)
            prob = LeastSquaresProblem.from_tuples([term1, term2])
            least_squares_mpi_solve(prob, mpi)  # , grad=grad)
            self.assertAlmostEqual(prob.full_x[0], 1)
            self.assertAlmostEqual(prob.full_x[1], 1)
Code example #8
from simsopt.mhd.spec import Residue
from simsopt.objectives.least_squares import LeastSquaresProblem
from simsopt.solve.mpi import least_squares_mpi_solve
"""
In this example, we simultaneously optimize for quasisymmetry and
the elimination of magnetic islands, with both VMEC and SPEC called in
the objective function.

Below, the argument max_nfev=1 in least_squares_mpi_solve causes the
optimization to stop after only a single iteration, so this example
does not take too long to run. For a real optimization, that argument
should be removed.
"""

log()
mpi = MpiPartition()
mpi.write()

vmec_filename = os.path.join(os.path.dirname(__file__), 'inputs',
                             'input.nfp2_QA_iota0.4_withIslands')
vmec = Vmec(vmec_filename, mpi=mpi)
surf = vmec.boundary

spec_filename = os.path.join(os.path.dirname(__file__), 'inputs',
                             'nfp2_QA_iota0.4_withIslands.sp')
spec = Spec(spec_filename, mpi=mpi)

# This next line is where the boundary surface objects of VMEC and
# SPEC are linked:
spec.boundary = surf
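The docstring's remark about max_nfev can be made concrete with a hedged fragment of how this (truncated) script would continue; the problem construction is not shown in the excerpt, so prob below is only a placeholder:

# ... after assembling the quasisymmetry and island (Residue) terms into a
# LeastSquaresProblem named `prob` (not shown in this excerpt) ...

# max_nfev=1 stops the optimizer after a single iteration so the example runs
# quickly, as the docstring explains; remove the argument for a real run.
least_squares_mpi_solve(prob, mpi, max_nfev=1)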
Code example #9
def mpi_solve_1group(prob, **kwargs):
    least_squares_mpi_solve(prob, MpiPartition(ngroups=1), **kwargs)
Code example #10
magnetic axis.

The resolution in this example (i.e. ns, mpol, and ntor) is somewhat
lower than in the stellopt_scenarios version of the example, just so
this example runs fast.

Details of the optimum and a plot of the objective function landscape
can be found here:
https://github.com/landreman/stellopt_scenarios/tree/master/2DOF_vmecOnly_targetIotaAndVolume
"""

# This next line turns on detailed logging. It can be commented out if
# you do not want such verbose output.
log(logging.INFO)

mpi = MpiPartition()

# Initialize VMEC from an input file:
vmec = Vmec(os.path.join(os.path.dirname(__file__), 'inputs',
                         'input.2DOF_vmecOnly_targetIotaAndVolume'),
            mpi=mpi)
surf = vmec.boundary

# Initialize SPEC from an input file:
spec = Spec(os.path.join(os.path.dirname(__file__), 'inputs',
                         '2DOF_targetIotaAndVolume.sp'),
            mpi=mpi)

# Set the SPEC boundary to be the same object as the VMEC boundary!
spec.boundary = surf
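Because surf was taken from vmec.boundary and then assigned to spec.boundary, the two equilibrium objects share one Surface instance, so any change to its degrees of freedom is seen by both codes. A one-line sanity check of that linkage (assuming the boundary attribute stores the object without wrapping it):

# Both equilibria now reference the identical boundary object:
assert spec.boundary is vmec.boundary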
Code example #11
#!/usr/bin/env python

from simsopt.util.mpi import MpiPartition
from simsopt.mhd import Vmec, Boozer, Quasisymmetry
from simsopt.objectives.least_squares import LeastSquaresProblem
from simsopt.solve.mpi import least_squares_mpi_solve
import os
"""
Optimize for quasi-helical symmetry (M=1, N=1) at a given radius.
"""

# This problem has 24 degrees of freedom, so we can use 24 + 1 = 25
# concurrent function evaluations for 1-sided finite difference
# gradients.
mpi = MpiPartition(25)

vmec = Vmec(os.path.join(os.path.dirname(__file__), 'inputs',
                         'input.nfp4_QH_warm_start'),
            mpi=mpi)

# Define parameter space:
surf = vmec.boundary
surf.all_fixed()
max_mode = 2
surf.fixed_range(mmin=0,
                 mmax=max_mode,
                 nmin=-max_mode,
                 nmax=max_mode,
                 fixed=False)
surf.set_fixed("rc(0,0)")  # Major radius
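The "24 + 1 = 25" comment follows from counting the non-fixed boundary modes and from the number of evaluations a one-sided finite-difference Jacobian needs (one baseline plus one per degree of freedom). A small sketch of that arithmetic, assuming the usual stellarator-symmetric convention that m=0 keeps only n>=0 for rc and n>=1 for zs:

def count_boundary_dofs(max_mode):
    # rc: m=0 contributes n=0..max_mode; each m>=1 contributes n=-max_mode..max_mode
    n_rc = (max_mode + 1) + max_mode * (2 * max_mode + 1)
    # zs: m=0 contributes n=1..max_mode; each m>=1 contributes n=-max_mode..max_mode
    n_zs = max_mode + max_mode * (2 * max_mode + 1)
    return n_rc + n_zs - 1  # minus rc(0,0), which is kept fixed above

ndofs = count_boundary_dofs(2)
print(ndofs)      # 24
print(ndofs + 1)  # 25 concurrent evaluations for 1-sided differences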
Code example #12
File: test_mpi.py Project: rogeriojorge/simsopt
    def test_fd_jac_eval_points(self):
        """
        Check that fd_jac_mpi is evaluating the residual functions at the
        expected locations.
        """
        for ngroups in range(1, 2):
            mpi = MpiPartition(ngroups)
            b = Beale()  # Any Optimizable object with 2 d.o.f.'s will do.

            # First examine 1-sided differences
            prob = LeastSquaresProblem([(b, 0, 1)], diff_method="forward",
                                       abs_step=1e-6, rel_step=1e-2)

            b.set_dofs([0, 0.2])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[0.0, 1e-6, 0],
                                   [0.2, 0.2, 0.202]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([0, 0])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[0.0, 1e-6, 0],
                                   [0.0, 0.0, 1e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([-3, -4])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[-3.0, -2.97, -3.0],
                                   [-4.0, -4.0, -3.96]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([3e-7, 4e-7])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[3e-7, 1.3e-6, 3.0e-7],
                                   [4e-7, 4.0e-7, 1.4e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            # Now examine centered differences
            prob = LeastSquaresProblem([(b, 0, 1)], diff_method="centered",
                                       abs_step=1e-6, rel_step=1e-2)

            b.set_dofs([0, 0.2])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[1e-6, -1e-6, 0, 0],
                                   [0.2, 0.2, 0.202, 0.198]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([0, 0])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[1e-6, -1e-6, 0, 0],
                                   [0, 0, 1e-6, -1e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([-3, -4])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[-2.97, -3.03, -3.00, -3.00],
                                   [-4.00, -4.00, -3.96, -4.04]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)

            b.set_dofs([3e-7, 4e-7])
            jac, xs, evals = fd_jac_mpi(prob.dofs, mpi)
            xs_correct = np.array([[1.3e-6, -0.7e-6, 3.0e-7, 3.00e-7],
                                   [4.0e-7, 4.00e-7, 1.4e-6, -0.6e-6]])
            if mpi.proc0_groups:
                np.testing.assert_allclose(xs, xs_correct)
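Each column of the xs_correct arrays above is one evaluation point: forward differences include the unperturbed point plus one shifted point per parameter, while centered differences use a plus and minus shift per parameter and omit the base point. The first forward-difference case can be reproduced with the max(abs_step, rel_step*|x|) step rule (illustration only, inferred from these test values):

import numpy as np

x0 = np.array([0.0, 0.2])
abs_step, rel_step = 1e-6, 1e-2
steps = np.maximum(abs_step, rel_step * np.abs(x0))   # [1e-6, 2e-3]

# Column 0 is the base point; column j+1 shifts parameter j by its step.
xs = np.column_stack([x0] + [x0 + steps[j] * np.eye(2)[j] for j in range(2)])
print(xs)  # columns (0, 0.2), (1e-6, 0.2), (0, 0.202), matching xs_correct for b.set_dofs([0, 0.2])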
Code example #13
File: test_mpi.py Project: rogeriojorge/simsopt
    def test_fd_jac(self):
        """
        Test the parallel finite-difference Jacobian calculation.
        """
        abs_step = 1.0e-7
        rel_step = 0
        for ngroups in range(1, 4):
            logger.debug('ngroups={}'.format(ngroups))
            mpi = MpiPartition(ngroups=ngroups)
            o = TestFunction1()
            d = Dofs([o], diff_method="forward", abs_step=abs_step, rel_step=rel_step)
            logger.debug('About to do worker loop 1')
            jac, xs, evals = fd_jac_mpi(d, mpi)
            jac_reference = np.array([[5.865176283537110e-01, -6.010834349701177e-01, 2.250910244305793e-01]])
            if mpi.proc0_world:
                np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)
            # While we're at it, also test the serial FD Jacobian:
            o.set_dofs(np.array([1.2, 0.9, -0.4]))
            jac = d.fd_jac()
            np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)

            # Repeat with centered differences
            o.set_dofs(np.array([1.2, 0.9, -0.4]))
            logger.debug('About to do worker loop 2')
            d.diff_method = "centered"
            jac, xs, evals = fd_jac_mpi(d, mpi)
            jac_reference = np.array([[5.865175337071982e-01, -6.010834789627051e-01, 2.250910093037906e-01]])
            if mpi.proc0_world:
                np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)
            # While we're at it, also test the serial FD Jacobian:
            o.set_dofs(np.array([1.2, 0.9, -0.4]))
            jac = d.fd_jac()
            np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)

            # Now try a case with different nparams and nfuncs.
            o = TestFunction2()
            d = Dofs([o.f0, o.f1, o.f2, o.f3], diff_method="forward",
                     abs_step=abs_step, rel_step=rel_step)
            logger.debug('About to do worker loop 3')
            jac, xs, evals = fd_jac_mpi(d, mpi)
            jac_reference = np.array([[8.657715439008840e-01, -8.872724499564555e-01],
                                      [2.353411054922816e+00, -2.411856577788640e+00],
                                      [6.397234502131255e+00, -6.556105911492693e+00],
                                      [1.738948636642590e+01, -1.782134355643450e+01]])
            if mpi.proc0_world:
                np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)
            # While we're at it, also test the serial FD Jacobian:
            o.set_dofs(np.array([1.2, 0.9]))
            jac = d.fd_jac()
            np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)

            # Repeat with centered differences
            o.set_dofs(np.array([1.2, 0.9]))
            d.diff_method = "centered"
            logger.debug('About to do worker loop 4')
            jac, xs, evals = fd_jac_mpi(d, mpi)
            jac_reference = np.array([[8.657714037352271e-01, -8.872725151820582e-01],
                                      [2.353410674116319e+00, -2.411856754314101e+00],
                                      [6.397233469623842e+00, -6.556106388888594e+00],
                                      [1.738948351093228e+01, -1.782134486205678e+01]])
            if mpi.proc0_world:
                np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)
            # While we're at it, also test the serial FD Jacobian:
            o.set_dofs(np.array([1.2, 0.9]))
            jac = d.fd_jac()
            np.testing.assert_allclose(jac, jac_reference, rtol=1e-13, atol=1e-13)
Code example #14
The resolution in this example (i.e. ns, mpol, and ntor) is somewhat
lower than in the stellopt_scenarios version of the example, just so
this example runs fast.

Details of the optimum and a plot of the objective function landscape
can be found here:
https://github.com/landreman/stellopt_scenarios/tree/master/2DOF_vmecOnly_targetIotaAndVolume
"""

# This next line turns on detailed logging. It can be commented out if
# you do not want such verbose output.
log()

# In the next line, we can adjust how many groups the pool of MPI
# processes is split into.
mpi = MpiPartition(ngroups=3)
mpi.write()

# Initialize VMEC from an input file:
equil = Vmec(
    os.path.join(os.path.dirname(__file__), 'inputs',
                 'input.2DOF_vmecOnly_targetIotaAndVolume'), mpi)
surf = equil.boundary

# VMEC parameters are all fixed by default, while surface parameters
# are all non-fixed by default.  You can choose which parameters are
# optimized by setting their 'fixed' attributes.
surf.all_fixed()
surf.set_fixed('rc(1,1)', False)
surf.set_fixed('zs(1,1)', False)
Code example #15
    def test_stellopt_scenarios_1DOF_circularCrossSection_varyR0_targetVolume(self):
        """
        This script implements the "1DOF_circularCrossSection_varyR0_targetVolume"
        example from
        https://github.com/landreman/stellopt_scenarios

        This optimization problem has one independent variable, representing
        the mean major radius. The problem also has one objective: the plasma
        volume. There is not actually any need to run an equilibrium code like
        VMEC since the objective function can be computed directly from the
        boundary shape. But this problem is a fast way to test the
        optimization infrastructure with VMEC.

        Details of the optimum and a plot of the objective function landscape
        can be found here:
        https://github.com/landreman/stellopt_scenarios/tree/master/1DOF_circularCrossSection_varyR0_targetVolume
        """

        # logging.basicConfig(level=logging.DEBUG)
        # logger = logging.getLogger('[{}]'.format(MPI.COMM_WORLD.Get_rank()) + __name__)
        logger = logging.getLogger(__name__)

        for ngroups in range(1, 1 + MPI.COMM_WORLD.Get_size()):
            for grad in [False, True]:
                # In the next line, we can adjust how many groups the pool of MPI
                # processes is split into.
                mpi = MpiPartition(ngroups=ngroups)
                mpi.write()

                # Start with a default surface, which is axisymmetric with major
                # radius 1 and minor radius 0.1.
                equil = Vmec(mpi=mpi)
                surf = equil.boundary

                # Set the initial boundary shape. Here is one syntax:
                surf.set('rc(0,0)', 1.0)
                # Here is another syntax:
                surf.set_rc(0, 1, 0.1)
                surf.set_zs(0, 1, 0.1)

                surf.set_rc(1, 0, 0.1)
                surf.set_zs(1, 0, 0.1)

                # VMEC parameters are all fixed by default, while surface parameters are all non-fixed by default.
                # You can choose which parameters are optimized by setting their 'fixed' attributes.
                surf.all_fixed()
                surf.set_fixed('rc(0,0)', False)

                # Each Target is then equipped with a shift and weight, to become a
                # term in a least-squares objective function
                desired_volume = 0.15
                prob = LeastSquaresProblem([(equil.volume, desired_volume, 1)])

                # Solve the minimization problem. We can choose whether to use a
                # derivative-free or derivative-based algorithm.
                least_squares_mpi_solve(prob, mpi=mpi, grad=grad)

                # Make sure all procs call VMEC:
                objective = prob.objective()
                if mpi.proc0_world:
                    print("At the optimum,")
                    print(" rc(m=0,n=0) = ", surf.get_rc(0, 0))
                    print(" volume, according to VMEC    = ", equil.volume())
                    print(" volume, according to Surface = ", surf.volume())
                    print(" objective function = ", objective)

                assert np.abs(surf.get_rc(0, 0) - 0.7599088773175) < 1.0e-5
                assert np.abs(equil.volume() - 0.15) < 1.0e-6
                assert np.abs(surf.volume() - 0.15) < 1.0e-6
                assert prob.objective() < 1.0e-15
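The optimum asserted at the end follows from elementary geometry: the boundary is a circular cross-section of minor radius a = 0.1 swept around the torus (the n=1 axis excursion set above averages out over the toroidal angle), so the enclosed volume is V = 2*pi^2*R0*a^2 and the target volume fixes rc(0,0) directly. A quick check using that standard torus-volume formula (not simsopt):

import numpy as np

a = 0.1                    # minor radius from rc(1,0) and zs(1,0) above
desired_volume = 0.15
R0 = desired_volume / (2 * np.pi**2 * a**2)
print(R0)                  # 0.75990887..., matching the asserted rc(0,0) optimum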
Code example #16
    def __init__(self, filename=None, mpi=None):
        """
        Constructor
        """
        if filename is None:
            # Read default input file, which should be in the same
            # directory as this file:
            filename = os.path.join(os.path.dirname(__file__), 'input.default')
            logger.info("Initializing a VMEC object from defaults in " \
                            + filename)
        else:
            logger.info("Initializing a VMEC object from file: " + filename)

        # Get MPI communicator:
        if mpi is None:
            self.mpi = MpiPartition(ngroups=1)
        else:
            self.mpi = mpi
        comm = self.mpi.comm_groups
        self.fcomm = comm.py2f()

        self.VMEC = VMEC(input_file=filename, comm=self.fcomm, \
                             verbose=MPI.COMM_WORLD.rank==0, group=self.mpi.group)
        objstr = " for Vmec " + str(hex(id(self)))
        # nfp and stelsym are initialized by the Equilibrium constructor:
        #Equilibrium.__init__(self)

        # For each VMEC input parameter in VMEC's fortran modules, create an attribute
        vi = self.VMEC.indata  # Shorthand
        self.nfp = vi.nfp
        self.stelsym = not vi.lasym
        # It probably makes sense for a vmec object to have mpol and
        # ntor attributes independent of the boundary, since the
        # boundary may be a kind of surface that does not use the same
        # Fourier representation. But if the surface is a
        # SurfaceRZFourier, how then should the mpol and ntor of this
        # surface be coordinated with the mpol and ntor of the Vmec
        # object?
        self.mpol = vi.mpol
        self.ntor = vi.ntor
        self.delt = vi.delt
        self.tcon0 = vi.tcon0
        self.phiedge = vi.phiedge
        self.curtor = vi.curtor
        self.gamma = vi.gamma
        self.boundary = optimizable(
            SurfaceRZFourier(nfp=self.nfp,
                             stelsym=self.stelsym,
                             mpol=self.mpol,
                             ntor=self.ntor))
        self.ncurr = vi.ncurr
        self.free_boundary = bool(vi.lfreeb)

        # Transfer boundary shape data from fortran to the ParameterArray:
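        # (Indexing note: the 101 offset below shifts VMEC's toroidal mode number n,
        # which runs over negative and positive values in the Fortran arrays, onto
        # the 0-based index of the wrapped rbc/zbs arrays; the specific value 101
        # presumably reflects VMEC's maximum toroidal mode dimension.)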
        for m in range(vi.mpol + 1):
            for n in range(-vi.ntor, vi.ntor + 1):
                self.boundary.rc[m, n + vi.ntor] = vi.rbc[101 + n, m]
                self.boundary.zs[m, n + vi.ntor] = vi.zbs[101 + n, m]
        # Handle a few variables that are not Parameters:
        self.depends_on = ["boundary"]
        self.need_to_run_code = True

        self.fixed = np.full(len(self.get_dofs()), True)
        self.names = ['delt', 'tcon0', 'phiedge', 'curtor', 'gamma']