Example #1
def test_exchange_field_supported_methods(fixt):
    """
    Check that all supported methods give the same results
    as the default method.

    """
    A = 1
    REL_TOLERANCE = 1e-12
    mesh = df.UnitCubeMesh(10, 10, 10)
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1)
    functionspace = df.VectorFunctionSpace(mesh, "CG", 1, 3)
    m = Field(functionspace)
    m.set(df.Expression(("0", "sin(x[0])", "cos(x[0])"), degree=1))
    exch = Exchange(A)
    exch.setup(m, Ms)
    H_default = exch.compute_field()

    supported_methods = list(Exchange._supported_methods)
    # no need to compare default method with itself
    supported_methods.remove(exch.method)
    # the 'project' method is too inaccurate for this comparison
    supported_methods.remove("project")

    for method in supported_methods:
        exch = Exchange(A, method=method)
        exch.setup(m, Ms)
        H = exch.compute_field()
        print "With method '{}', expecting H =\n{}\n, got H =\n{}.".format(
            method,
            H_default.reshape((3, -1)).mean(1),
            H.reshape((3, -1)).mean(1))

        rel_diff = np.abs((H - H_default) / H_default)
        assert np.nanmax(rel_diff) < REL_TOLERANCE
Example #2
def test_exchange_energy_analytical_2():
    """
    Compare one Exchange energy with the corresponding analytical result.

    """
    REL_TOLERANCE = 5e-5
    lx = 6
    ly = 3
    lz = 2
    nx = 300
    ny = nz = 1
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(lx, ly, lz), nx, ny, nz)
    unit_length = 1e-9
    functionspace = df.VectorFunctionSpace(mesh, "CG", 1, 3)
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 8e5)
    A = 13e-12
    m = Field(functionspace)
    m.set(
        df.Expression(['0', 'sin(2*pi*x[0]/l_x)', 'cos(2*pi*x[0]/l_x)'],
                      l_x=lx,
                      degree=1))
    exch = Exchange(A)
    exch.setup(m, Ms, unit_length=unit_length)
    E_expected = A * 4 * pi ** 2 * \
        (ly * unit_length) * (lz * unit_length) / (lx * unit_length)
    E = exch.compute_energy()
    print "expected energy: {}".format(E_expected)
    print "computed energy: {}".format(E)
    assert abs((E - E_expected) / E_expected) < REL_TOLERANCE
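The expected value above follows from E = A * integral of |grad(m)|^2 dV with m = (0, sin(k*x), cos(k*x)) and k = 2*pi/(lx*unit_length), so |grad(m)|^2 = k^2 everywhere. A quick hedged check of that number (plain Python, no dolfin needed):

from math import pi

A, lx, ly, lz, u = 13e-12, 6, 3, 2, 1e-9
k = 2 * pi / (lx * u)          # wave number of the spiral (1/m)
V = lx * ly * lz * u ** 3      # physical mesh volume (m^3)
print A * k ** 2 * V           # equals A * 4*pi^2 * (ly*u) * (lz*u) / (lx*u)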
Example #3
def test_anisotropy_energy_analytical(fixt):
    """
    Compare one UniaxialAnisotropy energy with the corresponding analytical result.

    The magnetisation is m = (0, sqrt(1 - x^2), x) and the easy axis still
    a = (0, 0, 1). The squared dot product in the energy integral thus gives
    dot(a, m)^2 = x^2. Integrating x^2 gives (x^3)/3 and the analytical
    result with the constants we have chosen is 1 - 1/3 = 2/3.

    """
    mesh = df.UnitCubeMesh(1, 1, 1)
    functionspace = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    K1 = 1
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1)
    a = df.Constant((0, 0, 1))
    m = Field(functionspace)
    m.set(df.Expression(("0", "sqrt(1 - pow(x[0], 2))", "x[0]"), degree=1))
    anis = UniaxialAnisotropy(K1, a)
    anis.setup(m, Ms)

    E = anis.compute_energy()
    expected_E = float(2) / 3

    print "With m = (0, sqrt(1-x^2), x), expecting E = {}. Got E = {}.".format(
        expected_E, E)
    #assert abs(E - expected_E) < TOLERANCE
    assert np.allclose(E, expected_E, atol=1e-14, rtol=TOLERANCE)
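The analytical value 2/3 can be double-checked independently of dolfin by integrating 1 - x^2 over the unit cube, e.g. with scipy (a hedged sketch, not part of the test itself):

from scipy.integrate import tplquad

# E = K1 * integral over the unit cube of (1 - dot(a, m)^2) = 1 - integral(x^2) = 2/3
val, err = tplquad(lambda z, y, x: 1 - x ** 2,
                   0, 1,                            # x limits
                   lambda x: 0, lambda x: 1,        # y limits
                   lambda x, y: 0, lambda x, y: 1)  # z limits
print val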
Example #4
def helical_period(A, D):
    """
    Computes the helical period when exchange constant A and
    the constant D are given. Both A and D are Field objects.
    """
    dg_functionspace = df.FunctionSpace(A.mesh(), 'DG', 0)
    helical_period = Field(dg_functionspace)

    function = df.project(df.sqrt(4 * pi * A.f / D.f), dg_functionspace)
    helical_period.set(function)

    return helical_period
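A hedged usage sketch of the helper above; the numerical values for A and D are illustrative assumptions, and the .f / .vector() accessors follow the Field usage shown elsewhere on this page:

import dolfin as df
from finmag.field import Field

mesh = df.UnitCubeMesh(5, 5, 5)
dg = df.FunctionSpace(mesh, 'DG', 0)
A = Field(dg, 8.78e-12)   # exchange constant (J/m), assumed value
D = Field(dg, 1.58e-3)    # DMI constant (J/m^2), assumed value
period = helical_period(A, D)
print period.f.vector().array().mean()   # constant inputs -> sqrt(4*pi*A/D) everywhere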
Example #5
    def check_energy_for_m(m, E_expected):
        """
        Helper function to compare the computed energy for a given
        magnetisation with an expected analytical value.
        """
        m_field = Field(S3)
        m_field.set(df.Constant(m))
        H_ext = Zeeman(H * np.array([1, 0, 0]))
        H_ext.setup(m_field, Ms, unit_length=unit_length)

        E_computed = H_ext.compute_energy()
        assert np.allclose(E_computed, E_expected, atol=0, rtol=1e-12)
Example #6
    def check_energies(m=None, theta=None):
        """
        Helper function to compare the computed energy for a given
        magnetisation with an expected analytical value. The argument
        theta is the angle between the magnetisation vector and the
        x-axis.

        """
        # Exactly one of m, theta must be given
        assert ((m is None or theta is None)
                and not (m is None and theta is None))
        if m is None:
            if theta is None:
                raise ValueError("Exactly one of m, theta must be given.")
            theta_rad = theta * pi / 180.
            m = (cos(theta_rad), sin(theta_rad), 0)
        else:
            if theta is not None:
                raise ValueError("Exactly one of m, theta must be given.")
        m = np.asarray(m, dtype=float)
        m = m / np.linalg.norm(m)
        m_field = Field(S3)
        m_field.set(df.Constant(m))
        H_ext = Zeeman(H * np.array([1, 0, 0]))
        H_ext.setup(m_field,
                    Field(df.FunctionSpace(mesh, 'DG', 0), Ms),
                    unit_length=unit_length)

        #E_analytical_1 = -mu0 * Ms * H * volume_1 * cos(theta_rad)
        E_analytical_1 = -mu0 * Ms * H * volume_1 * np.dot(m, [1, 0, 0])
        E_analytical_2 = -mu0 * Ms * H * volume_2 * np.dot(m, [1, 0, 0])
        E_analytical_total = E_analytical_1 + E_analytical_2

        E_computed_1 = H_ext.compute_energy(dx=dx_disk_1)
        E_computed_2 = H_ext.compute_energy(dx=dx_disk_2)
        E_computed_total = H_ext.compute_energy(dx=df.dx)

        # Check that the sum of the computed energies for disk #1 and #2 equals
        # the total computed energy
        assert np.allclose(E_computed_1 + E_computed_2,
                           E_computed_total,
                           atol=0,
                           rtol=1e-12)

        # Check that the computed energies are within the tolerance of the
        # analytical expressions
        assert np.allclose(E_computed_1, E_analytical_1, atol=0, rtol=RTOL)
        assert np.allclose(E_computed_2, E_analytical_2, atol=0, rtol=RTOL)
        assert np.allclose(E_computed_total,
                           E_analytical_total,
                           atol=0,
                           rtol=RTOL)
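The analytical expressions above are instances of E = -mu0 * integral of Ms (m . H_ext) dV, which for uniform m and H reduces to -mu0 * Ms * H * V * cos(theta). A standalone hedged evaluation (all numbers are illustrative assumptions, not values from the test):

from math import pi, cos

mu0 = 4 * pi * 1e-7           # vacuum permeability (T m / A)
Ms = 8e5                      # saturation magnetisation (A/m), assumed
H = 1e4                       # applied field magnitude (A/m), assumed
r, h = 10e-9, 5e-9            # disk radius and height (m), assumed
volume = pi * r ** 2 * h
theta_rad = 45 * pi / 180.
print "E =", -mu0 * Ms * H * volume * cos(theta_rad)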
Example #7
def exchange_length(A, Ms):
    """
    Computes the exchange length when the exchange constant A
    and the saturation magnetisation Ms are given. Both Ms and A
    are Field objects.

    """
    dg_functionspace = df.FunctionSpace(A.mesh(), 'DG', 0)
    exchange_length = Field(dg_functionspace)

    function = df.project(df.sqrt(2 * A.f / (mu0 * Ms.f**2)), dg_functionspace)
    exchange_length.set(function)

    return exchange_length
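A hedged usage sketch with Permalloy-like constants (assumed values), comparing against the scalar expression sqrt(2*A / (mu0 * Ms^2)):

import dolfin as df
from math import sqrt, pi
from finmag.field import Field

mu0 = 4 * pi * 1e-7
mesh = df.UnitCubeMesh(5, 5, 5)
dg = df.FunctionSpace(mesh, 'DG', 0)
A = Field(dg, 13e-12)    # exchange constant (J/m), assumed value
Ms = Field(dg, 8e5)      # saturation magnetisation (A/m), assumed value
l_ex = exchange_length(A, Ms)
print l_ex.f.vector().array().mean(), "vs", sqrt(2 * 13e-12 / (mu0 * (8e5) ** 2))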
Example #8
def bloch_parameter(A, K1):
    """
    Computes the Bloch parameter when the exchange constant A
    and the anisotropy constant K1 are given. Both A and K1
    are Field objects.

    """
    dg_functionspace = df.FunctionSpace(A.mesh(), 'DG', 0)
    bloch_parameter = Field(dg_functionspace)

    function = df.project(df.sqrt(A.f / K1.f), dg_functionspace)
    bloch_parameter.set(function)

    return bloch_parameter
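Analogously, a hedged usage sketch for the Bloch parameter sqrt(A/K1) with FePt-like constants (assumed values):

import dolfin as df
from finmag.field import Field

mesh = df.UnitCubeMesh(5, 5, 5)
dg = df.FunctionSpace(mesh, 'DG', 0)
A = Field(dg, 2.1e-11)    # exchange constant (J/m), assumed value
K1 = Field(dg, 8.2e6)     # anisotropy constant (J/m^3), assumed value
print bloch_parameter(A, K1).f.vector().array().mean()   # roughly 1.6e-9 m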
Example #9
def setup_finmag():
    mesh = from_geofile(os.path.join(MODULE_DIR, "sphere.geo"))

    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    m = Field(S3)
    m.set(df.Constant((1, 0, 0)))
    Ms = 1

    demag = Demag()
    demag.setup(m,
                Field(df.FunctionSpace(mesh, 'DG', 0), Ms),
                unit_length=1e-9)
    H = demag.compute_field()

    return dict(m=m, H=H, Ms=Ms, S3=S3, table=start_table())
Example #10
def uniformly_magnetised_sphere():
    Ms = 1
    mesh = sphere(r=1, maxh=0.25)
    S3 = df.VectorFunctionSpace(mesh, "CG", 1)
    m = Field(S3)
    m.set(df.Constant((1, 0, 0)))

    solutions = []
    for solver in solvers:
        demag = Demag(solver)
        demag.setup(m,
                    Field(df.FunctionSpace(mesh, 'DG', 0), Ms),
                    unit_length=1e-9)
        demag.H = demag.compute_field()
        solutions.append(demag)
    return solutions
Example #11
def test_anisotropy_energy_simple_configurations(fixt, m, expected_E):
    """
    Test some parallel and orthogonal configurations of m and a.

    """
    mesh = df.UnitCubeMesh(1, 1, 1)
    functionspace = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    K1 = 1
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1)
    a = df.Constant((0, 0, 1))
    m_field = Field(functionspace)
    m_field.set(df.Constant(m))
    anis = UniaxialAnisotropy(K1, a)
    anis.setup(m_field, Ms)

    E = anis.compute_energy()

    print "With m = {}, expecting E = {}. Got E = {}.".format(m, expected_E, E)
    #assert abs(E - expected_E) < TOLERANCE
    assert np.allclose(E, expected_E, atol=1e-14, rtol=TOLERANCE)
Example #12
def test_dmi_field():
    """
    Simulation 1 is computing H_dmi=dE_dM via assemble.
    Simulation 2 is computing H_dmi=g*M with a suitable pre-computed matrix g.
    Simulation 3 is computing g using a petsc matrix.

    We show that the three methods give equivalent results (this relies
    on H_dmi being linear in M).

    """
    m_initial = df.Expression(
        ('(2*x[0]-L)/L', 'sqrt(1 - ((2*x[0]-L)/L)*((2*x[0]-L)/L))', '0'),
        L=length,
        degree=1)
    m = Field(V)
    m.set(m_initial)
    dmi1 = DMI(D=5e-3, method="box-assemble")
    dmi1.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5))
    dmi2 = DMI(D=5e-3, method="box-matrix-numpy")
    dmi2.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5))
    dmi3 = DMI(D=5e-3, method="box-matrix-petsc")
    dmi3.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5))

    H_dmi1 = dmi1.compute_field()
    H_dmi2 = dmi2.compute_field()
    H_dmi3 = dmi3.compute_field()

    diff12 = np.max(np.abs(H_dmi1 - H_dmi2))
    diff13 = np.max(np.abs(H_dmi1 - H_dmi3))

    print "Difference between H_dmi1 and H_dmi2: max(abs(H_dmi1-H_dmi2))=%g" % diff12
    print "Max value = %g, relative error = %g " % (max(H_dmi1),
                                                    diff12 / max(H_dmi1))
    print "Difference between H_dmi1 and H_dmi3: max(abs(H_dmi1-H_dmi3))=%g" % diff13
    print "Max value = %g, relative error = %g " % (max(H_dmi1),
                                                    diff13 / max(H_dmi1))

    assert diff12 < 5e-8
    assert diff13 < 5e-8
    assert diff12 / max(H_dmi1) < 1e-14
    assert diff13 / max(H_dmi1) < 1e-14
Example #13
def test_cubic_anisotropy_energy():
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 40), 1, 1, 40)
    volume = mesh_volume(mesh) * unit_length**3
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    S1 = df.FunctionSpace(mesh, "Lagrange", 1)

    m = Field(S3)
    m.set((0, 0, 1))

    Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)

    ca = CubicAnisotropy(u1, u2, K1, K2, K3)
    ca.setup(m, Ms_dg, unit_length)

    energy = ca.compute_energy()
    # energy_expected = 8.3e-20  # oommf cubicEight_100pc.mif -> ErFe2.odt
    energy_expected = compute_cubic_energy() * volume
    print "cubic anisotropy energy = {}, expected {}.".format(
        energy, energy_expected)

    rel_diff = abs(energy - energy_expected) / abs(energy_expected)
    assert rel_diff < 1e-10
Example #14
def test_exchange_energy_analytical(fixt):
    """
    Compare one Exchange energy with the corresponding analytical result.

    """
    REL_TOLERANCE = 1e-7

    A = 1
    mesh = df.UnitCubeMesh(10, 10, 10)
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1)
    functionspace = df.VectorFunctionSpace(mesh, "CG", 1, 3)
    m = Field(functionspace)
    m.set(df.Expression(("x[0]", "x[2]", "-x[1]"), degree=1))
    exch = Exchange(A)
    exch.setup(m, Ms)
    E = exch.compute_energy()
    # E = A * integral of |grad(m)|^2 over the unit cube;
    # with m = (x[0], x[2], -x[1]) we have |grad(m)|^2 = 3 everywhere.
    expected_E = 3

    print "With m = (x[0], x[2], -x[1]), " + \
        "expecting E = {}. Got E = {}.".format(expected_E, E)
    assert abs(E - expected_E) / expected_E < REL_TOLERANCE
Example #15
def export_normal_mode_animation(mesh,
                                 m0,
                                 freq,
                                 w,
                                 filename,
                                 num_cycles=1,
                                 num_snapshots_per_cycle=20,
                                 scaling=0.2,
                                 dm_only=False,
                                 save_h5=False):
    """
    Save a number of vtk files of different snapshots of a given normal mode.
    These can be imported and animated in Paraview.

    *Arguments*

    mesh :  dolfin.Mesh

        The mesh on which the magnetisation is defined.

    m0 :  numpy.array

        The ground state of the magnetisation for which the normal mode was computed.
        Its size must be such that the array can be reshaped to 3xN.

    freq :  float

        The frequency of the normal mode.

    w :  numpy.array

        The eigenvector representing the normal mode (as returned by `compute_eigenv`
        or `compute_eigenv_generalised`).

    filename :  string

        The filename of the exported animation files. Each individual frame
        has the same basename, with a suffix indicating the frame number.

    num_cycles :  int

        The number of cycles to be animated.

    num_snapshots_per_cycle :  int

        The number of snapshots per cycle to be exported. Thus the total number of
        exported frames is num_cycles * num_snapshots_per_cycle.

    scaling : float

        If `dm_only` is False, this determines the maximum size of the
        oscillation (relative to the magnetisation vector) in the
        visualisation. If `dm_only` is True, this has no effect.

    dm_only :  bool (optional)

        If False (the default), plots `m0 + scaling*dm(t)`, where m0 is the
        average magnetisation and dm(t) the (spatially varying)
        oscillation corresponding to the frequency of the normal mode.
        If True, only `dm(t)` is plotted.

    """
    if abs(freq.imag) > 5e-3:
        logger.warning(
            "Frequency expected to be a real number. "
            "Got: {}. This may lead to unexpected behaviour.".format(freq))
    freq = freq.real
    #basename = os.path.basename(re.sub('\.vtk$', '', filename))
    #dirname = os.path.dirname(filename)
    # if not os.path.exists(dirname):
    #    print "Creating directory '{}' as it doesn't exist.".format(dirname)
    #    os.makedirs(dirname)
    #mesh = comp.mesh
    #mesh_shape = mesh.mesh_size
    m0_array = m0.copy()  # we assume that sim is relaxed!!
    Q, R, S, Mcross = compute_tangential_space_basis(m0_array.reshape(
        3, 1, -1))
    Qt = mf_transpose(Q).copy()

    n = mesh.num_vertices()
    V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
    func = df.Function(V)
    func.rename('m', 'magnetisation')
    w_3d = mf_mult(Q, w.reshape((2, 1, n)))
    w_flat = w_3d.reshape(-1)
    phi = np.angle(w_flat)  # relative phases of the oscillations
    a = np.absolute(w_flat)
    a = a / a.max()  # normalised amplitudes of the oscillations

    t_end = num_cycles * 2 * pi / freq
    timesteps = np.linspace(0,
                            t_end,
                            num_cycles * num_snapshots_per_cycle,
                            endpoint=False)
    m_osc = np.zeros(3 * n)
    t0 = time()
    f = df.File(filename, 'compressed')
    field = Field(V, name='m')
    for (i, t) in enumerate(timesteps):
        logger.debug(
            "Saving animation snapshot for timestep {} ({}/{})".format(
                t, i, num_cycles * num_snapshots_per_cycle))
        if dm_only is False:
            m_osc = (m0_array +
                     scaling * a * np.cos(t * freq + phi)).reshape(-1)
        else:
            m_osc = (scaling * a * np.cos(t * freq + phi)).reshape(-1)
        #save_vector_field(m_osc, os.path.join(dirname, basename + '_{:04d}.vtk'.format(i)))
        func.vector().set_local(m_osc)
        f << func
        if save_h5:
            field.set(func)
            field.save_hdf5(filename[0:-4], i)
    field.close_hdf5()
    t1 = time()
    logger.debug("Saving the data to file '{}' took {} seconds".format(
        filename, t1 - t0))
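A hedged usage sketch showing the call pattern only; mesh, m0, freq and w are placeholders that would normally come from a relaxed simulation and an eigensolver such as compute_eigenv, and the '.pvd' extension matches the filename[0:-4] slicing used for the HDF5 output above:

export_normal_mode_animation(mesh, m0, freq, w,
                             filename='normal_mode_000.pvd',
                             num_cycles=1,
                             num_snapshots_per_cycle=20,
                             scaling=0.2,
                             dm_only=False,
                             save_h5=False)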
Example #16
class MultiDomainTest(object):

    def __init__(self, mesh, get_domain_id, m_vals, Ms, unit_length=1e-9):
        """
        `get_domain_id` is a function of the form (x, y, z) -> id which maps
        some point coordinates in the mesh to an integer identifying the domain
        which the point belongs to.

        """
        self.mesh = mesh
        self.get_domain_id = get_domain_id
        self.domain_ids = [get_domain_id(pt) for pt in mesh.coordinates()]
        self.Ms = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)
        self.unit_length = unit_length
        #self.rtol = rtol

        domain_classes = {}
        for k in self.domain_ids:
            class DomainK(df.SubDomain):

                def inside(self, pt, on_boundary):
                    return get_domain_id(pt) == k
            domain_classes[k] = DomainK()
        domains = df.CellFunction("size_t", mesh)
        domains.set_all(0)
        for k, d in domain_classes.items():
            d.mark(domains, k)

        self.submeshes = [df.SubMesh(mesh, domains, i)
                          for i in self.domain_ids]
        self.dx = df.Measure("dx")[domains]

        def m_init(pt):
            return m_vals[self.get_domain_id(pt)]

        self.V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
        self.m = Field(self.V)
        self.m.set(m_init, normalised=True)

    def compute_energies_on_subdomains(self, interaction):
        """
        Given some interaction (such as Zeeman, Demag, Exchange, etc.),
        compute the associated energies on each subdomain as well as the
        total energy.

        *Returns*

        A pair (E_subdmns, E_total), where E_subdmns is a dictionary of
        energies indexed by the subdomain indices, and E_total is the total
        energy of the interaction.

        """
        interaction.setup(self.m, self.Ms, unit_length=self.unit_length)
        return {k: interaction.compute_energy(dx=self.dx(k)) for k in self.domain_ids},\
            interaction.compute_energy(df.dx)

    def check_energy_consistency(self, interaction):
        E_domains, E_total = self.compute_energies_on_subdomains(interaction)
        finmag.logger.debug("Energies on subdomains: {}".format(E_domains))
        finmag.logger.debug("Sum of energies on subdomains: {}; total energy: {}".format(
            sum(E_domains.values()), E_total))
        assert np.allclose(
            sum(E_domains.values()), E_total, atol=0, rtol=1e-12)
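A hedged usage sketch: split a box mesh into two halves along x, assign a different initial magnetisation to each half, and check that the Zeeman energies on the subdomains add up to the total (the imports, field strength and Ms value are assumptions for illustration):

import dolfin as df
import numpy as np
from finmag.energies import Zeeman

mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(2, 1, 1), 10, 5, 5)

def get_domain_id(pt):
    return 1 if pt[0] <= 1.0 else 2

m_vals = {1: [1, 0, 0], 2: [0, 1, 0]}
multi_domain = MultiDomainTest(mesh, get_domain_id, m_vals, Ms=8e5)
multi_domain.check_energy_consistency(Zeeman(1e4 * np.array([1, 0, 0])))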
Example #17
class Physics(object):
    def __init__(self, mesh, unit_length):
        self.mesh = mesh
        self.unit_length = unit_length
        self.S1 = df.FunctionSpace(mesh, "CG", 1)
        self.S3 = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)

        self.alpha = Field(self.S1, name="alpha")
        self.dmdt = Field(self.S3, name="dmdt")
        self.H = Field(self.S3, name="H")  # TODO: connect effective field to H
        self.m = Field(self.S3, name="m")
        self.Ms = Field(self.S1, name="Ms")
        self.pins = []  # TODO: connect pins to instant code

        self.effective_field = EffectiveField(self.m, self.Ms,
                                              self.unit_length)

        self.eq = Equation(self.m.as_vector(), self.H.as_vector(),
                           self.dmdt.as_vector())
        self.eq.set_alpha(self.alpha.as_vector())
        self.eq.set_gamma(consts.gamma)
        self.eq.set_saturation_magnetisation(self.Ms.as_vector())

    def hooks_scipy(self):
        """
        Methods that scipy calls during time integration.

        """
        return (self.solve_for, self.m.from_array)

    def hooks_sundials(self):
        """
        Methods that sundials calls during time integration.

        """
        return (self.sundials_rhs, self.sundials_jtimes, self.sundials_psetup,
                self.sundials_psolve, self.m.from_array)

    def hooks_sundials_parallel(self):
        """
        Methods that sundials calls during time integration when it
        operates in parallel mode.

        TODO: What does parallel sundials need?

        """
        return ()

    def solve(self, t):
        self.effective_field.update(t)
        self.H.set(
            self.effective_field.H_eff)  # FIXME: remove double book-keeping
        self.eq.solve()
        return self.dmdt.as_array()

    def solve_for(self, m, t):
        self.m.from_array(m)
        return self.solve(t)

    def sundials_jtimes(self, mp, J_mp, t, m, fy, tmp):
        """
        Computes the Jacobian-times-vector product, as used by sundials cvode.

        The time integration problem we need to solve is of type

        .. math::

                 \\frac{d y}{d t} = f(y,t)

        where y is the state vector (such as the magnetisation components for
        all sites), t is the time, and f(y,t) is the LLG equation.

        For the implicit integration schemes, sundials' cvode solver
        needs to know the Jacobian J, which is the derivative of the
        (vector-valued) function f(y,t) with respect to the (components
        of the vector) y. The Jacobian is a matrix.

        For a magnetic system with N sites, the state vector y has 3N entries
        (because every site has 3 components). The Jacobian matrix J would
        thus have a size of 3N x 3N. In general, this is too big to store.

        Fortunately, cvode only needs the result of the multiplication of some
        vector y' (provided by cvode) with the Jacobian. We can thus store
        the Jacobian in our own way (in particular as a sparse matrix
        if we leave out the demag field), and carry out the multiplication of
        J with y' when required, and that is what this function does.

        In more detail: we use the variable name mp to represent m' (i.e. mprime);
        this is just notation to distinguish m' from m, not a derivative.

        Our equation is:

        .. math::

             \\frac{dm}{dt} = LLG(m, H)

        And we're interested in computing the Jacobian (J) times vector (m') product

        .. math::

             J m' = [\\frac{dLLG(m, H)}{dm}] m'.

        However, the H field itself depends on m, so the total derivative J m'
        will have two terms

        .. math::

             \\frac{d LLG(m, H)}{dm} = \\frac{\\partial LLG(m, H)}{\\partial m} + [\\frac{\\partial LLG(m, H)}{\\partial H}] [\\frac{\\partial H(m)}{\\partial m}].


        This is a matrix identity, so to make the derivations easier (and since we don't need the full Jacobian matrix) we can write the Jacobian-times-vector product as a directional derivative:

        .. math::

             J m' = \\frac{d LLG(m + a m',H(m + a m'))}{d a}|_{a=0}


        The code to compute this derivative is in ``llg.cc`` but you can see that the derivative will depend
        on m, m', H(m), and dH(m+a m')/da [which is labelled H' in the code].

        Most of the components of the effective field are linear in m; if that's the case,
        the directional derivative H' is just H(m')

        .. math::

             H' = \\frac{d H(m+a m')}{da} = H(m')

        """
        assert m.shape == self.m.as_array().shape
        assert mp.shape == m.shape
        assert tmp.shape == m.shape

        # First, compute the directional derivative H' = dH(m + a*m')/da
        self.m.from_array(mp)
        Hp = tmp.view()
        Hp[:] = self.effective_field.compute_jacobian_only(t)

        if not hasattr(
                self,
                'sundials_reuse_jacobean') or not self.sundials_reuse_jacobean:
            if not np.array_equal(self.m.as_array(), m):
                self.m.from_array(m)
                self.effective_field.update(t)

        self.eq.sundials_jtimes_serial(mp, Hp, J_mp)
        return 0

    def sundials_psetup(self, t, m, fy, jok, gamma, tmp1, tmp2, tmp3):
        # Note that some of the arguments are deliberately ignored, but they
        # need to be present because the function must have the correct signature
        # when it is passed to set_spils_preconditioner() in the cvode class.
        if not jok:
            self.m.from_array(m)
            self.sundials_reuse_jacobean = True
        return 0, not jok

    def sundials_psolve(self, t, y, fy, r, z, gamma, delta, lr, tmp):
        # Note that some of the arguments are deliberately ignored, but they
        # need to be present because the function must have the correct signature
        # when it is passed to set_spils_preconditioner() in the cvode class.
        z[:] = r
        return 0

    def sundials_rhs(self, t, y, ydot):
        """
        Computes the dm/dt right hand side ODE term, as used by sundials cvode.

        """
        ydot[:] = self.solve_for(y, t)
        return 0
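The directional-derivative form of the Jacobian-times-vector product used in sundials_jtimes can be illustrated with a small finite-difference sketch on a toy right-hand side (plain numpy, not the finmag API):

import numpy as np

def jtimes_fd(f, m, mp, a=1e-7):
    # approximate J(m) * mp = d/da f(m + a*mp) |_{a=0} by central differences
    return (f(m + a * mp) - f(m - a * mp)) / (2 * a)

# toy linear right-hand side f(m) = A.m, whose exact Jacobian-vector product is A.mp
A = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 0.]])
f = lambda x: A.dot(x)
m = np.array([1., 0., 0.])
mp = np.array([0., 1., 0.])
print jtimes_fd(f, m, mp), "vs exact", A.dot(mp)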
Example #18
class Material(object):
    """
    This class collects material properties such as Ms, A and K in one place,
    since these parameters may respond differently to the temperature T.
    Another reason is that in the FEM framework the saturation magnetisation
    Ms is defined on cells, whereas for the dynamics it is more convenient to
    use a nodal definition; keeping both in one class would otherwise cause
    confusion.

    Instead of the traditional split of the magnetisation M(r) into the unit
    magnetisation m(r) (stored on nodes) and Ms (stored on cells), we work
    directly with M(r) and pass it to the other classes such as Exchange,
    Anisotropy and Demag. The Ms attribute of this class is therefore mainly
    used as user input.

    An alternative way to avoid this confusion would be to define separate
    classes for different scenarios; for example, a simulation of a single
    material at zero temperature could use a class with a constant Ms.

    This class will also be adapted to the LLB case.

    """
    def __init__(self, mesh, name='FePt', unit_length=1):
        self.mesh = mesh
        self.name = name
        self.S1 = df.FunctionSpace(mesh, "Lagrange", 1)
        self.S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)

        self.nxyz = mesh.num_vertices()
        self._m = Field(self.S3, name='m')

        self._T = np.zeros(self.nxyz)
        self._Ms = np.zeros(3 * self.nxyz)
        self._m_e = np.zeros(3 * self.nxyz)
        self.inv_chi_par = np.zeros(self.nxyz)
        self.h = np.zeros(3 * self.nxyz)
        self.unit_length = unit_length

        self.alpha = 0.5
        self.gamma_LL = consts.gamma

        if self.name == 'FePt':
            self.Tc = 660
            self.Ms0 = 1047785.4656
            self.A0 = 2.148042e-11
            self.K0 = 8.201968e6
            self.mu_a = 2.99e-23
        elif self.name == 'Nickel':
            self.Tc = 630
            self.Ms0 = 4.9e5
            self.A0 = 9e-12
            self.K0 = 0
            self.mu_a = 0.61e-23
        elif self.name == 'Permalloy':
            self.Tc = 870
            self.Ms0 = 8.6e5
            self.A0 = 13e-12
            self.K0 = 0
            # TODO: find the correct mu_a for permalloy
            self.mu_a = 1e-23
        else:
            raise NotImplementedError("Only FePt, Nickel and Permalloy are available")

        self.volumes = df.assemble(
            df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) *
            df.dx).array()

        self.real_vol = self.volumes * self.unit_length**3

        self.mat = native_llb.Materials(self.Ms0, self.Tc, self.A0, self.K0,
                                        self.mu_a)

        dg = df.FunctionSpace(mesh, "DG", 0)
        self._A_dg = df.Function(dg)
        self._m_e_dg = df.Function(dg)

        self.T = 0
        self.Ms = self.Ms0 * self._m_e_dg.vector().array()

    @property
    def me(self):
        return self._m_e[0]

    def compute_field(self):
        native_llb.compute_relaxation_field(self._T, self.m, self.h, self._m_e,
                                            self.inv_chi_par, self.Tc)
        return self.h

    @property
    def T(self):
        return self._T

    @T.setter
    def T(self, value):
        self._T[:] = helpers.scalar_valued_function(
            value, self.S1).vector().array()[:]

        self._T_dg = helpers.scalar_valued_dg_function(value, self.mesh)

        As = self._A_dg.vector().array()
        Ts = self._T_dg.vector().array()
        mes = self._m_e_dg.vector().array()

        for i in range(len(Ts)):
            As[i] = self.mat.A(Ts[i])
            mes[i] = self.mat.m_e(Ts[i])

        self._A_dg.vector().set_local(As)
        self._m_e_dg.vector().set_local(mes)

        self._m_e.shape = (3, -1)
        for i in range(len(self._T)):
            self._m_e[:, i] = self.mat.m_e(self._T[i])
            self.inv_chi_par[i] = self.mat.inv_chi_par(self._T[i])
        self._m_e.shape = (-1, )

        # TODO: Trying to use spatial parameters
        self.inv_chi_perp = self.mat.inv_chi_perp(self._T[0])

    @property
    def Ms(self):
        return self._Ms

    @Ms.setter
    def Ms(self, value):
        self._Ms_dg = helpers.scalar_valued_dg_function(value, self.mesh)

        tmp_Ms = df.assemble(self._Ms_dg * df.dot(df.TestFunction(
            self.S3), df.Constant([1, 1, 1])) * df.dx) / self.volumes

        self._Ms[:] = tmp_Ms[:]

    @property
    def m(self):
        """
        Note: this returns a copy of the underlying array; to be improved later.
        """
        return self._m.vector().array()

    def set_m(self, value, **kwargs):
        """
        Set the magnetisation (scaled automatically).

        There are several ways to use this function. You can provide a
        3-tuple of numbers, which will be cast to a dolfin.Constant, or a
        dolfin.Constant directly. Alternatively, you can pass a 3-tuple of
        strings (with keyword arguments if needed), which will be cast to a
        dolfin.Expression, or a dolfin.Expression directly.
        You can provide a numpy.ndarray of nodal values of shape (3*n,),
        where n is the number of nodes.
        Finally, you can pass a function (any callable object will do) which
        accepts the coordinates of the mesh as a numpy.ndarray of
        shape (3, n) and returns the magnetisation like that as well.

        You can call this method anytime during the simulation. However, when
        providing a numpy array during time integration, the use of
        the attribute m instead of this method is advised for performance
        reasons and because the attribute m doesn't normalise the vector.

        """
        self._m.set(value)
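A hedged usage sketch of the value types accepted by set_m, following its docstring; it assumes a dolfin mesh and that the finmag native modules required by Material's constructor are available:

import dolfin as df

mesh = df.UnitCubeMesh(2, 2, 2)
mat = Material(mesh, name='Permalloy')

mat.set_m((1, 0, 0))                           # 3-tuple of numbers
mat.set_m(df.Constant((0, 1, 0)))              # dolfin.Constant
mat.set_m(df.Expression(('sin(x[0])', 'cos(x[0])', '0'), degree=1))   # dolfin.Expression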
Example #19
import dolfin as df
import numpy as np
import finmag
import logging
from finmag.field import Field
from finmag import sim_with
from finmag.energies import Zeeman, TimeZeeman, DiscreteTimeZeeman, OscillatingZeeman
from finmag.util.consts import mu0
from finmag.util.meshes import pair_of_disks
from finmag.example import sphere_inside_airbox
from math import sqrt, pi, cos, sin
from zeeman import DipolarField

mesh = df.UnitCubeMesh(2, 2, 2)
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
m = Field(S3)
m.set(df.Constant((1, 0, 0)))
Ms = Field(S1, value=1)
TOL = 1e-14

logger = logging.getLogger('finmag')


def diff(H_ext, expected_field):
    """
    Helper function which computes the maximum deviation between H_ext
    and the expected field.
    """
    H = H_ext.compute_field().reshape((3, -1)).mean(1)
    print "Got H={}, expecting H_ref={}.".format(H, expected_field)
    return np.max(np.abs(H - expected_field))
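A hedged usage sketch of diff() with a uniform Zeeman field defined on the mesh above; the applied field should be recovered essentially exactly:

H_ref = np.array([1e5, 0, 0])
H_ext = Zeeman(H_ref)
H_ext.setup(m, Ms, unit_length=1e-9)
print diff(H_ext, H_ref)   # expected to be tiny compared to |H_ref|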