def __init__(self, mesh, chi=0.001, unit_length=1e-9, name='unnamed',
             auto_save_data=True, type=1.0):
    # type=1 : cubic crystal
    # type=0 : uniaxial crystal
    self.mesh = mesh
    self.S1 = df.FunctionSpace(mesh, "Lagrange", 1)
    self.S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
    # self._Ms = df.Function(self.S1)
    self._M = df.Function(self.S3)
    self._m = df.Function(self.S3)
    self.M = self._M.vector().array()

    self.dm_dt = np.zeros(self.M.shape)
    self.H_eff = np.zeros(self.M.shape)

    self.call_field_times = 0
    self.call_field_jtimes = 0

    self.chi = chi
    self.unit_length = unit_length
    self.set_default_values()

    self.auto_save_data = auto_save_data
    self.name = name
    self.sanitized_name = helpers.clean_filename(name)

    self.type = type
    assert (type == 0 or type == 1.0)

    if self.auto_save_data:
        self.ndtfilename = self.sanitized_name + '.ndt'
        self.tablewriter = Tablewriter(self.ndtfilename, self, override=True)
class NEB_Sundials(object):
    """
    Nudged elastic band method by solving the differential equation
    using Sundials.
    """

    def __init__(self, sim, initial_images, interpolations=None,
                 spring=0, name='unnamed', normalise=False):

        self.sim = sim
        self.name = name
        self.spring = spring
        self.normalise = normalise

        if interpolations is None:
            interpolations = [0 for i in range(len(initial_images) - 1)]

        self.initial_images = initial_images
        self.interpolations = interpolations

        if len(interpolations) != len(initial_images) - 1:
            raise RuntimeError("""The length of interpolations should be
                equal to the length of the initial_images minus 1, i.e.,
                len(interpolations) = len(initial_images) - 1""")

        if len(initial_images) < 2:
            raise RuntimeError("""At least two images need to be provided.""")

        self.image_num = len(initial_images) + sum(interpolations)
        self.nxyz = len(initial_images[0])

        self.all_m = np.zeros(self.nxyz * self.image_num)
        self.Heff = np.zeros(self.nxyz * (self.image_num - 2))
        self.Heff.shape = (self.image_num - 2, -1)

        self.tangents = np.zeros(self.Heff.shape)
        self.images_energy = np.zeros(self.image_num)
        self.last_m = np.zeros(self.all_m.shape)
        self.spring_force = np.zeros(self.image_num - 2)

        self.t = 0
        self.step = 1
        self.integrator = None
        self.ode_count = 1

        self.initial_image_coordinates()

        self.tablewriter = Tablewriter('%s_energy.ndt' % name, self,
                                       override=True)
        self.tablewriter.entities = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'energy': {'unit': '<J>',
                       'get': lambda sim: sim.images_energy,
                       'header': ['image_%d' % i
                                  for i in range(self.image_num + 2)]}
        }
        keys = self.tablewriter.entities.keys()
        keys.remove('step')
        self.tablewriter.entity_order = ['step'] + sorted(keys)

        self.tablewriter_dm = Tablewriter('%s_dms.ndt' % name, self,
                                          override=True)
        self.tablewriter_dm.entities = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'dms': {'unit': '<1>',
                    'get': lambda sim: sim.distances,
                    'header': ['image_%d_%d' % (i, i + 1)
                               for i in range(self.image_num + 1)]}
        }
        keys = self.tablewriter_dm.entities.keys()
        keys.remove('step')
        self.tablewriter_dm.entity_order = ['step'] + sorted(keys)

    def initial_image_coordinates(self):
        image_id = 0
        self.all_m.shape = (self.image_num, -1)

        for i in range(len(self.interpolations)):
            n = self.interpolations[i]
            m0 = self.initial_images[i]
            self.all_m[image_id][:] = m0[:]
            image_id = image_id + 1

            m1 = self.initial_images[i + 1]
            if self.normalise:
                coords = linear_interpolation_two(m0, m1, n)
            else:
                coords = linear_interpolation_two_direct(m0, m1, n)

            for coord in coords:
                self.all_m[image_id][:] = coord[:]
                image_id = image_id + 1

        m2 = self.initial_images[-1]
        self.all_m[image_id][:] = m2[:]

        for i in range(self.image_num):
            self.images_energy[i] = self.sim.energy(self.all_m[i])

        self.all_m.shape = (-1, )
        print self.all_m
        self.create_integrator()

    def save_npys(self):
        directory = 'npys_%s_%d' % (self.name, self.step)
        if not os.path.exists(directory):
            os.makedirs(directory)

        img_id = 1
        self.all_m.shape = (self.image_num, -1)
        for i in range(self.image_num):
            name = os.path.join(directory, 'image_%d.npy' % img_id)
            np.save(name, self.all_m[i, :])
            img_id += 1
        self.all_m.shape = (-1, )

    def create_integrator(self, reltol=1e-6, abstol=1e-6, nsteps=10000):
        integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        integrator.init(self.sundials_rhs, 0, self.all_m)
        integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
        integrator.set_scalar_tolerances(reltol, abstol)
        integrator.set_max_num_steps(nsteps)
        self.integrator = integrator

    def compute_effective_field(self, y):
        y.shape = (self.image_num, -1)
        for i in range(1, self.image_num - 1):
            if self.normalise:
                normalise_m(y[i])
            self.Heff[i - 1, :] = self.sim.gradient(y[i])
            self.images_energy[i] = self.sim.energy(y[i])
        y.shape = (-1, )

    def compute_tangents(self, ys):
        ys.shape = (self.image_num, -1)
        native_neb.compute_tangents(ys, self.images_energy, self.tangents)
        for i in range(1, self.image_num - 1):
            dm1 = compute_dm(ys[i - 1], ys[i])
            dm2 = compute_dm(ys[i + 1], ys[i])
            self.spring_force[i - 1] = self.spring * (dm2 - dm1)
        ys.shape = (-1, )

    def sundials_rhs(self, time, y, ydot):
        self.ode_count += 1
        default_timer.start("sundials_rhs", self.__class__.__name__)

        self.compute_effective_field(y)
        self.compute_tangents(y)

        y.shape = (self.image_num, -1)
        ydot.shape = (self.image_num, -1)

        for i in range(1, self.image_num - 1):
            h = self.Heff[i - 1]
            t = self.tangents[i - 1]
            sf = self.spring_force[i - 1]
            h3 = h - np.dot(h, t) * t + sf * t
            # h4 = h - np.dot(h,t)*t + sf*t
            ydot[i, :] = h3[:]

        # it turns out that the following two lines are very important
        ydot[0, :] = 0
        ydot[-1, :] = 0

        y.shape = (-1, )
        ydot.shape = (-1, )

        default_timer.stop("sundials_rhs", self.__class__.__name__)
        return 0

    def compute_distance(self):
        distance = []
        y = self.all_m
        y.shape = (self.image_num, -1)
        for i in range(self.image_num - 1):
            dm = compute_dm(y[i], y[i + 1])
            distance.append(dm)
        y.shape = (-1, )
        self.distances = np.array(distance)

    def run_until(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.all_m)

        m = self.all_m
        y = self.last_m
        m.shape = (self.image_num, -1)
        y.shape = (self.image_num, -1)

        max_dmdt = 0
        for i in range(self.image_num):
            dmdt = compute_dm(y[i], m[i]) / (t - self.t)
            if dmdt > max_dmdt:
                max_dmdt = dmdt

        m.shape = (-1, )
        y.shape = (-1, )
        self.last_m[:] = m[:]
        self.t = t
        return max_dmdt

    def relax(self, dt=1e-8, stopping_dmdt=1e4, max_steps=1000,
              save_ndt_steps=1, save_vtk_steps=100):
        if self.integrator is None:
            self.create_integrator()

        log.debug("Relaxation parameters: stopping_dmdt={} "
                  "(degrees per nanosecond), time_step={} s, "
                  "max_steps={}.".format(stopping_dmdt, dt, max_steps))

        for i in range(max_steps):
            cvode_dt = self.integrator.get_current_step()
            increment_dt = dt
            if cvode_dt > dt:
                increment_dt = cvode_dt
            dmdt = self.run_until(self.t + increment_dt)

            if i % save_ndt_steps == 0:
                self.compute_distance()
                self.tablewriter.save()
                self.tablewriter_dm.save()

            log.debug("step: {:.3g}, step_size: {:.3g} and max_dmdt: "
                      "{:.3g}.".format(self.step, increment_dt, dmdt))

            if dmdt < stopping_dmdt:
                break
            self.step += 1

        log.info("Relaxation finished at time step = {:.4g}, t = {:.2g}, "
                 "call rhs = {:.4g} and max_dmdt = {:.3g}".format(
                     self.step, self.t, self.ode_count, dmdt))

        self.save_npys()
        print self.all_m
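# ----------------------------------------------------------------------
# Illustrative sketch (not part of finmag): the core of sundials_rhs
# above is the NEB force projection
#     h3 = h - (h . t) t + sf * t,
# i.e. the effective field minus its component along the band tangent,
# plus the spring force acting along the tangent. The toy function
# below reproduces that projection with plain numpy for a single
# interior image; all names here are hypothetical.
import numpy as np


def projected_neb_force(h, t, sf):
    """Return h with its tangential component removed and the spring
    force sf added along the (unit) tangent t."""
    return h - np.dot(h, t) * t + sf * t

# Example: the component of the result along t is exactly the spring
# force, while the perpendicular part of h is untouched.
_h = np.array([1.0, 0.5, 0.0])
_t = np.array([1.0, 0.0, 0.0])
assert abs(np.dot(projected_neb_force(_h, _t, 0.2), _t) - 0.2) < 1e-14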
class NEB_Sundials(object):
    """
    Nudged elastic band method by solving the differential equation
    using Sundials.
    """

    def __init__(self, sim, initial_images, climbing_image=None,
                 interpolations=None, spring=5e5, name='unnamed'):
        """
        *Arguments*

            sim: the Simulation class

            initial_images: a list containing the initial values, which
                can have any of the forms accepted by the function
                'finmag.util.helpers.vector_valued_function', for
                example,

                    initial_images = [(0, 0, 1), (0, 0, -1)]

                or with a user-defined function, e.g.

                    def init_m(pos):
                        x = pos[0]
                        if x < 10:
                            return (0, 1, 1)
                        return (-1, 0, 0)

                    initial_images = [(0, 0, 1), (0, 0, -1), init_m]

                are accepted forms.

            climbing_image: An integer with the index (from 1 to the
                total number of images minus two; it does not make sense
                to use the extreme images) of the image with the largest
                energy, which will be updated in the NEB algorithm using
                the Climbing Image NEB method (no spring force and "with
                the component along the elastic band inverted" [*]).

                See: [*] Henkelman et al., The Journal of Chemical
                Physics 113, 9901 (2000)

            interpolations: a list containing only integers; its length
                should be equal to the length of initial_images minus 1,
                i.e., len(interpolations) = len(initial_images) - 1

                ** THIS IS not well defined in CARTESIAN coordinates **

            spring: the spring constant, a float value

            disable_tangent: this is an experimental option; by
                disabling the tangent we can quickly get a rough feeling
                for the local energy minima.
        """
        self.sim = sim
        self.name = name
        self.spring = spring

        # We subtract one because the *sundials_rhs* function
        # only uses an array without counting the extreme images,
        # whose length is self.image_num (see below)
        if climbing_image is not None:
            self.climbing_image = climbing_image - 1
        else:
            self.climbing_image = climbing_image

        # Dolfin function of the new _m_field (instead of _m)
        # self._m = sim.llg._m_field.f
        self._m = sim.m_field.f
        self.effective_field = sim.llg.effective_field

        if interpolations is None:
            interpolations = [0 for i in range(len(initial_images) - 1)]

        self.initial_images = initial_images
        self.interpolations = interpolations

        if len(interpolations) != len(initial_images) - 1:
            raise RuntimeError("""The length of interpolations should be
                equal to the length of the initial_images array minus 1,
                i.e., len(interpolations) = len(initial_images) - 1""")

        if len(initial_images) < 2:
            raise RuntimeError("""At least two images must be provided
                to create the energy band""")

        # the total image number including the two ends
        self.total_image_num = len(initial_images) + sum(interpolations)
        self.image_num = self.total_image_num - 2

        # Number of spins per image. The _m.vector has the form
        # [mx1, mx2, ..., my1, my2, ..., mz1, mz2],
        # thus we divide by 3 to get the total number of ms:
        # self.nxyz = len(self._m.vector()) / 3
        #
        # Use the full vector from the field class to get the total
        # number of degrees of freedom when using PBC
        # (self._m gives the reduced number of spins when using PBC)
        self.nxyz = len(self.sim.m_field.get_ordered_numpy_array_xxx()) / 3

        # Total number of degrees of freedom (3 components per spin)
        self.coords = np.zeros(3 * self.nxyz * self.total_image_num)
        self.last_m = np.zeros(self.coords.shape)

        self.Heff = np.zeros(self.coords.shape)
        self.Heff.shape = (self.total_image_num, -1)

        self.tangents = np.zeros(3 * self.nxyz * self.image_num)
        self.tangents.shape = (self.image_num, -1)

        self.energy = np.zeros(self.total_image_num)
        self.springs = np.zeros(self.image_num)

        self.t = 0
        self.step = 0
        self.ode_count = 1
        self.integrator = None

        self.initial_image_coordinates()
        self.create_tablewriter()

    def create_tablewriter(self):
        entities_energy = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'energy': {'unit': '<J>',
                       'get': lambda sim: sim.energy,
                       'header': ['image_%d' % i
                                  for i in range(self.image_num + 2)]}
        }
        self.tablewriter = Tablewriter('%s_energy.ndt' % (self.name), self,
                                       override=True,
                                       entities=entities_energy)

        entities_dm = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'dms': {'unit': '<1>',
                    'get': lambda sim: sim.distances,
                    'header': ['image_%d_%d' % (i, i + 1)
                               for i in range(self.image_num + 1)]}
        }
        self.tablewriter_dm = Tablewriter('%s_dms.ndt' % (self.name), self,
                                          override=True,
                                          entities=entities_dm)

    def initial_image_coordinates(self):
        """
        Generate the coordinates linearly according to the number of
        interpolations provided.

        Example: imagine we have 4 images and want 3 interpolations
        between every neighbouring pair, i.e. interpolations = [3, 3, 3]

        1. Imagine the initial states with the interpolation numbers
           and choose the first and second state

                0          1           2          3
                X -------- X --------- X -------- X
                     3           3          3

        2. The counter image_id is set to 0

        3. Set the image 0 magnetisation vector as m0 and append the
           values to self.coords[0]. Update the counter: image_id = 1

        4. Set the image 1 magnetisation values as m1 and interpolate
           the values between m0 and m1, generating 3 arrays with the
           magnetisation values of every interpolation image. For every
           array, append the values to self.coords[i], with i = 1, 2
           and 3, updating the counter every time, so image_id = 4 now

        5. Append the value of m1 (image 1) in self.coords[4].
           Update the counter (image_id = 5 now)

        6. Move to the next pair of images; now set the 1st image
           magnetisation values as m0 and append to self.coords[5]

        7. Interpolate to get self.coords[i], for i = 6, 7, 8 ...

        8. Repeat as before until moving to the pair of images: 2 - 3

        9. Finally append the magnetisation of the last image
           (self.initial_images[-1]). In this case, the 3rd image.

        Then, for every array of magnetisation values (self.coords[i]),
        pass the values to the simulation and store the energy
        corresponding to every i-th image in the self.energy[i] array.

        Finally, flatten the self.coords matrix (containing the
        magnetisation values of every image in different rows).

        ** Our generalised coordinates in the NEBM are the
           magnetisation values.
        """
        # Initiate the counter
        image_id = 0
        self.coords.shape = (self.total_image_num, -1)

        # For every interpolation between images (zero if no
        # interpolations were specified)
        for i in range(len(self.interpolations)):
            # Store the number
            n = self.interpolations[i]

            # Save on the first image of a pair (step 1, 6, ...)
            self.sim.set_m(self.initial_images[i])
            # m0 = self.sim.m
            # Use the full array for PBCs
            m0 = self.sim.m_field.get_ordered_numpy_array_xxx()
            self.coords[image_id][:] = m0[:]
            image_id = image_id + 1

            # Set the second image in the pair as m1 and interpolate
            # (steps 4 and 7), saving in the corresponding self.coords
            # entries
            self.sim.set_m(self.initial_images[i + 1])
            # m1 = self.sim.m
            # Use the full array for PBCs
            m1 = self.sim.m_field.get_ordered_numpy_array_xxx()

            # Interpolations (arrays with magnetisation values)
            coords = linear_interpolation_two(m0, m1, n)
            for coord in coords:
                self.coords[image_id][:] = coord[:]
                image_id = image_id + 1
            # Continue to the next pair of images

        # Append the magnetisation of the last image
        self.sim.set_m(self.initial_images[-1])
        # m2 = self.sim.m
        # Use the full array for PBCs
        m2 = self.sim.m_field.get_ordered_numpy_array_xxx()
        self.coords[image_id][:] = m2[:]

        # Save the energies
        for i in range(self.total_image_num):
            # To assign the values from the full array using set_local
            # (with boundaries when using PBCs) to a reduced
            # sim.m_field.vector(), we use the ordered dof to vertex
            # map (d2v_xxx), which has a reduced number of indexes.
            # We take the map from the field class.
            self._m.vector().set_local(
                self.coords[i][self.sim.m_field.d2v_xxx])
            self.effective_field.update()
            self.energy[i] = self.effective_field.total_energy()

        # Flatten the array
        self.coords.shape = (-1, )

    def save_vtks(self):
        """
        Save vtk files in different folders, according to the
        simulation name and step. Files are saved as
        simname_simstep/image_00000x.vtu
        """
        # Create the directory
        directory = 'vtks/%s_%d' % (self.name, self.step)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save the vtk files with the finmag function.
        # The last separator '_' is to distinguish the image
        # from its corresponding number, e.g. image_000001.pvd
        vtk_saver = VTKSaver('%s/image_.pvd' % (directory), overwrite=True)

        self.coords.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num):
            # Save the REDUCED length array
            self._m.vector().set_local(
                self.coords[i, :][self.sim.m_field.d2v_xxx])
            # set t = 0; it seems that the time parameter is only
            # for the interface?
            vtk_saver.save_field(self._m, 0)
        self.coords.shape = (-1, )

    def save_npys(self):
        """
        Save npy files in different folders according to the simulation
        name and step. Files are saved as: simname_simstep/image_x.npy
        """
        # Create directory as simname_simstep
        directory = 'npys/%s_%d' % (self.name, self.step)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save the images with the format 'image_{}.npy',
        # where {} is the image number, starting from 0
        self.coords.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num):
            name = os.path.join(directory, 'image_%d.npy' % i)
            # Save the reduced length array.
            # Since the dolfin vector (for the magnetisation) can have
            # any ordering, we rely on the fact that this mapping does
            # not change when we use the same mesh when loading the
            # system from a different simulation
            np.save(name, self.coords[i, :][self.sim.m_field.d2v_xxx])
        self.coords.shape = (-1, )

    def create_integrator(self, reltol=1e-6, abstol=1e-6, nsteps=10000):
        integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        integrator.init(self.sundials_rhs, 0, self.coords)
        integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
        integrator.set_scalar_tolerances(reltol, abstol)
        integrator.set_max_num_steps(nsteps)
        self.integrator = integrator

    def compute_effective_field(self, y):
        y.shape = (self.total_image_num, -1)

        for i in range(self.image_num):
            # To update the magnetisation we only need the reduced
            # vector, thus we use the d2v map
            self._m.vector().set_local(y[i + 1][self.sim.m_field.d2v_xxx])
            self.effective_field.update()
            # Compute the effective field, which is the gradient of
            # the energy in the NEB method (derivative with respect to
            # the generalised coordinates).
            # We need the whole system effective field
            # (thus we use the v2d map)
            h = self.effective_field.H_eff[self.sim.m_field.v2d_xxx]
            self.Heff[i + 1, :] = h[:]

            # Compute the total energy
            self.energy[i + 1] = self.effective_field.total_energy()

            # Compute the 'distance' or difference between neighbouring
            # states around y[i+1]. This is used to compute the spring
            # force
            dm1 = compute_dm(y[i], y[i + 1])
            dm2 = compute_dm(y[i + 1], y[i + 2])
            self.springs[i] = self.spring * (dm2 - dm1)

        # Use the native NEB (C++ code) to compute the tangents
        # according to the improved NEB method, developed by Henkelman
        # and Jonsson at: Henkelman et al., Journal of Chemical Physics
        # 113, 22 (2000)
        native_neb.compute_tangents(y, self.energy, self.tangents)
        # native_neb.compute_springs(y, self.springs, self.spring)

        y.shape = (-1, )

    def sundials_rhs(self, time, y, ydot):
        """
        Right hand side of the optimization scheme used to find the
        minimum energy path. In our case, we use an LLG kind of
        equation:

            d Y / dt = Y x Y x D

            D = -( nabla E + [nabla E * t] t ) + F_spring

        where Y is an image, Y = (M_0, ..., M_N), and t is the tangent
        vector, defined according to the energy of the neighbouring
        images (see the Henkelman et al publication).

        If a climbing_image index is specified, the corresponding image
        will be iterated without the spring force and with an inverted
        component along the tangent.
        """
        # Update the ODE solver
        self.ode_count += 1
        timer.start("sundials_rhs", self.__class__.__name__)

        # Compute the effective field H for every image, H = -nabla E
        # (derived with respect to M)
        self.compute_effective_field(y)

        # Reshape y and ydot into matrices of total_image_num rows
        y.shape = (self.total_image_num, -1)
        ydot.shape = (self.total_image_num, -1)

        # Compute the total force for every image (not the extremes).
        # Remember that self.image_num = self.total_image_num - 2.
        # The total force is:
        #   D = - (-nabla E + [nabla E * t] t) + F_spring
        # This value is different if a climbing image is specified:
        #   D_climb = -nabla E + 2 * [nabla E * t] t
        for i in range(self.image_num):
            h = self.Heff[i + 1]
            t = self.tangents[i]
            sf = self.springs[i]

            # Compare with None explicitly: the (shifted) climbing
            # image index may be 0
            if not (self.climbing_image is not None
                    and i == self.climbing_image):
                h3 = h - np.dot(h, t) * t + sf * t
            else:
                h3 = h - 2 * np.dot(h, t) * t

            h[:] = h3[:]
            # ydot[i+1, :] = h3[:]

        # Update the step with the optimisation algorithm; in this
        # case we use: dY /dt = Y x Y x D
        # (check the C++ code in finmag/native/src/)
        native_neb.compute_dm_dt(y, self.Heff, ydot)

        ydot[0, :] = 0
        ydot[-1, :] = 0

        y.shape = (-1, )
        ydot.shape = (-1, )

        timer.stop("sundials_rhs", self.__class__.__name__)
        return 0

    def compute_distance(self):
        distance = []
        ys = self.coords
        ys.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num - 1):
            dm = compute_dm(ys[i], ys[i + 1])
            distance.append(dm)
        ys.shape = (-1, )
        self.distances = np.array(distance)

    def run_until(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.coords)

        m = self.coords
        y = self.last_m
        m.shape = (self.total_image_num, -1)
        y.shape = (self.total_image_num, -1)

        max_dmdt = 0
        for i in range(1, self.image_num + 1):
            dmdt = compute_dm(y[i], m[i]) / (t - self.t)
            if dmdt > max_dmdt:
                max_dmdt = dmdt

        m.shape = (-1, )
        y.shape = (-1, )
        self.last_m[:] = m[:]
        self.t = t
        return max_dmdt

    def relax(self, dt=1e-8, stopping_dmdt=1e4, max_steps=1000,
              save_npy_steps=100, save_vtk_steps=100):
        if self.integrator is None:
            self.create_integrator()

        log.debug("Relaxation parameters: "
                  "stopping_dmdt={} (degrees per nanosecond), "
                  "time_step={} s, max_steps={}.".format(
                      stopping_dmdt, dt, max_steps))

        # Save the initial state i=0
        self.compute_distance()
        self.tablewriter.save()
        self.tablewriter_dm.save()

        for i in range(max_steps):
            if i % save_vtk_steps == 0:
                self.save_vtks()
            if i % save_npy_steps == 0:
                self.save_npys()
            self.step += 1

            cvode_dt = self.integrator.get_current_step()
            increment_dt = dt
            if cvode_dt > dt:
                increment_dt = cvode_dt
            dmdt = self.run_until(self.t + increment_dt)

            self.compute_distance()
            self.tablewriter.save()
            self.tablewriter_dm.save()

            log.debug("step: {:.3g}, step_size: {:.3g}"
                      " and max_dmdt: {:.3g}.".format(
                          self.step, increment_dt, dmdt))

            if dmdt < stopping_dmdt:
                break

        log.info("Relaxation finished at time step = {:.4g}, "
                 "t = {:.2g}, call rhs = {:.4g} "
                 "and max_dmdt = {:.3g}".format(self.step, self.t,
                                                self.ode_count, dmdt))

        self.save_vtks()
        self.save_npys()
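# ----------------------------------------------------------------------
# Illustrative sketch (not part of finmag): native_neb.compute_tangents
# implements the improved tangent estimate of Henkelman and Jonsson
# (J. Chem. Phys. 113, 9978 (2000)). A minimal pure-numpy version for a
# single interior image i could look like the function below; the real
# C++ code additionally weights the two differences by the energy
# differences at local extrema, which is simplified here to an
# unweighted average. All names are hypothetical.
import numpy as np


def improved_tangent(y_prev, y_i, y_next, e_prev, e_i, e_next):
    """Forward difference if the energy increases towards the next
    image, backward difference if it decreases, else a (simplified)
    average; returned normalised."""
    if e_next > e_i > e_prev:
        t = y_next - y_i
    elif e_next < e_i < e_prev:
        t = y_i - y_prev
    else:
        # local energy extremum: mix both differences
        t = 0.5 * ((y_next - y_i) + (y_i - y_prev))
    return t / np.linalg.norm(t)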
class Sim(object):

    def __init__(self, mesh, Ms=8e5, unit_length=1.0, name='unnamed',
                 auto_save_data=True):
        self.mesh = mesh
        self.unit_length = unit_length

        self.DG = df.FunctionSpace(mesh, "DG", 0)
        self.DG3 = df.VectorFunctionSpace(mesh, "DG", 0, dim=3)

        self._m = df.Function(self.DG3)
        self.Ms = Ms
        self.nxyz_cell = mesh.num_cells()
        self._alpha = np.zeros(self.nxyz_cell)
        self.m = np.zeros(3 * self.nxyz_cell)
        self.H_eff = np.zeros(3 * self.nxyz_cell)
        self.dm_dt = np.zeros(3 * self.nxyz_cell)

        self.set_default_values()
        self.auto_save_data = auto_save_data
        self.sanitized_name = helpers.clean_filename(name)

        if self.auto_save_data:
            self.ndtfilename = self.sanitized_name + '.ndt'
            self.tablewriter = Tablewriter(self.ndtfilename, self,
                                           override=True)

    def set_default_values(self, reltol=1e-8, abstol=1e-8, nsteps=10000000):
        self._alpha_mult = df.Function(self.DG)
        self._alpha_mult.assign(df.Constant(1))

        self._pins = np.array([], dtype="int")
        self.volumes = df.assemble(df.TestFunction(self.DG) * df.dx).array()
        self.real_volumes = self.volumes * self.unit_length ** 3
        self.Volume = np.sum(self.volumes)

        self.alpha = 0.5  # alpha for solve: alpha * _alpha_mult
        self.gamma = consts.gamma
        self.c = 1e11  # 1/s numerical scaling correction
                       # (0.1e12 1/s is the value used by default in nmag 0.2)
        self.do_precession = True
        self._pins = np.array([], dtype="int")
        self.interactions = []
        self.t = 0

    def set_up_solver(self, reltol=1e-8, abstol=1e-8, nsteps=10000):
        integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        integrator.init(self.sundials_rhs, 0, self.m)
        integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
        integrator.set_scalar_tolerances(reltol, abstol)
        integrator.set_max_num_steps(nsteps)
        self.integrator = integrator

    @property
    def alpha(self):
        """The damping factor :math:`\\alpha`."""
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        self._alpha = value
        # We need to update the alpha vector as well, which is
        # why we have this property at all.
        self.alpha_vec = self._alpha * self._alpha_mult.vector().array()

    @property
    def pins(self):
        return self._pins

    def compute_effective_field(self):
        self.H_eff[:] = 0
        for interaction in self.interactions:
            self.H_eff += interaction.compute_field()

    def add(self, interaction):
        interaction.setup(self.DG3, self._m, self.Ms,
                          unit_length=self.unit_length)
        self.interactions.append(interaction)

    def run_until(self, t):
        if t < self.t:
            return
        elif t == 0:
            if self.auto_save_data:
                self.tablewriter.save()
            return

        self.integrator.advance_time(t, self.m)
        self._m.vector().set_local(self.m)
        self.t = t

        if self.auto_save_data:
            self.tablewriter.save()

    def sundials_rhs(self, t, y, ydot):
        self.t = t
        self.m[:] = y[:]
        self._m.vector().set_local(self.m)
        self.compute_effective_field()
        self.dm_dt[:] = 0

        char_time = 0.1 / self.c
        self.m.shape = (3, -1)
        self.H_eff.shape = (3, -1)
        self.dm_dt.shape = (3, -1)

        native_llg.calc_llg_dmdt(self.m, self.H_eff, t, self.dm_dt,
                                 self.pins, self.gamma, self.alpha_vec,
                                 char_time, self.do_precession)

        self.m.shape = (-1,)
        self.dm_dt.shape = (-1,)
        self.H_eff.shape = (-1,)

        ydot[:] = self.dm_dt[:]
        return 0

    def set_m(self, value):
        self.m[:] = helpers.vector_valued_dg_function(
            value, self.DG3, normalise=True).vector().array()
        self._m.vector().set_local(self.m)

    @property
    def m_average(self):
        """
        Compute and return the average polarisation according to the
        formula :math:`\\langle m \\rangle = \\frac{1}{V} \\int m \\:
        \\mathrm{d}V`
        """
        mx = df.assemble(df.dot(self._m, df.Constant([1, 0, 0])) * df.dx)
        my = df.assemble(df.dot(self._m, df.Constant([0, 1, 0])) * df.dx)
        mz = df.assemble(df.dot(self._m, df.Constant([0, 0, 1])) * df.dx)
        return np.array([mx, my, mz]) / self.Volume
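# ----------------------------------------------------------------------
# Illustrative sketch (not part of finmag): Sim.sundials_rhs above
# delegates the time derivative to native_llg.calc_llg_dmdt. The damped
# Landau-Lifshitz-Gilbert form it evaluates is, up to the norm
# relaxation term controlled by char_time, equivalent to
#     dm/dt = -gamma/(1+a^2) m x H - gamma*a/(1+a^2) m x (m x H).
# A pure-numpy version for a single unit spin (hedged: the exact native
# prefactors and the char_time correction are not reproduced here):
import numpy as np


def llg_dmdt(m, H, gamma=2.21e5, alpha=0.5, do_precession=True):
    """LLG right hand side for one unit spin m in field H."""
    prefactor = gamma / (1.0 + alpha ** 2)
    dmdt = np.zeros(3)
    if do_precession:
        dmdt += -prefactor * np.cross(m, H)
    dmdt += -prefactor * alpha * np.cross(m, np.cross(m, H))
    return dmdt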
class NEB_Sundials(object):
    """
    Nudged elastic band method by solving the differential equation
    using Sundials.
    """

    def __init__(self, sim, initial_images, climbing_image=None,
                 interpolations=None, spring=5e5, name='unnamed'):
        """
        *Arguments*

            sim: the Simulation class

            initial_images: a list containing the initial values, which
                can have any of the forms accepted by the function
                'finmag.util.helpers.vector_valued_function', for
                example,

                    initial_images = [(0, 0, 1), (0, 0, -1)]

                or with a user-defined function, e.g.

                    def init_m(pos):
                        x = pos[0]
                        if x < 10:
                            return (0, 1, 1)
                        return (-1, 0, 0)

                    initial_images = [(0, 0, 1), (0, 0, -1), init_m]

                are accepted forms.

            climbing_image: An integer with the index (from 1 to the
                total number of images minus two; it does not make sense
                to use the extreme images) of the image with the largest
                energy, which will be updated in the NEB algorithm using
                the Climbing Image NEB method (no spring force and "with
                the component along the elastic band inverted" [*]).

                See: [*] Henkelman et al., The Journal of Chemical
                Physics 113, 9901 (2000)

            interpolations: a list containing only integers; its length
                should be equal to the length of initial_images minus 1,
                i.e., len(interpolations) = len(initial_images) - 1

                ** THIS IS not well defined in CARTESIAN coordinates **

            spring: the spring constant, a float value

            disable_tangent: this is an experimental option; by
                disabling the tangent we can quickly get a rough feeling
                for the local energy minima.
        """
        self.sim = sim
        self.name = name
        self.spring = spring

        # We subtract one because the *sundials_rhs* function
        # only uses an array without counting the extreme images,
        # whose length is self.image_num (see below)
        if climbing_image is not None:
            self.climbing_image = climbing_image - 1
        else:
            self.climbing_image = climbing_image

        # Dolfin function of the new _m_field (instead of _m)
        self._m = sim.llg._m_field.f
        self.effective_field = sim.llg.effective_field

        if interpolations is None:
            interpolations = [0 for i in range(len(initial_images) - 1)]

        self.initial_images = initial_images
        self.interpolations = interpolations

        if len(interpolations) != len(initial_images) - 1:
            raise RuntimeError("""The length of interpolations should be
                equal to the length of the initial_images array minus 1,
                i.e., len(interpolations) = len(initial_images) - 1""")

        if len(initial_images) < 2:
            raise RuntimeError("""At least two images must be provided
                to create the energy band""")

        # the total image number including the two ends
        self.total_image_num = len(initial_images) + sum(interpolations)
        self.image_num = self.total_image_num - 2

        S3 = sim.S3
        Vs = []
        for i in range(self.image_num):
            Vs.append(S3)
        ME = df.MixedFunctionSpace(Vs)
        self.images_fun = df.Function(ME)
        # all the degrees of freedom, which is a petsc vector
        self.coords = df.as_backend_type(self.images_fun.vector()).vec()

        self.t = 0
        self.step = 0
        self.ode_count = 1
        self.integrator = None

        self.initial_image_coordinates()
        self.create_tablewriter()

    def linear_interpolation_two(self, image0, image1, n):
        """
        Define a linear interpolation between two states of the energy
        band (m0, m1) to get an initial state. The interpolation is
        done in the magnetic moments that constitute the magnetic
        system.
        """
        # Get the spherical coordinates dolfin functions
        # for the m0 and m1 magnetisation vector fields
        self.sim.set_m(self.initial_images[image0])
        theta0, phi0 = self.sim._m_field.get_spherical()
        self.sim.set_m(self.initial_images[image1])
        theta1, phi1 = self.sim._m_field.get_spherical()

        # To not depend on numpy arrays, we will assemble every
        # interpolation into dolfin functions and assign their
        # values to the subdomains of the MixedFunctionSpace of images

        # Create a scalar function space
        S1 = df.FunctionSpace(self.sim.m_field.functionspace.mesh(),
                              'CG', 1)

        # Define a variable to use as vector in all the assemble
        # instances
        assemble_vector = None

        # Define the interpolation step for theta (scalar product with
        # a test function, evaluated at the mesh nodes via dP)
        assemble_vector = df.assemble(((theta1 - theta0) / (n + 1)) *
                                      df.TestFunction(S1) * df.dP,
                                      tensor=assemble_vector)
        dtheta = df.Function(S1)
        dtheta.vector().axpy(1, assemble_vector)

        # The same for phi
        assemble_vector = df.assemble(((phi1 - phi0) / (n + 1)) *
                                      df.TestFunction(S1) * df.dP,
                                      tensor=assemble_vector)
        dphi = df.Function(S1)
        dphi.vector().axpy(1, assemble_vector)

        # Now loop for every interpolation and assign it to
        # the MixedFunctionSpace
        for i in xrange(n):
            # Create dolfin functions from the FS, for the interpolation
            interpolation_theta = df.Function(S1)
            interpolation_phi = df.Function(S1)

            # Compute the values using the assemble method with dolfin
            # dP (like a Dirac delta to get values on every node of the
            # mesh). This returns a dolfin vector.

            # Theta
            assemble_vector = df.assemble((theta0 + (i + 1) * dtheta) *
                                          df.TestFunction(S1) * df.dP,
                                          tensor=assemble_vector)
            # Set the vector values to the dolfin function
            interpolation_theta.vector().axpy(1, assemble_vector)

            # Phi
            assemble_vector = df.assemble((phi0 + (i + 1) * dphi) *
                                          df.TestFunction(S1) * df.dP,
                                          tensor=assemble_vector)
            # Set the vector values to the dolfin function
            interpolation_phi.vector().axpy(1, assemble_vector)

            # Now set this interpolation to the corresponding image:
            # a vector function in the simulation magnetisation
            # vector field function space
            interpolation = df.Function(self.sim.S3)
            interpolation_vec = df.assemble(
                df.dot(df.as_vector((df.sin(interpolation_theta) *
                                     df.cos(interpolation_phi),
                                     df.sin(interpolation_theta) *
                                     df.sin(interpolation_phi),
                                     df.cos(interpolation_theta))),
                       df.TestFunction(self.sim.S3)) * df.dP)
            interpolation.vector().axpy(1, interpolation_vec)

            # Now assign the interpolation vector function values to
            # the corresponding image in the MixedFunctionSpace
            df.assign(self.images_fun.sub(i), interpolation)

        # coords = []
        # for i in range(n):
        #     theta_phi_interp = theta_phi0 + (i + 1) * d_theta_phi
        #     coords.append(spherical2cartesian(theta_phi_interp))
        # return coords

    def create_tablewriter(self):
        entities_energy = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'energy': {'unit': '<J>',
                       'get': lambda sim: sim.energy,
                       'header': ['image_%d' % i
                                  for i in range(self.image_num + 2)]}
        }
        self.tablewriter = Tablewriter(
            '%s_energy.ndt' % (self.name), self, override=True,
            entities=entities_energy)

        entities_dm = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'dms': {'unit': '<1>',
                    'get': lambda sim: sim.distances,
                    'header': ['image_%d_%d' % (i, i + 1)
                               for i in range(self.image_num + 1)]}
        }
        self.tablewriter_dm = Tablewriter(
            '%s_dms.ndt' % (self.name), self, override=True,
            entities=entities_dm)

    def initial_image_coordinates(self):
        """
        Generate the coordinates linearly according to the number of
        interpolations provided.

        Example: imagine we have 4 images and want 3 interpolations
        between every neighbouring pair, i.e. interpolations = [3, 3, 3]

        1. Imagine the initial states with the interpolation numbers
           and choose the first and second state

                0          1           2          3
                X -------- X --------- X -------- X
                     3           3          3

        2. The counter image_id is set to 0

        3. Set the image 0 magnetisation vector as m0 and append the
           values to self.coords[0]. Update the counter: image_id = 1

        4. Set the image 1 magnetisation values as m1 and interpolate
           the values between m0 and m1, generating 3 arrays with the
           magnetisation values of every interpolation image. For every
           array, append the values to self.coords[i], with i = 1, 2
           and 3, updating the counter every time, so image_id = 4 now

        5. Append the value of m1 (image 1) in self.coords[4].
           Update the counter (image_id = 5 now)

        6. Move to the next pair of images; now set the 1st image
           magnetisation values as m0 and append to self.coords[5]

        7. Interpolate to get self.coords[i], for i = 6, 7, 8 ...

        8. Repeat as before until moving to the pair of images: 2 - 3

        9. Finally append the magnetisation of the last image
           (self.initial_images[-1]). In this case, the 3rd image.

        Then, for every array of magnetisation values (self.coords[i]),
        pass the values to the simulation and store the energy
        corresponding to every i-th image in the self.energy[i] array.

        Finally, flatten the self.coords matrix (containing the
        magnetisation values of every image in different rows).

        ** Our generalised coordinates in the NEBM are the
           magnetisation values.
        """
        # Initiate the counter
        image_id = 0

        # For every interpolation between images (zero if no
        # interpolations were specified)
        for i in range(len(self.interpolations)):
            # Store the number
            n = self.interpolations[i]

            # Save on the first image of a pair (step 1, 6, ...)
            self.sim.set_m(self.initial_images[i])
            m0 = self.sim._m_field.get_ordered_numpy_array_xxx()
            df.assign(self.images_fun.sub(image_id), self.sim._m_field.f)
            image_id = image_id + 1

            # Set the second image in the pair as m1 and interpolate
            # (steps 4 and 7), saving in the corresponding self.coords
            # entries
            self.sim.set_m(self.initial_images[i + 1])
            m1 = self.sim._m_field.get_ordered_numpy_array_xxx()

            # Interpolations (arrays with magnetisation values)
            coords = linear_interpolation_two(m0, m1, n)

            for coord in coords:
                self.sim.set_m(coord)
                df.assign(self.images_fun.sub(image_id),
                          self.sim._m_field.f)
                self.coords[image_id][:] = coord[:]
                image_id = image_id + 1
            # Continue to the next pair of images

        # Append the magnetisation of the last image
        self.sim.set_m(self.initial_images[-1])
        df.assign(self.images_fun.sub(image_id), self.sim._m_field.f)

        """
        # Save the energies
        for i in range(self.total_image_num):
            self._m.vector().set_local(self.coords[i])
            self.effective_field.update()
            self.energy[i] = self.effective_field.total_energy()
        """

    def save_vtks(self):
        """
        Save vtk files in different folders, according to the
        simulation name and step. Files are saved as
        simname_simstep/image_00000x.vtu
        """
        # Create the directory
        directory = 'vtks/%s_%d' % (self.name, self.step)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save the vtk files with the finmag function.
        # The last separator '_' is to distinguish the image
        # from its corresponding number, e.g. image_000001.pvd
        vtk_saver = VTKSaver('%s/image_.pvd' % (directory), overwrite=True)

        self.coords.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num):
            self._m.vector().set_local(self.coords[i, :])
            # set t = 0; it seems that the time parameter is only
            # for the interface?
            vtk_saver.save_field(self._m, 0)
        self.coords.shape = (-1, )

    def create_integrator(self, rtol=1e-6, atol=1e-6):
        integrator = cvode.CvodeSolver(self.sundials_rhs, 0, self.coords,
                                       rtol, atol)
        self.integrator = integrator

    def compute_effective_field(self, y):
        y.shape = (self.total_image_num, -1)
        """
        for i in range(self.image_num):
            self._m.vector().set_local(y[i + 1])
            self.effective_field.update()
            # Compute the effective field, which is the gradient of
            # the energy in the NEB method (derivative with respect to
            # the generalised coordinates)
            h = self.effective_field.H_eff
            self.Heff[i + 1, :] = h[:]

            # Compute the total energy
            self.energy[i + 1] = self.effective_field.total_energy()

            # Compute the 'distance' or difference between neighbouring
            # states around y[i+1]. This is used to compute the spring
            # force
            dm1 = compute_dm(y[i], y[i + 1])
            dm2 = compute_dm(y[i + 1], y[i + 2])
            self.springs[i] = self.spring * (dm2 - dm1)

        # Use the native NEB (C++ code) to compute the tangents
        # according to the improved NEB method, developed by Henkelman
        # and Jonsson at: Henkelman et al., Journal of Chemical Physics
        # 113, 22 (2000)
        native_neb.compute_tangents(y, self.energy, self.tangents)
        # native_neb.compute_springs(y,self.springs,self.spring)
        """
        y.shape = (-1, )

    def sundials_rhs(self, time, y, ydot):
        """
        Right hand side of the optimization scheme used to find the
        minimum energy path. In our case, we use an LLG kind of
        equation:

            d Y / dt = Y x Y x D

            D = -( nabla E + [nabla E * t] t ) + F_spring

        where Y is an image, Y = (M_0, ..., M_N), and t is the tangent
        vector, defined according to the energy of the neighbouring
        images (see the Henkelman et al publication).

        If a climbing_image index is specified, the corresponding image
        will be iterated without the spring force and with an inverted
        component along the tangent.
        """
        # Update the ODE solver
        self.ode_count += 1
        timer.start("sundials_rhs", self.__class__.__name__)

        # Compute the effective field H for every image, H = -nabla E
        # (derived with respect to M)
        self.compute_effective_field(y)

        # Reshape y and ydot into matrices of total_image_num rows
        y.shape = (self.total_image_num, -1)
        ydot.shape = (self.total_image_num, -1)

        # Compute the total force for every image (not the extremes).
        # Remember that self.image_num = self.total_image_num - 2.
        # The total force is:
        #   D = - (-nabla E + [nabla E * t] t) + F_spring
        # This value is different if a climbing image is specified:
        #   D_climb = -nabla E + 2 * [nabla E * t] t
        for i in range(self.image_num):
            h = self.Heff[i + 1]
            t = self.tangents[i]
            sf = self.springs[i]

            # Compare with None explicitly: the (shifted) climbing
            # image index may be 0
            if not (self.climbing_image is not None
                    and i == self.climbing_image):
                h3 = h - np.dot(h, t) * t + sf * t
            else:
                h3 = h - 2 * np.dot(h, t) * t

            h[:] = h3[:]
            # ydot[i+1, :] = h3[:]

        # Update the step with the optimisation algorithm; in this
        # case we use: dY /dt = Y x Y x D
        # (check the C++ code in finmag/native/src/)
        # native_neb.compute_dm_dt(y, self.Heff, ydot)

        ydot[0, :] = 0
        ydot[-1, :] = 0

        y.shape = (-1,)
        ydot.shape = (-1,)

        timer.stop("sundials_rhs", self.__class__.__name__)
        return 0

    def compute_distance(self):
        distance = []
        ys = self.coords
        ys.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num - 1):
            dm = compute_dm(ys[i], ys[i + 1])
            distance.append(dm)
        ys.shape = (-1, )
        self.distances = np.array(distance)

    def run_until(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.coords)

        m = self.coords
        y = self.last_m
        m.shape = (self.total_image_num, -1)
        y.shape = (self.total_image_num, -1)

        max_dmdt = 0
        for i in range(1, self.image_num + 1):
            dmdt = compute_dm(y[i], m[i]) / (t - self.t)
            if dmdt > max_dmdt:
                max_dmdt = dmdt

        m.shape = (-1,)
        y.shape = (-1,)
        self.last_m[:] = m[:]
        self.t = t
        return max_dmdt

    def relax(self, dt=1e-8, stopping_dmdt=1e4, max_steps=1000,
              save_npy_steps=100, save_vtk_steps=100):
        if self.integrator is None:
            self.create_integrator()

        log.debug("Relaxation parameters: "
                  "stopping_dmdt={} (degrees per nanosecond), "
                  "time_step={} s, max_steps={}.".format(stopping_dmdt,
                                                         dt, max_steps))

        # Save the initial state i=0
        self.compute_distance()
        self.tablewriter.save()
        self.tablewriter_dm.save()

        for i in range(max_steps):
            if i % save_vtk_steps == 0:
                self.save_vtks()
            if i % save_npy_steps == 0:
                self.save_npys()
            self.step += 1

            cvode_dt = self.integrator.get_current_step()
            increment_dt = dt
            if cvode_dt > dt:
                increment_dt = cvode_dt
            dmdt = self.run_until(self.t + increment_dt)

            self.compute_distance()
            self.tablewriter.save()
            self.tablewriter_dm.save()

            log.debug("step: {:.3g}, step_size: {:.3g}"
                      " and max_dmdt: {:.3g}.".format(
                          self.step, increment_dt, dmdt))

            if dmdt < stopping_dmdt:
                break

        log.info("Relaxation finished at time step = {:.4g}, "
                 "t = {:.2g}, call rhs = {:.4g} "
                 "and max_dmdt = {:.3g}".format(self.step, self.t,
                                                self.ode_count, dmdt))

        self.save_vtks()
        self.save_npys()
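# ----------------------------------------------------------------------
# Illustrative sketch (not part of finmag): linear_interpolation_two
# above interpolates per node in the spherical angles (theta, phi)
# rather than in Cartesian components, so intermediate images stay on
# the unit sphere. The same idea for flat numpy angle arrays (all names
# hypothetical):
import numpy as np


def interpolate_spherical(theta0, phi0, theta1, phi1, n):
    """Return n equally spaced (theta, phi) images strictly between
    the two end states."""
    images = []
    dtheta = (theta1 - theta0) / (n + 1)
    dphi = (phi1 - phi0) / (n + 1)
    for i in range(1, n + 1):
        images.append((theta0 + i * dtheta, phi0 + i * dphi))
    return images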
class LLB(object):

    def __init__(self, mat, method='RK2b', name='unnamed', pbc2d=None):
        self.material = mat
        self._m = mat._m
        self.m = self._m.vector().array()
        self.S1 = mat.S1
        self.S3 = mat.S3
        self.mesh = self.S1.mesh()
        self.dm_dt = np.zeros(self.m.shape)
        self.H_eff = np.zeros(self.m.shape)
        self.time_scale = 1e-9
        self.method = method
        self.pbc2d = pbc2d
        self.set_default_values()
        self.interactions.append(mat)

        if self.pbc2d:
            self.pbc2d = PeriodicBoundary2D(self.S3)

        self.name = name
        self.sanitized_name = helpers.clean_filename(name)
        self.logfilename = self.sanitized_name + '.log'
        self.ndtfilename = self.sanitized_name + '.ndt'
        helpers.start_logging_to_file(self.logfilename, mode='w',
                                      level=logging.DEBUG)

        self.scheduler = scheduler.Scheduler()

        self.domains = df.CellFunction("uint", self.mesh)
        self.domains.set_all(0)
        self.region_id = 0

        self.tablewriter = Tablewriter(self.ndtfilename, self, override=True)

        self.overwrite_pvd_files = False
        self.vtk_export_filename = self.sanitized_name + '.pvd'
        self.vtk_saver = VTKSaver(self.vtk_export_filename, overwrite=True)

        self.scheduler_shortcuts = {
            'save_ndt': LLB.save_ndt,
            'save_vtk': LLB.save_vtk,
        }

    def set_default_values(self):
        self.alpha = self.material.alpha
        self.gamma_G = 2.21e5  # m/(As)
        self.gamma_LL = self.gamma_G / (1. + self.alpha ** 2)
        self.t = 0.0  # s
        self.do_precession = True
        self.vol = df.assemble(df.dot(df.TestFunction(self.S3),
                                      df.Constant([1, 1, 1])) * df.dx).array()
        self.real_vol = self.vol * self.material.unit_length ** 3
        self.nxyz = self.mesh.num_vertices()
        self._alpha = np.zeros(self.nxyz)
        self.pins = []
        self._pre_rhs_callables = []
        self._post_rhs_callables = []
        self.interactions = []

    def set_up_solver(self, reltol=1e-8, abstol=1e-8, nsteps=10000):
        integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        integrator.init(self.sundials_rhs, 0, self.m)
        integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
        integrator.set_scalar_tolerances(reltol, abstol)
        integrator.set_max_num_steps(nsteps)
        self.integrator = integrator
        self.method = 'cvode'

    def set_up_stochastic_solver(self, dt=1e-13, using_type_II=True):
        self.using_type_II = using_type_II

        M_pred = np.zeros(self.m.shape)
        integrator = native_llb.StochasticLLBIntegrator(
            self.m, M_pred, self.material.Ms, self.material.T,
            self.material.real_vol, self.pins, self.stochastic_rhs,
            self.method)
        self.integrator = integrator
        self._seed = np.random.random_integers(4294967295)
        self.dt = dt

    @property
    def t(self):
        return self._t * self.time_scale

    @t.setter
    def t(self, value):
        self._t = value / self.time_scale

    @property
    def dt(self):
        return self._dt * self.time_scale

    @dt.setter
    def dt(self, value):
        self._dt = value / self.time_scale
        log.info("dt=%g." % self.dt)
        self.setup_parameters()

    @property
    def seed(self):
        return self._seed

    @seed.setter
    def seed(self, value):
        self._seed = value
        log.info("seed=%d." % self._seed)
        self.setup_parameters()

    def set_pins(self, nodes):
        pinlist = []
        if hasattr(nodes, '__call__'):
            coords = self.mesh.coordinates()
            for i, c in enumerate(coords):
                if nodes(c):
                    pinlist.append(i)
        else:
            pinlist = nodes

        self._pins = np.array(pinlist, dtype="int")

        if self.pbc2d:
            self._pins = np.concatenate([self.pbc2d.ids_pbc, self._pins])

        if len(self._pins) > 0:
            self.nxyz = self.S1.mesh().num_vertices()
            assert (np.min(self._pins) >= 0)
            assert (np.max(self._pins) < self.nxyz)

    def pins(self):
        return self._pins
    pins = property(pins, set_pins)

    def set_spatial_alpha(self, value):
        self._alpha[:] = helpers.scalar_valued_function(
            value, self.S1).vector().array()[:]

    def setup_parameters(self):
        self.integrator.set_parameters(self.dt, self.gamma_LL, self.alpha,
                                       self.material.Tc, self.seed,
                                       self.do_precession,
                                       self.using_type_II)

    def add(self, interaction):
        interaction.setup(self.material._m, self.material.Ms0,
                          unit_length=self.material.unit_length)
        self.interactions.append(interaction)

        if interaction.__class__.__name__ == 'Zeeman':
            self.zeeman_interaction = interaction
            self.tablewriter.add_entity('zeeman', {
                'unit': '<A/m>',
                'get': lambda sim: sim.zeeman_interaction.average_field(),
                'header': ('h_x', 'h_y', 'h_z')})

    def compute_effective_field(self):
        self.H_eff[:] = 0
        for interaction in self.interactions:
            self.H_eff += interaction.compute_field()

    def total_energy(self):
        # FIXME: change to the real total energy
        return 0

    def stochastic_rhs(self, y):
        self._m.vector().set_local(y)

        for func in self._pre_rhs_callables:
            func(self.t)

        self.compute_effective_field()

        for func in self._post_rhs_callables:
            func(self)

    def sundials_rhs(self, t, y, ydot):
        self.t = t
        self._m.vector().set_local(y)

        for func in self._pre_rhs_callables:
            func(self.t)

        self.compute_effective_field()

        timer.start("sundials_rhs", self.__class__.__name__)
        # Use the same characteristic time as defined by c
        native_llb.calc_llb_dmdt(self._m.vector().array(),
                                 self.H_eff,
                                 self.dm_dt,
                                 self.material.T,
                                 self.pins,
                                 self._alpha,
                                 self.gamma_LL,
                                 self.material.Tc,
                                 self.do_precession)
        timer.stop("sundials_rhs", self.__class__.__name__)

        for func in self._post_rhs_callables:
            func(self)

        ydot[:] = self.dm_dt[:]
        return 0

    def run_with_scheduler(self):
        if self.method == 'cvode':
            run_fun = self.run_until_sundial
        else:
            run_fun = self.run_until_stochastic

        for t in self.scheduler:
            run_fun(t)
            self.scheduler.reached(t)
        self.scheduler.finalise(t)

    def run_until(self, time):
        # Define a function that stops the integration and add it to
        # the scheduler. The at_end parameter is required because t can
        # be zero, which is considered False for comparison purposes in
        # scheduler.add.
        def StopIntegration():
            return False

        self.scheduler.add(StopIntegration, at=time, at_end=True)
        self.run_with_scheduler()

    def run_until_sundial(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.m)
        self._m.vector().set_local(self.m)
        self.t = t

    def run_until_stochastic(self, t):
        tp = t / self.time_scale

        if tp <= self._t:
            return
        try:
            while tp - self._t > 1e-12:
                self.integrator.run_step(self.H_eff)
                self._m.vector().set_local(self.m)
                if self.pbc2d:
                    self.pbc2d.modify_m(self._m.vector())
                self._t += self._dt
        except Exception, error:
            log.info(error)
            raise Exception(error)

        if abs(tp - self._t) < 1e-12:
            self._t = tp

        log.debug("Integrating dynamics up to t = %g" % t)
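# ----------------------------------------------------------------------
# Illustrative sketch (not part of finmag): LLB.set_pins above accepts
# either a list of vertex indices or a callable evaluated at each mesh
# coordinate. Its index-selection logic reduces to the loop below,
# shown here with a plain numpy coordinate array (all names are
# hypothetical):
import numpy as np


def select_pinned_vertices(coordinates, predicate):
    """Return indices of all vertices for which predicate(c) holds,
    mirroring the callable branch of LLB.set_pins."""
    pinlist = []
    for i, c in enumerate(coordinates):
        if predicate(c):
            pinlist.append(i)
    return np.array(pinlist, dtype="int")

# Example: pin everything with x < 2.0 on a toy line of vertices.
# select_pinned_vertices(np.array([[0., 0., 0.], [1., 0., 0.],
#                                  [3., 0., 0.]]),
#                        lambda c: c[0] < 2.0)   # -> array([0, 1])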
class NEB_Sundials(object): """ Nudged elastic band method by solving the differential equation using Sundials. """ def __init__(self, sim, initial_images, interpolations=None, spring=5e5, name='unnamed'): """ *Arguments* sim: the Simulation class initial_images: a list contain the initial value, which can have any of the forms accepted by the function 'finmag.util.helpers. vector_valued_function', for example, initial_images = [(0,0,1), (0,0,-1)] or with given defined function def init_m(pos): x=pos[0] if x<10: return (0,1,1) return (-1,0,0) initial_images = [(0,0,1), (0,0,-1), init_m ] are accepted forms. interpolations : a list only contain integers and the length of this list should equal to the length of the initial_images minus 1, i.e., len(interpolations) = len(initial_images) - 1 spring: the spring constant, a float value disable_tangent: this is an experimental option, by disabling the tangent, we can get a rough feeling about the local energy minima quickly. """ self.sim = sim self.name = name self.spring = spring # Dolfin function of the new _m_field (instead of _m) # self._m = sim.llg._m_field.f self._m = sim.m_field.f self.effective_field = sim.llg.effective_field if interpolations is None: interpolations = [0 for i in range(len(initial_images) - 1)] self.initial_images = initial_images self.interpolations = interpolations if len(interpolations) != len(initial_images) - 1: raise RuntimeError( """The length of interpolations should be equal to the length of the initial_images array minus 1, i.e., len(interpolations) = len(initial_images) - 1""") if len(initial_images) < 2: raise RuntimeError("""At least two images must be provided to create the energy band""") # the total image number including two ends self.total_image_num = len(initial_images) + sum(interpolations) # The number of images without the extremes self.image_num = self.total_image_num - 2 # Number of spins per image. 
The _m.vector has the form # [mx1, mx2, ..., my1, my2, ..., mz1, mz2] # Thus we divide by 3 to get the total of ms # self.nxyz = len(self._m.vector()) / 3 # Use the full vector from the field class to get the total number # of degrees of freedom when using PBC # (the self._m gave us the reduced number of spins when using PBC) self.nxyz = len(self.sim.m_field.get_ordered_numpy_array_xxx()) / 3 # Total number of degrees of freedom # (In spherical coords, we have 2 components per spin) self.coords = np.zeros(2 * self.nxyz * self.total_image_num) self.last_m = np.zeros(self.coords.shape) self.Heff = np.zeros(2 * self.nxyz * self.image_num) self.Heff.shape = (self.image_num, -1) self.tangents = np.zeros(self.Heff.shape) self.energy = np.zeros(self.total_image_num) self.springs = np.zeros(self.image_num) self.t = 0 self.step = 0 self.ode_count = 1 self.integrator = None self.initial_image_coordinates() self.create_tablewriter() def create_tablewriter(self): entities_energy = { 'step': { 'unit': '<1>', 'get': lambda sim: sim.step, 'header': 'steps' }, 'energy': { 'unit': '<J>', 'get': lambda sim: sim.energy, 'header': ['image_%d' % i for i in range(self.image_num + 2)] } } self.tablewriter = Tablewriter('%s_energy.ndt' % (self.name), self, override=True, entities=entities_energy) entities_dm = { 'step': { 'unit': '<1>', 'get': lambda sim: sim.step, 'header': 'steps' }, 'dms': { 'unit': '<1>', 'get': lambda sim: sim.distances, 'header': [ 'image_%d_%d' % (i, i + 1) for i in range(self.image_num + 1) ] } } self.tablewriter_dm = Tablewriter('%s_dms.ndt' % (self.name), self, override=True, entities=entities_dm) def initial_image_coordinates(self): """ Generate the coordinates linearly according to the number of interpolations provided. Example: Imagine we have 4 images and we want 3 interpolations between every neighbouring pair, i.e interpolations = [3, 3, 3] 1. Imagine the initial states with the interpolation numbers and choose the first and second state 0 1 2 3 X -------- X --------- X -------- X 3 3 3 2. Counter image_id is set to 0 3. Set the image 0 magnetisation vector as m0 and append the values to self.coords[0]. Update the counter: image_id = 1 now 4. Set the image 1 magnetisation values as m1 and interpolate the values between m0 and m1, generating 3 arrays with the magnetisation values of every interpolation image. For every array, append the values to self.coords[i] with i = 1, 2 and 3 ; updating the counter every time, so image_id = 4 now 5. Append the value of m1 (image 1) in self.coords[4] Update counter (image_id = 5 now) 6. Move to the next pair of images, now set the 1-th image magnetisation values as m0 and append to self.coords[5] 7. Interpolate to get self.coords[i], for i = 6, 7, 8 ... 8. Repeat as before until move to the pair of images: 2 - 3 9. Finally append the magnetisation of the last image (self.initial_images[-1]). 
In this case, the 3rd image Then, for every magnetisation vector values array (self.coords[i]) append the value to the simulation and store the energies corresponding to every i-th image to the self.energy[i] arrays Finally, flatten the self.coords matrix (containing the magnetisation values of every image in different rows) ** Our generalised coordinates in the NEBM are the magnetisation values """ image_id = 0 self.coords.shape = (self.total_image_num, -1) # For every interpolation between images (zero if no interpolations # were specified) for i in range(len(self.interpolations)): n = self.interpolations[i] # Save on the first image of a pair (step 1, 6, ...) self.sim.set_m(self.initial_images[i]) # m0 = self.sim.m # Use the full array for PBCs m0 = self.sim.m_field.get_ordered_numpy_array_xxx() # DEBUGGING # This shows that, when using PBC, # the vector() is reduced (boundary vlaues that are repeated) # while the ordered array consider all the spins # # print len(self.sim.m_field.f.vector()) # print len(self.sim.m_field.get_ordered_numpy_array_xxx()) # df.plot(self.sim.m_field.f, interactive=True) self.coords[image_id][:] = cartesian2spherical(m0) image_id = image_id + 1 # Set the second image in the pair as m1 and interpolate # (step 4 and 7), saving in corresponding self.coords entries self.sim.set_m(self.initial_images[i + 1]) # m1 = self.sim.m # Use the full array for PBCs m1 = self.sim.m_field.get_ordered_numpy_array_xxx() # Interpolations (arrays with magnetisation values) coords = linear_interpolation_two(m0, m1, n) for coord in coords: self.coords[image_id][:] = coord[:] image_id = image_id + 1 # Continue to the next pair of images # Append the magnetisation of the last image self.sim.set_m(self.initial_images[-1]) # m2 = self.sim.m # Use the full array for PBCs m2 = self.sim.m_field.get_ordered_numpy_array_xxx() self.coords[image_id][:] = cartesian2spherical(m2) # Save the energies for i in range(self.total_image_num): # To assign the values from the full array using set_local # (with boundaries when using PBCs) to a reduced # sim.m_field.vector() , # we use the ordered dof to vertex map (d2v_xxx), # which has a reduced number of indexes # (we take the value from the field class) self._m.vector().set_local( spherical2cartesian(self.coords[i])[self.sim.m_field.d2v_xxx]) # This is for checking that the interpolations worked # df.plot(self._m, interactive=True) self.effective_field.update() self.energy[i] = self.effective_field.total_energy() # Flatten the array self.coords.shape = (-1, ) def save_vtks(self): """ Save vtk files in different folders, according to the simulation name and step. Files are saved as simname_simstep/image_00000x.vtu """ # Create the directory directory = 'vtks/%s_%d' % (self.name, self.step) if not os.path.exists(directory): os.makedirs(directory) # Save the vtk files with the finmag function # The last separator '_' is to distinguish the image # from its corresponding number, e.g. image_000001.pvd vtk_saver = VTKSaver('%s/image_.pvd' % (directory), overwrite=True) self.coords.shape = (self.total_image_num, -1) for i in range(self.total_image_num): # We will save the vectors with the REDUCED length array self._m.vector().set_local( spherical2cartesian( self.coords[i, :])[self.sim.m_field.d2v_xxx]) # set t =0, it seems that the parameter time is only # for the interface? 
    def save_vtks(self):
        """
        Save vtk files in different folders, according to the simulation
        name and step.
        Files are saved as simname_simstep/image_00000x.vtu
        """
        # Create the directory
        directory = 'vtks/%s_%d' % (self.name, self.step)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save the vtk files with the finmag function.
        # The trailing separator '_' distinguishes the image from its
        # corresponding number, e.g. image_000001.pvd
        vtk_saver = VTKSaver('%s/image_.pvd' % (directory), overwrite=True)

        self.coords.shape = (self.total_image_num, -1)

        for i in range(self.total_image_num):
            # We save the vectors with the REDUCED length array
            self._m.vector().set_local(
                spherical2cartesian(
                    self.coords[i, :])[self.sim.m_field.d2v_xxx])
            # Set t = 0; it seems that the time parameter is only needed
            # for the interface
            vtk_saver.save_field(self._m, 0)

        self.coords.shape = (-1, )

    def save_npys(self):
        """
        Save npy files in different folders according to the simulation
        name and step.
        Files are saved as: simname_simstep/image_x.npy
        """
        # Create directory as simname_simstep
        directory = 'npys/%s_%d' % (self.name, self.step)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save the images with the format 'image_{}.npy',
        # where {} is the image number, starting from 0
        self.coords.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num):
            name = os.path.join(directory, 'image_%d.npy' % i)
            # Save the reduced length array.
            # Since the dolfin vector (for the magnetisation) can have
            # any ordering, we rely on the fact that this mapping does
            # not change when we use the same mesh, e.g. when loading the
            # system from a different simulation.
            # In the future it could be useful to save the mesh together
            # with the magnetisation in a single hdf5 file
            np.save(name,
                    spherical2cartesian(
                        self.coords[i, :])[self.sim.m_field.d2v_xxx])

        self.coords.shape = (-1, )

    def create_integrator(self, reltol=1e-6, abstol=1e-6, nsteps=10000):
        integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        integrator.init(self.sundials_rhs, 0, self.coords)
        integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
        integrator.set_scalar_tolerances(reltol, abstol)
        integrator.set_max_num_steps(nsteps)

        self.integrator = integrator
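
    # check_boundary (used in compute_effective_field below) is defined
    # elsewhere; based on the comment accompanying its call, a minimal
    # sketch of the intended behaviour, assuming the phi angles can be
    # addressed as a numpy array, could look like:
    #
    #     phi[phi > np.pi] -= 2 * np.pi
    #     phi[phi < -np.pi] += 2 * np.pi
    #
    # i.e. wrap phi back into (-pi, pi] so that neighbouring images never
    # differ by a spurious 2*pi jump. The exact coordinate layout is an
    # assumption here.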
    def compute_effective_field(self, y):
        """
        Compute the effective field and the tangents using the formalism
        developed by Henkelman et al. and applied to micromagnetics by
        Dittrich et al.

        The tangents use the native NEB code in
        finmag/native/src/neb/helper.cc
        """
        y.shape = (self.total_image_num, -1)

        for i in range(self.image_num):
            # Redefine the angles if phi is larger than pi
            # (see the corresponding function)
            check_boundary(y[i + 1])

            # Transform the input 'y' to Cartesian to compute the fields.
            #
            # spherical2cartesian updates the full system vector (y), but
            # to update the magnetisation we only need the reduced
            # vector, thus we use the d2v map
            self._m.vector().set_local(
                spherical2cartesian(y[i + 1])[self.sim.m_field.d2v_xxx])
            #
            self.effective_field.update()

            # Compute the effective field, which is the gradient of the
            # energy in the NEB method (the derivative with respect to
            # the generalised coordinates).
            #
            # To get the effective field for the whole system we use the
            # v2d map
            h = self.effective_field.H_eff[self.sim.m_field.v2d_xxx]

            # Transform to spherical coordinates
            # DEBUG:
            # print len(h)
            # print len(y[i + 1])
            self.Heff[i, :] = cartesian2spherical_field(h, y[i + 1])

            # Compute the total energy
            self.energy[i + 1] = self.effective_field.total_energy()

            # Compute the 'distance' or difference between neighbouring
            # states around y[i+1]. This is used to compute the spring
            # force
            #
            dm1 = compute_dm(y[i], y[i + 1])
            dm2 = compute_dm(y[i + 1], y[i + 2])
            self.springs[i] = self.spring * (dm2 - dm1)

        # Use the native NEB code to compute the tangents according to
        # the improved NEB method developed by Henkelman and Jonsson:
        # Henkelman et al., Journal of Chemical Physics 113, 22 (2000)
        native_neb.compute_tangents(y, self.energy, self.tangents)
        # native_neb.compute_springs(y, self.springs, self.spring)

        y.shape = (-1, )

    def sundials_rhs(self, time, y, ydot):
        self.ode_count += 1
        timer.start("sundials_rhs", self.__class__.__name__)

        self.compute_effective_field(y)

        ydot.shape = (self.total_image_num, -1)

        for i in range(self.image_num):
            h = self.Heff[i]
            t = self.tangents[i]
            sf = self.springs[i]

            # Project out the field component along the tangent and add
            # the spring force along the tangent
            h3 = h - np.dot(h, t) * t + sf * t
            ydot[i + 1, :] = h3[:]

        # The first and last images (the extrema) are kept fixed
        ydot[0, :] = 0
        ydot[-1, :] = 0

        ydot.shape = (-1, )
        timer.stop("sundials_rhs", self.__class__.__name__)

        return 0

    def compute_distance(self):
        distance = []

        ys = self.coords
        ys.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num - 1):
            dm = compute_dm(ys[i], ys[i + 1])
            distance.append(dm)
        ys.shape = (-1, )

        self.distances = np.array(distance)

    def run_until(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.coords)

        m = self.coords
        y = self.last_m

        m.shape = (self.total_image_num, -1)
        y.shape = (self.total_image_num, -1)

        max_dmdt = 0
        for i in range(1, self.image_num + 1):
            dmdt = compute_dm(y[i], m[i]) / (t - self.t)
            if dmdt > max_dmdt:
                max_dmdt = dmdt

        m.shape = (-1, )
        y.shape = (-1, )

        self.last_m[:] = m[:]
        self.t = t

        return max_dmdt

    def relax(self, dt=1e-8, stopping_dmdt=1e4, max_steps=1000,
              save_npy_steps=100, save_vtk_steps=100):

        if self.integrator is None:
            self.create_integrator()

        log.debug("Relaxation parameters: "
                  "stopping_dmdt={} (degrees per nanosecond), "
                  "time_step={} s, max_steps={}.".format(
                      stopping_dmdt, dt, max_steps))

        # Write the initial step (step=0)
        self.compute_distance()
        self.tablewriter.save()
        self.tablewriter_dm.save()

        for i in range(max_steps):
            if i % save_vtk_steps == 0:
                self.save_vtks()
            if i % save_npy_steps == 0:
                self.save_npys()

            self.step += 1

            cvode_dt = self.integrator.get_current_step()
            increment_dt = dt
            if cvode_dt > dt:
                increment_dt = cvode_dt

            dmdt = self.run_until(self.t + increment_dt)

            self.compute_distance()
            self.tablewriter.save()
            self.tablewriter_dm.save()

            log.debug("step: {:.3g}, step_size: {:.3g}"
                      " and max_dmdt: {:.3g}.".format(self.step,
                                                      increment_dt,
                                                      dmdt))

            if dmdt < stopping_dmdt:
                break

        log.info("Relaxation finished at time step = {:.4g}, "
                 "t = {:.2g}, call rhs = {:.4g} "
                 "and max_dmdt = {:.3g}".format(self.step, self.t,
                                                self.ode_count, dmdt))

        self.save_vtks()
        self.save_npys()
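
    # Hedged usage sketch for the relaxation driver above (the class and
    # constructor names are assumptions for illustration only; the real
    # signature is defined where this class begins):
    #
    #     neb = NEBM(sim, images, interpolations=[8, 8], name='nebm')
    #     neb.relax(dt=1e-8, stopping_dmdt=10, max_steps=500)
    #
    # relax() keeps integrating in increments of max(dt, cvode_dt) until
    # max_dmdt drops below stopping_dmdt or max_steps is reached, saving
    # vtk/npy snapshots every save_vtk_steps/save_npy_steps iterations.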
    def __adjust_coords_once(self):
        self.compute_effective_field(self.coords)
        self.compute_distance()

        average_dm = np.mean(self.distances)
        # What about a local minimum?
        energy_barrier = np.max(self.energy) - np.min(self.energy)

        dm_threshold = average_dm / 5.0
        energy_threshold = energy_barrier / 5.0

        to_be_remove_id = -1
        for i in range(self.image_num):
            e1 = self.energy[i + 1] - self.energy[i]
            e2 = self.energy[i + 2] - self.energy[i + 1]
            if self.distances[i] < dm_threshold and \
                    self.distances[i + 1] < dm_threshold \
                    and e1 * e2 > 0 \
                    and abs(e1) < energy_threshold \
                    and abs(e2) < energy_threshold:
                to_be_remove_id = i + 1
                break

        if to_be_remove_id < 0:
            return -1

        self.coords.shape = (self.total_image_num, -1)

        coords_list = []
        for i in range(self.total_image_num):
            coords_list.append(self.coords[i].copy())

        energy_diff = []
        for i in range(self.total_image_num - 1):
            de = abs(self.energy[i] - self.energy[i + 1])
            energy_diff.append(de)

        # If there is a saddle point, increase the weight of the energy
        # difference
        factor1 = 2.0
        for i in range(1, self.total_image_num - 1):
            de1 = self.energy[i] - self.energy[i - 1]
            de2 = self.energy[i + 1] - self.energy[i]
            if de1 * de2 < 0:
                energy_diff[i - 1] *= factor1
                energy_diff[i] *= factor1

        factor2 = 2.0
        for i in range(2, self.total_image_num - 2):
            de1 = self.energy[i - 1] - self.energy[i - 2]
            de2 = self.energy[i] - self.energy[i - 1]
            de3 = self.energy[i + 1] - self.energy[i]
            de4 = self.energy[i + 2] - self.energy[i + 1]
            if de1 * de2 > 0 and de3 * de4 > 0 and de2 * de3 < 0:
                energy_diff[i - 1] *= factor2
                energy_diff[i] *= factor2

        max_i = np.argmax(energy_diff)

        theta_phi = linear_interpolation(coords_list[max_i],
                                         coords_list[max_i + 1])

        if to_be_remove_id < max_i:
            coords_list.insert(max_i + 1, theta_phi)
            coords_list.pop(to_be_remove_id)
        else:
            coords_list.pop(to_be_remove_id)
            coords_list.insert(max_i + 1, theta_phi)

        for i in range(self.total_image_num):
            m = coords_list[i]
            self.coords[i, :] = m[:]

        # print to_be_remove_id, max_i

        self.coords.shape = (-1, )

        return 0

    def adjust_coordinates(self):
        """
        Adjust the coordinates automatically.
        """
        for i in range(self.total_image_num / 2):
            if self.__adjust_coords_once() < 0:
                break

        """
        self.compute_effective_field(self.coords)
        self.compute_distance()
        self.tablewriter.save()
        self.tablewriter_dm.save()

        self.step += 1
        self.tablewriter.save()
        self.tablewriter_dm.save()
        """

        log.info("Adjusted the coordinates at step = {:.4g}, "
                 "t = {:.6g}.".format(self.step, self.t))
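
# The right-hand side assembled in sundials_rhs above reduces, for each
# inner image i, to the projected NEB force
#
#     G_i = H_i - (H_i . t_i) t_i + k (|dm_{i+1}| - |dm_i|) t_i
#
# The standalone helper below is a minimal sketch of that projection for
# a single image. It is illustrative only and is not called by the
# classes in this module; h and t are assumed to be 1D numpy arrays with
# t normalised, and spring_force is the scalar k (|dm_{i+1}| - |dm_i|).


def _neb_projected_force_sketch(h, t, spring_force):
    """Return the projected NEB force for one inner image (sketch)."""
    # Remove the component of the effective field along the tangent and
    # add the spring force along the tangent instead
    return h - np.dot(h, t) * t + spring_force * t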
class LLB(object):
    """
    Implementation of the Baryakhtar equation
    """

    def __init__(self, mesh, chi=0.001, unit_length=1e-9, name='unnamed',
                 auto_save_data=True, type=1.0):
        # type=1 : cubic crystal
        # type=0 : uniaxial crystal

        self.mesh = mesh
        self.S1 = df.FunctionSpace(mesh, "Lagrange", 1)
        self.S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
        # self._Ms = df.Function(self.S1)
        self._M = df.Function(self.S3)
        self._m = df.Function(self.S3)
        self.M = self._M.vector().array()
        self.dm_dt = np.zeros(self.M.shape)
        self.H_eff = np.zeros(self.M.shape)
        self.call_field_times = 0
        self.call_field_jtimes = 0
        self.chi = chi
        self.unit_length = unit_length

        self.set_default_values()
        self.auto_save_data = auto_save_data
        self.name = name
        self.sanitized_name = helpers.clean_filename(name)

        self.type = type
        assert (type == 0 or type == 1.0)

        if self.auto_save_data:
            self.ndtfilename = self.sanitized_name + '.ndt'
            self.tablewriter = Tablewriter(self.ndtfilename, self,
                                           override=True)

    def set_default_values(self):
        self._alpha_mult = df.Function(self.S1)
        self._alpha_mult.assign(df.Constant(1.0))

        self._beta_mult = df.Function(self.S1)
        self._beta_mult.assign(df.Constant(1.0))

        self.alpha = 0.5  # alpha for solve: alpha * _alpha_mult
        self.beta = 0

        self.t = 0.0  # s
        self.do_precession = True

        u3 = df.TrialFunction(self.S3)
        v3 = df.TestFunction(self.S3)
        self.K = df.PETScMatrix()
        df.assemble(1.0 / self.unit_length ** 2
                    * df.inner(df.grad(u3), df.grad(v3)) * df.dx,
                    tensor=self.K)
        self.H_laplace = df.PETScVector()
        self.H_eff_vec = df.PETScVector(len(self.M))

        self.vol = df.assemble(df.dot(df.TestFunction(self.S3),
                                      df.Constant([1, 1, 1])) * df.dx).array()

        self.gamma = consts.gamma
        # source for gamma: OOMMF manual, and in Werner Scholz' thesis,
        # after (3.7), llg_gamma_G = m/(As).
        self.c = 1e11  # 1/s numerical scaling correction;
        #                0.1e12 1/s is the value used by default in nmag 0.2
        self.M0 = 8.6e5  # A/m saturation magnetisation
        self.t = 0.0  # s
        self._pins = np.zeros(self.S1.mesh().num_vertices(), dtype="int")

        self._pre_rhs_callables = []
        self._post_rhs_callables = []
        self.interactions = []

    @property
    def alpha(self):
        """The damping factor :math:`\\alpha`."""
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        fun = df.Function(self.S1)
        if not isinstance(value, df.Expression):
            value = df.Constant(value)
        fun.assign(value)
        self.alpha_vec = fun.vector().array()
        self._alpha = self.alpha_vec

    @property
    def beta(self):
        return self._beta

    @beta.setter
    def beta(self, value):
        self._beta = value
        self.beta_vec = self._beta * self._beta_mult.vector().array()

    def add(self, interaction):
        interaction.setup(self.S3, self._M, self.M0, self.unit_length)
        self.interactions.append(interaction)

        if interaction.__class__.__name__ == 'Zeeman':
            self.zeeman_interaction = interaction
            self.tablewriter.entities['zeeman'] = {
                'unit': '<A/m>',
                'get': lambda sim: sim.zeeman_interaction.average_field(),
                'header': ('h_x', 'h_y', 'h_z')}
            self.tablewriter.update_entity_order()

    @property
    def pins(self):
        return self._pins

    @pins.setter
    def pins(self, value):
        self._pins[:] = helpers.scalar_valued_function(
            value, self.S1).vector().array()[:]

    def set_M(self, value, **kwargs):
        self._M = helpers.vector_valued_function(value, self.S3,
                                                 normalise=False)
        self.M[:] = self._M.vector().array()[:]
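
    # set_up_solver (below) wires this class into SUNDIALS CVODE using
    # the BDF multistep method with Newton iterations. With jacobian=True
    # the SPGMR Krylov solver is given the analytic Jacobian-times-vector
    # routine sundials_jtimes together with the (trivial) left
    # preconditioner pair sundials_psetup/sundials_psolve; with
    # jacobian=False CVODE approximates Jacobian-vector products
    # internally by difference quotients and no preconditioning is used.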
    def set_up_solver(self, reltol=1e-6, abstol=1e-6, nsteps=100000,
                      jacobian=False):
        integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        integrator.init(self.sundials_rhs, 0, self.M)

        if jacobian:
            integrator.set_linear_solver_sp_gmr(sundials.PREC_LEFT)
            integrator.set_spils_jac_times_vec_fn(self.sundials_jtimes)
            integrator.set_spils_preconditioner(self.sundials_psetup,
                                                self.sundials_psolve)
        else:
            integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)

        integrator.set_scalar_tolerances(reltol, abstol)
        integrator.set_max_num_steps(nsteps)

        self.integrator = integrator

    def compute_effective_field(self):
        H_eff = np.zeros(self.M.shape)
        for interaction in self.interactions:
            if interaction.__class__.__name__ == 'TimeZeemanPython':
                interaction.update(self.t)
            H_eff += interaction.compute_field()
        self.H_eff = H_eff
        self.H_eff_vec.set_local(H_eff)

    def compute_laplace_effective_field(self):
        self.K.mult(self.H_eff_vec, self.H_laplace)
        return -1.0 * self.H_laplace.array() / self.vol

    def run_until(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.M)
        self._M.vector().set_local(self.M)
        self.t = t

        if self.auto_save_data:
            self.tablewriter.save()

    def sundials_rhs(self, t, y, ydot):
        self.t = t
        self.M[:] = y[:]
        self._M.vector().set_local(self.M)

        for func in self._pre_rhs_callables:
            func(self.t)

        self.call_field_times += 1
        self.compute_effective_field()
        delta_Heff = self.compute_laplace_effective_field()
        # print self.H_eff
        # print 'delta_Heff', self.H_eff * 0.01 - delta_Heff * self.beta_vec[0]

        default_timer.start("sundials_rhs", self.__class__.__name__)
        # Use the same characteristic time as defined by c
        native_llg.calc_baryakhtar_dmdt(self.M,
                                        self.H_eff,
                                        delta_Heff,
                                        self.dm_dt,
                                        self.alpha_vec,
                                        self.beta_vec,
                                        self.M0,
                                        self.gamma,
                                        self.type,
                                        self.do_precession,
                                        self.pins)
        default_timer.stop("sundials_rhs", self.__class__.__name__)

        for func in self._post_rhs_callables:
            func(self)

        ydot[:] = self.dm_dt[:]

        return 0

    def sundials_jtimes(self, mp, J_mp, t, m, fy, tmp):
        """
        Compute the Jacobian-times-vector product for CVODE's Krylov
        solver.
        """
        default_timer.start("sundials_jtimes", self.__class__.__name__)
        self.call_field_jtimes += 1

        self._M.vector().set_local(m)
        self.compute_effective_field()

        # debug output left from development:
        # print self.call_field_times, self.call_field_jtimes
        native_llg.calc_baryakhtar_jtimes(self._M.vector().array(),
                                          self.H_eff,
                                          mp,
                                          J_mp,
                                          self.gamma,
                                          self.chi,
                                          self.M0,
                                          self.do_precession,
                                          self.pins)
        default_timer.stop("sundials_jtimes", self.__class__.__name__)

        self.sundials_rhs(t, m, fy)

        # Nonnegative exit code indicates success
        return 0

    def sundials_psetup(self, t, m, fy, jok, gamma, tmp1, tmp2, tmp3):
        # Some of the arguments are deliberately ignored, but they need
        # to be present because the function must have the correct
        # signature when it is passed to set_spils_preconditioner() in
        # the cvode class.
        return 0, not jok

    def sundials_psolve(self, t, y, fy, r, z, gamma, delta, lr, tmp):
        # Some of the arguments are deliberately ignored, but they need
        # to be present because the function must have the correct
        # signature when it is passed to set_spils_preconditioner() in
        # the cvode class.
        z[:] = r
        return 0

    @property
    def Ms(self):
        a = self.M
        a.shape = (3, -1)
        res = np.sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2])
        a.shape = (-1, )
        return res

    @property
    def m(self):
        mh = helpers.fnormalise(self.M)
        self._m.vector().set_local(mh)
        return mh

    @property
    def m_average(self):
        self._m.vector().set_local(helpers.fnormalise(self.M))

        mx = df.assemble(df.dot(self._m, df.Constant([1, 0, 0])) * df.dx)
        my = df.assemble(df.dot(self._m, df.Constant([0, 1, 0])) * df.dx)
        mz = df.assemble(df.dot(self._m, df.Constant([0, 0, 1])) * df.dx)
        volume = df.assemble(df.Constant(1) * df.dx, mesh=self.mesh)

        return np.array([mx, my, mz]) / volume
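
# Hedged usage sketch for the LLB class above; the mesh, the initial
# magnetisation and the integration time are illustrative assumptions,
# not values taken from this module.
if __name__ == '__main__':
    mesh = df.UnitCubeMesh(5, 5, 5)
    sim = LLB(mesh, chi=0.001, unit_length=1e-9, name='llb_example')
    # Uniform initial magnetisation along x (A/m)
    sim.set_M((8.6e5, 0, 0))
    # Default solver: no analytic Jacobian, CVODE difference quotients
    sim.set_up_solver()
    sim.run_until(1e-11)
    print sim.m_average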