def __init__(self, comm, broadcast_comm, gd, aux_gd, spos_ac):
    self.comm = comm
    self.broadcast_comm = broadcast_comm
    self.gd = gd
    self.aux_gd = aux_gd

    rank_a = gd.get_ranks_from_positions(spos_ac)
    aux_rank_a = aux_gd.get_ranks_from_positions(spos_ac)
    self.partition = AtomPartition(gd.comm, rank_a, name='gd')

    if gd is aux_gd:
        name = 'aux-unextended'
    else:
        name = 'aux-extended'
    self.aux_partition = AtomPartition(aux_gd.comm, aux_rank_a, name=name)

    self.work_partition = AtomPartition(comm, np.zeros(len(spos_ac)),
                                        name='work').as_even_partition()

    if gd is aux_gd:
        aux_broadcast_comm = gd.comm.new_communicator([gd.comm.rank])
    else:
        aux_broadcast_comm = broadcast_comm

    self.aux_dist = AtomicMatrixDistributor(self.partition,
                                            aux_broadcast_comm,
                                            self.aux_partition)
    self.work_dist = AtomicMatrixDistributor(self.partition,
                                             broadcast_comm,
                                             self.work_partition)
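# --- Illustrative sketch (not part of the class above) ---
# A minimal example, assuming gpaw.mpi.world and a hand-made rank_a array in
# place of gd.get_ranks_from_positions(spos_ac), of the two kinds of
# partitions built in __init__: a position-based 'gd' partition and an even
# 'work' partition.  The variable names below are illustrative only.
import numpy as np
from gpaw.mpi import world
from gpaw.utilities.partition import AtomPartition

natoms = 6
rank_a = np.arange(natoms) % world.size        # stand-in for grid ranks
domain_partition = AtomPartition(world, rank_a, name='gd')
work_partition = AtomPartition(world, np.zeros(natoms, int),
                               name='work').as_even_partition()
# domain_partition.rank_a follows the (fake) grid decomposition, while
# work_partition spreads the atoms as evenly as possible over world.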
def redistribute_and_broadcast(self, dist_comm, dup_comm):
    # Data exists on self, which is a "nice" distribution, but now we want
    # it on sub_partition, which has a smaller communicator whose parent
    # is self.comm.
    #
    # We want our own data replicated on each rank of dup_comm.
    # XXX direct comparison of communicators is unsafe as we do not use
    # MPI_Comm_compare
    #assert subpartition.comm.parent == self.partition.comm
    from gpaw.utilities.partition import AtomPartition
    newrank_a = self.partition.rank_a % dist_comm.size
    masters_only_partition = AtomPartition(self.partition.comm, newrank_a)
    dst_partition = AtomPartition(dist_comm, newrank_a)
    copy = self.deepcopy()
    copy.redistribute(masters_only_partition)
    dst = ArrayDict(dst_partition, self.shapes_a, dtype=self.dtype,
                    keymap=self.keymap)

    data = dst.toarray()
    if dup_comm.rank == 0:
        data0 = copy.toarray()
        data[:] = data0
    dup_comm.broadcast(data, 0)
    dst.fromarray(data)
    return dst
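# --- Illustrative sketch (plain numpy, not GPAW API) ---
# The key step above is the modulo mapping of atom owners onto the smaller
# dist_comm.  For example, with owners spread over an 8-rank communicator
# and dist_comm.size == 2:
import numpy as np

rank_a = np.array([0, 3, 5, 6, 2, 7])      # owners in self.partition.comm
newrank_a = rank_a % 2                     # -> [0, 1, 1, 0, 0, 1]
# masters_only_partition gathers every atom onto ranks < dist_comm.size of
# the big communicator; dst_partition reuses the same numbers as ranks of
# dist_comm; dup_comm.broadcast then replicates the packed array so that
# every duplicate holds a full copy.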
def read(self, filename):
    from ase.io.trajectory import read_atoms
    self.log('Reading from {}'.format(filename))

    self.reader = reader = Reader(filename)

    atoms = read_atoms(reader.atoms)
    self._set_atoms(atoms)

    res = reader.results
    self.results = dict((key, res.get(key)) for key in res.keys())
    if self.results:
        self.log('Read {}'.format(', '.join(sorted(self.results))))

    self.log('Reading input parameters:')

    # XXX param
    self.parameters = self.get_default_parameters()
    dct = {}
    for key, value in reader.parameters.asdict().items():
        if (isinstance(value, dict) and
            isinstance(self.parameters[key], dict)):
            self.parameters[key].update(value)
        else:
            self.parameters[key] = value
        dct[key] = self.parameters[key]

    self.log.print_dict(dct)
    self.log()

    self.initialize(reading=True)

    self.density.read(reader)
    self.hamiltonian.read(reader)
    self.occupations.read(reader)
    self.scf.read(reader)
    self.wfs.read(reader)

    # We need to do this in a better way:  XXX
    from gpaw.utilities.partition import AtomPartition
    atom_partition = AtomPartition(self.wfs.gd.comm,
                                   np.zeros(len(self.atoms), dtype=int))
    self.wfs.atom_partition = atom_partition
    self.density.atom_partition = atom_partition
    self.hamiltonian.atom_partition = atom_partition
    rank_a = self.density.gd.get_ranks_from_positions(self.spos_ac)
    new_atom_partition = AtomPartition(self.density.gd.comm, rank_a)
    for obj in [self.density, self.hamiltonian]:
        obj.set_positions_without_ruining_everything(self.spos_ac,
                                                     new_atom_partition)

    self.hamiltonian.xc.read(reader)

    if self.hamiltonian.xc.name == 'GLLBSC':
        # XXX GLLB: See test/lcaotddft/gllbsc.py
        self.occupations.calculate(self.wfs)

    return reader
def get_density(self, atom_indices=None, gridrefinement=2):
    """Get sum of atomic densities from the given atom list.

    All atoms are taken if the list is not given."""

    all_atoms = self.calculator.get_atoms()
    if atom_indices is None:
        atom_indices = range(len(all_atoms))

    density = self.calculator.density
    spos_ac = all_atoms.get_scaled_positions()
    rank_a = self.finegd.get_ranks_from_positions(spos_ac)
    density.set_positions(all_atoms.get_scaled_positions(),
                          AtomPartition(self.finegd.comm, rank_a))

    # select atoms
    atoms = []
    D_asp = {}
    rank_a = []
    all_D_asp = self.calculator.density.D_asp
    all_rank_a = self.calculator.density.atom_partition.rank_a
    for a in atom_indices:
        if a in all_D_asp:
            D_asp[len(atoms)] = all_D_asp.get(a)
        atoms.append(all_atoms[a])
        rank_a.append(all_rank_a[a])

    atoms = Atoms(atoms,
                  cell=all_atoms.get_cell(),
                  pbc=all_atoms.get_pbc())
    spos_ac = atoms.get_scaled_positions()
    Z_a = atoms.get_atomic_numbers()
    par = self.calculator.parameters
    setups = Setups(Z_a, par.setups, par.basis,
                    XC(par.xc),
                    self.calculator.wfs.world)

    # initialize
    self.initialize(setups,
                    self.calculator.timer,
                    np.zeros(len(atoms)), False)
    self.set_mixer(None)

    # FIXME nparray causes partitioning.py test to fail
    self.set_positions(spos_ac, AtomPartition(self.gd.comm, rank_a))
    self.D_asp = D_asp
    basis_functions = BasisFunctions(self.gd,
                                     [setup.phit_j
                                      for setup in self.setups],
                                     cut=True)
    basis_functions.set_positions(spos_ac)
    self.initialize_from_atomic_densities(basis_functions)

    aed_sg, gd = self.get_all_electron_density(atoms, gridrefinement)
    return aed_sg.sum(axis=0), gd
def read(self, reader):
    nt_xG = self.gd.empty(self.ncomponents)
    self.gd.distribute(reader.density.density, nt_xG)
    nt_xG *= reader.bohr**3

    # Read atomic density matrices
    natoms = len(self.setups)
    atom_partition = AtomPartition(self.gd.comm, np.zeros(natoms, int),
                                   'density-gd')
    D_asp = self.setups.empty_atomic_matrix(self.ncomponents,
                                            atom_partition)
    self.atom_partition = atom_partition  # XXXXXX
    spos_ac = np.zeros((natoms, 3))  # XXXX
    self.atomdist = self.redistributor.get_atom_distributions(spos_ac)

    D_sP = reader.density.atomic_density_matrices
    if self.gd.comm.rank == 0:
        D_asp.update(unpack_atomic_matrices(D_sP, self.setups))
    D_asp.check_consistency()

    if self.collinear:
        nt_sG = nt_xG
        nt_vG = None
    else:
        nt_sG = nt_xG[:1]
        nt_vG = nt_xG[1:]

    self.initialize_directly_from_arrays(nt_sG, nt_vG, D_asp)
def set_positions(self, spos_ac):
    self.positions_set = False
    rank_a = self.gd.get_ranks_from_positions(spos_ac)
    atom_partition = AtomPartition(self.gd.comm, rank_a)
    # XXX pass AtomPartition around instead of spos_ac?
    # All the classes passing around spos_ac end up needing the ranks
    # anyway.

    if self.rank_a is not None and self.kpt_u[0].P_ani is not None:
        self.timer.start('Redistribute')
        mynks = len(self.kpt_u)

        def get_empty(a):
            ni = self.setups[a].ni
            return np.empty((mynks, self.bd.mynbands, ni), self.dtype)

        self.atom_partition.redistribute(atom_partition,
                                         [kpt.P_ani for kpt in self.kpt_u],
                                         get_empty)
        self.timer.stop('Redistribute')

    self.rank_a = rank_a
    self.atom_partition = atom_partition
    self.kd.symmetry.check(spos_ac)
def set_positions(self, spos_ac, rank_a):
    atom_partition = AtomPartition(self.gd.comm, rank_a)
    self.nct.set_positions(spos_ac)
    self.ghat.set_positions(spos_ac)
    self.mixer.reset()

    #self.nt_sG = None
    self.nt_sg = None
    self.nt_g = None
    self.rhot_g = None
    self.Q_aL = None

    # If both old and new atomic ranks are present, start a blank dict if
    # it previously didn't exist but will be needed for the new atoms.
    assert rank_a is not None

    if (self.rank_a is not None and self.D_asp is None and
        (rank_a == self.gd.comm.rank).any()):
        self.D_asp = {}

    if (self.rank_a is not None and self.D_asp is not None and
        not isinstance(self.gd.comm, SerialCommunicator)):
        self.timer.start('Redistribute')

        def get_empty(a):
            ni = self.setups[a].ni
            return np.empty((self.ns, ni * (ni + 1) // 2))

        self.atom_partition.redistribute(atom_partition, self.D_asp,
                                         get_empty)
        self.timer.stop('Redistribute')

    self.rank_a = rank_a
    self.atom_partition = atom_partition
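# --- Illustrative sketch (not part of the method above) ---
# The redistribute()/get_empty pattern used above, in isolation.  Assumes
# gpaw.mpi.world; ni_a and ns are made-up sizes.  As in the method, the
# dict is updated in place so that afterwards each rank holds the packed
# (ns, ni * (ni + 1) // 2) matrices of the atoms it owns in the new
# partition.
import numpy as np
from gpaw.mpi import world
from gpaw.utilities.partition import AtomPartition

ni_a = [5, 9, 5]                     # hypothetical projector counts
ns = 2                               # number of spin channels

old = AtomPartition(world, np.zeros(len(ni_a), int))   # all on rank 0
new = AtomPartition(world, np.arange(len(ni_a)) % world.size)

D_asp = {}
if world.rank == 0:
    for a, ni in enumerate(ni_a):
        D_asp[a] = np.zeros((ns, ni * (ni + 1) // 2))

def get_empty(a):
    ni = ni_a[a]
    return np.empty((ns, ni * (ni + 1) // 2))

if world.size > 1:   # mirrors the SerialCommunicator guard above
    old.redistribute(new, D_asp, get_empty)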
def set_positions(self, spos_ac, rank_a):
    atom_partition = AtomPartition(self.gd.comm, rank_a)
    self.spos_ac = spos_ac
    self.vbar.set_positions(spos_ac)
    self.xc.set_positions(spos_ac)

    # If both old and new atomic ranks are present, start a blank dict if
    # it previously didn't exist but will be needed for the new atoms.
    # XXX what purpose does this serve?  In what case does it happen?
    # How would one even go about figuring it out?  Why does it all have
    # to be so unreadable? -Ask
    #
    if (self.rank_a is not None and self.dH_asp is None and
        (rank_a == self.gd.comm.rank).any()):
        self.dH_asp = {}

    if self.rank_a is not None and self.dH_asp is not None:
        self.timer.start('Redistribute')

        def get_empty(a):
            ni = self.setups[a].ni
            return np.empty((self.ns, ni * (ni + 1) // 2))

        self.atom_partition.redistribute(atom_partition, self.dH_asp,
                                         get_empty)
        self.timer.stop('Redistribute')

    self.rank_a = rank_a
    self.atom_partition = atom_partition
    self.dh_distributor = AtomicMatrixDistributor(atom_partition,
                                                  self.setups,
                                                  self.kptband_comm,
                                                  self.ns)
def read(self, reader):
    h = reader.hamiltonian

    # Read all energies:
    for name in ENERGY_NAMES:
        energy = h.get(name)
        if energy is not None:
            energy /= reader.ha
        setattr(self, name, energy)

    # Read pseudo potential on the coarse grid
    # and broadcast on kpt/band comm:
    self.vt_sG = self.gd.empty(self.nspins)
    self.gd.distribute(h.potential / reader.ha, self.vt_sG)

    self.atom_partition = AtomPartition(self.gd.comm,
                                        np.zeros(len(self.setups), int),
                                        name='hamiltonian-init-serial')

    # Read non-local part of hamiltonian
    self.dH_asp = {}
    dH_sP = h.atomic_hamiltonian_matrices / reader.ha
    if self.gd.comm.rank == 0:
        self.dH_asp = unpack_atomic_matrices(dH_sP, self.setups)

    if hasattr(self.poisson, 'read'):
        self.poisson.read(reader)
        self.poisson.set_grid_descriptor(self.finegd)
def initialize_positions(self, atoms=None):
    """Update the positions of the atoms."""
    self.log('Initializing position-dependent things.\n')
    if atoms is None:
        atoms = self.atoms
    else:
        atoms = atoms.copy()
        self._set_atoms(atoms)

    mpi.synchronize_atoms(atoms, self.world)

    rank_a = self.wfs.gd.get_ranks_from_positions(self.spos_ac)
    atom_partition = AtomPartition(self.wfs.gd.comm, rank_a, name='gd')
    self.wfs.set_positions(self.spos_ac, atom_partition)
    self.density.set_positions(self.spos_ac, atom_partition)
    self.hamiltonian.set_positions(self.spos_ac, atom_partition)
def redistribute_atomic_matrices(D_asp, gd2, nspins, setups, redistributor,
                                 kptband_comm):
    D_sP = pack_atomic_matrices(D_asp)
    natoms = len(setups)
    atom_partition = AtomPartition(gd2.comm, np.zeros(natoms, int),
                                   'density-gd')
    D_asp = setups.empty_atomic_matrix(nspins, atom_partition)
    spos_ac = np.zeros((natoms, 3))  # XXXX
    atomdist = redistributor.get_atom_distributions(spos_ac)

    if gd2.comm.rank == 0:
        if kptband_comm.rank > 0:
            nP = sum(setup.ni * (setup.ni + 1) // 2
                     for setup in setups)
            D_sP = np.empty((nspins, nP))
        kptband_comm.broadcast(D_sP, 0)
        D_asp.update(unpack_atomic_matrices(D_sP, setups))
        D_asp.check_consistency()
    return atom_partition, atomdist, D_asp
def get_density(self, atom_indices=None, gridrefinement=2):
    """Get sum of atomic densities from the given atom list.

    Parameters
    ----------
    atom_indices : list_like
        All atoms are taken if the list is not given.
    gridrefinement : 1, 2, 4
        Gridrefinement given to get_all_electron_density

    Returns
    -------
    type
        spin summed density, grid_descriptor
    """

    all_atoms = self.calculator.get_atoms()
    if atom_indices is None:
        atom_indices = range(len(all_atoms))

    # select atoms
    atoms = self.calculator.get_atoms()[atom_indices]
    spos_ac = atoms.get_scaled_positions()
    Z_a = atoms.get_atomic_numbers()
    par = self.calculator.parameters
    setups = Setups(Z_a, par.setups, par.basis,
                    XC(par.xc),
                    self.calculator.wfs.world)

    # initialize
    self.initialize(setups,
                    self.calculator.timer,
                    np.zeros((len(atoms), 3)), False)
    self.set_mixer(None)
    rank_a = self.gd.get_ranks_from_positions(spos_ac)
    self.set_positions(spos_ac, AtomPartition(self.gd.comm, rank_a))
    basis_functions = BasisFunctions(self.gd,
                                     [setup.phit_j
                                      for setup in self.setups],
                                     cut=True)
    basis_functions.set_positions(spos_ac)
    self.initialize_from_atomic_densities(basis_functions)

    aed_sg, gd = self.get_all_electron_density(atoms, gridrefinement)
    return aed_sg.sum(axis=0), gd
def read_projections(self, reader):
    nslice = self.bd.get_slice()
    nproj_a = [setup.ni for setup in self.setups]
    atom_partition = AtomPartition(self.gd.comm,
                                   np.zeros(len(nproj_a), int))

    for u, kpt in enumerate(self.kpt_u):
        if self.collinear:
            index = (kpt.s, kpt.k)
        else:
            index = (kpt.k,)

        kpt.projections = Projections(
            self.bd.nbands, nproj_a, atom_partition,
            self.bd.comm,
            collinear=self.collinear, spin=kpt.s, dtype=self.dtype)

        if self.gd.comm.rank == 0:
            P_nI = reader.proxy('projections', *index)[nslice]
            if not self.collinear:
                P_nI.shape = (self.bd.mynbands, -1)
            kpt.projections.matrix.array[:] = P_nI
def set_positions(self, spos_ac, atom_partition=None):
    self.positions_set = False
    # rank_a = self.gd.get_ranks_from_positions(spos_ac)
    # atom_partition = AtomPartition(self.gd.comm, rank_a)
    # XXX pass AtomPartition around instead of spos_ac?
    # All the classes passing around spos_ac end up needing the ranks
    # anyway.

    if atom_partition is None:
        rank_a = self.gd.get_ranks_from_positions(spos_ac)
        atom_partition = AtomPartition(self.gd.comm, rank_a)

    if self.atom_partition is not None and self.kpt_u[0].P_ani is not None:
        with self.timer('Redistribute'):
            for kpt in self.mykpts:
                P = kpt.projections
                assert self.atom_partition == P.atom_partition
                kpt.projections = P.redist(atom_partition)
                assert atom_partition == kpt.projections.atom_partition

    self.atom_partition = atom_partition
    self.kd.symmetry.check(spos_ac)
    self.spos_ac = spos_ac
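# --- Illustrative sketch (assumptions flagged below) ---
# The Projections/redist() path used above, in isolation.  The constructor
# call mirrors read_projections() further up; the import path
# gpaw.projections and the use of serial_comm as the band communicator are
# assumptions, and the sizes are made up.
import numpy as np
from gpaw.mpi import world, serial_comm
from gpaw.utilities.partition import AtomPartition
from gpaw.projections import Projections   # assumed module path

nproj_a = [5, 9]                            # hypothetical projector counts
old = AtomPartition(world, np.zeros(len(nproj_a), int))
new = old.as_even_partition()

P = Projections(4, nproj_a, old, serial_comm,
                collinear=True, spin=0, dtype=float)
P = P.redist(new)                           # returns a new Projections
assert P.atom_partition == new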
def read(paw, reader, read_projections=True):
    r = reader
    timer = paw.timer
    timer.start('Read')

    wfs = paw.wfs
    density = paw.density
    hamiltonian = paw.hamiltonian
    natoms = len(paw.atoms)

    world = paw.wfs.world
    gd = wfs.gd
    kd = wfs.kd
    bd = wfs.bd

    master = (world.rank == 0)
    parallel = (world.size > 1)

    version = r['version']
    hdf5 = hasattr(r, 'hdf5')

    # Verify setup fingerprints and count projectors and atomic matrices:
    for setup in wfs.setups.setups.values():
        try:
            key = atomic_names[setup.Z] + 'Fingerprint'
            if setup.type != 'paw':
                key += '(%s)' % setup.type
            if setup.fingerprint != r[key]:
                msg = ('Setup for %s (%s) not compatible with restart file.'
                       % (setup.symbol, setup.filename))
                if paw.input_parameters['idiotproof']:
                    raise RuntimeError(msg)
                else:
                    warnings.warn(msg)
        except (AttributeError, KeyError):
            msg = ('Fingerprint of setup for %s (%s) not in restart file.'
                   % (setup.symbol, setup.filename))
            if paw.input_parameters['idiotproof']:
                raise RuntimeError(msg)
            else:
                warnings.warn(msg)
    nproj = sum([setup.ni for setup in wfs.setups])
    nadm = sum([setup.ni * (setup.ni + 1) // 2 for setup in wfs.setups])

    # Verify dimensions for minimally required netCDF variables:
    ng = gd.get_size_of_global_array()
    shapes = {'ngptsx': ng[0],
              'ngptsy': ng[1],
              'ngptsz': ng[2],
              'nspins': wfs.nspins,
              'nproj': nproj,
              'nadm': nadm}
    for name, dim in shapes.items():
        if r.dimension(name) != dim:
            raise ValueError('shape mismatch: expected %s=%d' % (name, dim))

    timer.start('Density')
    density.read(r, parallel, wfs.kptband_comm)
    timer.stop('Density')

    timer.start('Hamiltonian')
    hamiltonian.read(r, parallel)
    timer.stop('Hamiltonian')

    from gpaw.utilities.partition import AtomPartition
    atom_partition = AtomPartition(gd.comm, np.zeros(natoms, dtype=int))
    # <sarcasm>let's set some variables directly on some objects!</sarcasm>
    wfs.atom_partition = atom_partition
    wfs.rank_a = np.zeros(natoms, int)
    density.atom_partition = atom_partition
    hamiltonian.atom_partition = atom_partition

    if version > 0.3:
        Etot = hamiltonian.Etot
        energy_error = r['EnergyError']
        if energy_error is not None:
            paw.scf.energies = [Etot, Etot + energy_error, Etot]
        wfs.eigensolver.error = r['EigenstateError']
        if version < 1:
            wfs.eigensolver.error *= gd.dv
    else:
        paw.scf.converged = r['Converged']

    if version > 0.6:
        if paw.occupations.fixmagmom:
            if 'FermiLevel' in r.get_parameters():
                paw.occupations.set_fermi_levels_mean(r['FermiLevel'])
            if 'FermiSplit' in r.get_parameters():
                paw.occupations.set_fermi_splitting(r['FermiSplit'])
        else:
            if 'FermiLevel' in r.get_parameters():
                paw.occupations.set_fermi_level(r['FermiLevel'])
    else:
        if (not paw.input_parameters.fixmom and
            'FermiLevel' in r.get_parameters()):
            paw.occupations.set_fermi_level(r['FermiLevel'])

    # Try to read the current time and kick strength in time-propagation
    # TDDFT:
    for attr, name in [('time', 'Time'), ('niter', 'TimeSteps'),
                       ('kick_strength', 'AbsorptionKick')]:
        if hasattr(paw, attr):
            try:
                if r.has_array(name):
                    value = r.get(name, read=master)
                else:
                    value = r[name]
                setattr(paw, attr, value)
            except KeyError:
                pass

    # Try to read FDTD-related data
    try:
        use_fdtd = r['FDTD']
    except (AttributeError, KeyError):
        use_fdtd = False

    if use_fdtd:
        from gpaw.fdtd.poisson_fdtd import FDTDPoissonSolver
        # fdtd_poisson will overwrite the poisson at a later stage
        paw.hamiltonian.fdtd_poisson = FDTDPoissonSolver(restart_reader=r,
                                                         paw=paw)

    # Try to read the number of Delta SCF orbitals
    try:
        norbitals = r.dimension('norbitals')
        paw.occupations.norbitals = norbitals
    except (AttributeError, KeyError):
        norbitals = None

    nibzkpts = r.dimension('nibzkpts')
    nbands = r.dimension('nbands')
    nslice = bd.get_slice()

    if (nibzkpts != len(wfs.kd.ibzk_kc) or
        nbands != bd.comm.size * bd.mynbands):
        paw.scf.reset()
    else:
        # Verify that symmetries for k-point reduction haven't changed:
        tol = 1e-12
        if master:
            bzk_kc = r.get('BZKPoints', read=master)
            weight_k = r.get('IBZKPointWeights', read=master)
            assert np.abs(bzk_kc - kd.bzk_kc).max() < tol
            assert np.abs(weight_k - kd.weight_k).max() < tol

        for kpt in wfs.kpt_u:
            # Eigenvalues and occupation numbers:
            timer.start('Band energies')
            k = kpt.k
            s = kpt.s
            if hdf5:  # fully parallelized over spins, k-points
                do_read = (gd.comm.rank == 0)
                indices = [s, k]
                indices.append(nslice)
                kpt.eps_n = r.get('Eigenvalues', parallel=parallel,
                                  read=do_read, *indices)
                gd.comm.broadcast(kpt.eps_n, 0)
                kpt.f_n = r.get('OccupationNumbers', parallel=parallel,
                                read=do_read, *indices)
                gd.comm.broadcast(kpt.f_n, 0)
            else:
                eps_n = r.get('Eigenvalues', s, k, read=master)
                f_n = r.get('OccupationNumbers', s, k, read=master)
                kpt.eps_n = eps_n[nslice].copy()
                kpt.f_n = f_n[nslice].copy()
            timer.stop('Band energies')

            if norbitals is not None:  # XXX will probably fail for hdf5
                timer.start('dSCF expansions')
                kpt.ne_o = np.empty(norbitals, dtype=float)
                kpt.c_on = np.empty((norbitals, bd.mynbands), dtype=complex)
                for o in range(norbitals):
                    kpt.ne_o[o] = r.get('LinearExpansionOccupations', s,
                                        k, o, read=master)
                    c_n = r.get('LinearExpansionCoefficients', s,
                                k, o, read=master)
                    kpt.c_on[o, :] = c_n[nslice]
                timer.stop('dSCF expansions')

        if (r.has_array('PseudoWaveFunctions') and
            paw.input_parameters.mode != 'lcao'):
            timer.start('Pseudo-wavefunctions')
            wfs.read(r, hdf5)
            timer.stop('Pseudo-wavefunctions')

        if (r.has_array('WaveFunctionCoefficients') and
            paw.input_parameters.mode == 'lcao'):
            wfs.read_coefficients(r)

        timer.start('Projections')
        if hdf5 and read_projections:
            # Domain masters read parallel over spin, kpoints and band
            # groups
            cumproj_a = np.cumsum([0] + [setup.ni for setup in wfs.setups])
            all_P_ni = np.empty((bd.mynbands, cumproj_a[-1]),
                                dtype=wfs.dtype)
            for kpt in wfs.kpt_u:
                kpt.P_ani = {}
                indices = [kpt.s, kpt.k]
                indices.append(bd.get_slice())
                do_read = (gd.comm.rank == 0)
                # timer.start('ProjectionsCritical(s=%d,k=%d)' %
                #             (kpt.s, kpt.k))
                r.get('Projections', out=all_P_ni, parallel=parallel,
                      read=do_read, *indices)
                # timer.stop('ProjectionsCritical(s=%d,k=%d)' %
                #            (kpt.s, kpt.k))
                if gd.comm.rank == 0:
                    for a in range(natoms):
                        ni = wfs.setups[a].ni
                        P_ni = np.empty((bd.mynbands, ni), dtype=wfs.dtype)
                        P_ni[:] = all_P_ni[:, cumproj_a[a]:cumproj_a[a + 1]]
                        kpt.P_ani[a] = P_ni

            del all_P_ni  # delete a potentially large matrix
        elif read_projections and r.has_array('Projections'):
            wfs.read_projections(r)
        timer.stop('Projections')

    # Manage mode change:
    paw.scf.check_convergence(density, wfs.eigensolver, wfs, hamiltonian,
                              paw.forces)
    newmode = paw.input_parameters.mode
    try:
        oldmode = r['Mode']
        if oldmode == 'pw':
            from gpaw.wavefunctions.pw import PW
            oldmode = PW(ecut=r['PlaneWaveCutoff'] * Hartree)
    except (AttributeError, KeyError):
        oldmode = 'fd'  # This is an old gpw file from before lcao existed

    if newmode == 'lcao':
        spos_ac = paw.atoms.get_scaled_positions() % 1.0
        wfs.load_lazily(hamiltonian, spos_ac)

    if newmode != oldmode:
        paw.scf.reset()

    # Get the forces from the old calculation:
    if r.has_array('CartesianForces'):
        paw.forces.F_av = r.get('CartesianForces', broadcast=True)
    else:
        paw.forces.reset()

    hamiltonian.xc.read(r)

    timer.stop('Read')
def shape(a):
    return (a, a // 2)  # Shapes: (0, 0), (1, 0), (2, 1), ...


natoms = 33

if world.size == 1:
    rank_a = np.zeros(natoms, int)
else:
    # When on more than 2 cores, make sure that at least one core
    # (rank=0) has zero entries:
    lower = 0 if world.size == 2 else 1
    rank_a = gen.randint(lower, world.size, natoms)
assert (rank_a < world.size).all()

serial = AtomPartition(world, np.zeros(natoms, int))
partition = AtomPartition(world, rank_a)
even_partition = partition.as_even_partition()


def check(atomdict, title):
    if world.rank == world.size // 2 or world.rank == 0:
        print('rank %d %s: %s' % (world.rank, title.rjust(10), atomdict))

    # Create a normal, "well-behaved" dict against which to test arraydict.
    ref = dict(atomdict)
    #print atomdict
    assert set(atomdict.keys()) == set(ref.keys())  # check keys()
    for a in atomdict:  # check __iter__, __getitem__
        #print ref[a].shape, atomdict[a].shape
        #ref[a].shape, atomdict[a].shape
        #print ref[a], atomdict[a]