def max_energy_ext_per_directory(energy_cutoffs):
    """
    Get the energy extension string for each run directory, for a given set of calculations.

    :param energy_cutoffs: of the form
        {'zr': {0: np.linspace(60, 120, num=4),
                1: np.linspace(60, 120, num=4),
                2: np.linspace(90, 300, num=4),
                3: np.linspace(60, 120, num=4)},
         'o':  {0: np.linspace(60, 120, num=4),
                1: np.linspace(60, 120, num=4),
                2: np.linspace(60, 120, num=4)}}
    :return: max_energy_exts
    """
    max_energy_exts = []
    for energy in restructure_energy_cutoffs(energy_cutoffs):
        max_energy_per_species = [
            max(energy_per_l_channel.values())
            for energy_per_l_channel in energy.values()
        ]
        max_energy_exts.append(str(int(max(max_energy_per_species))))

    return max_energy_exts

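# For reference: a minimal sketch of the data transform that restructure_energy_cutoffs is
# assumed to perform in these scripts. The real implementation lives elsewhere in this repo;
# this illustrative helper (_restructure_energy_cutoffs_sketch, a hypothetical name) only shows
# the assumed mapping {species: {l: [e_0, e_1, ...]}} -> one {species: {l: e_i}} dict per run
# directory i, consistent with how the return value is consumed above and below.
def _restructure_energy_cutoffs_sketch(n_energies: int, energy_cutoffs: dict) -> list:
    return [
        {species: {l: cutoffs[i] for l, cutoffs in channels.items()}
         for species, channels in energy_cutoffs.items()}
        for i in range(n_energies)
    ]
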
def set_lo_channel_cutoffs(l_max: dict) -> list:
    """
    Define the LO energy cut-offs per LO channel.

    Ranges from (6, 5) to (7, 6) for (Zr, O).

    :return: list energy_cutoffs: Energy cut-offs for each LO channel of Zr and O
    """
    # Valid (Zr, O) l_max pairs: (6, 5) and (7, 6)
    if (l_max['zr'], l_max['o']) not in [(6, 5), (7, 6)]:
        sys.exit("L max pair for Zr and O not valid")

    # Every l-channel from 0 to l_max (inclusive) uses the same series of cut-offs
    cutoff_series = [80, 100, 120, 150, 180, 200]
    energy_cutoffs = {
        'zr': {l: list(cutoff_series) for l in range(l_max['zr'] + 1)},
        'o': {l: list(cutoff_series) for l in range(l_max['o'] + 1)}
    }

    n_energies_per_channel = len(cutoff_series)
    return restructure_energy_cutoffs(n_energies_per_channel, energy_cutoffs)

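# Example usage (illustrative): request the (7, 6) basis. The return value is assumed to be
# one {species: {l: cutoff}} dict per run directory, six here (one per entry in the series):
# energy_cutoffs_per_directory = set_lo_channel_cutoffs({'zr': 7, 'o': 6})
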
def gw_input(root_path: str,
             ground_state_dir: str,
             energy_cutoffs: dict,
             species=['zr', 'o'],
             l_max={'zr': 6, 'o': 5}):
    """
    :param str root_path: Top-level path to calculations
    :param str ground_state_dir: Path to ground state directory
    :param dict energy_cutoffs: LO energy cut-offs per species and l-channel
    :param list species: Species labels
    :param dict l_max: Maximum l-value per species
    """
    # Run script settings
    if cluster == 'Dune3':
        env_vars = OrderedDict([
            ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
            ('OUT', 'terminal.out')
        ])
        module_envs = ['intel/2019']
        slurm_directives = slurm.set_slurm_directives(time=[0, 72, 0, 0],
                                                      partition='all',
                                                      exclusive=True,
                                                      nodes=4,
                                                      ntasks_per_node=2,
                                                      cpus_per_task=18,
                                                      hint='nomultithread')
    elif cluster == 'HAWK':
        omp = 64
        pbs_directives = set_pbs_pro_directives(time=[24, 00, 0],
                                                queue_name='normal',
                                                send_email='abe',
                                                nodes=2,
                                                mpi_ranks_per_node=1,
                                                omp_threads_per_process=omp,
                                                cores_per_node=128,
                                                node_type='rome',
                                                job_name='GW_gs')
        env_vars = OrderedDict([
            ('EXE', '/zhome/academic/HLRS/pri/ipralbuc/exciting-oxygen_release/bin/exciting_mpismp'),
            ('OUT', 'terminal.out')
        ])
        # module_envs = ['intel/19.1.0', 'mkl/19.1.0', 'impi/19.1.0']
        module_envs = ['intel/19.1.0', 'impi/19.1.0']
        mpi_options = ['omplace -nt ' + str(omp)]
    else:
        sys.exit('Cluster choice not recognised: ' + cluster)

    # GW settings
    # Need some excessively large number for nempty => exciting takes the upper bound
    write_input_file_in_root(root_path,
                             A1_gs_input,
                             GWInput(taskname="g0w0",
                                     nempty=3000,
                                     ngridq=[2, 2, 2],
                                     skipgnd=False,
                                     n_omega=32,
                                     freqmax=1.0))

    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(ground_state_dir)
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=0.8),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=0.8)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(os.path.join(ground_state_dir, "Zr.xml")),
        'o': parse_basis_as_string(os.path.join(ground_state_dir, "O.xml"))
    }

    # LO energies
    lorecommendations = parse_lorecommendations(root_path + '/../../lorecommendations.dat', species)

    n_energies_per_channel = 3
    energy_cutoffs = restructure_energy_cutoffs(n_energies_per_channel, energy_cutoffs)

    species_basis_string = "_".join(s.capitalize() + str(l_max[s]) for s in species)

    for ie, energy_cutoff in enumerate(energy_cutoffs):
        # Copy ground state directory to GW directory
        job_dir = root_path + '/max_energy_' + str(ie)
        print('Creating directory, with input.xml, run.sh and optimised basis:', job_dir)
        copy_tree(ground_state_dir, job_dir)

        # Copy input.xml with GW settings
        shutil.copy(root_path + "/input.xml", job_dir + "/input.xml")

        # New run script
        if cluster == 'Dune3':
            slurm_directives['job-name'] = "gw_A1_lmax_" + species_basis_string + str(ie)
            write_file(job_dir + '/run.sh',
                       slurm.set_slurm_script(slurm_directives, env_vars, module_envs))
        else:
            pbs_directives['N'] = "gw_A1_lmax_" + species_basis_string + str(ie)
            write_file(job_dir + '/run.sh',
                       set_pbs_pro(pbs_directives, env_vars, module_envs, mpi_options))

        # Write optimised basis
        write_optimised_lo_bases(species, l_max, energy_cutoff, lorecommendations,
                                 default_basis_string, default_los, job_dir)

        # Remove problem LO from basis
        cut_lo_function(job_dir + '/Zr.xml')

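# Example usage (illustrative; paths are placeholders and `cluster` is assumed to be a
# module-level setting of 'Dune3' or 'HAWK'). Three energies per channel matches the
# hard-coded n_energies_per_channel inside gw_input:
# gw_input(root_path='/path/to/A1_gw',
#          ground_state_dir='/path/to/A1_gw/groundstate',
#          energy_cutoffs={'zr': {l: [80, 100, 120] for l in range(7)},
#                          'o':  {l: [80, 100, 120] for l in range(6)}})
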
def set_up_g0w0(root_path: str, energy_cutoffs: dict):
    # Material
    species = ['zr', 'o']
    l_max = {'zr': 3, 'o': 2}

    gw_root = write_input_file_with_gw_settings(root_path,
                                                A1_gs_input,
                                                GWInput(taskname="g0w0",
                                                        nempty=2000,
                                                        ngridq=[2, 2, 2],
                                                        skipgnd=False,
                                                        n_omega=32))

    # Default basis settings
    # NOTE in this case the ground state is one level up
    default_linear_energies = parse_lo_linear_energies(root_path + "/../groundstate")
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=1.5),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=1.5)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(root_path + "/../groundstate/Zr.xml"),
        'o': parse_basis_as_string(root_path + "/../groundstate/O.xml")
    }

    # LO recommendation energies
    lorecommendations = parse_lorecommendations(root_path + '/lorecommendations.dat', species)

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out')
    ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 24, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    species_basis_string = "".join(s.capitalize() + str(l_max[s]) + '_' for s in species)

    # TODO Would be better to label with extra LOs added
    for ie, energy_cutoff in enumerate(restructure_energy_cutoffs(len(energy_cutoffs['zr'][0]), energy_cutoffs)):

        # Copy ground state directory to GW directory.
        # Use an index, not the max energy, as the max energy does not change in 3/4 runs
        job_dir = gw_root + '/max_energy_i' + str(ie)
        print('Creating directory, with input.xml, run.sh and optimised basis:', job_dir)
        copy_tree(root_path + '/../groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives['job-name'] = "gw_A1_lmax_" + species_basis_string + str(ie) + 'loEcutoff'
        write_file(job_dir + '/run.sh',
                   slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_basis('zr', l_max['zr'], energy_cutoff['zr'],
                                 lorecommendations['zr'], default_basis_string['zr'],
                                 default_los['zr'], job_dir)
        write_optimised_lo_basis('o', l_max['o'], energy_cutoff['o'],
                                 lorecommendations['o'], default_basis_string['o'],
                                 default_los['o'], job_dir)

    return

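# Example usage (illustrative path and cut-offs; this variant reads the ground state from
# root_path + '/../groundstate' and creates one job directory per cut-off index):
# set_up_g0w0('/path/to/A1_calculations/gw_basis',
#             energy_cutoffs={'zr': {l: [80, 100, 120, 150] for l in range(4)},
#                             'o': {l: [80, 100, 120, 150] for l in range(3)}})
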
def set_lo_channel_cutoffs(l_max: dict) -> list:
    """
    Define the LO energy cut-offs per LO channel.

    Ranges from (4, 3) to (7, 6) for (Zr, O).
    I assume 150 will be enough, but may need to go higher.

    :return: list energy_cutoffs: Energy cut-offs for each LO channel of Zr and O
    """
    # Valid (Zr, O) l_max pairs: (4, 3), (5, 4), (6, 5) and (7, 6)
    if (l_max['zr'], l_max['o']) not in [(4, 3), (5, 4), (6, 5), (7, 6)]:
        sys.exit("L max pair for Zr and O not valid")

    # Every l-channel from 0 to l_max (inclusive) uses the same series of cut-offs
    cutoff_series = [80, 100, 120, 150, 180, 200, 250]
    energy_cutoffs = {
        'zr': {l: list(cutoff_series) for l in range(l_max['zr'] + 1)},
        'o': {l: list(cutoff_series) for l in range(l_max['o'] + 1)}
    }

    n_energies_per_channel = len(cutoff_series)
    return restructure_energy_cutoffs(n_energies_per_channel, energy_cutoffs)

def set_up_g0w0(root_path: str):
    # Material
    species = ['zr', 'o']
    l_max = {'zr': 3, 'o': 2}

    # GW root and exciting input file
    gw_root = write_input_file_with_gw_settings(root_path,
                                                A1_gs_input,
                                                GWInput(taskname="g0w0",
                                                        nempty=2000,
                                                        ngridq=[2, 2, 2],
                                                        skipgnd=False,
                                                        n_omega=32))

    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(root_path + "/groundstate")
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=1.5),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=1.5)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(root_path + "/groundstate/Zr.xml"),
        'o': parse_basis_as_string(root_path + "/groundstate/O.xml")
    }

    # LO recommendation energies
    lorecommendations = parse_lorecommendations(root_path + '/lorecommendations.dat', species)

    # Optimised LO energy cut-offs. The Zr l=2 channel requires far more LOs to converge.
    # HOWEVER, with a reduced MT radius, the max cut-off should be less than last time.
    # Increased rgkmax to 8.
    # Directory index:             0    1    2    3    4    5    6    7    8    9   10   11   12   13   14   15   16   17   18
    energy_cutoffs = {
        'zr': {0: [75, 100, 100, 100, 100, 150, 160, 180, 200, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180],
               1: [75, 100, 100, 100, 100, 150, 160, 180, 200, 180, 180, 180, 200, 250, 300, 350, 400, 460, 520],
               2: [75, 100, 120, 150, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200],
               3: [75, 100, 100, 100, 100, 150, 160, 180, 200, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180]},
        'o':  {0: [75, 100, 100, 100, 100, 100, 100, 100, 100, 120, 140, 160, 140, 140, 140, 140, 140, 140, 140],
               1: [75, 100, 100, 100, 100, 100, 100, 100, 100, 120, 140, 160, 140, 140, 140, 140, 140, 140, 140],
               2: [75, 100, 100, 100, 100, 100, 100, 100, 100, 120, 140, 160, 140, 140, 140, 140, 140, 140, 140]}
    }
    # Indices 12 - 15 really try to converge the l=1 channel of Zr, adding one LO at a time
    # (see LO recommendations). One LO should then be added to each of the l=0,2,3 channels
    # of Zr to check how much the result changes.

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out')
    ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 24, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    species_basis_string = "".join(s.capitalize() + str(l_max[s]) + '_' for s in species)

    for ie, energy_cutoff in enumerate(restructure_energy_cutoffs(len(energy_cutoffs['zr'][0]), energy_cutoffs)):

        # Copy ground state directory to GW directory.
        # Use an index, not the max energy, as the max energy does not change in 3/4 runs
        job_dir = gw_root + '/max_energy_i' + str(ie)
        print('Creating directory, with input.xml, run.sh and optimised basis:', job_dir)
        copy_tree(root_path + '/groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives['job-name'] = "gw_A1_lmax_" + species_basis_string + str(ie) + 'loEcutoff'
        write_file(job_dir + '/run.sh',
                   slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_basis('zr', l_max['zr'], energy_cutoff['zr'],
                                 lorecommendations['zr'], default_basis_string['zr'],
                                 default_los['zr'], job_dir)
        write_optimised_lo_basis('o', l_max['o'], energy_cutoff['o'],
                                 lorecommendations['o'], default_basis_string['o'],
                                 default_los['o'], job_dir)

    return

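# Example usage (illustrative path; this variant hard-codes the cut-offs above and expects
# the ground state in root_path + '/groundstate'):
# set_up_g0w0('/path/to/A1_calculations')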