Example #1
def set_up_g0w0(root_path: str, q_grid: list):

    species = ['zr', 'o']
    l_max = {'zr': 3, 'o': 2}
    q_str = "".join(str(q) for q in q_grid)

    # GW input file
    gw_input_string = set_gw_input_string(A1_gs_input,
                                          GWInput(taskname="g0w0", nempty=2000, ngridq=q_grid, skipgnd=False, n_omega=32)
                                          )

    run_settings = {'222': {'nodes': 4,  'time': [0, 24, 0, 0]},
                    '444': {'nodes': 10, 'time': [0, 64, 0, 0]},
                    '666': {'nodes': 12, 'time': [0, 100, 0, 0]}
                    }

    # Slurm script settings
    env_vars = OrderedDict([('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
                            ('OUT', 'terminal.out')
                            ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=run_settings[q_str]['time'],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=run_settings[q_str]['nodes'],
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    # Job directory
    job_dir = root_path + '/' + q_str

    # Write input, basis files and slurm file
    print('Creating directory, with input.xml, run.sh and optimised basis:', job_dir)
    copy_tree(root_path + '/groundstate', job_dir)

    # Copy input.xml with GW settings
    write_file(job_dir + "/input.xml", gw_input_string)

    # New Slurm script
    slurm_directives['job-name'] = "gw_A1_" + q_str
    write_file(job_dir + '/run.sh', slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

    # Optimised bases
    write_file(job_dir + '/Zr.xml', zr_basis)
    write_file(job_dir + '/O.xml', o_basis)
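
A minimal usage sketch (the root path is illustrative): each q-grid must correspond to a key of run_settings once joined, i.e. [2, 2, 2] -> '222'.

for q_grid in ([2, 2, 2], [4, 4, 4], [6, 6, 6]):
    set_up_g0w0('/users/sol/abuccheri/gw_benchmarks/A1', q_grid)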
Example #2
def vary_nempty(root: str, basis_file_path: str, nempty_range: List[int]):
    """
    Generate inputs with varying numbers of empty states

    """

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out')
    ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 72, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=1,
                                                  cpus_per_task=36,
                                                  hint='nomultithread')

    for n_empty in nempty_range:

        # Job directory
        job_dir = os.path.join(root, str(n_empty))

        # GW INPUT
        gw_settings = GWInput(taskname="g0w0",
                              nempty=n_empty,
                              ngridq=[2, 2, 2],
                              skipgnd=False,
                              n_omega=32,
                              freqmax=1.0)
        write_input_file_in_root(job_dir, A1_gs_input, gw_settings)

        # Just copy the basis files and STATE.OUT
        for species in ['Zr', 'O']:
            species_file = species + ".xml"
            shutil.copy(basis_file_path + "/" + species_file,
                        job_dir + "/" + species_file)
        shutil.copy(basis_file_path + "/STATE.OUT", job_dir + "/STATE.OUT")

        # Slurm script
        slurm_directives['job-name'] = "gw_nempty_" + str(n_empty)
        write_file(
            job_dir + '/run.sh',
            slurm.set_slurm_script(slurm_directives, env_vars, module_envs))
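
A usage sketch (paths illustrative, not taken from the original module), generating one job directory per empty-state count:

vary_nempty('/users/sol/abuccheri/gw_benchmarks/A1/nempty',
            '/users/sol/abuccheri/gw_benchmarks/A1/groundstate',
            [500, 1000, 1500, 2000])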
Example #3
def gw_input(root_path: str,
             ground_state_dir: str,
             energy_cutoffs: List[int],
             species=['zr', 'o'],
             l_max={
                 'zr': 6,
                 'o': 5
             }):
    """

    :param str root_path: Top level path to calculations
    :param ground_state_dir: Path to groundstate directory
    :param List[int] energy_cutoffs: LO energy cut-offs
    :param species:
    :param l_max:
    """

    # Run script settings
    if cluster == 'Dune3':
        env_vars = OrderedDict([
            ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
            ('OUT', 'terminal.out')
        ])
        module_envs = ['intel/2019']
        slurm_directives = slurm.set_slurm_directives(time=[0, 72, 0, 0],
                                                      partition='all',
                                                      exclusive=True,
                                                      nodes=4,
                                                      ntasks_per_node=2,
                                                      cpus_per_task=18,
                                                      hint='nomultithread')
    elif cluster == 'HAWK':
        omp = 64
        pbs_directives = set_pbs_pro_directives(time=[24, 0, 0],
                                                queue_name='normal',
                                                send_email='abe',
                                                nodes=2,
                                                mpi_ranks_per_node=1,
                                                omp_threads_per_process=omp,
                                                cores_per_node=128,
                                                node_type='rome',
                                                job_name='GW_gs')

        env_vars = OrderedDict([
            ('EXE', '/zhome/academic/HLRS/pri/ipralbuc/exciting-oxygen_release/bin/exciting_mpismp'),
            ('OUT', 'terminal.out')
        ])
        #module_envs = ['intel/19.1.0', 'mkl/19.1.0', 'impi/19.1.0']
        module_envs = ['intel/19.1.0', 'impi/19.1.0']
        mpi_options = ['omplace -nt ' + str(omp)]

    else:
        raise ValueError('Cluster choice not recognised: ' + cluster)

    # GW settings
    # Need some excessively large number for nempty => exciting takes upper bound
    write_input_file_in_root(
        root_path, A1_gs_input,
        GWInput(taskname="g0w0",
                nempty=3000,
                ngridq=[2, 2, 2],
                skipgnd=False,
                n_omega=32,
                freqmax=1.0))

    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(ground_state_dir)
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=0.8),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=0.8)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(os.path.join(ground_state_dir, "Zr.xml")),
        'o': parse_basis_as_string(os.path.join(ground_state_dir, "O.xml"))
    }

    # LO energies
    lorecommendations = parse_lorecommendations(
        root_path + '/../../lorecommendations.dat', species)

    n_energies_per_channel = 3
    energy_cutoffs = restructure_energy_cutoffs(n_energies_per_channel,
                                                energy_cutoffs)

    species_basis_string = "_".join(s.capitalize() + str(l_max[s])
                                    for s in species)

    for ie, energy_cutoff in enumerate(energy_cutoffs):
        # Copy ground state directory to GW directory
        job_dir = root_path + '/max_energy_' + str(ie)
        print(
            'Creating directory, with input.xml, run.sh and optimised basis:',
            job_dir)
        copy_tree(ground_state_dir, job_dir)

        # Copy input.xml with GW settings
        shutil.copy(root_path + "/input.xml", job_dir + "/input.xml")

        # New run script
        if cluster == 'Dune3':
            slurm_directives[
                'job-name'] = "gw_A1_lmax_" + species_basis_string + str(ie)
            write_file(
                job_dir + '/run.sh',
                slurm.set_slurm_script(slurm_directives, env_vars,
                                       module_envs))
        else:
            pbs_directives['N'] = "gw_A1_lmax_" + species_basis_string + str(
                ie)
            write_file(
                job_dir + '/run.sh',
                set_pbs_pro(pbs_directives, env_vars, module_envs,
                            mpi_options))

        # Write optimised basis
        write_optimised_lo_bases(species, l_max, energy_cutoff,
                                 lorecommendations, default_basis_string,
                                 default_los, job_dir)

        # Remove problem LO from basis
        cut_lo_function(job_dir + '/Zr.xml')
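
restructure_energy_cutoffs is used throughout these examples, but its implementation is not shown in this collection. A minimal sketch consistent with how it is called here (n_energies runs, each yielding one {species: {l: energy}} dict) might be:

def restructure_energy_cutoffs(n_energies: int, energy_cutoffs: dict) -> list:
    """Transpose {species: {l: [e_0, ..., e_n]}} into per-run dicts:
    [{species: {l: e_0}}, ..., {species: {l: e_n}}]."""
    return [{species: {l: energies[i] for l, energies in channels.items()}
             for species, channels in energy_cutoffs.items()}
            for i in range(n_energies)]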
Example #4
def set_up_g0w0(root_path: str):

    # Material
    species = ['zr', 'o']
    l_max = {'zr': 4, 'o': 3}

    # GW root and exciting input file
    gw_root = write_input_file_with_gw_settings(
        root_path, A1_gs_input,
        GWInput(taskname="g0w0",
                nempty=1000,
                ngridq=[2, 2, 2],
                skipgnd=False,
                n_omega=32))

    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(root_path +
                                                       "/groundstate")
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=0.8),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=0.8)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(root_path + "/groundstate/Zr.xml"),
        'o': parse_basis_as_string(root_path + "/groundstate/O.xml")
    }

    # LO recommendation energies
    lorecommendations = parse_lorecommendations(
        root_path + '/lorecommendations.dat', species)

    # Optimised LO energy cutoffs
    energy_cutoffs = {
        'zr': {
            0: [60, 80, 100, 120, 140, 160, 180, 200],
            1: [60, 80, 100, 120, 140, 160, 180, 200],
            2: [60, 80, 100, 120, 140, 160, 180, 200],
            3: [60, 80, 100, 120, 140, 160, 180, 200],
            4: [60, 80, 100, 120, 140, 160, 180, 200]
        },
        'o': {
            0: [60, 80, 100, 120, 140, 160, 180, 200],
            1: [60, 80, 100, 120, 140, 160, 180, 200],
            2: [60, 80, 100, 120, 140, 160, 180, 200],
            3: [60, 80, 100, 120, 140, 160, 180, 200]
        }
    }

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out')
    ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 6, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=1,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    species_basis_string = ''
    for s in species:
        species_basis_string += s.capitalize() + str(l_max[s]) + '_'

    for energy_cutoff in restructure_energy_cutoffs(
            len(energy_cutoffs['zr'][0]), energy_cutoffs):
        # Copy groundstate directory to GW directory
        max_energy_per_species = [
            max(energy_per_l_channel.values())
            for energy_per_l_channel in energy_cutoff.values()
        ]

        job_dir = gw_root + '/max_energy_' + str(
            int(max(max_energy_per_species)))
        print(
            'Creating directory, with input.xml, run.sh and optimised basis:',
            job_dir)
        copy_tree(root_path + '/groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives[
            'job-name'] = "gw_A1_lmax_" + species_basis_string + str(
                int(max(max_energy_per_species))) + 'loEcutoff'
        write_file(
            job_dir + '/run.sh',
            slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_basis('zr', l_max['zr'], energy_cutoff['zr'],
                                 lorecommendations['zr'],
                                 default_basis_string['zr'], default_los['zr'],
                                 job_dir)
        write_optimised_lo_basis('o', l_max['o'], energy_cutoff['o'],
                                 lorecommendations['o'],
                                 default_basis_string['o'], default_los['o'],
                                 job_dir)

    return
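
A worked illustration of the directory naming above (values illustrative): for a restructured cutoff energy_cutoff = {'zr': {0: 60, 1: 60, 2: 60, 3: 60, 4: 60}, 'o': {0: 60, 1: 60, 2: 60, 3: 60}}, max_energy_per_species evaluates to [60, 60], so job_dir becomes gw_root + '/max_energy_60'.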
Example #5
def set_up_g0w0(root_path: str, energy_cutoffs: dict):
    # Material
    species = ['zr', 'o']
    l_max = {'zr': 3, 'o': 2}

    gw_root = write_input_file_with_gw_settings(
        root_path, A1_gs_input,
        GWInput(taskname="g0w0",
                nempty=2000,
                ngridq=[2, 2, 2],
                skipgnd=False,
                n_omega=32))

    # Default basis settings
    # NOTE in this case ground state is one level up
    default_linear_energies = parse_lo_linear_energies(root_path +
                                                       "/../groundstate")
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=1.5),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=1.5)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(root_path + "/../groundstate/Zr.xml"),
        'o': parse_basis_as_string(root_path + "/../groundstate/O.xml")
    }

    # LO recommendation energies
    lorecommendations = parse_lorecommendations(
        root_path + '/lorecommendations.dat', species)

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out')
    ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 24, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    species_basis_string = "".join(s.capitalize() + str(l_max[s]) + '_'
                                   for s in species)

    # TODO Would be better to label with extra LOs added
    for ie, energy_cutoff in enumerate(
            restructure_energy_cutoffs(len(energy_cutoffs['zr'][0]),
                                       energy_cutoffs)):
        # Copy ground state directory to GW directory
        # Use an index not max energy, as the max energy does not change in 3/4 runs
        job_dir = gw_root + '/max_energy_i' + str(ie)
        print(
            'Creating directory, with input.xml, run.sh and optimised basis:',
            job_dir)
        copy_tree(root_path + '/../groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives[
            'job-name'] = "gw_A1_lmax_" + species_basis_string + str(
                ie) + 'loEcutoff'
        write_file(
            job_dir + '/run.sh',
            slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_basis('zr', l_max['zr'], energy_cutoff['zr'],
                                 lorecommendations['zr'],
                                 default_basis_string['zr'], default_los['zr'],
                                 job_dir)
        write_optimised_lo_basis('o', l_max['o'], energy_cutoff['o'],
                                 lorecommendations['o'],
                                 default_basis_string['o'], default_los['o'],
                                 job_dir)

    return
Example #6
def set_up_pure_mpi_scaling_tests(scaling_root: str):
    """
    Pure MPI scaling tests for GW, from 1 to 10 nodes, using all cores per node
    and no threading.

    GW settings of q = [2, 2, 2] (8 q-points) and 45 imaginary frequency points
    mean that at 10 nodes (360 cores), each core gets one (q-point, frequency point)
    pair. Scaling is expected to be ~ linear for all calculations.

    It turns out that GW is only MPI-parallelised over q-points, so this test
    doesn't give anything useful.
    """

    input_xml = inputs_q222_set1.input_xml
    zr_basis_xml = inputs_q222_set1.zr_basis_xml
    o_basis_xml = inputs_q222_set1.o_basis_xml

    # Check GW input script settings
    match = re.search('nempty="(.+?)"', input_xml)
    n_empty = int(re.findall(r'\d+', match.group())[0])
    assert n_empty == 100, "n_empty != 100"

    match = re.search('ngridq="(.+?)"', input_xml)
    q_grid = [int(q) for q in re.findall(r'\d+', match.group())]
    assert q_grid == [2, 2, 2], "q_grid != [2, 2, 2]"

    # Slurm script settings
    env_vars = OrderedDict([('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
                            ('OUT', 'terminal.out'),
                            ('export MKL_NUM_THREADS', '1')
                            ])

    module_envs = ['intel/2019']

    # MPI tasks per node (one per core, pure MPI)
    ntasks_per_node = 36

    # OMP threads per MPI rank
    cpus_per_task = 1

    # Nodes to use in scaling tests
    nodes = np.arange(1, 11)

    # These nodes differ in memory or processor from the rest of Dune 3,
    # hence exclude them
    exclude_nodes = ['node' + str(n) for n in range(197, 208 + 1)]

    # Timing in days, where key = node_count
    times = {1: [4, 0, 0, 0],
             2: [4, 0, 0, 0],
             3: [4, 0, 0, 0],

             4: [2, 0, 0, 0],
             5: [2, 0, 0, 0],
             6: [2, 0, 0, 0],

             7: [1, 0, 0, 0],
             8: [1, 0, 0, 0],
             9: [1, 0, 0, 0],
            10: [1, 0, 0, 0]}

    for node_count in nodes:
        job_dir = scaling_root + '/n_nodes_' + str(node_count)
        print("Writing files to:", job_dir)

        Path(job_dir).mkdir(parents=True, exist_ok=True)

        write_file(job_dir + '/input.xml', input_xml)
        write_file(job_dir + '/Zr.xml', zr_basis_xml)
        write_file(job_dir + '/O.xml', o_basis_xml)

        slurm_directives = slurm.set_slurm_directives(job_name='scaling-pure-mpi-GW',
                                                      time=times[node_count],
                                                      partition='all',
                                                      exclusive=True,
                                                      nodes=node_count,
                                                      ntasks_per_node=ntasks_per_node,
                                                      cpus_per_task=cpus_per_task,
                                                      hint='nomultithread',
                                                      exclude=exclude_nodes)
        write_file(job_dir + '/run.sh', slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

    return
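
The nempty/ngridq regex checks above recur in the other scaling set-ups below; a hypothetical helper (parse_integer_attribute is not part of the original module) could consolidate them:

import re

def parse_integer_attribute(xml_string: str, attribute: str) -> list:
    """Return the integers of an XML attribute, e.g. ngridq="2 2 2" -> [2, 2, 2]."""
    match = re.search(attribute + r'="(.+?)"', xml_string)
    assert match is not None, attribute + ' not found in input string'
    return [int(x) for x in re.findall(r'\d+', match.group(1))]

# Equivalent to the inline checks:
# n_empty = parse_integer_attribute(input_xml, 'nempty')[0]
# q_grid = parse_integer_attribute(input_xml, 'ngridq')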
Example #7
def set_up_g0w0(root_path: str):

    # Material
    species = ['zr', 'o']
    l_max = {'zr': 4, 'o': 3}

    # GW root and exciting input file
    # nempty needs to be > 1000 to account for total number of empty states
    # as a consequence of these large LO basis sets.
    # Note, i0 and i1 were ok
    gw_root = write_input_file_with_gw_settings(
        root_path, A1_gs_input,
        GWInput(taskname="g0w0",
                nempty=2000,
                ngridq=[2, 2, 2],
                skipgnd=False,
                n_omega=32))

    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(root_path +
                                                       "/groundstate")
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=1.5),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=1.5)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(root_path + "/groundstate/Zr.xml"),
        'o': parse_basis_as_string(root_path + "/groundstate/O.xml")
    }

    # LO recommendation energies
    lorecommendations = parse_lorecommendations(
        root_path + '/lorecommendations.dat', species)

    # Optimised LO energy cutoffs. The Zr l=2 channel requires far more LOs to converge.
    # HOWEVER, with a reduced MT radius, the max cut-off should be less than last time.
    # Increased rgkmax to 8.
    energy_cutoffs = {
        'zr': {
            0: [75, 100, 120, 120, 120, 120, 120],
            1: [75, 100, 120, 120, 120, 120, 120],
            2: [75, 100, 100, 150, 200, 250, 300],
            3: [75, 100, 120, 120, 120, 120, 120],
            4: [75, 100, 120, 120, 120, 120, 120]
        },
        'o': {
            0: [75, 100, 120, 120, 120, 120, 120],
            1: [75, 100, 120, 120, 120, 120, 120],
            2: [75, 100, 120, 120, 120, 120, 120],
            3: [75, 100, 120, 120, 120, 120, 120]
        }
    }

    n_energies_per_channel = len(energy_cutoffs['zr'][0])

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out')
    ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 24, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    species_basis_string = ''
    for s in species:
        species_basis_string += s.capitalize() + str(l_max[s]) + '_'

    for ie, energy_cutoff in enumerate(
            restructure_energy_cutoffs(n_energies_per_channel,
                                       energy_cutoffs)):
        # Copy groundstate directory to GW directory
        # Use an index not max energy, as the max energy does not change in 3/4 runs
        job_dir = gw_root + '/max_energy_i' + str(ie)
        print(
            'Creating directory, with input.xml, run.sh and optimised basis:',
            job_dir)
        copy_tree(root_path + '/groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives[
            'job-name'] = "gw_A1_lmax_" + species_basis_string + str(
                ie) + 'loEcutoff'
        write_file(
            job_dir + '/run.sh',
            slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_basis('zr', l_max['zr'], energy_cutoff['zr'],
                                 lorecommendations['zr'],
                                 default_basis_string['zr'], default_los['zr'],
                                 job_dir)
        write_optimised_lo_basis('o', l_max['o'], energy_cutoff['o'],
                                 lorecommendations['o'],
                                 default_basis_string['o'], default_los['o'],
                                 job_dir)

    return
Example #8
def input_for_lmax_pair(root_path: str, species: list, l_max: dict):
    """
    Given an l_max pair, create G0W0 inputs for a specified range of LO cut-offs per channel,
    as defined in set8/basis.py

    :param str root_path: Top level path to calculations
    :param List[str] species: List of species
    :param dict l_max: l_max associated with each species
    :return:
    """

    # Slurm script settings
    env_vars = OrderedDict([('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
                            ('OUT', 'terminal.out')
                            ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 72, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    # Need some excessively large number for nempty => exciting takes upper bound
    gw_root = write_input_file(root_path,
                               A1_gs_input,
                               GWInput(taskname="g0w0",
                                       nempty=2000,
                                       ngridq=[2, 2, 2],
                                       skipgnd=False,
                                       n_omega=32,
                                       freqmax=1.0)
                               )
    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(root_path + "/groundstate")
    default_los = {'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=0.8),
                   'o': DefaultLOs(default_linear_energies['o'],  energy_tol=0.8)}

    # Default basis strings with .format tags
    default_basis_string = {'zr': parse_basis_as_string(root_path + "/groundstate/Zr.xml"),
                            'o': parse_basis_as_string(root_path + "/groundstate/O.xml")}

    # LO energies
    lorecommendations = parse_lorecommendations(root_path + '/lorecommendations.dat', species)
    energy_cutoffs = set_lo_channel_cutoffs(l_max)

    species_basis_string = "_".join(s.capitalize() + str(l_max[s]) for s in species)

    # for ie, energy_cutoff in enumerate(energy_cutoffs):
    # Initial set 8
    # for ie in range(0, 4):
    # Set 8 part 2
    for ie in range(4, 7):
        energy_cutoff = energy_cutoffs[ie]
        # Copy ground state directory to GW directory
        job_dir = gw_root + '/max_energy_i' + str(ie)
        print('Creating directory, with input.xml, run.sh and optimised basis:', job_dir)
        copy_tree(root_path + '/groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives['job-name'] = "gw_A1_lmax_" + species_basis_string + str(ie)
        write_file(job_dir + '/run.sh', slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_bases(species, l_max, energy_cutoff, lorecommendations,
                                 default_basis_string, default_los, job_dir)
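
A usage sketch (path illustrative; the l_max pair mirrors the defaults used elsewhere in this collection):

input_for_lmax_pair('/users/sol/abuccheri/gw_benchmarks/A1/set8',
                    species=['zr', 'o'],
                    l_max={'zr': 6, 'o': 5})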
Example #9
def set_up_g0w0(root_path: str):

    # Material
    species = ['zr', 'o']
    l_max = {'zr': 3, 'o': 2}

    # GW root and exciting input file
    gw_root = write_input_file_with_gw_settings(root_path,
                                                A1_gs_input,
                                                GWInput(taskname="g0w0", nempty=2000, ngridq=[2, 2, 2], skipgnd=False, n_omega=32)
                                                )

    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(root_path + "/groundstate")
    default_los = {'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=1.5),
                   'o': DefaultLOs(default_linear_energies['o'],  energy_tol=1.5)}

    # Default basis strings with .format tags
    default_basis_string = {'zr': parse_basis_as_string(root_path + "/groundstate/Zr.xml"),
                            'o': parse_basis_as_string(root_path + "/groundstate/O.xml")}

    # LO recommendation energies
    lorecommendations = parse_lorecommendations(root_path + '/lorecommendations.dat', species)

    # Optimised LO energy cutoffs. The Zr l=2 channel requires far more LOs to converge.
    # HOWEVER, with a reduced MT radius, the max cut-off should be less than last time.
    # Increased rgkmax to 8.
    #   Directory index:         0    1    2    3    4    5    6    7    8    9   10    11   12   13   14   15   16   17   18
    energy_cutoffs = {'zr': {0: [75, 100, 100, 100, 100, 150, 160, 180, 200, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180],
                             1: [75, 100, 100, 100, 100, 150, 160, 180, 200, 180, 180, 180, 200, 250, 300, 350, 400, 460, 520],
                             2: [75, 100, 120, 150, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200],
                             3: [75, 100, 100, 100, 100, 150, 160, 180, 200, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180]},

                       'o': {0: [75, 100, 100, 100, 100, 100, 100, 100, 100, 120, 140, 160, 140, 140, 140, 140, 140, 140, 140],
                             1: [75, 100, 100, 100, 100, 100, 100, 100, 100, 120, 140, 160, 140, 140, 140, 140, 140, 140, 140],
                             2: [75, 100, 100, 100, 100, 100, 100, 100, 100, 120, 140, 160, 140, 140, 140, 140, 140, 140, 140]}
                      }
    # 12 - 15 really are trying to converge the l=1 channel of Zr, adding one LO at a time (see LO recommendations)
    # Then should add one LO into every l=0,2,3 of Zr and check how much it changes.

    # Slurm script settings
    env_vars = OrderedDict([('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
                            ('OUT', 'terminal.out')
                            ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 24, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    species_basis_string = "".join(s.capitalize() + str(l_max[s]) + '_' for s in species)

    for ie, energy_cutoff in enumerate(restructure_energy_cutoffs(len(energy_cutoffs['zr'][0]), energy_cutoffs)):

        # Copy ground state directory to GW directory
        # Use an index not max energy, as the max energy does not change in 3/4 runs
        job_dir = gw_root + '/max_energy_i' + str(ie)
        print('Creating directory, with input.xml, run.sh and optimised basis:', job_dir)
        copy_tree(root_path + '/groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives['job-name'] = "gw_A1_lmax_" + species_basis_string + str(ie) + 'loEcutoff'
        write_file(job_dir + '/run.sh', slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_basis('zr', l_max['zr'], energy_cutoff['zr'], lorecommendations['zr'],
                                 default_basis_string['zr'], default_los['zr'], job_dir)
        write_optimised_lo_basis('o', l_max['o'], energy_cutoff['o'], lorecommendations['o'],
                                 default_basis_string['o'], default_los['o'], job_dir)

    return
Example #10
def set_up_omp_mpi_scaling_tests(scaling_root: str):
    """
    OMP-MPI scaling tests for GW, from 1 to 10 nodes, using all cores per node
    with 4 MPI processes and 9 OMP threads per process.

    GW is only MPI-parallelised over q-points => use:
    q = [8,8,8] and n_frequency points = 1, such that this can scale on up to 512 cores.

    From the tests /users/sol/abuccheri/gw_benchmarks/scaling/mpi_omp_ratio/ratio
    q = 3x3x4 = 36 and n_freq = 2. One node, no hyperthreading.

        threads       MPI ranks      time (s)
        --------------------------------------------------
        1              36            768.92
        2              18            812.78
        4               9            643.22
        9               4            619.27
        18              2            907.17
        36              1            1484.77

    Implies that I should try the same scaling test with hybrid settings:
    n_threads = 9 and n_mpi_ranks = 4

    On Dune 3, CoresPerSocket=18 (and 2 sockets). If one uses I_MPI_PIN_DOMAIN = omp
    then that will create 4 domains, which is inconsistent with the number of sockets.
    I_MPI_PIN_DOMAIN = sock is probably optimal.

    """

    input_xml = inputs_set2.input_xml
    zr_basis_xml = inputs_set2.zr_basis_xml
    o_basis_xml = inputs_set2.o_basis_xml

    # Check GW input script settings
    match = re.search('nempty="(.+?)"', input_xml)
    n_empty = int(re.findall(r'\d+', match.group())[0])
    assert n_empty == 100, "n_empty != 100"

    match = re.search('ngridq="(.+?)"', input_xml)
    q_grid = [int(q) for q in re.findall(r'\d+', match.group())]
    assert q_grid == [8, 8, 8], "q_grid != [8, 8, 8]"

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out'), ('export MKL_NUM_THREADS', '1'),
        ('export I_MPI_PIN_DOMAIN', 'sock')
    ])

    module_envs = ['intel/2019']

    # MPI tasks per node
    ntasks_per_node = 4

    # OMP threads per MPI rank
    cpus_per_task = 9

    # Nodes to use in scaling tests
    # Dune 3 only appears to have 10 nodes available from nodes 181 - 196
    nodes = np.arange(1, 10 + 1)

    # These nodes differ in memory or processor from the rest of Dune 3,
    # hence exclude 197 - 208
    exclude_nodes = ['node' + str(n) for n in range(197, 208 + 1)]

    # Timing in days, where key = node_count
    times = {
        1: [4, 0, 0, 0],
        2: [4, 0, 0, 0],
        3: [4, 0, 0, 0],
        4: [2, 0, 0, 0],
        5: [2, 0, 0, 0],
        6: [2, 0, 0, 0],
        7: [1, 0, 0, 0],
        8: [1, 0, 0, 0],
        9: [1, 0, 0, 0],
        10: [1, 0, 0, 0],
        11: [1, 0, 0, 0],
        12: [1, 0, 0, 0],
        13: [1, 0, 0, 0],
        14: [1, 0, 0, 0]
    }

    for node_count in nodes:
        job_dir = scaling_root + '/n_nodes_' + str(node_count)
        print("Writing files to:", job_dir)

        Path(job_dir).mkdir(parents=True, exist_ok=True)

        write_file(job_dir + '/input.xml', input_xml)
        write_file(job_dir + '/Zr.xml', zr_basis_xml)
        write_file(job_dir + '/O.xml', o_basis_xml)

        slurm_directives = slurm.set_slurm_directives(
            job_name='scaling-omp-mpi-GW',
            time=times[node_count],
            partition='all',
            exclusive=True,
            nodes=node_count,
            ntasks_per_node=ntasks_per_node,
            cpus_per_task=cpus_per_task,
            hint='nomultithread',
            exclude=exclude_nodes)
        write_file(
            job_dir + '/run.sh',
            slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

    return
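
A small post-processing sketch (hypothetical; the timings are those quoted in the mpi_omp_ratio table in the docstring above) showing why 4 ranks x 9 threads was chosen:

timings = {1: 768.92, 2: 812.78, 4: 643.22, 9: 619.27, 18: 907.17, 36: 1484.77}
pure_mpi = timings[1]
for threads, seconds in sorted(timings.items()):
    # 36 cores per node, no hyperthreading: ranks = 36 // threads
    print(f'{threads:>2} OMP threads x {36 // threads:>2} MPI ranks: '
          f'{pure_mpi / seconds:.2f}x relative to pure MPI')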
Example #11
def set_up_g0w0(root_path: str):

    # Material
    species = ['zr', 'o']
    l_max = {'zr': 4, 'o': 3}

    # GW root and exciting input file
    # nempty set to some EXCESSIVELY large value, such that exciting takes all states.
    gw_root = write_input_file_with_gw_settings(
        root_path, A1_gs_input,
        GWInput(taskname="g0w0",
                nempty=2000,
                ngridq=[2, 2, 2],
                skipgnd=False,
                n_omega=32))

    # Default basis settings
    default_linear_energies = parse_lo_linear_energies(root_path +
                                                       "/groundstate")
    default_los = {
        'zr': DefaultLOs(default_linear_energies['zr'], energy_tol=0.8),
        'o': DefaultLOs(default_linear_energies['o'], energy_tol=0.8)
    }

    # Default basis strings with .format tags
    default_basis_string = {
        'zr': parse_basis_as_string(root_path + "/groundstate/Zr.xml"),
        'o': parse_basis_as_string(root_path + "/groundstate/O.xml")
    }

    # LO recommendation energies
    lorecommendations = parse_lorecommendations(
        root_path + '/lorecommendations.dat', species)

    # Optimised LO energy cutoffs. For the first three calculations, keep Zr l=2 fixed and increase
    # all other channels a lot, and see what effect that has. Then increase Zr l=2 further to check
    # that it is also converged.
    # Final calculation: keep all channels the same as the previous run, but increase the l=2 cut-off
    # to check it's converged.
    n_energies_per_channel = 8
    #                             i0    i1   i2   i3   i4   i5   i6   i7      Struggling to run i3, but i1 looks converged, hence why I've added i4
    energy_cutoffs = {
        'zr': {
            0: [150, 200, 250, 250, 200, 120, 120, 120],
            1: [150, 200, 250, 250, 200, 120, 120, 120],
            2: [300, 300, 300, 350, 350, 350, 400, 450],
            3: [150, 200, 250, 250, 200, 120, 120, 120],
            4: [150, 200, 250, 250, 200, 120, 120, 120]
        },
        'o': {
            0: [150, 200, 250, 250, 200, 120, 120, 120],
            1: [150, 200, 250, 250, 200, 120, 120, 120],
            2: [150, 200, 250, 250, 200, 120, 120, 120],
            3: [150, 200, 250, 250, 200, 120, 120, 120]
        }
    }

    # Note, i6 misses a function at 401 Ha and whilst i7 runs, it returns a metallic solution
    # Re-running i7 with

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out')
    ])
    module_envs = ['intel/2019']
    slurm_directives = slurm.set_slurm_directives(time=[0, 24, 0, 0],
                                                  partition='all',
                                                  exclusive=True,
                                                  nodes=4,
                                                  ntasks_per_node=2,
                                                  cpus_per_task=18,
                                                  hint='nomultithread')

    species_basis_string = ''
    for s in species:
        species_basis_string += s.capitalize() + str(l_max[s]) + '_'

    for ie, energy_cutoff in enumerate(
            restructure_energy_cutoffs(n_energies_per_channel,
                                       energy_cutoffs)):
        # Copy groundstate directory to GW directory
        # Use an index not max energy, as the max energy does not change in 3/4 runs
        job_dir = gw_root + '/max_energy_i' + str(ie)
        print(
            'Creating directory, with input.xml, run.sh and optimised basis:',
            job_dir)
        copy_tree(root_path + '/groundstate', job_dir)

        # Copy input.xml with GW settings
        shutil.copy(gw_root + "/input.xml", job_dir + "/input.xml")

        # New Slurm script
        slurm_directives[
            'job-name'] = "gw_A1_lmax_" + species_basis_string + str(
                ie) + 'loEcutoff'
        write_file(
            job_dir + '/run.sh',
            slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

        # Write optimised basis
        write_optimised_lo_basis('zr', l_max['zr'], energy_cutoff['zr'],
                                 lorecommendations['zr'],
                                 default_basis_string['zr'], default_los['zr'],
                                 job_dir)
        write_optimised_lo_basis('o', l_max['o'], energy_cutoff['o'],
                                 lorecommendations['o'],
                                 default_basis_string['o'], default_los['o'],
                                 job_dir)

    return
Example #12
def set_up_pure_mpi_scaling_tests(scaling_root: str):
    """
    Pure MPI scaling tests for GW, from 1 to 10 nodes, using all cores per node
    and no threading.

    GW is only MPI-parallelised over q-points => use:
    q = [8,8,8] and n_frequency points = 1, such that this can scale on up to 512 cores.

    """

    input_xml = inputs_set2.input_xml
    zr_basis_xml = inputs_set2.zr_basis_xml
    o_basis_xml = inputs_set2.o_basis_xml

    # Check GW input script settings
    match = re.search('nempty="(.+?)"', input_xml)
    n_empty = int(re.findall(r'\d+', match.group())[0])
    assert n_empty == 100, "n_empty != 100"

    match = re.search('ngridq="(.+?)"', input_xml)
    q_grid = [int(q) for q in re.findall(r'\d+', match.group())]
    assert q_grid == [8, 8, 8], "q_grid != [8, 8, 8]"

    # Slurm script settings
    env_vars = OrderedDict([
        ('EXE', '/users/sol/abuccheri/exciting/bin/excitingmpismp'),
        ('OUT', 'terminal.out'), ('export MKL_NUM_THREADS', '1')
    ])

    module_envs = ['intel/2019']

    # MPI tasks per node (one per core, pure MPI)
    ntasks_per_node = 36

    # OMP threads per MPI rank
    cpus_per_task = 1

    # Nodes to use in scaling tests
    # Dune 3 only appears to have 10 nodes available from nodes 181 - 196
    nodes = np.arange(1, 10 + 1)

    # These nodes differ in memory or processor from the rest of Dune 3,
    # hence exclude 197 - 208
    exclude_nodes = ['node' + str(n) for n in range(197, 208 + 1)]

    # Timing in days, where key = node_count
    times = {
        1: [4, 0, 0, 0],
        2: [4, 0, 0, 0],
        3: [4, 0, 0, 0],
        4: [2, 0, 0, 0],
        5: [2, 0, 0, 0],
        6: [2, 0, 0, 0],
        7: [1, 0, 0, 0],
        8: [1, 0, 0, 0],
        9: [1, 0, 0, 0],
        10: [1, 0, 0, 0],
        11: [1, 0, 0, 0],
        12: [1, 0, 0, 0],
        13: [1, 0, 0, 0],
        14: [1, 0, 0, 0]
    }

    for node_count in nodes:
        job_dir = scaling_root + '/n_nodes_' + str(node_count)
        print("Writing files to:", job_dir)

        Path(job_dir).mkdir(parents=True, exist_ok=True)

        write_file(job_dir + '/input.xml', input_xml)
        write_file(job_dir + '/Zr.xml', zr_basis_xml)
        write_file(job_dir + '/O.xml', o_basis_xml)

        slurm_directives = slurm.set_slurm_directives(
            job_name='scaling-pure-mpi-GW',
            time=times[node_count],
            partition='all',
            exclusive=True,
            nodes=node_count,
            ntasks_per_node=ntasks_per_node,
            cpus_per_task=cpus_per_task,
            hint='nomultithread',
            exclude=exclude_nodes)
        write_file(
            job_dir + '/run.sh',
            slurm.set_slurm_script(slurm_directives, env_vars, module_envs))

    return
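
A usage sketch for the two scaling set-ups (paths illustrative, following the gw_benchmarks tree referenced in the docstrings):

set_up_pure_mpi_scaling_tests('/users/sol/abuccheri/gw_benchmarks/scaling/pure_mpi')
set_up_omp_mpi_scaling_tests('/users/sol/abuccheri/gw_benchmarks/scaling/omp_mpi')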