Code example #1
    def get_pseudos(options):
        """
        Find pseudos in paths, return :class:`PseudoTable` object sorted by atomic number Z.
        Accepts filepaths or directory.
        """
        exts=("psp8",)

        paths = options.pseudos
        if len(paths) == 1 and os.path.isdir(paths[0]):
            top = paths[0]
            paths = find_exts(top, exts, exclude_dirs="_*")
            #table = PseudoTable.from_dir(paths[0])

        pseudos = []
        for p in paths:
            try:
                pseudos.append(Pseudo.from_file(p))
            except Exception as exc:
                warn("Error in %s:\n%s" % (p, exc))

        table = PseudoTable(pseudos)

        # Here we select a subset of pseudos according to family or rows
        if options.rows:
            table = table.select_rows(options.rows)
        elif options.family:
            table = table.select_families(options.family)

        if options.symbols:
            table = table.select_symbols(options.symbols)

        return table.sort_by_z()
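
A minimal usage sketch (not part of the original script), assuming get_pseudos and its helpers (find_exts, Pseudo, PseudoTable) are importable from the module above; the directory and filter values are hypothetical. Since the function only reads attributes from options, an argparse.Namespace is sufficient:

# Hedged usage sketch for get_pseudos: the attribute names mirror the ones read above.
from argparse import Namespace

options = Namespace(
    pseudos=["/path/to/psp8/dir"],  # a single directory is scanned for *.psp8 files
    rows=None,                      # e.g. [3, 4] to keep only periodic-table rows 3 and 4
    family=None,                    # e.g. ["alkaline"] to filter by a pseudo family
    symbols=["Si", "O"],            # keep only these elements
)
table = get_pseudos(options)        # PseudoTable sorted by atomic number Z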
Code example #2
File: strategies.py  Project: zacharygibbs/pymatgen
def select_pseudos(pseudos, structure, ret_table=True):
    """
    Given a list of pseudos and a pymatgen structure, extract the pseudopotentials 
    for the calculation (useful when we receive an entire periodic table).

    Raises:
        ValueError if no pseudo is found or multiple occurrences are found.
    """
    table = PseudoTable.astable(pseudos)

    pseudos = []
    for symbol in structure.types_of_specie:
        # Get the list of pseudopotentials in table from atom symbol.
        pseudos_for_type = table.pseudos_with_symbol(symbol)

        if not pseudos_for_type:
            raise ValueError("Cannot find pseudo for symbol %s" % symbol)

        if len(pseudos_for_type) > 1:
            raise ValueError("Find multiple pseudos for symbol %s" % symbol)

        pseudos.append(pseudos_for_type[0])

    if ret_table:
        return PseudoTable(pseudos)
    else:
        return pseudos
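
A usage sketch (not from the original module), assuming pseudo_files is a list of Pseudo objects, or a PseudoTable, covering at least the elements of the structure; the MgO cell below is only an illustration:

# Hypothetical rock-salt MgO cell used to exercise select_pseudos.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

structure = Structure(Lattice.cubic(4.21), ["Mg", "O"],
                      [[0, 0, 0], [0.5, 0.5, 0.5]])
table = select_pseudos(pseudo_files, structure)                  # PseudoTable
plist = select_pseudos(pseudo_files, structure, ret_table=False) # plain list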
Code example #3
File: input.py  Project: gmrigna/abipy
    def __init__(self,
                 pseudos,
                 pseudo_dir="",
                 structure=None,
                 ndtset=1,
                 comment="",
                 decorators=None):
        """
        Args:
            pseudos: String or list of string with the name of the pseudopotential files.
            pseudo_dir: Name of the directory where the pseudopotential files are located.
            structure: file with the structure, :class:`Structure` object, or dictionary with ABINIT geometry variables.
            ndtset: Number of datasets.
            comment: Optional string with a comment that will be placed at the beginning of the file.
            decorators: List of `AbinitInputDecorator` objects.
        """
        # Dataset[0] contains the global variables common to the different datasets
        # Dataset[1:ndtset+1] stores the variables specific to the different datasets.
        self._ndtset = ndtset

        self._datasets = []
        for i in range(ndtset + 1):
            dt0 = None
            if i > 0: dt0 = self._datasets[0]
            self._datasets.append(Dataset(index=i, dt0=dt0))

        self._datasets[0]["ndtset"] = ndtset

        # Setup of the pseudopotential files.
        if isinstance(pseudos, PseudoTable):
            self._pseudos = pseudos

        elif all(isinstance(p, Pseudo) for p in pseudos):
            self._pseudos = PseudoTable(pseudos)

        else:
            # String(s)
            pseudo_dir = os.path.abspath(pseudo_dir)
            pseudo_paths = [
                os.path.join(pseudo_dir, p) for p in list_strings(pseudos)
            ]

            missing = [p for p in pseudo_paths if not os.path.exists(p)]
            if missing:
                raise self.Error(
                    "Cannot find the following pseudopotential files:\n%s" %
                    str(missing))

            self._pseudos = PseudoTable(pseudo_paths)

        if structure is not None: self.set_structure(structure)
        if comment is not None: self.set_comment(comment)

        self._decorators = [] if not decorators else decorators
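
A construction sketch, assuming this __init__ belongs to abipy's AbiInput class (as suggested by the file name and by code example #13 below); the pseudo table, structure and variables are hypothetical:

# pseudo_table is a PseudoTable built elsewhere (e.g. by get_pseudos in example #1),
# so the filesystem check in the string branch above is never triggered.
inp = AbiInput(pseudos=pseudo_table, structure=structure, ndtset=2,
               comment="two-dataset ground-state run")
inp[1].set_variables(ecut=8)    # dataset-specific variable, cf. example #13
inp[2].set_variables(ecut=12)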
Code example #4
def num_valence_electrons(pseudos, structure):
    """
    Compute the number of valence electrons from
    a list of pseudopotentials and the crystalline structure.

    Args:
        pseudos:
            List of strings, list of pseudos or `PseudoTable` instance.
        structure:
            Pymatgen structure.

    Raises:
        ValueError if a pseudo cannot be found for one of the chemical symbols
        appearing in the structure, or if the input list contains more than
        one pseudo for such a symbol.
    """
    table = PseudoTable.astable(pseudos)

    valence = 0.0
    for site in structure:
        entries = table.pseudos_with_symbol(site.specie.symbol)
        if len(entries) != 1:
            raise ValueError("Found %d entries for symbol %s" % (len(entries), site.specie.symbol))
        valence += entries[0].Z_val

    return valence
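
A usage sketch with hypothetical names: for a two-atom silicon cell and a silicon pseudo that treats four valence electrons, the function returns 8.0:

# si_pseudos: list with a single Si Pseudo object; si_structure: two-atom Si cell.
nval = num_valence_electrons(si_pseudos, si_structure)
assert nval == len(si_structure) * si_pseudos[0].Z_val   # 2 * 4 = 8.0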
Code example #5
File: strategies.py  Project: brendaneng1/pymatgen
def select_pseudos(pseudos, structure, ret_table=True):
    """
    Given a list of pseudos and a pymatgen structure, extract the pseudopotentials 
    for the calculation (useful when we receive an entire periodic table).

    Raises:
        ValueError if no pseudo is found or multiple occurrences are found.
    """
    table = PseudoTable.astable(pseudos)

    pseudos = []
    for symbol in structure.types_of_specie:
        # Get the list of pseudopotentials in table from atom symbol.
        pseudos_for_type = table.pseudos_with_symbol(symbol)

        if not pseudos_for_type:
            raise ValueError("Cannot find pseudo for symbol %s" % symbol)

        if len(pseudos_for_type) > 1:
            raise ValueError("Found multiple pseudos for symbol %s" % symbol)

        pseudos.append(pseudos_for_type[0])

    if ret_table:
        return PseudoTable(pseudos)
    else:
        return pseudos
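
Both forks of select_pseudos rely on PseudoTable.astable to normalize the input. The helper itself is not shown here; a rough sketch of what that normalization has to do, inferred from the branching in code example #3 (not the actual pymatgen implementation):

def as_pseudo_table(pseudos):
    """Return a PseudoTable from a PseudoTable, a list of Pseudo objects, or filenames."""
    if isinstance(pseudos, PseudoTable):
        return pseudos
    if all(isinstance(p, Pseudo) for p in pseudos):
        return PseudoTable(pseudos)
    # Otherwise assume strings and parse each file into a Pseudo object.
    return PseudoTable([Pseudo.from_file(p) for p in list_strings(pseudos)])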
Code example #6
File: strategies.py  Project: zacharygibbs/pymatgen
def num_valence_electrons(pseudos, structure):
    """
    Compute the number of valence electrons from
    a list of pseudopotentials and the crystalline structure.

    Args:
        pseudos:
            List of strings, list of pseudos or `PseudoTable` instance.
        structure:
            Pymatgen structure.

    Raises:
        ValueError if a pseudo cannot be found for one of the chemical symbols
        appearing in the structure, or if the input list contains more than
        one pseudo for such a symbol.
    """
    table = PseudoTable.astable(pseudos)

    valence = 0.0
    for site in structure:
        entries = table.pseudos_with_symbol(site.specie.symbol)
        if len(entries) != 1:
            raise ValueError("Found %d entries for symbol %s" %
                             (len(entries), site.specie.symbol))
        valence += entries[0].Z_val

    return valence
Code example #7
File: structure.py  Project: kidaa/abipy
    def num_valence_electrons(self, pseudos):
        """
        Returns the number of valence electrons.

        Args:
            pseudos: List of :class:`Pseudo` objects or list of filenames.
        """
        nval, table = 0, PseudoTable.as_table(pseudos)
        for site in self:
            pseudo = table.pseudo_with_symbol(site.species_string)
            nval += pseudo.Z_val

        return nval
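
A usage sketch, assuming self/structure is an abipy Structure (file: structure.py) and the pseudopotential files exist on disk; the filenames are hypothetical:

# Accepts Pseudo objects or filenames, per the docstring above.
nval = structure.num_valence_electrons(["Si.psp8", "O.psp8"])
# A common follow-up (cf. get_bands in example #14): nband ~ nval / 2 plus a buffer.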
Code example #8
    def num_valence_electrons(self, pseudos):
        """
        Returns the number of valence electrons.

        Args:
            pseudos: List of :class:`Pseudo` objects or list of filenames.
        """
        nval, table = 0, PseudoTable.as_table(pseudos)
        for site in self:
            pseudo = table.pseudo_with_symbol(site.species_string)
            nval += pseudo.Z_val

        return nval
Code example #9
File: GWworks.py  Project: gmrigna/abipy
    def __init__(self, structure, spec, option=None):
        self.structure = structure
        self.spec = spec
        self.option = option
        self.bands_fac = 1
        self.tests = self.__class__.get_defaults_tests()
        self.convs = self.__class__.get_defaults_convs()
        self.response_models = self.__class__.get_response_models()
        if self.option is None:
            self.all_converged = False
        elif len(self.option) == len(self.convs):
            self.all_converged = True
        else:
            self.all_converged = False
        path_add = '.conv' if self.all_converged else ''
        self.work_dir = s_name(self.structure) + path_add
        abi_pseudo = os.environ['ABINIT_PS_EXT']
        abi_pseudo_dir = os.environ['ABINIT_PS']
        pseudos = []
        for element in self.structure.composition.element_composition:
            pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
            pseudos.append(pseudo)
        self.pseudo_table = PseudoTable(pseudos)
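
The constructor resolves one pseudopotential per element from two environment variables. A sketch of the expected setup; the directory is hypothetical and the extension is just one possibility:

import os

os.environ["ABINIT_PS"] = "/path/to/pseudo/dir"  # directory containing the pseudos
os.environ["ABINIT_PS_EXT"] = ".psp8"            # suffix appended to the element symbol
# For a structure containing Si this resolves /path/to/pseudo/dir/Si.psp8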
Code example #10
File: structure.py  Project: GkAntonius/abipy
    def num_valence_electrons(self, pseudos):
        """
        Returns the number of valence electrons.

        Args:
            pseudos:
                List of `Pseudo` objects or list of pseudopotential filenames.
        """
        table = PseudoTable.as_table(pseudos)

        nval = 0
        for site in self:
            symbol = site.species_string
            pseudos = table.pseudos_with_symbol(symbol)
            assert len(pseudos) == 1
            nval += pseudos[0].Z_val 

        return nval
Code example #11
File: strategies.py  Project: akashneo/pymatgen
def select_pseudos(pseudos, structure):
    """
    Given a list of pseudos and a pymatgen structure, extract the pseudopotentials 
    for the calculation (useful when we receive an entire periodic table).

    Raises:
        ValueError if no pseudo is found or multiple occurrences are found.
    """
    table = PseudoTable.astable(pseudos)

    pseudos = []
    for typ in structure.types_of_specie:
        # Get the list of pseudopotentials in table from atom symbol.
        pseudos_for_type = table.pseudos_with_symbol(typ)

        if pseudos_for_type is None or len(pseudos_for_type) != 1:
            raise ValueError("Cannot find unique pseudo for type %s" % typ)

        pseudos.append(pseudos_for_type[0])

    return PseudoTable(pseudos)
Code example #12
File: GWworkflows.py  Project: malusdiaz/pymatgen
    def __init__(self, structure, spec, option=None):
        self.structure = structure
        self.spec = spec
        self.option = option
        self.tests = self.__class__.get_defaults_tests()
        self.convs = self.__class__.get_defaults_convs()
        self.response_models = self.__class__.get_response_models()
        if self.option is None:
            self.all_converged = False
        elif len(self.option) == len(self.convs):
            self.all_converged = True
        else:
            self.all_converged = False
        path_add = '.conv' if self.all_converged else ''
        self.work_dir = s_name(self.structure) + path_add
        abi_pseudo = os.environ['ABINIT_PS_EXT']
        abi_pseudo_dir = os.environ['ABINIT_PS']
        pseudos = []
        for element in self.structure.composition.element_composition:
            pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
            pseudos.append(pseudo)
        self.pseudo_table = PseudoTable(pseudos)
Code example #13
def scf_ph_inputs(structure, options):
    """
    This function constructs the input files for the phonon calculation: 
    GS input + the input files for the phonon calculation.
    """

    abi_pseudo = os.environ['ABINIT_PS_EXT']
    abi_pseudo_dir = os.environ['ABINIT_PS']
    pseudos = []
    for element in structure.composition.element_composition:
        pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
        pseudos.append(pseudo)
    pseudos = PseudoTable(pseudos)

    #print('bounds:\n', structure.calc_kptbounds)
    #print('ngkpt:\n', structure.calc_ngkpt(4))
    print('ks:\n',
          structure.calc_ksampling(4))  # try to get the qpoints from this ...

    qptbounds = structure.calc_kptbounds()
    qptbounds = np.reshape(qptbounds, (-1, 3))

    # List of q-points for the phonon calculation.
    qpoints = [
        0.00000000E+00,
        0.00000000E+00,
        0.00000000E+00,
        2.50000000E-01,
        0.00000000E+00,
        0.00000000E+00,
        2.50000000E-01,
        0.00000000E+00,
        2.50000000E+00,
        5.00000000E-01,
        0.00000000E+00,
        0.00000000E+00,
        2.50000000E-01,
        2.50000000E-01,
        0.00000000E+00,
        5.00000000E-01,
        2.50000000E-01,
        0.00000000E+00,
        -2.50000000E-01,
        2.50000000E-01,
        0.00000000E+00,
        5.00000000E-01,
        5.00000000E-01,
        0.00000000E+00,
        0.00000000E+00,
        0.00000000E+00,
        2.50000000E-01,
        -2.50000000E-01,
        5.00000000E-01,
        2.50000000E-01,
    ]
    qpoints2 = [
        0.00000000E+00,
        0.00000000E+00,
        0.00000000E+00,
        5.00000000E-01,
        0.00000000E+00,
        0.00000000E+00,
        0.00000000E-01,
        5.00000000E-01,
        0.00000000E+00,
        0.00000000E+00,
        0.00000000E+00,
        5.00000000E-01,
        5.00000000E-01,
        5.00000000E-01,
        0.00000000E+00,
        0.00000000E+00,
        5.00000000E-01,
        5.00000000E-01,
        5.00000000E-01,
        0.00000000E+00,
        5.00000000E-01,
        5.00000000E-01,
        5.00000000E-01,
        5.00000000E-01,
    ]

    qpoints = np.reshape(qpoints, (-1, 3))
    qpoints = unique_rows(np.concatenate((qpoints, qptbounds), axis=0))

    if os.path.isfile('qpoints'):
        f = open('qpoints', 'r')
        qpoints = np.reshape(ast.literal_eval(f.read()), (-1, 3))
        f.close()

    # Global variables used both for the GS and the DFPT run.
    global_vars = dict(istwfk='*1',
                       ecut=16.0,
                       ngkpt=[8, 8, 8],
                       shiftk=[0, 0, 0],
                       paral_kgb=0,
                       nstep=200)

    global_vars.update(options)

    to_vecs(global_vars)

    inp = abilab.AbiInput(pseudos=pseudos, ndtset=1 + len(qpoints))

    inp.set_structure(structure)
    inp.set_variables(**global_vars)

    inp[1].set_variables(tolwfr=1.0e-18, prtden=1, paral_kgb=1)

    for i, qpt in enumerate(qpoints):
        # Response-function calculation for phonons.
        inp[i + 2].set_variables(
            tolvrs=1.0e-10,
            kptopt=3,
            iscf=5,
            rfphon=1,  # Will consider phonon-type perturbation
            nqpt=1,  # One wavevector is to be considered
            qpt=qpt,  # The wavevector of this perturbation (one entry of qpoints)
        )

        #rfatpol   1 1   # Only the first atom is displaced
        #rfdir   1 0 0   # Along the first reduced coordinate axis
        #kptopt   2      # Automatic generation of k points, taking

    # Split input into gs_inp and ph_inputs
    return inp.split_datasets()
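
A usage sketch (hypothetical options): the options dictionary is merged into the global ABINIT variables, the first returned input drives the ground-state run and the remaining ones the DFPT phonon runs, one per q-point:

# split_datasets() returns one input object per dataset defined above.
inputs = scf_ph_inputs(structure, options={"ecut": 20.0, "ngkpt": [4, 4, 4]})
gs_inp, ph_inputs = inputs[0], inputs[1:]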
Code example #14
File: GWworks.py  Project: image-tester/pymatgen
class SingleAbinitGWWork():
    """
    GW workflow for Abinit
    """
    RESPONSE_MODELS = ["cd", "godby", "hybersten", "linden", "farid"]
    TESTS = {'ecuteps': {'test_range': (10, 14), 'method': 'direct', 'control': "gap", 'level': "sigma"},
             'nscf_nbands': {'test_range': (30, 40), 'method': 'set_bands', 'control': "gap", 'level': "nscf"},
             'response_model': {'test_range': RESPONSE_MODELS, 'method': 'direct', 'control': 'gap', 'level': 'screening'}}
    # scf level tests are run independently; the last value will be used in the nscf and sigma tests
    #'test': {'test_range': (1, 2, 3), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
    CONVS = {'ecut': {'test_range': (52, 48, 44), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
             'ecuteps': {'test_range': (4, 8, 12, 16, 20), 'method': 'direct', 'control': "gap", 'level': "sigma"},
             'nscf_nbands': {'test_range': (5, 10, 20, 30), 'method': 'set_bands', 'control': "gap", 'level': "nscf"}}

    def __init__(self, structure, spec, option=None):
        self.structure = structure
        self.spec = spec
        self.option = option
        self.bands_fac = 1
        self.tests = self.__class__.get_defaults_tests()
        self.convs = self.__class__.get_defaults_convs()
        self.response_models = self.__class__.get_response_models()
        if self.option is None:
            self.all_converged = False
        elif len(self.option) == len(self.convs):
            self.all_converged = True
        else:
            self.all_converged = False
        path_add = '.conv' if self.all_converged else ''
        self.work_dir = s_name(self.structure)+path_add
        abi_pseudo = os.environ['ABINIT_PS_EXT']
        abi_pseudo_dir = os.environ['ABINIT_PS']
        pseudos = []
        for element in self.structure.composition.element_composition:
            pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
            pseudos.append(pseudo)
        self.pseudo_table = PseudoTable(pseudos)

    @classmethod
    def get_defaults_tests(cls):
        return copy.deepcopy(cls.TESTS)

    @classmethod
    def get_defaults_convs(cls):
        return copy.deepcopy(cls.CONVS)

    @classmethod
    def get_response_models(cls):
        return copy.deepcopy(cls.RESPONSE_MODELS)

    def get_electrons(self, structure):
        """
        Method for retrieving the number of valence electrons
        """
        electrons = 0

        for element in structure.species:
            entries = self.pseudo_table.pseudos_with_symbol(element.symbol)
            assert len(entries) == 1
            pseudo = entries[0]
            electrons += pseudo.Z_val
        return electrons

    def get_bands(self, structure):
        """
        Method for retrieving the standard number of bands
        """
        bands = self.get_electrons(structure) / 2 + len(structure)
        return int(bands)

    def get_work_dir(self):
        name = s_name(self.structure)
        if not self.all_converged:
            return str(name) + '_' + str(self.option['test']) + '_' + str(self.option['value'])
        else:
            return str(name)

    def create(self):
        """
        create single abinit G0W0 flow
        """
        manager = 'slurm' if 'ceci' in self.spec['mode'] else 'shell'
        # an AbiStructure object has an overwritten version of get_sorted_structure that sorts according to Z
        # this could also be pulled into the constructor of Abistructure
        #abi_structure = self.structure.get_sorted_structure()
        from abipy import abilab
        item = copy.copy(self.structure.item)
        self.structure.__class__ = abilab.Structure
        self.structure = self.structure.get_sorted_structure_z()
        self.structure.item = item
        abi_structure = self.structure
        manager = TaskManager.from_user_config()
        # Initialize the flow.
        flow = Flow(self.work_dir, manager, pickle_protocol=0)
        # flow = Flow(self.work_dir, manager)

        # kpoint grid defined over density 40 > ~ 3 3 3
        if self.spec['converge'] and not self.all_converged:
            # (2x2x2) gamma centered mesh for the convergence test on nbands and ecuteps
            # if kp_in is present in the specs, a kp_in x kp_in x kp_in mesh is used for the convergence study
            if 'kp_in' in self.spec.keys():
                if self.spec['kp_in'] > 9:
                    print('WARNING:\nkp_in should be < 10 to generate an n x n x n mesh\nfor larger values a grid with '
                          'density kp_in will be generated')
                scf_kppa = self.spec['kp_in']
            else:
                scf_kppa = 2
        else:
            # use the specified density for the final calculation with the converged nbands and ecuteps of other
            # stand alone calculations
            scf_kppa = self.spec['kp_grid_dens']
        gamma = True

        # 'standard' parameters for stand alone calculation
        nb = self.get_bands(self.structure)
        nscf_nband = [10 * nb]

        nksmall = None
        ecuteps = [8]
        ecutsigx = 44

        extra_abivars = dict(
            paral_kgb=1,
            inclvkb=2,
            ecut=44,
            pawecutdg=88,
            gwmem='10',
            getden=-1,
            istwfk="*1",
            timopt=-1,
            nbdbuf=8
        )

        # read user defined extra abivars from file  'extra_abivars' should be dictionary
        extra_abivars.update(read_extra_abivars())
        #self.bands_fac = 0.5 if 'gwcomp' in extra_abivars.keys() else 1
        #self.convs['nscf_nbands']['test_range'] = tuple([self.bands_fac*x for x in self.convs['nscf_nbands']['test_range']])

        response_models = ['godby']
        if 'ppmodel' in extra_abivars.keys():
            response_models = [extra_abivars.pop('ppmodel')]

        if self.option is not None:
            for k in self.option.keys():
                if k in ['ecuteps', 'nscf_nbands']:
                    pass
                else:
                    extra_abivars.update({k: self.option[k]})
                    if k == 'ecut':
                        extra_abivars.update({'pawecutdg': self.option[k]*2})

        try:
            grid = read_grid_from_file(s_name(self.structure)+".full_res")['grid']
            all_done = read_grid_from_file(s_name(self.structure)+".full_res")['all_done']
            workdir = os.path.join(s_name(self.structure), 'w'+str(grid))
        except (IOError, OSError):
            grid = 0
            all_done = False
            workdir = None

        if not all_done:
            if (self.spec['test'] or self.spec['converge']) and not self.all_converged:
                if self.spec['test']:
                    print('| setting test calculation')
                    tests = SingleAbinitGWWork(self.structure, self.spec).tests
                    response_models = []
                else:
                    if grid == 0:
                        print('| setting convergence calculations for grid 0')
                        #tests = SingleAbinitGWWorkFlow(self.structure, self.spec).convs
                        tests = self.convs
                    else:
                        print('| extending grid')
                        #tests = expand(SingleAbinitGWWorkFlow(self.structure, self.spec).convs, grid)
                        tests = expand(self.convs, grid)
                ecuteps = []
                nscf_nband = []
                for test in tests:
                    if tests[test]['level'] == 'scf':
                        if self.option is None:
                            extra_abivars.update({test + '_s': tests[test]['test_range']})
                        elif test in self.option:
                            extra_abivars.update({test: self.option[test]})
                        else:
                            extra_abivars.update({test + '_s': tests[test]['test_range']})
                    else:
                        for value in tests[test]['test_range']:
                            if test == 'nscf_nbands':
                                nscf_nband.append(value * self.get_bands(self.structure))
                                #scr_nband takes nscf_nbands if not specified
                                #sigma_nband takes scr_nbands if not specified
                            if test == 'ecuteps':
                                ecuteps.append(value)
                            if test == 'response_model':
                                response_models.append(value)
            elif self.all_converged:
                print('| setting up for testing the converged values at the high kp grid ')
                # add a bandstructure and dos calculation
                nksmall = 30
                # in this case a convergence study has already been performed.
                # The resulting parameters are passed as option
                ecuteps = [self.option['ecuteps'], self.option['ecuteps'] + self.convs['ecuteps']['test_range'][1] -
                                                   self.convs['ecuteps']['test_range'][0]]
                nscf_nband = [self.option['nscf_nbands'], self.option['nscf_nbands'] + self.convs['nscf_nbands'][
                    'test_range'][1] - self.convs['nscf_nbands']['test_range'][0]]
                # for option in self.option:
                #    if option not in ['ecuteps', 'nscf_nband']:
                #        extra_abivars.update({option + '_s': self.option[option]})
        else:
            print('| all is done for this material')
            return

        logger.info('ecuteps : %s', ecuteps)
        logger.info('extra   : %s', extra_abivars)
        logger.info('nscf_nb : %s', nscf_nband)

        work = g0w0_extended(abi_structure, self.pseudo_table, scf_kppa, nscf_nband, ecuteps, ecutsigx,
                             accuracy="normal", spin_mode="unpolarized", smearing=None, response_models=response_models,
                             charge=0.0, sigma_nband=None, scr_nband=None, gamma=gamma, nksmall=nksmall, **extra_abivars)

        flow.register_work(work, workdir=workdir)

        return flow.allocate()

    def create_job_file(self, serial=True):
        """
        Create the jobfile for starting all schedulers manually
        serial = True creates a list that can be submitted as a job that runs all schedulers as a batch job
        (the job header needs to be added)
        serial = False creates a list that can be used to start all schedulers on the frontend in the background
        """
        job_file = open("job_collection", mode='a')
        if serial:
            job_file.write('abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log\n')
        else:
            job_file.write('nohup abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log & \n')
            job_file.write('sleep 2\n')
        job_file.close()
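
A driver sketch with a hypothetical spec dictionary; the keys are the ones read above ('mode', 'test', 'converge', 'kp_grid_dens'), and structure is assumed to carry the extra attributes the workflow expects (e.g. the .item attribute used in create()):

spec = {"mode": "ceci", "test": False, "converge": True, "kp_grid_dens": 500}
work = SingleAbinitGWWork(structure, spec)
flow = work.create()            # allocated Flow, or None if everything is already done
if flow is not None:
    work.create_job_file(serial=True)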
Code example #15
File: GWworks.py  Project: gmrigna/abipy
class SingleAbinitGWWork():
    """
    GW flow for Abinit
    """
    RESPONSE_MODELS = ["cd", "godby", "hybersten", "linden", "farid"]
    TESTS = {
        'ecuteps': {
            'test_range': (10, 14),
            'method': 'direct',
            'control': "gap",
            'level': "sigma"
        },
        'nscf_nbands': {
            'test_range': (30, 40),
            'method': 'set_bands',
            'control': "gap",
            'level': "nscf"
        },
        'response_model': {
            'test_range': RESPONSE_MODELS,
            'method': 'direct',
            'control': 'gap',
            'level': 'screening'
        }
    }
    # scf level tests are run independently; the last value will be used in the nscf and sigma tests
    #'test': {'test_range': (1, 2, 3), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
    CONVS = {
        'ecut': {
            'test_range': (50, 48, 46, 44),
            'method': 'direct',
            'control': "e_ks_max",
            'level': "scf"
        },
        'ecuteps': {
            'test_range': (4, 8, 12, 16, 20),
            'method': 'direct',
            'control': "gap",
            'level': "sigma"
        },
        'nscf_nbands': {
            'test_range': (5, 10, 20, 30),
            'method': 'set_bands',
            'control': "gap",
            'level': "nscf"
        }
    }

    def __init__(self, structure, spec, option=None):
        self.structure = structure
        self.spec = spec
        self.option = option
        self.bands_fac = 1
        self.tests = self.__class__.get_defaults_tests()
        self.convs = self.__class__.get_defaults_convs()
        self.response_models = self.__class__.get_response_models()
        if self.option is None:
            self.all_converged = False
        elif len(self.option) == len(self.convs):
            self.all_converged = True
        else:
            self.all_converged = False
        path_add = '.conv' if self.all_converged else ''
        self.work_dir = s_name(self.structure) + path_add
        abi_pseudo = os.environ['ABINIT_PS_EXT']
        abi_pseudo_dir = os.environ['ABINIT_PS']
        pseudos = []
        for element in self.structure.composition.element_composition:
            pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
            pseudos.append(pseudo)
        self.pseudo_table = PseudoTable(pseudos)

    @classmethod
    def get_defaults_tests(cls):
        return copy.deepcopy(cls.TESTS)

    @classmethod
    def get_defaults_convs(cls):
        return copy.deepcopy(cls.CONVS)

    @classmethod
    def get_response_models(cls):
        return copy.deepcopy(cls.RESPONSE_MODELS)

    def get_electrons(self, structure):
        """
        Method for retrieving the number of valence electrons
        """
        electrons = 0

        for element in structure.species:
            entries = self.pseudo_table.pseudos_with_symbol(element.symbol)
            assert len(entries) == 1
            pseudo = entries[0]
            electrons += pseudo.Z_val
        return electrons

    def get_bands(self, structure):
        """
        Method for retrieving the standard number of bands
        """
        bands = self.get_electrons(structure) / 2 + len(structure)
        return int(bands)

    def get_work_dir(self):
        name = s_name(self.structure)
        if not self.all_converged:
            return str(name) + '_' + str(self.option['test']) + '_' + str(
                self.option['value'])
        else:
            return str(name)

    def create(self):
        """
        create single abinit G0W0 flow
        """
        manager = 'slurm' if 'ceci' in self.spec['mode'] else 'shell'
        # an AbiStructure object has an overwritten version of get_sorted_structure that sorts according to Z
        # this could also be pulled into the constructor of Abistructure
        #abi_structure = self.structure.get_sorted_structure()
        from abipy import abilab
        item = copy.copy(self.structure.item)
        self.structure.__class__ = abilab.Structure
        self.structure = self.structure.get_sorted_structure_z()
        self.structure.item = item
        abi_structure = self.structure
        manager = TaskManager.from_user_config()
        # Initialize the flow.
        flow = Flow(self.work_dir, manager, pickle_protocol=0)
        # flow = Flow(self.work_dir, manager)

        # kpoint grid defined over density 40 > ~ 3 3 3
        if self.spec['converge'] and not self.all_converged:
            # (2x2x2) gamma centered mesh for the convergence test on nbands and ecuteps
            # if kp_in is present in the specs, a kp_in x kp_in x kp_in mesh is used for the convergence study
            if 'kp_in' in self.spec.keys():
                if self.spec['kp_in'] > 9:
                    print(
                        'WARNING:\nkp_in should be < 10 to generate an n x n x n mesh\nfor larger values a grid with '
                        'density kp_in will be generated')
                scf_kppa = self.spec['kp_in']
            else:
                scf_kppa = 2
        else:
            # use the specified density for the final calculation with the converged nbands and ecuteps of other
            # stand alone calculations
            scf_kppa = self.spec['kp_grid_dens']
        gamma = True

        # 'standard' parameters for stand alone calculation
        scf_nband = self.get_bands(self.structure)
        nscf_nband = [10 * scf_nband]

        nksmall = None
        ecuteps = [8]
        ecutsigx = 44

        extra_abivars = dict(paral_kgb=1,
                             inclvkb=2,
                             ecut=44,
                             pawecutdg=88,
                             gwmem='10',
                             getden=-1,
                             istwfk="*1",
                             timopt=-1,
                             nbdbuf=8,
                             prtsuscep=0)

        # read user defined extra abivars from file  'extra_abivars' should be dictionary
        extra_abivars.update(read_extra_abivars())
        #self.bands_fac = 0.5 if 'gwcomp' in extra_abivars.keys() else 1
        #self.convs['nscf_nbands']['test_range'] = tuple([self.bands_fac*x for x in self.convs['nscf_nbands']['test_range']])

        response_models = ['godby']
        if 'ppmodel' in extra_abivars.keys():
            response_models = [extra_abivars.pop('ppmodel')]

        if self.option is not None:
            for k in self.option.keys():
                if k in ['ecuteps', 'nscf_nbands']:
                    pass
                else:
                    extra_abivars.update({k: self.option[k]})
                    if k == 'ecut':
                        extra_abivars.update({'pawecutdg': self.option[k] * 2})

        try:
            grid = read_grid_from_file(s_name(self.structure) +
                                       ".full_res")['grid']
            all_done = read_grid_from_file(
                s_name(self.structure) + ".full_res")['all_done']
            workdir = os.path.join(s_name(self.structure), 'w' + str(grid))
        except (IOError, OSError):
            grid = 0
            all_done = False
            workdir = None

        if not all_done:
            if (self.spec['test']
                    or self.spec['converge']) and not self.all_converged:
                if self.spec['test']:
                    print('| setting test calculation')
                    tests = SingleAbinitGWWork(self.structure, self.spec).tests
                    response_models = []
                else:
                    if grid == 0:
                        print('| setting convergence calculations for grid 0')
                        #tests = SingleAbinitGWWorkFlow(self.structure, self.spec).convs
                        tests = self.convs
                    else:
                        print('| extending grid')
                        #tests = expand(SingleAbinitGWWorkFlow(self.structure, self.spec).convs, grid)
                        tests = expand(self.convs, grid)
                ecuteps = []
                nscf_nband = []
                for test in tests:
                    if tests[test]['level'] == 'scf':
                        if self.option is None:
                            extra_abivars.update(
                                {test + '_s': tests[test]['test_range']})
                        elif test in self.option:
                            extra_abivars.update({test: self.option[test]})
                        else:
                            extra_abivars.update(
                                {test + '_s': tests[test]['test_range']})
                    else:
                        for value in tests[test]['test_range']:
                            if test == 'nscf_nbands':
                                nscf_nband.append(
                                    value * self.get_bands(self.structure))
                                #scr_nband takes nscf_nbands if not specified
                                #sigma_nband takes scr_nbands if not specified
                            if test == 'ecuteps':
                                ecuteps.append(value)
                            if test == 'response_model':
                                response_models.append(value)
            elif self.all_converged:
                print(
                    '| setting up for testing the converged values at the high kp grid '
                )
                # add a bandstructure and dos calculation
                if os.path.isfile('bands'):
                    nksmall = -30
                    #negative value > only bandstructure
                else:
                    nksmall = 30
                # in this case a convergence study has already been performed.
                # The resulting parameters are passed as option
                ecuteps = [
                    self.option['ecuteps'], self.option['ecuteps'] +
                    self.convs['ecuteps']['test_range'][1] -
                    self.convs['ecuteps']['test_range'][0]
                ]
                nscf_nband = [
                    self.option['nscf_nbands'], self.option['nscf_nbands'] +
                    self.convs['nscf_nbands']['test_range'][1] -
                    self.convs['nscf_nbands']['test_range'][0]
                ]
                # for option in self.option:
                #    if option not in ['ecuteps', 'nscf_nband']:
                #        extra_abivars.update({option + '_s': self.option[option]})
        else:
            print('| all is done for this material')
            return

        logger.info('ecuteps : %s', ecuteps)
        logger.info('extra   : %s', extra_abivars)
        logger.info('nscf_nb : %s', nscf_nband)

        work = g0w0_extended_work(abi_structure,
                                  self.pseudo_table,
                                  scf_kppa,
                                  nscf_nband,
                                  ecuteps,
                                  ecutsigx,
                                  scf_nband,
                                  accuracy="normal",
                                  spin_mode="unpolarized",
                                  smearing=None,
                                  response_models=response_models,
                                  charge=0.0,
                                  sigma_nband=None,
                                  scr_nband=None,
                                  gamma=gamma,
                                  nksmall=nksmall,
                                  **extra_abivars)

        flow.register_work(work, workdir=workdir)

        return flow.allocate()

    def create_job_file(self, serial=True):
        """
        Create the jobfile for starting all schedulers manually
        serial = True creates a list that can be submitted as a job that runs all schedulers as a batch job
        (the job header needs to be added)
        serial = False creates a list that can be used to start all schedulers on the frontend in the background
        """
        job_file = open("job_collection", mode='a')
        if serial:
            job_file.write('abirun.py ' + self.work_dir + ' scheduler > ' +
                           self.work_dir + '.log\n')
            job_file.write('rm ' + self.work_dir + '/w*/t*/outdata/out_SCR\n')
        else:
            job_file.write('nohup abirun.py ' + self.work_dir +
                           ' scheduler > ' + self.work_dir + '.log & \n')
            job_file.write('sleep 2\n')
        job_file.close()
Code example #16
File: df.py  Project: rahul1126/pseudo_dojo
def main():
    top = "."
    try:
        top = sys.argv[1]
    except IndexError:
        top = "."

    pseudos = []
    for dirpath, dirnames, filenames in os.walk(top):
        # Exclude pseudos in _inputs
        if os.path.basename(dirpath) == "_inputs": continue

        #print(filenames)
        pseudos.extend([
            Pseudo.from_file(os.path.join(dirpath, f)) for f in filenames
            if f.endswith(".psp8")
        ])
        #if f.endswith(".psp8") and "-" not in f])
        #if f.endswith(".psp8") and "-" in f])
    #print(pseudos)

    from pymatgen.io.abinitio.pseudos import PseudoTable
    pseudos = PseudoTable(pseudos)
    data, errors = pseudos.get_dojo_dataframe()
    print(data)

    if errors:
        print("ERRORS:")
        pprint(errors)

    accuracies = ["low", "normal", "high"]
    keys = [
        "dfact_meV", "v0", "b0_GPa", "b1", "ecut", "fcc_a0_rel_err",
        "bcc_a0_rel_err"
    ]
    columns = ["symbol"] + [acc + "_" + k for k in keys for acc in accuracies]

    #print(columns)

    ##print(rows)
    #data = pd.DataFrame(rows, index=names, columns=columns)
    #data = data[data["high_dfact_meV"] <= data["high_dfact_meV"].mean()]
    #data = data[data["high_dfact_meV"] <= 9]

    def calc_rerrors(data):
        # Relative error
        data["low_dfact_abserr"] = data["low_dfact_meV"] - data[
            "high_dfact_meV"]
        data["normal_dfact_abserr"] = data["normal_dfact_meV"] - data[
            "high_dfact_meV"]
        data["low_dfact_rerr"] = 100 * (
            data["low_dfact_meV"] -
            data["high_dfact_meV"]) / data["high_dfact_meV"]
        data["normal_dfact_rerr"] = 100 * (
            data["normal_dfact_meV"] -
            data["high_dfact_meV"]) / data["high_dfact_meV"]

        for k in ["v0", "b0_GPa", "b1"]:
            data["low_" + k + "_abserr"] = data["low_" + k] - data["high_" + k]
            data["normal_" + k +
                 "_abserr"] = data["normal_" + k] - data["high_" + k]
            data["low_" + k +
                 "_rerr"] = 100 * (data["low_" + k] -
                                   data["high_" + k]) / data["high_" + k]
            data["normal_" + k +
                 "_rerr"] = 100 * (data["normal_" + k] -
                                   data["high_" + k]) / data["high_" + k]

        return data

    #import seaborn as sns
    import matplotlib.pyplot as plt
    #data = calc_rerrors(data)
    #g = sns.PairGrid(data, x_vars="Z", y_vars=[
    #    "low_ecut",
    #    "low_dfact_meV",
    #    #"normal_ecut",
    #    #"low_dfact_meV",
    #    #"high_dfact_meV",
    #    #"low_v0_rerr", "low_b0_GPa_rerr", "low_b1_rerr",
    #    ]
    #) #, hue="smoker")
    #g.map(plt.scatter)
    #g.add_legend()

    #data["high_dfact_meV"].hist(bins=200)
    #data["high_fcc_a0_rel_err"].hist(bins=200)
    #data["high_bcc_a0_rel_err"].hist(bins=200)

    keys = [
        "dfact_meV",
        #"dfactprime_meV",
        "bcc_a0_rel_err",
        "fcc_a0_rel_err",
        #"ecut",
    ]

    fig, ax_list = plt.subplots(nrows=len(keys),
                                ncols=1,
                                sharex=False,
                                squeeze=False)
    ax_list = ax_list.ravel()

    #kind = "density"
    kind = "scatter"
    kind = "line"
    zmin, zmax = 1, 110
    for ax, key in zip(ax_list, keys):
        for acc in accuracies:
            c = data[acc + "_" + key][data.Z <= zmax]
            c = c[data.Z >= zmin]
            c.plot(kind=kind, ax=ax, style="o-", legend=True)
            #c.hist(ax=ax, bins=200)

    plt.show()

    wrong = data[data["high_b1"] < 0]
    if not wrong.empty:
        print("WRONG".center(80, "*") + "\n", wrong)

    data = data[
        [acc + "_dfact_meV"
         for acc in accuracies] + [acc + "_ecut" for acc in accuracies]
        #+ [acc + "_fcc_a0_rel_err" for acc in accuracies]
        #+ [acc + "_bcc_a0_rel_err" for acc in accuracies]
    ]

    print("\nONCVPSP TABLE:\n")  #.center(80, "="))
    columns = [acc + "_dfact_meV" for acc in accuracies]
    columns += [acc + "_ecut" for acc in accuracies]
    #print(data.to_string(columns=columns))
    tablefmt = "grid"
    print(tabulate(data[columns], headers="keys", tablefmt=tablefmt))

    print("\nSTATS:\n")  #.center(80, "="))
    #print(data.describe())
    print(tabulate(data.describe(), headers="keys", tablefmt=tablefmt))

    bad = data[data["high_dfact_meV"] > data["high_dfact_meV"].mean()]
    print("\nPSEUDOS with high_dfact > mean:\n")  # ".center(80, "*"))
    print(tabulate(bad, headers="keys", tablefmt=tablefmt))
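
The same dataframe machinery can also be driven programmatically; a sketch under the assumption that a tree of .psp8 files is available and that get_dojo_dataframe behaves as used above (column names follow the acc + "_" + key convention of the script):

import os
from pymatgen.io.abinitio.pseudos import Pseudo, PseudoTable

paths = [os.path.join(d, f)
         for d, _, files in os.walk("/path/to/psp8/tree")
         for f in files if f.endswith(".psp8")]
frame, errors = PseudoTable([Pseudo.from_file(p) for p in paths]).get_dojo_dataframe()
print(frame[["high_dfact_meV", "high_ecut"]].describe())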
Code example #17
File: __init__.py  Project: gmrigna/abipy
def pseudos(*filenames):
    """Returns a PseudoTable constructed from the input filenames  located in tests/data/pseudos."""
    pseudos = list(map(pseudo, filenames))
    return PseudoTable(pseudos)
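
A usage sketch; the file names are examples of pseudos shipped in abipy's tests/data/pseudos directory and may differ between versions:

# Builds a PseudoTable from test pseudopotentials resolved by the pseudo() helper.
table = pseudos("14si.pspnc", "8o.pspnc")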
Code example #18
File: df.py  Project: davidwaroquiers/pseudo_dojo
def main():
    top = "."
    try:
        top = sys.argv[1]
    except IndexError:
        top = "."

    pseudos = []
    for dirpath, dirnames, filenames in os.walk(top):
        # Exclude pseudos in _inputs
        if os.path.basename(dirpath) == "_inputs": continue

        #print(filenames)
        pseudos.extend([Pseudo.from_file(os.path.join(dirpath, f)) for f in filenames 
            if f.endswith(".psp8")])
            #if f.endswith(".psp8") and "-" not in f])
            #if f.endswith(".psp8") and "-" in f])
    #print(pseudos)

    from pymatgen.io.abinitio.pseudos import PseudoTable
    pseudos = PseudoTable(pseudos)
    data, errors = pseudos.get_dojo_dataframe()
    print(data)

    if errors:
        print("ERRORS:")
        pprint(errors)

    accuracies = ["low", "normal", "high"]
    keys = ["dfact_meV", "v0", "b0_GPa", "b1", "ecut", "fcc_a0_rel_err", "bcc_a0_rel_err"]
    columns = ["symbol"] + [acc + "_" + k for k in keys for acc in accuracies]
    #print(columns)

    ##print(rows)
    #data = pd.DataFrame(rows, index=names, columns=columns)
    #data = data[data["high_dfact_meV"] <= data["high_dfact_meV"].mean()]
    #data = data[data["high_dfact_meV"] <= 9]

    def calc_rerrors(data):
        # Relative error
        data["low_dfact_abserr"] = data["low_dfact_meV"] - data["high_dfact_meV"]
        data["normal_dfact_abserr"] =  data["normal_dfact_meV"] - data["high_dfact_meV"]
        data["low_dfact_rerr"] = 100 * (data["low_dfact_meV"] - data["high_dfact_meV"]) / data["high_dfact_meV"]
        data["normal_dfact_rerr"] = 100 * (data["normal_dfact_meV"] - data["high_dfact_meV"]) / data["high_dfact_meV"]

        for k in ["v0", "b0_GPa", "b1"]:
            data["low_" + k + "_abserr"] = data["low_" + k] - data["high_" + k]
            data["normal_" + k + "_abserr"] = data["normal_" + k] - data["high_" + k]
            data["low_" + k + "_rerr"] = 100 * (data["low_" + k] - data["high_" + k]) / data["high_" + k]
            data["normal_" + k + "_rerr"] = 100 * (data["normal_" + k] - data["high_" + k]) / data["high_" + k]

        return data

    #import seaborn as sns
    import matplotlib.pyplot as plt
    #data = calc_rerrors(data)
    #g = sns.PairGrid(data, x_vars="Z", y_vars=[
    #    "low_ecut",
    #    "low_dfact_meV",
    #    #"normal_ecut",
    #    #"low_dfact_meV",
    #    #"high_dfact_meV", 
    #    #"low_v0_rerr", "low_b0_GPa_rerr", "low_b1_rerr",
    #    ]
    #) #, hue="smoker")
    #g.map(plt.scatter)
    #g.add_legend()

    #data["high_dfact_meV"].hist(bins=200)
    #data["high_fcc_a0_rel_err"].hist(bins=200)
    #data["high_bcc_a0_rel_err"].hist(bins=200)

    keys = [
        "dfact_meV", 
        #"dfactprime_meV", 
        "bcc_a0_rel_err", "fcc_a0_rel_err", 
        #"ecut",
    ]

    fig, ax_list = plt.subplots(nrows=len(keys), ncols=1, sharex=False, squeeze=False)
    ax_list = ax_list.ravel()

    #kind = "density"
    kind = "scatter"
    kind = "line"
    zmin, zmax = 1, 110
    for ax, key in zip(ax_list, keys):
        for acc in accuracies:
            c = data[acc + "_" + key][data.Z <= zmax]
            c = c[data.Z >= zmin]
            c.plot(kind=kind, ax=ax, style="o-", legend=True)
            #c.hist(ax=ax, bins=200)

    plt.show()

    wrong = data[data["high_b1"] < 0]
    if not wrong.empty:
        print("WRONG".center(80, "*") + "\n", wrong)

    data = data[
        [acc + "_dfact_meV" for acc in accuracies]
        + [acc + "_ecut" for acc in accuracies]
        #+ [acc + "_fcc_a0_rel_err" for acc in accuracies]
        #+ [acc + "_bcc_a0_rel_err" for acc in accuracies]
    ]

    print("\nONCVPSP TABLE:\n") #.center(80, "="))
    columns = [acc + "_dfact_meV" for acc in accuracies] 
    columns += [acc + "_ecut" for acc in accuracies] 
    #print(data.to_string(columns=columns))
    tablefmt = "grid"
    print(tabulate(data[columns], headers="keys", tablefmt=tablefmt))

    print("\nSTATS:\n") #.center(80, "="))
    #print(data.describe())
    print(tabulate(data.describe(), headers="keys", tablefmt=tablefmt))

    bad = data[data["high_dfact_meV"] > data["high_dfact_meV"].mean()]
    print("\nPSEUDOS with high_dfact > mean:\n") # ".center(80, "*"))
    print(tabulate(bad, headers="keys", tablefmt=tablefmt))