def set_status(self, structure):
    """
    Reset the cached status for *structure* and refresh it from the on-disk
    ``<name>.full_res`` file when that file exists.

    Sets:
        self._grid      int, convergence-grid level (0 when no file found)
        self._all_done  bool, True when the convergence study is finished
        self._workdir   str or None, 'name/work_<grid>' when a file was read
        self._converged result of is_converged(False, structure)
    """
    self._grid = 0
    self._all_done = False
    self._workdir = None
    self._converged = is_converged(False, structure)
    try:
        # Parse the results file once; the original parsed it a second
        # time just to fetch 'all_done'.
        full_res = read_grid_from_file(s_name(structure) + ".full_res")
        self._grid = full_res['grid']
        self._all_done = full_res['all_done']
        self._workdir = os.path.join(s_name(structure), 'work_' + str(self.grid))
    except (IOError, OSError):
        # No results file yet: keep the defaults assigned above.
        pass
def set_status(self, structure):
    """
    Initialize the status attributes for *structure*, then update them from
    the ``<name>.full_res`` file if it is present on disk.

    Missing/unreadable files are not an error: defaults (grid 0, not done,
    no workdir) are kept.
    """
    self._grid = 0
    self._all_done = False
    self._workdir = None
    self._converged = is_converged(False, structure)
    try:
        # Read the file a single time instead of twice as before.
        full_res = read_grid_from_file(s_name(structure) + ".full_res")
        self._grid = full_res['grid']
        self._all_done = full_res['all_done']
        self._workdir = os.path.join(s_name(structure), 'work_' + str(self.grid))
    except (IOError, OSError):
        pass
def get_work_dir(self):
    """
    Return the working-directory name for this calculation.

    While still converging, the base name is suffixed with the parameter
    under test and its value; once converged the bare name is used.
    """
    base = str(s_name(self.structure))
    if self.all_converged:
        return base
    return '_'.join([base, str(self.option['test']), str(self.option['value'])])
def __init__(self, structure, spec, option=None):
    """
    Set up a GW work for *structure* according to *spec*.

    option: dict of already-determined convergence parameters, or None.
        When it holds an entry for every convergence parameter the work is
        considered fully converged and runs in a '.conv' directory.
    """
    self.structure = structure
    self.spec = spec
    self.option = option
    self.bands_fac = 1
    # Class-level defaults (subclasses may override the classmethods).
    self.tests = self.__class__.get_defaults_tests()
    self.convs = self.__class__.get_defaults_convs()
    self.response_models = self.__class__.get_response_models()
    # Converged only when 'option' provides one entry per convergence parameter.
    if self.option is None:
        self.all_converged = False
    elif len(self.option) == len(self.convs):
        self.all_converged = True
    else:
        self.all_converged = False
    path_add = '.conv' if self.all_converged else ''
    self.work_dir = s_name(self.structure)+path_add
    # Pseudopotential extension and directory are taken from the environment.
    try:
        abi_pseudo = os.environ['ABINIT_PS_EXT']
        abi_pseudo_dir = os.environ['ABINIT_PS']
    except KeyError:
        # NOTE(review): when either env var is missing both values become
        # None, and os.path.join(None, ...) below raises TypeError — confirm
        # whether a clearer error is wanted here.
        abi_pseudo = None
        abi_pseudo_dir = None
    # One pseudopotential file per element in the structure.
    pseudos = []
    for element in self.structure.composition.element_composition:
        pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
        pseudos.append(pseudo)
    self.pseudo_table = PseudoTable(pseudos)
def __init__(self, structure, spec, option=None):
    """
    Build a GW work object for *structure* following *spec*.

    option: dict with converged parameter values or None; the work counts
        as fully converged when every convergence parameter has an entry,
        in which case the work directory gets a '.conv' suffix.
    """
    self.structure = structure
    self.spec = spec
    self.option = option
    self.bands_fac = 1
    self.tests = self.__class__.get_defaults_tests()
    self.convs = self.__class__.get_defaults_convs()
    self.response_models = self.__class__.get_response_models()
    # One option entry per convergence parameter means fully converged.
    self.all_converged = (self.option is not None
                          and len(self.option) == len(self.convs))
    suffix = '.conv' if self.all_converged else ''
    self.work_dir = s_name(self.structure) + suffix
    # Pseudopotential settings come from the environment; a missing key
    # leaves both values as None.
    try:
        abi_pseudo = os.environ['ABINIT_PS_EXT']
        abi_pseudo_dir = os.environ['ABINIT_PS']
    except KeyError:
        abi_pseudo = None
        abi_pseudo_dir = None
    # One pseudopotential path per element in the composition.
    pseudos = [os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
               for element in self.structure.composition.element_composition]
    self.pseudo_table = PseudoTable(pseudos)
def __init__(self, structure, spec):
    """
    Container for the convergence data of a set of GW calculations on
    *structure* performed according to *spec*.
    """
    self.structure = structure
    self.spec = spec
    self.data = {}
    self.code_interface = get_code_interface(spec["code"])
    self.conv_res = {"control": {}, "values": {}, "derivatives": {}}
    self.full_res = {"all_done": False, "grid": 0}
    # Guard against a missing structure, consistent with the sibling
    # constructor in this file: s_name(None) would otherwise raise.
    if structure is not None:
        self.name = s_name(structure)
    else:
        self.name = "notknown"
    self.type = {"parm_scr": False, "full": False, "single": False, "test": False}
def print_results(self, structure, file_name='convergence_results'):
    """
    Append a one-line summary for *structure* to *file_name*:
    name, converged ecuteps, converged nscf_nbands (0.0 placeholders when
    no convergence results are available) and the GW gap (0.0 when absent).
    """
    data = GWConvergenceData(spec=self, structure=structure)
    if data.read_conv_res_from_file(os.path.join(s_name(structure)+'.res', s_name(structure)+'.conv_res')):
        s = '%s %s %s ' % (s_name(structure), str(data.conv_res['values']['ecuteps']),
                           str(data.conv_res['values']['nscf_nbands']))
    else:
        s = '%s 0.0 0.0 ' % s_name(structure)
    con_dat = self.code_interface.read_convergence_data(s_name(structure)+'.res')
    if con_dat is not None:
        s += '%s ' % con_dat['gwgap']
    else:
        s += '0.0 '
    s += '\n'
    # Context manager guarantees the handle is closed even if write fails;
    # the redundant str(s) wrapper was dropped (s is already a str).
    with open(file_name, 'a') as f:
        f.write(s)
def __init__(self, structure, spec):
    """
    Hold the convergence data of a set of GW calculations for *structure*
    run according to *spec*.
    """
    self.structure = structure
    self.spec = spec
    self.data = {}
    self.code_interface = get_code_interface(spec['code'])
    self.conv_res = {'control': {}, 'values': {}, 'derivatives': {}}
    self.full_res = {'all_done': False, 'grid': 0}
    # Fall back to a placeholder name when no structure is given.
    self.name = s_name(structure) if structure is not None else 'notknown'
    self.type = dict.fromkeys(('parm_scr', 'full', 'single', 'test'), False)
def print_results(self, structure, file_name="convergence_results"):
    """
    Append a one-line summary for *structure* to *file_name*: the name,
    converged ecuteps and nscf_nbands ("0.0 0.0" when unavailable), and
    the GW gap ("0.0" when unavailable).
    """
    data = GWConvergenceData(spec=self, structure=structure)
    if data.read_conv_res_from_file(os.path.join(s_name(structure) + ".res", s_name(structure) + ".conv_res")):
        s = "%s %s %s " % (
            s_name(structure),
            str(data.conv_res["values"]["ecuteps"]),
            str(data.conv_res["values"]["nscf_nbands"]),
        )
    else:
        s = "%s 0.0 0.0 " % s_name(structure)
    con_dat = self.code_interface.read_convergence_data(s_name(structure) + ".res")
    if con_dat is not None:
        s += "%s " % con_dat["gwgap"]
    else:
        s += "0.0 "
    s += "\n"
    # Use a context manager so the file is closed even if write() raises.
    with open(file_name, "a") as f:
        f.write(s)
def print_results(self, structure, file_name='convergence_results'):
    """
    Append a single summary line for *structure* to *file_name* containing
    the name, the converged ecuteps and nscf_nbands values (or '0.0 0.0'
    placeholders) and the GW gap (or '0.0').
    """
    data = GWConvergenceData(spec=self, structure=structure)
    if data.read_conv_res_from_file(
            os.path.join(s_name(structure) + '.res',
                         s_name(structure) + '.conv_res')):
        s = '%s %s %s ' % (s_name(structure),
                           str(data.conv_res['values']['ecuteps']),
                           str(data.conv_res['values']['nscf_nbands']))
    else:
        s = '%s 0.0 0.0 ' % s_name(structure)
    con_dat = self.code_interface.read_convergence_data(
        s_name(structure) + '.res')
    if con_dat is not None:
        s += '%s ' % con_dat['gwgap']
    else:
        s += '0.0 '
    s += '\n'
    # 'with' closes the file even on error; str(s) was redundant.
    with open(file_name, 'a') as f:
        f.write(s)
def __init__(self, structure, spec):
    """
    Convergence-data container for the GW calculations of *structure*
    driven by *spec*.
    """
    self.structure = structure
    self.spec = spec
    self.data = {}
    self.code_interface = get_code_interface(spec['code'])
    # Empty result scaffolding, filled in by the read/analysis methods.
    self.conv_res = {'control': {}, 'values': {}, 'derivatives': {}}
    self.full_res = {'all_done': False, 'grid': 0}
    if structure is None:
        self.name = 'notknown'
    else:
        self.name = s_name(structure)
    # Calculation-type flags; all off until set_type() classifies the run.
    self.type = {key: False for key in ('parm_scr', 'full', 'single', 'test')}
def execute_flow(self, structure, spec_data):
    """
    execute spec prepare input/jobfiles or submit to fw for a given structure
    for vasp the different jobs are created into a flow
    todo this should actually create and execute a VaspGWWorkFlow(GWWorkflow)
    """
    # general part for the base class
    grid = 0
    all_done = False
    converged = is_converged(False, structure)
    try:
        # Refresh grid level and completion flag from the on-disk results file.
        grid = read_grid_from_file(s_name(structure) + ".full_res")['grid']
        all_done = read_grid_from_file(s_name(structure) + ".full_res")['all_done']
    except (IOError, OSError):
        pass
    if all_done:
        print('| all is done for this material')
        return
    # specific part
    if spec_data['mode'] == 'fw':
        fw_work_flow = VaspGWFWWorkFlow()
    else:
        fw_work_flow = []
    if spec_data['test'] or spec_data['converge']:
        # Select the parameter sets for the preparation jobs: explicit test
        # ranges, previously converged results, or the convergence defaults.
        if spec_data['test']:
            tests_prep = GWscDFTPrepVaspInputSet(structure, spec_data).tests
            tests_prep.update(GWDFTDiagVaspInputSet(structure, spec_data).tests)
        elif spec_data['converge'] and converged:
            tests_prep = self.get_conv_res_test(spec_data, structure)['tests_prep']
        else:
            tests_prep = GWscDFTPrepVaspInputSet(structure, spec_data).convs
            tests_prep.update(GWDFTDiagVaspInputSet(structure, spec_data).convs)
            if grid > 0:
                # Widen the test ranges when a previous pass asked for a finer grid.
                tests_prep = expand(tests=tests_prep, level=grid)
            print(tests_prep)
        for test_prep in tests_prep:
            print('setting up test for: ' + test_prep)
            for value_prep in tests_prep[test_prep]['test_range']:
                print("**" + str(value_prep) + "**")
                option = {'test_prep': test_prep, 'value_prep': value_prep}
                self.create_job(spec_data, structure, 'prep', fw_work_flow, converged, option)
                # For every prep combination, also create the follow-up GW jobs.
                for job in spec_data['jobs'][1:]:
                    if job == 'G0W0':
                        if spec_data['test']:
                            tests = GWG0W0VaspInputSet(structure, spec_data).tests
                        elif spec_data['converge'] and converged:
                            tests = self.get_conv_res_test(spec_data, structure)['tests']
                        else:
                            tests = GWG0W0VaspInputSet(structure, spec_data).convs
                            if grid > 0:
                                tests = expand(tests=tests, level=grid)
                            print(tests)
                    if job in ['GW0', 'scGW0']:
                        input_set = GWG0W0VaspInputSet(structure, spec_data)
                        input_set.gw0_on()
                        # NOTE(review): both branches assign the same value;
                        # the 'test' branch looks like it should differ — confirm.
                        if spec_data['test']:
                            tests = input_set.tests
                        else:
                            tests = input_set.tests
                    # NOTE(review): if 'job' matches none of the cases above,
                    # 'tests' is reused from a previous iteration or is
                    # undefined (NameError) — verify job list contents.
                    for test in tests:
                        print(' setting up test for: ' + test)
                        for value in tests[test]['test_range']:
                            print(" **" + str(value) + "**")
                            option.update({'test': test, 'value': value})
                            self.create_job(spec_data, structure, job, fw_work_flow, converged, option)
def create(self):
    """
    create single abinit G0W0 flow
    """
    # manager = 'slurm' if 'ceci' in self.spec['mode'] else 'shell'
    # an AbiStructure object has an overwritten version of get_sorted_structure that sorts according to Z
    # this could also be pulled into the constructor of Abistructure
    # abi_structure = self.structure.get_sorted_structure()
    from abipy import abilab
    # Re-sort the structure by Z while preserving the attached 'item' tag.
    item = copy.copy(self.structure.item)
    self.structure.__class__ = abilab.Structure
    self.structure = self.structure.get_sorted_structure_z()
    self.structure.item = item
    abi_structure = self.structure
    manager = TaskManager.from_user_config()
    # Initialize the flow.
    flow = Flow(self.work_dir, manager, pickle_protocol=0)
    # flow = Flow(self.work_dir, manager)
    # kpoint grid defined over density 40 > ~ 3 3 3
    if self.spec['converge'] and not self.all_converged:
        # (2x2x2) gamma centered mesh for the convergence test on nbands and ecuteps
        # if kp_in is present in the specs a kp_in X kp_in x kp_in mesh is used for the convergence study
        if 'kp_in' in self.spec.data.keys():
            if self.spec['kp_in'] > 9:
                print('WARNING:\nkp_in should be < 13 to generate an n x n x n mesh\nfor larger values a grid with '
                      'density kp_in will be generated')
            kppa = self.spec['kp_in']
        else:
            kppa = 2
    else:
        # use the specified density for the final calculation with the converged nbands and ecuteps of other
        # stand alone calculations
        kppa = self.spec['kp_grid_dens']
    gamma = True
    # 'standard' parameters for stand alone calculation
    scf_nband = self.get_bands(self.structure) + 20
    # additional bands to accommodate for nbdbuf and a bit extra
    nscf_nband = [10 * self.get_bands(self.structure)]
    nksmall = None
    ecuteps = [8]
    extra_abivars = dict()
    # read user defined extra abivars from file 'extra_abivars' should be dictionary
    extra_abivars.update(read_extra_abivars())
    # self.bands_fac = 0.5 if 'gwcomp' in extra_abivars.keys() else 1
    # self.convs['nscf_nbands']['test_range'] =
    # tuple([self.bands_fac*x for x in self.convs['nscf_nbands']['test_range']])
    ecut = extra_abivars.pop('ecut', 44)
    ecutsigx = extra_abivars.pop('ecutsigx', 44)
    # Sanity checks on the cutoff hierarchy.
    if ecutsigx > ecut:
        raise RuntimeError('ecutsigx can not be largen than ecut')
    if ecutsigx < max(ecuteps):
        raise RuntimeError('ecutsigx < ecuteps this is not realistic')
    response_models = ['godby']
    if 'ppmodel' in extra_abivars.keys():
        response_models = [extra_abivars.pop('ppmodel')]
    if self.option is not None:
        # Converged options override defaults; ecuteps/nscf_nbands are
        # handled separately below and therefore skipped here.
        for k in self.option.keys():
            if k == 'ecut':
                ecut = self.option[k]
            if k in ['ecuteps', 'nscf_nbands']:
                pass
            else:
                extra_abivars.update({k: self.option[k]})
    try:
        grid = read_grid_from_file(s_name(self.structure)+".full_res")['grid']
        all_done = read_grid_from_file(s_name(self.structure)+".full_res")['all_done']
        workdir = os.path.join(s_name(self.structure), 'w'+str(grid))
    except (IOError, OSError):
        grid = 0
        all_done = False
        workdir = None
    if not all_done:
        if (self.spec['test'] or self.spec['converge']) and not self.all_converged:
            if self.spec['test']:
                print('| setting test calculation')
                tests = SingleAbinitGWWork(self.structure, self.spec).tests
                response_models = []
            else:
                if grid == 0:
                    print('| setting convergence calculations for grid 0')
                    # tests = SingleAbinitGWWorkFlow(self.structure, self.spec).convs
                    tests = self.convs
                else:
                    print('| extending grid')
                    # tests = expand(SingleAbinitGWWorkFlow(self.structure, self.spec).convs, grid)
                    tests = expand(self.convs, grid)
            # Rebuild the scan lists from the selected test ranges.
            ecuteps = []
            nscf_nband = []
            for test in tests:
                if tests[test]['level'] == 'scf':
                    if self.option is None:
                        extra_abivars.update({test + '_s': tests[test]['test_range']})
                    elif test in self.option:
                        extra_abivars.update({test: self.option[test]})
                    else:
                        extra_abivars.update({test + '_s': tests[test]['test_range']})
                else:
                    for value in tests[test]['test_range']:
                        if test == 'nscf_nbands':
                            nscf_nband.append(value * self.get_bands(self.structure))
                        # scr_nband takes nscf_nbands if not specified
                        # sigma_nband takes scr_nbands if not specified
                        if test == 'ecuteps':
                            ecuteps.append(value)
                        if test == 'response_model':
                            response_models.append(value)
        elif self.all_converged:
            print('| setting up for testing the converged values at the high kp grid ')
            # add a bandstructure and dos calculation
            if os.path.isfile('bands'):
                nksmall = -30
                # negative value > only bandstructure
            else:
                nksmall = 30
            # in this case a convergence study has already been performed.
            # The resulting parameters are passed as option
            ecuteps = [self.option['ecuteps'], self.option['ecuteps'] + self.convs['ecuteps']['test_range'][1] -
                       self.convs['ecuteps']['test_range'][0]]
            nscf_nband = [self.option['nscf_nbands'], self.option['nscf_nbands'] + self.convs['nscf_nbands'][
                'test_range'][1] - self.convs['nscf_nbands']['test_range'][0]]
            # for option in self.option:
            #     if option not in ['ecuteps', 'nscf_nband']:
            #         extra_abivars.update({option + '_s': self.option[option]})
    else:
        print('| all is done for this material')
        return
    logger.info('ecuteps : %s ' % str(ecuteps))
    logger.info('extra : %s ' % str(extra_abivars))
    logger.info('nscf_nb : %s ' % str(nscf_nband))
    inputs = g0w0_convergence_inputs(abi_structure, self.pseudo_table, kppa, nscf_nband, ecuteps, ecutsigx,
                                     scf_nband, ecut, accuracy="normal", spin_mode="unpolarized", smearing=None,
                                     response_models=response_models, charge=0.0, sigma_nband=None, scr_nband=None,
                                     gamma=gamma, nksmall=nksmall, extra_abivars=extra_abivars)
    work = G0W0Work(scf_inputs=inputs[0], nscf_inputs=inputs[1], scr_inputs=inputs[2], sigma_inputs=inputs[3])
    # work = g0w0_extended_work(abi_structure, self.pseudo_table, kppa, nscf_nband, ecuteps, ecutsigx, scf_nband,
    #     accuracy="normal", spin_mode="unpolarized", smearing=None, response_models=response_models,
    #     charge=0.0, sigma_nband=None, scr_nband=None, gamma=gamma, nksmall=nksmall, **extra_abivars)
    print(workdir)
    flow.register_work(work, workdir=workdir)
    return flow.allocate()
def create(self):
    """
    create single abinit G0W0 flow
    """
    # manager = 'slurm' if 'ceci' in self.spec['mode'] else 'shell'
    # an AbiStructure object has an overwritten version of get_sorted_structure that sorts according to Z
    # this could also be pulled into the constructor of Abistructure
    # abi_structure = self.structure.get_sorted_structure()
    from abipy import abilab
    # Sort by Z; keep the user-attached 'item' tag across the conversion.
    item = copy.copy(self.structure.item)
    self.structure.__class__ = abilab.Structure
    self.structure = self.structure.get_sorted_structure_z()
    self.structure.item = item
    abi_structure = self.structure
    manager = TaskManager.from_user_config()
    # Initialize the flow.
    flow = Flow(self.work_dir, manager, pickle_protocol=0)
    # flow = Flow(self.work_dir, manager)
    # kpoint grid defined over density 40 > ~ 3 3 3
    if self.spec['converge'] and not self.all_converged:
        # (2x2x2) gamma centered mesh for the convergence test on nbands and ecuteps
        # if kp_in is present in the specs a kp_in X kp_in x kp_in mesh is used for the convergence study
        if 'kp_in' in self.spec.data.keys():
            if self.spec['kp_in'] > 9:
                print(
                    'WARNING:\nkp_in should be < 13 to generate an n x n x n mesh\nfor larger values a grid with '
                    'density kp_in will be generated')
            kppa = self.spec['kp_in']
        else:
            kppa = 2
    else:
        # use the specified density for the final calculation with the converged nbands and ecuteps of other
        # stand alone calculations
        kppa = self.spec['kp_grid_dens']
    gamma = True
    # 'standard' parameters for stand alone calculation
    scf_nband = self.get_bands(self.structure) + 20
    # additional bands to accommodate for nbdbuf and a bit extra
    nscf_nband = [10 * self.get_bands(self.structure)]
    nksmall = None
    ecuteps = [8]
    extra_abivars = dict()
    # read user defined extra abivars from file 'extra_abivars' should be dictionary
    extra_abivars.update(read_extra_abivars())
    # self.bands_fac = 0.5 if 'gwcomp' in extra_abivars.keys() else 1
    # self.convs['nscf_nbands']['test_range'] =
    # tuple([self.bands_fac*x for x in self.convs['nscf_nbands']['test_range']])
    ecut = extra_abivars.pop('ecut', 44)
    ecutsigx = extra_abivars.pop('ecutsigx', 44)
    # Cutoff consistency checks.
    if ecutsigx > ecut:
        raise RuntimeError('ecutsigx can not be largen than ecut')
    if ecutsigx < max(ecuteps):
        raise RuntimeError('ecutsigx < ecuteps this is not realistic')
    response_models = ['godby']
    if 'ppmodel' in extra_abivars.keys():
        response_models = [extra_abivars.pop('ppmodel')]
    if self.option is not None:
        # ecuteps / nscf_nbands are rebuilt below, so they are skipped here.
        for k in self.option.keys():
            if k == 'ecut':
                ecut = self.option[k]
            if k in ['ecuteps', 'nscf_nbands']:
                pass
            else:
                extra_abivars.update({k: self.option[k]})
    try:
        grid = read_grid_from_file(s_name(self.structure) + ".full_res")['grid']
        all_done = read_grid_from_file(
            s_name(self.structure) + ".full_res")['all_done']
        workdir = os.path.join(s_name(self.structure), 'w' + str(grid))
    except (IOError, OSError):
        grid = 0
        all_done = False
        workdir = None
    if not all_done:
        if (self.spec['test'] or self.spec['converge']) and not self.all_converged:
            if self.spec['test']:
                print('| setting test calculation')
                tests = SingleAbinitGWWork(self.structure, self.spec).tests
                response_models = []
            else:
                if grid == 0:
                    print('| setting convergence calculations for grid 0')
                    # tests = SingleAbinitGWWorkFlow(self.structure, self.spec).convs
                    tests = self.convs
                else:
                    print('| extending grid')
                    # tests = expand(SingleAbinitGWWorkFlow(self.structure, self.spec).convs, grid)
                    tests = expand(self.convs, grid)
            # Rebuild the scan lists from the chosen test ranges.
            ecuteps = []
            nscf_nband = []
            for test in tests:
                if tests[test]['level'] == 'scf':
                    if self.option is None:
                        extra_abivars.update(
                            {test + '_s': tests[test]['test_range']})
                    elif test in self.option:
                        extra_abivars.update({test: self.option[test]})
                    else:
                        extra_abivars.update(
                            {test + '_s': tests[test]['test_range']})
                else:
                    for value in tests[test]['test_range']:
                        if test == 'nscf_nbands':
                            nscf_nband.append(
                                value * self.get_bands(self.structure))
                        # scr_nband takes nscf_nbands if not specified
                        # sigma_nband takes scr_nbands if not specified
                        if test == 'ecuteps':
                            ecuteps.append(value)
                        if test == 'response_model':
                            response_models.append(value)
        elif self.all_converged:
            print(
                '| setting up for testing the converged values at the high kp grid '
            )
            # add a bandstructure and dos calculation
            if os.path.isfile('bands'):
                nksmall = -30
                # negative value > only bandstructure
            else:
                nksmall = 30
            # in this case a convergence study has already been performed.
            # The resulting parameters are passed as option
            ecuteps = [
                self.option['ecuteps'],
                self.option['ecuteps'] + self.convs['ecuteps']['test_range'][1] -
                self.convs['ecuteps']['test_range'][0]
            ]
            nscf_nband = [
                self.option['nscf_nbands'], self.option['nscf_nbands'] +
                self.convs['nscf_nbands']['test_range'][1] -
                self.convs['nscf_nbands']['test_range'][0]
            ]
            # for option in self.option:
            #     if option not in ['ecuteps', 'nscf_nband']:
            #         extra_abivars.update({option + '_s': self.option[option]})
    else:
        print('| all is done for this material')
        return
    logger.info('ecuteps : %s ' % str(ecuteps))
    logger.info('extra : %s ' % str(extra_abivars))
    logger.info('nscf_nb : %s ' % str(nscf_nband))
    inputs = g0w0_convergence_inputs(abi_structure, self.pseudo_table, kppa, nscf_nband, ecuteps, ecutsigx,
                                     scf_nband, ecut, accuracy="normal", spin_mode="unpolarized", smearing=None,
                                     response_models=response_models, charge=0.0, sigma_nband=None, scr_nband=None,
                                     gamma=gamma, nksmall=nksmall, extra_abivars=extra_abivars)
    work = G0W0Work(scf_inputs=inputs[0], nscf_inputs=inputs[1], scr_inputs=inputs[2], sigma_inputs=inputs[3])
    # work = g0w0_extended_work(abi_structure, self.pseudo_table, kppa, nscf_nband, ecuteps, ecutsigx, scf_nband,
    #     accuracy="normal", spin_mode="unpolarized", smearing=None, response_models=response_models,
    #     charge=0.0, sigma_nband=None, scr_nband=None, gamma=gamma, nksmall=nksmall, **extra_abivars)
    print(workdir)
    flow.register_work(work, workdir=workdir)
    return flow.allocate()
def loop_structures(self, mode='i'):
    """
    reading the structures specified in spec, add special points, and excecute the specs
    mode:
    i: loop structures for input generation
    o: loop structures for output parsing
    w: print all results
    """
    print('loop structures mode ', mode)
    # Materials Project API key; optional (MPRester is only used for 'mp-' items).
    try:
        mp_key = os.environ['MP_KEY']
    except KeyError:
        mp_key = None
    mp_list_vasp = ['mp-149', 'mp-2534', 'mp-8062', 'mp-2469', 'mp-1550', 'mp-830', 'mp-1986', 'mp-10695', 'mp-66',
                    'mp-1639', 'mp-1265', 'mp-1138', 'mp-23155', 'mp-111']
    # Build the list of items to process from the configured source.
    if self.data['source'] == 'mp-vasp':
        items_list = mp_list_vasp
    elif self.data['source'] in ['poscar', 'cif']:
        files = os.listdir('.')
        items_list = files
    elif self.data['source'] == 'mar_exp':
        items_list = []
        local_serv = pymongo.Connection("marilyn.pcpm.ucl.ac.be")
        local_db_gaps = local_serv.band_gaps
        pwd = os.environ['MAR_PAS']
        local_db_gaps.authenticate("setten", pwd)
        for c in local_db_gaps.exp.find():
            name = Structure.from_dict(c['icsd_data']['structure']).composition.reduced_formula, c['icsd_id'],\
                c['MP_id']
            print(name)
            # Structure.from_dict(c['icsd_data']['structure']).to(fmt='cif',filename=name)
            items_list.append({'name': 'mp-' + c['MP_id'], 'icsd': c['icsd_id'], 'mp': c['MP_id']})
    else:
        # Fall back: treat 'source' as a file with one item per line.
        items_list = [line.strip() for line in open(self.data['source'])]
    for item in items_list:
        print('\n')
        # special case, this should be encaptulated
        if self.data['source'] == 'mar_exp':
            print('structure from marilyn', item['name'], item['icsd'], item['mp'])
            exp = local_db_gaps.exp.find({'MP_id': item['mp']})[0]
            structure = Structure.from_dict(exp['icsd_data']['structure'])
            structure = refine_structure(structure)
            structure.to(fmt='cif', filename=item['name'])
            try:
                kpts = local_db_gaps.GGA_BS.find({'transformations.history.0.id': item['icsd']})[0]\
                    ['calculations'][-1]['band_structure']['kpoints']
            except (IndexError, KeyError):
                # No bandstructure record: default to Gamma-only kpoints.
                kpts = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
            structure.kpts = kpts
            print('kpoints:', structure.kpts[0], structure.kpts[1])
            structure.item = item['name']
        else:
            if item.startswith('POSCAR_'):
                structure = pmg.read_structure(item)
                comment = Poscar.from_file(item).comment
                # print comment
                # Band-edge info may be encoded in the POSCAR comment:
                # "gap <vbm_l> <vbm kx ky kz> <cbm_l> <cbm kx ky kz>".
                if comment.startswith("gap"):
                    structure.vbm_l = comment.split(" ")[1]
                    structure.vbm = (comment.split(" ")[2], comment.split(" ")[3], comment.split(" ")[4])
                    structure.cbm_l = comment.split(" ")[5]
                    structure.cbm = (comment.split(" ")[6], comment.split(" ")[7], comment.split(" ")[8])
                else:
                    # print "no bandstructure information available, adding GG as 'gap'"
                    structure = add_gg_gap(structure)
            elif 'cif' in item:
                structure = Structure.from_file(item)
                structure = add_gg_gap(structure)
            elif item.startswith('mp-'):
                with MPRester(mp_key) as mp_database:
                    print('structure from mp database', item)
                    structure = mp_database.get_structure_by_material_id(item, final=True)
                    try:
                        bandstructure = mp_database.get_bandstructure_by_material_id(item)
                        structure.vbm_l = bandstructure.kpoints[bandstructure.get_vbm()['kpoint_index'][0]].label
                        structure.cbm_l = bandstructure.kpoints[bandstructure.get_cbm()['kpoint_index'][0]].label
                        structure.cbm = tuple(bandstructure.kpoints[bandstructure.get_cbm()['kpoint_index'][0]].frac_coords)
                        structure.vbm = tuple(bandstructure.kpoints[bandstructure.get_vbm()['kpoint_index'][0]].frac_coords)
                    except (MPRestError, IndexError, KeyError) as err:
                        # NOTE(review): err.message is Python-2-only; under
                        # Python 3 this line itself raises AttributeError.
                        print(err.message)
                        structure = add_gg_gap(structure)
            else:
                # Unrecognized item: skip it.
                continue
            structure.kpts = [list(structure.cbm), list(structure.vbm)]
            structure.item = item
        print(item, s_name(structure))
        # Dispatch on mode; failures for one structure do not stop the loop.
        if mode == 'i':
            try:
                self.execute_flow(structure)
            except Exception as exc:
                print('input generation failed')
                print(exc)
        elif mode == 'w':
            # NOTE(review): bare 'except:' also swallows KeyboardInterrupt /
            # SystemExit — consider 'except Exception:'.
            try:
                self.print_results(structure)
            except:
                print('writing output failed')
        elif mode == 's':
            try:
                self.insert_in_database(structure)
            except:
                print('database insertion failed')
        elif mode == 'o':
            # if os.path.isdir(s_name(structure)) or os.path.isdir(s_name(structure)+'.conv'):
            try:
                self.process_data(structure)
            except:
                print('output parsing failed')
    if 'ceci' in self.data['mode'] and mode == 'i':
        # Cluster mode: make the generated job collection executable by the owner.
        os.chmod("job_collection", stat.S_IRWXU)
def get_work_dir(self): name = s_name(self.structure) if not self.all_converged: return str(name)+'_'+str(self.option['test'])+'_'+str(self.option['value']) else: return str(name)
def process_data(self, structure):
    """
    Process the data of a set of GW calculations:
    for 'single' and 'test' calculations the data is read and outputted
    for the parameter scanning part of a convergence calculation the data is read and parameters that
    provide converged results are determined
    for the 'full' part of a convergence calculation the data is read and it is tested if the slopes are
    in agreement with the scanning part
    """
    data = GWConvergenceData(spec=self, structure=structure)
    if self.data['converge']:
        done = False
        try:
            data.read_full_res_from_file()
            if data.full_res['all_done']:
                done = True
                print('| no action needed al is done already')
        except (IOError, OSError, SyntaxError):
            # No (readable) full_res file yet: proceed with processing.
            pass
        data.set_type()
        while not done:
            if data.type['parm_scr']:
                data.read()
                if len(data.data) == 0:
                    print('| parm_scr type calculation but no data found.')
                    break
                # todo this should be calculated
                if len(data.data) < 9:
                    print('| parm_scr type calculation but no complete data found,'
                          ' check if all calculations are done.')
                    break
                if data.find_conv_pars_scf('ecut', 'full_width', self['tol'])[0]:
                    print('| parm_scr type calculation, converged scf values found')
                else:
                    print('| parm_scr type calculation, no converged scf values found')
                    data.full_res.update({'remark': 'No converged SCf parameter found. Continue anyway.'})
                    # Fall back to ecut = 44 Ha; internally we work in eV.
                    data.conv_res['values'].update({'ecut': 44 / eV_to_Ha})
                    data.conv_res['control'].update({'ecut': True})
                # if ecut is provided in extra_abivars overwrite in any case .. this is done at input generation
                # if 'ecut' in read_extra_abivars().keys():
                #     data.conv_res['values'].update({'ecut': read_extra_abivars()['ecut']})  # should be in eV
                # if converged ok, if not increase the grid parameter of the next set of calculations
                extrapolated = data.find_conv_pars(self['tol'])
                if data.conv_res['control']['nbands']:
                    print('| parm_scr type calculation, converged values found, extrapolated value: %s' %
                          extrapolated[4])
                else:
                    print('| parm_scr type calculation, no converged values found, increasing grid')
                    data.full_res['grid'] += 1
                data.print_full_res()
                data.print_conv_res()
                # plot data:
                print_gnuplot_header('plots', s_name(structure) + ' tol = ' + str(self['tol']), filetype=None)
                data.print_gnuplot_line('plots')
                data.print_plot_data()
                done = True
            elif data.type['full']:
                if not data.read_conv_res_from_file(s_name(structure) + '.conv_res'):
                    print('| Full type calculation but the conv_res file is not available, trying to reconstruct')
                    data.read()
                    data.find_conv_pars(self['tol'])
                    data.print_conv_res()
                data.read(subset='.conv')
                if len(data.data) == 0:
                    print('| Full type calculation but no data found.')
                    break
                if len(data.data) < 4:
                    print('| Full type calculation but no complete data found.')
                    for item in data.data:
                        print(item)
                    break
                if data.test_full_kp_results(tol_rel=1, tol_abs=0.0015):
                    print('| Full type calculation and the full results agree with the parm_scr.'
                          ' All_done for this compound.')
                    data.full_res.update({'all_done': True})
                    data.print_full_res()
                    done = True
                    # data.print_plot_data()
                    self.code_interface.store_results(name=s_name(structure))
                else:
                    print('| Full type calculation but the full results do not agree with the parm_scr.')
                    print('| Increase the tol to find better converged parameters and test the full grid again.')
                    print('| TODO')
                    # NOTE(review): despite the disagreement, 'all_done' is set
                    # to True here, so the material will not be revisited —
                    # confirm this is intentional (the TODO below suggests not).
                    data.full_res.update({'remark': 'no agreement at high dens kp mesh,', 'all_done': True})
                    # read the system specific tol for System.conv_res
                    # if it's not there create it from the global tol
                    # reduce tol
                    # set data.type to convergence
                    # loop
                    done = True
            else:
                # Neither parm_scr nor full: nothing to do in converge mode.
                done = True
    elif self.data['test']:
        data.read()
        data.set_type()
        data.print_plot_data()
    else:
        data.read()
        data.set_type()
        data.print_plot_data()
def execute_flow(self, structure, spec_data):
    """
    execute spec prepare input/jobfiles or submit to fw for a given structure
    for vasp the different jobs are created into a flow
    todo this should actually create and execute a VaspGWWorkFlow(GWWorkflow)
    """
    # general part for the base class
    grid = 0
    all_done = False
    converged = is_converged(False, structure)
    try:
        # Pull the current grid level / completion state from disk.
        grid = read_grid_from_file(s_name(structure)+".full_res")['grid']
        all_done = read_grid_from_file(s_name(structure)+".full_res")['all_done']
    except (IOError, OSError):
        pass
    if all_done:
        print('| all is done for this material')
        return
    # specific part
    if spec_data['mode'] == 'fw':
        fw_work_flow = VaspGWFWWorkFlow()
    else:
        fw_work_flow = []
    if spec_data['test'] or spec_data['converge']:
        # Pick the parameter ranges for the preparation jobs.
        if spec_data['test']:
            tests_prep = GWscDFTPrepVaspInputSet(structure, spec_data).tests
            tests_prep.update(GWDFTDiagVaspInputSet(structure, spec_data).tests)
        elif spec_data['converge'] and converged:
            tests_prep = self.get_conv_res_test(spec_data, structure)['tests_prep']
        else:
            tests_prep = GWscDFTPrepVaspInputSet(structure, spec_data).convs
            tests_prep.update(GWDFTDiagVaspInputSet(structure, spec_data).convs)
            if grid > 0:
                tests_prep = expand(tests=tests_prep, level=grid)
            print(tests_prep)
        for test_prep in tests_prep:
            print('setting up test for: ' + test_prep)
            for value_prep in tests_prep[test_prep]['test_range']:
                print("**" + str(value_prep) + "**")
                option = {'test_prep': test_prep, 'value_prep': value_prep}
                self.create_job(spec_data, structure, 'prep', fw_work_flow, converged, option)
                # Create the follow-up GW jobs for this prep combination.
                for job in spec_data['jobs'][1:]:
                    if job == 'G0W0':
                        if spec_data['test']:
                            tests = GWG0W0VaspInputSet(structure, spec_data).tests
                        elif spec_data['converge'] and converged:
                            tests = self.get_conv_res_test(spec_data, structure)['tests']
                        else:
                            tests = GWG0W0VaspInputSet(structure, spec_data).convs
                            if grid > 0:
                                tests = expand(tests=tests, level=grid)
                            print(tests)
                    if job in ['GW0', 'scGW0']:
                        input_set = GWG0W0VaspInputSet(structure, spec_data)
                        input_set.gw0_on()
                        # NOTE(review): the two branches are identical; the
                        # 'test' branch probably intended a different source.
                        if spec_data['test']:
                            tests = input_set.tests
                        else:
                            tests = input_set.tests
                    # NOTE(review): 'tests' is undefined when 'job' matches
                    # neither case above — verify spec_data['jobs'] contents.
                    for test in tests:
                        print(' setting up test for: ' + test)
                        for value in tests[test]['test_range']:
                            print(" **" + str(value) + "**")
                            option.update({'test': test, 'value': value})
                            self.create_job(spec_data, structure, job, fw_work_flow, converged, option)
def create_input(self):
    """
    Create the VASP input files for this job ('prep', 'G0W0', 'GW0' or
    'scGW0') in a directory derived from the structure name and the scanned
    parameter values held in self.option.
    """
    option_name = ''
    path_add = ''
    if self.spec['converge'] and self.converged:
        # converged runs go into a separate '<name>.conv' tree
        path_add = '.conv'
    if self.option is None:
        path = s_name(self.structure)
    else:
        # one subdirectory per prep-parameter value, e.g. <name>/encuts400
        path = os.path.join(s_name(self.structure) + path_add,
                            str(self.option['test_prep'])+str(self.option['value_prep']))
        if 'test' in self.option.keys():
            # GW-level parameter becomes a suffix on the job directory name
            option_name = '.'+str(self.option['test'])+str(self.option['value'])
    if self.job == 'prep':
        inpset = GWscDFTPrepVaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            # during parameter scanning use a coarse k-mesh and a fixed high ENCUT
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWscDFTPrepVaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            # only apply the prep parameter if this input set actually knows it
            if self.option['test_prep'] in GWscDFTPrepVaspInputSet.get_defaults_convs().keys() or self.option['test_prep'] in GWscDFTPrepVaspInputSet.get_defaults_tests().keys():
                inpset.set_test(self.option['test_prep'], self.option['value_prep'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        inpset.write_input(self.structure, path)
        # second stage of 'prep': the diagonalization run, only its INCAR differs
        inpset = GWDFTDiagVaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWDFTDiagVaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            inpset.set_test(self.option['test_prep'], self.option['value_prep'])
        inpset.get_incar(self.structure).write_file(os.path.join(path, 'INCAR.DIAG'))
    if self.job == 'G0W0':
        inpset = GWG0W0VaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWG0W0VaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            # apply both the prep-level and the GW-level scanned parameters
            inpset.set_test(self.option['test_prep'], self.option['value_prep'])
            inpset.set_test(self.option['test'], self.option['value'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        # NOTE(review): the wannier calls are commented out, so both branches
        # currently do the same thing for G0W0
        if self.spec['kp_grid_dens'] > 20:
            #inpset.wannier_on()
            inpset.write_input(self.structure, os.path.join(path, 'G0W0'+option_name))
            #w_inpset = Wannier90InputSet(self.spec)
            #w_inpset.write_file(self.structure, os.path.join(path, 'G0W0'+option_name))
        else:
            inpset.write_input(self.structure, os.path.join(path, 'G0W0'+option_name))
    if self.job == 'GW0':
        inpset = GWG0W0VaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWG0W0VaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            inpset.set_test(self.option['test_prep'], self.option['value_prep'])
            inpset.set_test(self.option['test'], self.option['value'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        # GW0: iterate G self-consistently
        inpset.gw0_on()
        if self.spec['kp_grid_dens'] > 20:
            #inpset.wannier_on()
            inpset.write_input(self.structure, os.path.join(path, 'GW0'+option_name))
            #w_inpset = Wannier90InputSet(self.spec)
            #w_inpset.write_file(self.structure, os.path.join(path, 'GW0'+option_name))
        else:
            inpset.write_input(self.structure, os.path.join(path, 'GW0'+option_name))
    if self.job == 'scGW0':
        inpset = GWG0W0VaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWG0W0VaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            inpset.set_test(self.option['test_prep'], self.option['value_prep'])
            inpset.set_test(self.option['test'], self.option['value'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        # scGW0: quasi-particle self-consistent variant
        inpset.gw0_on(qpsc=True)
        if self.spec['kp_grid_dens'] > 20:
            # dense k-mesh: also produce wannier90 input for interpolation
            inpset.wannier_on()
            inpset.write_input(self.structure, os.path.join(path, 'scGW0'+option_name))
            w_inpset = Wannier90InputSet(self.spec)
            w_inpset.write_file(self.structure, os.path.join(path, 'scGW0'+option_name))
        else:
            inpset.write_input(self.structure, os.path.join(path, 'scGW0'+option_name))
def loop_structures(self, mode='i'):
    """
    Read the structures specified in the spec, add special k-points, and
    execute the spec for each of them.

    mode:
      i: loop structures for input generation
      o: loop structures for output parsing
      w: print all results
      s: insert results in the database
    """
    print('loop structures mode ', mode)
    try:
        mp_key = os.environ['MP_KEY']
    except KeyError:
        mp_key = None
    # hard-coded benchmark set of Materials Project ids
    mp_list_vasp = ['mp-149', 'mp-2534', 'mp-8062', 'mp-2469', 'mp-1550', 'mp-830', 'mp-1986', 'mp-10695', 'mp-66',
                    'mp-1639', 'mp-1265', 'mp-1138', 'mp-23155', 'mp-111']
    if self.data['source'] == 'mp-vasp':
        items_list = mp_list_vasp
    elif self.data['source'] in ['poscar', 'cif']:
        # take every file in the working directory; non-matching names are
        # skipped below by the 'continue' in the per-item dispatch
        files = os.listdir('.')
        items_list = files
    elif self.data['source'] == 'mar_exp':
        # experimental band-gap collection on the group's mongo server
        items_list = []
        local_serv = pymongo.Connection("marilyn.pcpm.ucl.ac.be")
        local_db_gaps = local_serv.band_gaps
        pwd = os.environ['MAR_PAS']
        local_db_gaps.authenticate("setten", pwd)
        for c in local_db_gaps.exp.find():
            name = Structure.from_dict(c['icsd_data']['structure']).composition.reduced_formula, c['icsd_id'],\
                c['MP_id']
            print(name)
            # Structure.from_dict(c['icsd_data']['structure']).to(fmt='cif',filename=name)
            items_list.append({'name': 'mp-' + c['MP_id'], 'icsd': c['icsd_id'], 'mp': c['MP_id']})
    else:
        # anything else is interpreted as a file with one item per line
        items_list = [line.strip() for line in open(self.data['source'])]
    for item in items_list:
        print('\n')
        # special case, this should be encaptulated
        if self.data['source'] == 'mar_exp':
            print('structure from marilyn', item['name'], item['icsd'], item['mp'])
            exp = local_db_gaps.exp.find({'MP_id': item['mp']})[0]
            structure = Structure.from_dict(exp['icsd_data']['structure'])
            structure = refine_structure(structure)
            structure.to(fmt='cif', filename=item['name'])
            try:
                kpts = local_db_gaps.GGA_BS.find({'transformations.history.0.id': item['icsd']})[0]\
                    ['calculations'][-1]['band_structure']['kpoints']
            except (IndexError, KeyError):
                # no band structure available: fall back to Gamma-Gamma
                kpts = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
            structure.kpts = kpts
            print('kpoints:', structure.kpts[0], structure.kpts[1])
            structure.item = item['name']
        else:
            if item.startswith('POSCAR_'):
                structure = pmg.read_structure(item)
                comment = Poscar.from_file(item).comment
                # print comment
                if comment.startswith("gap"):
                    # POSCAR comment encodes 'gap vbm_l vx vy vz cbm_l cx cy cz'
                    structure.vbm_l = comment.split(" ")[1]
                    structure.vbm = (comment.split(" ")[2], comment.split(" ")[3], comment.split(" ")[4])
                    structure.cbm_l = comment.split(" ")[5]
                    structure.cbm = (comment.split(" ")[6], comment.split(" ")[7], comment.split(" ")[8])
                else:
                    # print "no bandstructure information available, adding GG as 'gap'"
                    structure = add_gg_gap(structure)
            elif 'cif' in item:
                structure = Structure.from_file(item)
                structure = add_gg_gap(structure)
            elif item.startswith('mp-'):
                with MPRester(mp_key) as mp_database:
                    print('structure from mp database', item)
                    structure = mp_database.get_structure_by_material_id(item, final=True)
                    try:
                        bandstructure = mp_database.get_bandstructure_by_material_id(item)
                        structure.vbm_l = bandstructure.kpoints[bandstructure.get_vbm()['kpoint_index'][0]].label
                        structure.cbm_l = bandstructure.kpoints[bandstructure.get_cbm()['kpoint_index'][0]].label
                        structure.cbm = tuple(bandstructure.kpoints[bandstructure.get_cbm()['kpoint_index'][0]].frac_coords)
                        structure.vbm = tuple(bandstructure.kpoints[bandstructure.get_vbm()['kpoint_index'][0]].frac_coords)
                    except (MPRestError, IndexError, KeyError) as err:
                        # print(err) is portable; err.message was Python-2-only
                        print(err)
                        structure = add_gg_gap(structure)
            else:
                continue
            structure.kpts = [list(structure.cbm), list(structure.vbm)]
            structure.item = item
        print(item, s_name(structure))
        # best-effort per-structure dispatch: a failure for one structure must
        # not abort the whole loop, but never swallow KeyboardInterrupt etc.,
        # hence 'except Exception' rather than a bare except
        if mode == 'i':
            try:
                self.execute_flow(structure)
            except Exception as exc:
                print('input generation failed')
                print(exc)
        elif mode == 'w':
            try:
                self.print_results(structure)
            except Exception as exc:
                print('writing output failed')
                print(exc)
        elif mode == 's':
            try:
                self.insert_in_database(structure)
            except Exception as exc:
                print('database insertion failed')
                print(exc)
        elif mode == 'o':
            # if os.path.isdir(s_name(structure)) or os.path.isdir(s_name(structure)+'.conv'):
            try:
                self.process_data(structure)
            except Exception as exc:
                print('output parsing failed')
                print(exc)
    if 'ceci' in self.data['mode'] and mode == 'i':
        # make the generated job collection script executable for the user
        os.chmod("job_collection", stat.S_IRWXU)
def process_data(self, structure):
    """
    Process the data of a set of GW calculations:
    for 'single' and 'test' calculations the data is read and outputted
    for the parameter scanning part of a convergence calculation the data is
    read and parameters that provide converged results are determined
    for the 'full' part of a convergence calculation the data is read and it
    is tested if the slopes are in agreement with the scanning part
    """
    data = GWConvergenceData(spec=self, structure=structure)
    if self.data["converge"]:
        done = False
        try:
            data.read_full_res_from_file()
            if data.full_res["all_done"]:
                done = True
                print("| no action needed al is done already")
        except (IOError, OSError, SyntaxError):
            # no (or unreadable) previous results: start processing from scratch
            pass
        data.set_type()
        while not done:
            if data.type["parm_scr"]:
                data.read()
                if len(data.data) == 0:
                    print("| parm_scr type calculation but no data found.")
                    break
                if len(data.data) < 9:  # todo this should be calculated
                    print(
                        "| parm_scr type calculation but no complete data found,"
                        " check if all calculations are done."
                    )
                    break
                if data.find_conv_pars_scf("ecut", "full_width", self["tol"])[0]:
                    print("| parm_scr type calculation, converged scf values found")
                else:
                    # no converged ecut: record a remark and fall back to a default
                    print("| parm_scr type calculation, no converged scf values found")
                    data.full_res.update({"remark": "No converged SCf parameter found. Continue anyway."})
                    data.conv_res["values"].update({"ecut": 40 * eV_to_Ha})
                    data.conv_res["control"].update({"ecut": True})
                # if ecut is provided in extra_abivars overwrite in any case ..
                if "ecut" in read_extra_abivars().keys():
                    data.conv_res["values"].update({"ecut": read_extra_abivars()["ecut"] * eV_to_Ha})
                # if converged ok, if not increase the grid parameter of the next set of calculations
                extrapolated = data.find_conv_pars(self["tol"])
                if data.conv_res["control"]["nbands"]:
                    print(
                        "| parm_scr type calculation, converged values found, extrapolated value: %s"
                        % extrapolated[4]
                    )
                else:
                    print("| parm_scr type calculation, no converged values found, increasing grid")
                    data.full_res["grid"] += 1
                data.print_full_res()
                data.print_conv_res()
                # plot data:
                print_gnuplot_header("plots", s_name(structure) + " tol = " + str(self["tol"]), filetype=None)
                data.print_gnuplot_line("plots")
                data.print_plot_data()
                done = True
            elif data.type["full"]:
                if not data.read_conv_res_from_file(s_name(structure) + ".conv_res"):
                    print("| Full type calculation but the conv_res file is not available, trying to reconstruct")
                    data.read()
                    data.find_conv_pars(self["tol"])
                    data.print_conv_res()
                data.read(subset=".conv")
                if len(data.data) == 0:
                    print("| Full type calculation but no data found.")
                    break
                if len(data.data) < 4:
                    print("| Full type calculation but no complete data found.")
                    for item in data.data:
                        print(item)
                    break
                if data.test_full_kp_results(tol_rel=1, tol_abs=0.0015):
                    print(
                        "| Full type calculation and the full results agree with the parm_scr."
                        " All_done for this compound."
                    )
                    data.full_res.update({"all_done": True})
                    data.print_full_res()
                    done = True
                    # data.print_plot_data()
                    self.code_interface.store_results(name=s_name(structure))
                else:
                    print("| Full type calculation but the full results do not agree with the parm_scr.")
                    print("| Increase the tol to find better converged parameters and test the full grid again.")
                    print("| TODO")
                    data.full_res.update({"remark": "no agreement at high dens kp mesh,", "all_done": True})
                    # read the system specific tol for System.conv_res
                    # if it's not there create it from the global tol
                    # reduce tol
                    # set data.type to convergence
                    # loop
                    done = True
            else:
                # fix: terminate when the type is neither parm_scr nor full;
                # previously the while-loop would spin forever in that case
                # (the sibling implementation of this method already had this)
                done = True
    elif self.data["test"]:
        data.read()
        data.set_type()
        data.print_plot_data()
    else:
        data.read()
        data.set_type()
        data.print_plot_data()
def process_data(self, structure):
    """
    Process the data of a set of GW calculations:
    for 'single' and 'test' calculations the data is read and outputted
    for the parameter scanning part of a convergence calculation the data is
    read and parameters that provide converged results are determined
    for the 'full' part of a convergence calculation the data is read and it
    is tested if the slopes are in agreement with the scanning part
    """
    data = GWConvergenceData(spec=self, structure=structure)
    if self.data['converge']:
        done = False
        try:
            data.read_full_res_from_file()
            if data.full_res['all_done']:
                done = True
                print('| no action needed al is done already')
        except (IOError, OSError, SyntaxError):
            # no (or unreadable) previous results: start processing from scratch
            pass
        data.set_type()
        while not done:
            if data.type['parm_scr']:
                data.read()
                if len(data.data) == 0:
                    print('| parm_scr type calculation but no data found.')
                    break
                if len(data.data) < 9:  # todo this should be calculated
                    print('| parm_scr type calculation but no complete data found,'
                          ' check if all calculations are done.')
                    break
                if data.find_conv_pars_scf('ecut', 'full_width', self['tol'])[0]:
                    print('| parm_scr type calculation, converged scf values found')
                else:
                    # no converged ecut: record a remark and fall back to a default
                    print('| parm_scr type calculation, no converged scf values found')
                    data.full_res.update({'remark': 'No converged SCf parameter found. Continue anyway.'})
                    data.conv_res['values'].update({'ecut': 44/eV_to_Ha})  # internally we work in eV
                    data.conv_res['control'].update({'ecut': True})
                # if ecut is provided in extra_abivars overwrite in any case .. this is done at input generation
                # if 'ecut' in read_extra_abivars().keys():
                #    data.conv_res['values'].update({'ecut': read_extra_abivars()['ecut']})  # should be in eV
                # if converged ok, if not increase the grid parameter of the next set of calculations
                extrapolated = data.find_conv_pars(self['tol'])
                if data.conv_res['control']['nbands']:
                    print('| parm_scr type calculation, converged values found, extrapolated value: %s'
                          % extrapolated[4])
                else:
                    print('| parm_scr type calculation, no converged values found, increasing grid')
                    data.full_res['grid'] += 1
                data.print_full_res()
                data.print_conv_res()
                # plot data:
                print_gnuplot_header('plots', s_name(structure)+' tol = '+str(self['tol']), filetype=None)
                data.print_gnuplot_line('plots')
                data.print_plot_data()
                done = True
            elif data.type['full']:
                if not data.read_conv_res_from_file(s_name(structure)+'.conv_res'):
                    print('| Full type calculation but the conv_res file is not available, trying to reconstruct')
                    data.read()
                    data.find_conv_pars(self['tol'])
                    data.print_conv_res()
                data.read(subset='.conv')
                if len(data.data) == 0:
                    print('| Full type calculation but no data found.')
                    break
                if len(data.data) < 4:
                    print('| Full type calculation but no complete data found.')
                    for item in data.data:
                        print(item)
                    break
                if data.test_full_kp_results(tol_rel=1, tol_abs=0.0015):
                    print('| Full type calculation and the full results agree with the parm_scr.'
                          ' All_done for this compound.')
                    data.full_res.update({'all_done': True})
                    data.print_full_res()
                    done = True
                    # data.print_plot_data()
                    self.code_interface.store_results(name=s_name(structure))
                else:
                    print('| Full type calculation but the full results do not agree with the parm_scr.')
                    print('| Increase the tol to find better converged parameters and test the full grid again.')
                    print('| TODO')
                    data.full_res.update({'remark': 'no agreement at high dens kp mesh,', 'all_done': True})
                    # read the system specific tol for System.conv_res
                    # if it's not there create it from the global tol
                    # reduce tol
                    # set data.type to convergence
                    # loop
                    done = True
            else:
                # neither parm_scr nor full: nothing to process, terminate loop
                done = True
    elif self.data['test']:
        data.read()
        data.set_type()
        data.print_plot_data()
    else:
        data.read()
        data.set_type()
        data.print_plot_data()
def create_input(self):
    """
    Create the VASP input files for this job ('prep', 'G0W0', 'GW0' or
    'scGW0') in a directory derived from the structure name and the scanned
    parameter values held in self.option.
    """
    option_name = ''
    path_add = ''
    if self.spec['converge'] and self.converged:
        # converged runs go into a separate '<name>.conv' tree
        path_add = '.conv'
    if self.option is None:
        path = s_name(self.structure)
    else:
        # one subdirectory per prep-parameter value
        path = os.path.join(
            s_name(self.structure) + path_add,
            str(self.option['test_prep']) + str(self.option['value_prep']))
        if 'test' in self.option.keys():
            # GW-level parameter becomes a suffix on the job directory name
            option_name = '.' + str(self.option['test']) + str(
                self.option['value'])
    if self.job == 'prep':
        inpset = GWscDFTPrepVaspInputSet(
            self.structure, self.spec, functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            # during parameter scanning use a coarse k-mesh and fixed high ENCUT
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWscDFTPrepVaspInputSet(
                self.structure, spec_tmp, functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            # only apply the prep parameter if this input set actually knows it
            if self.option[
                    'test_prep'] in GWscDFTPrepVaspInputSet.get_defaults_convs(
                    ).keys() or self.option[
                        'test_prep'] in GWscDFTPrepVaspInputSet.get_defaults_tests(
                        ).keys():
                inpset.set_test(self.option['test_prep'],
                                self.option['value_prep'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        inpset.write_input(self.structure, path)
        # second stage of 'prep': the diagonalization run, only INCAR differs
        inpset = GWDFTDiagVaspInputSet(self.structure, self.spec,
                                       functional=self.spec['functional'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWDFTDiagVaspInputSet(
                self.structure, spec_tmp, functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            inpset.set_test(self.option['test_prep'],
                            self.option['value_prep'])
        inpset.get_incar(self.structure).write_file(
            os.path.join(path, 'INCAR.DIAG'))
    if self.job == 'G0W0':
        inpset = GWG0W0VaspInputSet(self.structure, self.spec,
                                    functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWG0W0VaspInputSet(self.structure, spec_tmp,
                                        functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            # apply both the prep-level and the GW-level scanned parameters
            inpset.set_test(self.option['test_prep'],
                            self.option['value_prep'])
            inpset.set_test(self.option['test'], self.option['value'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        # NOTE(review): wannier calls commented out, both branches identical
        if self.spec['kp_grid_dens'] > 20:
            #inpset.wannier_on()
            inpset.write_input(self.structure,
                               os.path.join(path, 'G0W0' + option_name))
            #w_inpset = Wannier90InputSet(self.spec)
            #w_inpset.write_file(self.structure, os.path.join(path, 'G0W0'+option_name))
        else:
            inpset.write_input(self.structure,
                               os.path.join(path, 'G0W0' + option_name))
    if self.job == 'GW0':
        inpset = GWG0W0VaspInputSet(self.structure, self.spec,
                                    functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWG0W0VaspInputSet(self.structure, spec_tmp,
                                        functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            inpset.set_test(self.option['test_prep'],
                            self.option['value_prep'])
            inpset.set_test(self.option['test'], self.option['value'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        # GW0: iterate G self-consistently
        inpset.gw0_on()
        if self.spec['kp_grid_dens'] > 20:
            #inpset.wannier_on()
            inpset.write_input(self.structure,
                               os.path.join(path, 'GW0' + option_name))
            #w_inpset = Wannier90InputSet(self.spec)
            #w_inpset.write_file(self.structure, os.path.join(path, 'GW0'+option_name))
        else:
            inpset.write_input(self.structure,
                               os.path.join(path, 'GW0' + option_name))
    if self.job == 'scGW0':
        inpset = GWG0W0VaspInputSet(self.structure, self.spec,
                                    functional=self.spec['functional'])
        if self.spec['converge'] and not self.converged:
            spec_tmp = self.spec.copy()
            spec_tmp.update({'kp_grid_dens': 2})
            inpset = GWG0W0VaspInputSet(self.structure, spec_tmp,
                                        functional=self.spec['functional'])
            inpset.incar_settings.update({"ENCUT": 800})
        if self.spec['test'] or self.spec['converge']:
            inpset.set_test(self.option['test_prep'],
                            self.option['value_prep'])
            inpset.set_test(self.option['test'], self.option['value'])
        if self.spec["prec"] == "h":
            inpset.set_prec_high()
        # scGW0: quasi-particle self-consistent variant
        inpset.gw0_on(qpsc=True)
        if self.spec['kp_grid_dens'] > 20:
            # dense k-mesh: also produce wannier90 input for interpolation
            inpset.wannier_on()
            inpset.write_input(self.structure,
                               os.path.join(path, 'scGW0' + option_name))
            w_inpset = Wannier90InputSet(self.spec)
            w_inpset.write_file(self.structure,
                                os.path.join(path, 'scGW0' + option_name))
        else:
            inpset.write_input(self.structure,
                               os.path.join(path, 'scGW0' + option_name))
def insert_in_database(self, structure, clean_on_ok=False, db_name='GW_results', collection='general'):
    """
    insert the convergence data and the 'sigres' in a database

    The query key is (system, item, spec hash, extra-vars hash, pseudos);
    result/band/data files are stored via GridFS. An existing single match is
    updated in place; more than one match is reported as a duplicate.
    NOTE(review): pymongo.Connection and raw_input are legacy (py2-era) APIs.
    """
    data = GWConvergenceData(spec=self, structure=structure)
    success = data.read_conv_res_from_file(os.path.join(s_name(structure)+'.res', s_name(structure)+'.conv_res'))
    con_dat = self.code_interface.read_convergence_data(s_name(structure)+'.res')
    try:
        # optional user-supplied extra variables, stored as a python literal
        f = open('extra_abivars', mode='r')
        extra = ast.literal_eval(f.read())
        f.close()
    except (OSError, IOError):
        extra = None
    ps = self.code_interface.read_ps_dir()
    results_file = os.path.join(s_name(structure)+'.res', self.code_interface.gw_data_file)
    ksbands_file = os.path.join(s_name(structure)+'.res', self.code_interface.ks_bands_file)
    data_file = os.path.join(s_name(structure)+'.res', s_name(structure)+'.data')
    if success and con_dat is not None:
        # the query uniquely identifies this calculation setup
        query = {'system': s_name(structure),
                 'item': structure.item,
                 'spec_hash': hash(self),
                 'extra_vars_hash': hash(None) if extra is None else hash(frozenset(extra.items())),
                 'ps': ps}
        print('query:', query)
        entry = copy.deepcopy(query)
        entry.update({'conv_res': data.conv_res,
                      'spec': self.to_dict(),
                      'extra_vars': extra,
                      'structure': structure.as_dict(),
                      'gw_results': con_dat,
                      'results_file': results_file,
                      'ksbands_file': ksbands_file,
                      'data_file': data_file})
        # generic section that should go into the base class like
        # insert_in_database(query, entry, db_name, collection, server="marilyn.pcpm.ucl.ac.be")
        local_serv = pymongo.Connection("marilyn.pcpm.ucl.ac.be")
        try:
            user = os.environ['MAR_USER']
        except KeyError:
            user = raw_input('DataBase user name: ')
        try:
            pwd = os.environ['MAR_PAS']
        except KeyError:
            pwd = raw_input('DataBase pwd: ')
        db = local_serv[db_name]
        db.authenticate(user, pwd)
        col = db[collection]
        print(col)
        gfs = gridfs.GridFS(db)
        count = col.find(query).count()
        if count == 0:
            # new entry: replace the file-path fields by GridFS ids
            try:
                with open(entry['results_file'], 'r') as f:
                    entry['results_file'] = gfs.put(f.read())
            except IOError:
                print(entry['results_file'], 'not found')
            try:
                with open(entry['ksbands_file'], 'r') as f:
                    entry['ksbands_file'] = gfs.put(f.read())
            except IOError:
                print(entry['ksbands_file'], 'not found')
            try:
                with open(entry['data_file'], 'r') as f:
                    entry['data_file'] = gfs.put(f.read())
            except IOError:
                print(entry['data_file'], 'not found')
            col.insert(entry)
            print('inserted', s_name(structure))
        elif count == 1:
            # existing entry: drop its stored files, then re-upload fresh ones
            new_entry = col.find_one(query)
            try:
                print('removing file ', new_entry['results_file'], 'from db')
                gfs.remove(new_entry['results_file'])
            except:
                print('remove failed')
            try:
                print('removing file ', new_entry['ksbands_file'], 'from db')
                gfs.remove(new_entry['ksbands_file'])
            except:
                print('remove failed')
            try:
                print('removing file ', new_entry['data_file'], 'from db')
                gfs.remove(new_entry['data_file'])
            except:
                print('remove failed')
            new_entry.update(entry)
            print('adding', new_entry['results_file'], new_entry['data_file'])
            try:
                with open(new_entry['results_file'], 'r') as f:
                    new_entry['results_file'] = gfs.put(f)
            except IOError:
                print(new_entry['results_file'], 'not found')
            try:
                with open(new_entry['ksbands_file'], 'r') as f:
                    new_entry['ksbands_file'] = gfs.put(f)
            except IOError:
                print(new_entry['ksbands_file'], 'not found')
            try:
                with open(new_entry['data_file'], 'r') as f:
                    new_entry['data_file'] = gfs.put(f)
            except IOError:
                print(new_entry['data_file'], 'not found')
            print('as ', new_entry['results_file'], new_entry['data_file'])
            col.save(new_entry)
            print('updated', s_name(structure))
        else:
            print('duplicate entry ... ')
        local_serv.disconnect()
def insert_in_database(self, structure, clean_on_ok=False, db_name='GW_results', collection='general'):
    """
    insert the convergence data and the 'sigres' in a database

    The query key is (system, item, spec hash, extra-vars hash, pseudos);
    result/band/data files are stored via GridFS. An existing single match is
    updated in place; more than one match is reported as a duplicate.
    NOTE(review): pymongo.Connection and raw_input are legacy (py2-era) APIs.
    """
    data = GWConvergenceData(spec=self, structure=structure)
    success = data.read_conv_res_from_file(
        os.path.join(
            s_name(structure) + '.res',
            s_name(structure) + '.conv_res'))
    con_dat = self.code_interface.read_convergence_data(
        s_name(structure) + '.res')
    try:
        # optional user-supplied extra variables, stored as a python literal
        f = open('extra_abivars', mode='r')
        extra = ast.literal_eval(f.read())
        f.close()
    except (OSError, IOError):
        extra = None
    ps = self.code_interface.read_ps_dir()
    results_file = os.path.join(
        s_name(structure) + '.res', self.code_interface.gw_data_file)
    ksbands_file = os.path.join(
        s_name(structure) + '.res', self.code_interface.ks_bands_file)
    data_file = os.path.join(
        s_name(structure) + '.res',
        s_name(structure) + '.data')
    if success and con_dat is not None:
        # the query uniquely identifies this calculation setup
        query = {
            'system': s_name(structure),
            'item': structure.item,
            'spec_hash': hash(self),
            'extra_vars_hash':
            hash(None) if extra is None else hash(frozenset(extra.items())),
            'ps': ps
        }
        print('query:', query)
        entry = copy.deepcopy(query)
        entry.update({
            'conv_res': data.conv_res,
            'spec': self.to_dict(),
            'extra_vars': extra,
            'structure': structure.as_dict(),
            'gw_results': con_dat,
            'results_file': results_file,
            'ksbands_file': ksbands_file,
            'data_file': data_file
        })
        # generic section that should go into the base class like
        # insert_in_database(query, entry, db_name, collection, server="marilyn.pcpm.ucl.ac.be")
        local_serv = pymongo.Connection("marilyn.pcpm.ucl.ac.be")
        try:
            user = os.environ['MAR_USER']
        except KeyError:
            user = raw_input('DataBase user name: ')
        try:
            pwd = os.environ['MAR_PAS']
        except KeyError:
            pwd = raw_input('DataBase pwd: ')
        db = local_serv[db_name]
        db.authenticate(user, pwd)
        col = db[collection]
        print(col)
        gfs = gridfs.GridFS(db)
        count = col.find(query).count()
        if count == 0:
            # new entry: replace the file-path fields by GridFS ids
            try:
                with open(entry['results_file'], 'r') as f:
                    entry['results_file'] = gfs.put(f.read())
            except IOError:
                print(entry['results_file'], 'not found')
            try:
                with open(entry['ksbands_file'], 'r') as f:
                    entry['ksbands_file'] = gfs.put(f.read())
            except IOError:
                print(entry['ksbands_file'], 'not found')
            try:
                with open(entry['data_file'], 'r') as f:
                    entry['data_file'] = gfs.put(f.read())
            except IOError:
                print(entry['data_file'], 'not found')
            col.insert(entry)
            print('inserted', s_name(structure))
        elif count == 1:
            # existing entry: drop its stored files, then re-upload fresh ones
            new_entry = col.find_one(query)
            try:
                print('removing file ', new_entry['results_file'], 'from db')
                gfs.remove(new_entry['results_file'])
            except:
                print('remove failed')
            try:
                print('removing file ', new_entry['ksbands_file'], 'from db')
                gfs.remove(new_entry['ksbands_file'])
            except:
                print('remove failed')
            try:
                print('removing file ', new_entry['data_file'], 'from db')
                gfs.remove(new_entry['data_file'])
            except:
                print('remove failed')
            new_entry.update(entry)
            print('adding', new_entry['results_file'], new_entry['data_file'])
            try:
                with open(new_entry['results_file'], 'r') as f:
                    new_entry['results_file'] = gfs.put(f)
            except IOError:
                print(new_entry['results_file'], 'not found')
            try:
                with open(new_entry['ksbands_file'], 'r') as f:
                    new_entry['ksbands_file'] = gfs.put(f)
            except IOError:
                print(new_entry['ksbands_file'], 'not found')
            try:
                with open(new_entry['data_file'], 'r') as f:
                    new_entry['data_file'] = gfs.put(f)
            except IOError:
                print(new_entry['data_file'], 'not found')
            print('as ', new_entry['results_file'], new_entry['data_file'])
            col.save(new_entry)
            print('updated', s_name(structure))
        else:
            print('duplicate entry ... ')
        local_serv.disconnect()
def create_job_script(self, add_to_collection=True, mode='pbspro'):
    """
    Create the queue submission script 'job' for this job's directory and,
    when add_to_collection is True, append its submission to the global
    'job_collection' script (prep jobs) or to the prep job script (GW jobs).

    mode selects the scheduler: 'slurm' (Ceci clusters) or 'pbspro' (Zenobe).

    Fix: open() was called as open(name=..., mode=...); 'name' is not a valid
    keyword for the open() builtin in Python 3 (it was the positional
    parameter name in Python 2 only). All such calls now pass the path
    positionally — identical behavior on Python 2, works on Python 3.
    """
    if mode == 'slurm':
        npar = GWscDFTPrepVaspInputSet(self.structure, self.spec,
                                       functional=self.spec['functional']).get_npar(self.structure)
        if self.option is not None:
            option_prep_name = str(self.option['test_prep']) + str(self.option['value_prep'])
            # NOTE(review): if 'test' is absent here, option_name stays
            # undefined and the GW branches below would raise NameError;
            # in practice GW jobs always carry a 'test' key — confirm
            if 'test' in self.option.keys():
                option_name = str('.') + str(self.option['test']) + str(self.option['value'])
        else:
            option_prep_name = option_name = ''
        # npar = int(os.environ['NPARGWCALC'])
        header = ("#!/bin/bash \n"
                  "## standard header for Ceci clusters ## \n"
                  "#SBATCH [email protected] \n"
                  "#SBATCH --mail-type=ALL\n"
                  "#SBATCH --time=2-24:0:0 \n"
                  "#SBATCH --cpus-per-task=1 \n"
                  "#SBATCH --mem-per-cpu=4000 \n")
        path_add = ''
        if self.spec['converge'] and self.converged:
            path_add = '.conv'
        if self.job == 'prep':
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            # create this job
            job_file = open(os.path.join(path, 'job'), mode='w')
            job_file.write(header)
            job_file.write('#SBATCH --job-name='+s_name(self.structure)+self.job+'\n')
            job_file.write('#SBATCH --ntasks='+str(npar)+'\n')
            job_file.write('module load vasp \n')
            job_file.write('mpirun vasp \n')
            # keep the scf OUTCAR, then rerun with the diagonalization INCAR
            job_file.write('cp OUTCAR OUTCAR.sc \n')
            job_file.write('cp INCAR.DIAG INCAR \n')
            job_file.write('mpirun vasp \n')
            job_file.write('cp OUTCAR OUTCAR.diag \n')
            job_file.close()
            os.chmod(os.path.join(path, 'job'), stat.S_IRWXU)
            if add_to_collection:
                job_file = open("job_collection", mode='a')
                job_file.write('cd ' + path + ' \n')
                job_file.write('sbatch job \n')
                job_file.write('cd .. \n')
                job_file.close()
                os.chmod("job_collection", stat.S_IRWXU)
        if self.job in ['G0W0', 'GW0', 'scGW0']:
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name, self.job + option_name)
            # create this job
            job_file = open(path+'/job', mode='w')
            job_file.write(header)
            job_file.write('#SBATCH --job-name='+s_name(self.structure)+self.job+'\n')
            job_file.write('#SBATCH --ntasks='+str(npar)+'\n')
            job_file.write('module load vasp/5.2_par_wannier90 \n')
            # reuse the prep run's charge density and wavefunctions
            job_file.write('cp ../CHGCAR ../WAVECAR ../WAVEDER . \n')
            job_file.write('mpirun vasp \n')
            job_file.write('rm W* \n')
            #job_file.write('workon pymatgen-GW; get_gap > gap; deactivate')
            #job_file.write('echo '+path+'`get_gap` >> ../../gaps.dat')
            job_file.close()
            os.chmod(path+'/job', stat.S_IRWXU)
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            # 'append submission of this job script to that of prep for this structure'
            if add_to_collection:
                job_file = open(os.path.join(path, 'job'), mode='a')
                job_file.write('cd ' + self.job + option_name + ' \n')
                job_file.write('sbatch job \n')
                job_file.write('cd .. \n')
                job_file.close()
    elif mode == 'pbspro':
        npar = GWscDFTPrepVaspInputSet(self.structure, self.spec,
                                       functional=self.spec['functional']).get_npar(self.structure)
        #npar = 96
        if self.option is not None:
            option_prep_name = str(self.option['test_prep']) + str(self.option['value_prep'])
            if 'test' in self.option.keys():
                option_name = str('.') + str(self.option['test']) + str(self.option['value'])
        else:
            option_prep_name = option_name = ''
        # npar = int(os.environ['NPARGWCALC'])
        header = str("#!/bin/bash \n" +
                     "## standard header for zenobe ## \n" +
                     "#!/bin/bash \n" +
                     "#PBS -q main\n" +
                     "#PBS -l walltime=24:0:00\n" +
                     "#PBS -r y \n" +
                     "#PBS -m abe\n" +
                     "#PBS -M [email protected]\n" +
                     "#PBS -W group_list=naps\n" +
                     "#PBS -l pvmem=1900mb\n")
        path_add = ''
        if self.spec['converge'] and self.converged:
            path_add = '.conv'
        if self.job == 'prep':
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            abs_path = os.path.abspath(path)
            # create this job
            job_file = open(os.path.join(path, 'job'), mode='w')
            job_file.write(header)
            job_file.write("#PBS -l select=%s:ncpus=1:vmem=1900mb:mpiprocs=1:ompthreads=1\n" % str(npar))
            job_file.write('#PBS -o %s/queue.qout\n#PBS -e %s/queue.qerr\ncd %s\n' % (abs_path, abs_path, abs_path))
            job_file.write('mpirun -n %s vasp \n' % str(npar))
            # keep the scf OUTCAR, then rerun with the diagonalization INCAR
            job_file.write('cp OUTCAR OUTCAR.sc \n')
            job_file.write('cp INCAR.DIAG INCAR \n')
            job_file.write('mpirun -n %s vasp \n' % str(npar))
            job_file.write('cp OUTCAR OUTCAR.diag \n')
            job_file.close()
            os.chmod(os.path.join(path, 'job'), stat.S_IRWXU)
            if add_to_collection:
                job_file = open("job_collection", mode='a')
                job_file.write('cd ' + path + ' \n')
                job_file.write('qsub job \n')
                job_file.write('cd ../.. \n')
                job_file.close()
                os.chmod("job_collection", stat.S_IRWXU)
        if self.job in ['G0W0', 'GW0', 'scGW0']:
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name, self.job + option_name)
            abs_path = os.path.abspath(path)
            # create this job
            job_file = open(path+'/job', mode='w')
            job_file.write(header)
            job_file.write("#PBS -l select=%s:ncpus=1:vmem=1000mb:mpiprocs=1:ompthreads=1\n" % str(npar))
            job_file.write('#PBS -o %s/queue.qout\n#PBS -e %s/queue.qerr\ncd %s\n' % (abs_path, abs_path, abs_path))
            # reuse the prep run's charge density and wavefunctions
            job_file.write('cp ../CHGCAR ../WAVECAR ../WAVEDER . \n')
            job_file.write('mpirun -n %s vasp \n' % str(npar))
            job_file.write('rm W* \n')
            #job_file.write('workon pymatgen-GW; get_gap > gap; deactivate')
            #job_file.write('echo '+path+'`get_gap` >> ../../gaps.dat')
            job_file.close()
            os.chmod(path+'/job', stat.S_IRWXU)
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            # 'append submission of this job script to that of prep for this structure'
            if add_to_collection:
                job_file = open(os.path.join(path, 'job'), mode='a')
                job_file.write('cd ' + self.job + option_name + ' \n')
                job_file.write('qsub job \n')
                job_file.write('cd .. \n')
                job_file.close()
def insert_in_database(self, structure, clean_on_ok=False, db_name="GW_results", collection="general"):
    """
    Insert the convergence data and the GW result files for *structure* into a
    MongoDB database on marilyn.pcpm.ucl.ac.be.

    Documents are keyed on (system name, item, spec hash, extra-vars hash,
    pseudopotential dir). Zero matches inserts a new document, exactly one
    match is updated in place, more than one is reported as a duplicate and
    left untouched. Result files are stored via GridFS and referenced by id.

    :param structure: pymatgen structure; expected to carry an ``item`` attribute
    :param clean_on_ok: accepted for API compatibility; not used in this body
    :param db_name: name of the MongoDB database to write to
    :param collection: name of the collection inside ``db_name``
    """
    data = GWConvergenceData(spec=self, structure=structure)
    # the .conv_res file holds the converged parameter set written by the flow
    success = data.read_conv_res_from_file(
        os.path.join(s_name(structure) + ".res", s_name(structure) + ".conv_res"))
    con_dat = self.code_interface.read_convergence_data(s_name(structure) + ".res")
    # optional user-supplied extra abinit variables; absence is not an error
    try:
        f = open("extra_abivars", mode="r")
        extra = ast.literal_eval(f.read())
        f.close()
    except (OSError, IOError):
        extra = None
    ps = self.code_interface.read_ps_dir()
    results_file = os.path.join(s_name(structure) + ".res", self.code_interface.gw_data_file)
    data_file = os.path.join(s_name(structure) + ".res", s_name(structure) + ".data")
    if success and con_dat is not None:
        # this query uniquely identifies the calculation in the collection
        query = {"system": s_name(structure),
                 "item": structure.item,
                 "spec_hash": hash(self),
                 "extra_vars_hash": hash(None) if extra is None else hash(frozenset(extra.items())),
                 "ps": ps}
        print("query:", query)
        entry = copy.deepcopy(query)
        entry.update({"conv_res": data.conv_res,
                      "spec": self.to_dict(),
                      "extra_vars": extra,
                      "structure": structure.as_dict(),
                      "gw_results": con_dat,
                      "results_file": results_file,
                      "data_file": data_file})
        # generic section that should go into the base class like
        # insert_in_database(query, entry, db_name, collection, server="marilyn.pcpm.ucl.ac.be")
        local_serv = pymongo.Connection("marilyn.pcpm.ucl.ac.be")
        # credentials come from the environment when set, otherwise prompt
        try:
            user = os.environ["MAR_USER"]
        except KeyError:
            user = raw_input("DataBase user name: ")
        try:
            pwd = os.environ["MAR_PAS"]
        except KeyError:
            pwd = raw_input("DataBase pwd: ")
        db = local_serv[db_name]
        db.authenticate(user, pwd)
        col = db[collection]
        print(col)
        gfs = gridfs.GridFS(db)
        count = col.find(query).count()
        if count == 0:
            # new entry: replace the file paths by GridFS ids before inserting;
            # a missing file is reported but does not abort the insert
            try:
                with open(entry["results_file"], "r") as f:
                    entry["results_file"] = gfs.put(f.read())
            except IOError:
                print(entry["results_file"], "not found")
            try:
                with open(entry["data_file"], "r") as f:
                    entry["data_file"] = gfs.put(f.read())
            except IOError:
                print(entry["data_file"], "not found")
            col.insert(entry)
            print("inserted", s_name(structure))
        elif count == 1:
            # existing entry: drop the old GridFS blobs (best effort), then
            # store the new files and save the merged document
            new_entry = col.find_one(query)
            try:
                print("removing file ", new_entry["results_file"], "from db")
                gfs.remove(new_entry["results_file"])
            except:
                print("remove failed")
            try:
                print("removing file ", new_entry["data_file"], "from db")
                gfs.remove(new_entry["data_file"])
            except:
                print("remove failed")
            new_entry.update(entry)
            print("adding", new_entry["results_file"], new_entry["data_file"])
            # NOTE(review): this branch passes the open file object to gfs.put,
            # while the insert branch passes f.read() -- confirm intentional
            try:
                with open(new_entry["results_file"], "r") as f:
                    new_entry["results_file"] = gfs.put(f)
            except IOError:
                print(new_entry["results_file"], "not found")
            try:
                with open(new_entry["data_file"], "r") as f:
                    new_entry["data_file"] = gfs.put(f)
            except IOError:
                print(new_entry["data_file"], "not found")
            print("as ", new_entry["results_file"], new_entry["data_file"])
            col.save(new_entry)
            print("updated", s_name(structure))
        else:
            print("duplicate entry ... ")
        local_serv.disconnect()
def create_job_script(self, add_to_collection=True, mode='pbspro'):
    """
    Write a queue submission script named 'job' for the current job
    (``self.job``) of the current structure.

    For 'prep' jobs the script runs VASP twice (scf then diag); for GW jobs
    ('G0W0', 'GW0', 'scGW0') it copies the prep wave functions, runs VASP
    once and removes the W* intermediates.

    :param add_to_collection: when True, append the submission command for a
        'prep' script to the 'job_collection' master script, and for a GW
        script to the prep job script of the same structure.
    :param mode: 'slurm' writes a script for the CECI clusters,
        'pbspro' (the default) writes a script for Zenobe. Any other value
        is silently ignored (matches previous behavior).
    """
    if mode not in ('slurm', 'pbspro'):
        return
    # BUGFIX: option_name was previously assigned only when self.option
    # contained a 'test' key; with an option dict lacking 'test' the GW
    # branch raised NameError. Both names now default to the empty string.
    option_prep_name = option_name = ''
    if self.option is not None:
        option_prep_name = str(self.option['test_prep']) + str(self.option['value_prep'])
        if 'test' in self.option:
            option_name = '.' + str(self.option['test']) + str(self.option['value'])
    npar = GWscDFTPrepVaspInputSet(self.structure, self.spec,
                                   functional=self.spec['functional']).get_npar(self.structure)
    # converged calculations live in a '<name>.conv' directory
    path_add = '.conv' if self.spec['converge'] and self.converged else ''
    if mode == 'slurm':
        header = ("#!/bin/bash \n"
                  "## standard header for Ceci clusters ## \n"
                  "#SBATCH [email protected] \n"
                  "#SBATCH --mail-type=ALL\n"
                  "#SBATCH --time=2-24:0:0 \n"
                  "#SBATCH --cpus-per-task=1 \n"
                  "#SBATCH --mem-per-cpu=4000 \n")
        if self.job == 'prep':
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            # scf + diag preparation run: two vasp invocations
            job_file = open(os.path.join(path, 'job'), mode='w')
            job_file.write(header)
            job_file.write('#SBATCH --job-name=' + s_name(self.structure) + self.job + '\n')
            job_file.write('#SBATCH --ntasks=' + str(npar) + '\n')
            job_file.write('module load vasp \n')
            job_file.write('mpirun vasp \n')
            job_file.write('cp OUTCAR OUTCAR.sc \n')
            job_file.write('cp INCAR.DIAG INCAR \n')
            job_file.write('mpirun vasp \n')
            job_file.write('cp OUTCAR OUTCAR.diag \n')
            job_file.close()
            os.chmod(os.path.join(path, 'job'), stat.S_IRWXU)
            if add_to_collection:
                job_file = open("job_collection", mode='a')
                job_file.write('cd ' + path + ' \n')
                job_file.write('sbatch job \n')
                job_file.write('cd .. \n')
                job_file.close()
                os.chmod("job_collection", stat.S_IRWXU)
        if self.job in ['G0W0', 'GW0', 'scGW0']:
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name,
                                self.job + option_name)
            job_file = open(path + '/job', mode='w')
            job_file.write(header)
            job_file.write('#SBATCH --job-name=' + s_name(self.structure) + self.job + '\n')
            job_file.write('#SBATCH --ntasks=' + str(npar) + '\n')
            job_file.write('module load vasp/5.2_par_wannier90 \n')
            job_file.write('cp ../CHGCAR ../WAVECAR ../WAVEDER . \n')
            job_file.write('mpirun vasp \n')
            job_file.write('rm W* \n')
            job_file.close()
            os.chmod(path + '/job', stat.S_IRWXU)
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            # append submission of this job script to that of prep for this structure
            if add_to_collection:
                job_file = open(os.path.join(path, 'job'), mode='a')
                job_file.write('cd ' + self.job + option_name + ' \n')
                job_file.write('sbatch job \n')
                job_file.write('cd .. \n')
                job_file.close()
    elif mode == 'pbspro':
        # NOTE: the duplicated "#!/bin/bash" line is kept from the original header
        header = ("#!/bin/bash \n"
                  "## standard header for zenobe ## \n"
                  "#!/bin/bash \n"
                  "#PBS -q main\n"
                  "#PBS -l walltime=24:0:00\n"
                  "#PBS -r y \n"
                  "#PBS -m abe\n"
                  "#PBS -M [email protected]\n"
                  "#PBS -W group_list=naps\n"
                  "#PBS -l pvmem=1900mb\n")
        if self.job == 'prep':
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            abs_path = os.path.abspath(path)
            # scf + diag preparation run: two vasp invocations
            job_file = open(os.path.join(path, 'job'), mode='w')
            job_file.write(header)
            job_file.write("#PBS -l select=%s:ncpus=1:vmem=1900mb:mpiprocs=1:ompthreads=1\n" % str(npar))
            job_file.write('#PBS -o %s/queue.qout\n#PBS -e %s/queue.qerr\ncd %s\n'
                           % (abs_path, abs_path, abs_path))
            job_file.write('mpirun -n %s vasp \n' % str(npar))
            job_file.write('cp OUTCAR OUTCAR.sc \n')
            job_file.write('cp INCAR.DIAG INCAR \n')
            job_file.write('mpirun -n %s vasp \n' % str(npar))
            job_file.write('cp OUTCAR OUTCAR.diag \n')
            job_file.close()
            os.chmod(os.path.join(path, 'job'), stat.S_IRWXU)
            if add_to_collection:
                job_file = open("job_collection", mode='a')
                job_file.write('cd ' + path + ' \n')
                job_file.write('qsub job \n')
                job_file.write('cd ../.. \n')
                job_file.close()
                os.chmod("job_collection", stat.S_IRWXU)
        if self.job in ['G0W0', 'GW0', 'scGW0']:
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name,
                                self.job + option_name)
            abs_path = os.path.abspath(path)
            job_file = open(path + '/job', mode='w')
            job_file.write(header)
            job_file.write("#PBS -l select=%s:ncpus=1:vmem=1000mb:mpiprocs=1:ompthreads=1\n" % str(npar))
            job_file.write('#PBS -o %s/queue.qout\n#PBS -e %s/queue.qerr\ncd %s\n'
                           % (abs_path, abs_path, abs_path))
            job_file.write('cp ../CHGCAR ../WAVECAR ../WAVEDER . \n')
            job_file.write('mpirun -n %s vasp \n' % str(npar))
            job_file.write('rm W* \n')
            job_file.close()
            os.chmod(path + '/job', stat.S_IRWXU)
            path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            # append submission of this job script to that of prep for this structure
            if add_to_collection:
                job_file = open(os.path.join(path, 'job'), mode='a')
                job_file.write('cd ' + self.job + option_name + ' \n')
                job_file.write('qsub job \n')
                job_file.write('cd .. \n')
                job_file.close()
def loop_structures(self, mode="i"):
    """
    Read the structures specified in the spec, attach the special k-points
    (vbm/cbm) where available, and execute the requested action per structure.

    :param mode:
        'i': generate input for each structure (excecute_flow)
        'o': parse output (process_data)
        'w': print all results (print_results)
        's': insert results in the database (insert_in_database)
    """
    print("loop structures mode ", mode)
    # Materials Project API key must be present in the environment
    mp_key = os.environ["MP_KEY"]
    # hard-coded benchmark set used when the source is 'mp-vasp'
    mp_list_vasp = ["mp-149", "mp-2534", "mp-8062", "mp-2469", "mp-1550",
                    "mp-830", "mp-1986", "mp-10695", "mp-66", "mp-1639",
                    "mp-1265", "mp-1138", "mp-23155", "mp-111"]
    if self.data["source"] == "mp-vasp":
        items_list = mp_list_vasp
    elif self.data["source"] in ["poscar", "cif"]:
        # every file in the working directory is treated as a candidate item;
        # non-matching names are skipped by the per-item dispatch below
        files = os.listdir(".")
        items_list = files
    elif self.data["source"] == "mar_exp":
        # experimental band-gap collection hosted on the marilyn server
        items_list = []
        local_serv = pymongo.Connection("marilyn.pcpm.ucl.ac.be")
        local_db_gaps = local_serv.band_gaps
        pwd = os.environ["MAR_PAS"]
        local_db_gaps.authenticate("setten", pwd)
        for c in local_db_gaps.exp.find():
            name = (Structure.from_dict(c["icsd_data"]["structure"]).composition.reduced_formula,
                    c["icsd_id"],
                    c["MP_id"])
            print(name)
            # Structure.from_dict(c['icsd_data']['structure']).to(fmt='cif',filename=name)
            items_list.append({"name": "mp-" + c["MP_id"], "icsd": c["icsd_id"], "mp": c["MP_id"]})
    else:
        # otherwise 'source' names a file containing one item per line
        items_list = [line.strip() for line in open(self.data["source"])]
    for item in items_list:
        print("\n")
        # special case, this should be encaptulated
        if self.data["source"] == "mar_exp":
            print("structure from marilyn", item["name"], item["icsd"], item["mp"])
            exp = local_db_gaps.exp.find({"MP_id": item["mp"]})[0]
            structure = Structure.from_dict(exp["icsd_data"]["structure"])
            structure = refine_structure(structure)
            structure.to(fmt="cif", filename=item["name"])
            # fetch the k-point list from a matching stored GGA band structure,
            # falling back to Gamma-only if none is found
            try:
                kpts = local_db_gaps.GGA_BS.find({"transformations.history.0.id": item["icsd"]})[0]["calculations"][-1]["band_structure"]["kpoints"]
            except (IndexError, KeyError):
                kpts = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
            structure.kpts = kpts
            print("kpoints:", structure.kpts[0], structure.kpts[1])
            structure.item = item["name"]
        else:
            if item.startswith("POSCAR_"):
                structure = pmg.read_structure(item)
                comment = Poscar.from_file(item).comment
                # print comment
                if comment.startswith("gap"):
                    # POSCAR comment encodes:
                    # gap <vbm_label> <vx> <vy> <vz> <cbm_label> <cx> <cy> <cz>
                    structure.vbm_l = comment.split(" ")[1]
                    structure.vbm = (comment.split(" ")[2], comment.split(" ")[3], comment.split(" ")[4])
                    structure.cbm_l = comment.split(" ")[5]
                    structure.cbm = (comment.split(" ")[6], comment.split(" ")[7], comment.split(" ")[8])
                else:
                    # print "no bandstructure information available, adding GG as 'gap'"
                    structure = add_gg_gap(structure)
            elif "xyz" in item:
                structure = pmg.read_structure(item)
                raise NotImplementedError
            elif item.startswith("mp-"):
                with MPRester(mp_key) as mp_database:
                    print("structure from mp database", item)
                    structure = mp_database.get_structure_by_material_id(item, final=True)
                    # take vbm/cbm labels and fractional coordinates from the
                    # stored band structure; on any lookup failure fall back to
                    # the Gamma-Gamma 'gap'
                    try:
                        bandstructure = mp_database.get_bandstructure_by_material_id(item)
                        structure.vbm_l = bandstructure.kpoints[bandstructure.get_vbm()["kpoint_index"][0]].label
                        structure.cbm_l = bandstructure.kpoints[bandstructure.get_cbm()["kpoint_index"][0]].label
                        structure.cbm = tuple(bandstructure.kpoints[bandstructure.get_cbm()["kpoint_index"][0]].frac_coords)
                        structure.vbm = tuple(bandstructure.kpoints[bandstructure.get_vbm()["kpoint_index"][0]].frac_coords)
                    except (MPRestError, IndexError, KeyError) as err:
                        # NOTE(review): err.message is Python-2-only
                        print(err.message)
                        structure = add_gg_gap(structure)
            else:
                # not a recognized item name; skip it
                continue
            structure.kpts = [list(structure.cbm), list(structure.vbm)]
            structure.item = item
        print(item, s_name(structure))
        # dispatch to the requested action; failures are reported, not fatal,
        # so one bad structure does not stop the loop
        if mode == "i":
            try:
                self.excecute_flow(structure)
            except Exception as exc:
                print("input generation failed")
                print(exc)
        elif mode == "w":
            try:
                self.print_results(structure)
            except:
                print("writing output failed")
        elif mode == "s":
            try:
                self.insert_in_database(structure)
            except:
                print("database insertion failed")
        elif mode == "o":
            # if os.path.isdir(s_name(structure)) or os.path.isdir(s_name(structure)+'.conv'):
            try:
                self.process_data(structure)
            except:
                print("output parsing failed")
    # make the collected submission script executable after input generation
    if "ceci" in self.data["mode"] and mode == "i":
        os.chmod("job_collection", stat.S_IRWXU)