def test_file_usage(fixture_sandbox, aiida_localhost, generate_calc_job):
    """Test a base template that uses two files."""
    file1_node = orm.SinglefileData(io.BytesIO(b'Content of file 1'))
    file2_node = orm.SinglefileData(io.BytesIO(b'Content of file 2'))

    # Check that the files are correctly copied to the copy list
    entry_point_name = 'templatereplacer'
    inputs = {
        'code': orm.Code(remote_computer_exec=(aiida_localhost, '/bin/bash')),
        'metadata': {
            'options': {
                'resources': {
                    'num_machines': 1,
                    'tot_num_mpiprocs': 1
                }
            }
        },
        'template': orm.Dict(dict={
            'files_to_copy': [('filenode1', 'file1.txt'), ('filenode2', 'file2.txt')],
        }),
        'files': {
            'filenode1': file1_node,
            'filenode2': file2_node
        }
    }

    calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs)

    reference_copy_list = []
    for node_idname, target_path in inputs['template']['files_to_copy']:
        file_node = inputs['files'][node_idname]
        reference_copy_list.append((file_node.uuid, file_node.filename, target_path))

    assert sorted(calc_info.local_copy_list) == sorted(reference_copy_list)
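# For context, a minimal sketch (an assumption, not shown in this file) of the kind of
# `generate_calc_job` fixture used above: it instantiates the calculation process in a
# sandbox folder, calls `prepare_for_submission`, and returns the resulting `CalcInfo`.
import pytest


@pytest.fixture
def generate_calc_job():
    """Return a factory that runs `prepare_for_submission` for a given entry point and inputs."""

    def _generate_calc_job(folder, entry_point_name, inputs=None):
        from aiida.engine.utils import instantiate_process
        from aiida.manage.manager import get_manager
        from aiida.plugins import CalculationFactory

        runner = get_manager().get_runner()
        process_class = CalculationFactory(entry_point_name)
        process = instantiate_process(runner, process_class, **(inputs or {}))
        return process.prepare_for_submission(folder)

    return _generate_calc_job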
def get_file_section():
    """Provide necessary parameter files such as pseudopotentials, basis sets, etc."""
    with open(pathlib.Path(__file__).parent / 'BASIS_MOLOPT', 'rb') as handle:
        basis = orm.SinglefileData(file=handle)

    with open(pathlib.Path(__file__).parent / 'BASIS_MOLOPT_UCL', 'rb') as handle:
        basis_ucl = orm.SinglefileData(file=handle)

    with open(pathlib.Path(__file__).parent / 'GTH_POTENTIALS', 'rb') as handle:
        potential = orm.SinglefileData(file=handle)

    with open(pathlib.Path(__file__).parent / 'dftd3.dat', 'rb') as handle:
        dftd3_params = orm.SinglefileData(file=handle)

    with open(pathlib.Path(__file__).parent / 'xTB_parameters', 'rb') as handle:
        xtb_params = orm.SinglefileData(file=handle)

    return {
        'basis': basis,
        'basis_ucl': basis_ucl,
        'potential': potential,
        'dftd3_params': dftd3_params,
        'xtb_dat': xtb_params,
    }
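# A hedged usage sketch: the dictionary returned above is meant for the `file` input
# namespace of an aiida-cp2k calculation. The code label below is hypothetical, and the
# remaining required inputs (structure, parameters, metadata) are omitted here.
from aiida import orm
from aiida.plugins import CalculationFactory

builder = CalculationFactory('cp2k').get_builder()
builder.code = orm.load_code('cp2k@localhost')  # hypothetical code label
builder.file = get_file_section()  # SinglefileData nodes keyed by name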
def test_provenance_exclude_list(self):
    """Test the functionality of the `CalcInfo.provenance_exclude_list` attribute."""
    import tempfile

    code = orm.Code(input_plugin_name='arithmetic.add', remote_computer_exec=[self.computer, '/bin/true']).store()

    with tempfile.NamedTemporaryFile('w+') as handle:
        handle.write('dummy_content')
        handle.flush()
        file_one = orm.SinglefileData(file=handle.name)

    with tempfile.NamedTemporaryFile('w+') as handle:
        handle.write('dummy_content')
        handle.flush()
        file_two = orm.SinglefileData(file=handle.name)

    inputs = {
        'code': code,
        'files': {
            # Note that the `FileCalcJob` will turn underscores in the key into forward slashes, making a nested hierarchy
            'base_a_sub_one': file_one,
            'base_b_two': file_two,
        },
        'settings': orm.Dict(dict={'provenance_exclude_list': ['base/a/sub/one']}),
        'metadata': {
            'dry_run': True,
            'options': {
                'resources': {
                    'num_machines': 1,
                    'num_mpiprocs_per_machine': 1
                }
            }
        }
    }

    # We perform a `dry_run` because the calculation cannot actually run; the contents will still be
    # written to the node's repository, however, so we can check that it contains the expected contents.
    _, node = launch.run_get_node(FileCalcJob, **inputs)

    self.assertIn('folder', node.dry_run_info)

    # Verify that the folder (representing the node's repository) indeed does not contain the excluded
    # input file. Note, however, that the directory hierarchy should be there, albeit empty.
    self.assertIn('base', node.list_object_names())
    self.assertEqual(sorted(['b']), sorted(node.list_object_names(os.path.join('base'))))
    self.assertEqual(['two'], node.list_object_names(os.path.join('base', 'b')))
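# For context, a minimal sketch of how a `CalcJob` plugin sets this attribute in
# `prepare_for_submission`. The plugin class and file name are hypothetical; `CalcInfo`,
# `CodeInfo`, and the `provenance_exclude_list` attribute are the real aiida-core API.
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.engine import CalcJob


class SketchCalcJob(CalcJob):
    """Hypothetical calculation job excluding one written file from the provenance repository."""

    def prepare_for_submission(self, folder):
        # Input files would be written to `folder` here (omitted in this sketch).
        codeinfo = CodeInfo()
        codeinfo.code_uuid = self.inputs.code.uuid

        calcinfo = CalcInfo()
        calcinfo.codes_info = [codeinfo]
        # Paths listed here are copied to the working directory but are not
        # stored in the calculation node's file repository.
        calcinfo.provenance_exclude_list = ['large_scratch_file.dat']
        return calcinfo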
def generate_inputs():
    """Minimal input for pw2wannier90 calculations."""
    basepath = os.path.dirname(os.path.abspath(__file__))
    nnkp_filepath = os.path.join(basepath, 'fixtures', 'pw2wannier90', 'inputs', 'aiida.nnkp')

    parameters = {
        'inputpp': {
            'write_amn': False,
            'write_mmn': False,
            'write_unk': False,
            'scdm_proj': True,
            'scdm_entanglement': 'isolated',
        }
    }

    settings = {'ADDITIONAL_RETRIEVE_LIST': ['*.amn', '*.mmn', '*.eig']}

    # Since we don't actually run pw2wannier90.x, we only pretend to have the output folder
    # of a parent pw.x calculation. The nnkp file, instead, is real.
    inputs = {
        'parent_folder': orm.FolderData().store(),
        'nnkp_file': orm.SinglefileData(file=nnkp_filepath).store(),
        'parameters': orm.Dict(dict=parameters),
        'settings': orm.Dict(dict=settings),
    }

    return AttributeDict(inputs)
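# A hedged usage sketch: inputs of this shape are intended for the
# `quantumespresso.pw2wannier90` calculation plugin of aiida-quantumespresso;
# a `code` and scheduler resources would still have to be added before running.
from aiida.plugins import CalculationFactory

Pw2wannier90Calculation = CalculationFactory('quantumespresso.pw2wannier90')
inputs = generate_inputs()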
def inner():
    inputs = get_top_level_insb_inputs()
    inputs['fp_run_workflow'] = QuantumEspressoFirstPrinciplesRun
    inputs['fp_run'] = get_qe_specific_fp_run_inputs()
    inputs['code_tbmodels'] = orm.Code.get_from_string('tbmodels')
    inputs['model_evaluation_workflow'] = BandDifferenceModelEvaluation
    inputs['model_evaluation'] = {
        'code_bands_inspect': orm.Code.get_from_string('bands_inspect')
    }
    inputs['wannier_parameters'] = orm.Dict(dict=dict(
        num_wann=14,
        num_bands=36,
        dis_num_iter=1000,
        num_iter=0,
        spinors=True,
    ))
    inputs['wannier_projections'] = orm.List(
        list=['In : s; px; py; pz', 'Sb : px; py; pz'])
    inputs['wannier'] = dict(code=code_wannier90, metadata=get_metadata_singlecore())
    inputs['symmetries'] = orm.SinglefileData(
        file=str((test_data_dir / 'symmetries.hdf5').resolve()))
    inputs['slice_reference_bands'] = orm.List(list=list(range(12, 26)))
    inputs['slice_tb_model'] = orm.List(
        list=[0, 2, 3, 1, 5, 6, 4, 7, 9, 10, 8, 12, 13, 11])
    return inputs
def test_strains(configure_with_daemon, strain_inputs, sample):
    """
    Basic test for the ApplyStrainsWithSymmetry workchain.
    """
    from aiida.engine import run
    from aiida import orm

    from aiida_strain import ApplyStrainsWithSymmetry

    inputs = strain_inputs
    strain_list = inputs['strain_strengths'].get_attribute('list')

    result = run(
        ApplyStrainsWithSymmetry,
        symmetries=orm.SinglefileData(file=sample('symmetries.hdf5')),
        symmetry_repr_code=orm.Code.get_from_string('symmetry_repr'),
        **inputs
    )

    for strain_val in strain_list:
        structure_key = ('structure_{}'.format(strain_val).replace(
            '.', '_dot_').replace('-', 'm_'))
        assert structure_key in result
        assert isinstance(result[structure_key], orm.StructureData)

        symmetries_key = ('symmetries_{}'.format(strain_val).replace(
            '.', '_dot_').replace('-', 'm_'))
        assert symmetries_key in result
        assert isinstance(result[symmetries_key], orm.SinglefileData)

    for key in result:
        assert '__' not in key
        key_unescaped = key.replace('_dot_', '.').replace('_m_', '_-')
        assert len(key_unescaped.split('_')) <= 2
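# For reference, a sketch of the key escaping the assertions above rely on: output link
# labels cannot contain '.' or '-', so the strain value is escaped when the workchain
# names its outputs. This helper is illustrative only, not part of the aiida-strain API.
def _escaped_key(prefix, strain_val):
    return '{}_{}'.format(prefix, strain_val).replace('.', '_dot_').replace('-', 'm_')


assert _escaped_key('structure', -0.02) == 'structure_m_0_dot_02'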
def test_calcjob_dry_run_no_provenance(self):
    """Test that a dry run with `store_provenance=False` still works for unstored inputs.

    The special thing about this test is that unstored input nodes are used in the
    `local_copy_list`. This was broken because the code in `upload_calculation` assumed
    that the nodes could be loaded through their UUID, which is not the case in the
    `store_provenance=False` mode with unstored nodes. Note that it also explicitly
    tests nested namespaces, as that is a non-trivial case.
    """
    import os
    import tempfile

    code = orm.Code(input_plugin_name='arithmetic.add', remote_computer_exec=[self.computer, '/bin/true']).store()

    with tempfile.NamedTemporaryFile('w+') as handle:
        handle.write('dummy_content')
        handle.flush()
        single_file = orm.SinglefileData(file=handle.name)
        file_one = orm.SinglefileData(file=handle.name)
        file_two = orm.SinglefileData(file=handle.name)

    inputs = {
        'code': code,
        'single_file': single_file,
        'files': {
            'file_one': file_one,
            'file_two': file_two,
        },
        'metadata': {
            'dry_run': True,
            'store_provenance': False,
            'options': {
                'resources': {
                    'num_machines': 1,
                    'num_mpiprocs_per_machine': 1
                }
            }
        }
    }

    _, node = launch.run_get_node(FileCalcJob, **inputs)
    self.assertIn('folder', node.dry_run_info)

    for filename in ['single_file', 'file_one', 'file_two']:
        self.assertIn(filename, os.listdir(node.dry_run_info['folder']))
def test_content(self):
    """Test that `verdi data singlefile content` returns the content of the file."""
    content = 'abc\ncde'
    singlefile = orm.SinglefileData(file=io.BytesIO(content.encode('utf8'))).store()

    options = [str(singlefile.uuid)]
    result = self.cli_runner.invoke(cmd_singlefile.singlefile_content, options, catch_exceptions=False)

    for line in result.output.split('\n'):
        self.assertIn(line, content)
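# Continuing from the test above: `SinglefileData.get_content()` is the programmatic
# counterpart of the `verdi data singlefile content` command.
content_node = orm.load_node(singlefile.uuid)
assert content_node.get_content() == 'abc\ncde'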
def _generate_diff_inputs():
    """Prepare inputs for a `diff` calculation.

    Assumes `datadir` (a `pathlib.Path` to the test data) and `DataFactory` are
    available in the enclosing scope.
    """
    with open(datadir / 'file1.txt', 'rb') as f1_obj:
        file1 = orm.SinglefileData(file=f1_obj)
    with open(datadir / 'file2.txt', 'rb') as f2_obj:
        file2 = orm.SinglefileData(file=f2_obj)

    inputs = {
        "file1": file1,
        "file2": file2,
        "metadata": {
            "options": {
                "withmpi": False,
                "resources": {
                    "num_machines": 1,
                    "num_mpiprocs_per_machine": 1
                }
            }
        },
        "parameters": DataFactory("diff")(dict={"ignore-case": False})
    }
    return inputs
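# A minimal usage sketch, assuming the `aiida-diff` demo plugin is installed and a code
# with the label 'diff@localhost' has been configured (both are assumptions):
from aiida import orm
from aiida.engine import run
from aiida.plugins import CalculationFactory

inputs = _generate_diff_inputs()
inputs['code'] = orm.load_code('diff@localhost')
result = run(CalculationFactory('diff'), **inputs)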
def band_difference_builder(configure, shared_datadir):  # pylint: disable=unused-argument
    """
    Create inputs for the band difference workflow.
    """
    from aiida_tbextraction.model_evaluation import BandDifferenceModelEvaluation
    from aiida_bands_inspect.io import read_bands

    builder = BandDifferenceModelEvaluation.get_builder()
    builder.code_tbmodels = orm.Code.get_from_string('tbmodels')
    builder.code_bands_inspect = orm.Code.get_from_string('bands_inspect')

    with (shared_datadir / 'silicon/model.hdf5').open('rb') as model_file:
        builder.tb_model = orm.SinglefileData(file=model_file)
    builder.reference_bands = read_bands(shared_datadir / 'silicon/bands.hdf5')

    return builder
def create_builder():
    builder = OptimizeFirstPrinciplesTightBinding.get_builder()

    # Add the input structure
    builder.structure = orm.StructureData()
    builder.structure.set_ase(read_vasp('inputs/POSCAR'))

    # Specify that QuantumEspressoFirstPrinciplesRun should be used to run the first-principles calculations
    builder.fp_run_workflow = QuantumEspressoFirstPrinciplesRun

    # Set the inputs for the QuantumEspressoFirstPrinciplesRun workflow
    common_qe_parameters = orm.Dict(
        dict=dict(
            CONTROL=dict(etot_conv_thr=1e-3),
            SYSTEM=dict(noncolin=True, lspinorb=True, nbnd=36, ecutwfc=30),
        )
    )

    pseudo_dir = pathlib.Path(__file__).parent.absolute() / 'inputs' / 'pseudos'
    pseudos = {
        'In': orm.UpfData(file=str(pseudo_dir / 'In.rel-pbe-dn-kjpaw_psl.1.0.0.UPF')),
        'Sb': orm.UpfData(file=str(pseudo_dir / 'Sb.rel-pbe-n-kjpaw_psl.1.0.0.UPF'))
    }

    # We use the same general Quantum ESPRESSO parameters for the
    # scf, nscf, and bands calculations. The calculation type and
    # k-points will be set by the workflow.
    repeated_pw_inputs = {
        'pseudos': pseudos,
        'parameters': common_qe_parameters,
        'metadata': METADATA_PW,
        'code': CODE_PW
    }

    builder.fp_run = {
        'scf': repeated_pw_inputs,
        'bands': {'pw': repeated_pw_inputs},
        'to_wannier': {
            'nscf': repeated_pw_inputs,
            'pw2wannier': {
                'code': CODE_PW2WANNIER,
                'metadata': METADATA_PW2WANNIER
            },
            'wannier': {
                'code': CODE_WANNIER,
                'metadata': METADATA_WANNIER
            }
        }
    }

    # Setting the k-points for the reference bandstructure
    kpoints_list = []
    kvals = np.linspace(0, 0.5, 20, endpoint=False)
    kvals_rev = np.linspace(0.5, 0, 20, endpoint=False)
    for k in kvals_rev:
        kpoints_list.append((k, k, 0))  # Z to Gamma
    for k in kvals:
        kpoints_list.append((0, k, k))  # Gamma to X
    for k in kvals:
        kpoints_list.append((k, 0.5, 0.5))  # X to L
    for k in kvals_rev:
        kpoints_list.append((k, k, k))  # L to Gamma
    for k in np.linspace(0, 0.375, 21, endpoint=True):
        kpoints_list.append((k, k, 2 * k))  # Gamma to K
    builder.kpoints = orm.KpointsData()
    builder.kpoints.set_kpoints(
        kpoints_list,
        labels=[(i * 20, label) for i, label in enumerate(['Z', 'G', 'X', 'L', 'G', 'K'])]
    )

    # Setting the k-points mesh used to run the SCF and Wannier calculations
    builder.kpoints_mesh = orm.KpointsData()
    builder.kpoints_mesh.set_kpoints_mesh([6, 6, 6])

    builder.wannier.code = CODE_WANNIER
    builder.code_tbmodels = orm.Code.get_from_string('tbmodels')

    # Setting the workflow to evaluate the tight-binding models
    builder.model_evaluation_workflow = BandDifferenceModelEvaluation

    # Setting the additional inputs for the model evaluation workflow
    builder.model_evaluation = dict(
        code_bands_inspect=orm.Code.get_from_string('bands_inspect')
    )

    # Set the initial energy window value
    builder.initial_window = orm.List(list=[-1, 3, 10, 18])

    # Tolerance for the energy window.
    builder.window_tol = orm.Float(1.5)
    # Tolerance for the 'cost_value'.
    builder.cost_tol = orm.Float(0.3)
    # The tolerances are set higher than might be appropriate for a 'production'
    # run to make the example run more quickly.
    # Setting the parameters for Wannier90
    builder.wannier_parameters = orm.Dict(
        dict=dict(
            num_wann=14,
            num_bands=36,
            dis_num_iter=100,
            num_iter=0,
            spinors=True,
            # exclude_bands=range(1, )
        )
    )
    # Choose the Wannier90 trial orbitals
    builder.wannier_projections = orm.List(
        list=['In : s; pz; px; py', 'Sb : pz; px; py']
    )
    # Set the resource requirements for the Wannier90 run
    builder.wannier.metadata = METADATA_WANNIER

    # Set the symmetry file
    builder.symmetries = orm.SinglefileData(
        file=os.path.abspath('inputs/symmetries.hdf5')
    )

    # Pick the relevant bands from the reference calculation
    builder.slice_reference_bands = orm.List(list=list(range(12, 26)))

    return builder
def window_search_builder(test_data_dir, code_wannier90):  # pylint: disable=too-many-locals,useless-suppression
    """
    Sets up the process builder for window_search tests, and adds the inputs.
    """
    builder = WindowSearch.get_builder()

    input_folder = orm.FolderData()
    input_folder_path = test_data_dir / 'wannier_input_folder'
    for filename in os.listdir(input_folder_path):
        input_folder.put_object_from_file(
            path=str((input_folder_path / filename).resolve()), key=filename
        )

    builder.wannier.local_input_folder = input_folder
    builder.wannier.code = code_wannier90
    builder.code_tbmodels = orm.Code.get_from_string('tbmodels')

    builder.model_evaluation_workflow = BandDifferenceModelEvaluation
    builder.model_evaluation = {
        'code_bands_inspect': orm.Code.get_from_string('bands_inspect'),
    }
    builder.reference_bands = read(test_data_dir / 'bands.hdf5')

    initial_window = orm.List()
    initial_window.extend([-4.5, -4, 6.5, 16])
    builder.initial_window = initial_window
    builder.window_tol = orm.Float(1.5)

    a = 3.2395  # pylint: disable=invalid-name
    structure = orm.StructureData()
    structure.set_pymatgen_structure(
        pymatgen.Structure(
            lattice=[[0, a, a], [a, 0, a], [a, a, 0]],
            species=['In', 'Sb'],
            coords=[[0] * 3, [0.25] * 3]
        )
    )
    builder.structure = structure

    wannier_parameters = orm.Dict(
        dict=dict(
            num_wann=14,
            num_bands=36,
            dis_num_iter=1000,
            num_iter=0,
            spinors=True,
            mp_grid=[6, 6, 6],
        )
    )
    builder.wannier.parameters = wannier_parameters
    builder.wannier.metadata.options = {
        'resources': {
            'num_machines': 1,
            'tot_num_mpiprocs': 1
        },
        'withmpi': False
    }
    builder.symmetries = orm.SinglefileData(
        file=str((test_data_dir / 'symmetries.hdf5').resolve())
    )

    slice_idx = orm.List()
    slice_idx.extend([0, 2, 3, 1, 5, 6, 4, 7, 9, 10, 8, 12, 13, 11])
    builder.slice_idx = slice_idx

    k_values = [
        x if x <= 0.5 else -1 + x for x in np.linspace(0, 1, 6, endpoint=False)
    ]
    k_points = [
        list(reversed(k)) for k in itertools.product(k_values, repeat=3)
    ]
    wannier_bands = orm.BandsData()
    wannier_bands.set_kpoints(k_points)
    # Just let every energy window be valid.
    wannier_bands.set_bands(np.array([[0] * 14] * len(k_points)))
    builder.wannier_bands = wannier_bands
    return builder
def inner(window_values, slice_, symmetries):
    builder = RunWindow.get_builder()

    input_folder = orm.FolderData()
    input_folder_path = test_data_dir / 'wannier_input_folder'
    for filename in os.listdir(input_folder_path):
        input_folder.put_object_from_file(
            path=str((input_folder_path / filename).resolve()), key=filename
        )
    builder.wannier.local_input_folder = input_folder
    builder.wannier.code = code_wannier90
    builder.code_tbmodels = orm.Code.get_from_string('tbmodels')
    builder.model_evaluation_workflow = BandDifferenceModelEvaluation
    builder.reference_bands = read(test_data_dir / 'bands.hdf5')
    builder.model_evaluation = {
        'code_bands_inspect': orm.Code.get_from_string('bands_inspect'),
    }

    window = orm.List(list=window_values)
    builder.window = window

    k_values = [
        x if x <= 0.5 else -1 + x for x in np.linspace(0, 1, 6, endpoint=False)
    ]
    k_points = [
        list(reversed(k)) for k in itertools.product(k_values, repeat=3)
    ]
    wannier_kpoints = orm.KpointsData()
    wannier_kpoints.set_kpoints(k_points)
    builder.wannier.kpoints = wannier_kpoints

    wannier_bands = orm.BandsData()
    wannier_bands.set_kpoints(k_points)
    # Just let every energy window be valid.
    wannier_bands.set_bands(
        np.array([[-20] * 10 + [-0.5] * 7 + [0.5] * 7 + [20] * 12] * len(k_points))
    )
    builder.wannier_bands = wannier_bands

    a = 3.2395  # pylint: disable=invalid-name
    structure = orm.StructureData()
    structure.set_pymatgen_structure(
        pymatgen.Structure(
            lattice=[[0, a, a], [a, 0, a], [a, a, 0]],
            species=['In', 'Sb'],
            coords=[[0] * 3, [0.25] * 3]
        )
    )
    builder.structure = structure

    wannier_parameters = orm.Dict(dict=dict(
        num_wann=14,
        num_bands=36,
        dis_num_iter=1000,
        num_iter=0,
        spinors=True,
        mp_grid=[6, 6, 6],
    ))
    builder.wannier.parameters = wannier_parameters
    builder.wannier.metadata.options = {
        'resources': {
            'num_machines': 1,
            'tot_num_mpiprocs': 1
        },
        'withmpi': False
    }
    if symmetries:
        builder.symmetries = orm.SinglefileData(
            file=str(test_data_dir / 'symmetries.hdf5'))
    if slice_:
        slice_idx = orm.List()
        slice_idx.extend([0, 2, 3, 1, 5, 6, 4, 7, 9, 10, 8, 12, 13, 11])
        builder.slice_idx = slice_idx
    return builder
def test_tbextraction(configure_with_daemon, test_data_dir, slice_, symmetries, code_wannier90):  # pylint: disable=unused-argument
    """
    Run the tight-binding calculation workflow, optionally including
    symmetrization and slicing of orbitals.
    """
    builder = TightBindingCalculation.get_builder()

    wannier_input_folder = orm.FolderData()
    wannier_input_folder_path = test_data_dir / 'wannier_input_folder'
    for filename in os.listdir(wannier_input_folder_path):
        wannier_input_folder.put_object_from_file(
            path=str((wannier_input_folder_path / filename).resolve()), key=filename
        )
    builder.wannier.local_input_folder = wannier_input_folder
    builder.wannier.code = code_wannier90
    builder.code_tbmodels = orm.Code.get_from_string('tbmodels')

    k_values = [
        x if x <= 0.5 else -1 + x for x in np.linspace(0, 1, 6, endpoint=False)
    ]
    k_points = [
        list(reversed(k)) for k in itertools.product(k_values, repeat=3)
    ]
    wannier_kpoints = orm.KpointsData()
    wannier_kpoints.set_kpoints(k_points)
    builder.wannier.kpoints = wannier_kpoints

    a = 3.2395  # pylint: disable=invalid-name
    structure = orm.StructureData()
    structure.set_pymatgen_structure(
        pymatgen.Structure(
            lattice=[[0, a, a], [a, 0, a], [a, a, 0]],
            species=['In', 'Sb'],
            coords=[[0] * 3, [0.25] * 3]
        )
    )
    builder.structure = structure

    builder.wannier.parameters = orm.Dict(dict=dict(
        num_wann=14,
        num_bands=36,
        dis_num_iter=1000,
        num_iter=0,
        dis_win_min=-4.5,
        dis_win_max=16.,
        dis_froz_min=-4,
        dis_froz_max=6.5,
        spinors=True,
        mp_grid=[6, 6, 6]
    ))
    builder.wannier.metadata.options = {
        'resources': {
            'num_machines': 1,
            'tot_num_mpiprocs': 1
        },
        'withmpi': False
    }
    if symmetries:
        builder.symmetries = orm.SinglefileData(
            file=str(test_data_dir / 'symmetries.hdf5'))
    if slice_:
        slice_idx = orm.List()
        slice_idx.extend([0, 2, 3, 1, 5, 6, 4, 7, 9, 10, 8, 12, 13, 11])
        builder.slice_idx = slice_idx

    result, node = run_get_node(builder)
    assert node.is_finished_ok
    assert 'tb_model' in result
#!/usr/bin/env runaiida
# -*- coding: utf-8 -*-

# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <*****@*****.**>
"""
Example applying uniaxial 110 strain on InSb, and filtering the symmetries.
"""

import sys
from os.path import dirname, abspath

sys.path.append(dirname(abspath(__file__)))

from aiida import orm
from aiida.engine.launch import run

from aiida_strain import ApplyStrainsWithSymmetry

from run_strain import get_strain_input

if __name__ == '__main__':
    print(
        run(
            ApplyStrainsWithSymmetry,
            symmetries=orm.SinglefileData(file=abspath('symmetries.hdf5')),
            symmetry_repr_code=orm.Code.get_from_string('symmetry-repr'),
            **get_strain_input()
        )
    )