Example #1
def test_relax_wc(fresh_aiida_env, vasp_params, potentials, mock_vasp):
    # def test_relax_wc(fresh_aiida_env, vasp_params, potentials, mock_vasp, mock_relax_wc):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.relax')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    structure = PoscarParser(file_path=data_path('test_relax_wc', 'inp', 'POSCAR')).structure
    kpoints = KpointsParser(file_path=data_path('test_relax_wc', 'inp', 'KPOINTS')).kpoints
    parameters = IncarParser(file_path=data_path('test_relax_wc', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_relax_wc'
    parameters = {'incar': {k: v for k, v in parameters.items() if k not in ['isif', 'ibrion', 'nsw', 'ediffg']}}
    parameters['relax'] = {}
    parameters['relax']['perform'] = True
    parameters['relax']['algo'] = 'cg'
    parameters['relax']['force_cutoff'] = 0.01

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.kpoints = kpoints
    inputs.parameters = get_data_node('dict', dict=parameters)
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'max_wallclock_seconds': 1,
                                       'import_sys_environment': True,
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'relax' in results
    relax = results['relax']
    assert 'structure' in relax
    sites = relax['structure'].sites
    assert sites[0].kind_name == 'Si'
    assert sites[1].kind_name == 'Si'
    np.testing.assert_allclose(sites[0].position, [4.8125, 4.8125, 4.8125])
    np.testing.assert_allclose(sites[1].position, [0.6875, 0.6875, 0.715])
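These excerpts use several helpers (PoscarParser, IncarParser, KpointsParser, data_path, get_data_node, create_authinfo, AttributeDict, POTCAR_FAMILY_NAME, POTCAR_MAP, np, pytest) without showing the module-level imports of the test files. Below is a minimal sketch of the imports the snippets assume, based on the aiida-vasp test utilities; the exact module paths are an assumption and may differ between plugin versions.

# Assumed module-level imports for the test excerpts in this section.
# Module paths follow the aiida-vasp test utilities and may differ per version.
import numpy as np
import pytest

from aiida.common.extendeddicts import AttributeDict

from aiida_vasp.utils.aiida_utils import get_data_node, create_authinfo
from aiida_vasp.utils.fixtures.testdata import data_path
from aiida_vasp.utils.fixtures.data import POTCAR_FAMILY_NAME, POTCAR_MAP
from aiida_vasp.parsers.file_parsers.poscar import PoscarParser
from aiida_vasp.parsers.file_parsers.incar import IncarParser
from aiida_vasp.parsers.file_parsers.kpoints import KpointsParser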
Example #2
    def relax_input_structure(self):
        '''
        Run a relax/vc-relax calculation to find the ground state structure

        NOTE: I would really like to modify this to be flexible (LAMMPS or QE)
        '''
        inputs = AttributeDict(self.exposed_inputs(PwRelaxWorkChain,
                                                   namespace='initial_relax'))
        inputs.structure = self.inputs.structure
        inputs.clean_workdir = self.inputs.clean_workdir

        future = self.submit(PwRelaxWorkChain, **inputs)
        self.report('Launching PwRelaxWorkChain<{}>'.format(future.pk))
        self.to_context(initial_relax_workchain=future)
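This step calls self.exposed_inputs(PwRelaxWorkChain, namespace='initial_relax'), which only works if the parent workchain exposed those inputs under the same namespace in its define method. Below is a minimal sketch of the wiring this step assumes; the class name, the extra top-level inputs and the inspect_initial_relax step are illustrative, not taken from the source.

from aiida import orm
from aiida.engine import WorkChain
from aiida.plugins import WorkflowFactory

PwRelaxWorkChain = WorkflowFactory('quantumespresso.pw.relax')


class ElasticWorkChain(WorkChain):  # illustrative name
    '''Sketch of the spec wiring assumed by relax_input_structure.'''

    @classmethod
    def define(cls, spec):
        super().define(spec)
        # Expose the full PwRelaxWorkChain input spec under the 'initial_relax'
        # namespace; this is what exposed_inputs(..., namespace='initial_relax') reads back.
        spec.expose_inputs(PwRelaxWorkChain, namespace='initial_relax')
        spec.input('structure', valid_type=orm.StructureData)
        spec.input('clean_workdir', valid_type=orm.Bool)
        spec.outline(
            cls.relax_input_structure,
            cls.inspect_initial_relax,
        )

    def relax_input_structure(self):
        ...  # the step shown above

    def inspect_initial_relax(self):
        ...  # e.g. check self.ctx.initial_relax_workchain.is_finished_ok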
Example #3
def test_vasp_wc_chgcar(fresh_aiida_env, vasp_params, potentials, vasp_kpoints,
                        vasp_structure, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code, test fetching of the CHGCAR."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.vasp')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    kpoints, _ = vasp_kpoints
    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = vasp_structure
    inputs.parameters = get_data_node('dict',
                                      dict={'incar': vasp_params.get_dict()})
    inputs.kpoints = kpoints
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.settings = get_data_node('dict',
                                    dict={
                                        'ADDITIONAL_RETRIEVE_LIST': ['CHGCAR'],
                                        'parser_settings': {
                                            'add_chgcar': True
                                        }
                                    })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'chgcar' in results
    assert results['chgcar'].get_content() == 'This is a test CHGCAR file.\n'
Example #4
def test_vasp_wc(fresh_aiida_env, vasp_params, potentials, vasp_kpoints,
                 vasp_structure, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.vasp')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    kpoints, _ = vasp_kpoints
    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = vasp_structure
    inputs.parameters = get_data_node('dict',
                                      dict={'incar': vasp_params.get_dict()})
    inputs.kpoints = kpoints
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)

    assert node.exit_status == 0
    assert 'retrieved' in results
    assert 'misc' in results
    assert 'remote_folder' in results
    misc = results['misc'].get_dict()
    assert misc['maximum_stress'] == 22.8499295
    assert misc['total_energies']['energy_extrapolated'] == -14.16209692
Example #5
    def compute_deformed_structures(self):
        '''
        Run a relax workflow for each deformed structure

        NOTE: I would really like to modify this to be flexible (LAMMPS or QE)
        '''
        self.report('Computing deformed structures')
        deformed_structures = self.ctx.deformed_structures

        for key_index in range(len(deformed_structures)):
            inputs = AttributeDict(self.exposed_inputs(PwRelaxWorkChain,
                                                       namespace='elastic_relax'))
            inputs.structure = deformed_structures[key_index]
            inputs.clean_workdir = self.inputs.clean_workdir

            future = self.submit(PwRelaxWorkChain, **inputs)

            deformation_key = 'deformation_{}'.format(key_index)
            self.report('launching PwRelaxWorkChain<{}>'.format(future.pk))
            self.to_context(**{deformation_key: future})
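Each submitted PwRelaxWorkChain is parked in the context under a deformation_{index} key, so a later outline step can iterate over those keys to collect the relaxed structures. Below is a minimal sketch of such a follow-up step; the step name, the failure handling and the output_structure output name (taken from PwRelaxWorkChain's usual outputs) are assumptions, not from the source.

    def gather_deformed_results(self):  # illustrative step name
        '''
        Collect relaxed structures from the deformation_* context entries.
        '''
        relaxed_structures = {}
        for key_index in range(len(self.ctx.deformed_structures)):
            workchain = self.ctx['deformation_{}'.format(key_index)]
            if not workchain.is_finished_ok:
                self.report('PwRelaxWorkChain<{}> failed, skipping'.format(workchain.pk))
                continue
            # output_structure is the relaxed structure exposed by PwRelaxWorkChain
            relaxed_structures[key_index] = workchain.outputs.output_structure
        self.ctx.relaxed_structures = relaxed_structures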
Example #6
def test_converge_wc(fresh_aiida_env, potentials, mock_vasp):
    """Test submitting only, not correctness, with mocked vasp code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.converge')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    structure = PoscarParser(
        file_path=data_path('test_converge_wc', 'inp', 'POSCAR')).structure
    parameters = IncarParser(
        file_path=data_path('test_converge_wc', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_converge_wc'
    parameters = {
        k: v
        for k, v in parameters.items()
        if k not in ['isif', 'ibrion', 'encut', 'nsw']
    }

    restart_clean_workdir = get_data_node('bool', False)
    restart_clean_workdir.store()

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.parameters = get_data_node('dict', dict={'incar': parameters})
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    relax = AttributeDict()
    converge = AttributeDict()
    converge.relax = get_data_node('bool', False)
    converge.compress = get_data_node('bool', False)
    converge.displace = get_data_node('bool', False)
    converge.pwcutoff_samples = get_data_node('int', 3)
    converge.k_samples = get_data_node('int', 3)
    relax.perform = get_data_node('bool', True)
    inputs.relax = relax
    inputs.converge = converge
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    converge = results['converge']
    assert 'data' in converge

    conv_data = converge['data']
    try:
        conv_data.get_array('pw_regular')
    except KeyError:
        pytest.fail('Did not find pw_regular in converge.data')
    try:
        conv_data.get_array('kpoints_regular')
    except KeyError:
        pytest.fail('Did not find kpoints_regular in converge.data')

    assert 'pwcutoff_recommended' in converge
    try:
        _encut = converge['pwcutoff_recommended'].value
    except AttributeError:
        pytest.fail('pwcutoff_recommended does not have the expected format')
    assert 'kpoints_recommended' in converge
    try:
        _kpoints = converge['kpoints_recommended'].get_kpoints_mesh()
    except AttributeError:
        pytest.fail('kpoints_recommended does not have the expected format')
Example #7
def test_converge_wc_pw(fresh_aiida_env, vasp_params, potentials, mock_vasp):
    """Test convergence workflow using mock code."""
    from aiida.orm import Code
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.converge')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer).store()

    structure = PoscarParser(file_path=data_path('test_converge_wc/pw/200',
                                                 'inp', 'POSCAR')).structure
    parameters = IncarParser(
        file_path=data_path('test_converge_wc/pw/200', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_converge_wc'
    parameters = {
        k: v
        for k, v in parameters.items()
        if k not in ['isif', 'ibrion', 'encut', 'nsw']
    }
    kpoints = KpointsParser(file_path=data_path('test_converge_wc/pw/200',
                                                'inp', 'KPOINTS')).kpoints

    restart_clean_workdir = get_data_node('bool', False)
    restart_clean_workdir.store()

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.kpoints = kpoints
    inputs.parameters = get_data_node('dict', dict={'incar': parameters})
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    relax = AttributeDict()
    converge = AttributeDict()
    relax.perform = get_data_node('bool', False)
    converge.relax = get_data_node('bool', False)
    converge.testing = get_data_node('bool', True)
    converge.compress = get_data_node('bool', False)
    converge.displace = get_data_node('bool', False)
    converge.pwcutoff_samples = get_data_node('int', 3)
    converge.k_samples = get_data_node('int', 3)
    inputs.relax = relax
    inputs.converge = converge
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'converge' in results
    converge = results['converge']
    assert 'data' in converge
    conv_data = converge['data']
    try:
        conv_data = conv_data.get_array('pw_regular')
    except KeyError:
        pytest.fail('Did not find pw_regular in converge.data')
    conv_data_test = np.array([[200.0, -10.77974998, 0.0, 0.0, 0.5984],
                               [250.0, -10.80762044, 0.0, 0.0, 0.5912],
                               [300.0, -10.82261992, 0.0, 0.0, 0.5876]])
    np.testing.assert_allclose(conv_data, conv_data_test)

    assert 'pwcutoff_recommended' in converge
    try:
        _encut = converge['pwcutoff_recommended'].value
        np.testing.assert_equal(_encut, 300)
    except AttributeError:
        pytest.fail('pwcutoff_recommended does not have the expected format')
Example #8
def test_bands_wc(fresh_aiida_env, potentials, mock_vasp):
    """Test with mocked vasp code."""
    from aiida.orm import Code, RemoteData
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.bands')

    mock_vasp.store()
    create_authinfo(computer=mock_vasp.computer, store=True)

    structure = PoscarParser(file_path=data_path('test_bands_wc', 'inp', 'POSCAR')).structure
    parameters = IncarParser(file_path=data_path('test_bands_wc', 'inp', 'INCAR')).incar
    parameters['system'] = 'test-case:test_bands_wc'
    # Make sure we replace encut with pwcutoff
    del parameters['encut']
    parameters = {'vasp': parameters}
    parameters['electronic'] = {'pwcutoff': 200}

    inputs = AttributeDict()
    inputs.code = Code.get_from_string('mock-vasp@localhost')
    inputs.structure = structure
    inputs.parameters = get_data_node('dict', dict=parameters)
    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
    inputs.options = get_data_node('dict',
                                   dict={
                                       'withmpi': False,
                                       'queue_name': 'None',
                                       'resources': {
                                           'num_machines': 1,
                                           'num_mpiprocs_per_machine': 1
                                       },
                                       'max_wallclock_seconds': 3600
                                   })
    inputs.max_iterations = get_data_node('int', 1)
    inputs.clean_workdir = get_data_node('bool', False)
    inputs.verbose = get_data_node('bool', True)
    # Also set the restart folder, since we assume the bands calculation starts
    # from a previous calculation that is sitting in the restart folder
    inputs.restart_folder = RemoteData(computer=inputs.code.computer, remote_path=data_path('test_bands_wc', 'inp'))
    results, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0
    assert 'bands' in results
    kpoints = results['bands'].get_kpoints()
    test_array = np.array([[0., 0., 0.], [0.02272727, 0., 0.02272727], [0.04545454, 0., 0.04545454], [0.06818182, 0., 0.06818182],
                           [0.09090909, 0., 0.09090909], [0.11363636, 0., 0.11363636], [0.13636364, 0., 0.13636364],
                           [0.15909091, 0., 0.15909091], [0.18181818, 0., 0.18181818], [0.20454545, 0., 0.20454545],
                           [0.22727273, 0., 0.22727273], [0.25, 0., 0.25], [0.27272727, 0., 0.27272727], [0.29545455, 0., 0.29545455],
                           [0.31818182, 0., 0.31818182], [0.34090909, 0., 0.34090909], [0.36363636, 0., 0.36363636],
                           [0.38636364, 0., 0.38636364], [0.40909091, 0., 0.40909091], [0.43181818, 0., 0.43181818],
                           [0.45454545, 0., 0.45454545], [0.47727273, 0., 0.47727273], [0.5, 0., 0.5], [0.51785714, 0.03571429, 0.51785714],
                           [0.53571429, 0.07142857, 0.53571429], [0.55357143, 0.10714286, 0.55357143], [0.57142857, 0.14285714, 0.57142857],
                           [0.58928571, 0.17857143, 0.58928571], [0.60714286, 0.21428571, 0.60714286], [0.625, 0.25, 0.625],
                           [0.375, 0.375, 0.75], [0.35869565, 0.35869565, 0.7173913], [0.3423913, 0.3423913, 0.68478261],
                           [0.32608696, 0.32608696, 0.65217391], [0.30978261, 0.30978261, 0.61956522], [0.29347826, 0.29347826, 0.58695652],
                           [0.27717391, 0.27717391, 0.55434783], [0.26086957, 0.26086957, 0.52173913], [0.24456522, 0.24456522, 0.48913043],
                           [0.22826087, 0.22826087, 0.45652174], [0.21195652, 0.21195652, 0.42391304], [0.19565217, 0.19565217, 0.39130435],
                           [0.17934783, 0.17934783, 0.35869565], [0.16304348, 0.16304348, 0.32608696], [0.14673913, 0.14673913, 0.29347826],
                           [0.13043478, 0.13043478, 0.26086957], [0.11413044, 0.11413044, 0.22826087], [0.09782609, 0.09782609, 0.19565217],
                           [0.08152174, 0.08152174, 0.16304348], [0.06521739, 0.06521739, 0.13043478], [0.04891304, 0.04891304, 0.09782609],
                           [0.0326087, 0.0326087, 0.06521739], [0.01630435, 0.01630435, 0.0326087], [0., 0., 0.],
                           [0.02631579, 0.02631579, 0.02631579], [0.05263158, 0.05263158, 0.05263158], [0.07894737, 0.07894737, 0.07894737],
                           [0.10526316, 0.10526316, 0.10526316], [0.13157895, 0.13157895, 0.13157895], [0.15789474, 0.15789474, 0.15789474],
                           [0.18421053, 0.18421053, 0.18421053], [0.21052632, 0.21052632, 0.21052632], [0.2368421, 0.2368421, 0.2368421],
                           [0.26315789, 0.26315789, 0.26315789], [0.28947368, 0.28947368, 0.28947368], [0.31578947, 0.31578947, 0.31578947],
                           [0.34210526, 0.34210526, 0.34210526], [0.36842105, 0.36842105, 0.36842105], [0.39473684, 0.39473684, 0.39473684],
                           [0.42105263, 0.42105263, 0.42105263], [0.44736842, 0.44736842, 0.44736842], [0.47368421, 0.47368421, 0.47368421],
                           [0.5, 0.5, 0.5], [0.5, 0.48333333, 0.51666667], [0.5, 0.46666667, 0.53333333], [0.5, 0.45, 0.55],
                           [0.5, 0.43333333, 0.56666667], [0.5, 0.41666667, 0.58333333], [0.5, 0.4, 0.6], [0.5, 0.38333333, 0.61666667],
                           [0.5, 0.36666667, 0.63333333], [0.5, 0.35, 0.65], [0.5, 0.33333333, 0.66666667], [0.5, 0.31666667, 0.68333333],
                           [0.5, 0.3, 0.7], [0.5, 0.28333333, 0.71666667], [0.5, 0.26666667, 0.73333333], [0.5, 0.25, 0.75],
                           [0.5, 0.225, 0.725], [0.5, 0.2, 0.7], [0.5, 0.175, 0.675], [0.5, 0.15, 0.65], [0.5, 0.125, 0.625],
                           [0.5, 0.1, 0.6], [0.5, 0.075, 0.575], [0.5, 0.05, 0.55], [0.5, 0.025, 0.525], [0.5, 0., 0.5]])
    np.testing.assert_allclose(kpoints, test_array)
    bands = results['bands'].get_bands()
    assert bands.shape == (1, 98, 20)
    np.testing.assert_allclose(bands[0, 0, 0:3], np.array([-6.0753, 6.0254, 6.0254]))
    np.testing.assert_allclose(bands[0, 2, 0:3], np.array([-6.0386, 5.7955, 5.8737]))
    np.testing.assert_allclose(bands[0, 97, 0:3], np.array([-1.867, -1.867, 3.1102]))