Example #1
def optic_flow_from_files():
    # Optic does not support MPI with ncpus > 1.
    manager = abilab.TaskManager.from_user_config()
    manager.set_mpi_procs(1)

    flow = abilab.Flow(workdir="OPTIC_FROM_FILE", manager=manager)
    
    ddk_nodes = [
        "/Users/gmatteo/Coding/abipy/abipy/data/runs/OPTIC/work_1/task_0/outdata/out_1WF",
        "/Users/gmatteo/Coding/abipy/abipy/data/runs/OPTIC/work_1/task_1/outdata/out_1WF",
        "/Users/gmatteo/Coding/abipy/abipy/data/runs/OPTIC/work_1/task_2/outdata/out_1WF",
    ]
    nscf_node = "/Users/gmatteo/Coding/abipy/abipy/data/runs/OPTIC/work_0/task_1/outdata/out_WFK"

    optic_task = abilab.OpticTask(optic_input, nscf_node=nscf_node, ddk_nodes=ddk_nodes)
    flow.register_task(optic_task)
    return flow
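This snippet assumes that abilab has already been imported and that optic_input is defined elsewhere in the module. A minimal sketch of the missing preamble, reusing the OpticInput parameters that appear in the later examples (the values are illustrative):

from abipy import abilab

# Hypothetical definition of the optic_input object used above.
optic_input = abilab.OpticInput(
    broadening=0.002,        # Smearing factor, in Hartree
    domega=0.0003,           # Frequency mesh step, in Hartree
    maxomega=0.3,            # Maximum frequency, in Hartree
    scissor=0.000,           # Scissor shift if needed, in Hartree
    tolerance=0.002,         # Tolerance on closeness of singularities, in Hartree
    num_lin_comp=1,          # Number of linear optic tensor components to compute
    lin_comp=11,             # Linear coefficients to compute (x=1, y=2, z=3)
    num_nonlin_comp=2,       # Number of nonlinear optic tensor components to compute
    nonlin_comp=(123, 222),  # Non-linear coefficients to compute
)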
Example #2
def build_flow(options):
    flow = make_base_flow(options)

    optic_input = abilab.OpticInput(
        broadening=0.002,
        domega=0.0003,
        maxomega=0.3,
        scissor=0.000,
        tolerance=0.002,
        num_lin_comp=1,
        lin_comp=11,
        num_nonlin_comp=2,
        nonlin_comp=(123, 222),
    )

    mpi_list = options.mpi_list
    if mpi_list is None:
        mpi_list = [1, 2, 4, 8]
        print("Using mpi_list:", mpi_list)
    else:
        print("Using mpi_list from cmd line:", mpi_list)

    work = abilab.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        optic_task = abilab.OpticTask(optic_input,
                                      manager=manager,
                                      nscf_node=flow[0].nscf_task,
                                      ddk_nodes=flow[1])
        work.register_task(optic_task)

    flow.register_work(work)

    return flow.allocate()
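This benchmark-style example assumes a couple of module-level imports as well as a make_base_flow helper defined elsewhere in the same script. A minimal sketch of the assumed preamble:

from itertools import product

from abipy import abilab

The loop over product(mpi_list, options.omp_list) registers one OpticTask per (MPI, OpenMP) configuration, so the resulting work can be used to compare timings across parallel setups.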
Example #3
def build_flow(options, paral_kgb=0):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    workdir = options.workdir
    if not options.workdir:
        workdir = os.path.basename(__file__).replace(".py", "").replace(
            "run_", "flow_")

    multi = abilab.MultiDataset(structure=data.structure_from_ucell("GaAs"),
                                pseudos=data.pseudos("31ga.pspnc",
                                                     "33as.pspnc"),
                                ndtset=5)

    # Global variables
    kmesh = dict(ngkpt=[4, 4, 4],
                 nshiftk=4,
                 shiftk=[[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0],
                         [0.0, 0.0, 0.5]])

    global_vars = dict(ecut=2, paral_kgb=paral_kgb)
    global_vars.update(kmesh)

    multi.set_vars(global_vars)

    # Dataset 1 (GS run)
    multi[0].set_vars(
        tolvrs=1e-6,
        nband=4,
    )

    # NSCF run with a large number of bands and k-points in the full BZ
    multi[1].set_vars(
        iscf=-2,
        nband=20,
        nstep=25,
        kptopt=1,
        tolwfr=1.e-9,
        #kptopt=3,
    )

    # Third dataset : ddk response function along axis 1
    # Fourth dataset : ddk response function along axis 2
    # Fifth dataset : ddk response function along axis 3
    for dir in range(3):
        rfdir = 3 * [0]
        rfdir[dir] = 1

        multi[2 + dir].set_vars(
            iscf=-3,
            nband=20,
            nstep=1,
            nline=0,
            prtwf=3,
            kptopt=3,
            nqpt=1,
            qpt=[0.0, 0.0, 0.0],
            rfdir=rfdir,
            rfelfd=2,
            tolwfr=1.e-9,
        )

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = multi.split_datasets()

    # Initialize the flow.
    flow = abilab.Flow(workdir, manager=options.manager, remove=options.remove)

    bands_work = abilab.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands_work)

    ddk_work = abilab.Work()
    for inp in [ddk1, ddk2, ddk3]:
        ddk_work.register_ddk_task(inp, deps={bands_work.nscf_task: "WFK"})

    flow.register_work(ddk_work)

    # Optic does not support MPI with ncpus > 1.
    optic_input = abilab.OpticInput(
        broadening=0.002,  # Value of the smearing factor, in Hartree
        domega=0.0003,  # Frequency mesh.
        maxomega=0.3,
        scissor=0.000,  # Scissor shift if needed, in Hartree
        tolerance=0.002,  # Tolerance on closeness of singularities (in Hartree)
        num_lin_comp=1,  # Number of components of linear optic tensor to be computed
        lin_comp=11,  # Linear coefficients to be computed (x=1, y=2, z=3)
        num_nonlin_comp=2,  # Number of components of nonlinear optic tensor to be computed
        nonlin_comp=(123, 222),  # Non-linear coefficients to be computed
    )

    # TODO
    # Check whether the order of the 1WF files is relevant. Can we use DDK files ordered
    # in an arbitrary way or do we have to pass (x, y, z)?
    optic_task = abilab.OpticTask(optic_input,
                                  nscf_node=bands_work.nscf_task,
                                  ddk_nodes=ddk_work)
    flow.register_task(optic_task)

    return flow
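Once built, a flow like this is typically written to disk and executed with the abipy scheduler (or via the abirun.py script). A minimal sketch, assuming `options` is the parsed options object that the calling script already provides:

flow = build_flow(options)
flow.build_and_pickle_dump()   # create the directory tree and input files, then pickle the flow
flow.make_scheduler().start()  # run GS, NSCF, the three DDK tasks and finally optic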
Example #4
def raman_work(structure, pseudos, ngkpt, shiftk, ddk_manager, shell_manager):
    # Generate 5 input files (GS + NSCF + 3 DDK) for computing optical properties with optic.
    # Note: global_vars and optic_input are assumed to be defined at module level.

    multi = abilab.MultiDataset(structure, pseudos=pseudos, ndtset=5)
    multi.set_vars(global_vars)
    multi.set_kmesh(ngkpt=ngkpt, shiftk=shiftk)

    # GS run
    multi[0].set_vars(
        tolvrs=1e-8,
        nband=59,
    )

    # NSCF run
    multi[1].set_vars(
        iscf=-2,
        nband=100,
        kptopt=1,
        tolwfr=1.e-12,
    )

    # DDK along 3 directions
    # Third dataset : ddk response function along axis 1
    # Fourth dataset : ddk response function along axis 2
    # Fifth dataset : ddk response function along axis 3
    for dir in range(3):
        rfdir = 3 * [0]
        rfdir[dir] = 1

        multi[2 + dir].set_vars(
            iscf=-3,
            nband=100,
            nstep=1,
            nline=0,
            prtwf=3,
            kptopt=1,
            nqpt=1,
            qpt=[0.0, 0.0, 0.0],
            rfdir=rfdir,
            rfelfd=2,
            tolwfr=1.e-12,
        )

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = multi.split_datasets()
    ddk_inputs = [ddk1, ddk2, ddk3]

    work = abilab.Work()
    scf_t = work.register_scf_task(scf_inp)
    nscf_t = work.register_nscf_task(nscf_inp, deps={scf_t: "DEN"})

    ddk_nodes = []
    for inp in ddk_inputs:
        ddk_t = work.register_ddk_task(inp, deps={nscf_t: "WFK"})
        ddk_t.set_manager(ddk_manager)
        ddk_nodes.append(ddk_t)

    optic_t = abilab.OpticTask(optic_input,
                               nscf_node=nscf_t,
                               ddk_nodes=ddk_nodes,
                               manager=shell_manager)

    work.register(optic_t)

    return work
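The Work returned by raman_work is meant to be registered in a Flow. A minimal sketch, where the structure, pseudos, k-mesh and the two managers are placeholders to be supplied by the caller:

flow = abilab.Flow(workdir="RAMAN_OPTIC", manager=shell_manager)  # hypothetical workdir
work = raman_work(structure, pseudos, ngkpt=[4, 4, 4], shiftk=[[0.0, 0.0, 0.0]],
                  ddk_manager=ddk_manager, shell_manager=shell_manager)
flow.register_work(work)
flow.allocate()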
Example #5
def itest_optic_flow(fwp, tvars):
    """Test optic calculations."""
    if tvars.paral_kgb == 1:
        pytest.xfail(
            "Optic flow with paral_kgb==1 is expected to fail (implementation problem)"
        )
    """
    0.002         ! Value of the smearing factor, in Hartree
    0.0003  0.3   ! Difference between frequency values (in Hartree), and maximum frequency ( 1 Ha is about 27.211 eV)
    0.000         ! Scissor shift if needed, in Hartree
    0.002         ! Tolerance on closeness of singularities (in Hartree)
    1             ! Number of components of linear optic tensor to be computed
    11            ! Linear coefficients to be computed (x=1, y=2, z=3)
    2             ! Number of components of nonlinear optic tensor to be computed
    123 222       ! Non-linear coefficients to be computed
    """
    optic_input = abilab.OpticInput(
        broadening=0.002,
        domega=0.0003,
        maxomega=0.3,
        scissor=0.000,
        tolerance=0.002,
        num_lin_comp=1,
        lin_comp=11,
        num_nonlin_comp=2,
        nonlin_comp=(123, 222),
    )
    print(optic_input)
    #raise ValueError()

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = make_inputs(tvars)

    flow = abilab.Flow(fwp.workdir, manager=fwp.manager)

    bands_work = abilab.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands_work)

    # work with DDK tasks.
    ddk_work = abilab.Work()
    for inp in [ddk1, ddk2, ddk3]:
        ddk_work.register_ddk_task(inp, deps={bands_work.nscf_task: "WFK"})

    flow.register_work(ddk_work)
    flow.allocate()
    flow.build_and_pickle_dump(abivalidate=True)

    # Run the tasks
    for task in flow.iflat_tasks():
        task.start_and_wait()
        assert task.status == task.S_DONE

    flow.check_status()
    assert flow.all_ok

    # Optic does not support MPI with ncores > 1, hence we construct a manager with mpi_procs == 1.
    shell_manager = fwp.manager.to_shell_manager(mpi_procs=1)

    # Build optic task and register it
    optic_task1 = abilab.OpticTask(optic_input,
                                   nscf_node=bands_work.nscf_task,
                                   ddk_nodes=ddk_work,
                                   manager=shell_manager)

    flow.register_task(optic_task1)
    flow.allocate()
    flow.build_and_pickle_dump(abivalidate=True)

    optic_task1.start_and_wait()
    assert optic_task1.status == optic_task1.S_DONE

    # Now we do a similar calculation but the dependencies are represented by
    # strings with the path to the input files instead of task objects.
    ddk_nodes = [task.outdir.has_abiext("1WF") for task in ddk_work]
    #ddk_nodes = [task.outdir.has_abiext("DDK") for task in ddk_work]
    print("ddk_nodes:", ddk_nodes)
    assert all(ddk_nodes)

    #nscf_node = bands_work.nscf_task
    nscf_node = bands_work.nscf_task.outdir.has_abiext("WFK")
    assert nscf_node

    # This does not work yet
    optic_task2 = abilab.OpticTask(optic_input,
                                   nscf_node=nscf_node,
                                   ddk_nodes=ddk_nodes)
    flow.register_task(optic_task2)
    flow.allocate()
    flow.build_and_pickle_dump(abivalidate=True)
    assert len(flow) == 4

    optic_task2.start_and_wait()
    assert optic_task2.status == optic_task2.S_DONE

    flow.check_status()
    flow.show_status()
    assert flow.all_ok
    assert all(work.finalized for work in flow)

    #assert flow.validate_json_schema()

    # Test get_results
    optic_task2.get_results()