Example #1
def build_flow(options):
    gs_inp, ph_inp = make_inputs()

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)
    gs_work = abilab.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ", max_ncpus,
          " with efficiency >= ", min_eff)

    pconfs = ph_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    print(pconfs)

    work = abilab.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        if not options.accept_conf(conf, omp_threads): continue

        manager = options.manager.new_with_fixed_mpi_omp(
            conf.mpi_procs, omp_threads)
        inp = ph_inp.new_with_vars(conf.vars)
        work.register_phonon_task(inp,
                                  manager=manager,
                                  deps={gs_work[0]: "WFK"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)

    return flow.allocate()
Example #2
def bse_benchmark(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT.
    """
    gs_inp, bse_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    gs_work = abilab.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    mpi_list = options.mpi_list

    if options.mpi_list is None:
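        # The BSE workload scales with the number of transitions, here estimated
        # as (2 * 2 * nkpt)**2; use its divisors as MPI process counts so that
        # the transitions are distributed evenly among the ranks.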
        nkpt = len(gs_inp.abiget_ibz().points)
        ntrans = (2 * 2 * nkpt)**2
        mpi_list = [p for p in range(1, 1 + ntrans) if ntrans % p == 0]
    print("Using mpi_list:", mpi_list)

    bse_work = abilab.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        bse_work.register_bse_task(bse_inp,
                                   manager=manager,
                                   deps={gs_work[0]: "WFK"})
    flow.register_work(bse_work)

    return flow.allocate()
Example #3
def itest_dilatmx_error_handler(fwp, tvars):
    """
     Test cell relaxation with automatic restart in the presence of dilatmx error.
     """
    # Build the flow
    flow = abilab.Flow(fwp.workdir, manager=fwp.manager)

    # Decrease the volume to trigger DilatmxError
    ion_input, ioncell_input = make_ion_ioncell_inputs(tvars,
                                                       dilatmx=1.01,
                                                       scalevol=0.8)

    work = abilab.Work()
    work.register_relax_task(ioncell_input)

    flow.register_work(work)
    flow.allocate()
    assert flow.make_scheduler().start() == 0
    flow.show_status()

    assert all(work.finalized for work in flow)
    assert flow.all_ok

    # t0 should have reached S_OK, and we should have DilatmxError in the corrections.
    t0 = work[0]
    assert t0.status == t0.S_OK
    print(t0.corrections)
    assert t0.num_corrections == 1
    assert t0.corrections[0]["event"]["@class"] == "DilatmxError"
Example #4
def scr_benchmark(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT.
    """
    gs_inp, nscf_inp, scr_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    bands = abilab.BandStructureWork(gs_inp, nscf_inp)
    flow.register_work(bands)
    flow.exclude_from_benchmark(bands)

    print("Using mpi_list:", options.mpi_list)
    mpi_list = options.mpi_list

    for nband in [200, 400, 600]:
        scr_work = abilab.Work()
        if options.mpi_list is None:
            # Cannot call autoparal here because we need a WFK file.
            print("Using hard coded values for mpi_list")
            mpi_list = [
                np for np in range(1, nband + 1) if abs((nband - 28) % np) < 1
            ]
        print("Using nband %d and mpi_list: %s" % (nband, mpi_list))

        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(
                mpi_procs, omp_threads)
            scr_work.register_scr_task(scr_inp,
                                       manager=manager,
                                       deps={bands.nscf_task: "WFK"})
        flow.register_work(scr_work)

    return flow.allocate()
Example #5
def build_flow(options):
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    template = make_input()

    # Processor distribution.
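    # The trailing comment on each line gives the total number of MPI processes
    # for that configuration (npkpt * npband * npfft).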
    pconfs = [
        dict(npkpt=1, npband=13, npfft=10),  # 130
        dict(npkpt=1, npband=26, npfft=10),  # 260
        dict(npkpt=1, npband=65, npfft=8),   # 520
        dict(npkpt=1, npband=65, npfft=16),  # 1040
    ]

    for wfoptalg in [None, 1]:
        work = abilab.Work()
        for d, omp_threads in product(pconfs, options.omp_list):
            mpi_procs = reduce(operator.mul, d.values(), 1)
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            print("wfoptalg:", wfoptalg, "done with MPI_PROCS:", mpi_procs, "and:", d)
            inp = template.new_with_vars(d, wfoptalg=wfoptalg)
            work.register_scf_task(inp, manager=manager)

        flow.register_work(work)

    return flow.allocate()
Example #6
def build_flow(options):
    template = make_input()

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ", max_ncpus,
          " with efficiency >= ", min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus,
                                              autoparal=1,
                                              verbose=options.verbose)
    if options.verbose: print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    omp_threads = 1
    for accesswff in [1, 3]:  # [MPI-IO, Netcdf]
        work = abilab.Work()
        for conf in pconfs:
            mpi_procs = conf.mpi_ncpus
            omp_threads = conf.omp_ncpus
            if not options.accept_conf(conf, omp_threads): continue

            # Two GS-SCF tasks. The first one produces the WFK, the second one reads it.
            manager = options.manager.new_with_fixed_mpi_omp(
                mpi_procs, omp_threads)
            inp = template.new_with_vars(conf.vars, accesswff=accesswff)
            task0 = work.register_scf_task(inp, manager=manager)
            work.register_scf_task(inp, manager=manager, deps={task0: "WFK"})

        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
Example #7
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    workdir = options.workdir
    if not options.workdir:
        workdir = os.path.basename(__file__).replace(".py", "").replace("run_", "flow_") 

    # Create the flow
    flow = abilab.Flow(workdir, manager=options.manager, remove=options.remove)

    # Create a relaxation work and add it to the flow.
    ion_inp, ioncell_inp = make_ion_ioncell_inputs()

    relax_work = abilab.RelaxWork(ion_inp, ioncell_inp)
    flow.register_work(relax_work)

    #bands_work = abilab.BandStructureWork(scf_input, nscf_input)
    bands_work = abilab.Work()
    deps = {relax_work[-1]: "@structure"}
    #deps = {relax_work[-1]: ["DEN", "@structure"]}  # --> This is not possible because the file ext is changed!
    #deps = {relax_work[-1]: ["WFK", "@structure"]} # --> This triggers an infamous bug in abinit
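    # Note: the "@structure" dependency asks AbiPy to start the new task from the
    # relaxed structure produced by the last task of relax_work.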

    bands_work.register_relax_task(ioncell_inp, deps=deps)
    flow.register_work(bands_work)

    return flow
Example #8
def build_flow(options):
    template = make_input()

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose: print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for istwfk in [1, 2]:
        work = abilab.Work()
        for conf in pconfs:
            mpi_procs = conf.mpi_ncpus
            if not options.accept_conf(conf, omp_threads): continue

            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            inp = template.new_with_vars(conf.vars, istwfk=istwfk)
            work.register_scf_task(inp, manager=manager)

        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
Example #9
def build_flow(options):
    fftalg_list = [312, 402, 401]
    ecut_list = list(range(200, 610, 100))
    ecut_list = [
        400,
    ]

    mpi_list = options.mpi_list
    if mpi_list is None: mpi_list = [2, 4, 6, 8]
    print("Using mpi_list:", mpi_list)

    template = make_input()
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    omp_threads = 1
    for fftalg in fftalg_list:
        work = abilab.Work()
        for npfft in mpi_list:
            if not options.accept_mpi_omp(npfft, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(
                npfft, omp_threads)
            for inp in abilab.input_gen(template,
                                        fftalg=fftalg,
                                        npfft=npfft,
                                        ecut=ecut_list):
                work.register_scf_task(inp, manager=manager)
        flow.register_work(work)

    return flow.allocate()
Example #10
def build_flow(options):
    template = make_input()

    # Get the list of possible parallel configurations from abinit autoparal.
    #max_ncpus, min_eff = options.max_ncpus, options.min_eff
    #print("Getting all autoparal configurations up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    #pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    #if options.verbose: print(pconfs)

    # Processor distribution.
    pconfs = [
        dict(npkpt=64, npband=1, npfft=2),  # 128 
        dict(npkpt=64, npband=2, npfft=2),  # 256   
        dict(npkpt=64, npband=2, npfft=4),  # 512   
        dict(npkpt=64, npband=4, npfft=4),  # 1024  
    ]

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    for wfoptalg in [None, 1]:
        work = abilab.Work()
        for conf, omp_threads in product(pconfs, options.omp_list):
            #if not options.accept_conf(conf, omp_threads): continue
            mpi_procs = omp_threads * reduce(operator.mul, conf.values(), 1)

            manager = options.manager.new_with_fixed_mpi_omp(
                mpi_procs, omp_threads)
            inp = template.new_with_vars(conf, wfoptalg=wfoptalg)
            work.register_scf_task(inp, manager=manager)

        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
Example #11
def raman_flow():

    # Get the unperturbed structure.
    base_structure = abilab.Structure.from_abivars(unit_cell)

    pseudos = ["14si.pspnc"]

    workdir = os.path.join(os.path.dirname(__file__), "test_abipy_new")

    manager = abilab.TaskManager.from_user_config()
    #manager = abilab.TaskManager.from_file("bfo_manager.yml")

    policy = TaskPolicy(autoparal=0)
    gs_manager = manager.deepcopy()

    # Initialize the flow. Each work in the flow defines a complete BSE calculation for a given eta.
    flow = abilab.Flow(workdir, manager)

    # There will be kppa/natom kpoints in the unit cell !
    kppa = 3456  # ngkpt = [12,12,12] for the primitive cell
    kppa_gs = 1728  # ngkpt = [8,8,8] for the primitive cell

    etas = [-1, 0, 1]  # Anyway, it rescales everything at the end :-)
    eta = 0.01

    scale_matrix = [[-1, 0, 1], [-1, 1, 0], [-1, -1, 0]]

    ph_tot = np.array([[0.01, 0.011, 0.021], [-0.01, 0.041, -0.02]])
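    # Frozen-phonon displacement pattern (one row per displaced atom), applied
    # below at q = [0.5, 0.5, 0.5] on the supercell defined by scale_matrix.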
    modifier = abilab.StructureModifier(base_structure)

    displaced_structure = modifier.frozen_phonon([0.5, 0.5, 0.5],
                                                 ph_tot,
                                                 do_real=True,
                                                 frac_coords=False,
                                                 scale_matrix=scale_matrix)

    structure = displaced_structure

    ksampgs = KSampling.automatic_density(structure,
                                          kppa_gs,
                                          chksymbreak=0,
                                          shifts=[0, 0, 0])

    gs_inp = gs_input(structure, pseudos, ksampgs)
    wflow = abilab.Work()
    gs_t = wflow.register_scf_task(gs_inp)
    gs_t.set_manager(gs_manager)
    flow.register_work(wflow, workdir="gs_task")

    ksamp = KSampling.automatic_density(structure,
                                        kppa,
                                        chksymbreak=0,
                                        shifts=[1 / 4, 1 / 4, 1 / 4])
    flow.register_work(raman_workflow(structure, pseudos, gs_t, ksamp),
                       workdir="bse_task")

    return flow.allocate()
Example #12
def raman_workflow(structure, pseudos, scf_t, ksamp):
    # Generate 3 different input files for computing optical properties with BSE.

    inp = abilab.MultiDataset(structure, pseudos=pseudos, ndtset=2)

    inp.set_vars(**global_vars)

    vars_ksamp = ksamp.to_abivars()
    vars_ksamp.pop("#comment", None)

    inp.set_vars(**vars_ksamp)

    # NSCF run
    inp[0].set_vars(
        iscf=-2,
        nband=10 * len(structure),
        nbdbuf=2 * len(structure),
        nstep=500,
        tolwfr=1.e-22,
    )

    inp[1].set_vars(
        gwmem=10,
        gwpara=2,
        optdriver=99,
        nband=5 * len(structure),  # 10 bands for 2 atoms
        bs_loband=1 * len(structure),  # start at 2 for 2 atoms
        bs_algorithm=2,  # Haydock
        bs_calctype=1,  # KS wavefunctions
        bs_coulomb_term=21,  # Use model dielectric function
        mdf_epsinf=12.0,
        bs_exchange_term=1,  # Exchange term included
        bs_freq_mesh="0 10 0.01 eV",
        bs_hayd_term=0,
        bs_coupling=0,
        bs_haydock_tol="-0.01 0",
        bs_haydock_niter=1000,
        inclvkb=2,
        ecuteps=4,
        soenergy="0.8 eV",
        ecutwfn=global_vars["ecut"],
    )

    nscf_inp, bse_inp = inp.split_datasets()

    workflow = abilab.Work()
    nscf_t = workflow.register(nscf_inp,
                               deps={scf_t: "DEN"},
                               task_class=abilab.NscfTask)
    bse_t = workflow.register_bse_task(bse_inp, deps={nscf_t: "WFK"})

    return workflow
Example #13
def make_g0w0_scissors_flow(workdir="flow_lesson_g0w0", ngkpt=[2,2,2]):
    # Change the value of ngkpt below to perform a GW calculation with a different k-mesh.
    scf, bands_nscf, dos_nscf, gw_nscf, scr, sig = make_inputs(ngkpt=ngkpt)

    flow = abilab.Flow(workdir=workdir)
    work0 = abilab.BandStructureWork(scf, bands_nscf, dos_inputs=dos_nscf)
    flow.register_work(work0)

    work1 = abilab.Work()
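    # G0W0 chain: the NSCF task produces the WFK, the SCR task builds the screening
    # from it, and the sigma task needs both the WFK and the SCR file.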
    gw_nscf_task = work1.register_nscf_task(gw_nscf, deps={work0[0]: "DEN"})
    scr_task = work1.register_scr_task(scr, deps={gw_nscf_task: "WFK"})
    sigma_task = work1.register_sigma_task(sig, deps={gw_nscf_task: "WFK", scr_task: "SCR"})
    flow.register_work(work1)

    return flow.allocate()
Example #14
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    workdir = options.workdir
    if not options.workdir:
        workdir = os.path.basename(__file__).replace(".py", "").replace(
            "run_", "flow_")

    # Get our templates
    scf_inp, nscf_inp, scr_inp, sig_inp = make_inputs()

    ecuteps_list = np.arange(2, 8, 2)
    max_ecuteps = max(ecuteps_list)

    flow = abilab.Flow(workdir=workdir,
                       manager=options.manager,
                       remove=options.remove)

    # Band structure work to produce the WFK file
    bands = abilab.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands)

    # Build a work made of two SCR runs with different values of nband.
    # Use max_ecuteps for the dielectric matrix (the sigma tasks will
    # read a submatrix when we test the convergence wrt ecuteps).
    scr_work = abilab.Work()

    for inp in abilab.input_gen(scr_inp, nband=[10, 15]):
        inp.set_vars(ecuteps=max_ecuteps)
        scr_work.register_scr_task(inp, deps={bands.nscf_task: "WFK"})

    flow.register_work(scr_work)

    # Do a convergence study wrt ecuteps, each work is connected to a
    # different SCR file computed with a different value of nband.

    # Build a list of sigma inputs with different ecuteps
    sigma_inputs = list(abilab.input_gen(sig_inp, ecuteps=ecuteps_list))

    for scr_task in scr_work:
        sigma_conv = abilab.SigmaConvWork(wfk_node=bands.nscf_task,
                                          scr_node=scr_task,
                                          sigma_inputs=sigma_inputs)
        flow.register_work(sigma_conv)

    return flow
Example #15
def build_flow(options):
    flow, eph_inp = make_flow_ephinp(options)

    mpi_list = options.mpi_list
    if mpi_list is None:
        nkpt = len(eph_inp.abiget_ibz().points)
        nks = nkpt * eph_inp["nsppol"]
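        # Parallelize over k-points and spins: accept only divisors of nkpt * nsppol
        # so that the states are distributed evenly among the MPI ranks.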
        mpi_list = [p for p in range(1, nks+1) if nks % p == 0]
        print("Using mpi_list:", mpi_list)
    else:
        print("Using mpi_list from cmd line:", mpi_list)

    eph_work = abilab.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        eph_work.register_eph_task(eph_inp, manager=manager, deps={flow[0][0]: "WFK", flow[1]: ["DDB", "DVDB"]})

    flow.register_work(eph_work)
    return flow.allocate()
Example #16
def build_flow(options):
    inp = make_input(paw=options.paw)
    nkpt = len(inp.abiget_ibz().points)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)
    work = abilab.Work()

    omp_list = options.omp_list
    if omp_list is None: omp_list = [1, 2, 4, 6]
    print("Using omp_list:", omp_list)

    mpi_procs = 1
    for omp_threads in omp_list:
        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        work.register(inp, manager=manager)

    flow.register_work(work)
    return flow.allocate()
Example #17
def build_flow(options):
    template = make_input()

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    if max_ncpus is None:
        nkpt = len(template.abiget_ibz().points)
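        # Rough upper bound for autoparal: k-point x spin x band parallelism,
        # multiplied by 4 as extra headroom.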
        max_ncpus = nkpt * template["nsppol"] * template["nband"] * 4
    print("Getting all autoparal confs up to max_ncpus: ", max_ncpus,
          " with efficiency >= ", min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus,
                                              autoparal=1,
                                              verbose=options.verbose)
    if options.verbose: print(pconfs)

    #parallelization
    #paral_kgb 1
    #npband 15
    #npfft 3
    #npkpt 4
    #bandpp 2

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    work = abilab.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        mpi_procs = conf.mpi_ncpus
        if not options.accept_conf(conf, omp_threads): continue

        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        inp = template.new_with_vars(conf.vars)
        work.register_scf_task(inp, manager=manager)

    print("Found %d configurations" % len(work))
    flow.register_work(work)

    return flow.allocate()
Example #18
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    workdir = options.workdir
    if not options.workdir:
        workdir = os.path.basename(__file__).replace(".py", "").replace("run_","flow_") 

    # Get the SCF and the NSCF input.
    scf_input, nscf_input = make_scf_nscf_inputs()

    # Build the flow.
    flow = abilab.Flow(workdir, manager=options.manager, remove=options.remove)

    # Create a Work: all tasks in the work will read the external DEN file.
    # Note that the file must exist when the work is created.
    # Use the standard approach based on tasks and works if
    # there's a node that needs a file produced in the future.
    work = abilab.Work()
    denfile = abidata.ref_file("si_DEN-etsf.nc")
    work.register(nscf_input, deps={denfile: "DEN"})
    flow.register_work(work)

    return flow
Example #19
def build_flow(options):
    gs_inp, nscf_inp = make_inputs(options)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    mpi_list = options.mpi_list
    if mpi_list is None:
        # Get the list of possible parallel configurations from abinit autoparal.
        max_ncpus, min_eff = options.max_ncpus, options.min_eff
        print("Getting all autoparal confs up to max_ncpus: ", max_ncpus,
              " with efficiency >= ", min_eff)

        pconfs = gs_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)

    else:
        print("Initializing autoparal from command line options")
        from pymatgen.io.abinit.tasks import ParalHints
        pconfs = ParalHints.from_mpi_omp_lists(mpi_list, options.omp_list)
        print(pconfs)

    work = abilab.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        mpi_procs = conf.mpi_ncpus
        #if not options.accept_mpi_omp(mpi_procs,omp_threads): continue
        if not options.accept_conf(conf, omp_threads): continue

        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        inp = gs_inp.new_with_vars(conf.vars)
        scf_task = work.register_scf_task(inp, manager=manager)

        inp2 = nscf_inp.new_with_vars(conf.vars)
        work.register_nscf_task(inp2, manager=manager, deps={scf_task: "DEN"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)

    return flow.allocate()
Example #20
def itest_flow_with_deadlocks(fwp):
    """
    Test the behaviour of the scheduler in the presence of a deadlock
    when we ignore errored tasks and we try to run all tasks in the flow.
    The scheduler should detect the deadlock and exit when no other task can be executed.
    """
    # Get the SCF and the NSCF input.
    scf_input, nscf_input = make_scf_nscf_inputs()

    # Build the flow.
    flow = abilab.Flow(fwp.workdir, manager=fwp.manager)
    work0 = abilab.BandStructureWork(scf_input, nscf_input, dos_inputs=nscf_input)
    flow.register_work(work0)
    scf_task, nscf_task, dos_task = work0[0], work0[1], work0[2]

    work1 = abilab.Work()
    work1.register_nscf_task(nscf_input, deps={scf_task: "DEN", dos_task: "WFK"})
    # This task will deadlock when nscf_task reaches S_ERROR.
    work1.register_nscf_task(nscf_input, deps={scf_task: "DEN", nscf_task: "WFK"})
    flow.register_work(work1)

    flow.allocate()

    # Mock an Errored nscf_task. This will cause a deadlock in the flow.
    nscf_task = mocks.change_task_start(nscf_task)

    # Here we set max_num_abierrs to a very large number.
    sched = flow.make_scheduler()
    sched.max_num_abierrs = 10000
    assert sched.start() == 0
    flow.check_status(show=True)

    assert not flow.all_ok
    assert all(task.status == task.S_OK for task in [scf_task, dos_task, work1[0]])
    assert all(task.status == task.S_ERROR for task in [nscf_task])
    g = flow.find_deadlocks()
    assert g.deadlocked and not g.runnables and not g.running
    assert work1[1] in g.deadlocked
Example #21
def build_flow(options):
    inp = make_input()

    mpi_list = options.mpi_list
    if mpi_list is None:
        nkpt = len(inp.abiget_ibz().points)
        nks = nkpt * inp["nsppol"]
        mpi_list = [p for p in range(1, nks + 1) if nks % p == 0]
    print("Using mpi_list:", mpi_list)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    for useylm in [0, 1]:
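        # Benchmark both implementations of the nonlocal operator:
        # useylm=0 (Legendre polynomials) vs useylm=1 (real spherical harmonics).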
        work = abilab.Work()
        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(
                mpi_procs, omp_threads)
            work.register_scf_task(inp.new_with_vars(useylm=useylm),
                                   manager=manager)
        flow.register_work(work)

    return flow.allocate()
Example #22
def build_flow(options):
    flow = make_base_flow(options)

    optic_input = abilab.OpticInput(
        broadening=0.002,
        domega=0.0003,
        maxomega=0.3,
        scissor=0.000,
        tolerance=0.002,
        num_lin_comp=1,
        lin_comp=11,
        num_nonlin_comp=2,
        nonlin_comp=(123, 222),
    )

    mpi_list = options.mpi_list
    if mpi_list is None:
        mpi_list = [1, 2, 4, 8]
        print("Using mpi_list:", mpi_list)
    else:
        print("Using mpi_list from cmd line:", mpi_list)

    work = abilab.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        optic_task = abilab.OpticTask(optic_input,
                                      manager=manager,
                                      nscf_node=flow[0].nscf_task,
                                      ddk_nodes=flow[1])
        work.register_task(optic_task)

    flow.register_work(work)

    return flow.allocate()
Example #23
def run_annaddb(flow, structure):

    #structure = flow[0][0].
    manager = abilab.TaskManager.from_user_config()

    # We should have a DDB file with IFC(q) in each work.outdir
    ddb_files = []
    for work in flow[1:]:
        ddbs = work.outdir.list_filepaths(wildcard="*DDB")
        assert len(ddbs) == 1
        ddb_files.append(ddbs[0])

    # TODO: Check automatic restart
    assert all(work.finalized for work in flow)
    assert flow.all_ok

    # Merge the DDB files
    out_ddb = flow.outdir.path_in("flow_DDB")
    ddb_path = abilab.Mrgddb(manager=manager).merge(
        flow.outdir.path,
        ddb_files,
        out_ddb=out_ddb,
        description="DDB generated by %s" % __file__)

    assert ddb_path == out_ddb

    # Build new work with Anaddb tasks.
    # Construct a manager with mpi_ncpus==1 since anaddb does not support mpi_ncpus > 1 (except in elphon).
    shell_manager = manager.to_shell_manager()
    awork = abilab.Work(manager=shell_manager)

    # modes
    anaddb_input = abilab.AnaddbInput.modes(structure)
    atask = abilab.AnaddbTask(anaddb_input,
                              ddb_node=ddb_path,
                              manager=shell_manager)
    awork.register(atask)

    # Thermodynamics
    anaddb_input = abilab.AnaddbInput.thermo(structure,
                                             ngqpt=(40, 40, 40),
                                             nqsmall=20)
    atask = abilab.AnaddbTask(anaddb_input,
                              ddb_node=ddb_path,
                              manager=shell_manager)
    awork.register(atask)

    # Phonons bands and DOS with gaussian method
    anaddb_input = abilab.AnaddbInput.phbands_and_dos(
        structure,
        ngqpt=(4, 4, 4),
        nqsmall=10,
        ndivsm=5,
        dos_method="gaussian: 0.001 eV")
    atask = abilab.AnaddbTask(anaddb_input,
                              ddb_node=ddb_path,
                              manager=shell_manager)
    awork.register(atask)

    # Phonons bands and DOS with tetrahedron method
    anaddb_input = abilab.AnaddbInput.phbands_and_dos(structure,
                                                      ngqpt=(4, 4, 4),
                                                      nqsmall=10,
                                                      ndivsm=5,
                                                      dos_method="tetra")
    atask = abilab.AnaddbTask(anaddb_input,
                              ddb_node=ddb_path,
                              manager=shell_manager)
    awork.register(atask)

    flow.register_work(awork)
    flow.allocate()
    flow.build()

    for i, atask in enumerate(awork):
        print("about to run anaddb task: %d" % i)
        atask.start_and_wait()
        #assert atask.status == atask.S_DONE
        atask.check_status()
Example #24
def build_flow(options, paral_kgb=0):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    workdir = options.workdir
    if not options.workdir:
        workdir = os.path.basename(__file__).replace(".py", "").replace(
            "run_", "flow_")

    multi = abilab.MultiDataset(structure=data.structure_from_ucell("GaAs"),
                                pseudos=data.pseudos("31ga.pspnc",
                                                     "33as.pspnc"),
                                ndtset=5)

    # Global variables
    kmesh = dict(ngkpt=[4, 4, 4],
                 nshiftk=4,
                 shiftk=[[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0],
                         [0.0, 0.0, 0.5]])

    global_vars = dict(ecut=2, paral_kgb=paral_kgb)
    global_vars.update(kmesh)

    multi.set_vars(global_vars)

    # Dataset 1 (GS run)
    multi[0].set_vars(
        tolvrs=1e-6,
        nband=4,
    )

    # NSCF run with a large number of bands, and points in the full BZ
    multi[1].set_vars(
        iscf=-2,
        nband=20,
        nstep=25,
        kptopt=1,
        tolwfr=1.e-9,
        #kptopt=3,
    )

    # Third dataset: ddk response function along axis 1
    # Fourth dataset: ddk response function along axis 2
    # Fifth dataset: ddk response function along axis 3
    for dir in range(3):
        rfdir = 3 * [0]
        rfdir[dir] = 1

        multi[2 + dir].set_vars(
            iscf=-3,
            nband=20,
            nstep=1,
            nline=0,
            prtwf=3,
            kptopt=3,
            nqpt=1,
            qpt=[0.0, 0.0, 0.0],
            rfdir=rfdir,
            rfelfd=2,
            tolwfr=1.e-9,
        )

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = multi.split_datasets()

    # Initialize the flow.
    flow = abilab.Flow(workdir, manager=options.manager, remove=options.remove)

    bands_work = abilab.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands_work)

    ddk_work = abilab.Work()
    for inp in [ddk1, ddk2, ddk3]:
        ddk_work.register_ddk_task(inp, deps={bands_work.nscf_task: "WFK"})

    flow.register_work(ddk_work)

    # Optic does not support MPI with ncpus > 1.
    optic_input = abilab.OpticInput(
        broadening=0.002,  # Value of the smearing factor, in Hartree
        domega=0.0003,  # Frequency mesh.
        maxomega=0.3,
        scissor=0.000,  # Scissor shift if needed, in Hartree
        tolerance=0.002,  # Tolerance on closeness of singularities (in Hartree)
        num_lin_comp=1,          # Number of components of linear optic tensor to be computed
        lin_comp=11,             # Linear coefficients to be computed (x=1, y=2, z=3)
        num_nonlin_comp=2,       # Number of components of nonlinear optic tensor to be computed
        nonlin_comp=(123, 222),  # Non-linear coefficients to be computed
    )

    # TODO
    # Check if the order of the 1WF files is relevant. Can we use DDK files ordered
    # in an arbitrary way or do we have to pass (x,y,z)?
    optic_task = abilab.OpticTask(optic_input,
                                  nscf_node=bands_work.nscf_task,
                                  ddk_nodes=ddk_work)
    flow.register_task(optic_task)

    return flow
Example #25
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    workdir = options.workdir
    if not options.workdir:
        workdir = os.path.basename(__file__).replace(".py", "").replace("run_","flow_") 

    # Preparatory run for E-PH calculations.
    # The sequence of datasets computes the ground state and
    # all of the independent perturbations of the single Al atom
    # for the irreducible q-points of a 4x4x4 grid.
    # Note that the q-point grid must be a sub-grid of the k-point grid (here 8x8x8)
    pseudos = abidata.pseudos("Al.oncvpsp")

    structure = abilab.Structure.from_abivars(
        acell=3*[7.5],
        rprim=[0.0, 0.5, 0.5, 
               0.5, 0.0, 0.5,
               0.5, 0.5, 0.0],
        typat=1,
        xred=[0.0, 0.0, 0.0],
        ntypat=1,
        znucl=13,
    )

    gs_inp = abilab.AbinitInput(structure, pseudos)

    gs_inp.set_vars(
        prtpot=1,
        istwfk="*1",
        ecut=12.0,
        nband=5,
        occopt=7,    # include metallic occupation function with a small smearing
        tsmear=0.04,
        tolvrs=1e-7,
        timopt=-1,
    )

    # The kpoint grid is minimalistic to keep the calculation manageable.
    gs_inp.set_kmesh(
        ngkpt=[8, 8, 8], 
        kptopt=3,
        shiftk=[0.0, 0.0, 0.0],
    )

    # Phonon calculation with 4x4x4
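    # i.e. the 8 irreducible q-points of the unshifted 4x4x4 grid, in reduced coordinates.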
    qpoints = np.reshape([
         0.00000000e+00,  0.00000000e+00,  0.00000000e+00, 
         2.50000000e-01,  0.00000000e+00,  0.00000000e+00,
         5.00000000e-01,  0.00000000e+00,  0.00000000e+00,
         2.50000000e-01,  2.50000000e-01,  0.00000000e+00,
         5.00000000e-01,  2.50000000e-01,  0.00000000e+00,
        -2.50000000e-01,  2.50000000e-01,  0.00000000e+00,
         5.00000000e-01,  5.00000000e-01,  0.00000000e+00,
        -2.50000000e-01,  5.00000000e-01,  2.50000000e-01,
        ], (-1,3))

    flow = abilab.Flow(workdir, manager=options.manager, remove=options.remove)
    work0 = flow.register_task(gs_inp, task_class=abilab.ScfTask)

    ph_work = abilab.PhononWork.from_scf_task(work0[0], qpoints)
    flow.register_work(ph_work)

    # Build input file for E-PH run.
    eph_inp = gs_inp.new_with_vars(
        optdriver=7,
        #ddb_ngqpt=[1, 1, 1],  # q-mesh used to produce the DDB file (must be consistent with DDB data)
        ddb_ngqpt=[4, 4, 4],   # q-mesh used to produce the DDB file (must be consistent with DDB data)
        eph_intmeth=2,         # Tetra
        eph_fsewin="0.8 eV",   # Energy window around Ef
        eph_mustar=0.12,       # mustar parameter
        # q-path for phonons and phonon linewidths.
        ph_ndivsm=20,
        ph_nqpath=3,
        ph_qpath= [
          0  , 0  , 0, 
          0.5, 0  , 0,
          0.5, 0.5, 0,],
        # phonon DOS obtained via Fourier interpolation
        ph_intmeth=2,            # Tetra for phonon DOS and A2F
        ph_smear="0.001 eV",
        ph_wstep="0.0001 eV",
        ph_ngqpt=[16, 16, 16],   # q-mesh for Fourier interpolation of IFC and a2F(w)
        ph_nqshift=1,
        ph_qshift=[0, 0, 0],
    )

    eph_work = abilab.Work()
    eph_task = eph_work.register_eph_task(eph_inp, deps={work0[0]: "WFK", ph_work: ["DDB", "DVDB"]})
    flow.register_work(eph_work)

    # EPH does not support autoparal
    flow.allocate()
    eph_task.with_fixed_mpi_omp(1, 1)
                                                               
    return flow
Example #26
def itest_phonon_flow(fwp, tvars):
    """
    Create an Abinit flow for phonon calculations:

        1) One work for the GS run.

        2) nqpt works for phonon calculations. Each work contains
           nirred tasks where nirred is the number of irreducible phonon perturbations
           for that particular q-point.
    """
    if tvars.paral_kgb == 1:
        pytest.xfail("Phonon flow with paral_kgb==1 is expected to fail (implementation problem)")

    all_inps = scf_ph_inputs(tvars)
    scf_input, ph_inputs = all_inps[0], all_inps[1:]

    flow = abilab.phonon_flow(fwp.workdir, scf_input, ph_inputs, manager=fwp.manager)
    flow.build_and_pickle_dump()

    t0 = flow[0][0]
    t0.start_and_wait()

    assert t0.uses_paral_kgb(tvars.paral_kgb)

    flow.check_status()
    assert t0.status == t0.S_OK
    flow.show_status()

    for work in flow[1:]:
        for task in work:
            task.start_and_wait()
            assert task.status == t0.S_DONE

    flow.check_status(show=True)

    # We should have a DDB file with IFC(q) in each work.outdir
    ddb_files = []
    for work in flow[1:]:
        ddbs = work.outdir.list_filepaths(wildcard="*DDB")
        assert len(ddbs) == 1
        ddb_files.append(ddbs[0])

    assert all(work.finalized for work in flow)
    assert flow.all_ok

    # Merge the DDB files
    out_ddb = flow.outdir.path_in("flow_DDB")
    ddb_path = abilab.Mrgddb().merge(flow.outdir.path, ddb_files, out_ddb=out_ddb, 
                                     description="DDB generated by %s" % __file__)
    assert ddb_path == out_ddb

    # Test PhononTask inspect method
    ph_task = flow[1][0]

    # paral_kgb does not make sense for DFPT!
    assert not ph_task.uses_paral_kgb(tvars.paral_kgb)

    if has_matplotlib():
        ph_task.inspect(show=False)

    # Test get_results
    ph_task.get_results()

    # Build new work with Anaddb tasks.
    # Construct a manager with mpi_procs==1 since anaddb does not support mpi_procs > 1 (except in elphon)
    shell_manager = fwp.manager.to_shell_manager(mpi_procs=1)
    awork = abilab.Work(manager=shell_manager)

    # Phonons bands and DOS with gaussian method
    anaddb_input = abilab.AnaddbInput.phbands_and_dos(
        scf_input.structure, ngqpt=(4, 4, 4), ndivsm=5, nqsmall=10, dos_method="gaussian: 0.001 eV")

    atask = abilab.AnaddbTask(anaddb_input, ddb_node=ddb_path, manager=shell_manager)
    awork.register(atask)

    # Phonons bands and DOS with tetrahedron method
    anaddb_input = abilab.AnaddbInput.phbands_and_dos(
        scf_input.structure, ngqpt=(4, 4, 4), ndivsm=5, nqsmall=10, dos_method="tetra")

    atask = abilab.AnaddbTask(anaddb_input, ddb_node=ddb_path, manager=shell_manager)
    awork.register(atask)

    flow.register_work(awork)
    flow.allocate()
    flow.build()

    for i, atask in enumerate(awork):
        atask.history.info("about to run anaddb task: %d", i)
        atask.start_and_wait()
        assert atask.status == atask.S_DONE
        atask.check_status()
        assert atask.status == atask.S_OK
Example #27
def itest_optic_flow(fwp, tvars):
    """Test optic calculations."""
    if tvars.paral_kgb == 1:
        pytest.xfail(
            "Optic flow with paral_kgb==1 is expected to fail (implementation problem)"
        )
    """
    0.002         ! Value of the smearing factor, in Hartree
    0.0003  0.3   ! Difference between frequency values (in Hartree), and maximum frequency ( 1 Ha is about 27.211 eV)
    0.000         ! Scissor shift if needed, in Hartree
    0.002         ! Tolerance on closeness of singularities (in Hartree)
    1             ! Number of components of linear optic tensor to be computed
    11            ! Linear coefficients to be computed (x=1, y=2, z=3)
    2             ! Number of components of nonlinear optic tensor to be computed
    123 222       ! Non-linear coefficients to be computed
    """
    optic_input = abilab.OpticInput(
        broadening=0.002,
        domega=0.0003,
        maxomega=0.3,
        scissor=0.000,
        tolerance=0.002,
        num_lin_comp=1,
        lin_comp=11,
        num_nonlin_comp=2,
        nonlin_comp=(123, 222),
    )
    print(optic_input)
    #raise ValueError()

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = make_inputs(tvars)

    flow = abilab.Flow(fwp.workdir, manager=fwp.manager)

    bands_work = abilab.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands_work)

    # work with DDK tasks.
    ddk_work = abilab.Work()
    for inp in [ddk1, ddk2, ddk3]:
        ddk_work.register_ddk_task(inp, deps={bands_work.nscf_task: "WFK"})

    flow.register_work(ddk_work)
    flow.allocate()
    flow.build_and_pickle_dump(abivalidate=True)

    # Run the tasks
    for task in flow.iflat_tasks():
        task.start_and_wait()
        assert task.status == task.S_DONE

    flow.check_status()
    assert flow.all_ok

    # Optic does not support MPI with ncores > 1 hence we have to construct a manager with mpi_procs==1
    shell_manager = fwp.manager.to_shell_manager(mpi_procs=1)

    # Build optic task and register it
    optic_task1 = abilab.OpticTask(optic_input,
                                   nscf_node=bands_work.nscf_task,
                                   ddk_nodes=ddk_work,
                                   manager=shell_manager)

    flow.register_task(optic_task1)
    flow.allocate()
    flow.build_and_pickle_dump(abivalidate=True)

    optic_task1.start_and_wait()
    assert optic_task1.status == optic_task1.S_DONE

    # Now we do a similar calculation but the dependencies are represented by
    # strings with the path to the input files instead of task objects.
    ddk_nodes = [task.outdir.has_abiext("1WF") for task in ddk_work]
    #ddk_nodes = [task.outdir.has_abiext("DDK") for task in ddk_work]
    print("ddk_nodes:", ddk_nodes)
    assert all(ddk_nodes)

    #nscf_node = bands_work.nscf_task
    nscf_node = bands_work.nscf_task.outdir.has_abiext("WFK")
    assert nscf_node

    # This does not work yet
    optic_task2 = abilab.OpticTask(optic_input,
                                   nscf_node=nscf_node,
                                   ddk_nodes=ddk_nodes)
    flow.register_task(optic_task2)
    flow.allocate()
    flow.build_and_pickle_dump(abivalidate=True)
    assert len(flow) == 4

    optic_task2.start_and_wait()
    assert optic_task2.status == optic_task2.S_DONE

    flow.check_status()
    flow.show_status()
    assert flow.all_ok
    assert all(work.finalized for work in flow)

    #assert flow.validate_json_schema()

    # Test get_results
    optic_task2.get_results()
Example #28
def make_base_flow(options):
    multi = abilab.MultiDataset(structure=data.structure_from_ucell("GaAs"),
                                pseudos=data.pseudos("31ga.pspnc",
                                                     "33as.pspnc"),
                                ndtset=5)

    # Global variables
    kmesh = dict(ngkpt=[4, 4, 4],
                 nshiftk=4,
                 shiftk=[[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0],
                         [0.0, 0.0, 0.5]])

    paral_kgb = 1
    global_vars = dict(ecut=2, paral_kgb=paral_kgb)
    global_vars.update(kmesh)

    multi.set_vars(global_vars)

    # Dataset 1 (GS run)
    multi[0].set_vars(
        tolvrs=1e-6,
        nband=4,
    )

    # NSCF run with a large number of bands, and points in the full BZ
    multi[1].set_vars(
        iscf=-2,
        nband=20,
        nstep=25,
        kptopt=1,
        tolwfr=1.e-9,
        #kptopt=3,
    )

    # Third dataset: ddk response function along axis 1
    # Fourth dataset: ddk response function along axis 2
    # Fifth dataset: ddk response function along axis 3
    for dir in range(3):
        rfdir = 3 * [0]
        rfdir[dir] = 1

        multi[2 + dir].set_vars(
            iscf=-3,
            nband=20,
            nstep=1,
            nline=0,
            prtwf=3,
            kptopt=3,
            nqpt=1,
            qpt=[0.0, 0.0, 0.0],
            rfdir=rfdir,
            rfelfd=2,
            tolwfr=1.e-9,
        )

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = multi.split_datasets()

    # Initialize the flow.
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    bands_work = abilab.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands_work)
    flow.exclude_from_benchmark(bands_work)

    ddk_work = abilab.Work()
    for inp in [ddk1, ddk2, ddk3]:
        ddk_work.register_ddk_task(inp, deps={bands_work.nscf_task: "WFK"})

    flow.register_work(ddk_work)
    flow.exclude_from_benchmark(ddk_work)

    return flow
Example #29
def raman_work(structure, pseudos, ngkpt, shiftk, ddk_manager, shell_manager):
    # Generate 3 different input files for computing optical properties with BSE.

    multi = abilab.MultiDataset(structure, pseudos=pseudos, ndtset=5)
    multi.set_vars(global_vars)
    multi.set_kmesh(ngkpt=ngkpt, shiftk=shiftk)

    # GS run
    multi[0].set_vars(
        tolvrs=1e-8,
        nband=59,
    )

    # NSCF run
    multi[1].set_vars(
        iscf=-2,
        nband=100,
        kptopt=1,
        tolwfr=1.e-12,
    )

    # DDK along 3 directions
    # Third dataset : ddk response function along axis 1
    # Fourth dataset : ddk response function along axis 2
    # Fifth dataset : ddk response function along axis 3
    for dir in range(3):
        rfdir = 3 * [0]
        rfdir[dir] = 1

        multi[2 + dir].set_vars(
            iscf=-3,
            nband=100,
            nstep=1,
            nline=0,
            prtwf=3,
            kptopt=1,
            nqpt=1,
            qpt=[0.0, 0.0, 0.0],
            rfdir=rfdir,
            rfelfd=2,
            tolwfr=1.e-12,
        )

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = multi.split_datasets()
    ddk_inputs = [ddk1, ddk2, ddk3]

    work = abilab.Work()
    scf_t = work.register_scf_task(scf_inp)
    nscf_t = work.register_nscf_task(nscf_inp, deps={scf_t: "DEN"})

    ddk_nodes = []
    for inp in ddk_inputs:
        ddk_t = work.register_ddk_task(inp, deps={nscf_t: "WFK"})
        ddk_t.set_manager(ddk_manager)
        ddk_nodes.append(ddk_t)

    optic_t = abilab.OpticTask(optic_input,
                               nscf_node=nscf_t,
                               ddk_nodes=ddk_nodes,
                               manager=shell_manager)

    work.register(optic_t)

    return work