Example No. 1
def build_flow(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT.
    """
    gs_inp, bse_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    gs_work = flowtk.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    mpi_list = options.mpi_list

    if options.mpi_list is None:
        nkpt = len(gs_inp.abiget_ibz().points)
        ntrans = (2 * 2 * nkpt)**2
        mpi_list = [p for p in range(1, 1 + ntrans) if ntrans % p == 0]
    if options.verbose: print("Using mpi_list:", mpi_list)

    bse_work = flowtk.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        bse_work.register_bse_task(bse_inp,
                                   manager=manager,
                                   deps={gs_work[0]: "WFK"})
    flow.register_work(bse_work)

    return flow.allocate()
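
When options.mpi_list is None, the snippet above derives the list from the divisors of ntrans = (2 * 2 * nkpt)**2, presumably so that the transition space distributes evenly among the MPI ranks. A minimal standalone sketch of that divisor logic (hypothetical helper name, standard library only):

def divisors(n):
    """Return all positive divisors of n in ascending order."""
    return [p for p in range(1, n + 1) if n % p == 0]

# With nkpt = 3 the benchmark would probe ntrans = (2 * 2 * 3)**2 = 144 and try
# 1, 2, 3, 4, 6, 8, ... MPI processes, up to 144.
print(divisors(144)[:6])   # [1, 2, 3, 4, 6, 8]
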
Example No. 2
def build_flow(options):
    gs_inp, ph_inp = make_inputs()

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    gs_work = flowtk.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus:", max_ncpus, "with efficiency >=", min_eff)

    pconfs = ph_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    if options.verbose: print(pconfs)

    omp_threads = 1
    work = flowtk.Work()
    for conf in pconfs:
        mpi_procs = conf.mpi_ncpus
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        if min_eff is not None and conf.efficiency < min_eff: continue

        if options.verbose: print(conf)
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        inp = ph_inp.new_with_vars(conf.vars)
        work.register_phonon_task(inp, manager=manager, deps={gs_work[0]: "WFK"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)

    return flow.allocate()
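
The selection loop above (skip configurations rejected by accept_mpi_omp or below min_eff) recurs in several of these benchmarks. A small sketch of that filter in isolation, assuming the pconfs items expose mpi_ncpus and efficiency as in the snippet above:

def select_pconfs(pconfs, options, omp_threads=1):
    """Keep only the autoparal configurations passing the MPI/OMP filter and the efficiency threshold."""
    selected = []
    for conf in pconfs:
        if not options.accept_mpi_omp(conf.mpi_ncpus, omp_threads):
            continue
        if options.min_eff is not None and conf.efficiency < options.min_eff:
            continue
        selected.append(conf)
    return selected
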
Example No. 3
def build_flow(options):
    flow, eph_inp = make_flow_ephinp(options)

    mpi_list = options.mpi_list
    if mpi_list is None:
        nkpt = len(eph_inp.abiget_ibz().points)
        nks = nkpt * eph_inp["nsppol"]
        mpi_list = [p for p in range(1, nks + 1) if nks % p == 0]
        if options.verbose: print("Using mpi_list:", mpi_list)
    else:
        print("Using mpi_list from cmd line:", mpi_list)

    eph_work = flowtk.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        eph_work.register_eph_task(eph_inp,
                                   manager=manager,
                                   deps={
                                       flow[0][0]: "WFK",
                                       flow[1]: ["DDB", "DVDB"]
                                   })

    flow.register_work(eph_work)
    return flow.allocate()
Example No. 4
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    if not options.workdir:
        options.workdir = os.path.basename(__file__).replace(
            ".py", "").replace("run_", "flow_")

    flow = flowtk.Flow(workdir=options.workdir, manager=options.manager)

    for nspinor in (1, 2):
        #for nspinor in (2,):
        # Get our templates
        scf_inp, bands_inp, nscf_inp, scr_inp, sig_inp = make_inputs(nspinor)

        # Band structure work to produce the WFK file
        bands_work = flowtk.BandStructureWork(scf_inp,
                                              bands_inp,
                                              dos_inputs=[nscf_inp])
        flow.register_work(bands_work)

        # Build a GW work made of a SCR task followed by a SIGMA task.
        gw_work = flowtk.Work()
        scr_task = gw_work.register_scr_task(scr_inp,
                                             deps={bands_work[2]: "WFK"})
        gw_work.register_sigma_task(sig_inp,
                                    deps={
                                        bands_work[2]: "WFK",
                                        scr_task: "SCR"
                                    })
        flow.register_work(gw_work)

    return flow
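
For context, scripts like this one are normally executed through a small driver that parses the command-line options and hands them to build_flow. A minimal sketch of that driver, assuming the flowtk.flow_main decorator used by the AbiPy example scripts is available:

import sys

from abipy import flowtk


@flowtk.flow_main
def main(options):
    # flow_main builds the `options` namespace from sys.argv and takes care of
    # building, pickling and (optionally) scheduling the flow returned here.
    return build_flow(options)


if __name__ == "__main__":
    sys.exit(main())
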
Example No. 5
def build_flow(options):
    template = make_input()

    # Get the list of possible parallel configurations from abinit autoparal.
    #max_ncpus, min_eff = options.max_ncpus, options.min_eff
    #print("Getting all autoparal configurations up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    #pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    #if options.verbose: print(pconfs)

    # Processor distribution.
    pconfs = [
      dict(npkpt=64, npband=1, npfft=2), # 128
      dict(npkpt=64, npband=2, npfft=2), # 256
      dict(npkpt=64, npband=2, npfft=4), # 512
      dict(npkpt=64, npband=4, npfft=4), # 1024
    ]

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    for wfoptalg in [None, 1]:
        work = flowtk.Work()
        for conf, omp_threads in product(pconfs, options.omp_list):
            #if not options.accept_conf(conf, omp_threads): continue
            mpi_procs = omp_threads * reduce(operator.mul, conf.values(), 1)

            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            inp = template.new_with_vars(conf, wfoptalg=wfoptalg)
            work.register_scf_task(inp, manager=manager)

        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
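
Each entry of pconfs above is a plain dict of ABINIT distribution variables, and the trailing comments (128, 256, 512, 1024) are simply the product of its values. A quick standard-library check of that arithmetic (math.prod requires Python 3.8+):

import math

pconfs = [
    dict(npkpt=64, npband=1, npfft=2),
    dict(npkpt=64, npband=2, npfft=2),
    dict(npkpt=64, npband=2, npfft=4),
    dict(npkpt=64, npband=4, npfft=4),
]

for conf in pconfs:
    # Same value as reduce(operator.mul, conf.values(), 1) in the snippet above.
    print(conf, "->", math.prod(conf.values()))   # 128, 256, 512, 1024
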
Example No. 6
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    if not options.workdir:
        options.workdir = os.path.basename(__file__).replace(".py", "").replace("run_", "flow_")

    structure = abidata.structure_from_ucell("MgB2")

    # Get pseudos from a table.
    table = abilab.PseudoTable(abidata.pseudos("12mg.pspnc", "5b.pspnc"))
    pseudos = table.get_pseudos_for_structure(structure)

    nval = structure.num_valence_electrons(pseudos)
    #print(nval)

    flow = flowtk.Flow(workdir=options.workdir)

    scf_work = flowtk.Work()
    ngkpt_list = [[4, 4, 4], [8, 8, 8], [12, 12, 12]]
    tsmear_list = [0.01, 0.02, 0.04]
    for ngkpt in ngkpt_list:
        for tsmear in tsmear_list:
            scf_input = make_scf_input(structure, ngkpt, tsmear, pseudos)
            scf_work.register_scf_task(scf_input)
    flow.register_work(scf_work)

    # This call uses the information reported in the GS task to
    # compute all the independent atomic perturbations corresponding to a [4, 4, 4] q-mesh.
    for scf_task in scf_work:
        ph_work = flowtk.PhononWork.from_scf_task(scf_task, qpoints=[4, 4, 4], is_ngqpt=True)
        flow.register_work(ph_work)

    return flow.allocate(use_smartio=True)
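
The nested (ngkpt, tsmear) loops above are a plain Cartesian sweep; a variant based on itertools.product keeps the pairing explicit if more parameters are added later. A sketch reusing the names from the example above:

from itertools import product

scf_work = flowtk.Work()
for ngkpt, tsmear in product(ngkpt_list, tsmear_list):
    # One SCF task per (k-mesh, smearing) combination, as in the nested loops above.
    scf_work.register_scf_task(make_scf_input(structure, ngkpt, tsmear, pseudos))
flow.register_work(scf_work)
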
Example No. 7
def build_flow(options):
    fftalg_list = [312, 402, 401]
    #ecut_list = list(range(200, 610, 100))
    ecut_list = [400]

    mpi_list = options.mpi_list
    if mpi_list is None: mpi_list = [2, 4, 6, 8]
    if options.verbose: print("Using mpi_list:", mpi_list)

    template = make_input()
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    omp_threads = 1
    for fftalg in fftalg_list:
        work = flowtk.Work()
        for npfft in mpi_list:
            if not options.accept_mpi_omp(npfft, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(
                npfft, omp_threads)
            for inp in abilab.input_gen(template,
                                        fftalg=fftalg,
                                        npfft=npfft,
                                        ecut=ecut_list):
                work.register_scf_task(inp, manager=manager)
        flow.register_work(work)

    return flow.allocate()
Example No. 8
def itest_dilatmx_error_handler(fwp, tvars):
    """
    Test cell relaxation with automatic restart in the presence of dilatmx error.
    """
    # Build the flow
    flow = flowtk.Flow(fwp.workdir, manager=fwp.manager)

    # Decrease the volume to trigger DilatmxError
    ion_input, ioncell_input = make_ion_ioncell_inputs(tvars,
                                                       dilatmx=1.01,
                                                       scalevol=0.8)

    work = flowtk.Work()
    work.register_relax_task(ioncell_input)

    flow.register_work(work)
    flow.allocate()
    assert flow.make_scheduler().start() == 0
    flow.show_status()

    assert all(work.finalized for work in flow)
    assert flow.all_ok

    # t0 should have reached S_OK, and we should have DilatmxError in the corrections.
    t0 = work[0]
    assert t0.status == t0.S_OK
    print(t0.corrections)
    assert t0.num_corrections > 0
    assert t0.corrections[0]["event"]["@class"] == "DilatmxError"
Example No. 9
def build_flow(options):
    template = make_input()

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ", max_ncpus,
          " with efficiency >= ", min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus,
                                              autoparal=1,
                                              verbose=options.verbose)
    if options.verbose: print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    omp_threads = 1
    for iomode in [1, 3]:  # [MPI-IO, Netcdf]
        work = flowtk.Work()
        for conf in pconfs:
            mpi_procs = conf.mpi_ncpus
            omp_threads = conf.omp_ncpus
            if not options.accept_conf(conf, omp_threads): continue

            # Two GS-SCF tasks. The first one produces the WFK file, the second one reads it.
            manager = options.manager.new_with_fixed_mpi_omp(
                mpi_procs, omp_threads)
            inp = template.new_with_vars(conf.vars, iomode=iomode)
            task0 = work.register_scf_task(inp, manager=manager)
            work.register_scf_task(inp, manager=manager, deps={task0: "WFK"})

        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
Example No. 10
def build_flow(options):
    gs_inp, nscf_inp, ddk_inp = make_inputs()

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    ebands_work = flowtk.BandStructureWork(gs_inp, nscf_inp)
    flow.register_work(ebands_work)
    flow.exclude_from_benchmark(ebands_work)

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ", max_ncpus,
          " with efficiency >= ", min_eff)

    pconfs = ddk_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    if options.verbose: print(pconfs)

    work = flowtk.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        mpi_procs = conf.mpi_ncpus
        if not options.accept_conf(conf, omp_threads): continue

        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        inp = ddk_inp.new_with_vars(conf.vars)
        work.register_ddk_task(inp,
                               manager=manager,
                               deps={ebands_work[1]: "WFK"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)

    return flow.allocate()
Example No. 11
def build_flow(options):
    template = make_input()
    #template.abivalidate()

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    if max_ncpus is None:
        nkpt = len(template.abiget_ibz().points)
        max_ncpus = nkpt * template["nsppol"] * template["nband"] * 4
    print("Getting all autoparal confs up to max_ncpus:", max_ncpus,
          "with efficiency >=", min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus,
                                              autoparal=1,
                                              verbose=options.verbose)
    if options.verbose: print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    work = flowtk.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        mpi_procs = conf.mpi_ncpus
        if not options.accept_conf(conf, omp_threads): continue

        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        inp = template.new_with_vars(conf.vars)
        work.register_scf_task(inp, manager=manager)

    print("Found %d configurations" % len(work))
    flow.register_work(work)

    return flow.allocate()
Example No. 12
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    if not options.workdir:
        options.workdir = os.path.basename(sys.argv[0]).replace(
            ".py", "").replace("run_", "flow_")

    # Init structure from internal database.
    structure = abidata.structure_from_ucell("MgB2")

    # Our pseudopotentials.
    pseudos = abilab.PseudoTable(["Mg-low.psp8", "B.psp8"])

    flow = flowtk.Flow(workdir=options.workdir)

    # Build the work with the GS tasks (SCF + NSCF).
    # The SCF task is the starting point of the phonon work below.
    ngkpt = [12, 12, 12]
    tsmear = 0.02
    scf_input, nscf_input = make_scf_nscf_inputs(structure, ngkpt, tsmear,
                                                 pseudos)

    gs_work = flowtk.Work()
    scf_task = gs_work.register_scf_task(scf_input)
    nscf_task = gs_work.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
    flow.register_work(gs_work)

    # This call uses the information reported in the GS task to
    # compute all the independent atomic perturbations corresponding to a [4, 4, 4] q-mesh.
    ph_work = flowtk.PhononWork.from_scf_task(scf_task,
                                              qpoints=[4, 4, 4],
                                              is_ngqpt=True)
    flow.register_work(ph_work)

    return flow.allocate(use_smartio=True)
Example No. 13
def build_flow(options):
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    template = make_input()

    # Processor distribution.
    pconfs = [
        dict(npkpt=2, npband=8, npfft=8),    # 128 processors
        dict(npkpt=2, npband=16, npfft=8),   # 256 processors
        dict(npkpt=2, npband=16, npfft=16),  # 512 processors
        dict(npkpt=2, npband=16, npfft=32),  # 1024 processors
    ]

    for wfoptalg in [None, 1]:
        work = flowtk.Work()
        for d, omp_threads in product(pconfs, options.omp_list):
            mpi_procs = reduce(operator.mul, d.values(), 1)
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(
                mpi_procs, omp_threads)
            if options.verbose:
                print("wfoptalg:", wfoptalg, "with MPI_PROCS:", mpi_procs,
                      "and conf:", d)
            # Apply both the processor distribution and the wfoptalg value under test.
            inp = template.new_with_vars(d, np_slk=64, wfoptalg=wfoptalg)
            work.register_scf_task(inp, manager=manager)

        flow.register_work(work)

    return flow.allocate()
Example No. 14
def build_flow(options):
    template = make_input()

    # Get the list of possible parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose: print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for istwfk in [1, 2]:
        work = flowtk.Work()
        for conf in pconfs:
            mpi_procs = conf.mpi_ncpus
            if not options.accept_conf(conf, omp_threads): continue

            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            inp = template.new_with_vars(conf.vars, istwfk=istwfk)
            work.register_scf_task(inp, manager=manager)

        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
Example No. 15
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    if not options.workdir:
        if os.getenv("READTHEDOCS", False):
            __file__ = os.path.join(os.getcwd(), "run_relax.py")
        options.workdir = os.path.basename(__file__).replace(
            ".py", "").replace("run_", "flow_")

    # Create the flow
    flow = flowtk.Flow(options.workdir, manager=options.manager)

    # Create a relaxation work and add it to the flow.
    ion_inp, ioncell_inp = make_ion_ioncell_inputs()

    relax_work = flowtk.RelaxWork(ion_inp, ioncell_inp)
    flow.register_work(relax_work)

    #bands_work = flowtk.BandStructureWork(scf_input, nscf_input)
    bands_work = flowtk.Work()
    deps = {relax_work[-1]: "@structure"}
    #deps = {relax_work[-1]: ["DEN", "@structure"]}  # --> This is not possible because the file ext is changed!
    #deps = {relax_work[-1]: ["WFK", "@structure"]}  # --> This triggers an infamous bug in abinit

    bands_work.register_relax_task(ioncell_inp, deps=deps)
    flow.register_work(bands_work)

    return flow
Example No. 16
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    if not options.workdir:
        options.workdir = os.path.basename(sys.argv[0]).replace(
            ".py", "").replace("run_", "flow_")

    structure = abidata.structure_from_ucell("MgB2")

    # Get pseudos from a table.
    table = abilab.PseudoTable(abidata.pseudos("12mg.pspnc", "5b.pspnc"))
    pseudos = table.get_pseudos_for_structure(structure)

    flow = flowtk.Flow(workdir=options.workdir)

    # Build a work of GS tasks. Each gs_task uses different (ngkpt, tsmear) values
    # and represents the starting point of a phonon work.
    scf_work = flowtk.Work()
    ngkpt_list = [[4, 4, 4], [8, 8, 8]]  #, [12, 12, 12]]
    tsmear_list = [0.01, 0.02]  # , 0.04]
    for ngkpt in ngkpt_list:
        for tsmear in tsmear_list:
            scf_input = make_scf_input(structure, ngkpt, tsmear, pseudos)
            scf_work.register_scf_task(scf_input)
    flow.register_work(scf_work)

    # This call uses the information reported in the GS task to
    # compute all the independent atomic perturbations corresponding to a [2, 2, 2] q-mesh.
    # For each GS task, construct a phonon work that will inherit (ngkpt, tsmear) from scf_task.
    for scf_task in scf_work:
        ph_work = flowtk.PhononWork.from_scf_task(scf_task,
                                                  qpoints=[2, 2, 2],
                                                  is_ngqpt=True)
        flow.register_work(ph_work)

    return flow.allocate(use_smartio=True)
Example No. 17
def build_flow(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT.
    """
    gs_inp, nscf_inp, scr_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    bands = flowtk.BandStructureWork(gs_inp, nscf_inp)
    flow.register_work(bands)
    flow.exclude_from_benchmark(bands)

    #for nband in [200, 400, 600]:
    for nband in [600]:
        scr_work = flowtk.Work()
        inp = scr_inp.new_with_vars(nband=nband)
        mpi_list = options.mpi_list
        if mpi_list is None:
            # Cannot call autoparal here because we need a WFK file.
            print("Using hard coded values for mpi_list")
            mpi_list = [np for np in range(1, nband + 1) if (nband - 4) % np == 0]
        if options.verbose: print("Using nband %d and mpi_list: %s" % (nband, mpi_list))

        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            scr_work.register_scr_task(inp, manager=manager, deps={bands.nscf_task: "WFK"})

        flow.register_work(scr_work)

    return flow.allocate()
Example No. 18
def build_g0w0_flow(options=None, ngkpt=(2, 2, 2)):
    """
    Build and return a flow with two works.
    The first work is a standard KS band-structure calculation that consists of
    an initial GS calculation to get the density followed by two NSCF calculations.

    The first NSCF task computes the KS eigenvalues on a high-symmetry path in the BZ,
    whereas the second NSCF task employs a homogeneous k-mesh so that one can compute
    the DOS from the KS eigenvalues.

    The second work represents the actual GW workflow: it uses the density computed in the first task of
    the previous work to compute the KS bands for many empty states.
    The WFK file produced in this step is then used to compute the screened interaction $W$.
    Finally, we perform a self-energy calculation that uses the $W$ produced
    in the previous step and the WFK file to compute the matrix elements of the self-energy and
    the $G_0W_0$ corrections for all the k-points in the IBZ and 8 bands (4 occupied + 4 empty).
    """

    # Call make_inputs to build our six input objects.
    scf, bands_nscf, dos_nscf, gw_nscf, scr, sig = make_inputs(ngkpt=ngkpt)

    workdir = options.workdir if (options and options.workdir) else "flow_g0w0"
    flow = flowtk.Flow(workdir=workdir)

    # Add the KS band-structure work: an SCF-GS run followed by two NSCF runs
    # (the first on a k-path, the second on the IBZ to compute the DOS).
    work0 = flowtk.BandStructureWork(scf, bands_nscf, dos_inputs=dos_nscf)
    flow.register_work(work0)

    # Create new Work for GW
    work1 = flowtk.Work()

    # NSCF run with empty states
    gw_nscf_task = work1.register_nscf_task(gw_nscf, deps={work0[0]: "DEN"})

    # SCR run with WFK produced by previous task.
    scr_task = work1.register_scr_task(scr, deps={gw_nscf_task: "WFK"})

    # SIGMA task (requires WFK with empty states and SCR file)
    sigma_task = work1.register_sigma_task(sig,
                                           deps={
                                               gw_nscf_task: "WFK",
                                               scr_task: "SCR"
                                           })

    # Add GW work to flow.
    flow.register_work(work1)

    return flow
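
A possible interactive usage of this helper, reusing only calls that appear elsewhere in these examples (allocate, make_scheduler, show_status); treat it as a sketch rather than the canonical way to run the flow:

flow = build_g0w0_flow(ngkpt=(2, 2, 2)).allocate()
assert flow.make_scheduler().start() == 0   # run the GS + NSCF + SCR + SIGMA tasks
flow.show_status()
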
Example No. 19
def itest_flow_with_deadlocks(fwp):
    """
    Test the behaviour of the scheduler in the presence of a deadlock
    when we ignore errored tasks and we try to run all tasks in the flow.
    The scheduler should detect the deadlock and exit when no other task can be executed.
    """
    # Get the SCF and the NSCF input.
    scf_input, nscf_input = make_scf_nscf_inputs()

    # Build the flow.
    flow = flowtk.Flow(fwp.workdir, manager=fwp.manager)
    work0 = flowtk.BandStructureWork(scf_input,
                                     nscf_input,
                                     dos_inputs=nscf_input)
    flow.register_work(work0)
    scf_task, nscf_task, dos_task = work0[0], work0[1], work0[2]

    work1 = flowtk.Work()
    work1.register_nscf_task(nscf_input,
                             deps={
                                 scf_task: "DEN",
                                 dos_task: "WFK"
                             })
    # This task will deadlock when nscf_task reaches S_ERROR.
    work1.register_nscf_task(nscf_input,
                             deps={
                                 scf_task: "DEN",
                                 nscf_task: "WFK"
                             })
    flow.register_work(work1)

    flow.allocate()

    # Mock an Errored nscf_task. This will cause a deadlock in the flow.
    nscf_task = mocks.change_task_start(nscf_task)

    # Here we set max_num_abierrs to a very large number.
    sched = flow.make_scheduler()
    sched.max_num_abierrs = 10000
    assert sched.start() == 0
    flow.check_status(show=True)

    assert not flow.all_ok
    assert all(task.status == task.S_OK
               for task in [scf_task, dos_task, work1[0]])
    assert all(task.status == task.S_ERROR for task in [nscf_task])
    g = flow.find_deadlocks()
    assert g.deadlocked and not g.runnables and not g.running
    assert work1[1] in g.deadlocked
Example No. 20
def build_flow(options):
    inp = make_input(paw=options.paw)
    nkpt = len(inp.abiget_ibz().points)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    work = flowtk.Work()

    omp_list = options.omp_list
    if omp_list is None: omp_list = [1, 2, 4, 6]
    print("Using omp_list:", omp_list)

    mpi_procs = 1
    for omp_threads in omp_list:
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        work.register(inp, manager=manager)

    flow.register_work(work)
    return flow.allocate()
Example No. 21
    def test_infinite_flow(self):
        si_structure = abidata.structure_from_cif("si.cif")
        gsinp = gs_input(si_structure,
                         pseudos=abidata.pseudos("14si.pspnc"),
                         ecut=4)

        flow = flowtk.Flow.temporary_flow()
        work = flowtk.Work()
        gstask = work.register_scf_task(gsinp)
        flow.register_work(work)
        flow.allocate()

        mocks.infinite_flow(flow)
        flow.check_status()
        assert all(t.status == flow.S_INIT for t in flow)

        mocks.change_task_start(gstask, mocked_status="Error")
        assert gstask.start() == 1 and gstask.status == gstask.S_ERROR
Example No. 22
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    if not options.workdir:
        if os.getenv("READTHEDOCS", False):
            __file__ = os.path.join(os.getcwd(), "run_gwconv_ecuteps.py")
        options.workdir = os.path.basename(__file__).replace(
            ".py", "").replace("run_", "flow_")

    # Get our templates
    scf_inp, nscf_inp, scr_inp, sig_inp = make_inputs()

    ecuteps_list = np.arange(2, 8, 2)
    max_ecuteps = max(ecuteps_list)

    flow = flowtk.Flow(workdir=options.workdir, manager=options.manager)

    # Band structure work to produce the WFK file
    bands = flowtk.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands)

    # Build a work made of two SCR runs with different values of nband.
    # Use max_ecuteps for the dielectric matrix (the sigma tasks will
    # read a submatrix when we test the convergence wrt ecuteps).
    scr_work = flowtk.Work()

    for inp in scr_inp.generate(nband=[10, 15]):
        inp.set_vars(ecuteps=max_ecuteps)
        scr_work.register_scr_task(inp, deps={bands.nscf_task: "WFK"})

    flow.register_work(scr_work)

    # Do a convergence study wrt ecuteps, each work is connected to a
    # different SCR file computed with a different value of nband.

    # Build a list of sigma inputs with different ecuteps
    sigma_inputs = list(sig_inp.generate(ecuteps=ecuteps_list))

    for scr_task in scr_work:
        sigma_conv = flowtk.SigmaConvWork(wfk_node=bands.nscf_task,
                                          scr_node=scr_task,
                                          sigma_inputs=sigma_inputs)
        flow.register_work(sigma_conv)

    return flow
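
Note that np.arange excludes its end point, so the convergence grid above is ecuteps = 2, 4, 6 and the screening is computed once at max_ecuteps = 6; each sigma task then reads the sub-matrix it needs. A quick check:

import numpy as np

ecuteps_list = np.arange(2, 8, 2)
print(ecuteps_list)        # [2 4 6]
print(max(ecuteps_list))   # 6
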
Example No. 23
def make_g0w0_scissors_flow(workdir="flow_lesson_g0w0", ngkpt=(2, 2, 2)):
    # Change the value of ngkpt below to perform a GW calculation with a different k-mesh.
    scf, bands_nscf, dos_nscf, gw_nscf, scr, sig = make_inputs(ngkpt=ngkpt)

    flow = flowtk.Flow(workdir=workdir)
    work0 = flowtk.BandStructureWork(scf, bands_nscf, dos_inputs=dos_nscf)
    flow.register_work(work0)

    work1 = flowtk.Work()
    gw_nscf_task = work1.register_nscf_task(gw_nscf, deps={work0[0]: "DEN"})
    scr_task = work1.register_scr_task(scr, deps={gw_nscf_task: "WFK"})
    sigma_task = work1.register_sigma_task(sig,
                                           deps={
                                               gw_nscf_task: "WFK",
                                               scr_task: "SCR"
                                           })
    flow.register_work(work1)

    return flow.allocate()
Example No. 24
def build_flow(options):
    # Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
    if not options.workdir:
        options.workdir = os.path.basename(__file__).replace(".py", "").replace("run_", "flow_")

    # Get the SCF and the NSCF input.
    scf_input, nscf_input = make_scf_nscf_inputs()

    # Build the flow.
    flow = flowtk.Flow(options.workdir, manager=options.manager)

    # Create a Work whose tasks will all start from the same DEN file.
    # Note that the file must exist when the work is created.
    # Use the standard approach based on tasks and works if
    # there's a node that needs a file produced in the future.
    work = flowtk.Work()
    den_filepath = abidata.ref_file("si_DEN.nc")
    work.register_nscf_task(nscf_input, deps={den_filepath: "DEN"})
    flow.register_work(work)

    return flow
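
The deps mapping accepts two kinds of keys: a node of the flow (a task or work that will produce the file) or, as in the snippet above, the path of a file that already exists on disk. A short sketch contrasting the two styles with the objects defined above:

# 1) Depend on a node of the flow: the NSCF task waits for the SCF task
#    and links its DEN file once it has been produced.
work = flowtk.Work()
scf_task = work.register_scf_task(scf_input)
work.register_nscf_task(nscf_input, deps={scf_task: "DEN"})

# 2) Depend on a pre-existing file: no producer task is created, but the
#    DEN file must already be on disk when the work is built.
work2 = flowtk.Work()
work2.register_nscf_task(nscf_input, deps={abidata.ref_file("si_DEN.nc"): "DEN"})
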
Example No. 25
def build_flow(options):
    gs_inp, nscf_inp = make_inputs(options)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    mpi_list = options.mpi_list
    if mpi_list is None:
        # Get the list of possible parallel configurations from abinit autoparal.
        max_ncpus, min_eff = options.max_ncpus, options.min_eff
        print("Getting all autoparal confs up to max_ncpus:", max_ncpus,
              "with efficiency >=", min_eff)

        pconfs = gs_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)

    else:
        print("Initializing autoparal from command line options")
        pconfs = ParalHints.from_mpi_omp_lists(mpi_list, options.omp_list)
        if options.verbose: print(pconfs)

    work = flowtk.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        mpi_procs = conf.mpi_ncpus
        #if not options.accept_mpi_omp(mpi_procs,omp_threads): continue
        if not options.accept_conf(conf, omp_threads): continue

        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        inp = gs_inp.new_with_vars(conf.vars)
        scf_task = work.register_scf_task(inp, manager=manager)

        inp2 = nscf_inp.new_with_vars(conf.vars)
        work.register_nscf_task(inp2, manager=manager, deps={scf_task: "DEN"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)

    return flow.allocate()
Example No. 26
def build_flow(options):
    inp = make_input()

    mpi_list = options.mpi_list
    if mpi_list is None:
        nkpt = len(inp.abiget_ibz().points)
        nks = nkpt * inp["nsppol"]
        mpi_list = [p for p in range(1, nks + 1) if nks % p == 0]
    if options.verbose: print("Using mpi_list:", mpi_list)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    for useylm in [0, 1]:
        work = flowtk.Work()
        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(
                mpi_procs, omp_threads)
            work.register_scf_task(inp.new_with_vars(useylm=useylm),
                                   manager=manager)
        flow.register_work(work)

    return flow.allocate()
Example No. 27
def build_flow(options):
    flow = make_base_flow(options)

    optic_input = abilab.OpticInput(
        broadening=0.002,
        domega=0.0003,
        maxomega=0.3,
        scissor=0.000,
        tolerance=0.002,
        num_lin_comp=1,
        lin_comp=11,
        num_nonlin_comp=2,
        nonlin_comp=(123, 222),
    )

    mpi_list = options.mpi_list
    if mpi_list is None:
        mpi_list = [1, 2, 4, 8]
        print("Using mpi_list:", mpi_list)
    else:
        print("Using mpi_list from cmd line:", mpi_list)

    work = flowtk.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
        manager = options.manager.new_with_fixed_mpi_omp(
            mpi_procs, omp_threads)
        optic_task = flowtk.OpticTask(optic_input,
                                      manager=manager,
                                      nscf_node=flow[0].nscf_task,
                                      ddk_nodes=flow[1])
        work.register_task(optic_task)

    flow.register_work(work)

    return flow.allocate()
Example No. 28
def make_base_flow(options):
    multi = abilab.MultiDataset(structure=data.structure_from_ucell("GaAs"),
                                pseudos=data.pseudos("31ga.pspnc",
                                                     "33as.pspnc"),
                                ndtset=5)

    # Global variables
    kmesh = dict(ngkpt=[4, 4, 4],
                 nshiftk=4,
                 shiftk=[[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0],
                         [0.0, 0.0, 0.5]])

    paral_kgb = 1
    global_vars = dict(ecut=2, paral_kgb=paral_kgb)
    global_vars.update(kmesh)

    multi.set_vars(global_vars)

    # Dataset 1 (GS run)
    multi[0].set_vars(
        tolvrs=1e-6,
        nband=4,
    )

    # NSCF run with a large number of bands, and k-points in the full BZ
    multi[1].set_vars(
        iscf=-2,
        nband=20,
        nstep=25,
        kptopt=1,
        tolwfr=1.e-9,
        #kptopt=3,
    )

    # Third dataset: ddk response function along axis 1
    # Fourth dataset: ddk response function along axis 2
    # Fifth dataset: ddk response function along axis 3
    for idir in range(3):
        rfdir = 3 * [0]
        rfdir[idir] = 1

        multi[2 + idir].set_vars(
            iscf=-3,
            nband=20,
            nstep=1,
            nline=0,
            prtwf=3,
            kptopt=3,
            nqpt=1,
            qpt=[0.0, 0.0, 0.0],
            rfdir=rfdir,
            rfelfd=2,
            tolwfr=1.e-9,
        )

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = multi.split_datasets()

    # Initialize the flow.
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__),
                         remove=options.remove)

    bands_work = flowtk.BandStructureWork(scf_inp, nscf_inp)
    flow.register_work(bands_work)
    flow.exclude_from_benchmark(bands_work)

    ddk_work = flowtk.Work()
    for inp in [ddk1, ddk2, ddk3]:
        ddk_work.register_ddk_task(inp, deps={bands_work.nscf_task: "WFK"})

    flow.register_work(ddk_work)
    flow.exclude_from_benchmark(ddk_work)

    return flow
Example No. 29
def build_flow(options):
    """
    Build and return an AbiPy flow to compute phonon linewidths and Eliashberg function in Aluminium:

        1. Compute DFPT phonons on a 4x4x4 q-mesh with a coarse 8x8x8 k-sampling

        2. Generate 3 WFK files on a much denser k-mesh (x16, x24, x32)

        3. Run the EPH code with:

          - one of the WFK files generated in point 2.
          - interpolated DFPT potentials (from the initial 4x4x4 to a 8x8x8 q-mesh)

        4. Analyze the convergence of the results wrt nkpt.

    Note that the q-point grid must be a sub-grid of the k-point grid
    """
    workdir = options.workdir if (options
                                  and options.workdir) else "flow_eph_al"

    # Create empty flow.
    flow = flowtk.Flow(workdir=workdir)

    # Init structure. Use NC pseudo
    structure = abilab.Structure.fcc(a=7.5, species=["Al"], units="bohr")
    pseudos = abidata.pseudos("Al.oncvpsp")

    # Input for GS part.
    gs_inp = abilab.AbinitInput(structure, pseudos)
    gs_inp.set_vars(
        istwfk="*1",
        ecut=8.0,
        nband=4,
        occopt=7,  # Include metallic occupation function with a small smearing
        tsmear=0.04,
        tolvrs=1e-7,
    )

    # The k-grid is minimalistic to keep the calculation manageable.
    gs_inp.set_kmesh(
        ngkpt=[8, 8, 8],
        shiftk=[0.0, 0.0, 0.0],
    )

    # Build new input for NSCF calculation along k-path (automatically selected by AbiPy)
    # Used to plot the KS band structure.
    nscf_kpath_inp = gs_inp.new_with_vars(
        nband=4,
        tolwfr=1e-16,
        iscf=-2,
    )
    nscf_kpath_inp.set_kpath(ndivsm=10)

    # Build NSCF inputs with denser k-meshes
    # This step generates the WFK files used to compute the Eliashberg function.
    # We have a cubic material so we only need to specify the first number of divisions.
    nk_list = [16, 24, 32]

    nscf_kmesh_inputs = []
    for nk in nk_list:
        new_inp = gs_inp.new_with_vars(
            tolwfr=1e-16,
            iscf=-2,
            ngkpt=[nk] * 3,
            shiftk=[0.0, 0.0, 0.0],
        )
        nscf_kmesh_inputs.append(new_inp)

    # Register GS + NSCF kpath + NSCF with k-meshes in work0.
    work0 = flowtk.BandStructureWork(gs_inp,
                                     nscf_kpath_inp,
                                     dos_inputs=nscf_kmesh_inputs)
    flow.register_work(work0)

    # Generate Phonon work with 4x4x4 q-mesh
    # Reuse the variables from GS input and let AbiPy handle the generation of the input files
    # Note that the q-point grid is a sub-grid of the k-mesh so we do not need WFQ on k+q mesh.
    ddb_ngqpt = [4, 4, 4]
    ph_work = flowtk.PhononWork.from_scf_task(work0[0],
                                              ddb_ngqpt,
                                              is_ngqpt=True)
    flow.register_work(ph_work)

    # Section for the EPH calculation: compute phonon linewidths with different WFK files.
    eph_work = flowtk.Work()
    for ik, nk in enumerate(nk_list):
        # Each task uses a different WFK file. DDB and DVDB do not change.
        eph_deps = {work0[2 + ik]: "WFK", ph_work: ["DDB", "DVDB"]}

        # Interpolate DFPT potentials 4x4x4 --> 8x8x8
        eph_ngqpt_fine = (8, 8, 8)

        # Build input for E-PH run. See also v7/Input/t85.in
        # The k-points must be in the WFK file
        eph_inp = gs_inp.new_with_vars(
            optdriver=7,                    # Enter EPH driver.
            eph_task=1,                     # Compute phonon linewidths in metals.
            ddb_ngqpt=ddb_ngqpt,            # q-mesh used to produce the DDB file (must be consistent with DDB data)
            eph_fsewin="0.8 eV",            # Energy window around Ef (only states in this window are included)
            eph_intmeth=2,                  # Tetra method
            #eph_intmeth=1,                 # Gaussian
            #eph_fsmear=eph_fsmear * abilab.units.eV_to_Ha, # Broadening
            eph_ngqpt_fine=eph_ngqpt_fine,  # Interpolate DFPT potentials if != ddb_ngqpt
            eph_mustar=0.12,                # mustar parameter
            ngkpt=[nk] * 3,
            shiftk=[0.0, 0.0, 0.0],
        )

        # Set q-path to interpolate phonons and phonon linewidths.
        eph_inp.set_qpath(10)

        # Set q-mesh for phonons DOS and a2F(w)
        eph_inp.set_phdos_qmesh(nqsmall=24, method="tetra")
        eph_work.register_eph_task(eph_inp, deps=eph_deps)

    flow.register_work(eph_work)

    # Avoid producing (big) output files that are not required by children.
    flow.allocate(use_smartio=True)

    return flow
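
The docstring stresses that the q-point grid must be a sub-grid of the k-point grid; for the Gamma-centered meshes used here that amounts to each ngkpt division being a multiple of the corresponding q division. A small standalone check (hypothetical helper, standard library only):

def is_subgrid(ngkpt, ngqpt):
    """True if the (Gamma-centered) q-mesh ngqpt is commensurate with the k-mesh ngkpt."""
    return all(nk % nq == 0 for nk, nq in zip(ngkpt, ngqpt))

assert is_subgrid([8, 8, 8], [4, 4, 4])                              # coarse k-mesh vs DFPT q-mesh
assert all(is_subgrid([nk] * 3, [8, 8, 8]) for nk in [16, 24, 32])   # dense WFK k-meshes vs interpolated q-mesh
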
Example No. 30
def raman_work(structure, pseudos, ngkpt, shiftk):
    # Generate 3 different input files for computing optical properties with Optic.

    global_vars = dict(
        istwfk="*1",
        paral_kgb=0,
        ecut=8,
        nstep=200,
        diemac=12,
        ixc=7,
        chksymbreak=0,
        #accesswff=3
    )

    multi = abilab.MultiDataset(structure, pseudos=pseudos, ndtset=5)
    multi.set_vars(global_vars)
    multi.set_kmesh(ngkpt=ngkpt, shiftk=shiftk)

    # GS run
    multi[0].set_vars(
        tolvrs=1e-8,
        nband=20,
        nbdbuf=2,
    )

    # Note kptopt 2 in NSCF and DDK
    # In principle kptopt 2 is needed only in DDK.
    # One could do a first NSCF run with kptopt 1, reread it with kptopt 2 and then enter the DDK part.

    # NSCF run
    multi[1].set_vars(
        iscf=-2,
        nband=40,
        nbdbuf=5,
        kptopt=2,
        tolwfr=1.e-12,
    )

    # DDK along 3 directions
    # Third dataset: ddk response function along axis 1
    # Fourth dataset: ddk response function along axis 2
    # Fifth dataset: ddk response function along axis 3
    for idir in range(3):
        rfdir = 3 * [0]
        rfdir[idir] = 1

        multi[2 + idir].set_vars(
            iscf=-3,
            nband=40,
            nbdbuf=5,
            nstep=1,
            nline=0,
            prtwf=3,
            kptopt=2,
            nqpt=1,
            qpt=[0.0, 0.0, 0.0],
            rfdir=rfdir,
            rfelfd=2,
            tolwfr=1.e-12,
        )

    scf_inp, nscf_inp, ddk1, ddk2, ddk3 = multi.split_datasets()
    ddk_inputs = [ddk1, ddk2, ddk3]

    work = flowtk.Work()
    scf_t = work.register_scf_task(scf_inp)
    nscf_t = work.register_nscf_task(nscf_inp, deps={scf_t: "DEN"})

    ddk_nodes = []
    for inp in ddk_inputs:
        ddk_t = work.register_ddk_task(inp, deps={nscf_t: "WFK"})
        ddk_nodes.append(ddk_t)

    optic_input = abilab.OpticInput(
        broadening=0.002,       # Value of the smearing factor, in Hartree
        domega=0.0003,          # Frequency mesh.
        maxomega=0.3,
        scissor=0.000,          # Scissor shift if needed, in Hartree
        tolerance=0.002,        # Tolerance on closeness of singularities (in Hartree)
        num_lin_comp=6,         # Number of components of linear optic tensor to be computed
        lin_comp=(11, 12, 13, 22, 23, 33),  # Linear coefficients to be computed (x=1, y=2, z=3)
        num_nonlin_comp=0,      # Number of components of nonlinear optic tensor to be computed
        #nonlin_comp=(123, 222),
    )

    optic_t = flowtk.OpticTask(optic_input,
                               nscf_node=nscf_t,
                               ddk_nodes=ddk_nodes)
    work.register(optic_t)

    return work