def build_flow(options):
    """
    Build a `BenchmarkFlow` of GS-SCF tasks over a fixed table of
    (npkpt, npband, npfft) processor distributions, repeated for each
    wfoptalg in (None, 1).
    """
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    template = make_input()

    # Hard-coded processor distributions (trailing comment = total MPI procs).
    pconfs = [
        dict(npkpt=1, npband=13, npfft=10),  # 130
        dict(npkpt=1, npband=26, npfft=10),  # 260
        dict(npkpt=1, npband=65, npfft=8),   # 520
        dict(npkpt=1, npband=65, npfft=16),  # 1040
    ]

    for wfoptalg in [None, 1]:
        work = abilab.Work()
        for d, omp_threads in product(pconfs, options.omp_list):
            mpi_procs = reduce(operator.mul, d.values(), 1)
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            # FIX: guard the debug print with options.verbose, consistent with
            # the other build_flow variants in this file.
            if options.verbose:
                print("wfoptalg:", wfoptalg, "done with MPI_PROCS:", mpi_procs, "and:", d)
            inp = template.new_with_vars(d, wfoptalg=wfoptalg)
            work.register_scf_task(inp, manager=manager)
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Build a `BenchmarkFlow` with GS-SCF tasks for a hand-picked table of
    (npkpt, npband, npfft) distributions, once per wfoptalg in (None, 1).
    """
    template = make_input()

    # Autoparal-based generation of configurations is disabled; the
    # hand-picked distributions below are used instead.
    #max_ncpus, min_eff = options.max_ncpus, options.min_eff
    #print("Getting all autoparal configurations up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    #pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    #if options.verbose: print(pconfs)

    # Processor distributions (trailing comment = product of the np* values).
    pconfs = [
        dict(npkpt=64, npband=1, npfft=2),  # 128
        dict(npkpt=64, npband=2, npfft=2),  # 256
        dict(npkpt=64, npband=2, npfft=4),  # 512
        dict(npkpt=64, npband=4, npfft=4),  # 1024
    ]

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    for wfoptalg in [None, 1]:
        work = flowtk.Work()
        for dist, omp_threads in product(pconfs, options.omp_list):
            #if not options.accept_conf(dist, omp_threads): continue
            # NOTE(review): mpi_procs is scaled by omp_threads here, unlike the
            # other build_flow variants in this file — confirm this is intended.
            ncores = 1
            for fac in dist.values():
                ncores *= fac
            mpi_procs = omp_threads * ncores
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            work.register_scf_task(template.new_with_vars(dist, wfoptalg=wfoptalg),
                                   manager=manager)
        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT: a band-structure
    work (excluded from the timing) followed by SCR tasks for every accepted
    MPI/OpenMP combination.
    """
    gs_inp, nscf_inp, scr_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    # GS + NSCF only provide the WFK file; they are not benchmarked.
    bands = flowtk.BandStructureWork(gs_inp, nscf_inp)
    flow.register_work(bands)
    flow.exclude_from_benchmark(bands)

    #for nband in [200, 400, 600]:
    for nband in [600]:
        scr_work = flowtk.Work()
        inp = scr_inp.new_with_vars(nband=nband)

        mpi_list = options.mpi_list
        if mpi_list is None:
            # Cannot call autoparal here because we need a WFK file.
            print("Using hard coded values for mpi_list")
            # abs((nband - 4) % p) < 1 is equivalent to exact division.
            mpi_list = [nprocs for nprocs in range(1, nband + 1)
                        if abs((nband - 4) % nprocs) < 1]
        if options.verbose:
            print("Using nband %d and mpi_list: %s" % (nband, mpi_list))

        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            scr_work.register_scr_task(inp, manager=manager,
                                       deps={bands.nscf_task: "WFK"})

        flow.register_work(scr_work)

    return flow.allocate()
def build_flow(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT: one GS-SCF task
    (excluded from the timing) plus BSE tasks for every accepted MPI/OpenMP
    combination.
    """
    gs_inp, bse_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    # The SCF run only produces the WFK file read by the BSE tasks.
    gs_work = flowtk.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    mpi_list = options.mpi_list
    if options.mpi_list is None:
        # Default: all divisors of the number of transitions (2*2*nkpt)**2.
        nkpt = len(gs_inp.abiget_ibz().points)
        ntrans = (2 * 2 * nkpt) ** 2
        mpi_list = [p for p in range(1, 1 + ntrans) if ntrans % p == 0]
    if options.verbose:
        print("Using mpi_list:", mpi_list)

    bse_work = flowtk.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads):
            continue
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        bse_work.register_bse_task(bse_inp, manager=manager, deps={gs_work[0]: "WFK"})

    flow.register_work(bse_work)
    return flow.allocate()
def build_flow(options):
    """
    Benchmark WFK I/O: for each autoparal configuration, register a pair of
    GS-SCF tasks (writer + reader of the WFK file), with accesswff in
    (1, 3), i.e. MPI-IO vs Netcdf.
    """
    template = make_input()

    # Candidate parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ", max_ncpus, " with efficiency >= ", min_eff)
    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose:
        print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for accesswff in [1, 3]:  # [MPI-IO, Netcdf]
        work = abilab.Work()
        for conf in pconfs:
            mpi_procs = conf.mpi_ncpus
            omp_threads = conf.omp_ncpus
            if not options.accept_conf(conf, omp_threads):
                continue
            # Two GS-SCF tasks. The first one produces the WKF, the second one reads it.
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            inp = template.new_with_vars(conf.vars, accesswff=accesswff)
            writer = work.register_scf_task(inp, manager=manager)
            work.register_scf_task(inp, manager=manager, deps={writer: "WFK"})
        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark WFK I/O modes (accesswff=1 MPI-IO, accesswff=3 Netcdf): each
    autoparal configuration yields a producer/consumer pair of GS-SCF tasks.
    """
    template = make_input()

    # Ask abinit autoparal for the candidate configurations.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose:
        print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for accesswff in [1, 3]:  # [MPI-IO, Netcdf]
        work = abilab.Work()
        for proc_conf in pconfs:
            mpi_procs = proc_conf.mpi_ncpus
            omp_threads = proc_conf.omp_ncpus
            if not options.accept_conf(proc_conf, omp_threads):
                continue
            # Two GS-SCF tasks. The first one produces the WKF, the second one reads it.
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            scf_inp = template.new_with_vars(proc_conf.vars, accesswff=accesswff)
            producer = work.register_scf_task(scf_inp, manager=manager)
            work.register_scf_task(scf_inp, manager=manager, deps={producer: "WFK"})
        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark DFPT phonon tasks: a GS-SCF run (excluded from the timing)
    followed by one phonon task per accepted autoparal configuration.
    """
    gs_inp, ph_inp = make_inputs()
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    gs_work = flowtk.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    # Candidate parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus:", max_ncpus, "with efficiency >=", min_eff)
    pconfs = ph_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    if options.verbose:
        print(pconfs)

    omp_threads = 1
    work = flowtk.Work()
    for conf in pconfs:
        mpi_procs = conf.mpi_ncpus
        if not options.accept_mpi_omp(mpi_procs, omp_threads):
            continue
        # Drop configurations below the requested parallel efficiency.
        if min_eff is not None and conf.efficiency < min_eff:
            continue
        if options.verbose:
            print(conf)
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        work.register_phonon_task(ph_inp.new_with_vars(conf.vars), manager=manager,
                                  deps={gs_work[0]: "WFK"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    Benchmark GS-SCF runs over all autoparal configurations. When
    --max-ncpus is not given, an upper bound is derived from
    nkpt * nsppol * nband * 4.
    """
    template = make_input()
    #template.abivalidate()

    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    if max_ncpus is None:
        # Upper bound for the k-point/spin/band parallelism (factor 4 margin).
        nkpt = len(template.abiget_ibz().points)
        max_ncpus = nkpt * template["nsppol"] * template["nband"] * 4
    print("Getting all autoparal confs up to max_ncpus:", max_ncpus, "with efficiency >=", min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose:
        print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    work = flowtk.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        if not options.accept_conf(conf, omp_threads):
            continue
        manager = options.manager.new_with_fixed_mpi_omp(conf.mpi_ncpus, omp_threads)
        work.register_scf_task(template.new_with_vars(conf.vars), manager=manager)

    print("Found %d configurations" % len(work))
    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    Benchmark istwfk=1 vs istwfk=2: one work per value, with tasks generated
    from the abinit autoparal configurations.
    """
    template = make_input()

    # Candidate parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose:
        print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for istwfk in (1, 2):
        work = abilab.Work()
        for conf in pconfs:
            if not options.accept_conf(conf, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(conf.mpi_ncpus, omp_threads)
            work.register_scf_task(template.new_with_vars(conf.vars, istwfk=istwfk),
                                   manager=manager)
        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark DDK tasks: a band-structure work (excluded from the timing)
    produces the WFK, then one DDK task per accepted autoparal configuration.
    """
    gs_inp, nscf_inp, ddk_inp = make_inputs()
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    ebands_work = flowtk.BandStructureWork(gs_inp, nscf_inp)
    flow.register_work(ebands_work)
    flow.exclude_from_benchmark(ebands_work)

    # Candidate parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    pconfs = ddk_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    if options.verbose:
        print(pconfs)

    work = flowtk.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        if not options.accept_conf(conf, omp_threads):
            continue
        manager = options.manager.new_with_fixed_mpi_omp(conf.mpi_ncpus, omp_threads)
        # The DDK task reads the WFK produced by the NSCF task (index 1).
        work.register_ddk_task(ddk_inp.new_with_vars(conf.vars), manager=manager,
                               deps={ebands_work[1]: "WFK"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    Benchmark GS-SCF + NSCF pairs: parallel configurations come either from
    abinit autoparal or from the --mpi-list/--omp-list command-line options.
    """
    gs_inp, nscf_inp = make_inputs(options)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    mpi_list = options.mpi_list
    if mpi_list is None:
        # Get the list of possible parallel configurations from abinit autoparal.
        max_ncpus, min_eff = options.max_ncpus, options.min_eff
        print("Getting all autoparal confs up to max_ncpus:", max_ncpus, "with efficiency >=", min_eff)
        pconfs = gs_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    else:
        print("Initializing autoparal from command line options")
        pconfs = ParalHints.from_mpi_omp_lists(mpi_list, options.omp_list)
    if options.verbose:
        print(pconfs)

    work = flowtk.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        #if not options.accept_mpi_omp(mpi_procs,omp_threads): continue
        if not options.accept_conf(conf, omp_threads):
            continue
        manager = options.manager.new_with_fixed_mpi_omp(conf.mpi_ncpus, omp_threads)
        scf_task = work.register_scf_task(gs_inp.new_with_vars(conf.vars), manager=manager)
        # The NSCF task restarts from the density produced by the SCF task.
        work.register_nscf_task(nscf_inp.new_with_vars(conf.vars), manager=manager,
                                deps={scf_task: "DEN"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    Benchmark FFT implementations: for each fftalg in (312, 402, 401) run
    GS-SCF tasks over several npfft values at fixed ecut.
    """
    fftalg_list = [312, 402, 401]
    ecut_list = list(range(200, 610, 100))
    ecut_list = [400]

    # BUG FIX: mpi_list used to be assigned only when options.mpi_list was
    # None, so passing --mpi-list on the command line raised NameError below.
    mpi_list = options.mpi_list
    if mpi_list is None:
        mpi_list = [2, 4, 6, 8]
    if options.verbose:
        print("Using mpi_list:", mpi_list)

    template = make_input()
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for fftalg in fftalg_list:
        work = flowtk.Work()
        for npfft in mpi_list:
            if not options.accept_mpi_omp(npfft, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(npfft, omp_threads)
            for inp in abilab.input_gen(template, fftalg=fftalg, npfft=npfft, ecut=ecut_list):
                work.register_scf_task(inp, manager=manager)
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark the effect of istwfk (1 vs 2) on GS-SCF runs, using the
    configurations suggested by abinit autoparal.
    """
    template = make_input()

    # Candidate parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose:
        print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for istwfk in [1, 2]:
        work = flowtk.Work()
        for proc_conf in pconfs:
            if not options.accept_conf(proc_conf, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(proc_conf.mpi_ncpus, omp_threads)
            scf_inp = template.new_with_vars(proc_conf.vars, istwfk=istwfk)
            work.register_scf_task(scf_inp, manager=manager)
        print("Found %d configurations" % len(work))
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark ScaLAPACK GS-SCF runs (np_slk=64) on hard-coded
    (npkpt, npband, npfft) distributions, for wfoptalg in (None, 1).
    """
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    template = make_input()

    # Processor distribution (trailing comment = total number of MPI procs).
    pconfs = [
        dict(npkpt=2, npband=8, npfft=8),    # 128 processeurs
        dict(npkpt=2, npband=16, npfft=8),   # 256 processeurs
        dict(npkpt=2, npband=16, npfft=16),  # 512 processeurs
        dict(npkpt=2, npband=16, npfft=32),  # 1024 processeurs
    ]

    for wfoptalg in [None, 1]:
        work = flowtk.Work()
        for d, omp_threads in product(pconfs, options.omp_list):
            mpi_procs = reduce(operator.mul, d.values(), 1)
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            if options.verbose:
                print("wfoptalg:", wfoptalg, "done with MPI_PROCS:", mpi_procs, "and:", d)
            # BUG FIX: wfoptalg was looped over but never written to the input,
            # so the two works ran identical calculations. Pass it explicitly,
            # consistent with the other wfoptalg benchmarks in this file.
            inp = template.new_with_vars(d, np_slk=64, wfoptalg=wfoptalg)
            work.register_scf_task(inp, manager=manager)
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT: band structure
    (excluded from the benchmark) followed by a work of SCR tasks, one per
    accepted (mpi_procs, omp_threads) pair.
    """
    gs_inp, nscf_inp, scr_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    # The band-structure work only produces the WFK used by the SCR tasks.
    bands = flowtk.BandStructureWork(gs_inp, nscf_inp)
    flow.register_work(bands)
    flow.exclude_from_benchmark(bands)

    #for nband in [200, 400, 600]:
    for nband in [600]:
        scr_work = flowtk.Work()
        scr_task_inp = scr_inp.new_with_vars(nband=nband)

        mpi_list = options.mpi_list
        if mpi_list is None:
            # Cannot call autoparal here because we need a WFK file.
            print("Using hard coded values for mpi_list")
            mpi_list = [p for p in range(1, nband + 1) if abs((nband - 4) % p) < 1]
        if options.verbose:
            print("Using nband %d and mpi_list: %s" % (nband, mpi_list))

        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            scr_work.register_scr_task(scr_task_inp, manager=manager,
                                       deps={bands.nscf_task: "WFK"})

        flow.register_work(scr_work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark GS-SCF runs driven by abinit autoparal; when max_ncpus is not
    supplied it is bounded by nkpt * nsppol * nband * 4.
    """
    template = make_input()
    #template.abivalidate()

    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    if max_ncpus is None:
        # Derive an upper bound from the k-point/spin/band parallelism.
        nkpt = len(template.abiget_ibz().points)
        max_ncpus = nkpt * template["nsppol"] * template["nband"] * 4
    print("Getting all autoparal confs up to max_ncpus:", max_ncpus, "with efficiency >=", min_eff)

    pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    if options.verbose:
        print(pconfs)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    work = flowtk.Work()
    for proc_conf, omp_threads in product(pconfs, options.omp_list):
        mpi_procs = proc_conf.mpi_ncpus
        if not options.accept_conf(proc_conf, omp_threads):
            continue
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        scf_inp = template.new_with_vars(proc_conf.vars)
        work.register_scf_task(scf_inp, manager=manager)

    print("Found %d configurations" % len(work))
    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    Build an `AbinitWorkflow` used for benchmarking ABINIT. A single GS-SCF
    task (excluded from the timing) feeds a work of BSE tasks, one per
    accepted (mpi_procs, omp_threads) pair.
    """
    gs_inp, bse_inp = make_inputs(paw=options.paw)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    gs_work = flowtk.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    mpi_list = options.mpi_list
    if options.mpi_list is None:
        # Default mpi_list: the divisors of the number of transitions.
        nkpt = len(gs_inp.abiget_ibz().points)
        ntrans = (2*2*nkpt)**2
        mpi_list = [nproc for nproc in range(1, 1 + ntrans) if ntrans % nproc == 0]
    if options.verbose:
        print("Using mpi_list:", mpi_list)

    bse_work = flowtk.Work()
    for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
        if not options.accept_mpi_omp(mpi_procs, omp_threads):
            continue
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        bse_work.register_bse_task(bse_inp, manager=manager,
                                   deps={gs_work[0]: "WFK"})

    flow.register_work(bse_work)
    return flow.allocate()
def build_flow(options):  # pragma: no cover
    """
    Placeholder benchmark: the image-parallelism exploration below is still
    disabled, so the returned flow contains no works.
    """
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    template = make_input()

    # Processor distribution over images (disabled).
    #pconfs = [
    #    dict(npimage=10,   # CPU distribution over images
    #         npband=10, npfft=2, bandpp=1),  # CPU distribution for 20 CPU cores per image
    #]

    # Autoparal exploration (disabled).
    #max_ncpus, min_eff = options.max_ncpus, options.min_eff
    #print("Getting all autoparal configurations up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    #pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    #if options.verbose: print(pconfs)
    #%% nprocs_to_test = 200

    # Task generation (disabled).
    #work = flowtk.Work()
    #for d, omp_threads in product(pconfs, options.omp_list):
    #    mpi_procs = reduce(operator.mul, d.values(), 1)
    #    if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
    #    manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
    #    print("wfoptalg:", wfoptalg, "done with MPI_PROCS:", mpi_procs, "and:", d)
    #    inp = template.new_with_vars(d)
    #    work.register_scf_task(inp, manager=manager)
    #flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark ScaLAPACK GS-SCF runs (np_slk=64) on hard-coded processor
    distributions, repeated for wfoptalg in (None, 1).
    """
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    template = make_input()

    # Processor distribution (trailing comment = total number of MPI procs).
    pconfs = [
        dict(npkpt=2, npband=8, npfft=8),    # 128 processeurs
        dict(npkpt=2, npband=16, npfft=8),   # 256 processeurs
        dict(npkpt=2, npband=16, npfft=16),  # 512 processeurs
        dict(npkpt=2, npband=16, npfft=32),  # 1024 processeurs
    ]

    for wfoptalg in [None, 1]:
        work = flowtk.Work()
        for d, omp_threads in product(pconfs, options.omp_list):
            mpi_procs = reduce(operator.mul, d.values(), 1)
            if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            if options.verbose:
                print("wfoptalg:", wfoptalg, "done with MPI_PROCS:", mpi_procs, "and:", d)
            # BUG FIX: wfoptalg was looped over but never written to the input,
            # so the two works ran identical calculations. Pass it explicitly,
            # consistent with the other wfoptalg benchmarks in this file.
            inp = template.new_with_vars(d, np_slk=64, wfoptalg=wfoptalg)
            work.register_scf_task(inp, manager=manager)
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):  # pragma: no cover
    """
    Stub benchmark flow. Everything except the flow construction itself is
    commented out (work in progress), so an empty allocated flow is returned.
    """
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    template = make_input()

    # Intended processor distribution over images (not active yet).
    #pconfs = [
    #    dict(npimage=10,   # CPU distribution over images
    #         npband=10, npfft=2, bandpp=1),  # CPU distribution for 20 CPU cores per image
    #]

    # Intended autoparal-based generation (not active yet).
    #max_ncpus, min_eff = options.max_ncpus, options.min_eff
    #print("Getting all autoparal configurations up to max_ncpus: ",max_ncpus," with efficiency >= ",min_eff)
    #pconfs = template.abiget_autoparal_pconfs(max_ncpus, autoparal=1, verbose=options.verbose)
    #if options.verbose: print(pconfs)
    #%% nprocs_to_test = 200

    # Intended task generation (not active yet).
    #work = flowtk.Work()
    #for d, omp_threads in product(pconfs, options.omp_list):
    #    mpi_procs = reduce(operator.mul, d.values(), 1)
    #    if not options.accept_mpi_omp(mpi_procs, omp_threads): continue
    #    manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
    #    print("wfoptalg:", wfoptalg, "done with MPI_PROCS:", mpi_procs, "and:", d)
    #    inp = template.new_with_vars(d)
    #    work.register_scf_task(inp, manager=manager)
    #flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark phonon tasks built on top of a GS-SCF run (the SCF work is
    excluded from the benchmark timing).
    """
    gs_inp, ph_inp = make_inputs()
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    gs_work = abilab.Work()
    gs_work.register_scf_task(gs_inp)
    flow.register_work(gs_work)
    flow.exclude_from_benchmark(gs_work)

    # Candidate parallel configurations from abinit autoparal.
    max_ncpus, min_eff = options.max_ncpus, options.min_eff
    print("Getting all autoparal confs up to max_ncpus: ", max_ncpus, " with efficiency >= ", min_eff)
    pconfs = ph_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    print(pconfs)

    work = abilab.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        if not options.accept_conf(conf, omp_threads):
            continue
        # NOTE(review): other variants in this file read conf.mpi_ncpus —
        # confirm conf.mpi_procs is a valid attribute/alias on this object.
        manager = options.manager.new_with_fixed_mpi_omp(conf.mpi_procs, omp_threads)
        work.register_phonon_task(ph_inp.new_with_vars(conf.vars), manager=manager,
                                  deps={gs_work[0]: "WFK"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    OpenMP scaling benchmark: one task per omp_threads value, all with a
    single MPI process.
    """
    inp = make_input(paw=options.paw)
    # FIX: removed `nkpt = len(inp.abiget_ibz().points)` — the value was never
    # used, and the abiget_ibz call appears to spawn an abinit run just to
    # compute the IBZ (see the other build_flow variants that do use it).

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    work = abilab.Work()

    omp_list = options.omp_list
    if omp_list is None:
        omp_list = [1, 2, 4, 6]
    print("Using omp_list:", omp_list)

    mpi_procs = 1
    for omp_threads in omp_list:
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        work.register(inp, manager=manager)

    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    OpenMP scaling benchmark: register one single-MPI-process task for each
    value of omp_threads.
    """
    inp = make_input(paw=options.paw)
    nkpt = len(inp.abiget_ibz().points)  # NOTE(review): currently unused.

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    work = flowtk.Work()

    omp_list = options.omp_list
    if omp_list is None:
        omp_list = [1, 2, 4, 6]
    print("Using omp_list:", omp_list)

    mpi_procs = 1
    for omp_threads in omp_list:
        manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
        work.register(inp, manager=manager)

    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    Benchmark useylm=0 vs useylm=1: one work per value, with MPI procs
    ranging over the divisors of nkpt * nsppol when --mpi-list is absent.
    """
    inp = make_input()

    mpi_list = options.mpi_list
    if mpi_list is None:
        # Default: all divisors of the number of (k-point, spin) blocks.
        nkpt = len(inp.abiget_ibz().points)
        nks = nkpt * inp["nsppol"]
        mpi_list = [p for p in range(1, nks + 1) if nks % p == 0]
    if options.verbose:
        print("Using mpi_list:", mpi_list)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    for useylm in (0, 1):
        work = flowtk.Work()
        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            work.register_scf_task(inp.new_with_vars(useylm=useylm), manager=manager)
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark GS-SCF + NSCF pairs; parallel configurations come from abinit
    autoparal or, when --mpi-list is given, from the command-line options.
    """
    gs_inp, nscf_inp = make_inputs(options)
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    mpi_list = options.mpi_list
    if mpi_list is None:
        # Get the list of possible parallel configurations from abinit autoparal.
        max_ncpus, min_eff = options.max_ncpus, options.min_eff
        print("Getting all autoparal confs up to max_ncpus: ", max_ncpus, " with efficiency >= ", min_eff)
        pconfs = gs_inp.abiget_autoparal_pconfs(max_ncpus, autoparal=1)
    else:
        print("Initializing autoparal from command line options")
        from pymatgen.io.abinit.tasks import ParalHints
        pconfs = ParalHints.from_mpi_omp_lists(mpi_list, options.omp_list)
    print(pconfs)

    work = abilab.Work()
    for conf, omp_threads in product(pconfs, options.omp_list):
        #if not options.accept_mpi_omp(mpi_procs,omp_threads): continue
        if not options.accept_conf(conf, omp_threads):
            continue
        manager = options.manager.new_with_fixed_mpi_omp(conf.mpi_ncpus, omp_threads)
        scf_task = work.register_scf_task(gs_inp.new_with_vars(conf.vars), manager=manager)
        # The NSCF step restarts from the SCF density.
        work.register_nscf_task(nscf_inp.new_with_vars(conf.vars), manager=manager,
                                deps={scf_task: "DEN"})

    print("Found %d configurations" % len(work))
    flow.register_work(work)
    return flow.allocate()
def build_flow(options):
    """
    Benchmark FFT implementations (fftalg = 312, 402, 401) with GS-SCF tasks
    over several npfft values at fixed ecut.
    """
    fftalg_list = [312, 402, 401]
    ecut_list = list(range(200, 610, 100))
    ecut_list = [400,]

    # BUG FIX: mpi_list used to be assigned only when options.mpi_list was
    # None, so passing --mpi-list on the command line raised NameError below.
    mpi_list = options.mpi_list
    if mpi_list is None:
        mpi_list = [2, 4, 6, 8]
    print("Using mpi_list:", mpi_list)

    template = make_input()
    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)

    omp_threads = 1
    for fftalg in fftalg_list:
        work = abilab.Work()
        for npfft in mpi_list:
            if not options.accept_mpi_omp(npfft, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(npfft, omp_threads)
            for inp in abilab.input_gen(template, fftalg=fftalg, npfft=npfft, ecut=ecut_list):
                work.register_scf_task(inp, manager=manager)
        flow.register_work(work)

    return flow.allocate()
def build_flow(options):
    """
    Benchmark the useylm input variable (0 vs 1), one work per value; the
    default MPI grid covers every divisor of nkpt * nsppol.
    """
    inp = make_input()

    mpi_list = options.mpi_list
    if mpi_list is None:
        # Default mpi_list: divisors of the number of (k-point, spin) blocks.
        nkpt = len(inp.abiget_ibz().points)
        nks = nkpt * inp["nsppol"]
        mpi_list = [nproc for nproc in range(1, nks + 1) if nks % nproc == 0]
    print("Using mpi_list:", mpi_list)

    flow = BenchmarkFlow(workdir=options.get_workdir(__file__), remove=options.remove)
    for useylm in [0, 1]:
        work = abilab.Work()
        for mpi_procs, omp_threads in product(mpi_list, options.omp_list):
            if not options.accept_mpi_omp(mpi_procs, omp_threads):
                continue
            manager = options.manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
            work.register_scf_task(inp.new_with_vars(useylm=useylm), manager=manager)
        flow.register_work(work)

    return flow.allocate()