Example #1
def _workflow_run(template, ffjfile, output):
    """
    The run workflow.
    """

    femfatjob = FemfatJob(ffjfile)

    ##################################
    # Collect needed resources.
    ##################################

    femfatjob.jobname = ask_jobname()["jobname"]

    femfatjob.timelimit = 60 * int(ask_timelimit()["timelimit"])

    femfatjob.femfat_module = ask_femfat_module()

    femfatjob.cpus = ask_cpus_int()["cpus"]

    femfatjob.partitions = ask_partitions()["partitions"]

    ##########################################
    # Info gathered, dispatch to job rendering
    ##########################################

    templates_dir = os.path.join(os.path.dirname(jobbers.femfat.__file__),
                                 "templates")

    if template:
        femfatjob.template = template
    else:
        femfat_template = config["femfat"]["template"].get()
        femfatjob.template = "{}/{}".format(templates_dir, femfat_template)

    render_to_out(femfatjob, output)
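
All of these workflows gather their input through ask_* helpers that return a dict and are indexed immediately (e.g. ask_jobname()["jobname"]). The helpers themselves are not shown; the following is a minimal hypothetical sketch of that pattern using a plain input() prompt, not the actual jobbers implementation:

# Hypothetical sketch of an ask_* helper; the real jobbers implementation
# is not shown in these examples and may use a prompt library instead.
def ask_jobname(default="job"):
    """Prompt for a job name, returning a dict to match the
    ask_*()["..."] access pattern used by the workflows."""
    answer = input("Job name [{}]: ".format(default)).strip()
    return {"jobname": answer or default}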
Example #2
def _workflow_solve(template, inpfile, output):
    """
    The solve workflow.
    """
    solvejob = SolveJob(inpfile)

    # Collect needed resources.
    solvejob.abaqus_module = ask_abaqus_module()

    solvejob.cpus = ask_cpus_int()["cpus"]

    solvejob.abaqus_licenses = ask_abaqus_licenses()

    solvejob.partitions = ask_partitions()["partitions"]

    # Info gathered, dispatch to job rendering
    templates_dir = os.path.join(os.path.dirname(jobbers.abaqus.__file__),
                                 "templates")

    if template:
        solvejob.template = template
    else:
        solve_template = config["abaqus"]["solve_template"].get()
        solvejob.template = "{}/{}".format(templates_dir, solve_template)

    render_to_out(solvejob, output)
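
Every workflow ends in render_to_out(job, output), whose implementation is not included here. Given the .j2 template suffix in Example #5, a Jinja2-based rendering step is a reasonable guess; a minimal sketch under that assumption:

# Hypothetical sketch; the actual render_to_out in jobbers is not shown.
import jinja2

def render_to_out(job, output):
    # Load the job's template file and render it with the job object
    # as context, writing the result to the given output path.
    with open(job.template) as fh:
        template = jinja2.Template(fh.read())
    with open(output, "w") as out:
        out.write(template.render(job=job))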
Example #3
def _workflow_run(template, output):
    """
    The run workflow.
    """

    tensorjob = TensorJob()

    ##################################
    # Collect needed resources.
    ##################################

    tensorjob.jobname = ask_jobname()['jobname']

    tensorjob.timelimit = 60 * int(ask_timelimit()['timelimit'])

    tensorjob.gpus = ask_gpus()['gpus']

    tensorjob.partitions = ask_partitions()['partitions']

    ##########################################
    # Info gathered, dispatch to job rendering
    ##########################################

    templates_dir = os.path.join(os.path.dirname(jobbers.tensorflow.__file__),
                                 'templates')

    if template:
        tensorjob.template = template
    else:
        tensor_template = config['tensorflow']['template'].get()
        tensorjob.template = "{}/{}".format(templates_dir, tensor_template)

    render_to_out(tensorjob, output)
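
Note the time-limit handling above: the answer is multiplied by 60, which implies the prompt asks for hours while the rendered job expects minutes (the plain-number form of Slurm's --time). For example:

# Assuming the user answers in hours and the template emits minutes:
hours = int("8")           # e.g. ask_timelimit()["timelimit"] == "8"
timelimit = 60 * hours     # 480 minutes, i.e. --time=480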
Example #4
def _workflow_solve(template, inpfile, output):
    """
    The solve workflow.
    """
    solvejob = SolveJob(inpfile)

    ##################################
    # Collect needed resources.
    ##################################
    solvejob.abaqus_module = ask_abaqus_module()

    solvejob.cpus = ask_cpus_int()['cpus']

    lics_needed = calculate_abaqus_licenses(solvejob.cpus)
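    # NOTE: lics_needed is computed here but never used; the license
    # volume comes from the interactive prompt below instead.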

    solvejob.abaqus_licenses = ask_abaqus_licenses()

    solvejob.partitions = ask_partitions()['partitions']

    ##########################################
    # Info gathered, dispatch to job rendering
    ##########################################

    templates_dir = os.path.join(os.path.dirname(jobbers.abaqus.__file__),
                                 'templates')

    if template:
        solvejob.template = template
    else:
        solve_template = config['abaqus']['solve_template'].get()
        solvejob.template = "{}/{}".format(templates_dir, solve_template)

    render_to_out(solvejob, output)
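
calculate_abaqus_licenses is called but not defined in these examples. Abaqus sites commonly size license tokens as 5 * N^0.422 (rounded down) for an N-core analysis; whether jobbers uses exactly this formula is an assumption. A sketch:

import math

# Hypothetical sketch, assuming the commonly cited Abaqus token formula.
def calculate_abaqus_licenses(cpus):
    # 5 * cpus**0.422, rounded down: e.g. 36 cores -> 22 tokens.
    return int(math.floor(5 * cpus ** 0.422))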
Example #5
def _workflow_generic(template, output):
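    """
    The generic workflow.
    """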
    generic_job = GenericJob()
    generic_job.generic_resources = ask_generic_resources()

    templates_dir = os.path.join(os.path.dirname(jobbers.abaqus.__file__),
                                 "templates")

    if template:
        generic_job.template = template
    else:
        generic_job.template = "{}/{}".format(templates_dir,
                                              "abaqus-generic-template.j2")

    render_to_out(generic_job, output)
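
The config[...]["..."].get() lookups in the other examples follow the confuse configuration library's API (confirmed by the confuse.NotFoundError handling in Example #7). A minimal sketch of such a setup; the application name here is an illustrative assumption:

import confuse

# "jobbers" as the application name is an assumption for illustration.
config = confuse.Configuration("jobbers")

# Views are resolved with .get(); confuse raises confuse.NotFoundError
# when the key is missing from every configuration source.
solve_template = config["abaqus"]["solve_template"].get()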
Example #6
def _workflow_solve_parallel(template, inpfile, output):
    """
    The solve-parallel sub workflow.
    """
    solvejob = SolveJob(inpfile)

    ##################################
    # Collect needed resources.
    ##################################
    solvejob.abaqus_module = ask_abaqus_module()

    solvejob.jobname = ask_jobname()['jobname']

    solvejob.nodes = ask_nodes()['nodes']

    solvejob.ntasks_per_node = 36  # Assume 36 cores per node, based on cluster sizes

    solvejob.cpus = int(solvejob.nodes * solvejob.ntasks_per_node)

    lics_needed = calculate_abaqus_licenses(solvejob.cpus)

    # solvejob.abaqus_licenses = ask_abaqus_licenses_parallel()

    solvejob.abaqus_licenses = {
        'license': 'abaqus@flex_host',
        'volume': lics_needed
    }

    solvejob.scratch = ask_scratch()['scratch']

    solvejob.partitions = ask_partitions()['partitions']

    solvejob.timelimit = int(ask_timelimit()['timelimit']) * 60

    ##########################################
    # Info gathered, dispatch to job rendering
    ##########################################

    templates_dir = os.path.join(os.path.dirname(jobbers.abaqus.__file__),
                                 'templates')

    if template:
        solvejob.template = template
    else:
        solve_par_template = config['abaqus']['solve_parallel_template'].get()
        solvejob.template = "{}/{}".format(templates_dir,
                                           str(solve_par_template))

    render_to_out(solvejob, output)
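
The abaqus_licenses dict built above pairs a Slurm license name with a token count. How the template consumes it is not shown, but the shape maps directly onto Slurm's -L/--licenses option (name[@db]:count); a plausible rendered directive:

licenses = {"license": "abaqus@flex_host", "volume": 22}

# Plausible rendering into a Slurm batch directive; the actual template
# content is not part of these examples.
directive = "#SBATCH --licenses={}:{}".format(
    licenses["license"], licenses["volume"])
# -> "#SBATCH --licenses=abaqus@flex_host:22"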
Example #7
def _workflow_solve_parallel(template, inpfile, output):
    """
    The solve-parallel sub workflow.
    """
    solvejob = SolveJob(inpfile)

    # If job is a restart read job, ask for restart files.
    if inpfile.restart_read:
        restartfile = ask_restart()
        solvejob.restartjobname = os.path.splitext(
            os.path.basename(str(restartfile)))[0]
        solvejob.inpfile.restart_file = solvejob.restartjobname

    # Collect needed resources.
    solvejob.abaqus_module = ask_abaqus_module()

    solvejob.jobname = ask_jobname(solvejob.inpfile.file.stem)["jobname"]

    solvejob.nodes = ask_nodes()["nodes"]

    # TODO: This should not be hardcoded here. Cluster config?
    # Slurm alternative: --mincpus <n> controls the minimum number of CPUs
    # allocated per node, since the number of nodes is set and exclusive
    # mode is used. Only relevant when we have more than one type of hardware.
    solvejob.ntasks_per_node = 36  # Assume 36 cores per node, based on cluster sizes

    solvejob.cpus = int(solvejob.nodes * solvejob.ntasks_per_node)

    lics_needed = calculate_abaqus_licenses(solvejob.cpus)
    # solvejob.abaqus_licenses = ask_abaqus_licenses_parallel()
    solvejob.abaqus_licenses = {
        "license": "abaqus@slurmdbd",
        "volume": lics_needed
    }

    # 20190521: Do not ask for scratch at the moment, go with config default /jhacxc
    # solvejob.scratch = ask_scratch()['scratch']
    solvejob.scratch = config["slurm"]["shared_scratch"].get()

    # 20190521: Do not ask for partitions at the moment, go with config defaults /jhacxc
    # solvejob.partitions = ask_partitions()['partitions']
    solvejob.partitions.append(config["slurm"]["default_partition"].get())

    solvejob.timelimit = int(ask_timelimit()["timelimit"]) * 60

    # Ask for masternode mem (GiB) and convert to MiB, Slurm's default unit.
    # Note: scale by 0.95 (multiply by ~973 rather than 1024) so the limit
    # stays below the node memory reported by Slurm.
    solvejob.masternode_mem = int(
        float(ask_masternode_mem()["memory"]) * 1024 * 0.95)
    # For distributed jobs, explicitly set worker node limit if defined
    try:
        solvejob.workernode_mem = int(
            float(config["abaqus"]["workernode_mem_default"].get()[0]) *
            1024.0 * 0.95)
    except confuse.NotFoundError:
        pass

    # Info gathered, dispatch to job rendering
    templates_dir = os.path.join(os.path.dirname(jobbers.abaqus.__file__),
                                 "templates")

    if template:
        solvejob.template = template
    else:
        solve_par_template = config["abaqus"][
            "solve_distributed_template"].get()
        solvejob.template = str(pathlib.Path(templates_dir,
                                             solve_par_template))

    render_to_out(solvejob, output)
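
The masternode memory conversion above turns the answered GiB value into MiB (Slurm's default memory unit) with a 5% safety margin. A worked example of the arithmetic:

# A user answering 64 (GiB) at the prompt:
masternode_mem = int(float("64") * 1024 * 0.95)
# 64 * 1024 = 65536 MiB, scaled by 0.95 -> 62259 MiB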
Example #8
def _workflow_solve_eigen(template, inpfile, output):
    """
    The solve-eigen sub workflow.
    """
    solvejob = SolveJob(inpfile)

    # If job is a restart read job, ask for restart files.
    if inpfile.restart_read:
        restartfile = ask_restart()
        solvejob.restartjobname = os.path.splitext(
            os.path.basename(str(restartfile)))[0]
        solvejob.inpfile.restart_file = solvejob.restartjobname

    ##################################
    # Collect needed resources.
    ##################################
    solvejob.abaqus_module = ask_abaqus_module()

    solvejob.jobname = ask_jobname(solvejob.inpfile.file.stem)["jobname"]

    solvejob.nodes = 1

    solvejob.gpus = ask_gpus_bool()["gpus"]

    # TODO: This should not be hardcoded here. Cluster config?
    solvejob.ntasks_per_node = 36  # Assume 36 cores per node, based on cluster sizes

    solvejob.cpus = int(solvejob.nodes * solvejob.ntasks_per_node)

    lics_needed = calculate_abaqus_licenses(solvejob.cpus)

    # solvejob.abaqus_licenses = ask_abaqus_licenses_parallel()

    solvejob.abaqus_licenses = {
        "license": "abaqus@slurmdbd",
        "volume": lics_needed
    }

    # 20190521: Do not ask for scratch at the moment, go with config default /jhacxc
    # solvejob.scratch = ask_scratch()['scratch']
    solvejob.scratch = config["slurm"]["shared_scratch"].get()

    # 20190521: Do not ask for partitions at the moment, go with config defaults /jhacxc
    # solvejob.partitions = ask_partitions()['partitions']
    solvejob.partitions.append(config["slurm"]["default_partition"].get())

    solvejob.timelimit = int(ask_timelimit()["timelimit"]) * 60

    # Ask for masternode mem (GiB) and convert to MiB, Slurm's default unit.
    # Note: scale by 0.95 (multiply by ~973 rather than 1024) so the limit
    # stays below the node memory reported by Slurm.
    # TEMPHACK: if a GPU is selected, do not ask for memory; request 1 TiB
    # (1048576 MiB) so the job is placed on a GPU node.
    if solvejob.gpus:
        solvejob.masternode_mem = 1048576
    else:
        solvejob.masternode_mem = int(
            float(ask_masternode_mem()["memory"]) * 1024 * 0.95)

    ##########################################
    # Info gathered, dispatch to job rendering
    ##########################################

    templates_dir = os.path.join(os.path.dirname(jobbers.abaqus.__file__),
                                 "templates")

    if template:
        solvejob.template = template
    else:
        solve_par_template = config["abaqus"][
            "solve_eigenfrequency_template"].get()
        # solvejob.template = "{}/{}".format(templates_dir, str(solve_par_template))
        solvejob.template = str(pathlib.Path(templates_dir,
                                             solve_par_template))

    render_to_out(solvejob, output)
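
The restart handling in Examples #7 and #8 recovers the original job name by stripping both the directory and the extension from the answered restart file. A worked example with a hypothetical path:

import os

restartfile = "/scratch/runs/bracket_v2.res"  # hypothetical answer
restartjobname = os.path.splitext(os.path.basename(restartfile))[0]
# -> "bracket_v2"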