Code example #1
File: cli_qa_qc.py Project: barbarmarc/reV
def get_sc_cmd(name, sc_table, out_dir, sub_dir, columns, plot_type, cmap,
               lcoe, log_file, verbose, terminal):
    """Build CLI call for supply_curve."""

    args = ('-sct {sc_table} '
            '-o {out_dir} '
            '-sd {sub_dir} '
            '-cols {columns} '
            '-plt {plot_type} '
            '-cmap {cmap} '
            '-lcoe {lcoe} '
            '-log {log_file} ')

    args = args.format(
        sc_table=SLURM.s(sc_table),
        out_dir=SLURM.s(out_dir),
        sub_dir=SLURM.s(sub_dir),
        columns=SLURM.s(columns),
        plot_type=SLURM.s(plot_type),
        cmap=SLURM.s(cmap),
        lcoe=SLURM.s(lcoe),
        log_file=SLURM.s(log_file),
    )

    if verbose:
        args += '-v '

    if terminal:
        args += '-t '

    cmd = ('python -m reV.qa_qc.cli_qa_qc -n {} supply-curve {}'.format(
        SLURM.s(name), args))

    return cmd
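
For orientation, here is a minimal usage sketch for get_sc_cmd. All argument values (paths, column selection, plot type, colormap) are hypothetical placeholders, not taken from the reV source, and the import assumes the function lives in the same module that the built command targets:

from reV.qa_qc.cli_qa_qc import get_sc_cmd

# placeholder inputs for illustration only
cmd = get_sc_cmd(name='qa_job',
                 sc_table='./sc_table.csv',
                 out_dir='./qa_out',
                 sub_dir='supply_curve',
                 columns=None,
                 plot_type='plotly',
                 cmap='viridis',
                 lcoe='mean_lcoe',
                 log_file='./logs/qa_job.log',
                 verbose=True,
                 terminal=True)
print(cmd)
# Strings like this are what the launcher functions in the later examples
# hand to the SLURM helper for submission on Eagle.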
Code example #2
File: cli_rep_profiles.py Project: barbarmarc/reV
def slurm(ctx, alloc, memory, walltime, feature, conda_env, module,
          stdout_path):
    """slurm (Eagle) submission tool for reV representative profiles."""

    name = ctx.obj['NAME']
    gen_fpath = ctx.obj['GEN_FPATH']
    rev_summary = ctx.obj['REV_SUMMARY']
    reg_cols = ctx.obj['REG_COLS']
    cf_dset = ctx.obj['CF_DSET']
    rep_method = ctx.obj['REP_METHOD']
    err_method = ctx.obj['ERR_METHOD']
    weight = ctx.obj['WEIGHT']
    n_profiles = ctx.obj['N_PROFILES']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    max_workers = ctx.obj['MAX_WORKERS']
    aggregate_profiles = ctx.obj['AGGREGATE_PROFILES']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, gen_fpath, rev_summary, reg_cols, cf_dset,
                       rep_method, err_method, weight, n_profiles, out_dir,
                       log_dir, max_workers, aggregate_profiles, verbose)

    status = Status.retrieve_job_status(out_dir, 'rep-profiles', name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    else:
        logger.info('Running reV SC rep profiles on SLURM with '
                    'node name "{}"'.format(name))
        slurm = SLURM(cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      stdout_path=stdout_path,
                      conda_env=conda_env,
                      module=module)
        if slurm.id:
            msg = ('Kicked off reV rep profiles job "{}" '
                   '(SLURM jobid #{}).'.format(name, slurm.id))
            Status.add_job(out_dir,
                           'rep-profiles',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': '{}.h5'.format(name),
                               'dirout': out_dir
                           })
        else:
            msg = ('Was unable to kick off reV rep profiles job "{}". '
                   'Please see the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Code example #3
def slurm(ctx, alloc, memory, walltime, feature, module, conda_env,
          stdout_path):
    """slurm (eagle) submission tool for reV supply curve."""
    name = ctx.obj['NAME']
    sc_points = ctx.obj['SC_POINTS']
    trans_table = ctx.obj['TRANS_TABLE']
    fixed_charge_rate = ctx.obj['FIXED_CHARGE_RATE']
    sc_features = ctx.obj['SC_FEATURES']
    transmission_costs = ctx.obj['TRANSMISSION_COSTS']
    simple = ctx.obj['SIMPLE']
    line_limited = ctx.obj['LINE_LIMITED']
    sort_on = ctx.obj['SORT_ON']
    offshore_trans_table = ctx.obj['OFFSHORE_TRANS_TABLE']
    wind_dirs = ctx.obj['WIND_DIRS']
    n_dirs = ctx.obj['N_DIRS']
    downwind = ctx.obj['DOWNWIND']
    offshore_compete = ctx.obj['OFFSHORE_COMPETE']
    max_workers = ctx.obj['MAX_WORKERS']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, sc_points, trans_table, fixed_charge_rate,
                       sc_features, transmission_costs, sort_on,
                       offshore_trans_table, wind_dirs, n_dirs, downwind,
                       offshore_compete, max_workers, out_dir, log_dir,
                       simple, line_limited, verbose)

    status = Status.retrieve_job_status(out_dir, 'supply-curve', name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'
               .format(name, out_dir))
    else:
        logger.info('Running reV Supply Curve on SLURM with '
                    'node name "{}"'.format(name))
        logger.debug('\t{}'.format(cmd))
        slurm = SLURM(cmd, alloc=alloc, memory=memory,
                      walltime=walltime, feature=feature,
                      name=name, stdout_path=stdout_path,
                      conda_env=conda_env, module=module)
        if slurm.id:
            msg = ('Kicked off reV SC job "{}" (SLURM jobid #{}).'
                   .format(name, slurm.id))
            Status.add_job(
                out_dir, 'supply-curve', name, replace=True,
                job_attrs={'job_id': slurm.id, 'hardware': 'eagle',
                           'fout': '{}.csv'.format(name), 'dirout': out_dir})
        else:
            msg = ('Was unable to kick off reV SC job "{}". Please see the '
                   'stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Code example #4
File: cli_offshore.py Project: barbarmarc/reV
def slurm(ctx, alloc, feature, memory, walltime, module, conda_env,
          stdout_path):
    """slurm (Eagle) submission tool for reV supply curve aggregation."""

    name = ctx.obj['NAME']
    gen_fpath = ctx.obj['GEN_FPATH']
    offshore_fpath = ctx.obj['OFFSHORE_FPATH']
    project_points = ctx.obj['PROJECT_POINTS']
    sam_files = ctx.obj['SAM_FILES']
    log_dir = ctx.obj['LOG_DIR']
    out_dir = ctx.obj['OUT_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, gen_fpath, offshore_fpath, project_points,
                       sam_files, log_dir, verbose)

    status = Status.retrieve_job_status(out_dir, 'offshore', name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    else:
        logger.info('Running reV offshore aggregation on SLURM with '
                    'node name "{}"'.format(name))
        slurm = SLURM(cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      stdout_path=stdout_path,
                      conda_env=conda_env,
                      module=module)
        if slurm.id:
            msg = ('Kicked off reV offshore job "{}" '
                   '(SLURM jobid #{}).'.format(name, slurm.id))
            Status.add_job(out_dir,
                           'offshore',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': '{}.csv'.format(name),
                               'dirout': out_dir
                           })
        else:
            msg = ('Was unable to kick off reV offshore job "{}". Please see '
                   'the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Code example #5
File: cli_multi_year.py Project: barbarmarc/reV
def get_slurm_cmd(name, my_file, group_params, verbose=False):
    """Make a reV multi-year collection local CLI call string.

    Parameters
    ----------
    name : str
        reV collection jobname.
    my_file : str
        Path to .h5 file to use for multi-year collection.
    group_params : list
        List of groups and their parameters to collect
    verbose : bool
        Flag to turn on DEBUG logging

    Returns
    -------
    cmd : str
        Single line command line argument to make the necessary CLI calls on
        the node to collect the desired groups
    """
    # make a cli arg string for direct() in this module
    main_args = ('-n {name} '
                 '{v}'.format(
                     name=SLURM.s(name),
                     v='-v ' if verbose else '',
                 ))

    collect_args = '-gp {} '.format(SLURM.s(group_params))

    # Python command that will be executed on a node
    # command strings after cli v7.0 use dashes instead of underscores

    cmd = ('python -m reV.handlers.cli_multi_year {} direct -f {} '
           'multi-year-groups {}'.format(main_args, SLURM.s(my_file),
                                         collect_args))
    logger.debug('Creating the following command line call:\n\t{}'.format(cmd))
    return cmd
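
A short sketch of calling get_slurm_cmd directly. The job name, file path, and group parameters are hypothetical placeholders, and the exact quoting of the printed string depends on how SLURM.s formats each argument:

from reV.handlers.cli_multi_year import get_slurm_cmd

# hypothetical inputs; group_params is whatever list of group/parameter
# specifications the multi-year collection config produces
group_params = ['placeholder_group_params']
cmd = get_slurm_cmd('my_collect', './outputs/multi_year.h5', group_params,
                    verbose=True)
print(cmd)
# Roughly: python -m reV.handlers.cli_multi_year -n ... -v direct -f ...
#          multi-year-groups -gp ...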
Code example #6
File: cli_multi_year.py Project: barbarmarc/reV
def multi_year_slurm(ctx, alloc, walltime, feature, memory, conda_env, module,
                     stdout_path, group_params, verbose):
    """
    Run multi year collection and means on HPC via SLURM job submission.
    """

    name = ctx.obj['NAME']
    my_file = ctx.obj['MY_FILE']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    status = Status.retrieve_job_status(os.path.dirname(my_file), 'multi-year',
                                        name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, os.path.dirname(my_file)))
    else:
        logger.info('Running reV multi-year collection on SLURM with node '
                    ' name "{}", collecting into "{}".'.format(name, my_file))
        # create and submit the SLURM job
        slurm_cmd = get_slurm_cmd(name, my_file, group_params, verbose=verbose)
        slurm = SLURM(slurm_cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      stdout_path=stdout_path,
                      conda_env=conda_env,
                      module=module)
        if slurm.id:
            msg = ('Kicked off reV multi-year collection job "{}" '
                   '(SLURM jobid #{}).'.format(name, slurm.id))
            # add job to reV status file.
            Status.add_job(os.path.dirname(my_file),
                           'multi-year',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': os.path.basename(my_file),
                               'dirout': os.path.dirname(my_file)
                           })
        else:
            msg = ('Was unable to kick off reV collection job "{}". '
                   'Please see the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Code example #7
File: cli_offshore.py Project: barbarmarc/reV
def get_node_cmd(name, gen_fpath, offshore_fpath, points, sam_files, log_dir,
                 verbose):
    """Get a CLI call command for the offshore aggregation cli."""

    args = ('-gf {gen_fpath} '
            '-of {offshore_fpath} '
            '-pp {points} '
            '-sf {sam_files} '
            '-ld {log_dir} ')

    args = args.format(
        gen_fpath=SLURM.s(gen_fpath),
        offshore_fpath=SLURM.s(offshore_fpath),
        points=SLURM.s(points),
        sam_files=SLURM.s(sam_files),
        log_dir=SLURM.s(log_dir),
    )

    if verbose:
        args += '-v '

    cmd = ('python -m reV.offshore.cli_offshore -n {} direct {}'.format(
        SLURM.s(name), args))
    return cmd
Code example #8
def get_node_cmd(name, sc_points, trans_table, fixed_charge_rate, sc_features,
                 transmission_costs, sort_on, offshore_trans_table, wind_dirs,
                 n_dirs, downwind, offshore_compete, max_workers, out_dir,
                 log_dir, simple, line_limited, verbose):
    """Get a CLI call command for the Supply Curve cli."""

    args = ('-sc {sc_points} '
            '-tt {trans_table} '
            '-fcr {fixed_charge_rate} '
            '-scf {sc_features} '
            '-tc {transmission_costs} '
            '-so {sort_on} '
            '-ott {offshore_trans_table} '
            '-dirs {n_dirs} '
            '-mw {max_workers} '
            '-o {out_dir} '
            '-ld {log_dir} '
            )

    args = args.format(sc_points=SLURM.s(sc_points),
                       trans_table=SLURM.s(trans_table),
                       fixed_charge_rate=SLURM.s(fixed_charge_rate),
                       sc_features=SLURM.s(sc_features),
                       transmission_costs=SLURM.s(transmission_costs),
                       sort_on=SLURM.s(sort_on),
                       offshore_trans_table=SLURM.s(offshore_trans_table),
                       n_dirs=SLURM.s(n_dirs),
                       max_workers=SLURM.s(max_workers),
                       out_dir=SLURM.s(out_dir),
                       log_dir=SLURM.s(log_dir),
                       )

    if wind_dirs is not None:
        args += '-wd {wind_dirs} '.format(wind_dirs=SLURM.s(wind_dirs))

    if downwind:
        args += '-dw '

    if offshore_compete:
        args += '-oc '

    if simple:
        args += '-s '
    elif line_limited:
        args += '-ll '

    if verbose:
        args += '-v '

    cmd = ('python -m reV.supply_curve.cli_supply_curve -n {} direct {}'
           .format(SLURM.s(name), args))

    return cmd
Code example #9
def test_eagle(year):
    """Gen PV CF profiles with write to disk and compare against rev1."""
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_{}.h5'.format(year)
    sam_files = TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json'
    rev2_out_dir = os.path.join(TESTDATADIR, 'ri_pv_reV2')
    rev2_out = 'gen_ri_pv_smart_{}.h5'.format(year)

    if not os.path.exists(rev2_out_dir):
        os.mkdir(rev2_out_dir)

    name = 'etest'
    points = slice(0, 100)
    verbose = True

    log_level = 'DEBUG'
    log_file = os.path.join(rev2_out_dir, '{}.log'.format(name))
    modules = [__name__, 'reV.utilities', 'reV.generation']
    for mod in modules:
        init_logger(mod, log_level=log_level, log_file=log_file)

    cmd = get_node_cmd(name=name,
                       tech='pvwattsv5',
                       points=points,
                       points_range=None,
                       sam_files=sam_files,
                       res_file=res_file,
                       sites_per_worker=None,
                       max_workers=None,
                       fout=rev2_out,
                       dirout=rev2_out_dir,
                       logdir=rev2_out_dir,
                       output_request=('cf_profile', 'cf_mean'),
                       verbose=verbose)

    # create and submit the SLURM job
    slurm = SLURM(cmd,
                  alloc='rev',
                  memory=96,
                  walltime=0.1,
                  name=name,
                  stdout_path=rev2_out_dir)

    # poll the SLURM queue until the job reaches the 'CG' (completing) state
    while True:
        status = slurm.check_status(name, var='name')
        if status == 'CG':
            break
        else:
            time.sleep(5)

    # get reV 2.0 generation profiles from disk
    flist = os.listdir(rev2_out_dir)
    for fname in flist:
        if '.h5' in fname:
            if rev2_out.replace('.h5', '') in fname:
                full_f = os.path.join(rev2_out_dir, fname)
                with Outputs(full_f, 'r') as cf:
                    rev2_profiles = cf['cf_profile']
                break

    # get reV 1.0 generation profiles
    rev1_profiles = get_r1_profiles(year=year)
    rev1_profiles = rev1_profiles[:, points]

    result = np.allclose(rev1_profiles, rev2_profiles, rtol=RTOL, atol=ATOL)
    if result:
        # remove output files if test passes.
        flist = os.listdir(rev2_out_dir)
        for fname in flist:
            os.remove(os.path.join(rev2_out_dir, fname))

    assert result is True
Code example #10
File: cli_qa_qc.py Project: barbarmarc/reV
def launch_slurm(config, verbose):
    """
    Launch SLURM QA/QC job.

    Parameters
    ----------
    config : dict
        reV QA/QC configuration dictionary
    verbose : bool
        Flag to turn on DEBUG logging
    """

    out_dir = config.dirout
    log_file = os.path.join(config.logdir, config.name + '.log')
    stdout_path = os.path.join(config.logdir, 'stdout/')

    node_cmd = []
    terminal = False
    for i, module in enumerate(config.module_names):
        module_config = config.get_module_inputs(module)
        fpaths = module_config.fpath

        if isinstance(fpaths, (str, type(None))):
            fpaths = [fpaths]

        for j, fpath in enumerate(fpaths):
            if (i == len(config.module_names) - 1) and (j == len(fpaths) - 1):
                terminal = True
            if module.lower() == 'exclusions':
                node_cmd.append(
                    get_excl_cmd(config.name, module_config.excl_fpath,
                                 out_dir, module_config.sub_dir,
                                 module_config.excl_dict,
                                 module_config.area_filter_kernel,
                                 module_config.min_area,
                                 module_config.plot_type, module_config.cmap,
                                 module_config.plot_step, log_file, verbose,
                                 terminal))
            elif fpath.endswith('.h5'):
                node_cmd.append(
                    get_h5_cmd(config.name, fpath, out_dir,
                               module_config.sub_dir, module_config.dsets,
                               module_config.group, module_config.process_size,
                               module_config.max_workers,
                               module_config.plot_type, module_config.cmap,
                               log_file, verbose, terminal))
            elif fpath.endswith('.csv'):
                node_cmd.append(
                    get_sc_cmd(config.name, fpath, out_dir,
                               module_config.sub_dir, module_config.columns,
                               module_config.plot_type, module_config.cmap,
                               module_config.lcoe, log_file, verbose,
                               terminal))
            else:
                msg = ("Cannot run QA/QC for {}: 'fpath' must be a '*.h5' "
                       "or '*.csv' reV output file, but {} was given!".format(
                           module, fpath))
                logger.error(msg)
                raise ValueError(msg)

    status = Status.retrieve_job_status(out_dir, 'qa-qc', config.name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(config.name, out_dir))
    else:
        node_cmd = '\n'.join(node_cmd)
        logger.info('Running reV QA-QC on SLURM with '
                    'node name "{}"'.format(config.name))
        slurm = SLURM(node_cmd,
                      name=config.name,
                      alloc=config.execution_control.allocation,
                      memory=config.execution_control.memory,
                      feature=config.execution_control.feature,
                      walltime=config.execution_control.walltime,
                      conda_env=config.execution_control.conda_env,
                      module=config.execution_control.module,
                      stdout_path=stdout_path)
        if slurm.id:
            msg = ('Kicked off reV QA-QC job "{}" '
                   '(SLURM jobid #{}).'.format(config.name, slurm.id))
            Status.add_job(out_dir,
                           'qa-qc',
                           config.name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'dirout': out_dir
                           })
        else:
            msg = ('Was unable to kick off reV QA-QC job "{}". '
                   'Please see the stdout error messages'.format(config.name))

    click.echo(msg)
    logger.info(msg)
Code example #11
File: cli_gen.py Project: barbarmarc/reV
def slurm(ctx, nodes, alloc, memory, walltime, feature, conda_env, module,
          stdout_path, verbose):
    """Run generation on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    tech = ctx.obj['TECH']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    res_file = ctx.obj['RES_FILE']
    sites_per_worker = ctx.obj['SITES_PER_WORKER']
    fout = ctx.obj['FOUT']
    dirout = ctx.obj['DIROUT']
    logdir = ctx.obj['LOGDIR']
    output_request = ctx.obj['OUTPUT_REQUEST']
    max_workers = ctx.obj['MAX_WORKERS']
    mem_util_lim = ctx.obj['MEM_UTIL_LIM']
    timeout = ctx.obj['TIMEOUT']
    curtailment = ctx.obj['CURTAILMENT']
    downscale = ctx.obj['DOWNSCALE']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    # initialize an info logger on the year level
    init_mult(name, logdir, modules=[__name__, 'reV.generation.generation',
                                     'reV.config', 'reV.utilities', 'reV.SAM'],
              verbose=False)

    pc = get_node_pc(points, sam_files, tech, res_file, nodes)

    for i, split in enumerate(pc):
        node_name, fout_node = get_node_name_fout(name, fout, i, pc,
                                                  hpc='slurm')

        cmd = get_node_cmd(node_name, tech, sam_files, res_file,
                           points=points, points_range=split.split_range,
                           sites_per_worker=sites_per_worker,
                           max_workers=max_workers, fout=fout_node,
                           dirout=dirout, logdir=logdir,
                           output_request=output_request,
                           mem_util_lim=mem_util_lim, timeout=timeout,
                           curtailment=curtailment,
                           downscale=downscale, verbose=verbose)

        status = Status.retrieve_job_status(dirout, 'generation', node_name)
        if status == 'successful':
            msg = ('Job "{}" is successful in status json found in "{}", '
                   'not re-running.'
                   .format(node_name, dirout))
        else:
            logger.info('Running reV generation on SLURM with node name "{}" '
                        'for {} (points range: {}).'
                        .format(node_name, pc, split.split_range))
            # create and submit the SLURM job
            slurm = SLURM(cmd, alloc=alloc, memory=memory, walltime=walltime,
                          feature=feature, name=node_name,
                          stdout_path=stdout_path, conda_env=conda_env,
                          module=module)
            if slurm.id:
                msg = ('Kicked off reV generation job "{}" (SLURM jobid #{}).'
                       .format(node_name, slurm.id))
                # add job to reV status file.
                Status.add_job(
                    dirout, 'generation', node_name, replace=True,
                    job_attrs={'job_id': slurm.id, 'hardware': 'eagle',
                               'fout': fout_node, 'dirout': dirout})
            else:
                msg = ('Was unable to kick off reV generation job "{}". '
                       'Please see the stdout error messages'
                       .format(node_name))

        click.echo(msg)
        logger.info(msg)
Code example #12
File: cli_collect.py Project: barbarmarc/reV
def get_node_cmd(name,
                 h5_file,
                 h5_dir,
                 project_points,
                 dsets,
                 file_prefix=None,
                 log_dir='./logs/',
                 purge_chunks=False,
                 verbose=False):
    """Make a reV collection local CLI call string.

    Parameters
    ----------
    name : str
        reV collection jobname.
    h5_file : str
        Path to .h5 file into which data will be collected
    h5_dir : str
        Root directory containing .h5 files to combine
    project_points : str | slice | list | pandas.DataFrame
        Project points that correspond to the full collection of points
        contained in the .h5 files to be collected
    dsets : list
        List of datasets (strings) to be collected.
    file_prefix : str
        .h5 file prefix; if None, collect all files in h5_dir
    log_dir : str
        Log directory.
    purge_chunks : bool
        Flag to delete the chunked files after collection.
    verbose : bool
        Flag to turn on DEBUG logging

    Returns
    -------
    cmd : str
        Single line command line argument to call the following CLI with
        appropriately formatted arguments based on input args:
            python -m reV.handlers.cli_collect [args] collect
    """
    # make a cli arg string for direct() in this module
    args = ('-f {h5_file} '
            '-d {h5_dir} '
            '-pp {project_points} '
            '-ds {dsets} '
            '-fp {file_prefix} '
            '-ld {log_dir} '
            '{purge}'
            '{v}'.format(
                h5_file=SLURM.s(h5_file),
                h5_dir=SLURM.s(h5_dir),
                project_points=SLURM.s(project_points),
                dsets=SLURM.s(dsets),
                file_prefix=SLURM.s(file_prefix),
                log_dir=SLURM.s(log_dir),
                purge='-p ' if purge_chunks else '',
                v='-v ' if verbose else '',
            ))

    # Python command that will be executed on a node
    # command strings after cli v7.0 use dashes instead of underscores
    cmd = ('python -m reV.handlers.cli_collect -n {} direct {} collect'.format(
        SLURM.s(name), args))
    logger.debug('Creating the following command line call:\n\t{}'.format(cmd))

    return cmd
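
A hedged usage sketch for the collection command builder above. The paths, project points file, and prefix are placeholders; the dataset names are only examples of typical reV outputs:

from reV.handlers.cli_collect import get_node_cmd

# placeholder inputs for illustration only
cmd = get_node_cmd('collect_job',
                   './outputs/gen_2012.h5',
                   './outputs/chunks/',
                   './project_points.csv',
                   ['cf_mean', 'cf_profile'],
                   file_prefix='gen_2012',
                   log_dir='./logs/',
                   purge_chunks=True,
                   verbose=True)
print(cmd)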
Code example #13
File: cli_qa_qc.py Project: barbarmarc/reV
def get_h5_cmd(name, h5_file, out_dir, sub_dir, dsets, group, process_size,
               max_workers, plot_type, cmap, log_file, verbose, terminal):
    """Build CLI call for reV_h5."""

    args = ('-h5 {h5_file} '
            '-o {out_dir} '
            '-sd {sub_dir} '
            '-ds {dsets} '
            '-grp {group} '
            '-ps {process_size} '
            '-w {max_workers} '
            '-plt {plot_type} '
            '-cmap {cmap} '
            '-log {log_file} ')

    args = args.format(
        h5_file=SLURM.s(h5_file),
        out_dir=SLURM.s(out_dir),
        sub_dir=SLURM.s(sub_dir),
        dsets=SLURM.s(dsets),
        group=SLURM.s(group),
        process_size=SLURM.s(process_size),
        max_workers=SLURM.s(max_workers),
        plot_type=SLURM.s(plot_type),
        cmap=SLURM.s(cmap),
        log_file=SLURM.s(log_file),
    )

    if verbose:
        args += '-v '

    if terminal:
        args += '-t '

    cmd = ('python -m reV.qa_qc.cli_qa_qc -n {} rev-h5 {}'.format(
        SLURM.s(name), args))

    return cmd
Code example #14
File: cli_sc_aggregation.py Project: barbarmarc/reV
def slurm(ctx, alloc, walltime, feature, memory, module, conda_env,
          stdout_path):
    """slurm (Eagle) submission tool for reV supply curve aggregation."""
    name = ctx.obj['NAME']
    excl_fpath = ctx.obj['EXCL_FPATH']
    gen_fpath = ctx.obj['GEN_FPATH']
    res_fpath = ctx.obj['RES_FPATH']
    tm_dset = ctx.obj['TM_DSET']
    excl_dict = ctx.obj['EXCL_DICT']
    check_excl_layers = ctx.obj['CHECK_LAYERS']
    res_class_dset = ctx.obj['RES_CLASS_DSET']
    res_class_bins = ctx.obj['RES_CLASS_BINS']
    cf_dset = ctx.obj['CF_DSET']
    lcoe_dset = ctx.obj['LCOE_DSET']
    data_layers = ctx.obj['DATA_LAYERS']
    resolution = ctx.obj['RESOLUTION']
    excl_area = ctx.obj['EXCL_AREA']
    power_density = ctx.obj['POWER_DENSITY']
    area_filter_kernel = ctx.obj['AREA_FILTER_KERNEL']
    min_area = ctx.obj['MIN_AREA']
    friction_fpath = ctx.obj['FRICTION_FPATH']
    friction_dset = ctx.obj['FRICTION_DSET']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, excl_fpath, gen_fpath, res_fpath,
                       tm_dset, excl_dict, check_excl_layers,
                       res_class_dset, res_class_bins,
                       cf_dset, lcoe_dset, data_layers,
                       resolution, excl_area,
                       power_density, area_filter_kernel, min_area,
                       friction_fpath, friction_dset,
                       out_dir, log_dir, verbose)

    status = Status.retrieve_job_status(out_dir, 'supply-curve-aggregation',
                                        name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'
               .format(name, out_dir))
    else:
        logger.info('Running reV SC aggregation on SLURM with '
                    'node name "{}"'.format(name))
        slurm = SLURM(cmd, alloc=alloc, memory=memory,
                      walltime=walltime, feature=feature,
                      name=name, stdout_path=stdout_path,
                      conda_env=conda_env, module=module)
        if slurm.id:
            msg = ('Kicked off reV SC aggregation job "{}" '
                   '(SLURM jobid #{}).'
                   .format(name, slurm.id))
            Status.add_job(
                out_dir, 'supply-curve-aggregation', name, replace=True,
                job_attrs={'job_id': slurm.id, 'hardware': 'eagle',
                           'fout': '{}.csv'.format(name), 'dirout': out_dir})
        else:
            msg = ('Was unable to kick off reV SC aggregation job "{}". '
                   'Please see the stdout error messages'
                   .format(name))
    click.echo(msg)
    logger.info(msg)
Code example #15
File: cli_sc_aggregation.py Project: barbarmarc/reV
def get_node_cmd(name, excl_fpath, gen_fpath, res_fpath, tm_dset, excl_dict,
                 check_excl_layers, res_class_dset, res_class_bins, cf_dset,
                 lcoe_dset, data_layers, resolution, excl_area, power_density,
                 area_filter_kernel, min_area, friction_fpath, friction_dset,
                 out_dir, log_dir, verbose):
    """Get a CLI call command for the SC aggregation cli."""

    args = ('-ef {excl_fpath} '
            '-gf {gen_fpath} '
            '-rf {res_fpath} '
            '-tm {tm_dset} '
            '-exd {excl_dict} '
            '-cd {res_class_dset} '
            '-cb {res_class_bins} '
            '-cf {cf_dset} '
            '-lc {lcoe_dset} '
            '-d {data_layers} '
            '-r {resolution} '
            '-ea {excl_area} '
            '-pd {power_density} '
            '-afk {area_filter_kernel} '
            '-ma {min_area} '
            '-ff {friction_fpath} '
            '-fd {friction_dset} '
            '-o {out_dir} '
            '-ld {log_dir} '
            )

    args = args.format(excl_fpath=SLURM.s(excl_fpath),
                       gen_fpath=SLURM.s(gen_fpath),
                       res_fpath=SLURM.s(res_fpath),
                       tm_dset=SLURM.s(tm_dset),
                       excl_dict=SLURM.s(excl_dict),
                       res_class_dset=SLURM.s(res_class_dset),
                       res_class_bins=SLURM.s(res_class_bins),
                       cf_dset=SLURM.s(cf_dset),
                       lcoe_dset=SLURM.s(lcoe_dset),
                       data_layers=SLURM.s(data_layers),
                       resolution=SLURM.s(resolution),
                       excl_area=SLURM.s(excl_area),
                       power_density=SLURM.s(power_density),
                       area_filter_kernel=SLURM.s(area_filter_kernel),
                       min_area=SLURM.s(min_area),
                       friction_fpath=SLURM.s(friction_fpath),
                       friction_dset=SLURM.s(friction_dset),
                       out_dir=SLURM.s(out_dir),
                       log_dir=SLURM.s(log_dir),
                       )

    if check_excl_layers:
        args += '-cl '

    if verbose:
        args += '-v '

    cmd = ('python -m reV.supply_curve.cli_sc_aggregation -n {} direct {}'
           .format(SLURM.s(name), args))

    return cmd
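
As a quick illustration of how the boolean flags are appended by this builder, a sketch with placeholder inputs. The exclusion file, technology mapping dataset, and excl_dict contents are hypothetical examples; every value is serialized through SLURM.s exactly as in the function body:

from reV.supply_curve.cli_sc_aggregation import get_node_cmd

# placeholder inputs for illustration only
excl_dict = {'ri_srtm_slope': {'inclusion_range': (0, 5)}}
cmd = get_node_cmd('agg_job', './exclusions.h5', './gen_2012.h5',
                   './nsrdb_2012.h5', 'techmap_nsrdb', excl_dict,
                   check_excl_layers=True, res_class_dset=None,
                   res_class_bins=None, cf_dset='cf_mean-means',
                   lcoe_dset='lcoe_fcr-means', data_layers=None,
                   resolution=64, excl_area=0.0081, power_density=None,
                   area_filter_kernel='queen', min_area=None,
                   friction_fpath=None, friction_dset=None,
                   out_dir='./agg_out', log_dir='./logs', verbose=True)
print(cmd)  # '-cl ' and '-v ' are appended because both flags are True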
Code example #16
File: cli_rep_profiles.py Project: barbarmarc/reV
def get_node_cmd(name, gen_fpath, rev_summary, reg_cols, cf_dset, rep_method,
                 err_method, weight, n_profiles, out_dir, log_dir, max_workers,
                 aggregate_profiles, verbose):
    """Get a CLI call command for the rep profiles cli."""

    args = ('-g {gen_fpath} '
            '-r {rev_summary} '
            '-rc {reg_cols} '
            '-cf {cf_dset} '
            '-rm {rep_method} '
            '-em {err_method} '
            '-w {weight} '
            '-np {n_profiles} '
            '-od {out_dir} '
            '-ld {log_dir} '
            '-mw {max_workers} ')

    args = args.format(
        gen_fpath=SLURM.s(gen_fpath),
        rev_summary=SLURM.s(rev_summary),
        reg_cols=SLURM.s(reg_cols),
        cf_dset=SLURM.s(cf_dset),
        rep_method=SLURM.s(rep_method),
        err_method=SLURM.s(err_method),
        weight=SLURM.s(weight),
        n_profiles=SLURM.s(n_profiles),
        out_dir=SLURM.s(out_dir),
        log_dir=SLURM.s(log_dir),
        max_workers=SLURM.s(max_workers),
    )

    if aggregate_profiles:
        args += '-agg '

    if verbose:
        args += '-v '

    cmd = (
        'python -m reV.rep_profiles.cli_rep_profiles -n {} direct {}'.format(
            SLURM.s(name), args))

    return cmd
Code example #17
File: cli_econ.py Project: barbarmarc/reV
def slurm(ctx, nodes, alloc, memory, walltime, feature, module, conda_env,
          stdout_path, verbose):
    """Run econ on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    cf_file = ctx.obj['CF_FILE']
    cf_year = ctx.obj['CF_YEAR']
    site_data = ctx.obj['SITE_DATA']
    sites_per_worker = ctx.obj['SITES_PER_WORKER']
    max_workers = ctx.obj['MAX_WORKERS']
    timeout = ctx.obj['TIMEOUT']
    fout = ctx.obj['FOUT']
    dirout = ctx.obj['DIROUT']
    logdir = ctx.obj['LOGDIR']
    output_request = ctx.obj['OUTPUT_REQUEST']
    append = ctx.obj['APPEND']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    # initialize an info logger on the year level
    init_mult(name,
              logdir,
              modules=[
                  __name__, 'reV.econ.econ', 'reV.config', 'reV.utilities',
                  'reV.SAM', 'rex.utilities'
              ],
              verbose=False)

    if append:
        pc = [None]
    else:
        pc = get_node_pc(points, sam_files, nodes)

    for i, split in enumerate(pc):
        node_name, fout_node = get_node_name_fout(name,
                                                  fout,
                                                  i,
                                                  pc,
                                                  hpc='slurm')
        node_name = node_name.replace('gen', 'econ')

        points_range = split.split_range if split is not None else None
        cmd = get_node_cmd(node_name,
                           sam_files,
                           cf_file,
                           cf_year=cf_year,
                           site_data=site_data,
                           points=points,
                           points_range=points_range,
                           sites_per_worker=sites_per_worker,
                           max_workers=max_workers,
                           timeout=timeout,
                           fout=fout_node,
                           dirout=dirout,
                           logdir=logdir,
                           output_request=output_request,
                           append=append,
                           verbose=verbose)

        status = Status.retrieve_job_status(dirout, 'econ', node_name)

        if status == 'successful':
            msg = ('Job "{}" is successful in status json found in "{}", '
                   'not re-running.'.format(node_name, dirout))
        else:
            logger.info('Running reV econ on SLURM with node name "{}" for '
                        '{} (points range: {}).'.format(
                            node_name, pc, points_range))
            # create and submit the SLURM job
            slurm = SLURM(cmd,
                          alloc=alloc,
                          memory=memory,
                          walltime=walltime,
                          feature=feature,
                          name=node_name,
                          stdout_path=stdout_path,
                          conda_env=conda_env,
                          module=module)
            if slurm.id:
                msg = (
                    'Kicked off reV econ job "{}" (SLURM jobid #{}).'.format(
                        node_name, slurm.id))
                # add job to reV status file.
                Status.add_job(dirout,
                               'econ',
                               node_name,
                               replace=True,
                               job_attrs={
                                   'job_id': slurm.id,
                                   'hardware': 'eagle',
                                   'fout': fout_node,
                                   'dirout': dirout
                               })
            else:
                msg = (
                    'Was unable to kick off reV econ job "{}". '
                    'Please see the stdout error messages'.format(node_name))

        click.echo(msg)
        logger.info(msg)
Code example #18
File: cli_qa_qc.py Project: barbarmarc/reV
def get_excl_cmd(name, excl_fpath, out_dir, sub_dir, excl_dict,
                 area_filter_kernel, min_area, plot_type, cmap, plot_step,
                 log_file, verbose, terminal):
    """Build CLI call for exclusions."""

    args = ('-excl {excl_fpath} '
            '-o {out_dir} '
            '-sd {sub_dir} '
            '-exd {excl_dict} '
            '-afk {area_filter_kernel} '
            '-ma {min_area} '
            '-plt {plot_type} '
            '-cmap {cmap} '
            '-step {plot_step} '
            '-log {log_file} ')

    args = args.format(
        excl_fpath=SLURM.s(excl_fpath),
        out_dir=SLURM.s(out_dir),
        sub_dir=SLURM.s(sub_dir),
        excl_dict=SLURM.s(excl_dict),
        area_filter_kernel=SLURM.s(area_filter_kernel),
        min_area=SLURM.s(min_area),
        plot_type=SLURM.s(plot_type),
        cmap=SLURM.s(cmap),
        plot_step=SLURM.s(plot_step),
        log_file=SLURM.s(log_file),
    )

    if verbose:
        args += '-v '

    if terminal:
        args += '-t '

    cmd = ('python -m reV.qa_qc.cli_qa_qc -n {} exclusions {}'.format(
        SLURM.s(name), args))

    return cmd
Code example #19
File: cli_collect.py Project: barbarmarc/reV
def collect_slurm(ctx, alloc, memory, walltime, feature, conda_env, module,
                  stdout_path, verbose):
    """Run collection on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    h5_file = ctx.obj['H5_FILE']
    h5_dir = ctx.obj['H5_DIR']
    log_dir = ctx.obj['LOG_DIR']
    project_points = ctx.obj['PROJECT_POINTS']
    dsets = ctx.obj['DSETS']
    file_prefix = ctx.obj['FILE_PREFIX']
    purge_chunks = ctx.obj['PURGE_CHUNKS']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    cmd = get_node_cmd(name,
                       h5_file,
                       h5_dir,
                       project_points,
                       dsets,
                       file_prefix=file_prefix,
                       log_dir=log_dir,
                       purge_chunks=purge_chunks,
                       verbose=verbose)

    status = Status.retrieve_job_status(os.path.dirname(h5_file), 'collect',
                                        name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, os.path.dirname(h5_file)))
    else:
        logger.info(
            'Running reV collection on SLURM with node name "{}", '
            'collecting data to "{}" from "{}" with file prefix "{}".'.format(
                name, h5_file, h5_dir, file_prefix))
        # create and submit the SLURM job
        slurm = SLURM(cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      conda_env=conda_env,
                      module=module,
                      stdout_path=stdout_path)
        if slurm.id:
            msg = (
                'Kicked off reV collection job "{}" (SLURM jobid #{}).'.format(
                    name, slurm.id))
            # add job to reV status file.
            Status.add_job(os.path.dirname(h5_file),
                           'collect',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': os.path.basename(h5_file),
                               'dirout': os.path.dirname(h5_file)
                           })
        else:
            msg = ('Was unable to kick off reV collection job "{}". '
                   'Please see the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Code example #20
File: cli_gen.py Project: barbarmarc/reV
def get_node_cmd(name, tech, sam_files, res_file, points=slice(0, 100),
                 points_range=None, sites_per_worker=None, max_workers=None,
                 fout='reV.h5', dirout='./out/gen_out',
                 logdir='./out/log_gen', output_request=('cf_mean',),
                 mem_util_lim=0.4, timeout=1800, curtailment=None,
                 downscale=None, verbose=False):
    """Make a reV geneneration direct-local CLI call string.

    Parameters
    ----------
    name : str
        Name of the job to be submitted.
    tech : str
        Name of the reV technology to be analyzed.
        (e.g. pv, csp, landbasedwind, offshorewind).
    sam_files : dict | str | list
        SAM input configuration ID(s) and file path(s). Keys are the SAM
        config ID(s), top level value is the SAM path. Can also be a single
        config file str. If it's a list, it is mapped to the sorted list
        of unique configs requested by points csv.
    res_file : str
        WTK or NSRDB resource file name + path.
    points : slice | str | list | tuple
        Slice/list specifying project points, or a string pointing to a
        project points csv file.
    points_range : list | None
        Optional range list to run a subset of sites
    sites_per_worker : int | None
        Number of sites to be analyzed in serial on a single local core.
    max_workers : int | None
        Number of workers to use on a node. None defaults to all available
        workers.
    fout : str
        Target filename to dump generation outputs.
    dirout : str
        Target directory to dump generation fout.
    logdir : str
        Target directory to save log files.
    output_request : list | tuple
        Output variables requested from SAM.
    mem_util_lim : float
        Memory utilization limit (fractional).
    timeout : int | float
        Number of seconds to wait for parallel run iteration to complete
        before returning zeros. Default is 1800 seconds.
    curtailment : NoneType | str
        Pointer to a file containing curtailment input parameters or None if
        no curtailment.
    downscale : NoneType | str
        Option for NSRDB resource downscaling to higher temporal
        resolution. Expects a string in the Pandas frequency format,
        e.g. '5min'.
    verbose : bool
        Flag to turn on debug logging. Default is False.

    Returns
    -------
    cmd : str
        Single line command line argument to call the following CLI with
        appropriately formatted arguments based on input args:
            python -m reV.generation.cli_gen [args] direct [args] local [args]
    """

    # make a cli arg string for main() in this module
    arg_main = ('-n {name} '.format(name=SLURM.s(name)))

    # make some strings only if specified
    cstr = '-curt {} '.format(SLURM.s(curtailment))
    dstr = '-ds {} '.format(SLURM.s(downscale))

    # make a cli arg string for direct() in this module
    arg_direct = ('-t {tech} '
                  '-p {points} '
                  '-sf {sam_files} '
                  '-rf {res_file} '
                  '-spw {sites_per_worker} '
                  '-fo {fout} '
                  '-do {dirout} '
                  '-lo {logdir} '
                  '-or {out_req} '
                  '-mem {mem} '
                  '{curt}'
                  '{ds}')
    arg_direct = arg_direct.format(
        tech=SLURM.s(tech),
        points=SLURM.s(points),
        sam_files=SLURM.s(sam_files),
        res_file=SLURM.s(res_file),
        sites_per_worker=SLURM.s(sites_per_worker),
        fout=SLURM.s(fout),
        dirout=SLURM.s(dirout),
        logdir=SLURM.s(logdir),
        out_req=SLURM.s(output_request),
        mem=SLURM.s(mem_util_lim),
        curt=cstr if curtailment else '',
        ds=dstr if downscale else '')

    # make a cli arg string for local() in this module
    arg_loc = ('-mw {max_workers} '
               '-to {timeout} '
               '-pr {points_range} '
               '{v}'.format(max_workers=SLURM.s(max_workers),
                            timeout=SLURM.s(timeout),
                            points_range=SLURM.s(points_range),
                            v='-v' if verbose else ''))

    # Python command that will be executed on a node
    # command strings after cli v7.0 use dashes instead of underscores
    cmd = ('python -m reV.generation.cli_gen '
           '{arg_main} direct {arg_direct} local {arg_loc}'
           .format(arg_main=arg_main,
                   arg_direct=arg_direct,
                   arg_loc=arg_loc))
    logger.debug('Creating the following command line call:\n\t{}'.format(cmd))
    return cmd
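
Example #9 above shows this generation builder being called from a test. The sketch below just highlights that the curtailment and downscale arguments only add their flags when truthy; all values are hypothetical placeholders:

from reV.generation.cli_gen import get_node_cmd

# placeholder inputs for illustration only
cmd = get_node_cmd('gen_job', 'pvwattsv5', './SAM/naris_pv_1axis_inv13.json',
                   './nsrdb/ri_100_nsrdb_2012.h5',
                   points=slice(0, 100),
                   output_request=('cf_mean', 'cf_profile'),
                   downscale='5min',    # adds '-ds ...' to the direct args
                   curtailment=None,    # no '-curt ...' flag is added
                   verbose=True)
print(cmd)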