Example #1
def slurm(ctx, alloc, memory, walltime, feature, conda_env, module,
          stdout_path):
    """slurm (Eagle) submission tool for reV representative profiles."""

    name = ctx.obj['NAME']
    gen_fpath = ctx.obj['GEN_FPATH']
    rev_summary = ctx.obj['REV_SUMMARY']
    reg_cols = ctx.obj['REG_COLS']
    cf_dset = ctx.obj['CF_DSET']
    rep_method = ctx.obj['REP_METHOD']
    err_method = ctx.obj['ERR_METHOD']
    weight = ctx.obj['WEIGHT']
    n_profiles = ctx.obj['N_PROFILES']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    max_workers = ctx.obj['MAX_WORKERS']
    aggregate_profiles = ctx.obj['AGGREGATE_PROFILES']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, gen_fpath, rev_summary, reg_cols, cf_dset,
                       rep_method, err_method, weight, n_profiles, out_dir,
                       log_dir, max_workers, aggregate_profiles, verbose)

    status = Status.retrieve_job_status(out_dir, 'rep-profiles', name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    else:
        logger.info('Running reV SC rep profiles on SLURM with '
                    'node name "{}"'.format(name))
        slurm = SLURM(cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      stdout_path=stdout_path,
                      conda_env=conda_env,
                      module=module)
        if slurm.id:
            msg = ('Kicked off reV rep profiles job "{}" '
                   '(SLURM jobid #{}).'.format(name, slurm.id))
            Status.add_job(out_dir,
                           'rep-profiles',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': '{}.h5'.format(name),
                               'dirout': out_dir
                           })
        else:
            msg = ('Was unable to kick off reV rep profiles job "{}". '
                   'Please see the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Example #2
def from_config(ctx, config_file, verbose):
    """Run reV gen from a config file."""
    name = ctx.obj['NAME']

    # Instantiate the config object
    config = MultiYearConfig(config_file)

    # take name from config if not default
    if config.name.lower() != 'rev':
        name = config.name
        ctx.obj['NAME'] = name

    # Enforce verbosity if logging level is specified in the config
    if config.log_level == logging.DEBUG:
        verbose = True

    # make output directory if does not exist
    if not os.path.exists(config.dirout):
        os.makedirs(config.dirout)

    # initialize loggers.
    init_mult(name, config.logdir, modules=[__name__, 'reV'], verbose=verbose)

    # Initial log statements
    logger.info(
        'Running reV multi-year from config file: "{}"'.format(config_file))
    logger.info('Target output directory: "{}"'.format(config.dirout))
    logger.info('Target logging directory: "{}"'.format(config.logdir))

    ctx.obj['MY_FILE'] = config.my_file
    if config.execution_control.option == 'local':

        ctx.obj['NAME'] = name
        status = Status.retrieve_job_status(config.dirout, 'multi-year', name)
        if status != 'successful':
            Status.add_job(config.dirout,
                           'multi-year',
                           name,
                           replace=True,
                           job_attrs={
                               'hardware': 'local',
                               'fout': ctx.obj['MY_FILE'],
                               'dirout': config.dirout
                           })
            group_params = json.dumps(config.group_params)
            ctx.invoke(multi_year_groups, group_params=group_params)

    elif config.execution_control.option in ('eagle', 'slurm'):
        ctx.obj['NAME'] = name
        ctx.invoke(multi_year_slurm,
                   alloc=config.execution_control.allocation,
                   walltime=config.execution_control.walltime,
                   feature=config.execution_control.feature,
                   memory=config.execution_control.memory,
                   conda_env=config.execution_control.conda_env,
                   module=config.execution_control.module,
                   stdout_path=os.path.join(config.logdir, 'stdout'),
                   group_params=json.dumps(config.group_params),
                   verbose=verbose)
Example #3
def test_job_exists():
    """Test job addition and exist check"""
    purge()
    Status.add_job(STATUS_DIR, 'generation', 'test1',
                   job_attrs={'job_status': 'submitted'})
    exists = Status.job_exists(STATUS_DIR, 'test1')
    assert exists
    purge()
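A note on the test examples: they assume a module-level STATUS_DIR constant and a purge() helper that resets the status directory between cases, and neither is shown here. A minimal sketch of what such fixtures might look like, assuming the status files are plain JSON in a scratch directory (the file pattern, location, and purge() implementation are assumptions, not taken from the reV test suite):

import glob
import os
import tempfile

# Hypothetical test fixtures: a scratch directory for the status JSON and a
# purge() helper that clears it so each test starts from an empty status file.
STATUS_DIR = os.path.join(tempfile.gettempdir(), 'rev_status_tests')
os.makedirs(STATUS_DIR, exist_ok=True)


def purge():
    """Remove any status JSON files left over from a previous test."""
    for fpath in glob.glob(os.path.join(STATUS_DIR, '*.json')):
        os.remove(fpath)
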
Example #4
def multi_year_slurm(ctx, group_params, alloc, walltime, feature, memory,
                     conda_env, module, stdout_path, verbose):
    """
    Run multi-year collection and means on HPC via SLURM job submission.
    """

    name = ctx.obj['NAME']
    my_file = ctx.obj['MY_FILE']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    slurm_manager = ctx.obj.get('SLURM_MANAGER', None)
    if slurm_manager is None:
        slurm_manager = SLURM()
        ctx.obj['SLURM_MANAGER'] = slurm_manager

    status = Status.retrieve_job_status(os.path.dirname(my_file),
                                        'multi-year',
                                        name,
                                        hardware='eagle',
                                        subprocess_manager=slurm_manager)

    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, os.path.dirname(my_file)))
    elif 'fail' not in str(status).lower() and status is not None:
        msg = ('Job "{}" was found with status "{}", not resubmitting'.format(
            name, status))
    else:
        logger.info('Running reV multi-year collection on SLURM with node '
                    'name "{}", collecting into "{}".'.format(name, my_file))
        # create and submit the SLURM job
        slurm_cmd = get_slurm_cmd(name, my_file, group_params, verbose=verbose)
        out = slurm_manager.sbatch(slurm_cmd,
                                   alloc=alloc,
                                   memory=memory,
                                   walltime=walltime,
                                   feature=feature,
                                   name=name,
                                   stdout_path=stdout_path,
                                   conda_env=conda_env,
                                   module=module)[0]
        if out:
            msg = ('Kicked off reV multi-year collection job "{}" '
                   '(SLURM jobid #{}).'.format(name, out))
            # add job to reV status file.
            Status.add_job(os.path.dirname(my_file),
                           'multi-year',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': out,
                               'hardware': 'eagle',
                               'fout': os.path.basename(my_file),
                               'dirout': os.path.dirname(my_file)
                           })

    click.echo(msg)
    logger.info(msg)
Example #5
def launch_slurm(config, verbose):
    """
    Launch slurm QA/QC job

    Parameters
    ----------
    config : reV QA/QC config object
        reV QA/QC configuration object.
    verbose : bool
        Flag to turn on debug logging.
    """

    out_dir = config.dirout
    log_file = os.path.join(config.logdir, config.name + '.log')
    stdout_path = os.path.join(config.logdir, 'stdout/')
    node_cmd = get_multiple_cmds(config, out_dir, log_file, verbose)

    slurm_manager = SLURM()
    status = Status.retrieve_job_status(out_dir,
                                        'qa-qc',
                                        config.name,
                                        hardware='eagle',
                                        subprocess_manager=slurm_manager)

    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(config.name, out_dir))
    elif 'fail' not in str(status).lower() and status is not None:
        msg = ('Job "{}" was found with status "{}", not resubmitting'.format(
            config.name, status))
    else:
        logger.info('Running reV QA-QC on SLURM with '
                    'node name "{}"'.format(config.name))
        out = slurm_manager.sbatch(
            node_cmd,
            name=config.name,
            alloc=config.execution_control.allocation,
            memory=config.execution_control.memory,
            feature=config.execution_control.feature,
            walltime=config.execution_control.walltime,
            conda_env=config.execution_control.conda_env,
            module=config.execution_control.module,
            stdout_path=stdout_path)[0]
        if out:
            msg = ('Kicked off reV QA-QC job "{}" '
                   '(SLURM jobid #{}).'.format(config.name, out))
            Status.add_job(out_dir,
                           'qa-qc',
                           config.name,
                           replace=True,
                           job_attrs={
                               'job_id': out,
                               'hardware': 'eagle',
                               'dirout': out_dir
                           })

    click.echo(msg)
    logger.info(msg)
Example #6
def slurm(ctx, alloc, memory, walltime, feature, module, conda_env,
          stdout_path):
    """slurm (eagle) submission tool for reV supply curve."""
    name = ctx.obj['NAME']
    sc_points = ctx.obj['SC_POINTS']
    trans_table = ctx.obj['TRANS_TABLE']
    fixed_charge_rate = ctx.obj['FIXED_CHARGE_RATE']
    sc_features = ctx.obj['SC_FEATURES']
    transmission_costs = ctx.obj['TRANSMISSION_COSTS']
    simple = ctx.obj['SIMPLE']
    line_limited = ctx.obj['LINE_LIMITED']
    sort_on = ctx.obj['SORT_ON']
    offshore_trans_table = ctx.obj['OFFSHORE_TRANS_TABLE']
    wind_dirs = ctx.obj['WIND_DIRS']
    n_dirs = ctx.obj['N_DIRS']
    downwind = ctx.obj['DOWNWIND']
    offshore_compete = ctx.obj['OFFSHORE_COMPETE']
    max_workers = ctx.obj['MAX_WORKERS']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, sc_points, trans_table, fixed_charge_rate,
                       sc_features, transmission_costs, sort_on,
                       offshore_trans_table, wind_dirs, n_dirs, downwind,
                       offshore_compete, max_workers, out_dir, log_dir,
                       simple, line_limited, verbose)

    status = Status.retrieve_job_status(out_dir, 'supply-curve', name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'
               .format(name, out_dir))
    else:
        logger.info('Running reV Supply Curve on SLURM with '
                    'node name "{}"'.format(name))
        logger.debug('\t{}'.format(cmd))
        slurm = SLURM(cmd, alloc=alloc, memory=memory,
                      walltime=walltime, feature=feature,
                      name=name, stdout_path=stdout_path,
                      conda_env=conda_env, module=module)
        if slurm.id:
            msg = ('Kicked off reV SC job "{}" (SLURM jobid #{}).'
                   .format(name, slurm.id))
            Status.add_job(
                out_dir, 'supply-curve', name, replace=True,
                job_attrs={'job_id': slurm.id, 'hardware': 'eagle',
                           'fout': '{}.csv'.format(name), 'dirout': out_dir})
        else:
            msg = ('Was unable to kick off reV SC job "{}". Please see the '
                   'stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Example #7
def slurm(ctx, alloc, feature, memory, walltime, module, conda_env,
          stdout_path):
    """slurm (Eagle) submission tool for reV supply curve aggregation."""

    name = ctx.obj['NAME']
    gen_fpath = ctx.obj['GEN_FPATH']
    offshore_fpath = ctx.obj['OFFSHORE_FPATH']
    project_points = ctx.obj['PROJECT_POINTS']
    sam_files = ctx.obj['SAM_FILES']
    log_dir = ctx.obj['LOG_DIR']
    out_dir = ctx.obj['OUT_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, gen_fpath, offshore_fpath, project_points,
                       sam_files, log_dir, verbose)

    status = Status.retrieve_job_status(out_dir, 'offshore', name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    else:
        logger.info('Running reV offshore aggregation on SLURM with '
                    'node name "{}"'.format(name))
        slurm = SLURM(cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      stdout_path=stdout_path,
                      conda_env=conda_env,
                      module=module)
        if slurm.id:
            msg = ('Kicked off reV offshore job "{}" '
                   '(SLURM jobid #{}).'.format(name, slurm.id))
            Status.add_job(out_dir,
                           'offshore',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': '{}.csv'.format(name),
                               'dirout': out_dir
                           })
        else:
            msg = ('Was unable to kick off reV offshore job "{}". Please see '
                   'the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Example #8
def test_job_addition():
    """Test job addition and exist check"""
    purge()
    Status.add_job(STATUS_DIR, 'generation', 'test1')
    status1 = Status(STATUS_DIR).data['generation']['test1']['job_status']

    Status.add_job(STATUS_DIR, 'generation', 'test1',
                   job_attrs={'job_status': 'finished', 'additional': 'test'})
    status2 = Status(STATUS_DIR).data['generation']['test1']['job_status']

    assert status2 == status1
    purge()
Example #9
def test_job_replacement():
    """Test job addition and replacement"""
    purge()
    Status.add_job(STATUS_DIR, 'generation', 'test1',
                   job_attrs={'job_status': 'submitted'})

    Status.add_job(STATUS_DIR, 'generation', 'test1',
                   job_attrs={'addition': 'test', 'job_status': 'finished'},
                   replace=True)

    status = Status(STATUS_DIR).data['generation']['test1']['job_status']
    addition = Status(STATUS_DIR).data['generation']['test1']['addition']
    assert status == 'finished'
    assert addition == 'test'
    purge()
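Read together, Example #8 and Example #9 pin down the replace semantics of Status.add_job: a repeat call without replace=True leaves the existing entry and its job_status untouched, while replace=True rewrites the entry and stores any new attributes. A minimal usage sketch of that behavior, with a placeholder status directory and an assumed import path:

from reV.pipeline.status import Status  # import path is an assumption

status_dir = '/scratch/rev/run'  # placeholder directory holding the status JSON

# First call creates the 'generation'/'job0' entry.
Status.add_job(status_dir, 'generation', 'job0',
               job_attrs={'job_status': 'submitted'})

# Repeat call without replace=True: the existing 'submitted' status is kept.
Status.add_job(status_dir, 'generation', 'job0',
               job_attrs={'job_status': 'finished'})

# With replace=True the entry is rewritten, so the new status and the extra
# 'addition' attribute are stored.
Status.add_job(status_dir, 'generation', 'job0', replace=True,
               job_attrs={'job_status': 'finished', 'addition': 'test'})
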
Example #10
def multi_year_slurm(ctx, alloc, walltime, feature, memory, conda_env, module,
                     stdout_path, group_params, verbose):
    """
    Run multi-year collection and means on HPC via SLURM job submission.
    """

    name = ctx.obj['NAME']
    my_file = ctx.obj['MY_FILE']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    status = Status.retrieve_job_status(os.path.dirname(my_file), 'multi-year',
                                        name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, os.path.dirname(my_file)))
    else:
        logger.info('Running reV multi-year collection on SLURM with node '
                    'name "{}", collecting into "{}".'.format(name, my_file))
        # create and submit the SLURM job
        slurm_cmd = get_slurm_cmd(name, my_file, group_params, verbose=verbose)
        slurm = SLURM(slurm_cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      stdout_path=stdout_path,
                      conda_env=conda_env,
                      module=module)
        if slurm.id:
            msg = ('Kicked off reV multi-year collection job "{}" '
                   '(SLURM jobid #{}).'.format(name, slurm.id))
            # add job to reV status file.
            Status.add_job(os.path.dirname(my_file),
                           'multi-year',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': os.path.basename(my_file),
                               'dirout': os.path.dirname(my_file)
                           })
        else:
            msg = ('Was unable to kick off reV collection job "{}". '
                   'Please see the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
Example #11
def from_config(ctx, config_file, verbose):
    """Run reV offshore aggregation from a config file."""
    # Instantiate the config object
    config = OffshoreConfig(config_file)
    name = ctx.obj['NAME']

    # take name from config if not default
    if config.name.lower() != 'rev':
        name = config.name
        ctx.obj['NAME'] = name

    # Enforce verbosity if logging level is specified in the config
    if config.log_level == logging.DEBUG:
        verbose = True

    # initialize loggers
    init_mult(
        name,
        config.logdir,
        modules=[__name__, 'reV.config', 'reV.utilities', 'rex.utilities'],
        verbose=verbose)

    # Initial log statements
    logger.info('Running reV offshore aggregation from config '
                'file: "{}"'.format(config_file))
    logger.info('Target output directory: "{}"'.format(config.dirout))
    logger.info('Target logging directory: "{}"'.format(config.logdir))
    logger.debug('The full configuration input is as follows:\n{}'.format(
        pprint.pformat(config, indent=4)))

    for i, gen_fpath in enumerate(config.parse_gen_fpaths()):
        job_name = '{}_{}'.format(name, str(i).zfill(2))

        if config.execution_control.option == 'local':
            status = Status.retrieve_job_status(config.dirout, 'offshore',
                                                job_name)
            if status != 'successful':
                Status.add_job(config.dirout,
                               'offshore',
                               job_name,
                               replace=True,
                               job_attrs={
                                   'hardware': 'local',
                                   'fout': '{}_offshore.h5'.format(job_name),
                                   'dirout': config.dirout,
                                   'finput': gen_fpath
                               })
                ctx.invoke(direct,
                           gen_fpath=gen_fpath,
                           offshore_fpath=config.offshore_fpath,
                           points=config.project_points,
                           sam_files=config.sam_files,
                           logdir=config.logdir,
                           verbose=verbose)

        elif config.execution_control.option in ('eagle', 'slurm'):

            ctx.obj['NAME'] = job_name
            ctx.obj['GEN_FPATH'] = gen_fpath
            ctx.obj['OFFSHORE_FPATH'] = config.offshore_fpath
            ctx.obj['PROJECT_POINTS'] = config.project_points
            ctx.obj['SAM_FILES'] = config.sam_files
            ctx.obj['OUT_DIR'] = config.dirout
            ctx.obj['LOG_DIR'] = config.logdir
            ctx.obj['VERBOSE'] = verbose

            ctx.invoke(slurm,
                       alloc=config.execution_control.allocation,
                       memory=config.execution_control.memory,
                       walltime=config.execution_control.walltime,
                       feature=config.execution_control.feature,
                       module=config.execution_control.module,
                       conda_env=config.execution_control.conda_env)
Example #12
def slurm(ctx, nodes, alloc, memory, walltime, feature, module, conda_env,
          stdout_path, verbose):
    """Run econ on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    cf_file = ctx.obj['CF_FILE']
    cf_year = ctx.obj['CF_YEAR']
    site_data = ctx.obj['SITE_DATA']
    sites_per_worker = ctx.obj['SITES_PER_WORKER']
    max_workers = ctx.obj['MAX_WORKERS']
    timeout = ctx.obj['TIMEOUT']
    fout = ctx.obj['FOUT']
    dirout = ctx.obj['DIROUT']
    logdir = ctx.obj['LOGDIR']
    output_request = ctx.obj['OUTPUT_REQUEST']
    append = ctx.obj['APPEND']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    # initialize an info logger on the year level
    init_mult(name,
              logdir,
              modules=[
                  __name__, 'reV.econ.econ', 'reV.config', 'reV.utilities',
                  'reV.SAM', 'rex.utilities'
              ],
              verbose=False)

    if append:
        pc = [None]
    else:
        pc = get_node_pc(points, sam_files, nodes)

    for i, split in enumerate(pc):
        node_name, fout_node = get_node_name_fout(name,
                                                  fout,
                                                  i,
                                                  pc,
                                                  hpc='slurm')
        node_name = node_name.replace('gen', 'econ')

        points_range = split.split_range if split is not None else None
        cmd = get_node_cmd(node_name,
                           sam_files,
                           cf_file,
                           cf_year=cf_year,
                           site_data=site_data,
                           points=points,
                           points_range=points_range,
                           sites_per_worker=sites_per_worker,
                           max_workers=max_workers,
                           timeout=timeout,
                           fout=fout_node,
                           dirout=dirout,
                           logdir=logdir,
                           output_request=output_request,
                           append=append,
                           verbose=verbose)

        status = Status.retrieve_job_status(dirout, 'econ', node_name)

        if status == 'successful':
            msg = ('Job "{}" is successful in status json found in "{}", '
                   'not re-running.'.format(node_name, dirout))
        else:
            logger.info('Running reV econ on SLURM with node name "{}" for '
                        '{} (points range: {}).'.format(
                            node_name, pc, points_range))
            # create and submit the SLURM job
            slurm = SLURM(cmd,
                          alloc=alloc,
                          memory=memory,
                          walltime=walltime,
                          feature=feature,
                          name=node_name,
                          stdout_path=stdout_path,
                          conda_env=conda_env,
                          module=module)
            if slurm.id:
                msg = (
                    'Kicked off reV econ job "{}" (SLURM jobid #{}).'.format(
                        node_name, slurm.id))
                # add job to reV status file.
                Status.add_job(dirout,
                               'econ',
                               node_name,
                               replace=True,
                               job_attrs={
                                   'job_id': slurm.id,
                                   'hardware': 'eagle',
                                   'fout': fout_node,
                                   'dirout': dirout
                               })
            else:
                msg = (
                    'Was unable to kick off reV econ job "{}". '
                    'Please see the stdout error messages'.format(node_name))

        click.echo(msg)
        logger.info(msg)
Example #13
def submit_from_config(ctx, name, cf_file, year, config, verbose):
    """Function to submit one year from a config file.

    Parameters
    ----------
    ctx : cli.ctx
        Click context object. Use case: data = ctx.obj['key']
    name : str
        Job name.
    cf_file : str
        reV generation file with capacity factors to calculate econ for.
    year : int | str | NoneType
        4 digit year or None.
    config : reV.config.EconConfig
        Econ config object.
    verbose : bool
        Flag to turn on debug logging.
    """

    # set the year-specific variables
    ctx.obj['CF_FILE'] = cf_file
    ctx.obj['CF_YEAR'] = year

    # check to make sure that the year matches the resource file
    if str(year) not in cf_file:
        warn('reV gen results file and year do not appear to match. '
             'Expected the string representation of the year '
             'to be in the generation results file name. '
             'Year: {}, generation results file: {}'.format(year, cf_file))

    # if the year isn't in the name, add it before setting the file output
    ctx.obj['FOUT'] = make_fout(name, year)
    if config.append:
        ctx.obj['FOUT'] = os.path.basename(cf_file)

    # invoke direct methods based on the config execution option
    if config.execution_control.option == 'local':
        name_year = make_fout(name, year).replace('.h5', '')
        name_year = name_year.replace('gen', 'econ')
        ctx.obj['NAME'] = name_year
        status = Status.retrieve_job_status(config.dirout, 'econ', name_year)
        if status != 'successful':
            Status.add_job(config.dirout,
                           'econ',
                           name_year,
                           replace=True,
                           job_attrs={
                               'hardware': 'local',
                               'fout': ctx.obj['FOUT'],
                               'dirout': config.dirout
                           })
            ctx.invoke(local,
                       max_workers=config.execution_control.max_workers,
                       timeout=config.timeout,
                       points_range=None,
                       verbose=verbose)

    elif config.execution_control.option in ('eagle', 'slurm'):
        if not parse_year(name, option='bool') and year:
            # Add year to name before submitting
            ctx.obj['NAME'] = '{}_{}'.format(name, str(year))
        ctx.invoke(slurm,
                   nodes=config.execution_control.nodes,
                   alloc=config.execution_control.allocation,
                   walltime=config.execution_control.walltime,
                   memory=config.execution_control.memory,
                   feature=config.execution_control.feature,
                   module=config.execution_control.module,
                   conda_env=config.execution_control.conda_env,
                   stdout_path=os.path.join(config.logdir, 'stdout'),
                   verbose=verbose)
Example #14
def slurm(ctx, alloc, walltime, feature, memory, module, conda_env,
          stdout_path):
    """slurm (Eagle) submission tool for reV supply curve aggregation."""
    name = ctx.obj['NAME']
    excl_fpath = ctx.obj['EXCL_FPATH']
    gen_fpath = ctx.obj['GEN_FPATH']
    res_fpath = ctx.obj['RES_FPATH']
    tm_dset = ctx.obj['TM_DSET']
    excl_dict = ctx.obj['EXCL_DICT']
    check_excl_layers = ctx.obj['CHECK_LAYERS']
    res_class_dset = ctx.obj['RES_CLASS_DSET']
    res_class_bins = ctx.obj['RES_CLASS_BINS']
    cf_dset = ctx.obj['CF_DSET']
    lcoe_dset = ctx.obj['LCOE_DSET']
    data_layers = ctx.obj['DATA_LAYERS']
    resolution = ctx.obj['RESOLUTION']
    excl_area = ctx.obj['EXCL_AREA']
    power_density = ctx.obj['POWER_DENSITY']
    area_filter_kernel = ctx.obj['AREA_FILTER_KERNEL']
    min_area = ctx.obj['MIN_AREA']
    friction_fpath = ctx.obj['FRICTION_FPATH']
    friction_dset = ctx.obj['FRICTION_DSET']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, excl_fpath, gen_fpath, res_fpath,
                       tm_dset, excl_dict, check_excl_layers,
                       res_class_dset, res_class_bins,
                       cf_dset, lcoe_dset, data_layers,
                       resolution, excl_area,
                       power_density, area_filter_kernel, min_area,
                       friction_fpath, friction_dset,
                       out_dir, log_dir, verbose)

    status = Status.retrieve_job_status(out_dir, 'supply-curve-aggregation',
                                        name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'
               .format(name, out_dir))
    else:
        logger.info('Running reV SC aggregation on SLURM with '
                    'node name "{}"'.format(name))
        slurm = SLURM(cmd, alloc=alloc, memory=memory,
                      walltime=walltime, feature=feature,
                      name=name, stdout_path=stdout_path,
                      conda_env=conda_env, module=module)
        if slurm.id:
            msg = ('Kicked off reV SC aggregation job "{}" '
                   '(SLURM jobid #{}).'
                   .format(name, slurm.id))
            Status.add_job(
                out_dir, 'supply-curve-aggregation', name, replace=True,
                job_attrs={'job_id': slurm.id, 'hardware': 'eagle',
                           'fout': '{}.csv'.format(name), 'dirout': out_dir})
        else:
            msg = ('Was unable to kick off reV SC job "{}". '
                   'Please see the stdout error messages'
                   .format(name))
    click.echo(msg)
    logger.info(msg)
Example #15
def launch_slurm(config, verbose):
    """
    Launch slurm QA/QC job

    Parameters
    ----------
    config : reV QA/QC config object
        reV QA/QC configuration object.
    verbose : bool
        Flag to turn on debug logging.
    """

    out_dir = config.dirout
    log_file = os.path.join(config.logdir, config.name + '.log')
    stdout_path = os.path.join(config.logdir, 'stdout/')

    node_cmd = []
    terminal = False
    for i, module in enumerate(config.module_names):
        module_config = config.get_module_inputs(module)
        fpaths = module_config.fpath

        if isinstance(fpaths, (str, type(None))):
            fpaths = [fpaths]

        for j, fpath in enumerate(fpaths):
            if (i == len(config.module_names) - 1) and (j == len(fpaths) - 1):
                terminal = True
            if module.lower() == 'exclusions':
                node_cmd.append(
                    get_excl_cmd(config.name, module_config.excl_fpath,
                                 out_dir, module_config.sub_dir,
                                 module_config.excl_dict,
                                 module_config.area_filter_kernel,
                                 module_config.min_area,
                                 module_config.plot_type, module_config.cmap,
                                 module_config.plot_step, log_file, verbose,
                                 terminal))
            elif fpath.endswith('.h5'):
                node_cmd.append(
                    get_h5_cmd(config.name, fpath, out_dir,
                               module_config.sub_dir, module_config.dsets,
                               module_config.group, module_config.process_size,
                               module_config.max_workers,
                               module_config.plot_type, module_config.cmap,
                               log_file, verbose, terminal))
            elif fpath.endswith('.csv'):
                node_cmd.append(
                    get_sc_cmd(config.name, fpath, out_dir,
                               module_config.sub_dir, module_config.columns,
                               module_config.plot_type, module_config.cmap,
                               module_config.lcoe, log_file, verbose,
                               terminal))
            else:
                msg = ("Cannot run QA/QC for {}: 'fpath' must be a '*.h5' "
                       "or '*.csv' reV output file, but {} was given!".format(
                           module, fpath))
                logger.error(msg)
                raise ValueError(msg)

    status = Status.retrieve_job_status(out_dir, 'qa-qc', config.name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(config.name, out_dir))
    else:
        node_cmd = '\n'.join(node_cmd)
        logger.info('Running reV QA-QC on SLURM with '
                    'node name "{}"'.format(config.name))
        slurm = SLURM(node_cmd,
                      name=config.name,
                      alloc=config.execution_control.allocation,
                      memory=config.execution_control.memory,
                      feature=config.execution_control.feature,
                      walltime=config.execution_control.walltime,
                      conda_env=config.execution_control.conda_env,
                      module=config.execution_control.module,
                      stdout_path=stdout_path)
        if slurm.id:
            msg = ('Kicked off reV QA-QC job "{}" '
                   '(SLURM jobid #{}).'.format(config.name, slurm.id))
            Status.add_job(out_dir,
                           'qa-qc',
                           config.name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'dirout': out_dir
                           })
        else:
            msg = ('Was unable to kick off reV QA-QC job "{}". '
                   'Please see the stdout error messages'.format(config.name))

    click.echo(msg)
    logger.info(msg)
Example #16
def from_config(ctx, config_file, verbose):
    """Run reV gen from a config file."""
    name = ctx.obj['NAME']

    # Instantiate the config object
    config = CollectionConfig(config_file)

    # take name from config if not default
    if config.name.lower() != 'rev':
        name = config.name
        ctx.obj['NAME'] = name

    # Enforce verbosity if logging level is specified in the config
    if config.log_level == logging.DEBUG:
        verbose = True

    # make output directory if does not exist
    if not os.path.exists(config.dirout):
        os.makedirs(config.dirout)

    # initialize loggers.
    init_mult(name,
              config.logdir,
              modules=[__name__, 'reV.handlers.collection'],
              verbose=verbose)

    # Initial log statements
    logger.info(
        'Running reV collection from config file: "{}"'.format(config_file))
    logger.info('Target output directory: "{}"'.format(config.dirout))
    logger.info('Target logging directory: "{}"'.format(config.logdir))
    logger.info('Target collection directory: "{}"'.format(config.coldir))
    logger.info('The following project points were specified: "{}"'.format(
        config.get('project_points', None)))
    logger.debug('The full configuration input is as follows:\n{}'.format(
        pprint.pformat(config, indent=4)))

    # set config objects to be passed through invoke to direct methods
    ctx.obj['H5_DIR'] = config.coldir
    ctx.obj['LOG_DIR'] = config.logdir
    ctx.obj['DSETS'] = config.dsets
    ctx.obj['PROJECT_POINTS'] = config.project_points
    ctx.obj['PURGE_CHUNKS'] = config.purge_chunks
    ctx.obj['VERBOSE'] = verbose

    for file_prefix in config.file_prefixes:
        ctx.obj['NAME'] = name + '_{}'.format(file_prefix)
        ctx.obj['H5_FILE'] = os.path.join(config.dirout, file_prefix + '.h5')
        ctx.obj['FILE_PREFIX'] = file_prefix

        if config.execution_control.option == 'local':
            status = Status.retrieve_job_status(config.dirout, 'collect',
                                                ctx.obj['NAME'])
            if status != 'successful':
                Status.add_job(config.dirout,
                               'collect',
                               ctx.obj['NAME'],
                               replace=True,
                               job_attrs={
                                   'hardware': 'local',
                                   'fout': file_prefix + '.h5',
                                   'dirout': config.dirout
                               })
                ctx.invoke(collect)

        elif config.execution_control.option in ('eagle', 'slurm'):
            ctx.invoke(collect_slurm,
                       alloc=config.execution_control.allocation,
                       memory=config.execution_control.memory,
                       walltime=config.execution_control.walltime,
                       feature=config.execution_control.feature,
                       conda_env=config.execution_control.conda_env,
                       module=config.execution_control.module,
                       stdout_path=os.path.join(config.logdir, 'stdout'),
                       verbose=verbose)
Example #17
def collect_slurm(ctx, alloc, memory, walltime, feature, conda_env, module,
                  stdout_path, verbose):
    """Run collection on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    h5_file = ctx.obj['H5_FILE']
    h5_dir = ctx.obj['H5_DIR']
    log_dir = ctx.obj['LOG_DIR']
    project_points = ctx.obj['PROJECT_POINTS']
    dsets = ctx.obj['DSETS']
    file_prefix = ctx.obj['FILE_PREFIX']
    purge_chunks = ctx.obj['PURGE_CHUNKS']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    slurm_manager = ctx.obj.get('SLURM_MANAGER', None)
    if slurm_manager is None:
        slurm_manager = SLURM()
        ctx.obj['SLURM_MANAGER'] = slurm_manager

    cmd = get_node_cmd(name,
                       h5_file,
                       h5_dir,
                       project_points,
                       dsets,
                       file_prefix=file_prefix,
                       log_dir=log_dir,
                       purge_chunks=purge_chunks,
                       verbose=verbose)

    status = Status.retrieve_job_status(os.path.dirname(h5_file),
                                        'collect',
                                        name,
                                        hardware='eagle',
                                        subprocess_manager=slurm_manager)

    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, os.path.dirname(h5_file)))
    elif 'fail' not in str(status).lower() and status is not None:
        msg = ('Job "{}" was found with status "{}", not resubmitting'.format(
            name, status))
    else:
        logger.info(
            'Running reV collection on SLURM with node name "{}", '
            'collecting data to "{}" from "{}" with file prefix "{}".'.format(
                name, h5_file, h5_dir, file_prefix))
        out = slurm_manager.sbatch(cmd,
                                   alloc=alloc,
                                   memory=memory,
                                   walltime=walltime,
                                   feature=feature,
                                   name=name,
                                   stdout_path=stdout_path,
                                   conda_env=conda_env,
                                   module=module)[0]
        if out:
            msg = (
                'Kicked off reV collection job "{}" (SLURM jobid #{}).'.format(
                    name, out))
            # add job to reV status file.
            Status.add_job(os.path.dirname(h5_file),
                           'collect',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': out,
                               'hardware': 'eagle',
                               'fout': os.path.basename(h5_file),
                               'dirout': os.path.dirname(h5_file)
                           })

    click.echo(msg)
    logger.info(msg)
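The SLURM tools that share a slurm_manager (this collection example and the similar econ, offshore, and aggregation tools) all repeat the same submit-or-skip pattern: look the job up in the status JSON, skip it if it is successful or still pending, otherwise submit with slurm_manager.sbatch() and record the job via Status.add_job. A condensed sketch of that pattern, using only calls that appear in the examples; the submit_if_needed helper name, the import paths, and the keyword pass-through are assumptions rather than reV API:

import logging

import click
from reV.pipeline.status import Status  # import path is an assumption

logger = logging.getLogger(__name__)


def submit_if_needed(cmd, out_dir, module, name, slurm_manager,
                     **sbatch_kwargs):
    """Hypothetical helper condensing the submit-or-skip pattern above."""
    status = Status.retrieve_job_status(out_dir, module, name,
                                        hardware='eagle',
                                        subprocess_manager=slurm_manager)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    elif 'fail' not in str(status).lower() and status is not None:
        msg = ('Job "{}" was found with status "{}", '
               'not resubmitting.'.format(name, status))
    else:
        out = slurm_manager.sbatch(cmd, name=name, **sbatch_kwargs)[0]
        if out:
            Status.add_job(out_dir, module, name, replace=True,
                           job_attrs={'job_id': out, 'hardware': 'eagle',
                                      'dirout': out_dir})
            msg = ('Kicked off job "{}" (SLURM jobid #{}).'.format(name, out))
        else:
            msg = ('Was unable to kick off job "{}". Please see the stdout '
                   'error messages.'.format(name))
    click.echo(msg)
    logger.info(msg)

# Usage mirrors the examples: slurm_manager is the SLURM() instance stashed in
# ctx.obj['SLURM_MANAGER'], and sbatch_kwargs carries alloc, memory, walltime,
# feature, stdout_path, conda_env and module.
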
Example #18
def slurm(ctx, alloc, feature, memory, walltime, module, conda_env,
          stdout_path):
    """slurm (Eagle) submission tool for reV supply curve aggregation."""

    name = ctx.obj['NAME']
    gen_fpath = ctx.obj['GEN_FPATH']
    offshore_fpath = ctx.obj['OFFSHORE_FPATH']
    project_points = ctx.obj['PROJECT_POINTS']
    sam_files = ctx.obj['SAM_FILES']
    log_dir = ctx.obj['LOG_DIR']
    out_dir = ctx.obj['OUT_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, gen_fpath, offshore_fpath, project_points,
                       sam_files, log_dir, verbose)
    slurm_manager = ctx.obj.get('SLURM_MANAGER', None)
    if slurm_manager is None:
        slurm_manager = SLURM()
        ctx.obj['SLURM_MANAGER'] = slurm_manager

    status = Status.retrieve_job_status(out_dir,
                                        'offshore',
                                        name,
                                        hardware='eagle',
                                        subprocess_manager=slurm_manager)

    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    elif 'fail' not in str(status).lower() and status is not None:
        msg = ('Job "{}" was found with status "{}", not resubmitting'.format(
            name, status))
    else:
        logger.info('Running reV offshore aggregation on SLURM with '
                    'node name "{}"'.format(name))
        out = slurm_manager.sbatch(cmd,
                                   alloc=alloc,
                                   memory=memory,
                                   walltime=walltime,
                                   feature=feature,
                                   name=name,
                                   stdout_path=stdout_path,
                                   conda_env=conda_env,
                                   module=module)[0]
        if out:
            msg = ('Kicked off reV offshore job "{}" '
                   '(SLURM jobid #{}).'.format(name, out))
            Status.add_job(out_dir,
                           'offshore',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': out,
                               'hardware': 'eagle',
                               'fout': '{}.csv'.format(name),
                               'dirout': out_dir
                           })

    click.echo(msg)
    logger.info(msg)
Example #19
def slurm(ctx, alloc, nodes, memory, walltime, feature, module, conda_env,
          stdout_path, verbose):
    """Run econ on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    cf_file = ctx.obj['CF_FILE']
    year = ctx.obj['YEAR']
    site_data = ctx.obj['SITE_DATA']
    sites_per_worker = ctx.obj['SITES_PER_WORKER']
    max_workers = ctx.obj['MAX_WORKERS']
    timeout = ctx.obj['TIMEOUT']
    fout = ctx.obj['FOUT']
    dirout = ctx.obj['DIROUT']
    logdir = ctx.obj['LOGDIR']
    output_request = ctx.obj['OUTPUT_REQUEST']
    append = ctx.obj['APPEND']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    slurm_manager = ctx.obj.get('SLURM_MANAGER', None)
    if slurm_manager is None:
        slurm_manager = SLURM()
        ctx.obj['SLURM_MANAGER'] = slurm_manager

    if append:
        pc = [None]
    else:
        pc = get_node_pc(points, sam_files, nodes)

    for i, split in enumerate(pc):
        node_name, fout_node = get_node_name_fout(name,
                                                  fout,
                                                  i,
                                                  pc,
                                                  hpc='slurm')
        node_name = node_name.replace('gen', 'econ')

        points_range = split.split_range if split is not None else None
        cmd = get_node_cmd(node_name,
                           sam_files,
                           cf_file,
                           year=year,
                           site_data=site_data,
                           points=points,
                           points_range=points_range,
                           sites_per_worker=sites_per_worker,
                           max_workers=max_workers,
                           timeout=timeout,
                           fout=fout_node,
                           dirout=dirout,
                           logdir=logdir,
                           output_request=output_request,
                           append=append,
                           verbose=verbose)

        status = Status.retrieve_job_status(dirout,
                                            'econ',
                                            node_name,
                                            hardware='eagle',
                                            subprocess_manager=slurm_manager)

        if status == 'successful':
            msg = ('Job "{}" is successful in status json found in "{}", '
                   'not re-running.'.format(node_name, dirout))
        elif 'fail' not in str(status).lower() and status is not None:
            msg = (
                'Job "{}" was found with status "{}", not resubmitting'.format(
                    node_name, status))
        else:
            logger.info('Running reV econ on SLURM with node name "{}" for '
                        '{} (points range: {}).'.format(
                            node_name, pc, points_range))
            # create and submit the SLURM job
            out = slurm_manager.sbatch(cmd,
                                       alloc=alloc,
                                       memory=memory,
                                       walltime=walltime,
                                       feature=feature,
                                       name=node_name,
                                       stdout_path=stdout_path,
                                       conda_env=conda_env,
                                       module=module)[0]
            if out:
                msg = (
                    'Kicked off reV econ job "{}" (SLURM jobid #{}).'.format(
                        node_name, out))
                # add job to reV status file.
                Status.add_job(dirout,
                               'econ',
                               node_name,
                               replace=True,
                               job_attrs={
                                   'job_id': out,
                                   'hardware': 'eagle',
                                   'fout': fout_node,
                                   'dirout': dirout
                               })

        click.echo(msg)
        logger.info(msg)
Example #20
def from_config(ctx, config_file, verbose):
    """Run reV SC aggregation from a config file."""
    name = ctx.obj['NAME']

    # Instantiate the config object
    config = SupplyCurveAggregationConfig(config_file)

    # take name from config if not default
    if config.name.lower() != 'rev':
        name = config.name
        ctx.obj['NAME'] = name

    # Enforce verbosity if logging level is specified in the config
    if config.log_level == logging.DEBUG:
        verbose = True

    # initialize loggers
    init_mult(
        name,
        config.logdir,
        modules=[__name__, 'reV.config', 'reV.utilities', 'rex.utilities'],
        verbose=verbose)

    # Initial log statements
    logger.info('Running reV supply curve aggregation from config '
                'file: "{}"'.format(config_file))
    logger.info('Target output directory: "{}"'.format(config.dirout))
    logger.info('Target logging directory: "{}"'.format(config.logdir))
    logger.debug('The full configuration input is as follows:\n{}'.format(
        pprint.pformat(config, indent=4)))

    if config.execution_control.option == 'local':
        status = Status.retrieve_job_status(config.dirout,
                                            'supply-curve-aggregation', name)
        if status != 'successful':
            Status.add_job(config.dirout,
                           'supply-curve-aggregation',
                           name,
                           replace=True,
                           job_attrs={
                               'hardware': 'local',
                               'fout': '{}.csv'.format(name),
                               'dirout': config.dirout
                           })
            ctx.invoke(direct,
                       excl_fpath=config.excl_fpath,
                       gen_fpath=config.gen_fpath,
                       econ_fpath=config.econ_fpath,
                       res_fpath=config.res_fpath,
                       tm_dset=config.tm_dset,
                       excl_dict=config.excl_dict,
                       check_excl_layers=config.check_excl_layers,
                       res_class_dset=config.res_class_dset,
                       res_class_bins=config.res_class_bins,
                       cf_dset=config.cf_dset,
                       lcoe_dset=config.lcoe_dset,
                       h5_dsets=config.h5_dsets,
                       data_layers=config.data_layers,
                       resolution=config.resolution,
                       excl_area=config.excl_area,
                       power_density=config.power_density,
                       area_filter_kernel=config.area_filter_kernel,
                       min_area=config.min_area,
                       friction_fpath=config.friction_fpath,
                       friction_dset=config.friction_dset,
                       out_dir=config.dirout,
                       log_dir=config.logdir,
                       verbose=verbose)

    elif config.execution_control.option in ('eagle', 'slurm'):

        ctx.obj['NAME'] = name
        ctx.obj['EXCL_FPATH'] = config.excl_fpath
        ctx.obj['GEN_FPATH'] = config.gen_fpath
        ctx.obj['ECON_FPATH'] = config.econ_fpath
        ctx.obj['RES_FPATH'] = config.res_fpath
        ctx.obj['TM_DSET'] = config.tm_dset
        ctx.obj['EXCL_DICT'] = config.excl_dict
        ctx.obj['CHECK_LAYERS'] = config.check_excl_layers
        ctx.obj['RES_CLASS_DSET'] = config.res_class_dset
        ctx.obj['RES_CLASS_BINS'] = config.res_class_bins
        ctx.obj['CF_DSET'] = config.cf_dset
        ctx.obj['LCOE_DSET'] = config.lcoe_dset
        ctx.obj['H5_DSETS'] = config.h5_dsets
        ctx.obj['DATA_LAYERS'] = config.data_layers
        ctx.obj['RESOLUTION'] = config.resolution
        ctx.obj['EXCL_AREA'] = config.excl_area
        ctx.obj['POWER_DENSITY'] = config.power_density
        ctx.obj['AREA_FILTER_KERNEL'] = config.area_filter_kernel
        ctx.obj['MIN_AREA'] = config.min_area
        ctx.obj['FRICTION_FPATH'] = config.friction_fpath
        ctx.obj['FRICTION_DSET'] = config.friction_dset
        ctx.obj['OUT_DIR'] = config.dirout
        ctx.obj['LOG_DIR'] = config.logdir
        ctx.obj['VERBOSE'] = verbose

        ctx.invoke(slurm,
                   alloc=config.execution_control.allocation,
                   memory=config.execution_control.memory,
                   feature=config.execution_control.feature,
                   walltime=config.execution_control.walltime,
                   conda_env=config.execution_control.conda_env,
                   module=config.execution_control.module)
Example #21
def slurm(ctx, alloc, walltime, feature, memory, module, conda_env,
          stdout_path):
    """slurm (Eagle) submission tool for reV supply curve aggregation."""
    name = ctx.obj['NAME']
    excl_fpath = ctx.obj['EXCL_FPATH']
    gen_fpath = ctx.obj['GEN_FPATH']
    econ_fpath = ctx.obj['ECON_FPATH']
    res_fpath = ctx.obj['RES_FPATH']
    tm_dset = ctx.obj['TM_DSET']
    excl_dict = ctx.obj['EXCL_DICT']
    check_excl_layers = ctx.obj['CHECK_LAYERS']
    res_class_dset = ctx.obj['RES_CLASS_DSET']
    res_class_bins = ctx.obj['RES_CLASS_BINS']
    cf_dset = ctx.obj['CF_DSET']
    lcoe_dset = ctx.obj['LCOE_DSET']
    h5_dsets = ctx.obj['H5_DSETS']
    data_layers = ctx.obj['DATA_LAYERS']
    resolution = ctx.obj['RESOLUTION']
    excl_area = ctx.obj['EXCL_AREA']
    power_density = ctx.obj['POWER_DENSITY']
    area_filter_kernel = ctx.obj['AREA_FILTER_KERNEL']
    min_area = ctx.obj['MIN_AREA']
    friction_fpath = ctx.obj['FRICTION_FPATH']
    friction_dset = ctx.obj['FRICTION_DSET']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, excl_fpath, gen_fpath, econ_fpath, res_fpath,
                       tm_dset, excl_dict, check_excl_layers, res_class_dset,
                       res_class_bins, cf_dset, lcoe_dset, h5_dsets,
                       data_layers, resolution, excl_area, power_density,
                       area_filter_kernel, min_area, friction_fpath,
                       friction_dset, out_dir, log_dir, verbose)

    slurm_manager = ctx.obj.get('SLURM_MANAGER', None)
    if slurm_manager is None:
        slurm_manager = SLURM()
        ctx.obj['SLURM_MANAGER'] = slurm_manager

    status = Status.retrieve_job_status(out_dir,
                                        'supply-curve-aggregation',
                                        name,
                                        hardware='eagle',
                                        subprocess_manager=slurm_manager)

    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    elif 'fail' not in str(status).lower() and status is not None:
        msg = ('Job "{}" was found with status "{}", not resubmitting'.format(
            name, status))
    else:
        logger.info('Running reV SC aggregation on SLURM with '
                    'node name "{}"'.format(name))
        out = slurm_manager.sbatch(cmd,
                                   alloc=alloc,
                                   memory=memory,
                                   walltime=walltime,
                                   feature=feature,
                                   name=name,
                                   stdout_path=stdout_path,
                                   conda_env=conda_env,
                                   module=module)[0]
        if out:
            msg = ('Kicked off reV SC aggregation job "{}" '
                   '(SLURM jobid #{}).'.format(name, out))
            Status.add_job(out_dir,
                           'supply-curve-aggregation',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': out,
                               'hardware': 'eagle',
                               'fout': '{}.csv'.format(name),
                               'dirout': out_dir
                           })

    click.echo(msg)
    logger.info(msg)
Example #22
def submit_from_config(ctx, name, year, config, i, verbose=False):
    """Function to submit one year from a config file.

    Parameters
    ----------
    ctx : cli.ctx
        Click context object. Use case: data = ctx.obj['key']
    name : str
        Job name.
    year : int | str | NoneType
        4 digit year or None.
    config : reV.config.GenConfig
        Generation config object.
    i : int
        Index of the year in the analysis years list.
    verbose : bool
        Flag to turn on debug logging. Default is not verbose.
    """
    res_files = config.parse_res_files()
    # set the year-specific variables
    ctx.obj['RES_FILE'] = res_files[i]

    # check to make sure that the year matches the resource file
    if str(year) not in res_files[i]:
        warn('Resource file and year do not appear to match. '
             'Expected the string representation of the year '
             'to be in the resource file name. '
             'Year: {}, Resource file: {}'.format(year, res_files[i]))

    # if the year isn't in the name, add it before setting the file output
    ctx.obj['FOUT'] = make_fout(name, year)

    # invoke direct methods based on the config execution option
    if config.execution_control.option == 'local':
        name_year = make_fout(name, year).replace('.h5', '')
        ctx.obj['NAME'] = name_year
        status = Status.retrieve_job_status(config.dirout, 'generation',
                                            name_year)
        if status != 'successful':
            Status.add_job(config.dirout,
                           'generation',
                           name_year,
                           replace=True,
                           job_attrs={
                               'hardware': 'local',
                               'fout': ctx.obj['FOUT'],
                               'dirout': config.dirout
                           })
            ctx.invoke(local,
                       max_workers=config.execution_control.max_workers,
                       timeout=config.timeout,
                       points_range=None,
                       verbose=verbose)

    elif config.execution_control.option in ('eagle', 'slurm'):
        if not parse_year(name, option='bool') and year:
            # Add year to name before submitting
            ctx.obj['NAME'] = '{}_{}'.format(name, str(year))
        ctx.invoke(slurm,
                   nodes=config.execution_control.nodes,
                   alloc=config.execution_control.allocation,
                   walltime=config.execution_control.walltime,
                   memory=config.execution_control.memory,
                   feature=config.execution_control.feature,
                   conda_env=config.execution_control.conda_env,
                   module=config.execution_control.module,
                   stdout_path=os.path.join(config.logdir, 'stdout'),
                   verbose=verbose)
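submit_from_config warns when the year string is missing from the resource file name and appends the year to the job name before submission. A small illustrative sketch of that naming convention, where _parse_year is a hypothetical stand-in for reV's parse_year utility:

import re
import warnings


def _parse_year(name):
    """Return the first 4-digit year found in a string, or None."""
    match = re.search(r'(19|20)\d{2}', name)
    return int(match.group(0)) if match else None


def make_job_name(name, year, res_file=None):
    """Append the analysis year to a job name if it is not already present."""
    if res_file is not None and str(year) not in res_file:
        warnings.warn('Year {} not found in resource file {}'
                      .format(year, res_file))
    if year is not None and _parse_year(name) is None:
        name = '{}_{}'.format(name, year)
    return name


# e.g. make_job_name('rev_gen', 2012, 'nsrdb_2012.h5') -> 'rev_gen_2012'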
Example #23
0
def collect_slurm(ctx, alloc, memory, walltime, feature, conda_env, module,
                  stdout_path, verbose):
    """Run collection on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    h5_file = ctx.obj['H5_FILE']
    h5_dir = ctx.obj['H5_DIR']
    log_dir = ctx.obj['LOG_DIR']
    project_points = ctx.obj['PROJECT_POINTS']
    dsets = ctx.obj['DSETS']
    file_prefix = ctx.obj['FILE_PREFIX']
    purge_chunks = ctx.obj['PURGE_CHUNKS']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    cmd = get_node_cmd(name,
                       h5_file,
                       h5_dir,
                       project_points,
                       dsets,
                       file_prefix=file_prefix,
                       log_dir=log_dir,
                       purge_chunks=purge_chunks,
                       verbose=verbose)

    status = Status.retrieve_job_status(os.path.dirname(h5_file), 'collect',
                                        name)
    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, os.path.dirname(h5_file)))
    else:
        logger.info(
            'Running reV collection on SLURM with node name "{}", '
            'collecting data to "{}" from "{}" with file prefix "{}".'.format(
                name, h5_file, h5_dir, file_prefix))
        # create and submit the SLURM job
        slurm = SLURM(cmd,
                      alloc=alloc,
                      memory=memory,
                      walltime=walltime,
                      feature=feature,
                      name=name,
                      conda_env=conda_env,
                      module=module,
                      stdout_path=stdout_path)
        if slurm.id:
            msg = (
                'Kicked off reV collection job "{}" (SLURM jobid #{}).'.format(
                    name, slurm.id))
            # add job to reV status file.
            Status.add_job(os.path.dirname(h5_file),
                           'collect',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': slurm.id,
                               'hardware': 'eagle',
                               'fout': os.path.basename(h5_file),
                               'dirout': os.path.dirname(h5_file)
                           })
        else:
            msg = ('Was unable to kick off reV collection job "{}". '
                   'Please see the stdout error messages'.format(name))
    click.echo(msg)
    logger.info(msg)
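collect_slurm builds a single command string for the compute node via get_node_cmd. Below is a hypothetical sketch of how such a command string could be assembled from keyword arguments; the flag names are illustrative, not the actual reV CLI flags:

def build_node_cmd(entry_point, name, **kwargs):
    """Build a shell command string from keyword arguments, skipping Nones."""
    parts = [entry_point, '-n {}'.format(name)]
    for key, val in kwargs.items():
        if val is None:
            continue
        flag = '--{}'.format(key.replace('_', '-'))
        if isinstance(val, bool):
            if val:
                parts.append(flag)  # boolean flags are bare switches
        else:
            parts.append('{} {}'.format(flag, val))
    return ' '.join(parts)


# e.g. build_node_cmd('python -m collect_cli', 'job0',
#                     h5_file='out.h5', purge_chunks=True)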
Example #24
0
def slurm(ctx, alloc, memory, walltime, feature, conda_env, module,
          stdout_path):
    """slurm (Eagle) submission tool for reV representative profiles."""

    name = ctx.obj['NAME']
    gen_fpath = ctx.obj['GEN_FPATH']
    rev_summary = ctx.obj['REV_SUMMARY']
    reg_cols = ctx.obj['REG_COLS']
    cf_dset = ctx.obj['CF_DSET']
    rep_method = ctx.obj['REP_METHOD']
    err_method = ctx.obj['ERR_METHOD']
    weight = ctx.obj['WEIGHT']
    n_profiles = ctx.obj['N_PROFILES']
    out_dir = ctx.obj['OUT_DIR']
    log_dir = ctx.obj['LOG_DIR']
    max_workers = ctx.obj['MAX_WORKERS']
    aggregate_profiles = ctx.obj['AGGREGATE_PROFILES']
    verbose = ctx.obj['VERBOSE']

    if stdout_path is None:
        stdout_path = os.path.join(log_dir, 'stdout/')

    cmd = get_node_cmd(name, gen_fpath, rev_summary, reg_cols, cf_dset,
                       rep_method, err_method, weight, n_profiles, out_dir,
                       log_dir, max_workers, aggregate_profiles, verbose)

    slurm_manager = ctx.obj.get('SLURM_MANAGER', None)
    if slurm_manager is None:
        slurm_manager = SLURM()
        ctx.obj['SLURM_MANAGER'] = slurm_manager

    status = Status.retrieve_job_status(out_dir,
                                        'rep-profiles',
                                        name,
                                        hardware='eagle',
                                        subprocess_manager=slurm_manager)

    if status == 'successful':
        msg = ('Job "{}" is successful in status json found in "{}", '
               'not re-running.'.format(name, out_dir))
    elif 'fail' not in str(status).lower() and status is not None:
        msg = ('Job "{}" was found with status "{}", not resubmitting'.format(
            name, status))
    else:
        logger.info('Running reV SC rep profiles on SLURM with '
                    'node name "{}"'.format(name))
        out = slurm_manager.sbatch(cmd,
                                   alloc=alloc,
                                   memory=memory,
                                   walltime=walltime,
                                   feature=feature,
                                   name=name,
                                   stdout_path=stdout_path,
                                   conda_env=conda_env,
                                   module=module)[0]
        if out:
            msg = ('Kicked off reV rep profiles job "{}" '
                   '(SLURM jobid #{}).'.format(name, out))
            Status.add_job(out_dir,
                           'rep-profiles',
                           name,
                           replace=True,
                           job_attrs={
                               'job_id': out,
                               'hardware': 'eagle',
                               'fout': '{}.h5'.format(name),
                               'dirout': out_dir
                           })
        else:
            msg = ('Was unable to kick off reV rep profiles job "{}". '
                   'Please see the stdout error messages'.format(name))

    click.echo(msg)
    logger.info(msg)
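The 'SLURM_MANAGER' lookup above is a get-or-create cache on the click context, so every submission within a single CLI invocation reuses one manager object (and any queue-state caching it performs). A minimal sketch of that pattern, with a hypothetical DummyManager standing in for the rex SLURM class:

class DummyManager:
    """Stand-in for a manager that would wrap sbatch/squeue calls."""


def get_or_create_manager(ctx_obj, key='SLURM_MANAGER', factory=DummyManager):
    """Return the cached manager from a context dict, creating it if absent."""
    manager = ctx_obj.get(key)
    if manager is None:
        manager = factory()
        ctx_obj[key] = manager
    return manager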
Example #25
0
def from_config(ctx, config_file, verbose):
    """Run reV QA/QC from a config file."""
    name = ctx.obj['NAME']

    # Instantiate the config object
    config = QaQcConfig(config_file)

    # take name from config if not default
    if config.name.lower() != 'rev':
        name = config.name
        ctx.obj['NAME'] = name

    # Enforce verbosity if logging level is specified in the config
    if config.log_level == logging.DEBUG:
        verbose = True

    # initialize loggers
    init_mult(name, config.logdir, modules=['reV', 'rex'], verbose=verbose)

    # Initial log statements
    logger.info('Running reV QA/QC from config '
                'file: "{}"'.format(config_file))
    logger.info('Target output directory: "{}"'.format(config.dirout))
    logger.info('Target logging directory: "{}"'.format(config.logdir))
    logger.debug('The full configuration input is as follows:\n{}'.format(
        pprint.pformat(config, indent=4)))

    if config.execution_control.option == 'local':
        status = Status.retrieve_job_status(config.dirout, 'qa-qc', name)
        if status != 'successful':
            Status.add_job(config.dirout,
                           'qa-qc',
                           name,
                           replace=True,
                           job_attrs={
                               'hardware': 'local',
                               'dirout': config.dirout
                           })

            terminal = False
            for i, module in enumerate(config.module_names):
                if i == len(config.module_names) - 1:
                    terminal = True

                module_config = config.get_module_inputs(module)
                fpath = module_config.fpath
                if module.lower() == 'exclusions':
                    log_file = os.path.join(
                        config.logdir,
                        os.path.basename(fpath).replace('.h5', '.log'))
                    afk = module_config.area_filter_kernel
                    ctx.invoke(exclusions,
                               excl_fpath=fpath,
                               out_dir=config.dirout,
                               sub_dir=module_config.sub_dir,
                               excl_dict=module_config.excl_dict,
                               area_filter_kernel=afk,
                               min_area=module_config.min_area,
                               plot_type=module_config.plot_type,
                               cmap=module_config.cmap,
                               plot_step=module_config.plot_step,
                               log_file=log_file,
                               verbose=verbose,
                               terminal=terminal)

                elif fpath.endswith('.h5'):
                    log_file = os.path.join(
                        config.logdir,
                        os.path.basename(fpath).replace('.h5', '.log'))
                    ctx.invoke(reV_h5,
                               h5_file=fpath,
                               out_dir=config.dirout,
                               sub_dir=module_config.sub_dir,
                               dsets=module_config.dsets,
                               group=module_config.group,
                               process_size=module_config.process_size,
                               max_workers=module_config.max_workers,
                               plot_type=module_config.plot_type,
                               cmap=module_config.cmap,
                               log_file=log_file,
                               verbose=verbose,
                               terminal=terminal)

                elif fpath.endswith('.csv'):
                    log_file = os.path.join(
                        config.logdir,
                        os.path.basename(fpath).replace('.csv', '.log'))
                    ctx.invoke(supply_curve,
                               sc_table=fpath,
                               out_dir=config.dirout,
                               sub_dir=module_config.sub_dir,
                               columns=module_config.columns,
                               plot_type=module_config.plot_type,
                               cmap=module_config.cmap,
                               lcoe=module_config.lcoe,
                               log_file=log_file,
                               verbose=verbose,
                               terminal=terminal)
                else:
                    msg = (
                        "Cannot run QA/QC for {}: 'fpath' must be a '*.h5' "
                        "or '*.csv' reV output file, but {} was given!".format(
                            module, fpath))
                    logger.error(msg)
                    raise ValueError(msg)

    elif config.execution_control.option in ('eagle', 'slurm'):
        launch_slurm(config, verbose)
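The QA/QC loop dispatches each module by file type: the exclusions module and '.h5' outputs go to the HDF5 handler, '.csv' outputs go to the supply curve handler, and anything else raises. A minimal sketch of that dispatch-by-extension idea, with hypothetical handler callables:

import os


def dispatch_qaqc(fpath, h5_handler, csv_handler):
    """Route a reV output file to a QA/QC handler based on its extension."""
    ext = os.path.splitext(fpath)[1].lower()
    if ext == '.h5':
        return h5_handler(fpath)
    if ext == '.csv':
        return csv_handler(fpath)
    raise ValueError("Cannot run QA/QC for {}: expected a '*.h5' or '*.csv' "
                     "reV output file.".format(fpath))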
Example #26
0
def from_config(ctx, config_file, verbose):
    """Run reV representative profiles from a config file."""
    name = ctx.obj['NAME']

    # Instantiate the config object
    config = RepProfilesConfig(config_file)

    # take name from config if not default
    if config.name.lower() != 'rev':
        name = config.name
        ctx.obj['NAME'] = name

    # Enforce verbosity if logging level is specified in the config
    if config.log_level == logging.DEBUG:
        verbose = True

    # initialize loggers
    init_mult(
        name,
        config.logdir,
        modules=[__name__, 'reV.config', 'reV.utilities', 'rex.utilities'],
        verbose=verbose)

    # Initial log statements
    logger.info('Running reV representative profiles from config '
                'file: "{}"'.format(config_file))
    logger.info('Target output directory: "{}"'.format(config.dirout))
    logger.info('Target logging directory: "{}"'.format(config.logdir))
    logger.debug('The full configuration input is as follows:\n{}'.format(
        pprint.pformat(config, indent=4)))

    if config.years[0] is not None and '{}' in config.cf_dset:
        fpaths = [config.gen_fpath for _ in config.years]
        names = [name + '_{}'.format(y) for y in config.years]
        dsets = [config.cf_dset.format(y) for y in config.years]

    elif config.years[0] is not None and '{}' in config.gen_fpath:
        fpaths = [config.gen_fpath.format(y) for y in config.years]
        names = [name + '_{}'.format(y) for y in config.years]
        dsets = [config.cf_dset for _ in config.years]

    else:
        fpaths = [config.gen_fpath]
        names = [name]
        dsets = [config.cf_dset]

    for name, gen_fpath, dset in zip(names, fpaths, dsets):

        if config.execution_control.option == 'local':
            status = Status.retrieve_job_status(config.dirout, 'rep-profiles',
                                                name)
            if status != 'successful':
                Status.add_job(config.dirout,
                               'rep-profiles',
                               name,
                               replace=True,
                               job_attrs={
                                   'hardware': 'local',
                                   'fout': '{}.h5'.format(name),
                                   'dirout': config.dirout
                               })
                ctx.invoke(direct,
                           gen_fpath=gen_fpath,
                           rev_summary=config.rev_summary,
                           reg_cols=config.reg_cols,
                           cf_dset=dset,
                           rep_method=config.rep_method,
                           err_method=config.err_method,
                           weight=config.weight,
                           out_dir=config.dirout,
                           log_dir=config.logdir,
                           n_profiles=config.n_profiles,
                           max_workers=config.execution_control.max_workers,
                           aggregate_profiles=config.aggregate_profiles,
                           verbose=verbose)

        elif config.execution_control.option in ('eagle', 'slurm'):
            ctx.obj['NAME'] = name
            ctx.obj['GEN_FPATH'] = gen_fpath
            ctx.obj['REV_SUMMARY'] = config.rev_summary
            ctx.obj['REG_COLS'] = config.reg_cols
            ctx.obj['CF_DSET'] = dset
            ctx.obj['REP_METHOD'] = config.rep_method
            ctx.obj['ERR_METHOD'] = config.err_method
            ctx.obj['WEIGHT'] = config.weight
            ctx.obj['N_PROFILES'] = config.n_profiles
            ctx.obj['OUT_DIR'] = config.dirout
            ctx.obj['LOG_DIR'] = config.logdir
            ctx.obj['MAX_WORKERS'] = config.execution_control.max_workers
            ctx.obj['AGGREGATE_PROFILES'] = config.aggregate_profiles
            ctx.obj['VERBOSE'] = verbose

            ctx.invoke(slurm,
                       alloc=config.execution_control.allocation,
                       memory=config.execution_control.memory,
                       walltime=config.execution_control.walltime,
                       feature=config.execution_control.feature,
                       conda_env=config.execution_control.conda_env,
                       module=config.execution_control.module)
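The year fan-out above creates one rep-profiles job per analysis year whenever a '{}' placeholder appears in either the capacity factor dataset name or the generation file path. A self-contained sketch of that logic (fan_out_years is a hypothetical helper, not part of reV):

def fan_out_years(name, gen_fpath, cf_dset, years):
    """Return parallel lists of job names, gen file paths, and cf datasets."""
    if years and years[0] is not None and '{}' in cf_dset:
        return ([name + '_{}'.format(y) for y in years],
                [gen_fpath for _ in years],
                [cf_dset.format(y) for y in years])
    if years and years[0] is not None and '{}' in gen_fpath:
        return ([name + '_{}'.format(y) for y in years],
                [gen_fpath.format(y) for y in years],
                [cf_dset for _ in years])
    return [name], [gen_fpath], [cf_dset]


# e.g. fan_out_years('rep', 'gen.h5', 'cf_profile-{}', [2012, 2013])
# -> (['rep_2012', 'rep_2013'], ['gen.h5', 'gen.h5'],
#     ['cf_profile-2012', 'cf_profile-2013'])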
Example #27
0
def from_config(ctx, config_file, verbose):
    """Run reV supply curve compute from a config file."""
    name = ctx.obj['NAME']

    # Instantiate the config object
    config = SupplyCurveConfig(config_file)

    # take name from config if not default
    if config.name.lower() != 'rev':
        name = config.name
        ctx.obj['NAME'] = name

    # Enforce verbosity if logging level is specified in the config
    if config.log_level == logging.DEBUG:
        verbose = True

    # initialize loggers
    init_mult(
        name,
        config.logdir,
        modules=[__name__, 'reV.config', 'reV.utilities', 'rex.utilities'],
        verbose=verbose)

    # Initial log statements
    logger.info('Running reV supply curve from config '
                'file: "{}"'.format(config_file))
    logger.info('Target output directory: "{}"'.format(config.dirout))
    logger.info('Target logging directory: "{}"'.format(config.logdir))
    logger.debug('The full configuration input is as follows:\n{}'.format(
        pprint.pformat(config, indent=4)))

    if config.execution_control.option == 'local':
        status = Status.retrieve_job_status(config.dirout, 'supply-curve',
                                            name)
        if status != 'successful':
            Status.add_job(config.dirout,
                           'supply-curve',
                           name,
                           replace=True,
                           job_attrs={
                               'hardware': 'local',
                               'fout': '{}.csv'.format(name),
                               'dirout': config.dirout
                           })
            ctx.invoke(direct,
                       sc_points=config.sc_points,
                       trans_table=config.trans_table,
                       fixed_charge_rate=config.fixed_charge_rate,
                       sc_features=config.sc_features,
                       transmission_costs=config.transmission_costs,
                       sort_on=config.sort_on,
                       wind_dirs=config.wind_dirs,
                       n_dirs=config.n_dirs,
                       downwind=config.downwind,
                       max_workers=config.max_workers,
                       out_dir=config.dirout,
                       log_dir=config.logdir,
                       simple=config.simple,
                       line_limited=config.line_limited,
                       verbose=verbose)

    elif config.execution_control.option in ('eagle', 'slurm'):

        ctx.obj['NAME'] = name
        ctx.obj['SC_POINTS'] = config.sc_points
        ctx.obj['TRANS_TABLE'] = config.trans_table
        ctx.obj['FIXED_CHARGE_RATE'] = config.fixed_charge_rate
        ctx.obj['SC_FEATURES'] = config.sc_features
        ctx.obj['TRANSMISSION_COSTS'] = config.transmission_costs
        ctx.obj['SORT_ON'] = config.sort_on
        ctx.obj['OFFSHORE_TRANS_TABLE'] = config.offshore_trans_table
        ctx.obj['WIND_DIRS'] = config.wind_dirs
        ctx.obj['N_DIRS'] = config.n_dirs
        ctx.obj['DOWNWIND'] = config.downwind
        ctx.obj['OFFSHORE_COMPETE'] = config.offshore_compete
        ctx.obj['MAX_WORKERS'] = config.max_workers
        ctx.obj['OUT_DIR'] = config.dirout
        ctx.obj['LOG_DIR'] = config.logdir
        ctx.obj['SIMPLE'] = config.simple
        ctx.obj['LINE_LIMITED'] = config.line_limited
        ctx.obj['VERBOSE'] = verbose

        ctx.invoke(slurm,
                   alloc=config.execution_control.allocation,
                   memory=config.execution_control.memory,
                   walltime=config.execution_control.walltime,
                   feature=config.execution_control.feature,
                   conda_env=config.execution_control.conda_env,
                   module=config.execution_control.module)
Example #28
0
def slurm(ctx, nodes, alloc, memory, walltime, feature, conda_env, module,
          stdout_path, verbose):
    """Run generation on HPC via SLURM job submission."""

    name = ctx.obj['NAME']
    tech = ctx.obj['TECH']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    res_file = ctx.obj['RES_FILE']
    sites_per_worker = ctx.obj['SITES_PER_WORKER']
    fout = ctx.obj['FOUT']
    dirout = ctx.obj['DIROUT']
    logdir = ctx.obj['LOGDIR']
    output_request = ctx.obj['OUTPUT_REQUEST']
    max_workers = ctx.obj['MAX_WORKERS']
    mem_util_lim = ctx.obj['MEM_UTIL_LIM']
    timeout = ctx.obj['TIMEOUT']
    curtailment = ctx.obj['CURTAILMENT']
    downscale = ctx.obj['DOWNSCALE']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    # initialize an info logger on the year level
    init_mult(name, logdir, modules=[__name__, 'reV.generation.generation',
                                     'reV.config', 'reV.utilities', 'reV.SAM'],
              verbose=False)

    pc = get_node_pc(points, sam_files, tech, res_file, nodes)

    for i, split in enumerate(pc):
        node_name, fout_node = get_node_name_fout(name, fout, i, pc,
                                                  hpc='slurm')

        cmd = get_node_cmd(node_name, tech, sam_files, res_file,
                           points=points, points_range=split.split_range,
                           sites_per_worker=sites_per_worker,
                           max_workers=max_workers, fout=fout_node,
                           dirout=dirout, logdir=logdir,
                           output_request=output_request,
                           mem_util_lim=mem_util_lim, timeout=timeout,
                           curtailment=curtailment,
                           downscale=downscale, verbose=verbose)

        status = Status.retrieve_job_status(dirout, 'generation', node_name)
        if status == 'successful':
            msg = ('Job "{}" is successful in status json found in "{}", '
                   'not re-running.'
                   .format(node_name, dirout))
        else:
            logger.info('Running reV generation on SLURM with node name "{}" '
                        'for {} (points range: {}).'
                        .format(node_name, pc, split.split_range))
            # create and submit the SLURM job
            slurm = SLURM(cmd, alloc=alloc, memory=memory, walltime=walltime,
                          feature=feature, name=node_name,
                          stdout_path=stdout_path, conda_env=conda_env,
                          module=module)
            if slurm.id:
                msg = ('Kicked off reV generation job "{}" (SLURM jobid #{}).'
                       .format(node_name, slurm.id))
                # add job to reV status file.
                Status.add_job(
                    dirout, 'generation', node_name, replace=True,
                    job_attrs={'job_id': slurm.id, 'hardware': 'eagle',
                               'fout': fout_node, 'dirout': dirout})
            else:
                msg = ('Was unable to kick off reV generation job "{}". '
                       'Please see the stdout error messages'
                       .format(node_name))

        click.echo(msg)
        logger.info(msg)
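The generation submitter splits project points across the requested nodes and gives each chunk an indexed job name and output file. An illustrative sketch of that splitting, assuming a simple index-range split rather than reV's PointsControl machinery:

def split_points(n_points, n_nodes):
    """Yield (start, stop) index ranges that split points across nodes."""
    chunk = -(-n_points // n_nodes)  # ceiling division
    for i in range(n_nodes):
        start = i * chunk
        stop = min(start + chunk, n_points)
        if start < stop:
            yield start, stop


def node_name_fout(name, fout, i):
    """Return an indexed node job name and output file name."""
    node_name = '{}_node{:02d}'.format(name, i)
    node_fout = fout.replace('.h5', '_node{:02d}.h5'.format(i))
    return node_name, node_fout


# e.g. list(split_points(100, 3)) -> [(0, 34), (34, 68), (68, 100)]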