Exemplo n.º 1
0
def supply_curve(ctx, sc_table, out_dir, sub_dir, columns, plot_type, cmap,
                 lcoe, log_file, verbose, terminal):
    """
    Summarize and plot reV Supply Curve data
    """
    name = ctx.obj['NAME']
    # DEBUG wins if either the local or the global verbose flag is set
    log_level = 'DEBUG' if (verbose or ctx.obj['VERBOSE']) else 'INFO'

    init_logger('reV', log_file=log_file, log_level=log_level)

    # optional sub-directory under the output dir for QA/QC artifacts
    qa_dir = out_dir if sub_dir is None else os.path.join(out_dir, sub_dir)

    QaQc.supply_curve(sc_table, qa_dir, columns=columns, lcoe=lcoe,
                      plot_type=plot_type, cmap=cmap)

    if terminal:
        # record a successful job in the reV status file
        Status.make_job_file(out_dir, 'qa-qc', name,
                             {'dirout': out_dir,
                              'job_status': 'successful',
                              'finput': sc_table})
Exemplo n.º 2
0
def reV_h5(ctx, h5_file, out_dir, sub_dir, dsets, group, process_size,
           max_workers, plot_type, cmap, log_file, verbose, terminal):
    """
    Summarize and plot data for reV h5_file
    """
    name = ctx.obj['NAME']
    # DEBUG wins if either the local or the global verbose flag is set
    log_level = 'DEBUG' if (verbose or ctx.obj['VERBOSE']) else 'INFO'

    init_logger('reV', log_file=log_file, log_level=log_level)

    # optional sub-directory under the output dir for QA/QC artifacts
    qa_dir = out_dir if sub_dir is None else os.path.join(out_dir, sub_dir)

    QaQc.h5(h5_file, qa_dir, dsets=dsets, group=group,
            process_size=process_size, max_workers=max_workers,
            plot_type=plot_type, cmap=cmap)

    if terminal:
        # record a successful job in the reV status file
        Status.make_job_file(out_dir, 'qa-qc', name,
                             {'dirout': out_dir,
                              'job_status': 'successful',
                              'finput': h5_file})
Exemplo n.º 3
0
def test_make_file():
    """Test file creation and reading"""
    purge()
    Status.make_job_file(STATUS_DIR, 'generation', 'test1', TEST_1_ATTRS_1)
    result = Status.retrieve_job_status(STATUS_DIR, 'generation', 'test1')
    # a freshly created job file should report the 'R' (running) status
    assert result == 'R', 'Failed, status is "{}"'.format(result)
    purge()
Exemplo n.º 4
0
def direct(ctx, gen_fpath, rev_summary, reg_cols, cf_dset, rep_method,
           err_method, weight, n_profiles, out_dir, log_dir, max_workers,
           aggregate_profiles, verbose):
    """reV representative profiles CLI."""
    name = ctx.obj['NAME']
    # stash every CLI option on the click context for downstream subcommands
    for ctx_key, ctx_val in (('GEN_FPATH', gen_fpath),
                             ('REV_SUMMARY', rev_summary),
                             ('REG_COLS', reg_cols),
                             ('CF_DSET', cf_dset),
                             ('REP_METHOD', rep_method),
                             ('ERR_METHOD', err_method),
                             ('WEIGHT', weight),
                             ('N_PROFILES', n_profiles),
                             ('OUT_DIR', out_dir),
                             ('LOG_DIR', log_dir),
                             ('MAX_WORKERS', max_workers),
                             ('AGGREGATE_PROFILES', aggregate_profiles),
                             ('VERBOSE', verbose)):
        ctx.obj[ctx_key] = ctx_val

    if ctx.invoked_subcommand is not None:
        # a subcommand will do the work; nothing more to run here
        return

    start = time.time()
    init_mult(name, log_dir, modules=['reV', 'rex'], verbose=verbose)

    fn_out = '{}.h5'.format(name)
    fout = os.path.join(out_dir, fn_out)

    if aggregate_profiles:
        # aggregated profiles do not use region columns or rep/err methods
        AggregatedRepProfiles.run(gen_fpath, rev_summary, cf_dset=cf_dset,
                                  weight=weight, fout=fout,
                                  max_workers=max_workers)
    else:
        RepProfiles.run(gen_fpath, rev_summary, reg_cols, cf_dset=cf_dset,
                        rep_method=rep_method, err_method=err_method,
                        weight=weight, fout=fout, n_profiles=n_profiles,
                        max_workers=max_workers)

    runtime = (time.time() - start) / 60
    logger.info('reV representative profiles complete. '
                'Time elapsed: {:.2f} min. Target output dir: {}'.format(
                    runtime, out_dir))

    # add job to reV status file.
    Status.make_job_file(out_dir, 'rep-profiles', name,
                         {'dirout': out_dir,
                          'fout': fn_out,
                          'job_status': 'successful',
                          'runtime': runtime,
                          'finput': [gen_fpath, rev_summary]})
Exemplo n.º 5
0
def multi_year_groups(ctx, group_params, verbose):
    """Run multi year collection and means for multiple groups."""
    name = ctx.obj['NAME']
    my_file = ctx.obj['MY_FILE']
    verbose = bool(verbose or ctx.obj['VERBOSE'])

    # initialize loggers for multiple modules
    log_dir = os.path.dirname(my_file)
    init_mult(name, log_dir, modules=[__name__, 'reV.handlers.multi_year'],
              verbose=verbose, node=True)

    for key, val in ctx.obj.items():
        logger.debug('ctx var passed to collection method: "{}" : "{}" '
                     'with type "{}"'.format(key, val, type(val)))

    logger.info('Multi-year collection is being run with job name "{}". '
                'Target output path is: {}'.format(name, my_file))
    ts = time.time()
    for group_name, group in json.loads(group_params).items():
        source_files = group['source_files']
        logger.info('- Collecting datasets "{}" from "{}" into "{}/"'.format(
            group['dsets'], source_files, group_name))
        group_start = time.time()
        for dset in group['dsets']:
            # profile datasets and means datasets use different collectors
            collector = (MultiYear.collect_profiles
                         if MultiYear.is_profile(source_files, dset)
                         else MultiYear.collect_means)
            collector(my_file, source_files, dset, group=group['group'])

        runtime = (time.time() - group_start) / 60
        logger.info('- {} collection completed in: {:.2f} min.'.format(
            group_name, runtime))

    runtime = (time.time() - ts) / 60
    logger.info(
        'Multi-year collection completed in : {:.2f} min.'.format(runtime))

    # add job to reV status file.
    out_dir = os.path.dirname(my_file)
    Status.make_job_file(out_dir, 'multi-year', name,
                         {'dirout': out_dir,
                          'fout': os.path.basename(my_file),
                          'job_status': 'successful',
                          'runtime': runtime})
Exemplo n.º 6
0
def test_file_collection():
    """Test file creation and collection"""
    purge()

    Status.make_job_file(STATUS_DIR, 'generation', 'test1', TEST_1_ATTRS_1)
    Status.make_job_file(STATUS_DIR, 'generation', 'test2', TEST_2_ATTRS_1)

    Status.update(STATUS_DIR)
    status_fpath = os.path.join(STATUS_DIR, 'rev_status.json')
    with open(status_fpath, 'r') as f:
        collected = json.load(f)

    # both job attribute sets should have been folded into the status file
    for attrs in (TEST_1_ATTRS_1, TEST_2_ATTRS_1):
        assert str(attrs) in str(collected)
    purge()
Exemplo n.º 7
0
def multi_year(ctx, source_files, group, dsets, pass_through_dsets, verbose):
    """Run multi year collection and means on local worker."""

    name = ctx.obj['NAME']
    my_file = ctx.obj['MY_FILE']
    verbose = bool(verbose or ctx.obj['VERBOSE'])

    # initialize loggers for multiple modules
    init_mult(name, os.path.dirname(my_file),
              modules=[__name__, 'reV.handlers.multi_year'],
              verbose=verbose, node=True)

    for key, val in ctx.obj.items():
        logger.debug('ctx var passed to collection method: "{}" : "{}" '
                     'with type "{}"'.format(key, val, type(val)))

    logger.info('Multi-year collection is being run for "{}" '
                'with job name "{}" on {}. Target output path is: {}'.format(
                    dsets, name, source_files, my_file))
    start = time.time()

    for dset in dsets:
        # profile datasets and means datasets use different collectors
        collector = (MultiYear.collect_profiles
                     if MultiYear.is_profile(source_files, dset)
                     else MultiYear.collect_means)
        collector(my_file, source_files, dset, group=group)

    if pass_through_dsets is not None:
        # datasets copied through unchanged (no collection/means logic)
        for dset in pass_through_dsets:
            MultiYear.pass_through(my_file, source_files, dset, group=group)

    runtime = (time.time() - start) / 60
    logger.info(
        'Multi-year collection completed in: {:.2f} min.'.format(runtime))

    # add job to reV status file.
    out_dir = os.path.dirname(my_file)
    Status.make_job_file(out_dir, 'multi-year', name,
                         {'dirout': out_dir,
                          'fout': os.path.basename(my_file),
                          'job_status': 'successful',
                          'runtime': runtime,
                          'finput': source_files})
Exemplo n.º 8
0
def direct(ctx, gen_fpath, offshore_fpath, points, sam_files, log_dir,
           verbose):
    """Main entry point to run offshore wind aggregation.

    Stores all CLI options on the click context for subcommands; when no
    subcommand is invoked, runs the Offshore module directly and records a
    job entry in the reV status file.
    """
    name = ctx.obj['NAME']
    ctx.obj['GEN_FPATH'] = gen_fpath
    ctx.obj['OFFSHORE_FPATH'] = offshore_fpath
    ctx.obj['POINTS'] = points
    ctx.obj['SAM_FILES'] = sam_files
    ctx.obj['OUT_DIR'] = os.path.dirname(gen_fpath)
    ctx.obj['LOG_DIR'] = log_dir
    ctx.obj['VERBOSE'] = verbose

    if ctx.invoked_subcommand is None:
        t0 = time.time()
        init_mult(name,
                  log_dir,
                  modules=[__name__, 'reV.offshore', 'reV.handlers', 'rex'],
                  verbose=verbose,
                  node=True)

        # offshore output lives next to the generation file
        fpath_out = gen_fpath.replace('.h5', '_offshore.h5')

        try:
            Offshore.run(gen_fpath,
                         offshore_fpath,
                         points,
                         sam_files,
                         fpath_out=fpath_out)
        except Exception as e:
            logger.exception('Offshore module failed, received the '
                             'following exception:\n{}'.format(e))
            # bare raise preserves the original traceback; `raise e` would
            # add this frame as a new raise site
            raise

        runtime = (time.time() - t0) / 60

        status = {
            'dirout': os.path.dirname(fpath_out),
            'fout': os.path.basename(fpath_out),
            'job_status': 'successful',
            'runtime': runtime,
            'finput': gen_fpath
        }
        Status.make_job_file(os.path.dirname(fpath_out), 'offshore', name,
                             status)
Exemplo n.º 9
0
def exclusions(ctx, excl_fpath, out_dir, sub_dir, excl_dict,
               area_filter_kernel, min_area, plot_type, cmap, plot_step,
               log_file, verbose, terminal):
    """
    Extract and plot reV exclusions mask
    """
    name = ctx.obj['NAME']
    # DEBUG wins if either the local or the global verbose flag is set
    log_level = 'DEBUG' if (verbose or ctx.obj['VERBOSE']) else 'INFO'

    init_logger('reV', log_file=log_file, log_level=log_level)

    # optional sub-directory under the output dir for QA/QC artifacts
    qa_dir = out_dir if sub_dir is None else os.path.join(out_dir, sub_dir)

    # the CLI may hand the exclusion layers through as a string-encoded dict
    if isinstance(excl_dict, str):
        excl_dict = dict_str_load(excl_dict)

    QaQc.exclusions_mask(excl_fpath, qa_dir, layers_dict=excl_dict,
                         min_area=min_area, kernel=area_filter_kernel,
                         plot_type=plot_type, cmap=cmap,
                         plot_step=plot_step)

    if terminal:
        # record a successful job in the reV status file
        Status.make_job_file(out_dir, 'qa-qc', name,
                             {'dirout': out_dir,
                              'job_status': 'successful',
                              'finput': excl_fpath})
Exemplo n.º 10
0
def direct(ctx, sc_points, trans_table, fixed_charge_rate, sc_features,
           transmission_costs, sort_on, offshore_trans_table, wind_dirs,
           n_dirs, downwind, offshore_compete, max_workers, out_dir, log_dir,
           simple, line_limited, verbose):
    """reV Supply Curve CLI.

    Stores all CLI options on the click context for subcommands; when no
    subcommand is invoked, computes the supply curve (simple or full),
    writes it to ``<name>.csv`` in ``out_dir`` and records a job entry in
    the reV status file.
    """
    name = ctx.obj['NAME']
    ctx.obj['SC_POINTS'] = sc_points
    ctx.obj['TRANS_TABLE'] = trans_table
    ctx.obj['FIXED_CHARGE_RATE'] = fixed_charge_rate
    ctx.obj['SC_FEATURES'] = sc_features
    ctx.obj['TRANSMISSION_COSTS'] = transmission_costs
    ctx.obj['SORT_ON'] = sort_on
    ctx.obj['OFFSHORE_TRANS_TABLE'] = offshore_trans_table
    ctx.obj['WIND_DIRS'] = wind_dirs
    ctx.obj['N_DIRS'] = n_dirs
    ctx.obj['DOWNWIND'] = downwind
    ctx.obj['MAX_WORKERS'] = max_workers
    ctx.obj['OFFSHORE_COMPETE'] = offshore_compete
    ctx.obj['OUT_DIR'] = out_dir
    ctx.obj['LOG_DIR'] = log_dir
    ctx.obj['SIMPLE'] = simple
    ctx.obj['LINE_LIMITED'] = line_limited
    ctx.obj['VERBOSE'] = verbose

    if ctx.invoked_subcommand is None:
        t0 = time.time()
        init_mult(
            name,
            log_dir,
            modules=[__name__, 'reV.supply_curve', 'reV.handlers', 'rex'],
            verbose=verbose)

        # transmission costs may arrive as a string-encoded dict from the CLI
        if isinstance(transmission_costs, str):
            transmission_costs = dict_str_load(transmission_costs)

        offshore_table = offshore_trans_table
        try:
            if simple:
                out = SupplyCurve.simple(sc_points,
                                         trans_table,
                                         fixed_charge_rate,
                                         sc_features=sc_features,
                                         transmission_costs=transmission_costs,
                                         sort_on=sort_on,
                                         wind_dirs=wind_dirs,
                                         n_dirs=n_dirs,
                                         downwind=downwind,
                                         max_workers=max_workers,
                                         offshore_trans_table=offshore_table,
                                         offshore_compete=offshore_compete)
            else:
                # full supply curve additionally honors line_limited
                out = SupplyCurve.full(sc_points,
                                       trans_table,
                                       fixed_charge_rate,
                                       sc_features=sc_features,
                                       transmission_costs=transmission_costs,
                                       line_limited=line_limited,
                                       sort_on=sort_on,
                                       wind_dirs=wind_dirs,
                                       n_dirs=n_dirs,
                                       downwind=downwind,
                                       max_workers=max_workers,
                                       offshore_trans_table=offshore_table,
                                       offshore_compete=offshore_compete)
        except Exception as e:
            logger.exception('Supply curve compute failed. Received the '
                             'following error:\n{}'.format(e))
            # bare raise preserves the original traceback; `raise e` would
            # add this frame as a new raise site
            raise

        fn_out = '{}.csv'.format(name)
        fpath_out = os.path.join(out_dir, fn_out)
        out.to_csv(fpath_out, index=False)

        runtime = (time.time() - t0) / 60
        logger.info('Supply curve complete. Time elapsed: {:.2f} min. '
                    'Target output dir: {}'.format(runtime, out_dir))

        # record only the inputs that were actually supplied
        finput = [sc_points, trans_table]
        if sc_features is not None:
            finput.append(sc_features)

        if transmission_costs is not None:
            finput.append(transmission_costs)

        # add job to reV status file.
        status = {
            'dirout': out_dir,
            'fout': fn_out,
            'job_status': 'successful',
            'runtime': runtime,
            'finput': finput
        }
        Status.make_job_file(out_dir, 'supply-curve', name, status)
Exemplo n.º 11
0
def collect(ctx, verbose):
    """Run collection on local worker."""

    name = ctx.obj['NAME']
    h5_file = ctx.obj['H5_FILE']
    h5_dir = ctx.obj['H5_DIR']
    project_points = ctx.obj['PROJECT_POINTS']
    dsets = ctx.obj['DSETS']
    file_prefix = ctx.obj['FILE_PREFIX']
    log_dir = ctx.obj['LOG_DIR']
    purge_chunks = ctx.obj['PURGE_CHUNKS']
    verbose = bool(verbose or ctx.obj['VERBOSE'])

    # initialize loggers for multiple modules
    init_mult(name, log_dir,
              modules=[__name__, 'reV.handlers.collection'],
              verbose=verbose, node=True)

    for key, val in ctx.obj.items():
        logger.debug('ctx var passed to collection method: "{}" : "{}" '
                     'with type "{}"'.format(key, val, type(val)))

    logger.info('Collection is being run for "{}" with job name "{}" '
                'and collection dir: {}. Target output path is: {}'.format(
                    dsets, name, h5_dir, h5_file))
    start = time.time()

    # first dataset creates the output file / meta
    Collector.collect(h5_file, h5_dir, project_points, dsets[0],
                      file_prefix=file_prefix)

    # remaining datasets are appended; slicing an empty tail is a no-op
    for extra_dset in dsets[1:]:
        Collector.add_dataset(h5_file, h5_dir, extra_dset,
                              file_prefix=file_prefix)

    # either delete or archive the chunked source files
    chunk_handler = (Collector.purge_chunks if purge_chunks
                     else Collector.move_chunks)
    chunk_handler(h5_file, h5_dir, project_points, file_prefix=file_prefix)

    runtime = (time.time() - start) / 60
    logger.info('Collection completed in: {:.2f} min.'.format(runtime))

    # add job to reV status file.
    out_dir = os.path.dirname(h5_file)
    Status.make_job_file(out_dir, 'collect', name,
                         {'dirout': out_dir,
                          'fout': os.path.basename(h5_file),
                          'job_status': 'successful',
                          'runtime': runtime,
                          'finput': os.path.join(
                              h5_dir, '{}*.h5'.format(file_prefix))})
Exemplo n.º 12
0
def local(ctx, max_workers, timeout, points_range, verbose):
    """Run econ on local worker(s).

    Pulls all job options off the click context, runs ``Econ.reV_run`` and
    records a job entry in the reV status file.
    """

    name = ctx.obj['NAME']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    cf_file = ctx.obj['CF_FILE']
    cf_year = ctx.obj['CF_YEAR']
    site_data = ctx.obj['SITE_DATA']
    sites_per_worker = ctx.obj['SITES_PER_WORKER']
    fout = ctx.obj['FOUT']
    dirout = ctx.obj['DIROUT']
    logdir = ctx.obj['LOGDIR']
    output_request = ctx.obj['OUTPUT_REQUEST']
    append = ctx.obj['APPEND']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    if append:
        # appending: write econ outputs into the existing gen file
        fout = os.path.basename(cf_file)
        dirout = os.path.dirname(cf_file)

    # initialize loggers for multiple modules
    log_modules = [
        __name__, 'reV.econ.econ', 'reV.generation', 'reV.config',
        'reV.utilities', 'reV.SAM', 'reV.handlers', 'rex.utilities'
    ]
    init_mult(name, logdir, modules=log_modules, verbose=verbose, node=True)

    for key, val in ctx.obj.items():
        logger.debug('ctx var passed to local method: "{}" : "{}" with type '
                     '"{}"'.format(key, val, type(val)))

    # fix: de-duplicated "with with" typo in the log message
    logger.info(
        'Econ local is being run with job name "{}" and '
        'generation results file: {}. Target output path is: {}'.format(
            name, cf_file, os.path.join(dirout, fout)))
    t0 = time.time()

    # Execute the Generation module with smart data flushing.
    Econ.reV_run(points=points,
                 sam_files=sam_files,
                 cf_file=cf_file,
                 cf_year=cf_year,
                 site_data=site_data,
                 output_request=output_request,
                 max_workers=max_workers,
                 timeout=timeout,
                 sites_per_worker=sites_per_worker,
                 points_range=points_range,
                 fout=fout,
                 dirout=dirout,
                 append=append)

    tmp_str = ' with points range {}'.format(points_range)
    runtime = (time.time() - t0) / 60
    logger.info('Econ compute complete for project points "{0}"{1}. '
                'Time elapsed: {2:.2f} min. Target output dir: {3}'.format(
                    points, tmp_str if points_range else '', runtime, dirout))

    # add job to reV status file.
    status = {
        'dirout': dirout,
        'fout': fout,
        'job_status': 'successful',
        'runtime': runtime,
        'finput': cf_file
    }
    Status.make_job_file(dirout, 'econ', name, status)
Exemplo n.º 13
0
def direct(ctx, excl_fpath, gen_fpath, econ_fpath, res_fpath, tm_dset,
           excl_dict, check_excl_layers, res_class_dset, res_class_bins,
           cf_dset, lcoe_dset, h5_dsets, data_layers, resolution, excl_area,
           power_density, area_filter_kernel, min_area, friction_fpath,
           friction_dset, out_dir, log_dir, verbose):
    """reV Supply Curve Aggregation Summary CLI.

    Stores all CLI options on the click context for subcommands; when no
    subcommand is invoked, (re)builds the tech mapping if needed, runs the
    aggregation summary, writes it to ``<name>.csv`` in ``out_dir`` and
    records a job entry in the reV status file.
    """
    name = ctx.obj['NAME']
    ctx.obj['EXCL_FPATH'] = excl_fpath
    ctx.obj['GEN_FPATH'] = gen_fpath
    ctx.obj['ECON_FPATH'] = econ_fpath
    ctx.obj['RES_FPATH'] = res_fpath
    ctx.obj['TM_DSET'] = tm_dset
    ctx.obj['EXCL_DICT'] = excl_dict
    ctx.obj['CHECK_LAYERS'] = check_excl_layers
    ctx.obj['RES_CLASS_DSET'] = res_class_dset
    ctx.obj['RES_CLASS_BINS'] = res_class_bins
    ctx.obj['CF_DSET'] = cf_dset
    ctx.obj['LCOE_DSET'] = lcoe_dset
    ctx.obj['H5_DSETS'] = h5_dsets
    ctx.obj['DATA_LAYERS'] = data_layers
    ctx.obj['RESOLUTION'] = resolution
    ctx.obj['EXCL_AREA'] = excl_area
    ctx.obj['POWER_DENSITY'] = power_density
    ctx.obj['AREA_FILTER_KERNEL'] = area_filter_kernel
    ctx.obj['MIN_AREA'] = min_area
    ctx.obj['FRICTION_FPATH'] = friction_fpath
    ctx.obj['FRICTION_DSET'] = friction_dset
    ctx.obj['OUT_DIR'] = out_dir
    ctx.obj['LOG_DIR'] = log_dir
    ctx.obj['VERBOSE'] = verbose

    if ctx.invoked_subcommand is None:
        t0 = time.time()
        init_mult(name,
                  log_dir,
                  modules=[__name__, 'reV.supply_curve'],
                  verbose=verbose)

        # run the tech mapping only if the dataset is not already present
        with h5py.File(excl_fpath, mode='r') as f:
            dsets = list(f)
        if tm_dset not in dsets:
            try:
                TechMapping.run(excl_fpath, res_fpath, tm_dset)
            except Exception as e:
                logger.exception('TechMapping process failed. Received the '
                                 'following error:\n{}'.format(e))
                # bare raise preserves the original traceback; `raise e`
                # would add this frame as a new raise site
                raise

        # CLI may hand dict options through as string-encoded dicts
        if isinstance(excl_dict, str):
            excl_dict = dict_str_load(excl_dict)

        if isinstance(data_layers, str):
            data_layers = dict_str_load(data_layers)

        try:
            summary = SupplyCurveAggregation.summary(
                excl_fpath,
                gen_fpath,
                tm_dset,
                econ_fpath=econ_fpath,
                excl_dict=excl_dict,
                res_class_dset=res_class_dset,
                res_class_bins=res_class_bins,
                cf_dset=cf_dset,
                lcoe_dset=lcoe_dset,
                h5_dsets=h5_dsets,
                data_layers=data_layers,
                resolution=resolution,
                excl_area=excl_area,
                power_density=power_density,
                area_filter_kernel=area_filter_kernel,
                min_area=min_area,
                friction_fpath=friction_fpath,
                friction_dset=friction_dset,
                check_excl_layers=check_excl_layers)

        except Exception as e:
            logger.exception('Supply curve Aggregation failed. Received the '
                             'following error:\n{}'.format(e))
            # bare raise preserves the original traceback; `raise e` would
            # add this frame as a new raise site
            raise

        fn_out = '{}.csv'.format(name)
        fpath_out = os.path.join(out_dir, fn_out)
        summary.to_csv(fpath_out)

        runtime = (time.time() - t0) / 60
        logger.info('Supply curve aggregation complete. '
                    'Time elapsed: {:.2f} min. Target output dir: {}'.format(
                        runtime, out_dir))

        # record only the inputs that were actually supplied
        finput = [excl_fpath, gen_fpath]
        if res_fpath is not None:
            finput.append(res_fpath)

        # add job to reV status file.
        status = {
            'dirout': out_dir,
            'fout': fn_out,
            'job_status': 'successful',
            'runtime': runtime,
            'finput': finput,
            'excl_fpath': excl_fpath,
            'excl_dict': excl_dict,
            'area_filter_kernel': area_filter_kernel,
            'min_area': min_area
        }
        Status.make_job_file(out_dir, 'supply-curve-aggregation', name, status)
Exemplo n.º 14
0
def local(ctx, max_workers, timeout, points_range, verbose):
    """Run generation on local worker(s).

    Pulls all job options off the click context, runs ``Gen.reV_run`` and
    records a job entry in the reV status file.
    """

    name = ctx.obj['NAME']
    tech = ctx.obj['TECH']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    res_file = ctx.obj['RES_FILE']
    sites_per_worker = ctx.obj['SITES_PER_WORKER']
    fout = ctx.obj['FOUT']
    dirout = ctx.obj['DIROUT']
    logdir = ctx.obj['LOGDIR']
    output_request = ctx.obj['OUTPUT_REQUEST']
    site_data = ctx.obj['SITE_DATA']
    mem_util_lim = ctx.obj['MEM_UTIL_LIM']
    curtailment = ctx.obj['CURTAILMENT']
    verbose = any([verbose, ctx.obj['VERBOSE']])

    # initialize loggers for multiple modules
    init_mult(name, logdir, modules=['reV', 'rex'], verbose=verbose, node=True)

    for key, val in ctx.obj.items():
        logger.debug('ctx var passed to local method: "{}" : "{}" with type '
                     '"{}"'.format(key, val, type(val)))

    # fix: de-duplicated "with with" typo in the log message
    logger.info('Gen local is being run with job name "{}" and resource '
                'file: {}. Target output path is: {}'.format(
                    name, res_file, os.path.join(dirout, fout)))
    t0 = time.time()

    points = _parse_points(ctx)

    # Execute the Generation module with smart data flushing.
    Gen.reV_run(tech=tech,
                points=points,
                sam_files=sam_files,
                res_file=res_file,
                site_data=site_data,
                output_request=output_request,
                curtailment=curtailment,
                max_workers=max_workers,
                sites_per_worker=sites_per_worker,
                points_range=points_range,
                fout=fout,
                dirout=dirout,
                mem_util_lim=mem_util_lim,
                timeout=timeout)

    tmp_str = ' with points range {}'.format(points_range)
    runtime = (time.time() - t0) / 60
    logger.info('Gen compute complete for project points "{0}"{1}. '
                'Time elapsed: {2:.2f} min. Target output dir: {3}'.format(
                    points, tmp_str if points_range else '', runtime, dirout))

    # add job to reV status file.
    status = {
        'dirout': dirout,
        'fout': fout,
        'job_status': 'successful',
        'runtime': runtime,
        'finput': res_file
    }
    Status.make_job_file(dirout, 'generation', name, status)