Example #1
import pytest

from bayom_e.model_handling.interface import build_model


# Pytest fixture (decorator assumed from the `request` argument) that builds
# the county-scale cost-minimization model for Adams County, PA.
@pytest.fixture
def costmin_model_for_county(request):
    model, dataplate = build_model(
        model_spec_name='costmin_total_Npercentreduction',
        geoscale='county',
        geoentities='Adams, PA',
        savedata2file=False,
        baseloadingfilename='2010NoActionLoads_updated.csv')

    return model
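
A test can then receive the built model simply by naming the fixture as a parameter; the test name below is just an illustration:

def test_county_model_builds(costmin_model_for_county):
    assert costmin_model_for_county is not None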
Example #2
import pyomo.environ as pyo

from bayom_e.model_handling.interface import build_model


def test_default_modelgen_AdamsPACounty(costmin_model_spec_path):
    model, dataplate = build_model(
        model_spec_name=costmin_model_spec_path,
        geoscale='county',
        geoentities='Adams, PA',
        baseloadingfilename='2010NoActionLoads_updated.csv',
        savedata2file=False)

    # Verify the original load is numeric and positive
    # Obsolete check (old model attribute): abs(pyo.value(mh.model.originalload['N']) - 3601593.97050113) < 1000
    assert pyo.value(model.original_load_expr['N']) > 0
Example #3
import pyomo.environ as pyo

from bayom_e.model_handling.interface import build_model


def test_default_modelgen_BroomeNYCounty(costmin_model_spec_path):
    model, dataplate = build_model(
        model_spec_name=costmin_model_spec_path,
        geoscale='county',
        geoentities='Broome, NY',
        baseloadingfilename='2010NoActionLoads_updated.csv',
        savedata2file=False)

    # Obsolete check (old model attribute): abs(pyo.value(mdlhandler.model.originalload['N']) - 3344694.57286031) < 1000
    # Verify the original load is numeric and positive
    assert pyo.value(model.original_load_expr['N']) > 0
Example #4
import pyomo.environ as pyo

from bayom_e.model_handling.interface import build_model


def test_default_modelgen_NorthUmberlandLrseg(costmin_model_spec_path):
    model, dataplate = build_model(
        model_spec_name=costmin_model_spec_path,
        geoscale='lrseg',
        geoentities=['N51133RL0_6450_0000'],
        baseloadingfilename='2010NoActionLoads_updated.csv',
        savedata2file=False)

    # Obsolete check (old model attribute): abs(pyo.value(mdlhandler.model.originalload['N']) - 572816.402650118) < 1000
    # Verify the original load is numeric and positive
    assert pyo.value(model.original_load_expr['N']) > 0
Example #5
import os

import pytest

from bayom_e.model_handling.interface import build_model


# Pytest fixture (decorator assumed from the `request` argument) that builds
# the model for a single land-river segment (lrseg).
@pytest.fixture
def costmin_model_for_singlelrseg(request):
    THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    costmin_model_spec_path = os.path.join(THIS_DIR,
                                           'costmin_total_Npercentreduction')

    model, dataplate = build_model(
        model_spec_name=costmin_model_spec_path,
        geoscale='lrseg',
        geoentities=['N51133RL0_6450_0000'],
        baseloadingfilename='2010NoActionLoads_updated.csv',
        savedata2file=False)
    return model
Example #6
import cloudpickle

from bayom_e.model_handling.interface import build_model


def test_expression_pickling_AdamsPA(costmin_model_spec_path):
    model, dataplate = build_model(
        model_spec_name=costmin_model_spec_path,
        geoscale='county',
        geoentities='Adams, PA',
        baseloadingfilename='2010NoActionLoads_updated.csv',
        savedata2file=False)

    pickle_str = cloudpickle.dumps(model.original_load_expr)
    loaded_model_expr = cloudpickle.loads(pickle_str)

    assert list(model.original_load_expr.items()) == \
           list(loaded_model_expr.items())
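
The same round trip works on the whole model object, not just one expression. A minimal sketch, assuming the full Pyomo model is cloudpickle-serializable like its expression above:

model_bytes = cloudpickle.dumps(model)          # serialize the entire model
loaded_model = cloudpickle.loads(model_bytes)   # rebuild a working copy
assert list(loaded_model.original_load_expr.keys()) == \
       list(model.original_load_expr.keys())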
Example #7
import numpy as np
import pandas as pd
import pyomo.environ as pyo
import yaml

from bayota_settings.base import get_bayota_version

print("using bayota version '%s'" % get_bayota_version())

from bayota_settings.base import get_source_csvs_dir

print("getting source csvs from: '%s'" % get_source_csvs_dir())

from bayom_e.model_handling.interface import build_model

modelspec = '/Users/Danny/bayota_ws_0.1b1.dev2/specification_files/model_specs/costmin_total_Npercentreduction_updated.yaml'
geoscale = 'county'
geoentities = 'Adams, PA'
baseloadingfilename = '2010NoActionLoads_updated.csv'

with open(modelspec, 'r') as stream:
    modelspec_dict = yaml.safe_load(stream)

modelspec_dict['variant'] = 'lp'  # override the spec to use its 'lp' variant

print(modelspec_dict)
my_model, dp = build_model(modelspec_dict,
                           geoscale,
                           geoentities,
                           baseloadingfilename,
                           savedata2file=False,
                           log_level='INFO')
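
The interactive snippet above builds a model directly; the step-2 batch entry point below does the same build from a control file, then pickles the resulting model instance: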
import datetime
import os
import time

# NOTE: the project-level helpers used below (read_model_controlfile,
# set_up_detailedfilelogger, notdry, get_model_instances_dir,
# save_model_pickle, write_progress_file, and the S3 helpers) are
# imported from the bayota/bayom_e packages.


def main(control_file,
         dryrun=False,
         use_s3_ws=False,
         save_to_s3=False,
         log_level='INFO') -> int:
    if save_to_s3 or use_s3_ws:
        # Connection with S3 is established.
        s3ops = establish_s3_connection(log_level, logger=None)

    # If using s3, required workspace directories are pulled from buckets.
    if use_s3_ws:
        pull_workspace_subdir_from_s3(subdirname='control',
                                      s3ops=s3ops,
                                      log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='data',
                                      s3ops=s3ops,
                                      log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='specfiles',
                                      s3ops=s3ops,
                                      log_level=log_level)

    # Control file is read.
    (control_dict,
     baseloadingfilename,
     compact_geo_entity_str,
     geography_entity,
     geography_scale,
     model_spec_name,
     saved_model_name,
     savedata2file) = read_model_controlfile(control_file_name=control_file)

    # Logging formats are set up.
    logger = set_up_detailedfilelogger(
        loggername=model_spec_name,
        filename=f"step2_modelgeneration_{compact_geo_entity_str}.log",
        level=log_level,
        also_logtoconsole=True,
        add_filehandler_if_already_exists=True,
        add_consolehandler_if_already_exists=False)

    logger.info('v--------------------------------------------v')
    logger.info(' ************** Model Generation ************')
    logger.info(' geographies specification: %s' % geography_entity)
    logger.info('^--------------------------------------------^')

    # A progress report is started.
    progress_dict = control_dict.copy()
    progress_dict['run_timestamps'] = {
        'step2_generatemodel_start':
        datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
    }

    # Model is generated.
    my_model = None
    if notdry(dryrun, logger, '--Dryrun-- Would generate model'):
        starttime_modelinstantiation = time.time()  # Wall time - clock starts.

        my_model, dataplate = build_model(
            model_spec_name=model_spec_name,
            geoscale=geography_scale.lower(),
            geoentities=geography_entity,
            baseloadingfilename=baseloadingfilename,
            savedata2file=savedata2file)

        timefor_modelinstantiation = time.time() - starttime_modelinstantiation  # Wall time - clock stops.
        logger.info('*model instantiation done* <- it took %f seconds>' %
                    timefor_modelinstantiation)

    # Model is saved.
    if not saved_model_name:
        saved_model_name = 'saved_instance.pickle'
    savepath = os.path.join(get_model_instances_dir(), saved_model_name)
    logger.debug(f"*saving model as pickle to {savepath}*")
    save_model_pickle(model=my_model, savepath=savepath, dryrun=dryrun)
    logger.debug(f"*model saved*")

    # If using s3, model instance is moved to bucket.
    if save_to_s3:
        move_model_pickle_to_s3(logger, s3ops, savepath)

    # Progress report is finalized with timestamp and saved.
    progress_dict['run_timestamps']['step2_generatemodel_done'] = \
        datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
    progress_file_name = write_progress_file(
        progress_dict, control_name=control_dict['study']['uuid'])
    if save_to_s3:
        move_controlfile_to_s3(logger,
                               s3ops,
                               controlfile_name=progress_file_name,
                               no_s3=False)
    logger.debug(f"*model generation done*")
    return 0  # a clean, no-issue, exit
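
For reference, here is a minimal sketch of how main() might be wired to a command line; the flag names are illustrative assumptions, not the project's actual CLI:

if __name__ == '__main__':
    import argparse
    import sys

    # Hypothetical CLI wrapper; argument names are assumptions for illustration.
    parser = argparse.ArgumentParser(
        description='Step 2: generate a model instance from a control file.')
    parser.add_argument('control_file', help='path of the run control file')
    parser.add_argument('--dryrun', action='store_true')
    parser.add_argument('--use_s3_ws', action='store_true')
    parser.add_argument('--save_to_s3', action='store_true')
    parser.add_argument('--log_level', default='INFO')
    opts = parser.parse_args()

    sys.exit(main(opts.control_file,
                  dryrun=opts.dryrun,
                  use_s3_ws=opts.use_s3_ws,
                  save_to_s3=opts.save_to_s3,
                  log_level=opts.log_level))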