def delete_unused_measures(dry_run, path, config):
    """Delete measures that are not referenced by any scenario JSON file.

    Scans every ``*.json`` scenario file in the working directory, collects
    the measure ids each scenario uses per measure category, then deletes
    all rows in each category table whose id is not in the used set.

    :param dry_run: if truthy, roll back the deletions instead of committing
    :param path: working directory passed to cfg.initialize_config
    :param config: config file name passed to cfg.initialize_config
    :raises IOError: if no scenario json files are found in the directory
    """
    cfg.initialize_config(path, config, 'delete_orphans_log.txt')

    scenario_files = glob(os.path.join(cfg.workingdir, '*.json'))

    if not scenario_files:
        raise IOError("No scenario json files found in {}".format(path))

    # One "in use" id set per measure category (category doubles as table name).
    used_measure_ids = {
        category: set()
        for category in Scenario.MEASURE_CATEGORIES
    }

    for scenario_file in scenario_files:
        scenario = Scenario(scenario_file)
        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior here, and keeps the function importable under Python 3.
        for category, measure_ids in scenario.all_measure_ids_by_category(
        ).items():
            used_measure_ids[category].update(measure_ids)

    for category, measure_ids in used_measure_ids.items():
        # With no used ids the WHERE clause is omitted, so every row in the
        # category table is deleted (all of its measures are orphans).
        where = ''
        if measure_ids:
            where = ' WHERE id NOT IN ({})'.format(', '.join(
                str(measure_id) for measure_id in measure_ids))
        query = 'DELETE FROM "{}"{};'.format(category, where)
        logging.info("Executing `{}`".format(query))
        cfg.cur.execute(query)

    if dry_run:
        logging.info("Rolling back (dry run was requested)")
        cfg.con.rollback()
    else:
        logging.info("Committing")
        cfg.con.commit()
def delete_unused_measures(dry_run, path, config):
    """Remove orphaned measures: those no scenario JSON file references.

    Builds the set of measure ids referenced by every scenario file in the
    working directory, then deletes every other row from each measure
    category table. Rolls back instead of committing when dry_run is set.
    """
    cfg.initialize_config(path, config, 'delete_orphans_log.txt')

    scenario_files = glob(os.path.join(cfg.workingdir, '*.json'))
    if not scenario_files:
        raise IOError("No scenario json files found in {}".format(path))

    # Start with an empty "in use" set for every measure category.
    used_measure_ids = dict((category, set()) for category in Scenario.MEASURE_CATEGORIES)

    for scenario_file in scenario_files:
        by_category = Scenario(scenario_file).all_measure_ids_by_category()
        for category, measure_ids in by_category.iteritems():
            used_measure_ids[category].update(measure_ids)

    for category, measure_ids in used_measure_ids.iteritems():
        if measure_ids:
            id_list = ', '.join(str(measure_id) for measure_id in measure_ids)
            where = ' WHERE id NOT IN ({})'.format(id_list)
        else:
            # Nothing in use: every row in this category table is an orphan.
            where = ''
        query = 'DELETE FROM "{}"{};'.format(category, where)
        logging.info("Executing `{}`".format(query))
        cfg.cur.execute(query)

    if dry_run:
        logging.info("Rolling back (dry run was requested)")
        cfg.con.rollback()
    else:
        logging.info("Committing")
        cfg.con.commit()
# Esempio n. 3
# 0
def run(path, config, scenario_ids, load_demand=False, solve_demand=True, load_supply=False, solve_supply=True, load_error=False,
        export_results=True, pickle_shapes=True, save_models=True, log_name=None, api_run=False, clear_results=False):
    """Run the EnergyPATHWAYS model for each scenario in scenario_ids.

    If scenario_ids is empty, every ``*.json`` file in the working directory
    is run instead. The loaded model is stored in the module-level ``model``.
    Status emails are sent when api_run is set.

    :raises ValueError: if no scenarios are given and none are found on disk
    """
    global model
    cfg.initialize_config(path, config, log_name)
    cfg.geo.log_geo()
    shape.init_shapes(pickle_shapes)

    if not scenario_ids:
        scenario_ids = [os.path.basename(p) for p in glob.glob(os.path.join(cfg.workingdir, '*.json'))]
        if not scenario_ids:
            # Call form (not the Python-2-only `raise E, msg` statement) so
            # the module also parses under Python 3.
            raise ValueError("No scenarios specified and no .json files found in working directory.")

    # Users may have specified a scenario using the full filename, but for our purposes the 'id' of the scenario
    # is just the part before the .json
    scenario_ids = [os.path.splitext(s)[0] for s in scenario_ids]

    logging.info('Scenario run list: {}'.format(', '.join(scenario_ids)))
    for scenario_id in scenario_ids:
        scenario_start_time = time.time()
        logging.info('Starting scenario {}'.format(scenario_id))
        logging.info('Start time {}'.format(str(datetime.datetime.now()).split('.')[0]))
        if api_run:
            # FIXME: This will be broken since we changed the scenario list from a list of database ids to a list of
            # filenames. The API-related code will need to be updated before we can update the server with newer
            # model code.
            util.update_status(scenario_id, 3)
            scenario_name = util.scenario_name(scenario_id)
            subject = 'Now running: EnergyPathways scenario "%s"' % (scenario_name,)
            body = 'EnergyPathways is now running your scenario titled "%s". A scenario run generally ' \
                   'finishes within a few hours, and you will receive another email when your run is complete. ' \
                   'If more than 24 hours pass without you receiving a confirmation email, please log in to ' \
                   'https://energypathways.com to check the status of the run. ' \
                   'If the run is not complete, please reply to this email and we will investigate.' % (scenario_name,)
            send_gmail(scenario_id, subject, body)

        model = load_model(load_demand, load_supply, load_error, scenario_id, api_run)
        if not load_error:
            model.run(scenario_id,
                      solve_demand=solve_demand,
                      solve_supply=solve_supply,
                      load_demand=load_demand,
                      load_supply=load_supply,
                      export_results=export_results,
                      save_models=save_models,
                      # only the very first scenario may clear previous results
                      append_results=False if (scenario_id == scenario_ids[0] and clear_results) else True)

        if api_run:
            util.update_status(scenario_id, 4)
            subject = 'Completed: EnergyPathways scenario "%s"' % (scenario_name,)
            body = 'EnergyPathways has completed running your scenario titled "%s". ' \
                   'Please return to https://energypathways.com to view your results.' % (scenario_name,)
            send_gmail(scenario_id, subject, body)

        logging.info('EnergyPATHWAYS run for scenario_id {} successful!'.format(scenario_id))
        logging.info('Scenario calculation time {}'.format(str(datetime.timedelta(seconds=time.time() - scenario_start_time)).split('.')[0]))
    # NOTE(review): `run_start_time` is a module-level name defined outside
    # this view.
    logging.info('Total calculation time {}'.format(str(datetime.timedelta(seconds=time.time() - run_start_time)).split('.')[0]))
    logging.shutdown()
    logging.getLogger(None).handlers = [] # necessary to totally flush the logger
# Esempio n. 4
# 0
def run(scenarios, load_demand=False, solve_demand=True, load_supply=False, solve_supply=True, load_error=False, export_results=True, save_models=True, clear_results=False, subfolders=False, rio_scenario=None):
    """Run the EnergyPATHWAYS model for each scenario, optionally paired with a RIO scenario.

    If scenarios is empty, every ``*.json`` file in the working directory is
    run. rio_scenario, when given, is zipped element-wise with scenarios; the
    pair is passed to load_model only when cfg.rio_supply_run is set. The
    loaded model is stored in the module-level ``model``.

    :raises ValueError: if no scenarios are given and none are found on disk
    """
    global model
    cfg.initialize_config()
    if not subfolders:
        GeoMapper.get_instance().log_geo()

    shape.Shapes.get_instance(cfg.getParam('database_path'))

    if not scenarios:
        scenarios = [os.path.basename(p) for p in glob.glob(os.path.join(cfg.workingdir, '*.json'))]
        if not scenarios:
            # Call form (not the Python-2-only `raise E, msg` statement) so
            # the module also parses under Python 3.
            raise ValueError("No scenarios specified and no .json files found in working directory.")

    scenarios = util.ensure_iterable(scenarios)
    # The scenario 'id' is the filename without its .json extension.
    scenarios = [os.path.splitext(s)[0] for s in scenarios]
    rio_scenario = [None]*len(scenarios) if rio_scenario is None or not len(rio_scenario) else util.ensure_iterable(rio_scenario)

    combined_scenarios = zip(scenarios, rio_scenario)
    logging.info('Scenario run list: {}'.format(', '.join(scenarios)))
    # Loop variable renamed so it no longer shadows the `rio_scenario` list.
    for scenario, paired_rio_scenario in combined_scenarios:
        if subfolders:
            # Restart logging and config per scenario so each run gets its
            # own log destination.
            logging.shutdown()
            logging.getLogger(None).handlers = []
            cfg.initialize_config()
            logging.info('SUBFOLDERS ARE IN USE')
            GeoMapper.get_instance().log_geo()
        scenario_start_time = time.time()
        logging.info('Starting scenario {}'.format(scenario))
        logging.info('Start time {}'.format(str(datetime.datetime.now()).split('.')[0]))
        if cfg.rio_supply_run:
            model = load_model(load_demand, load_supply, load_error, scenario, paired_rio_scenario)
        else:
            model = load_model(load_demand, load_supply, load_error, scenario, None)
        if not load_error:
            model.run(scenario,
                      solve_demand=solve_demand,
                      solve_supply=solve_supply,
                      load_demand=load_demand,
                      load_supply=load_supply,
                      export_results=export_results,
                      save_models=save_models,
                      # only the very first scenario may clear previous results
                      append_results=False if (scenario == scenarios[0] and clear_results) else True,
                      rio_scenario=paired_rio_scenario)

        logging.info('EnergyPATHWAYS run for scenario {} successful!'.format(scenario))
        logging.info('Scenario calculation time {}'.format(str(datetime.timedelta(seconds=time.time() - scenario_start_time)).split('.')[0]))
    # NOTE(review): `run_start_time` is a module-level name defined outside
    # this view.
    logging.info('Total calculation time {}'.format(str(datetime.timedelta(seconds=time.time() - run_start_time)).split('.')[0]))
    logging.shutdown()
    logging.getLogger(None).handlers = [] # necessary to totally flush the logger
import energyPATHWAYS.shape as shape
from energyPATHWAYS.outputs import Output
import csv
import time
import datetime
import logging
import cProfile
import traceback
import pandas as pd

# set up a dummy model
# NOTE(review): relies on `os`, `cfg`, `util` and `PathwaysModel` being
# imported earlier in the file; they are not in the visible import block.
path = os.getcwd()
config = 'config.INI'
scenario_id = 1

cfg.initialize_config(path, config, _log_name='log.log')
cfg.primary_geography = 'intersection_id'

# Build the model object without running it; only the demand side is used below.
model = PathwaysModel(scenario_id, api_run=False)
# model.run(scenario_id, solve_demand=False, solve_supply=False, save_models=False, append_results=False)

demand = model.demand
demand.add_drivers()

# Compute the next free ids for the GeographyMapKeys / GeographyMap tables so
# new rows can be appended without colliding with existing ones.
existing_geo_map_key_ids, existing_geo_map_key_names = zip(*util.sql_read_table('GeographyMapKeys'))
next_map_key_id = max(existing_geo_map_key_ids)+1
next_geo_map_id = max(util.sql_read_table('GeographyMap', 'id'))+1

###############################################
# user inputs
driver_ids_to_make_map_keys = [
# Esempio n. 6
# 0
def run(path, config, scenario, load_demand=False, solve_demand=True,
        load_supply=False, solve_supply=True, load_error=False,
        export_results=True, pickle_shapes=True, save_models=True,
        log_name=None, api_run=False, clear_results=False):
    """Run the EnergyPATHWAYS model for every scenario id parsed from `scenario`.

    Sends status emails when api_run is set and logs per-scenario and total
    calculation times. The loaded model is stored in the module-level `model`.
    """
    global model
    cfg.initialize_config(path, config, log_name)
    cfg.geo.log_geo()
    shape.init_shapes(pickle_shapes)

    scenario_ids = parse_scenario_ids(scenario)
    logging.info('Scenario_ids run list = {}'.format(scenario_ids))
    for scenario_id in scenario_ids:
        started_at = time.time()
        logging.info('Starting scenario_id {}'.format(scenario_id))
        if api_run:
            util.update_status(scenario_id, 3)
            scenario_name = util.scenario_name(scenario_id)
            subject = 'Now running: EnergyPathways scenario "%s"' % (scenario_name, )
            body = ('EnergyPathways is now running your scenario titled "%s". A scenario run generally '
                    'finishes within a few hours, and you will receive another email when your run is complete. '
                    'If more than 24 hours pass without you receiving a confirmation email, please log in to '
                    'https://energypathways.com to check the status of the run. '
                    'If the run is not complete, please reply to this email and we will investigate.' % (scenario_name,))
            send_gmail(scenario_id, subject, body)

        model = load_model(load_demand, load_supply, load_error, scenario_id, api_run)
        if not load_error:
            # Previously saved results are cleared only for the first scenario.
            append = not (scenario_id == scenario_ids[0] and clear_results)
            model.run(scenario_id,
                      solve_demand=solve_demand,
                      solve_supply=solve_supply,
                      load_demand=load_demand,
                      load_supply=load_supply,
                      export_results=export_results,
                      save_models=save_models,
                      append_results=append)

        if api_run:
            util.update_status(scenario_id, 4)
            subject = 'Completed: EnergyPathways scenario "%s"' % (scenario_name, )
            body = ('EnergyPathways has completed running your scenario titled "%s". '
                    'Please return to https://energypathways.com to view your results.' % (scenario_name,))
            send_gmail(scenario_id, subject, body)

        logging.info('EnergyPATHWAYS run for scenario_id {} successful!'.format(scenario_id))
        logging.info('Scenario calculation time {} seconds'.format(time.time() - started_at))
    logging.info('Total calculation time {} seconds'.format(time.time() - run_start_time))
    logging.shutdown()
    logging.getLogger(None).handlers = []  # necessary to totally flush the logger
# Esempio n. 7
# 0
def run(path,
        config,
        scenario_ids,
        load_demand=False,
        solve_demand=True,
        load_supply=False,
        solve_supply=True,
        load_error=False,
        export_results=True,
        pickle_shapes=True,
        save_models=True,
        log_name=None,
        api_run=False,
        clear_results=False):
    """Run the EnergyPATHWAYS model for each scenario in scenario_ids.

    If scenario_ids is empty, every ``*.json`` file in the working directory
    is run instead. The loaded model is stored in the module-level ``model``.
    Status emails are sent when api_run is set.

    :raises ValueError: if no scenarios are given and none are found on disk
    """
    global model
    cfg.initialize_config(path, config, log_name)
    cfg.geo.log_geo()
    shape.init_shapes(pickle_shapes)

    if not scenario_ids:
        scenario_ids = [
            os.path.basename(p)
            for p in glob.glob(os.path.join(cfg.workingdir, '*.json'))
        ]
        if not scenario_ids:
            # Call form (not the Python-2-only `raise E, msg` statement) so
            # the module also parses under Python 3.
            raise ValueError("No scenarios specified and no .json files found in working directory.")

    # Users may have specified a scenario using the full filename, but for our purposes the 'id' of the scenario
    # is just the part before the .json
    scenario_ids = [os.path.splitext(s)[0] for s in scenario_ids]

    logging.info('Scenario run list: {}'.format(', '.join(scenario_ids)))
    for scenario_id in scenario_ids:
        scenario_start_time = time.time()
        logging.info('Starting scenario {}'.format(scenario_id))
        logging.info('Start time {}'.format(
            str(datetime.datetime.now()).split('.')[0]))
        if api_run:
            # FIXME: This will be broken since we changed the scenario list from a list of database ids to a list of
            # filenames. The API-related code will need to be updated before we can update the server with newer
            # model code.
            util.update_status(scenario_id, 3)
            scenario_name = util.scenario_name(scenario_id)
            subject = 'Now running: EnergyPathways scenario "%s"' % (
                scenario_name, )
            body = 'EnergyPathways is now running your scenario titled "%s". A scenario run generally ' \
                   'finishes within a few hours, and you will receive another email when your run is complete. ' \
                   'If more than 24 hours pass without you receiving a confirmation email, please log in to ' \
                   'https://energypathways.com to check the status of the run. ' \
                   'If the run is not complete, please reply to this email and we will investigate.' % (scenario_name,)
            send_gmail(scenario_id, subject, body)

        model = load_model(load_demand, load_supply, load_error, scenario_id,
                           api_run)
        if not load_error:
            model.run(
                scenario_id,
                solve_demand=solve_demand,
                solve_supply=solve_supply,
                load_demand=load_demand,
                load_supply=load_supply,
                export_results=export_results,
                save_models=save_models,
                # only the very first scenario may clear previous results
                append_results=False if
                (scenario_id == scenario_ids[0] and clear_results) else True)

        if api_run:
            util.update_status(scenario_id, 4)
            subject = 'Completed: EnergyPathways scenario "%s"' % (
                scenario_name, )
            body = 'EnergyPathways has completed running your scenario titled "%s". ' \
                   'Please return to https://energypathways.com to view your results.' % (scenario_name,)
            send_gmail(scenario_id, subject, body)

        logging.info(
            'EnergyPATHWAYS run for scenario_id {} successful!'.format(
                scenario_id))
        logging.info('Scenario calculation time {}'.format(
            str(datetime.timedelta(seconds=time.time() -
                                   scenario_start_time)).split('.')[0]))
    # NOTE(review): `run_start_time` is a module-level name defined outside
    # this view.
    logging.info('Total calculation time {}'.format(
        str(datetime.timedelta(seconds=time.time() -
                               run_start_time)).split('.')[0]))
    logging.shutdown()
    logging.getLogger(None).handlers = [
    ]  # necessary to totally flush the logger
# Esempio n. 8
# 0
import energyPATHWAYS.shape as shape
from energyPATHWAYS.outputs import Output
import csv
import time
import datetime
import logging
import cProfile
import traceback
import pandas as pd

# set up a dummy model
# NOTE(review): relies on `os`, `cfg`, `util` and `PathwaysModel` being
# imported earlier in the file; they are not in the visible import block.
path = os.getcwd()
config = 'config.INI'
scenario_id = 1

cfg.initialize_config(path, config, _log_name='log.log')
cfg.primary_geography = 'intersection_id'

# Build the model object without running it; only the demand side is used below.
model = PathwaysModel(scenario_id, api_run=False)
# model.run(scenario_id, solve_demand=False, solve_supply=False, save_models=False, append_results=False)

demand = model.demand
demand.add_drivers()

# Compute the next free ids for the GeographyMapKeys / GeographyMap tables so
# new rows can be appended without colliding with existing ones.
existing_geo_map_key_ids, existing_geo_map_key_names = zip(
    *util.sql_read_table('GeographyMapKeys'))
next_map_key_id = max(existing_geo_map_key_ids) + 1
next_geo_map_id = max(util.sql_read_table('GeographyMap', 'id')) + 1

###############################################
# user inputs