def reset_input_validation(conn, scenario_id):
    """
    Clear any prior validation output for a scenario and flag it as
    not yet validated.

    :param conn: database connection
    :param scenario_id: scenario_id
    :return:
    """
    cursor = conn.cursor()

    # Drop old validation errors for this scenario
    delete_sql = """
        DELETE FROM status_validation
        WHERE scenario_id = ?;
        """
    spin_on_database_lock(conn=conn, cursor=cursor, sql=delete_sql,
                          data=(scenario_id,), many=False)

    # Mark the scenario as "not validated"
    reset_sql = """
        UPDATE scenarios
        SET validation_status_id = 0
        WHERE scenario_id = ?;
        """
    spin_on_database_lock(conn=conn, cursor=cursor, sql=reset_sql,
                          data=(scenario_id,), many=False)
def process_results(db, c, scenario_id, subscenarios, quiet):
    """
    Aggregate capacity costs by "to_zone" load zone, and break out into
    spinup_or_lookahead.

    :param db: database connection
    :param c: database cursor
    :param subscenarios: subscenario IDs for the scenario
    :param quiet: suppress status printing if True
    :return:
    """
    if not quiet:
        print("aggregate tx capacity costs by load zone")

    # Delete old results
    delete_sql = """
        DELETE FROM results_transmission_costs_capacity_agg
        WHERE scenario_id = ?
        """
    spin_on_database_lock(conn=db, cursor=c, sql=delete_sql,
                          data=(scenario_id,), many=False)

    # Insert new results
    insert_sql = """
        INSERT INTO results_transmission_costs_capacity_agg
        (scenario_id, load_zone, period, subproblem_id, stage_id,
        spinup_or_lookahead, fraction_of_hours_in_subproblem, capacity_cost)

        SELECT scenario_id, load_zone, period, subproblem_id, stage_id,
        spinup_or_lookahead, fraction_of_hours_in_subproblem,
        (capacity_cost * fraction_of_hours_in_subproblem) AS capacity_cost
        FROM spinup_or_lookahead_ratios

        -- Add load_zones
        LEFT JOIN
        (SELECT scenario_id, load_zone
        FROM inputs_geography_load_zones
        INNER JOIN
        (SELECT scenario_id, load_zone_scenario_id FROM scenarios
        WHERE scenario_id = ?) AS scen_tbl
        USING (load_zone_scenario_id)
        ) AS lz_tbl
        USING (scenario_id)

        -- Now that we have all scenario_id, subproblem_id, stage_id, period,
        -- load_zone, and spinup_or_lookahead combinations add the tx capacity
        -- costs which will be derated by the fraction_of_hours_in_subproblem
        INNER JOIN
        (SELECT scenario_id, subproblem_id, stage_id, period,
        load_zone_to AS load_zone, SUM(capacity_cost) AS capacity_cost
        FROM results_transmission_costs_capacity
        GROUP BY scenario_id, subproblem_id, stage_id, period, load_zone
        ) AS cap_table
        USING (scenario_id, subproblem_id, stage_id, period, load_zone)
        ;"""
    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql,
                          data=(scenario_id,), many=False)
def update_scenario_single_column(io, c, scenario_name, column_name, column_value):
    """
    Update a single column of the scenarios table for a given scenario.

    :param io: database connection
    :param c: database cursor
    :param scenario_name: name of the scenario to update
    :param column_name: the scenarios-table column to set
    :param column_value: the value to set; None results in SQL NULL
    :return:
    """
    # BUG FIX: previously a None value was replaced with the string 'NULL',
    # which -- because the query is parameterized -- stored the literal
    # text "NULL" in the column rather than an SQL NULL. The sqlite3
    # driver binds Python None as a true NULL, so pass it through as-is.

    # Update the column value for the scenario
    # (the column name can't be bound as a parameter, so it is formatted in)
    update_sql = """
        UPDATE scenarios
        SET {} = ?
        WHERE scenario_name = ?;
        """.format(column_name)
    spin_on_database_lock(conn=io, cursor=c, sql=update_sql,
                          data=(column_value, scenario_name), many=False)
def delete_scenario_results(conn, scenario_id):
    """
    Delete a scenario's rows from every results table.

    :param conn: database connection
    :param scenario_id: ID of the scenario whose results to delete
    :return:
    """
    cursor = conn.cursor()

    # Find every table whose name marks it as a results table
    table_rows = cursor.execute(
        "SELECT name FROM sqlite_master WHERE type='table';"
    ).fetchall()
    results_tables = [
        name for (name,) in table_rows if name.startswith("results")
    ]

    # Clear the scenario's rows from each results table in turn
    for results_tbl in results_tables:
        del_sql = """
            DELETE FROM {} WHERE scenario_id = ?;
            """.format(results_tbl)
        spin_on_database_lock(conn=conn, cursor=cursor, sql=del_sql,
                              data=(scenario_id,), many=False)
def generic_insert_subscenario_info(
    conn, subscenario, table, subscenario_data, project_flag
):
    """
    Insert the ID/name/description info rows for a subscenario.

    :param conn: the database connection object
    :param subscenario: str
    :param table: str
    :param subscenario_data: list of tuples
    :param project_flag: boolean

    Generic function that loads subscenario info for a particular
    subscenario. The subscenario_data are given as lists of tuples.
    """
    cursor = conn.cursor()

    # Project-level subscenarios carry an extra leading "project" column
    if project_flag:
        subs_sql = """
            INSERT INTO subscenarios_{}
            (project, {}, name, description)
            VALUES (?, ?, ?, ?);
            """.format(table, subscenario)
    else:
        subs_sql = """
            INSERT INTO subscenarios_{}
            ({}, name, description)
            VALUES (?, ?, ?);
            """.format(table, subscenario)

    spin_on_database_lock(conn=conn, cursor=cursor, sql=subs_sql,
                          data=subscenario_data)

    cursor.close()
def import_results_into_database(scenario_id, subproblem, stage, c, db, results_directory, quiet):
    """
    Import simple-ELCC system PRM results from prm_elcc_simple.csv.

    :param scenario_id:
    :param c:
    :param db:
    :param results_directory:
    :param quiet:
    :return:
    """
    if not quiet:
        print("system prm simple elcc")

    # Delete prior results and create temporary import table for ordering
    setup_results_import(
        conn=db, cursor=c,
        table="results_system_prm",
        scenario_id=scenario_id, subproblem=subproblem, stage=stage,
    )

    # Read the CSV and build the rows for the temporary table
    results = []
    csv_path = os.path.join(results_directory, "prm_elcc_simple.csv")
    with open(csv_path, "r") as elcc_file:
        reader = csv.reader(elcc_file)
        next(reader)  # skip header
        for row in reader:
            prm_zone, period, elcc = row[0], row[1], row[2]
            results.append(
                (scenario_id, prm_zone, period, subproblem, stage, elcc)
            )

    insert_temp_sql = """
        INSERT INTO temp_results_system_prm{}
        (scenario_id, prm_zone, period, subproblem_id, stage_id,
        elcc_simple_mw)
        VALUES (?, ?, ?, ?, ?, ?);""".format(scenario_id)
    spin_on_database_lock(conn=db, cursor=c, sql=insert_temp_sql,
                          data=results)

    # Insert sorted results into permanent results table
    insert_sql = """
        INSERT INTO results_system_prm
        (scenario_id, prm_zone, period, subproblem_id, stage_id,
        elcc_simple_mw)
        SELECT scenario_id, prm_zone, period, subproblem_id, stage_id,
        elcc_simple_mw
        FROM temp_results_system_prm{}
        ORDER BY scenario_id, prm_zone, period, subproblem_id, stage_id;
        """.format(scenario_id)
    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=(),
                          many=False)
def process_module_specific_results(db, c, scenario_id, subscenarios, quiet):
    """
    Aggregate scheduled hydro curtailment into its own results table.

    :param db: database connection
    :param c: database cursor
    :param subscenarios: subscenario IDs for the scenario
    :param quiet: suppress status printing if True
    :return:
    """
    if not quiet:
        print("aggregate hydro curtailment")

    # Clear out previously aggregated hydro curtailment results
    delete_sql = """
        DELETE FROM results_project_curtailment_hydro
        WHERE scenario_id = ?
        """
    spin_on_database_lock(conn=db, cursor=c, sql=delete_sql,
                          data=(scenario_id,), many=False)

    # Re-aggregate scheduled curtailment from the dispatch results, joining
    # in month/hour-of-day info from the scenario's temporal inputs
    insert_sql = """
        INSERT INTO results_project_curtailment_hydro
        (scenario_id, subproblem_id, stage_id, period, timepoint,
        timepoint_weight, number_of_hours_in_timepoint, month, hour_of_day,
        load_zone, scheduled_curtailment_mw)
        SELECT scenario_id, subproblem_id, stage_id, period, timepoint,
        timepoint_weight, number_of_hours_in_timepoint, month, hour_of_day,
        load_zone, scheduled_curtailment_mw
        FROM (
            SELECT scenario_id, subproblem_id, stage_id, period, timepoint,
            timepoint_weight, number_of_hours_in_timepoint, load_zone,
            sum(scheduled_curtailment_mw) AS scheduled_curtailment_mw
            FROM results_project_dispatch
            WHERE operational_type = 'gen_hydro'
            GROUP BY scenario_id, subproblem_id, stage_id, timepoint,
            load_zone
        ) as agg_curtailment_tbl
        JOIN (
            SELECT subproblem_id, stage_id, timepoint, month, hour_of_day
            FROM inputs_temporal
            WHERE temporal_scenario_id = (
                SELECT temporal_scenario_id
                FROM scenarios
                WHERE scenario_id = ?
                )
        ) as tmp_info_tbl
        USING (subproblem_id, stage_id, timepoint)
        WHERE scenario_id = ?
        ORDER BY subproblem_id, stage_id, load_zone, timepoint;
        """
    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql,
                          data=(scenario_id, scenario_id), many=False)
def record_end_time(db_path, scenario, process_id, end_time):
    """
    Record the scenario run's end time.

    :param db_path: path to the database file
    :param scenario: the scenario name
    :param process_id: the run's process ID
    :param end_time: the end time to record
    :return:
    """
    conn = connect_to_database(db_path=db_path)
    cursor = conn.cursor()

    end_time_sql = """
        UPDATE scenarios
        SET run_end_time = ?
        WHERE scenario_name = ?
        AND run_process_id = ?;
        """
    spin_on_database_lock(conn=conn, cursor=cursor, sql=end_time_sql,
                          data=(end_time, scenario, process_id), many=False)

    conn.close()
def remove_from_queue_if_in_queue(db_path, scenario, queue_order_id):
    """
    :param db_path:
    :param scenario:
    :param queue_order_id:
    :return:

    If the run was launched from the queue, take the scenario off the queue.
    """
    conn = connect_to_database(db_path=db_path)
    cursor = conn.cursor()

    # Only scenarios that were actually queued have a queue_order_id
    if queue_order_id is not None:
        print("Removing scenario ID {} from queue".format(scenario))
        dequeue_sql = """
            UPDATE scenarios SET queue_order_id = NULL
            WHERE scenario_name = ?
            """
        spin_on_database_lock(conn=conn, cursor=cursor, sql=dequeue_sql,
                              data=(scenario,), many=False)

    conn.close()
def update_prj_zone_column(conn, scenario_id, subscenarios, subscenario,
                           subsc_tbl, prj_tbl, col):
    """
    Update a column of a project table based on the scenario's relevant
    subscenario ID.

    :param conn: database connection
    :param scenario_id:
    :param subscenarios: object holding the scenario's subscenario IDs
    :param subscenario: name of the subscenario ID column
    :param subsc_tbl: inputs table to read the project-zone mapping from
    :param prj_tbl: results table to update
    :param col: the zone column to set
    """
    c = conn.cursor()

    # Determine the zones for each project
    # Table/column names can't be bound as parameters, so they are formatted
    # in; the subscenario ID *value* is now bound as a parameter instead of
    # being interpolated into the SQL string
    project_zones = c.execute(
        """SELECT project, {}
        FROM {}
        WHERE {} = ?""".format(col, subsc_tbl, subscenario),
        (getattr(subscenarios, subscenario.upper()),)
    ).fetchall()

    # One (zone, scenario_id, project) tuple per project to update
    updates = [(zone, scenario_id, prj) for (prj, zone) in project_zones]

    sql = """
        UPDATE {}
        SET {} = ?
        WHERE scenario_id = ?
        AND project = ?;
        """.format(prj_tbl, col)
    spin_on_database_lock(conn=conn, cursor=c, sql=sql, data=updates)
def process_results(db, c, scenario_id, subscenarios, quiet):
    """
    Flag spinup/lookahead timepoints in the listed results tables, based on
    the scenario's temporal inputs.

    :param db: database connection
    :param c: database cursor
    :param subscenarios: subscenario IDs for the scenario
    :param quiet: suppress status printing if True
    :return:
    """
    if not quiet:
        print("add spinup_or_lookahead flag")

    # Update tables with spinup_or_lookahead_flag
    tables_to_update = [
        "results_project_availability_endogenous",
        "results_project_dispatch",
        "results_project_curtailment_variable",
        "results_project_curtailment_hydro",
        "results_project_dispatch_by_technology",
        "results_project_lf_reserves_up",
        "results_project_lf_reserves_down",
        "results_project_regulation_up",
        "results_project_regulation_down",
        "results_project_frequency_response",
        "results_project_spinning_reserves",
        "results_project_costs_operations",
        "results_project_fuel_burn",
        "results_project_carbon_emissions",
        "results_project_rps",
        "results_transmission_imports_exports",
        "results_transmission_operations",
        "results_transmission_hurdle_costs",
        "results_transmission_carbon_emissions",
        "results_system_load_balance",
        "results_system_lf_reserves_up_balance",
        "results_system_lf_reserves_down_balance",
        "results_system_regulation_up_balance",
        "results_system_regulation_down_balance",
        "results_system_frequency_response_balance",
        "results_system_frequency_response_partial_balance",
        "results_system_spinning_reserves_balance"
    ]

    for tbl in tables_to_update:
        if not quiet:
            print("... {}".format(tbl))
        # BUG FIX: the SQL previously hard-coded results_project_dispatch
        # (with no format placeholders), so every loop iteration updated
        # only that one table; the table name is now formatted in so each
        # table in the list actually gets its flag set
        sql = """
            UPDATE {}
            SET spinup_or_lookahead = (
            SELECT spinup_or_lookahead
            FROM inputs_temporal
            WHERE temporal_scenario_id = (
                SELECT temporal_scenario_id 
                FROM scenarios 
                WHERE scenario_id = ?
                )
            AND {}.subproblem_id = inputs_temporal.subproblem_id
            AND {}.stage_id = inputs_temporal.stage_id
            AND {}.timepoint = inputs_temporal.timepoint
            );
            """.format(tbl, tbl, tbl, tbl)

        spin_on_database_lock(conn=db, cursor=c, sql=sql,
                              data=(scenario_id,), many=False)
def update_validation_status(conn, scenario_id):
    """
    Set the scenario's validation status: 2 if any validation errors were
    recorded for it, 1 otherwise (validated cleanly).

    :param conn: database connection
    :param scenario_id:
    :return:
    """
    c = conn.cursor()

    # Check whether any validation errors were recorded for this scenario
    # (the scenario_id is bound as a parameter rather than formatted into
    # the SQL string)
    validations = c.execute(
        """SELECT scenario_id
        FROM status_validation
        WHERE scenario_id = ?""",
        (scenario_id,)
    ).fetchall()

    status = 2 if validations else 1

    sql = """
        UPDATE scenarios
        SET validation_status_id = ?
        WHERE scenario_id = ?;
        """
    spin_on_database_lock(conn=conn, cursor=c, sql=sql,
                          data=(status, scenario_id), many=False)
def generic_delete_subscenario(
    conn, subscenario, subscenario_id, project, subscenario_table,
    input_tables, project_flag
):
    """
    :param conn:
    :param subscenario: str
    :param subscenario_id: int
    :param project: str
    :param subscenario_table: str
    :param input_tables: list of strings
    :param project_flag: boolean

    Delete prior data for a particular subscenario and subscenario ID.
    Some subscenarios have more than one input table associated with them,
    so we iterate over those. Here, we assume the input tables are in the
    correct order to avoid FOREIGN KEY errors.
    """
    cursor = conn.cursor()

    # Build a shared WHERE clause and bound data; project-level
    # subscenarios filter on the project as well
    if project_flag:
        where_sql = "WHERE project = ? AND {} = ?".format(subscenario)
        delete_data = (project, subscenario_id,)
    else:
        where_sql = "WHERE {} = ?".format(subscenario)
        delete_data = (subscenario_id,)

    del_inputs_sql_list = [
        "DELETE FROM {} {};".format(input_tbl, where_sql)
        for input_tbl in input_tables
    ]
    del_subscenario_sql = "DELETE FROM {} {};".format(
        subscenario_table, where_sql
    )

    # Inputs first (to respect FOREIGN KEYs), then the subscenario info
    for del_inputs_sql in del_inputs_sql_list:
        spin_on_database_lock(conn=conn, cursor=cursor, sql=del_inputs_sql,
                              data=delete_data, many=False)
    spin_on_database_lock(conn=conn, cursor=cursor,
                          sql=del_subscenario_sql, data=delete_data,
                          many=False)

    cursor.close()
def generic_insert_subscenario_data(
    conn, subscenario, table, inputs_data, project_flag, csv_headers=None,
):
    """
    :param conn: the database connection object
    :param subscenario: str
    :param table: str
    :param inputs_data: list of tuples
    :param project_flag: boolean
    :param csv_headers: list of strings

    Generic function that loads inputs data for a particular subscenario.
    The inputs_data are given as lists of tuples. If csv_headers are
    passed, this function also validates that they match the columns of
    the table into which we're inserting.
    """
    cursor = conn.cursor()

    # Query the target table (no rows needed) just to learn its columns
    table_query = cursor.execute(
        """SELECT * FROM inputs_{};""".format(table)
    )
    column_names = [col[0] for col in table_query.description]

    # If CSV headers were passed, they must line up exactly with the
    # table's columns once prefixed with the project/subscenario columns
    if csv_headers is not None:
        prefix = ["project", subscenario] if project_flag else [subscenario]
        headers_for_validation = prefix + csv_headers

        if headers_for_validation != column_names:
            raise AssertionError(
                """
                Headers and table column names don't match.
                Column names are {}.
                Header names are {}.
                Please ensure that your header names are the same as the 
                database column names.
                """.format(column_names, headers_for_validation)
            )

    # Build the insert statement from the column names
    column_string = ", ".join(column_names)
    values_string = ", ".join(["?"] * len(column_names))
    inputs_sql = """
        INSERT INTO inputs_{} ({}) VALUES ({});
        """.format(table, column_string, values_string)

    spin_on_database_lock(conn=conn, cursor=cursor, sql=inputs_sql,
                          data=inputs_data)

    cursor.close()
def import_results_into_database(scenario_id, subproblem, stage, c, db, results_directory, quiet):
    """
    Import ELCC-surface system PRM results from prm_elcc_surface.csv.

    :param scenario_id:
    :param c:
    :param db:
    :param results_directory:
    :param quiet:
    :return:
    """
    if not quiet:
        print("system prm elcc surface")

    # PRM contribution from the ELCC surface
    # Prior results should have already been cleared by
    # system.prm.aggregate_project_simple_prm_contribution,
    # then elcc_simple_mw imported
    # Update results_system_prm with NULL for surface contribution just in
    # case (instead of clearing prior results)
    nullify_sql = """
        UPDATE results_system_prm
        SET elcc_surface_mw = NULL
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """
    spin_on_database_lock(
        conn=db, cursor=c, sql=nullify_sql,
        data=(scenario_id, subproblem, stage), many=False,
    )

    # Read the surface contributions and build the update tuples
    results = []
    surface_path = os.path.join(results_directory, "prm_elcc_surface.csv")
    with open(surface_path, "r") as surface_file:
        reader = csv.reader(surface_file)
        next(reader)  # skip header
        for row in reader:
            prm_zone, period, elcc = row[0], row[1], row[2]
            results.append(
                (elcc, scenario_id, prm_zone, period, subproblem, stage)
            )

    update_sql = """
        UPDATE results_system_prm
        SET elcc_surface_mw = ?
        WHERE scenario_id = ?
        AND prm_zone = ?
        AND period = ?
        AND subproblem_id = ?
        AND stage_id = ?
        """
    spin_on_database_lock(conn=db, cursor=c, sql=update_sql, data=results)
def create_scenario(io, c, column_values_dict):
    """
    Flexible way to insert a scenario that does not require specifying
    values for all columns. Columns can be skipped entirely or None can be
    specified as their value (in which case this function will insert a
    NULL value for that column). The scenario_id column is auto increment,
    so should not be inserted directly. If the scenario_id is specified,
    it will be skipped (not inserted) and a warning will be raised.

    :param io: the database connection object
    :param c: database cursor object
    :param column_values_dict: dictionary containing the scenarios table
        column names to populate as keys and the scenarios table column
        values as the dictionary values
    :return: None
    """
    # BUG FIX: the previous index-based string building treated the *last
    # dict key* specially; if that key was 'scenario_id' (which is
    # skipped), the generated SQL ended with a trailing comma and was
    # invalid. Collecting the kept columns first and joining them avoids
    # that, and also avoids the O(n^2) list(...).index(...) calls.

    # TODO: add a check that the column names are correct and values are
    #  integers
    column_names = []
    column_values_data = []
    for column_name, column_value in column_values_dict.items():
        if column_name == 'scenario_id':
            warnings.warn(
                "The scenario_id is an AUTOINCREMENT column and should not be "
                "inserted directly. \n"
                "Your scenario will be assigned a scenario_id automatically.\n"
                "Remove the 'scenario_id' key from the dictionary to avoid "
                "seeing this warning again.")
        else:
            column_names.append(column_name)
            column_values_data.append(column_value)

    sql = """
        INSERT INTO scenarios ({}) VALUES ({});
        """.format(
        ", ".join(column_names),
        ", ".join(["?"] * len(column_names))
    )

    spin_on_database_lock(conn=io, cursor=c, sql=sql,
                          data=tuple(column_values_data), many=False)
def write_validation_to_database(
    conn, scenario_id, subproblem_id, stage_id, gridpath_module, db_table,
    severity, errors,
):
    """
    Write all validations in the `errors` list and the associated
    meta-data (scenario_id, subproblem, stage, etc.) to the
    status_validation database table.

    :param conn: The database connection
    :param scenario_id: The scenario ID of the scenario that is being
        validated
    :param subproblem_id: The active subproblem ID that is being validated
    :param stage_id: The active stage ID that is being validated
    :param gridpath_module: The gridpath_module that performed the
        validation
    :param db_table: The database table that contains the validation errors
    :param severity: The severity of the validation error
    :param errors: The list of validation errors to be written to
        database, with each error a string describing the issue
    :return: True if anything was written, False otherwise
    """
    # Nothing to write
    if not errors:
        return False

    # Shared timestamp for all rows (ISO8601 string, truncated to ms)
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]

    # One row per error, all sharing the same meta-data
    rows = [
        (scenario_id, subproblem_id, stage_id, gridpath_module, db_table,
         severity, error, timestamp)
        for error in errors
    ]

    cursor = conn.cursor()
    sql = """
        INSERT INTO status_validation
        (scenario_id, subproblem_id, stage_id,
        gridpath_module, db_table, severity, description, time_stamp)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?);
        """
    spin_on_database_lock(conn, cursor, sql, rows)
    cursor.close()

    return True
def process_results(db, c, scenario_id, subscenarios, quiet):
    """
    Write each project's carbon cap zone into the listed results tables;
    projects without a zone get the sentinel 'no_carbon_cap' (NULLs can't
    be joined on later).

    :param db: database connection
    :param c: database cursor
    :param subscenarios: subscenario IDs for the scenario
    :param quiet: suppress status printing if True
    :return:
    """
    if not quiet:
        print("update carbon cap zones")

    # Figure out carbon_cap zone for each project; the subscenario ID is
    # bound as a parameter rather than formatted into the SQL string
    project_zones = c.execute(
        """SELECT project, carbon_cap_zone
        FROM inputs_project_carbon_cap_zones
        WHERE project_carbon_cap_zone_scenario_id = ?""",
        (subscenarios.PROJECT_CARBON_CAP_ZONE_SCENARIO_ID,)
    ).fetchall()

    # Update tables with carbon cap zone
    tables_to_update = [
        "results_project_capacity",
        "results_project_dispatch",
        "results_project_fuel_burn",
        "results_project_frequency_response",
        "results_project_lf_reserves_up",
        "results_project_lf_reserves_down",
        "results_project_regulation_up",
        "results_project_regulation_down",
        "results_project_costs_capacity",
        "results_project_costs_operations",
        "results_project_carbon_emissions",
        "results_project_elcc_simple",
        "results_project_elcc_surface"
    ]

    updates = [(zone, scenario_id, prj) for (prj, zone) in project_zones]

    for tbl in tables_to_update:
        sql = """
            UPDATE {}
            SET carbon_cap_zone = ?
            WHERE scenario_id = ?
            AND project = ?;
            """.format(tbl)
        spin_on_database_lock(conn=db, cursor=c, sql=sql, data=updates)

    # Set carbon_cap_zone to 'no_carbon_cap' for all other projects
    # This helps for later joins (can't join on NULL values)
    for tbl in tables_to_update:
        no_cc_sql = """
            UPDATE {}
            SET carbon_cap_zone = 'no_carbon_cap'
            WHERE scenario_id = ?
            AND carbon_cap_zone IS NULL;
            """.format(tbl)
        spin_on_database_lock(conn=db, cursor=c, sql=no_cc_sql,
                              data=(scenario_id,), many=False)
def remove_scenario_from_queue(db_path, scenario_id):
    """
    Take a scenario off the run queue and reset its run status.

    :param db_path: path to the database file
    :param scenario_id: ID of the scenario to dequeue
    :return:
    """
    conn = connect_to_database(db_path=db_path)
    c = conn.cursor()

    sql = """
        UPDATE scenarios
        SET queue_order_id = NULL,
        run_status_id = 0
        WHERE scenario_id = ?;
        """
    spin_on_database_lock(conn=conn, cursor=c, sql=sql,
                          data=(scenario_id,), many=False)

    # Close the connection opened in this function (it was previously
    # leaked; sibling functions such as record_end_time close theirs)
    conn.close()
def exit_gracefully():
    """
    On exit, clear the queue_order_id for all scenarios so nothing is left
    marked as queued.
    """
    print('Exiting gracefully')

    args = sys.argv[1:]
    parsed_args = parse_arguments(args)

    conn = connect_to_database(db_path=parsed_args.database)
    c = conn.cursor()

    sql = """
        UPDATE scenarios
        SET queue_order_id = NULL;
        """
    # NOTE: a stray conn.commit() used to precede this call; it was a no-op
    # (no statements had been executed yet) -- spin_on_database_lock
    # handles committing the UPDATE itself, as in the sibling functions
    spin_on_database_lock(conn=conn, cursor=c, sql=sql, data=(),
                          many=False)

    # Close the connection opened in this function
    conn.close()
def repopulate_tables(
    conn, project_flag, subscenario, subscenario_id, project, base_table,
    base_subscenario, scenario_reupdate_tuples, base_subscenario_ids_str,
    base_subscenario_ids_data
):
    """
    :param conn:
    :param project_flag: boolean
    :param subscenario: str
    :param subscenario_id: int
    :param project: str
    :param base_table: str
    :param base_subscenario: int
    :param scenario_reupdate_tuples: list of tuples
    :param base_subscenario_ids_str: str
    :param base_subscenario_ids_data: tuple

    If project-level subscenario, update the base subscenario table with
    the values passed. Update the scenarios table with the values passed.
    """
    cursor = conn.cursor()

    # For project-level subscenarios, restore the base subscenario table
    # values (skipped when there's no update data)
    if project_flag and base_subscenario_ids_data:
        base_reupdate_sql = """
            UPDATE {}
            SET {} = ?
            WHERE {} in ({})
            AND project = ?
            """.format(base_table, subscenario, base_subscenario,
                       base_subscenario_ids_str)
        base_reupdate_data = (
            (int(subscenario_id),)
            + tuple(base_subscenario_ids_data)
            + (project,)
        )
        spin_on_database_lock(conn=conn, cursor=cursor,
                              sql=base_reupdate_sql,
                              data=base_reupdate_data, many=False)

    # Restore the scenarios table values (skipped when there's no update
    # data); project-level subscenarios update the base subscenario column
    if scenario_reupdate_tuples:
        scenarios_column = base_subscenario if project_flag else subscenario
        scenarios_reupdate_sql = """
            UPDATE scenarios
            SET {} = ?
            WHERE scenario_id = ?
            """.format(scenarios_column)
        spin_on_database_lock(conn=conn, cursor=cursor,
                              sql=scenarios_reupdate_sql,
                              data=scenario_reupdate_tuples)

    cursor.close()
def update_capacity_results_table(
    db, c, results_directory, scenario_id, subproblem, stage, results_file
):
    """
    Read module-specific capacity results from a CSV and update the
    corresponding columns of results_project_capacity.

    :param db: database connection
    :param c: database cursor
    :param results_directory: directory containing the results CSV
    :param scenario_id:
    :param subproblem:
    :param stage:
    :param results_file: name of the CSV file to read
    """
    results = []
    capacity_path = os.path.join(results_directory, results_file)
    with open(capacity_path, "r") as capacity_file:
        reader = csv.reader(capacity_file)
        header = next(reader)
        for row in reader:
            project, period = row[0], row[1]
            # Columns absent from this module's CSV come back as None
            optional_values = tuple(
                get_column_row_value(header, col, row)
                for col in ("new_build_mw", "new_build_mwh",
                            "new_build_binary", "retired_mw",
                            "retired_binary")
            )
            results.append(
                optional_values
                + (scenario_id, project, period, subproblem, stage)
            )

    # Update the results table with the module-specific results
    update_sql = """
        UPDATE results_project_capacity
        SET new_build_mw = ?,
        new_build_mwh = ?,
        new_build_binary = ?,
        retired_mw = ?,
        retired_binary = ?
        WHERE scenario_id = ?
        AND project = ?
        AND period = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """
    spin_on_database_lock(conn=db, cursor=c, sql=update_sql, data=results)
def process_results(db, c, scenario_id, subscenarios, quiet):
    """
    Aggregate costs by zone and period
    TODO: by technology too?

    :param db: database connection
    :param c: database cursor
    :param subscenarios: subscenario IDs for the scenario
    :param quiet: suppress status printing if True
    :return:
    """
    if not quiet:
        print("aggregate costs")

    # Delete old results
    del_sql = """
        DELETE FROM results_project_costs_operations_agg
        WHERE scenario_id = ?
        """
    spin_on_database_lock(conn=db, cursor=c, sql=del_sql,
                          data=(scenario_id,), many=False)

    # Aggregate operational costs by period and load zone
    # BUG FIX: INSERT ... SELECT maps columns by POSITION, and the SELECT
    # previously listed fuel_cost before variable_om_cost while the INSERT
    # column list had them the other way around -- so the two cost columns
    # were swapped in the aggregated table. The SELECT order now matches
    # the INSERT column list.
    agg_sql = """
        INSERT INTO results_project_costs_operations_agg
        (scenario_id, subproblem_id, stage_id, period, load_zone,
        spinup_or_lookahead, variable_om_cost, fuel_cost, startup_cost,
        shutdown_cost)
        SELECT scenario_id, subproblem_id, stage_id, period, load_zone,
        spinup_or_lookahead,
        SUM(variable_om_cost * timepoint_weight
            * number_of_hours_in_timepoint) AS variable_om_cost,
        SUM(fuel_cost * timepoint_weight
            * number_of_hours_in_timepoint) AS fuel_cost,
        SUM(startup_cost * timepoint_weight) AS startup_cost,
        SUM(shutdown_cost * timepoint_weight) AS shutdown_cost
        FROM results_project_costs_operations
        WHERE scenario_id = ?
        GROUP BY subproblem_id, stage_id, period, load_zone,
        spinup_or_lookahead
        ORDER BY subproblem_id, stage_id, period, load_zone,
        spinup_or_lookahead
        ;"""
    spin_on_database_lock(conn=db, cursor=c, sql=agg_sql,
                          data=(scenario_id,), many=False)
def add_scenario_to_queue(db_path, scenario_id):
    """
    Append a scenario to the end of the run queue and mark it as queued
    (run_status_id = 5).

    :param db_path: path to the database file
    :param scenario_id: ID of the scenario to queue
    :return:
    """
    conn = connect_to_database(db_path=db_path)
    c = conn.cursor()

    # Queue behind whatever is currently last
    next_queue_id = get_max_queue_order_id(c=c) + 1

    sql = """
        UPDATE scenarios
        SET queue_order_id = ?,
        run_status_id = 5
        WHERE scenario_id = ?;
        """
    spin_on_database_lock(
        conn=conn, cursor=c, sql=sql, data=(next_queue_id, scenario_id),
        many=False
    )

    # Close the connection opened in this function (it was previously
    # leaked; sibling functions such as record_end_time close theirs)
    conn.close()
def import_results_into_database(
    scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    Import system cost results from npv.csv.

    :param scenario_id:
    :param c:
    :param db:
    :param results_directory:
    :param quiet:
    :return:
    """
    # TODO: change this to say NPV and have negatives for the cost
    #  components or flag revenue and cost components
    if not quiet:
        print("results system cost")

    # Delete prior results and create temporary import table for ordering
    setup_results_import(
        conn=db, cursor=c,
        table="results_system_costs",
        scenario_id=scenario_id, subproblem=subproblem, stage=stage,
    )

    # Read the CSV and stamp on the scenario/subproblem/stage identifiers
    cost_df = pd.read_csv(os.path.join(results_directory, "npv.csv"))
    cost_df["scenario_id"] = scenario_id
    cost_df["subproblem_id"] = subproblem
    cost_df["stage_id"] = stage
    results = cost_df.to_records(index=False)

    # Register numpy types with sqlite, so that they are properly inserted
    # from pandas dataframes
    # https://stackoverflow.com/questions/38753737/inserting-numpy-integer-types-into-sqlite-with-python3
    sqlite3.register_adapter(np.int64, lambda val: int(val))
    sqlite3.register_adapter(np.float64, lambda val: float(val))

    insert_sql = """
        INSERT INTO results_system_costs
        ({})
        VALUES ({});
        """.format(
        ", ".join(cost_df.columns),
        ", ".join(["?"] * len(cost_df.columns))
    )

    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=results)
def delete_scenario_results_and_status(conn, scenario_id):
    """
    Delete a scenario's rows from all results and status tables, and reset
    the scenario's status columns to their defaults.

    :param conn: database connection
    :param scenario_id:
    :return:
    """
    cursor = conn.cursor()

    # Discover the results and status tables by name prefix
    table_names = [
        tbl[0] for tbl in cursor.execute(
            "SELECT name FROM sqlite_master WHERE type='table';"
        ).fetchall()
    ]
    results_tables = [t for t in table_names if t.startswith("results")]
    status_tables = [t for t in table_names if t.startswith("status")]

    # Delete from all results and status tables
    for tbl in results_tables + status_tables:
        del_sql = """
            DELETE FROM {} WHERE scenario_id = ?;
            """.format(tbl)
        spin_on_database_lock(conn=conn, cursor=cursor, sql=del_sql,
                              data=(scenario_id,), many=False)

    # Update statuses in scenarios table to defaults
    status_sql = """
        UPDATE scenarios
        SET validation_status_id=0,
        queue_order_id=NULL,
        run_status_id=0,
        run_process_id=NULL
        WHERE scenario_id = ?
        """
    spin_on_database_lock(conn=conn, cursor=cursor, sql=status_sql,
                          data=(scenario_id,), many=False)
def import_results_into_database(scenario_id, subproblem, stage, c, db, results_directory, quiet):
    """
    Clear prior endogenous-availability results, then delegate importing
    to each required availability-type module that implements an importer.

    :param scenario_id:
    :param subproblem:
    :param stage:
    :param c:
    :param db:
    :param results_directory:
    :param quiet:
    :return:
    """
    # Delete prior results for endogenous types
    del_sql = """
        DELETE FROM results_project_availability_endogenous
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """
    spin_on_database_lock(
        conn=db, cursor=c, sql=del_sql,
        data=(scenario_id, subproblem, stage), many=False,
    )

    # Load in the required availability type modules
    required_availability_type_modules = \
        get_required_availability_type_modules(scenario_id, c)
    imported_availability_modules = load_availability_type_modules(
        required_availability_type_modules)

    # Import module-specific results for each module that has an importer
    for availability_type in required_availability_type_modules:
        availability_module = imported_availability_modules[availability_type]
        if hasattr(availability_module, "import_results_into_database"):
            availability_module.import_results_into_database(
                scenario_id, subproblem, stage, c, db, results_directory,
                quiet
            )
def delete_scenario(conn, scenario_id):
    """
    Delete a scenario fully, i.e. delete from all results tables, status
    tables, and the scenarios table.

    :param conn: the database connection object
    :param scenario_id: the scenario_id to delete
    """
    # Results and statuses first (they reference the scenario)
    delete_scenario_results_and_status(conn=conn, scenario_id=scenario_id)

    # Then drop the scenario row itself
    cursor = conn.cursor()
    del_scenario_sql = "DELETE FROM scenarios WHERE scenario_id = ?"
    spin_on_database_lock(conn=conn, cursor=cursor, sql=del_scenario_sql,
                          data=(scenario_id,), many=False)
def process_results(db, c, scenario_id, subscenarios, quiet):
    """
    Flag spinup/lookahead timepoints in every results table that has both
    a timepoint and a spinup_or_lookahead column, using the scenario's
    temporal inputs.

    :param db: database connection
    :param c: database cursor
    :param subscenarios: subscenario IDs for the scenario
    :param quiet: suppress status printing if True
    :return:
    """
    if not quiet:
        print("add spinup_or_lookahead flag")

    # Update tables with spinup_or_lookahead_flag
    tables_to_update = determine_table_subset_by_start_and_column(
        conn=db, tbl_start="results_",
        cols=["timepoint", "spinup_or_lookahead"])

    for results_tbl in tables_to_update:
        if not quiet:
            print("... {}".format(results_tbl))
        update_sql = """
            UPDATE {}
            SET spinup_or_lookahead = (
            SELECT spinup_or_lookahead
            FROM inputs_temporal
            WHERE temporal_scenario_id = (
                SELECT temporal_scenario_id 
                FROM scenarios 
                WHERE scenario_id = ?
                )
            AND {}.subproblem_id = inputs_temporal.subproblem_id
            AND {}.stage_id = inputs_temporal.stage_id
            AND {}.timepoint = inputs_temporal.timepoint
            );
            """.format(results_tbl, results_tbl, results_tbl, results_tbl)

        spin_on_database_lock(conn=db, cursor=c, sql=update_sql,
                              data=(scenario_id,), many=False)
def load_aux_data(conn, filename, sql):
    """
    Load auxiliary data from a CSV file in this module's "data" directory
    into the database.

    :param conn: database connection
    :param filename: name of the CSV file in the "data" directory
    :param sql: the INSERT statement to execute for the rows
    :return:
    """
    data = []
    cursor = conn.cursor()
    file_path = os.path.join(os.path.dirname(__file__), "data", filename)

    with open(file_path, "r") as f:
        reader = csv.reader(f, delimiter=",")
        next(reader)  # skip header
        for row in reader:
            # tuple(row) replaces the redundant index-based comprehension
            # tuple([row[i] for i in range(len(row))])
            data.append(tuple(row))

    spin_on_database_lock(conn=conn, cursor=cursor, sql=sql, data=data)