def __init__(self, scenario, scenario_location): self.main_scenario_directory = determine_scenario_directory( scenario_location=scenario_location, scenario_name=scenario ) # Check if the scenario actually exists if not os.path.exists(self.main_scenario_directory): raise IOError("Scenario '{}/{}' does not exist. Please verify" " scenario name and scenario location" .format(scenario_location, scenario)) # Check if there are subproblem directories self.subproblems = \ check_for_integer_subdirectories(self.main_scenario_directory) # Make dictionary for the stages by subproblem, starting with empty # list for each subproblem self.stages_by_subproblem = { subp: [] for subp in self.subproblems } # If we have subproblems, check for stage subdirectories for each # subproblem directory if self.subproblems: for subproblem in self.subproblems: subproblem_dir = os.path.join( self.main_scenario_directory, subproblem ) stages = check_for_integer_subdirectories(subproblem_dir) # If the list isn't empty, update the stage dictionary and # create the stage pass-through directory and input file # TODO: we probably don't need a directory for the # pass-through inputs, as it's only one file if stages: self.stages_by_subproblem[subproblem] = stages # Create the commitment pass-through file (also deletes any # prior results) # First create the pass-through directory if it doesn't # exist # TODO: need better handling of deleting prior results? pass_through_directory = \ os.path.join(subproblem_dir, "pass_through_inputs") if not os.path.exists(pass_through_directory): os.makedirs(pass_through_directory) with open( os.path.join( pass_through_directory, "fixed_commitment.tab" ), "w", newline="" ) as fixed_commitment_file: fixed_commitment_writer = writer( fixed_commitment_file, delimiter="\t", lineterminator="\n" ) fixed_commitment_writer.writerow( ["project", "timepoint", "stage", "final_commitment_stage", "commitment"])
def main(args=None):
    """
    Parse the command-line arguments, connect to the results database,
    determine the scenario, load the scenario's modules, and run each
    module's results-processing step.

    Fix: the database connection is now closed in a ``finally`` block, so
    it is released even if any step raises (previously an exception left
    the connection open).

    :param args: optional list of command-line argument strings;
        sys.argv[1:] is used when this is None
    :return: None
    """
    if args is None:
        args = sys.argv[1:]

    parsed_arguments = parse_arguments(args=args)

    db_path = parsed_arguments.database
    scenario_id_arg = parsed_arguments.scenario_id
    scenario_name_arg = parsed_arguments.scenario
    scenario_location = parsed_arguments.scenario_location

    conn = connect_to_database(db_path=db_path)
    try:
        c = conn.cursor()

        if not parsed_arguments.quiet:
            print("Processing results... (connected to database {})".format(
                db_path))

        scenario_id, scenario_name = get_scenario_id_and_name(
            scenario_id_arg=scenario_id_arg,
            scenario_name_arg=scenario_name_arg,
            c=c,
            script="process_results",
        )

        # Determine scenario directory
        scenario_directory = determine_scenario_directory(
            scenario_location=scenario_location,
            scenario_name=scenario_name
        )

        # Go through modules
        modules_to_use = determine_modules(
            scenario_directory=scenario_directory)
        loaded_modules = load_modules(modules_to_use)

        # Subscenarios
        subscenarios = SubScenarios(conn=conn, scenario_id=scenario_id)

        process_results(
            loaded_modules=loaded_modules,
            db=conn,
            cursor=c,
            scenario_id=scenario_id,
            subscenarios=subscenarios,
            quiet=parsed_arguments.quiet,
        )
    finally:
        # Close the database connection even if processing raised
        conn.close()
def main(args=None):
    """
    Run a scenario end to end: parse the script arguments, determine the
    scenario structure on disk (i.e. whether it is a single optimization
    or has subproblems/stages), and run it.

    :param args: optional list of command-line argument strings;
        sys.argv[1:] is used when this is None
    :return: the objective function value(s), used in testing
    """
    if args is None:
        args = sys.argv[1:]

    # Parse arguments
    parsed = parse_arguments(args)

    scenario_dir = determine_scenario_directory(
        scenario_location=parsed.scenario_location,
        scenario_name=parsed.scenario,
    )

    # Bail out early if the scenario directory is missing
    if not os.path.exists(scenario_dir):
        raise IOError(
            "Scenario '{}/{}' does not exist. Please verify"
            " scenario name and scenario location".format(
                parsed.scenario_location, parsed.scenario
            )
        )

    # Figure out whether there are subproblems/stages on disk
    structure = get_subproblem_structure_from_disk(
        scenario_directory=scenario_dir
    )

    # Run the scenario (can be multiple optimization subproblems) and
    # return the objective function value(s) (used in testing)
    return run_scenario(
        scenario_directory=scenario_dir,
        subproblem_structure=structure,
        parsed_arguments=parsed,
    )
def solve(instance, parsed_arguments):
    """
    :param instance: the compiled problem instance
    :param parsed_arguments: the user-defined arguments (parsed)
    :return: the problem results

    Send the compiled problem instance to the solver and solve.

    Fixes: reading ``solver_options["solver"]`` no longer raises
    ``KeyError`` when solver_options.csv exists but has no "solver" row
    (the command-line solver, or the "cbc" default, is used instead), and
    blank/short CSV rows no longer raise ``IndexError``.
    """
    # Start with solver name specified on command line (may be None)
    solver_name = parsed_arguments.solver

    # Get any user-requested solver options
    scenario_directory = determine_scenario_directory(
        scenario_location=parsed_arguments.scenario_location,
        scenario_name=parsed_arguments.scenario)

    solver_options = dict()
    solver_options_file = os.path.join(
        scenario_directory, "solver_options.csv")

    if os.path.exists(solver_options_file):
        with open(solver_options_file) as f:
            _reader = reader(f, delimiter=",")
            for row in _reader:
                # Skip blank or one-column rows instead of raising
                # IndexError on row[1]
                if len(row) >= 2:
                    solver_options[row[0]] = row[1]

        # The file may or may not contain a "solver" row
        file_solver = solver_options.get("solver")

        # Check that the solver specified in the file is the same as that
        # given from the command line (if any)
        if parsed_arguments.solver is not None and file_solver is not None:
            if parsed_arguments.solver != file_solver:
                raise UserWarning(
                    "ERROR! Solver specified on command line ({}) and solver "
                    "in solver_options.csv ({}) do not match.".format(
                        parsed_arguments.solver, file_solver))

        # If the file names a solver, it takes effect (it either matches
        # the command line or the command line gave none)
        if file_solver is not None:
            solver_name = file_solver

    # Fall back to the default solver if none was specified anywhere
    if solver_name is None:
        solver_name = "cbc"

    # Get solver
    # If a solver executable is specified, pass it to Pyomo
    if parsed_arguments.solver_executable is not None:
        solver = SolverFactory(
            solver_name, executable=parsed_arguments.solver_executable)
    # Otherwise, only pass the solver name; Pyomo will look for the
    # executable in the PATH
    else:
        solver = SolverFactory(solver_name)

    # Apply the solver options (if any); the "solver" entry is just the
    # solver name, not actually an 'option'
    for opt in solver_options.keys():
        if opt != "solver":
            solver.options[opt] = solver_options[opt]

    # Solve
    # Note: Pyomo moves the results to the instance object by default.
    # If you want the results to stay into a results object, set the
    # load_solutions argument to False:
    # >>> results = solver.solve(instance, load_solutions=False)
    results = solver.solve(
        instance,
        tee=not parsed_arguments.mute_solver_output,
        keepfiles=parsed_arguments.keepfiles,
        symbolic_solver_labels=parsed_arguments.symbolic
    )

    # Can optionally log infeasibilities but this has resulted in false
    # positives due to rounding errors larger than the default tolerance
    # of 1E-6.
    # log_infeasible_constraints(instance)

    return results
def main(args=None):
    """
    Pull the scenario's inputs from the database and write the .tab model
    input files (plus features, scenario description, units, solver
    options, and linked-subproblems files) to the scenario directory.

    Fixes: the database connection is closed in a ``finally`` block so it
    is released even on error; the stages flag is computed with a
    generator expression; the stale comment claiming "non-empty stage
    list" is corrected (the code requires MORE THAN ONE stage).

    :param args: optional list of command-line argument strings;
        sys.argv[1:] is used when this is None
    :return: None
    """
    # Retrieve DB location and scenario_id and/or name from args
    if args is None:
        args = sys.argv[1:]
    parsed_arguments = parse_arguments(args=args)

    db_path = parsed_arguments.database
    scenario_id_arg = parsed_arguments.scenario_id
    scenario_name_arg = parsed_arguments.scenario
    scenario_location = parsed_arguments.scenario_location

    conn = connect_to_database(db_path=db_path)
    try:
        c = conn.cursor()

        if not parsed_arguments.quiet:
            print("Getting inputs... (connected to database {})".format(
                db_path))

        scenario_id, scenario_name = get_scenario_id_and_name(
            scenario_id_arg=scenario_id_arg,
            scenario_name_arg=scenario_name_arg,
            c=c,
            script="get_scenario_inputs",
        )

        # Determine scenario directory and create it if needed
        scenario_directory = determine_scenario_directory(
            scenario_location=scenario_location,
            scenario_name=scenario_name)
        create_directory_if_not_exists(directory=scenario_directory)

        # Get scenario characteristics (features, scenario_id,
        # subscenarios, subproblems)
        # TODO: it seems these fail silently if empty; we may want to
        #  implement some validation
        optional_features = OptionalFeatures(
            conn=conn, scenario_id=scenario_id)
        subscenarios = SubScenarios(conn=conn, scenario_id=scenario_id)
        subproblem_structure = get_subproblem_structure_from_db(
            conn=conn, scenario_id=scenario_id)
        solver_options = SolverOptions(conn=conn, scenario_id=scenario_id)

        # Determine requested features and use this to determine what
        # modules to load for GridPath
        feature_list = optional_features.get_active_features()

        # If any subproblem has MORE THAN ONE stage, this is a multi-stage
        # scenario; the flag tells determine_modules to include the
        # stage-related modules
        stages_flag = any(
            len(stage_list) > 1
            for stage_list in subproblem_structure.SUBPROBLEM_STAGES.values()
        )

        # Figure out which modules to use and load the modules
        modules_to_use = determine_modules(
            features=feature_list, multi_stage=stages_flag)

        # Get appropriate inputs from database and write the .tab file
        # model inputs
        write_model_inputs(
            scenario_directory=scenario_directory,
            subproblem_structure=subproblem_structure,
            modules_to_use=modules_to_use,
            scenario_id=scenario_id,
            subscenarios=subscenarios,
            db_path=db_path,
            n_parallel_subproblems=int(
                parsed_arguments.n_parallel_get_inputs),
        )

        # Save the list of optional features to a file (will be used to
        # determine modules without database connection)
        write_features_csv(
            scenario_directory=scenario_directory,
            feature_list=feature_list)

        # Write full scenario description
        write_scenario_description(
            scenario_directory=scenario_directory,
            scenario_id=scenario_id,
            scenario_name=scenario_name,
            optional_features=optional_features,
            subscenarios=subscenarios,
        )

        # Write the units used for all metrics
        write_units_csv(scenario_directory, conn)

        # Write the solver options file if needed
        write_solver_options(
            scenario_directory=scenario_directory,
            solver_options=solver_options)

        # Write the subproblem linked timepoints map file if needed
        write_linked_subproblems_map(scenario_directory, conn, subscenarios)
    finally:
        # Close the database connection even if input-writing raised
        conn.close()
def _log_error_and_exit(e, db_path, scenario, queue_order_id, process_id,
                        message):
    """
    Handle a failed end-to-end step: log the exception traceback, mark the
    run as errored in the database (run_status_id=3), print the given
    message with the run end time, and exit with code 1.
    """
    logging.exception(e)
    end_time = update_db_for_run_end(
        db_path=db_path,
        scenario=scenario,
        queue_order_id=queue_order_id,
        process_id=process_id,
        run_status_id=3,
    )
    print("{} End time: {}.".format(message, end_time))
    sys.exit(1)


def main(args=None):
    """
    Run a scenario end to end: get inputs, run the scenario, import the
    results, and process the results, updating the run status in the
    database along the way. A single step may be requested via the
    --single_e2e_step_only argument; individual steps may be skipped via
    the --skip_* arguments.

    Fixes: the process-results failure message now says "processing
    results" (previously a copy-paste of the import-results message), the
    resolved scenario name is recorded in the database (parsed_args.scenario
    may be None when the scenario is given by ID), and the four duplicated
    failure blocks are factored into _log_error_and_exit.

    :param args: optional list of command-line argument strings;
        sys.argv[1:] is used when this is None
    :return: the expected objective value(s) from run_scenario (used in
        testing), or None if the run-scenario step was skipped
    """
    # Get process ID and start_time
    process_id = os.getpid()
    start_time = datetime.datetime.now()

    # Signal-handling directives
    signal.signal(signal.SIGTERM, sigterm_handler)
    signal.signal(signal.SIGINT, sigint_handler)

    if args is None:
        args = sys.argv[1:]
    parsed_args = parse_arguments(args)

    # If directed to do so, log e2e run
    scenario_directory = determine_scenario_directory(
        scenario_location=parsed_args.scenario_location,
        scenario_name=parsed_args.scenario,
    )
    if parsed_args.log:
        logs_directory = create_logs_directory_if_not_exists(
            scenario_directory=scenario_directory, subproblem="", stage="")

        # Save sys.stdout so we can return to it later
        stdout_original = sys.stdout
        stderr_original = sys.stderr

        # The print statement will call the write() method of any object
        # you assign to sys.stdout (in this case the Logging object). The
        # write method of Logging writes both to sys.stdout and a log file
        # (see auxiliary/auxiliary.py)
        logger = Logging(
            logs_dir=logs_directory,
            start_time=start_time,
            e2e=True,
            process_id=process_id,
        )
        sys.stdout = logger
        sys.stderr = logger

    # Create connection
    db_path = parsed_args.database
    conn = connect_to_database(db_path=db_path)
    scenario_id, scenario = get_scenario_id_and_name(
        scenario_id_arg=parsed_args.scenario_id,
        scenario_name_arg=parsed_args.scenario,
        c=conn.cursor(),
        script="run_end_to_end",
    )
    conn.close()

    if not parsed_args.quiet:
        print("Running scenario {} end to end".format(scenario))

    # Check if running from queue
    queue_order_id = check_if_in_queue(db_path, scenario)

    # Update run status to 'running'
    update_run_status(db_path, scenario, 1)

    # Record process ID and process start time in database
    if not parsed_args.quiet:
        print("Process ID is {}".format(process_id))
        print("End-to-end run started on {}".format(start_time))
    # Use the resolved scenario name: parsed_args.scenario may be None if
    # the scenario was specified by ID only
    record_process_id_and_start_time(
        db_path, scenario, process_id, start_time)

    # Figure out which steps we are skipping if user has requested a
    # single E2E step; if no single step was requested, run all steps
    single_step = parsed_args.single_e2e_step_only
    if single_step in ("get_inputs", "run_scenario", "import_results",
                       "process_results"):
        skip_get_inputs = single_step != "get_inputs"
        skip_run_scenario = single_step != "run_scenario"
        skip_import_results = single_step != "import_results"
        skip_process_results = single_step != "process_results"
    else:
        skip_get_inputs = False
        skip_run_scenario = False
        skip_import_results = False
        skip_process_results = False

    # Go through the steps if user has not requested to skip them
    if not skip_get_inputs and not parsed_args.skip_get_inputs:
        try:
            get_scenario_inputs.main(args=args)
        except Exception as e:
            _log_error_and_exit(
                e, db_path, scenario, queue_order_id, process_id,
                "Error encountered when getting inputs from the database "
                "for scenario {}.".format(scenario))

    if not skip_run_scenario and not parsed_args.skip_run_scenario:
        try:
            # make sure run_scenario.py gets the required --scenario
            # argument
            run_scenario_args = args + ["--scenario", scenario]
            expected_objective_values = run_scenario.main(
                args=run_scenario_args)
        except Exception as e:
            _log_error_and_exit(
                e, db_path, scenario, queue_order_id, process_id,
                "Error encountered when running scenario {}.".format(
                    scenario))
    else:
        expected_objective_values = None

    if not skip_import_results and not parsed_args.skip_import_results:
        try:
            import_scenario_results.main(import_rule=_import_rule, args=args)
        except Exception as e:
            _log_error_and_exit(
                e, db_path, scenario, queue_order_id, process_id,
                "Error encountered when importing results for "
                "scenario {}.".format(scenario))

    if not skip_process_results and not parsed_args.skip_process_results:
        try:
            process_results.main(args=args)
        except Exception as e:
            # NOTE: message fixed; it previously said "importing results"
            _log_error_and_exit(
                e, db_path, scenario, queue_order_id, process_id,
                "Error encountered when processing results for "
                "scenario {}.".format(scenario))

    # If we make it here, mark run as complete and update run end time
    end_time = update_db_for_run_end(
        db_path=db_path,
        scenario=scenario,
        queue_order_id=queue_order_id,
        process_id=process_id,
        run_status_id=2,
    )
    # TODO: should the process ID be set back to NULL?
    if not parsed_args.quiet:
        print("Done. Run finished on {}.".format(end_time))

    # If logging, we need to return sys.stdout to original (i.e. stop
    # writing to log file)
    if parsed_args.log:
        sys.stdout = stdout_original
        sys.stderr = stderr_original

    # Return expected objective values (for testing)
    return expected_objective_values
def main(import_rule, args=None):
    """
    Import a scenario's results into the database.

    Connects to the database, resolves the scenario, verifies that the
    scenario_id saved on disk matches, wipes previously imported results
    for the scenario, and then lets each loaded module import its results.

    :param import_rule: rule passed through to the per-module import step
    :param args: optional list of command-line argument strings;
        sys.argv[1:] is used when this is None
    :return: None
    """
    if args is None:
        args = sys.argv[1:]

    parsed = parse_arguments(args=args)
    db_path = parsed.database
    quiet = parsed.quiet

    conn = connect_to_database(db_path=db_path)
    cursor = conn.cursor()

    if not quiet:
        print(
            "Importing results... (connected to database {})".format(db_path))

    scenario_id, scenario_name = get_scenario_id_and_name(
        scenario_id_arg=parsed.scenario_id,
        scenario_name_arg=parsed.scenario,
        c=cursor,
        script="import_scenario_results",
    )

    subproblem_structure = get_subproblem_structure_from_db(
        conn=conn, scenario_id=scenario_id)

    # Figure out where the scenario's files live
    scenario_directory = determine_scenario_directory(
        scenario_location=parsed.scenario_location,
        scenario_name=scenario_name)

    # Guard against importing into the wrong scenario: the scenario_id
    # recorded on disk must match the one resolved from the database
    description = pd.read_csv(
        os.path.join(scenario_directory, "scenario_description.csv"),
        header=None,
        index_col=0,
    )
    if int(description.loc["scenario_id", 1]) != scenario_id:
        raise AssertionError("ERROR: saved scenario_id does not match")

    # Each module also makes sure its own results are deleted, but wiping
    # everything up front removes "phantom" results left over if this
    # scenario_id was previously run with a different module set
    delete_scenario_results(conn=conn, scenario_id=scenario_id)

    # Load the modules this scenario uses
    loaded_modules = load_modules(
        determine_modules(scenario_directory=scenario_directory))

    # Import appropriate results into database
    import_scenario_results_into_database(
        import_rule=import_rule,
        loaded_modules=loaded_modules,
        scenario_id=scenario_id,
        subproblems=subproblem_structure,
        cursor=cursor,
        db=conn,
        scenario_directory=scenario_directory,
        quiet=quiet,
    )

    # Close the database connection
    conn.close()