# Standard-library and third-party imports used by the classes below.
# Project-local names (FileService, FolderRoutes, the SHARED_*_LOCATION path
# constants, the Ui_* helpers, the Luomi_* / NewSim model classes, create_csvs,
# util, energy_sim, financial_sim and Results) are assumed to be imported from
# the surrounding application modules.
import csv
import datetime
import io
import os
import re

import pandas as pd


class OSFileService(FileService):
    def __init__(self):
        self.fr = FolderRoutes()

        # Experimental Code
        self.solar_data_save_path = self.fr.get_route("solar_profiles_dir")
        self.load_data_save_path = self.fr.get_route("load_profiles_dir")

        # Solar Path / Load Path
        self.solar_path = SHARED_SOLAR_LOCATION
        self.load_path = SHARED_LOAD_LOCATION

        self.p_config_path = SHARED_PARTICIPANTS_CONFIG_LOCATION
        self.t_config_path = SHARED_TARIFFS_CONFIG_LOCATION
        self.f_config_path = SHARED_FINANCING_CONFIG_LOCATION

        self.config_paths = {
            "model_participants": self.p_config_path,
            "model_tariffs": self.t_config_path,
            "model_financing": self.f_config_path,
        }

        self.result_channels = {
            "model_participants": "participants_file_channel",
            "model_tariffs": "tariffs_file_channel",
            "model_financing": "financing_file_channel",
        }

        self.solar_files = []
        self.load_files = []
        self.p_config_files = []

        self.update_files_lists()

    def update_files_lists(self):
        self.solar_files = [
            f for f in os.listdir(self.solar_data_save_path)
            if os.path.isfile(os.path.join(self.solar_path, f))
            and f.lower().endswith('.csv')
        ]
        self.load_files = [
            f for f in os.listdir(self.load_data_save_path)
            if os.path.isfile(os.path.join(self.load_path, f))
            and f.lower().endswith('.csv')
        ]
        self.p_config_files = [
            f for f in os.listdir(self.p_config_path)
            if os.path.isfile(os.path.join(self.p_config_path, f))
        ]

    def valid_file(self, new_file):
        print("file_service.py/valid_file", new_file)
        print("file_service.py/valid_file", new_file.filename)
        # print("file_service.py/valid_file", new_file.read())

        # Ensure the file ends in .csv.
        if not new_file.filename.lower().endswith('.csv'):
            return False, "Error: Expected CSV File"

        # try:
        contents_str = new_file.read().decode("utf-8")
        reader = csv.DictReader(io.StringIO(contents_str))
        timestamp_pattern = re.compile("^[0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9][0-9]$")

        for line in reader:
            if 'timestamp' not in line:
                return False, "timestamp column not found"
            if line['timestamp'] == '' or line['timestamp'].isspace():
                return False, "Blank timestamp found - check end of file perhaps?"
            if not timestamp_pattern.match(line['timestamp']):
                return False, ("Incorrectly formatted timestamp found: "
                               + str(line['timestamp'])
                               + " - must follow DD/MM/YYYY HH:mm")
        # except:
        #     return False, "Could not parse CSV file - check formatting."

        return True, "Success"

    def save(self, file, save_type):
        if save_type == "solar_data":
            file.save(os.path.join(self.solar_data_save_path, file.filename))
        if save_type == "load_data":
            file.save(os.path.join(self.load_data_save_path, file.filename))
        self.update_files_lists()
        # print("FILE_SERVICE: Saving", file)
        # file.save(os.path.join('uploads', file.filename))
        # print("Successfully saved")

    def list_solar_files(self):
        self.update_files_lists()
        return self.solar_files

    def list_load_files(self):
        self.update_files_lists()
        return self.load_files

    def list_solar_start_end(self):
        self.update_files_lists()
        output = {}
        for solar_filename in self.solar_files:
            path = os.path.join(self.solar_path, solar_filename)
            with open(path) as f:
                reader = csv.DictReader(f)
                for idx, line in enumerate(reader):
                    if idx == 0:
                        start_date = datetime.datetime.strptime(
                            line['timestamp'], '%d/%m/%Y %H:%M').isoformat()
                        # start_date = pendulum.from_format(line['timestamp'], 'DD/MM/YYYY HH:mm', tz='UTC').isoformat()
                    end_date = datetime.datetime.strptime(
                        line['timestamp'], '%d/%m/%Y %H:%M').isoformat()
                    # end_date = pendulum.from_format(line['timestamp'], 'DD/MM/YYYY HH:mm', tz='UTC').isoformat()
            output[solar_filename] = {
                'start_date': start_date,
                'end_date': end_date
            }
        return output

    def list_load_start_end(self):
        self.update_files_lists()
        output = {}
        for load_filename in self.load_files:
            path = os.path.join(self.load_path, load_filename)
            with open(path) as f:
                reader = csv.DictReader(f)
                for idx, line in enumerate(reader):
                    if idx == 0:
                        start_date = datetime.datetime.strptime(
                            line['timestamp'], '%d/%m/%Y %H:%M').isoformat()
                        # start_date = pendulum.from_format(line['timestamp'], 'DD/MM/YYYY HH:mm', tz='UTC').isoformat()
                    end_date = datetime.datetime.strptime(
                        line['timestamp'], '%d/%m/%Y %H:%M').isoformat()
                    # end_date = pendulum.from_format(line['timestamp'], 'DD/MM/YYYY HH:mm', tz='UTC').isoformat()
            output[load_filename] = {
                'start_date': start_date,
                'end_date': end_date
            }
        return output

    def list_solar_profiles(self, solar_filename):
        solar_profiles = ""
        if solar_filename != "":
            solar_profiles = list(
                pd.read_csv(os.path.join(self.solar_path, solar_filename)))
            if solar_profiles[0] == 'timestamp':
                solar_profiles.pop(0)
        return solar_profiles

    def list_load_profiles(self, load_filename):
        load_profiles = ""
        if load_filename != "":
            load_profiles = list(
                pd.read_csv(os.path.join(self.load_path, load_filename)))
            if load_profiles[0] == 'timestamp':
                load_profiles.pop(0)
        return load_profiles

    def get_solar_timeseries(self, solar_filename):
        self.update_files_lists()
        output = {}
        path = os.path.join(self.solar_path, solar_filename)
        with open(path) as f:
            reader = csv.DictReader(f)
            for line in reader:
                # There is a small (probably timezone) difference between these
                # two approaches, but pendulum misbehaves in compiled builds.
                # pd.datetime was removed from pandas, so use the standard-library datetime.
                time = datetime.datetime.strptime(
                    line['timestamp'], '%d/%m/%Y %H:%M').timestamp() * 1000.0
                # time = pendulum.from_format(line['timestamp'], 'DD/MM/YYYY HH:mm', tz='UTC').timestamp() * 1000.0
                for label in line:
                    if label != 'timestamp':
                        output.setdefault(label, [])
                        dp = float(line[label])
                        output[label].append([time, dp])
        return output

    def get_load_timeseries(self, load_filename):
        self.update_files_lists()
        output = {}
        path = os.path.join(self.load_path, load_filename)
        with open(path) as f:
            reader = csv.DictReader(f)
            for line in reader:
                time = datetime.datetime.strptime(
                    line['timestamp'], '%d/%m/%Y %H:%M').timestamp() * 1000.0
                # time = pendulum.from_format(line['timestamp'], 'DD/MM/YYYY HH:mm').timestamp() * 1000.0
                for label in line:
                    if label != 'timestamp':
                        output.setdefault(label, [])
                        dp = float(line[label])
                        output[label].append([time, dp])
        return output

    def save_config(self, page_name, config_filename, data, additional_headers):
        # print("Page Name: ", page_name,
        #       "\nConfig Filename: ", config_filename,
        #       "\nData: ", data,
        #       "\nAdditional Headers: ", additional_headers)
        table_data = data["data"]

        table_headers = []
        for each in table_data[0]["row_inputs"]:
            table_headers.append(each)
        if additional_headers:
            for each in additional_headers:
                table_headers.append(each)

        file_path = os.path.join(self.config_paths[page_name], config_filename)
        self.clear_csv(file_path)

        with open(file_path, 'w') as file:
            writer = csv.DictWriter(file, fieldnames=table_headers)
            writer.writeheader()
            for each in table_data:
                row = each["row_inputs"]
                if additional_headers:
                    for key in additional_headers:
                        row[key] = additional_headers[key]
                writer.writerow(row)
        return True

    def load_config(self, page_name, config_filename):
        file_path = os.path.join(self.config_paths[page_name], config_filename)
        results = []
        if os.path.isfile(file_path):
            with open(file_path) as file:
                reader = csv.DictReader(file)
                counter = 0
                for row in reader:
                    results.append({'row_id': counter, 'row_inputs': row})
                    counter += 1  # previously never incremented, so every row_id was 0
        return self.result_channels[page_name], results

    def load_participants_config(self, page_name, config_filename):
        channel, data = self.load_config(page_name, config_filename)

        solar_filename = data[0]["row_inputs"]["selected_solar_file"]
        load_filename = data[0]["row_inputs"]["selected_load_file"]

        solar_profiles_options = self.list_solar_profiles(solar_filename)
        load_profiles_options = self.list_load_profiles(load_filename)

        packaged_data = {
            "data": data,
            "solar_profiles_options": solar_profiles_options,
            "load_profiles_options": load_profiles_options
        }
        return channel, packaged_data

    @staticmethod
    def clear_csv(path):
        # Truncate (or create) the file so stale rows are not left behind.
        with open(path, "w+"):
            pass
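
# --- Illustrative usage sketch (not part of the original service) -----------
# A minimal sketch of how the service is typically driven from an upload
# endpoint. `uploaded` stands in for a Flask/Werkzeug FileStorage-style object
# (anything with .filename, .read(), .seek() and .save()), and the filenames
# below are hypothetical.
def _example_file_service_usage(uploaded):
    fs = OSFileService()

    ok, message = fs.valid_file(uploaded)
    if ok:
        uploaded.seek(0)                   # valid_file() consumed the stream via read()
        fs.save(uploaded, "solar_data")    # lands in solar_profiles_dir

    print(fs.list_solar_files())           # e.g. ['solar_2017.csv', ...]
    print(fs.list_solar_start_end())       # {filename: {'start_date': ..., 'end_date': ...}}

    # Each column maps to [[epoch_ms, value], ...] pairs, ready for charting.
    series = fs.get_solar_timeseries("solar_2017.csv")
    return series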
class Parameters:
    def __init__(self):
        # Folder Routes
        self.folder_routes = FolderRoutes()

        # Model setup parameters
        self.model_type = 'luomi'
        self.network_name = 'Default_Network'
        self.network_type = 'embedded_network'
        self.data_dir = self.folder_routes.get_route('data_dir')
        self.luomi_defaults_dir = self.folder_routes.get_route("luomi_defaults_dir")
        self.luomi_input_dir = self.folder_routes.get_route("luomi_input_dir")
        self.luomi_output_dir = self.folder_routes.get_route("luomi_output_dir")

        # UI Interface objects
        self.ui_participants = Ui_Participants(self.folder_routes)
        self.ui_tariffs = Ui_Tariffs(self.folder_routes)
        self.ui_finances = None
        self.ui_central_battery = Ui_Central_Battery(self.folder_routes)
        self.ui_central_solar = Ui_Central_Solar(self.folder_routes)
        self.ui_results_parser = Ui_Results_Parsers(self.folder_routes)

        # Model Objects
        self.model_network = None
        self.model_central_battery = None
        self.model_tariffs = None
        self.model_time_periods = None
        self.model_results = None

        # Mike Model Objects
        self.mike_model = None

        # Legacy Stuff.
        self.time_periods = None
        self.ui_inputs = None

    def load(self, ui_inputs):
        load_functions = [
            self.load_model_selection,
            self.load_network_name,
            self.load_central_services,
            self.load_tariffs,
            self.load_participants,
            self.load_data_sources,
        ]
        for each in load_functions:
            each(ui_inputs)
        self.ui_inputs = ui_inputs

    # def load_defaults(self):
    #     # Populate default participants from the CSV.
    #     self.ui_tariffs.load_defaults()
    #     self.ui_participants.load_defaults()
    #     # This is temporary.
    #     start = datetime.datetime(year=2017, month=2, day=26, hour=10)
    #     end = datetime.datetime(year=2017, month=2, day=26, hour=12)
    #     self.time_periods = util.generate_dates_in_range(start, end, 30)

    def load_model_selection(self, ui_inputs):
        if 'model_selection' in ui_inputs:
            inputs = ui_inputs['model_selection']
            self.model_type = inputs.get('model_type')
            self.network_type = inputs.get('network_type')

    def load_network_name(self, ui_inputs):
        key = "network_name"
        if key in ui_inputs:
            self.network_name = ui_inputs[key]

    def load_central_services(self, ui_inputs):
        key = "central_services"
        if key in ui_inputs:
            print(ui_inputs[key])
            self.ui_central_battery.load(ui_inputs[key])

    def load_tariffs(self, ui_inputs):
        # key = "model_tariffs"
        # if key in ui_inputs:
        #     self.ui_tariffs.load(ui_inputs[key])
        # This just grabs the new tariffs object from the UI inputs.
        self.ui_tariffs = ui_inputs['tariffs']

    def load_participants(self, ui_inputs):
        key = "model_participants"
        if key in ui_inputs:
            self.ui_participants.load(ui_inputs[key])

    def load_data_sources(self, ui_inputs):
        key = "model_data_sources"
        if key in ui_inputs:
            start, end = self.find_time_periods(ui_inputs[key])
            self.time_periods = util.generate_dates_in_range(start, end, 30)

    # def load_central_solar(self, ui_inputs):
    #     key = "model_solar"
    #     if key in ui_inputs:
    #         print("Called load_central_solar")
    #         # self.ui_participants.add_participant(ui_inputs[key])

    def print(self):
        print("Model Type: ", self.model_type)

    def create_objects(self):
        if self.model_type == 'mike':
            self.create_mike_objects()
        else:
            self.create_luomi_objects()

    def create_luomi_objects(self):
        self.model_network = Luomi_Network(self.network_name)

        # Add the participants to the model network.
        participants_string = self.ui_participants.get_participants_as_string()
        self.model_network.add_participants_from_string(
            self.data_dir,
            self.ui_inputs['model_data_sources']['selected_load_file'],
            self.ui_inputs['model_data_sources']['selected_solar_file'],
            participants_string)

        # Create a central battery from the ui_central_battery.
        self.model_central_battery = Luomi_Central_Battery(
            **self.ui_central_battery.get_params_dict())
        # Add the central battery to the network.
        self.model_network.add_central_battery(self.model_central_battery)

        # tariffs_dict = self.ui_tariffs.get_tariffs_dict()
        # self.model_tariffs = Luomi_Tariffs(**tariffs_dict)
        self.model_tariffs = Luomi_Tariffs(self.ui_tariffs)

        print("parameters.py/create_luomi_objects",
              "Made LUOMI Objects without error")

    def create_mike_objects(self):
        # Create the main Study object.
        self.mike_model = NewSim(self.folder_routes)
        # Create the CSVs from the standard objects.
        create_csvs(self.ui_participants, self.ui_tariffs, self.ui_finances,
                    self.ui_central_battery, self.ui_central_solar,
                    self.folder_routes)

    def run(self, status):
        if self.model_type == 'mike':
            return self.run_mike_model(status)
        else:
            return self.run_luomi_model(status)

    def run_luomi_model(self, status):
        # bc = self.ui_central_battery.get_capacity()
        info_tag = ""
        # print("RUN_LUOMI_TIME_PERIODS", self.time_periods)
        self.model_results = Results(
            self.time_periods,
            [p.get_id() for p in self.model_network.get_participants()])

        energy_sim.simulate(self.time_periods, self.model_network,
                            self.model_tariffs, self.model_results, status)
        financial_sim.simulate(self.time_periods, self.model_network,
                               self.model_tariffs, self.model_results, status)

        self.model_results.to_csv(self.luomi_output_dir, info_tag=info_tag)
        parsed_results = self.ui_results_parser.luomi_temp_parser(info_tag)
        return parsed_results

    def run_mike_model(self, status):
        status("Attempting Mike Model")
        if self.mike_model:
            self.mike_model.run()
        parsed_results = self.ui_results_parser.mike_temp_parser()
        status("Mike Model Complete - See Folder")
        return parsed_results

    # Might move this later.
    def find_time_periods(self, frontend_data):
        s_path = self.folder_routes.solar_profiles_dir
        l_path = self.folder_routes.load_profiles_dir
        s_file_path = os.path.join(s_path, frontend_data["selected_solar_file"])
        l_file_path = os.path.join(l_path, frontend_data["selected_load_file"])

        s_df = pd.read_csv(s_file_path)
        l_df = pd.read_csv(l_file_path)

        s_start_string = str(s_df.head(1)["timestamp"].values[0])
        s_start = datetime.datetime.strptime(s_start_string, '%d/%m/%Y %H:%M')
        l_start_string = str(l_df.head(1)["timestamp"].values[0])
        l_start = datetime.datetime.strptime(l_start_string, '%d/%m/%Y %H:%M')

        s_end_string = str(s_df.tail(1)["timestamp"].values[0])
        s_end = datetime.datetime.strptime(s_end_string, '%d/%m/%Y %H:%M')
        l_end_string = str(l_df.tail(1)["timestamp"].values[0])
        l_end = datetime.datetime.strptime(l_end_string, '%d/%m/%Y %H:%M')

        # Use the overlapping window shared by both datasets.
        return max(s_start, l_start), min(s_end, l_end)
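
# --- Illustrative usage sketch (not part of the original class) -------------
# How the load -> create_objects -> run flow is intended to be driven. The
# ui_inputs values below are hypothetical placeholders shaped to match what the
# load_* methods above expect; in the app they are produced by the UI pages,
# and `status` only needs to be callable.
def _example_luomi_run():
    ui_inputs = {
        'model_selection': {'model_type': 'luomi', 'network_type': 'embedded_network'},
        'network_name': 'Demo_Network',
        'tariffs': {},  # placeholder: the tariffs object built by the tariffs page
        'model_data_sources': {
            'selected_solar_file': 'solar_2017.csv',  # hypothetical filenames
            'selected_load_file': 'load_2017.csv',
        },
        # 'central_services' and 'model_participants' are optional here; when
        # present they are forwarded to the Ui_Central_Battery / Ui_Participants loaders.
    }
    params = Parameters()
    params.load(ui_inputs)
    params.create_objects()          # builds the LUOMI network, battery and tariffs
    return params.run(status=print)  # any callable works as the status reporter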
class MikeWrapper:
    def __init__(self):
        # Folder Routes
        self.folder_routes = FolderRoutes()

        # Model setup parameters
        self.model_type = 'mike'
        self.network_name = 'Default_Network'
        self.network_type = 'embedded_network'
        self.data_dir = self.folder_routes.get_route('data_dir')

        # UI Interface objects
        self.ui_participants = Ui_Participants(self.folder_routes)
        self.ui_tariffs = None
        self.ui_finances = None
        self.ui_central_battery = Ui_Central_Battery(self.folder_routes)
        self.ui_central_solar = Ui_Central_Solar(self.folder_routes)
        self.ui_results_parser = Ui_Results_Parsers(self.folder_routes)

        # Model Objects
        self.model_network = None
        self.model_central_battery = None
        self.model_tariffs = None
        self.model_time_periods = None
        self.model_results = None

        # Mike Model Objects
        self.mike_model = None

        # Legacy Stuff.
        self.time_periods = None
        self.ui_inputs = None

    def load(self, ui_inputs):
        load_functions = [
            self.load_model_selection,
            self.load_network_name,
            self.load_central_services,
            self.load_tariffs,
            self.load_participants,
            self.load_data_sources,
            self.load_study_parameters,
        ]
        for each in load_functions:
            each(ui_inputs)
        self.ui_inputs = ui_inputs

    def load_model_selection(self, ui_inputs):
        if 'model_selection' in ui_inputs:
            inputs = ui_inputs['model_selection']
            self.model_type = inputs.get('model_type')
            self.network_type = inputs.get('network_type')

    def load_network_name(self, ui_inputs):
        key = "network_name"
        if key in ui_inputs:
            self.network_name = ui_inputs[key]

    def load_central_services(self, ui_inputs):
        key = "central_services"
        if key in ui_inputs:
            print(ui_inputs[key])
            self.ui_central_battery.load(ui_inputs[key])

    def load_tariffs(self, ui_inputs):
        # print("mike.py/load_tariffs()", ui_inputs['model_tariffs_mike'])
        self.ui_tariffs = ui_inputs['model_tariffs_mike']
        # Example of the expected structure:
        # self.ui_tariffs = [
        #     {
        #         'name': 'user_interface',
        #         'daily_fixed_rate': 1,
        #         'static_imports': [
        #             {'start_hr': 7, 'end_hr': 10, 'price': 0.3},
        #             {'start_hr': 10, 'end_hr': 15, 'price': 0.5},
        #             {'start_hr': 15, 'end_hr': 18, 'price': 0.3},
        #         ],
        #         'static_solar_imports': [],
        #         'static_exports': []
        #     }
        # ]

    def load_study_parameters(self, ui_inputs):
        if 'study_parameters_mike' in ui_inputs:
            self.study_parameters = ui_inputs['study_parameters_mike']
        # Example of the expected structure:
        # self.study_parameters = {
        #     'scenario': 1,
        #     'arrangement': 'en_pv',
        #     'pv_cap_id': 'W_max_yield',
        #     'pv_capex_scaleable': False,
        #     'en_capex_id': 'capex_med',
        #     'a_term': 20,
        #     'a_rate': 0.06,
        #     'pv_scaleable': False,
        #     'pv_kW_peak': '',
        #     'notes': '',
        #     'tariffs': {
        #         'cp': 'TIDNULL',
        #         'all_residents': 'STC_20',
        #         'parent': 'EA305_TOU12',
        #         'network_tariff': 'EA305',
        #     }
        # }

    def load_participants(self, ui_inputs):
        ui_participants = {}
        if "model_participants_mike" in ui_inputs:
            for row in ui_inputs["model_participants_mike"]:
                row_selections = {}
                for row_input in row['row_inputs']:
                    if row_input['name'] == 'participant_id':
                        row_selections['participant_id'] = row_input['value']
                    if row_input['name'] == 'retail_tariff_type':
                        row_selections['tariff'] = row_input['value']
                    if row_input['name'] == 'load_profile':
                        row_selections['load'] = row_input['value']
                    if row_input['name'] == 'solar_profile':
                        row_selections['solar'] = row_input['value']
                ui_participants[row_selections['participant_id']] = row_selections
        # print("mike.py/load_participants()", ui_participants)
        self.ui_participants = ui_participants
        # Example of the expected structure:
        # self.ui_participants = {
        #     'Participant 1': {
        #         'load': 'profile_1',
        #         'solar': 'profile_1',
        #         'tariff': 'user_interface',
        #     },
        #     'Participant 2': {
        #         'load': 'profile_1',
        #         'solar': 'profile_1',
        #         'tariff': 'STC_20',
        #     },
        # }

    def load_data_sources(self, ui_inputs):
        if "model_data_sources_mike" in ui_inputs:
            self.solar_filename = ui_inputs["model_data_sources_mike"]['selected_solar_file']
            self.load_filename = ui_inputs["model_data_sources_mike"]['selected_load_file']

            # Work out where the datasets need to be chopped so that they match.
            start, end = self.find_time_periods(self.solar_filename,
                                                self.load_filename)
            self.solar_skiprows = self.find_skiprows(
                os.path.join(self.folder_routes.solar_profiles_dir,
                             self.solar_filename), start, end)
            self.load_skiprows = self.find_skiprows(
                os.path.join(self.folder_routes.load_profiles_dir,
                             self.load_filename), start, end)
            # self.time_periods = util.generate_dates_in_range(start, end, 30)

    def print(self):
        print("Model Type: ", self.model_type)

    def create_objects(self):
        # Create the main Study object.
        self.mike_model = NewSim(self.folder_routes, self.ui_participants,
                                 self.ui_tariffs, self.study_parameters,
                                 self.solar_filename, self.load_filename,
                                 self.solar_skiprows, self.load_skiprows)

    def run(self, status):
        print("mike.py/run()", "Attempting Mike Model Run")
        status("Running Mike Simulation")
        self.mike_model.run()
        print("mike.py/run()", "Finished Running Mike Model")
        status("Finished Running Model. Parsing Results")
        parsed_results = self.ui_results_parser.mike_temp_parser()
        print("mike.py/run()", "Finished Parsing Results")
        status("Finished Parsing Results")
        return parsed_results

    # Might move this later.
    def find_time_periods(self, solar_filename, load_filename):
        s_file_path = os.path.join(self.folder_routes.solar_profiles_dir,
                                   solar_filename)
        l_file_path = os.path.join(self.folder_routes.load_profiles_dir,
                                   load_filename)

        s_df = pd.read_csv(s_file_path)
        l_df = pd.read_csv(l_file_path)

        s_start_string = str(s_df.head(1)["timestamp"].values[0])
        s_start = datetime.datetime.strptime(s_start_string, '%d/%m/%Y %H:%M')
        l_start_string = str(l_df.head(1)["timestamp"].values[0])
        l_start = datetime.datetime.strptime(l_start_string, '%d/%m/%Y %H:%M')

        s_end_string = str(s_df.tail(1)["timestamp"].values[0])
        s_end = datetime.datetime.strptime(s_end_string, '%d/%m/%Y %H:%M')
        l_end_string = str(l_df.tail(1)["timestamp"].values[0])
        l_end = datetime.datetime.strptime(l_end_string, '%d/%m/%Y %H:%M')

        # Use the overlapping window shared by both datasets.
        return max(s_start, l_start), min(s_end, l_end)

    def find_skiprows(self, path, start, end):
        """Generate a skiprows list that can be passed to pd.read_csv.

        Given two datetimes, search through the dataset and determine which
        rows fall outside [start, end]. The returned list can be passed to
        pd.read_csv (i.e. pd.read_csv(path, skiprows=skiprows)) so that only
        the constrained period is loaded.
        """
        df = pd.read_csv(path)
        start_idx = 0
        end_idx = 0
        for index, row in df.iterrows():
            dt = datetime.datetime.strptime(row['timestamp'], '%d/%m/%Y %H:%M')
            print(index, row['timestamp'])
            if dt.isoformat() == start.isoformat():
                start_idx = index + 1
            if dt.isoformat() == end.isoformat():
                end_idx = index + 2
        final_index = df.shape[0] + 1
        skiprows = list(range(1, start_idx)) + list(range(end_idx, final_index))
        # trimmed_df = pd.read_csv(path, skiprows=skiprows)
        # print(trimmed_df)
        return skiprows
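
# --- Illustrative usage sketch (not part of the original class) -------------
# The skiprows contract described in find_skiprows(), spelled out: the header
# row (file line 0) is never included in skiprows, so the trimmed frame keeps
# its columns and contains only the timestamps inside the overlapping
# [start, end] window returned by find_time_periods(). Filenames are hypothetical.
def _example_trimmed_read(solar_filename='solar_2017.csv',
                          load_filename='load_2017.csv'):
    wrapper = MikeWrapper()
    start, end = wrapper.find_time_periods(solar_filename, load_filename)

    solar_path = os.path.join(wrapper.folder_routes.solar_profiles_dir,
                              solar_filename)
    skiprows = wrapper.find_skiprows(solar_path, start, end)

    # Only the constrained period is loaded.
    trimmed = pd.read_csv(solar_path, skiprows=skiprows)
    print(trimmed['timestamp'].iloc[0], trimmed['timestamp'].iloc[-1])
    return trimmed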