def test_max():
    '''Tests difference between different numbers of past periods'''
    print('Testing test_max()')
    past_periods_list = [6, 12, 24]
    for past_periods in past_periods_list:
        lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=past_periods)
        normal_predictions = wahrsager(lstm_dataset, power_dem_df,
                                       TYPE='MAX',
                                       NAME='_test_max_{}'.format(past_periods),
                                       #PLOTTING=True,
                                       ).train()
def test_seq():
    '''Tests difference between different numbers of output periods'''
    print('Testing test_seq()')
    output_periods_list = [6, 12, 24]
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=12)
    for output_periods in output_periods_list:
        normal_predictions = wahrsager(lstm_dataset, power_dem_df,
                                       TYPE='SEQ',
                                       NAME='_test_seq_{}'.format(output_periods),
                                       num_outputs=output_periods,
                                       #PLOTTING=True,
                                       ).train()
def test_hidden_layers():
    '''Tests difference between different numbers of hidden layers'''
    print('Testing test_hidden_layers()')
    hidden_layers_list = [1, 2]
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=12)
    for hidden_layers in hidden_layers_list:
        normal_predictions = wahrsager(lstm_dataset, power_dem_df,
                                       TYPE='NORMAL',
                                       NAME='_test_hidden_layers_{}'.format(hidden_layers),
                                       num_hidden=hidden_layers,
                                       PLOTTING=True,
                                       ).train()
def test_learning_rate():
    '''Tests different learning rates'''
    print('Testing test_learning_rate()')
    learning_rate_list = [0.00001, 0.0001, 0.001, 0.01, 0.1, 0.5]
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=12)
    for learning_rate in learning_rate_list:
        normal_predictions = wahrsager(lstm_dataset, power_dem_df,
                                       TYPE='NORMAL',
                                       NAME='_test_learning_rate_{}'.format(learning_rate),
                                       lr=learning_rate,
                                       num_epochs=1000,
                                       PLOTTING=True,
                                       ).train()
def test_lstm_layers():
    '''Tests difference between different LSTM layer sizes'''
    print('Testing test_lstm_layers()')
    lstm_layers_list = [16, 32, 64, 256, 512, 1028]
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=12)
    for lstm_layers in lstm_layers_list:
        normal_predictions = wahrsager(lstm_dataset, power_dem_df,
                                       TYPE='NORMAL',
                                       NAME='_test_lstm_layers_{}'.format(lstm_layers),
                                       # Model parameters:
                                       lstm_size=lstm_layers,
                                       first_hidden_size=lstm_layers,
                                       PLOTTING=True,
                                       ).train()
def test_dropout():
    '''Tests overfitting by comparing training loss to validation loss'''
    print('Testing test_dropout()')
    dropout_list = [0.1, 0.2, 0.4, 0.6]
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=12)
    for dropout in dropout_list:
        normal_predictions = wahrsager(lstm_dataset, power_dem_df,
                                       TYPE='NORMAL',
                                       NAME='_test_dropout_{}'.format(dropout),
                                       dropout=dropout,
                                       recurrent_dropout=dropout,
                                       num_epochs=1000,
                                       PLOTTING=True,
                                       ).train()
def standart_settings():
    '''Creates standard baseline results to compare the tests against'''
    print('Testing standart_settings()')
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=12)
    normal_predictions = wahrsager(lstm_dataset, power_dem_df,
                                   TYPE='NORMAL',
                                   NAME='_test_standart',
                                   #PLOTTING=True,
                                   # Model parameters:
                                   num_outputs=1,
                                   dropout=0.1,
                                   recurrent_dropout=0.1,
                                   num_hidden=3,
                                   lstm_size=128,
                                   first_hidden_size=128,
                                   neuron_num_change=0.5,
                                   activation_hidden='relu',
                                   activation_end='relu',
                                   lr=0.001,
                                   # Training parameters:
                                   val_data_size=2000,
                                   num_epochs=200,
                                   ).train()
def run_wahrsager(NAME, TYPE='NORMAL', num_outputs=1, dropout=0.1, num_hidden=3,
                  lstm_size=256, num_past_periods=24, activation_hidden='relu',
                  activation_end='relu', lr=0.001, num_epochs=1000):
    '''Trains a single wahrsager model with the given hyperparameters'''
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=num_past_periods)
    normal_predictions = wahrsager(
        lstm_dataset, power_dem_df,
        TYPE=TYPE,
        NAME=NAME,
        PLOTTING=True,
        # Model parameters:
        num_outputs=num_outputs,
        dropout=dropout,
        recurrent_dropout=dropout,
        num_hidden=num_hidden,
        lstm_size=lstm_size,
        first_hidden_size=lstm_size,
        neuron_num_change=0.5,
        activation_hidden=activation_hidden,
        activation_end=activation_end,
        lr=lr,
        # Training parameters:
        val_data_size=2000,
        num_epochs=num_epochs,
    ).train()
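# Example usage (a minimal sketch): uncomment the calls you want to run.
# These assume that `main_dataset`, `df` and `power_dem_df` are already
# initialized in this scope (see the dataset setup below); the NAME suffix
# '_example_normal' is made up and only serves as an illustration.
#standart_settings()
#test_dropout()
#run_wahrsager(NAME='_example_normal', TYPE='NORMAL', num_past_periods=12)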
# These don't take long to run,
# but you can run them beforehand to check that everything is set up properly:
main_dataset_creator.load_total_power()
main_dataset_creator.normalized_df()
main_dataset_creator.norm_activation_time_df()
# wait_to_continue()

''' LSTM Dataset: '''
# Import main dataset as dataframe:
df = main_dataset.make_input_df(drop_main_terminal=False, use_time_diff=True, day_diff='holiday-weekend')

# Setup lstm dataset creator/loader:
lstm_dataset_creator = lstmInputDataset(main_dataset, df, num_past_periods=12)

# If you want to check that everything works fine, run these step by step:
lstm_dataset_creator.rolling_mean_training_data()
#wait_to_continue()

lstm_dataset_creator.rolling_max_training_data()
#wait_to_continue()

lstm_dataset_creator.normal_training_data()
#wait_to_continue()

lstm_dataset_creator.sequence_training_data(num_seq_periods=12)
#wait_to_continue()

#plotter = DatasetStatistics(D_PATH='_small_d/', period_string_min='15min', full_dataset=True)
main_dataset_creator.smoothed_df()
wait_to_continue()

main_dataset_creator.load_total_power()
wait_to_continue()

main_dataset_creator.normalized_df()
wait_to_continue()

main_dataset_creator.norm_activation_time_df()
wait_to_continue()

lstm_dataset_creator = lstmInputDataset(D_PATH='_BIG_D/', period_string_min='5min', full_dataset=False,
                                        num_past_periods=12, drop_main_terminal=False,
                                        use_time_diff=True, day_diff='holiday-weekend')

# If you want to check that everything works fine, run these step by step:
lstm_dataset_creator.rolling_mean_training_data()
wait_to_continue()

lstm_dataset_creator.rolling_max_training_data()
wait_to_continue()

lstm_dataset_creator.normal_training_data()
wait_to_continue()

lstm_dataset_creator.sequence_training_data(num_seq_periods=12)
def use_heuristic(HEURISTIC_TYPE='Perfekt-Pred-Heuristic', epochs=1, threshold_dem=50,
                  deactivate_SMS=True, deactivate_LION=True):

    # Naming the agent and setting up the directory path:
    now = datetime.now()
    NAME = str(round(threshold_dem)) + '_NO_BATTERY_' + HEURISTIC_TYPE + now.strftime("_%d-%m-%Y_%H-%M-%S")
    D_PATH = '_small_d/'

    # Load the dataset:
    main_dataset = mainDataset(D_PATH=D_PATH, period_string_min='15min', full_dataset=True)

    # Normalized dataframe:
    df = main_dataset.make_input_df(drop_main_terminal=False, use_time_diff=True, day_diff='holiday-weekend')

    # Sum of the power demand dataframe (not normalized):
    power_dem_df = main_dataset.load_total_power()[24:-12]

    # Load the LSTM input dataset:
    lstm_dataset = lstmInputDataset(main_dataset, df, num_past_periods=12)

    # Making predictions:
    normal_predictions = wahrsager(lstm_dataset, power_dem_df, TYPE='NORMAL').pred()[:-12]
    seq_predictions = wahrsager(lstm_dataset, power_dem_df, TYPE='SEQ', num_outputs=12).pred()

    # Adding the predictions to the dataset:
    df = df[24:-12]
    df['normal'] = normal_predictions
    df['seq_max'] = max_seq(seq_predictions)

    logger = Logger(NAME, D_PATH)

    # Setup reward_maker:
    r_maker = reward_maker(LOGGER=logger,
                           COST_TYPE='exact_costs',
                           R_TYPE='savings_focus',
                           R_HORIZON='single_step',
                           cost_per_kwh=0.2255,
                           LION_Anschaffungs_Preis=34100,
                           LION_max_Ladezyklen=1000,
                           SMS_Anschaffungs_Preis=115000/3,
                           SMS_max_Nutzungsjahre=20,
                           Leistungspreis=102,
                           logging_list=['cost_saving', 'exact_costs', 'sum_exact_costs', 'sum_cost_saving'],
                           deactivate_SMS=deactivate_SMS,
                           deactivate_LION=deactivate_LION)

    # Load the environment:
    env = common_env(reward_maker=r_maker,
                     df=df,
                     power_dem_df=power_dem_df,
                     input_list=['norm_total_power', 'normal', 'seq_max'],
                     max_SMS_SoC=12/3,
                     max_LION_SoC=54,
                     PERIODEN_DAUER=15,
                     ACTION_TYPE='contin',
                     OBS_TYPE='contin',
                     AGENT_TYPE='heuristic')

    # Setup the heuristic agent:
    agent = heurisitc(env=env,
                      HEURISTIC_TYPE=HEURISTIC_TYPE,
                      threshold_dem=threshold_dem)

    return agent.calculate(epochs=epochs)
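# Example call (a minimal sketch, using the default parameters from the function
# signature above): evaluates the perfect-prediction heuristic for one epoch
# without SMS and LION storage, since both are deactivated by default.
#use_heuristic(HEURISTIC_TYPE='Perfekt-Pred-Heuristic', epochs=1, threshold_dem=50)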