def test_save_callback(self):
    '''Test that the model performance can be monitored and results can be
    checked and saved as the model improves.

    This test trains an agent for a short period of time, without loading
    a pre-trained model. Therefore, this test also checks that an RL agent
    from stable-baselines can be trained.

    '''

    # Define logging directory. Monitoring data and agent model will be stored here
    log_dir = os.path.join(utilities.get_root_path(), 'examples',
                           'agents', 'monitored_A2C')
    # Perform a short training example with callback
    env, _, _ = run_save_callback.train_A2C_with_callback(log_dir=log_dir)
    # Load the trained agent
    model = A2C.load(os.path.join(log_dir, 'best_model'))
    # Test one step with the trained model
    obs = env.reset()
    df = pd.DataFrame([model.predict(obs)[0][0]], columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'save_callback.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Remove the trained model to leave a clean state for further testing
    shutil.rmtree(log_dir, ignore_errors=True)
def _perform_test(self, tot, dictionary, label):
    '''Common function for performing the tests.

    Parameters
    ----------
    tot: float
        Value of kpi "tot".
    dictionary: dict or None
        kpi "dict". If None, not used.
    label: str
        Label to describe KPI.

    '''

    # Check total
    df = pd.DataFrame(data=[tot], index=['{0}_tot'.format(label)],
                      columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'kpis',
                                '{0}_tot_{1}.csv'.format(label, self.name))
    self.compare_ref_values_df(df, ref_filepath)
    # Check dict
    if dictionary is not None:
        df = pd.DataFrame.from_dict(dictionary, orient='index',
                                    columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                    'references', 'kpis',
                                    '{0}_dict_{1}.csv'.format(label, self.name))
        self.compare_ref_values_df(df, ref_filepath)
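# A minimal usage sketch for _perform_test (hypothetical): the KPI label
# 'ener', the values, and the test method name are assumptions for
# illustration; in practice the values would come from a KPI calculator
# bound to the test case.
def test_energy_kpi_example(self):
    '''Example (hypothetical): check a total KPI and its breakdown dict
    against the stored references.

    '''

    tot = 150.0  # hypothetical total value of the 'ener' KPI
    dictionary = {'fan_power': 50.0, 'heat_pump_power': 100.0}  # hypothetical breakdown
    self._perform_test(tot, dictionary, 'ener')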
def test_run(self):
    '''Runs the example and tests the kpi and trajectory results.

    '''

    # Run test
    kpi_path = os.path.join(utilities.get_root_path(), 'examples',
                            'julia', 'kpi_testcase1.csv')
    res_path = os.path.join(utilities.get_root_path(), 'examples',
                            'julia', 'result_testcase1.csv')
    # Check kpis
    df = pd.read_csv(kpi_path).transpose()
    df.columns = ['value']
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1', 'kpis_julia.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Check trajectories
    df = pd.read_csv(res_path, index_col='time')
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1', 'results_julia.csv')
    # Test
    self.compare_ref_timeseries_df(df, ref_filepath)
def test_run(self):
    '''Runs the example and tests the kpi and trajectory results.

    '''

    # Run test
    kpi_path = os.path.join(utilities.get_root_path(), 'examples',
                            'julia', 'kpi_testcase2.csv')
    res_path = os.path.join(utilities.get_root_path(), 'examples',
                            'julia', 'result_testcase2.csv')
    # Check kpis
    kpi = pd.read_csv(kpi_path)
    self.assertAlmostEqual(kpi['ener_tot'].iloc[0], kpi_ref['ener_tot'], places=3)
    self.assertAlmostEqual(kpi['tdis_tot'].iloc[0], kpi_ref['tdis_tot'], places=3)
    self.assertAlmostEqual(kpi['cost_tot'].iloc[0], kpi_ref['cost_tot'], places=3)
    self.assertAlmostEqual(kpi['time_rat'].iloc[0], kpi_ref['time_rat_julia'], places=3)
    self.assertAlmostEqual(kpi['emis_tot'].iloc[0], kpi_ref['emis_tot'], places=3)
    # Check trajectories
    df = pd.read_csv(res_path, index_col='time')
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase2', 'results_julia.csv')
    # Test
    self.compare_ref_timeseries_df(df, ref_filepath)
def _run(self, season):
    '''Runs the example and tests the kpi and trajectory results for season.

    Parameters
    ----------
    season: str
        'winter' or 'summer' or 'shoulder'

    Returns
    -------
    None

    '''

    if season == 'winter':
        start_time = 1 * 24 * 3600
    elif season == 'summer':
        start_time = 248 * 24 * 3600
    elif season == 'shoulder':
        start_time = 118 * 24 * 3600
    else:
        raise ValueError('Season {0} unknown.'.format(season))
    # Initialize test case
    res_initialize = requests.put('{0}/initialize'.format(self.url),
                                  data={'start_time': start_time,
                                        'warmup_period': 0})
    # Get default simulation step
    step_def = requests.get('{0}/step'.format(self.url)).json()
    # Simulation Loop
    for i in range(int(self.length / step_def)):
        # Advance simulation
        y = requests.post('{0}/advance'.format(self.url), data={}).json()
    # Report KPIs
    for price_scenario in ['constant', 'dynamic', 'highly_dynamic']:
        requests.put('{0}/scenario'.format(self.url),
                     data={'electricity_price': price_scenario})
        res_kpi = requests.get('{0}/kpi'.format(self.url)).json()
        # Check kpis
        df = pd.DataFrame.from_dict(res_kpi, orient='index', columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                    'references', self.name,
                                    'kpis_{0}_{1}.csv'.format(season, price_scenario))
        self.compare_ref_values_df(df, ref_filepath)
    requests.put('{0}/scenario'.format(self.url),
                 data={'electricity_price': 'constant'})
    # Report results
    res_results = requests.get('{0}/results'.format(self.url)).json()
    # Check results
    df = self.results_to_df(res_results)
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', self.name,
                                'results_{0}.csv'.format(season))
    self.compare_ref_timeseries_df(df, ref_filepath)
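# A minimal sketch of how season-specific tests might dispatch to _run.
# The method names below are assumptions for illustration; _run itself
# accepts exactly the three season strings it validates above.
def test_winter_example(self):
    '''Example (hypothetical): run and check the winter test period.

    '''

    self._run('winter')

def test_summer_example(self):
    '''Example (hypothetical): run and check the summer test period.

    '''

    self._run('summer')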
def test_variable_episode(self):
    '''Test that a model can be trained using variable episode length.
    The method that is used to determine whether the episode is
    terminated or not is defined by the user.

    This test trains an agent for a short period of time, without loading
    a pre-trained model. Therefore, this test also checks that an RL agent
    from stable-baselines can be trained. This test also uses the save
    callback to check that the variable episode length is being
    effectively used.

    Notice that this test also checks that child classes can be nested
    since the example redefines the `compute_reward` and the
    `compute_done` methods.

    '''

    # Define logging directory. Monitoring data and agent model will be stored here
    log_dir = os.path.join(utilities.get_root_path(), 'examples',
                           'agents', 'variable_episode_A2C')
    # Perform a short training example with callback
    env, _, _ = run_variable_episode.train_A2C_with_variable_episode(log_dir=log_dir)
    # Load the trained agent
    model = A2C.load(os.path.join(log_dir, 'best_model'))
    # Test one step with the trained model
    obs = env.reset()
    df = pd.DataFrame([model.predict(obs)[0][0]], columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'variable_episode_step.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Check variable episode lengths
    monitor = pd.read_csv(os.path.join(log_dir, 'monitor.csv'), index_col=None)
    monitor = monitor.iloc[1:]
    monitor.reset_index(inplace=True)
    monitor.columns = ['reward', 'episode_length', 'time']
    # Time may vary from one computer to another
    monitor.drop(labels='time', axis=1, inplace=True)
    # Utilities require the index to be named 'time' (even though this is not the case here)
    monitor.index.name = 'time'
    # Transform to numeric
    monitor = monitor.apply(lambda col: pd.to_numeric(col, errors='coerce'))
    # Check that we always obtain the same monitoring parameters
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'variable_episode_monitor.csv')
    self.compare_ref_timeseries_df(monitor, ref_filepath)
    # Remove the trained model to leave a clean state for further testing
    shutil.rmtree(log_dir, ignore_errors=True)
def test_run(self):
    '''Runs the example and tests the kpi and trajectory results.

    '''

    # Run test
    kpi, res, customizedkpis_result = twoday_p.run()
    # Check kpis
    self.assertAlmostEqual(kpi['ener_tot'], kpi_ref['ener_tot'], places=3)
    self.assertAlmostEqual(kpi['tdis_tot'], kpi_ref['tdis_tot'], places=3)
    self.assertAlmostEqual(kpi['cost_tot'], kpi_ref['cost_tot'], places=3)
    self.assertAlmostEqual(kpi['time_rat'], kpi_ref['time_rat_python'], places=3)
    self.assertAlmostEqual(kpi['emis_tot'], kpi_ref['emis_tot'], places=3)
    # Check trajectories
    # Make dataframe
    df = pd.DataFrame()
    for s in ['y', 'u']:
        for x in res[s].keys():
            if x != 'time':
                df = pd.concat((df, pd.DataFrame(data=res[s][x],
                                                 index=res['y']['time'],
                                                 columns=[x])), axis=1)
    df.index.name = 'time'
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1', 'results_python.csv')
    # Test
    self.compare_ref_timeseries_df(df, ref_filepath)
    # Check customized kpi trajectories
    # Make dataframe
    df = pd.DataFrame()
    for x in customizedkpis_result.keys():
        if x != 'time':
            df = pd.concat((df, pd.DataFrame(data=customizedkpis_result[x],
                                             index=customizedkpis_result['time'],
                                             columns=[x])), axis=1)
    df.index.name = 'time'
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1', 'customizedkpis.csv')
    # Test
    self.compare_ref_timeseries_df(df, ref_filepath)
def setUp(self):
    '''Setup for testcase.

    '''

    self.name = 'testcase1'
    self.url = 'http://127.0.0.1:5000'
    self.name_ref = 'wrapped'
    self.inputs_ref = {
        "oveAct_activate": {
            "Unit": None,
            "Description": "Activation for Heater thermal power",
            "Minimum": None,
            "Maximum": None
        },
        "oveAct_u": {
            "Unit": "W",
            "Description": "Heater thermal power",
            "Minimum": -10000,
            "Maximum": 10000
        }
    }
    self.measurements_ref = {
        "PHea_y": {
            "Unit": "W",
            "Description": "Heater power",
            "Minimum": None,
            "Maximum": None
        },
        "TRooAir_y": {
            "Unit": "K",
            "Description": "Zone air temperature",
            "Minimum": None,
            "Maximum": None
        }
    }
    self.step_ref = 60.0
    self.y_ref = {
        u'PHea_y': 0.0,
        u'TRooAir_y': 293.15015556512265,
        u'time': 60.0
    }
    self.forecast_default_ref = os.path.join(utilities.get_root_path(),
                                             'testing', 'references', 'forecast',
                                             'tc1_forecast_default.csv')
    self.forecast_parameters_ref = {'horizon': 172800, 'interval': 123}
    self.forecast_with_parameters_ref = os.path.join(utilities.get_root_path(),
                                                     'testing', 'references', 'forecast',
                                                     'tc1_forecast_interval.csv')
def check_obs_act_rew_kpi(self, obs=None, act=None, rew=None, kpi=None,
                          label='default'):
    '''Auxiliary method to check the observations, actions, rewards,
    and/or kpis of a particular test case run.

    '''

    # Check observation values
    if obs is not None:
        df = pd.DataFrame(obs, columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                    'references', 'observations_{}.csv'.format(label))
        self.compare_ref_values_df(df, ref_filepath)
    # Check action values
    if act is not None:
        df = pd.DataFrame(act, columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                    'references', 'actions_{}.csv'.format(label))
        self.compare_ref_values_df(df, ref_filepath)
    # Check reward values
    if rew is not None:
        df = pd.DataFrame(rew, columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                    'references', 'rewards_{}.csv'.format(label))
        self.compare_ref_values_df(df, ref_filepath)
    # Check kpi values
    if kpi is not None:
        df = pd.DataFrame(data=[kpi]).T
        df.columns = ['value']
        df.index.name = 'keys'
        # Time ratio is not checked since it depends on the machine where tests are run
        df.drop('time_rat', inplace=True)
        ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                    'references', 'kpis_{}.csv'.format(label))
        self.compare_ref_values_df(df, ref_filepath)
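# A minimal sketch of how a test might use check_obs_act_rew_kpi after
# stepping the environment once. The environment attributes and the
# get_kpis call are assumptions for illustration; only the 'default'
# label and the argument names come from the method above.
def test_one_step_example(self):
    '''Example (hypothetical): advance one step and check all quantities.

    '''

    obs = self.env.reset()
    act = self.env.action_space.sample()  # hypothetical random action
    obs, rew, done, info = self.env.step(act)
    kpi = self.env.get_kpis()  # hypothetical KPI getter on the environment
    self.check_obs_act_rew_kpi(obs=obs, act=act, rew=[rew], kpi=kpi,
                               label='default')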
def __init__(self, hawking, bot, *args, **kwargs):
    self.hawking = hawking
    self.bot = bot
    self.questions = []
    self.is_mid_question_refresh = False
    self.last_question_refresh_time = time.time()

    ## Load config data
    self.submission_top_time = CONFIG_OPTIONS.get("stupid_question_top_time", "month")
    self.submission_count = CONFIG_OPTIONS.get("stupid_question_submission_count", 500)
    self.refresh_time_seconds = CONFIG_OPTIONS.get("stupid_question_refresh_time_seconds", 21600)

    ## Load module specific configs from 'stupid_questions.json' located in modules folder
    modules_folder_name = CONFIG_OPTIONS.get("modules_folder", "modules")
    config = utilities.load_json(os.path.sep.join([utilities.get_root_path(),
                                                   modules_folder_name,
                                                   "stupid_questions.json"]))
    reddit_client_id = config.get("reddit_client_id")
    reddit_secret = config.get("reddit_secret")
    subreddits = CONFIG_OPTIONS.get("stupid_question_subreddits", ["NoStupidQuestions"])

    try:
        self.reddit = Reddit(client_id=reddit_client_id,
                             client_secret=reddit_secret,
                             user_agent=self.REDDIT_USER_AGENT)
        ## Use a multireddit to pull random post from any of the chosen subreddits
        self.subreddit = self.reddit.subreddit("+".join(subreddits))
    except Exception:
        logger.exception("Unable to create reddit/subreddit instance")

    self.bot.loop.create_task(self.load_questions())
def setUp(self):
    '''Setup for each test.

    '''

    self.case = TestCase()
    # Instantiate a KPI calculator linked to an empty case
    self.cal = KPI_Calculator(self.case)
    # Read the reference data
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'kpis', 'tc2_results_python.csv')
    df = pd.read_csv(ref_filepath)
    # Fill the test case with the reference data
    for var in df.keys():
        # Assign time
        if var == 'time':
            self.case.y_store[var] = df.loc[:, var]
        # Assign inputs
        elif var.endswith('_u'):
            self.case.u_store[var] = df.loc[:, var]
        # Assign outputs
        elif var.endswith('_y'):
            self.case.y_store[var] = df.loc[:, var]
def __init__(self, hawking, bot, *args, **kwargs):
    self.hawking = hawking
    self.bot = bot

    ## Load module specific configs from 'stupid_questions.json' located in modules folder
    modules_folder_name = CONFIG_OPTIONS.get("modules_folder", "modules")
    config = utilities.load_json(os.path.sep.join([utilities.get_root_path(),
                                                   modules_folder_name,
                                                   "stupid_questions.json"]))
    reddit_client_id = config.get("reddit_client_id")
    reddit_secret = config.get("reddit_secret")
    subreddits = CONFIG_OPTIONS.get("stupid_question_subreddits", ["NoStupidQuestions"])

    try:
        self.reddit = Reddit(client_id=reddit_client_id,
                             client_secret=reddit_secret,
                             user_agent=self.REDDIT_USER_AGENT)
        ## Use a multireddit to pull random post from any of the chosen subreddits
        self.subreddit = self.reddit.subreddit("+".join(subreddits))
    except Exception as e:
        utilities.debug_log("Unable to create reddit/subreddit instance,", e, debug_level=1)
def test_return(self):
    '''Test that scenario returns properly.

    '''

    scenario_both = {'time_period': 'test_day',
                     'electricity_price': 'dynamic'}
    scenario_time = {'time_period': 'test_day'}
    scenario_elec = {'electricity_price': 'dynamic'}
    # Both
    res = requests.put('{0}/scenario'.format(self.url), data=scenario_both).json()
    # Check return is valid for electricity price
    self.assertTrue(res['electricity_price'])
    # Check return is valid for time period
    df = pd.DataFrame.from_dict(res['time_period'], orient='index', columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', self.name,
                                'initial_values_set_scenario.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Time only
    res = requests.put('{0}/scenario'.format(self.url), data=scenario_time).json()
    # Check return is valid for electricity price
    self.assertTrue(res['electricity_price'] is None)
    # Check return is valid for time period
    df = pd.DataFrame.from_dict(res['time_period'], orient='index', columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', self.name,
                                'initial_values_set_scenario.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Electricity price only
    res = requests.put('{0}/scenario'.format(self.url), data=scenario_elec).json()
    # Check return is valid for electricity price
    self.assertTrue(res['electricity_price'])
    # Check return is valid for time period
    self.assertTrue(res['time_period'] is None)
async def play_random_channel_timeout_clip(self, server_state, callback):
    '''Channel timeout logic, picks an appropriate sign-off message and plays it'''

    if (len(self.channel_timeout_clip_paths) > 0):
        await self.audio_player_cog._play_audio_via_server_state(
            server_state,
            os.path.sep.join([utilities.get_root_path(),
                              random.choice(self.channel_timeout_clip_paths)]),
            callback)
def setUp(self):
    '''Setup for each test.

    '''

    self.name = 'MultiZone'
    self.testcase_name = 'testcase3'
    self.ref_data_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                          'references', 'kpis',
                                          'tc3_results_python.csv')
    self._initialize_testcase(self.testcase_name, self.ref_data_filepath)
def test_run(self):
    '''Runs the example and tests the kpi and trajectory results.

    '''

    # Run test
    kpi, df_res = testcase3.run()
    # Check kpis
    df = pd.DataFrame.from_dict(kpi, orient='index', columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase3', 'kpis_python.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Check trajectories
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase3', 'results_python.csv')
    self.compare_ref_timeseries_df(df_res, ref_filepath)
def test_run(self):
    '''Runs the example and tests the kpi and trajectory results.

    '''

    # Run test
    custom_kpi_config_path = os.path.join(utilities.get_root_path(), 'examples',
                                          'python', 'custom_kpi',
                                          'custom_kpis_example.config')
    kpi, res, customizedkpis_result = testcase1.run(customized_kpi_config=custom_kpi_config_path)
    # Check kpis
    df = pd.DataFrame.from_dict(kpi, orient='index', columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1', 'kpis_python.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Check trajectories
    df = self.results_to_df(res)
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1', 'results_python.csv')
    self.compare_ref_timeseries_df(df, ref_filepath)
    # Check customized kpi trajectories
    df = pd.DataFrame()
    for x in customizedkpis_result.keys():
        if x != 'time':
            df = pd.concat((df, pd.DataFrame(data=customizedkpis_result[x],
                                             index=customizedkpis_result['time'],
                                             columns=[x])), axis=1)
    df.index.name = 'time'
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1', 'customizedkpis.csv')
    self.compare_ref_timeseries_df(df, ref_filepath)
def setUp(self):
    '''Setup for each test.

    '''

    # Change directory to testcase 2
    os.chdir(os.path.join(testing_root_dir, 'testcase2'))
    from testcase2.testcase import TestCase
    self.case = TestCase()
    # Instantiate a forecaster
    self.forecaster = Forecaster(self.case)
    # Specify test references
    self.ref_forecast_default = os.path.join(utilities.get_root_path(),
                                             'testing', 'references', 'forecast',
                                             'testcase2', 'tc2_forecast_default.csv')
    self.ref_forecast_interval = os.path.join(utilities.get_root_path(),
                                              'testing', 'references', 'forecast',
                                              'testcase2', 'tc2_forecast_interval.csv')
def load_states(self):
    try:
        with open(os.sep.join([utilities.get_root_path(),
                               CONFIGS.get('state_path')])) as fd:
            return json.loads(fd.read())
    except (FileNotFoundError, json.JSONDecodeError):
        ## State file is missing or malformed, so load some sort of default
        output = {}
        for user in CONFIGS.get('users', []):
            output[user] = 'home'
        return output
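## A minimal sketch of the matching save path, assuming states should be
## persisted to the same file that load_states reads. The method name and
## the `states` argument are hypothetical; only CONFIGS.get('state_path')
## comes from the method above.
def save_states(self, states):
    state_file = os.sep.join([utilities.get_root_path(),
                              CONFIGS.get('state_path')])
    with open(state_file, 'w') as fd:
        ## Serialize the user -> location mapping as JSON
        fd.write(json.dumps(states))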
def test_run(self):
    '''Runs the example and tests the kpi results.

    '''

    # Run test
    kpi = testcase1_scenario.run(plot=False)
    # Check kpis
    df = pd.DataFrame.from_dict(kpi, orient='index', columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase1',
                                'kpis_python_scenario.csv')
    self.compare_ref_values_df(df, ref_filepath)
def test_run(self):
    '''Runs the example and tests the kpi and trajectory results.

    '''

    # Run test
    kpi_path = os.path.join(utilities.get_root_path(), 'examples',
                            'javascript', 'kpi_testcase2.csv')
    res_path = os.path.join(utilities.get_root_path(), 'examples',
                            'javascript', 'result_testcase2.csv')
    # Check kpis
    df = pd.read_csv(kpi_path, index_col='keys')
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase2', 'kpis_javascript.csv')
    self.compare_ref_values_df(df, ref_filepath)
    # Check trajectories
    df = pd.read_csv(res_path, index_col='time')
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'testcase2', 'results_javascript.csv')
    # Test
    self.compare_ref_timeseries_df(df, ref_filepath)
def test_get_forecast_default(self):
    '''Check that the forecaster is able to retrieve the data.

    '''

    # Retrieve the default forecast from the test case
    forecast = self.forecaster.get_forecast()
    # Set reference file path
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'forecast', 'tc2_forecast_default.csv')
    # Check the forecast
    df_forecaster = pd.DataFrame(forecast).set_index('time')
    self.compare_ref_timeseries_df(df_forecaster, ref_filepath)
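# A companion sketch (hypothetical) for the parameterized variant, assuming
# Forecaster.get_forecast accepts horizon and interval keyword arguments;
# the values 172800 and 123 mirror forecast_parameters_ref used elsewhere
# in this test suite, and the reference file path is an assumption.
def test_get_forecast_interval_example(self):
    '''Example (hypothetical): check a forecast with a custom horizon
    and interval against its reference.

    '''

    forecast = self.forecaster.get_forecast(horizon=172800, interval=123)
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'forecast',
                                'tc2_forecast_interval.csv')
    df_forecaster = pd.DataFrame(forecast).set_index('time')
    self.compare_ref_timeseries_df(df_forecaster, ref_filepath)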
def __init__(self):
    self.path_parser = bot_io.PathParser()
    # Passes kwargs onto PlotterFileController
    self.plotter = plotter.Plotter(output_folder_path=os.sep.join(
        [utilities.get_root_path(), "resources", "examples"]))
    self.plotter.file_controller._init_dir()  # Clean up the examples dir

    for map_name, message in EXAMPLES:
        path_obj = self.path_parser.parse_message(message)
        plotted_map = self.plotter.plot_plane_path(map_name, path_obj)
        plotted_map = plotted_map.resize(EXAMPLE_SIZE, EXAMPLE_RESIZE_FILTER)
        self.plotter.file_controller.save_map(
            plotted_map,
            "{} {}.{}".format(map_name, message,
                              self.plotter.file_controller.map_file_extension))
def test_larger_step(self):
    '''Test that the simulation stops when trying to take a step larger
    than the scenario length.

    '''

    scenario = {'time_period': 'test_day'}
    requests.put('{0}/scenario'.format(self.url), data=scenario)
    # Try simulating past the test period
    step = 5 * 7 * 24 * 3600
    requests.put('{0}/step'.format(self.url), data={'step': step})
    requests.post('{0}/advance'.format(self.url), data={}).json()
    # Check results
    measurements = requests.get('{0}/measurements'.format(self.url)).json()
    df = self.results_to_df(measurements.keys(), -np.inf, np.inf, self.url)
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', self.name,
                                'results_time_period_end_larger_step.csv')
    self.compare_ref_timeseries_df(df, ref_filepath)
def test_reset_fixed(self):
    '''Test that the environment can reset using a fixed start time
    and a specific warmup period.

    '''

    self.env.random_start_time = False
    self.env.start_time = 14 * 24 * 3600
    self.env.warmup_period = 3 * 3600
    obs = self.env.reset()
    # Check values
    df = pd.DataFrame(data=[obs], index=['obs_reset_fixed'], columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'reset_fixed.csv')
    self.compare_ref_values_df(df, ref_filepath)
def test_extra_step(self):
    '''Test that the simulation stops when trying to take an extra step
    beyond the end of the scenario.

    '''

    scenario = {'time_period': 'test_day'}
    requests.put('{0}/scenario'.format(self.url), data=scenario)
    # Try simulating past the test period
    step = 7 * 24 * 3600
    requests.put('{0}/step'.format(self.url), data={'step': step})
    for i in [0, 1, 2]:
        y = requests.post('{0}/advance'.format(self.url), data={}).json()
    # Check the last y indicates no simulation (empty dict)
    self.assertDictEqual(y, dict())
    # Check results
    measurements = requests.get('{0}/measurements'.format(self.url)).json()
    df = self.results_to_df(measurements.keys(), -np.inf, np.inf, self.url)
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', self.name,
                                'results_time_period_end_extra_step.csv')
    self.compare_ref_timeseries_df(df, ref_filepath)
def test_reset_random(self):
    '''Test that the environment can reset using a random start time
    that is outside of the specified `excluding_periods`. This test
    also checks that the seed for random initialization works properly.

    '''

    self.env.random_start_time = True
    self.env.warmup_period = 1 * 3600
    # Set the excluding periods to be the first two weeks of February
    # and the first two weeks of November
    excluding_periods = [(31 * 24 * 3600, 31 * 24 * 3600 + 14 * 24 * 3600),
                         (304 * 24 * 3600, 304 * 24 * 3600 + 14 * 24 * 3600)]
    self.env.excluding_periods = excluding_periods
    random.seed(123456)
    start_times = OrderedDict()
    # Reset one hundred times
    for i in range(100):
        obs = self.env.reset()
        start_time = self.env.start_time
        episode = (start_time, start_time + self.env.max_episode_length)
        for period in excluding_periods:
            # Make sure that the episodes don't overlap with excluding_periods
            assert not (episode[0] < period[1] and period[0] < episode[1]), \
                'reset is not working properly when generating random times. ' \
                'The episode with starting time {0} and end time {1} ' \
                'overlaps with period {2}. This corresponds to the ' \
                'generated starting time number {3}.' \
                ''.format(start_time, start_time + self.env.max_episode_length, period, i)
        start_times[start_time] = obs
    # Check values
    df = pd.DataFrame.from_dict(start_times, orient='index', columns=['value'])
    df.index.name = 'keys'
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', 'reset_random.csv')
    self.compare_ref_values_df(df, ref_filepath)
def test_event(self):
    '''Runs the example to test for correct event handling.

    Parameters
    ----------
    None

    Returns
    -------
    None

    '''

    start_time = 118 * 24 * 3600
    length = 48 * 3600 / 12
    # Initialize test case
    requests.put('{0}/initialize'.format(self.url),
                 data={'start_time': start_time, 'warmup_period': 0})
    # Get default simulation step
    step_def = requests.get('{0}/step'.format(self.url)).json()
    # Simulation Loop
    for i in range(int(length / step_def)):
        # Switch pump on/off for each timestep
        pump = 0 if (i % 2) == 0 else 1
        u = {'ovePum_activate': 1, 'ovePum_u': pump}
        # Advance simulation
        requests.post('{0}/advance'.format(self.url), data=u).json()
    # Check results
    points = self.get_all_points(self.url)
    df = self.results_to_df(points, start_time, start_time + length, self.url)
    ref_filepath = os.path.join(utilities.get_root_path(), 'testing',
                                'references', self.name,
                                'results_event_test.csv')
    self.compare_ref_timeseries_df(df, ref_filepath)
def init_clips(self):
    clip_dir_paths = self.scan_clips(os.path.sep.join([utilities.get_root_path(),
                                                       self.clips_folder_path]))

    counter = 0
    for clip_dir_path in clip_dir_paths:
        starting_count = counter
        clip_group = self._build_clip_group(clip_dir_path)

        for clip in self.load_clips(clip_dir_path):
            try:
                self.add_clip(clip)
                clip_group.add_clip(clip)
            except Exception:
                logger.warning("Couldn't add clip", exc_info=True)
            else:
                counter += 1

        ## Ensure we don't add empty clip files into the groupings
        if (counter > starting_count):
            self.clip_groups[clip_group.key] = clip_group

            ## Set up a dummy command for the category, to assist with creating the help interface.
            ## asyncio.sleep is just a dummy command since commands.Command needs some kind of async callback
            help_command = commands.Command(self._create_noop_callback(),
                                            name=clip_group.key,
                                            hidden=True,
                                            no_pm=True)
            self.bot.add_command(help_command)
            self.command_group_names.append(clip_group.key)  # Keep track of the 'parent' commands for later use

    logger.info("Loaded {} clip{}.".format(counter, "s" if counter != 1 else ""))
    return counter
import os
import pickle
import sys

import matplotlib.pyplot as plt

# Assumed import: get_root_path is called below and, elsewhere in this
# codebase, is provided by a `utilities` module.
from utilities import get_root_path


def plot_graph(path):
    # Collect the training histories pickled under each run directory
    histories = {}
    with os.scandir(path) as entries:
        for entry in entries:
            try:
                with open(os.path.join(path, entry.name, 'history.pkl'), 'rb') as source:
                    histories['/'.join(entry.name.split('_')[:3])] = pickle.load(source)
            except NotADirectoryError:
                pass

    # Plot one subplot per metric, overlaying every run's curve
    plt.figure(figsize=(11, 6))
    for item in histories:
        history = histories[item]
        metrics = list(history.keys())
        for i, metric in enumerate(metrics):
            plt.subplot(1, len(metrics), i + 1)
            plt.plot(history[metric])
            plt.ylabel(metric)
            plt.xlabel('epoch')
            plt.legend(list(histories.keys()), loc='best')
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    root_path = get_root_path()
    review_path = os.path.join(root_path, 'review/')
    plot_graph(review_path)
    sys.exit(0)