def test_raise_error_for_filter_column_not_in_default_columns(self):
    with self.assertRaises(Exception) as context:
        static_table('VARIABLES_FCAS_4_SECOND', defaults.raw_data_cache,
                     select_columns=['VARIABLENUMBER'],
                     filter_cols=['NOTACOLUMN'],
                     filter_values=(['0'],))
    self.assertTrue(
        ('Filter columns not valid. They must be a part of ' +
         'select_columns or the table defaults.') in str(context.exception))
def test_raise_error_if_select_columns_not_in_data(self):
    with self.assertRaises(Exception) as context:
        static_table('VARIABLES_FCAS_4_SECOND', defaults.raw_data_cache,
                     select_columns=['NOTACOLUMN'])
    self.assertTrue(
        ("None of columns ['NOTACOLUMN'] are in raw_aemo_data\\Ancillary "
         'Services Market Causer Pays Variables File.csv. '
         "This may be caused by user input if the 'select_columns' "
         'argument is being used, or by changed AEMO data formats. '
         'This error can be avoided by using the argument '
         "select_columns='all'.") in str(context.exception))
def test_raise_error_for_no_data_returned(self):
    good_url = defaults.static_table_url['VARIABLES_FCAS_4_SECOND']
    defaults.static_table_url['VARIABLES_FCAS_4_SECOND'] = 'bad_url'
    path_and_name = defaults.raw_data_cache + '/' + defaults.names[
        'VARIABLES_FCAS_4_SECOND']
    if os.path.isfile(path_and_name):
        os.remove(path_and_name)
    with self.assertRaises(Exception) as context:
        static_table('VARIABLES_FCAS_4_SECOND', defaults.raw_data_cache)
    self.assertTrue(
        ('Compiling data for table VARIABLES_FCAS_4_SECOND failed. ' +
         'This probably because none of the requested data ' +
         'could be download from AEMO. Check your internet ' +
         'connection and that the requested data is archived on: ' +
         'https://nemweb.com.au see nemosis.defaults for table specific '
         'urls.') in str(context.exception))
    defaults.static_table_url['VARIABLES_FCAS_4_SECOND'] = good_url
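# Note: if an assertion above fails, the swapped-in 'bad_url' is never
# restored, so later tests would see the broken URL. A minimal sketch of
# the same test with restoration guaranteed via try/finally (same names
# and logic as above, message assertion omitted for brevity):
#
# def test_raise_error_for_no_data_returned(self):
#     good_url = defaults.static_table_url['VARIABLES_FCAS_4_SECOND']
#     defaults.static_table_url['VARIABLES_FCAS_4_SECOND'] = 'bad_url'
#     try:
#         with self.assertRaises(Exception) as context:
#             static_table('VARIABLES_FCAS_4_SECOND',
#                          defaults.raw_data_cache)
#     finally:
#         defaults.static_table_url['VARIABLES_FCAS_4_SECOND'] = good_url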
def test_using_select_columns_all_does_not_raise_error(self):
    price_data = static_table('VARIABLES_FCAS_4_SECOND',
                              defaults.raw_data_cache,
                              select_columns='all')
    expected_columns = ['VARIABLENUMBER', 'VARIABLETYPE']
    self.assertSequenceEqual(list(price_data.columns), expected_columns)
def test_raise_error_for_incorrect_table_name(self):
    with self.assertRaises(Exception) as context:
        static_table('NOTATABLE', defaults.raw_data_cache)
    self.assertTrue('Table name provided is not a static table.'
                    in str(context.exception))
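# A minimal sketch of the module scaffolding these tests assume. The test
# methods above reference os, static_table and defaults, so the real test
# module needs imports along these lines; the class name TestStaticTable
# is an assumption for illustration, and the actual grouping in nemosis's
# test suite may differ.
import os
import unittest

from nemosis import static_table, defaults


class TestStaticTable(unittest.TestCase):
    # The five test methods above belong inside a unittest.TestCase
    # subclass like this one.
    pass


if __name__ == '__main__':
    unittest.main()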
import numpy as np
from datetime import timedelta
from nemosis import static_table, dynamic_data_compiler
import plotly.express as px

# Specify where we will be caching the raw AEMO data.
raw_data_cache = 'C:/Users/nick/Desktop/cache'

# Time window to pull data from.
start_time = '2021/04/27 00:00:00'
end_time = '2021/04/28 00:00:00'

# Download the latest FCAS causer pays elements file. The
# update_static_file=True argument forces nemosis to download a new copy
# of the file from AEMO even if a copy already exists in the cache.
fcas_causer_pays_elements = static_table(table_name='ELEMENTS_FCAS_4_SECOND',
                                         raw_data_location=raw_data_cache,
                                         update_static_file=True)

# Using filtering and manual inspection, find which FCAS element numbers
# belong to Hornsdale Power Reserve.
elements_for_hornsdale_power_reserve = \
    fcas_causer_pays_elements[
        fcas_causer_pays_elements['EMSNAME'].str.contains('HPR')]

# Check which variable numbers we will need.
fcas_causer_pays_variables = static_table(table_name='VARIABLES_FCAS_4_SECOND',
                                          raw_data_location=raw_data_cache,
                                          update_static_file=True)

# Pull the raw 4 second resolution FCAS SCADA data for the time window.
scada_4s_resolution = dynamic_data_compiler(start_time, end_time,
                                            table_name='FCAS_4_SECOND',
                                            raw_data_location=raw_data_cache)
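# A sketch (not from the original script) of how the element numbers found
# above might be used to narrow the 4 second data to Hornsdale Power
# Reserve only. It assumes the elements table exposes an ELEMENTNUMBER
# column and that FCAS_4_SECOND can be filtered on it via the
# filter_cols/filter_values arguments that dynamic_data_compiler accepts;
# check the column names against nemosis.defaults before relying on this.
hpr_element_numbers = \
    elements_for_hornsdale_power_reserve['ELEMENTNUMBER'].unique()
scada_hpr_only = dynamic_data_compiler(
    start_time, end_time, table_name='FCAS_4_SECOND',
    raw_data_location=raw_data_cache,
    filter_cols=['ELEMENTNUMBER'],
    filter_values=([str(n) for n in hpr_element_numbers],))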
from nemosis import dynamic_data_compiler, static_table, cache_compiler
import pandas as pd

start_time = '2017/01/01 00:00:00'
end_time = '2017/01/01 00:05:00'
table = 'DISPATCHPRICE'
raw_data_cache = 'C:/Users/nick/Desktop/cache'

# Download the latest Generators and Scheduled Loads table. The
# update_static_file=True argument forces nemosis to download a new copy
# of the file from AEMO even if a copy already exists in the cache.
gens_and_scheduled_loads = static_table(
    table_name='Generators and Scheduled Loads',
    raw_data_location=raw_data_cache,
    update_static_file=True)

# price_data = dynamic_data_compiler(start_time, end_time, table,
#                                    raw_data_cache, fformat='parquet',
#                                    keep_csv=False, parse_data_types=True)
#
# print(price_data)
#
# gens = static_table('Generators and Scheduled Loads', raw_data_cache)
#
# print(gens)

# cache_compiler(start_time, end_time, table, raw_data_cache,
#                fformat='feather', select_columns=['REGIONID', 'RRP'],
#                rebuild=True, keep_csv=True)
#
# t = pd.read_feather('smoke_cache\PUBLIC_DVD_DISPATCHPRICE_201612010000.feather')
#
# print(t['RRP'].iloc[0])
# print(t)
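# A hedged sketch of how a cache built by the commented-out cache_compiler
# call above is normally consumed: dynamic_data_compiler with the matching
# fformat returns the compiled table as a pandas DataFrame, building the
# cache itself first if it is missing. Argument values mirror the scratch
# calls above.
price_data = dynamic_data_compiler(start_time, end_time, table,
                                   raw_data_cache, fformat='feather')
print(price_data)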