"Company Name", "Station Name", "Fuel", "Installed Capacity (MW)", "Year of commission or year generation began" ] TAB_NUMBER_DUKES = 2 # set up projection transformation # thanks to John A. Stevenson: http://all-geo.org/volcan01010/2012/11/change-coordinates-with-pyproj/ wgs84 = pyproj.Proj( "+init=EPSG:4326" ) # LatLon with WGS84 datum used by GPS units and Google Earth osgb36 = pyproj.Proj("+init=EPSG:27700") # UK Ordnance Survey, 1936 datum # raw files download # True if specified --download, otherwise False FILES = {RAW_FILE_NAME_REPD: URL_REPD, RAW_FILE_NAME_DUKES: URL_DUKES} DOWNLOAD_FILES = pw.download( u"UK Renewable Energy Planning Database and DUKES", FILES) # set up fuel type thesaurus fuel_thesaurus = pw.make_fuel_thesaurus() # set up country name thesaurus country_thesaurus = pw.make_country_names_thesaurus() # create dictionary for power plant objects plants_dictionary = {} # load GEO and CARMA for matching coordinates geo_database = pw.load_database(GEO_DATABASE_FILE) print("Loaded {0} plants from GEO database.".format(len(geo_database))) carma_database = pw.load_database(CARMA_DATABASE_FILE) print("Loaded {0} plants from CARMA database.".format(len(carma_database)))
    # (tail of a location-lookup helper whose def line is above this chunk;
    # indentation reconstructed — confirm against the original file)
    # Coal/Gas/Oil plants share a single "Thermal" bucket in the static location file.
    if fuel_name in ["Coal", "Gas", "Oil"]:
        fuel_name = "Thermal"
    if idval in plant_locations[fuel_name].keys():
        return plant_locations[fuel_name][idval][0:2]  # presumably (latitude, longitude) — verify file layout
    else:
        return pw.NO_DATA_NUMERIC, pw.NO_DATA_NUMERIC

# download if specified
# build one raw-file path / URL pair per configured dataset
FILES = {}
for dataset in DATASETS:
    RAW_FILE_NAME_this = RAW_FILE_NAME.replace("FILENAME", dataset["filename"])
    URL = URL_BASE.replace("NUMBER", dataset["number"])
    FILES[RAW_FILE_NAME_this] = URL
DOWNLOAD_FILES = pw.download("Chile power plant data", FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# read static location file [fuel,id,name,latitude,longitude]
plant_locations = {
fileType="src_csv", filename="database_{0}.csv".format(SAVE_CODE)) SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin") #LOCATION_FILE_NAME = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE, filename="locations_{0}.csv".format(SAVE_CODE)) # other parameters as needed # specific functions here # def xyz(): # pass # optional raw file(s) download # True if specified --download, otherwise False FILES = { RAW_FILE_NAME: URL } # dictionary of saving directories and corresponding urls DOWNLOAD_FILES = pw.download(NAME_OF_DATABASE, FILES) # set up fuel type thesaurus fuel_thesaurus = pw.make_fuel_thesaurus() # set up country name thesaurus country_thesaurus = pw.make_country_names_thesaurus() # create dictionary for power plant objects plants_dictionary = {} # extract powerplant information from file(s) print(u"Reading in plants...") # example for reading CSV file # specify column names used in raw file
RAW_FILE_NAME_CEA_UZ = pw.make_file_path(fileType="raw", filename=SAVE_CODE)
RAW_FILE_NAME_REC = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE,
                                      filename="accredited_rec_generators.html")
WRI_DATABASE = pw.make_file_path(fileType="src_bin", filename=u"WRI-Database.bin")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_IND.csv")
PLANT_LOCATIONS_FILE = pw.make_file_path(fileType="resource", subFolder="IND",
                                         filename="CEA_plants.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
LOCATION_FILE = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE,
                                  filename="plant_locations_IND.csv")
TAB_NAME = u"Data"
DATA_YEAR = 2016  # capacity data from CEA is as of 2016

# optional raw file(s) download
# True if specified --download, otherwise False
# dictionary of saving directories and corresponding urls
FILES = {RAW_FILE_NAME_CEA: "http://www.cea.nic.in/reports/others/thermal/tpece/cdm_co2/database_12.zip",
         RAW_FILE_NAME_REC: "https://www.recregistryindia.nic.in/index.php/general/publics/accredited_regens"}
DOWNLOAD_FILES = pw.download(u'CEA and RECS', FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# load location information from static file
plant_locations = {}
# NOTE(review): mode 'rU' is deprecated and was removed in Python 3.11; kept here
# because this script is otherwise Python-2 style — on Python 3 switch to
# open(..., newline='') as the csv module documents.
with open(PLANT_LOCATIONS_FILE, 'rU') as f:
    datareader = csv.reader(f)
    # FIX: use the builtin next() instead of the Python-2-only reader.next()
    # method, so the header read works on both Python 2 and Python 3.
    headers = next(datareader)
SOURCE_NAME = u"European Pollutant Release and Transfer Register" SOURCE_URL = "http://prtr.ec.europa.eu/" SAVE_CODE = u"EPRTR" RAW_FILE_NAME = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="EPRTR-plants.csv") CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_EPRTR.csv") SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin") YEAR_UPDATED = 2014 DATA_ENCODING = "Windows-1252" # optional raw file(s) download URL = "http://prtr.ec.europa.eu/" FILES = {RAW_FILE_NAME: URL} DOWNLOAD_FILES = pw.download(SOURCE_NAME, FILES) # set up fuel type thesaurus fuel_thesaurus = pw.make_fuel_thesaurus() # set up country name thesaurus country_thesaurus = pw.make_country_names_thesaurus() # create dictionary for power plant objects plants_dictionary = {} # extract powerplant information from file(s) print(u"Reading in plants...") # specify column names used in raw file COLNAMES = [
SAVE_CODE = u"YEM" RAW_FILE_NAME = pw.make_file_path(fileType="raw", subFolder=RAW_SOURCE, filename="Manual Of Power Stations2016.xls") CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="yemen_database.csv") SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin") URL = "http://www.auptde.org/Article_Files/Manual%20Of%20Power%20Stations2016.xls" COLS = {'name': 0, 'capacity': 3, 'year_built': 4, 'fuel_type': 5} START_ROW = 4 END_ROW = 41 TAB_NUMBER = 0 # optional raw file download DOWNLOAD_FILES = pw.download(COUNTRY_NAME, {RAW_FILE_NAME: URL}) # set up fuel type thesaurus fuel_thesaurus = pw.make_fuel_thesaurus() # create dictionary for power plant objects plants_dictionary = {} # load file book = xlrd.open_workbook(RAW_FILE_NAME) sheet = book.sheet_by_index(TAB_NUMBER) capacity_list = [] year_built_list = [] for i in range(START_ROW, END_ROW): # reset variables
COUNTRY_NAME = u"Uruguay" SOURCE_NAME = u"La Administración Nacional de Usinas y Trasmisiones Eléctricas (Uruguay)" SOURCE_URL = u"https://www.ute.com.uy/SgePublico/mapa.aspx" SOURCE_YEAR = 2018 SAVE_CODE = u"URY" RAW_FILE_NAME = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="mapa.aspx.html") CSV_FILE_NAME = pw.make_file_path( fileType="src_csv", filename="database_{0}.csv".format(SAVE_CODE)) SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin") #COORDINATE_FILE = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE, filename="coordinates_{0}.csv".format(SAVE_CODE)) ENCODING = "UTF-8" # download files if requested DOWNLOAD_FILES = pw.download('UTE data', {RAW_FILE_NAME: SOURCE_URL}) # make URY-specific fuel parser def parse_fuel_URY(fuel_string, id_val): """Returns a tuple of primary_fuel, other_fuel_set.""" fuel_synonyms = { "fot": "Solar", "eol": "Wind", #"ter": "Thermal", "bio": "Biomass", "hid": "Hydro" } special_fuel_corrections = {
# other parameters
API_BASE = "http://services.ga.gov.au/site_3/services/Electricity_Infrastructure/MapServer/WFSServer"
API_CALL = "service=WFS&version=1.1.0&request=GetFeature&typeName=National_Major_Power_Stations"

# optional raw file(s) download
URL = API_BASE + "?" + API_CALL
FILES = {
    RAW_FILE_NAME: URL,
    NGER_FILENAME_1617: NGER_URL_1617,
    NGER_FILENAME_1516: NGER_URL_1516,
    NGER_FILENAME_1415: NGER_URL_1415,
    NGER_FILENAME_1314: NGER_URL_1314,
    NGER_FILENAME_1213: NGER_URL_1213,
}
DOWNLOAD_FILES = pw.download(COUNTRY_NAME, FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# get permanent IDs for australian plants
# FIX: the original built the table from a bare open() inside the comprehension,
# leaking the file handle; a with-block closes it deterministically.
with open(STATIC_ID_FILENAME) as _static_id_file:
    linking_table = {
        row['aremi_oid']: row for row in csv.DictReader(_static_id_file)
    }

# create dictionary for power plant objects
plants_dictionary = {}
# (continuation of a pw.make_file_path(...) call opened before this chunk)
subFolder=SAVE_CODE, filename="PowerPlantsRenewGE1MW_NorthAmerica_201708.xlsx")
SOURCE_URL_2 = u"ftp://ftp.maps.canada.ca/pub/nacei_cnaie/energy_infrastructure/PowerPlantsRenewGE1MW_NorthAmerica_201708.xlsx"
FUSION_TABLE_FILE = pw.make_file_path(fileType="raw", subFolder="WRI", filename="Canada.csv")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_CAN.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
SOURCE_YEAR = 2017
ENCODING = 'UTF-8'

# True if specified --download, otherwise False
FILES = {RAW_FILE_NAME_1: SOURCE_URL_1, RAW_FILE_NAME_2: SOURCE_URL_2}
DOWNLOAD_FILES = pw.download("NRC data", FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# specify column names and tabs used in raw file
COLNAMES_1 = [
# zero-based column indices of the fields read from the spreadsheet
COLS = {
    'owner': 1,
    'name': 2,
    'fuel': 3,
    'grid': 4,
    'capacity': 6,
    'generation': 7
}
TAB = "POT_GEN"
START_ROW = 8
gen_start = datetime.date(YEAR_OF_DATA, 1, 1)   # generation reporting period start
gen_stop = datetime.date(YEAR_OF_DATA, 12, 31)  # generation reporting period end

# optional raw file(s) download
downloaded = pw.download(COUNTRY_NAME, {RAW_FILE_NAME: SOURCE_URL})

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# read locations
locations_dictionary = {}
with open(LOCATION_FILE_NAME, 'r') as f:
    datareader = csv.reader(f)
    # FIX: use the builtin next() instead of the Python-2-only reader.next()
    # method, so the header read works on both Python 2 and Python 3.
    headers = next(datareader)
filename="1814.xlsx") SOURCE_URL_3 = u"http://www.cre.gob.mx/documento/1814.xlsx" CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_MEX.csv") SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin") SOURCE_YEAR = 2016 ENCODING = 'UTF-8' # True if specified --download, otherwise False FILES = { RAW_FILE_NAME_1: SOURCE_URL_1, RAW_FILE_NAME_2: SOURCE_URL_2, RAW_FILE_NAME_3: SOURCE_URL_3 } # dictionary of saving directories and corresponding urls DOWNLOAD_FILES = pw.download("NACEI and CRE data", FILES) # set up fuel type thesaurus fuel_thesaurus = pw.make_fuel_thesaurus() # set up country name thesaurus country_thesaurus = pw.make_country_names_thesaurus() # create dictionary for power plant objects plants_dictionary = {} # extract powerplant information from file(s) print(u"Reading in plants...") # specify column names and tabs used in raw file COLNAMES_1 = [
SOURCE_YEAR = 2017
SAVE_CODE = u"BRA"
RAW_FILE_NAME = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="BRA_data.html")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_BRA.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
COORDINATE_FILE = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE, filename="coordinates_BRA.csv")
CAPACITY_CONVERSION_TO_MW = 0.001  # capacity values are given in kW in the raw data
ENCODING = "ISO-8859-1"

# set locale to Portuguese/Brazil
# NOTE(review): raises locale.Error if the 'pt_BR' locale is not installed on the
# host; presumably needed for locale-aware parsing of Brazilian-formatted numbers
# later in the script — confirm deployment hosts ship this locale.
locale.setlocale(locale.LC_ALL, 'pt_BR')

# download files if requested (large file; slow)
DOWNLOAD_URL = u"http://www2.aneel.gov.br/aplicacoes/capacidadebrasil/GeracaoTipoFase.asp"
POST_DATA = {'tipo': 0, 'fase': 3}
DOWNLOAD_FILES = pw.download('ANEEL B.I.G.', {RAW_FILE_NAME: DOWNLOAD_URL}, POST_DATA)

# define specialized fuel type interpreter
# ANEEL generator-class codes -> database fuel categories
generator_types = {u'CGH': u'Hydro', u'CGU': u'Wave and Tidal', u'EOL': u'Wind',
                   u'PCH': u'Hydro', u'UFV': u'Solar', u'UHE': u'Hydro',
                   u'UTE': u'Thermal', u'UTN': u'Nuclear'}
# ANEEL fuel codes -> database fuel categories (dict continues past this chunk)
fuel_types = {
    u'FL': u'Biomass',
    u'RU': u'Waste',
    u'RA': u'Waste',
    u'BL': u'Biomass',