Example #1
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=SAVE_CODE,
                                  filename="Full_CARMA_2009_Dataset.json")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="carma_database.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
YEAR_UPDATED = 2009

# optional raw file(s) download
# note: API documentation says to specify "limit=0" to download entire dataset.
# this doesn't work; it only downloads 2000 plants.
# full dataset has about 51k plants, so specify 60k for limit to be safe.
#URL = "http://carma.org/javascript/ajax-lister.php?type=plant&sort=carbon_present%20DESC&page=1&time=present&m1=world&m2=plant&m3=&m4=&export=make"
URL = "http://carma.org/api/1.1/searchPlants?raw=1&limit=20000"
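# DOWNLOAD_FILES is True if --download was specified, otherwise False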
FILES = {RAW_FILE_NAME: URL}
DOWNLOAD_FILES = pw.download(SOURCE_NAME, FILES)

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

with open(RAW_FILE_NAME, 'r') as f:
    raw_plant_list = json.loads(f.read())

for plant in raw_plant_list:
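    # (the loop body is truncated in this excerpt; a hedged sketch only -- the
    # CARMA record fields referenced here are assumptions, and the full script
    # presumably builds pw.PowerPlant objects rather than storing raw records)
    idnr = plant.get(u"id")              # hypothetical record-id field
    plants_dictionary[idnr] = plant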
Example #2
COLS = {
    'owner': 1,
    'name': 2,
    'fuel': 3,
    'grid': 4,
    'capacity': 6,
    'generation': 7
}
TAB = "POT_GEN"
START_ROW = 8

gen_start = datetime.date(YEAR_OF_DATA, 1, 1)
gen_stop = datetime.date(YEAR_OF_DATA, 12, 31)

# optional raw file(s) download
downloaded = pw.download(COUNTRY_NAME, {RAW_FILE_NAME: SOURCE_URL})

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# read locations
locations_dictionary = {}
with open(LOCATION_FILE_NAME, 'r') as f:
    datareader = csv.reader(f)
    headers = datareader.next()
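    # hedged sketch (the location file's column layout is not shown in this
    # excerpt; the name/latitude/longitude positions below are assumptions):
    for row in datareader:
        locations_dictionary[row[0]] = (float(row[1]), float(row[2]))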
Example #3
SOURCE_YEAR = 2017
SAVE_CODE = u"BRA"
RAW_FILE_NAME = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="BRA_data.html")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_BRA.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
COORDINATE_FILE = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE, filename="coordinates_BRA.csv")
CAPACITY_CONVERSION_TO_MW = 0.001       # capacity values are given in kW in the raw data
ENCODING = "ISO-8859-1"

# set locale to Portuguese/Brazil
locale.setlocale(locale.LC_ALL,'pt_BR')
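# hedged illustration (not part of the original script): with pt_BR active,
# Brazilian-formatted numbers parse directly, which is presumably why the locale
# is set before reading the ANEEL capacity figures:
example_kw = locale.atof(u"1.234,56")    # -> 1234.56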

# download files if requested (large file; slow)
DOWNLOAD_URL = u"http://www2.aneel.gov.br/aplicacoes/capacidadebrasil/GeracaoTipoFase.asp"
POST_DATA = {'tipo':0,'fase':3}
DOWNLOAD_FILES = pw.download('ANEEL B.I.G.',{RAW_FILE_NAME:DOWNLOAD_URL},POST_DATA)

# define specialized fuel type interpreter
generator_types = {u'CGH': u'Hydro',
                   u'CGU': u'Wave and Tidal',
                   u'EOL': u'Wind',
                   u'PCH': u'Hydro',
                   u'UFV': u'Solar',
                   u'UHE': u'Hydro',
                   u'UTE': u'Thermal',
                   u'UTN': u'Nuclear'}

fuel_types = { u'FL':u'Biomass',
                u'RU':u'Waste',
                u'RA':u'Waste',
                u'BL':u'Biomass',
Example #4
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
LOCATION_FILE = pw.make_file_path(fileType="resource",
                                  subFolder=SAVE_CODE,
                                  filename="plant_locations_IND.csv")
TAB_NAME = u"Data"
DATA_YEAR = 2015

# optional raw file(s) download
# DOWNLOAD_FILES is True if --download was specified, otherwise False
FILES = {
    RAW_FILE_NAME_CEA:
    "http://www.cea.nic.in/reports/others/thermal/tpece/cdm_co2/database_11.zip",
    RAW_FILE_NAME_REC:
    "https://www.recregistryindia.nic.in/index.php/general/publics/accredited_regens"
}  # dictionary mapping local file paths to their source urls
DOWNLOAD_FILES = pw.download(u'CEA and RECS', FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# specify column names used in raw file
COLNAMES = [
    u"S_NO", u"NAME", u"UNIT_NO", u"DT_ COMM", u"CAPACITY MW AS ON 31/03/2015",
    u"TYPE", u"FUEL 1", u"FUEL 2", u"2014-15\n\nNet \nGeneration \nGWh"
]
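# note: the last entry keeps its embedded newlines so it matches the multi-line
# spreadsheet header verbatim; a hedged sketch of locating these columns once a
# header row has been read from the extracted CEA workbook (header_row is hypothetical):
# col_index = {name: header_row.index(name) for name in COLNAMES}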
Example #5
CSV_FILE_NAME = pw.make_file_path(fileType = "src_csv", filename = "finland_database.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType = "src_bin")
DATASET_URL = "http://www.energiavirasto.fi/documents/10191/0/Energiaviraston+Voimalaitosrekisteri+010117.xlsx/fa2c9bd2-e436-4dbb-a2e5-074ecbf11d23"

COLS = {"name":0, "owner":1, "capacity_max":17, "capacity_avg":18,
        "gen_type":7, "fuel_type":[20, 21, 22]}
TAB_NAME = "English"

pattern = r" -?\D?\d+.?"  # space, optional hyphen, optional non-digit character, one or more digits, then at most one more character
def regex_match(name_1, name_2):
    if re.sub(pattern, "", name_1) == re.sub(pattern, "", name_2):
        return re.sub(pattern, "''", name_1)
    return False
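
# hedged usage sketch (hypothetical unit names): regex_match returns a normalized,
# truthy name when two names differ only by a trailing unit number, else False
assert regex_match(u"Mussalo 1", u"Mussalo 2")            # same plant, units 1 and 2
assert regex_match(u"Mussalo 1", u"Kymijoki 2") is False  # different plants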

# optional raw file(s) download
DOWNLOAD_FILES = pw.download(COUNTRY_NAME, {RAW_FILE_NAME:DATASET_URL})

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# create dictionary for power plant objects
units_dictionary = {}
plants_dictionary = {}

# Parse url to read the data year
time_updated = re.search("([0-9]{6}\.xlsx)", DATASET_URL).group(0)
year_updated = "20" + time_updated[-7:-5]   # 4-digit year

# Open the workbook
wb = xlrd.open_workbook(RAW_FILE_NAME)
ws = wb.sheet_by_name(TAB_NAME)
Example #6
    },
    7: {
        "name": "UFV",
        "fuel": "Solar"
    }
}

URL_BASE = u"http://sigel.aneel.gov.br/arcgis/services/SIGEL/ExportKMZ/MapServer/KmlServer?Composite=false&LayerIDs=ID_HERE&BBOX=-75.0,-34.0,-30.0,6.0"

# optional raw file(s) download
FILES = {}
for fuel_code, dataset in DATASETS.iteritems():
    RAW_FILE_NAME_this = RAW_FILE_NAME.replace("FILENAME", dataset["name"])
    URL = URL_BASE.replace("ID_HERE", str(fuel_code))
    FILES[RAW_FILE_NAME_this] = URL
DOWNLOAD_FILES = pw.download(COUNTRY_NAME, FILES)

# utility function to compare coordinates
coordinate_tolerance = 1e-2


def is_close(a, b, rel_tol=coordinate_tolerance):
    # use absolute values so the relative tolerance also works for the negative
    # latitudes and longitudes that cover Brazil
    return abs(a - b) <= rel_tol * max(abs(a), abs(b))
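
# hedged usage sketch with hypothetical coordinates near Brasilia:
assert is_close(-15.79, -15.80)        # within the 1% relative tolerance
assert not is_close(-15.79, -16.20)    # too far apart to be the same point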


# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")
inconsistent_coordinates = 0
Example #7
    "Company Name", "Station Name", "Fuel", "Installed Capacity (MW)",
    "Year of commission or year generation began"
]
TAB_NUMBER_DUKES = 2

# set up projection transformation
# thanks to John A. Stevenson: http://all-geo.org/volcan01010/2012/11/change-coordinates-with-pyproj/
wgs84 = pyproj.Proj(
    "+init=EPSG:4326"
)  # LatLon with WGS84 datum used by GPS units and Google Earth
osgb36 = pyproj.Proj("+init=EPSG:27700")  # UK Ordnance Survey, 1936 datum
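
# hedged usage sketch (hypothetical OSGB36 easting/northing, in metres): convert
# a grid reference to WGS84 longitude/latitude for coordinate matching
example_lon, example_lat = pyproj.transform(osgb36, wgs84, 530000, 180000)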

# raw files download
# DOWNLOAD_FILES is True if --download was specified, otherwise False
FILES = {RAW_FILE_NAME_REPD: URL_REPD, RAW_FILE_NAME_DUKES: URL_DUKES}
DOWNLOAD_FILES = pw.download(
    u"UK Renewable Energy Planning Database and DUKES", FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# load GEO and CARMA for matching coordinates
geo_database = pw.load_database(GEO_DATABASE_FILE)
print("Loaded {0} plants from GEO database.".format(len(geo_database)))
carma_database = pw.load_database(CARMA_DATABASE_FILE)
print("Loaded {0} plants from CARMA database.".format(len(carma_database)))
Example #8
CSV_FILE_NAME = pw.make_file_path(
    fileType="src_csv", filename="database_{0}.csv".format(SAVE_CODE))
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
#LOCATION_FILE_NAME = pw.make_file_path(fileType="resource",subFolder=SAVE_CODE,filename="locations_{0}.csv".format(SAVE_CODE))

# other parameters as needed

# specific functions here
# def xyz():

# optional raw file(s) download
# DOWNLOAD_FILES is True if --download was specified, otherwise False
FILES = {
    RAW_FILE_NAME: URL
}  # dictionary mapping local file paths to their source urls
DOWNLOAD_FILES = pw.download(NAME_OF_DATABASE, FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# example for reading CSV file
# specify column names used in raw file
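# hedged sketch of the CSV-reading pattern used throughout these scripts
# (column positions are hypothetical placeholders, not part of the original template):
# with open(RAW_FILE_NAME, "rU") as f:
#     datareader = csv.reader(f)
#     headers = datareader.next()
#     for row in datareader:
#         plant_name = row[0]          # hypothetical: column 0 holds the plant name
#         capacity_mw = float(row[1])  # hypothetical: column 1 holds capacity in MW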
Example #9
                                    filename="1814.xlsx")
SOURCE_URL_3 = u"http://www.cre.gob.mx/documento/1814.xlsx"

CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_MEX.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
SOURCE_YEAR = 2016
ENCODING = 'UTF-8'

# DOWNLOAD_FILES is True if --download was specified, otherwise False
FILES = {
    RAW_FILE_NAME_1: SOURCE_URL_1,
    RAW_FILE_NAME_2: SOURCE_URL_2,
    RAW_FILE_NAME_3: SOURCE_URL_3
}  # dictionary mapping local file paths to their source urls
DOWNLOAD_FILES = pw.download("NACEI and CRE data", FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# specify column names and tabs used in raw file
COLNAMES_1 = [
Example #10
    if fuel_name in ["Coal", "Gas", "Oil"]:
        fuel_name = "Thermal"

    if idval in plant_locations[fuel_name]:
        return plant_locations[fuel_name][idval][0:2]
    else:
        return pw.NO_DATA_NUMERIC, pw.NO_DATA_NUMERIC


# download if specified
FILES = {}
for dataset in DATASETS:
    RAW_FILE_NAME_this = RAW_FILE_NAME.replace("FILENAME", dataset["filename"])
    URL = URL_BASE.replace("NUMBER", dataset["number"])
    FILES[RAW_FILE_NAME_this] = URL
DOWNLOAD_FILES = pw.download("Chile power plant data", FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")

# read static location file [fuel,id,name,latitude,longitude]
plant_locations = {