Exemplo n.º 1
0
Note: The AUE data is in .xls format. We need to use xlrd to read this.
Note: xlrd decodes values to unicode as it reads them.
"""

import xlrd
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Yemen (AUE) import script.
COUNTRY_NAME = 'Yemen'
RAW_SOURCE = "AUE"
SAVE_CODE = u"YEM"  # ISO 3166-1 alpha-3 code for Yemen
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=RAW_SOURCE,
                                  filename="Manual Of Power Stations2016.xls")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="yemen_database.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
URL = "http://www.auptde.org/Article_Files/Manual%20Of%20Power%20Stations2016.xls"

# Spreadsheet layout: column indices and row bounds of the plant table.
COLS = {'name': 0, 'capacity': 3, 'year_built': 4, 'fuel_type': 5}
START_ROW = 4   # first data row -- presumably 0-indexed (xlrd convention); TODO confirm
END_ROW = 41    # last data row
TAB_NUMBER = 0  # worksheet index within the workbook

# optional raw file download
DOWNLOAD_FILES = pw.download(COUNTRY_NAME, {RAW_FILE_NAME: URL})

# set up fuel type thesaurus
Exemplo n.º 2
0
Additional information: CARMA data api: http://carma.org/api/
"""

import csv
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the global CARMA import script.
COUNTRY_NAME = u"GLOBAL"
SOURCE_NAME = u"CARMA"
SOURCE_URL = "http://carma.org/"
SAVE_CODE = u"CARMA"
RAW_FILE_NAME = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE,
    filename="Full_CARMA_2009_Dataset_Power_Watch.csv")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_CARMA.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
YEAR_UPDATED = 2009  # vintage of the CARMA dataset
# Columns expected in the raw CSV (plant id, name, coordinates, country code).
COLNAMES = ['plant.id', 'plant', 'lat', 'lon', 'iso3']

# optional raw file(s) download
# note: API documentation says to specify "limit=0" to download entire dataset.
# this doesn't work; it only downloads 2000 plants.
# full dataset has about 51k plants, so specify 60k for limit to be safe.
#URL = "http://carma.org/javascript/ajax-lister.php?type=plant&sort=carbon_present%20DESC&page=1&time=present&m1=world&m2=plant&m3=&m4=&export=make"
#URL = "http://carma.org/api/1.1/searchPlants?raw=1&limit=20000"
#FILES = {RAW_FILE_NAME: URL}
#DOWNLOAD_FILES = pw.download(SOURCE_NAME, FILES)
Exemplo n.º 3
0
- Use matches/concordances as specified in powerplants_database.py

TO-DOS:
- Alias list for power plants
"""

import csv
import time
import argparse
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

### PARAMETERS ###
# Input binary databases produced by the per-source scripts, and output paths
# for the assembled global database, its build log, and the full data dump.
COUNTRY_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="COUNTRY-Database.bin")
WRI_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="WRI-Database.bin")
GEO_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="GEODB-Database.bin")
CARMA_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="CARMA-Database.bin")
DATABASE_CSV_SAVEFILE = pw.make_file_path(fileType="output", filename="global_power_plant_database.csv")
DATABASE_BUILD_LOG_FILE = pw.make_file_path(fileType="output", filename="database_build_log.txt")
DATABASE_CSV_DUMPFILE = pw.make_file_path(fileType="output", filename="global_power_plant_database_data_dump.csv")
MINIMUM_CAPACITY_MW = 1  # plants below this capacity are excluded from the build

# Command-line options for the database build run.
parser = argparse.ArgumentParser()
parser.add_argument("--dump", help="dump all the data", action="store_true")
# action="store_true" already yields a bool, so the conditional expression
# `True if ... else False` was redundant.
DATA_DUMP = parser.parse_args().dump

# Open log file in append mode so successive build runs accumulate in one log.
# NOTE(review): deliberately left open here -- later parts of the build script
# (outside this excerpt) write to f_log and are responsible for closing it.
f_log = open(DATABASE_BUILD_LOG_FILE, 'a')
f_log.write('Starting Global Power Plant Database build run at {0}.\n'.format(time.ctime()))
- We should develop an approach to handle very large raw files outside of normal downloads.
"""

import csv
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the E-PRTR (European Pollutant
# Release and Transfer Register) import script.
COUNTRY_NAME = u"GLOBAL"
SOURCE_NAME = u"European Pollutant Release and Transfer Register"
SOURCE_URL = "http://prtr.ec.europa.eu/"
SAVE_CODE = u"EPRTR"
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=SAVE_CODE,
                                  filename="EPRTR-plants.csv")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_EPRTR.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
YEAR_UPDATED = 2014
DATA_ENCODING = "Windows-1252"  # raw CSV is not UTF-8; decode with this codec

# optional raw file(s) download
# NOTE(review): URL here is the site root, not a direct file link -- the raw
# file may need to be obtained manually; confirm pw.download behavior.
URL = "http://prtr.ec.europa.eu/"
FILES = {RAW_FILE_NAME: URL}
DOWNLOAD_FILES = pw.download(SOURCE_NAME, FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()
import csv
import sys, os

# import [other packages]

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Template block: ALL-CAPS placeholder strings below must be replaced when
# this skeleton is copied to create a new country/source import script.
COUNTRY_NAME = u"NAME OF COUNTRY OR GLOBAL"
SOURCE_NAME = u"NAME OF ORGANIZATION OR DATABASE[, OTHER ORGANIZATION OR DATABASE]"
SOURCE_URL = u"PRIMARY URL"
SAVE_CODE = u"3-LETTER SAVE CODE HERE (ISO CODE FOR COUNTRIES)"
YEAR_POSTED = 2017  # Year the data was posted online
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=SAVE_CODE,
                                  filename="RAW FILE HERE")
CSV_FILE_NAME = pw.make_file_path(
    fileType="src_csv", filename="database_{0}.csv".format(SAVE_CODE))
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
#LOCATION_FILE_NAME = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE, filename="locations_{0}.csv".format(SAVE_CODE))

# other parameters as needed

# specific functions here
# def xyz():
#     pass

# optional raw file(s) download
# True if specified --download, otherwise False
FILES = {
Global Power Plant Database
assemble_coordinates.py
Use plant matches from master concordance table to extract coordinates from GEO and CARMA.
Output table of all plants matched to GEO or CARMA and their coordinates from those sources.
"""

import argparse
import csv
import sys, os

sys.path.insert(0, os.path.join(os.pardir, os.pardir))
import powerplant_database as pw

# params
OUTPUT_FILE = "assembled_coordinates.csv"
GEO_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                      filename="GEODB-Database.bin")
CARMA_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                        filename="CARMA-Database.bin")

# make concordance dictionary (master table matching plants across sources)
plant_concordance = pw.make_plant_concordance()
print("Loaded concordance file with {0} entries.".format(
    len(plant_concordance)))

# create dictionary for power plants and coordinate information
plants_dictionary = {}

# load GEO and CARMA binary databases, used for matching coordinates
geo_database = pw.load_database(GEO_DATABASE_FILE)
print("Loaded {0} plants from GEO database.".format(len(geo_database)))
carma_database = pw.load_database(CARMA_DATABASE_FILE)
Exemplo n.º 7
0
TO-DOS:
- Alias list for power plants
- Primary fuel type designation
- Link to Platts/WEPP ID
"""

import csv
import time
import argparse
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

### PARAMETERS ###
# Input binary databases produced by the per-source scripts, and output paths
# for the assembled global database, its build log, and the full data dump.
COUNTRY_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                          filename="COUNTRY-Database.bin")
WRI_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                      filename="WRI-Database.bin")
GEO_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                      filename="GEODB-Database.bin")
CARMA_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                        filename="CARMA-Database.bin")
DATABASE_CSV_SAVEFILE = pw.make_file_path(
    fileType="output", filename="global_power_plant_database.csv")
DATABASE_BUILD_LOG_FILE = pw.make_file_path(fileType="output",
                                            filename="database_build_log.txt")
DATABASE_CSV_DUMPFILE = pw.make_file_path(
    fileType="output", filename="global_power_plant_database_data_dump.csv")
MINIMUM_CAPACITY_MW = 1  # plants below this capacity are excluded from the build

# command-line argument parsing (arguments added further down, outside this excerpt)
parser = argparse.ArgumentParser()
Data Source: World Resources Institute (manually assembled from multiple sources).
Additional information: https://github.com/wri/global-power-plant-database
"""

import argparse
import csv
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the WRI manually-assembled data import.
COUNTRY_NAME = u"GLOBAL"
SOURCE_NAME = u"WRI"
SAVE_CODE = u"WRI"
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_WRI.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
RAW_FILE_DIRECTORY = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE)
OVERLAP_FILE = "Power_Plant_ID_overlaps.csv"  # report of duplicate plant IDs

# set up fuel type thesaurus (maps raw fuel names to standardized fuel types)
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus (maps raw country names to standardized names)
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# extract powerplant information from file(s)
print(u"Reading in plants...")
import datetime
import xlrd
import sys
import os
import json

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Argentina import script.
COUNTRY_NAME = u"Argentina"
SOURCE_NAME = u"Ministerio de Energía y Minería"
SOURCE_URL = u"http://energia3.mecon.gov.ar/contenidos/archivos/Reorganizacion/informacion_del_mercado/publicaciones/mercado_electrico/estadisticosectorelectrico/2015/A1.POT_GEN_COMB_POR_CENTRAL_2015.xlsx"
SAVE_CODE = u"ARG"
RAW_FILE_NAME = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE,
    filename="A1.POT_GEN_COMB_POR_CENTRAL_2015.xlsx")
# Auxiliary resource file with plant-level data not in the raw spreadsheet.
PLANT_AUX_FILE = pw.make_file_path(fileType="resource",
                                   subFolder=SAVE_CODE,
                                   filename="ARG_plants.csv")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_ARG.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
YEAR_OF_DATA = 2015
CAPACITY_CONVERSION_TO_MW = 0.001  # capacity values are given in kW in the raw data
GENERATION_CONVERSION_TO_GWH = 0.001  # generation values are given in MWh in the raw data

# other parameters
COLS = {
    'owner': 1,
    'name': 2,
from time import sleep
import sqlite3

import requests
from bs4 import BeautifulSoup as soup
from selenium import webdriver

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Industry About (China) scraper.
COUNTRY_NAME = 'China'
SOURCE_NAME = 'Industry About'
SAVE_CODE = 'IDABT'

CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_IDABT.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")

# Local sqlite cache of scraped pages; JSON_URL returns article markers
# (plant locations) from the site's contentmap module.
DATABASE_FILE = pw.make_file_path('raw', SAVE_CODE, 'Industry_About_China_Fossil_Fuel_Energy.sqlite')
JSON_URL = 'https://www.industryabout.com/index.php?option=com_contentmap&view=smartloader&type=json&filename=articlesmarkers&source=articles&owner=module&id=127&Itemid=181'
SLEEP_TIME = 2  # seconds between requests, to avoid hammering the site

# Field names that have typos/inconsistencies
FIELD_ALIAS = {
	u'4Type': 'Type',
	u'Activity Since': 'Activity since',
	u'Coodinates': 'Coordinates',
	u'Coordiantes': 'Coordinates',
	u'Kind of Coal': 'Kind of Fuel',
	u'Other names': 'Other name',
	u'TType': 'Type',
Exemplo n.º 11
0
import csv
import sys, os
import xlrd

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Mexico import script; data comes
# from two North American cooperation spreadsheets plus one CRE spreadsheet.
COUNTRY_NAME = u"Mexico"
SOURCE_NAME = u"North American Cooperation on Energy Information and Comisión Reguladora de Energía"
SOURCE_NAME_CRE = u"Comisión Reguladora de Energía"
SAVE_CODE = u"MEX"

# Dataset 1: all plants >= 100 MW across North America.
RAW_FILE_NAME_1 = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE,
    filename="PowerPlantsAllGE100MW_NorthAmerica_201606.xlsx")
SOURCE_URL_1 = u"http://base.energia.gob.mx/nacei/Archivos/3%20P%C3%A1gina%20Datos%20de%20Infraestructura/Ingl%C3%A9s/PowerPlantsAllGE100MW_NorthAmerica_201606.xlsx"
# Dataset 2: renewable plants >= 1 MW across North America.
RAW_FILE_NAME_2 = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE,
    filename="PowerPlantsRenewGE1MW_NorthAmerica_201606.xlsx")
SOURCE_URL_2 = u"http://base.energia.gob.mx/nacei/Archivos/3%20P%C3%A1gina%20Datos%20de%20Infraestructura/Ingl%C3%A9s/PowerPlantsRenewGE1MW_NorthAmerica_201606.xlsx"
# Dataset 3: CRE spreadsheet.
RAW_FILE_NAME_3 = pw.make_file_path(fileType="raw",
                                    subFolder=SAVE_CODE,
                                    filename="1814.xlsx")
SOURCE_URL_3 = u"http://www.cre.gob.mx/documento/1814.xlsx"

CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_MEX.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
"""

import xlrd
import sys, os
import re

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Finland import script.
COUNTRY_NAME = u"Finland"
SOURCE_NAME = u"Energiavirasto"
SOURCE_URL = u"http://www.energiavirasto.fi/en/voimalaitosrekisteri"
SAVE_CODE = u"FIN"
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=SAVE_CODE,
                                  filename="finland_power_plants.xlsx")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="finland_database.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
DATASET_URL = "http://www.energiavirasto.fi/documents/10191/0/Energiaviraston+Voimalaitosrekisteri+010117.xlsx/fa2c9bd2-e436-4dbb-a2e5-074ecbf11d23"

# Spreadsheet layout: column indices of the fields to read; fuel_type is
# split across three columns in the raw data.
COLS = {
    "name": 0,
    "owner": 1,
    "capacity_max": 17,
    "capacity_avg": 18,
    "gen_type": 7,
    "fuel_type": [20, 21, 22]
}
TAB_NAME = "English"  # worksheet with English-language headers
from lxml import etree, html
from dateutil.parser import parse as parse_date
import csv
import locale
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Brazil (ANEEL) import script.
COUNTRY_NAME = u"Brazil"
SOURCE_NAME = u"Agência Nacional de Energia Elétrica (Brazil)"
SOURCE_URL = u"http://www2.aneel.gov.br/aplicacoes/capacidadebrasil/capacidadebrasil.cfm"
SOURCE_YEAR = 2017
SAVE_CODE = u"BRA"
RAW_FILE_NAME = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="BRA_data.html")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_BRA.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
COORDINATE_FILE = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE, filename="coordinates_BRA.csv")
CAPACITY_CONVERSION_TO_MW = 0.001       # capacity values are given in kW in the raw data
ENCODING = "ISO-8859-1"  # raw HTML is Latin-1 encoded

# set locale to Portuguese/Brazil (for parsing localized numbers/dates)
# NOTE(review): requires the pt_BR locale to be installed on the host;
# setlocale raises locale.Error otherwise.
locale.setlocale(locale.LC_ALL,'pt_BR')

# download files if requested (large file; slow)
# POST body selects generation type 0, phase 3 on the ANEEL form.
DOWNLOAD_URL = u"http://www2.aneel.gov.br/aplicacoes/capacidadebrasil/GeracaoTipoFase.asp"
POST_DATA = {'tipo': 0,'fase': 3}
DOWNLOAD_FILES = pw.download('ANEEL B.I.G.', {RAW_FILE_NAME: DOWNLOAD_URL}, POST_DATA)

# define specialized fuel type interpreter
Exemplo n.º 14
0
from zipfile import ZipFile
from lxml import etree, html
import sys, os

sys.path.insert(0, os.pardir)
sys.path.insert(0, os.path.join(os.pardir, os.pardir, os.pardir))
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Brazil SIGEL (kmz) coordinate script.
COUNTRY_NAME = u"Brazil"
SOURCE_NAME = u"Agência Nacional de Energia Elétrica (Brazil)"
SOURCE_URL = u"http://sigel.aneel.gov.br/kmz.html"
SAVE_CODE = u"BRA"
# NOTE(review): "FILENAME.zip" looks like an unfilled placeholder -- confirm
# the actual raw file name before running.
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=SAVE_CODE,
                                  filename="FILENAME.zip")
CSV_FILE_NAME = "coordinates_BRA.csv"
ENCODING = "UTF-8"

# other parameters
DATASETS = {
    0: {
        "name": "UHE",
        "fuel": "Hydro"
    },
    1: {
        "name": "PCH",
        "fuel": "Hydro"
    },
    2: {
Exemplo n.º 15
0
"""
Global Power Plant Database
fix_coordinates_VNM.py
Convert coordinates in VNM data from minutes/seconds to decimal.
"""

import requests
import sys, os

sys.path.insert(0, os.pardir)
sys.path.insert(0, os.path.join(os.pardir, os.pardir))
import powerplant_database as pw

# params
# Google Fusion Tables query endpoint and credentials for the VNM
# coordinate-fix script.
API_KEY_FILE = pw.make_file_path(fileType="resource",
                                 subFolder="api_keys",
                                 filename="fusion_tables_api_key.txt")
TABLE_ID = "10WHpc9fcqZzV0kxoKdsKjdvLY21MPMmcQ6dLjYUT"
URL = "https://www.googleapis.com/fusiontables/v2/query"

# retrieve table
with open(API_KEY_FILE, 'r') as f:
    # rstrip() removes the trailing newline that readline() keeps; a bare
    # readline() would embed '\n' in the key and corrupt the API request.
    # (This matches how the key file is read elsewhere in this codebase.)
    API_KEY = f.readline().rstrip()
payload = {}
payload['alt'] = 'csv'  # ask for a CSV-formatted response
payload['sql'] = "SELECT Latitude FROM " + TABLE_ID
payload['key'] = API_KEY
response = requests.post(URL, payload)
print("Current table data:")
print(response.text)
Exemplo n.º 16
0
import datetime
import xlrd
import sys
import os
import json

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Argentina import script,
# including auxiliary resource files for locations and commissioning years.
COUNTRY_NAME = u"Argentina"
SOURCE_NAME = u"Ministerio de Energía y Minería"
SOURCE_URL = u"http://energia3.mecon.gov.ar/contenidos/archivos/Reorganizacion/informacion_del_mercado/publicaciones/mercado_electrico/estadisticosectorelectrico/2015/A1.POT_GEN_COMB_POR_CENTRAL_2015.xlsx"
SAVE_CODE = u"ARG"
RAW_FILE_NAME = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE,
    filename="A1.POT_GEN_COMB_POR_CENTRAL_2015.xlsx")
# Plant coordinates, curated separately from the raw spreadsheet.
LOCATION_FILE_NAME = pw.make_file_path(fileType="resource",
                                       subFolder=SAVE_CODE,
                                       filename="locations_ARG.csv")
# Commissioning years, curated separately from the raw spreadsheet.
COMMISSIONING_YEAR_FILE_NAME = pw.make_file_path(
    fileType="resource",
    subFolder=SAVE_CODE,
    filename="commissioning_years_ARG.csv")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_ARG.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
YEAR_OF_DATA = 2015
CAPACITY_CONVERSION_TO_MW = 0.001  # capacity values are given in kW in the raw data
GENERATION_CONVERSION_TO_GWH = 0.001  # generation values are given in MWh in the raw data
Additional information: https://github.com/wri/global-power-plant-database
Issues: Requires an API key to retrieve data from Fusion Tables.
"""

import argparse
import csv
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the WRI Fusion Tables import script.
COUNTRY_NAME = u"GLOBAL"
SOURCE_NAME = u"WRI"
SAVE_CODE = u"WRI"
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_WRI.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
RAW_FILE_DIRECTORY = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE)
API_KEY_FILE = pw.make_file_path(fileType="resource", subFolder="api_keys", filename="fusion_tables_api_key.txt")
OVERLAP_FILE = "Power_Plant_ID_overlaps.csv"  # report of duplicate plant IDs

# other parameters as needed
# Query prefix; the per-country fusion table ID is appended at call time.
URL_BASE = "https://www.googleapis.com/fusiontables/v2/query?alt=csv&sql=SELECT * FROM "

# set up country dictionary (need this for fusion table keys)
country_dictionary = pw.make_country_dictionary()

if '--download' in sys.argv:
    # get API key
    with open(API_KEY_FILE, 'r') as f:
        API_KEY = f.readline().rstrip()
Exemplo n.º 18
0
"""

import csv
import sys, os
import xlrd

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Canada import script.
COUNTRY_NAME = u"Canada"
SOURCE_NAME_1 = u"Natural Resources Canada"
SAVE_CODE = u"CAN"

# Dataset 1: all plants >= 100 MW across North America.
RAW_FILE_NAME_1 = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE,
    filename="PowerPlantsAllGE100MW_NorthAmerica_201708.xlsx")
SOURCE_URL_1 = u"http://ftp.maps.canada.ca/pub/nacei_cnaie/energy_infrastructure/PowerPlantsAllGE100MW_NorthAmerica_201708.xlsx"
# Dataset 2: renewable plants >= 1 MW across North America.
RAW_FILE_NAME_2 = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE,
    filename="PowerPlantsRenewGE1MW_NorthAmerica_201708.xlsx")
SOURCE_URL_2 = u"ftp://ftp.maps.canada.ca/pub/nacei_cnaie/energy_infrastructure/PowerPlantsRenewGE1MW_NorthAmerica_201708.xlsx"
# Supplementary WRI fusion-table export for Canada.
FUSION_TABLE_FILE = pw.make_file_path(fileType="raw",
                                      subFolder="WRI",
                                      filename="Canada.csv")

CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_CAN.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
SOURCE_YEAR = 2017
Exemplo n.º 19
0
import sys
import os
from zipfile import ZipFile
import lxml.html as LH
import xlrd

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the India import script; combines
# CEA thermal data (zip of spreadsheets) with REC registry generator pages.
COUNTRY_NAME = u"India"
SOURCE_NAME = u"Central Electricity Authority"
SOURCE_URL = u"http://www.cea.nic.in/"
SOURCE_URL2 = u"https://www.recregistryindia.nic.in/"
SAVE_CODE = u"IND"
RAW_FILE_NAME_CEA = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="database_12.zip")
RAW_FILE_NAME_CEA_UZ = pw.make_file_path(fileType="raw", filename=SAVE_CODE)  # unzip destination
RAW_FILE_NAME_REC = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="accredited_rec_generators.html")
WRI_DATABASE = pw.make_file_path(fileType="src_bin", filename=u"WRI-Database.bin")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_IND.csv")
PLANT_LOCATIONS_FILE = pw.make_file_path(fileType="resource", subFolder="IND", filename="CEA_plants.csv")

SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
LOCATION_FILE = pw.make_file_path(fileType="resource", subFolder=SAVE_CODE, filename="plant_locations_IND.csv")
TAB_NAME = u"Data"
DATA_YEAR = 2016        # capacity data from CEA is as of 2016

# optional raw file(s) download
# True if specified --download, otherwise False
FILES = {RAW_FILE_NAME_CEA: "http://www.cea.nic.in/reports/others/thermal/tpece/cdm_co2/database_12.zip",
            RAW_FILE_NAME_REC: "https://www.recregistryindia.nic.in/index.php/general/publics/accredited_regens"} # dictionary of saving directories and corresponding urls
import argparse
import csv
import sys, os
import xlrd
from lxml import etree

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the CDM (Clean Development Mechanism)
# import script; combines a project spreadsheet with a project-location XML feed.
COUNTRY_NAME = u"Global"
SOURCE_NAME = u"Clean Development Mechanism"
SOURCE_URL = u"https://cdm.unfccc.int"
SAVE_CODE = u"CDMDB"
RAW_FILE_NAME1 = pw.make_file_path(fileType="raw",
                                   subFolder=SAVE_CODE,
                                   filename="Database for PAs and PoAs.xlsx")
RAW_FILE_NAME2 = pw.make_file_path(fileType="raw",
                                   subFolder=SAVE_CODE,
                                   filename="projectsLocationAll.xml")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_CDMDB.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
URL1 = "https://cdm.unfccc.int/Statistics/Public/files/Database%20for%20PAs%20and%20PoAs.xlsx"
URL2 = "https://cdm.unfccc.int/Projects/MapApp/projectsLocationAll.xml"

# project file specifications
TAB_NAME = "CDM activities"
COLNAMES = [
    "CDM project reference number",
    "Unique project identifier (traceable with Google)",
import xlrd
import sys, os
import csv

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the USA (EIA forms 860/923) import.
COUNTRY_NAME = u"United States of America"
SAVE_CODE  = u"USA"
SOURCE_NAME = u"U.S. Energy Information Administration"
SOURCE_URL = u"http://www.eia.gov/electricity/data/browser/"
YEAR = 2017  # year of reported capacity value

# 860 - use 2017 (plant and generator files)
RAW_FILE_NAME_860_2 = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="2___Plant_Y2017.xlsx")
RAW_FILE_NAME_860_3 = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="3_1_Generator_Y2017.xlsx")

# 923 - use 2013-2017 (one generation file per year)
RAW_FILE_NAME_923_2_2017 = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="EIA923_Schedules_2_3_4_5_M_12_2017_Final_Revision.xlsx")
RAW_FILE_NAME_923_2_2016 = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="EIA923_Schedules_2_3_4_5_M_12_2016_Final_Revision.xlsx")
RAW_FILE_NAME_923_2_2015 = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="EIA923_Schedules_2_3_4_5_M_12_2015_Final_Revision.xlsx")
RAW_FILE_NAME_923_2_2014 = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="EIA923_Schedules_2_3_4_5_M_12_2014_Final_Revision.xlsx")
RAW_FILE_NAME_923_2_2013 = pw.make_file_path(fileType="raw", subFolder=SAVE_CODE, filename="EIA923_Schedules_2_3_4_5_2013_Final_Revision.xlsx")

WRI_DATABASE = pw.make_file_path(fileType="src_bin", filename=u"WRI-Database.bin")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv", filename="database_USA.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
GENERATION_CONVERSION_TO_GWH = 0.001	# generation values are given in MWh in the raw data
# Territories reported under USA in the EIA data.
SUBSIDIARY_COUNTRIES = ["Puerto Rico", "Guam"]
Exemplo n.º 22
0
import sys, os
import csv

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Australia import script; AREMI
# supplies plant data and the Clean Energy Regulator (NGER) supplies
# generation data for four reporting years.
COUNTRY_NAME = u"Australia"
SAVE_CODE = u"AUS"
SOURCE_NAME = u"Australian Renewable Energy Mapping Infrastructure"
SOURCE_URL = u"http://services.ga.gov.au/site_3/rest/services/Electricity_Infrastructure/MapServer"
GENERATION_SOURCE = u"Australia Clean Energy Regulator"

NGER_URL_1617 = u"http://www.cleanenergyregulator.gov.au/DocumentAssets/Documents/Greenhouse%20and%20energy%20information%20for%20designated%20generation%20facilities%202016-17.csv"
NGER_FILENAME_1617 = pw.make_file_path(fileType="raw",
                                       subFolder=SAVE_CODE,
                                       filename="NGER_2016-2017.csv")
NGER_URL_1516 = u"http://www.cleanenergyregulator.gov.au/DocumentAssets/Documents/Greenhouse%20and%20energy%20information%20for%20designated%20generation%20facilities%202015-16.csv"
NGER_FILENAME_1516 = pw.make_file_path(fileType="raw",
                                       subFolder=SAVE_CODE,
                                       filename="NGER_2015-2016.csv")

NGER_URL_1415 = u"http://www.cleanenergyregulator.gov.au/DocumentAssets/Documents/2014-15%20Greenhouse%20and%20energy%20information%20for%20designated%20generation%20facilities.csv"
NGER_FILENAME_1415 = pw.make_file_path(fileType="raw",
                                       subFolder=SAVE_CODE,
                                       filename="NGER_2014-2015.csv")

NGER_URL_1314 = u"http://www.cleanenergyregulator.gov.au/DocumentAssets/Documents/2013-14%20Greenhouse%20and%20energy%20information%20for%20designated%20generation%20facilities.csv"
NGER_FILENAME_1314 = pw.make_file_path(fileType="raw",
                                       subFolder=SAVE_CODE,
                                       filename="NGER_2013-2014.csv")
"""

import csv
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Chile (Energía Abierta) import.
COUNTRY_NAME = u"Chile"
SOURCE_NAME = u"Energía Abierta"
SOURCE_URL = u"http://energiaabierta.cl/electricidad/"
SAVE_CODE = u"CHL"
YEAR_POSTED = 2016  # Year the data was posted online
# NOTE(review): "FILENAME" looks like an unfilled placeholder -- confirm the
# actual raw file name before running.
RAW_FILE_NAME = pw.make_file_path(fileType='raw',
                                  subFolder=SAVE_CODE,
                                  filename="FILENAME")
CSV_FILE_NAME = pw.make_file_path(
    fileType="src_csv", filename="database_{0}.csv".format(SAVE_CODE))
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
LOCATION_FILE_NAME = pw.make_file_path(
    fileType="resource",
    subFolder=SAVE_CODE,
    filename="locations_{0}.csv".format(SAVE_CODE))

# other parameters
# URL template; "NUMBER" is replaced with each dataset's datastream number.
URL_BASE = "http://datos.energiaabierta.cl/rest/datastreams/NUMBER/data.csv"

DATASETS = [{
    "number": "215392",
    "fuel": "Thermal",
Exemplo n.º 24
0
or which one to use.
"""

import sqlite3
import sys
import os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Global Energy Observatory import;
# the raw data is a sqlite database mirrored via morph.io.
COUNTRY_NAME = u"GLOBAL"
SOURCE_NAME = u"GEO"
SAVE_CODE = u"GEODB"  # don't use 'GEO' to avoid conflict with country of Georgia
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=SAVE_CODE,
                                  filename="geo-database.db")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_GEODB.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
SOURCE_URL = "http://globalenergyobservatory.org"
URL_BASE = "https://morph.io/coroa/global_energy_observatory_power_plants"
URL_END = "/data.sqlite?key=RopNCJ6LtIx9%2Bdp1r%2BQV"  # morph.io API key baked into the URL
YEAR = 2017

# optional raw file(s) download
URL = URL_BASE + URL_END
FILES = {RAW_FILE_NAME: URL}
DOWNLOAD_FILES = pw.download(SOURCE_NAME, FILES)

# possible values for operational status meaning "not operational"
import xlrd
import pyproj as pyproj

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the United Kingdom import script;
# combines the Renewable Energy Planning Database (REPD) with DUKES table
# 5.11, and matches coordinates against the GEO and CARMA binary databases.
COUNTRY_NAME = u"United Kingdom"
SOURCE_NAME = u"Department for Business Energy & Industrial Strategy"
SOURCE_NAME_REPD = u"UK Renewable Energy Planning Database"
SOURCE_URL = u"https://www.gov.uk/government/collections/digest-of-uk-energy-statistics-dukes;https://www.gov.uk/government/collections/renewable-energy-planning-data"
SAVE_CODE_GBR = u"GBR"
SAVE_CODE_GEO = u"GEODB"
SAVE_CODE_CARMA = u"CARMA"
RAW_FILE_NAME_REPD = pw.make_file_path(
    fileType="raw",
    subFolder=SAVE_CODE_GBR,
    filename="Public_Database_-_Jan_2018.csv")
RAW_FILE_NAME_DUKES = pw.make_file_path(fileType="raw",
                                        subFolder=SAVE_CODE_GBR,
                                        filename="DUKES_5.11.xls")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_GBR.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")
GEO_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                      filename="GEODB-Database.bin")
CARMA_DATABASE_FILE = pw.make_file_path(fileType="src_bin",
                                        filename="CARMA-Database.bin")
REPD_YEAR = 2016   # vintage of the REPD data
DUKES_YEAR = 2016  # vintage of the DUKES data
Exemplo n.º 26
0
to the Global Power Plant Database format.
"""

import xml.etree.ElementTree as ET
import sys, os

sys.path.insert(0, os.pardir)
import powerplant_database as pw

# params
# Identifiers and file-system paths for the Australia (AREMI WFS) import.
COUNTRY_NAME = u"Australia"
SOURCE_NAME = u"Australian Renewable Energy Mapping Infrastructure"
SOURCE_URL = u"http://services.ga.gov.au/site_3/rest/services/Electricity_Infrastructure/MapServer"
SAVE_CODE = u"AUS"
RAW_FILE_NAME = pw.make_file_path(fileType="raw",
                                  subFolder=SAVE_CODE,
                                  filename="australia_power_plants.xml")
CSV_FILE_NAME = pw.make_file_path(fileType="src_csv",
                                  filename="database_AUS.csv")
SAVE_DIRECTORY = pw.make_file_path(fileType="src_bin")

# other parameters
# WFS GetFeature request for the national major power stations layer.
API_BASE = "http://services.ga.gov.au/site_3/services/Electricity_Infrastructure/MapServer/WFSServer"
API_CALL = "service=WFS&version=1.1.0&request=GetFeature&typeName=National_Major_Power_Stations"

# optional raw file(s) download
URL = API_BASE + "?" + API_CALL
FILES = {RAW_FILE_NAME: URL}
DOWNLOAD_FILES = pw.download(COUNTRY_NAME, FILES)

# set up fuel type thesaurus