# True if specified --download, otherwise False
FILES = {RAW_FILE_NAME_REPD: URL_REPD, RAW_FILE_NAME_DUKES: URL_DUKES}
DOWNLOAD_FILES = pw.download(
    u"UK Renewable Energy Planning Database and DUKES", FILES)

# set up fuel type thesaurus
fuel_thesaurus = pw.make_fuel_thesaurus()

# set up country name thesaurus
country_thesaurus = pw.make_country_names_thesaurus()

# create dictionary for power plant objects
plants_dictionary = {}

# load GEO and CARMA for matching coordinates
geo_database = pw.load_database(GEO_DATABASE_FILE)
print("Loaded {0} plants from GEO database.".format(len(geo_database)))
carma_database = pw.load_database(CARMA_DATABASE_FILE)
print("Loaded {0} plants from CARMA database.".format(len(carma_database)))

# read in plant matches file
# NOTE(review): mode "rbU" mixes the binary and universal-newline flags; this
# is Python 2-era csv usage and is rejected under Python 3 — confirm intent.
with open(PLANT_MATCHES, "rbU") as f:
    f.readline()  # skip headers
    csvreader = csv.reader(f)
    plant_matches = {}
    for row in csvreader:
        dukes_name = str(row[0])
        # GEO/CARMA ids are formatted via pw.make_id; the REPD id stays a
        # plain int.  An empty cell maps to "" in every case.
        geo_id = pw.make_id(SAVE_CODE_GEO, int(row[1])) if row[1] else ""
        carma_id = pw.make_id(SAVE_CODE_CARMA, int(row[2])) if row[2] else ""
        repd_id = int(row[3]) if row[3] else ""
        # NOTE(review): the dict literal for this entry continues beyond this
        # excerpt.
        plant_matches[dukes_name] = {
# Accumulate 2013 EIA-923 net generation (converted to GWh) onto plants that
# are already present in the database; rows with unknown ids are reported.
for row_id in xrange(6, ws923_2013.nrows):
    rv = ws923_2013.row_values(row_id)
    idnr = pw.make_id(SAVE_CODE, int(rv[COLS_923_2_2013['idnr']]))
    if idnr not in plants_dictionary:
        print("Can't find plant with ID: {0}".format(idnr))
        continue
    plant = plants_dictionary[idnr]
    # Make sure a 2013 entry exists before accumulating into the last slot.
    if not pw.annual_generation(plant.generation, 2013):
        plant.generation.append(
            pw.PlantGenerationObject.create(0.0, 2013, source=SOURCE_NAME))
    plant.generation[-1].gwh += (
        float(rv[COLS_923_2_2013['generation']]) * GENERATION_CONVERSION_TO_GWH)

print("...Added plant generations.")

# Fold plants from subsidiary territories (Puerto Rico, Guam) into the main
# dictionary, relabelled with the parent country's name.
print("Adding additional plants from WRI-collected table...")
wri_collected_data = pw.load_database(WRI_DATABASE)
for country in SUBSIDIARY_COUNTRIES:
    these_plants = {}
    for plant_id, plant in wri_collected_data.iteritems():
        if plant.country == country:
            plant.country = COUNTRY_NAME
            these_plants[plant_id] = plant
    plants_dictionary.update(these_plants)
print("...finished.")

# report on plants read from file
print(u"Loaded {0} plants to database.".format(len(plants_dictionary)))

# write database to csv format
pw.write_csv_file(plants_dictionary, CSV_FILE_NAME)

# pickle database
pw.save_database(plants_dictionary, SAVE_CODE, SAVE_DIRECTORY)
예제 #3
0
# make plant concordance dictionary
plant_concordance = pw.make_plant_concordance()
print("Loaded concordance file with {0} entries.".format(
    len(plant_concordance)))
carma_id_used = []  # Record matched carma_ids

# STEP 0: Read in source databases.
# Identify countries with automated data from .automated flag.
print("Loading source databases...")
country_databases = {}
for country_name, country in country_dictionary.iteritems():
    if country.automated == 1:
        country_code = country.iso_code
        # Per-country path derived by substituting the ISO code into the
        # COUNTRY_DATABASE_FILE template.
        database_filename = COUNTRY_DATABASE_FILE.replace(
            "COUNTRY", country_code)
        country_databases[country_name] = pw.load_database(database_filename)
        print("Loaded {0} plants from {1} database.".format(
            len(country_databases[country_name]), country_name))

# Load multi-country databases.
wri_database = pw.load_database(WRI_DATABASE_FILE)
print("Loaded {0} plants from WRI database.".format(len(wri_database)))
geo_database = pw.load_database(GEO_DATABASE_FILE)
print("Loaded {0} plants from GEO database.".format(len(geo_database)))
carma_database = pw.load_database(CARMA_DATABASE_FILE)
print("Loaded {0} plants from CARMA database.".format(len(carma_database)))

# Track counts using a dict with keys corresponding to each data source
# NOTE(review): .keys() returning a list that supports .extend() is Python 2
# behavior; the list literal below is truncated in this excerpt.
db_sources = country_databases.keys()
db_sources.extend([
    "WRI", "GEO", "WRI with GEO lat/long data", "WRI with CARMA lat/long data"
예제 #4
0
    # Accumulate this row's reported generation (converted to GWh) onto the
    # plant's YEAR entry, creating a zero-valued entry on demand.
    idnr = pw.make_id(SAVE_CODE, int(rv[COLS_923_2['idnr']]))
    # Fix: membership test on the dict directly — `in plants_dictionary.keys()`
    # built an O(n) list and scanned it on every row under Python 2.
    if idnr in plants_dictionary:
        if not plants_dictionary[idnr].generation[0]:
            generation = pw.PlantGenerationObject.create(0.0,
                                                         YEAR,
                                                         source=SOURCE_URL)
            plants_dictionary[idnr].generation[0] = generation
        plants_dictionary[idnr].generation[0].gwh += float(
            rv[COLS_923_2['generation']]) * GENERATION_CONVERSION_TO_GWH
    else:
        print("Can't find plant with ID: {0}".format(idnr))
print("...Added plant generations.")

# Fold plants from subsidiary territories (Puerto Rico, Guam) into the main
# dictionary, relabelled with the parent country's name.
print("Adding additional plants from Fusion Table data...")
fusion_table_data = pw.load_database(WRI_DATABASE)
for country in SUBSIDIARY_COUNTRIES:
    these_plants = {}
    for plant_id, plant in fusion_table_data.iteritems():
        if plant.country == country:
            plant.country = COUNTRY_NAME
            these_plants[plant_id] = plant
    plants_dictionary.update(these_plants)
print("...finished.")

# report on plants read from file
print(u"Loaded {0} plants to database.".format(len(plants_dictionary)))

# write database to csv format
pw.write_csv_file(plants_dictionary, CSV_FILE_NAME)
예제 #5
0
        # NOTE(review): loop body continues from above this excerpt; `plant`,
        # `commissioning_year` and `total_capacity` are bound outside it.
        print(u'Commissioning year of {0} is {1}'.format(
            plant.name, commissioning_year))

    # Sanity check: plant-level capacity should equal the sum of its unit
    # capacities to within 0.1% (float tolerance on the ratio).
    if plant.capacity:
        capacity_ratio_check = total_capacity / plant.capacity
        if capacity_ratio_check < 0.999 or capacity_ratio_check > 1.001:

            print(
                u'-Error: Plant {0} total capacity ({1}) does not match unit capacity sum ({2}).'
                .format(plant.name, total_capacity, plant.capacity))
# Merge in the WRI manually-collected plants: non-conventional sources that
# are not included in the CEA data.
print("Adding additional plants from WRI manually gathered data...")
wri_database = pw.load_database(WRI_DATABASE)
india_plants = {
    plant_id: plant
    for plant_id, plant in wri_database.iteritems()
    if plant.country == 'India'
}
plants_dictionary.update(india_plants)
print("...finished.")

# load and process RECS file - NOT IMPLEMENTED
#tree = LH.parse(RAW_FILE_NAME_CEA)
#print([td.text_content() for td in tree.xpath('//td')])

#ns = {"kml":"http://www.opengis.net/kml/2.2"}   # namespace
#parser = etree.XMLParser(ns_clean=True, recover=True, encoding="utf-8")
#tree = etree.parse(RAW_FILE_NAME_REC, parser)
#rows = iter(table)
#for row in rows:
#    print row
# Unpack the trained generation-estimation model bundle and echo its settings.
est = model_data['model']
params = model_data['params']
num_folds = model_data['num_folds']
fuel_types = model_data['fuel_types']
print("Loaded trained generation estimation model from {0}.".format(
    args.model_filename))
for param_name, param_value in params.iteritems():
    print(" - {0}: {1}".format(param_name, param_value))
print(" - num_folds: {0}".format(num_folds))
print("Fuel types: {0}".format(fuel_types))

# Load the power-plant database; both CSV and pickled forms are accepted,
# selected by file extension.
if args.powerplant_database.endswith('.csv'):
    plants = pw.read_csv_file_to_dict(args.powerplant_database)
else:
    plants = pw.load_database(args.powerplant_database)

print("Loaded {0} plants from file {1}.".format(
    len(plants), args.powerplant_database))

# Feature bookkeeping for the estimation step.
fuel_type_list = []
# total capacity keyed by country, then by fuel type
capacity_by_country_by_fuel = {}
feature_name_list = [
    'fuel_type', 'capacity_mw', 'commissioning_year', 'fuel_avg_cf',
    'cap_sh_country', 'cap_sh_country_fuel'
]

# read data from plant database
count_full_data = 0