def run(self, location_set, development_event_set, *args, **kwargs):
    """Run the parent EventsCoordinator, then store the processed
    development events in the attribute cache."""
    changed_indices, processed_development_event_indices = \
        EventsCoordinator.run(self, location_set, development_event_set,
                              *args, **kwargs)
    if development_event_set is not None:
        # Write only the events that were actually processed.
        subset = DatasetSubset(development_event_set,
                               processed_development_event_indices)
        subset.write_dataset(out_storage=AttributeCache())
    return (changed_indices, processed_development_event_indices)
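# --- Added illustration (not from the original code) ---
# A minimal sketch of the write-back idiom used in run() above: wrap the
# processed row indices in a DatasetSubset and write only those rows to the
# simulation's attribute cache. The helper name and the import paths are
# assumptions based on the opus_core layout suggested by these snippets.
from opus_core.datasets.dataset import DatasetSubset
from opus_core.store.attribute_cache import AttributeCache

def cache_processed_rows(dataset, processed_indices):
    # Only the rows selected by processed_indices are written; the parent
    # dataset itself is left unchanged.
    subset = DatasetSubset(dataset, processed_indices)
    subset.write_dataset(out_storage=AttributeCache())
    return subset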
def _convert_lccm_input(self, flt_directory_in, flt_directory_out):
    """Convert land-cover data from flt input storage into the land_covers
    table, writing one attribute at a time to keep memory use low."""
    gc.collect()
    t1 = time()
    lc = LandCoverDataset(
        in_storage=StorageFactory().get_storage(
            'flt_storage', storage_location=flt_directory_in),
        out_storage=StorageFactory().get_storage(
            'flt_storage', storage_location=flt_directory_out))
    # lc.get_header()  # added 23 june 2009 by mm
    mask = lc.get_mask()
    idx = where(mask == 0)[0]
    lcsubset = DatasetSubset(lc, idx)
    print "Converting:"
    lcsubset.write_dataset(attributes=["relative_x"], out_table_name="land_covers")
    lc.delete_one_attribute("relative_x")
    lcsubset.write_dataset(attributes=["relative_y"], out_table_name="land_covers")
    lc.delete_one_attribute("relative_y")
    lc.flush_dataset()
    gc.collect()
    # lc_names = lc.get_primary_attribute_names()
    for attr in lc.get_primary_attribute_names():
        print "  ", attr
        lcsubset.write_dataset(attributes=[attr], out_table_name="land_covers")
        lc.delete_one_attribute(attr)
    logger.log_status("Data conversion done. " + str(time() - t1) + " s")
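# --- Added illustration (not from the original code) ---
# A tiny, self-contained example of the indexing idiom above: numpy.where()
# returns a tuple of index arrays, so the trailing [0] is what produces the
# flat array of row ids handed to DatasetSubset.
from numpy import array, where

mask = array([0, 1, 0, 0, 1])
idx = where(mask == 0)[0]   # -> array([0, 2, 3]): the rows kept in the subset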
#years = [1995, 1999]
#years = [2002]
#years = sys.argv[3]
years = [2007, 2007]

lc1 = LandCoverDataset(
    in_storage=StorageFactory().get_storage(
        'flt_storage',
        storage_location=os.path.join(flt_directory_in, str(years[0]))),
    out_storage=StorageFactory().get_storage(
        'flt_storage',
        storage_location=os.path.join(flt_directory_out, str(years[0]))))
agents_index = where(lc1.get_attribute(index_attribute))[0]
lc1subset = DatasetSubset(lc1, agents_index)
print "Writing set 1:"
for attr in lc1.get_primary_attribute_names():
    print "  ", attr
    lc1subset.write_dataset(attributes=[attr], out_table_name="land_covers")
    # leaving this line in causes the processing of every other input data file;
    # commenting it causes memory error
    lc1.delete_one_attribute(attr)

lc2 = LandCoverDataset(
    in_storage=StorageFactory().get_storage(
        'flt_storage',
        storage_location=os.path.join(flt_directory_in, str(years[1]))),
    out_storage=StorageFactory().get_storage(
        'flt_storage',
        storage_location=os.path.join(flt_directory_out, str(years[1]))))
lc2subset = DatasetSubset(lc2, agents_index)
print "Writing set 2:"
for attr in lc2.get_primary_attribute_names():
    print "  ", attr
    lc2subset.write_dataset(attributes=[attr], out_table_name="land_covers")
    # leaving this line in causes the processing of every other input data file;
    # commenting it causes memory error
    lc2.delete_one_attribute(attr)

logger.log_status("Data storage done. " + str(time() - t1) + " s")
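# --- Added illustration (not from the original code) ---
# A hedged refactoring sketch of the loops above: writing one attribute at a
# time and deleting it immediately afterwards keeps peak memory low. The
# helper name is hypothetical and the DatasetSubset import path is an
# assumption based on the opus_core layout used in these snippets.
from opus_core.datasets.dataset import DatasetSubset

def write_subset_attribute_by_attribute(dataset, index, out_table_name="land_covers"):
    subset = DatasetSubset(dataset, index)
    for attr in dataset.get_primary_attribute_names():
        print "  ", attr
        subset.write_dataset(attributes=[attr], out_table_name=out_table_name)
        # Drop the attribute once written so it does not accumulate in memory.
        dataset.delete_one_attribute(attr)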
test_flag = options.test_flag

# shutil.rmtree(flt_directory_out)
# os.mkdir(flt_directory_out)

logger.log_status("Convert input data from ", str(input_year))
lc = LandCoverDataset(
    in_storage=StorageFactory().get_storage(
        'flt_storage', storage_location=flt_directory_in),
    out_storage=StorageFactory().get_storage(
        'flt_storage', storage_location=flt_directory_out))
lc.get_header()  # added 23 june 2009 by mm
mask = lc.get_mask()
idx = where(mask == 0)[0]
lcsubset = DatasetSubset(lc, idx)
print "Converting:"
lcsubset.write_dataset(attributes=["relative_x"], out_table_name="land_covers")
#lcsubset.write_dataset(attributes=["relative_x"], out_table_name="land_covers",
#                       valuetypes=valuetypes)
lc.delete_one_attribute("relative_x")
lcsubset.write_dataset(attributes=["relative_y"], out_table_name="land_covers")
#lcsubset.write_dataset(attributes=["relative_y"], out_table_name="land_covers",
#                       valuetypes=valuetypes)
lc.delete_one_attribute("relative_y")
# srcdir = os.path.join(flt_directory_out, "land_covers", "computed")
# shutil.move(os.path.join(srcdir, "relative_x.li4"), os.path.join(flt_directory_out, "land_covers"))
# shutil.move(os.path.join(srcdir, "relative_y.li4"), os.path.join(flt_directory_out, "land_covers"))
# shutil.rmtree(srcdir)
for attr in lc.get_primary_attribute_names():
    print "  ", attr
    lcsubset.write_dataset(attributes=[attr], out_table_name="land_covers")
    # lcsubset.write_dataset(attributes=[attr], out_table_name="land_covers",
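# --- Added illustration (not from the original code) ---
# A hedged sketch of how the variables used above (options.test_flag,
# input_year, flt_directory_in/out) might be supplied on the command line with
# Python 2's optparse. The option names are illustrative assumptions; the
# original script's actual option handling is not shown in this snippet.
from optparse import OptionParser

parser = OptionParser()
parser.add_option("--flt-in", dest="flt_directory_in")
parser.add_option("--flt-out", dest="flt_directory_out")
parser.add_option("--year", dest="input_year", type="int")
parser.add_option("--test", dest="test_flag", action="store_true", default=False)
(options, args) = parser.parse_args()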
    'db_output_database': None,
    'cache_directory': cache_directory,
    'base_year': 2000,
    'tables_to_cache': [
        'gridcells',
        # 'households',
        # 'jobs',
    ]})
#CacheScenarioDatabase().run(gridcell_config)

# step 2: cache water demand data by year
dbcon = ScenarioDatabase(database_name="water_demand_seattle2")

print "Create Storage object."
from opus_core.storage_factory import StorageFactory
storage = StorageFactory().get_storage(type="mysql_storage", storage_location=dbcon)

from waterdemand.datasets.consumption_dataset import ConsumptionDataset
consumption_types = ['wrmr', 'wcsr', 'wrsr']  # 'wcmr'
for consumption_type in consumption_types:
    consumption = ConsumptionDataset(in_storage=storage,
                                     in_table_name=consumption_type + '_grid')
    for year in range(1990, 2001):
        print "%s %s" % (consumption_type, year)
        # where() returns a tuple of index arrays; [0] yields the flat index
        # array expected by DatasetSubset (matching the other snippets).
        year_index = where(consumption.get_attribute("billyear") == year)[0]
        out_storage = StorageFactory().get_storage(
            type="flt_storage",
            storage_location=os.path.join(cache_directory, str(year)))
        consumption_subset = DatasetSubset(consumption, year_index)
        consumption_subset.write_dataset(out_storage=out_storage,
                                         out_table_name=consumption_type.lower())
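# --- Added illustration (not from the original code) ---
# A hedged generalization of the per-year loop above: split a dataset on an
# integer attribute and write each slice to its own flt storage directory.
# The function name and arguments are hypothetical; the import paths are
# assumptions based on the opus_core layout used in these snippets.
import os
from numpy import where
from opus_core.storage_factory import StorageFactory
from opus_core.datasets.dataset import DatasetSubset

def write_yearly_slices(dataset, year_attribute, years, cache_directory, out_table_name):
    for year in years:
        index = where(dataset.get_attribute(year_attribute) == year)[0]
        out_storage = StorageFactory().get_storage(
            type="flt_storage",
            storage_location=os.path.join(cache_directory, str(year)))
        DatasetSubset(dataset, index).write_dataset(
            out_storage=out_storage, out_table_name=out_table_name)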