def analyze(fmatch):
    """Post-process all grow logs for the completed run into averages,
    standard deviations, regressions and aggregate scores.

    Parameters
    ----------
    fmatch : float
        Fractional land-use match score computed by the driver (0.0 when
        no land-use data is in play or when predicting).
    """
    output_dir = Scenario.get_scen_value('output_dir')
    run = Processing.get_current_run()
    write_avg_file = Scenario.get_scen_value('write_avg_file')
    avg_filename = f'{output_dir}avg.log'
    write_std_dev_file = Scenario.get_scen_value('write_std_dev_file')
    std_filename = f'{output_dir}std_dev.log'
    control_filename = f'{output_dir}control_stats.log'
    # Create the output files lazily — only if absent, so repeated runs append.
    if write_avg_file:
        if not os.path.isfile(avg_filename):
            Stats.create_stats_val_file(avg_filename)
    if write_std_dev_file:
        if not os.path.isfile(std_filename):
            Stats.create_stats_val_file(std_filename)
    # Control-stats file only exists outside prediction mode.
    if Processing.get_processing_type() != Globals.mode_enum['predict']:
        if not os.path.isfile(control_filename):
            Stats.create_control_file(control_filename)
    # start at i = 1; i = 0 is the initial seed
    # I think I need to put a dummy stats_val to represent the initial seed
    Stats.average.append(StatsVal())
    for i in range(1, IGrid.igrid.get_num_urban()):
        year = IGrid.igrid.get_urban_year(i)
        Stats.calculate_averages(i)
        Stats.process_grow_log(run, year)
        if write_avg_file:
            Stats.write_stats_val_line(avg_filename, run, year, Stats.average[i], i)
        if write_std_dev_file:
            Stats.write_stats_val_line(std_filename, run, year, Stats.std_dev[i], i)
    Stats.do_regressions()
    Stats.do_aggregate(fmatch)
    # NOTE(review): written unconditionally even though the control file is
    # only *created* outside predict mode — confirm predict-mode behavior.
    Stats.write_control_stats(control_filename)
    if Processing.get_processing_type() == Globals.mode_enum['predict']:
        # Prediction years have no observed urban grids; everything goes in
        # slot 0 of the average/std-dev arrays.
        start = int(Scenario.get_scen_value('prediction_start_date'))
        stop = Processing.get_stop_year()
        for year in range(start + 1, stop + 1):
            Stats.clear_stats()
            Stats.process_grow_log(run, year)
            if write_avg_file:
                Stats.write_stats_val_line(avg_filename, run, year, Stats.average[0], 0)
            if write_std_dev_file:
                Stats.write_stats_val_line(std_filename, run, year, Stats.std_dev[0], 0)
    # Reset accumulated stats so the next run starts clean.
    Stats.clear_stats()
def grow_landuse(land1, num_growth_pix):
    """Run one year's deltatron land-use transition and, in predict mode (or
    the last monte-carlo of a test run), write the combined land/urban image.

    Parameters
    ----------
    land1 : Grid
        Current land-use grid; updated in place with this year's transitions.
    num_growth_pix : int
        Number of pixels urbanized this year; drives deltatron activity.

    Note: the unused locals ``landuse1_year`` and ``background`` from the
    original implementation were removed — their getters were never consumed.
    """
    nrows = IGrid.nrows
    ncols = IGrid.ncols
    ticktock = Processing.get_current_year()
    landuse0_year = IGrid.igrid.get_landuse_year(0)
    urban_code = LandClass.get_urban_code()
    new_indices = LandClass.get_new_indices()
    landuse_classes = LandClass.get_landclasses()
    class_indices = LandClass.get_reduced_classes()
    slope = IGrid.igrid.get_slope()
    deltatron = PGrid.get_deltatron()
    z = PGrid.get_z()
    land2 = PGrid.get_land2()
    class_slope = Transition.get_class_slope()
    ftransition = Transition.get_ftransition()

    if ticktock >= landuse0_year:
        # Place the New Urban Simulation into the Land Use Image
        Utilities.condition_gt_gif(z.gridData, 0, land1.gridData, urban_code)
        Deltatron.deltatron(new_indices, landuse_classes, class_indices,
                            deltatron, land1, land2, slope, num_growth_pix,
                            class_slope, ftransition)
        # Switch the old to the new
        for i in range(len(land2.gridData)):
            land1.gridData[i] = land2.gridData[i]

    if Processing.get_processing_type() == Globals.mode_enum['predict'] or \
            (Processing.get_processing_type() == Globals.mode_enum['test'] and
             Processing.get_current_monte() == Processing.get_last_monte()):
        # Write land1 to file
        if IGrid.using_gif:
            filename = f"{Scenario.get_scen_value('output_dir')}{IGrid.igrid.location}_land_n_urban" \
                       f".{Processing.get_current_year()}.gif"
        else:
            filename = f"{Scenario.get_scen_value('output_dir')}{IGrid.igrid.location}_land_n_urban" \
                       f".{Processing.get_current_year()}.tif"
        IGrid.echo_meta(
            f"{Scenario.get_scen_value('output_dir')}{IGrid.igrid.location}_land_n_urban."
            f"{Processing.get_current_year()}.tfw", "landuse")
        date = f"{Processing.get_current_year()}"
        ImageIO.write_gif(land1, Color.get_landuse_table(), filename, date,
                          nrows, ncols)

    # Compute final match statistics for landuse
    Utilities.condition_gt_gif(z.gridData, 0, land1.gridData, urban_code)
def update(num_growth_pix):
    """Compute this year's growth statistics and persist them to the run's
    grow log.

    Parameters
    ----------
    num_growth_pix : int
        Number of pixels urbanized during the current year.
    """
    grid_pixels = IGrid.nrows * IGrid.ncols
    roads = IGrid.get_road_pixel_count(Processing.get_current_year())
    excluded = IGrid.get_excld_count()

    # Per-year statistics, growth rate, and percent-urban calibration.
    Stats.compute_cur_year_stats()
    Stats.set_num_growth_pixels(num_growth_pix)
    Stats.cal_growth_rate()
    Stats.cal_percent_urban(grid_pixels, roads, excluded)

    output_dir = Scenario.get_scen_value('output_dir')
    cur_run = Processing.get_current_run()
    cur_year = Processing.get_current_year()
    log_path = f"{output_dir}grow_{cur_run}_{cur_year}.log"

    # Urban-observation years also get a Lee-Sallee score before saving.
    if IGrid.test_for_urban_year(Processing.get_current_year()):
        Stats.cal_leesalee()
        Stats.save(log_path)
    # Prediction mode saves every simulated year.
    if Processing.get_processing_type() == Globals.mode_enum['predict']:
        Stats.save(log_path)
def write_z_prob_grid(z, name):
    """Build a probability-colored image from grid *z* layered over the
    background, seed, and date color indices, then write it to disk.

    Parameters
    ----------
    z : list or Grid
        Urbanization values. NOTE(review): the predict branch passes ``z``
        directly to ``map_grid_to_index`` while the test branch passes
        ``z.gridData`` — callers appear to pass different types per mode;
        confirm this asymmetry is intentional.
    name : str
        Fragment inserted into the output filename (e.g. "_urban_").
    """
    # copy background int z_prob_ptr and remap background pixels
    # which collide with the seed, prob colors, and date
    nrows = IGrid.nrows
    ncols = IGrid.ncols
    total_pix = nrows * ncols
    background = IGrid.igrid.get_background_grid()
    prob_color_cnt = len(Scenario.get_scen_value('probability_color'))
    lower_bounds = [UGMDefines.SEED_COLOR_INDEX, UGMDefines.DATE_COLOR_INDEX]
    upper_bounds = [UGMDefines.SEED_COLOR_INDEX + prob_color_cnt, UGMDefines.DATE_COLOR_INDEX]
    indices = [UGMDefines.SEED_COLOR_INDEX + prob_color_cnt + 1, UGMDefines.DATE_COLOR_INDEX - 1]
    z_prob = Utilities.map_grid_to_index(background, lower_bounds, upper_bounds, indices, total_pix)
    if Processing.get_processing_type() == Globals.mode_enum['predict']:
        # Map z_ptr pixels into desired prob indices and save in overlay
        prob_list = Scenario.get_scen_value('probability_color')
        lower_bounds = []
        upper_bounds = []
        indices = []
        for i, prob in enumerate(prob_list):
            lower_bounds.append(prob.lower_bound)
            upper_bounds.append(prob.upper_bound)
            indices.append(i + 2)
        # First probability bucket maps to index 0 (transparent/background).
        indices[0] = 0
        overlay = Utilities.map_grid_to_index(z, lower_bounds, upper_bounds, indices, total_pix)
        # Overlay overlay grid onto the z_prob grid
        z_prob = Utilities.overlay(z_prob, overlay)
        # Overlay urban_seed into the z_prob grid
        z_prob = Utilities.overlay_seed(z_prob, total_pix)
    else:
        # TESTING
        # Map z grid pixels into desired seed_color_index and save in overlay pt
        lower_bounds = [1]
        upper_bounds = [100]
        indices = [UGMDefines.SEED_COLOR_INDEX]
        overlay = Utilities.map_grid_to_index(z.gridData, lower_bounds, upper_bounds, indices, total_pix)
        # Overlay overlay grid onto the z_prob grid
        z_prob = Utilities.overlay(z_prob, overlay)
    # The file writer needs to take in a Grid, so we're going to wrap our z_prob list in a grid
    z_prob_grid = IGrid.wrap_list(z_prob)
    if IGrid.using_gif:
        filename = f"{Scenario.get_scen_value('output_dir')}{IGrid.igrid.get_location()}" \
                   f"{name}{Processing.get_current_year()}.gif"
    else:
        filename = f"{Scenario.get_scen_value('output_dir')}{IGrid.igrid.get_location()}" \
                   f"{name}{Processing.get_current_year()}.tif"
    IGrid.echo_meta(f"{Scenario.get_scen_value('output_dir')}"
                    f"{IGrid.igrid.get_location()}{name}{Processing.get_current_year()}.tfw",
                    "urban")
    date = f"{Processing.get_current_year()}"
    ImageIO.write_gif(z_prob_grid, Color.get_probability_table(), filename, date,
                      IGrid.nrows, IGrid.ncols)
def calculate_stand_dev(idx):
    """Compute the standard deviation for stats slot *idx* across all
    monte-carlo iterations and append it to ``Stats.std_dev``.

    Parameters
    ----------
    idx : int
        Index into ``Stats.average``. Slot 0 is reserved for prediction
        mode; using it in any other mode is an error.

    Raises
    ------
    ValueError
        If ``idx`` is 0 outside prediction mode.
    """
    temp = StatsVal()
    total_mc = int(Scenario.get_scen_value('monte_carlo_iterations'))
    if idx == 0 and Processing.get_processing_type() != Globals.mode_enum['predict']:
        # Formerly a bare ValueError(); message added for diagnosability.
        raise ValueError(
            "std-dev slot 0 is reserved for prediction mode; "
            "got idx=0 while not in predict processing")
    temp.calculate_sd(total_mc, Stats.record, Stats.average[idx])
    Stats.std_dev.append(temp)
def process_grow_log(run, year):
    """Replay the pickled grow log for (*run*, *year*) into running totals
    and standard deviations, then delete the log file.

    Parameters
    ----------
    run : int
        Run number used in the log filename.
    year : int
        Simulated year used in the log filename.

    Raises
    ------
    AssertionError
        If the log holds more records than the configured monte-carlo count.
    """
    output_dir = Scenario.get_scen_value('output_dir')
    filename = f'{output_dir}grow_{run}_{year}.log'
    mc_iters = int(Scenario.get_scen_value('monte_carlo_iterations'))
    mc_count = 0
    grow_records = []
    # if Processing.get_processing_type() != Globals.mode_enum['predict']:
    # Read pickled records until EOF. NOTE: _pickle.load assumes the log was
    # written by this program (see save()); never point this at untrusted data.
    with (open(filename, "rb")) as openfile:
        while True:
            try:
                grow_records.append(_pickle.load(openfile))
            except EOFError:
                break
    """print(f"****************Year {year}***********************")
    for record in grow_records:
        print(record)
    print("***************************************")"""
    if len(grow_records) > int(Scenario.get_scen_value('monte_carlo_iterations')):
        raise AssertionError("Num Records is larger than Monte Carlo iters")
    if Processing.get_processing_type() == Globals.mode_enum['predict']:
        # Prediction mode accumulates every record into slot 0 first,
        # then averages once the totals are complete.
        for record in grow_records:
            Stats.record = record
            Stats.update_running_total(0)
        Stats.calculate_averages(0)
    for record in grow_records:
        Stats.record = record
        if mc_count >= mc_iters:
            Logger.log("mc_count >= scen_GetMonteCarloIterations ()")
            sys.exit(1)
        if Processing.get_processing_type() != Globals.mode_enum['predict']:
            # Map the record's year back to its urban-observation index.
            index = IGrid.igrid.urban_yr_to_idx(Stats.record.year)
            Stats.calculate_stand_dev(index)
        else:
            Stats.calculate_stand_dev(0)
        mc_count += 1
    # The log is a per-run scratch file; remove it once consumed.
    os.remove(filename)
def monte_carlo(cumulate, land1):
    """Run the configured number of monte-carlo growth simulations,
    accumulating urbanization counts into *cumulate*.

    Parameters
    ----------
    cumulate : Grid
        Accumulator; on return each pixel holds the percentage of
        iterations in which it urbanized (0-100).
    land1 : Grid
        Land-use grid passed through to each Grow.grow() call.
    """
    log_it = Scenario.get_scen_value("logging")
    z = PGrid.get_z()
    total_pixels = IGrid.get_total_pixels()
    num_monte_carlo = int(Scenario.get_scen_value("monte_carlo_iterations"))
    for imc in range(num_monte_carlo):
        Processing.set_current_monte(imc)
        '''print("--------Saved-------")
        print(Coeff.get_saved_diffusion())
        print(Coeff.get_saved_spread())
        print(Coeff.get_saved_breed())
        print(Coeff.get_saved_slope_resistance())
        print(Coeff.get_saved_road_gravity())
        print("--------------------")'''
        # Reset the Parameters — each iteration restarts from the saved
        # coefficient values so self-modification doesn't carry over.
        Coeff.set_current_diffusion(Coeff.get_saved_diffusion())
        Coeff.set_current_spread(Coeff.get_saved_spread())
        Coeff.set_current_breed(Coeff.get_saved_breed())
        Coeff.set_current_slope_resistance(Coeff.get_saved_slope_resistance())
        Coeff.set_current_road_gravity(Coeff.get_saved_road_gravity())
        if log_it and Scenario.get_scen_value("log_initial_coefficients"):
            Coeff.log_current()
        # Run Simulation
        Stats.init_urbanization_attempts()
        TimerUtility.start_timer('grw_growth')
        Grow.grow(z, land1)
        TimerUtility.stop_timer('grw_growth')
        if log_it and Scenario.get_scen_value("log_urbanization_attempts"):
            Stats.log_urbanization_attempts()
        # Update Cumulate Grid: count any urbanized pixel from this pass.
        for i in range(total_pixels):
            if z.gridData[i] > 0:
                cumulate.gridData[i] += 1
        # Update Annual Land Class Probabilities
        if Processing.get_processing_type() == Globals.mode_enum["predict"]:
            LandClass.update_annual_prob(land1.gridData, total_pixels)
    # Normalize Cumulative Urban Image from raw counts to percentages.
    for i in range(total_pixels):
        cumulate.gridData[i] = (100 * cumulate.gridData[i]) / num_monte_carlo
def grow_non_landuse(z):
    """Accumulate urbanization across monte-carlo iterations when no
    land-use data file is configured, writing the probability image on
    the final iteration.

    Parameters
    ----------
    z : list
        This iteration's urbanization grid data (pixel values).
    """
    num_monte = int(Scenario.get_scen_value('monte_carlo_iterations'))
    cumulate_monte_carlo = Grid()
    # Scratch file carries the accumulation between monte-carlo iterations.
    filename = f"{Scenario.get_scen_value('output_dir')}cumulate_monte_carlo.year_{Processing.get_current_year()}"
    if Processing.get_processing_type() != Globals.mode_enum['calibrate']:
        if Processing.get_current_monte() == 0:
            # Zero out accumulation grid
            cumulate_monte_carlo.init_grid_data(IGrid.total_pixels)
        else:
            # Resume the accumulation persisted by the previous iteration.
            Input.read_file_to_grid(filename, cumulate_monte_carlo)
        # Accumulate Z over monte carlos
        for i in range(IGrid.total_pixels):
            if z[i] > 0:
                cumulate_monte_carlo.gridData[i] += 1
        if Processing.get_current_monte() == num_monte - 1:
            # Last iteration: finalize and emit the probability image.
            if Processing.get_processing_type() == Globals.mode_enum['test']:
                Utilities.condition_gt_gif(z, 0, cumulate_monte_carlo.gridData, 100)
            else:
                # Normalize Accumulated grid to percentages.
                for i in range(IGrid.total_pixels):
                    cumulate_monte_carlo.gridData[i] = 100 * cumulate_monte_carlo.gridData[i] / num_monte
            Utilities.write_z_prob_grid(cumulate_monte_carlo, "_urban_")
            # Clean up the scratch file (it exists only if monte > 0 wrote it).
            if Processing.get_current_monte() != 0:
                os.remove(filename)
        else:
            # Dump accumulated grid to disk for the next iteration to resume.
            Output.write_grid_to_file(filename, cumulate_monte_carlo)
def landuse_init(deltatron, land1):
    """Initialize the deltatron grid to zero and seed *land1* from the
    appropriate land-use input grid.

    Parameters
    ----------
    deltatron : list
        Deltatron grid data; zeroed in place.
    land1 : list
        Land-use grid data; filled in place from input layer 1 in predict
        mode, layer 0 otherwise.
    """
    total_pixels = IGrid.nrows * IGrid.ncols
    # Zero the deltatron grid *in place*. The original loop
    # (`for pixel in deltatron: pixel = 0`) only rebound the loop
    # variable and never modified the list — fixed here.
    for i in range(len(deltatron)):
        deltatron[i] = 0
    # Predict mode starts from the later land-use snapshot (layer 1);
    # calibrate/test start from the earlier one (layer 0).
    if Processing.get_processing_type() == Globals.mode_enum['predict']:
        landuse = IGrid.igrid.get_landuse_igrid(1)
    else:
        landuse = IGrid.igrid.get_landuse_igrid(0)
    for i in range(total_pixels):
        land1[i] = landuse[i]
def save(filename):
    """Stamp the current stats record with run/monte/year metadata, fold it
    into the running totals, and pickle it to *filename*.

    Parameters
    ----------
    filename : str
        Path of the grow log; created on the first monte-carlo iteration
        and appended to on subsequent ones.
    """
    record = Stats.record
    record.run = Processing.get_current_run()
    record.monte_carlo = Processing.get_current_monte()
    record.year = Processing.get_current_year()

    # Outside prediction mode, totals are indexed by the record's
    # urban-observation year; prediction uses slot 0.
    if Processing.get_processing_type() != Globals.mode_enum['predict']:
        index = IGrid.igrid.urban_yr_to_idx(record.year)
    else:
        index = 0
    Stats.update_running_total(index)

    # First monte-carlo iteration truncates the file; later ones append.
    mode = 'wb' if record.monte_carlo == 0 else 'ab'
    with open(filename, mode) as output:
        _pickle.dump(record, output, -1)
def grow(z, land1):
    """Run one full simulation from the start year to the stop year,
    applying the cellular-automaton spread rules each year.

    Parameters
    ----------
    z : Grid
        Urbanization grid; reset here and grown in place year by year.
    land1 : Grid
        Land-use grid, used only when a landuse data file is configured.
    """
    deltatron = PGrid.get_deltatron()
    avg_slope = 0
    # Start year: prediction runs begin at the configured start date;
    # calibrate/test begin at the first urban observation year.
    if Processing.get_processing_type() == Globals.mode_enum['predict']:
        Processing.set_current_year(Scenario.get_scen_value('prediction_start_date'))
    else:
        Processing.set_current_year(IGrid.igrid.get_urban_year(0))
    Utilities.init_grid(z.gridData)
    # print(z.gridData)
    if len(Scenario.get_scen_value('landuse_data_file')) > 0:
        Grow.landuse_init(deltatron.gridData, land1.gridData)
    # Condition the urban seed into z before growth begins.
    seed = IGrid.igrid.get_urban_grid(0)
    Utilities.condition_gif(seed, z.gridData)
    if Scenario.get_scen_value('echo'):
        print("******************************************")
        if Processing.get_processing_type() == Globals.mode_enum['calibrate']:
            c_run = Processing.get_current_run()
            t_run = Processing.get_total_runs()
            print(f"Run = {c_run} of {t_run}"
                  f" ({100 * c_run / t_run:8.1f} percent complete)")
        print(f"Monte Carlo = {int(Processing.get_current_monte()) + 1} of "
              f"{Scenario.get_scen_value('monte_carlo_iterations')}")
        print(f"Processing.current_year = {Processing.get_current_year()}")
        print(f"Processing.stop_year = {Processing.get_stop_year()}")
    if Scenario.get_scen_value('logging') and int(
            Scenario.get_scen_value('log_processing_status')) > 0:
        Grow.completion_status()
    # Main yearly simulation loop.
    while Processing.get_current_year() < Processing.get_stop_year():
        # Increment Current Year
        Processing.increment_current_year()
        cur_yr = Processing.get_current_year()
        if Scenario.get_scen_value('echo'):
            print(f" {cur_yr}", end='')
            sys.stdout.flush()
            # Break the echoed year list into rows of ten.
            if (cur_yr + 1) % 10 == 0 or cur_yr == Processing.get_stop_year():
                print()
        if Scenario.get_scen_value('logging'):
            Logger.log(f" {cur_yr}")
            if (cur_yr + 1) % 10 == 0 or cur_yr == Processing.get_stop_year():
                Logger.log("")
        # Apply the Cellular Automaton Rules for this Year
        avg_slope, num_growth_pix, sng, sdc, og, rt, pop = Spread.spread(z, avg_slope)
        # print(f"rt: {rt}")
        sdg = 0  # this isn't passed into spread, but I don't know why then it's here
        Stats.set_sng(sng)
        # NOTE(review): sdg is set from sdc (see commented-out set_sdc below)
        # — confirm this mapping is intentional.
        Stats.set_sdg(sdc)
        # Stats.set_sdc(sdc)
        Stats.set_og(og)
        Stats.set_rt(rt)
        Stats.set_pop(pop)
        if Scenario.get_scen_value('view_growth_types'):
            if IGrid.using_gif:
                filename = f"{Scenario.get_scen_value('output_dir')}z_growth_types" \
                           f"_{Processing.get_current_run()}_{Processing.get_current_monte()}_" \
                           f"{Processing.get_current_year()}.gif"
            else:
                filename = f"{Scenario.get_scen_value('output_dir')}z_growth_types" \
                           f"_{Processing.get_current_run()}_{Processing.get_current_monte()}_" \
                           f"{Processing.get_current_year()}.tif"
            date = str(Processing.get_current_year())
            ImageIO.write_gif(z, Color.get_growth_table(), filename, date,
                              IGrid.nrows, IGrid.ncols)
        # Grow land use (or just accumulate urbanization without it).
        if len(Scenario.get_scen_value('landuse_data_file')) > 0:
            Grow.grow_landuse(land1, num_growth_pix)
        else:
            Grow.grow_non_landuse(z.gridData)
        # Re-condition the urban seed into z after growth.
        seed = IGrid.igrid.get_urban_grid(0)
        Utilities.condition_gif(seed, z.gridData)
        # do Statistics
        Stats.update(num_growth_pix)
        # do Self Modification
        Coeff.self_modify(Stats.get_growth_rate(), Stats.get_percent_urban())
        Coeff.write_current_coeff(Processing.get_current_run(),
                                  Processing.get_current_monte(),
                                  Processing.get_current_year())
def cal_leesalee():
    """Compute the Lee-Sallee shape match between the simulated grid and
    the observed urban grid for the current year (skipped in predict mode,
    where the score stays at its default of 1.0)."""
    simulated = PGrid.get_z()
    observed = IGrid.igrid.get_urban_grid_by_yr(Processing.get_current_year())
    # Default score; overwritten below except when predicting.
    Stats.record.this_year.leesalee = 1.0
    if Processing.get_processing_type() != Globals.mode_enum['predict']:
        Stats.compute_leesalee(simulated.gridData, observed)
def driver():
    """Drive one complete monte-carlo batch: run the simulations, write
    prediction-mode output images, compute the land-use match score, and
    hand everything to Stats.analyze()."""
    TimerUtility.start_timer('drv_driver')
    name = "_cumcolor_urban_"
    output_dir = Scenario.get_scen_value("output_dir")
    landuse_flag = len(Scenario.get_scen_value("landuse_data_file")) > 0
    nrows = IGrid.nrows
    ncols = IGrid.ncols
    total_pixels = IGrid.get_total_pixels()
    z_cumulate = PGrid.get_cumulate()
    sim_landuse = PGrid.get_land1()
    # Create Annual Landuse Probability File
    if Processing.get_processing_type() == Globals.mode_enum["predict"]:
        if landuse_flag:
            LandClass.init_annual_prob(total_pixels)
    # Monte Carlo Simulation
    Driver.monte_carlo(z_cumulate, sim_landuse)
    if Processing.get_processing_type() == Globals.mode_enum["predict"]:
        # Output Urban Images
        if IGrid.using_gif:
            filename = f"{output_dir}cumulate_urban.gif"
        else:
            filename = f"{output_dir}cumulate_urban.tif"
        IGrid.echo_meta(f"{output_dir}cumulate_urban.tfw", "urban")
        colortable = Color.get_grayscale_table()
        ImageIO.write_gif(z_cumulate, colortable, filename, "", nrows, ncols)
        Utilities.write_z_prob_grid(z_cumulate.gridData, name)
        if landuse_flag:
            cum_prob, cum_uncert = LandClass.build_prob_image(total_pixels)
            # print(cum_prob)
            # Output Cumulative Prob Image
            if IGrid.using_gif:
                filename = f"{output_dir}cumcolor_landuse.gif"
            else:
                filename = f"{output_dir}cumcolor_landuse.tif"
            IGrid.echo_meta(f"{output_dir}cumcolor_landuse.tfw", "landuse")
            cum_prob_grid = IGrid.wrap_list(cum_prob)
            ImageIO.write_gif(cum_prob_grid, Color.get_landuse_table(),
                              filename, "", nrows, ncols)
            # Output Cumulative Uncertainty Image
            if IGrid.using_gif:
                filename = f"{output_dir}uncertainty.landuse.gif"
            else:
                filename = f"{output_dir}uncertainty.landuse.tif"
            IGrid.echo_meta(f"{output_dir}uncertainty.landuse.tfw", "landuse")
            cum_uncert_grid = IGrid.wrap_list(cum_uncert)
            ImageIO.write_gif(cum_uncert_grid, Color.get_grayscale_table(),
                              filename, "", nrows, ncols)
    # fmatch is only meaningful when calibrating/testing with land-use data.
    if not landuse_flag or Processing.get_processing_type() == Globals.mode_enum['predict']:
        fmatch = 0.0
    else:
        landuse1 = IGrid.igrid.get_landuse_igrid(1)
        fmatch = Driver.fmatch(sim_landuse, landuse1, landuse_flag, total_pixels)
    Stats.analyze(fmatch)
    TimerUtility.stop_timer('drv_driver')
def main():
    """Program entry point: parse the mode and scenario file from argv,
    initialize all subsystems, and dispatch prediction or the nested
    calibration coefficient sweep.

    Usage: prog <predict|restart|test|calibrate> <scenario_file>
    Exits 1 on bad arguments or any error; test mode exits 0 after one run.
    """
    TimerUtility.start_timer('total_time')
    valid_modes = ["predict", "restart", "test", "calibrate"]
    Globals.mype = 0
    Globals.npes = 1
    packing = False
    restart_run = 0
    # Parse command line
    # NOTE(review): this first argc check is redundant — the combined check
    # below repeats it.
    if len(sys.argv) != 3:
        __print_usage(sys.argv[0])
        sys.exit(1)
    if len(sys.argv) != 3 or sys.argv[1] not in valid_modes:
        __print_usage(sys.argv[0])
        sys.exit(1)
    Processing.set_processing_type(Globals.mode_enum[sys.argv[1]])
    if Processing.get_processing_type() == Globals.mode_enum['restart']:
        Processing.set_restart_flag(True)
    Scenario.init(sys.argv[2], Processing.get_restart_flag())
    try:
        log_it = Scenario.get_scen_value("logging")
        random_seed = Scenario.get_scen_value("random_seed")
        Random.set_seed(random_seed)
        landuse_class_info = Scenario.get_scen_value("landuse_class_info")
        LandClass.num_landclasses = len(landuse_class_info)
        # filling in the class array in Land_Class
        for i, landuse_class in enumerate(landuse_class_info):
            # num, class_id, name, idx, hexColor
            landuse_class_meta = LanduseMeta(landuse_class.grayscale,
                                             landuse_class.type,
                                             landuse_class.name, i,
                                             landuse_class.color[2:])
            LandClass.landuse_classes.append(landuse_class_meta)
        # Set up Coefficients — restart resumes from the saved restart file.
        if sys.argv[1] == 'restart':
            if log_it:
                print("Implement log here")
            diffusion, breed, spread, slope_resistance, road_gravity, random_seed, restart_run = \
                Input.read_restart_file(Scenario.get_scen_value("output_dir"))
            Processing.set_current_run(restart_run)
        else:
            Processing.set_current_run(0)
        Coeff.set_start_coeff(
            Scenario.get_scen_value("calibration_diffusion_start"),
            Scenario.get_scen_value("calibration_spread_start"),
            Scenario.get_scen_value("calibration_breed_start"),
            Scenario.get_scen_value("calibration_slope_start"),
            Scenario.get_scen_value("calibration_road_start"))
        Coeff.set_stop_coeff(
            Scenario.get_scen_value("calibration_diffusion_stop"),
            Scenario.get_scen_value("calibration_spread_stop"),
            Scenario.get_scen_value("calibration_breed_stop"),
            Scenario.get_scen_value("calibration_slope_stop"),
            Scenario.get_scen_value("calibration_road_stop"))
        Coeff.set_step_coeff(
            Scenario.get_scen_value("calibration_diffusion_step"),
            Scenario.get_scen_value("calibration_spread_step"),
            Scenario.get_scen_value("calibration_breed_step"),
            Scenario.get_scen_value("calibration_slope_step"),
            Scenario.get_scen_value("calibration_road_step"))
        Coeff.set_best_fit_coeff(
            Scenario.get_scen_value("prediction_diffusion_best_fit"),
            Scenario.get_scen_value("prediction_spread_best_fit"),
            Scenario.get_scen_value("prediction_breed_best_fit"),
            Scenario.get_scen_value("prediction_slope_best_fit"),
            Scenario.get_scen_value("prediction_road_best_fit"))
        # Initial IGrid
        IGrid.init(packing, Processing.get_processing_type())
        '''
        Skipped memory and logging stuff for now, don't know if I'll need it
        If there is a problem, I can go back and implement
        '''
        # Initialize Landuse
        if len(Scenario.get_scen_value("landuse_data_file")) > 0:
            LandClass.init()
            if Scenario.get_scen_value("log_landclass_summary"):
                if log_it:
                    # this is where we would log
                    Logger.log("Test log")
        # Initialize Colortables
        Color.init(IGrid.ncols)
        # Read and validate input
        IGrid.read_input_files(packing,
                               Scenario.get_scen_value("echo_image_files"),
                               Scenario.get_scen_value("output_dir"))
        IGrid.validate_grids(log_it)
        # Normalize Roads
        IGrid.normalize_roads()
        landuse_flag = len(Scenario.get_scen_value("landuse_data_file")) != 0
        IGrid.verify_inputs(log_it, landuse_flag)
        # Initialize PGRID Grids
        PGrid.init(IGrid.get_total_pixels())
        if log_it and Scenario.get_scen_value("log_colortables"):
            Color.log_colors()
        # Count the Number of Runs
        Processing.set_total_runs()
        Processing.set_last_monte(
            int(Scenario.get_scen_value("monte_carlo_iterations")) - 1)
        if log_it:
            if Processing.get_processing_type() == Globals.mode_enum["calibrate"]:
                Logger.log(f"Total Number of Runs = {Processing.get_total_runs()}")
        # Compute Transition Matrix
        if len(Scenario.get_scen_value("landuse_data_file")) > 0:
            Transition.create_matrix()
            if log_it and Scenario.get_scen_value("log_transition_matrix"):
                Transition.log_transition()
        # Compute the Base Statistics against which the calibration will take place
        Stats.set_base_stats()
        if log_it and Scenario.get_scen_value("log_base_statistics"):
            Stats.log_base_stats()
        if log_it and Scenario.get_scen_value("log_debug"):
            IGrid.debug("main.py")
        Processing.set_num_runs_exec_this_cpu(0)
        # Create the shared log files once, on the first run of rank 0.
        if Processing.get_current_run() == 0 and Globals.mype == 0:
            output_dir = Scenario.get_scen_value("output_dir")
            if Processing.get_processing_type() != Globals.mode_enum["predict"]:
                filename = f"{output_dir}control_stats.log"
                Stats.create_control_file(filename)
            if Scenario.get_scen_value("write_std_dev_file"):
                filename = f"{output_dir}std_dev.log"
                Stats.create_stats_val_file(filename)
            if Scenario.get_scen_value("write_avg_file"):
                filename = f"{output_dir}avg.log"
                Stats.create_stats_val_file(filename)
        if Scenario.get_scen_value("write_coeff_file"):
            output_dir = Scenario.get_scen_value("output_dir")
            filename = f"{output_dir}coeff.log"
            Coeff.create_coeff_file(filename, True)
        if Processing.get_processing_type() == Globals.mode_enum["predict"]:
            # Prediction Runs — single run with the best-fit coefficients.
            Processing.set_stop_year(
                Scenario.get_scen_value("prediction_stop_date"))
            Coeff.set_current_coeff(Coeff.get_best_diffusion(),
                                    Coeff.get_best_spread(),
                                    Coeff.get_best_breed(),
                                    Coeff.get_best_slope_resistance(),
                                    Coeff.get_best_road_gravity())
            if Globals.mype == 0:
                Driver.driver()
                Processing.increment_num_runs_exec_this_cpu()
            # Timing stuff
            if log_it and int(Scenario.get_scen_value('log_timings')) > 1:
                TimerUtility.log_timers()
        else:
            # Calibration and Test Runs — sweep every coefficient combination.
            Processing.set_stop_year(
                IGrid.igrid.get_urban_year(IGrid.igrid.get_num_urban() - 1))
            output_dir = Scenario.get_scen_value('output_dir')
            d_start, d_step, d_stop = Coeff.get_start_step_stop_diffusion()
            for diffusion_coeff in range(d_start, d_stop + 1, d_step):
                b_start, b_step, b_stop = Coeff.get_start_step_stop_breed()
                for breed_coeff in range(b_start, b_stop + 1, b_step):
                    s_start, s_step, s_stop = Coeff.get_start_step_stop_spread()
                    for spread_coeff in range(s_start, s_stop + 1, s_step):
                        sr_start, sr_step, sr_stop = Coeff.get_start_step_stop_slope_resistance()
                        for slope_resist_coeff in range(sr_start, sr_stop + 1, sr_step):
                            rg_start, rg_step, rg_stop = Coeff.get_start_step_stop_road_gravity()
                            for road_grav_coeff in range(rg_start, rg_stop + 1, rg_step):
                                # Persist restart data before each run so an
                                # interrupted sweep can resume here.
                                filename = f"{output_dir}{UGMDefines.RESTART_FILE}{Globals.mype}"
                                Output.write_restart_data(
                                    filename, diffusion_coeff, breed_coeff,
                                    spread_coeff, slope_resist_coeff,
                                    road_grav_coeff,
                                    Scenario.get_scen_value('random_seed'),
                                    restart_run)
                                restart_run += 1
                                Coeff.set_current_coeff(
                                    diffusion_coeff, spread_coeff, breed_coeff,
                                    slope_resist_coeff, road_grav_coeff)
                                Driver.driver()
                                Processing.increment_num_runs_exec_this_cpu()
                                # Timing Logs
                                if log_it and int(Scenario.get_scen_value('log_timings')) > 1:
                                    TimerUtility.log_timers()
                                Processing.increment_current_run()
                                # Test mode performs exactly one run, then exits.
                                if Processing.get_processing_type() == Globals.mode_enum['test']:
                                    TimerUtility.stop_timer('total_time')
                                    if log_it and int(Scenario.get_scen_value('log_timings')) > 0:
                                        TimerUtility.log_timers()
                                    Logger.close()
                                    sys.exit(0)
        # Stop timer
        TimerUtility.stop_timer('total_time')
        if log_it and int(Scenario.get_scen_value('log_timings')) > 0:
            TimerUtility.log_timers()
        # Close Logger
        Logger.close()
    except KeyError as err:
        # A required scenario key was missing.
        traceback.print_exc()
        print("{0} is not set. Please set it in your scenario file".format(
            str(err).upper()))
        Logger.log("Something went wrong")
        Logger.close()
        sys.exit(1)
    except FileNotFoundError as err:
        traceback.print_exc()
        print(err)
        Logger.log("Something went wrong")
        Logger.close()
        sys.exit(1)
    except Exception:
        # Top-level boundary: log, close, and exit non-zero on anything else.
        traceback.print_exc()
        Logger.log("Something went wrong")
        Logger.close()
        sys.exit(1)