def main():
    ts = time.time()
    output_path = "../runs/phase_2/step" + str(step) + "/" + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M')
    lookup_tables = read_lookup_table(lookup_table_file)
    s_res = ""
    c_res = ""

    for crop in crops:
        crop_id = ""
        if (crop == "maize"):
            crop_id = "SM"
        elif (crop == "winter_wheat"):
            crop_id = "WW"

        for res in resolutions:
            lookup_table = lookup_tables[res]
            #for k, v in lookup_table.iteritems():
            #    print k, v

            # step 2 varies the soil resolution (fixed climate "c1"),
            # step 3 varies the climate resolution (fixed soil "s1")
            soil_file = ""
            if (step == 2):
                s_res = "s" + str(res)
                c_res = "c1"
                soil_file = soil_files_directory + soil_files[res]
            elif (step == 3):
                s_res = "s1"
                c_res = "c" + str(res)
                soil_file = soil_files_directory + soil_files["1"]

            for simulation_type in simulation_types:
                print rank, "Simulation type: ", simulation_type, "\tResolution: ", res
                output_prefix, input_path, ini_file = getSimulationConfig(simulation_type, crop)

                splitted_meta_info_list = None
                if (rank == 0):
                    # only one processor reads in the meta information
                    meta_info_list = readMetaInfo(res, step, lookup_table)
                    #for meta in meta_info_list:
                    #    print meta.start_date, meta.end_date, meta.project_id, meta.climate_file, meta.row_id, meta.col_id

                    # split the meta info list into "number of processors" sublists
                    splitted_meta_info_list = mpi_helper.splitListForNodes(meta_info_list, size)

                ###################################################
                # parallel part
                ###################################################

                # send each sublist of the splitted list to one processor
                node_specific_meta_info_list = comm.scatter(splitted_meta_info_list, root=0)

                # each processor received a specific number of meta_info objects
                # that it has to process
                print rank, "Received meta info list with", len(node_specific_meta_info_list), "elements"

                start_date = str(start_year) + "-01-01"
                end_date = str(end_year) + "-01-31"

                monica_simulation_config = monica.MacsurScalingConfiguration()
                monica_simulation_config.setInputPath(input_path)
                monica_simulation_config.setIniFile(ini_file)
                monica_simulation_config.setCropName(crop)

                node_simulation_results = []

                for index, meta in enumerate(node_specific_meta_info_list):
                    #if (index >= 5):
                    #    continue

                    # map this cell onto the reference cell of the lookup table
                    col_row_list = lookup_table[str([str(meta.col_id), str(meta.row_id)])]
                    lookup_col = int(col_row_list[0])
                    lookup_row = int(col_row_list[1])
                    lookup_project_id = "Res" + str(res) + "_C" + str(lookup_col) + ":R" + str(lookup_row)

                    print rank, "###################################"
                    print rank, str(index + 1) + "/" + str(len(node_specific_meta_info_list))
                    print rank, "Calculating", meta.project_id, "\tLookup_project_id:", lookup_project_id
                    print rank, meta.start_date, meta.end_date
                    print rank, meta.climate_file
                    print rank, soil_file

                    monica_simulation_config.setStartDate(meta.start_date)
                    monica_simulation_config.setEndDate(meta.end_date)
                    monica_simulation_config.setClimateFile(meta.climate_file)
                    monica_simulation_config.setProjectId(meta.project_id)
                    monica_simulation_config.setLookupProjectId(lookup_project_id)
                    monica_simulation_config.setRowId(meta.row_id)
                    monica_simulation_config.setColId(meta.col_id)
                    monica_simulation_config.setPhase(2)
                    monica_simulation_config.setStep(step)
                    monica_simulation_config.setSoilFile(soil_file)

                    path = output_path + "/" + crop + "_" + simulation_type + "/" + meta.project_id
                    monica_simulation_config.setOutputPath(path)
                    if not (os.path.exists(path)):
                        print rank, "create_directory: ", path
                        os.makedirs(path)

                    monica_simulation_config.setLatitude(meta.latitude)
                    monica_simulation_config.setElevation(meta.elevation)

                    monica.activateDebugOutput(False)
                    monica.runMacsurScalingSimulation(monica_simulation_config)

                    sim_result = analyseSimulationOutput(path, meta.row_id, meta.col_id, crop)
                    node_simulation_results.extend(sim_result)

                    # remove simulation result dir
                    if (remove_monica_files_after_simulation):
                        shutil.rmtree(path, True)
                    print rank, "###################################"
                    #if (index == 1):
                    #    break

                ###################################################
                # end of parallel part
                ###################################################

                result_list = comm.gather(node_simulation_results, root=0)

                if (rank == 0):
                    output_filename = output_prefix + "_" + s_res + "x" + c_res + ".csv"
                    output_filehandle = open(output_path + "/" + output_filename, "wb")
                    output_csv = csv.writer(output_filehandle, delimiter=sep)
                    header = ["gridcell", "year",
                              "Yield (t DM/ha)",
                              "Total above ground biomass (t DM/ha)",
                              "Total ET over the growing season (mm/growing season)",
                              "Total intercepted PAR over the growing season (MJ/ha/growing season)",
                              "Maximum LAI during the growing season (m2/m2)",
                              "Anthesis date (DOY)",
                              "Maturity date (DOY)",
                              "SOC at sowing date at 30 cm (gC/m2)",
                              "SOC at sowing date at 2.0 m (gC/m2)",
                              "Total net ecosystem exchange over the growing season (gC/m2/growing season)",
                              "Total net primary productivity over the growing season (gC/m2/growing season)",
                              "Total N20 over the growing season (kg N/ha/growing season)",
                              "Total annual N20 (kg N/ha/year)",
                              "Total annual N leaching over 1.5 m (kg N/ha/year)",
                              "Total N leaching over the growing season over 1.5 m (kg N/ha/growing season)",
                              "Total annual water loss below 1.5 m (mm/ha/year)",
                              "Total water loss below 1.5 m over the growing season (mm/ha/growing season)",
                              "Maximum rooted soil depth (m)",
                              "Total annual irrigated water amount (mm)"]
                    output_csv.writerow(header)
                    for node_list in result_list:
                        for result_row in node_list:
                            output_csv.writerow(result_row)
                    output_filehandle.close()
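# The main() above distributes work with a scatter/gather pattern: rank 0 builds the full
# work list, splits it into `size` chunks, scatters one chunk per rank, and gathers the
# per-rank result lists at the end. mpi_helper.splitListForNodes() is project code that is
# not shown in this section; the chunking below is only a sketch of an equivalent split
# (round-robin assignment), and demo_work() is a hypothetical stand-in for the MONICA run
# plus output analysis.
from mpi4py import MPI


def split_list_for_nodes_sketch(items, n_nodes):
    # one sublist per rank; rank r gets items r, r + n_nodes, r + 2 * n_nodes, ...
    return [items[r::n_nodes] for r in range(n_nodes)]


def demo_work(item):
    # placeholder for runMacsurScalingSimulation() + analyseSimulationOutput()
    return [item, item * item]


if __name__ == "__main__":
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    chunks = None
    if rank == 0:
        chunks = split_list_for_nodes_sketch(range(10), size)

    my_items = comm.scatter(chunks, root=0)        # each rank receives its own sublist
    my_results = [demo_work(i) for i in my_items]  # "parallel part"
    all_results = comm.gather(my_results, root=0)  # list of per-rank result lists on rank 0

    if rank == 0:
        for node_list in all_results:
            for row in node_list:
                print row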
def main(): ts = time.time() output_path = "../runs/phase_2/step1/" + datetime.datetime.fromtimestamp( ts).strftime('%Y-%m-%d_%H-%M') for crop in crops: crop_id = "" if (crop == "maize"): crop_id = "SM" elif (crop == "winter_wheat"): crop_id = "WW" for c_res in climate_resolutions: for s_res in soil_resolutions: soil_file = soil_files_directory + soil_files[s_res] for simulation_type in simulation_types: splitted_project_id_list = None project_id_list = None print rank, "Simulation type: ", simulation_type, "\Soil file: ", soil_file output_prefix, input_path, ini_file = getSimulationConfig( simulation_type, crop) if (s_res == "sDS"): print "Calculating the dominamt soil ..." project_id_list = [[ "dominant_soil", 0, 0, 51.47, 175.5 ]] else: print "Calculating other different soil types ..." if (rank == 0): # only one processor reads in the meta information project_id_list = readSoilFile(soil_file) if (rank == 0): # split the meta info list into "number of processors" sublists splitted_project_id_list = mpi_helper.splitListForNodes( project_id_list, size) ################################################### # parallel part ################################################## # send each sublist of the splitted list to on processor node_specific_project_id_list = comm.scatter( splitted_project_id_list, root=0) # each processor received a specific number of meta_info_objects # that he has to process print rank, "Received project id list with", len( node_specific_project_id_list), "elements" start_date = str(start_year) + "-01-01" end_date = str(end_year) + "-01-31" monica_simulation_config = monica.MacsurScalingConfiguration( ) monica_simulation_config.setInputPath(input_path) monica_simulation_config.setIniFile(ini_file) monica_simulation_config.setCropName(crop) monica_simulation_config.setStartDate(start_date) monica_simulation_config.setEndDate(end_date) monica_simulation_config.setPhase(2) monica_simulation_config.setStep(1) monica_simulation_config.setSoilFile(soil_file) node_simulation_results = [] for index, config_list in enumerate( node_specific_project_id_list): project_id = config_list[0] row = config_list[1] col = config_list[2] latitude = config_list[3] elevation = config_list[4] print rank, "###################################" print rank, str(index + 1) + "/" + str( len(node_specific_project_id_list)) print rank, "Calculating", project_id print rank, start_date, end_date print rank, row, col, latitude, elevation monica_simulation_config.setClimateFile( "../input_data/phase_2/step1/climate_files/daily_mean_NRW.csv" ) monica_simulation_config.setProjectId(project_id) monica_simulation_config.setRowId(row) monica_simulation_config.setColId(col) path = output_path + "/" + crop + "_" + simulation_type + "/" + project_id monica_simulation_config.setOutputPath(path) if not (os.path.exists(path)): print rank, "create_directory: ", path os.makedirs(path) monica_simulation_config.setLatitude(latitude) monica_simulation_config.setElevation(elevation) monica.activateDebugOutput(False) monica.runMacsurScalingSimulation( monica_simulation_config) sim_result = analyseSimulationOutput( path, row, col, crop) node_simulation_results.extend(sim_result) # remove simulation result dir if (remove_monica_files_after_simulation): shutil.rmtree(path, True) print rank, "###################################" #if (index == 1): # break ################################################### # end of parallel part ################################################## result_list = comm.gather(node_simulation_results, 
root=0) if (rank == 0): output_filename = output_prefix + "_" + s_res + "x" + c_res + ".csv" output_filehandle = open( output_path + "/" + output_filename, "wb") output_csv = csv.writer(output_filehandle, delimiter=sep) header = [ "gridcell", "year", "Yield (t DM/ha)", "Total above ground biomass (t DM/ha)", "Total ET over the growing season (mm/growing season)", "Total intercepted PAR over the growing season (MJ/ha/growing season)", "Maximum LAI during the growing season (m2/m2)", "Anthesis date (DOY)", "Maturity date (DOY)", "SOC at sowing date at 30 cm (gC/m2)", "SOC at sowing date at 2.0 m (gC/m2)", "Total net ecosystem exchange over the growing season (gC/m2/growing season)", "Total net primary productivity over the growing season (gC/m2/growing season)", "Total N20 over the growing season (kg N/ha/growing season)", "Total annual N20 (kg N/ha/year)", "Total annual N leaching over 1.5 m (kg N/ha/year)", "Total N leaching over the growing season over 1.5 m (kg N/ha/growing season)", "Total annual water loss below 1.5 m (mm/ha/year)", "Total water loss below 1.5 m over the growing season (mm/ha/growing season)", "Maximum rooted soil depth (m)", "Total annual irrigated water amount (mm)" ] output_csv.writerow(header) for node_list in result_list: for result_row in node_list: output_csv.writerow(result_row) output_filehandle.close()
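# readSoilFile() is project code that is not shown in this section. The step-1 loop above
# only relies on it returning one list per soil unit in the shape
# [project_id, row, col, latitude, elevation] (see the config_list unpacking). The reader
# below is a minimal sketch under the assumption of a delimited text file with the
# hypothetical columns project_id;row;col;latitude;elevation; the real soil file layout
# may differ.
import csv


def read_soil_file_sketch(soil_file, delimiter=";"):
    project_id_list = []
    with open(soil_file, "rb") as fh:
        for record in csv.DictReader(fh, delimiter=delimiter):
            project_id_list.append([record["project_id"],
                                    int(record["row"]),
                                    int(record["col"]),
                                    float(record["latitude"]),
                                    float(record["elevation"])])
    return project_id_list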
def main():
    ts = time.time()
    output_path = path_to_macsur_data + "/runs/phase_1/" + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M')

    for crop in macsur_config.crops:
        crop_id = ""
        if (crop == "maize"):
            crop_id = "SM"
        elif (crop == "winter_wheat"):
            crop_id = "WW"

        for simulation_type in macsur_config.simulation_types:
            for resolution in macsur_config.resolutions:
                print rank, "Simulation type: ", simulation_type, "\tResolution: ", resolution
                output_prefix, input_path, ini_file = getSimulationConfig(simulation_type, crop)

                splitted_meta_info_list = None
                if (rank == 0):
                    # only one processor reads in the meta information
                    meta_info_list = readMetaInfo(resolution)
                    #for meta in meta_info_list:
                    #    print meta.start_date, meta.end_date, meta.project_id, meta.climate_file, meta.row_id, meta.col_id

                    # split the meta info list into "number of processors" sublists
                    splitted_meta_info_list = mpi_helper.splitListForNodes(meta_info_list, size)

                ###################################################
                # parallel part
                ###################################################

                # send each sublist of the splitted list to one processor
                node_specific_meta_info_list = comm.scatter(splitted_meta_info_list, root=0)

                # each processor received a specific number of meta_info objects
                # that it has to process
                print rank, "Received meta info list with", len(node_specific_meta_info_list), "elements"

                monica_simulation_config = monica.MacsurScalingConfiguration()
                monica_simulation_config.setInputPath(input_path)
                monica_simulation_config.setIniFile(ini_file)
                monica_simulation_config.setCropName(crop)

                node_simulation_results = []

                for index, meta in enumerate(node_specific_meta_info_list):
                    print rank, "###################################"
                    print rank, str(index + 1) + "/" + str(len(node_specific_meta_info_list))
                    print rank, "Calculating", meta.project_id
                    print rank, meta.start_date, meta.end_date
                    print rank, meta.climate_file

                    monica_simulation_config.setStartDate(meta.start_date)
                    monica_simulation_config.setEndDate(meta.end_date)
                    monica_simulation_config.setClimateFile(meta.climate_file)
                    monica_simulation_config.setProjectId(meta.project_id)
                    monica_simulation_config.setRowId(meta.row_id)
                    monica_simulation_config.setColId(meta.col_id)
                    monica_simulation_config.setPhase(1)

                    path = output_path + "/" + crop + "_" + simulation_type + "/res" + str(meta.resolution) + "/C" + str(meta.col_id) + "-R" + str(meta.row_id)
                    monica_simulation_config.setOutputPath(path)
                    if not (os.path.exists(path)):
                        print rank, "create_directory: ", path
                        os.makedirs(path)

                    monica_simulation_config.setLatitude(meta.latitude)
                    monica_simulation_config.setElevation(meta.elevation)

                    monica.activateDebugOutput(False)
                    monica.runMacsurScalingSimulation(monica_simulation_config)

                    sim_result = analyseSimulationOutput(path, meta.row_id, meta.col_id, crop)
                    node_simulation_results.extend(sim_result)

                    # remove simulation result dir
                    if (remove_monica_files_after_simulation):
                        shutil.rmtree(path, True)
                    print rank, "###################################"
                    #if (index == 1):
                    #    break

                ###################################################
                # end of parallel part
                ###################################################

                result_list = comm.gather(node_simulation_results, root=0)

                if (rank == 0):
                    output_filename = output_prefix + "_Res" + str(resolution) + "_" + crop + ".csv"
                    output_filehandle = open(output_path + "/" + output_filename, "wb")
                    output_csv = csv.writer(output_filehandle, delimiter=sep)
                    header = ["gridcell", "year",
                              "Yield (t DM/ha)",
                              "Total above ground biomass (t DM/ha)",
                              "Total ET over the growing season (mm/growing season)",
                              "Total intercepted PAR over the growing season (MJ/ha/growing season)",
                              "Maximum LAI during the growing season (m2/m2)",
                              "Anthesis date (DOY)",
                              "Maturity date (DOY)",
                              "SOC at sowing date at 30 cm (gC/m2)",
                              "SOC at sowing date at 2.0 m (gC/m2)",
                              "Total net ecosystem exchange over the growing season (gC/m2/growing season)",
                              "Total net primary productivity over the growing season (gC/m2/growing season)",
                              "Total N20 over the growing season (kg N/ha/growing season)",
                              "Total annual N20 (kg N/ha/year)",
                              "Total annual N leaching over 1.5 m (kg N/ha/year)",
                              "Total N leaching over the growing season over 1.5 m (kg N/ha/growing season)",
                              "Total annual water loss below 1.5 m (mm/ha/year)",
                              "Total water loss below 1.5 m over the growing season (mm/ha/growing season)",
                              "Maximum rooted soil depth (m)",
                              "Total annual irrigated water amount (mm)"]
                    output_csv.writerow(header)
                    for node_list in result_list:
                        for result_row in node_list:
                            output_csv.writerow(result_row)
                    output_filehandle.close()