def main():
    
    ts = time.time()
    output_path = "../runs/phase_2/step1/" + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M')

    for crop in crops:

        crop_id = ""
        if (crop == "maize"):
            crop_id = "SM"
        elif (crop == "winter_wheat"):
            crop_id = "WW"

        for c_res in climate_resolutions:

            for s_res in soil_resolutions:

                soil_file = soil_files_directory + soil_files[s_res]

                for simulation_type in simulation_types:
          
                    splitted_project_id_list = None    
                    project_id_list = None
              
                    print rank, "Simulation type: ", simulation_type, "\Soil file: ", soil_file
                    output_prefix, input_path, ini_file = getSimulationConfig(simulation_type, crop)
              
                    if (s_res == "sDS"):
                        print "Calculating the dominamt soil ..."
                        project_id_list = [["dominant_soil",0,0,51.47, 175.5]]
                    else:
                        print "Calculating other different soil types ..."
                   
                        if (rank == 0):
                            # only one processor reads in the meta information
                            project_id_list = readSoilFile(soil_file)
                          
                    if (rank == 0):
                        # split the meta info list into "number of processors" sublists 
                        splitted_project_id_list = mpi_helper.splitListForNodes(project_id_list, size)

                  
                    ###################################################
                    # parallel part
                    ##################################################
                  
                    # send each sublist of the splitted list to on processor
                    node_specific_project_id_list = comm.scatter(splitted_project_id_list, root=0)      
              
                    # each processor received a specific number of meta_info_objects
                    # that he has to process
                    print rank, "Received project id list with", len(node_specific_project_id_list), "elements" 

                    start_date = str(start_year) + "-01-01"
                    end_date = str(end_year) + "-01-31"

                    monica_simulation_config = monica.MacsurScalingConfiguration()
                    monica_simulation_config.setInputPath(input_path)
                    monica_simulation_config.setIniFile(ini_file)
                    monica_simulation_config.setCropName(crop)
                    monica_simulation_config.setStartDate(start_date)
                    monica_simulation_config.setEndDate(end_date)
                    monica_simulation_config.setPhase(2)
                    monica_simulation_config.setStep(1)
                    monica_simulation_config.setSoilFile(soil_file)
                  
                    node_simulation_results = []
                  
                    for index, config_list in enumerate(node_specific_project_id_list):
                        project_id = config_list[0]


                        row = config_list[1]
                        col = config_list[2]
                        latitude = config_list[3]
                        elevation = config_list[4]

                        print rank, "###################################"
                        print rank, str(index+1) +  "/" + str(len(node_specific_project_id_list)) 
                        print rank, "Calculating", project_id
                        print rank, start_date, end_date
                        print rank, row, col, latitude, elevation

                        monica_simulation_config.setClimateFile("../input_data/phase_2/step1/climate_files/daily_mean_NRW.csv")
              
                        monica_simulation_config.setProjectId(project_id)
                        monica_simulation_config.setRowId(row)
                        monica_simulation_config.setColId(col)
                        
                        path = output_path + "/" + crop + "_" + simulation_type + "/" + project_id
                        monica_simulation_config.setOutputPath(path) 
                            
                        if not (os.path.exists(path)):
                            print rank, "create_directory: ", path
                            os.makedirs(path)
                        
                        monica_simulation_config.setLatitude(latitude)
                        monica_simulation_config.setElevation(elevation)
                                
                        monica.activateDebugOutput(False);
                        monica.runMacsurScalingSimulation(monica_simulation_config)
                        sim_result = analyseSimulationOutput(path, row, col, crop)
                        node_simulation_results.extend(sim_result)
                        
                        # remove simulation result dir
                        if (remove_monica_files_after_simulation):
                            shutil.rmtree(path, True)
        
                        print rank, "###################################"
                        
                        #if (index == 1):
                        #    break
                        
                
                    ###################################################
                    # end of parallel part
                    ##################################################
                    
                    result_list = comm.gather(node_simulation_results, root=0)
                    
                    if (rank == 0):
                        
                        
                        output_filename = output_prefix + "_" + s_res + "x" + c_res + ".csv"       
                        output_filehandle = open(output_path + "/" + output_filename, "wb")
                        output_csv = csv.writer(output_filehandle, delimiter=sep)
                        
                        header = ["gridcell", "year", "Yield (t DM/ha)", "Total above ground biomass (t DM/ha)",
                                "Total ET over the growing season (mm/growing season)", 
                                "Total intercepted PAR over the growing season (MJ/ha/growing season)",
                                "Maximum LAI during the growing season (m2/m2)", 
                                "Anthesis date (DOY)", "Maturity date (DOY)", 
                                "SOC at sowing date at 30 cm (gC/m2)", 
                                "SOC at sowing date at 2.0 m (gC/m2)", 
                                "Total net ecosystem exchange over the growing season (gC/m2/growing season)", 
                                "Total net primary productivity over the growing season (gC/m2/growing season)",
                                "Total N20 over the growing season (kg N/ha/growing season)",
                                "Total annual N20 (kg N/ha/year)",
                                "Total annual N leaching over 1.5 m (kg N/ha/year)", 
                                "Total N leaching over the growing season over 1.5 m (kg N/ha/growing season)",
                                "Total annual water loss below 1.5 m (mm/ha/year)", 
                                "Total water loss below 1.5 m over the growing season (mm/ha/growing season)", 
                                "Maximum rooted soil depth (m)",
                                "Total annual irrigated water amount (mm)"]
                        output_csv.writerow(header)
                      
          
                       
                        for node_list in result_list:   
                            for result_row in node_list:
                                output_csv.writerow(result_row)
                                
                        output_filehandle.close()
def main():
    
    ts = time.time()
    output_path = "../runs/phase_2/step" + str(step) + "/" + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M')

    lookup_tables = read_lookup_table(lookup_table_file)

    s_res = ""
    c_res = ""

    for crop in crops:

        crop_id = ""
        if (crop == "maize"):
            crop_id = "SM"
        elif (crop == "winter_wheat"):
            crop_id = "WW"


        for res in resolutions:

            lookup_table = lookup_tables[res]
            #for k,v in lookup_table.iteritems():
            #    print k, v

            soil_file = ""
            if (step == 2) :

                s_res = "s" + str(res)
                c_res = "c1"

                soil_file = soil_files_directory + soil_files[res]
            elif (step == 3):

                s_res = "s1"
                c_res = "c" + str(res)

                soil_file = soil_files_directory + soil_files["1"]


            for simulation_type in simulation_types:
      
              print rank, "Simulation type: ", simulation_type, "\tResolution: ", res
              output_prefix, input_path, ini_file = getSimulationConfig(simulation_type, crop)
          
              splitted_meta_info_list = None
              
               
              if (rank == 0):
                  # only one processor reads in the meta information
                  meta_info_list = readMetaInfo(res, step, lookup_table)
                      
                  #for meta in meta_info_list:
                  #    print meta.start_date, meta.end_date, meta.project_id, meta.climate_file, meta.row_id, meta.col_id
                      
                  # split the meta info list into "number of processors" sublists 
                  splitted_meta_info_list = mpi_helper.splitListForNodes(meta_info_list, size)

              ###################################################
              # parallel part
              ##################################################
              
              # send each sublist of the splitted list to on processor
              node_specific_meta_info_list = comm.scatter(splitted_meta_info_list, root=0)      
          
              # each processor received a specific number of meta_info_objects
              # that he has to process
              print rank, "Received meta info list with", len(node_specific_meta_info_list), "elements" 

              start_date = str(start_year) + "-01-01"
              end_date = str(end_year) + "-01-31"

              monica_simulation_config = monica.MacsurScalingConfiguration()
              monica_simulation_config.setInputPath(input_path)
              monica_simulation_config.setIniFile(ini_file)
              monica_simulation_config.setCropName(crop)
              
              node_simulation_results = []
              
              for index, meta in enumerate(node_specific_meta_info_list):

                  #if (index >= 5):
                  #    continue

                  col_row_list = lookup_table[str([str(meta.col_id),str(meta.row_id)])]
                  lookup_col = int(col_row_list[0])
                  lookup_row = int(col_row_list[1])

                  lookup_project_id = "Res" + str(res) + "_C" + str(lookup_col) + ":R" + str(lookup_row)



                  print rank, "###################################"
                  print rank, str(index+1) +  "/" + str(len(node_specific_meta_info_list)) 
                  print rank, "Calculating", meta.project_id, "\tLookup_project_id:", lookup_project_id
                  print rank, meta.start_date, meta.end_date
                  print rank, meta.climate_file
                  print rank, soil_file

                  monica_simulation_config.setStartDate(meta.start_date)
                  monica_simulation_config.setEndDate(meta.end_date)
                  monica_simulation_config.setClimateFile(meta.climate_file)
                  monica_simulation_config.setProjectId(meta.project_id)
                  monica_simulation_config.setLookupProjectId(lookup_project_id)
                  monica_simulation_config.setRowId(meta.row_id)
                  monica_simulation_config.setColId(meta.col_id)
                  monica_simulation_config.setPhase(2)
                  monica_simulation_config.setStep(step)
                  monica_simulation_config.setSoilFile(soil_file)
                  
                  path = path = output_path + "/" + crop + "_" + simulation_type + "/" + meta.project_id
                  monica_simulation_config.setOutputPath(path) 
                  
                  if not (os.path.exists(path)):
                      print rank, "create_directory: ", path
                      os.makedirs(path)
                  
                  monica_simulation_config.setLatitude(meta.latitude)
                  monica_simulation_config.setElevation(meta.elevation)
                          
                  monica.activateDebugOutput(False);
                  monica.runMacsurScalingSimulation(monica_simulation_config)
                  sim_result = analyseSimulationOutput(path, meta.row_id, meta.col_id, crop)
                  node_simulation_results.extend(sim_result)
                  
                  # remove simulation result dir
                  if (remove_monica_files_after_simulation):
                      shutil.rmtree(path, True)

                  print rank, "###################################"
                  
                  #if (index == 1):
                  #    break
                  
          
              ###################################################
              # end of parallel part
              ##################################################              


                
              result_list = comm.gather(node_simulation_results, root=0)
            
              if (rank == 0):
                
                
                  output_filename = output_prefix + "_" + s_res + "x" + c_res + ".csv"       
                  output_filehandle = open(output_path + "/" + output_filename, "wb")
                  output_csv = csv.writer(output_filehandle, delimiter=sep)
                
                  header = ["gridcell", "year", "Yield (t DM/ha)", "Total above ground biomass (t DM/ha)",
                        "Total ET over the growing season (mm/growing season)", 
                        "Total intercepted PAR over the growing season (MJ/ha/growing season)",
                        "Maximum LAI during the growing season (m2/m2)", 
                        "Anthesis date (DOY)", "Maturity date (DOY)", 
                        "SOC at sowing date at 30 cm (gC/m2)", 
                        "SOC at sowing date at 2.0 m (gC/m2)", 
                        "Total net ecosystem exchange over the growing season (gC/m2/growing season)", 
                        "Total net primary productivity over the growing season (gC/m2/growing season)",
                        "Total N20 over the growing season (kg N/ha/growing season)",
                        "Total annual N20 (kg N/ha/year)",
                        "Total annual N leaching over 1.5 m (kg N/ha/year)", 
                        "Total N leaching over the growing season over 1.5 m (kg N/ha/growing season)",
                        "Total annual water loss below 1.5 m (mm/ha/year)", 
                        "Total water loss below 1.5 m over the growing season (mm/ha/growing season)", 
                        "Maximum rooted soil depth (m)",
                        "Total annual irrigated water amount (mm)"]
                  output_csv.writerow(header)
              
  
               
                  for node_list in result_list:   
                      for result_row in node_list:
                          output_csv.writerow(result_row)
                        
                  output_filehandle.close()
def main():
  ts = time.time()
  output_path = pathToCarbiocialData + "runs/" + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M') + "/"

  print "processor #", rank
  input_path = pathToCarbiocialData + "input_data/"
  #ini_file = "soybean.ini"
  ini_file = "maize.ini"
  
  splittedGridDataMap = None

  if (rank == 0):
    # only one processor reads in the meta information
    splittedGridDataMap = splitAsciiGrid(pathToCarbiocialData + "input_data/solos-profile-ids_brazil_900.asc", size)

  ###################################################
  # parallel part
  ##################################################

  # send each sublist of the splitted list to on processor
  nodeSpecificDataMap = comm.scatter(splittedGridDataMap, root=0)

  # each processor received a specific number of meta_info_objects
  # that he has to process
  print rank, "Received data map with ", len(nodeSpecificDataMap), " elements"

  monica_simulation_config = monica.CarbiocialConfiguration()
  monica_simulation_config.setInputPath(input_path)
  monica_simulation_config.setIniFile(ini_file)
  #monica_simulation_config.pathToClimateDataReorderingFile = pathToClimateDataReorderingFile;
  #monica_simulation_config.create2013To2040ClimateData = True
  #monica_simulation_config.setCropName(crop)

  #node_simulation_results = []

  coord2year2yield = {}

  index = 0
  for coord, profileId in nodeSpecificDataMap.iteritems():
    row, col = coord
    #row, col = (86, 820)
    monica_simulation_config.setStartDate(startDate)
    monica_simulation_config.setEndDate(endDate)
    monica_simulation_config.setClimateFile(pathToClimateData + "row-" + str(row) + "/col-" + str(col) + ".asc")
    #monica_simulation_config.setClimateFile(pathToCarbiocialData+"input_data/row-0/col-0.asc")
    monica_simulation_config.setRowId(row)
    monica_simulation_config.setColId(col)
    monica_simulation_config.setProfileId(profileId)

    print rank, "###################################"
    print rank, "coord: ", coord, " profileId: ", monica_simulation_config.getProfileId()
    print rank, "startDate: ", startDate, " endDate: ", endDate
    print rank, "climateFile: ", monica_simulation_config.getClimateFile()

    path = output_path + "row-" + str(row) + "/col-" + str(col) + "/"
    monica_simulation_config.setOutputPath(path)

    #if not (os.path.exists(path)):
    #  print rank, "create_directory: ", path
    #  os.makedirs(path)

    monica_simulation_config.setLatitude(-9.41)
    monica_simulation_config.setElevation(300.0)

    #monica.activateDebugOutput(True);
    monica.activateDebugOutput(False);
    #monica.activateDebugFileOutput(False);
    #monica.setPathToDebugFile(output_path + "row-" + str(row) + "/col-" + str(col) + "-debug.out");
    year2yield = monica.runCarbiocialSimulation(monica_simulation_config)
    #print rank, "type(year2yield): ", type(year2yield)
    #simResult = getYieldsFromSimulationOutput(path, row, col)
    #coord2year2yield[simResult[0]] = simResult[1]
    
    y2y = {}
    
    if len(year2yield) > 0:
      #outputFile = open(output_path + "row-" + str(row) + "/col-" + str(col) + "-yields.txt", "wb")    
      #outputFile.write("year yield\n")
      for year, yield_ in year2yield.iteritems():
      #  outputFile.write(str(year) + " " + str(yield_) + "\n")
        y2y[year] = yield_
      #outputFile.close()

      coord2year2yield[(row, col)] = y2y
    
      # remove simulation result dir
      #if remove_monica_files_after_simulation:
      #  shutil.rmtree(path, True)

    print rank, "###################################"

    #if index == 1:
    #  break

    index = index + 1


  ###################################################
  # end of parallel part
  ##################################################

  resultList = comm.gather(coord2year2yield, root=0)

  if rank == 0:

    print "outputing results ..."

    #sorted values for creation of yearly grids
    row2col2year2yield = {}
    #sorted values for creation of avg yield grid over all years
    row2col2yields = {}
    #print "resultList: ", resultList
    years = resultList[0].items()[0][1].keys();
    print "years: ", years

    #collect data into nested maps to access them below
    for c2y2y in resultList:
      for (row, col), y2y in c2y2y.iteritems():
        if not row in row2col2year2yield:
          row2col2year2yield[row] = {}
          row2col2yields[row] = {}
        row2col2year2yield[row][col] = y2y
        row2col2yields[row][col] = y2y.values()

    if not (os.path.exists(output_path)):
      print "creating output directory: ", output_path
      os.makedirs(output_path)    
        
    outputGridFilename = "yields-year-"
    outputAvgGridFile = open(output_path + "yields-avg.asc", "wb")
    outputAvgGridFile.writelines(asciiGridHeaders)
    currentColAvgYields = []
    year2openFile = {}
    year2currentColYields = {}
    #open for every available year a file
    for year in years:
      year2openFile[year] = open(output_path + outputGridFilename + str(year) + ".asc", "wb")
      year2openFile[year].writelines(asciiGridHeaders)

    #iterate over all rows and cols, avg years, and assemble a ascii grid line with the column values
    for row in range(noOfGridRows):
      for col in range(noOfGridCols):
        if row in row2col2year2yield and col in row2col2year2yield[row]:
          #collect column values for single years
          for year, yield_ in row2col2year2yield[row][col].iteritems():
            if not year in year2currentColYields:
              year2currentColYields[year] = []
            year2currentColYields[year].append(yield_)
        else:
          for year in years:
            if not year in year2currentColYields:
              year2currentColYields[year] = []
            year2currentColYields[year].append(noDataValue)

        #collect column values for the averaged years
        if row in row2col2yields and col in row2col2yields[row]:
          yields = row2col2yields[row][col]
          if len(yields) > 0:
            currentColAvgYields.append(sum(yields) / len(yields))
          else:
            currentColAvgYields.append(0)
        else:
          currentColAvgYields.append(noDataValue)

      #write the yearly column values to the according file
      for year, f in year2openFile.iteritems():
        line = " ".join([str(ys) for ys in year2currentColYields[year]]) + "\n"
        f.write(line)
        year2currentColYields[year] = []

      #write the averaged column values to the file
      avgLine = " ".join([str(ys) for ys in currentColAvgYields]) + "\n"
      outputAvgGridFile.write(avgLine)
      currentColAvgYields = []

    for year, f in year2openFile.iteritems():
      f.close()

    outputAvgGridFile.close()
def main():
    """Run the Carbiocial MONICA simulations and write yield ascii grids.

    Rank 0 splits the soil-profile ascii grid into one sub-map per MPI
    processor and scatters them; every rank simulates its grid cells and
    builds a {(row, col): {year: yield}} map.  Rank 0 gathers those maps
    and writes one ascii grid per simulated year plus an all-years
    average grid.

    Relies on module-level globals: pathToCarbiocialData, pathToClimateData,
    startDate, endDate, rank, size, comm, monica, asciiGridHeaders,
    noOfGridRows, noOfGridCols and noDataValue.
    """
    ts = time.time()
    # timestamped run directory so repeated runs do not overwrite each other
    output_path = pathToCarbiocialData + "runs/" + datetime.datetime.fromtimestamp(
        ts).strftime('%Y-%m-%d_%H-%M') + "/"

    print "processor #", rank
    input_path = pathToCarbiocialData + "input_data/"
    #ini_file = "soybean.ini"
    ini_file = "maize.ini"

    splittedGridDataMap = None

    if (rank == 0):
        # only one processor reads in the meta information
        splittedGridDataMap = splitAsciiGrid(
            pathToCarbiocialData +
            "input_data/solos-profile-ids_brazil_900.asc", size)

    ###################################################
    # parallel part
    ##################################################

    # send each sublist of the splitted list to on processor
    nodeSpecificDataMap = comm.scatter(splittedGridDataMap, root=0)

    # each processor received a specific number of meta_info_objects
    # that he has to process
    print rank, "Received data map with ", len(
        nodeSpecificDataMap), " elements"

    monica_simulation_config = monica.CarbiocialConfiguration()
    monica_simulation_config.setInputPath(input_path)
    monica_simulation_config.setIniFile(ini_file)
    #monica_simulation_config.pathToClimateDataReorderingFile = pathToClimateDataReorderingFile;
    #monica_simulation_config.create2013To2040ClimateData = True
    #monica_simulation_config.setCropName(crop)

    #node_simulation_results = []

    # per-rank results: {(row, col): {year: yield}}
    coord2year2yield = {}

    index = 0
    for coord, profileId in nodeSpecificDataMap.iteritems():
        row, col = coord
        #row, col = (86, 820)
        monica_simulation_config.setStartDate(startDate)
        monica_simulation_config.setEndDate(endDate)
        # climate data lives in one file per grid cell
        monica_simulation_config.setClimateFile(pathToClimateData + "row-" +
                                                str(row) + "/col-" + str(col) +
                                                ".asc")
        #monica_simulation_config.setClimateFile(pathToCarbiocialData+"input_data/row-0/col-0.asc")
        monica_simulation_config.setRowId(row)
        monica_simulation_config.setColId(col)
        monica_simulation_config.setProfileId(profileId)

        print rank, "###################################"
        print rank, "coord: ", coord, " profileId: ", monica_simulation_config.getProfileId(
        )
        print rank, "startDate: ", startDate, " endDate: ", endDate
        print rank, "climateFile: ", monica_simulation_config.getClimateFile()

        path = output_path + "row-" + str(row) + "/col-" + str(col) + "/"
        monica_simulation_config.setOutputPath(path)

        #if not (os.path.exists(path)):
        #  print rank, "create_directory: ", path
        #  os.makedirs(path)

        # NOTE(review): one fixed location is used for every grid cell -
        # confirm this is intended
        monica_simulation_config.setLatitude(-9.41)
        monica_simulation_config.setElevation(300.0)

        #monica.activateDebugOutput(True);
        monica.activateDebugOutput(False)
        #monica.activateDebugFileOutput(False);
        #monica.setPathToDebugFile(output_path + "row-" + str(row) + "/col-" + str(col) + "-debug.out");
        year2yield = monica.runCarbiocialSimulation(monica_simulation_config)
        #print rank, "type(year2yield): ", type(year2yield)
        #simResult = getYieldsFromSimulationOutput(path, row, col)
        #coord2year2yield[simResult[0]] = simResult[1]

        y2y = {}

        # only keep cells that produced at least one yearly yield
        if len(year2yield) > 0:
            #outputFile = open(output_path + "row-" + str(row) + "/col-" + str(col) + "-yields.txt", "wb")
            #outputFile.write("year yield\n")
            for year, yield_ in year2yield.iteritems():
                #  outputFile.write(str(year) + " " + str(yield_) + "\n")
                y2y[year] = yield_
            #outputFile.close()

            coord2year2yield[(row, col)] = y2y

            # remove simulation result dir
            #if remove_monica_files_after_simulation:
            #  shutil.rmtree(path, True)

        print rank, "###################################"

        #if index == 1:
        #  break

        index = index + 1

    ###################################################
    # end of parallel part
    ##################################################

    # collect every rank's {(row, col): {year: yield}} map on rank 0
    resultList = comm.gather(coord2year2yield, root=0)

    if rank == 0:

        print "outputing results ..."

        #sorted values for creation of yearly grids
        row2col2year2yield = {}
        #sorted values for creation of avg yield grid over all years
        row2col2yields = {}
        #print "resultList: ", resultList
        # NOTE(review): assumes rank 0's gathered map is non-empty;
        # raises IndexError otherwise - confirm that can never happen
        years = resultList[0].items()[0][1].keys()
        print "years: ", years

        #collect data into nested maps to access them below
        for c2y2y in resultList:
            for (row, col), y2y in c2y2y.iteritems():
                if not row in row2col2year2yield:
                    row2col2year2yield[row] = {}
                    row2col2yields[row] = {}
                row2col2year2yield[row][col] = y2y
                row2col2yields[row][col] = y2y.values()

        if not (os.path.exists(output_path)):
            print "creating output directory: ", output_path
            os.makedirs(output_path)

        outputGridFilename = "yields-year-"
        outputAvgGridFile = open(output_path + "yields-avg.asc", "wb")
        outputAvgGridFile.writelines(asciiGridHeaders)
        currentColAvgYields = []
        year2openFile = {}
        year2currentColYields = {}
        #open for every available year a file
        for year in years:
            year2openFile[year] = open(
                output_path + outputGridFilename + str(year) + ".asc", "wb")
            year2openFile[year].writelines(asciiGridHeaders)

        #iterate over all rows and cols, avg years, and assemble a ascii grid line with the column values
        for row in range(noOfGridRows):
            for col in range(noOfGridCols):
                if row in row2col2year2yield and col in row2col2year2yield[row]:
                    #collect column values for single years
                    for year, yield_ in row2col2year2yield[row][col].iteritems(
                    ):
                        if not year in year2currentColYields:
                            year2currentColYields[year] = []
                        year2currentColYields[year].append(yield_)
                else:
                    # cell was not simulated: emit the no-data marker per year
                    for year in years:
                        if not year in year2currentColYields:
                            year2currentColYields[year] = []
                        year2currentColYields[year].append(noDataValue)

                #collect column values for the averaged years
                if row in row2col2yields and col in row2col2yields[row]:
                    yields = row2col2yields[row][col]
                    if len(yields) > 0:
                        currentColAvgYields.append(sum(yields) / len(yields))
                    else:
                        currentColAvgYields.append(0)
                else:
                    currentColAvgYields.append(noDataValue)

            #write the yearly column values to the according file
            for year, f in year2openFile.iteritems():
                line = " ".join(
                    [str(ys) for ys in year2currentColYields[year]]) + "\n"
                f.write(line)
                year2currentColYields[year] = []

            #write the averaged column values to the file
            avgLine = " ".join([str(ys) for ys in currentColAvgYields]) + "\n"
            outputAvgGridFile.write(avgLine)
            currentColAvgYields = []

        for year, f in year2openFile.iteritems():
            f.close()

        outputAvgGridFile.close()
Beispiel #5
0
def main():

    ts = time.time()
    output_path = "../runs/phase_2/step1/" + datetime.datetime.fromtimestamp(
        ts).strftime('%Y-%m-%d_%H-%M')

    for crop in crops:

        crop_id = ""
        if (crop == "maize"):
            crop_id = "SM"
        elif (crop == "winter_wheat"):
            crop_id = "WW"

        for c_res in climate_resolutions:

            for s_res in soil_resolutions:

                soil_file = soil_files_directory + soil_files[s_res]

                for simulation_type in simulation_types:

                    splitted_project_id_list = None
                    project_id_list = None

                    print rank, "Simulation type: ", simulation_type, "\Soil file: ", soil_file
                    output_prefix, input_path, ini_file = getSimulationConfig(
                        simulation_type, crop)

                    if (s_res == "sDS"):
                        print "Calculating the dominamt soil ..."
                        project_id_list = [[
                            "dominant_soil", 0, 0, 51.47, 175.5
                        ]]
                    else:
                        print "Calculating other different soil types ..."

                        if (rank == 0):
                            # only one processor reads in the meta information
                            project_id_list = readSoilFile(soil_file)

                    if (rank == 0):
                        # split the meta info list into "number of processors" sublists
                        splitted_project_id_list = mpi_helper.splitListForNodes(
                            project_id_list, size)

                    ###################################################
                    # parallel part
                    ##################################################

                    # send each sublist of the splitted list to on processor
                    node_specific_project_id_list = comm.scatter(
                        splitted_project_id_list, root=0)

                    # each processor received a specific number of meta_info_objects
                    # that he has to process
                    print rank, "Received project id list with", len(
                        node_specific_project_id_list), "elements"

                    start_date = str(start_year) + "-01-01"
                    end_date = str(end_year) + "-01-31"

                    monica_simulation_config = monica.MacsurScalingConfiguration(
                    )
                    monica_simulation_config.setInputPath(input_path)
                    monica_simulation_config.setIniFile(ini_file)
                    monica_simulation_config.setCropName(crop)
                    monica_simulation_config.setStartDate(start_date)
                    monica_simulation_config.setEndDate(end_date)
                    monica_simulation_config.setPhase(2)
                    monica_simulation_config.setStep(1)
                    monica_simulation_config.setSoilFile(soil_file)

                    node_simulation_results = []

                    for index, config_list in enumerate(
                            node_specific_project_id_list):
                        project_id = config_list[0]

                        row = config_list[1]
                        col = config_list[2]
                        latitude = config_list[3]
                        elevation = config_list[4]

                        print rank, "###################################"
                        print rank, str(index + 1) + "/" + str(
                            len(node_specific_project_id_list))
                        print rank, "Calculating", project_id
                        print rank, start_date, end_date
                        print rank, row, col, latitude, elevation

                        monica_simulation_config.setClimateFile(
                            "../input_data/phase_2/step1/climate_files/daily_mean_NRW.csv"
                        )

                        monica_simulation_config.setProjectId(project_id)
                        monica_simulation_config.setRowId(row)
                        monica_simulation_config.setColId(col)

                        path = output_path + "/" + crop + "_" + simulation_type + "/" + project_id
                        monica_simulation_config.setOutputPath(path)

                        if not (os.path.exists(path)):
                            print rank, "create_directory: ", path
                            os.makedirs(path)

                        monica_simulation_config.setLatitude(latitude)
                        monica_simulation_config.setElevation(elevation)

                        monica.activateDebugOutput(False)
                        monica.runMacsurScalingSimulation(
                            monica_simulation_config)
                        sim_result = analyseSimulationOutput(
                            path, row, col, crop)
                        node_simulation_results.extend(sim_result)

                        # remove simulation result dir
                        if (remove_monica_files_after_simulation):
                            shutil.rmtree(path, True)

                        print rank, "###################################"

                        #if (index == 1):
                        #    break

                    ###################################################
                    # end of parallel part
                    ##################################################

                    result_list = comm.gather(node_simulation_results, root=0)

                    if (rank == 0):

                        output_filename = output_prefix + "_" + s_res + "x" + c_res + ".csv"
                        output_filehandle = open(
                            output_path + "/" + output_filename, "wb")
                        output_csv = csv.writer(output_filehandle,
                                                delimiter=sep)

                        header = [
                            "gridcell", "year", "Yield (t DM/ha)",
                            "Total above ground biomass (t DM/ha)",
                            "Total ET over the growing season (mm/growing season)",
                            "Total intercepted PAR over the growing season (MJ/ha/growing season)",
                            "Maximum LAI during the growing season (m2/m2)",
                            "Anthesis date (DOY)", "Maturity date (DOY)",
                            "SOC at sowing date at 30 cm (gC/m2)",
                            "SOC at sowing date at 2.0 m (gC/m2)",
                            "Total net ecosystem exchange over the growing season (gC/m2/growing season)",
                            "Total net primary productivity over the growing season (gC/m2/growing season)",
                            "Total N20 over the growing season (kg N/ha/growing season)",
                            "Total annual N20 (kg N/ha/year)",
                            "Total annual N leaching over 1.5 m (kg N/ha/year)",
                            "Total N leaching over the growing season over 1.5 m (kg N/ha/growing season)",
                            "Total annual water loss below 1.5 m (mm/ha/year)",
                            "Total water loss below 1.5 m over the growing season (mm/ha/growing season)",
                            "Maximum rooted soil depth (m)",
                            "Total annual irrigated water amount (mm)"
                        ]
                        output_csv.writerow(header)

                        for node_list in result_list:
                            for result_row in node_list:
                                output_csv.writerow(result_row)

                        output_filehandle.close()
# Beispiel #6
# 0
def main():
    """Phase 1 driver: run MONICA MACSUR scaling simulations via MPI.

    For every crop / simulation type / resolution combination the meta
    information is read on MPI rank 0, split into one sublist per processor,
    scattered to all ranks, simulated node-locally, and the gathered results
    are written to one CSV file per combination by rank 0.

    Relies on module-level globals: macsur_config, path_to_macsur_data, sep,
    remove_monica_files_after_simulation, the MPI objects comm / rank / size,
    and the monica and mpi_helper modules.

    NOTE: the MPI collectives (scatter/gather) must execute in the same order
    on every rank, so the statement order in this function is significant.
    """

    ts = time.time()
    # timestamped run directory so repeated runs do not overwrite each other
    output_path = path_to_macsur_data + "/runs/phase_1/" + datetime.datetime.fromtimestamp(
        ts).strftime('%Y-%m-%d_%H-%M')

    for crop in macsur_config.crops:

        # short crop identifier (unused below; presumably kept for parity
        # with other drivers): SM = silage maize, WW = winter wheat
        crop_id = ""
        if (crop == "maize"):
            crop_id = "SM"
        elif (crop == "winter_wheat"):
            crop_id = "WW"

        for simulation_type in macsur_config.simulation_types:

            for resolution in macsur_config.resolutions:

                print rank, "Simulation type: ", simulation_type, "\tResolution: ", resolution
                output_prefix, input_path, ini_file = getSimulationConfig(
                    simulation_type, crop)

                # non-root ranks keep None; comm.scatter ignores the send
                # buffer on non-root ranks
                splitted_meta_info_list = None

                if (rank == 0):
                    # only one processor reads in the meta information
                    meta_info_list = readMetaInfo(resolution)

                    #for meta in meta_info_list:
                    #print meta.start_date, meta.end_date, meta.project_id, meta.climate_file, meta.row_id, meta.col_id

                    # split the meta info list into "number of processors" sublists
                    splitted_meta_info_list = mpi_helper.splitListForNodes(
                        meta_info_list, size)

                ###################################################
                # parallel part
                ##################################################

                # send each sublist of the splitted list to on processor
                node_specific_meta_info_list = comm.scatter(
                    splitted_meta_info_list, root=0)

                # each processor received a specific number of meta_info_objects
                # that he has to process
                print rank, "Received meta info list with", len(
                    node_specific_meta_info_list), "elements"

                # shared configuration object; per-cell fields are overwritten
                # inside the loop below
                monica_simulation_config = monica.MacsurScalingConfiguration()
                monica_simulation_config.setInputPath(input_path)
                monica_simulation_config.setIniFile(ini_file)
                monica_simulation_config.setCropName(crop)

                # accumulates this rank's result rows for the final gather
                node_simulation_results = []

                for index, meta in enumerate(node_specific_meta_info_list):
                    print rank, "###################################"
                    print rank, str(index + 1) + "/" + str(
                        len(node_specific_meta_info_list))
                    print rank, "Calculating", meta.project_id
                    print rank, meta.start_date, meta.end_date
                    print rank, meta.climate_file
                    monica_simulation_config.setStartDate(meta.start_date)
                    monica_simulation_config.setEndDate(meta.end_date)
                    monica_simulation_config.setClimateFile(meta.climate_file)
                    monica_simulation_config.setProjectId(meta.project_id)
                    monica_simulation_config.setRowId(meta.row_id)
                    monica_simulation_config.setColId(meta.col_id)
                    monica_simulation_config.setPhase(1)

                    # per-cell scratch directory for MONICA's output files:
                    # <run>/<crop>_<type>/res<R>/C<col>-R<row>
                    path = output_path + "/" + crop + "_" + simulation_type + "/res" + str(
                        meta.resolution) + "/C" + str(
                            meta.col_id) + "-R" + str(meta.row_id)
                    monica_simulation_config.setOutputPath(path)

                    if not (os.path.exists(path)):
                        print rank, "create_directory: ", path
                        os.makedirs(path)

                    monica_simulation_config.setLatitude(meta.latitude)
                    monica_simulation_config.setElevation(meta.elevation)

                    monica.activateDebugOutput(False)
                    monica.runMacsurScalingSimulation(monica_simulation_config)
                    # extract the result rows for this grid cell from the
                    # files MONICA wrote into `path`
                    sim_result = analyseSimulationOutput(
                        path, meta.row_id, meta.col_id, crop)
                    node_simulation_results.extend(sim_result)

                    # remove simulation result dir (ignore_errors=True, so a
                    # failed cleanup does not abort the run)
                    if (remove_monica_files_after_simulation):
                        shutil.rmtree(path, True)

                    print rank, "###################################"

                    #if (index == 1):
                    #    break

                ###################################################
                # end of parallel part
                ##################################################

                # collect every node's result rows on rank 0
                result_list = comm.gather(node_simulation_results, root=0)

                if (rank == 0):

                    output_filename = output_prefix + "_Res" + str(
                        resolution) + "_" + crop + ".csv"
                    # "wb" as required by the Python 2 csv module
                    output_filehandle = open(
                        output_path + "/" + output_filename, "wb")
                    output_csv = csv.writer(output_filehandle, delimiter=sep)

                    header = [
                        "gridcell", "year", "Yield (t DM/ha)",
                        "Total above ground biomass (t DM/ha)",
                        "Total ET over the growing season (mm/growing season)",
                        "Total intercepted PAR over the growing season (MJ/ha/growing season)",
                        "Maximum LAI during the growing season (m2/m2)",
                        "Anthesis date (DOY)", "Maturity date (DOY)",
                        "SOC at sowing date at 30 cm (gC/m2)",
                        "SOC at sowing date at 2.0 m (gC/m2)",
                        "Total net ecosystem exchange over the growing season (gC/m2/growing season)",
                        "Total net primary productivity over the growing season (gC/m2/growing season)",
                        "Total N20 over the growing season (kg N/ha/growing season)",
                        "Total annual N20 (kg N/ha/year)",
                        "Total annual N leaching over 1.5 m (kg N/ha/year)",
                        "Total N leaching over the growing season over 1.5 m (kg N/ha/growing season)",
                        "Total annual water loss below 1.5 m (mm/ha/year)",
                        "Total water loss below 1.5 m over the growing season (mm/ha/growing season)",
                        "Maximum rooted soil depth (m)",
                        "Total annual irrigated water amount (mm)"
                    ]
                    output_csv.writerow(header)

                    # one flat list of result rows per node
                    for node_list in result_list:
                        for result_row in node_list:
                            output_csv.writerow(result_row)

                    output_filehandle.close()
        print >> r_config_file, "folder=\"" + directory.replace("\\","/") + "\""
        print >> r_config_file, "standort=" +  str(simulation_config.getLocation())
        print >> r_config_file, "profil=" +  str(simulation_config.getProfil_number())
        print >> r_config_file, "anlage=" +  str(simulation_config.getVariante())
        print >> r_config_file, "ff=\"" +  simulation_config.getFruchtFolge() + "\""
        print >> r_config_file, "classification=" + str(simulation_config.getClassification())
        print >> r_config_file, "root_folder=\"" +  rootpath.replace("\\","/") + "/\""
        print >> r_config_file, "standortname=\"" + standort + "\""
        print >> r_config_file, "data_folder =\"" + simulation_data_path.replace("\\","/") + "\""

        r_config_file.close() 

 
    # run MONICA model simulation   
    if (verbose):
        monica.activateDebugOutput(1)
    else:
        monica.activateDebugOutput(0)
    
    print "Previous to runEva2Simulation"
    result_object = monica.runEVA2Simulation(simulation_config)
    if (analyse):
        save_monica_results(result_object,output_path, standort, simulation_config.getClassification(), simulation_config.getFruchtFolge(), simulation_config.getVariante(), simulation_config.getProfil_number(), simulation_config.getLocation())

    if (save_results_to_db):
        add_simulation_to_sql_db(simulation_config.getLocation(),
                                 simulation_config.getClassification(),
                                 simulation_config.getVariante(),
                                 simulation_config.getFruchtFolge(),
                                 output_path,
                                 start_date,