def get_max_biomass_pasture(outer_outdir, forage_args, n_pastures, date):
    """Identify the pasture with highest total biomass.

    Parameters:
        outer_outdir (string): directory containing one output subdirectory
            per pasture, named 'p_<index>'
        forage_args (dict): unused; retained for interface compatibility
        n_pastures (int): number of pasture subdirectories to inspect
        date (float): date in "Century format" (e.g. 2012.00 is December of
            2011) at which to compare biomass across pastures

    Returns:
        int index of the pasture with greatest live + standing dead biomass
    """
    biom_list = []
    for pidx in range(n_pastures):
        outdir = os.path.join(outer_outdir, 'p_{}'.format(pidx))
        # delegate to the shared retrieval routine instead of duplicating the
        # latest-output-folder discovery logic here (previous revision was a
        # verbatim copy of get_biomass_from_Century_outputs's body)
        live_gm2, dead_gm2 = get_biomass_from_Century_outputs(outdir, date)
        biom_list.append(live_gm2 + dead_gm2)
    return biom_list.index(max(biom_list))
def get_raw_cp_green():
    """Compute mean crude-protein proxy (aglive1 / aglivc) of green biomass.

    NOTE(review): this function reads `sim_output`, `first_year`,
    `last_year`, `forage_args`, and `month` without defining them or taking
    them as parameters -- they must exist in an enclosing or global scope.
    Confirm where these names are bound before calling.
    """
    # calculate n multiplier to achieve target
    outputs = cent.read_CENTURY_outputs(sim_output, first_year, last_year)
    outputs.drop_duplicates(inplace=True)
    # restrict to months of the simulation
    first_month = forage_args[u'start_month']
    # dates are in "Century format": year + month/12 rounded to 2 decimals
    start_date = first_year + float('%.2f' % (first_month / 12.))
    end_date = last_year + float('%.2f' % (month / 12.))
    outputs = outputs[(outputs.index >= start_date)]
    outputs = outputs[(outputs.index <= end_date)]
    # mean ratio of aboveground live N to aboveground live C across months;
    # presumably a crude protein proxy -- verify against model docs
    return np.mean(outputs.aglive1 / outputs.aglivc)
def collect_results(save_as):
    """Collect empirical vs simulated biomass comparisons across sites.

    For each site listed in the site summary csv, read the back-calc
    management results, count sites where simulated biomass came within
    15 g/m2 of empirical biomass, and assemble a long-format table of
    empirical and simulated biomass series.

    Parameters:
        save_as (string): path to csv file where the combined table should
            be written

    Side effects:
        writes a csv table to `save_as`; prints success/failure counts

    Returns:
        None
    """
    # hard-coded Windows paths to back-calc results and site metadata
    outerdir = r"C:\Users\Ginger\Dropbox\NatCap_backup\Forage_model\Data\Kenya\From_Jenny\Comparisons_with_CENTURY\back_calc_mgmt_9.13.16"
    site_csv = r"C:\Users\Ginger\Dropbox\NatCap_backup\Forage_model\Data\Kenya\From_Jenny\jenny_site_summary_open.csv"
    # measurement start date per year, in "Century format" (year + month/12)
    date_dict = {2012: 2012.50, 2013: 2013.50, 2014: 2014.08}
    succeeded = 0
    failed = []
    sum_dict = {'site': [], 'date': [], 'biomass': [], 'sim_vs_emp': []}
    site_df = pandas.read_csv(site_csv)
    for row in xrange(len(site_df)):
        year = site_df.iloc[row].year
        # empirical biomass at weeks 0, 3, 6, 9; PDM_to_g_m2 converts units
        emp_series = [
            PDM_to_g_m2(site_df.iloc[row]['week{}cage'.format(week)])
            for week in [0, 3, 6, 9]]
        # week offsets expressed as fractions of a year (3 weeks ~ 0.0625)
        emp_dates = [date_dict[year] + x for x in [0, 0.0625, 0.125, 0.1875]]
        site_name = site_df.iloc[row].site
        result_csv = os.path.join(
            outerdir, site_name,
            'modify_management_summary_{}.csv'.format(site_name))
        res_df = pandas.read_csv(result_csv)
        # difference between simulation and measurement at the final iteration
        diff = res_df.iloc[len(res_df) - 1].Simulated_biomass - \
            res_df.iloc[len(res_df) - 1].Empirical_biomass
        # 15 g/m2 is the success threshold used by the back-calc routine
        if abs(float(diff)) <= 15.0:
            succeeded += 1
        else:
            failed.append(site_name)
        final_sim = int(res_df.iloc[len(res_df) - 1].Iteration)
        output_file = os.path.join(
            outerdir, site_name,
            'CENTURY_outputs_iteration{}'.format(final_sim),
            '{}.lis'.format(site_name))
        biomass_df = cent.read_CENTURY_outputs(output_file, year - 1,
                                               year + 1)
        # monthly simulated dates covering the same window as the empirical
        # series; 2014's second month label differs in the model output
        sim_months = [date_dict[year] + x for x in [0, 0.08, 0.17, 0.25]]
        if year == 2014:
            sim_months[1] = 2014.17
        sim_dat = biomass_df.loc[sim_months]
        # total biomass = aboveground live + standing dead
        sim_series = sim_dat.aglivc + sim_dat.stdedc
        # 8 rows per site: 4 empirical + 4 simulated observations
        sum_dict['site'].extend([site_name] * 8)
        sum_dict['biomass'].extend(emp_series)
        sum_dict['sim_vs_emp'].extend(['empirical'] * 4)
        sum_dict['date'].extend(emp_dates)
        sum_dict['biomass'].extend(sim_series)
        sum_dict['sim_vs_emp'].extend(['simulated'] * 4)
        sum_dict['date'].extend(sim_series.index)
    sum_df = pandas.DataFrame(sum_dict)
    sum_df.to_csv(save_as)
    print "{} sites succeeded".format(succeeded)
    print "these sites failed: {}".format(failed)
def calc_n_mult(forage_args, target):
    """Calculate N multiplier for a grass to achieve target cp content.

    Runs the forage model once with N_multiplier = 1, measures the resulting
    crude protein content of green biomass, and writes the multiplier needed
    to reach the target back into the grass csv.

    Parameters:
        forage_args (dict): arguments for the forage model; must include
            'grass_csv', 'outdir', 'start_year', 'start_month', 'num_months'
        target (float): target crude protein content, supplied as a float
            between 0 and 1

    Side effects:
        launches the forage model; overwrites the 'N_multiplier' column in
        forage_args['grass_csv']

    Returns:
        None
    """
    # verify that N multiplier is initially set to 1
    grass_df = pd.read_csv(forage_args['grass_csv'])
    current_value = grass_df.iloc[0].N_multiplier
    assert current_value == 1, "Initial value for N multiplier must be 1"
    # launch model to get initial crude protein content
    forage.execute(forage_args)
    # find calendar month/year of the final simulated month
    final_month = forage_args[u'start_month'] + forage_args['num_months'] - 1
    if final_month > 12:
        mod = final_month % 12
        if mod == 0:
            month = 12
            # floor division keeps integer semantics on Python 2 and 3
            year = (final_month // 12) + forage_args[u'start_year'] - 1
        else:
            month = mod
            year = (final_month // 12) + forage_args[u'start_year']
    else:
        month = final_month
        # the run ends within its first calendar year; the previous revision
        # referenced an undefined name `step` here (NameError)
        year = forage_args[u'start_year']
    intermediate_dir = os.path.join(
        forage_args['outdir'],
        'CENTURY_outputs_m%d_y%d' % (month, year))
    grass_label = grass_df.iloc[0].label
    sim_output = os.path.join(intermediate_dir, '{}.lis'.format(grass_label))
    # calculate n multiplier to achieve target
    first_year = forage_args['start_year']
    last_year = year
    outputs = cent.read_CENTURY_outputs(sim_output, first_year, last_year)
    outputs.drop_duplicates(inplace=True)
    # crude protein of green biomass: aboveground live N over live C
    cp_green = np.mean(outputs.aglive1 / outputs.aglivc)
    n_mult = '%.2f' % (target / cp_green)
    # edit grass csv to reflect calculated n_mult
    grass_df.N_multiplier = grass_df.N_multiplier.astype(float)
    # DataFrame.set_value was deprecated and removed; .loc scalar assignment
    # is the supported equivalent
    grass_df.loc[0, 'N_multiplier'] = float(n_mult)
    grass_df = grass_df.set_index('label')
    grass_df.to_csv(forage_args['grass_csv'])
def get_biomass_from_Century_outputs(outdir, date):
    """Retrieve biomass from model outputs on the specified date.  Date
    must be in "Century format", e.g. 2012.00 is December of 2011."""
    # locate the most recently written results folder, skipping spin-up
    candidates = [
        d for d in os.listdir(outdir)
        if os.path.isdir(os.path.join(outdir, d))]
    candidates.remove('CENTURY_outputs_spin_up')
    # folder names end in the 4-digit year; keep only the latest year
    latest_year = max(d[-4:] for d in candidates)
    same_year = [d for d in candidates if d.endswith(latest_year)]
    pattern = 'CENTURY_outputs_m(.+?)_y{}'.format(latest_year)
    latest_month = max(
        int(re.search(pattern, d).group(1)) for d in same_year)
    latest_folder = os.path.join(
        outdir,
        'CENTURY_outputs_m{}_y{}'.format(latest_month, latest_year))
    lis_files = [
        f for f in os.listdir(latest_folder) if f.endswith('.lis')]
    results_path = os.path.join(latest_folder, lis_files[0])
    # read a window of +/- one year around the requested date
    outputs = cent.read_CENTURY_outputs(
        results_path, math.floor(date) - 1, math.ceil(date) + 1)
    outputs.drop_duplicates(inplace=True)
    # aboveground live carbon and standing dead carbon on the target date
    return outputs.loc[date, 'aglivc'], outputs.loc[date, 'stdedc']
def summarize_calc_schedules(
        site_list, n_months, input_dir, century_dir, out_dir, raw_file,
        summary_file):
    """Summarize grazing schedule estimated by back-calculate management.

    Access the ending schedules for sites where grazing pressure was
    estimated by the back-calculate management routine. Summarize the
    biomass removed by herbivores according to the estimated schedules.

    Parameters:
        site_list (dict): a dictionary containing information about the
            sites; this must be the same dictionary that was supplied to the
            back-calculate management routine
        n_months (int): number of months that the back-calculate management
            routine was allowed to potentially modify
        input_dir (string): path to directory containing inputs to the beta
            rangeland model for each site, for example schedule and
            <site>.100 files
        century_dir (string): path to directory containing Century
            executable and supporting files, such as graz.100
        out_dir (string): path to directory where the results of the back-
            calculate management routine were written
        raw_file (string): path to csv file where results table should be
            written, including biomass removed in each month at each site
        summary_file (string): path to csv file where summary results table
            should be written, including average biomass removed at each
            site across months

    Side effects:
        writes a csv table to `raw_file` and `summary_file`

    Returns:
        None
    """
    df_list = list()
    for site in site_list:
        site_name = site['name']
        empirical_date = site['date']
        site_dir = os.path.join(out_dir, '{}'.format(site_name))
        result_csv = os.path.join(
            site_dir, 'modify_management_summary_{}.csv'.format(site_name))
        res_df = pd.read_csv(result_csv)
        sch_files = [f for f in os.listdir(site_dir) if f.endswith('.sch')]
        sch_iter_list = [
            int(re.search('{}_{}(.+?).sch'.format(
                site_name, site_name), f).group(1))
            for f in sch_files]
        if len(sch_iter_list) == 0:
            # no schedule modification was needed
            final_sch = os.path.join(input_dir, '{}.sch'.format(site_name))
        else:
            final_sch_iter = max(sch_iter_list)
            final_sch = os.path.join(
                site_dir, '{}_{}{}.sch'.format(
                    site_name, site_name, final_sch_iter))
        # read schedule file, collect months where grazing was scheduled
        schedule_df = cent.read_block_schedule(final_sch)
        for i in range(0, schedule_df.shape[0]):
            start_year = schedule_df.loc[i, 'block_start_year']
            last_year = schedule_df.loc[i, 'block_end_year']
            if empirical_date > start_year and empirical_date <= last_year:
                break
        relative_empirical_year = int(
            math.floor(empirical_date) - start_year + 1)
        empirical_month = int(
            round((empirical_date - float(math.floor(empirical_date))) * 12))
        if empirical_month == 0:
            empirical_month = 12
            relative_empirical_year = relative_empirical_year - 1
        first_rel_month, first_rel_year = cent.find_first_month_and_year(
            n_months, empirical_month, relative_empirical_year)
        first_abs_year = first_rel_year + start_year - 1
        # find months where grazing took place prior to empirical date
        graz_schedule = cent.read_graz_level(final_sch)
        block = graz_schedule.loc[
            (graz_schedule["block_end_year"] == last_year),
            ['relative_year', 'month', 'grazing_level', 'year']]
        empirical_year = block.loc[
            (block['relative_year'] == relative_empirical_year) &
            (block['month'] <= empirical_month), ]
        intervening_years = block.loc[
            (block['relative_year'] < relative_empirical_year) &
            (block['relative_year'] > first_rel_year), ]
        first_year = block.loc[
            (block['relative_year'] == first_rel_year) &
            (block['month'] >= first_rel_month), ]
        history = pd.concat([first_year, intervening_years, empirical_year])
        if len(history) > 0:
            # collect % biomass removed for these months
            grz_files = [
                f for f in os.listdir(site_dir) if f.startswith('graz')]
            if len(grz_files) > 0:
                grz_iter_list = [
                    int(re.search(
                        'graz_{}(.+?).100'.format(site_name), f).group(1))
                    for f in grz_files]
                final_iter = max(grz_iter_list)
                final_grz = os.path.join(
                    site_dir, 'graz_{}{}.100'.format(site_name, final_iter))
            else:
                final_grz = os.path.join(century_dir, 'graz.100')
            grz_levels = history.grazing_level.unique()
            grz_level_list = [{'label': level} for level in grz_levels]
            for grz in grz_level_list:
                flgrem, fdgrem = retrieve_grazing_params(
                    final_grz, grz['label'])
                grz['flgrem'] = flgrem
                grz['fdgrem'] = fdgrem
            # fill in history with months where no grazing took place
            # NOTE: this and everything below must stay inside the
            # `len(history) > 0` guard -- grz_level_list is only defined here
            history = cent.fill_schedule(
                history, first_rel_year, first_rel_month,
                relative_empirical_year, empirical_month)
            history['year'] = history['relative_year'] + start_year - 1
            history['perc_live_removed'] = 0.
            history['perc_dead_removed'] = 0.
            if len(grz_level_list) > 0:
                for grz in grz_level_list:
                    # DataFrame.ix was removed from pandas; label-based .loc
                    # assignment is the supported equivalent
                    history.loc[
                        history.grazing_level == grz['label'],
                        'perc_live_removed'] = grz['flgrem']
                    history.loc[
                        history.grazing_level == grz['label'],
                        'perc_dead_removed'] = grz['fdgrem']
            # collect biomass for these months
            history['date'] = history.year + history.month / 12.0
            history = history.round({'date': 2})
            final_sim = int(res_df.iloc[len(res_df) - 1].Iteration)
            output_file = os.path.join(
                site_dir, 'CENTURY_outputs_iteration{}'.format(final_sim),
                '{}.lis'.format(site_name))
            biomass_df = cent.read_CENTURY_outputs(
                output_file, first_abs_year, math.floor(empirical_date))
            biomass_df['time'] = biomass_df.index
            sum_df = history.merge(
                biomass_df, left_on='date', right_on='time', how='inner')
            sum_df['site'] = site_name
            df_list.append(sum_df)
    summary_df = pd.concat(df_list)
    summary_df['live_rem'] = summary_df.aglivc * summary_df.perc_live_removed
    summary_df['dead_rem'] = summary_df.stdedc * summary_df.perc_dead_removed
    summary_df['total_rem'] = summary_df.live_rem + summary_df.dead_rem
    # indexing a groupby with a tuple of columns was deprecated/removed;
    # a list of column labels is the supported form
    rem_means = summary_df.groupby(by='site')[
        ['total_rem', 'live_rem', 'dead_rem']].mean()
    rem_means.to_csv(summary_file)
    summary_df.to_csv(raw_file)
def back_calculate_management(
        site, input_dir, century_dir, out_dir, fix_file, n_months, vary,
        live_or_total, threshold, max_iterations, template_level):
    """Back-calculate grazing history at a site to match empirical biomass.

    This function uses Century to predict biomass at the site on the
    empirical measurement date, and iteratively increases or decreases
    herbivore grazing pressure prior to the empirical measurement date until
    modeled biomass on the empirical measurement date matches empirical
    biomass, or until a certain number of iterations are completed.

    Parameters:
        site (dict): dictionary describing the empirical measurement that
            the routine should match. This dictionary must contain the
            following keys:
            - biomass: empirical biomass, in grams per square meter
            - date: empirical measurement date, in decimal format with two
              decimal places. For example, 2012.00 designates December 2011;
              2012.08 designates January 2012
            - name: site identifier. This must be matched to inputs to the
              beta rangeland model for the site, such as the schedule and
              <site>.100 files.
        input_dir (string): path to directory containing inputs to the beta
            rangeland model for each site, for example schedule and
            <site>.100 files
        century_dir (string): path to directory containing Century
            executable and supporting files, such as graz.100
        out_dir (string): path to directory where outputs from the routine
            should be written
        fix_file (string): basename of the Century fix file that should be
            used for the Century model run. This file must be located in
            `century_dir`.
        n_months (int): number of months prior to the empirical sampling
            date that the back-calculate management routine should
            potentially modify
        vary (string): how should the grazing history be changed? one of
            'intensity', 'schedule', or 'both'
        live_or_total (string): match live, or live+standing dead biomass?
            one of 'live' or 'total'
        threshold (float): maximum difference between simulated and
            empirical biomass, in grams per square m, to consider the match
            successful
        max_iterations (int): maximum number of times to run Century with
            modified grazing schedule before abandoning the attempt
        template_level (string): an existing grazing code in graz.100 to use
            as the basis for modifed grazing events, giving the set of
            grazing-related Century parameters (e.g., gfcret, gret(1),
            grzeff)

    Side effects:
        Creates output files in `out_dir`

    Returns:
        None
    """
    cent.set_century_directory(century_dir)
    empirical_biomass = site['biomass']
    empirical_date = site['date']
    schedule = site['name'] + '.sch'
    schedule_file = os.path.join(input_dir, schedule)
    site_file, weather_file = cent.get_site_weather_files(
        schedule_file, input_dir)
    graz_file = os.path.join(century_dir, "graz.100")
    output = site['name']
    # check that last block in schedule file includes >= n_months before
    # empirical_date
    cent.check_schedule(schedule_file, n_months, empirical_date)
    # write CENTURY batch file for spin-up simulation
    hist_bat = os.path.join(input_dir, (site['name'] + '_hist.bat'))
    hist_schedule = site['name'] + '_hist.sch'
    hist_output = site['name'] + '_hist'
    cent.write_century_bat(
        input_dir, hist_bat, hist_schedule, hist_output, fix_file,
        'outvars.txt')
    # write CENTURY bat for extend simulation
    extend_bat = os.path.join(input_dir, (site['name'] + '.bat'))
    extend = site['name'] + '_hist'
    output_lis = output + '.lis'
    cent.write_century_bat(
        input_dir, extend_bat, schedule, output_lis, fix_file,
        'outvars.txt', extend)
    # make a copy of the original graz params and schedule file, so they
    # can be restored in the finally block below
    shutil.copyfile(graz_file, os.path.join(century_dir, 'graz_orig.100'))
    shutil.copyfile(schedule_file, os.path.join(input_dir, 'sch_orig.sch'))
    spin_up_outputs = [
        site['name'] + '_hist_log.txt', site['name'] + '_hist.lis']
    century_outputs = [output + '_log.txt', output + '.lis', output + '.bin']
    # move CENTURY run files to CENTURY dir
    e_schedule = os.path.join(input_dir, site['name'] + '.sch')
    h_schedule = os.path.join(input_dir, site['name'] + '_hist.sch')
    file_list = [
        hist_bat, extend_bat, e_schedule, h_schedule, site_file,
        weather_file]
    # 'NA' marks a missing weather file; skip copying it in that case
    if weather_file == 'NA':
        file_list.remove(weather_file)
    for file in file_list:
        shutil.copyfile(file, os.path.join(
            century_dir, os.path.basename(file)))
    run_schedule = os.path.join(century_dir, os.path.basename(e_schedule))
    os.remove(hist_bat)
    os.remove(extend_bat)
    # run CENTURY for spin-up
    hist_bat_run = os.path.join(century_dir, (site['name'] + '_hist.bat'))
    century_bat_run = os.path.join(century_dir, (site['name'] + '.bat'))
    cent.launch_CENTURY_subprocess(hist_bat_run)
    # save copies of CENTURY outputs, but remove from CENTURY dir
    intermediate_dir = os.path.join(out_dir, 'CENTURY_outputs_spin_up')
    if not os.path.exists(intermediate_dir):
        os.makedirs(intermediate_dir)
    for file in spin_up_outputs:
        shutil.copyfile(
            os.path.join(century_dir, file),
            os.path.join(intermediate_dir, file))
        os.remove(os.path.join(century_dir, file))
    # start summary csv
    summary_csv = os.path.join(
        out_dir, 'modify_management_summary_' + site['name'] + '.csv')
    try:
        # 'wb' mode for csv.writer is the Python 2 idiom (this module uses
        # xrange elsewhere); on Python 3 this would need 'w', newline=''
        with open(summary_csv, 'wb') as summary_file:
            writer = csv.writer(summary_file, delimiter=',')
            row = ['Iteration', 'Empirical_biomass', 'Simulated_biomass']
            writer.writerow(row)
            # NOTE: `iter` shadows the builtin within this loop
            for iter in xrange(max_iterations):
                row = [iter]
                row.append(empirical_biomass)
                # call CENTURY from the batch file
                cent.launch_CENTURY_subprocess(century_bat_run)
                intermediate_dir = os.path.join(
                    out_dir, 'CENTURY_outputs_iteration%d' % iter)
                if not os.path.exists(intermediate_dir):
                    os.makedirs(intermediate_dir)
                for file in century_outputs:
                    shutil.copyfile(
                        os.path.join(century_dir, file),
                        os.path.join(intermediate_dir, file))
                    os.remove(os.path.join(century_dir, file))
                # get simulated biomass
                output_file = os.path.join(intermediate_dir, output_lis)
                biomass_df = cent.read_CENTURY_outputs(
                    output_file, math.floor(empirical_date) - 1,
                    math.ceil(empirical_date) + 1)
                biomass_df.drop_duplicates(inplace=True)
                if live_or_total == 'live':
                    simulated_biomass = biomass_df.loc[
                        empirical_date, 'aglivc']
                elif live_or_total == 'total':
                    simulated_biomass = biomass_df.loc[
                        empirical_date, 'total']
                else:
                    er = """Biomass output from CENTURY must be specified as 'live' or 'total'"""
                    raise ValueError(er)
                row.append(simulated_biomass)
                writer.writerow(row)
                # check difference between simulated and empirical biomass
                if float(abs(
                        simulated_biomass - empirical_biomass)) <= threshold:
                    print("Biomass target reached!")
                    break  # success!
                else:
                    # these are the guts of the routine, where either the
                    # grazing parameters file or the schedule file used in
                    # the CENTURY call are changed
                    diff = simulated_biomass - empirical_biomass
                    increase_intensity = 0
                    if diff > 0:
                        # simulated biomass greater than empirical: grazing
                        # pressure must increase
                        increase_intensity = 1
                    if vary == 'intensity':
                        # change % biomass removed in grazing parameters file
                        # TODO the grazing level here should reflect what's
                        # in the schedule file
                        success = cent.modify_intensity(
                            increase_intensity, graz_file, template_level,
                            out_dir, site['name'] + str(iter))
                        if not success:
                            print("""intensity cannot be modified, %d iterations completed""" % (iter - 1))
                            break
                    elif vary == 'schedule':
                        # add or remove scheduled grazing events
                        target_dict = cent.find_target_month(
                            increase_intensity, run_schedule, empirical_date,
                            n_months)
                        cent.modify_schedule(
                            run_schedule, increase_intensity, target_dict,
                            template_level, out_dir, site['name'] + str(iter))
                    elif vary == 'both':
                        target_dict = cent.find_target_month(
                            increase_intensity, run_schedule, empirical_date,
                            n_months)
                        if target_dict:
                            # there are opportunities to modify the schedule
                            cent.modify_schedule(
                                run_schedule, increase_intensity,
                                target_dict, template_level, out_dir,
                                site['name'] + str(iter))
                        else:
                            # no opportunities to modify the schedule exist
                            success = cent.modify_intensity(
                                increase_intensity, graz_file,
                                template_level, out_dir,
                                site['name'] + str(iter))
                            if not success:
                                print("""intensity cannot be modified, %d iterations completed""" % (iter - 1))
                                break
            # loop variable survives the loop in Python; detect exhaustion
            if iter == (max_iterations - 1):
                print('maximum iterations performed, target not reached')
    finally:
        # replace modified grazing parameters and schedule files with
        # original files
        os.remove(graz_file)
        shutil.copyfile(os.path.join(century_dir, 'graz_orig.100'), graz_file)
        os.remove(os.path.join(century_dir, 'graz_orig.100'))
        os.remove(schedule_file)
        shutil.copyfile(
            os.path.join(input_dir, 'sch_orig.sch'), schedule_file)
        files_to_remove = [
            os.path.join(century_dir, os.path.basename(f))
            for f in file_list]
        for file in files_to_remove:
            os.remove(file)
        os.remove(os.path.join(input_dir, 'sch_orig.sch'))
        os.remove(os.path.join(century_dir, site['name'] + '_hist.bin'))
def run_simulations():
    """Run the coupled forage/CENTURY simulation for each configured site.

    For each site: runs a CENTURY spin-up, then steps month by month through
    a stocking-density schedule, computing herbivore diet selection and
    weight gain with the forage model and feeding consumed biomass back to
    CENTURY as grazing events. Writes per-site summary_results.csv.

    NOTE(review): paths, breed, and site list are hard-coded below;
    Windows-only (cmd.exe / Popen batch files).
    """
    century_dir = 'C:/Users/Ginger/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Century46_PC_Jan-2014'
    fix_file = 'drytrpfi.100'
    graz_file = os.path.join(century_dir, "graz.100")
    site_list = ['Research', 'Loidien']  #, 'Rongai', 'Kamok']
    input_dir = "C:/Users/Ginger/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/input"
    outer_dir = "C:/Users/Ginger/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Output/Stocking_density_test"
    prop_legume = 0
    template_level = 'GL'
    herb_class_weights = "C:/Users/Ginger/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/Boran_weights.csv"
    sd_dir = 'C:/Users/Ginger/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/OPC_stocking_density'
    breed = 'Boran'
    steepness = 1.
    latitude = 0
    supp_available = 0
    # breed-specific animal parameters (Freer et al. model)
    FParam = FreerParam.FreerParam(forage.get_general_breed(breed))
    # zeroed supplement: no supplemental feeding in this experiment
    supp = forage.Supplement(FParam, 0, 0, 0, 0, 0, 0)
    forage.set_time_step('month')
    add_event = 1
    grass_file = "C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/Forage_model/model_inputs/grass.csv"
    grass = (pandas.read_csv(grass_file)).to_dict(orient='records')[0]
    grass['DMD_green'] = 0.64
    grass['DMD_dead'] = 0.64
    grass['cprotein_green'] = 0.1
    grass['cprotein_dead'] = 0.1
    for site in site_list:
        spin_up_outputs = [site + '_hist_log.txt', site + '_hist.lis']
        century_outputs = [site + '_log.txt', site + '.lis', site + '.bin']
        filename = 'average_animals_%s_2km_per_ha.csv' % site
        stocking_density_file = os.path.join(sd_dir, filename)
        sd_df = pandas.read_table(stocking_density_file, sep=',')
        outdir = os.path.join(outer_dir, site)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        # write CENTURY bat for spin-up simulation
        hist_bat = os.path.join(input_dir, (site + '_hist.bat'))
        hist_schedule = site + '_hist.sch'
        hist_output = site + '_hist'
        cent.write_century_bat(input_dir, hist_bat, hist_schedule,
                               hist_output, fix_file, 'outvars.txt')
        # write CENTURY bat for extend simulation
        extend_bat = os.path.join(input_dir, site + '.bat')
        schedule = site + '.sch'
        output = site
        extend = site + '_hist'
        cent.write_century_bat(century_dir, extend_bat, schedule, output,
                               fix_file, 'outvars.txt', extend)
        # move CENTURY run files to CENTURY dir
        site_file = os.path.join(input_dir, site + '.100')
        weather_file = os.path.join(input_dir, site + '.wth')
        e_schedule = os.path.join(input_dir, site + '.sch')
        h_schedule = os.path.join(input_dir, site + '_hist.sch')
        file_list = [
            hist_bat, extend_bat, e_schedule, h_schedule, site_file,
            weather_file]
        for file in file_list:
            shutil.copyfile(file, os.path.join(
                century_dir, os.path.basename(file)))
        # make a copy of the original graz params and schedule file, so the
        # originals can be restored in the finally block below
        shutil.copyfile(graz_file, os.path.join(century_dir,
                                                'graz_orig.100'))
        label = os.path.basename(e_schedule)[:-4]
        copy_name = label + '_orig.sch'
        shutil.copyfile(e_schedule, os.path.join(input_dir, copy_name))
        # run CENTURY for spin-up up to start_year and start_month
        hist_bat = os.path.join(century_dir, site + '_hist.bat')
        century_bat = os.path.join(century_dir, site + '.bat')
        p = Popen(["cmd.exe", "/c " + hist_bat], cwd=century_dir)
        stdout, stderr = p.communicate()
        p = Popen(["cmd.exe", "/c " + century_bat], cwd=century_dir)
        stdout, stderr = p.communicate()
        # save copies of CENTURY outputs, but remove from CENTURY dir
        intermediate_dir = os.path.join(outdir, 'CENTURY_outputs_spin_up')
        if not os.path.exists(intermediate_dir):
            os.makedirs(intermediate_dir)
        to_move = century_outputs + spin_up_outputs
        for file in to_move:
            shutil.copyfile(os.path.join(century_dir, file),
                            os.path.join(intermediate_dir, file))
            os.remove(os.path.join(century_dir, file))
        grass_list = [grass]
        # results_dict accumulates one value per simulated month per column
        results_dict = {'year': [], 'month': []}
        herbivore_input = (pandas.read_csv(herb_class_weights).to_dict(
            orient='records'))
        herbivore_list = []
        for h_class in herbivore_input:
            results_dict[h_class['label'] + '_gain_kg'] = []
            results_dict[h_class['label'] + '_offtake'] = []
        results_dict['milk_prod_kg'] = []
        for grass in grass_list:
            results_dict[grass['label'] + '_green_kgha'] = []
            results_dict[grass['label'] + '_dead_kgha'] = []
        results_dict['total_offtake'] = []
        results_dict['stocking_density'] = []
        try:
            for row in xrange(len(sd_df)):
                # rebuild the herd each month from the input weights
                herbivore_list = []
                for h_class in herbivore_input:
                    herd = forage.HerbivoreClass(
                        FParam, breed, h_class['weight'], h_class['sex'],
                        h_class['age'], h_class['stocking_density'],
                        h_class['label'], Wbirth=24)
                    herd.update(FParam, 0, 0)
                    herbivore_list.append(herd)
                # override stocking density with this month's scheduled value
                for h_class in herbivore_list:
                    h_class.stocking_density = sd_df.iloc[row][h_class.label]
                total_SD = forage.calc_total_stocking_density(
                    herbivore_list)
                results_dict['stocking_density'].append(total_SD)
                siteinfo = forage.SiteInfo(total_SD, steepness, latitude)
                month = sd_df.iloc[row].month
                year = sd_df.iloc[row].year
                suf = '%d-%d' % (month, year)
                # approximate day-of-year from month index
                DOY = month * 30
                # get biomass and crude protein for each grass type from
                # CENTURY
                output_file = os.path.join(intermediate_dir, site + '.lis')
                outputs = cent.read_CENTURY_outputs(output_file, year,
                                                    year + 2)
                target_month = cent.find_prev_month(year, month)
                grass['prev_g_gm2'] = grass['green_gm2']
                grass['prev_d_gm2'] = grass['dead_gm2']
                grass['green_gm2'] = outputs.loc[target_month, 'aglivc']
                grass['dead_gm2'] = outputs.loc[target_month, 'stdedc']
                grass['cprotein_green'] = (
                    outputs.loc[target_month, 'aglive1'] /
                    outputs.loc[target_month, 'aglivc'])
                grass['cprotein_dead'] = (
                    outputs.loc[target_month, 'stdede1'] /
                    outputs.loc[target_month, 'stdedc'])
                if row == 0:
                    available_forage = forage.calc_feed_types(grass_list)
                else:
                    available_forage = forage.update_feed_types(
                        grass_list, available_forage)
                results_dict['year'].append(year)
                results_dict['month'].append(month)
                for feed_type in available_forage:
                    results_dict[
                        feed_type.label + '_' + feed_type.green_or_dead +
                        '_kgha'].append(feed_type.biomass)
                siteinfo.calc_distance_walked(FParam, available_forage)
                for feed_type in available_forage:
                    feed_type.calc_digestibility_from_protein()
                total_biomass = forage.calc_total_biomass(available_forage)
                # Initialize containers to track forage consumed across
                # herbivore classes
                total_intake_step = 0.
                total_consumed = {}
                for feed_type in available_forage:
                    label_string = ';'.join(
                        [feed_type.label, feed_type.green_or_dead])
                    total_consumed[label_string] = 0.
                for herb_class in herbivore_list:
                    max_intake = herb_class.calc_max_intake(FParam)
                    if herb_class.Z < FParam.CR7:
                        ZF = 1. + (FParam.CR7 - herb_class.Z)
                    else:
                        ZF = 1.
                    if herb_class.stocking_density > 0:
                        adj_forage = forage.calc_adj_availability(
                            available_forage, herb_class.stocking_density)
                    else:
                        adj_forage = list(available_forage)
                    diet = forage.diet_selection_t2(
                        ZF, prop_legume, supp_available, supp, max_intake,
                        FParam, adj_forage)
                    diet_interm = forage.calc_diet_intermediates(
                        FParam, diet, supp, herb_class, siteinfo,
                        prop_legume, DOY)
                    # re-select diet if intake was constrained below maximum
                    reduced_max_intake = forage.check_max_intake(
                        FParam, diet, diet_interm, herb_class, max_intake)
                    if reduced_max_intake < max_intake:
                        diet = forage.diet_selection_t2(
                            ZF, prop_legume, supp_available, supp,
                            reduced_max_intake, FParam, adj_forage)
                        diet_interm = forage.calc_diet_intermediates(
                            FParam, diet, supp, herb_class, siteinfo,
                            prop_legume, DOY)
                    total_intake_step += (
                        forage.convert_daily_to_step(diet.If) *
                        herb_class.stocking_density)
                    if herb_class.sex == 'lac_female':
                        milk_production = forage.check_milk_production(
                            FParam, diet_interm)
                        milk_kg_day = forage.calc_milk_yield(
                            FParam, milk_production)
                    delta_W = forage.calc_delta_weight(
                        FParam, diet, diet_interm, supp, herb_class)
                    delta_W_step = forage.convert_daily_to_step(delta_W)
                    herb_class.update(FParam, delta_W_step,
                                      forage.find_days_per_step())
                    if herb_class.stocking_density > 0:
                        results_dict[
                            herb_class.label + '_gain_kg'].append(
                            delta_W_step)
                        results_dict[
                            herb_class.label + '_offtake'].append(diet.If)
                    else:
                        results_dict[
                            herb_class.label + '_gain_kg'].append('NA')
                        results_dict[
                            herb_class.label + '_offtake'].append('NA')
                    if herb_class.sex == 'lac_female':
                        results_dict['milk_prod_kg'].append(
                            milk_kg_day * 30.)
                    # after have performed max intake check, we have the
                    # final diet selected
                    # calculate percent live and dead removed for each grass
                    # type
                    consumed_by_class = forage.calc_percent_consumed(
                        available_forage, diet, herb_class.stocking_density)
                    forage.sum_percent_consumed(total_consumed,
                                                consumed_by_class)
                results_dict['total_offtake'].append(total_intake_step)
                # send to CENTURY for this month's scheduled grazing event
                date = year + float('%.2f' % (month / 12.))
                schedule = os.path.join(century_dir, site + '.sch')
                target_dict = cent.find_target_month(add_event, schedule,
                                                     date, 1)
                if target_dict == 0:
                    er = "Error: no opportunities exist to add grazing event"
                    raise Exception(er)
                new_code = cent.add_new_graz_level(
                    grass, total_consumed, graz_file, template_level,
                    outdir, suf)
                cent.modify_schedule(schedule, add_event, target_dict,
                                     new_code, outdir, suf)
                # call CENTURY from the batch file
                century_bat = os.path.join(century_dir, site + '.bat')
                p = Popen(["cmd.exe", "/c " + century_bat], cwd=century_dir)
                stdout, stderr = p.communicate()
                # save copies of CENTURY outputs, but remove from CENTURY
                # dir
                intermediate_dir = os.path.join(
                    outdir, 'CENTURY_outputs_m%d_y%d' % (month, year))
                if not os.path.exists(intermediate_dir):
                    os.makedirs(intermediate_dir)
                for file in century_outputs:
                    shutil.copyfile(os.path.join(century_dir, file),
                                    os.path.join(intermediate_dir, file))
                    os.remove(os.path.join(century_dir, file))
            # remove files from CENTURY directory
        finally:
            # replace graz params used by CENTURY with original file
            os.remove(graz_file)
            shutil.copyfile(os.path.join(century_dir, 'graz_orig.100'),
                            graz_file)
            os.remove(os.path.join(century_dir, 'graz_orig.100'))
            files_to_remove = [
                os.path.join(century_dir, os.path.basename(f))
                for f in file_list]
            for file in files_to_remove:
                os.remove(file)
            os.remove(os.path.join(century_dir, site + '_hist.bin'))
        filled_dict = forage.fill_dict(results_dict, 'NA')
        df = pandas.DataFrame(filled_dict)
        df.to_csv(os.path.join(outdir, 'summary_results.csv'))
def summarize_live_dead(save_as, input_dir, n_months):
    """Summarize live:dead ratio in the last n_months months of a schedule
    estimated by back-calc management routine.

    Parameters:
        save_as (string): path to csv file where the summary table should
            be written
        input_dir (string): path to directory containing model inputs,
            including the unmodified schedule file for each site
        n_months (int): number of months at the end of the schedule to
            summarize

    Side effects:
        writes a csv table to `save_as`

    Returns:
        None
    """
    sum_dict = {
        'site': [], 'avg_ratio_live_dead': [], 'avg_live': [],
        'avg_dead': []}
    out_dir = r"C:\Users\Ginger\Dropbox\NatCap_backup\Forage_model\Forage_model\model_results\OPC\back_calc_match_last_measurement"
    site_list = sites_definition('last')
    for site in site_list:
        site_name = site['name']
        empirical_date = site['date']
        site_dir = os.path.join(out_dir, site_name)
        sch_files = [f for f in os.listdir(site_dir) if f.endswith('.sch')]
        sch_iter_list = [
            int(re.search('{}_{}(.+?).sch'.format(
                site_name, site_name), f).group(1))
            for f in sch_files]
        if len(sch_iter_list) == 0:
            # no schedule modification was needed
            final_sch = os.path.join(input_dir, '{}.sch'.format(site_name))
        else:
            final_sch_iter = max(sch_iter_list)
            final_sch = os.path.join(
                site_dir, '{}_{}{}.sch'.format(
                    site_name, site_name, final_sch_iter))
        # read schedule file to locate the block containing empirical_date
        schedule_df = cent.read_block_schedule(final_sch)
        for i in range(0, schedule_df.shape[0]):
            start_year = schedule_df.loc[i, 'block_start_year']
            last_year = schedule_df.loc[i, 'block_end_year']
            if empirical_date > start_year and empirical_date <= last_year:
                break
        relative_empirical_year = int(
            math.floor(empirical_date) - start_year + 1)
        empirical_month = int(
            round((empirical_date - float(math.floor(empirical_date))) * 12))
        if empirical_month == 0:
            empirical_month = 12
            relative_empirical_year = relative_empirical_year - 1
        first_rel_month, first_rel_year = cent.find_first_month_and_year(
            n_months, empirical_month, relative_empirical_year)
        first_abs_year = first_rel_year + start_year - 1
        result_csv = os.path.join(
            site_dir, 'modify_management_summary_{}.csv'.format(site_name))
        res_df = pandas.read_csv(result_csv)
        final_sim = int(res_df.iloc[len(res_df) - 1].Iteration)
        output_file = os.path.join(
            site_dir, 'CENTURY_outputs_iteration{}'.format(final_sim),
            '{}.lis'.format(site_name))
        biomass_df = cent.read_CENTURY_outputs(
            output_file, first_abs_year, math.floor(empirical_date))
        biomass_df['time'] = biomass_df.index
        biomass_df = biomass_df.loc[biomass_df['time'] <= empirical_date]
        # keep the last n_months monthly records; the previous revision
        # hard-coded 24 here, contradicting the documented n_months window
        biomass_df = biomass_df.iloc[-n_months:]
        avg_live_dead = (biomass_df.aglivc / biomass_df.stdedc).mean()
        avg_live = biomass_df.aglivc.mean()
        avg_dead = biomass_df.stdedc.mean()
        sum_dict['site'].append(site_name)
        sum_dict['avg_ratio_live_dead'].append(avg_live_dead)
        sum_dict['avg_live'].append(avg_live)
        sum_dict['avg_dead'].append(avg_dead)
    sum_df = pandas.DataFrame(sum_dict)
    sum_df.to_csv(save_as)