def _populate_solution_land_allocation(self):
    """
    'AEZ Data'!A63:AD70
    Calculates solution-specific Drawdown land allocation using values from the 'allocation' directory.
    """
    df = pd.read_csv(LAND_CSV_PATH.joinpath('aez', 'solution_la_template.csv'), index_col=0)
    if self.ignore_allocation:
        self.soln_land_alloc_df = df.fillna(1)
        return
    else:
        df = df.fillna(0)
    for tmr in self.regimes:
        tmr_path = LAND_CSV_PATH.joinpath('allocation', to_filename(tmr))
        for col in df:
            if col.startswith('AEZ29'):  # this zone is not included in land allocation
                continue
            aez_path = tmr_path.joinpath(to_filename(col) + '.csv')
            la_df = pd.read_csv(aez_path, index_col=0)
            total_perc_allocated = la_df.loc[self.solution_name]['Total % allocated']
            if total_perc_allocated > 0:
                df.at[tmr, col] = total_perc_allocated
    self.soln_land_alloc_df = df
def _populate_solution_ocean_allocation(self):
    """
    'DEZ Data'!A63:AD70
    Calculates solution-specific Drawdown ocean allocation using values from the 'allocation' directory.
    """
    df = pd.read_csv(OCEAN_CSV_PATH.joinpath('dez', 'solution_oa_template.csv'), index_col=0)
    df = df.fillna(0)
    for tdr in self.regimes:
        tdr_path = OCEAN_CSV_PATH.joinpath('allocation', to_filename(tdr))
        for col in df:
            dez_path = tdr_path.joinpath(to_filename(col) + '.csv')
            oa_df = pd.read_csv(dez_path, index_col=0)
            total_perc_allocated = oa_df.loc[self.solution_name]['Total % allocated']
            if total_perc_allocated > 0:
                df.at[tdr, col] = total_perc_allocated
    self.soln_ocean_alloc_df = df
def get_tla_regime_and_region():
    """ Returns total land area df (rows = regions, columns = regimes) """
    total_land_dict = {}
    for tmr in THERMAL_MOISTURE_REGIMES:
        # first column holds total area per region in km2; divide by 10000 to convert to Mha
        df = pd.read_csv(datadir.joinpath('land', 'world', to_filename(tmr) + '.csv'),
                         index_col=0).iloc[:5, 0] / 10000
        total_land_dict[tmr] = df
    return pd.DataFrame(total_land_dict)
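# A minimal usage sketch (a hypothetical demo helper, not part of the original module). It
# assumes the land CSVs referenced above exist under datadir/land/world. The returned frame
# has the five main regions as rows, one column per thermal moisture regime, values in Mha.
def _demo_tla_regime_and_region():
    tla = get_tla_regime_and_region()
    print(tla.shape)          # (5, len(THERMAL_MOISTURE_REGIMES))
    print(tla.sum(axis=1))    # land area per region summed over regimes (Mha)
    print(tla.sum(axis=0))    # land area per regime summed over regions (Mha)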
def _populate_world_ocean_allocation(self):
    """
    'DEZ Data'!D353:AG610
    Combines world ocean area data with Drawdown's ocean allocation values.
    Creates a dict of DataFrames sorted by Thermal Dynamical Regime.
    """
    self.world_ocean_alloc_dict = {}
    for tdr in self.regimes:
        df = pd.read_csv(OCEAN_CSV_PATH.joinpath('world', to_filename(tdr) + '.csv'),
                         index_col=0).drop(columns='Total Area (Mha)')
        self.world_ocean_alloc_dict[tdr] = df.mul(self.soln_ocean_alloc_df.loc[tdr], axis=1)
def get_tla_regime_and_aez(region=None):
    """ Returns total land area df (rows = regimes, columns = AEZs) """
    total_land_dict = {}
    for tmr in THERMAL_MOISTURE_REGIMES:
        df = pd.read_csv(datadir.joinpath('land', 'world', to_filename(tmr) + '.csv'),
                         index_col=0).iloc[:5, 1:]
        if region is not None:
            # single region row, converted from km2 to Mha
            df = df.loc[region, :] / 10000
        else:
            # sum over all five main regions, converted from km2 to Mha
            df = df.sum(axis=0) / 10000
        total_land_dict[tmr] = df
    return pd.DataFrame(total_land_dict).T
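# A minimal usage sketch for get_tla_regime_and_aez (hypothetical demo, not in the original
# module). With no argument the frame holds world totals per regime and AEZ; passing a region
# name restricts it to that region. 'OECD90' is assumed here to be one of the region labels
# in the world CSVs.
def _demo_tla_regime_and_aez():
    world = get_tla_regime_and_aez()        # rows = regimes, columns = AEZs, world totals (Mha)
    print(world.sum(axis=1))                # total land per regime across all AEZs
    oecd90 = get_tla_regime_and_aez(region='OECD90')
    print(oecd90.sum(axis=1))               # total land per regime within that region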
def _populate_world_land_allocation(self):
    """
    'AEZ Data'!D353:AG610
    Combines world land area data with Drawdown's land allocation values.
    Creates a dict of DataFrames sorted by Thermal Moisture Region.
    """
    self.world_land_alloc_dict = {}
    for tmr in self.regimes:
        df = pd.read_csv(LAND_CSV_PATH.joinpath('world', to_filename(tmr) + '.csv'),
                         index_col=0).drop(columns='Total Area (km2)')
        # apply fixed world fraction to each region and convert km2 to Mha
        self.world_land_alloc_dict[tmr] = df.mul(self.soln_land_alloc_df.loc[tmr], axis=1) / 10000
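# The multiply in the two _populate_world_*_allocation methods broadcasts a per-regime
# allocation row (a Series indexed by zone) across the columns of the world-area DataFrame.
# A self-contained toy illustration of that pattern, using made-up numbers rather than
# Drawdown data:
def _demo_allocation_multiply():
    import pandas as pd
    world_area = pd.DataFrame({'AEZ1': [100.0, 200.0], 'AEZ2': [50.0, 80.0]},
                              index=['OECD90', 'Latin America'])
    soln_alloc = pd.Series({'AEZ1': 0.25, 'AEZ2': 0.0})  # fraction of each zone allocated
    allocated = world_area.mul(soln_alloc, axis=1)       # match Series index against columns
    print(allocated)  # AEZ1 scaled by 0.25, AEZ2 zeroed out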
def make_csvs(self):
    """ Makes csv versions of tables and stores in data/land/allocation """
    path = LAND_CSV_PATH if self.key == 'land' else OCEAN_CSV_PATH

    # Sanity check
    if os.listdir(path):
        ans = input('Overwrite existing csv files? y or n')
        if ans == 'n':
            return
        elif ans != 'y':
            print('Not a valid answer')
            return

    # check the DataFrames are loaded
    if self.df_dict is None:
        self.read_allocation_xls()

    # write CSVs
    for regime in self.regimes:
        filename = to_filename(regime)
        os.mkdir(path.joinpath(filename))
        for zone, df in self.df_dict[regime].items():
            df.to_csv(path.joinpath(filename, to_filename(zone) + '.csv'))
def total_land_array(compress_aezs=False):
    """
    Note: currently not used
    Build 3-D DataArray from World land data. Dimensions are:
        - region (the 5 main world regions)
        - aez (29 AEZ types)
        - tmr (6 Thermal Moisture Regimes)
    """
    import xarray as xr
    tmr_list = []
    for tmr in THERMAL_MOISTURE_REGIMES:
        df = pd.read_csv(datadir.joinpath('land', 'world', to_filename(tmr) + '.csv'),
                         index_col=0).loc[MAIN_REGIONS, :].iloc[:, 1:] / 10000
        if compress_aezs:
            # collapse individual AEZ columns into their broader land cover classes
            for land_cover, aez_codes in AEZ_LAND_COVER_MAP.items():
                df[land_cover] = df.loc[:, aez_codes].sum(axis=1)
                df.drop(columns=aez_codes, inplace=True)
            df.drop(columns=['AEZ29: All Barren Land'], inplace=True)
        tmr_list.append(xr.DataArray(df, dims=['region', 'aez']))
    array = xr.concat(tmr_list, dim=pd.Index(THERMAL_MOISTURE_REGIMES, name='tmr'))
    return array
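# A sketch of slicing the 3-D DataArray that total_land_array builds (hypothetical demo, not
# part of the original module; it assumes the world land CSVs are available and xarray is
# installed). Because xr.concat adds the new dimension first, dims come out as
# ('tmr', 'region', 'aez').
def _demo_total_land_array():
    arr = total_land_array()
    print(arr.dims)                                   # ('tmr', 'region', 'aez')
    print(arr.sum(dim='aez'))                         # land per regime and region over all AEZs (Mha)
    print(arr.sel(tmr=THERMAL_MOISTURE_REGIMES[0]))   # one regime as a region x aez slice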
def read_xls(self, csv_path=None, alt_vma=False):
    """
    Reads the whole Variable Meta-analysis xls sheet. Note this currently only works for
    LAND solutions.
    csv_path: (pathlib path object or str) If specified, will write CSVs to path for each table
    alt_vma: False = process the primary VMA sheet 'Variable Meta-analysis',
             True = process the alternate VMA sheet 'Variable Meta-analysis-DD' with fixed
             values for Average, High, and Low.
    """
    if alt_vma:
        sheetname = 'Variable Meta-analysis-DD'
        fixed_summary = True
    else:
        sheetname = 'Variable Meta-analysis'
        fixed_summary = False
    self._find_tables(sheetname=sheetname)

    df_dict = OrderedDict()
    for title, location in self.table_locations.items():
        df, use_weight, summary = self.read_single_table(
            source_id_cell=location, sheetname=sheetname, fixed_summary=fixed_summary)
        if df.empty:
            # in line with our policy of setting empty tables to None
            df_dict[title] = (None, False, (nan, nan, nan))
        else:
            df_dict[title] = (df, use_weight, summary)

    if csv_path is not None:
        idx = pd.Index(data=list(range(1, len(df_dict) + 1)), name='VMA number')
        info_df = pd.DataFrame(columns=['Filename', 'Title on xls', 'Has data?', 'Use weight?',
                                        'Fixed Mean', 'Fixed High', 'Fixed Low'], index=idx)
        i = 1
        for title, values in df_dict.items():
            table = values[0]
            use_weight = values[1]
            (average, high, low) = values[2]
            path_friendly_title = to_filename(title)
            row = {'Filename': path_friendly_title,
                   'Title on xls': title,
                   'Has data?': table is not None,
                   'Use weight?': use_weight,
                   'Fixed Mean': average,
                   'Fixed High': high,
                   'Fixed Low': low}
            info_df.loc[i, :] = row
            i += 1
            if table is not None:
                # drop optional columns that contain no data before writing the table's CSV
                for col in optional_columns:
                    if table.loc[:, col].isnull().all():
                        table.drop(labels=col, axis='columns', inplace=True)
                table.to_csv(os.path.join(csv_path, path_friendly_title + '.csv'), index=False)
        info_df.to_csv(os.path.join(csv_path, 'VMA_info.csv'))
    return df_dict
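# A usage sketch for read_xls (hypothetical; the VMAReader class name and its constructor
# argument are assumptions, not confirmed by this excerpt). It reads every VMA table from the
# workbook and, because csv_path is given, also writes one CSV per table plus VMA_info.csv.
def _demo_read_vma_tables(wb, out_dir):
    reader = VMAReader(wb)   # assumed: a reader class wrapping the opened workbook
    df_dict = reader.read_xls(csv_path=out_dir)
    for title, (table, use_weight, (avg, high, low)) in df_dict.items():
        n_rows = 0 if table is None else len(table)
        print(title, 'rows:', n_rows, 'use weight:', use_weight)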