Example #1
def use_andreas_map():
    obj = ShftCult(use_andreas=True, file_sc=constants.TIF_ANDREAS, skiprows=0)
    path_nc = obj.create_andreas_nc()
    pdb.set_trace()
    # Plot maps

    ds = util.open_or_die(path_nc)
    lat = ds.variables['latitude'][:]
    lon = ds.variables['longitude'][:]
    imgs_for_movie = plot.plot_maps_ts(
        path_nc,
        'shift_cult',
        lon,
        lat,
        out_path=
        'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\',
        save_name='shift_cult',
        xlabel='Shifting cultivation frequency on croplands',
        title='',
        land_bg=False,
        grid=True)
    plot.make_movie(
        imgs_for_movie,
        'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\',
        out_fname='shift_cult.gif')

    ncc = util.open_or_die(
        'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\andreas.nc'
    )
    crop_data = util.open_or_die(
        'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\LUH\\v0.3_historical\\states.nc'
    )

    tme = numpy.arange(0, 1165 + 1)
    mult = numpy.zeros(len(tme))
    mult_one = numpy.zeros(len(tme))
    cell_area = util.open_or_die(constants.path_GLM_carea)

    nc_data = ncc.variables['shift_cult'][int(1), :, :]
    all_one = numpy.copy(nc_data)
    all_one[all_one > 0.0] = 1.0
    for idx, t in enumerate(tme):
        print t
        nc_data = ncc.variables['shift_cult'][int(t), :, :]

        all_crops = crop_data.variables['c3ann'][int(t), :, :] + crop_data.variables['c4ann'][int(t), :, :] +\
        crop_data.variables['c3per'][int(t), :, :] + crop_data.variables['c4per'][int(t), :, :] +\
        crop_data.variables['c3nfx'][int(t), :, :]

        mult[idx] = numpy.ma.sum(all_crops * nc_data * cell_area)
        mult_one[idx] = numpy.ma.sum(all_crops * all_one * cell_area)
    pdb.set_trace()
    import matplotlib.pyplot as plt
    plt.plot(mult_one, label='butler')
    plt.plot(mult, label='andreas')
    plt.legend()
    plt.show()
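
The loop above reduces each year to one global number: cropland fraction times shifting-cultivation frequency times cell area, summed over the grid with numpy.ma.sum so masked cells drop out. A minimal sketch of that reduction on synthetic arrays (toy shapes and values, not the real inputs opened above):

import numpy as np

ny, nx = 4, 8                              # toy grid instead of 720 x 1440
crop_frac = np.random.random((ny, nx))     # fraction of each cell that is cropland
sc_freq = np.random.random((ny, nx))       # shifting-cultivation frequency per cell
cell_area = np.full((ny, nx), 25.0)        # cell area (e.g. km2)

# Area-weighted global total for one year; np.ma.sum also handles masked inputs
global_total = np.ma.sum(crop_frac * sc_freq * cell_area)
print(global_total)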
Example #2
    def copy_global_metadata_nc(self, path_src, path_dest):
        """
        :param path_src: Source netcdf file from which to copy metadata
        :param path_dest: Target netCDF file
        :return: Nothing (side-effect: target netCDF has modified metadata)
        """
        src_ds = util.open_or_die(path_src)
        dest_ds = util.open_or_die(path_dest, 'r+')

        # copy Global attributes from original file
        for att in src_ds.ncattrs():
            setattr(dest_ds, att, getattr(src_ds, att))

        src_ds.close()
        dest_ds.close()
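
For reference, the same global-attribute copy can be written against the netCDF4 library directly; setncatts applies a whole dict of attributes in one call. This is a sketch assuming plain netCDF4.Dataset objects rather than the project's util.open_or_die wrapper:

import netCDF4

def copy_global_attrs(path_src, path_dest):
    # Open source read-only and destination for appending, then copy every global attribute
    with netCDF4.Dataset(path_src) as src, netCDF4.Dataset(path_dest, 'r+') as dst:
        dst.setncatts({att: src.getncattr(att) for att in src.ncattrs()})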
Example #3
    def copy_global_metadata_nc(self, path_src, path_dest):
        """
        :param path_src: Source netcdf file from which to copy metadata
        :param path_dest: Target netCDF file
        :return: Nothing (side-effect: target netCDF has modified metadata)
        """
        src_ds   = util.open_or_die(path_src)
        dest_ds  = util.open_or_die(path_dest, 'r+')

        # copy Global attributes from original file
        for att in src_ds.ncattrs():
            setattr(dest_ds,att,getattr(src_ds,att))

        src_ds.close()
        dest_ds.close()
Example #4
File: plot.py Project: ritviksahajpal/LUH2
def plot_maps_ts_from_path(path_nc, var_name, lon, lat, out_path, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                           save_name='fig', xlabel='', start_movie_yr=-1, title='', do_jenks=True,
                           tme_name='time', land_bg=True, cmap=plt.cm.RdBu, grid=False):
    """
    Plot map for var_name variable from netCDF file
    :param path_nc: Name of netCDF file
    :param var_name: Name of variable in netCDF file to plot on map
    :param xaxis_min:
    :param xaxis_max:
    :param xaxis_step:
    :param lon: List of lon's
    :param lat: List of lat's
    :param out_path: Output directory path + file name
    :return: List of paths of images produced, side-effect: save an image(s)
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)

    util.make_dir_if_missing(out_path)

    # Read netCDF file and get time dimension
    nc = util.open_or_die(path_nc)

    if start_movie_yr > 0:
        ts = nc.variables[tme_name][:].astype(int)  # time-series
        ts = ts[start_movie_yr - ts[0]:]
    else:
        ts = nc.variables[tme_name][:]  # time-series

    nc.close()

    return plot_maps_ts(path_nc, ts, lon, lat, out_path, var_name=var_name,
                        xaxis_min=xaxis_min, xaxis_max=xaxis_max, xaxis_step=xaxis_step,
                        save_name=save_name, xlabel=xlabel, do_jenks=do_jenks,
                        start_movie_yr=start_movie_yr, title=title, tme_name=tme_name, land_bg=land_bg, cmap=cmap,
                        grid=grid)
Example #5
    def read_processed_FAO_data(self):
        """
        Read in data on FAO crop acreages globally (already processed)
        :return:
        """
        fao_file = util.open_or_die(constants.data_dir + os.sep + constants.FAO_FILE)

        return fao_file.parse(constants.FAO_SHEET)
Example #6
    def read_processed_FAO_data(self):
        """
        Read in data on FAO crop acreages globally (already processed)
        :return:
        """
        fao_file = util.open_or_die(constants.data_dir + os.sep +
                                    constants.FAO_FILE)

        return fao_file.parse(constants.FAO_SHEET)
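
The parse() call above implies that util.open_or_die returns something with the pandas ExcelFile interface when handed a spreadsheet path. A rough stand-alone equivalent using pandas directly (path and sheet name are placeholders):

import pandas as pd

def read_processed_fao(path_xls, sheet_name):
    # ExcelFile.parse(sheet_name) returns the requested sheet as a DataFrame
    return pd.ExcelFile(path_xls).parse(sheet_name)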
Example #7
    def copy_var_metadata(self, path_src, path_dest, vars):
        """
        Copy all the variable attributes from original file
        :param path_src: Source netCDF file from which to copy metadata
        :param path_dest: Target netCDF file
        :param vars: Variables for which to copy metadata
        :return: Nothing (side-effect: target netCDF has modified metadata)
        """
        # @TODO: Verify empirically that the attribute copy behaves as expected
        src_ds   = util.open_or_die(path_src)
        dest_ds  = util.open_or_die(path_dest, 'r+')

        for var in vars[:3]:
            for att in src_ds.variables[var].ncattrs():
                setattr(dest_ds.variables[var], att, getattr(src_ds.variables[var], att))

        src_ds.close()
        dest_ds.close()
Example #8
    def copy_var_metadata(self, path_src, path_dest, vars):
        """
        Copy all the variable attributes from original file
        :param path_src: Source netCDF file from which to copy metadata
        :param path_dest: Target netCDF file
        :param vars: Variables for which to copy metadata
        :return: Nothing (side-effect: target netCDF has modified metadata)
        """
        # @TODO: Verify empirically that the attribute copy behaves as expected
        src_ds = util.open_or_die(path_src)
        dest_ds = util.open_or_die(path_dest, 'r+')

        for var in vars[:3]:
            for att in src_ds.variables[var].ncattrs():
                setattr(dest_ds.variables[var], att,
                        getattr(src_ds.variables[var], att))

        src_ds.close()
        dest_ds.close()
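
As with the global attributes, the per-variable attributes can be copied with plain getncattr/setncattr calls; a sketch using netCDF4 directly, iterating over whichever variable names are passed in:

import netCDF4

def copy_var_attrs(path_src, path_dest, var_names):
    with netCDF4.Dataset(path_src) as src, netCDF4.Dataset(path_dest, 'r+') as dst:
        for name in var_names:
            for att in src.variables[name].ncattrs():
                # Copy each attribute of the variable from source to destination
                dst.variables[name].setncattr(att, src.variables[name].getncattr(att))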
Example #9
    def read_crop_lup(self, fname='', sht_name=''):
        """
        Read lookup table of crops
        :return:
        """
        logger.info('read_crop_lup')

        crp_file = util.open_or_die(fname)

        return crp_file.parse(sht_name)
Example #10
    def get_shape_data(self, path_nc, var):
        """
        :param path_nc: Path to netCDF dataset
        :param var: Name of variable in the netCDF dataset
        :return: Shape of netCDF dataset iyrs: time-dimension, iny: y-dimension, inx: x-dimension
        """
        ds = util.open_or_die(path_nc)
        iyrs, iny, inx = numpy.shape(ds.variables[var])
        ds.close()

        return iyrs, iny, inx
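
Since a netCDF4 variable exposes its shape without reading any data, the same query can also be written with a context manager so the file is always closed; a sketch using netCDF4 directly:

import netCDF4

def get_shape(path_nc, var):
    # Returns the (time, y, x) dimensions of the variable without loading its values
    with netCDF4.Dataset(path_nc) as ds:
        return ds.variables[var].shape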
Example #11
    def read_crop_lup(self, fname='', sht_name=''):
        """
        Read lookup table of crops
        :return:
        """
        logger.info('read_crop_lup')

        crp_file = util.open_or_die(fname)

        return crp_file.parse(sht_name)
Example #12
    def get_shape_data(self, path_nc, var):
        """
        :param path_nc: Path to netCDF dataset
        :param var: Name of variable in the netCDF dataset
        :return: Shape of netCDF dataset iyrs: time-dimension, iny: y-dimension, inx: x-dimension
        """
        ds = util.open_or_die(path_nc)
        iyrs, iny, inx = numpy.shape(ds.variables[var])
        ds.close()

        return iyrs, iny, inx
Example #13
def use_andreas_map():
    obj = ShftCult(use_andreas=True, file_sc=constants.TIF_ANDREAS, skiprows=0)
    path_nc = obj.create_andreas_nc()
    pdb.set_trace()
    # Plot maps

    ds = util.open_or_die(path_nc)
    lat = ds.variables['latitude'][:]
    lon = ds.variables['longitude'][:]
    imgs_for_movie = plot.plot_maps_ts(path_nc,
                                       'shift_cult', lon, lat,
                                       out_path='C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\',
                                       save_name='shift_cult', xlabel='Shifting cultivation frequency on croplands',
                                       title='', land_bg=False, grid=True)
    plot.make_movie(imgs_for_movie, 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\', out_fname='shift_cult.gif')

    ncc = util.open_or_die('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\andreas.nc')
    crop_data = util.open_or_die('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\LUH\\v0.3_historical\\states.nc')

    tme = numpy.arange(0, 1165 + 1)
    mult = numpy.zeros(len(tme))
    mult_one = numpy.zeros(len(tme))
    cell_area = util.open_or_die(constants.path_GLM_carea)

    nc_data = ncc.variables['shift_cult'][int(1), :, :]
    all_one = numpy.copy(nc_data)
    all_one[all_one > 0.0] = 1.0
    for idx, t in enumerate(tme):
        print t
        nc_data = ncc.variables['shift_cult'][int(t), :, :]

        all_crops = crop_data.variables['c3ann'][int(t), :, :] + crop_data.variables['c4ann'][int(t), :, :] +\
        crop_data.variables['c3per'][int(t), :, :] + crop_data.variables['c4per'][int(t), :, :] +\
        crop_data.variables['c3nfx'][int(t), :, :]

        mult[idx] = numpy.ma.sum(all_crops * nc_data * cell_area)
        mult_one[idx] = numpy.ma.sum(all_crops * all_one * cell_area)
    pdb.set_trace()
    import matplotlib.pyplot as plt
    plt.plot(mult_one, label='butler')
    plt.plot(mult, label='andreas')
    plt.legend()
    plt.show()
Example #14
    def plot_HYDE_annual_diff_maps(self):
        """
        Maps showing annual diffs for LU categories in HYDE
        :return:
        """
        if self.ver not in ('3.2', '3.2a', '3.2_v1h'):
            logger.info('plot_HYDE_annual_diff_maps only works for HYDE versions 3.2/3.2a/3.2_v1h')
            sys.exit(0)

        out_path = self.out_path + os.sep + 'HYDE_annual_diff_maps'

        for path, lu in self.lus.iteritems():
            imgs_diff_movie = []
            var_hyde = lu[0]
            logger.info('Create difference map for variable '+var_hyde)

            # Create difference map and movie of difference maps
            for idx, yr in enumerate(self.time.tolist()[::constants.MOVIE_SEP]):
                # Break out of loop if year + constants.MOVIE_SEP exceeds max year value
                if (yr + constants.MOVIE_SEP) >= self.time.max():
                    break

                # Plot difference map for consecutive years
                # 2nd year
                arr_two = util.open_or_die(path).variables[var_hyde][int(idx * constants.MOVIE_SEP +
                                                                         constants.MOVIE_SEP), :, :]
                # 1st year
                arr_one = util.open_or_die(path).variables[var_hyde][int(idx * constants.MOVIE_SEP), :, :]
                # Difference array
                diff_arr = arr_two - arr_one

                # Output diff as map
                out_map = plot.plot_ascii_map(diff_arr, out_path, xaxis_min=-1.0, xaxis_max=1.1, xaxis_step=0.2,
                                              plot_type='diverging', append_name=str(int(yr)), var_name=var_hyde,
                                              skiprows=0, map_label=str(int(yr)))

                imgs_diff_movie.append(out_map)
                self.list_files_to_del.append(out_map)

            plot.make_movie(imgs_diff_movie, out_path + os.sep + 'movies', out_fname='Annual_Diff_HYDE_' + var_hyde +
                            '.gif')
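
The inner loop above reopens the netCDF file twice per iteration; the year-to-year differences can also be computed by opening the file once and slicing consecutive time steps. A minimal sketch of that pattern (the function and variable names are placeholders):

import netCDF4

def consecutive_diffs(path_nc, var_name, step=1):
    """Yield (index, arr[t + step] - arr[t]) for each valid time index t."""
    with netCDF4.Dataset(path_nc) as ds:
        data = ds.variables[var_name]
        n_t = data.shape[0]
        for t in range(0, n_t - step, step):
            yield t, data[t + step, :, :] - data[t, :, :]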
Example #15
    def __init__(self, iam, path_nc, lon_name = 'lon', lat_name = 'lat', tme_name = 'time'):
        """
        Constructor
        """
        self.iam  = iam
        self.lon_name = lon_name
        self.lat_name = lat_name
        self.tme_name = tme_name

        # Open up netCDF and get dimensions
        ds        = util.open_or_die(path_nc)
        self.lat  = ds.variables[lat_name][:]
        self.lon  = ds.variables[lon_name][:]
        self.time = ds.variables[tme_name][:]
        ds.close()
Example #16
    def __init__(self):
        self.fao_file = util.open_or_die(constants.RAW_FAO)

        # Initialize names of columns
        self.country_code = 'Country Code'  # Numeric values from 1 - ~5817, gives a unique code for country and region
        self.FAO_code = 'Country_FAO'
        self.ISO_code = 'ISO'  # ISO code for each country, not the same as country_code. ISO_code is used in HYDE

        self.crop_name = 'Item'  # Name of crop e.g. Wheat
        self.crop_id = 'Item Code'  # Id of crop e.g. 1, 2 etc.
        self.cft_id = 'functional crop id'  # crop functional type id e.g. 1
        self.cft_type = 'functional crop type'  # crop functional type e.g. C3Annual

        # Names of columns in past and future
        self.cur_cols = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1)]  # FAO era: FAO_START_YR -> FAO_END_YR
        self.past_cols = ['Y' + str(x) for x in range(constants.GLM_STRT_YR, constants.FAO_START_YR)]  # 850 -> 1960
        if constants.FAO_END_YR < constants.GLM_END_YR:
            self.futr_cols = ['Y' + str(x) for x in range(constants.FAO_END_YR + 1, constants.GLM_END_YR + 1)]  # 2014 -> 2015
        else:
            self.futr_cols = []

        self.all_cols = self.past_cols + self.cur_cols + self.futr_cols
        self.FAO_perc_all_df = pd.DataFrame()  # Dataframe containing FAO data for entire time-period
        self.FAO_mfd_df = pd.DataFrame()  # Dataframe containing CFT percentage data for each country for the year 2000

        # area_df: Area of CFT for each country in FAO era
        # gcrop: Percentage of ag area per CFT
        # gcnt: Percentage of ag area per country
        # perc_df: Percentage of ag area for each CFT by country in FAO era
        self.area_df = pd.DataFrame()
        self.gcrop = pd.DataFrame()
        self.gcnt = pd.DataFrame()
        self.perc_df = pd.DataFrame()

        # Path to csv of crop rotations
        self.csv_rotations = constants.csv_rotations

        self.dict_cont = {0: 'Antartica', 1: 'North_America', 2: 'South_America', 3: 'Europe', 4: 'Asia', 5: 'Africa',
                          6: 'Australia'}
        self.CCODES = 'country code'
        self.CONT_CODES = 'continent code'

        # continent and country code files
        self.ccodes_file = constants_glm.ccodes_file
        self.contcodes_file = constants_glm.contcodes_file
Example #17
    def create_GCAM_croplands(self, nc):
        """
        :param nc: Empty 3D numpy array (yrs,ny,nx)
        :return nc: 3D numpy array containing SUM of all GCAM cropland percentages
        """
        # Iterate over all crop categories and add the self.land_var data
        for i in range(len(constants.CROPS)):
            print('Processing: ' + constants.CROPS[i])
            logging.info('Processing: ' + constants.CROPS[i])

            ds = util.open_or_die(constants.gcam_dir + constants.CROPS[i])
            for j in range(len(self.time)):
                nc[j, :, :] += ds.variables[self.land_var][j, :, :].data
            ds.close()

        # @TODO: Test whether sum of all self.land_var in a given year is <= 1.0

        return nc
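
A compact, self-contained version of the accumulation above, using netCDF4 directly instead of the project's wrappers (the paths and variable name are placeholders):

import netCDF4

def sum_variable_across_files(paths, var_name):
    """Accumulate var_name from several netCDF files into one (time, y, x) array."""
    total = None
    for path in paths:
        with netCDF4.Dataset(path) as ds:
            data = ds.variables[var_name][:]          # read the full (time, y, x) block
            total = data if total is None else total + data
    return total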
Example #18
    def read_monfreda(self):
        # Loop over crop functional types
        for key, value in self.cft.iteritems():
            for val in value:
                logging.info('Processing ' + key + ' ' + val)
                tmp_asc = util.open_or_die(
                    path_file=GLM.constants.MFD_DATA_DIR + os.sep + val,
                    skiprows=self.skiprows,
                    delimiter=' ')

                if key == 'C4Annual':
                    self.c4annual = self.c4annual + tmp_asc
                elif key == 'C4Perren':
                    self.c4perren = self.c4perren + tmp_asc
                elif key == 'C3Perren':
                    self.c3perren = self.c3perren + tmp_asc
                elif key == 'Ntfixing':
                    self.ntfixing = self.ntfixing + tmp_asc
                elif key == 'C3Annual':
                    self.c3annual = self.c3annual + tmp_asc
                elif key == 'TotlRice':
                    self.totlrice = self.totlrice + tmp_asc
                else:
                    logging.info('Wrong key')

                # Add to total crop fraction of grid cell area
                self.totlcrop = self.totlcrop + tmp_asc

        # Aggregate MONFREDA data from 5' to 0.25 degree
        self.totlcrop = util.avg_np_arr(self.totlcrop, block_size=3)
        self.croparea = self.totlcrop * self.land_area

        # Aggregate MONFREDA data for each CFT from 5' to 0.25 degree
        self.c4anarea = util.avg_np_arr(self.c4annual,
                                        block_size=3) * self.land_area
        self.c4prarea = util.avg_np_arr(self.c4perren,
                                        block_size=3) * self.land_area
        self.c3prarea = util.avg_np_arr(self.c3perren,
                                        block_size=3) * self.land_area
        self.ntfxarea = util.avg_np_arr(self.ntfixing,
                                        block_size=3) * self.land_area
        self.c3anarea = util.avg_np_arr(self.c3annual,
                                        block_size=3) * self.land_area
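
util.avg_np_arr(..., block_size=3) evidently aggregates the 5-arc-minute Monfreda grids to 0.25 degree by averaging 3 x 3 blocks of cells. A reshape-based numpy sketch of such a block average (assumes the array dimensions are exact multiples of the block size):

import numpy as np

def block_average(arr, block_size=3):
    # Average non-overlapping block_size x block_size windows, e.g. 2160 x 4320 -> 720 x 1440
    ny, nx = arr.shape
    return arr.reshape(ny // block_size, block_size,
                       nx // block_size, block_size).mean(axis=(1, 3))

coarse = block_average(np.random.random((2160, 4320)), block_size=3)  # -> shape (720, 1440)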
Example #19
    def __init__(self,
                 iam,
                 path_nc,
                 lon_name='lon',
                 lat_name='lat',
                 tme_name='time'):
        """
        Constructor
        """
        self.iam = iam
        self.lon_name = lon_name
        self.lat_name = lat_name
        self.tme_name = tme_name

        # Open up netCDF and get dimensions
        ds = util.open_or_die(path_nc)
        self.lat = ds.variables[lat_name][:]
        self.lon = ds.variables[lon_name][:]
        self.time = ds.variables[tme_name][:]
        ds.close()
Example #20
    def test_output_cft_frac_to_nc(self):
        # TODO: Maximum grid cell value should not exceed 1
        # TODO Minimum grid cell value should not be less than 1
        # Add the fractions from all crop functional types (they should sum up to 1.0 for every year)
        obj = CropStats()
        obj.process_crop_stats()

        obj.output_cft_frac_to_nc(obj.extend_FAO_time(obj.FAO_perc_all_df), nc_name='test_FAO_CFT_fraction.nc')

        # Open up netCDF file just created
        nc = util.open_or_die(constants.out_dir + os.sep + 'test_FAO_CFT_fraction.nc')
        # Get list of CFTs
        cfts = obj.FAO_perc_all_df[obj.cft_type].unique().tolist()

        # Add up all CFT fractions
        arr = numpy.zeros(nc[cfts[0]].shape)
        for idx, key in enumerate(cfts):
            arr = arr + nc[key][:]

        # CFT fraction sum for each country should be ~= 1.0
        zeros = arr == 0.0
        without_zeros = arr[~zeros]  # Remove all 0.0s
        self.assertEqual(numpy.allclose(without_zeros, 1), True)
Example #21
File: plot.py Project: ritviksahajpal/LUH2
def plot_ascii_map(asc, out_path, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1, plot_type='sequential', map_label='',
                   append_name='', xlabel='', title='', var_name='data', skiprows=0, num_lats=constants.NUM_LATS,
                   num_lons=constants.NUM_LONS):
    """

    :param asc:
    :param out_path:
    :param xaxis_min:
    :param xaxis_max:
    :param xaxis_step:
    :param plot_type:
    :param map_label:
    :param append_name:
    :param xlabel:
    :param title:
    :param var_name:
    :param skiprows:
    :param num_lats:
    :param num_lons:
    :return:
    """
    logger.info('Plot ascii file as map')
    out_nc = util.convert_ascii_nc(asc, out_path + os.sep + 'file_' + append_name + '.nc', skiprows=skiprows,
                                   num_lats=num_lats, num_lons=num_lons, var_name=var_name, desc='netCDF')

    nc_file = util.open_or_die(out_nc)
    nc_file.close()

    path = os.path.dirname(out_path)
    map_path = path + os.sep + var_name + '_' + append_name + '.png'

    plot_map_from_nc(out_nc, map_path, var_name, xaxis_min, xaxis_max, xaxis_step, plot_type, annotate_date=True,
                     yr=map_label, date=-1, xlabel=xlabel, title=title, any_time_data=False, land_bg=False,
                     cmap=plt.cm.RdBu, grid=True, fill_mask=True)

    os.remove(out_nc)
    return map_path
Example #22
File: plot.py Project: ritviksahajpal/LUH2
def plot_maps_ts_from_path(path_nc,
                           var_name,
                           lon,
                           lat,
                           out_path,
                           xaxis_min=0.0,
                           xaxis_max=1.1,
                           xaxis_step=0.1,
                           save_name='fig',
                           xlabel='',
                           start_movie_yr=-1,
                           title='',
                           do_jenks=True,
                           tme_name='time',
                           land_bg=True,
                           cmap=plt.cm.RdBu,
                           grid=False):
    """
    Plot map for var_name variable from netCDF file
    :param path_nc: Name of netCDF file
    :param var_name: Name of variable in netCDF file to plot on map
    :param xaxis_min:
    :param xaxis_max:
    :param xaxis_step:
    :param lon: List of lon's
    :param lat: List of lat's
    :param out_path: Output directory path + file name
    :return: List of paths of images produced, side-effect: save an image(s)
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)

    util.make_dir_if_missing(out_path)

    # Read netCDF file and get time dimension
    nc = util.open_or_die(path_nc)

    if start_movie_yr > 0:
        ts = nc.variables[tme_name][:].astype(int)  # time-series
        ts = ts[start_movie_yr - ts[0]:]
    else:
        ts = nc.variables[tme_name][:]  # time-series

    nc.close()

    return plot_maps_ts(path_nc,
                        ts,
                        lon,
                        lat,
                        out_path,
                        var_name=var_name,
                        xaxis_min=xaxis_min,
                        xaxis_max=xaxis_max,
                        xaxis_step=xaxis_step,
                        save_name=save_name,
                        xlabel=xlabel,
                        do_jenks=do_jenks,
                        start_movie_yr=start_movie_yr,
                        title=title,
                        tme_name=tme_name,
                        land_bg=land_bg,
                        cmap=cmap,
                        grid=grid)
Example #23
    def write_GCAM_nc(self, isum_perc, shape):
        """
        :param isum_perc: Sum of self.land_var values for all crop classes
        :param shape: Tuple containing dimensions of netCDF (yrs, ny, nx)
        :return: Nothing, side-effect is to create a netCDF file with each crop category
                represented as fraction of cropland area and not total grid cell area
        """
        print 'Creating GCAM file'
        logging.info('Creating GCAM file')

        # Read in netCDF datasets
        ids_pas = util.open_or_die(self.path_pas)
        ids_frt = util.open_or_die(self.path_frt)
        ids_urb = util.open_or_die(self.path_urb)

        iam_nc = util.open_or_die(self.gcam_out_fl, perm='w')
        iam_nc.description = 'GCAM'

        # dimensions
        iam_nc.createDimension('time', shape[0])
        iam_nc.createDimension('lat', shape[1])
        iam_nc.createDimension('lon', shape[2])

        # variables
        time = iam_nc.createVariable('time', 'i4', ('time', ))
        latitudes = iam_nc.createVariable('lat', 'f4', ('lat', ))
        longitudes = iam_nc.createVariable('lon', 'f4', ('lon', ))
        crp = iam_nc.createVariable('cropland',
                                    'f4', (
                                        'time',
                                        'lat',
                                        'lon',
                                    ),
                                    fill_value=np.nan)
        pas = iam_nc.createVariable('pasture',
                                    'f4', (
                                        'time',
                                        'lat',
                                        'lon',
                                    ),
                                    fill_value=np.nan)
        frt = iam_nc.createVariable('forest',
                                    'f4', (
                                        'time',
                                        'lat',
                                        'lon',
                                    ),
                                    fill_value=np.nan)
        urb = iam_nc.createVariable('urban',
                                    'f4', (
                                        'time',
                                        'lat',
                                        'lon',
                                    ),
                                    fill_value=np.nan)

        # Metadata
        crp.units = 'percentage'
        pas.units = 'percentage'
        frt.units = 'percentage'
        urb.units = 'percentage'

        latitudes.units = 'degrees_north'
        latitudes.standard_name = 'latitude'
        longitudes.units = 'degrees_east'
        longitudes.standard_name = 'longitude'

        # Assign time
        time[:] = self.time

        # Assign lats/lons
        latitudes[:] = self.lat
        longitudes[:] = self.lon

        # Assign data to new netCDF file
        for i in range(len(self.time)):
            crp[i, :, :] = isum_perc[i, :, :]
            pas[i, :, :] = ids_pas.variables[self.land_var][i, :, :].data
            frt[i, :, :] = ids_frt.variables[self.land_var][i, :, :].data
            urb[i, :, :] = ids_urb.variables[self.land_var][i, :, :].data

        # @TODO: Copy metadata from original GCAM netcdf

        ids_pas.close()
        ids_frt.close()
        ids_urb.close()
        iam_nc.close()
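
The netCDF construction above follows the usual netCDF4 pattern: create dimensions, create coordinate and data variables, attach units, then assign arrays slice by slice. A stripped-down, self-contained version of the same pattern with toy sizes and values:

import numpy as np
import netCDF4

with netCDF4.Dataset('toy.nc', 'w') as nc:
    nc.description = 'toy example'
    # Dimensions
    nc.createDimension('time', 2)
    nc.createDimension('lat', 3)
    nc.createDimension('lon', 4)
    # Coordinate variables
    time = nc.createVariable('time', 'i4', ('time',))
    lat = nc.createVariable('lat', 'f4', ('lat',))
    lon = nc.createVariable('lon', 'f4', ('lon',))
    lat.units = 'degrees_north'
    lon.units = 'degrees_east'
    # Data variable with a fill value, filled one time step at a time
    crp = nc.createVariable('cropland', 'f4', ('time', 'lat', 'lon'), fill_value=np.nan)
    crp.units = 'percentage'
    time[:] = [2000, 2001]
    lat[:] = [0.0, 0.25, 0.5]
    lon[:] = [0.0, 0.25, 0.5, 0.75]
    for i in range(2):
        crp[i, :, :] = np.random.random((3, 4))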
Example #24
    def create_nc_perc_croplands(self, sum_nc, shape):
        """
        Create netcdf file with each crop category represented as fraction of cropland
        area and not total grid cell area

        :param sum_nc: netCDF file containing 'croplands' which is fraction of area
                       of cell occupied by all croplands
        :param shape: Tuple containing dimensions of netCDF (yrs, ny, nx)
        :return: None
        """
        print 'Creating cropland nc'
        logging.info('Creating cropland nc')

        inc = util.open_or_die(sum_nc)
        onc = util.open_or_die(self.perc_crops_fl, 'w')

        onc.description = 'crops_as_fraction_of_croplands'

        # dimensions
        onc.createDimension('time', shape[0])
        onc.createDimension('lat', shape[1])
        onc.createDimension('lon', shape[2])

        # variables
        time = onc.createVariable('time', 'i4', ('time', ))
        latitudes = onc.createVariable('lat', 'f4', ('lat', ))
        longitudes = onc.createVariable('lon', 'f4', ('lon', ))

        # Metadata
        latitudes.units = 'degrees_north'
        latitudes.standard_name = 'latitude'
        longitudes.units = 'degrees_east'
        longitudes.standard_name = 'longitude'

        # Assign time
        time[:] = self.time

        # Assign lats/lons
        latitudes[:] = self.lat
        longitudes[:] = self.lon

        # Assign data
        for i in range(len(constants.CROPS)):
            print '\t' + constants.CROPS[i]
            onc_var = onc.createVariable(constants.CROPS[i],
                                         'f4', (
                                             'time',
                                             'lat',
                                             'lon',
                                         ),
                                         fill_value=np.nan)
            onc_var.units = 'percentage'

            ds = util.open_or_die(constants.gcam_dir + constants.CROPS[i])
            # Iterate over all years
            for j in range(shape[0]):
                onc_var[j, :, :] = ds.variables[self.land_var][
                    j, :, :].data / inc.variables['cropland'][j, :, :]

            ds.close()

        # @TODO: Copy metadata from original GCAM netcdf
        onc.close()
Example #25
def create_tillage_nc(fao_tillage_yr=1973):
    if os.name == 'nt':
        path_hyde_crop = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\hyde_3.2\\quarter_deg_grids_incl_urban\\gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_till = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\Management\\Tillage\\aquastat.xlsx'
        path_cft_frac = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\other\\croptypes\\FAO_CFT_fraction.nc'
    elif os.name == 'posix' or os.name == 'mac':
        # Read in tillage dataset
        path_hyde_crop = '/Volumes/gel1/pukeko_restore/data/hyde3.2_june_23_2015/feb26_2016/hyde32_baseline/processed/gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_till = '/Users/ritvik/Documents/Projects/GLM/Input/Management/Tillage/aquastat.xlsx'
        path_cft_frac = '/Volumes/gel1/data/glm_data/lumip_data/other/croptypes/FAO_CFT_fraction.nc'

    hndl_hyde_crop = util.open_or_die(path_hyde_crop)
    hndl_till = util.open_or_die(path_till)
    hndl_cft_frac = util.open_or_die(path_cft_frac)

    # Country code map
    map_ccodes = np.genfromtxt(constants.CNTRY_CODES,
                               skip_header=0,
                               delimiter=' ')
    carea = util.open_or_die(constants.path_glm_carea)

    df = hndl_till.parse('processed_tillage')

    # Create dataframe with columns from 850 to 1973 with all 0's
    cols = np.arange(850, fao_tillage_yr + 1)
    df_ = pd.DataFrame(index=df.index, columns=cols)
    df_ = df_.fillna(0)  # with 0s rather than NaNs

    # Concatenate all years from 850 to 2015
    df = pd.concat([df_, df], axis=1)

    # Interpolate across the years
    df = pd.concat([
        df[['country code', 'country name']],
        df.filter(regex='^8|9|1|2').interpolate(axis=1)
    ],
                   axis=1)
    pdb.set_trace()
    out_nc = constants_glm.path_glm_output + os.sep + 'national_tillage_data_850_2015_new.nc'
    nc_data = util.open_or_die(out_nc, perm='w', format='NETCDF4_CLASSIC')
    nc_data.description = ''

    # dimensions
    tme = np.arange(850, 2015 + 1)

    # country codes
    ccodes = pd.read_csv(constants_glm.ccodes_file, header=None)
    ccodes.columns = ['country code']
    contcodes = pd.read_csv(constants_glm.contcodes_file, header=None)
    contcodes.columns = ['continent code']
    lup_codes = pd.concat([ccodes, contcodes], axis=1)

    nc_data.createDimension('time', np.shape(tme)[0])
    nc_data.createDimension('country', len(ccodes))

    # Populate and output nc file
    time = nc_data.createVariable('time', 'i4', ('time', ), fill_value=0.0)
    country = nc_data.createVariable('country',
                                     'i4', ('country', ),
                                     fill_value=0.0)

    # Assign units and other metadata
    time.units = 'year as %Y.%f'
    time.calendar = 'proleptic_gregorian'
    country.units = 'ISO country code'

    # Assign values to dimensions and data
    time[:] = tme
    country[:] = ccodes.values

    tillage = nc_data.createVariable('tillage', 'f4', (
        'time',
        'country',
    ))
    tillage[:, :] = 0.0  # Assign all values to 0.0
    tillage.units = 'fraction of cropland area'

    # Loop over all countries
    for index, row in lup_codes.iterrows():
        # Find row containing country in df
        row_country = df[df['country code'] == row['country code']]

        if len(row_country):
            cntr = row_country.values[0][0]
            mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)
            idx_cntr = np.where(ccodes == cntr)[0][0]

            # Iterate over years
            for idx, yr in enumerate(range(fao_tillage_yr, 2015 + 1)):
                # Get fraction of cell area that is cropland
                crop_frac = hndl_hyde_crop.variables['cropland'][
                    fao_tillage_yr - 850 + idx, :, :]

                # Get fraction of cell area that is (C4 Annual + C3 Annual + N-fixing)
                cft_frac = crop_frac * \
                           (hndl_cft_frac.variables['C4annual'][fao_tillage_yr - 850 + idx, idx_cntr].data +
                            hndl_cft_frac.variables['C3annual'][fao_tillage_yr - 850 + idx, idx_cntr].data +
                            hndl_cft_frac.variables['N-fixing'][fao_tillage_yr - 850 + idx, idx_cntr].data)

                # Subset of cropland area (C4 Annual + C3 Annual + N-fixing) * mask applied to country * cell area
                sum_area = np.ma.sum(cft_frac * carea * mask_cntr)

                # Multiply by 100 to convert numerator from ha to km2
                frac = row_country.values[0][2 + fao_tillage_yr + idx -
                                             850] / (100 * sum_area)
                if frac > 1.0:
                    tillage[idx + fao_tillage_yr - 850, idx_cntr] = 1.0
                else:
                    tillage[idx + fao_tillage_yr - 850, idx_cntr] = frac

    nc_data.close()
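
The fraction written above is a national tillage area reported in hectares divided by the country's tilled-CFT cropland area, computed as cell fraction times cell area inside the country mask; dividing by 100 converts hectares to km2 (assuming the cell-area grid is in km2, as the comment in the loop suggests). A small numpy sketch of that normalization with made-up inputs:

import numpy as np

ny, nx = 4, 6
map_ccodes = np.random.choice([124, 840], size=(ny, nx))   # toy country-code map
cft_frac = np.random.random((ny, nx))                      # tilled-CFT fraction of each cell
carea = np.full((ny, nx), 770.0)                           # cell area in km2

national_tilled_ha = 1.5e6                                 # reported national area in ha
cntr = 124

mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)         # 1 inside the country, 0 outside
sum_area_km2 = np.ma.sum(cft_frac * carea * mask_cntr)     # tilled-CFT area within the country

frac = national_tilled_ha / (100.0 * sum_area_km2)         # 100 ha per km2
frac = min(frac, 1.0)                                      # cap at 1, as in the loop above
print(frac)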
Example #26
def create_static_info_nc():
    ##########################################################################################################
    # Creating new static info file
    ##########################################################################################################
    if os.name == 'nt':
        path_inp_file = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\staticData_quarterdeg.nc'
        path_out_file = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\staticData_quarterdeg_out.nc'
    elif os.name == 'posix' or os.name == 'mac':
        path_inp_file = '/Users/ritvik/Documents/Projects/GLM/Input/LUH/v0.1/staticData_quarterdeg.nc'
        path_out_file = '/Users/ritvik/Documents/Projects/GLM/Input/LUH/v0.1/staticData_quarterdeg_out.nc'

    vba = util.open_or_die(constants.path_glm_vba)
    vba = vba / constants.AGB_TO_BIOMASS
    vba[vba < 0.0] = 0.0

    asc_vba = np.copy(vba)
    asc_vba[asc_vba < 2.0] = 0.0  # Biomass defn of forest: > 2.0 kg C/m^2
    asc_vba[asc_vba > 0.0] = 1.0
    fnf = asc_vba  # Boolean ascii file indicating whether it is forest(1.0)/non-forest(0.0)

    # copy netCDF
    # http://guziy.blogspot.com/2014/01/netcdf4-python-copying-variables-from.html
    now = datetime.datetime.now()

    # input file
    dsin = netCDF4.Dataset(path_inp_file)

    # output file
    dsout = netCDF4.Dataset(path_out_file, 'w')

    # Copy dimensions
    for dname, the_dim in dsin.dimensions.iteritems():
        dsout.createDimension(
            dname,
            len(the_dim) if not the_dim.isunlimited() else None)

    # Copy variables
    for v_name, varin in dsin.variables.iteritems():
        print v_name
        if v_name == 'lat' or v_name == 'lon':
            outVar = dsout.createVariable(v_name, 'f8', varin.dimensions)
        else:
            outVar = dsout.createVariable(v_name, 'f4', varin.dimensions)

        # Copy variable attributes
        # outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})

        if v_name == 'ptbio':
            outVar[:] = vba[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'vegetation_carbon_content'
            outVar.long_name = 'potential biomass carbon content'
            outVar.units = 'kg m-2'
        elif v_name == 'fstnf':
            outVar[:] = fnf[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = ''
            outVar.long_name = 'mask denoting forest (1) or non-forest (0)'
            outVar.units = '1'
        elif v_name == 'carea':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = ''
            outVar.long_name = 'area of grid cell'
            outVar.units = 'km2'
        elif v_name == 'icwtr':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'area_fraction'
            outVar.long_name = 'ice/water fraction'
            outVar.units = '1'
        elif v_name == 'ccode':
            outVar[:] = varin[:]
            outVar._fillvalue = 1e20
            outVar.missing_value = 1e20
            outVar.standard_name = ''
            outVar.long_name = 'country codes'
            outVar.units = '1'
        elif v_name == 'lon':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'longitude'
            outVar.long_name = 'longitude'
            outVar.units = 'degrees_east'
            outVar.axis = 'X'
        elif v_name == 'lat':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'latitude'
            outVar.long_name = 'latitude'
            outVar.units = 'degrees_north'
            outVar.axis = 'Y'
        else:
            print '***'
            outVar[:] = varin[:]

    # Write global variables
    dsout.history = 'Processed: ' + str(now.strftime("%Y-%m-%dT%H:%M:%SZ"))
    dsout.host = 'UMD College Park'
    dsout.comment = 'LUH2'
    dsout.contact = '[email protected], [email protected], [email protected], [email protected]'
    dsout.creation_date = now.strftime("%Y %m %d %H:%M")
    dsout.title = 'Land Use Data Sets'
    dsout.activity_id = 'input4MIPs'
    dsout.Conventions = 'CF-1.6'
    dsout.data_structure = 'grid'
    dsout.source = 'LUH2-0-1: Land-Use Harmonization Data Set'
    dsout.source_id = 'LUH2-0-1'
    dsout.license = 'MIT'
    dsout.further_info_url = 'http://luh.umd.edu'
    dsout.frequency = 'yr'
    dsout.institution = 'University of Maryland College Park'
    dsout.realm = 'land'
    dsout.references = 'Hurtt, Chini et al. 2011'

    # close the output file
    dsout.close()
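
Copying a file's layout, as above, mostly means recreating each dimension (length None for unlimited ones) and each variable before overwriting selected fields. A compact sketch of just the structural copy with netCDF4, leaving out the attribute handling and the special-cased variables (assumes plain numeric variables):

import netCDF4

def copy_structure(path_in, path_out):
    with netCDF4.Dataset(path_in) as dsin, netCDF4.Dataset(path_out, 'w') as dsout:
        # Dimensions: unlimited dimensions are recreated with length None
        for dname, dim in dsin.dimensions.items():
            dsout.createDimension(dname, None if dim.isunlimited() else len(dim))
        # Variables: keep each variable's dtype and dimensions, then copy its data
        for vname, varin in dsin.variables.items():
            varout = dsout.createVariable(vname, varin.dtype, varin.dimensions)
            varout[:] = varin[:]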
Example #27
def create_biofuel_nc():
    ##########################################################################################################
    # Creating new file on biofuel cft
    ##########################################################################################################
    if os.name == 'nt':
        path_hndl_biof = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\Biofuel\\biofuels.xls'
        path_hyde_crop = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\hyde_3.2\\quarter_deg_grids_incl_urban\\gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_cft_frac = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\other\\croptypes\\FAO_CFT_fraction.nc'
    elif os.name == 'posix':
        path_hndl_biof = '/Users/ritvik/Documents/Projects/GLM/Input/Biofuel/biofuels.xls'
        path_hyde_crop = '/Volumes/gel1/pukeko_restore/data/hyde3.2_june_23_2015/feb26_2016/hyde32_baseline/processed/gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_cft_frac = '/Volumes/gel1/data/glm_data/lumip_data/other/croptypes/FAO_CFT_fraction.nc'

    hndl_biof = util.open_or_die(path_hndl_biof)
    hndl_hyde_crop = util.open_or_die(path_hyde_crop)
    hndl_cft_frac = util.open_or_die(path_cft_frac)

    # Determine biofuel area/crop land area per grid cell over time
    sheets_biof = hndl_biof.sheet_names

    # Country code map
    map_ccodes = np.genfromtxt(constants.CNTRY_CODES,
                               skip_header=0,
                               delimiter=' ')

    # List of country codes
    ccodes = pd.read_csv(constants.ccodes_file, header=None)[0].values

    start_cbio_yr = 2000

    # Loop through all crop types
    # If crop type is not present, then set value to nan for all countries in the globe
    # If crop type is present, then set value using excel table for specific countries, others get nan
    # Read in HYDE file or some other file that has cropland information, this will be used to compute cropland fraction
    # Initialize nc file
    out_nc = constants.path_glm_output + os.sep + 'biofuel.nc'
    nc_data = netCDF4.Dataset(out_nc, 'w', 'NETCDF4_CLASSIC')
    nc_data.description = ''

    # dimensions
    START_YR = 850
    tme = np.arange(START_YR, 2015 + 1)

    nc_data.createDimension('time', np.shape(tme)[0])
    nc_data.createDimension('country', len(ccodes))

    # Populate and output nc file
    time = nc_data.createVariable('time', 'i4', ('time', ), fill_value=0.0)
    country = nc_data.createVariable('country',
                                     'i4', ('country', ),
                                     fill_value=0.0)

    # Assign units and other metadata
    time.units = 'year as %Y.%f'
    time.calendar = 'proleptic_gregorian'
    country.units = 'ISO country code'

    # Assign values to dimensions and data
    time[:] = tme
    country[:] = ccodes

    c3ann_cbio_frac = nc_data.createVariable('c3ann_cbio_frac', 'f4', (
        'time',
        'country',
    ))
    c4ann_cbio_frac = nc_data.createVariable('c4ann_cbio_frac', 'f4', (
        'time',
        'country',
    ))
    c3per_cbio_frac = nc_data.createVariable('c3per_cbio_frac', 'f4', (
        'time',
        'country',
    ))
    c4per_cbio_frac = nc_data.createVariable('c4per_cbio_frac', 'f4', (
        'time',
        'country',
    ))
    c3nfx_cbio_frac = nc_data.createVariable('c3nfx_cbio_frac', 'f4', (
        'time',
        'country',
    ))

    c3ann_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c3ann_cbio_frac.long_name = 'C3 annual crops grown as biofuels'

    c4ann_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c4ann_cbio_frac.long_name = 'C4 annual crops grown as biofuels'

    c3per_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c3per_cbio_frac.long_name = 'C3 perennial crops grown as biofuels'

    c4per_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c4per_cbio_frac.long_name = 'C4 perennial crops grown as biofuels'

    c3nfx_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c3nfx_cbio_frac.long_name = 'C3 nitrogen-fixing crops grown as biofuels'

    carea = util.open_or_die(constants.path_glm_carea)

    # Assign all values to 0.0
    c4ann_cbio_frac[:, :] = 0.0
    c4per_cbio_frac[:, :] = 0.0
    c3nfx_cbio_frac[:, :] = 0.0
    c3ann_cbio_frac[:, :] = 0.0
    c3per_cbio_frac[:, :] = 0.0

    # [u'country code', u'country name', 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015]
    # C4ANN
    print 'C4ANN'
    df_c4ann = hndl_biof.parse('c4ann')
    for row in df_c4ann.iterrows():
        for idx, yr in enumerate(range(start_cbio_yr, 2015 + 1)):
            crop_frac = hndl_hyde_crop.variables['cropland'][start_cbio_yr -
                                                             850 + idx, :, :]
            for idy, cntr in enumerate(ccodes):
                if cntr == row[1]['country code']:
                    global_cft_frac = crop_frac * hndl_cft_frac.variables[
                        'C4annual'][start_cbio_yr - 850 + idx,
                                    np.where(ccodes == cntr)[0][0]].data
                    mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)

                    sum_area = (global_cft_frac * carea * mask_cntr).sum()
                    frac = row[1][yr] / sum_area
                    if frac <= 1.0:
                        c4ann_cbio_frac[idx + start_cbio_yr - START_YR,
                                        np.where(ccodes == cntr)[0]
                                        [0]] = row[1][yr] / sum_area  # maize
                    else:
                        if row[1][yr] / (crop_frac * carea *
                                         mask_cntr).sum() <= 1.0:
                            c4ann_cbio_frac[
                                idx + start_cbio_yr - START_YR,
                                np.where(
                                    ccodes == cntr)[0][0]] = row[1][yr] / (
                                        crop_frac * carea * mask_cntr).sum()
                        else:
                            c4ann_cbio_frac[idx + start_cbio_yr - START_YR,
                                            np.where(
                                                ccodes == cntr)[0][0]] = 1.0

    # C4PER
    print 'C4PER'
    df_c4per = hndl_biof.parse('c4per')
    for row in df_c4per.iterrows():
        for idx, yr in enumerate(range(start_cbio_yr, 2015 + 1)):
            crop_frac = hndl_hyde_crop.variables['cropland'][start_cbio_yr -
                                                             850 + idx, :, :]
            for idy, cntr in enumerate(ccodes):
                if cntr == row[1]['country code']:
                    global_cft_frac = crop_frac * hndl_cft_frac.variables[
                        'C4perennial'][start_cbio_yr - 850 + idx,
                                       np.where(ccodes == cntr)[0][0]].data
                    mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)

                    sum_area = (global_cft_frac * carea * mask_cntr).sum()
                    frac = row[1][yr] / sum_area

                    if frac <= 1.0:
                        c4per_cbio_frac[
                            idx + start_cbio_yr - START_YR,
                            np.where(
                                ccodes == cntr
                            )[0][0]] = row[1][yr] / sum_area  # sugarcane
                    else:
                        if row[1][yr] / (crop_frac * carea *
                                         mask_cntr).sum() <= 1.0:
                            c4per_cbio_frac[
                                idx + start_cbio_yr - START_YR,
                                np.where(
                                    ccodes == cntr)[0][0]] = row[1][yr] / (
                                        crop_frac * carea * mask_cntr).sum()
                        else:
                            c4per_cbio_frac[idx + start_cbio_yr - START_YR,
                                            np.where(
                                                ccodes == cntr)[0][0]] = 1.0
    # C3NFX
    df_c3nfx = hndl_biof.parse('c3nfx')
    print 'C3NFX'
    for row in df_c3nfx.iterrows():
        for idx, yr in enumerate(range(start_cbio_yr, 2015 + 1)):
            crop_frac = hndl_hyde_crop.variables['cropland'][start_cbio_yr -
                                                             850 + idx, :, :]
            for idy, cntr in enumerate(ccodes):
                if cntr == row[1]['country code']:
                    global_cft_frac = crop_frac * hndl_cft_frac.variables[
                        'N-fixing'][start_cbio_yr - 850 + idx,
                                    np.where(ccodes == cntr)[0][0]].data
                    mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)

                    sum_area = (global_cft_frac * carea * mask_cntr).sum()
                    frac = row[1][yr] / sum_area
                    if frac <= 1.0:
                        c3nfx_cbio_frac[idx + start_cbio_yr - START_YR,
                                        np.where(ccodes ==
                                                 cntr)[0][0]] = frac  # soybean
                    else:
                        if row[1][yr] / (crop_frac * carea *
                                         mask_cntr).sum() <= 1.0:
                            c3nfx_cbio_frac[
                                idx + start_cbio_yr - START_YR,
                                np.where(
                                    ccodes == cntr)[0][0]] = row[1][yr] / (
                                        crop_frac * carea * mask_cntr).sum()
                        else:
                            c3nfx_cbio_frac[idx + start_cbio_yr - START_YR,
                                            np.where(
                                                ccodes == cntr)[0][0]] = 1.0

    nc_data.close()
Example #28
File: plot.py Project: ritviksahajpal/LUH2
def plot_ascii_map(asc,
                   out_path,
                   xaxis_min=0.0,
                   xaxis_max=1.1,
                   xaxis_step=0.1,
                   plot_type='sequential',
                   map_label='',
                   append_name='',
                   xlabel='',
                   title='',
                   var_name='data',
                   skiprows=0,
                   num_lats=constants.NUM_LATS,
                   num_lons=constants.NUM_LONS):
    """

    :param asc:
    :param out_path:
    :param xaxis_min:
    :param xaxis_max:
    :param xaxis_step:
    :param plot_type:
    :param map_label:
    :param append_name:
    :param xlabel:
    :param title:
    :param var_name:
    :param skiprows:
    :param num_lats:
    :param num_lons:
    :return:
    """
    logger.info('Plot ascii file as map')
    out_nc = util.convert_ascii_nc(asc,
                                   out_path + os.sep + 'file_' + append_name +
                                   '.nc',
                                   skiprows=skiprows,
                                   num_lats=num_lats,
                                   num_lons=num_lons,
                                   var_name=var_name,
                                   desc='netCDF')

    nc_file = util.open_or_die(out_nc)
    nc_file.close()

    path = os.path.dirname(out_path)
    map_path = path + os.sep + var_name + '_' + append_name + '.png'

    plot_map_from_nc(out_nc,
                     map_path,
                     var_name,
                     xaxis_min,
                     xaxis_max,
                     xaxis_step,
                     plot_type,
                     annotate_date=True,
                     yr=map_label,
                     date=-1,
                     xlabel=xlabel,
                     title=title,
                     any_time_data=False,
                     land_bg=False,
                     cmap=plt.cm.RdBu,
                     grid=True,
                     fill_mask=True)

    os.remove(out_nc)
    return map_path
Example #29
    def output_constant_cft_frac_to_nc(self, df, nc_name):
        """
        Create a netCDF with constant CFT fraction values for each country across time
        :return:
        """
        logger.info('output_constant_cft_frac_to_nc')
        # Get list of ALL country codes
        fao_id = pd.read_csv(constants.FAO_CONCOR)
        all_cntrs = fao_id[self.ISO_code].unique()

        # Create a lookup dictionary between crop ids (1,2 etc.) and crop names (wheat etc.)
        crp_ids = df[self.cft_id].unique().tolist()
        crp_names = df[self.cft_type].unique().tolist()
        dict_crps = dict(zip(crp_ids, crp_names))

        # Compute global average of all CFT area percentages (one average value for each CFT)
        cols = self.all_cols[:]
        cols.extend([self.cft_id])
        global_cft_avg = df[cols].groupby(
            self.cft_id).sum() * 100.0 / df[cols].groupby(
                self.cft_id).sum().sum()
        global_cft_avg = global_cft_avg.mean(axis=1)

        # Read in HYDE dataset to get lat, lon info
        ds = util.open_or_die(constants.hyde_dir)
        tme = ds.variables['time'][:]

        onc = util.open_or_die(constants.out_dir + nc_name, 'w')

        # Create dimensions
        onc.createDimension('time', np.shape(tme)[0])
        onc.createDimension('country_code', len(all_cntrs))

        # Create variables
        time = onc.createVariable('time', 'i4', ('time', ))
        cntrs = onc.createVariable('country_code', 'i4', ('country_code', ))

        # Assign time
        time[:] = tme

        # Metadata
        cntrs.units = ''
        cntrs.standard_name = 'FAO country codes'

        # Assign data to countries
        cntrs[:] = all_cntrs

        all = onc.createVariable('sum',
                                 'f4', (
                                     'time',
                                     'country_code',
                                 ),
                                 fill_value=np.nan)
        all[:, :] = np.zeros((np.shape(tme)[0], len(all_cntrs)))

        # Assign data for crop functional types
        for key, val in dict_crps.iteritems():
            cft = onc.createVariable(val,
                                     'f4', (
                                         'time',
                                         'country_code',
                                     ),
                                     fill_value=np.nan)
            cft.units = 'fraction'

            # Iterate over all countries over all years
            for idc, i in enumerate(all_cntrs):
                # Check if country is present in dataframe
                cntr_present = i in df[self.ISO_code].values

                # If country is present, then fill CFT values
                if cntr_present:
                    # Get data corresponding to country code 'i' and crop id 'val' and for all years (all_cols)
                    vals = df[(df[self.ISO_code] == i) &
                              (df[self.cft_type] == val)][self.all_cols].values

                    # If CFT data is missing, then vals will be an empty array.
                    # In that case, fill with 0.0
                    if len(vals) == 0:
                        vals = np.zeros((1, len(tme)))
                else:  # country data not present, fill with global average
                    vals = np.repeat(
                        global_cft_avg[global_cft_avg.index == key].values,
                        len(tme))

                cft[:,
                    idc] = vals.T / 100.0  # Convert from percentage to fraction
                all[:, idc] = all[:, idc] + cft[:, idc]

        onc.close()
Example #30
    def __init__(self):
        self.fao_file = util.open_or_die(constants.RAW_FAO)

        # Initialize names of columns
        self.country_code = 'Country Code'  # Numeric values from 1 - ~5817, gives a unique code for country and region
        self.FAO_code = 'Country_FAO'
        self.ISO_code = 'ISO'  # ISO code for each country, not the same as country_code. ISO_code is used in HYDE

        self.crop_name = 'Item'  # Name of crop e.g. Wheat
        self.crop_id = 'Item Code'  # Id of crop e.g. 1, 2 etc.
        self.cft_id = 'functional crop id'  # crop functional type id e.g. 1
        self.cft_type = 'functional crop type'  # crop functional type e.g. C3Annual

        # Names of columns in past and future
        self.cur_cols = [
            'Y' + str(x)
            for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1)
        ]  # FAO era: FAO_START_YR -> FAO_END_YR
        self.past_cols = [
            'Y' + str(x)
            for x in range(constants.GLM_STRT_YR, constants.FAO_START_YR)
        ]  # 850 -> 1960
        if constants.FAO_END_YR < constants.GLM_END_YR:
            self.futr_cols = [
                'Y' + str(x) for x in range(constants.FAO_END_YR +
                                            1, constants.GLM_END_YR + 1)
            ]  # 2014 -> 2015
        else:
            self.futr_cols = []

        self.all_cols = self.past_cols + self.cur_cols + self.futr_cols
        self.FAO_perc_all_df = pd.DataFrame(
        )  # Dataframe containing FAO data for entire time-period
        self.FAO_mfd_df = pd.DataFrame(
        )  # Dataframe containing CFT percentage data for each country for the year 2000

        # area_df: Area of CFT for each country in FAO era
        # gcrop: Percentage of ag area per CFT
        # gcnt: Percentage of ag area per country
        # perc_df: Percentage of ag area for each CFT by country in FAO era
        self.area_df = pd.DataFrame()
        self.gcrop = pd.DataFrame()
        self.gcnt = pd.DataFrame()
        self.perc_df = pd.DataFrame()

        # Path to csv of crop rotations
        self.csv_rotations = constants.csv_rotations

        self.dict_cont = {
            0: 'Antarctica',
            1: 'North_America',
            2: 'South_America',
            3: 'Europe',
            4: 'Asia',
            5: 'Africa',
            6: 'Australia'
        }
        self.CCODES = 'country code'
        self.CONT_CODES = 'continent code'

        # continent and country code files
        self.ccodes_file = constants_glm.ccodes_file
        self.contcodes_file = constants_glm.contcodes_file
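
The cur/past/futr column lists built above are just 'Y'-prefixed year labels spanning the GLM and FAO eras. A standalone sketch of the same construction, with hard-coded years standing in for constants.GLM_STRT_YR, FAO_START_YR, FAO_END_YR and GLM_END_YR (the values 850, 1961, 2014 and 2015 are illustrative assumptions):

# Hypothetical stand-ins for the constants module
GLM_STRT_YR, FAO_START_YR, FAO_END_YR, GLM_END_YR = 850, 1961, 2014, 2015

past_cols = ['Y' + str(x) for x in range(GLM_STRT_YR, FAO_START_YR)]        # Y850 .. Y1960
cur_cols = ['Y' + str(x) for x in range(FAO_START_YR, FAO_END_YR + 1)]      # Y1961 .. Y2014
futr_cols = ['Y' + str(x) for x in range(FAO_END_YR + 1, GLM_END_YR + 1)]   # Y2015
all_cols = past_cols + cur_cols + futr_cols

print(len(all_cols), all_cols[0], all_cols[-1])  # 1166 Y850 Y2015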
Example #31
0
    def output_constant_cft_frac_to_nc(self, df, nc_name):
        """
        Create a netCDF with constant CFT fraction values for each country across time
        :param df: Dataframe with CFT area percentages per country (ISO code) and crop functional type
        :param nc_name: Name of the output netCDF file (written to constants.out_dir)
        :return: Nothing (side-effect: netCDF file written to disk)
        """
        logger.info('output_constant_cft_frac_to_nc')
        # Get list of ALL country codes
        fao_id = pd.read_csv(constants.FAO_CONCOR)
        all_cntrs = fao_id[self.ISO_code].unique()

        # Create a lookup dictionary between crop ids (1,2 etc.) and crop names (wheat etc.)
        crp_ids = df[self.cft_id].unique().tolist()
        crp_names = df[self.cft_type].unique().tolist()
        dict_crps = dict(zip(crp_ids, crp_names))

        # Compute global average of all CFT area percentages (one average value for each CFT)
        cols = self.all_cols[:]
        cols.extend([self.cft_id])
        grouped = df[cols].groupby(self.cft_id).sum()
        global_cft_avg = (grouped * 100.0 / grouped.sum()).mean(axis=1)

        # Read in HYDE dataset to get lat, lon info
        ds  = util.open_or_die(constants.hyde_dir)
        tme = ds.variables['time'][:]

        onc = util.open_or_die(constants.out_dir + nc_name, 'w')

        # Create dimensions
        onc.createDimension('time', np.shape(tme)[0])
        onc.createDimension('country_code', len(all_cntrs))

        # Create variables
        time = onc.createVariable('time', 'i4', ('time',))
        cntrs = onc.createVariable('country_code', 'i4', ('country_code',))

        # Assign time
        time[:] = tme

        # Metadata
        cntrs.units = ''
        cntrs.standard_name = 'FAO country codes'

        # Assign data to countries
        cntrs[:] = all_cntrs

        all = onc.createVariable('sum', 'f4', ('time', 'country_code', ), fill_value=np.nan)
        all[:, :] = np.zeros((np.shape(tme)[0], len(all_cntrs)))

        # Assign data for crop functional types
        for key, val in dict_crps.iteritems():
            cft = onc.createVariable(val, 'f4', ('time', 'country_code', ), fill_value=np.nan)
            cft.units = 'fraction'

            # Iterate over all countries over all years
            for idc, i in enumerate(all_cntrs):
                # Check if country is present in dataframe
                cntr_present = i in df[self.ISO_code].values

                # If country is present, then fill CFT values
                if cntr_present:
                    # Get data corresponding to country code 'i' and crop id 'val' and for all years (all_cols)
                    vals = df[(df[self.ISO_code] == i) & (df[self.cft_type] == val)][self.all_cols].values

                    # If CFT data is missing, then vals will be an empty array.
                    # In that case, fill with 0.0
                    if len(vals) == 0:
                        vals = np.zeros((1, len(tme)))
                else:  # country data not present, fill with global average
                    vals = np.repeat(global_cft_avg[global_cft_avg.index == key].values, len(tme))

                cft[:, idc] = vals.T/100.0  # Convert from percentage to fraction
                all[:, idc] = all[:, idc] + cft[:, idc]

        onc.close()
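
For a country missing from the FAO table, the function above falls back on a global average share per crop functional type, computed by grouping on the CFT id and normalizing by the global total. A minimal pandas sketch of that computation on toy data (the two countries, years and values are invented; only the column 'functional crop id' mirrors the class attribute):

import pandas as pd

# Toy table: one row per (country, CFT id) with area values for two years
df = pd.DataFrame({
    'functional crop id': [1, 2, 1, 2],
    'Y2000': [60.0, 40.0, 30.0, 70.0],
    'Y2001': [55.0, 45.0, 25.0, 75.0],
})

cols = ['Y2000', 'Y2001', 'functional crop id']
grouped = df[cols].groupby('functional crop id').sum()

# Share of each CFT in the global total per year, then averaged across years
global_cft_avg = (grouped * 100.0 / grouped.sum()).mean(axis=1)
print(global_cft_avg)  # one average percentage per CFT id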
Example #32
0
    def __init__(self, out_path, ver='3.2', lon_name='lon', lat_name='lat', tme_name='time', area_name='cell_area'):
        """
        Constructor
        """
        self.list_files_to_del = []  # List of file paths to delete when done

        self.ver = ver
        if self.ver == '3.2':
            self.HYDE_crop_path = constants.HYDE32_CROP_PATH
            self.HYDE_othr_path = constants.HYDE32_OTHR_PATH
            self.HYDE_urbn_path = constants.HYDE32_URBN_PATH
            self.HYDE_past_path = constants.HYDE32_PAST_PATH
            self.HYDE_graz_path = constants.HYDE32_GRAZ_PATH
            self.HYDE_rang_path = constants.HYDE32_RANG_PATH

            # Land use variables
            self.lus = {self.HYDE_crop_path: ['cropland', 'cropland fraction'],
                        self.HYDE_othr_path: ['other', 'other vegetation fraction'],
                        self.HYDE_urbn_path: ['urban', 'urban fraction'],
                        self.HYDE_graz_path: ['grazing', 'grazing land fraction'],
                        self.HYDE_past_path: ['pasture', 'managed pasture fraction'],
                        self.HYDE_rang_path: ['rangeland', 'rangeland fraction']}
        elif self.ver == '3.1':
            self.HYDE_crop_path = constants.HYDE31_CROP_PATH
            self.HYDE_othr_path = constants.HYDE31_OTHR_PATH
            self.HYDE_past_path = constants.HYDE31_PAST_PATH
            self.HYDE_urbn_path = constants.HYDE31_URBN_PATH

            # Land use variables
            self.lus = {self.HYDE_crop_path: ['cropland', 'cropland'],
                        self.HYDE_othr_path: ['primary', 'primary'],
                        self.HYDE_urbn_path: ['urban', 'urban fraction'],
                        self.HYDE_past_path: ['pasture', 'pasture']}
        elif self.ver == '3.2_v03':
            self.HYDE_crop_path = constants.HYDE32_v03_CROP_PATH
            self.HYDE_othr_path = constants.HYDE32_v03_OTHR_PATH
            self.HYDE_urbn_path = constants.HYDE32_v03_URBN_PATH
            self.HYDE_past_path = constants.HYDE32_v03_PAST_PATH
            self.HYDE_graz_path = constants.HYDE32_v03_GRAZ_PATH
            self.HYDE_rang_path = constants.HYDE32_v03_RANG_PATH

            # Land use variables
            self.lus = {self.HYDE_crop_path: ['cropland', 'cropland fraction'],
                        self.HYDE_othr_path: ['other', 'other vegetation fraction'],
                        self.HYDE_urbn_path: ['urban', 'urban fraction'],
                        self.HYDE_graz_path: ['grazing', 'grazing land fraction'],
                        self.HYDE_past_path: ['pasture', 'pasture fraction'],
                        self.HYDE_rang_path: ['rangeland', 'rangeland fraction']}
        elif self.ver == '3.2_v1hb':
            self.HYDE_crop_path = constants.HYDE32_v1hb_CROP_PATH
            self.HYDE_othr_path = constants.HYDE32_v1hb_OTHR_PATH
            self.HYDE_urbn_path = constants.HYDE32_v1hb_URBN_PATH
            self.HYDE_past_path = constants.HYDE32_v1hb_PAST_PATH
            self.HYDE_graz_path = constants.HYDE32_v1hb_GRAZ_PATH
            self.HYDE_rang_path = constants.HYDE32_v1hb_RANG_PATH

            # Land use variables
            self.lus = {self.HYDE_crop_path: ['cropland', 'cropland fraction'],
                        self.HYDE_othr_path: ['other', 'other vegetation fraction'],
                        self.HYDE_urbn_path: ['urban', 'urban fraction'],
                        self.HYDE_graz_path: ['grazing', 'grazing land fraction'],
                        self.HYDE_past_path: ['pasture', 'pasture fraction'],
                        self.HYDE_rang_path: ['rangeland', 'rangeland fraction']}
        elif self.ver == '3.2_march':
            self.HYDE_crop_path = constants.HYDE32_march_CROP_PATH
            self.HYDE_othr_path = constants.HYDE32_march_OTHR_PATH
            self.HYDE_urbn_path = constants.HYDE32_march_URBN_PATH
            self.HYDE_past_path = constants.HYDE32_march_PAST_PATH
            self.HYDE_graz_path = constants.HYDE32_march_GRAZ_PATH
            self.HYDE_rang_path = constants.HYDE32_march_RANG_PATH

            # Land use variables
            self.lus = {self.HYDE_crop_path: ['cropland', 'cropland fraction'],
                        self.HYDE_othr_path: ['other', 'other vegetation fraction'],
                        self.HYDE_urbn_path: ['urban', 'urban fraction'],
                        self.HYDE_graz_path: ['grazing', 'grazing land fraction'],
                        self.HYDE_past_path: ['pasture', 'pasture fraction'],
                        self.HYDE_rang_path: ['rangeland', 'rangeland fraction']}

        # Lat, Lon and time dimensions
        self.lon_name = lon_name
        self.lat_name = lat_name
        self.tme_name = tme_name
        self.area_name = area_name
        self.out_path = out_path
        self.movies_path = out_path + os.sep + 'movies'

        util.make_dir_if_missing(self.out_path)
        util.make_dir_if_missing(self.movies_path)

        # Open up netCDF and get dimensions
        ds = util.open_or_die(self.HYDE_crop_path)
        self.lat = ds.variables[lat_name][:]
        self.lon = ds.variables[lon_name][:]
        self.time = ds.variables[tme_name][:]
        ds.close()

        # GLM static data
        self.path_glm_stat = constants.path_glm_stat  # Static data, contains grid cell area (carea)
        self.path_glm_carea = constants.path_glm_carea

        # Get cell area (after subtracting ice/water fraction)
        icwtr_nc = util.open_or_die(self.path_glm_stat)
        icwtr = icwtr_nc.variables[constants.ice_water_frac][:, :]
        self.carea = util.open_or_die(self.path_glm_carea)
        self.carea_wo_wtr = util.open_or_die(self.path_glm_carea) * (1.0 - icwtr)

        # Movie frames
        self.yrs = np.arange(int(min(self.time)), int(max(self.time)), constants.MOVIE_SEP)
        # Get colors for plotting
        self.cols = plot.get_colors(palette='tableau')
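
carea_wo_wtr above is the grid-cell area with the ice/water fraction removed. A tiny numpy sketch of the same adjustment on synthetic grids (the 4x4 shape and the area/fraction values are made up):

import numpy as np

carea = np.full((4, 4), 100.0)        # grid-cell area, e.g. km2
icwtr = np.zeros((4, 4))
icwtr[0, :] = 1.0                     # a row of pure ice/water cells
icwtr[1, :] = 0.25                    # partially ice/water

carea_wo_wtr = carea * (1.0 - icwtr)  # usable land area per cell
print(carea_wo_wtr.sum())             # 0 + 300 + 400 + 400 = 1100.0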
Example #33
0
    def create_rotations_nc(self, df):
        """
        :param df: Pandas dataframe
            Dataframe containing cropland area for each country x functional crop type combination
            Country_FAO    functional crop type     mean_area
            Albania             C3annual         6.687115e+03
            Albania          C3perennial         4.139867e+03
            Albania             C4annual         5.300000e+04
            Albania             N-fixing         4.460714e+03
            Algeria             C3annual         5.371344e+04
        :return: Nothing (side-effect: writes a national crop rotation netCDF to constants_glm.path_glm_output)
        """
        logger.info('create_rotations_nc')

        df_rotations = util.open_or_die(self.csv_rotations, csv_header=0)

        # Read in country and continent file
        # Create dataframe combining country and continent files
        ccodes = pd.read_csv(self.ccodes_file, header=None)
        ccodes.columns = [self.CCODES]
        contcodes = pd.read_csv(self.contcodes_file, header=None)
        contcodes.columns = [self.CONT_CODES]
        lup_codes = pd.concat([ccodes, contcodes], axis=1)

        # Merge dataframe
        df_merge = pd.merge(df_rotations, lup_codes, on=self.CCODES)

        out_nc = constants_glm.path_glm_output + os.sep + 'national_crop_rotation_data_850_2015_new.nc'
        nc_data = util.open_or_die(out_nc, perm='w', format='NETCDF4_CLASSIC')
        nc_data.description = ''

        # dimensions
        tme = np.arange(constants.GLM_STRT_YR, constants.GLM_END_YR + 1)

        nc_data.createDimension('time', np.shape(tme)[0])
        nc_data.createDimension('country', len(ccodes))

        # Populate and output nc file
        time = nc_data.createVariable('time', 'i4', ('time',), fill_value=0.0)
        country = nc_data.createVariable('country', 'i4', ('country',), fill_value=0.0)

        # Assign units and other metadata
        time.units = 'year as %Y.%f'
        time.calendar = 'proleptic_gregorian'
        country.units = 'ISO country code'

        # Assign values to dimensions and data
        time[:] = tme
        country[:] = ccodes.values

        c4ann_to_c3nfx = nc_data.createVariable('c4ann_to_c3nfx', 'f4', ('time', 'country',))
        c4ann_to_c3ann = nc_data.createVariable('c4ann_to_c3ann', 'f4', ('time', 'country',))
        c3ann_to_c3nfx = nc_data.createVariable('c3ann_to_c3nfx', 'f4', ('time', 'country',))

        c4ann_to_c3nfx.units = 'fraction of crop type area undergoing crop rotation'
        c4ann_to_c3nfx.long_name = 'Crop rotations: C4 Annual, C3 N-Fixing'

        c4ann_to_c3ann.units = 'fraction of crop type area undergoing crop rotation'
        c4ann_to_c3ann.long_name = 'Crop rotations: C4 Annual, C3 Annual'

        c3ann_to_c3nfx.units = 'fraction of crop type area undergoing crop rotation'
        c3ann_to_c3nfx.long_name = 'Crop rotations: C3 Annual, C3 N-Fixing'

        # Loop over all countries
        for index, row in lup_codes.iterrows():
            # print index, row[self.CCODES], row[self.CONT_CODES]

            # Find row containing country in df_merge
            row_country = df_merge[df_merge[self.CCODES] == row[self.CCODES]]
            if len(row_country):
                c4ann_to_c3nfx[:, index] = row_country['c4ann_to_c3nfx'].values[0]
                c4ann_to_c3ann[:, index] = row_country['c4ann_to_c3ann'].values[0]
                c3ann_to_c3nfx[:, index] = row_country['c3ann_to_c3nfx'].values[0]
            else:
                # TODO Find the average crop rotation rate for the continent

                c4ann_to_c3nfx[:, index] = 0.03  # 0.53
                c4ann_to_c3ann[:, index] = 0.01
                c3ann_to_c3nfx[:, index] = 0.02

        nc_data.close()
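
Once create_rotations_nc has run, the rotation fractions can be spot-checked by reading the file back with netCDF4. A minimal sketch, assuming the output file name used above sits in the working directory (the country index 0 is arbitrary):

import netCDF4

path_nc = 'national_crop_rotation_data_850_2015_new.nc'  # adjust to the actual output location

ds = netCDF4.Dataset(path_nc)
years = ds.variables['time'][:]
rot = ds.variables['c4ann_to_c3nfx'][:, 0]  # all years, first country

print(years[0], years[-1])  # expected 850 .. 2015
print(rot.min(), rot.max())
ds.close()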
Example #34
0
    def __init__(self, res='q'):
        """
        :param res: Resolution of output dataset: q=quarter, h=half, o=one
        :return:
        """
        # Dictionary of crop functional types
        self.cft = {
            'C4Annual': ['maize.asc', 'millet.asc', 'sorghum.asc'],
            'C4Perren': ['sugarcane.asc'],
            'C3Perren': [
                'banana.asc', 'berry.asc', 'citrus.asc', 'fruittree.asc',
                'grape.asc', 'palm.asc', 'tropevrgrn.asc'
            ],
            'Ntfixing': [
                'alfalfa.asc', 'bean.asc', 'legumehay.asc', 'peanut.asc',
                'soybean.asc'
            ],
            'C3Annual': [
                'beet.asc', 'cassava.asc', 'cotton.asc', 'flax.asc',
                'hops.asc', 'mixedcover.asc', 'nursflower.asc', 'oat.asc',
                'potato.asc', 'rapeseed.asc', 'rice.asc', 'rye.asc',
                'safflower.asc', 'sunflower.asc', 'tobacco.asc',
                'vegetable.asc', 'wheat.asc'
            ],
            'TotlRice': ['rice.asc', 'xrice.asc']
        }

        # Get shape of file
        self.skiprows = 6
        self.res = res
        self.tmpdata = util.open_or_die(path_file=GLM.constants.MFD_DATA_DIR +
                                        os.sep + 'maize.asc',
                                        skiprows=self.skiprows,
                                        delimiter=' ')
        self.asc_hdr = util.get_ascii_header(
            path_file=GLM.constants.MFD_DATA_DIR + os.sep + 'maize.asc',
            getrows=self.skiprows)
        self.yshape = self.tmpdata.shape[0]
        self.xshape = self.tmpdata.shape[1]

        # Create empty numpy arrays
        self.c4annual = numpy.zeros(shape=(self.yshape, self.xshape))
        self.c4perren = numpy.zeros(shape=(self.yshape, self.xshape))
        self.c3perren = numpy.zeros(shape=(self.yshape, self.xshape))
        self.ntfixing = numpy.zeros(shape=(self.yshape, self.xshape))
        self.c3annual = numpy.zeros(shape=(self.yshape, self.xshape))
        self.totlrice = numpy.zeros(shape=(self.yshape, self.xshape))
        self.totlcrop = numpy.zeros(shape=(self.yshape, self.xshape))

        self.c4anarea = numpy.zeros(shape=(self.yshape, self.xshape))
        self.c4prarea = numpy.zeros(shape=(self.yshape, self.xshape))
        self.c3prarea = numpy.zeros(shape=(self.yshape, self.xshape))
        self.ntfxarea = numpy.zeros(shape=(self.yshape, self.xshape))
        self.c3anarea = numpy.zeros(shape=(self.yshape, self.xshape))
        self.croparea = numpy.zeros(shape=(self.yshape, self.xshape))

        # Area of each cell in Monfreda dataset
        self.mfd_area = numpy.zeros(shape=(self.yshape, self.xshape))

        # Ice-water fraction and other static data
        self.icwtr = util.open_or_die(GLM.constants.path_GLM_stat)

        # Read in area file based on res
        if res == 'q':
            self.area_data = util.open_or_die(
                path_file=GLM.constants.CELL_AREA_Q)
        elif res == 'h':
            self.area_data = util.open_or_die(
                path_file=GLM.constants.CELL_AREA_H)
        elif res == 'o':
            self.area_data = util.open_or_die(
                path_file=GLM.constants.CELL_AREA_O)
        else:
            logging.info('Incorrect resolution for output of Monfreda')

        # Compute cell area (excluding ice-water fraction)
        self.cell_area = util.open_or_die(GLM.constants.path_GLM_carea)
        self.land_area = self.cell_area * (
            1.0 - self.icwtr.variables[GLM.constants.ice_water_frac][:, :])

        # Get FAO country concordance list
        self.fao_id = pandas.read_csv(
            GLM.constants.FAO_CONCOR)[['Country_FAO', 'ISO']]

        # Output path
        self.out_path = GLM.constants.out_dir + os.sep + 'Monfreda'
        util.make_dir_if_missing(self.out_path)
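
Each crop functional type above is simply the cell-wise sum of the Monfreda per-crop grids listed in self.cft. A short numpy sketch of that aggregation with synthetic arrays standing in for the .asc files (load_asc is a hypothetical placeholder for util.open_or_die on an ASCII grid):

import numpy as np

yshape, xshape = 360, 720

def load_asc(name):
    # Hypothetical stand-in for reading a Monfreda .asc crop-fraction grid
    return np.random.rand(yshape, xshape) * 0.1

c4annual = np.zeros((yshape, xshape))
for crop in ['maize.asc', 'millet.asc', 'sorghum.asc']:
    c4annual += load_asc(crop)  # accumulate per-crop fractions into the CFT total

print(c4annual.shape, float(c4annual.mean()))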
Example #35
0
File: plot.py Project: ritviksahajpal/LUH2
def plot_map_from_nc(path_nc,
                     out_path,
                     var_name,
                     xaxis_min=0.0,
                     xaxis_max=1.1,
                     xaxis_step=0.1,
                     annotate_date=False,
                     yr=0,
                     date=-1,
                     xlabel='',
                     title='',
                     tme_name='time',
                     show_plot=False,
                     any_time_data=True,
                     format='%.2f',
                     land_bg=True,
                     cmap=plt.cm.RdBu,
                     grid=False,
                     fill_mask=False):
    """
    Plot var_name variable from netCDF file

    \b
    Args:
        path_nc: Name of netCDF file including path
        out_path: Output directory path + file name
        var_name: Name of variable in netCDF file to plot on map

    Returns:
        Nothing, side-effect: save an image
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)

    # Read netCDF file and get time dimension
    nc = util.open_or_die(path_nc, 'r', format='NETCDF4')
    lon = nc.variables['lon'][:]
    lat = nc.variables['lat'][:]

    if any_time_data:
        ts = nc.variables[tme_name][:]  # time-series
        if date == -1:  # Plot either the last year {len(ts)-1} or whatever year the user wants
            plot_yr = len(ts) - 1
        else:
            plot_yr = date - ts[0]

    # Draw empty basemap
    m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0)
    # m.drawcoastlines()
    # m.drawcountries()

    # Find x,y of map projection grid.
    lons, lats = np.meshgrid(lon, lat)
    x, y = m(lons, lats)
    if fill_mask:
        nc_vars = np.ma.filled(nc.variables[var_name], fill_value=np.nan)
    else:
        nc_vars = np.array(nc.variables[var_name])

    # Plot
    # Get data for the last year from the netCDF file array
    if any_time_data:
        mask_data = maskoceans(lons, lats, nc_vars[int(plot_yr), :, :])
    else:
        mask_data = maskoceans(lons, lats, nc_vars[:, :])

    m.etopo()
    if land_bg:
        m.drawlsmask(land_color='white', ocean_color='none',
                     lakes=True)  # land_color = (0, 0, 0, 0) for transparent
    else:
        m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True)

    cs = m.contourf(x,
                    y,
                    mask_data,
                    np.arange(xaxis_min, xaxis_max, xaxis_step),
                    cmap=cmap)

    if annotate_date:
        plt.annotate(str(yr),
                     xy=(0.45, 0.1),
                     xycoords='axes fraction',
                     size=20)

    if grid:
        # where labels intersect = [left, right, top, bottom]
        m.drawmeridians(np.arange(-180, 180, 60),
                        labels=[0, 0, 1, 0],
                        labelstyle='+/-',
                        linewidth=0.5)
        m.drawparallels([-40, 0, 40],
                        labels=[1, 0, 0, 0],
                        labelstyle='+/-',
                        linewidth=0.5)

    # Add colorbar
    cb = m.colorbar(cs,
                    "bottom",
                    size="3%",
                    pad='2%',
                    extend='both',
                    drawedges=False,
                    spacing='proportional',
                    format=format)
    cb.set_label(xlabel)
    plt.title(title, y=1.08)

    plt.tight_layout()
    if not show_plot:
        plt.savefig(out_path, dpi=constants.DPI)
        plt.close()
    else:
        plt.show()

    nc.close()

    return out_path
Example #36
0
    def create_rotations_nc(self, df):
        """
        :param df: Pandas dataframe
            Dataframe containing cropland area for each country x functional crop type combination
            Country_FAO    functional crop type     mean_area
            Albania             C3annual         6.687115e+03
            Albania          C3perennial         4.139867e+03
            Albania             C4annual         5.300000e+04
            Albania             N-fixing         4.460714e+03
            Algeria             C3annual         5.371344e+04
        :return:
        """
        logger.info('create_rotations_nc')

        df_rotations = util.open_or_die(self.csv_rotations, csv_header=0)

        # Read in country and continent file
        # Create dataframe combining country and continent files
        ccodes = pd.read_csv(self.ccodes_file, header=None)
        ccodes.columns = [self.CCODES]
        contcodes = pd.read_csv(self.contcodes_file, header=None)
        contcodes.columns = [self.CONT_CODES]
        lup_codes = pd.concat([ccodes, contcodes], axis=1)

        # Merge dataframe
        df_merge = pd.merge(df_rotations, lup_codes, on=self.CCODES)

        out_nc = constants_glm.path_glm_output + os.sep + 'national_crop_rotation_data_850_2015_new.nc'
        nc_data = util.open_or_die(out_nc, perm='w', format='NETCDF4_CLASSIC')
        nc_data.description = ''

        # dimensions
        tme = np.arange(constants.GLM_STRT_YR, constants.GLM_END_YR + 1)

        nc_data.createDimension('time', np.shape(tme)[0])
        nc_data.createDimension('country', len(ccodes))

        # Populate and output nc file
        time = nc_data.createVariable('time', 'i4', ('time', ), fill_value=0.0)
        country = nc_data.createVariable('country',
                                         'i4', ('country', ),
                                         fill_value=0.0)

        # Assign units and other metadata
        time.units = 'year as %Y.%f'
        time.calendar = 'proleptic_gregorian'
        country.units = 'ISO country code'

        # Assign values to dimensions and data
        time[:] = tme
        country[:] = ccodes.values

        c4ann_to_c3nfx = nc_data.createVariable('c4ann_to_c3nfx', 'f4', (
            'time',
            'country',
        ))
        c4ann_to_c3ann = nc_data.createVariable('c4ann_to_c3ann', 'f4', (
            'time',
            'country',
        ))
        c3ann_to_c3nfx = nc_data.createVariable('c3ann_to_c3nfx', 'f4', (
            'time',
            'country',
        ))

        c4ann_to_c3nfx.units = 'fraction of crop type area undergoing crop rotation'
        c4ann_to_c3nfx.long_name = 'Crop rotations: C4 Annual, C3 N-Fixing'

        c4ann_to_c3ann.units = 'fraction of crop type area undergoing crop rotation'
        c4ann_to_c3ann.long_name = 'Crop rotations: C4 Annual, C3 Annual'

        c3ann_to_c3nfx.units = 'fraction of crop type area undergoing crop rotation'
        c3ann_to_c3nfx.long_name = 'Crop rotations: C3 Annual, C3 N-Fixing'

        # Loop over all countries
        for index, row in lup_codes.iterrows():
            # print index, row[self.CCODES], row[self.CONT_CODES]

            # Find row containing country in df_merge
            row_country = df_merge[df_merge[self.CCODES] == row[self.CCODES]]
            if len(row_country):
                c4ann_to_c3nfx[:, index] = row_country['c4ann_to_c3nfx'].values[0]
                c4ann_to_c3ann[:, index] = row_country['c4ann_to_c3ann'].values[0]
                c3ann_to_c3nfx[:, index] = row_country['c3ann_to_c3nfx'].values[0]
            else:
                # TODO Find the average crop rotation rate for the continent

                c4ann_to_c3nfx[:, index] = 0.03  # 0.53
                c4ann_to_c3ann[:, index] = 0.01
                c3ann_to_c3nfx[:, index] = 0.02

        nc_data.close()
Example #37
0
    def AEZ_to_national_GCAM(self, data_source='wh', out_nc_name=''):
        """
        :param data_source: 'wh' for wood harvest data, 'ftz' for fertilizer data
        :param out_nc_name: Name of the output netCDF variable (also used as the output file prefix)
        :return: Nothing (side-effect: writes a country_code x time netCDF to constants.out_dir)
        """
        # Create a dictionary mapping GCAM AEZ's to regions
        if data_source == 'wh':
            df = util.open_or_die(self.path_wh)
        elif data_source == 'ftz':  # fertilizer data
            df = util.open_or_die(self.path_ftz)

        # Insert columns so that we have data for each year
        idx = 1
        for yr in xrange(constants.GCAM_START_YR + 1, constants.GCAM_END_YR):
            # Skip years for which we already have data i.e. multiples of constants.GCAM_STEP_YR
            if yr % constants.GCAM_STEP_YR != 0:
                df.insert(constants.SKIP_GCAM_COLS + idx, str(yr), np.nan)
            idx += 1

        # Extract columns with information on GCAM regions
        gcam_df = df[['region', 'subsector']]

        # Fill in missing values, note that using interpolate
        df = df.ix[:, str(constants.GCAM_START_YR):str(constants.GCAM_END_YR)]
        df = df.interpolate(axis=1)

        # Concatenate
        df = pandas.concat([gcam_df, df], axis=1, join='inner')

        # Extract "Russia", "Central Asia", "EU-12", and "Europe-Eastern"  into a single larger region with region code 33
        merg_df = df.loc[df['region'].isin(
            ['Russia', 'Central Asia', 'EU-12', 'Europe-Eastern'])]

        # Create a new row with data for USSR or region code 33
        new_row = ['USSR', 'subsector']
        new_row.extend(merg_df.ix[:, 2:].sum().tolist())

        # Add newly created row to dataframe
        df.loc[len(df.index)] = np.array(new_row)
        # Group dataframe by region
        df = df.groupby('region').sum()
        # Remove the subsector column since it interferes with netCDF creation later
        df.drop('subsector', axis=1, inplace=True)

        # Read in GCAM region country mapping
        xdf = util.open_or_die(constants.gcam_dir + constants.GCAM_MAPPING)
        map_xdf = xdf.parse("Sheet1")
        df_dict = dict((z[0], list(z[1:])) for z in zip(
            map_xdf['country ISO code'], map_xdf['Modified GCAM Regions'],
            map_xdf['GCAM REGION NAME'],
            map_xdf['country-to-region WH ratios']))

        # Create WH output netCDF
        onc = util.open_or_die(
            constants.out_dir + out_nc_name + '_' +
            str(constants.GCAM_START_YR) + '_' + str(constants.GCAM_END_YR) +
            '.nc', 'w')

        # dimensions
        onc.createDimension('country_code', len(df_dict.keys()))
        onc.createDimension(
            'time', constants.GCAM_END_YR - constants.GCAM_START_YR + 1)

        # variables
        country_code = onc.createVariable('country_code', 'i4',
                                          ('country_code', ))
        time = onc.createVariable('time', 'i4', ('time', ))
        data = onc.createVariable(out_nc_name, 'f4', (
            'country_code',
            'time',
        ))

        # Metadata
        country_code.long_name = 'country_code'
        country_code.units = 'index'
        country_code.standard_name = 'country_code'

        time.units = 'year as %Y.%f'
        time.calendar = 'proleptic_gregorian'

        if data_source == 'wh':
            data.units = 'MgC'
            data.long_name = 'wood harvest carbon'
        elif data_source == 'ftz':
            print 'TODO!!'

        # Assign data
        time[:] = np.arange(constants.GCAM_START_YR, constants.GCAM_END_YR + 1)
        country_code[:] = sorted(df_dict.keys())

        for idx, ctr in enumerate(country_code[:]):
            # Get GCAM region corresponding to country
            gcam_reg = df_dict.get(ctr)[1]  # GCAM region identifier
            gcam_mul = df_dict.get(ctr)[2]  # GCAM country-to-region WH ratios

            try:
                # @TODO: Need to finalize woodharvest calculation
                # @TODO: Generalize for data other than wood harvest
                data[idx, :] = df.ix[gcam_reg].values.astype(
                    float) * 0.225 * constants.BILLION * gcam_mul
                # @TODO: Multiply by 1.3 to account for slash fraction
            except:  # GCAM region missing from the dataframe; fall back to zeros
                data[idx, :] = np.zeros(len(time[:]))

        onc.close()
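
GCAM data comes at a multi-year step, so the method above inserts the intermediate years as empty columns and fills them with pandas interpolate along each row. A small self-contained sketch of that fill on a toy frame (the 5-year step and the numbers are assumptions):

import numpy as np
import pandas as pd

# Toy GCAM-style table: data only every 5 years
df = pd.DataFrame({'2005': [10.0, 100.0], '2010': [20.0, 50.0], '2015': [40.0, 0.0]})

# Insert the missing years as NaN columns, keeping chronological order
for yr in range(2006, 2015):
    if yr % 5 != 0:
        df.insert(df.columns.get_loc(str(yr - 1)) + 1, str(yr), np.nan)

# Linear interpolation across years (row-wise)
df = df.interpolate(axis=1)
print(df[['2005', '2007', '2010', '2013', '2015']])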
Example #38
0
File: misc.py Project: ritviksahajpal/LUH2
def create_static_info_nc():
    ##########################################################################################################
    # Creating new static info file
    ##########################################################################################################
    if os.name == 'nt':
        path_inp_file = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\staticData_quarterdeg.nc'
        path_out_file = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\staticData_quarterdeg_out.nc'
    elif os.name == 'posix' or os.name == 'mac':
        path_inp_file = '/Users/ritvik/Documents/Projects/GLM/Input/LUH/v0.1/staticData_quarterdeg.nc'
        path_out_file = '/Users/ritvik/Documents/Projects/GLM/Input/LUH/v0.1/staticData_quarterdeg_out.nc'

    vba = util.open_or_die(constants.path_glm_vba)
    vba = vba / constants.AGB_TO_BIOMASS
    vba[vba < 0.0] = 0.0

    asc_vba = np.copy(vba)
    asc_vba[asc_vba < 2.0] = 0.0  # Biomass defn of forest: > 2.0 kg C/m^2
    asc_vba[asc_vba > 0.0] = 1.0
    fnf = asc_vba  # Boolean ascii file indicating whether it is forest(1.0)/non-forest(0.0)

    # copy netCDF
    # http://guziy.blogspot.com/2014/01/netcdf4-python-copying-variables-from.html
    now = datetime.datetime.now()

    # input file
    dsin = netCDF4.Dataset(path_inp_file)

    # output file
    dsout = netCDF4.Dataset(path_out_file, 'w')

    # Copy dimensions
    for dname, the_dim in dsin.dimensions.iteritems():
        dsout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None)

    # Copy variables
    for v_name, varin in dsin.variables.iteritems():
        print v_name
        if v_name == 'lat' or v_name == 'lon':
            outVar = dsout.createVariable(v_name, 'f8', varin.dimensions)
        else:
            outVar = dsout.createVariable(v_name, 'f4', varin.dimensions)

        # Copy variable attributes
        # outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})

        if v_name == 'ptbio':
            outVar[:] = vba[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'vegetation_carbon_content'
            outVar.long_name = 'potential biomass carbon content'
            outVar.units = 'kg m-2'
        elif v_name == 'fstnf':
            outVar[:] = fnf[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = ''
            outVar.long_name = 'mask denoting forest (1) or non-forest (0)'
            outVar.units = '1'
        elif v_name == 'carea':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = ''
            outVar.long_name = 'area of grid cell'
            outVar.units = 'km2'
        elif v_name == 'icwtr':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'area_fraction'
            outVar.long_name = 'ice/water fraction'
            outVar.units = '1'
        elif v_name == 'ccode':
            outVar[:] = varin[:]
            outVar._fillvalue = 1e20
            outVar.missing_value = 1e20
            outVar.standard_name = ''
            outVar.long_name = 'country codes'
            outVar.units = '1'
        elif v_name == 'lon':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'longitude'
            outVar.long_name = 'longitude'
            outVar.units = 'degrees_east'
            outVar.axis = 'X'
        elif v_name == 'lat':
            outVar[:] = varin[:]
            outVar.missing_value = 1e20
            outVar._fillvalue = 1e20
            outVar.standard_name = 'latitude'
            outVar.long_name = 'latitude'
            outVar.units = 'degrees_north'
            outVar.axis = 'Y'
        else:
            print '***'
            outVar[:] = varin[:]

    # Write global variables
    dsout.history = 'Processed: ' + str(now.strftime("%Y-%m-%dT%H:%M:%SZ"))
    dsout.host = 'UMD College Park'
    dsout.comment = 'LUH2'
    dsout.contact = '[email protected], [email protected], [email protected], [email protected]'
    dsout.creation_date = now.strftime("%Y %m %d %H:%M")
    dsout.title = 'Land Use Data Sets'
    dsout.activity_id = 'input4MIPs'
    dsout.Conventions = 'CF-1.6'
    dsout.data_structure = 'grid'
    dsout.source = 'LUH2-0-1: Land-Use Harmonization Data Set'
    dsout.source_id = 'LUH2-0-1'
    dsout.license = 'MIT'
    dsout.further_info_url = 'http://luh.umd.edu'
    dsout.frequency = 'yr'
    dsout.institution = 'University of Maryland College Park'
    dsout.realm = 'land'
    dsout.references = 'Hurtt, Chini et al. 2011'

    # close the output file
    dsout.close()
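
The fstnf layer above comes from thresholding potential biomass at 2 kg C/m2 after converting above-ground biomass to total biomass. A numpy sketch of the same masking with synthetic values (the 0.75 conversion factor is an illustrative assumption standing in for constants.AGB_TO_BIOMASS):

import numpy as np

AGB_TO_BIOMASS = 0.75                      # assumed ratio; not the real constant
agb = np.array([[0.5, 1.2], [2.5, 9.0]])   # above-ground biomass, kg C/m2

vba = agb / AGB_TO_BIOMASS                 # potential (total) biomass
vba[vba < 0.0] = 0.0

fnf = np.copy(vba)
fnf[fnf < 2.0] = 0.0   # biomass definition of forest: > 2.0 kg C/m2
fnf[fnf > 0.0] = 1.0   # boolean mask: forest (1) / non-forest (0)

print(fnf)             # [[0. 0.] [1. 1.]]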
Example #39
0
File: misc.py Project: ritviksahajpal/LUH2
def create_biofuel_nc():
    ##########################################################################################################
    # Creating new file on biofuel cft
    ##########################################################################################################
    if os.name == 'nt':
        path_hndl_biof = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\Biofuel\\biofuels.xls'
        path_hyde_crop = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\hyde_3.2\\quarter_deg_grids_incl_urban\\gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_cft_frac = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\other\\croptypes\\FAO_CFT_fraction.nc'
    elif os.name == 'posix':
        path_hndl_biof = '/Users/ritvik/Documents/Projects/GLM/Input/Biofuel/biofuels.xls'
        path_hyde_crop = '/Volumes/gel1/pukeko_restore/data/hyde3.2_june_23_2015/feb26_2016/hyde32_baseline/processed/gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_cft_frac = '/Volumes/gel1/data/glm_data/lumip_data/other/croptypes/FAO_CFT_fraction.nc'

    hndl_biof = util.open_or_die(path_hndl_biof)
    hndl_hyde_crop = util.open_or_die(path_hyde_crop)
    hndl_cft_frac = util.open_or_die(path_cft_frac)

    # Determine biofuel area/crop land area per grid cell over time
    sheets_biof = hndl_biof.sheet_names

    # Country code map
    map_ccodes = np.genfromtxt(constants.CNTRY_CODES, skip_header=0, delimiter=' ')

    # List of country codes
    ccodes = pd.read_csv(constants.ccodes_file, header=None)[0].values

    start_cbio_yr = 2000

    # Loop through all crop types.
    # If a crop type is not present, set its value to nan for all countries.
    # If a crop type is present, set values from the Excel table for the listed countries; others get nan.
    # The HYDE cropland file read above provides the cropland fraction used to normalize biofuel areas.
    # Initialize nc file
    out_nc = constants.path_glm_output + os.sep + 'biofuel.nc'
    nc_data = netCDF4.Dataset(out_nc, 'w', 'NETCDF4_CLASSIC')
    nc_data.description = ''

    # dimensions
    START_YR = 850
    tme = np.arange(START_YR, 2015 + 1)

    nc_data.createDimension('time', np.shape(tme)[0])
    nc_data.createDimension('country', len(ccodes))

    # Populate and output nc file
    time = nc_data.createVariable('time', 'i4', ('time',), fill_value=0.0)
    country = nc_data.createVariable('country', 'i4', ('country',), fill_value=0.0)

    # Assign units and other metadata
    time.units = 'year as %Y.%f'
    time.calendar = 'proleptic_gregorian'
    country.units = 'ISO country code'

    # Assign values to dimensions and data
    time[:] = tme
    country[:] = ccodes

    c3ann_cbio_frac = nc_data.createVariable('c3ann_cbio_frac', 'f4', ('time', 'country',))
    c4ann_cbio_frac = nc_data.createVariable('c4ann_cbio_frac', 'f4', ('time', 'country',))
    c3per_cbio_frac = nc_data.createVariable('c3per_cbio_frac', 'f4', ('time', 'country',))
    c4per_cbio_frac = nc_data.createVariable('c4per_cbio_frac', 'f4', ('time', 'country',))
    c3nfx_cbio_frac = nc_data.createVariable('c3nfx_cbio_frac', 'f4', ('time', 'country',))

    c3ann_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c3ann_cbio_frac.long_name = 'C3 annual crops grown as biofuels'

    c4ann_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c4ann_cbio_frac.long_name = 'C4 annual crops grown as biofuels'

    c3per_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c3per_cbio_frac.long_name = 'C3 perennial crops grown as biofuels'

    c4per_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c4per_cbio_frac.long_name = 'C4 perennial crops grown as biofuels'

    c3nfx_cbio_frac.units = 'fraction of crop type area occupied by biofuel crops'
    c3nfx_cbio_frac.long_name = 'C3 nitrogen-fixing crops grown as biofuels'

    carea = util.open_or_die(constants.path_glm_carea)

    # Assign all values to 0.0
    c4ann_cbio_frac[:, :] = 0.0
    c4per_cbio_frac[:, :] = 0.0
    c3nfx_cbio_frac[:, :] = 0.0
    c3ann_cbio_frac[:, :] = 0.0
    c3per_cbio_frac[:, :] = 0.0

    # [u'country code', u'country name', 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015]
    # C4ANN
    print 'C4ANN'
    df_c4ann = hndl_biof.parse('c4ann')
    for row in df_c4ann.iterrows():
        for idx, yr in enumerate(range(start_cbio_yr, 2015 + 1)):
            crop_frac = hndl_hyde_crop.variables['cropland'][start_cbio_yr - 850 + idx, :, :]
            for idy, cntr in enumerate(ccodes):
                if cntr == row[1]['country code']:
                    global_cft_frac = crop_frac * hndl_cft_frac.variables['C4annual'][start_cbio_yr - 850 + idx, np.where(ccodes == cntr)[0][0]].data
                    mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)

                    sum_area = (global_cft_frac * carea * mask_cntr).sum()
                    frac = row[1][yr]/sum_area
                    if frac <= 1.0:
                        c4ann_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = row[1][yr]/sum_area  # maize
                    else:
                        if row[1][yr] / (crop_frac * carea * mask_cntr).sum() <= 1.0:
                            c4ann_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = row[1][yr]/(crop_frac * carea * mask_cntr).sum()
                        else:
                            c4ann_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = 1.0

    # C4PER
    print 'C4PER'
    df_c4per = hndl_biof.parse('c4per')
    for row in df_c4per.iterrows():
        for idx, yr in enumerate(range(start_cbio_yr, 2015 + 1)):
            crop_frac = hndl_hyde_crop.variables['cropland'][start_cbio_yr - 850 + idx, :, :]
            for idy, cntr in enumerate(ccodes):
                if cntr == row[1]['country code']:
                    global_cft_frac = crop_frac * hndl_cft_frac.variables['C4perennial'][start_cbio_yr - 850 + idx, np.where(ccodes == cntr)[0][0]].data
                    mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)

                    sum_area = (global_cft_frac * carea * mask_cntr).sum()
                    frac = row[1][yr]/sum_area

                    if frac <= 1.0:
                        c4per_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = row[1][yr]/sum_area  # sugarcane
                    else:
                        if row[1][yr]/(crop_frac * carea * mask_cntr).sum() <= 1.0:
                            c4per_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = row[1][yr]/(crop_frac * carea * mask_cntr).sum()
                        else:
                            c4per_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = 1.0
    # C3NFX
    df_c3nfx = hndl_biof.parse('c3nfx')
    print 'C3NFX'
    for row in df_c3nfx.iterrows():
        for idx, yr in enumerate(range(start_cbio_yr, 2015 + 1)):
            crop_frac = hndl_hyde_crop.variables['cropland'][start_cbio_yr - 850 + idx, :, :]
            for idy, cntr in enumerate(ccodes):
                if cntr == row[1]['country code']:
                    global_cft_frac = crop_frac * hndl_cft_frac.variables['N-fixing'][start_cbio_yr - 850 + idx, np.where(ccodes == cntr)[0][0]].data
                    mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)

                    sum_area = (global_cft_frac * carea * mask_cntr).sum()
                    frac = row[1][yr]/sum_area
                    if frac <= 1.0:
                        c3nfx_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = frac  # soybean
                    else:
                        if row[1][yr] / (crop_frac * carea * mask_cntr).sum() <= 1.0:
                            c3nfx_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = row[1][yr]/(crop_frac * carea * mask_cntr).sum()
                        else:
                            c3nfx_cbio_frac[idx + start_cbio_yr - START_YR, np.where(ccodes == cntr)[0][0]] = 1.0

    nc_data.close()
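
Each per-country biofuel entry is converted to a fraction of that country's crop-type area, falling back to total cropland area and finally clamping at 1.0 when the reported area exceeds what the maps can hold. A reduced numpy sketch of that logic with toy grids (the 3x3 shapes, country mask and the 2500 km2 figure are invented):

import numpy as np

carea = np.full((3, 3), 1000.0)    # cell area, km2
crop_frac = np.full((3, 3), 0.4)   # cropland fraction per cell
cft_frac = crop_frac * 0.5         # share of cropland in one crop functional type
mask_cntr = np.zeros((3, 3))
mask_cntr[0, :] = 1.0              # cells belonging to the country

biofuel_area = 2500.0              # reported biofuel area for this country, km2

sum_cft_area = (cft_frac * carea * mask_cntr).sum()  # 3 * 1000 * 0.2 = 600
frac = biofuel_area / sum_cft_area
if frac > 1.0:
    # Fall back to total cropland area, then clamp at 1.0
    frac = min(biofuel_area / (crop_frac * carea * mask_cntr).sum(), 1.0)

print(frac)  # 2500/600 > 1 and 2500/1200 > 1, so clamped to 1.0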
Example #40
0
def process_MIAMI_LU():
    """
    Convert MIAMI LU AGB and NPP estimates to quarter degree
    :return:
    """
    # Read in MIAMI-LU data
    mlu_data = util.open_or_die(constants.miami_lu_nc)

    # Get last year for which biomass is extracted
    lyr = mlu_data.variables['biomass'].shape[0] - 1

    # Extract agb and npp layers and convert to quarter degree format
    mlu_vba = mlu_data.variables['biomass'][lyr, :, :]
    mlu_vba = mlu_vba.repeat(2, 0).repeat(2, 1)
    mlu_npp = mlu_data.variables['aa_NPP'][lyr, :, :]
    mlu_npp = mlu_npp.repeat(2, 0).repeat(2, 1)

    # Output
    numpy.savetxt(constants.out_dir + 'miami_vba_quarter_deg.txt',
                  mlu_vba,
                  fmt='%1.1f')
    numpy.savetxt(constants.out_dir + 'miami_npp_quarter_deg.txt',
                  mlu_npp,
                  fmt='%1.1f')

    # Read in previous estimates
    prev_mlu_npp = util.open_or_die(constants.miami_npp)
    prev_mlu_vba = util.open_or_die(constants.miami_vba)

    # Compare
    diff_vba = mlu_vba - prev_mlu_vba
    diff_npp = mlu_npp - prev_mlu_npp

    # Plot
    xaxis_min, xaxis_max, xaxis_step = util.get_ascii_plot_parameters(mlu_vba)
    plot.plot_ascii_map(mlu_vba,
                        constants.out_dir,
                        xaxis_min,
                        xaxis_max,
                        xaxis_step,
                        plot_type='sequential',
                        xlabel=r'$Biomass\ (kg\ C/m^{2})$',
                        title='',
                        var_name='Biomass',
                        skiprows=0)

    xaxis_min, xaxis_max, xaxis_step = util.get_ascii_plot_parameters(
        prev_mlu_vba)
    plot.plot_ascii_map(prev_mlu_vba,
                        constants.out_dir,
                        0.0,
                        27.0,
                        3.0,
                        plot_type='sequential',
                        xlabel=r'$Biomass\ (kg\ C/m^{2})$',
                        title='',
                        var_name='prev_Biomass',
                        skiprows=0)

    xaxis_min, xaxis_max, xaxis_step = util.get_ascii_plot_parameters(mlu_npp)
    plot.plot_ascii_map(mlu_npp,
                        constants.out_dir,
                        xaxis_min,
                        xaxis_max,
                        xaxis_step,
                        plot_type='sequential',
                        xlabel=r'$NPP\ (kg\ C/m^{2})$',
                        title='',
                        var_name='NPP',
                        skiprows=0)

    # Difference
    xaxis_min, xaxis_max, xaxis_step = util.get_ascii_plot_parameters(diff_vba)
    plot.plot_ascii_map(diff_vba,
                        constants.out_dir,
                        xaxis_min,
                        xaxis_max,
                        xaxis_step,
                        plot_type='diverging',
                        xlabel=r'$(New - Old)\ Biomass\ (kg\ C/m^{2})$',
                        title='',
                        var_name='Difference_Biomass',
                        skiprows=0)

    xaxis_min, xaxis_max, xaxis_step = util.get_ascii_plot_parameters(diff_npp)
    plot.plot_ascii_map(diff_npp,
                        constants.out_dir,
                        xaxis_min,
                        xaxis_max,
                        xaxis_step,
                        plot_type='diverging',
                        xlabel=r'$(New - Old)\ NPP\ (kg\ C/m^{2})$',
                        title='',
                        var_name='Difference_NPP',
                        skiprows=0)
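
The half-degree MIAMI-LU layers are regridded to quarter degree above simply by repeating each value twice along both axes. A tiny numpy sketch of that nearest-neighbour refinement (the 2x2 input is synthetic):

import numpy as np

half_deg = np.array([[1.0, 2.0],
                     [3.0, 4.0]])

# Each coarse cell becomes a 2x2 block of identical quarter-degree cells
quarter_deg = half_deg.repeat(2, 0).repeat(2, 1)
print(quarter_deg.shape)  # (4, 4)
print(quarter_deg)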
Example #41
0
File: misc.py Project: ritviksahajpal/LUH2
def create_tillage_nc(fao_tillage_yr=1973):
    if os.name == 'nt':
        path_hyde_crop = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\hyde_3.2\\quarter_deg_grids_incl_urban\\gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_till = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\Management\\Tillage\\aquastat.xlsx'
        path_cft_frac = 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\lumip_data\\other\\croptypes\\FAO_CFT_fraction.nc'
    elif os.name == 'posix' or os.name == 'mac':
        # Read in tillage dataset
        path_hyde_crop = '/Volumes/gel1/pukeko_restore/data/hyde3.2_june_23_2015/feb26_2016/hyde32_baseline/processed/gcrop_850_2015_quarterdeg_incl_urban.nc'
        path_till = '/Users/ritvik/Documents/Projects/GLM/Input/Management/Tillage/aquastat.xlsx'
        path_cft_frac = '/Volumes/gel1/data/glm_data/lumip_data/other/croptypes/FAO_CFT_fraction.nc'

    hndl_hyde_crop = util.open_or_die(path_hyde_crop)
    hndl_till = util.open_or_die(path_till)
    hndl_cft_frac = util.open_or_die(path_cft_frac)

    # Country code map
    map_ccodes = np.genfromtxt(constants.CNTRY_CODES, skip_header=0, delimiter=' ')
    carea = util.open_or_die(constants.path_glm_carea)

    df = hndl_till.parse('processed_tillage')

    # Create dataframe with columns from 850 to 1973 with all 0's
    cols = np.arange(850, fao_tillage_yr + 1)
    df_ = pd.DataFrame(index=df.index, columns=cols)
    df_ = df_.fillna(0)  # with 0s rather than NaNs

    # Concatenate all years from 850 to 2015
    df = pd.concat([df_, df], axis=1)

    # Interpolate across the years
    df = pd.concat([df[['country code', 'country name']],
                    df.filter(regex='^8|9|1|2').interpolate(axis=1)], axis=1)
    out_nc = constants_glm.path_glm_output + os.sep + 'national_tillage_data_850_2015_new.nc'
    nc_data = util.open_or_die(out_nc, perm='w', format='NETCDF4_CLASSIC')
    nc_data.description = ''

    # dimensions
    tme = np.arange(850, 2015 + 1)

    # country codes
    ccodes = pd.read_csv(constants_glm.ccodes_file, header=None)
    ccodes.columns = ['country code']
    contcodes = pd.read_csv(constants_glm.contcodes_file, header=None)
    contcodes.columns = ['continent code']
    lup_codes = pd.concat([ccodes, contcodes], axis=1)

    nc_data.createDimension('time', np.shape(tme)[0])
    nc_data.createDimension('country', len(ccodes))

    # Populate and output nc file
    time = nc_data.createVariable('time', 'i4', ('time',), fill_value=0.0)
    country = nc_data.createVariable('country', 'i4', ('country',), fill_value=0.0)

    # Assign units and other metadata
    time.units = 'year as %Y.%f'
    time.calendar = 'proleptic_gregorian'
    country.units = 'ISO country code'

    # Assign values to dimensions and data
    time[:] = tme
    country[:] = ccodes.values

    tillage = nc_data.createVariable('tillage', 'f4', ('time', 'country',))
    tillage[:, :] = 0.0  # Assign all values to 0.0
    tillage.units = 'fraction of cropland area'

    # Loop over all countries
    for index, row in lup_codes.iterrows():
        # Find row containing country in df
        row_country = df[df['country code'] == row['country code']]

        if len(row_country):
            cntr = row_country.values[0][0]
            mask_cntr = np.where(map_ccodes == cntr, 1.0, 0.0)
            idx_cntr = np.where(ccodes == cntr)[0][0]

            # Iterate over years
            for idx, yr in enumerate(range(fao_tillage_yr, 2015 + 1)):
                # Get fraction of cell area that is cropland
                crop_frac = hndl_hyde_crop.variables['cropland'][fao_tillage_yr - 850 + idx, :, :]

                # Get fraction of cell area that is (C4 Annual + C3 Annual + N-fixing)
                cft_frac = crop_frac * \
                           (hndl_cft_frac.variables['C4annual'][fao_tillage_yr - 850 + idx, idx_cntr].data +
                            hndl_cft_frac.variables['C3annual'][fao_tillage_yr - 850 + idx, idx_cntr].data +
                            hndl_cft_frac.variables['N-fixing'][fao_tillage_yr - 850 + idx, idx_cntr].data)

                # Subset of cropland area (C4 Annual + C3 Annual + N-fixing) * mask applied to country * cell area
                sum_area = np.ma.sum(cft_frac * carea * mask_cntr)

                # Multiply by 100 to convert numerator from ha to km2
                frac = row_country.values[0][2 + fao_tillage_yr + idx - 850]/(100 * sum_area)
                if frac > 1.0:
                    tillage[idx + fao_tillage_yr - 850, idx_cntr] = 1.0
                else:
                    tillage[idx + fao_tillage_yr - 850, idx_cntr] = frac

    nc_data.close()
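
The factor of 100 in the tillage fraction above converts the table's hectare values to km2 (the grid-cell area units), since 1 km2 = 100 ha. A one-off arithmetic sketch with made-up numbers:

# Hypothetical example: 50,000 ha of tilled land reported for a country
# whose annual-crop area on the grid sums to 2,000 km2
tilled_ha = 50000.0
crop_area_km2 = 2000.0

frac = tilled_ha / (100.0 * crop_area_km2)  # 1 km2 = 100 ha
frac = min(frac, 1.0)                       # cap at full coverage, as in create_tillage_nc
print(frac)                                 # 0.25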
Example #42
0
File: plot.py Project: ritviksahajpal/LUH2
def plot_map_from_nc(path_nc, out_path, var_name, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                     annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time', show_plot=False,
                     any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False):
    """
    Plot var_name variable from netCDF file

    \b
    Args:
        path_nc: Name of netCDF file including path
        out_path: Output directory path + file name
        var_name: Name of variable in netCDF file to plot on map

    Returns:
        Nothing, side-effect: save an image
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)

    # Read netCDF file and get time dimension
    nc = util.open_or_die(path_nc, 'r', format='NETCDF4')
    lon = nc.variables['lon'][:]
    lat = nc.variables['lat'][:]

    if any_time_data:
        ts = nc.variables[tme_name][:]  # time-series
        if date == -1:  # Plot either the last year {len(ts)-1} or whatever year the user wants
            plot_yr = len(ts) - 1
        else:
            plot_yr = date - ts[0]

    # Draw empty basemap
    m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0)
    # m.drawcoastlines()
    # m.drawcountries()

    # Find x,y of map projection grid.
    lons, lats = np.meshgrid(lon, lat)
    x, y = m(lons, lats)
    if fill_mask:
        nc_vars = np.ma.filled(nc.variables[var_name], fill_value=np.nan)
    else:
        nc_vars = np.array(nc.variables[var_name])

    # Plot
    # Get data for the last year from the netCDF file array
    if any_time_data:
        mask_data = maskoceans(lons, lats, nc_vars[int(plot_yr), :, :])
    else:
        mask_data = maskoceans(lons, lats, nc_vars[:, :])

    m.etopo()
    if land_bg:
        m.drawlsmask(land_color='white', ocean_color='none', lakes=True)  # land_color = (0, 0, 0, 0) for transparent
    else:
        m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True)

    cs = m.contourf(x, y, mask_data, np.arange(xaxis_min, xaxis_max, xaxis_step), cmap=cmap)

    if annotate_date:
        plt.annotate(str(yr), xy=(0.45, 0.1), xycoords='axes fraction', size=20)

    if grid:
        # where labels intersect = [left, right, top, bottom]
        m.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,1,0], labelstyle='+/-', linewidth=0.5)
        m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5)

    # Add colorbar
    cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='proportional',
                    format=format)
    cb.set_label(xlabel)
    plt.title(title, y=1.08)

    plt.tight_layout()
    if not show_plot:
        plt.savefig(out_path, dpi=constants.DPI)
        plt.close()
    else:
        plt.show()

    nc.close()

    return out_path
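
A hedged usage sketch of plot_map_from_nc as defined above: the file path, variable name and year are placeholders, and the call assumes the rest of this module (util, Basemap, constants, logger) is importable. It saves a single map of one variable for one year.

# Hypothetical call; 'states.nc' and 'primf' are placeholders for a real LUH2 states file/variable
plot_map_from_nc(path_nc='states.nc',
                 out_path='primf_2015.png',
                 var_name='primf',
                 xaxis_min=0.0, xaxis_max=1.05, xaxis_step=0.05,
                 annotate_date=True, yr=2015, date=2015,
                 xlabel='Fraction of grid cell',
                 title='Primary forest fraction',
                 land_bg=False, grid=True)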