예제 #1
0
    def mapping(self, crop_id, covariate_rasters, conn, covariate_root_dir,
                suit_root_dir):
        '''
        Generate suitability rasters for one crop.

        For each covariate raster, reclassify its values into suitability
        levels using the 'suitability_rule' table and write a per-covariate
        suitability raster; then combine the stack into:
          - '<crop_id>_suitability.tif'          (max level per pixel)
          - '<crop_id>_worst_dominant.tif'       (dominant worst covariate)
          - '<crop_id>_worst_count.tif'          (count of worst covariates)
          - '<crop_id>_worst_dominant_legend.csv'

        crop_id:            crop identifier used in DB queries and file names
        covariate_rasters:  list of covariate raster paths; Windows-style
                            '\\' separators are assumed by the path handling
        conn:               DB connection usable as a context manager; the
                            bound object must support execute() with '?'
                            placeholders and fetchall()/fetchone()
        covariate_root_dir: root of the covariate tree; replaced by
                            suit_root_dir to mirror the folder structure
        suit_root_dir:      root folder for all outputs
        '''

        raster = Raster()
        # The last covariate raster serves as the reference grid and
        # supplies the no-data value applied to all outputs.
        self.no_data = raster.getNoDataValue(covariate_rasters[-1])
        ref_rst = covariate_rasters[-1]
        covariate_id_list = []
        suit_array_stack = []

        # Mirror the covariate folder layout under suit_root_dir.
        filepath = strip_end(ref_rst, ref_rst.split('\\')[-1])
        out_dir = join(
            suit_root_dir,
            strip_start(filepath, covariate_root_dir)[1:]
        )  # the [1:] is for removing the first and the last '\' of string

        if not os.path.exists(out_dir):
            os.makedirs(out_dir, exist_ok=True)

        for rst in covariate_rasters:

            # Base file name without extension.
            filename = rst.split('\\')[-1].split('.')[0]

            if len(filename.split('_')) == 1:
                # Plain '<covariate_id>' file name (no time span).
                covariate_id = filename
                covariate_id_list.append(covariate_id)
                out_raster = join(
                    out_dir,
                    'suitability_{}_{}.tif'.format(crop_id, covariate_id))
            else:
                # '<prefix>_<covariate_id>_..._<start>_<end>' file name;
                # the last two '_'-separated tokens form the time span.
                covariate_id = filename.split('_')[1]
                covariate_id_list.append(covariate_id)
                time_span = '{}_{}'.format(
                    filename.split('_')[-2],
                    filename.split('_')[-1])
                out_raster = join(
                    out_dir,
                    'suitability_{}_{}_{}.tif'.format(crop_id, covariate_id,
                                                      time_span))

            # Fetch the reclassification rules (ordered by level) and the
            # covariate's continuous/categorical flag for this crop.
            with conn as cur:
                rows = cur.execute(
                    "select * from suitability_rule where crop_id=? and covariate_id=? order by suitability_level",
                    (
                        crop_id,
                        covariate_id,
                    )).fetchall()
                is_continual = cur.execute(
                    "select * from Covariate where id=?",
                    (covariate_id, )).fetchone()['iscontinual']

            # If the query returns none then move to the next covariate
            if rows:
                covariate_array = raster.getRasterArray(
                    rst)  # array of the covariate

                # 'iscontinual' == 1 selects interval-based reclassification,
                # otherwise value-based (categorical) reclassification.
                if is_continual == 1:
                    suit_array = self.__reclassify_contianual__(
                        covariate_array, rows)
                else:
                    suit_array = self.__reclassify_catorgorical__(
                        covariate_array, rows)

                # Keep no-data pixels as no-data in the output.
                suit_array[np.where(
                    covariate_array == self.no_data)] = self.no_data

                raster.array2Raster(suit_array, ref_rst, out_raster)

                suit_array_stack.append(suit_array)
            else:
                print(
                    'Warning! Suitability ruleset for {}, {} not found! Please check the database.'
                    .format(crop_id, covariate_id))

        if len(suit_array_stack) > 0:
            # Overall suitability = max level across all covariates
            # (presumably higher level = less suitable -- confirm with the
            # suitability_rule table semantics).
            crop_suit_array = ExtractMaxValueOfStack(suit_array_stack)
            # Unify no-data: a pixel is masked if any layer marks it no-data.
            ref_array = homogenize_nodata_area(suit_array_stack, self.no_data)
            crop_suit_array[np.where(ref_array == self.no_data)] = self.no_data
            crop_suit_raster = join(out_dir,
                                    '{}_suitability.tif'.format(crop_id))
            raster.array2Raster(crop_suit_array, ref_rst, crop_suit_raster)

            print('create dominant worst covariate raster at {}...'.format(
                dt.datetime.now()))
            # Per pixel: which covariates reach level 4 (encoded raster
            # value), how many do, and a legend list for the CSV below.
            # NOTE(review): 4 is assumed to be the worst level -- confirm.
            worst_dominant_array, worst_count_array, worst_dominant_legend_list = ExtractMaxValueIndexBinaryOfStack(
                suit_array_stack, covariate_id_list, 4)

            worst_dominant_array[np.where(
                ref_array == self.no_data)] = self.no_data
            worst_dominant_raster_file = join(
                out_dir, '{}_worst_dominant.tif'.format(crop_id))
            raster.array2Raster(worst_dominant_array, ref_rst,
                                worst_dominant_raster_file)

            worst_count_array[np.where(
                ref_array == self.no_data)] = self.no_data
            worst_count_raster_file = join(
                out_dir, '{}_worst_count.tif'.format(crop_id))
            raster.array2Raster(worst_count_array, ref_rst,
                                worst_count_raster_file)

            # Legend CSV mapping encoded raster values to covariate sets.
            worst_dominant_legend_csv = join(
                out_dir, '{}_worst_dominant_legend.csv'.format(crop_id))
            csvw = CSVOperation.CSVWriting()
            headers = ['raster value', 'number of restriction', 'covariates']
            csvw.WriteLines(worst_dominant_legend_csv, headers,
                            worst_dominant_legend_list)
        else:
            print('Warning! No suitability map for {} was created!'.format(
                crop_id))
예제 #2
0
class ClimaticCovariates(object):
    '''
    Climatic covariates class
    '''
    def __init__(self, year_list, data_dir):
        '''
        Set up the covariate context for the given years and data folder.

        year_list: years taken into account when calculating climatic
                   covariates
        data_dir:  root folder scanned (recursively) for climate rasters;
                   the first .tif file found becomes the reference raster
        '''
        def find_reference_raster(root):
            # Return the first file under 'root' whose extension starts
            # with 'tif' (covers .tif and .tiff); None if there is none.
            for dirpath, _dirnames, names in walk(root):
                for name in names:
                    if name.split('.')[-1].lower()[:3] == 'tif':
                        return join(dirpath, name)

        self.years = year_list
        self.dir = data_dir
        self.raster = Raster()
        self.ref_raster = find_reference_raster(self.dir)
        self.ref_array = self.raster.getRasterArray(self.ref_raster)
        self.no_data = self.raster.getNoDataValue(self.ref_raster)

    def __GetFileList__(self, start_date, end_date, keyword):
        '''
        Return the list of daily climate files whose date falls inside the
        period [start_date, end_date] for the years in self.years.

        start_date/end_date: 'MMDD' strings ([:2] is the month, [-2:] is
                             the day)
        keyword:             matched against the last component of each
                             subfolder path (Windows '\\' separators);
                             based on the file/folder structure of the
                             climate data, one subfolder is assumed to
                             contain the keyword
        Files are expected to be named 'YYYY-MM-DD.<ext>'.

        If the start month is later than the end month, the period spans
        two natural years: files are taken from each year plus the
        following one, and the last year in self.years is skipped (it has
        no complete period).

        NOTE(review): 'd[0] == y' compares a path-derived string against an
        entry of self.years; this only matches when the years are strings,
        although the class docstring calls year_list numeric -- confirm
        against the caller.
        '''
        files = []
        for (subdirpath, subdirname, filenames) in walk(self.dir):
            if keyword in subdirpath.split('\\')[-1]:
                for f in filenames:
                    # get the date of each file
                    d = f.split('.')[0].split('-')
                    # if the month of the start date is later than that of the end date
                    # which means the given period is across two natural years
                    if int(start_date[:2]) > int(end_date[:2]):
                        for y in self.years[:-1]:
                            if (  # in that case
                                (  # get files of that year
                                    (d[0] == y) and
                                    (  # and the date after the start date
                                        (d[1] == start_date[:2] and
                                         int(d[-1]) >= int(start_date[-2:])) or
                                        (int(d[1]) > int(start_date[:2])))) or
                                (  # also get files of the next year
                                    (int(d[0]) == int(y) + 1) and
                                    (  # and the date before the end date
                                        (d[1] == end_date[:2] and
                                         int(d[-1]) <= int(end_date[-2:])) or
                                        (int(d[1]) < int(end_date[:2]))))):

                                files.append(join(subdirpath, f))
                    else:  # the given period in one natural year
                        for y in self.years:
                            if (  # get files of that year
                                (d[0] == y) and
                                (  # and the date in between the start and the end date
                                    (d[1] == start_date[:2]
                                     and int(d[-1]) >= int(start_date[-2:])) or
                                    (int(d[1]) > int(start_date[:2])
                                     and int(d[1]) < int(end_date[:2])) or
                                    (d[1] == end_date[:2]
                                     and int(d[-1]) <= int(end_date[-2:])))):

                                files.append(join(subdirpath, f))

        return files

    def __GetFileDictionary__(self, start_date, end_date, keyword):
        '''
        Group the files selected by __GetFileList__ by year.

        Returns a dict mapping each year in self.years to the list of files
        whose date lies in [start_date, end_date] ('MMDD' strings) for that
        year. When the period crosses the calendar boundary (start month
        later than end month), the period starting in 'year' also collects
        files dated 'year + 1', and the last year in self.years is skipped
        because it has no complete period.

        NOTE(review): like __GetFileList__, 'y == year' is a string
        comparison against self.years entries -- only matches when the
        years are strings; confirm against the caller.
        '''
        file_dict = {}
        file_list = self.__GetFileList__(start_date, end_date, keyword)

        # Cross-year period: each season spans 'year' and 'year + 1'.
        if int(start_date[:2]) > int(end_date[:2]):
            for year in self.years[:-1]:
                files = []
                for f in file_list:
                    # File names are 'YYYY-MM-DD.<ext>' (Windows paths).
                    y, m, d = f.split('\\')[-1].split('.')[0].split('-')
                    if (((y == year) and
                         ((m == start_date[:2]
                           and int(d) >= int(start_date[-2:])) or
                          (int(m) > int(start_date[:2])))) or
                        ((int(y) == int(year) + 1) and
                         ((m == end_date[:2] and int(d) <= int(end_date[-2:]))
                          or (int(m) < int(end_date[:2]))))):

                        files.append(f)
                file_dict[year] = files

        else:
            # Period within one natural year.
            for year in self.years:
                files = []
                for f in file_list:
                    y, m, d = f.split('\\')[-1].split('.')[0].split('-')
                    if ((y == year) and (
                        (m == start_date[:2]
                         and int(d) >= int(start_date[-2:])) or
                        (int(m) > int(start_date[:2])
                         and int(m) < int(end_date[:2])) or
                        (m == end_date[:2] and int(d) <= int(end_date[-2:])))):

                        files.append(f)
                file_dict[year] = files

        return file_dict

    def __ChillHoursModel__(self, tmin_array, tmax_array, base_min, base_max):
        '''
        The model of simulating chill hours based on daily temperature data.
        
        *Note: 1. the original model has an issue on the denominator (tave_array - tmin_array) 
                  of the algorithm, when it is equel to 0. 
               2. some pixels of the daily temperature data have abnormal values e.g. 
                  the min is greater than the max (e.g. pixel [43, 180] from 1971-05-01).
               So in this function a reset (when abnormal values occur) of min temperature is 
               coded at the beginning, to eliminate the effect. But this may result in other 
               issues such as an unexpected result.
               
               3. When negtive chill hours occur set it to 0 (may not the correct way)
        '''
        tmin_array = np.where(tmin_array >= tmax_array, tmax_array - 1,
                              tmin_array)
        tave_array = (tmin_array + tmax_array) / 2
        daychill_array_A = np.where(
            tmax_array > base_max,
            2 * 6 * (base_max - tmin_array) / (tave_array - tmin_array), 24)
        daychill_array_B = np.where(
            tmin_array < base_min,
            2 * 6 * (base_min - tmin_array) / (tave_array - tmin_array), 0)

        daychill_array = daychill_array_A - daychill_array_B
        daychill_array = np.where(daychill_array > 0, daychill_array, 0)

        return daychill_array