Beispiel #1
0
    def aspect(self,
               outfile=None,
               file_format='GTiff',
               compute_edges=True,
               band=1,
               algorithm='ZevenbergenThorne',
               zero_for_flat=True,
               trigonometric=False,
               **creation_options):
        """
        Compute an aspect raster from the DEM.
        :param outfile: Output file name; if None, derived from self.name
                        with an '_ASPECT' suffix
        :param file_format: Output file format (default: 'GTiff')
        :param compute_edges: Whether values at the raster edges should be
                              computed as well. Edge results may be incomplete
                              and can show artifacts when mosaicking.
        :param band: Band index to use (default: 1)
        :param algorithm: Gradient algorithm to use
                          valid options:
                             4-neighbor: 'ZevenbergenThorne'
                             8-neighbor: 'Horn'
        :param zero_for_flat: Whether to return 0 for flat areas with slope=0,
                              instead of -9999.
        :param trigonometric: Whether to return trigonometric angle instead of
                              azimuth. Here 0deg will mean East, 90deg North,
                              180deg West, 270deg South.
        :param creation_options: Valid creation options examples:
                                 compress='LZW'
                                 bigtiff='yes'
        """
        if not self.init:
            self.initialize()

        if outfile is None:
            outfile = Handler(self.name).add_to_filename('_ASPECT')
            outfile = Handler(outfile).file_remove_check()

        # GDAL expects creation options as a list of 'KEY=value' strings
        creation_option_list = ['{}={}'.format(str(key), str(value))
                                for key, value in creation_options.items()]

        options = gdal.DEMProcessingOptions(
            format=file_format,
            computeEdges=compute_edges,
            creationOptions=creation_option_list,
            alg=algorithm,
            band=band,
            zeroForFlat=zero_for_flat,
            trigonometric=trigonometric)

        result = gdal.DEMProcessing(outfile,
                                    self.datasource,
                                    'aspect',
                                    options=options)
        result = None
Beispiel #2
0
    def slope(self,
              outfile=None,
              slope_format='degree',
              file_format='GTiff',
              compute_edges=True,
              band=1,
              scale=None,
              algorithm='ZevenbergenThorne',
              **creation_options):
        """
        Compute a slope raster from the DEM.
        :param outfile: Output file name; if None, derived from self.name
                        with a '_SLOPE' suffix
        :param slope_format: format of the slope raster
                             (valid options: 'degree', or 'percent')
        :param file_format: Output file format (default: 'GTiff')
        :param compute_edges: Whether values at the raster edges should be
                              computed as well. Edge results may be incomplete
                              and can show artifacts when mosaicking.
        :param band: Band index to use (default: 1)
        :param scale: ratio of vertical to horizontal units
        :param algorithm: slope algorithm to use
                          valid options:
                             4-neighbor: 'ZevenbergenThorne'
                             8-neighbor: 'Horn'
        :param creation_options: Valid creation options examples:
                                 compress='LZW'
                                 bigtiff='yes'
        """
        if not self.init:
            self.initialize()

        if outfile is None:
            outfile = Handler(self.name).add_to_filename('_SLOPE')
            outfile = Handler(outfile).file_remove_check()

        # GDAL expects creation options as 'KEY=VALUE' strings;
        # this method upper-cases both key and value
        creation_option_list = ['{}={}'.format(str(key).upper(),
                                               str(value).upper())
                                for key, value in creation_options.items()]

        options = gdal.DEMProcessingOptions(
            format=file_format,
            computeEdges=compute_edges,
            alg=algorithm,
            slopeFormat=slope_format,
            band=band,
            scale=scale,
            creationOptions=creation_option_list)

        result = gdal.DEMProcessing(outfile,
                                    self.datasource,
                                    'slope',
                                    options=options)
        result = None
Beispiel #3
0
    def save_to_file(self, out_file):
        """
        Write the sample matrix and labels of this object to a csv file.
        :param out_file: CSV file full path (string)
        :return: None; writes to file
        """
        # append the label column to the feature matrix
        data = np.column_stack((self.x, self.y))
        header = list(self.x_name) + [self.y_name]

        Handler(out_file).write_numpy_array_to_file(np_array=data,
                                                    colnames=header)
Beispiel #4
0
    def roughness(self,
                  outfile=None,
                  file_format='GTiff',
                  compute_edges=True,
                  band=1,
                  **creation_options):
        """
        Compute a DEM roughness raster.
        :param outfile: Output file name; if None, derived from self.name
                        with a '_ROUGHNESS' suffix
        :param file_format: Output file format (default: 'GTiff')
        :param compute_edges: Whether values at the raster edges should be
                              computed as well. Edge results may be incomplete
                              and can show artifacts when mosaicking.
        :param band: Band index to use (default: 1)
        :param creation_options: Valid creation options examples:
                                 compress='LZW'
                                 bigtiff='yes'
        """
        if not self.init:
            self.initialize()

        if outfile is None:
            outfile = Handler(self.name).add_to_filename('_ROUGHNESS')
            outfile = Handler(outfile).file_remove_check()

        # GDAL expects creation options as a list of 'KEY=value' strings
        creation_option_list = ['{}={}'.format(str(key), str(value))
                                for key, value in creation_options.items()]

        options = gdal.DEMProcessingOptions(
            format=file_format,
            computeEdges=compute_edges,
            creationOptions=creation_option_list,
            band=band)

        result = gdal.DEMProcessing(outfile,
                                    self.datasource,
                                    'Roughness',
                                    options=options)
        result = None
Beispiel #5
0
 def __repr__(self):
     """
     Build a human-readable representation of the Samples object.
     :return: Samples class representation (str)
     """
     # csv-backed object: report source file, variable count and label
     if self.csv_file is not None:
         return "<Samples object from {cf} with {v} variables, label: {l}, {n} samples>".format(
             cf=Handler(self.csv_file).basename,
             v=self.x.shape[1],
             l=self.y_name,
             n=self.x.shape[0])

     # array-backed object without a csv source
     if self.x is not None:
         return "<Samples object with {v} variables, {n} samples>".format(
             v=self.x.shape[1], n=self.x.shape[0])

     return "<Samples object: EMPTY>"
Beispiel #6
0
    def sample_matrix(self):
        """
        Convert the list of sample dictionaries to a sample matrix.

        Populates self.matrix with a 2D numpy array where rows are
        individual samples and columns are the dimensions listed in
        self.names (in that order).
        :return: None; sets self.matrix
        :raises ValueError: if there are fewer than two samples
        """
        # dimensions of the sample matrix
        nsamp = len(self.samples)
        nvar = len(self.names)

        if nsamp > 1:
            # copy data to matrix; iterate over the locally computed nsamp
            # (the original mixed it with self.nsamp, which may be stale if
            # self.samples changed after construction)
            self.matrix = np.array([[
                Handler.string_to_type(self.samples[i][self.names[j]])
                for j in range(0, nvar)
            ] for i in range(0, nsamp)])
        else:
            raise ValueError('Not enough samples to make a matrix object')
Beispiel #7
0
    def __init__(self, samples=None, names=None, csv_file=None, index=None):
        """
        Class constructor
        :param samples: List of sample dictionaries
        :param names: Name of the columns or dimensions
        :param csv_file: path of the csv file with sample data in columns
        :param index: Name of index column
        :return: _Distance object
        """
        # NOTE: the original unconditionally ran len(samples) here, which
        # raised TypeError whenever samples was None (its default) even when
        # a csv_file was supplied; nsamp is now set only in the branches below.
        self.samples = samples
        self.names = names
        self.csv_file = csv_file
        self.index = index

        if samples is not None:
            self.samples = samples
            self.nsamp = len(samples)

        elif csv_file is not None:
            # load sample dictionaries from the csv file
            self.samples = Handler(filename=csv_file).read_from_csv(
                return_dicts=True)
            self.nsamp = len(self.samples)
        else:
            warnings.warn('Empty Samples class initialized')
            self.samples = None
            self.nsamp = 0

        # default integer index over the samples
        if self.nsamp > 0:
            self.index = list(range(self.nsamp))
        else:
            self.index = list()

        if names is not None:
            self.names = names
        elif self.samples is not None:
            # infer dimension names from the first sample's keys
            self.names = list(self.samples[0])
        else:
            self.names = list()

        # computed lazily elsewhere (e.g. sample_matrix)
        self.matrix = None
        self.center = None
Beispiel #8
0
    def __init__(self,
                 csv_file=None,
                 label_colname=None,
                 x=None,
                 y=None,
                 x_name=None,
                 y_name=None,
                 weights=None,
                 weights_colname=None,
                 use_band_dict=None,
                 max_allow_x=1e13,
                 max_allow_y=1e13,
                 line_limit=None,
                 remove_null=True,
                 **kwargs):
        """
        :param csv_file: csv file that contains the features (training or validation samples)
        :param label_colname: column in csv file that contains the feature label (output value)
        :param x: 2d array containing features (samples) without the label
        :param y: 1d array of feature labels (same order as x)
        :param x_name: 1d array of feature names (bands).
                       Can be used to select which columns to read from csv  file.
        :param y_name: name of label
        :param weights: 1d array of sample weights (same order as x)
        :param weights_colname: column in csv file containing sample weights
        :param use_band_dict: list of attribute (band) names
        :param max_allow_x: Maximum allowed values of x
        :param max_allow_y: Maximum allowed value of y
        :param line_limit: maximum number of csv lines to read
        :param remove_null: whether rows containing null-like or NaN values
                            should be dropped when reading from csv
        :param kwargs: optional keywords: 'columns' (data column indices),
                       'ids' (sample IDs)
        """

        def _drop_null_rows(sample_dicts):
            # Keep only dictionaries with no null-like values and no NaNs.
            # (The original tested `elem in (int, float)`, which compares the
            # value against the type objects themselves and is always False,
            # so the NaN check never ran.)
            null_tokens = (None, '', ' ', 'null', 'NULL', '<null>', '<NULL>')
            cleaned = []
            for samp_dict in sample_dicts:
                has_null = any(
                    (val in null_tokens) or
                    (isinstance(val, (int, float)) and np.isnan(val))
                    for val in samp_dict.values())
                if not has_null:
                    cleaned.append(samp_dict)
            return cleaned

        self.csv_file = csv_file
        self.label_colname = label_colname

        # accept any iterable for x/y; store as numpy arrays
        if type(x).__name__ in ('ndarray', 'NoneType'):
            self.x = x
        else:
            self.x = np.array(list(x))

        self.x_name = x_name

        if type(y).__name__ in ('ndarray', 'NoneType'):
            self.y = y
        else:
            self.y = np.array(list(y))

        self.y_name = y_name

        self.weights = weights
        self.weights_colname = weights_colname
        self.use_band_dict = use_band_dict

        self.index = None
        self.nfeat = None

        self.xmin = None
        self.xmax = None
        self.ymin = None
        self.ymax = None

        self.y_hist = None
        self.y_bin_edges = None
        self.x_hist = None
        self.x_bin_edges = None

        self.max_allow_x = max_allow_x
        self.max_allow_y = max_allow_y

        # label name and csv file are both provided: read features and labels
        if (label_colname is not None) and (csv_file is not None):

            temp = Handler(filename=csv_file).read_from_csv(
                return_dicts=True, line_limit=line_limit)
            header = list(temp[0])

            # label name doesn't match
            if label_colname in header:
                loc = header.index(label_colname)
            else:
                raise ValueError("Label name mismatch.\nAvailable names: " +
                                 ', '.join(header))

            feat_names = header.copy()
            _ = feat_names.pop(loc)

            # restrict to the requested feature names, if provided
            if self.x_name is not None and type(self.x_name) in (list, tuple):
                self.x_name = [
                    elem for elem in feat_names if elem in self.x_name
                ]
            else:
                self.x_name = feat_names

            clean_list = _drop_null_rows(temp) if remove_null else temp

            self.x = np.array(
                list(
                    list(samp_dict[feat_name] for feat_name in feat_names)
                    for samp_dict in clean_list))
            self.y = np.array(
                list(samp_dict[label_colname] for samp_dict in clean_list))
            self.y_name = label_colname

            # if band name dictionary is provided
            if use_band_dict is not None:
                self.y_name = [use_band_dict[b] for b in self.y_name]

        # csv file only: read features without a label column
        elif (label_colname is None) and (csv_file is not None):
            temp = Handler(filename=csv_file).read_from_csv(
                return_dicts=True, line_limit=line_limit)

            clean_list = _drop_null_rows(temp) if remove_null else temp

            # read from data dictionary
            feat_names = list(clean_list[0].keys())
            if self.x_name is not None and type(self.x_name) in (list, tuple):
                self.x_name = [
                    elem for elem in feat_names if elem in self.x_name
                ]
            else:
                self.x_name = feat_names
            self.x = np.array(
                list(
                    list(samp_dict[feat_name] for feat_name in self.x_name)
                    for samp_dict in clean_list))

        else:
            warnings.warn(
                "Samples class initiated without data file and/or label",
                category=RuntimeWarning,
                stacklevel=1)

        # default names when arrays were supplied directly
        if self.x is not None and self.y is not None:
            if self.y_name is None:
                self.y_name = 'y'
            if (self.x_name is None) or \
                    (type(self.x_name) not in (list, tuple)) or \
                    (len(self.x_name) != self.x.shape[1]):
                self.x_name = list('x{}'.format(str(i + 1))
                                   for i in range(self.x.shape[1]))

        # extract the weights column out of x, if requested
        if weights is None:
            if weights_colname is not None:
                if csv_file is not None:

                    # exact-name match (the original substring test could
                    # succeed while list.index() on the exact name then failed)
                    if weights_colname in self.x_name:
                        loc = self.x_name.index(weights_colname)
                    else:
                        raise ValueError("Weight column name mismatch")

                    self.weights = self.x[:, loc]
                    self.x = np.delete(self.x, loc, 1)

                else:
                    raise ValueError("No csv_file specified for weights")

        # if keywords are supplied
        if kwargs is not None:

            # columns containing data
            if 'columns' in kwargs:
                if type(kwargs['columns']).__name__ == 'list':
                    self.columns = np.array(kwargs['columns'])
                elif type(kwargs['columns']).__name__ in ('ndarray',
                                                          'NoneType'):
                    self.columns = kwargs['columns']
                else:
                    self.columns = np.array(list(kwargs['columns']))
            else:
                self.columns = None

            # IDs of samples
            if 'ids' in kwargs:
                self.ids = kwargs['ids']
            else:
                self.ids = None

        else:
            self.columns = None
            self.ids = None

        if self.x is not None:

            if self.columns is None:
                self.columns = np.arange(0, self.x.shape[1])

            self.nsamp = self.x.shape[0]
            self.nvar = self.x.shape[1]

            self.nfeat = self.x.shape[1]

            if np.issubdtype(self.x.dtype, np.number):
                # NOTE(review): numpy includes `initial` in the reduction, so
                # max(..., initial=max_allow_x) can never report below
                # max_allow_x — verify whether clamping was the intent.
                self.xmin = self.x.min(0, initial=max_allow_x)
                # was initial=max_allow_y: x bounds should use the x limit
                self.xmax = self.x.max(0, initial=max_allow_x)

            self.index = np.arange(0, self.x.shape[0])

        else:
            self.nsamp = 0
            self.nvar = 0

        if self.y is not None:
            if np.issubdtype(self.y.dtype, np.number):
                self.ymin = self.y.min(initial=-max_allow_y)
                self.ymax = self.y.max(initial=max_allow_y)

        if self.y is not None:
            # preview of up to the first 10 samples (a hard-coded 10 raised
            # IndexError for smaller sample sets)
            nhead = min(10, self.x.shape[0])
            self.head = '\n'.join(
                list(
                    str(elem)
                    for elem in [' '.join(list(self.x_name) + [self.y_name])] +
                    list(' '.join(
                        list(
                            str(elem_)
                            for elem_ in self.x[i, :].tolist() + [self.y[i]]))
                         for i in range(nhead))))
        else:
            self.head = '<empty>'
Beispiel #9
0
    def __init__(self,
                 csv_file=None,
                 label_colname=None,
                 x=None,
                 y=None,
                 x_name=None,
                 y_name=None,
                 weights=None,
                 weights_colname=None,
                 use_band_dict=None,
                 max_allow_x=1e13,
                 max_allow_y=1e13,
                 **kwargs):

        """
        :param csv_file: csv file that contains the features (training or validation samples)
        :param label_colname: column in csv file that contains the feature label (output value)
        :param x: 2d array containing features (samples) without the label
        :param y: 1d array of feature labels (same order as x)
        :param x_name: 1d array of feature names (bands)
        :param y_name: name of label
        :param weights: 1d array of sample weights (same order as x)
        :param weights_colname: column in csv file containing sample weights
        :param use_band_dict: list of attribute (band) names
        :param max_allow_x: Maximum allowed values of x
        :param max_allow_y: Maximum allowed value of y
        :param kwargs: optional keywords: 'columns' (data column indices),
                       'ids' (sample IDs)
        """
        self.csv_file = csv_file
        self.label_colname = label_colname

        # accept any iterable for x/y; store as numpy arrays
        if type(x).__name__ in ('ndarray', 'NoneType'):
            self.x = x
        else:
            self.x = np.array(list(x))

        self.x_name = x_name

        if type(y).__name__ in ('ndarray', 'NoneType'):
            self.y = y
        else:
            self.y = np.array(list(y))

        self.y_name = y_name

        self.weights = weights
        self.weights_colname = weights_colname
        self.use_band_dict = use_band_dict

        self.index = None
        self.nfeat = None

        self.xmin = None
        self.xmax = None
        self.ymin = None
        self.ymax = None

        self.max_allow_x = max_allow_x
        self.max_allow_y = max_allow_y

        # neither label name nor csv file is provided
        if (csv_file is None) and (label_colname is None):
            pass  # warnings.warn("Samples class initiated without data file or label")

        # label name and csv file are both provided
        elif (label_colname is not None) and (csv_file is not None):

            temp = Handler(filename=csv_file).read_from_csv()

            # exact-name match (the original substring `any(... in s ...)`
            # test could succeed while list.index() on the exact name failed)
            if label_colname in temp['name']:
                loc = temp['name'].index(label_colname)
            else:
                raise ValueError("Label name mismatch.\nAvailable names: " + ', '.join(temp['name']))

            # read from data dictionary; drop the label column out of x
            self.x_name = Sublist(elem.strip() for elem in temp['name'][:loc] + temp['name'][(loc + 1):])
            self.x = np.array(list(feat[:loc] + feat[(loc + 1):] for feat in temp['feature']))
            self.y = np.array(list(feat[loc] for feat in temp['feature']))
            self.y_name = temp['name'][loc].strip()

            # if band name dictionary is provided
            if use_band_dict is not None:
                self.y_name = [use_band_dict[b] for b in self.y_name]

        elif (label_colname is None) and (csv_file is not None):

            temp = Handler(filename=csv_file).read_from_csv()

            # read from data dictionary
            self.x_name = Sublist(temp['name'])
            self.x = np.array(list(feat for feat in temp['feature']))

        else:
            # was a bare `ValueError(...)` expression that silently did nothing
            raise ValueError("No data found for label.")

        # default names when arrays were supplied directly
        if self.x is not None and self.y is not None:
            if self.y_name is None:
                self.y_name = 'y'
            if (self.x_name is None) or \
                    (type(self.x_name) not in (list, tuple)) or \
                    (len(self.x_name) != self.x.shape[1]):
                self.x_name = list('x{}'.format(str(i+1)) for i in range(self.x.shape[1]))

        # extract the weights column out of x, if requested
        if weights is None:
            if weights_colname is not None:
                if csv_file is not None:

                    temp = Handler(filename=csv_file).read_from_csv()

                    # exact-name match, consistent with the .index() call below
                    if weights_colname in temp['name']:
                        loc = temp['name'].index(weights_colname)
                    else:
                        raise ValueError("Weight column name mismatch.\nAvailable names: " + ', '.join(temp['name']))

                    self.weights = self.x[:, loc]
                    self.x = np.delete(self.x, loc, 1)

                else:
                    raise ValueError("No csv_file specified for weights")

        # if keywords are supplied
        if kwargs is not None:

            # columns containing data
            if 'columns' in kwargs:
                if type(kwargs['columns']).__name__ == 'list':
                    self.columns = np.array(kwargs['columns'])
                elif type(kwargs['columns']).__name__ in ('ndarray', 'NoneType'):
                    self.columns = kwargs['columns']
                else:
                    self.columns = np.array(list(kwargs['columns']))
            else:
                self.columns = None

            # IDs of samples
            if 'ids' in kwargs:
                self.ids = kwargs['ids']
            else:
                self.ids = None

        else:
            self.columns = None
            self.ids = None

        if self.x is not None:

            if self.columns is None:
                self.columns = np.arange(0, self.x.shape[1])

            self.nsamp = self.x.shape[0]
            self.nvar = self.x.shape[1]

            self.nfeat = self.x.shape[1]

            self.xmin = self.x.min(0, initial=max_allow_x)
            # was initial=max_allow_y: x bounds should use the x limit
            self.xmax = self.x.max(0, initial=max_allow_x)

            self.index = np.arange(0, self.x.shape[0])

        else:
            self.nsamp = 0
            self.nvar = 0

        if self.y is not None:
            self.ymin = self.y.min(initial=-max_allow_y)
            self.ymax = self.y.max(initial=max_allow_y)

        if self.y is not None:
            # preview of up to the first 10 samples (a hard-coded 10 raised
            # IndexError for smaller sample sets)
            nhead = min(10, self.x.shape[0])
            self.head = '\n'.join(list(str(elem) for elem in
                                       [' '.join(list(self.x_name) + [self.y_name])] +
                                       list(' '.join(list(str(elem_) for elem_ in self.x[i, :].tolist() + [self.y[i]]))
                                            for i in range(nhead))))
        else:
            self.head = '<empty>'
Beispiel #10
0
 def __repr__(self):
     """
     Build a human-readable representation of the Samples object.
     :return: Samples class representation (str)
     """
     # csv-backed object: report source file and variable count
     if self.csv_file is not None:
         return "<Samples object from {cf} with {v} variables, {n} samples>".format(
             cf=Handler(self.csv_file).basename,
             v=len(self.x_name),
             n=len(self.x))

     # array-backed object without a csv source
     if self.x is not None:
         return "<Samples object with {n} samples>".format(n=len(self.x))

     return "<Samples object: __empty__>"
Beispiel #11
0
    def buffer(self, buffer_size=0, outfile=None, return_vector=False):
        """
        Buffer all geometries in this Vector object by a fixed distance.
        :param buffer_size: Distance in shapefile coordinates for the buffer
        :param outfile: Name of the outputfile, if None, _buffer_{buf}.shp extension is used
        :param return_vector: If this operation should  return a Vector object
        :return: Vector or None (writes file)
        """

        # build an in-memory datasource to hold the buffered layer
        mem_driver = ogr.GetDriverByName('Memory')
        mem_datasource = mem_driver.CreateDataSource('out')
        mem_layer = mem_datasource.CreateLayer('temp_layer',
                                               srs=self.spref,
                                               geom_type=self.type)

        # clone this vector's metadata onto a fresh Vector object
        out_vector = Vector()
        out_vector.nfeat = self.nfeat
        out_vector.type = self.type
        out_vector.crs = self.spref
        out_vector.spref = self.spref
        out_vector.attributes = self.attributes
        out_vector.layer = mem_layer
        out_vector.data_source = mem_datasource

        # copy the attribute (field) definitions from the source layer
        layer_defn = self.layer.GetLayerDefn()
        out_vector.fields = [layer_defn.GetFieldDefn(i)
                             for i in range(layer_defn.GetFieldCount())]

        # buffer each feature's geometry and carry its attributes over
        for feat in self.features:

            buffered_geom = feat.GetGeometryRef().Buffer(buffer_size)

            out_feat = ogr.Feature(layer_defn)
            out_feat.SetGeometry(buffered_geom)

            for field in out_vector.fields:
                field_name = field.GetName()
                out_feat.SetField(field_name, feat.GetField(field_name))

            out_vector.layer.CreateFeature(out_feat)
            out_vector.features.append(out_feat)
            out_vector.wktlist.append(buffered_geom.ExportToWkt())

        if return_vector:
            return out_vector

        if outfile is None:
            suffix = '_buffer_{buf}'.format(buf=str(buffer_size).replace('.', ''))
            outfile = Handler(self.filename).add_to_filename(suffix)

        out_vector.write_vector(outfile)
        return
Beispiel #12
0
    def write_vector(self, outfile=None, in_memory=False):
        """
        Write the vector object to memory or to file.
        :param outfile: File to write the vector object to
        :param in_memory: If the vector object should be written in memory (default: False)
        :return: Vector object if written to memory else NoneType
        """

        if in_memory:

            # memory datasources only need a bare name, not a path
            if outfile is not None:
                name = os.path.basename(outfile).split('.')[0]
            else:
                name = 'in_memory'

            mem_driver = ogr.GetDriverByName('Memory')
            mem_source = mem_driver.CreateDataSource(name)
            mem_source.CopyLayer(self.layer, name)

            out_vector = Vector()
            out_vector.datasource = mem_source
            out_vector.mem_source = mem_source

            return out_vector

        if outfile is None:
            outfile = self.filename
            if self.filename is None:
                raise ValueError("No filename for output")

        self.name = Handler(outfile).basename.split('.')[0]

        # pick the OGR driver from the file extension
        extension = os.path.basename(outfile).split('.')[-1]
        driver_by_ext = {'json': 'GeoJSON',
                         'csv': 'Comma Separated Value'}
        driver_type = driver_by_ext.get(extension, 'ESRI Shapefile')

        out_driver = ogr.GetDriverByName(driver_type)
        out_datasource = out_driver.CreateDataSource(outfile)

        out_layer = out_datasource.CreateLayer(self.name,
                                               srs=self.spref,
                                               geom_type=self.type)

        for field in self.fields:
            out_layer.CreateField(field)

        layer_defn = out_layer.GetLayerDefn()

        if len(self.wktlist) > 0:
            # rebuild features from stored WKT plus attribute dictionaries
            for i, wkt_geom in enumerate(self.wktlist):
                feat = ogr.Feature(layer_defn)
                feat.SetGeometry(ogr.CreateGeometryFromWkt(wkt_geom))

                for attr, val in self.attributes[i].items():
                    feat.SetField(attr, val)

                out_layer.CreateFeature(feat)

        elif len(self.features) > 0:
            for feature in self.features:
                out_layer.CreateFeature(feature)

        else:
            sys.stdout.write('No features found... closing file.\n')

        # release the handles so OGR flushes and closes the file
        out_datasource = out_driver = None
Beispiel #13
0
    def hillshade(self,
                  outfile=None,
                  file_format='GTiff',
                  compute_edges=True,
                  band=1,
                  scale=None,
                  algorithm='ZevenbergenThorne',
                  z_factor=1,
                  azimuth=315,
                  altitude=90,
                  combined=False,
                  multi_directional=False,
                  **creation_options):
        """
        Compute a DEM hillshade raster.
        :param outfile: Output file name; if None, derived from self.name
                        with a '_HILLSHADE' suffix
        :param file_format: Output file format (default: 'GTiff')
        :param compute_edges: Whether values at the raster edges should be
                              computed as well. Edge results may be incomplete
                              and can show artifacts when mosaicking.
        :param band: Band index to use (default: 1)
        :param scale: ratio of vertical to horizontal units
        :param algorithm: slope algorithm to use
                          valid options:
                             4-neighbor: 'ZevenbergenThorne'
                             8-neighbor: 'Horn'
        :param z_factor: vertical exaggeration used to pre-multiply the
                         elevations. (default: 1)
        :param azimuth: azimuth of the light, in degrees. (default: 315)
                        0 if it comes from the top of the raster, 90 from the
                        east and so on. The default value, 315, should rarely
                        be changed as it is the value generally used to
                        generate shaded maps.
        :param altitude: altitude of the light, in degrees. (default: 90)
                         90 if the light comes from above the DEM,
                         0 if it is raking light.
        :param combined: whether to compute combined shading, a combination
                         of slope and oblique shading. (Default: False)
        :param multi_directional: whether to compute multi-directional
                                  shading (Default: False)
        :param creation_options: Valid creation options examples:
                                 compress='LZW'
                                 bigtiff='yes'
        """
        if not self.init:
            self.initialize()

        if outfile is None:
            outfile = Handler(self.name).add_to_filename('_HILLSHADE')
            outfile = Handler(outfile).file_remove_check()

        # GDAL expects creation options as a list of 'KEY=value' strings
        creation_option_list = ['{}={}'.format(str(key), str(value))
                                for key, value in creation_options.items()]

        options = gdal.DEMProcessingOptions(
            format=file_format,
            computeEdges=compute_edges,
            creationOptions=creation_option_list,
            band=band,
            scale=scale,
            alg=algorithm,
            zFactor=z_factor,
            azimuth=azimuth,
            altitude=altitude,
            combined=combined,
            multiDirectional=multi_directional)

        result = gdal.DEMProcessing(outfile,
                                    self.datasource,
                                    'hillshade',
                                    options=options)
        result = None