Example 1
    def _parseVDbConn(self, mapp, layerInp):
        '''find attribute key according to layer of input map'''
        vdb = Module('v.db.connect', map=mapp, flags='g', stdout_=PIPE)

        vdb = vdb.outputs.stdout
        for line in vdb.splitlines():
            lsplit = line.split('|')
            layer = lsplit[0].split('/')[0]
            if str(layer) == str(layerInp):
                return lsplit[2]
        return None
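For context, a minimal standalone sketch of the same pattern: the `v.db.connect -g` output is captured via stdout_=PIPE and read back from outputs.stdout (the map name 'roads' and layer '1' are hypothetical; the field layout follows the parsing above):

from subprocess import PIPE
from grass.pygrass.modules import Module

vdb = Module('v.db.connect', map='roads', flags='g', stdout_=PIPE)
for line in vdb.outputs.stdout.splitlines():
    lsplit = line.split('|')            # fields are pipe-separated
    if lsplit[0].split('/')[0] == '1':  # match layer '1'
        print(lsplit[2])                # the attribute key column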
Example 2
    def __init__(self, cmd, *args, **kargs):
        for banned in ['stdout_', 'stderr_', 'finish_', 'run_']:
            if banned in kargs:
                raise ValueError('Do not set %s parameter'
                                 ', it would be overridden' % banned)
        kargs['stdout_'] = subprocess.PIPE
        kargs['stderr_'] = subprocess.PIPE
        kargs['finish_'] = True
        kargs['run_'] = False

        Module.__init__(self, cmd, *args, **kargs)
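Assuming the class above is named PipedModule (a hypothetical name, since the snippet only shows __init__), usage would presumably look like:

mod = PipedModule('g.region', flags='p')  # run_=False: not executed yet
mod.run()                                 # finish_=True: wait for completion
print(mod.outputs.stdout)                 # stdout was forced to subprocess.PIPE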
Example 3
def main(argv):
    print('computing')
    step = float(argv[0])
    elevin = argv[1]
    start_day = int(argv[2])
    stop_day = int(argv[3])
    parent_dir = os.path.dirname(os.path.dirname(os.getcwd()))
    out_folder = os.path.join(parent_dir, "raw_data", "rn")
    
    # compute irradiance for each day
    for day in range(start_day, stop_day):
        rsun = Module("r.sun", elevation=elevin, step=step,
                      glob_rad='rad.global.' + str(day),
                      overwrite=True, run_=False, day=day)
        rsun.run()
        # r.out.gdal GeoTIFF does not include the datum; use AAIGrid format
        # for the output raster
        rout = Module("r.out.gdal", overwrite=True, f=True,
                      input='rad.global.' + str(day),
                      output=os.path.join(out_folder, 'rn' + str(day)),
                      format="AAIGrid")
        gremove = Module("g.remove", f=True, type='raster',
                         name='rad.global.' + str(day))
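Note the two execution styles used here: with run_=False the Module is only constructed and must be started explicitly with run(), while without it the command executes at instantiation. A minimal sketch (map names hypothetical):

mod = Module("r.sun", elevation="dem", day=180,
             glob_rad="rad.global.180", run_=False)  # built, not yet executed
mod.run()                                            # executes now
Module("g.remove", f=True, type="raster",
       name="rad.global.180")                        # runs immediately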
Example 4
File: grid.py Project: caomw/grass
def copy_groups(groups, gisrc_src, gisrc_dst, region=None):
    """Copy group from one mapset to another, crop the raster to the region

    :param groups: a list of strings with the group that must be copied
                   from a master to another.
    :type groups: list of strings
    :param gisrc_src: path of the GISRC file from where we want to copy the groups
    :type gisrc_src: str
    :param gisrc_dst: path of the GISRC file where the groups will be created
    :type gisrc_dst: str
    :param region: a region like object or a dictionary with the region
                   parameters that will be used to crop the rasters of the
                   groups
    :type region: Region object or dictionary
    :returns: None

    """
    env = os.environ.copy()
    # instantiate modules
    get_grp = Module('i.group', flags='lg', stdout_=sub.PIPE, run_=False)
    set_grp = Module('i.group')
    get_grp.run_ = True
    rmloc = lambda r: r.split('@')[0] if '@' in r else r

    src = read_gisrc(gisrc_src)
    dst = read_gisrc(gisrc_dst)
    rm = src[2] != dst[2]
    all_rasts = [r[0]
                 for r in findmaps('rast', location=dst[1], gisdbase=dst[2])]
    for grp in groups:
        # change gisdbase to src
        env['GISRC'] = gisrc_src
        get_grp(group=grp, env_=env)
        rasts = get_grp.outputs.stdout.split()
        # change gisdbase to dst
        env['GISRC'] = gisrc_dst
        rast2cp = [r for r in rasts if rmloc(r) not in all_rasts]
        if rast2cp:
            copy_rasters(rast2cp, gisrc_src, gisrc_dst, region=region)
        set_grp(group=grp,
                input=[rmloc(r) for r in rasts] if rast2cp or rm else rasts,
                env_=env)
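The pattern worth noting here is running the same module against different GRASS sessions by swapping GISRC in a copied environment passed through env_; a minimal sketch (the session file path is hypothetical):

import os
from subprocess import PIPE
from grass.pygrass.modules import Module

env = os.environ.copy()
env['GISRC'] = '/tmp/gisrc_src'  # hypothetical GISRC of another session
lst = Module('g.list', type='raster', stdout_=PIPE, env_=env)
print(lst.outputs.stdout)        # rasters visible in that session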
Example 6
def main(options, flags):
    Module("v.overlay",
           overwrite=True,
           ainput=options["region"],
           alayer="1",
           atype="auto",
           binput=options["clouds"],
           blayer="1",
           btype="area",
           operator="not",
           output="region_mask",
           olayer="1,0,0",
           snap=1e-8)

    Module("g.region",
           overwrite=True,
           vector="region_mask",
           align=options["red"])

    Module("r.mask",
           overwrite=True,
           maskcats="*",
           vector="region_mask",
           layer="1")

    Module("i.vi",
           overwrite=True,
           red=options["red"],
           output="ndvi",
           viname="ndvi",
           nir=options["nir"],
           storage_bit=8)

    Module("r.recode",
           overwrite=True,
           input="ndvi",
           output="ndvi_class",
           rules="-",
           stdin_="-1:0.1:1\n0.1:0.5:2\n0.5:1:3")

    Module("r.colors",
           map="ndvi_class",
           rules="-",
           stdin_="1 grey\n2 255 255 0\n3 green")

    Module("r.to.vect",
           flags='sv',
           overwrite=True,
           input="ndvi_class",
           output="ndvi_class",
           type="area",
           column="value")

    Module("v.clean",
           overwrite=True,
           input="ndvi_class",
           layer="-1",
           type=[
               "point", "line", "boundary", "centroid", "area", "face",
               "kernel"
           ],
           output=options["output"],
           tool="rmarea",
           threshold=options["threshold"])

    ret = Module('r.univar', flags='g', map='ndvi', stdout_=PIPE)
    stats = parse_key_val(ret.outputs.stdout)
    print('-' * 80)
    print('NDVI value statistics')
    print('-' * 80)
    print('NDVI min value: {0:.4f}'.format(float(stats['min'])))
    print('NDVI max value: {0:.4f}'.format(float(stats['max'])))
    print('NDVI mean value: {0:.4f}'.format(float(stats['mean'])))

    print('-' * 80)
    print('NDVI class statistics')
    print('-' * 80)
    ret = Module('v.report',
                 map=options['output'],
                 option='area',
                 stdout_=PIPE)
    # skip the first line (cat|label|area)
    for line in ret.outputs.stdout.splitlines()[1:]:
        # parse line (eg. 1||2712850)
        data = line.split('|')
        cat = data[0]
        area = float(data[-1])
        print('NDVI class {0}: {1:.1f} ha'.format(cat, area / 1e4))

    # v.rast.stats: use -c flag to update statistics if the columns already exist
    Module('v.rast.stats',
           flags='c',
           map=options['output'],
           raster='ndvi',
           column_prefix='ndvi',
           method=['minimum', 'maximum', 'average'])
    # v.db.select: don't print column names (-c)
    ret = Module('v.db.select',
                 flags='c',
                 map=options['output'],
                 separator='comma',
                 stdout_=PIPE)
    for line in ret.outputs.stdout.splitlines():
        # parse line (eg. 1,,-0.433962264150943,0.740350877192983,0.051388909449992)
        cat, label, min, max, mean = line.split(',')
        print('NDVI class {0}: {1:.4f} (min) {2:.4f} (max) {3:.4f} (mean)'.
              format(cat, float(min), float(max), float(mean)))

    return 0
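The flags='g' plus parse_key_val combination used for r.univar above works with any module that prints shell-style key=value output; a minimal sketch with g.region (parse_key_val lives in grass.script.utils):

from subprocess import PIPE
from grass.pygrass.modules import Module
from grass.script.utils import parse_key_val

ret = Module('g.region', flags='g', stdout_=PIPE)
region = parse_key_val(ret.outputs.stdout)
print(region['nsres'], region['ewres'])  # values are returned as strings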
Example 7
def main(opt, flg):
    # import functions which depend on sklearn only after parser run
    from ml_functions import (
        balance,
        explorer_clsfiers,
        run_classifier,
        optimize_training,
        explore_SVC,
        plot_grid,
    )
    from features import importances, tocsv

    msgr = get_msgr()
    indexes = None
    vect = opt["vector"]
    vtraining = opt["vtraining"] if opt["vtraining"] else None
    scaler, decmp = None, None
    vlayer = opt["vlayer"] if opt["vlayer"] else vect + "_stats"
    tlayer = opt["tlayer"] if opt["tlayer"] else vect + "_training"
    rlayer = opt["rlayer"] if opt["rlayer"] else vect + "_results"

    labels = extract_classes(vtraining, 1)
    pprint(labels)

    if opt["scalar"]:
        scapar = opt["scalar"].split(",")
        from sklearn.preprocessing import StandardScaler

        scaler = StandardScaler(with_mean="with_mean" in scapar,
                                with_std="with_std" in scapar)

    if opt["decomposition"]:
        dec, params = (opt["decomposition"].split("|") if "|"
                       in opt["decomposition"] else (opt["decomposition"], ""))
        kwargs = ({k: v
                   for k, v in (p.split("=")
                                for p in params.split(","))} if params else {})
        load_decompositions()
        decmp = DECMP[dec](**kwargs)

    # if training extract training
    if vtraining and flg["e"]:
        msgr.message("Extract training from: <%s> to <%s>." %
                     (vtraining, vect))
        extract_training(vect, vtraining, tlayer)
        flg["n"] = True

    if flg["n"]:
        msgr.message("Save arrays to npy files.")
        save2npy(
            vect,
            vlayer,
            tlayer,
            fcats=opt["npy_cats"],
            fcols=opt["npy_cols"],
            fdata=opt["npy_data"],
            findx=opt["npy_index"],
            fclss=opt["npy_tclasses"],
            ftdata=opt["npy_tdata"],
        )

    # define the classifiers to use/test
    if opt["pyclassifiers"] and opt["pyvar"]:
        # import classifiers to use
        mycls = SourceFileLoader("mycls", opt["pyclassifiers"]).load_module()
        classifiers = getattr(mycls, opt["pyvar"])
    else:
        from ml_classifiers import CLASSIFIERS

        classifiers = CLASSIFIERS

    # Append the SVC classifier
    if opt["svc_c"] and opt["svc_gamma"]:
        from sklearn.svm import SVC

        svc = {
            "name": "SVC",
            "classifier": SVC,
            "kwargs": {
                "C": float(opt["svc_c"]),
                "gamma": float(opt["svc_gamma"]),
                "kernel": opt["svc_kernel"],
            },
        }
        classifiers.append(svc)

    # extract classifiers from pyindx
    if opt["pyindx"]:
        indexes = [i for i in get_indexes(opt["pyindx"])]
        classifiers = [classifiers[i] for i in indexes]

    num = int(opt["n_training"]) if opt["n_training"] else None

    # load from npy files
    Xt = np.load(opt["npy_tdata"])
    Yt = np.load(opt["npy_tclasses"])
    cols = np.load(opt["npy_cols"])

    # Define rules to substitute NaN, Inf, posInf, negInf values
    rules = {}
    for key in ("nan", "inf", "neginf", "posinf"):
        if opt[key]:
            rules[key] = get_rules(opt[key])
    pprint(rules)

    # Substitute (skip cat column)
    Xt, rules_vals = substitute(Xt, rules, cols[1:])
    Xtoriginal = Xt

    # scale the data
    if scaler:
        msgr.message("Scaling the training data set.")
        scaler.fit(Xt, Yt)
        Xt = scaler.transform(Xt)

    # decompose data
    if decmp:
        msgr.message("Decomposing the training data set.")
        decmp.fit(Xt)
        Xt = decmp.transform(Xt)

    # Feature importances with forests of trees
    if flg["f"]:
        np.save("training_transformed.npy", Xt)
        importances(
            Xt,
            Yt,
            cols[1:],
            csv=opt["imp_csv"],
            img=opt["imp_fig"],
            # default parameters to save the matplotlib figure
            **dict(dpi=300, transparent=False, bbox_inches="tight"),
        )

    # optimize the training set
    if flg["o"]:
        ind_optimize = (int(opt["pyindx_optimize"])
                        if opt["pyindx_optimize"] else 0)
        cls = classifiers[ind_optimize]
        msgr.message("Find the optimum training set.")
        best, Xbt, Ybt = optimize_training(
            cls,
            Xt,
            Yt,
            labels,  # {v: k for k, v in labels.items()},
            scaler,
            decmp,
            num=num,
            maxiterations=1000,
        )
        msg = "    - save the optimum training data set to: %s."
        msgr.message(msg % opt["npy_btdata"])
        np.save(opt["npy_btdata"], Xbt)
        msg = "    - save the optimum training classes set to: %s."
        msgr.message(msg % opt["npy_btclasses"])
        np.save(opt["npy_btclasses"], Ybt)

    # balance the data
    if flg["b"]:
        msg = "Balancing the training data set, each class have <%d> samples."
        msgr.message(msg % num)
        Xbt, Ybt = balance(Xt, Yt, num)
    else:
        if not flg["o"]:
            Xbt = (np.load(opt["npy_btdata"])
                   if os.path.isfile(opt["npy_btdata"]) else Xt)
            Ybt = (np.load(opt["npy_btclasses"])
                   if os.path.isfile(opt["npy_btclasses"]) else Yt)

    # scale the data
    if scaler:
        msgr.message("Scaling the training data set.")
        scaler.fit(Xbt, Ybt)
        Xt = scaler.transform(Xt)
        Xbt = scaler.transform(Xbt)

    if flg["d"]:
        C_range = [float(c) for c in opt["svc_c_range"].split(",") if c]
        gamma_range = [
            float(g) for g in opt["svc_gamma_range"].split(",") if g
        ]
        kernel_range = [
            str(s) for s in opt["svc_kernel_range"].split(",") if s
        ]
        poly_range = [int(i) for i in opt["svc_poly_range"].split(",") if i]
        allkwargs = dict(C=C_range,
                         gamma=gamma_range,
                         kernel=kernel_range,
                         degree=poly_range)
        kwargs = {}
        for k in allkwargs:
            if allkwargs[k]:
                kwargs[k] = allkwargs[k]
        msgr.message("Exploring the SVC domain.")
        grid = explore_SVC(Xbt,
                           Ybt,
                           n_folds=5,
                           n_jobs=int(opt["svc_n_jobs"]),
                           **kwargs)
        import pickle

        krnlstr = "_".join(s for s in opt["svc_kernel_range"].split(",") if s)
        pkl = open("grid%s.pkl" % krnlstr, "w")
        pickle.dump(grid, pkl)
        pkl.close()
        #        pkl = open('grid.pkl', 'r')
        #        grid = pickle.load(pkl)
        #        pkl.close()
        plot_grid(grid, save=opt["svc_img"])

    # test the accuracy of different classifiers
    if flg["t"]:
        # test different classifiers
        msgr.message("Exploring different classifiers.")
        msgr.message("cls_id   cls_name          mean     max     min     std")

        res = explorer_clsfiers(
            classifiers,
            Xt,
            Yt,
            labels=labels,
            indexes=indexes,
            n_folds=5,
            bv=flg["v"],
            extra=flg["x"],
        )
        # TODO: sort(order=...) is working only in the terminal, why?
        # res.sort(order='mean')
        with open(opt["csv_test_cls"], "w") as csv:
            csv.write(tocsv(res))

    if flg["c"]:
        # classify
        data = np.load(opt["npy_data"])
        indx = np.load(opt["npy_index"])

        # Substitute using column values
        data, dummy = substitute(data, rules, cols[1:])
        Xt = data[indx]

        if scaler:
            msgr.message("Scaling the training data set.")
            scaler.fit(Xt, Yt)
            Xt = scaler.transform(Xt)
            msgr.message("Scaling the whole data set.")
            data = scaler.transform(data)
        if decmp:
            msgr.message("Decomposing the training data set.")
            decmp.fit(Xt)
            Xt = decmp.transform(Xt)
            msgr.message("Decompose the whole data set.")
            data = decmp.transform(data)
        cats = np.load(opt["npy_cats"])

        np.save("data_filled_scaled.npy", data)
        tcols = []
        for cls in classifiers:
            report = (open(opt["report_class"], "w")
                      if opt["report_class"] else sys.stdout)
            run_classifier(cls, Xt, Yt, Xt, Yt, labels, data, report=report)
            tcols.append((cls["name"], "INTEGER"))

        import pickle

        with open("classification_results.pkl", "w") as res:
            pickle.dump(classifiers, res)
        # classifiers = pickle.load(res)
        msgr.message("Export the results to layer: <%s>" % str(rlayer))
        export_results(
            vect,
            classifiers,
            cats,
            rlayer,
            vtraining,
            tcols,
            overwrite(),
            pkl="res.pkl",
            append=flg["a"],
        )
    #        res.close()

    if flg["r"]:
        rules = ("\n".join([
            "%d %s" % (k, v) for k, v in get_colors(vtraining).items()
        ]) if vtraining else None)

        msgr.message("Export the layer with results to raster")
        with Vector(vect, mode="r") as vct:
            tab = vct.dblinks.by_name(rlayer).table()
            rasters = [c for c in tab.columns]
            rasters.remove(tab.key)

        v2rst = Module("v.to.rast")
        rclrs = Module("r.colors")
        for rst in rasters:
            v2rst(
                input=vect,
                layer=rlayer,
                type="area",
                use="attr",
                attrcolumn=rst.encode(),
                output=(opt["rst_names"] % rst).encode(),
                memory=1000,
                overwrite=overwrite(),
            )
            if rules:
                rclrs(map=rst.encode(), rules="-", stdin_=rules)
Example 8
#!/usr/bin/env python3

from grass.pygrass.modules import Module

psc = '41115'

Module('v.extract', input='obce', output='obce1',
       where="psc = '{}'".format(psc))
Module('v.select', ainput='obce', binput='obce1',
       output='obce_psc_{}'.format(psc),
       operator='overlap', overwrite=True)
Module('g.remove', type='vector', name='obce1', flags='f')
Example 9
# Look up B10 and B11 TIF files in dir + subdirs and write path/filename to "files"
files = glob.glob(path + '/**/*B1[0-1].TIF', recursive=True)

# Look up BQA TIF files in dir + subdirs and write path/filename to "files_bqa"
files_bqa = glob.glob(path + '/**/*BQA.TIF', recursive=True)

# Create list with B10 only (for lst creation)
#files_b10 = glob.glob(path + '/**/*B[1][0].TIF')

# Add "files_temp" to "files"
files.extend(files_bqa)

# Import AOI from file
Module("v.import",
       overwrite=True,
       input=path_aoi,
       output="aoi")

# Set AOI as region
Module("g.region",
       overwrite=True,
       vector="aoi@" + str(mapset))

# Import each file "LC08xxxxxxxxxxxx"
for i in files:
    Module("r.import",
           overwrite=True,
           memory=2000,
           input=i,
           output=i[-28:-4],
           extent="region")
Example 10
class GridModule(object):
    # TODO maybe also i.* could be supported easily
    """Run GRASS raster commands in a multiprocessing mode.

    :param cmd: raster GRASS command; only commands starting with r.* are valid.
    :type cmd: str
    :param width: width of the tile, in pixels
    :type width: int
    :param height: height of the tile, in pixels
    :type height: int
    :param overlap: overlap between tiles, in pixels
    :type overlap: int
    :param processes: number of processes; the default equals the number of
                      processors available.
    :type processes: int
    :param split: if True use r.tile to split all the inputs.
    :type split: bool
    :param mapset_prefix: if specified, created mapsets start with this prefix
    :type mapset_prefix: str
    :param run_: if False only instantiate the object
    :type run_: bool
    :param args: give all the parameters to the command
    :param kargs: give all the parameters to the command

    >>> grd = GridModule('r.slope.aspect',
    ...                  width=500, height=500, overlap=2,
    ...                  processes=None, split=False,
    ...                  elevation='elevation',
    ...                  slope='slope', aspect='aspect', overwrite=True)
    >>> grd.run()
    """
    def __init__(self,
                 cmd,
                 width=None,
                 height=None,
                 overlap=0,
                 processes=None,
                 split=False,
                 debug=False,
                 region=None,
                 move=None,
                 log=False,
                 start_row=0,
                 start_col=0,
                 out_prefix='',
                 mapset_prefix=None,
                 *args,
                 **kargs):
        kargs['run_'] = False
        self.mset = Mapset()
        self.module = Module(cmd, *args, **kargs)
        self.width = width
        self.height = height
        self.overlap = overlap
        self.processes = processes
        self.region = region if region else Region()
        self.start_row = start_row
        self.start_col = start_col
        self.out_prefix = out_prefix
        self.log = log
        self.move = move
        self.gisrc_src = os.environ['GISRC']
        self.n_mset, self.gisrc_dst = None, None
        if self.move:
            self.n_mset = copy_mapset(self.mset, self.move)
            self.gisrc_dst = write_gisrc(self.n_mset.gisdbase,
                                         self.n_mset.location,
                                         self.n_mset.name)
            rasters = [r for r in select(self.module.inputs, 'raster')]
            if rasters:
                copy_rasters(rasters,
                             self.gisrc_src,
                             self.gisrc_dst,
                             region=self.region)
            vectors = [v for v in select(self.module.inputs, 'vector')]
            if vectors:
                copy_vectors(vectors, self.gisrc_src, self.gisrc_dst)
            groups = [g for g in select(self.module.inputs, 'group')]
            if groups:
                copy_groups(groups,
                            self.gisrc_src,
                            self.gisrc_dst,
                            region=self.region)
        self.bboxes = split_region_tiles(region=region,
                                         width=width,
                                         height=height,
                                         overlap=overlap)
        if mapset_prefix:
            self.msetstr = mapset_prefix + "_%03d_%03d"
        else:
            self.msetstr = cmd.replace('.', '') + "_%03d_%03d"
        self.inlist = None
        if split:
            self.split()
        self.debug = debug

    def __del__(self):
        if self.gisrc_dst:
            # remove GISRC file
            os.remove(self.gisrc_dst)

    def clean_location(self, location=None):
        """Remove all created mapsets.

        :param location: a Location instance where we are running the analysis
        :type location: Location object
        """
        if location is None:
            if self.n_mset:
                self.n_mset.current()
            location = Location()

        mapsets = location.mapsets(self.msetstr.split('_')[0] + '_*')
        for mset in mapsets:
            Mapset(mset).delete()
        if self.n_mset and self.n_mset.is_current():
            self.mset.current()

    def split(self):
        """Split all the raster inputs using r.tile"""
        rtile = Module('r.tile')
        inlist = {}
        for inm in select(self.module.inputs, 'raster'):
            rtile(input=inm.value,
                  output=inm.value,
                  width=self.width,
                  height=self.height,
                  overlap=self.overlap)
            patt = '%s-*' % inm.value
            inlist[inm.value] = sorted(
                self.mset.glist(type='raster', pattern=patt))
        self.inlist = inlist

    def get_works(self):
        """Return a list of tuble with the parameters for cmd_exe function"""
        works = []
        reg = Region()
        if self.move:
            mdst, ldst, gdst = read_gisrc(self.gisrc_dst)
        else:
            ldst, gdst = self.mset.location, self.mset.gisdbase
        cmd = self.module.get_dict()
        groups = [g for g in select(self.module.inputs, 'group')]
        for row, box_row in enumerate(self.bboxes):
            for col, box in enumerate(box_row):
                inms = None
                if self.inlist:
                    inms = {}
                    cols = len(box_row)
                    for key in self.inlist:
                        indx = row * cols + col
                        inms[key] = "%s@%s" % (self.inlist[key][indx],
                                               self.mset.name)
                # set the computational region, prepare the region parameters
                bbox = dict([(k[0], str(v)) for k, v in box.items()[:-2]])
                bbox['nsres'] = '%f' % reg.nsres
                bbox['ewres'] = '%f' % reg.ewres
                new_mset = self.msetstr % (self.start_row + row,
                                           self.start_col + col)
                works.append((bbox, inms, self.gisrc_src,
                              write_gisrc(gdst, ldst, new_mset), cmd, groups))
        return works

    def define_mapset_inputs(self):
        """Add the mapset information to the input maps
        """
        for inmap in self.module.inputs:
            inm = self.module.inputs[inmap]
            if inm.type in ('raster', 'vector') and inm.value:
                if '@' not in inm.value:
                    mset = get_mapset_raster(inm.value)
                    inm.value = inm.value + '@%s' % mset

    def run(self, patch=True, clean=True):
        """Run the GRASS command

        :param patch: set False if you do not want to patch the results
        :type patch: bool
        :param clean: set False if you do not want to remove everything
                      created by GridModule
        :type clean: bool
        """
        self.module.flags.overwrite = True
        self.define_mapset_inputs()
        if self.debug:
            for wrk in self.get_works():
                cmd_exe(wrk)
        else:
            pool = mltp.Pool(processes=self.processes)
            result = pool.map_async(cmd_exe, self.get_works())
            result.wait()
            pool.close()
            pool.join()
            if not result.successful():
                raise RuntimeError(
                    _("Execution of subprocesses was not successful"))

        if patch:
            if self.move:
                os.environ['GISRC'] = self.gisrc_dst
                self.n_mset.current()
                self.patch()
                os.environ['GISRC'] = self.gisrc_src
                self.mset.current()
                # copy the outputs from dst => src
                routputs = [
                    self.out_prefix + o
                    for o in select(self.module.outputs, 'raster')
                ]
                copy_rasters(routputs, self.gisrc_dst, self.gisrc_src)
            else:
                self.patch()

        if self.log:
            # record in the temp directory
            from grass.lib.gis import G_tempfile
            tmp, dummy = os.path.split(G_tempfile())
            tmpdir = os.path.join(tmp, self.module.name)
            for k in self.module.outputs:
                par = self.module.outputs[k]
                if par.typedesc == 'raster' and par.value:
                    dirpath = os.path.join(tmpdir, par.name)
                    if not os.path.isdir(dirpath):
                        os.makedirs(dirpath)
                    fil = open(
                        os.path.join(dirpath, self.out_prefix + par.value),
                        'w+')
                    fil.close()

        if clean:
            self.clean_location()
            self.rm_tiles()
            if self.n_mset:
                gisdbase, location = os.path.split(self.move)
                self.clean_location(Location(location, gisdbase))
                # rm temporary gis_rc
                os.remove(self.gisrc_dst)
                self.gisrc_dst = None
                sht.rmtree(os.path.join(self.move, 'PERMANENT'))
                sht.rmtree(os.path.join(self.move, self.mset.name))

    def patch(self):
        """Patch the final results."""
        bboxes = split_region_tiles(width=self.width, height=self.height)
        loc = Location()
        mset = loc[self.mset.name]
        mset.visible.extend(loc.mapsets())
        for otmap in self.module.outputs:
            otm = self.module.outputs[otmap]
            if otm.typedesc == 'raster' and otm.value:
                rpatch_map(otm.value, self.mset.name, self.msetstr, bboxes,
                           self.module.flags.overwrite, self.start_row,
                           self.start_col, self.out_prefix)

    def rm_tiles(self):
        """Remove all the tiles."""
        # if split, remove tiles
        if self.inlist:
            grm = Module('g.remove')
            for key in self.inlist:
                grm(flags='f', type='raster', name=self.inlist[key])
Example 11
    def updateGrassMd(self, md):
        '''
        Update some parameters in r/v.support. This part needs revision. #TODO
        '''
        if self.type == "vector":

            if len(md.contact) > 0:
                _org = ''
                for co in md.contact:
                    if co.organization != '':
                        _org += co.organization + ', '
                    if co.email != '':
                        _org += co.email + ', '
                    if co.role != '':
                        _org += co.role + '; '

                Module('v.support', map=self.map, organization=_org, flags='r')

            if md.identification.date is not None:
                if len(md.identification.date) > 0:
                    _date = None
                    for d in md.identification.date:
                        if d.type == 'creation':
                            _date = d.date
                    # only call v.support when a creation date was found
                    if _date is not None:
                        Module('v.support', map=self.map, date=_date,
                               flags='r')

            if md.identification.contact is not None:
                if len(md.identification.contact) > 0:
                    _person = md.identification.contact.pop()
                    if isinstance(_person, str):
                        Module('v.support',
                               map=self.map,
                               person=_person,
                               flags='r')

            # skip empty or missing titles
            if md.identification.title:
                _name = md.identification.title
                Module('v.support', map=self.map, map_name=_name, flags='r')

            if len(md.identification.denominators) > 0:
                _scale = md.identification.denominators.pop()
                try:
                    _scale = int(_scale)
                    Module('v.support', map=self.map, scale=_scale, flags='r')
                except (ValueError, TypeError):
                    pass

            if md.identification.keywords:
                _comments = ''
                for k in md.identification.keywords:
                    for kw in k["keywords"]:
                        if kw != '':
                            _comments += kw + ', '
                    if k["thesaurus"]["title"] != '':
                        _comments += k["thesaurus"]["title"] + ', '
                    if k["thesaurus"]["date"] != '':
                        _comments += k["thesaurus"]["date"] + ', '
                    if k['thesaurus']['datetype'] != '':
                        _comments += k['thesaurus']['datetype'] + ';'

                Module('v.support', map=self.map, comment=_comments, flags='r')

#------------------------------------------------------------------------ RASTER
        if self.type == "raster":

            if md.identification.title:
                _title = md.identification.title
                Module('r.support', map=self.map, title=_title, overwrite=True)

            if md.dataquality.lineage:
                _history = md.dataquality.lineage
                Module(
                    'r.support',
                    map=self.map,  # append
                    history=_history)

            _units = ''
            if len(md.identification.distance) > 0:
                _units += md.identification.distance.pop()
            if len(md.identification.uom) > 0:
                _units += md.identification.uom.pop()
            if _units != '':
                Module('r.support', map=self.map, units=_units, overwrite=True)

            if md.identification.keywords:
                _comments = self.md_grass['description']
                for k in md.identification.keywords:
                    for kw in k["keywords"]:
                        if kw != '':
                            _comments += kw + ', '
                    if k["thesaurus"]["title"] != '':
                        _comments += k["thesaurus"]["title"] + ', '
                    if k["thesaurus"]["date"] != '':
                        _comments += k["thesaurus"]["date"] + ', '
                    if k['thesaurus']['datetype'] != '':
                        _comments += k['thesaurus']['datetype'] + ';'

                Module('r.support',
                       map=self.map,
                       description=_comments,
                       overwrite=True)
Example 12
def cleanup():
    try:
        Module('g.remove', flags='f', type='raster', name=output)
    except CalledModuleError:
        pass
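CalledModuleError is importable from grass.exceptions; cleanup functions like this are typically registered so they run even on error exits. A sketch of the usual wiring (the output name is a hypothetical module-level variable, as in the snippet):

import atexit

output = 'tmp_result'     # hypothetical temporary map used by cleanup()
atexit.register(cleanup)  # remove the temporary raster on exit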
Example 14
            >>> provider.stop()

            ..
        """
        self.check_server()
        self.client_conn.send([
            RPCDefs.GET_VECTOR_FEATURES_AS_WKB, name, mapset, extent,
            feature_type, field
        ])
        return self.safe_receive("get_vector_features_as_wkb_list")


if __name__ == "__main__":
    import doctest
    from grass.pygrass import utils
    from grass.pygrass.modules import Module
    Module("g.region", n=40, s=0, e=40, w=0, res=10)
    Module("r.mapcalc",
           expression="%s = row() + (10 * col())" % (test_raster_name),
           overwrite=True)
    utils.create_test_vector_map(test_vector_name)

    doctest.testmod()
    """Remove the generated maps, if exist"""
    mset = utils.get_mapset_raster(test_raster_name, mapset='')
    if mset:
        Module("g.remove", flags='f', type='raster', name=test_raster_name)
    mset = utils.get_mapset_vector(test_vector_name, mapset='')
    if mset:
        Module("g.remove", flags='f', type='vector', name=test_vector_name)
Example 15
def compute(b4, b8, cl, output, idx):
    modules = []
    if cl:
        region_mask = "region_mask" + idx
        modules.append(
            Module("v.overlay",
                   overwrite=True,
                   ainput=options["region"],
                   binput=cl,
                   operator="not",
                   output=region_mask,
                   run_=False))
    else:
        region_mask = options["region"]

    modules.append(
        Module("g.region",
               overwrite=True,
               vector=region_mask,
               align=b4,
               run_=False))
    modules.append(
        Module("v.to.rast",
               overwrite=True,
               input=region_mask,
               output='mask' + idx,
               type='area',
               use='val',
               value='1',
               run_=False))
    modules.append(
        Module(
            "r.mapcalc",
            overwrite=True,
            expression=
            "ndvi{idx} = if(isnull({clouds}), null(), float({b8} - {b4}) / ({b8} + {b4}))"
            .format(idx=idx, clouds=cl, b8=b8, b4=b4),
            run_=False))

    recode_str = """-1:0.1:1
0.1:0.5:2
0.5:1:3"""

    modules.append(
        Module("r.recode",
               overwrite=True,
               input="ndvi" + idx,
               output="ndvi_class" + idx,
               rules="-",
               stdin_=recode_str,
               run_=False))

    colors_str = """1 grey
2 255 255 0
3 green"""
    modules.append(
        Module("r.colors",
               map="ndvi_class" + idx,
               rules="-",
               stdin_=colors_str,
               run_=False))

    modules.append(
        Module("r.to.vect",
               flags='sv',
               overwrite=True,
               input="ndvi_class" + idx,
               output="ndvi_class" + idx,
               type="area",
               run_=False))

    modules.append(
        Module("v.clean",
               overwrite=True,
               input="ndvi_class" + idx,
               output=output,
               tool="rmarea",
               threshold=options['threshold'],
               run_=False))

    modules.append(
        Module('v.rast.stats',
               flags='c',
               map=output,
               raster='ndvi' + idx,
               column_prefix='ndvi',
               method=['minimum', 'maximum', 'average'],
               run_=False))

    queue.put(MultiModule(modules, sync=False, set_temp_region=True))
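compute() builds the whole chain with run_=False and hands it to a queue, presumably a pygrass ParallelModuleQueue (an assumption; the queue object is defined outside this snippet). A minimal sketch of that wiring with hypothetical map names:

from grass.pygrass.modules import Module, MultiModule, ParallelModuleQueue

queue = ParallelModuleQueue(nprocs=4)  # run up to 4 chains in parallel
chain = [Module('g.region', raster='dem', run_=False),
         Module('r.slope.aspect', elevation='dem', slope='slope',
                run_=False)]
queue.put(MultiModule(chain, sync=False, set_temp_region=True))
queue.wait()  # block until all queued chains have finished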
Example 16
def main():
    # check if the map is in the current mapset
    mapset = grass.find_file(opt['map'], element='vector')['mapset']
    if not mapset or mapset != grass.gisenv()['MAPSET']:
        grass.fatal(
            _("Vector map <{}> not found in the current mapset").format(
                opt['map']))

    # get list of existing columns
    try:
        columns = grass.vector_columns(opt['map']).keys()
    except CalledModuleError as e:
        return 1

    allowed_rasters = ('N2', 'N5', 'N10', 'N20', 'N50', 'N100')

    # test input feature type
    vinfo = grass.vector_info_topo(opt['map'])
    if vinfo['areas'] < 1 and vinfo['points'] < 1:
        grass.fatal(
            _("No points or areas found in input vector map <{}>").format(
                opt['map']))

    # check area size limit
    check_area_size = float(opt['area_size']) > 0
    if check_area_size:
        area_col_name = 'area_{}'.format(os.getpid())
        Module('v.db.addcolumn',
               map=opt['map'],
               columns='{} double precision'.format(area_col_name))
        Module('v.to.db',
               map=opt['map'],
               option='area',
               units='kilometers',
               columns=area_col_name,
               quiet=True)
        areas = Module('v.db.select',
                       flags='c',
                       map=opt['map'],
                       columns=area_col_name,
                       where='{} > {}'.format(area_col_name, opt['area_size']),
                       stdout_=grass.PIPE)
        large_areas = len(areas.outputs.stdout.splitlines())
        if large_areas > 0:
            grass.warning(
                '{} areas larger than the size limit will be skipped in the '
                'computation'.format(large_areas))

    # extract multi values to points
    for rast in opt['return_period'].split(','):
        # check valid rasters
        name = grass.find_file(rast, element='cell')['name']
        if not name:
            grass.warning('Raster map <{}> not found. '
                          'Skipped.'.format(rast))
            continue
        if name not in allowed_rasters:
            grass.warning('Raster map <{}> skipped. '
                          'Allowed: {}'.format(rast, allowed_rasters))
            continue

        # perform zonal statistics
        grass.message('Processing <{}>...'.format(rast))
        table = '{}_table'.format(name)
        if vinfo['areas'] > 0:
            Module('v.rast.stats',
                   flags='c',
                   map=opt['map'],
                   raster=rast,
                   column_prefix=name,
                   method='average',
                   quiet=True)
            # handle NULL values (areas smaller than raster resolution)
            null_values = Module('v.db.select',
                                 map=opt['map'],
                                 columns='cat',
                                 flags='c',
                                 where="{}_average is NULL".format(name),
                                 stdout_=grass.PIPE)
            cats = null_values.outputs.stdout.splitlines()
            if len(cats) > 0:
                grass.warning(
                    _("Input vector map <{}> contains very small areas (smaller than "
                      "raster resolution). These areas will be processed by querying "
                      "a single raster cell.").format(opt['map']))
                Module('v.what.rast',
                       map=opt['map'],
                       raster=rast,
                       type='centroid',
                       column='{}_average'.format(name),
                       where="{}_average is NULL".format(name),
                       quiet=True)
        else:  # -> points
            Module('v.what.rast',
                   map=opt['map'],
                   raster=rast,
                   column='{}_average'.format(name),
                   quiet=True)

        # add column to the attribute table if not exists
        rl = float(opt['rainlength'])
        field_name = 'H_{}T{}'.format(name, opt['rainlength'])
        if field_name not in columns:
            Module('v.db.addcolumn',
                   map=opt['map'],
                   columns='{} double precision'.format(field_name))

        # determine coefficient for calculation
        a, c = coeff(rast, rl)
        if a is None or c is None:
            grass.fatal("Unable to calculate coefficients")

        # calculate output values, update attribute table
        coef = a * rl**(1 - c)
        expression = '{}_average * {}'.format(name, coef)
        Module('v.db.update',
               map=opt['map'],
               column=field_name,
               query_column=expression)

        if check_area_size:
            Module('v.db.update',
                   map=opt['map'],
                   column=field_name,
                   value='-1',
                   where='{} > {}'.format(area_col_name, opt['area_size']))

        # remove unused column
        Module('v.db.dropcolumn',
               map=opt['map'],
               columns='{}_average'.format(name))

    if check_area_size:
        # remove unused column
        Module('v.db.dropcolumn', map=opt['map'], columns=area_col_name)

    return 0
Example 17
def get_zones(vector, zones, layer=1, overwrite=False):
    v2rast = Module('v.to.rast', input=vector, layer=str(layer), type='area',
                    output=zones, overwrite=overwrite, use='cat')
    rclr = Module("r.colors", map=zones, color="random")
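A hypothetical call, rasterizing the areas of a vector map 'fields' into a zones raster and assigning random colors:

get_zones('fields', 'fields_zones', overwrite=True)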
Esempio n. 18
0
    def updateGrassMd(self, md):
        """
        Update some parameters in r/v.support. This part need revision #TODO
        """
        if self.type == "vector":

            if len(md.contact) > 0:
                _org = ""
                for co in md.contact:
                    if co.organization != "":
                        _org += co.organization + ", "
                    if co.email != "":
                        _org += co.email + ", "
                    if co.role != "":
                        _org += co.role + "; "

                Module("v.support", map=self.map, organization=_org, flags="r")

            if md.identification.date is not None:
                if len(md.identification.date) > 0:
                    for d in md.identification.date:
                        if d.type == "creation":
                            _date = d.date

                    Module("v.support", map=self.map, date=_date, flags="r")

            if md.identification.contact is not None:
                if len(md.identification.contact) > 0:
                    _person = md.identification.contact.pop()
                    if _person is str:
                        Module("v.support", map=self.map, person=_person, flags="r")

            if md.identification.title is not (None or ""):
                _name = md.identification.title
                Module("v.support", map=self.map, map_name=_name, flags="r")

            if len(md.identification.denominators) > 0:
                _scale = md.identification.denominators.pop()
                try:
                    _scale = int(_scale)
                    Module("v.support", map=self.map, scale=_scale, flags="r")
                except:
                    pass

            if (
                md.identification.keywords is not None
                or len(md.identification.keywords) > 0
            ):
                _comments = ""
                for k in md.identification.keywords:
                    for kw in k["keywords"]:
                        if kw != "":
                            _comments += kw + ", "
                    if k["thesaurus"]["title"] != "":
                        _comments += k["thesaurus"]["title"] + ", "
                    if k["thesaurus"]["date"] != "":
                        _comments += k["thesaurus"]["date"] + ", "
                    if k["thesaurus"]["datetype"] != "":
                        _comments += k["thesaurus"]["datetype"] + ";"

                Module("v.support", map=self.map, comment=_comments, flags="r")

        # ------------------------------------------------------------------------ RASTER
        if self.type == "raster":

            if md.identification.title is not (None or ""):
                _title = md.identification.title
                Module("r.support", map=self.map, title=_title, overwrite=True)

            if md.dataquality.lineage:
                _history = md.dataquality.lineage
                Module("r.support", map=self.map, history=_history)  # append

            _units = ""
            if len(md.identification.distance) > 0:
                _units += md.identification.distance.pop()
            if len(md.identification.uom) > 0:
                _units += md.identification.uom.pop()
            if _units != "":
                Module("r.support", map=self.map, units=_units, overwrite=True)

            if (
                md.identification.keywords is not None
                and len(md.identification.keywords) > 0
            ):
                _comments = self.md_grass["description"]
                for k in md.identification.keywords:
                    for kw in k["keywords"]:
                        if kw != "":
                            _comments += kw + ", "
                    if k["thesaurus"]["title"] != "":
                        _comments += k["thesaurus"]["title"] + ", "
                    if k["thesaurus"]["date"] != "":
                        _comments += k["thesaurus"]["date"] + ", "
                    if k["thesaurus"]["datetype"] != "":
                        _comments += k["thesaurus"]["datetype"] + ";"

                Module("r.support", map=self.map, description=_comments, overwrite=True)
Esempio n. 19
def shp_to_raster(shp,
                  inSource,
                  cellsize,
                  nodata,
                  outRaster,
                  epsg=None,
                  rst_template=None,
                  api='gdal',
                  snap=None):
    """
    Feature Class to Raster
    
    cellsize will be ignored if rst_template is defined
    
    * APIs available:
    - gdal;
    - arcpy;
    - pygrass;
    - grass;
    """

    if api == 'gdal':
        from osgeo import gdal, ogr
        from gasp.prop.ff import drv_name

        if not epsg:
            from gasp.prop.feat import get_shp_sref
            srs = get_shp_sref(shp).ExportToWkt()
        else:
            from gasp.prop.prj import epsg_to_wkt
            srs = epsg_to_wkt(epsg)

        # Get Extent
        dtShp = ogr.GetDriverByName(drv_name(shp)).Open(shp, 0)

        lyr = dtShp.GetLayer()

        if not rst_template:
            x_min, x_max, y_min, y_max = lyr.GetExtent()
            x_res = int((x_max - x_min) / cellsize)
            y_res = int((y_max - y_min) / cellsize)

        else:
            from gasp.fm.rst import rst_to_array

            img_temp = gdal.Open(rst_template)
            geo_transform = img_temp.GetGeoTransform()

            y_res, x_res = rst_to_array(rst_template).shape

        # Create output
        # Create(path, xsize, ysize, bands, eType)
        dtRst = gdal.GetDriverByName(drv_name(outRaster)).Create(
            outRaster, x_res, y_res, 1, gdal.GDT_Byte)

        if not rst_template:
            dtRst.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))

        else:
            dtRst.SetGeoTransform(geo_transform)
        dtRst.SetProjection(srs)

        bnd = dtRst.GetRasterBand(1)
        bnd.SetNoDataValue(nodata)

        gdal.RasterizeLayer(dtRst, [1], lyr, burn_values=[1])

        del lyr
        dtShp.Destroy()

    elif api == 'arcpy':
        import arcpy

        if rst_template:
            tempEnvironment0 = arcpy.env.extent
            arcpy.env.extent = rst_template

        if snap:
            tempSnap = arcpy.env.snapRaster
            arcpy.env.snapRaster = snap

        obj_describe = arcpy.Describe(shp)
        geom = obj_describe.ShapeType

        if geom == u'Polygon':
            arcpy.PolygonToRaster_conversion(shp, inSource, outRaster,
                                             "CELL_CENTER", "NONE", cellsize)

        elif geom == u'Polyline':
            arcpy.PolylineToRaster_conversion(shp, inSource, outRaster,
                                              "MAXIMUM_LENGTH", "NONE",
                                              cellsize)

        if rst_template:
            arcpy.env.extent = tempEnvironment0

        if snap:
            arcpy.env.snapRaster = tempSnap

    elif api == 'grass' or api == 'pygrass':
        """
        Vectorial geometry to raster
    
        If source is None, the convertion will be based on the cat field.
    
        If source is a string, the convertion will be based on the field
        with a name equal to the given string.
    
        If source is a numeric value, all cells of the output raster will have
        that same value.
        """

        # pick the v.to.rast 'use' mode from the type of inSource
        __USE = "cat" if not inSource else "attr" \
            if isinstance(inSource, str) else "val" \
            if isinstance(inSource, (int, float)) else None

        if not __USE:
            raise ValueError('\'source\' parameter value is not valid')

        if api == 'pygrass':
            from grass.pygrass.modules import Module

            m = Module("v.to.rast",
                       input=shp,
                       output=outRaster,
                       use=__USE,
                       attribute_column=inSource if __USE == "attr" else None,
                       value=inSource if __USE == "val" else None,
                       overwrite=True,
                       run_=False,
                       quiet=True)

            m()

        else:
            from gasp import exec_cmd

            rcmd = exec_cmd((
                "v.to.rast input={} output={} use={}{} "
                "--overwrite --quiet"
            ).format(
                shp, outRaster, __USE,
                "" if __USE == "cat" else " attribute_column={}".format(inSource) \
                    if __USE == "attr" else " val={}".format(inSource)
            ))

    else:
        raise ValueError('API {} is not available'.format(api))

    return outRaster
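
A minimal usage sketch of the pygrass branch above, inside an active GRASS
session and with hypothetical map and column names; passing a column name
selects use='attr', so cellsize and nodata are ignored by that branch:

shp_to_raster('landuse', 'landuse_id', None, None, 'landuse_rast',
              api='pygrass')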
Esempio n. 20
    def saveXML(self,
                path=None,
                xml_out_name=None,
                wxparent=None,
                overwrite=False):
        ''' Save initial record of OWSLib objects to ISO XML file'''

        # if  output file name is None, use map name and add suffix
        if xml_out_name is None:
            xml_out_name = self.type + '_' + str(
                self.map).partition('@')[0]  # + self.schema_type
        if not xml_out_name.lower().endswith('.xml'):
            xml_out_name += '.xml'

        if not path:
            path = os.path.join(mdutil.pathToMapset(), 'metadata')
            if not os.path.exists(path):
                os.makedirs(path)
        path = os.path.join(path, xml_out_name)

        # generate xml using jinja profiles
        env = Environment(loader=FileSystemLoader(self.dirpath))
        env.globals.update(zip=zip)
        profile = env.get_template(self.profilePath)
        iso_xml = profile.render(md=self.md)

        # write xml to flat file
        if wxparent is not None:
            if os.path.isfile(path):
                if mdutil.yesNo(
                        wxparent,
                        'Metadata file exists. Do you want to overwrite metadata file: %s?'
                        % path, 'Overwrite dialog'):
                    try:
                        xml_file = open(path, "w")
                        xml_file.write(iso_xml)
                        xml_file.close()
                        Module('g.message',
                               message='metadata exported:\n%s' % str(path))
                    except IOError as e:
                        print("I/O error({0}): {1}".format(
                            e.errno, e.strerror))
                        grass.fatal('ERROR: cannot write xml to file')
                return path
            else:
                try:
                    xml_file = open(path, "w")
                    xml_file.write(iso_xml)
                    xml_file.close()
                    Module('g.message',
                           message='metadata exported:\n%s' % str(path))
                except IOError as e:
                    print("I/O error({0}): {1}".format(e.errno, e.strerror))
                    grass.fatal('ERROR: cannot write xml to file')
                    # sys.exit()
                return path
        else:
            if os.path.isfile(path):
                Module('g.message', message='Metadata file exists: %s' % path)
                if overwrite:
                    try:
                        xml_file = open(path, "w")
                        xml_file.write(iso_xml)
                        xml_file.close()
                        Module('g.message',
                               message='Metadata file has been overwritten')
                        return path
                    except IOError as e:
                        print("I/O error({0}): {1}".format(
                            e.errno, e.strerror))
                        grass.fatal('error: cannot write xml to file')
                else:
                    Module('g.message',
                           message='For overwriting use the --overwrite flag')
                    return False
            else:
                try:
                    xml_file = open(path, "w")
                    xml_file.write(iso_xml)
                    xml_file.close()
                    Module('g.message',
                           message='Metadata file has been exported')
                    return path

                except IOError as e:
                    print("I/O error({0}): {1}".format(e.errno, e.strerror))
                    grass.fatal('error: cannot write xml to file')
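
The Jinja2 pattern used above (an Environment whose globals include zip, plus
template.render(md=...)) can be exercised in isolation. A self-contained
sketch with a hypothetical inline template standing in for the ISO profile
files:

from jinja2 import Environment, DictLoader

# hypothetical one-line "profile" standing in for the ISO XML templates
env = Environment(
    loader=DictLoader({'mini.xml': '<title>{{ md.title }}</title>'}))
env.globals.update(zip=zip)


class FakeMD(object):
    """Stand-in for the OWSLib metadata object"""
    title = 'elevation map'


print(env.get_template('mini.xml').render(md=FakeMD()))
# -> <title>elevation map</title>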
Esempio n. 21
def cleanup():
    """Remove temporary maps specified in the global list"""
    cleanrast = list(reversed(CLEAN_RAST))
    for rast in cleanrast:
        Module("g.remove", flags="f", type="all", name=rast, quiet=True)
Esempio n. 22
def main(options, flags):

    gisbase = os.getenv('GISBASE')
    if not gisbase:
        gs.fatal(_('$GISBASE not defined'))
        return 0

    # Variables
    ref = options['reference']
    REF = ref.split(',')
    pro = options['projection']
    if pro:
        PRO = pro.split(',')
    else:
        PRO = REF
    opn = [z.split('@')[0] for z in PRO]
    out = options['output']
    region = options['region']
    flag_d = flags['d']
    flag_e = flags['e']
    flag_p = flags['p']

    # get current region settings, to compare to new ones later
    regbu1 = tmpname("region")
    gs.parse_command("g.region", save=regbu1)

    # Check if region, projected layers or mask is given
    if region:
        ffile = gs.find_file(region, element="region")
        if not ffile:
            gs.fatal(_("the region {} does not exist").format(region))
    if not pro and not checkmask() and not region:
        gs.fatal(
            _("You need to provide projected layers, a region, or "
              "a mask has to be set"))
    if pro and len(REF) != len(PRO):
        gs.fatal(
            _("The number of reference and projection layers needs to "
              "be the same. You provided %d reference and %d "
              "projection variables") % (len(REF), len(PRO)))

    # Text for history in metadata
    opt2 = dict((k, v) for k, v in options.iteritems() if v)
    hist = ' '.join("{!s}={!r}".format(k, v) for (k, v) in opt2.iteritems())
    hist = "r.exdet {}".format(hist)
    unused, tmphist = tempfile.mkstemp()
    with open(tmphist, "w") as text_file:
        text_file.write(hist)

    # Create covariance table
    VI = CoVar(maps=REF)

    # Import reference data & compute univar stats per reference layer
    s = len(REF)
    dat_ref = stat_mean = stat_min = stat_max = None
    layer = garray.array()

    for i, map in enumerate(REF):
        layer.read(map, null=np.nan)
        r, c = layer.shape
        if dat_ref is None:
            dat_ref = np.empty((s, r, c), dtype=np.double)
        if stat_mean is None:
            stat_mean = np.empty((s), dtype=np.double)
        if stat_min is None:
            stat_min = np.empty((s), dtype=np.double)
        if stat_max is None:
            stat_max = np.empty((s), dtype=np.double)
        stat_min[i] = np.nanmin(layer)
        stat_mean[i] = np.nanmean(layer)
        stat_max[i] = np.nanmax(layer)
        dat_ref[i, :, :] = layer
    del layer

    # Compute mahalanobis over full set of reference layers
    mahal_ref = mahal(v=dat_ref, m=stat_mean, VI=VI)
    mahal_ref_max = max(mahal_ref[np.isfinite(mahal_ref)])
    if flag_e:
        mahalref = "{}_mahalref".format(out)
        mahal_ref.write(mapname=mahalref)
        gs.info(_("Mahalanobis distance map saved: {}").format(mahalref))
        gs.run_command("r.support",
                       map=mahalref,
                       title="Mahalanobis distance map",
                       units="unitless",
                       description="Mahalanobis distance map in reference "
                       "domain",
                       loadhistory=tmphist)
    del mahal_ref

    # Remove mask and set new region based on user-defined region or
    # otherwise based on projection layers
    if checkmask():
        gs.run_command("r.mask", flags="r")
    if region:
        gs.run_command("g.region", region=region)
        gs.info(_("The region has set to the region {}").format(region))
    if pro:
        gs.run_command("g.region", raster=PRO[0])
        # TODO: only set region to PRO[0] when different from current region
        gs.info(_("The region has set to match the proj raster layers"))

    # Import projected layers in numpy array
    s = len(PRO)
    dat_pro = None
    layer = garray.array()
    for i, map in enumerate(PRO):
        layer.read(map, null=np.nan)
        r, c = layer.shape
        if dat_pro is None:
            dat_pro = np.empty((s, r, c), dtype=np.double)
        dat_pro[i, :, :] = layer
    del layer

    # Compute mahalanobis distance
    mahal_pro = mahal(v=dat_pro, m=stat_mean, VI=VI)
    if flag_d:
        mahalpro = "{}_mahalpro".format(out)
        mahal_pro.write(mapname=mahalpro)
        gs.info(_("Mahalanobis distance map saved: {}").format(mahalpro))
        gs.run_command("r.support",
                       map=mahalpro,
                       title="Mahalanobis distance map projection domain",
                       units="unitless",
                       loadhistory=tmphist,
                       description="Mahalanobis distance map in projection "
                       "domain estimated using covariance of refence data")

    # Compute NT1
    tmplay = tmpname(out)
    mnames = [None] * len(REF)
    for i in xrange(len(REF)):
        tmpout = tmpname("exdet")
        # TODO: computations below sometimes result in very small negative
        # numbers, which are not 'real', but rather due to some differences
        # in handling digits in grass and python, hence second mapcalc
        # statement. Need to figure out how to handle this better.
        gs.mapcalc(
            "eval("
            "tmp = min(($prolay - $refmin), ($refmax - $prolay),0) / "
            "($refmax - $refmin))\n"
            "$Dij = if(tmp > -0.000000001, 0, tmp)",
            Dij=tmpout,
            prolay=PRO[i],
            refmin=stat_min[i],
            refmax=stat_max[i],
            quiet=True)
        mnames[i] = tmpout
    gs.run_command("r.series",
                   quiet=True,
                   input=mnames,
                   output=tmplay,
                   method="sum")

    # Compute most influential covariate (MIC) metric for NT1
    if flag_p:
        tmpla1 = tmpname(out)
        gs.run_command("r.series",
                       quiet=True,
                       output=tmpla1,
                       input=mnames,
                       method="min_raster")

    # Compute NT2
    tmpla2 = tmpname(out)
    nt2map = garray.array()
    nt2map[...] = mahal_pro / mahal_ref_max
    nt2map.write(mapname=tmpla2)

    # Compute  most influential covariate (MIC) metric for NT2
    if flag_p:
        tmpla3 = tmpname(out)
        laylist = []
        layer = garray.array()
        for i, map in enumerate(opn):
            gs.use_temp_region()
            gs.run_command("g.region", quiet=True, region=regbu1)
            REFtmp = [x for num, x in enumerate(REF) if not num == i]
            stattmp = np.delete(stat_mean, i, axis=0)
            VItmp = CoVar(maps=REFtmp)
            gs.del_temp_region()
            dat_protmp = np.delete(dat_pro, i, axis=0)
            ymap = mahal(v=dat_protmp, m=stattmp, VI=VItmp)
            # in Mesgaran et al, the MIC2 is the max icp, but that is the
            # same as the minimum mahalanobis distance (ymap)
            # icp = (mahal_pro - ymap) / mahal_pro * 100
            layer[:, :] = ymap
            tmpmahal = tmpname(out)
            layer.write(tmpmahal)
            laylist.append(tmpmahal)
        gs.run_command("r.series",
                       quiet=True,
                       output=tmpla3,
                       input=laylist,
                       method="min_raster",
                       overwrite=True)

    # Compute nt1, nt2, and nt1and2 novelty maps
    nt1 = "{}_NT1".format(out)
    nt2 = "{}_NT2".format(out)
    nt12 = "{}_NT1NT2".format(out)
    expr = ";".join([
        "$nt12 = if($tmplay < 0, $tmplay, $tmpla2)",
        "$nt2 = if($tmplay >= 0, $tmpla2, null())",
        "$nt1 = if($tmplay < 0, $tmplay, null())"
    ])
    gs.mapcalc(expr,
               nt12=nt12,
               nt1=nt1,
               nt2=nt2,
               tmplay=tmplay,
               tmpla2=tmpla2,
               quiet=True)

    # Write metadata nt1, nt2, nt1and2  maps
    gs.run_command("r.support",
                   map=nt1,
                   units="unitless",
                   title="Type 1 similarity",
                   description="Type 1 similarity (NT1)",
                   loadhistory=tmphist)
    gs.run_command("r.support",
                   map=nt2,
                   units="unitless",
                   title="Type 2 similarity",
                   description="Type 2 similarity (NT2)",
                   loadhistory=tmphist)
    gs.run_command("r.support",
                   map=nt12,
                   units="unitless",
                   title="Type 1 + 2 novelty / similarity",
                   description="Type 1 + 2 similarity (NT1)",
                   loadhistory=tmphist)

    # Compute MIC maps
    if flag_p:
        mic12 = "{}_MICNT1and2".format(out)
        expr = "$mic12 = if($tmplay < 0, $tmpla1, " \
               "if($tmpla2>1, $tmpla3, -1))"
        gs.mapcalc(expr,
                   tmplay=tmplay,
                   tmpla1=tmpla1,
                   tmpla2=tmpla2,
                   tmpla3=tmpla3,
                   mic12=mic12,
                   quiet=True)

        # Write category labels to MIC maps
        tmpcat = tempfile.mkstemp()
        with open(tmpcat[1], "w") as text_file:
            text_file.write("-1:None\n")
            for cats in xrange(len(opn)):
                text_file.write("{}:{}\n".format(cats, opn[cats]))
        gs.run_command("r.category",
                       quiet=True,
                       map=mic12,
                       rules=tmpcat[1],
                       separator=":")
        os.remove(tmpcat[1])
        CATV = Module('r.category', map=mic12, stdout_=PIPE).outputs.stdout
        Module('r.category', map=mic12, rules="-", stdin_=CATV, quiet=True)
        gs.run_command("r.support",
                       map=mic12,
                       units="unitless",
                       title="Most influential covariate",
                       description="Most influential covariate (MIC) for NT1"
                       "and NT2",
                       loadhistory=tmphist)

    # Write color table
    gs.write_command("r.colors",
                     map=nt12,
                     rules='-',
                     stdin=COLORS_EXDET,
                     quiet=True)

    # Finalize
    gs.info(_("Done...."))
Esempio n. 23
def main(options, flags):

    gisbase = os.getenv("GISBASE")
    if not gisbase:
        gs.fatal(_("$GISBASE not defined"))
        return 0

    # Reference / sample area or points
    ref_rast = options["ref_rast"]
    ref_vect = options["ref_vect"]
    if ref_rast:
        reftype = gs.raster_info(ref_rast)
        if reftype["datatype"] != "CELL":
            gs.fatal(_("The ref_rast map must have type CELL (integer)"))
        if (reftype["min"] != 0 and reftype["min"] != 1) or reftype["max"] != 1:
            gs.fatal(
                _(
                    "The ref_rast map must be a binary raster,"
                    " i.e. it should contain only values 0 and 1 or 1 only"
                    " (now the minimum is {} and maximum is {})".format(
                        reftype["min"], reftype["max"]
                    )
                )
            )

    # old environmental layers & variable names
    reference_layer = options["env"]
    reference_layer = reference_layer.split(",")
    raster_exists(reference_layer)
    variable_name = [z.split("@")[0] for z in reference_layer]
    variable_name = [x.lower() for x in variable_name]

    # new environmental variables
    projection_layers = options["env_proj"]
    if not projection_layers:
        to_be_projected = False
        projection_layers = reference_layer
    else:
        to_be_projected = True
        projection_layers = projection_layers.split(",")
        raster_exists(projection_layers)
        if (
            len(projection_layers) != len(reference_layer)
            and len(projection_layers) != 0
        ):
            gs.fatal(
                _(
                    "The number of reference and predictor variables"
                    " should be the same. You provided {} reference and {}"
                    " projection variables".format(
                        len(reference_layer), len(projection_layers)
                    )
                )
            )

    # output layers
    opl = options["output"]
    opc = opl + "_MES"
    ipi = [opl + "_" + i for i in variable_name]

    # flags
    flm = flags["m"]
    flk = flags["k"]
    fln = flags["n"]
    fli = flags["i"]
    flc = flags["c"]

    # digits / precision
    digits = int(options["digits"])
    digits2 = pow(10, digits)

    # get current region settings, to compare to new ones later
    region_1 = gs.parse_command("g.region", flags="g")

    # Text for history in metadata
    opt2 = dict((k, v) for k, v in options.items() if v)
    hist = " ".join("{!s}={!r}".format(k, v) for (k, v) in opt2.items())
    hist = "r.mess {}".format(hist)
    unused, tmphist = tempfile.mkstemp()
    with open(tmphist, "w") as text_file:
        text_file.write(hist)

    # Create reference layer if not defined
    if not ref_rast and not ref_vect:
        ref_rast = tmpname("tmp0")
        Module(
            "r.mapcalc",
            "{0} = if(isnull({1}),null(),1)".format(ref_rast, reference_layer[0]),
            quiet=True,
        )

    # Create the recode table - Reference distribution is raster
    citiam = gs.find_file(name="MASK", element="cell", mapset=gs.gisenv()["MAPSET"])
    if citiam["fullname"]:
        rname = tmpname("tmp3")
        Module("r.mapcalc", expression="{} = MASK".format(rname), quiet=True)

    if ref_rast:
        vtl = ref_rast

        # Create temporary layer based on reference layer
        tmpf0 = tmpname("tmp2")
        Module(
            "r.mapcalc", expression="{0} = int({1} * 1)".format(tmpf0, vtl), quiet=True
        )
        Module("r.null", map=tmpf0, setnull=0, quiet=True)
        if citiam["fullname"]:
            Module("r.mask", flags="r", quiet=True)
        for i in range(len(reference_layer)):

            # Create mask based on combined MASK/reference layer
            Module("r.mask", raster=tmpf0, quiet=True)

            # Calculate the frequency distribution
            tmpf1 = tmpname("tmp4")
            Module(
                "r.mapcalc",
                expression="{0} = int({1} * {2})".format(
                    tmpf1, digits2, reference_layer[i]
                ),
                quiet=True,
            )
            stats_out = Module(
                "r.stats",
                flags="cn",
                input=tmpf1,
                sort="asc",
                separator=";",
                stdout_=PIPE,
            ).outputs.stdout
            stval = {}
            stats_outlines = stats_out.replace("\r", "").split("\n")
            stats_outlines = [_f for _f in stats_outlines if _f]
            for z in stats_outlines:
                [val, count] = z.split(";")
                stval[float(val)] = float(count)
            sstval = sorted(stval.items(), key=operator.itemgetter(0))
            sstval = np.matrix(sstval)
            a = np.cumsum(np.array(sstval), axis=0)
            b = np.sum(np.array(sstval), axis=0)
            c = a[:, 1] / b[1] * 100

            # Remove tmp mask and set region to env_proj if needed
            Module("r.mask", quiet=True, flags="r")
            if to_be_projected:
                gs.use_temp_region()
                Module("g.region", quiet=True, raster=projection_layers[0])

            # get new region settings, to compare to original ones later
            region_2 = gs.parse_command("g.region", flags="g")

            # Get min and max values for recode table (based on full map)
            tmpf2 = tmpname("tmp5")
            Module(
                "r.mapcalc",
                expression="{0} = int({1} * {2})".format(
                    tmpf2, digits2, projection_layers[i]
                ),
                quiet=True,
            )
            d = gs.parse_command("r.univar", flags="g", map=tmpf2, quiet=True)

            # Create recode rules
            Dmin = int(d["min"])
            Dmax = int(d["max"])
            envmin = np.min(np.array(sstval), axis=0)[0]
            envmax = np.max(np.array(sstval), axis=0)[0]

            if Dmin < envmin:
                e1 = Dmin - 1
            else:
                e1 = envmin - 1
            if Dmax > envmax:
                e2 = Dmax + 1
            else:
                e2 = envmax + 1

            a1 = np.hstack([(e1), np.array(sstval.T[0])[0, :]])
            a2 = np.hstack([np.array(sstval.T[0])[0, :] - 1, (e2)])
            b1 = np.hstack([(0), c])

            fd2, tmprule = tempfile.mkstemp(suffix=variable_name[i])
            with open(tmprule, "w") as text_file:
                for k in np.arange(0, len(b1.T)):
                    text_file.write(
                        "%s:%s:%s\n" % (str(int(a1[k])), str(int(a2[k])), str(b1[k]))
                    )

            # Create the recode layer and calculate the IES
            compute_ies(tmprule, ipi[i], tmpf2, envmin, envmax)
            Module(
                "r.support",
                map=ipi[i],
                title="IES {}".format(reference_layer[i]),
                units="0-100 (relative score)",
                description="Environmental similarity {}".format(reference_layer[i]),
                loadhistory=tmphist,
            )

            # Clean up
            os.close(fd2)
            os.remove(tmprule)

            # Change region back to original
            gs.del_temp_region()

    # Create the recode table - Reference distribution is vector
    else:
        vtl = ref_vect

        # Copy point layer and add columns for variables
        tmpf0 = tmpname("tmp7")
        Module(
            "v.extract", quiet=True, flags="t", input=vtl, type="point", output=tmpf0
        )
        Module("v.db.addtable", quiet=True, map=tmpf0)

        # TODO: see if there is a more efficient way to handle the mask
        if citiam["fullname"]:
            Module("r.mask", quiet=True, flags="r")

        # Upload raster values and get value in python as frequency table
        sql1 = "SELECT cat FROM {}".format(str(tmpf0))
        cn = len(np.hstack(db.db_select(sql=sql1)))
        for m in range(len(reference_layer)):

            # Set mask back (this means that points outside the mask will
            # be ignored in the computation of the frequency distribution
            # of the reference variable env(m))
            if citiam["fullname"]:
                Module("g.copy", raster=[rname, "MASK"], quiet=True)

            # Compute frequency distribution of variable(m)
            mid = str(m)
            laytype = gs.raster_info(reference_layer[m])["datatype"]
            if laytype == "CELL":
                columns = "envvar_{} integer".format(str(mid))
            else:
                columns = "envvar_{} double precision".format(str(mid))
            Module("v.db.addcolumn", map=tmpf0, columns=columns, quiet=True)
            sql2 = "UPDATE {} SET envvar_{} = NULL".format(str(tmpf0), str(mid))
            Module("db.execute", sql=sql2, quiet=True)
            coln = "envvar_{}".format(str(mid))
            Module(
                "v.what.rast",
                quiet=True,
                map=tmpf0,
                layer=1,
                raster=reference_layer[m],
                column=coln,
            )
            sql3 = (
                "SELECT {0}, count({0}) from {1} WHERE {0} IS NOT NULL "
                "GROUP BY {0} ORDER BY {0}"
            ).format(coln, tmpf0)
            volval = np.vstack(db.db_select(sql=sql3))
            volval = volval.astype(float, copy=False)
            a = np.cumsum(volval[:, 1], axis=0)
            b = np.sum(volval[:, 1], axis=0)
            c = a / b * 100

            # Check for point without values
            if b < cn:
                gs.info(
                    _(
                        "Please note that there were {} points without "
                        "value. This is probably because they are outside "
                        "the computational region or mask".format((cn - b))
                    )
                )

            # Set region to env_proj layers (if different from env) and remove
            # mask (if set above)
            if citiam["fullname"]:
                Module("r.mask", quiet=True, flags="r")
            if to_be_projected:
                gs.use_temp_region()
                Module("g.region", quiet=True, raster=projection_layers[0])
            region_2 = gs.parse_command("g.region", flags="g")

            # Multiply env_proj layer with dignum
            tmpf2 = tmpname("tmp8")
            Module(
                "r.mapcalc",
                expression="{0} = int({1} * {2})".format(
                    tmpf2, digits2, projection_layers[m]
                ),
                quiet=True,
            )

            # Calculate min and max values of sample points and raster layer
            envmin = int(min(volval[:, 0]) * digits2)
            envmax = int(max(volval[:, 0]) * digits2)
            Drange = gs.read_command("r.info", flags="r", map=tmpf2)
            Drange = str.splitlines(Drange)
            Drange = np.hstack([i.split("=") for i in Drange])
            Dmin = int(Drange[1])
            Dmax = int(Drange[3])

            if Dmin < envmin:
                e1 = Dmin - 1
            else:
                e1 = envmin - 1
            if Dmax > envmax:
                e2 = Dmax + 1
            else:
                e2 = envmax + 1

            a0 = volval[:, 0] * digits2
            a0 = a0.astype(int, copy=False)
            a1 = np.hstack([(e1), a0])
            a2 = np.hstack([a0 - 1, (e2)])
            b1 = np.hstack([(0), c])

            fd3, tmprule = tempfile.mkstemp(suffix=variable_name[m])
            with open(tmprule, "w") as text_file:
                for k in np.arange(0, len(b1)):
                    rtmp = "{}:{}:{}\n".format(
                        str(int(a1[k])), str(int(a2[k])), str(b1[k])
                    )
                    text_file.write(rtmp)

            # Create the recode layer and calculate the IES
            compute_ies(tmprule, ipi[m], tmpf2, envmin, envmax)
            Module(
                "r.support",
                map=ipi[m],
                title="IES {}".format(reference_layer[m]),
                units="0-100 (relative score)",
                description="Environmental similarity {}".format(reference_layer[m]),
                loadhistory=tmphist,
            )

            # Clean up
            os.close(fd3)
            os.remove(tmprule)

            # Change region back to original
            gs.del_temp_region()

    # Calculate MESS statistics
    # Set region to env_proj layers (if different from env)
    # Note: this changes the region, to ensure the newly created layers
    # are actually visible to the user. This goes against normal practice,
    # so a warning is printed.
    if to_be_projected:
        Module("g.region", quiet=True, raster=projection_layers[0])

    # MES
    Module("r.series", quiet=True, output=opc, input=ipi, method="minimum")
    gs.write_command("r.colors", map=opc, rules="-", stdin=COLORS_MES, quiet=True)

    # Write layer metadata
    Module(
        "r.support",
        map=opc,
        title="Areas with novel conditions",
        units="0-100 (relative score)",
        description="The multivariate environmental similarity" "(MES)",
        loadhistory=tmphist,
    )

    # Area with negative MES
    if fln:
        mod1 = "{}_novel".format(opl)
        Module("r.mapcalc", "{} = int(if( {} < 0, 1, 0))".format(mod1, opc), quiet=True)

        # Write category labels
        Module("r.category", map=mod1, rules="-", stdin=RECL_MESNEG, quiet=True)

        # Write layer metadata
        Module(
            "r.support",
            map=mod1,
            title="Areas with novel conditions",
            units="-",
            source1="Based on {}".format(opc),
            description="1 = novel conditions, 0 = within range",
            loadhistory=tmphist,
        )

    # Most dissimilar variable (MoD)
    if flm:
        tmpf4 = tmpname("tmp9")
        mod2 = "{}_MoD".format(opl)
        Module("r.series", quiet=True, output=tmpf4, input=ipi, method="min_raster")
        Module("r.mapcalc", "{} = int({})".format(mod2, tmpf4), quiet=True)

        fd4, tmpcat = tempfile.mkstemp()
        with open(tmpcat, "w") as text_file:
            for cats in range(len(ipi)):
                text_file.write("{}:{}\n".format(str(cats), reference_layer[cats]))
        Module("r.category", quiet=True, map=mod2, rules=tmpcat, separator=":")
        os.close(fd4)
        os.remove(tmpcat)

        # Write layer metadata
        Module(
            "r.support",
            map=mod2,
            title="Most dissimilar variable (MoD)",
            units="-",
            source1="Based on {}".format(opc),
            description="Name of most dissimilar variable",
            loadhistory=tmphist,
        )

    # sum(IES), where IES < 0
    if flk:
        mod3 = "{}_SumNeg".format(opl)
        c0 = -0.01 / digits2
        Module(
            "r.series",
            quiet=True,
            input=ipi,
            method="sum",
            range=("-inf", c0),
            output=mod3,
        )
        gs.write_command("r.colors", map=mod3, rules="-", stdin=COLORS_MES, quiet=True)

        # Write layer metadata
        Module(
            "r.support",
            map=mod3,
            title="Sum of negative IES values",
            units="-",
            source1="Based on {}".format(opc),
            description="Sum of negative IES values",
            loadhistory=tmphist,
        )

    # Number of layers with negative values
    if flc:
        tmpf5 = tmpname("tmp10")
        mod4 = "{}_CountNeg".format(opl)
        MinMes = gs.read_command("r.info", quiet=True, flags="r", map=opc)
        MinMes = str.splitlines(MinMes)
        MinMes = float(np.hstack([i.split("=") for i in MinMes])[1])
        c0 = -0.0001 / digits2
        Module(
            "r.series",
            quiet=True,
            input=ipi,
            output=tmpf5,
            method="count",
            range=(MinMes, c0),
        )
        gs.mapcalc("$mod4 = int($tmpf5)", mod4=mod4, tmpf5=tmpf5, quiet=True)

        # Write layer metadata
        Module(
            "r.support",
            map=mod4,
            title="Number of layers with negative values",
            units="-",
            source1="Based on {}".format(opc),
            description="Number of layers with negative values",
            loadhistory=tmphist,
        )

    # Remove IES layers
    if fli:
        Module("g.remove", quiet=True, flags="f", type="raster", name=ipi)
    # Clean up tmp file
    # os.remove(tmphist)

    gs.message(_("Finished ...\n"))
    if region_1 != region_2:
        gs.message(
            _(
                "\nPlease note that the region has been changes to match"
                " the set of projection (env_proj) variables.\n"
            )
        )
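
The recode rules built in both branches (cumulative frequencies padded with
one interval on each side) can be checked on a toy frequency table; the
variable names below mirror those in the code:

import numpy as np

# toy (value, count) table as produced by r.stats -cn
sstval = np.array([[10., 2.], [12., 5.], [15., 3.]])
a = np.cumsum(sstval, axis=0)
b = np.sum(sstval, axis=0)
c = a[:, 1] / b[1] * 100                 # cumulative percentage per value

e1, e2 = 9, 16                           # padded min/max of the target layer
a1 = np.hstack([e1, sstval.T[0]])        # lower bounds of the intervals
a2 = np.hstack([sstval.T[0] - 1, e2])    # upper bounds of the intervals
b1 = np.hstack([0, c])                   # percentage mapped to each interval
for low, high, pct in zip(a1, a2, b1):
    print("%d:%d:%s" % (int(low), int(high), pct))
# prints 9:9:0, 10:11:20.0, 12:14:70.0, 15:16:100.0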
Esempio n. 24
 def __init__(self,
              cmd,
              width=None,
              height=None,
              overlap=0,
              processes=None,
              split=False,
              debug=False,
              region=None,
              move=None,
              log=False,
              start_row=0,
              start_col=0,
              out_prefix='',
              mapset_prefix=None,
              *args,
              **kargs):
     kargs['run_'] = False
     self.mset = Mapset()
     self.module = Module(cmd, *args, **kargs)
     self.width = width
     self.height = height
     self.overlap = overlap
     self.processes = processes
     self.region = region if region else Region()
     self.start_row = start_row
     self.start_col = start_col
     self.out_prefix = out_prefix
     self.log = log
     self.move = move
     self.gisrc_src = os.environ['GISRC']
     self.n_mset, self.gisrc_dst = None, None
     if self.move:
         self.n_mset = copy_mapset(self.mset, self.move)
         self.gisrc_dst = write_gisrc(self.n_mset.gisdbase,
                                      self.n_mset.location,
                                      self.n_mset.name)
         rasters = [r for r in select(self.module.inputs, 'raster')]
         if rasters:
             copy_rasters(rasters,
                          self.gisrc_src,
                          self.gisrc_dst,
                          region=self.region)
         vectors = [v for v in select(self.module.inputs, 'vector')]
         if vectors:
             copy_vectors(vectors, self.gisrc_src, self.gisrc_dst)
         groups = [g for g in select(self.module.inputs, 'group')]
         if groups:
             copy_groups(groups,
                         self.gisrc_src,
                         self.gisrc_dst,
                         region=self.region)
     self.bboxes = split_region_tiles(region=region,
                                      width=width,
                                      height=height,
                                      overlap=overlap)
     if mapset_prefix:
         self.msetstr = mapset_prefix + "_%03d_%03d"
     else:
         self.msetstr = cmd.replace('.', '') + "_%03d_%03d"
     self.inlist = None
     if split:
         self.split()
     self.debug = debug
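
The constructor above is pygrass's GridModule; the documented usage is to wrap
a raster module and run it tiled over the current region:

from grass.pygrass.modules.grid.grid import GridModule

grd = GridModule('r.slope.aspect',
                 width=500, height=500, overlap=2, processes=None,
                 elevation='elevation',
                 slope='slope', aspect='aspect', overwrite=True)
grd.run()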
Esempio n. 25
def main():

    settings = options['settings']
    scene_names = options['scene_name'].split(',')
    output = options['output']
    nprocs = int(options['nprocs'])
    clouds = int(options['clouds'])
    producttype = options['producttype']
    start = options['start']
    end = options['end']
    use_scenenames = flags['s']
    ind_folder = flags['f']

    ### check if we have the i.sentinel.download + i.sentinel.import addons
    if not grass.find_program('i.sentinel.download', '--help'):
        grass.fatal(
            _("The 'i.sentinel.download' module was not found, install it first:"
              ) + "\n" + "g.extension i.sentinel")

    ### Test if all required data are there
    if not os.path.isfile(settings):
        grass.fatal(_("Settings file <%s> not found" % (settings)))

    ### set some common environmental variables, like:
    os.environ.update(
        dict(GRASS_COMPRESS_NULLS='1',
             GRASS_COMPRESSOR='ZSTD',
             GRASS_MESSAGE_FORMAT='plain'))

    ### test nprocs Settings
    if nprocs > mp.cpu_count():
        grass.fatal("Using %d parallel processes but only %d CPUs available." %
                    (nprocs, mp.cpu_count()))

    ### the sentinelsat API restricts the number of parallel downloads
    elif nprocs > 2:
        grass.message("Maximum number of parallel processes for downloading" +
                      " fixed to 2 due to sentinelsat API restrictions")
        nprocs = 2

    if use_scenenames:
        scenenames = scene_names
        ### check if the filename is valid
        ### TODO: refine check, it's currently a very lazy check
        if len(scenenames[0]) < 10:
            grass.fatal(
                "No scene names indicated. Please provide scene names in "
                "the format S2A_MSIL1C_20180822T155901_N0206_R097_T17SPV_20180822T212023.SAFE"
            )
    else:
        ### get a list of scenenames to download
        i_sentinel_download_string = grass.parse_command(
            'i.sentinel.download',
            settings=settings,
            producttype=producttype,
            start=start,
            end=end,
            clouds=clouds,
            flags='l')
        i_sentinel_keys = i_sentinel_download_string.keys()
        scenenames = [item.split(' ')[1] for item in i_sentinel_keys]

    ### parallelize download
    grass.message(_("Downloading Sentinel-2 data..."))

    ### adapt nprocs to number of scenes
    if len(scenenames) == 1:
        nprocs = 1

    queue_download = ParallelModuleQueue(nprocs=nprocs)

    for idx, scenename in enumerate(scenenames):
        producttype, start_date, end_date, query_string = scenename_split(
            scenename)
        ### output into separate folders, easier to import in a parallel way:
        if ind_folder:
            outpath = os.path.join(output, 'dl_s2_%s' % str(idx + 1))
        else:
            outpath = output
        i_sentinel_download = Module('i.sentinel.download',
                                     settings=settings,
                                     start=start_date,
                                     end=end_date,
                                     producttype=producttype,
                                     query=query_string,
                                     output=outpath,
                                     run_=False)
        queue_download.put(i_sentinel_download)
    queue_download.wait()
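
The put()/wait() pattern above is the standard ParallelModuleQueue workflow:
modules are created with run_=False, queued, and started as worker slots free
up. A minimal sketch with hypothetical raster names:

from grass.pygrass.modules import Module, ParallelModuleQueue

queue = ParallelModuleQueue(nprocs=2)
for dem in ('dem_tile_1', 'dem_tile_2', 'dem_tile_3'):
    mod = Module('r.slope.aspect', elevation=dem,
                 slope='slope_' + dem, overwrite=True, run_=False)
    queue.put(mod)     # starts the module as soon as a slot is free
queue.wait()           # block until every queued module has finished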
Esempio n. 26
def main():
    # check if the map is in the current mapset
    mapset = grass.find_file(opt["map"], element="vector")["mapset"]
    if not mapset or mapset != grass.gisenv()["MAPSET"]:
        grass.fatal(
            _("Vector map <{}> not found in the current mapset").format(
                opt["map"]))

    # get list of existing columns
    try:
        columns = grass.vector_columns(opt["map"]).keys()
    except CalledModuleError:
        return 1

    allowed_rasters = ("N2", "N5", "N10", "N20", "N50", "N100")

    # test input feature type
    vinfo = grass.vector_info_topo(opt["map"])
    if vinfo["areas"] < 1 and vinfo["points"] < 1:
        grass.fatal(
            _("No points or areas found in input vector map <{}>").format(
                opt["map"]))

    # check area size limit
    check_area_size = float(opt["area_size"]) > 0
    if check_area_size:
        area_col_name = "area_{}".format(os.getpid())
        Module(
            "v.to.db",
            map=opt["map"],
            option="area",
            units="kilometers",
            columns=area_col_name,
            quiet=True,
        )
        areas = Module(
            "v.db.select",
            flags="c",
            map=opt["map"],
            columns=area_col_name,
            where="{} > {}".format(area_col_name, opt["area_size"]),
            stdout_=grass.PIPE,
        )
        large_areas = len(areas.outputs.stdout.splitlines())
        if large_areas > 0:
            grass.warning(
                "{} areas larger than size limit will be skipped from computation"
                .format(large_areas))

    # extract multi values to points
    for rast in opt["return_period"].split(","):
        # check valid rasters
        name = grass.find_file(rast, element="cell")["name"]
        if not name:
            grass.warning("Raster map <{}> not found. "
                          "Skipped.".format(rast))
            continue
        if name not in allowed_rasters:
            grass.warning("Raster map <{}> skipped. "
                          "Allowed: {}".format(rast, allowed_rasters))
            continue

        # perform zonal statistics
        grass.message("Processing <{}>...".format(rast))
        table = "{}_table".format(name)
        if vinfo["areas"] > 0:
            Module(
                "v.rast.stats",
                flags="c",
                map=opt["map"],
                raster=rast,
                column_prefix=name,
                method="average",
                quiet=True,
            )
            # handle NULL values (areas smaller than raster resolution)
            null_values = Module(
                "v.db.select",
                map=opt["map"],
                columns="cat",
                flags="c",
                where="{}_average is NULL".format(name),
                stdout_=grass.PIPE,
            )
            cats = null_values.outputs.stdout.splitlines()
            if len(cats) > 0:
                grass.warning(
                    _("Input vector map <{}> contains very small areas (smaller than "
                      "raster resolution). These areas will be proceeded by querying "
                      "single raster cell.").format(opt["map"]))
                Module(
                    "v.what.rast",
                    map=opt["map"],
                    raster=rast,
                    type="centroid",
                    column="{}_average".format(name),
                    where="{}_average is NULL".format(name),
                    quiet=True,
                )
        else:  # -> points
            Module(
                "v.what.rast",
                map=opt["map"],
                raster=rast,
                column="{}_average".format(name),
                quiet=True,
            )

        # add column to the attribute table if not exists
        rl = float(opt["rainlength"])
        field_name = "H_{}T{}".format(name, opt["rainlength"])
        if field_name not in columns:
            Module(
                "v.db.addcolumn",
                map=opt["map"],
                columns="{} double precision".format(field_name),
            )

        # determine coefficient for calculation
        a, c = coeff(rast, rl)
        if a is None or c is None:
            grass.fatal("Unable to calculate coefficients")

        # calculate output values, update attribute table
        coef = a * rl**(1 - c)
        expression = "{}_average * {}".format(name, coef)
        Module("v.db.update",
               map=opt["map"],
               column=field_name,
               query_column=expression)

        if check_area_size:
            Module(
                "v.db.update",
                map=opt["map"],
                column=field_name,
                value="-1",
                where="{} > {}".format(area_col_name, opt["area_size"]),
            )

        # remove unused column
        Module("v.db.dropcolumn",
               map=opt["map"],
               columns="{}_average".format(name))

    if check_area_size:
        # remove unused column
        Module("v.db.dropcolumn", map=opt["map"], columns=area_col_name)

    return 0
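
The attribute update above multiplies the zonal average by a * rl**(1 - c).
A standalone check of that arithmetic with hypothetical coefficients (coeff()
itself is not shown in the snippet):

a, c = 0.62, 0.74          # hypothetical coefficients from coeff(rast, rl)
rl = 30.0                  # rain length in minutes
coef = a * rl ** (1 - c)
h_average = 45.2           # hypothetical '<name>_average' column value
print(round(h_average * coef, 2))   # value written to the H_<name>T<rl> column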
Esempio n. 27
    def get_topology(self, map):
        """Return the topology of the instance's map as a key/value dict"""
        vinfo = Module('v.info', map=self.map, flags='t', quiet=True,
                       stdout_=PIPE)

        features = parse_key_val(vinfo.outputs.stdout)
        return features
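
v.info -t prints one key=value pair per topology element, which parse_key_val
turns into a dictionary. A standalone sketch with a hypothetical map name:

from subprocess import PIPE

from grass.pygrass.modules import Module
from grass.script.utils import parse_key_val

vinfo = Module('v.info', map='roads', flags='t', quiet=True, stdout_=PIPE)
topo = parse_key_val(vinfo.outputs.stdout)
print(topo['points'], topo['lines'], topo['areas'])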
Esempio n. 28
def erase(inShp,
          erase_feat,
          out,
          splitMultiPart=None,
          notTbl=None,
          api='pygrass'):
    """
    Difference between two feature classes
    
    APIs available:
    * pygrass;
    * grass;
    * saga;
    """

    if api == 'saga':
        """
        Using SAGA GIS
        
        It appears to be very slow
        """
        from glass.pys import execmd

        cmd = ('saga_cmd shapes_polygons 15 -A {in_shp} -B {erase_shp} '
               '-RESULT {output} -SPLIT {sp}').format(
                   in_shp=inShp,
                   erase_shp=erase_feat,
                   output=out,
                   sp='0' if not splitMultiPart else '1')

        outcmd = execmd(cmd)

    elif api == 'pygrass':
        """
        Use pygrass
        """

        from grass.pygrass.modules import Module

        erase = Module("v.overlay",
                       ainput=inShp,
                       atype="area",
                       binput=erase_feat,
                       btype="area",
                       operator="not",
                       output=out,
                       overwrite=True,
                       run_=False,
                       quiet=True,
                       flags='t' if notTbl else None)

        erase()

    elif api == 'grass':
        """
        Use GRASS GIS tool via command line
        """

        from glass.pys import execmd

        rcmd = execmd(
            ("v.overlay ainput={} atype=area binput={} "
             "btype=area operator=not output={} {}"
             "--overwrite --quiet").format(inShp, erase_feat, out,
                                           "" if not notTbl else "-t "))

    else:
        raise ValueError('API {} is not available!'.format(api))

    return out
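
A minimal usage sketch of the pygrass branch, with hypothetical map names
inside an active GRASS session:

# clip the water bodies out of the parcel polygons
erase('parcels', 'water_bodies', 'parcels_dry', api='pygrass')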
Esempio n. 29
def main():
    # Get the options
    input = options["input"]
    output = options["output"]
    start = options["start"]
    stop = options["stop"]
    base = options["basename"]
    cycle = options["cycle"]
    lower = options["lower"]
    upper = options["upper"]
    offset = options["offset"]
    limits = options["limits"]
    shift = options["shift"]
    scale = options["scale"]
    method = options["method"]
    granularity = options["granularity"]
    register_null = flags["n"]
    reverse = flags["r"]

    # Make sure the temporal database exists
    tgis.init()

    # We need a database interface
    dbif = tgis.SQLDatabaseInterfaceConnection()
    dbif.connect()

    mapset = tgis.get_current_mapset()

    if input.find("@") >= 0:
        id = input
    else:
        id = input + "@" + mapset

    input_strds = tgis.SpaceTimeRasterDataset(id)

    if input_strds.is_in_db() == False:
        dbif.close()
        grass.fatal(_("Space time raster dataset <%s> not found") % (id))

    input_strds.select(dbif)

    if output.find("@") >= 0:
        out_id = output
    else:
        out_id = output + "@" + mapset

    # The output space time raster dataset
    output_strds = tgis.SpaceTimeRasterDataset(out_id)
    if output_strds.is_in_db(dbif):
        if not grass.overwrite():
            dbif.close()
            grass.fatal(_("Space time raster dataset <%s> is already in the "
                          "database, use overwrite flag to overwrite") % out_id)

    if tgis.check_granularity_string(granularity,
                                     input_strds.get_temporal_type()) == False:
        dbif.close()
        grass.fatal(_("Invalid granularity"))

    if tgis.check_granularity_string(cycle,
                                     input_strds.get_temporal_type()) == False:
        dbif.close()
        grass.fatal(_("Invalid cycle"))

    if offset:
        if tgis.check_granularity_string(offset,
                                         input_strds.get_temporal_type()) == False:
            dbif.close()
            grass.fatal(_("Invalid offset"))

    # The lower threshold space time raster dataset
    if lower:
        if not limits:
            dbif.close()
            grass.fatal(_("You need to set the limits to compute the occurrence"
                          " space time raster dataset"))

        if lower.find("@") >= 0:
            lower_id = lower
        else:
            lower_id = lower + "@" + mapset

        lower_strds = tgis.SpaceTimeRasterDataset(lower_id)
        if lower_strds.is_in_db() == False:
            dbif.close()
            grass.fatal(_("Space time raster dataset <%s> not found") % (lower_strds.get_id()))

        if lower_strds.get_temporal_type() != input_strds.get_temporal_type():
            dbif.close()
            grass.fatal(_("Temporal type of input strds and lower strds must be equal"))

        lower_strds.select(dbif)

    # The upper threshold space time raster dataset
    if upper:
        if not lower:
            dbif.close()
            grass.fatal(_("The upper option works only in conjunction with the lower option"))

        if upper.find("@") >= 0:
            upper_id = upper
        else:
            upper_id = upper + "@" + mapset

        upper_strds = tgis.SpaceTimeRasterDataset(upper_id)
        if upper_strds.is_in_db() == False:
            dbif.close()
            grass.fatal(_("Space time raster dataset <%s> not found") % (upper_strds.get_id()))

        if upper_strds.get_temporal_type() != input_strds.get_temporal_type():
            dbif.close()
            grass.fatal(_("Temporal type of input strds and upper strds must be equal"))

        upper_strds.select(dbif)

    input_strds_start, input_strds_end = input_strds.get_temporal_extent_as_tuple()

    if input_strds.is_time_absolute():
        start = tgis.string_to_datetime(start)
        if stop:
            stop = tgis.string_to_datetime(stop)
        else:
            stop = input_strds_end
        start = tgis.adjust_datetime_to_granularity(start, granularity)
    else:
        start = int(start)
        if stop:
            stop = int(stop)
        else:
            stop = input_strds_end

    if input_strds.is_time_absolute():
        end = tgis.increment_datetime_by_string(start, cycle)
    else:
        end = start + cycle

    limit_relations = ["EQUALS", "DURING", "OVERLAPS", "OVERLAPPING", "CONTAINS"]

    count = 1
    output_maps = []


    while input_strds_end > start and stop > start:

        # Make sure that the cyclic computation will stop at the correct time
        if stop and end > stop:
            end = stop

        where = "start_time >= \'%s\' AND start_time < \'%s\'"%(str(start),
                                                                str(end))
        input_maps = input_strds.get_registered_maps_as_objects(where=where,
                                                                dbif=dbif)

        grass.message(_("Processing cycle %s - %s"%(str(start), str(end))))

        if len(input_maps) == 0:
            continue

        # Lets create a dummy list of maps with granularity conform intervals
        gran_list = []
        gran_list_low = []
        gran_list_up = []
        gran_start = start
        while gran_start < end:
            map = input_strds.get_new_map_instance("%i@%i"%(count, count))
            if input_strds.is_time_absolute():
                gran_end = tgis.increment_datetime_by_string(gran_start,
                                                             granularity)
                map.set_absolute_time(gran_start, gran_end)
                gran_start = tgis.increment_datetime_by_string(gran_start,
                                                               granularity)
            else:
                gran_end = gran_start + granularity
                map.set_relative_time(gran_start, gran_end,
                                      input_strds.get_relative_time_unit())
                gran_start = gran_start + granularity
            gran_list.append(copy(map))
            gran_list_low.append(copy(map))
            gran_list_up.append(copy(map))
        # Lists to compute the topology with upper and lower datasets

        # Create the topology between the granularity conform list and all maps
        # of the current cycle
        gran_topo = tgis.SpatioTemporalTopologyBuilder()
        gran_topo.build(gran_list, input_maps)

        if lower:
            lower_maps = lower_strds.get_registered_maps_as_objects(dbif=dbif)
            gran_lower_topo = tgis.SpatioTemporalTopologyBuilder()
            gran_lower_topo.build(gran_list_low, lower_maps)

        if upper:
            upper_maps = upper_strds.get_registered_maps_as_objects(dbif=dbif)
            gran_upper_topo = tgis.SpatioTemporalTopologyBuilder()
            gran_upper_topo.build(gran_list_up, upper_maps)

        old_map_name = None

        # Aggregate
        num_maps = len(gran_list)

        for i in xrange(num_maps):
            if reverse:
                map = gran_list[num_maps - i - 1]
            else:
                map = gran_list[i]
            # Select input maps based on temporal topology relations
            input_maps = []
            if map.get_equal():
                input_maps += map.get_equal()
            elif map.get_contains():
                input_maps += map.get_contains()
            elif map.get_overlaps():
                input_maps += map.get_overlaps()
            elif map.get_overlapped():
                input_maps += map.get_overlapped()
            elif map.get_during():
                input_maps += map.get_during()

            # Check input maps
            if len(input_maps) == 0:
                continue

            # New output map
            output_map_name = "%s_%i" % (base, count)
            output_map_id = map.build_id(output_map_name, mapset)
            output_map = input_strds.get_new_map_instance(output_map_id)

            # Check if new map is in the temporal database
            if output_map.is_in_db(dbif):
                if grass.overwrite():
                    # Remove the existing temporal database entry
                    output_map.delete(dbif)
                    output_map = input_strds.get_new_map_instance(output_map_id)
                else:
                    grass.fatal(_("Map <%s> is already registered in the temporal"
                                 " database, use overwrite flag to overwrite.") %
                                (output_map.get_map_id()))

            map_start, map_end = map.get_temporal_extent_as_tuple()

            if map.is_time_absolute():
                output_map.set_absolute_time(map_start, map_end)
            else:
                output_map.set_relative_time(map_start, map_end,
                                             map.get_relative_time_unit())

            limits_vals = limits.split(",")
            limits_lower = float(limits_vals[0])
            limits_upper = float(limits_vals[1])

            lower_map_name = None
            if lower:
                relations = gran_list_low[i].get_temporal_relations()
                for relation in limit_relations:
                    if relation in relations:
                        lower_map_name = str(relations[relation][0].get_id())
                        break

            upper_map_name = None
            if upper:
                relations = gran_list_up[i].get_temporal_relations()
                for relation in limit_relations:
                    if relation in relations:
                        upper_map_name = str(relations[relation][0].get_id())
                        break

            input_map_names = []
            for input_map in input_maps:
                input_map_names.append(input_map.get_id())

            # Set up the module
            accmod = Module("r.series.accumulate", input=input_map_names,
                            output=output_map_name, run_=False)

            if old_map_name:
                accmod.inputs["basemap"].value = old_map_name
            if lower_map_name:
                accmod.inputs["lower"].value = lower_map_name
            if upper_map_name:
                accmod.inputs["upper"].value = upper_map_name

            accmod.inputs["limits"].value = (limits_lower, limits_upper)

            if shift:
                accmod.inputs["shift"].value = float(shift)

            if scale:
                accmod.inputs["scale"].value = float(scale)

            if method:
                accmod.inputs["method"].value = method

            accmod.run()

            if accmod.popen.returncode != 0:
                dbif.close()
                grass.fatal(_("Error running r.series.accumulate"))

            output_maps.append(output_map)
            old_map_name = output_map_name
            count += 1

        # Increment the cycle
        start = end
        if input_strds.is_time_absolute():
            if offset:
                start = tgis.increment_datetime_by_string(end, offset)
            end = tgis.increment_datetime_by_string(start, cycle)
        else:
            if offset:
                start = end + offset
            end = start + cycle

    # Insert the maps into the output space time dataset
    if output_strds.is_in_db(dbif):
        if grass.overwrite():
            output_strds.delete(dbif)
            output_strds = input_strds.get_new_instance(out_id)
        else:
            dbif.close()
            grass.fatal(_("Space time raster dataset <%s> is already in the "
                          "database, use the overwrite flag to overwrite") % out_id)

    temporal_type, semantic_type, title, description = input_strds.get_initial_values()
    output_strds.set_initial_values(temporal_type, semantic_type, title,
                                    description)
    output_strds.insert(dbif)

    empty_maps = []
    # Register the maps in the database
    count = 0
    for output_map in output_maps:
        count += 1
        if count%10 == 0:
            grass.percent(count, len(output_maps), 1)
        # Read the raster map data
        output_map.load()
        # In case of an empty map continue, do not register empty maps
        if not register_null:
            if output_map.metadata.get_min() is None and \
                output_map.metadata.get_max() is None:
                empty_maps.append(output_map)
                continue

        # Insert map in temporal database
        output_map.insert(dbif)
        output_strds.register_map(output_map, dbif)

    # Update the spatio-temporal extent and the metadata table entries
    output_strds.update_from_registered_maps(dbif)
    grass.percent(1, 1, 1)

    dbif.close()

    # Remove empty maps
    if len(empty_maps) > 0:
        for map in empty_maps:
            grass.run_command("g.remove", flags='f', type="rast",  pattern=map.get_name(), quiet=True)
Esempio n. 30
0
def cleanup():
    Module('g.remove', flags='f', name='region_mask', type='vector')
    Module('g.remove', flags='f', name='ndvi', type='raster')
    Module('g.remove', flags='f', name='ndvi_class', type='raster')
    Module('g.remove', flags='f', name='ndvi_class', type='vector')

def main():
    elev = options["input"]
    output = options["output"]
    n_dir = int(options["ndir"])
    notParallel = flags["p"]

    global TMP_NAME, CLEANUP

    if options["basename"]:
        TMP_NAME = options["basename"]
        CLEANUP = False

    colorized_output = options["colorized_output"]
    colorize_color = options["color_table"]

    if colorized_output:
        color_raster_tmp = TMP_NAME + "_color_raster"
    else:
        color_raster_tmp = None

    color_raster_type = options["color_source"]
    color_input = options["color_input"]

    if color_raster_type == "color_input" and not color_input:
        gcore.fatal(_("Provide raster name in color_input option"))

    if color_raster_type != "color_input" and color_input:
        gcore.fatal(
            _(
                "The option color_input is not needed"
                " when not using it as source for color"
            )
        )
    # this would be needed only if the option had no default value
    if not color_raster_type and color_input:
        color_raster_type = "color_input"  # enable for convenience

    if (
        color_raster_type == "aspect"
        and colorize_color
        and colorize_color not in ["default", "aspectcolr"]
    ):
        gcore.warning(
            _(
                "Using possibly inappropriate color table <{}>"
                " for aspect"
            ).format(colorize_color)
        )

    horizon_step = 360.0 / n_dir
    horizon_intervals = np.arange(0, 360, horizon_step)
    msgr = get_msgr()

    # checks if there are already some maps
    old_maps = _get_horizon_maps()

    if old_maps:
        if not gcore.overwrite():
            CLEANUP = False
            msgr.fatal(
                _(
                    "You have to first check overwrite flag or remove"
                    " the following maps:\n"
                    "{names}"
                ).format(names=",".join(old_maps))
            )
        else:
            msgr.warning(
                _("The following maps will be overwritten: {names}").format(
                    names=",".join(old_maps)
                )
            )

    if not gcore.overwrite() and color_raster_tmp:
        check_map_name(color_raster_tmp)

    try:
        if notParallel is False:
            if options["maxdistance"]:
                maxdistance = float(options["maxdistance"])
            else:
                maxdistance = None

            r_horizon = Module(
                "r.horizon",
                elevation=elev,
                maxdistance=maxdistance,
                flags="d",
                run_=False,
            )

            queue = ParallelModuleQueue(nprocs=int(options["processes"]))

            for d in horizon_intervals:
                r_horizon_prc = deepcopy(r_horizon)
                r_horizon_prc.inputs.direction = d
                r_horizon_prc.outputs.output = TMP_NAME
                queue.put(r_horizon_prc)

            queue.wait()

        else:
            params = {}
            if options["maxdistance"]:
                params["maxdistance"] = options["maxdistance"]

            gcore.run_command(
                "r.horizon",
                elevation=elev,
                step=horizon_step,
                output=TMP_NAME,
                flags="d",
                **params
            )

        new_maps = _get_horizon_maps()

        if flags["o"]:
            msgr.message(_("Computing openness ..."))
            expr = "{out} = 1 - (sin({first}) ".format(first=new_maps[0], out=output)
            for horizon in new_maps[1:]:
                expr += "+ sin({name}) ".format(name=horizon)
            expr += ") / {n}.".format(n=len(new_maps))
        else:
            msgr.message(_("Computing skyview factor ..."))
            expr = "{out} = 1 - (sin( if({first} < 0, 0, {first}) ) ".format(
                first=new_maps[0], out=output
            )
            for horizon in new_maps[1:]:
                expr += "+ sin( if({name} < 0, 0, {name}) ) ".format(name=horizon)
            expr += ") / {n}.".format(n=len(new_maps))
        
        grast.mapcalc(exp=expr)
        gcore.run_command("r.colors", map=output, color="grey")

    except CalledModuleError:
        msgr.fatal(
            _(
                "r.horizon failed to compute horizon elevation "
                "angle maps. Please report this problem to developers."
            )
        )
        return 1

    if colorized_output:
        if color_raster_type == "slope":
            gcore.run_command("r.slope.aspect", elevation=elev, slope=color_raster_tmp)
        elif color_raster_type == "aspect":
            gcore.run_command("r.slope.aspect", elevation=elev, aspect=color_raster_tmp)
        elif color_raster_type == "dxy":
            gcore.run_command("r.slope.aspect", elevation=elev, dxy=color_raster_tmp)
        elif color_raster_type == "color_input":
            color_raster_tmp = color_input
        else:
            color_raster_tmp = elev

        # don't modify user's color table for inputs
        if colorize_color and color_raster_type not in ["input", "color_input"]:
            rcolors_flags = ""

            if flags["n"]:
                rcolors_flags += "n"

            gcore.run_command(
                "r.colors",
                map=color_raster_tmp,
                color=colorize_color,
                flags=rcolors_flags,
            )

        gcore.run_command(
            "r.shade", shade=output, color=color_raster_tmp, output=colorized_output
        )
        grast.raster_history(colorized_output)

    grast.raster_history(output)

    return 0
Esempio n. 32
0
def main():
    # user specified variables
    dem = options["elevation"]
    slope = options["slope"]
    aspect = options["aspect"]
    neighborhood_size = options["size"]
    output = options["output"]
    nprocs = int(options["nprocs"])

    # check for valid neighborhood sizes
    neighborhood_size = neighborhood_size.split(",")
    neighborhood_size = [int(i) for i in neighborhood_size]

    if any(i % 2 == 0 for i in neighborhood_size):
        grass.fatal(
            "Invalid size - neighborhood sizes have to consist of odd numbers")

    if min(neighborhood_size) == 1:
        grass.fatal("Neighborhood sizes have to be > 1")

    # determine nprocs: negative values count back from the number of cores,
    # i.e. -1 means all cores, -2 all cores but one, etc.
    if nprocs < 0:
        n_cores = mp.cpu_count()
        nprocs = n_cores + (nprocs + 1)

    # temporary raster map names for slope, aspect, x, y, z components
    if slope == "":
        slope_raster = create_tempname("tmpSlope_")
    else:
        slope_raster = slope

    if aspect == "":
        aspect_raster = create_tempname("tmpAspect_")
    else:
        aspect_raster = aspect

    z_raster = create_tempname("tmpzRaster_")
    x_raster = create_tempname("tmpxRaster_")
    y_raster = create_tempname("tmpyRaster_")

    # create slope and aspect rasters
    if slope == "" or aspect == "":
        grass.message("Calculating slope and aspect...")
        grass.run_command(
            "r.slope.aspect",
            elevation=dem,
            slope=slope_raster,
            aspect=aspect_raster,
            format="degrees",
            precision="FCELL",
            zscale=1.0,
            min_slope=0.0,
            quiet=True,
        )

    # calculate x, y and z rasters
    # note: GRASS mapcalc sin/cos functions expect degrees, while ArcGIS
    # expects the input grid in radians, so there is no need to convert
    # slope and aspect to radians as in the original ArcGIS script
    x_expr = "{x} = float( sin({a}) * sin({b}) )".format(x=x_raster,
                                                         a=aspect_raster,
                                                         b=slope_raster)

    y_expr = "{y} = float( cos({a}) * sin({b}) )".format(y=y_raster,
                                                         a=aspect_raster,
                                                         b=slope_raster)

    z_expr = "{z} = float( cos({a}) )".format(z=z_raster, a=slope_raster)

    # calculate x, y, z components (parallel)
    grass.message("Calculating x, y, and z rasters...")

    mapcalc = Module("r.mapcalc", run_=False)
    queue = ParallelModuleQueue(nprocs=nprocs)

    mapcalc1 = copy.deepcopy(mapcalc)
    m = mapcalc1(expression=x_expr)
    queue.put(m)

    mapcalc2 = copy.deepcopy(mapcalc)
    m = mapcalc2(expression=y_expr)
    queue.put(m)

    mapcalc3 = copy.deepcopy(mapcalc)
    m = mapcalc3(expression=z_expr)
    queue.put(m)

    queue.wait()

    # calculate x, y, z neighborhood sums (parallel)
    grass.message(
        "Calculating sums of x, y, and z rasters in selected neighborhoods...")

    x_sum_list = []
    y_sum_list = []
    z_sum_list = []

    neighbors = Module("r.neighbors", overwrite=True, run_=False)
    queue = ParallelModuleQueue(nprocs=nprocs)

    for size in neighborhood_size:
        # create temporary raster names for neighborhood x, y, z sums
        x_sum_raster = create_tempname("tmpxSumRaster_")
        x_sum_list.append(x_sum_raster)

        y_sum_raster = create_tempname("tmpySumRaster_")
        y_sum_list.append(y_sum_raster)

        z_sum_raster = create_tempname("tmpzSumRaster_")
        z_sum_list.append(z_sum_raster)

        # queue jobs for x, y, z neighborhood sums
        neighbors_xsum = copy.deepcopy(neighbors)
        n = neighbors_xsum(input=x_raster,
                           output=x_sum_raster,
                           method="average",
                           size=size)
        queue.put(n)

        neighbors_ysum = copy.deepcopy(neighbors)
        n = neighbors_ysum(input=y_raster,
                           output=y_sum_raster,
                           method="average",
                           size=size)
        queue.put(n)

        neighbors_zsum = copy.deepcopy(neighbors)
        n = neighbors_zsum(input=z_raster,
                           output=z_sum_raster,
                           method="average",
                           size=size)
        queue.put(n)

    queue.wait()

    # calculate the resultant vector and final ruggedness raster
    # modified from the original script to multiply each sum raster by the
    # n neighborhood cells to get the sum
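    # r.neighbors with method=average returns the mean over the n = size^2
    # neighborhood cells, so each component is multiplied by d = size * size
    # inside the expression to recover the sum before the resultant vector
    # length |R| is computed; the final value is VRM = 1 - |R| / n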
    grass.message("Calculating the final ruggedness rasters...")

    mapcalc = Module("r.mapcalc", run_=False)
    queue = ParallelModuleQueue(nprocs=nprocs)
    vrm_list = []

    for x_sum_raster, y_sum_raster, z_sum_raster, size in zip(
            x_sum_list, y_sum_list, z_sum_list, neighborhood_size):

        if len(neighborhood_size) > 1:
            vrm_name = "_".join([output, str(size)])
        else:
            vrm_name = output

        vrm_list.append(vrm_name)

        vrm_expr = "{x} = float(1-( (sqrt(({a}*{d})^2 + ({b}*{d})^2 + ({c}*{d})^2) / {d})))".format(
            x=vrm_name,
            a=x_sum_raster,
            b=y_sum_raster,
            c=z_sum_raster,
            d=int(size) * int(size),
        )
        mapcalc1 = copy.deepcopy(mapcalc)
        m = mapcalc1(expression=vrm_expr)
        queue.put(m)

    queue.wait()

    # set colors
    grass.run_command("r.colors", flags="e", map=vrm_list, color="ryb")

    # set metadata
    for vrm, size in zip(vrm_list, neighborhood_size):
        title = "Vector Ruggedness Measure (size={size})".format(size=size)
        grass.run_command("r.support", map=vrm, title=title)

    return 0
Esempio n. 33
0
    if (reg.rows, reg.cols) != array.shape:
        msg = "Region and array are different: %r != %r"
        raise TypeError(msg % ((reg.rows, reg.cols), array.shape))
    with RasterRow(rastname, mode='w', mtype=mtype,
                   overwrite=overwrite) as new:
        newrow = Buffer((array.shape[1], ), mtype=mtype)
        for row in array:
            newrow[:] = row[:]
            new.put_row(newrow)


if __name__ == "__main__":

    import doctest
    from grass.pygrass.modules import Module
    Module("g.region", n=40, s=0, e=40, w=0, res=10)
    Module("r.mapcalc",
           expression="%s = row() + (10 * col())" % (test_raster_name),
           overwrite=True)
    Module("r.support",
           map=test_raster_name,
           title="A test map",
           history="Generated by r.mapcalc",
           description="This is a test map")
    cats = """11:A
            12:B
            13:C
            14:D
            21:E
            22:F
            23:G
Esempio n. 34
0
def main(options, flags):

    gisbase = os.getenv('GISBASE')
    if not gisbase:
        gs.fatal(_('$GISBASE not defined'))
        return 0

    # Variables
    RAST = options['raster']
    RAST = RAST.split(',')
    RASTL = [z.split('@')[0] for z in RAST]
    RASTL = [x.lower() for x in RASTL]
    RAST2 = options['raster2']
    RAST2 = RAST2.split(',')
    RASTL2 = [z.split('@')[0] for z in RAST2]
    RASTL2 = [x.lower() for x in RASTL2]
    VECT = options['vector']
    OUTP = options['output']

    # Create vector with column names
    CT = ['x double precision, y double precision, label integer']
    for i in xrange(len(RAST)):
        DT = gs.parse_command('r.info', flags='g', map=RAST[i],
                              quiet=True)['datatype']
        if DT == 'CELL':
            CT.append("ID_{0} integer, {0} varchar(255)".format(RASTL[i]))
        else:
            CT.append("ID_{0} double precision, {0} varchar(255)".format(
                RASTL[i]))
    CNT = ','.join(CT)

    # Get raster points of raster layers with labels
    # Export point map to text file first and use that as input in r.what
    # TODO: the above is workaround to get vector cat value as label. Easier,
    # would be to use vector point map directly as input, but that does not
    # give label to link back to old vector layer
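    # each exported line has the form "<x> <y> <cat>"; the trailing cat value
    # is treated by r.what as a point label and carried through to its output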
    PAT = Module('v.out.ascii',
                 input=VECT,
                 format='point',
                 separator='space',
                 precision=12,
                 stdout_=PIPE).outputs.stdout
    CAT = Module('r.what', flags='f', map=RAST, stdin_=PAT,
                 stdout_=PIPE).outputs.stdout
    CATV = CAT.replace('|*|', '||')
    Module('v.in.ascii',
           input='-',
           stdin_=CATV,
           output=OUTP,
           columns=CNT,
           separator='pipe',
           format='point',
           x=1,
           y=2,
           quiet=True)

    # Get raster points of raster layers without labels (optional)
    if options['raster2']:
        for j in xrange(len(RAST2)):
            DT = gs.parse_command('r.info',
                                  flags='g',
                                  map=RAST2[j],
                                  quiet=True)['datatype']
            if DT == 'CELL':
                CT = "{} integer".format(RASTL2[j])
            else:
                CT = "{} double precision".format(RASTL2[j])
            Module('v.db.addcolumn', map=OUTP, columns=CT)
            Module('v.what.rast', map=OUTP, raster=RAST2[j], column=RASTL2[j])

    # Write metadata
    opt2 = dict((k, v) for k, v in options.iteritems() if v)
    hist = ' '.join("{!s}={!r}".format(k, v) for (k, v) in opt2.iteritems())
    hist = "v.what.rastlabel {}".format(hist)
    Module('v.support',
           map=OUTP,
           comment="created with v.what.rastlabel",
           cmdhist=hist,
           flags='r',
           quiet=True)
Esempio n. 35
0
def main():

    global rm_regions, rm_rasters, rm_vectors, tmpfolder

    # parameters
    if options['s2names']:
        s2names = options['s2names'].split(',')
        if os.path.isfile(s2names[0]):
            with open(s2names[0], 'r') as f:
                s2namesstr = f.read()
        else:
            s2namesstr = ','.join(s2names)
    tmpdirectory = options['directory']

    test_nprocs_memory()

    if not grass.find_program('i.sentinel.download', '--help'):
        grass.fatal(
            _("The 'i.sentinel.download' module was not found, install it first:"
              ) + "\n" + "g.extension i.sentinel")
    if not grass.find_program('i.sentinel.import', '--help'):
        grass.fatal(
            _("The 'i.sentinel.import' module was not found, install it first:"
              ) + "\n" + "g.extension i.sentinel")
    if not grass.find_program('i.sentinel.parallel.download', '--help'):
        grass.fatal(
            _("The 'i.sentinel.parallel.download' module was not found, install it first:"
              ) + "\n" + "g.extension i.sentinel")
    if not grass.find_program('i.zero2null', '--help'):
        grass.fatal(
            _("The 'i.zero2null' module was not found, install it first:") +
            "\n" + "g.extension i.zero2null")

    # create temporary directory to download data
    if tmpdirectory:
        if not os.path.isdir(tmpdirectory):
            try:
                os.makedirs(tmpdirectory)
            except OSError:
                grass.fatal(_("Unable to create temp dir"))

    else:
        tmpdirectory = grass.tempdir()
        tmpfolder = tmpdirectory

    # make distinct download and sen2cor directories
    try:
        download_dir = os.path.join(tmpdirectory,
                                    'download_{}'.format(os.getpid()))
        os.makedirs(download_dir)
    except Exception as e:
        grass.fatal(_('Unable to create temp dir {}').format(download_dir))

    if not options['input_dir']:
        # auxiliary variable showing whether each S2-scene lies in an
        # individual folder
        single_folders = True

        download_args = {
            'settings': options['settings'],
            'nprocs': options['nprocs'],
            'output': download_dir,
            'datasource': options['datasource'],
            'flags': 'f'
        }
        if options['limit']:
            download_args['limit'] = options['limit']
        if options['s2names']:
            download_args['flags'] += 's'
            download_args['scene_name'] = s2namesstr.strip()
            if options['datasource'] == 'USGS_EE':
                if flags['e']:
                    download_args['flags'] += 'e'
                download_args['producttype'] = 'S2MSI1C'
        else:
            download_args['clouds'] = options['clouds']
            download_args['start'] = options['start']
            download_args['end'] = options['end']
            download_args['producttype'] = options['producttype']

        grass.run_command('i.sentinel.parallel.download', **download_args)
    else:
        download_dir = options['input_dir']
        single_folders = False

    number_of_scenes = len(os.listdir(download_dir))
    nprocs_final = min(number_of_scenes, int(options['nprocs']))

    # run atmospheric correction
    if flags['a']:
        sen2cor_folder = os.path.join(tmpdirectory,
                                      'sen2cor_{}'.format(os.getpid()))
        try:
            os.makedirs(sen2cor_folder)
        except Exception as e:
            grass.fatal(
                _("Unable to create temporary sen2cor folder {}").format(
                    sen2cor_folder))
        grass.message(
            _('Starting atmospheric correction with sen2cor'
              ' using {} parallel processes...').format(nprocs_final))
        queue_sen2cor = ParallelModuleQueue(nprocs=nprocs_final)
        for idx, subfolder in enumerate(os.listdir(download_dir)):
            if single_folders is False:
                if subfolder.endswith('.SAFE'):
                    filepath = os.path.join(download_dir, subfolder)
            else:
                folderpath = os.path.join(download_dir, subfolder)
                for file in os.listdir(folderpath):
                    if file.endswith('.SAFE'):
                        filepath = os.path.join(folderpath, file)
            output_dir = os.path.join(sen2cor_folder,
                                      'sen2cor_result_{}'.format(idx))
            try:
                os.makedirs(output_dir)
            except Exception:
                grass.fatal(
                    _('Unable to create directory {}').format(output_dir))
            sen2cor_module = Module(
                'i.sentinel-2.sen2cor',
                input_file=filepath,
                output_dir=output_dir,
                sen2cor_path=options['sen2cor_path'],
                nprocs=1,
                run_=False
                # all remaining sen2cor parameters can be left as default
            )
            queue_sen2cor.put(sen2cor_module)
        queue_sen2cor.wait()
        download_dir = sen2cor_folder
        single_folders = True

    grass.message(_("Importing Sentinel scenes ..."))
    env = grass.gisenv()
    start_gisdbase = env['GISDBASE']
    start_location = env['LOCATION_NAME']
    start_cur_mapset = env['MAPSET']
    # save current region
    pid = str(os.getpid())
    currentregion = 'tmp_region_' + pid
    rm_regions.append(currentregion)
    grass.run_command('g.region', save=currentregion, flags='p')

    queue_import = ParallelModuleQueue(nprocs=nprocs_final)
    memory_per_proc = round(float(options['memory']) / nprocs_final)
    mapsetids = []
    importflag = 'rn'
    if flags['i']:
        importflag += 'i'
    if flags['c']:
        importflag += 'c'
    json_standard_folder = os.path.join(env['GISDBASE'], env['LOCATION_NAME'],
                                        env['MAPSET'], 'cell_misc')

    if not os.path.isdir(json_standard_folder):
        os.makedirs(json_standard_folder)
    for idx, subfolder in enumerate(os.listdir(download_dir)):
        if os.path.exists(os.path.join(download_dir, subfolder)):
            mapsetid = 'S2_import_%s' % (str(idx + 1))
            mapsetids.append(mapsetid)
            import_kwargs = {
                "mapsetid": mapsetid,
                "memory": memory_per_proc,
                "pattern": options["pattern"],
                "flags": importflag,
                "region": currentregion,
                "metadata": json_standard_folder
            }
            if single_folders is True:
                directory = os.path.join(download_dir, subfolder)
            else:
                directory = download_dir
                if subfolder.endswith(".SAFE"):
                    pattern_file = subfolder.split(".SAFE")[0]
                elif subfolder.endswith(".zip"):
                    pattern_file = subfolder.split(".zip")[0]
                    if ".SAFE" in pattern_file:
                        pattern_file = pattern_file.split(".SAFE")[0]
                else:
                    grass.warning(
                        _("{} is not in .SAFE or .zip format, "
                          "skipping...").format(
                              os.path.join(download_dir, subfolder)))
                    continue
                import_kwargs["pattern_file"] = pattern_file
            import_kwargs["input"] = directory
            i_sentinel_import = Module("i.sentinel.import.worker",
                                       run_=False,
                                       **import_kwargs)
            queue_import.put(i_sentinel_import)
    queue_import.wait()
    grass.run_command('g.remove', type='region', name=currentregion, flags='f')
    # verify that switching the mapset worked
    env = grass.gisenv()
    gisdbase = env['GISDBASE']
    location = env['LOCATION_NAME']
    cur_mapset = env['MAPSET']
    if cur_mapset != start_cur_mapset:
        grass.fatal("New mapset is <%s>, but should be <%s>" %
                    (cur_mapset, start_cur_mapset))
    # copy maps to current mapset
    maplist = []
    cloudlist = []
    for new_mapset in mapsetids:
        for vect in grass.parse_command('g.list',
                                        type='vector',
                                        mapset=new_mapset):
            cloudlist.append(vect)
            grass.run_command('g.copy',
                              vector=vect + '@' + new_mapset + ',' + vect)
        for rast in grass.parse_command('g.list',
                                        type='raster',
                                        mapset=new_mapset):
            maplist.append(rast)
            grass.run_command('g.copy',
                              raster=rast + '@' + new_mapset + ',' + rast)
        grass.utils.try_rmdir(os.path.join(gisdbase, location, new_mapset))
    # space time dataset
    grass.message(_("Creating STRDS of Sentinel scenes ..."))
    if options['strds_output']:
        strds = options['strds_output']
        grass.run_command('t.create',
                          output=strds,
                          title="Sentinel-2",
                          desc="Sentinel-2",
                          quiet=True)

        # check GRASS version
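        # (the third, band-reference column written to the register file
        # below is only understood by GRASS >= 7.9)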
        g79_or_higher = False
        gversion = grass.parse_command("g.version", flags="g")["version"]
        gversion_base = gversion.split(".")[:2]
        gversion_base_int = tuple([int(a) for a in gversion_base])
        if gversion_base_int >= tuple((7, 9)):
            g79_or_higher = True

        # create register file
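        # each line: <map name>|<YYYY-MM-DD> <HH:MM:SS>[|S2_<band>]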
        registerfile = grass.tempfile()
        file = open(registerfile, 'w')
        for imp_rast in list(set(maplist)):
            band_str_tmp1 = imp_rast.split("_")[2]
            band_str = band_str_tmp1.replace("B0", "").replace("B", "")
            date_str1 = imp_rast.split('_')[1].split('T')[0]
            date_str2 = "%s-%s-%s" % (date_str1[:4], date_str1[4:6],
                                      date_str1[6:])
            time_str = imp_rast.split('_')[1].split('T')[1]
            clock_str2 = "%s:%s:%s" % (time_str[:2], time_str[2:4],
                                       time_str[4:])
            write_str = "%s|%s %s" % (imp_rast, date_str2, clock_str2)
            if g79_or_higher is True:
                write_str += "|S2_%s" % band_str
            file.write("%s\n" % write_str)
        file.close()
        grass.run_command('t.register',
                          input=strds,
                          file=registerfile,
                          quiet=True)
        # remove registerfile
        grass.try_remove(registerfile)

        if flags['c']:
            stvdsclouds = strds + '_clouds'
            grass.run_command('t.create',
                              output=stvdsclouds,
                              title="Sentinel-2 clouds",
                              desc="Sentinel-2 clouds",
                              quiet=True,
                              type='stvds')
            registerfileclouds = grass.tempfile()
            fileclouds = open(registerfileclouds, 'w')
            for imp_clouds in cloudlist:
                date_str1 = imp_clouds.split('_')[1].split('T')[0]
                date_str2 = "%s-%s-%s" % (date_str1[:4], date_str1[4:6],
                                          date_str1[6:])
                time_str = imp_clouds.split('_')[1].split('T')[1]
                clock_str2 = "%s:%s:%s" % (time_str[:2], time_str[2:4],
                                           time_str[4:])
                fileclouds.write("%s|%s %s\n" %
                                 (imp_clouds, date_str2, clock_str2))
            fileclouds.close()
            grass.run_command('t.register',
                              type='vector',
                              input=stvdsclouds,
                              file=registerfileclouds,
                              quiet=True)
            grass.message("<%s> is created" % (stvdsclouds))
            # remove registerfile
            grass.try_remove(registerfileclouds)

        # extract strds for each band
        bands = []
        pattern = options['pattern']
        if "(" in pattern:
            global beforebrackets, afterbrackets
            beforebrackets = re.findall(r"(.*?)\(", pattern)[0]
            inbrackets = re.findall(r"\((.*?)\)", pattern)[0]
            afterbrackets = re.findall(r"\)(.*)", pattern)[0]
            bands = [
                "%s%s%s" % (beforebrackets, x, afterbrackets)
                for x in inbrackets.split('|')
            ]
        else:
            bands = pattern.split('|')

        for band in bands:
            if flags['i'] and ('20' in band or '60' in band):
                # str.replace returns a new string, the result must be kept
                band = band.replace('20', '10').replace('60', '10')
            grass.run_command('t.rast.extract',
                              input=strds,
                              where="name like '%" + band + "%'",
                              output="%s_%s" % (strds, band),
                              quiet=True)
            grass.message("<%s_%s> is created" % (strds, band))
Esempio n. 36
0
File: grid.py Progetto: caomw/grass
class GridModule(object):
    # TODO maybe also i.* could be supported easily
    """Run GRASS raster commands in a multiprocessing mode.

    :param cmd: raster GRASS command, only commands starting with r.* are valid.
    :type cmd: str
    :param width: width of the tile, in pixels.
    :type width: int
    :param height: height of the tile, in pixels.
    :type height: int
    :param overlap: overlap between tiles, in pixels.
    :type overlap: int
    :param processes: number of concurrent processes, by default equal to
                      the number of processors available.
    :type processes: int
    :param split: if True use r.tile to split all the inputs.
    :type split: bool
    :param run_: if False only instantiate the object
    :type run_: bool
    :param args: give all the parameters to the command
    :param kargs: give all the parameters to the command

    >>> grd = GridModule('r.slope.aspect',
    ...                  width=500, height=500, overlap=2,
    ...                  processes=None, split=False,
    ...                  elevation='elevation',
    ...                  slope='slope', aspect='aspect', overwrite=True)
    >>> grd.run()
    """
    def __init__(self, cmd, width=None, height=None, overlap=0, processes=None,
                 split=False, debug=False, region=None, move=None, log=False,
                 start_row=0, start_col=0, out_prefix='',
                 *args, **kargs):
        kargs['run_'] = False
        self.mset = Mapset()
        self.module = Module(cmd, *args, **kargs)
        self.width = width
        self.height = height
        self.overlap = overlap
        self.processes = processes
        self.region = region if region else Region()
        self.start_row = start_row
        self.start_col = start_col
        self.out_prefix = out_prefix
        self.log = log
        self.move = move
        self.gisrc_src = os.environ['GISRC']
        self.n_mset, self.gisrc_dst = None, None
        if self.move:
            self.n_mset = copy_mapset(self.mset, self.move)
            self.gisrc_dst = write_gisrc(self.n_mset.gisdbase,
                                         self.n_mset.location,
                                         self.n_mset.name)
            rasters = [r for r in select(self.module.inputs, 'raster')]
            if rasters:
                copy_rasters(rasters, self.gisrc_src, self.gisrc_dst,
                             region=self.region)
            vectors = [v for v in select(self.module.inputs, 'vector')]
            if vectors:
                copy_vectors(vectors, self.gisrc_src, self.gisrc_dst)
            groups = [g for g in select(self.module.inputs, 'group')]
            if groups:
                copy_groups(groups, self.gisrc_src, self.gisrc_dst,
                            region=self.region)
        self.bboxes = split_region_tiles(region=region,
                                         width=width, height=height,
                                         overlap=overlap)
        self.msetstr = cmd.replace('.', '') + "_%03d_%03d"
        self.inlist = None
        if split:
            self.split()
        self.debug = debug

    def __del__(self):
        if self.gisrc_dst:
            # remove GISRC file
            os.remove(self.gisrc_dst)

    def clean_location(self, location=None):
        """Remove all created mapsets.

        :param location: a Location instance where we are running the analysis
        :type location: Location object
        """
        if location is None:
            if self.n_mset:
                self.n_mset.current()
            location = Location()

        mapsets = location.mapsets(self.msetstr.split('_')[0] + '_*')
        for mset in mapsets:
            Mapset(mset).delete()
        if self.n_mset and self.n_mset.is_current():
            self.mset.current()

    def split(self):
        """Split all the raster inputs using r.tile"""
        rtile = Module('r.tile')
        inlist = {}
        for inm in select(self.module.inputs, 'raster'):
            rtile(input=inm.value, output=inm.value,
                  width=self.width, height=self.height,
                  overlap=self.overlap)
            patt = '%s-*' % inm.value
            inlist[inm.value] = sorted(self.mset.glist(type='rast',
                                                       pattern=patt))
        self.inlist = inlist

    def get_works(self):
        """Return a list of tuble with the parameters for cmd_exe function"""
        works = []
        reg = Region()
        if self.move:
            mdst, ldst, gdst = read_gisrc(self.gisrc_dst)
        else:
            ldst, gdst = self.mset.location, self.mset.gisdbase
        cmd = self.module.get_dict()
        groups = [g for g in select(self.module.inputs, 'group')]
        for row, box_row in enumerate(self.bboxes):
            for col, box in enumerate(box_row):
                inms = None
                if self.inlist:
                    inms = {}
                    cols = len(box_row)
                    for key in self.inlist:
                        indx = row * cols + col
                        inms[key] = "%s@%s" % (self.inlist[key][indx],
                                               self.mset.name)
                # set the computational region, prepare the region parameters
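                # box.items() yields (north, south, east, west, top, bottom);
                # [:-2] drops top and bottom, and k[0] shortens the keys to
                # the n, s, e, w region parameters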
                bbox = dict([(k[0], str(v)) for k, v in box.items()[:-2]])
                bbox['nsres'] = '%f' % reg.nsres
                bbox['ewres'] = '%f' % reg.ewres
                new_mset = self.msetstr % (self.start_row + row,
                                           self.start_col + col)
                works.append((bbox, inms,
                              self.gisrc_src,
                              write_gisrc(gdst, ldst, new_mset),
                              cmd, groups))
        return works

    def define_mapset_inputs(self):
        """Add the mapset information to the input maps
        """
        for inmap in self.module.inputs:
            inm = self.module.inputs[inmap]
            if inm.type in ('raster', 'vector') and inm.value:
                if '@' not in inm.value:
                    mset = get_mapset_raster(inm.value)
                    inm.value = inm.value + '@%s' % mset

    def run(self, patch=True, clean=True):
        """Run the GRASS command

        :param patch: set to False if you do not want to patch the results
        :type patch: bool
        :param clean: set to False if you do not want to remove all the
                      intermediate data created by GridModule
        :type clean: bool
        """
        self.module.flags.overwrite = True
        self.define_mapset_inputs()
        if self.debug:
            for wrk in self.get_works():
                cmd_exe(wrk)
        else:
            pool = mltp.Pool(processes=self.processes)
            result = pool.map_async(cmd_exe, self.get_works())
            result.wait()
            if not result.successful():
                raise RuntimeError(_("Execution of subprocesses was not successful"))

        if patch:
            if self.move:
                os.environ['GISRC'] = self.gisrc_dst
                self.n_mset.current()
                self.patch()
                os.environ['GISRC'] = self.gisrc_src
                self.mset.current()
                # copy the outputs from dst => src
                routputs = [self.out_prefix + o
                            for o in select(self.module.outputs, 'raster')]
                copy_rasters(routputs, self.gisrc_dst, self.gisrc_src)
            else:
                self.patch()

        if self.log:
            # record in the temp directory
            from grass.lib.gis import G_tempfile
            tmp, dummy = os.path.split(G_tempfile())
            tmpdir = os.path.join(tmp, self.module.name)
            for k in self.module.outputs:
                par = self.module.outputs[k]
                if par.typedesc == 'raster' and par.value:
                    dirpath = os.path.join(tmpdir, par.name)
                    if not os.path.isdir(dirpath):
                        os.makedirs(dirpath)
                    fil = open(os.path.join(dirpath,
                                            self.out_prefix + par.value), 'w+')
                    fil.close()

        if clean:
            self.clean_location()
            self.rm_tiles()
            if self.n_mset:
                gisdbase, location = os.path.split(self.move)
                self.clean_location(Location(location, gisdbase))
                # rm temporary gis_rc
                os.remove(self.gisrc_dst)
                self.gisrc_dst = None
                sht.rmtree(os.path.join(self.move, 'PERMANENT'))
                sht.rmtree(os.path.join(self.move, self.mset.name))

    def patch(self):
        """Patch the final results."""
        bboxes = split_region_tiles(width=self.width, height=self.height)
        loc = Location()
        mset = loc[self.mset.name]
        mset.visible.extend(loc.mapsets())
        for otmap in self.module.outputs:
            otm = self.module.outputs[otmap]
            if otm.typedesc == 'raster' and otm.value:
                rpatch_map(otm.value,
                           self.mset.name, self.msetstr, bboxes,
                           self.module.flags.overwrite,
                           self.start_row, self.start_col, self.out_prefix)

    def rm_tiles(self):
        """Remove all the tiles."""
        # if split, remove tiles
        if self.inlist:
            grm = Module('g.remove')
            for key in self.inlist:
                # self.inlist[key] is a list of map names, so pass it as name
                grm(flags='f', type='rast', name=self.inlist[key])
Esempio n. 37
0
from subprocess import PIPE
from grass.pygrass.modules import Module


def isTableExists(name):
    res = Module('db.tables', flags='p', stdout_=PIPE)
    for line in res.outputs.stdout.splitlines():
        if name == line:
            return True
    return False
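
A minimal usage sketch (the table name 'roads' is hypothetical; it assumes an
active GRASS session whose current mapset has a default database connection):

if isTableExists('roads'):
    print('table found in the current database')
else:
    print('table is missing')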