Beispiel #1
0
def ml2rast(segsname, outname, hdf=None, idsname=None, ids=None, hist=None):
    """Map segment ids of an input raster to class values and write a new raster.

    :param segsname: name of the input segments raster map
    :param outname: name of the output (CELL) raster map to create
    :param hdf: path of an HDF store, read only when `ids` is not given
    :param idsname: key of the id->class mapping inside the HDF store
    :param ids: mapping (e.g. pandas Series) from segment id to class value
    :param hist: if set, used as the raster history title and metadata is written
    """
    # `ids` may be a pandas Series/DataFrame whose truth value is ambiguous
    # (bool() raises ValueError), so compare against None explicitly instead
    # of relying on truthiness.
    if ids is None:
        ids = pnd.read_hdf(hdf, idsname)
    t0 = time.time()
    segs = RasterRow(segsname)
    segs.open('r')
    out = RasterRow(outname)
    out.open('w', mtype='CELL', overwrite=True)
    nrows = len(segs)
    for r, row in enumerate(segs):
        # NOTE: a vectorized `row[:] = ids.loc[row]` caused a MemoryError,
        # hence the explicit per-cell loop.
        for i, col in enumerate(row):
            try:
                row[i] = ids[col]
            except KeyError:
                # segment id missing from the mapping => write 0 (no class)
                row[i] = 0
        out.put_row(row)
        G_percent(r, nrows, 10)
    segs.close()
    if hist:
        import datetime, os
        out.hist.date = datetime.datetime.now()
        out.hist.title = hist
        out.hist.creator = os.getenv('USER')
        out.hist.write(out.name)
    out.close()
    print("Time spent writing the raster %s: %.2fs" % (outname,
                                                       time.time() - t0))
Beispiel #2
0
def _kappa_pixel(maps1, maps2, out, method, over):
    """Write a per-pixel Cohen's kappa raster comparing two sets of maps.

    :param maps1: mapping name -> 2D array for the first set of maps
    :param maps2: mapping name -> 2D array for the second set of maps
    :param out: name of the output (DCELL) raster map
    :param method: kappa weighting scheme passed to sklearn (sklearn >= 0.18)
    :param over: overwrite flag for the output raster
    """
    from grass.pygrass.raster.buffer import Buffer
    # `import sklearn` alone does not load the metrics submodule.
    import sklearn
    import sklearn.metrics
    rasterout = RasterRow(out, overwrite=over)
    rasterout.open('w', 'DCELL')
    # dict.values() is a one-shot view in Python 3 and not subscriptable;
    # materialize once, outside the per-pixel loops.
    arrays1 = list(maps1.values())
    arrays2 = list(maps2.values())
    # The `weights` parameter appeared in sklearn 0.18; compare versions
    # numerically, not lexicographically ('0.9' > '0.18' as strings is wrong).
    version = tuple(int(p) for p in
                    sklearn.__version__.split('.')[:2] if p.isdigit())
    has_weights = version >= (0, 18)
    array1 = arrays1[0]
    for row in range(len(array1)):
        newrow = Buffer((len(array1[row]),), mtype='DCELL')
        for col in range(len(array1[row])):
            # Stack the pixel values of all maps of each set.
            vals1 = np.array([value[row][col] for value in arrays1])
            vals2 = np.array([value[row][col] for value in arrays2])
            if has_weights:
                outval = sklearn.metrics.cohen_kappa_score(vals1, vals2,
                                                           weights=method)
            else:
                outval = sklearn.metrics.cohen_kappa_score(vals1, vals2)
            newrow[col] = outval
        rasterout.put_row(newrow)
    rasterout.close()
    return
Beispiel #3
0
def ml2rast(segsname, outname, hdf=None, idsname=None, ids=None, hist=None):
    """Map segment ids of an input raster to class values and write a new raster.

    :param segsname: name of the input segments raster map
    :param outname: name of the output (CELL) raster map to create
    :param hdf: path of an HDF store, read only when `ids` is not given
    :param idsname: key of the id->class mapping inside the HDF store
    :param ids: mapping (e.g. pandas Series) from segment id to class value
    :param hist: if set, used as the raster history title and metadata is written
    """
    # `ids` may be a pandas Series/DataFrame whose truth value is ambiguous
    # (bool() raises ValueError), so compare against None explicitly instead
    # of relying on truthiness.
    if ids is None:
        ids = pnd.read_hdf(hdf, idsname)
    t0 = time.time()
    segs = RasterRow(segsname)
    segs.open('r')
    out = RasterRow(outname)
    out.open('w', mtype='CELL', overwrite=True)
    nrows = len(segs)
    for r, row in enumerate(segs):
        # NOTE: a vectorized `row[:] = ids.loc[row]` caused a MemoryError,
        # hence the explicit per-cell loop.
        for i, col in enumerate(row):
            try:
                row[i] = ids[col]
            except KeyError:
                # segment id missing from the mapping => write 0 (no class)
                row[i] = 0
        out.put_row(row)
        G_percent(r, nrows, 10)
    segs.close()
    if hist:
        import datetime, os
        out.hist.date = datetime.datetime.now()
        out.hist.title = hist
        out.hist.creator = os.getenv('USER')
        out.hist.write(out.name)
    out.close()
    print("Time spent writing the raster %s: %.2fs" %
          (outname, time.time() - t0))
Beispiel #4
0
def df2raster(df, newrastname):
    """
    Writes a pandas dataframe to a GRASS raster

    :param df: pandas DataFrame with one raster row per dataframe row
    :param newrastname: name of the raster map to create
    """
    # Bug fix: the raster name was hard-coded to 'newET', silently ignoring
    # the `newrastname` argument.
    new = RasterRow(newrastname)
    new.open('w', overwrite=True)
    # iterrows() yields (index, Series) pairs; only the row values must be
    # written, not the (index, Series) tuple itself.
    for _, row in df.iterrows():
        # NOTE(review): put_row usually expects a Buffer of the map's mtype;
        # confirm the Series dtype matches the opened raster type.
        new.put_row(row)

    new.close()
Beispiel #5
0
def ifnumpy(mapname0, mapname1):
    """Threshold raster `mapname0` at 50 and write the boolean result row by
    row into a new raster named `mapname1`.
    """
    # Instantiate the source and destination raster objects.
    source = RasterRow(mapname0)
    target = RasterRow(mapname1)
    # Open source read-only, destination for writing with the same map type.
    source.open('r')
    target.open('w', mtype=source.mtype, overwrite=True)
    # Stream each row through the elementwise comparison.
    for data_row in source:
        target.put_row(data_row > 50)
    # Release both maps.
    target.close()
    source.close()
Beispiel #6
0
def createRast(name, matrix, inverse=False):
    """Create the new raster map using the output matrix of calculateOblique.

    :param name: name of the raster map to create
    :param matrix: sequence of rows (sequences of cell values)
    :param inverse: if True, write each row in reversed order
    :returns: True on success, False if writing failed
    """
    newscratch = RasterRow(name)
    newscratch.open('w', overwrite=True)
    try:
        for r in matrix:
            # Bug fix: `r.reverse()` mutated the caller's matrix in place;
            # use a reversed copy instead.
            vals = r[::-1] if inverse else r
            newrow = Buffer((len(vals), ), mtype='FCELL')
            for c, v in enumerate(vals):
                newrow[c] = v
            newscratch.put_row(newrow)
        newscratch.close()
        return True
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate.
    except Exception:
        return False
Beispiel #7
0
#!/usr/bin/env python3
"""Compute an NDVI raster from Sentinel-2 B04 (red) and B08 (NIR) bands."""

import numpy

from grass.pygrass.raster import RasterRow

red = RasterRow('L2A_T32UPB_20170706T102021_B04_10m')
red.open()
nir = RasterRow('L2A_T32UPB_20170706T102021_B08_10m')
nir.open()
ndvi = RasterRow('ndvi_pyrass')
ndvi.open('w', mtype='FCELL', overwrite=True)

# Walk both bands in lockstep and write NDVI = (NIR - red) / (NIR + red).
for red_row, nir_row in zip(red, nir):
    red_vals = red_row.astype(numpy.float32)
    nir_vals = nir_row.astype(numpy.float32)
    ndvi.put_row((nir_vals - red_vals) / (nir_vals + red_vals))

ndvi.close()
red.close()
nir.close()
Beispiel #8
0
def main():
    """Apply user-defined function (UDF) code to one or more space-time
    raster datasets (STRDS), write the resulting maps, and register them
    in a new output STRDS.

    Reads the raster data in chunks of ``nrows`` rows per map, feeds the
    chunks to the UDF loaded from ``pyfile``, and writes the first result
    datacube into raster maps named after ``basename``.
    """

    # Get the options
    inputs = options["inputs"]
    output = options["output"]
    basename = options["basename"]
    where = options["where"]
    pyfile = options["pyfile"]
    nrows = int(options["nrows"])

    input_name_list = inputs.split(",")

    input_strds: List[StrdsEntry] = []

    # Import the python code into the current function context
    code = open(pyfile, "r").read()
    projection_kv = gcore.parse_command("g.proj", flags="g")
    epsg_code = projection_kv["epsg"]

    tgis.init()
    mapset = gcore.gisenv()["MAPSET"]

    dbif = tgis.SQLDatabaseInterfaceConnection()
    dbif.connect()

    region = Region()
    num_input_maps = 0
    open_output_maps = []

    # Open every input STRDS and collect its maps (ordered by start time).
    for input_name in input_name_list:
        sp = tgis.open_old_stds(input_name, "strds", dbif)
        map_list = sp.get_registered_maps_as_objects(where=where,
                                                     order="start_time",
                                                     dbif=dbif)

        if not map_list:
            dbif.close()
            # NOTE(review): `input` here is the Python builtin, not the STRDS
            # name — this was probably meant to be `input_name`; confirm.
            gcore.fatal(_("Space time raster dataset <%s> is empty") % input)

        # NOTE(review): this check does not depend on the loop variable and
        # could run once before the loop.
        if nrows == 0:
            dbif.close()
            gcore.fatal(_("Number of rows for the udf must be greater 0."))

        num_input_maps = len(map_list)
        input_strds.append(
            StrdsEntry(dbif=dbif, strds=sp, map_list=map_list, region=region))

    # All input STRDS must contribute the same number of maps.
    for strds in input_strds:
        if len(strds.map_list) != num_input_maps:
            dbif.close()
            gcore.fatal(
                _("The number of maps in the input STRDS must be equal"))

    # Setup the input strds to compute the output maps and the resulting strds
    # (mtype keeps the value from the last input STRDS).
    mtype = None
    for strds in input_strds:
        strds.setup()
        mtype = strds.mtype

    # Run the UDF once on a probe chunk to learn how many maps it produces.
    num_output_maps = count_resulting_maps(input_strds=input_strds,
                                           code=code,
                                           epsg_code=epsg_code)

    # Open one output raster map per result slice, named after `basename`.
    if num_output_maps == 1:
        output_map = RasterRow(name=basename)
        output_map.open(mode="w", mtype=mtype, overwrite=gcore.overwrite())
        open_output_maps.append(output_map)
    elif num_output_maps > 1:
        for index in range(num_output_maps):
            output_map = RasterRow(name=basename + f"_{index}", mapset=mapset)
            output_map.open(mode="w", mtype=mtype, overwrite=gcore.overwrite())
            open_output_maps.append(output_map)
    else:
        dbif.close()
        # NOTE(review): the format string has no placeholder, so `% input`
        # raises TypeError here; the `% input` should likely be dropped.
        gcore.fatal(_("No result generated") % input)

    # Workaround because time reduction will remove the timestamp
    result_start_times = [datetime.now()]
    first = False

    # Read several rows for each map of each input strds and load them into the udf
    for index in range(0, region.rows, nrows):
        if index + nrows > region.rows:
            # NOTE(review): the remaining rows of the last chunk would
            # normally be `region.rows - index`; this formula looks
            # suspicious — confirm against the intended chunking.
            usable_rows = index + nrows - region.rows + 1
        else:
            usable_rows = nrows

        # Read all input strds as cubes
        datacubes = []
        for strds in input_strds:
            datacube = strds.to_datacube(index=index, usable_rows=usable_rows)
            datacubes.append(datacube)

        # Run the UDF code
        data = run_udf(code=code, epsg_code=epsg_code, datacube_list=datacubes)

        # Read only the first cube
        datacubes = data.get_datacube_list()
        first_cube_array: xarray.DataArray = datacubes[0].get_array()

        # Capture the result timestamps only from the first processed chunk.
        if first is False:
            if 't' in first_cube_array.coords:
                result_start_times = first_cube_array.coords['t']

        # Three dimensions
        if first_cube_array.ndim == 3:
            # One output map per slice along the first (time) dimension.
            # NOTE: `slice` shadows the builtin of the same name here.
            for count, slice in enumerate(first_cube_array):
                output_map = open_output_maps[count]
                # print(f"Write slice at index {index} \n{slice} for map {output_map.name}")
                for row in slice:
                    # Write the result into the output raster map
                    b = Buffer(shape=(region.cols, ), mtype=mtype)
                    b[:] = row[:]
                    output_map.put_row(b)
        # Two dimensions
        elif first_cube_array.ndim == 2:
            output_map = open_output_maps[0]
            # print(f"Write slice at index {index} \n{slice} for map {output_map.name}")
            for row in first_cube_array:
                # Write the result into the output raster map
                b = Buffer(shape=(region.cols, ), mtype=mtype)
                b[:] = row[:]
                output_map.put_row(b)

        first = True

    # Create new STRDS
    new_sp = open_new_stds(
        name=output,
        type="strds",
        temporaltype=input_strds[0].strds.get_temporal_type(),
        title="new STRDS",
        descr="New STRDS from UDF",
        semantic="UDF",
        overwrite=gcore.overwrite(),
        dbif=dbif)

    # Close each output map, stamp it with the matching start time, and
    # insert/update it in the temporal database.
    maps_to_register = []
    for count, output_map in enumerate(open_output_maps):
        output_map.close()
        print(output_map.fullname())
        rd = RasterDataset(output_map.fullname())
        if input_strds[0].strds.is_time_absolute():
            # `result_start_times` is either an xarray coord (has .data)
            # or the fallback list created above.
            if hasattr(result_start_times, "data"):
                d = pandas.to_datetime(result_start_times.data[count])
            else:
                d = result_start_times[count]
            rd.set_absolute_time(start_time=d)
        elif input_strds[0].strds.is_time_relative():
            if hasattr(result_start_times, "data"):
                d = result_start_times.data[count]
            else:
                d = result_start_times[count]
            rd.set_relative_time(start_time=d, end_time=None, unit="seconds")
        rd.load()
        if rd.is_in_db(dbif=dbif):
            rd.update(dbif=dbif)
        else:
            rd.insert(dbif=dbif)
        maps_to_register.append(rd)
        rd.print_info()

    # Register all produced maps in the new STRDS.
    register_map_object_list(type="raster",
                             map_list=maps_to_register,
                             output_stds=new_sp,
                             dbif=dbif)

    dbif.close()
Beispiel #9
0
#!/usr/bin/env python3
"""Create a raster map 'newscratch' filled with random integers in [0, 1000]
covering the whole current GRASS GIS computational region."""

import numpy

from grass.pygrass.raster import RasterRow
from grass.pygrass.gis.region import Region
from grass.pygrass.raster.buffer import Buffer

newscratch = RasterRow('newscratch')
newscratch.open('w', overwrite=True)

# get computational region info
reg = Region()

# create an empty row buffer sized to the region
newrow = Buffer((reg.cols,), mtype='CELL')

# we create a raster to fill all the GRASS GIS region
for r in range(reg.rows):
    # `random_integers` was deprecated and then removed from NumPy;
    # `randint`'s upper bound is exclusive, so pass 1001 to preserve the
    # original inclusive [0, 1000] range.
    newrow[:] = numpy.random.randint(0, 1001, size=newrow.size)
    newscratch.put_row(newrow)

newscratch.close()