Example #1
def import_stds(input,
                output,
                directory,
                title=None,
                descr=None,
                location=None,
                link=False,
                exp=False,
                overr=False,
                create=False,
                stds_type="strds",
                base=None,
                set_current_region=False,
                memory=300):
    """Import space time datasets of type raster and vector

        :param input: Name of the input archive file
        :param output: The name of the output space time dataset
        :param directory: The extraction directory
        :param title: The title of the newly created space time dataset
        :param descr: The description of the newly created
                     space time dataset
        :param location: The name of the location that should be created;
                        maps are imported into this location
        :param link: Switch to link raster maps instead of importing them
        :param exp: Extend location extents based on new dataset
        :param overr: Override projection (use location's projection)
        :param create: Create the location specified by the "location"
                      parameter and exit.
                      Do not import the space time datasets.
        :param stds_type: The type of the space time dataset that
                         should be imported
        :param base: The base name of the newly imported maps; it will be
                     extended with a numerical index.
        :param memory: Cache size for raster rows, used in r.in.gdal
    """

    global raise_on_error
    old_state = gscript.raise_on_error
    gscript.set_raise_on_error(True)

    # Check if the input file and the extraction directory exist
    if not os.path.exists(input):
        gscript.fatal(
            _("Space time raster dataset archive <%s> not found") % input)
    if not create and not os.path.exists(directory):
        gscript.fatal(_("Extraction directory <%s> not found") % directory)

    tar = tarfile.open(name=input, mode='r')

    # Check for important files
    msgr = get_tgis_message_interface()
    msgr.message(
        _("Checking validity of input file (size: %0.1f MB). May take a while...")
        % (os.path.getsize(input) / (1024 * 1024.0)))
    members = tar.getnames()
    # Make sure that the basenames of the files are used for comparison
    member_basenames = [os.path.basename(name) for name in members]

    if init_file_name not in member_basenames:
        gscript.fatal(_("Unable to find init file <%s>") % init_file_name)
    if list_file_name not in member_basenames:
        gscript.fatal(_("Unable to find list file <%s>") % list_file_name)
    if proj_file_name not in member_basenames:
        gscript.fatal(
            _("Unable to find projection file <%s>") % proj_file_name)

    msgr.message(_("Extracting data..."))
    tar.extractall(path=directory)
    tar.close()

    # We use a new list file name for map registration
    new_list_file_name = list_file_name + "_new"
    # Save current working directory path
    old_cwd = os.getcwd()

    # Switch into the data directory
    os.chdir(directory)

    # Check projection information
    if not location:
        temp_name = gscript.tempfile()
        temp_file = open(temp_name, "w")
        proj_name = os.path.abspath(proj_file_name)

        # We need to convert projection strings generated
        # by programs other than g.proj into
        # newline format so that the GRASS file comparison function
        # can be used to compare the projections
        proj_name_tmp = temp_name + "_in_projection"
        proj_file = open(proj_name, "r")
        proj_content = proj_file.read()
        proj_content = proj_content.replace(" +", "\n+")
        proj_content = proj_content.replace("\t+", "\n+")
        proj_file.close()

        proj_file = open(proj_name_tmp, "w")
        proj_file.write(proj_content)
        proj_file.close()

        p = gscript.start_command("g.proj", flags="j", stdout=temp_file)
        p.communicate()
        temp_file.close()

        if not gscript.compare_key_value_text_files(
                temp_name, proj_name_tmp, sep="="):
            if overr:
                gscript.warning(
                    _("Projection information does not match. "
                      "Proceeding..."))
            else:
                diff = ''.join(gscript.diff_files(temp_name, proj_name_tmp))
                gscript.warning(
                    _("Difference between PROJ_INFO file of "
                      "imported map and of current location:"
                      "\n{diff}").format(diff=diff))
                gscript.fatal(
                    _("Projection information does not match. "
                      "Aborting."))

    # Create a new location based on the projection information and switch
    # into it
    old_env = gscript.gisenv()
    if location:
        try:
            proj4_string = open(proj_file_name, 'r').read()
            gscript.create_location(dbase=old_env["GISDBASE"],
                                    location=location,
                                    proj4=proj4_string)
            # Just create a new location and return
            if create:
                os.chdir(old_cwd)
                return
        except Exception as e:
            gscript.fatal(
                _("Unable to create location %(l)s. Reason: %(e)s") % {
                    'l': location,
                    'e': str(e)
                })
        # Switch to the newly created location
        try:
            gscript.run_command("g.mapset",
                                mapset="PERMANENT",
                                location=location,
                                dbase=old_env["GISDBASE"])
        except CalledModuleError:
            gscript.fatal(_("Unable to switch to location %s") % location)
        # create default database connection
        try:
            gscript.run_command("t.connect", flags="d")
        except CalledModuleError:
            gscript.fatal(
                _("Unable to create default temporal database "
                  "in new location %s") % location)

    try:
        # Make sure the temporal database exists
        factory.init()

        fs = "|"
        maplist = []
        mapset = get_current_mapset()
        list_file = open(list_file_name, "r")
        new_list_file = open(new_list_file_name, "w")

        # get number of lines to correctly form the suffix
        max_count = -1
        for max_count, l in enumerate(list_file):
            pass
        max_count += 1
        list_file.seek(0)

        # Read the map list from file
        line_count = 0
        while True:
            line = list_file.readline()
            if not line:
                break

            line_list = line.split(fs)

            # The filename is actually the base name of the map
            # that must be extended by the file suffix
            filename = line_list[0].strip().split(":")[0]
            if base:
                mapname = "%s_%s" % (
                    base, gscript.get_num_suffix(line_count + 1, max_count))
                mapid = "%s@%s" % (mapname, mapset)
            else:
                mapname = filename
                mapid = mapname + "@" + mapset

            row = {}
            row["filename"] = filename
            row["name"] = mapname
            row["id"] = mapid
            row["start"] = line_list[1].strip()
            row["end"] = line_list[2].strip()

            new_list_file.write("%s%s%s%s%s\n" %
                                (mapname, fs, row["start"], fs, row["end"]))

            maplist.append(row)
            line_count += 1

        list_file.close()
        new_list_file.close()

        # Read the init file
        fs = "="
        init = {}
        init_file = open(init_file_name, "r")
        while True:
            line = init_file.readline()
            if not line:
                break

            kv = line.split(fs)
            init[kv[0]] = kv[1].strip()

        init_file.close()

        if "temporal_type" not in init or \
           "semantic_type" not in init or \
           "number_of_maps" not in init:
            gscript.fatal(
                _("Key words %(t)s, %(s)s or %(n)s not found in init"
                  " file.") % {
                      't': "temporal_type",
                      's': "semantic_type",
                      'n': "number_of_maps"
                  })

        if line_count != int(init["number_of_maps"]):
            gscript.fatal(_("Number of maps mismatch in init and list file."))

        format_ = "GTiff"
        type_ = "strds"

        if "stds_type" in init:
            type_ = init["stds_type"]
        if "format" in init:
            format_ = init["format"]

        if stds_type != type_:
            gscript.fatal(
                _("The archive file is of wrong space time dataset"
                  " type"))

        # Check the existence of the files
        if format_ == "GTiff":
            for row in maplist:
                filename = row["filename"] + ".tif"
                if not os.path.exists(filename):
                    gscript.fatal(
                        _("Unable to find GeoTIFF raster file "
                          "<%s> in archive.") % filename)
        elif format_ == "AAIGrid":
            for row in maplist:
                filename = row["filename"] + ".asc"
                if not os.path.exists(filename):
                    gscript.fatal(
                        _("Unable to find AAIGrid raster file "
                          "<%s> in archive.") % filename)
        elif format_ == "GML":
            for row in maplist:
                filename = row["filename"] + ".xml"
                if not os.path.exists(filename):
                    gscript.fatal(
                        _("Unable to find GML vector file "
                          "<%s> in archive.") % filename)
        elif format_ == "pack":
            for row in maplist:
                if type_ == "stvds":
                    filename = str(row["filename"].split(":")[0]) + ".pack"
                else:
                    filename = row["filename"] + ".pack"
                if not os.path.exists(filename):
                    gscript.fatal(
                        _("Unable to find GRASS package file "
                          "<%s> in archive.") % filename)
        else:
            gscript.fatal(_("Unsupported input format"))

        # Check the space time dataset
        id = output + "@" + mapset
        sp = dataset_factory(type_, id)
        if sp.is_in_db() and gscript.overwrite() is False:
            gscript.fatal(
                _("Space time %(t)s dataset <%(sp)s> is already in"
                  " the database. Use the overwrite flag.") % {
                      't': type_,
                      'sp': sp.get_id()
                  })

        # Import the maps
        if type_ == "strds":
            if format_ == "GTiff" or format_ == "AAIGrid":
                _import_raster_maps_from_gdal(maplist, overr, exp, location,
                                              link, format_,
                                              set_current_region, memory)
            if format_ == "pack":
                _import_raster_maps(maplist, set_current_region)
        elif type_ == "stvds":
            if format_ == "GML":
                _import_vector_maps_from_gml(maplist, overr, exp, location,
                                             link)
            if format_ == "pack":
                _import_vector_maps(maplist)

        # Create the space time dataset
        if sp.is_in_db() and gscript.overwrite() is True:
            gscript.info(
                _("Overwrite space time %(sp)s dataset "
                  "<%(id)s> and unregister all maps.") % {
                      'sp': sp.get_new_map_instance(None).get_type(),
                      'id': sp.get_id()
                  })
            sp.delete()
            sp = sp.get_new_instance(id)

        temporal_type = init["temporal_type"]
        semantic_type = init["semantic_type"]
        relative_time_unit = None
        if temporal_type == "relative":
            if "relative_time_unit" not in init:
                gscript.fatal(
                    _("Key word %s not found in init file.") %
                    ("relative_time_unit"))
            relative_time_unit = init["relative_time_unit"]
            sp.set_relative_time_unit(relative_time_unit)

        gscript.verbose(
            _("Create space time %s dataset.") %
            sp.get_new_map_instance(None).get_type())

        sp.set_initial_values(temporal_type=temporal_type,
                              semantic_type=semantic_type,
                              title=title,
                              description=descr)
        sp.insert()

        # register the maps
        fs = "|"
        register_maps_in_space_time_dataset(
            type=sp.get_new_map_instance(None).get_type(),
            name=output,
            file=new_list_file_name,
            start="file",
            end="file",
            unit=relative_time_unit,
            dbif=None,
            fs=fs,
            update_cmd_list=False)

        os.chdir(old_cwd)
    except:
        raise

    # Make sure the location is switched back correctly
    finally:
        if location:
            # Switch to the old location
            try:
                gscript.run_command("g.mapset",
                                    mapset=old_env["MAPSET"],
                                    location=old_env["LOCATION_NAME"],
                                    gisdbase=old_env["GISDBASE"])
            except CalledModuleError:
                grass.warning(_("Switching to original location failed"))

        gscript.set_raise_on_error(old_state)
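
A minimal usage sketch for the function above. All file, directory, and
dataset names are illustrative, and it assumes this function is exposed as
grass.temporal.import_stds, the way the t.rast.import script calls it:

# Hypothetical call: import an exported space time raster dataset archive
# into the current location, extracting into a scratch directory.
import grass.temporal as tgis

tgis.init()  # make sure the temporal framework is initialized
tgis.import_stds(input="precipitation.tar.gz",
                 output="precipitation",
                 directory="/tmp/stds_extract",
                 title="Monthly precipitation",
                 descr="Imported monthly precipitation series",
                 base="precip",       # maps are renamed precip_00001, ...
                 stds_type="strds")   # the archive must contain an STRDS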
Example #2
def dataset_mapcalculator(inputs, output, type, expression, base, method,
                          nprocs=1, register_null=False, spatial=False):
    """Perform map-calculations of maps from different space time
       raster/raster3d datasets, using a specific sampling method
       to select temporal related maps.

       A mapcalc expression must be provided to process the temporal
       selected maps. Temporal operators are available in addition to
       the r.mapcalc operators:

       Supported operators for relative and absolute time are:

       - td() - the time delta of the current interval in days
                and fractions of days or the unit in case of relative time
       - start_time() - The start time of the interval from the beginning
                        of the time series in days and fractions of days or
                        the unit in case of relative time
       - end_time() - The end time of the current interval from the
                      beginning of the time series in days and fractions of
                      days or the unit in case of relative time

       Supported operators for absolute time:

       - start_doy() - Day of year (doy) from the start time [1 - 366]
       - start_dow() - Day of week (dow) from the start time [1 - 7],
                       the start of the week is Monday == 1
       - start_year() - The year of the start time [0 - 9999]
       - start_month() - The month of the start time [1 - 12]
       - start_week() - Week of year of the start time [1 - 54]
       - start_day() - Day of month from the start time [1 - 31]
       - start_hour() - The hour of the start time [0 - 23]
       - start_minute() - The minute of the start time [0 - 59]
       - start_second() - The second of the start time [0 - 59]

       - end_doy() - Day of year (doy) from the end time [1 - 366]
       - end_dow() - Day of week (dow) from the end time [1 - 7],
                     the start of the week is Monday == 1
       - end_year() - The year of the end time [0 - 9999]
       - end_month() - The month of the end time [1 - 12]
       - end_week() - Week of year of the end time [1 - 54]
       - end_day() - Day of month from the end time [1 - 31]
       - end_hour() - The hour of the end time [0 - 23]
       - end_minute() - The minute of the end time [0 - 59]
       - end_second() - The second of the end time [0 - 59]

       :param inputs: The names of the input space time raster/raster3d datasets
       :param output: The name of the new space time raster(3d) dataset
       :param type: The type of the dataset: "raster" or "raster3d"
       :param expression: The r(3).mapcalc expression
       :param base: The base name of the newly created maps in case a
              mapcalc expression is provided
       :param method: The method to be used for temporal sampling
       :param nprocs: The number of parallel processes to be used for
              mapcalc processing
       :param register_null: Set this to True to register empty maps
       :param spatial: Check spatial overlap
    """

    # We need a database interface for fast computation
    dbif = SQLDatabaseInterfaceConnection()
    dbif.connect()

    mapset = get_current_mapset()
    msgr = get_tgis_message_interface()

    input_name_list = inputs.split(",")

    first_input = open_old_stds(input_name_list[0], type, dbif)

    # All additional inputs in reverse sorted order to avoid
    # wrong name substitution
    input_name_list = input_name_list[1:]
    input_name_list.sort()
    input_name_list.reverse()
    input_list = []

    for input in input_name_list:
        sp = open_old_stds(input, type, dbif)
        input_list.append(copy.copy(sp))

    new_sp = check_new_stds(output, type, dbif, gscript.overwrite())

    # Sample all inputs by the first input and create a sample matrix
    if spatial:
        msgr.message(_("Starting spatio-temporal sampling..."))
    else:
        msgr.message(_("Starting temporal sampling..."))
    map_matrix = []
    id_list = []
    sample_map_list = []
    # First entry is the first dataset id
    id_list.append(first_input.get_name())

    if len(input_list) > 0:
        has_samples = False
        for dataset in input_list:
            list = dataset.sample_by_dataset(stds=first_input,
                                             method=method, spatial=spatial,
                                             dbif=dbif)

            # In case samples are not found
            if not list or len(list) == 0:
                dbif.close()
                msgr.message(_("No samples found for map calculation"))
                return 0

            # The first entries are the samples
            map_name_list = []
            if not has_samples:
                for entry in list:
                    granule = entry["granule"]
                    # Do not consider gaps
                    if granule.get_id() is None:
                        continue
                    sample_map_list.append(granule)
                    map_name_list.append(granule.get_name())
                # Attach the map names
                map_matrix.append(copy.copy(map_name_list))
                has_samples = True

            map_name_list = []
            for entry in list:
                maplist = entry["samples"]
                granule = entry["granule"]

                # Do not consider gaps in the sampler
                if granule.get_id() is None:
                    continue

                if len(maplist) > 1:
                    msgr.warning(_("Found more than a single map in a sample "
                                   "granule. Only the first map is used for "
                                   "computation. Use t.rast.aggregate.ds to "
                                   "create synchronous raster datasets."))

                # Store all maps! This includes non-existent maps,
                # identified by id == None
                map_name_list.append(maplist[0].get_name())

            # Attach the map names
            map_matrix.append(copy.copy(map_name_list))

            id_list.append(dataset.get_name())
    else:
        list = first_input.get_registered_maps_as_objects(dbif=dbif)

        if list is None:
            dbif.close()
            msgr.message(_("No maps registered in input dataset"))
            return 0

        map_name_list = []
        for map in list:
            map_name_list.append(map.get_name())
            sample_map_list.append(map)

        # Attach the map names
        map_matrix.append(copy.copy(map_name_list))

    # Needed for map registration
    map_list = []

    if len(map_matrix) > 0:

        msgr.message(_("Starting mapcalc computation..."))

        count = 0
        # Get the number of samples
        num = len(map_matrix[0])

        # Parallel processing
        proc_list = []
        proc_count = 0

        # For all samples
        for i in range(num):

            count += 1
            if count % 10 == 0:
                msgr.percent(count, num, 1)

            # Create the r.mapcalc statement for the current time step
            map_name = "{base}_{suffix}".format(base=base,
                                                suffix=gscript.get_num_suffix(count, num))
            # Remove spaces from the expression
            expr = expression.replace(" ", "")

            # Check that all maps are in the sample
            valid_maps = True
            # Replace all dataset names with their map names of the
            # current time step
            for j in range(len(map_matrix)):
                if map_matrix[j][i] is None:
                    valid_maps = False
                    break
                # Substitute the dataset name with the map name
                expr = expr.replace(id_list[j], map_matrix[j][i])

            # Proceed with the next sample
            if not valid_maps:
                continue

            # Create the new map id and check if the map is already
            # in the database
            map_id = map_name + "@" + mapset

            new_map = first_input.get_new_map_instance(map_id)

            # Check if new map is in the temporal database
            if new_map.is_in_db(dbif):
                if gscript.overwrite():
                    # Remove the existing temporal database entry
                    new_map.delete(dbif)
                    new_map = first_input.get_new_map_instance(map_id)
                else:
                    msgr.error(_("Map <%s> is already in temporal database, "
                                 "use overwrite flag to overwrite"))
                    continue

            # Set the time stamp
            if sample_map_list[i].is_time_absolute():
                start, end = sample_map_list[i].get_absolute_time()
                new_map.set_absolute_time(start, end)
            else:
                start, end, unit = sample_map_list[i].get_relative_time()
                new_map.set_relative_time(start, end, unit)

            # Parse the temporal expressions
            expr = _operator_parser(expr, sample_map_list[0],
                                    sample_map_list[i])
            # Add the output map name
            expr = "%s=%s" % (map_name, expr)

            map_list.append(new_map)

            msgr.verbose(_("Apply mapcalc expression: \"%s\"") % expr)

            # Start the parallel r.mapcalc computation
            if type == "raster":
                proc_list.append(Process(target=_run_mapcalc2d, args=(expr,)))
            else:
                proc_list.append(Process(target=_run_mapcalc3d, args=(expr,)))
            proc_list[proc_count].start()
            proc_count += 1

            if proc_count == nprocs or proc_count == num or count == num:
                proc_count = 0
                exitcodes = 0
                for proc in proc_list:
                    proc.join()
                    exitcodes += proc.exitcode

                if exitcodes != 0:
                    dbif.close()
                    msgr.fatal(_("Error while mapcalc computation"))

                # Empty process list
                proc_list = []

        # Register the new maps in the output space time dataset
        msgr.message(_("Starting map registration in temporal database..."))

        temporal_type, semantic_type, title, description = first_input.get_initial_values()

        new_sp = open_new_stds(output, type, temporal_type, title, description,
                               semantic_type, dbif, gscript.overwrite())
        count = 0

        # collect empty maps to remove them
        empty_maps = []

        # Insert maps in the temporal database and in the new space time
        # dataset
        for new_map in map_list:

            count += 1
            if count % 10 == 0:
                msgr.percent(count, num, 1)

            # Read the map data
            new_map.load()

            # In case of a null map continue, do not register null maps
            if new_map.metadata.get_min() is None and \
               new_map.metadata.get_max() is None:
                if not register_null:
                    empty_maps.append(new_map)
                    continue

            # Insert map in temporal database
            new_map.insert(dbif)

            new_sp.register_map(new_map, dbif)

        # Update the spatio-temporal extent and the metadata table entries
        new_sp.update_from_registered_maps(dbif)

        msgr.percent(1, 1, 1)

        # Remove empty maps
        if len(empty_maps) > 0:
            names = ""
            count = 0
            for map in empty_maps:
                if count == 0:
                    names += "%s" % (map.get_name())
                else:
                    names += ",%s" % (map.get_name())
                count += 1
            if type == "raster":
                gscript.run_command("g.remove", flags='f', type='rast',
                                    pattern=names, quiet=True)
            elif type == "raster3d":
                gscript.run_command("g.remove", flags='f', type='rast3d',
                                    pattern=names, quiet=True)

    dbif.close()
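
A hedged usage sketch for the function above; dataset names and the
expression are illustrative. The temporal operators listed in the docstring
can appear directly in the expression:

# Illustrative call: per-interval temperature range, normalized by the
# interval length in days via the td() operator.
dataset_mapcalculator(inputs="temp_max,temp_min",
                      output="temp_range_rate",
                      type="raster",
                      expression="(temp_max - temp_min) / td()",
                      base="range_rate",  # new maps: range_rate_01, ...
                      method="equal",     # sample temporally equal maps
                      nprocs=4)           # up to four parallel r.mapcalc runs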
Example #3
def dataset_mapcalculator(inputs,
                          output,
                          type,
                          expression,
                          base,
                          method,
                          nprocs=1,
                          register_null=False,
                          spatial=False):
    """Perform map-calculations of maps from different space time
       raster/raster3d datasets, using a specific sampling method
       to select temporal related maps.

       A mapcalc expression must be provided to process the temporal
       selected maps. Temporal operators are available in addition to
       the r.mapcalc operators:

       Supported operators for relative and absolute time are:

       - td() - the time delta of the current interval in days
                and fractions of days or the unit in case of relative time
       - start_time() - The start time of the interval from the beginning
                        of the time series in days and fractions of days or
                        the unit in case of relative time
       - end_time() - The end time of the current interval from the
                      beginning of the time series in days and fractions of
                      days or the unit in case of relative time

       Supported operators for absolute time:

       - start_doy() - Day of year (doy) from the start time [1 - 366]
       - start_dow() - Day of week (dow) from the start time [1 - 7],
                       the start of the week is Monday == 1
       - start_year() - The year of the start time [0 - 9999]
       - start_month() - The month of the start time [1 - 12]
       - start_week() - Week of year of the start time [1 - 54]
       - start_day() - Day of month from the start time [1 - 31]
       - start_hour() - The hour of the start time [0 - 23]
       - start_minute() - The minute of the start time [0 - 59]
       - start_second() - The second of the start time [0 - 59]

       - end_doy() - Day of year (doy) from the end time [1 - 366]
       - end_dow() - Day of week (dow) from the end time [1 - 7],
                     the start of the week is Monday == 1
       - end_year() - The year of the end time [0 - 9999]
       - end_month() - The month of the end time [1 - 12]
       - end_week() - Week of year of the end time [1 - 54]
       - end_day() - Day of month from the end time [1 - 31]
       - end_hour() - The hour of the end time [0 - 23]
       - end_minute() - The minute of the end time [0 - 59]
       - end_second() - The second of the end time [0 - 59]

       :param inputs: The names of the input space time raster/raster3d datasets
       :param output: The name of the new space time raster(3d) dataset
       :param type: The type of the dataset: "raster" or "raster3d"
       :param expression: The r(3).mapcalc expression
       :param base: The base name of the newly created maps in case a
              mapcalc expression is provided
       :param method: The method to be used for temporal sampling
       :param nprocs: The number of parallel processes to be used for
              mapcalc processing
       :param register_null: Set this to True to register empty maps
       :param spatial: Check spatial overlap
    """

    # We need a database interface for fast computation
    dbif = SQLDatabaseInterfaceConnection()
    dbif.connect()

    mapset = get_current_mapset()
    msgr = get_tgis_message_interface()

    input_name_list = inputs.split(",")

    first_input = open_old_stds(input_name_list[0], type, dbif)

    # All additional inputs in reverse sorted order to avoid
    # wrong name substitution
    input_name_list = input_name_list[1:]
    input_name_list.sort()
    input_name_list.reverse()
    input_list = []

    for input in input_name_list:
        sp = open_old_stds(input, type, dbif)
        input_list.append(copy.copy(sp))

    new_sp = check_new_stds(output, type, dbif, gscript.overwrite())

    # Sample all inputs by the first input and create a sample matrix
    if spatial:
        msgr.message(_("Starting spatio-temporal sampling..."))
    else:
        msgr.message(_("Starting temporal sampling..."))
    map_matrix = []
    id_list = []
    sample_map_list = []
    # First entry is the first dataset id
    id_list.append(first_input.get_name())

    if len(input_list) > 0:
        has_samples = False
        for dataset in input_list:
            list = dataset.sample_by_dataset(stds=first_input,
                                             method=method,
                                             spatial=spatial,
                                             dbif=dbif)

            # In case samples are not found
            if not list or len(list) == 0:
                dbif.close()
                msgr.message(_("No samples found for map calculation"))
                return 0

            # The first entries are the samples
            map_name_list = []
            if not has_samples:
                for entry in list:
                    granule = entry["granule"]
                    # Do not consider gaps
                    if granule.get_id() is None:
                        continue
                    sample_map_list.append(granule)
                    map_name_list.append(granule.get_name())
                # Attach the map names
                map_matrix.append(copy.copy(map_name_list))
                has_samples = True

            map_name_list = []
            for entry in list:
                maplist = entry["samples"]
                granule = entry["granule"]

                # Do not consider gaps in the sampler
                if granule.get_id() is None:
                    continue

                if len(maplist) > 1:
                    msgr.warning(
                        _("Found more than a single map in a sample "
                          "granule. Only the first map is used for "
                          "computation. Use t.rast.aggregate.ds to "
                          "create synchronous raster datasets."))

                # Store all maps! This includes non-existent maps,
                # identified by id == None
                map_name_list.append(maplist[0].get_name())

            # Attach the map names
            map_matrix.append(copy.copy(map_name_list))

            id_list.append(dataset.get_name())
    else:
        list = first_input.get_registered_maps_as_objects(dbif=dbif)

        if list is None:
            dbif.close()
            msgr.message(_("No maps registered in input dataset"))
            return 0

        map_name_list = []
        for map in list:
            map_name_list.append(map.get_name())
            sample_map_list.append(map)

        # Attach the map names
        map_matrix.append(copy.copy(map_name_list))

    # Needed for map registration
    map_list = []

    if len(map_matrix) > 0:

        msgr.message(_("Starting mapcalc computation..."))

        count = 0
        # Get the number of samples
        num = len(map_matrix[0])

        # Parallel processing
        proc_list = []
        proc_count = 0

        # For all samples
        for i in range(num):

            count += 1
            msgr.percent(count, num, 10)

            # Create the r.mapcalc statement for the current time step
            map_name = "{base}_{suffix}".format(base=base,
                                                suffix=gscript.get_num_suffix(
                                                    count, num))
            # Remove spaces from the expression
            expr = expression.replace(" ", "")

            # Check that all maps are in the sample
            valid_maps = True
            # Replace all dataset names with their map names of the
            # current time step
            for j in range(len(map_matrix)):
                if map_matrix[j][i] is None:
                    valid_maps = False
                    break
                # Substitute the dataset name with the map name
                expr = expr.replace(id_list[j], map_matrix[j][i])

            # Proceed with the next sample
            if not valid_maps:
                continue

            # Create the new map id and check if the map is already
            # in the database
            map_id = map_name + "@" + mapset

            new_map = first_input.get_new_map_instance(map_id)

            # Check if new map is in the temporal database
            if new_map.is_in_db(dbif):
                if gscript.overwrite():
                    # Remove the existing temporal database entry
                    new_map.delete(dbif)
                    new_map = first_input.get_new_map_instance(map_id)
                else:
                    msgr.error(
                        _("Map <%s> is already in temporal database, "
                          "use overwrite flag to overwrite") % map_id)
                    continue

            # Set the time stamp
            if sample_map_list[i].is_time_absolute():
                start, end = sample_map_list[i].get_absolute_time()
                new_map.set_absolute_time(start, end)
            else:
                start, end, unit = sample_map_list[i].get_relative_time()
                new_map.set_relative_time(start, end, unit)

            # Parse the temporal expressions
            expr = _operator_parser(expr, sample_map_list[0],
                                    sample_map_list[i])
            # Add the output map name
            expr = "%s=%s" % (map_name, expr)

            map_list.append(new_map)

            msgr.verbose(_("Apply mapcalc expression: \"%s\"") % expr)

            # Start the parallel r.mapcalc computation
            if type == "raster":
                proc_list.append(Process(target=_run_mapcalc2d, args=(expr, )))
            else:
                proc_list.append(Process(target=_run_mapcalc3d, args=(expr, )))
            proc_list[proc_count].start()
            proc_count += 1

            if proc_count == nprocs or proc_count == num or count == num:
                proc_count = 0
                exitcodes = 0
                for proc in proc_list:
                    proc.join()
                    exitcodes += proc.exitcode

                if exitcodes != 0:
                    dbif.close()
                    msgr.fatal(_("Error while mapcalc computation"))

                # Empty process list
                proc_list = []

        # Register the new maps in the output space time dataset
        msgr.message(_("Starting map registration in temporal database..."))

        temporal_type, semantic_type, title, description = \
            first_input.get_initial_values()

        new_sp = open_new_stds(output, type, temporal_type, title, description,
                               semantic_type, dbif, gscript.overwrite())
        count = 0

        # collect empty maps to remove them
        empty_maps = []

        # Insert maps in the temporal database and in the new space time
        # dataset
        for new_map in map_list:

            count += 1
            msgr.percent(count, num, 10)

            # Read the map data
            new_map.load()

            # In case of a null map continue, do not register null maps
            if new_map.metadata.get_min() is None and \
               new_map.metadata.get_max() is None:
                if not register_null:
                    empty_maps.append(new_map)
                    continue

            # Insert map in temporal database
            new_map.insert(dbif)

            new_sp.register_map(new_map, dbif)

        # Update the spatio-temporal extent and the metadata table entries
        new_sp.update_from_registered_maps(dbif)

        # Remove empty maps
        if len(empty_maps) > 0:
            n_empty, n_tot = len(empty_maps), len(map_list)
            msgr.warning(
                _("Removing {}/{} ({:.1f}%) maps because they are "
                  "empty...").format(n_empty, n_tot, n_empty * 100. / n_tot))
            names = ""
            count = 0
            for map in empty_maps:
                if count == 0:
                    names += "%s" % (map.get_name())
                else:
                    names += ",%s" % (map.get_name())
                count += 1
            if type == "raster":
                gscript.run_command("g.remove",
                                    flags='f',
                                    type='raster',
                                    name=names,
                                    quiet=True)
            elif type == "raster3d":
                gscript.run_command("g.remove",
                                    flags='f',
                                    type='raster_3d',
                                    name=names,
                                    quiet=True)

    dbif.close()
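
This variant handles voxel data as well when type="raster3d". A sketch with
illustrative names, using the absolute-time operator start_month() from the
docstring; with register_null=False the all-null (non-summer) maps are
removed again after computation:

# Illustrative 3D call on a single input dataset.
dataset_mapcalculator(inputs="soil_moisture_3d",
                      output="summer_moisture",
                      type="raster3d",
                      expression="if(start_month() >= 6 && "
                                 "start_month() <= 8, soil_moisture_3d, null())",
                      base="summer",
                      method="equal",
                      register_null=False)  # drop the all-null maps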
Example #4
def import_stds(input, output, directory, title=None, descr=None, location=None,
                link=False, exp=False, overr=False, create=False,
                stds_type="strds", base=None, set_current_region=False):
    """Import space time datasets of type raster and vector

        :param input: Name of the input archive file
        :param output: The name of the output space time dataset
        :param directory: The extraction directory
        :param title: The title of the newly created space time dataset
        :param descr: The description of the newly created
                     space time dataset
        :param location: The name of the location that should be created;
                        maps are imported into this location
        :param link: Switch to link raster maps instead of importing them
        :param exp: Extend location extents based on new dataset
        :param overr: Override projection (use location's projection)
        :param create: Create the location specified by the "location"
                      parameter and exit.
                      Do not import the space time datasets.
        :param stds_type: The type of the space time dataset that
                         should be imported
        :param base: The base name of the newly imported maps; it will be
                     extended with a numerical index.
    """

    global raise_on_error
    old_state = gscript.raise_on_error
    gscript.set_raise_on_error(True)

    # Check if the input file and the extraction directory exist
    if not os.path.exists(input):
        gscript.fatal(_("Space time raster dataset archive <%s> not found")
                      % input)
    if not create and not os.path.exists(directory):
        gscript.fatal(_("Extraction directory <%s> not found") % directory)

    tar = tarfile.open(name=input, mode='r')

    # Check for important files
    msgr = get_tgis_message_interface()
    msgr.message(_("Checking validity of input file (size: %0.1f MB). Make take a while..."
        % (os.path.getsize(input)/(1024*1024.0))))
    members = tar.getnames()
    # Make sure that the basenames of the files are used for comparison
    member_basenames = [os.path.basename(name) for name in members]

    if init_file_name not in member_basenames:
        gscript.fatal(_("Unable to find init file <%s>") % init_file_name)
    if list_file_name not in member_basenames:
        gscript.fatal(_("Unable to find list file <%s>") % list_file_name)
    if proj_file_name not in member_basenames:
        gscript.fatal(_("Unable to find projection file <%s>") % proj_file_name)

    msgr.message(_("Extracting data..."))
    tar.extractall(path=directory)
    tar.close()

    # We use a new list file name for map registration
    new_list_file_name = list_file_name + "_new"
    # Save current working directory path
    old_cwd = os.getcwd()

    # Switch into the data directory
    os.chdir(directory)

    # Check projection information
    if not location:
        temp_name = gscript.tempfile()
        temp_file = open(temp_name, "w")
        proj_name = os.path.abspath(proj_file_name)

        # We need to convert projection strings generated
        # by programs other than g.proj into
        # newline format so that the GRASS file comparison function
        # can be used to compare the projections
        proj_name_tmp = temp_name + "_in_projection"
        proj_file = open(proj_name, "r")
        proj_content = proj_file.read()
        proj_content = proj_content.replace(" +", "\n+")
        proj_content = proj_content.replace("\t+", "\n+")
        proj_file.close()

        proj_file = open(proj_name_tmp, "w")
        proj_file.write(proj_content)
        proj_file.close()

        p = gscript.start_command("g.proj", flags="j", stdout=temp_file)
        p.communicate()
        temp_file.close()

        if not gscript.compare_key_value_text_files(temp_name, proj_name_tmp,
                                                    sep="="):
            if overr:
                gscript.warning(_("Projection information does not match. "
                                  "Proceeding..."))
            else:
                diff = ''.join(gscript.diff_files(temp_name, proj_name_tmp))
                gscript.warning(_("Difference between PROJ_INFO file of "
                                  "imported map and of current location:"
                                  "\n{diff}").format(diff=diff))
                gscript.fatal(_("Projection information does not match. "
                                "Aborting."))

    # Create a new location based on the projection information and switch
    # into it
    old_env = gscript.gisenv()
    if location:
        try:
            proj4_string = open(proj_file_name, 'r').read()
            gscript.create_location(dbase=old_env["GISDBASE"],
                                    location=location,
                                    proj4=proj4_string)
            # Just create a new location and return
            if create:
                os.chdir(old_cwd)
                return
        except Exception as e:
            gscript.fatal(_("Unable to create location %(l)s. Reason: %(e)s")
                          % {'l': location, 'e': str(e)})
        # Switch to the newly created location
        try:
            gscript.run_command("g.mapset", mapset="PERMANENT",
                                location=location,
                                dbase=old_env["GISDBASE"])
        except CalledModuleError:
            gscript.fatal(_("Unable to switch to location %s") % location)
        # create default database connection
        try:
            gscript.run_command("t.connect", flags="d")
        except CalledModuleError:
            gscript.fatal(_("Unable to create default temporal database "
                            "in new location %s") % location)

    try:
        # Make sure the temporal database exists
        factory.init()

        fs = "|"
        maplist = []
        mapset = get_current_mapset()
        list_file = open(list_file_name, "r")
        new_list_file = open(new_list_file_name, "w")

        # get number of lines to correctly form the suffix
        max_count = -1
        for max_count, l in enumerate(list_file):
            pass
        max_count += 1
        list_file.seek(0)

        # Read the map list from file
        line_count = 0
        while True:
            line = list_file.readline()
            if not line:
                break

            line_list = line.split(fs)

            # The filename is actually the base name of the map
            # that must be extended by the file suffix
            filename = line_list[0].strip().split(":")[0]
            if base:
                mapname = "%s_%s" % (base, gscript.get_num_suffix(line_count + 1,
                                                                  max_count))
                mapid = "%s@%s" % (mapname, mapset)
            else:
                mapname = filename
                mapid = mapname + "@" + mapset

            row = {}
            row["filename"] = filename
            row["name"] = mapname
            row["id"] = mapid
            row["start"] = line_list[1].strip()
            row["end"] = line_list[2].strip()

            new_list_file.write("%s%s%s%s%s\n" % (mapname, fs, row["start"],
                                                  fs, row["end"]))

            maplist.append(row)
            line_count += 1

        list_file.close()
        new_list_file.close()

        # Read the init file
        fs = "="
        init = {}
        init_file = open(init_file_name, "r")
        while True:
            line = init_file.readline()
            if not line:
                break

            kv = line.split(fs)
            init[kv[0]] = kv[1].strip()

        init_file.close()

        if "temporal_type" not in init or \
           "semantic_type" not in init or \
           "number_of_maps" not in init:
            gscript.fatal(_("Key words %(t)s, %(s)s or %(n)s not found in init"
                            " file.") % {'t': "temporal_type",
                                         's': "semantic_type",
                                         'n': "number_of_maps"})

        if line_count != int(init["number_of_maps"]):
            gscript.fatal(_("Number of maps mismatch in init and list file."))

        format_ = "GTiff"
        type_ = "strds"

        if "stds_type" in init:
            type_ = init["stds_type"]
        if "format" in init:
            format_ = init["format"]

        if stds_type != type_:
            gscript.fatal(_("The archive file is of wrong space time dataset"
                            " type"))

        # Check the existence of the files
        if format_ == "GTiff":
            for row in maplist:
                filename = row["filename"] + ".tif"
                if not os.path.exists(filename):
                    gscript.fatal(_("Unable to find GeoTIFF raster file "
                                    "<%s> in archive.") % filename)
        elif format_ == "AAIGrid":
            for row in maplist:
                filename = row["filename"] + ".asc"
                if not os.path.exists(filename):
                    gscript.fatal(_("Unable to find AAIGrid raster file "
                                    "<%s> in archive.") % filename)
        elif format_ == "GML":
            for row in maplist:
                filename = row["filename"] + ".xml"
                if not os.path.exists(filename):
                    gscript.fatal(_("Unable to find GML vector file "
                                    "<%s> in archive.") % filename)
        elif format_ == "pack":
            for row in maplist:
                if type_ == "stvds":
                    filename = str(row["filename"].split(":")[0]) + ".pack"
                else:
                    filename = row["filename"] + ".pack"
                if not os.path.exists(filename):
                    gscript.fatal(_("Unable to find GRASS package file "
                                    "<%s> in archive.") % filename)
        else:
            gscript.fatal(_("Unsupported input format"))

        # Check the space time dataset
        id = output + "@" + mapset
        sp = dataset_factory(type_, id)
        if sp.is_in_db() and gscript.overwrite() is False:
            gscript.fatal(_("Space time %(t)s dataset <%(sp)s> is already in"
                            " the database. Use the overwrite flag.") %
                          {'t': type_, 'sp': sp.get_id()})

        # Import the maps
        if type_ == "strds":
            if format_ == "GTiff" or format_ == "AAIGrid":
                _import_raster_maps_from_gdal(maplist, overr, exp, location,
                                              link, format_, set_current_region)
            if format_ == "pack":
                _import_raster_maps(maplist, set_current_region)
        elif type_ == "stvds":
            if format_ == "GML":
                _import_vector_maps_from_gml(
                    maplist, overr, exp, location, link)
            if format_ == "pack":
                _import_vector_maps(maplist)

        # Create the space time dataset
        if sp.is_in_db() and gscript.overwrite() is True:
            gscript.info(_("Overwrite space time %(sp)s dataset "
                           "<%(id)s> and unregister all maps.") %
                         {'sp': sp.get_new_map_instance(None).get_type(),
                          'id': sp.get_id()})
            sp.delete()
            sp = sp.get_new_instance(id)

        temporal_type = init["temporal_type"]
        semantic_type = init["semantic_type"]
        relative_time_unit = None
        if temporal_type == "relative":
            if "relative_time_unit" not in init:
                gscript.fatal(_("Key word %s not found in init file.") %
                              ("relative_time_unit"))
            relative_time_unit = init["relative_time_unit"]
            sp.set_relative_time_unit(relative_time_unit)

        gscript.verbose(_("Create space time %s dataset.") %
                        sp.get_new_map_instance(None).get_type())

        sp.set_initial_values(temporal_type=temporal_type,
                              semantic_type=semantic_type, title=title,
                              description=descr)
        sp.insert()

        # register the maps
        fs = "|"
        register_maps_in_space_time_dataset(
            type=sp.get_new_map_instance(None).get_type(),
            name=output, file=new_list_file_name, start="file",
            end="file", unit=relative_time_unit, dbif=None, fs=fs,
            update_cmd_list=False)

        os.chdir(old_cwd)
    except:
        raise

    # Make sure the location is switched back correctly
    finally:
        if location:
            # Switch to the old location
            try:
                gscript.run_command("g.mapset", mapset=old_env["MAPSET"],
                                    location=old_env["LOCATION_NAME"],
                                    gisdbase=old_env["GISDBASE"])
            except CalledModuleError:
                grass.warning(_("Switching to original location failed"))

        gscript.set_raise_on_error(old_state)
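
A sketch of the location-creation path of this variant (all names are
hypothetical). With location set, the function creates a new location from
the archive's projection file, switches into it for the import, and switches
back afterwards:

# Illustrative: create a matching location and import in one call.
import_stds(input="ndvi.tar.gz",
            output="ndvi",
            directory="/tmp/ndvi_extract",
            location="ndvi_sinusoidal")

# With create=True the function only creates the location and returns
# without importing any maps:
# import_stds(input="ndvi.tar.gz", output="ndvi",
#             directory="/tmp/ndvi_extract",
#             location="ndvi_sinusoidal", create=True)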
Example #5
def aggregate_by_topology(granularity_list,
                          granularity,
                          map_list,
                          topo_list,
                          basename,
                          time_suffix,
                          offset=0,
                          method="average",
                          nprocs=1,
                          spatial=None,
                          dbif=None,
                          overwrite=False):
    """Aggregate a list of raster input maps with r.series

       :param granularity_list: A list of AbstractMapDataset objects.
                                The temporal extents of the objects are used
                                to build the spatio-temporal topology with the
                                map list objects
       :param granularity: The granularity of the granularity list
       :param map_list: A list of RasterDataset objects that contain the raster
                        maps that should be aggregated
       :param topo_list: A list of strings of topological relations that are
                         used to select the raster maps for aggregation
       :param basename: The basename of the newly generated raster maps
       :param time_suffix: Use the granularity-truncated start time of the
                           current granule to create the suffix for the basename
       :param offset: Use a numerical offset for suffix generation
                      (overridden by time_suffix)
       :param method: The aggregation method of r.series (average, min, max, ...)
       :param nprocs: The number of processes used for parallel computation
       :param spatial: This indicates if the spatial topology is created as
                       well: spatial can be None (no spatial topology), "2D"
                       using west, east, south, north or "3D" using west,
                       east, south, north, bottom, top
       :param dbif: The database interface to be used
       :param overwrite: Overwrite existing raster maps
       :return: A list of RasterDataset objects that contain the new map names
                and the temporal extent for map registration
    """
    import grass.pygrass.modules as pymod
    import copy

    msgr = get_tgis_message_interface()

    dbif, connected = init_dbif(dbif)

    topo_builder = SpatioTemporalTopologyBuilder()
    topo_builder.build(mapsA=granularity_list, mapsB=map_list, spatial=spatial)

    # The module queue for parallel execution
    process_queue = pymod.ParallelModuleQueue(int(nprocs))

    # Dummy process object that will be deep copied
    # and put into the process queue
    r_series = pymod.Module("r.series",
                            output="spam",
                            method=[method],
                            overwrite=overwrite,
                            quiet=True,
                            run_=False,
                            finish_=False)
    g_copy = pymod.Module("g.copy",
                          rast=['spam', 'spamspam'],
                          quiet=True,
                          run_=False,
                          finish_=False)
    output_list = []
    count = 0

    for granule in granularity_list:
        msgr.percent(count, len(granularity_list), 1)
        count += 1

        aggregation_list = []

        if "equal" in topo_list and granule.equal:
            for map_layer in granule.equal:
                aggregation_list.append(map_layer.get_name())
        if "contains" in topo_list and granule.contains:
            for map_layer in granule.contains:
                aggregation_list.append(map_layer.get_name())
        if "during" in topo_list and granule.during:
            for map_layer in granule.during:
                aggregation_list.append(map_layer.get_name())
        if "starts" in topo_list and granule.starts:
            for map_layer in granule.starts:
                aggregation_list.append(map_layer.get_name())
        if "started" in topo_list and granule.started:
            for map_layer in granule.started:
                aggregation_list.append(map_layer.get_name())
        if "finishes" in topo_list and granule.finishes:
            for map_layer in granule.finishes:
                aggregation_list.append(map_layer.get_name())
        if "finished" in topo_list and granule.finished:
            for map_layer in granule.finished:
                aggregation_list.append(map_layer.get_name())
        if "overlaps" in topo_list and granule.overlaps:
            for map_layer in granule.overlaps:
                aggregation_list.append(map_layer.get_name())
        if "overlapped" in topo_list and granule.overlapped:
            for map_layer in granule.overlapped:
                aggregation_list.append(map_layer.get_name())

        if aggregation_list:
            msgr.verbose(
                _("Aggregating %(len)i raster maps from %(start)s to"
                  " %(end)s") %
                ({
                    "len": len(aggregation_list),
                    "start": str(granule.temporal_extent.get_start_time()),
                    "end": str(granule.temporal_extent.get_end_time())
                }))

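            # Build the output map suffix either from the granule start
            # time or from a zero-padded running number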
            if granule.is_time_absolute() is True and time_suffix is True:
                suffix = create_suffix_from_datetime(
                    granule.temporal_extent.get_start_time(), granularity)
            else:
                suffix = gscript.get_num_suffix(
                    count + int(offset),
                    len(granularity_list) + int(offset))
            output_name = "%s_%s" % (basename, suffix)

            map_layer = RasterDataset("%s@%s" %
                                      (output_name, get_current_mapset()))
            map_layer.set_temporal_extent(granule.get_temporal_extent())

            if map_layer.map_exists() is True and overwrite is False:
                msgr.fatal(
                    _("Unable to perform aggregation. Output raster "
                      "map <%(name)s> exists and overwrite flag was "
                      "not set") % ({
                          "name": output_name
                      }))

            output_list.append(map_layer)

            if len(aggregation_list) > 1:
                # Create the r.series input file
                filename = gscript.tempfile(True)
                # Write one input map name per line
                with open(filename, "w") as list_file:
                    for name in aggregation_list:
                        list_file.write("%s\n" % name)

                mod = copy.deepcopy(r_series)
                mod(file=filename, output=output_name)
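                # With very long input lists, the z flag tells r.series
                # not to keep all input maps open at once, so the open
                # files limit is not exceeded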
                if len(aggregation_list) > 1000:
                    mod(flags="z")
                process_queue.put(mod)
            else:
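                # A single map per granule is simply copied instead of
                # aggregated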
                mod = copy.deepcopy(g_copy)
                mod(rast=[aggregation_list[0], output_name])
                process_queue.put(mod)

    # Make sure all queued modules have finished
    process_queue.wait()

    if connected:
        dbif.close()

    msgr.percent(1, 1, 1)

    return output_list
Example no. 6
def aggregate_by_topology(granularity_list, granularity, map_list, topo_list,
                          basename, time_suffix, offset=0, method="average",
                          nprocs=1, spatial=None, dbif=None, overwrite=False,
                          file_limit=1000):
    """Aggregate a list of raster input maps with r.series

       :param granularity_list: A list of AbstractMapDataset objects.
                                The temporal extents of the objects are used
                                to build the spatio-temporal topology with the
                                map list objects
       :param granularity: The granularity of the granularity list
       :param map_list: A list of RasterDataset objects that contain the raster
                        maps that should be aggregated
       :param topo_list: A list of strings of topological relations that are
                         used to select the raster maps for aggregation
       :param basename: The basename of the newly generated raster maps
       :param time_suffix: Use the granularity-truncated start time of the
                           current granule to create the suffix for the basename
       :param offset: Use a numerical offset for suffix generation
                      (overridden by time_suffix)
       :param method: The aggregation method of r.series (average, min, max, ...)
       :param nprocs: The number of processes used for parallel computation
       :param spatial: This indicates if the spatial topology is created as
                       well: spatial can be None (no spatial topology), "2D"
                       using west, east, south, north or "3D" using west,
                       east, south, north, bottom, top
       :param dbif: The database interface to be used
       :param overwrite: Overwrite existing raster maps
       :param file_limit: The maximum number of raster map layers that
                          should be opened at once by r.series
       :return: A list of RasterDataset objects that contain the new map names
                and the temporal extent for map registration
    """
    import grass.pygrass.modules as pymod
    import copy

    msgr = get_tgis_message_interface()

    dbif, connected = init_dbif(dbif)

    topo_builder = SpatioTemporalTopologyBuilder()
    topo_builder.build(mapsA=granularity_list, mapsB=map_list, spatial=spatial)

    # The module queue for parallel execution
    process_queue = pymod.ParallelModuleQueue(int(nprocs))

    # Dummy process object that will be deep copied
    # and be put into the process queue
    r_series = pymod.Module("r.series", output="spam", method=[method],
                            overwrite=overwrite, quiet=True, run_=False,
                            finish_=False)
    g_copy = pymod.Module("g.copy", raster=['spam', 'spamspam'],
                          quiet=True, run_=False, finish_=False)
    output_list = []
    count = 0

    for granule in granularity_list:
        msgr.percent(count, len(granularity_list), 1)
        count += 1

        aggregation_list = []

        if "equal" in topo_list and granule.equal:
            for map_layer in granule.equal:
                aggregation_list.append(map_layer.get_name())
        if "contains" in topo_list and granule.contains:
            for map_layer in granule.contains:
                aggregation_list.append(map_layer.get_name())
        if "during" in topo_list and granule.during:
            for map_layer in granule.during:
                aggregation_list.append(map_layer.get_name())
        if "starts" in topo_list and granule.starts:
            for map_layer in granule.starts:
                aggregation_list.append(map_layer.get_name())
        if "started" in topo_list and granule.started:
            for map_layer in granule.started:
                aggregation_list.append(map_layer.get_name())
        if "finishes" in topo_list and granule.finishes:
            for map_layer in granule.finishes:
                aggregation_list.append(map_layer.get_name())
        if "finished" in topo_list and granule.finished:
            for map_layer in granule.finished:
                aggregation_list.append(map_layer.get_name())
        if "overlaps" in topo_list and granule.overlaps:
            for map_layer in granule.overlaps:
                aggregation_list.append(map_layer.get_name())
        if "overlapped" in topo_list and granule.overlapped:
            for map_layer in granule.overlapped:
                aggregation_list.append(map_layer.get_name())

        if aggregation_list:
            msgr.verbose(_("Aggregating %(len)i raster maps from %(start)s to"
                           " %(end)s") %
                         ({"len": len(aggregation_list),
                           "start": str(granule.temporal_extent.get_start_time()),
                           "end": str(granule.temporal_extent.get_end_time())}))

            if granule.is_time_absolute() is True and time_suffix is True:
                suffix = create_suffix_from_datetime(granule.temporal_extent.get_start_time(),
                                                     granularity)
            else:
                suffix = gscript.get_num_suffix(count + int(offset),
                                                len(granularity_list) + int(offset))
            output_name = "%s_%s" % (basename, suffix)

            map_layer = RasterDataset("%s@%s" % (output_name,
                                                 get_current_mapset()))
            map_layer.set_temporal_extent(granule.get_temporal_extent())

            if map_layer.map_exists() is True and overwrite is False:
                msgr.fatal(_("Unable to perform aggregation. Output raster "
                             "map <%(name)s> exists and overwrite flag was "
                             "not set") % ({"name": output_name}))

            output_list.append(map_layer)

            if len(aggregation_list) > 1:
                # Create the r.series input file
                filename = gscript.tempfile(True)
                # Write one input map name per line
                with open(filename, "w") as list_file:
                    for name in aggregation_list:
                        list_file.write("%s\n" % name)

                mod = copy.deepcopy(r_series)
                mod(file=filename, output=output_name)
                if len(aggregation_list) > int(file_limit):
                    msgr.warning(_("The limit of open files (%i) was "
                                   "exceeded (%i). The module r.series will "
                                   "be run with the z flag to avoid "
                                   "exceeding the open files limit.") %
                                 (int(file_limit), len(aggregation_list)))
                    mod(flags="z")
                process_queue.put(mod)
            else:
                mod = copy.deepcopy(g_copy)
                mod(raster=[aggregation_list[0], output_name])
                process_queue.put(mod)

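    # Make sure all queued modules have finished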
    process_queue.wait()

    if connected:
        dbif.close()

    msgr.percent(1, 1, 1)

    return output_list
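
A minimal usage sketch for the second variant follows. The dataset name
"temperature@climate", the basename "temp_monthly" and the monthly granule
loop are illustrative assumptions, not taken from the examples above; the
sketch assumes a running GRASS session whose temporal database has been
initialized and an input dataset with absolute time.

import grass.temporal as tgis

tgis.init()
sp = tgis.open_old_stds("temperature@climate", "strds")
map_list = sp.get_registered_maps_as_objects()

# Build one empty granule per month across the dataset's temporal extent
granularity = "1 months"
start, end = sp.get_temporal_extent_as_tuple()
granularity_list = []
while start < end:
    granule = tgis.RasterDataset(None)
    next_start = tgis.increment_datetime_by_string(start, granularity)
    granule.set_absolute_time(start, next_start)
    granularity_list.append(granule)
    start = next_start

result = tgis.aggregate_by_topology(granularity_list, granularity,
                                    map_list, topo_list=["contains"],
                                    basename="temp_monthly",
                                    time_suffix=True, method="average",
                                    nprocs=2, overwrite=True)

The returned RasterDataset objects are not registered anywhere yet; a
caller would typically register them in a new space time raster dataset,
for example with tgis.register_map_object_list.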