Example #1
    def Test_get_mapset(self):

        mapset = querydb.get_mapset(mapsetcode='SPOTV-Africa-1km')
        logger.info("Mapset: %s", mapset.pixel_shift_lat)
        mapset_dict = functions.row2dict(mapset)

        self.assertEqual(1, 1)
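Example #1 exercises only a couple of fields; across these snippets the row returned by querydb.get_mapset is accessed as follows (a hedged summary inferred from usage in Examples #5, #10 and #12, not an authoritative schema):

    # Mapset row fields used across these examples (inferred from usage):
    #   mapsetcode, descriptive_name, srs_wkt,
    #   upper_left_long, upper_left_lat,
    #   pixel_shift_long, pixel_shift_lat,
    #   rotation_factor_long, rotation_factor_lat,
    #   pixel_size_x, pixel_size_y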
Example #2
    def test_product_data_management(self):
        def row2dict(row):
            d = {}
            for column_name in row.c.keys():  # all_cols:
                d[column_name] = str(getattr(row, column_name))
            return d

        # Select prod/vers
        productcode = 'vgt-ndvi'
        version = 'proba-v2.1'
        product = Product(product_code=productcode, version=version)
        # does the product have mapsets AND subproducts?
        all_prod_mapsets = product.mapsets
        all_prod_subproducts = product.subproducts
        if len(all_prod_mapsets) > 0 and len(all_prod_subproducts) > 0:
                for mapset in all_prod_mapsets:
                    mapset_info = querydb.get_mapset(mapsetcode=mapset, allrecs=False, echo=False)
                    mapset_dict = row2dict(mapset_info)
                    mapset_dict['mapsetdatasets'] = []
                    all_mapset_datasets = product.get_subproducts(mapset=mapset)
                    for subproductcode in all_mapset_datasets:
                        dataset_info = querydb.get_subproduct(productcode=productcode,
                                                              version=version,
                                                              subproductcode=subproductcode,
                                                              echo=False)

                        dataset_dict = row2dict(dataset_info)
                        dataset = product.get_dataset(mapset=mapset, sub_product_code=subproductcode)
                        completeness = dataset.get_dataset_normalized_info()
                        dataset_dict['datasetcompleteness'] = completeness

                        mapset_dict['mapsetdatasets'].append(dataset_dict)
Example #3
    def test_product_data_management(self):
        def row2dict(row):
            d = {}
            for column_name in row.c.keys():  # all_cols:
                d[column_name] = str(getattr(row, column_name))
            return d

        # Select prod/vers
        productcode = 'vgt-ndvi'
        version = 'proba-v2.1'
        product = Product(product_code=productcode, version=version)
        # does the product have mapsets AND subproducts?
        all_prod_mapsets = product.mapsets
        all_prod_subproducts = product.subproducts
        if len(all_prod_mapsets) > 0 and len(all_prod_subproducts) > 0:
            for mapset in all_prod_mapsets:
                mapset_info = querydb.get_mapset(mapsetcode=mapset,
                                                 allrecs=False)
                mapset_dict = row2dict(mapset_info)
                mapset_dict['mapsetdatasets'] = []
                all_mapset_datasets = product.get_subproducts(mapset=mapset)
                for subproductcode in all_mapset_datasets:
                    dataset_info = querydb.get_subproduct(
                        productcode=productcode,
                        version=version,
                        subproductcode=subproductcode)

                    dataset_dict = row2dict(dataset_info)
                    dataset = product.get_dataset(
                        mapset=mapset, sub_product_code=subproductcode)
                    completeness = dataset.get_dataset_normalized_info()
                    dataset_dict['datasetcompleteness'] = completeness

                    mapset_dict['mapsetdatasets'].append(dataset_dict)
Example #4
    def test_all_products_to_json(self):
        def row2dict(row):
            d = {}
            for column_name in list(row.c.keys()):  # all_cols:
                d[column_name] = str(getattr(row, column_name))
            return d

        # get full distinct list of products (native only)
        db_products = querydb.get_products()
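        # Probe __len__ directly: a result object without it raises
        # AttributeError here and triggers the native-products fallback.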
        try:
            db_products.__len__()
        except AttributeError:
            db_products = querydb.get_product_native(allrecs=True)
        self.assertTrue(len(db_products) > 0)
        products_dict_all = []
        # loop the products list
        for product in db_products:
            if python_version == 2:
                prod_dict = row2dict(product)
            if python_version == 3:
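                # presumably the py3 query already returns dict-like rows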
                prod_dict = product
            productcode = prod_dict['productcode']
            version = prod_dict['version']
            p = Product(product_code=productcode, version=version)

            # does the product have mapsets AND subproducts?
            all_prod_mapsets = p.mapsets
            all_prod_subproducts = p.subproducts
            if len(all_prod_mapsets) > 0 and len(all_prod_subproducts) > 0:
                prod_dict['productmapsets'] = []
                for mapset in all_prod_mapsets:
                    mapset_info = querydb.get_mapset(mapsetcode=mapset, allrecs=False)
                    mapset_dict = row2dict(mapset_info)
                    mapset_dict['mapsetdatasets'] = []
                    all_mapset_datasets = p.get_subproducts(mapset=mapset)
                    for subproductcode in all_mapset_datasets:
                        dataset_info = querydb.get_subproduct(productcode=productcode,
                                                              version=version,
                                                              subproductcode=subproductcode)

                        dataset_dict = row2dict(dataset_info)
                        dataset = p.get_dataset(mapset=mapset, sub_product_code=subproductcode)
                        completeness = dataset.get_dataset_normalized_info()
                        dataset_dict['datasetcompleteness'] = completeness

                        mapset_dict['mapsetdatasets'].append(dataset_dict)
                    prod_dict['productmapsets'].append(mapset_dict)
            products_dict_all.append(prod_dict)

        # See ES2-596
        self.assertEqual(len(db_products), len(products_dict_all))
Example #5
    def assigndb(self, mapsetcode):
        mapset = querydb.get_mapset(mapsetcode, echo=False)
        spatial_ref_wkt = mapset.srs_wkt
        geo_transform = [
            mapset.upper_left_long, mapset.pixel_shift_long,
            mapset.rotation_factor_long, mapset.upper_left_lat,
            mapset.rotation_factor_lat, mapset.pixel_shift_lat
        ]

        self.spatial_ref.ImportFromWkt(spatial_ref_wkt)
        self.geo_transform = geo_transform
        self.size_x = int(mapset.pixel_size_x)
        self.size_y = int(mapset.pixel_size_y)
        self.short_name = mapset.mapsetcode
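The six-element geo_transform list above follows the standard GDAL affine geotransform convention: [x_origin, x_pixel_size, x_rotation, y_origin, y_rotation, y_pixel_size]. A minimal sketch of the pixel-to-coordinate mapping it encodes (standard GDAL behaviour, not code from this project):

    def pixel_to_geo(gt, col, row):
        # GDAL affine geotransform: gt = [x0, dx, rx, y0, ry, dy]
        x = gt[0] + col * gt[1] + row * gt[2]
        y = gt[3] + col * gt[4] + row * gt[5]
        return x, y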
Example #6
    def assigndb(self, mapsetcode):
        mapset = querydb.get_mapset(mapsetcode, echo=False)
        spatial_ref_wkt = mapset.srs_wkt
        geo_transform = [mapset.upper_left_long,
                         mapset.pixel_shift_long,
                         mapset.rotation_factor_long,
                         mapset.upper_left_lat,
                         mapset.rotation_factor_lat,
                         mapset.pixel_shift_lat]

        self.spatial_ref.ImportFromWkt(spatial_ref_wkt)
        self.geo_transform = geo_transform
        self.size_x = int(mapset.pixel_size_x)
        self.size_y = int(mapset.pixel_size_y)
        self.short_name = mapset.mapsetcode
Example #7
    def Test_get_processingchains_input_products(self):
        import json
        processingchain_products = querydb.get_processingchains_input_products()
        if len(processingchain_products) > 0:
            products_dict_all = []

            # loop the products list
            for input_product in processingchain_products:
                process_id = input_product.process_id
                output_mapsetcode = input_product.output_mapsetcode
                prod_dict = functions.row2dict(input_product)
                # prod_dict = input_product
                # del prod_dict['_labels']

                prod_dict['productmapsets'] = []
                mapset_info = querydb.get_mapset(mapsetcode=output_mapsetcode)

                mapset_dict = functions.row2dict(mapset_info)
                mapset_dict['mapsetoutputproducts'] = []
                output_products = querydb.get_processingchain_output_products(
                    process_id)
                for outputproduct in output_products:
                    outputproduct_dict = functions.row2dict(outputproduct)
                    # outputproduct_dict = outputproduct
                    # del outputproduct_dict['_labels']
                    mapset_dict['mapsetoutputproducts'].append(
                        outputproduct_dict)
                prod_dict['productmapsets'].append(mapset_dict)
                products_dict_all.append(prod_dict)

            prod_json = json.dumps(products_dict_all,
                                   ensure_ascii=False,
                                   sort_keys=True,
                                   indent=4,
                                   separators=(', ', ': '))

        # logger.info("Processing chains: %s", processingchain_products)
        # for row in processingchain_products:
        #     logger.info("row.dict: %s", row.__dict__)
        #     logger.info("row.process_id: %s", row.process_id)
        #     logger.info("row.output_mapsetcode: %s", row.output_mapsetcode)
        #     logger.info("row.mapsetcode: %s", row.mapsetcode)
        #     print row.process_id
        #     print row.output_mapsetcode
        #     print row.mapsetcode

        self.assertEqual(1, 1)
Example #8
    def test_all_products_to_json(self):
        def row2dict(row):
            d = {}
            for column_name in row.c.keys():  # all_cols:
                d[column_name] = str(getattr(row, column_name))
            return d

        # get full distinct list of products (native only)
        db_products = querydb.get_products(echo=False)
        try:
            db_products.__len__()
        except AttributeError:
            db_products = querydb.get_product_native(allrecs=True, echo=False)
        self.assertTrue(len(db_products) > 0)
        products_dict_all = []
        # loop the products list
        for product in db_products:
            prod_dict = row2dict(product)
            productcode = prod_dict['productcode']
            version = prod_dict['version']
            p = Product(product_code=productcode, version=version)

            # does the product have mapsets AND subproducts?
            all_prod_mapsets = p.mapsets
            all_prod_subproducts = p.subproducts
            if len(all_prod_mapsets) > 0 and len(all_prod_subproducts) > 0:
                prod_dict['productmapsets'] = []
                for mapset in all_prod_mapsets:
                    mapset_info = querydb.get_mapset(mapsetcode=mapset, allrecs=False, echo=False)
                    mapset_dict = row2dict(mapset_info)
                    mapset_dict['mapsetdatasets'] = []
                    all_mapset_datasets = p.get_subproducts(mapset=mapset)
                    for subproductcode in all_mapset_datasets:
                        dataset_info = querydb.get_subproduct(productcode=productcode,
                                                              version=version,
                                                              subproductcode=subproductcode,
                                                              echo=False)

                        dataset_dict = row2dict(dataset_info)
                        dataset = p.get_dataset(mapset=mapset, sub_product_code=subproductcode)
                        completeness = dataset.get_dataset_normalized_info()
                        dataset_dict['datasetcompleteness'] = completeness

                        mapset_dict['mapsetdatasets'].append(dataset_dict)
                    prod_dict['productmapsets'].append(mapset_dict)
            products_dict_all.append(prod_dict)
        self.assertEqual(len(db_products), 31)
Example #9
    def Test_get_processingchains_input_products(self):
        import json
        processingchain_products = querydb.get_processingchains_input_products()
        if len(processingchain_products) > 0:
            products_dict_all = []

            # loop the products list
            for input_product in processingchain_products:
                process_id = input_product.process_id
                output_mapsetcode = input_product.output_mapsetcode
                prod_dict = functions.row2dict(input_product)
                # prod_dict = input_product
                # del prod_dict['_labels']

                prod_dict['productmapsets'] = []
                mapset_info = querydb.get_mapset(mapsetcode=output_mapsetcode)

                mapset_dict = functions.row2dict(mapset_info)
                mapset_dict['mapsetoutputproducts'] = []
                output_products = querydb.get_processingchain_output_products(process_id)
                for outputproduct in output_products:
                    outputproduct_dict = functions.row2dict(outputproduct)
                    # outputproduct_dict = outputproduct
                    # del outputproduct_dict['_labels']
                    mapset_dict['mapsetoutputproducts'].append(outputproduct_dict)
                prod_dict['productmapsets'].append(mapset_dict)
                products_dict_all.append(prod_dict)

            prod_json = json.dumps(products_dict_all,
                                   ensure_ascii=False,
                                   sort_keys=True,
                                   indent=4,
                                   separators=(', ', ': '))

        # logger.info("Processing chains: %s", processingchain_products)
        # for row in processingchain_products:
        #     logger.info("row.dict: %s", row.__dict__)
        #     logger.info("row.process_id: %s", row.process_id)
        #     logger.info("row.output_mapsetcode: %s", row.output_mapsetcode)
        #     logger.info("row.mapsetcode: %s", row.mapsetcode)
        #     print row.process_id
        #     print row.output_mapsetcode
        #     print row.mapsetcode

        self.assertEqual(1, 1)
Example #10
    def assigndb(self, mapsetcode):
        mapset = querydb.get_mapset(mapsetcode)
        self.spatial_ref_wkt = mapset.srs_wkt
        geo_transform = [
            float(mapset.upper_left_long),
            float(mapset.pixel_shift_long),
            float(mapset.rotation_factor_long),
            float(mapset.upper_left_lat),
            float(mapset.rotation_factor_lat),
            float(mapset.pixel_shift_lat)
        ]

        self.spatial_ref.ImportFromWkt(self.spatial_ref_wkt)
        self.geo_transform = geo_transform
        self.size_x = int(mapset.pixel_size_x)
        self.size_y = int(mapset.pixel_size_y)
        self.short_name = mapset.mapsetcode
        self.bbox = self.get_bbox(mapset)
Example #11
    def Test_get_mapset(self):

        mapset = querydb.get_mapset(mapsetcode='WGS84_Africa_1km')
        logger.info("Mapset: %s", mapset)

        self.assertEqual(1, 1)
Example #12
def getTimeseries(productcode, subproductcode, version, mapsetcode, geom,
                  start_date, end_date, aggregate):

    #    Extract a timeseries from a list of files and return it as a JSON object.
    #    It applies to a single dataset (prod/sprod/version/mapset) between 2 dates.
    #    Several types of aggregation are foreseen:
    #
    #       mean :      Sum(Xi)/N(Xi)        -> min/max not considered          e.g. Rain
    #       cumulate:   Sum(Xi)              -> min/max not considered          e.g. Fire
    #
    #       count:      N(Xi where min < Xi < max)                              e.g. Vegetation anomalies
    #       surface:    count * PixelArea                                       e.g. Water Bodies
    #       percent:    count/Ntot                                              e.g. Vegetation anomalies
    #       precip:     compute the precipitation volume in m3*1E6              Rain (only)
    #
    #   History: 1.0 :  Initial release - since 2.0.1 -> now renamed '_green' from greenwich package
    #            1.1 :  Since Feb. 2017, it is based on a different approach (gdal.RasterizeLayer instead of greenwich)
    #                   in order to solve the issue with MULTIPOLYGON
    #

    ogr.UseExceptions()

    # Get Mapset Info
    mapset_info = querydb.get_mapset(mapsetcode=mapsetcode)

    # Prepare for computing the conversion to area: the pixel size at Lat=0 is computed here;
    # the correction to the actual latitude (on the AVERAGE value) is applied further below.
    # 12364.35 is (~111.195 km per degree)**2, i.e. the deg**2 -> km**2 factor at the equator.
    const_d2km = 12364.35
    area_km_equator = abs(float(mapset_info.pixel_shift_lat)) * abs(
        float(mapset_info.pixel_shift_long)) * const_d2km

    # Get Product Info
    product_info = querydb.get_product_out_info(productcode=productcode,
                                                subproductcode=subproductcode,
                                                version=version)
    if len(product_info) > 0:
        # Get info from product_info
        scale_factor = 0
        scale_offset = 0
        nodata = 0
        date_format = ''
        for row in product_info:
            scale_factor = row.scale_factor
            scale_offset = row.scale_offset
            nodata = row.nodata
            date_format = row.date_format
            data_type = row.data_type_id

        # Create an output/temp shapefile, for managing the output layer (really mandatory ?? Can be simplified ???)
        try:
            tmpdir = tempfile.mkdtemp(prefix=__name__,
                                      suffix='_getTimeseries',
                                      dir=es_constants.base_tmp_dir)
        except Exception:
            logger.error('Cannot create temporary dir ' +
                         es_constants.base_tmp_dir + '. Exit')
            raise NameError('Error in creating tmpdir')

        out_shape = tmpdir + os.path.sep + "output_shape.shp"
        outDriver = ogr.GetDriverByName('ESRI Shapefile')

        # Create the output shapefile
        outDataSource = outDriver.CreateDataSource(out_shape)
        dest_srs = ogr.osr.SpatialReference()
        dest_srs.ImportFromEPSG(4326)

        outLayer = outDataSource.CreateLayer("Layer", dest_srs)
        # outLayer = outDataSource.CreateLayer("Layer")
        idField = ogr.FieldDefn("id", ogr.OFTInteger)
        outLayer.CreateField(idField)

        featureDefn = outLayer.GetLayerDefn()
        feature = ogr.Feature(featureDefn)
        feature.SetGeometry(geom)
        # area = geom.GetArea()
        feature.SetField("id", 1)
        outLayer.CreateFeature(feature)
        feature = None

        [list_files,
         dates_list] = getFilesList(productcode, subproductcode, version,
                                    mapsetcode, date_format, start_date,
                                    end_date)

        # Build a dictionary mapping dates to filenames
        dates_to_files_dict = dict(list(zip(dates_list, list_files)))

        # Generate unique list of files
        unique_list = set(list_files)
        uniqueFilesValues = []

        geo_mask_created = False
        for infile in unique_list:
            single_result = {
                'filename': '',
                'meanvalue_noscaling': nodata,
                'meanvalue': None
            }

            if infile.strip() != '' and os.path.isfile(infile):
                # try:

                # Open input file
                orig_ds = gdal.Open(infile, gdal.GA_ReadOnly)
                orig_cs = osr.SpatialReference()
                orig_cs.ImportFromWkt(orig_ds.GetProjectionRef())
                orig_geoT = orig_ds.GetGeoTransform()
                x_origin = orig_geoT[0]
                y_origin = orig_geoT[3]
                pixel_size_x = orig_geoT[1]
                pixel_size_y = -orig_geoT[5]

                in_data_type_gdal = conv_data_type_to_gdal(data_type)

                # Create a mask from the geometry, with the same georef as the input file[s]
                if not geo_mask_created:

                    # Read polygon extent and round to raster resolution
                    x_min, x_max, y_min, y_max = outLayer.GetExtent()
                    x_min_round = int(old_div(
                        (x_min - x_origin),
                        pixel_size_x)) * pixel_size_x + x_origin
                    x_max_round = (
                        int(old_div(
                            (x_max - x_origin),
                            (pixel_size_x))) + 1) * pixel_size_x + x_origin
                    y_min_round = (
                        int(old_div(
                            (y_min - y_origin),
                            (pixel_size_y))) - 1) * pixel_size_y + y_origin
                    y_max_round = int(
                        old_div((y_max - y_origin),
                                (pixel_size_y))) * pixel_size_y + y_origin
                    # Create the destination data source
                    x_res = int(
                        round(
                            old_div((x_max_round - x_min_round),
                                    pixel_size_x)))
                    y_res = int(
                        round(
                            old_div((y_max_round - y_min_round),
                                    pixel_size_y)))
                    # Create mask in memory
                    mem_driver = gdal.GetDriverByName('MEM')
                    mem_ds = mem_driver.Create('', x_res, y_res, 1,
                                               in_data_type_gdal)
                    mask_geoT = [
                        x_min_round, pixel_size_x, 0, y_max_round, 0,
                        -pixel_size_y
                    ]
                    mem_ds.SetGeoTransform(mask_geoT)
                    mem_ds.SetProjection(orig_cs.ExportToWkt())
                    # Create a Layer with '1' for the pixels to be selected
                    gdal.RasterizeLayer(mem_ds, [1], outLayer, burn_values=[1])
                    # gdal.RasterizeLayer(mem_ds, [1], outLayer, None, None, [1])

                    # Read the polygon-mask
                    band = mem_ds.GetRasterBand(1)
                    geo_values = mem_ds.ReadAsArray()

                    # Create a mask from geo_values (mask-out the '0's)
                    geo_mask = ma.make_mask(geo_values == 0)
                    geo_mask_created = True
                    # Clean/Close objects
                    mem_ds = None
                    mem_driver = None
                    outDriver = None
                    outLayer = None

                # Read data from input file
                x_offset = int(old_div((x_min - x_origin), pixel_size_x))
                y_offset = int(old_div((y_origin - y_max), pixel_size_y))

                band_in = orig_ds.GetRasterBand(1)
                data = band_in.ReadAsArray(x_offset, y_offset, x_res, y_res)
                #   Catch the Error ES2-105 (polygon not included in Mapset)
                if data is None:
                    logger.error(
                        'ERROR: polygon extends out of file mapset for file: %s'
                        % infile)
                    return []

                # Create a masked array from the data (considering Nodata)
                masked_data = ma.masked_equal(data, nodata)

                # Apply on top of it the geo mask
                mxnodata = ma.masked_where(geo_mask, masked_data)

                # Test ONLY
                # write_ds_to_geotiff(mem_ds, '/data/processing/exchange/Tests/mem_ds.tif')

                if aggregate['aggregation_type'] in ('count', 'percent', 'surface', 'precip'):

                    if mxnodata.count() == 0:
                        meanResult = None
                    else:
                        mxrange = mxnodata
                        min_val = aggregate['aggregation_min']
                        max_val = aggregate['aggregation_max']

                        if min_val is not None:
                            min_val_scaled = old_div((min_val - scale_offset),
                                                     scale_factor)
                            mxrange = ma.masked_less(mxnodata, min_val_scaled)

                            # See ES2-271
                            if max_val is not None:
                                # Scale threshold from physical to digital value
                                max_val_scaled = old_div(
                                    (max_val - scale_offset), scale_factor)
                                mxrange = ma.masked_greater(
                                    mxrange, max_val_scaled)

                        elif max_val is not None:
                            # Scale threshold from physical to digital value
                            max_val_scaled = old_div((max_val - scale_offset),
                                                     scale_factor)
                            mxrange = ma.masked_greater(
                                mxnodata, max_val_scaled)

                        if aggregate['aggregation_type'] == 'percent':
                            # 'percent'
                            meanResult = float(mxrange.count()) / float(
                                mxnodata.count()) * 100

                        elif aggregate['aggregation_type'] == 'surface':
                            # 'surface'
                            # Estimate 'average' Latitude
                            y_avg = (y_min + y_max) / 2.0
                            pixelAvgArea = area_km_equator * math.cos(
                                old_div(y_avg, 180) * math.pi)
                            meanResult = float(mxrange.count()) * pixelAvgArea
                        elif aggregate['aggregation_type'] == 'precip':
                            # 'precip'
                            # Estimate 'average' Latitude
                            y_avg = (y_min + y_max) / 2.0
                            pixelAvgArea = area_km_equator * math.cos(
                                old_div(y_avg, 180) * math.pi)
                            n_pixels = mxnodata.count()
                            avg_precip = mxnodata.mean()
                            # Result is in km * km * mm, i.e. 1E3 m*m*m -> we divide by 1E3 to get 1E6 m*m*m
                            meanResult = float(
                                n_pixels) * pixelAvgArea * avg_precip * 0.001
                        else:
                            # 'count'
                            meanResult = float(mxrange.count())

                    # Both results are equal
                    finalvalue = meanResult

                else:  # aggregation_type is 'mean' or 'cumulate'
                    if mxnodata.count() == 0:
                        finalvalue = None
                        meanResult = None
                    else:
                        if aggregate['aggregation_type'] == 'mean':
                            # 'mean'
                            meanResult = mxnodata.mean()
                        else:
                            # 'cumulate'
                            meanResult = mxnodata.sum()

                        finalvalue = (meanResult * scale_factor + scale_offset)

                # Assign results
                single_result['filename'] = infile
                single_result['meanvalue_noscaling'] = meanResult
                single_result['meanvalue'] = finalvalue

            else:
                logger.debug('ERROR: raster file does not exist - %s' % infile)

            uniqueFilesValues.append(single_result)

        # Define a dictionary to associate filenames/values
        files_to_values_dict = dict(
            (x['filename'], x['meanvalue']) for x in uniqueFilesValues)

        # Prepare array for result
        resultDatesValues = []

        # Returns a list of 'filenames', 'dates', 'values'
        for mydate in dates_list:

            my_result = {'date': datetime.date.today(), 'meanvalue': nodata}

            # Assign the date
            my_result['date'] = mydate
            # Assign the filename
            my_filename = dates_to_files_dict[mydate]

            # Map from array of Values
            my_result['meanvalue'] = files_to_values_dict[my_filename]

            # Map from array of dates
            resultDatesValues.append(my_result)

        try:
            shutil.rmtree(tmpdir)
        except Exception:
            logger.debug('ERROR: Error in deleting tmpdir. Exit')

        # Return result
        return resultDatesValues
    else:
        logger.debug(
            'ERROR: product not registered in the products table! - %s %s %s' %
            (productcode, subproductcode, version))
        return []
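A hedged usage sketch for getTimeseries: the product, version and mapset codes appear in the examples above, while the subproduct code, the dates and the geometry are placeholders (getFilesList, not shown here, defines the accepted date format):

    from osgeo import ogr

    # Hypothetical area of interest, in EPSG:4326 like the output layer above
    geom = ogr.CreateGeometryFromWkt(
        'POLYGON((10.0 0.0, 11.0 0.0, 11.0 1.0, 10.0 1.0, 10.0 0.0))')
    series = getTimeseries('vgt-ndvi', 'ndv', 'proba-v2.1', 'SPOTV-Africa-1km',
                           geom, '2020-01-01', '2020-03-31',
                           {'aggregation_type': 'mean'})
    for point in series:
        print(point['date'], point['meanvalue'])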
Example #13
def getTimeseries_green(productcode, subproductcode, version, mapsetcode, wkt,
                        start_date, end_date, aggregate):
    #    Extract a timeseries from a list of files and return it as a JSON object.
    #    It applies to a single dataset (prod/sprod/version/mapset) between 2 dates.
    #    Several types of aggregation are foreseen:
    #
    #       mean :      Sum(Xi)/N(Xi)        -> min/max not considered          e.g. Rain
    #       cumulate:   Sum(Xi)              -> min/max not considered          e.g. Fire
    #
    #       count:      N(Xi where min < Xi < max)                              e.g. Vegetation anomalies
    #       surface:    count * PixelArea                                       e.g. Water Bodies
    #       percent:    count/Ntot                                              e.g. Vegetation anomalies

    ogr.UseExceptions()
    theGeomWkt = ' '.join(wkt.strip().split())
    geom = Geometry(wkt=str(theGeomWkt), srs=4326)

    # Get Mapset Info
    mapset_info = querydb.get_mapset(mapsetcode=mapsetcode)

    # Compute pixel area by converting degrees to km (lat shift x long shift)
    pixelArea = abs(mapset_info.pixel_shift_lat) * abs(
        mapset_info.pixel_shift_long) * 12544.0

    # Get Product Info
    product_info = querydb.get_product_out_info(productcode=productcode,
                                                subproductcode=subproductcode,
                                                version=version)
    if len(product_info) > 0:
        scale_factor = 0
        scale_offset = 0
        nodata = 0
        date_format = ''
        for row in product_info:
            scale_factor = row.scale_factor
            scale_offset = row.scale_offset
            nodata = row.nodata
            unit = row.unit
            date_format = row.date_format

        [list_files,
         dates_list] = getFilesList(productcode, subproductcode, version,
                                    mapsetcode, date_format, start_date,
                                    end_date)

        # Build a dictionary mapping dates to filenames
        dates_to_files_dict = dict(zip(dates_list, list_files))

        # Generate unique list of files
        unique_list = set(list_files)
        uniqueFilesValues = []

        for infile in unique_list:
            single_result = {
                'filename': '',
                'meanvalue_noscaling': nodata,
                'meanvalue': None
            }

            if os.path.isfile(infile):
                try:
                    mx = []
                    with Raster(infile) as img:
                        # Assign nodata from prod_info
                        img._nodata = nodata
                        with img.clip(geom) as clipped:
                            # Save clipped image (for debug only)
                            # clipped.save('/data/processing/exchange/clipped_test.tif')
                            mx = clipped.array()

                    nodata_array_masked = ma.masked_equal(mx, nodata)
                    merged_mask = ma.mask_or(ma.getmask(mx),
                                             ma.getmask(nodata_array_masked))
                    mxnodata = ma.masked_array(ma.getdata(mx), merged_mask)

                    if aggregate['aggregation_type'] in ('count', 'percent', 'surface'):

                        min_val = aggregate['aggregation_min']
                        max_val = aggregate['aggregation_max']
                        # Scale threshold from physical to digital value
                        min_val_scaled = (min_val -
                                          scale_offset) / scale_factor
                        max_val_scaled = (max_val -
                                          scale_offset) / scale_factor
                        mxrange = ma.masked_outside(mxnodata, min_val_scaled,
                                                    max_val_scaled)

                        if aggregate['aggregation_type'] == 'percent':
                            # 'percent'
                            meanResult = float(mxrange.count()) / float(
                                mxnodata.count()) * 100

                        elif aggregate['aggregation_type'] == 'surface':
                            # 'surface'
                            meanResult = float(mxrange.count()) * pixelArea
                        else:
                            # 'count'
                            meanResult = float(mxrange.count())

                        # Both results are equal
                        finalvalue = meanResult

                    else:  # aggregation_type is 'mean' or 'cumulate'
                        if mxnodata.count() == 0:
                            meanResult = 0.0
                        else:
                            if aggregate['aggregation_type'] == 'mean':
                                # 'mean'
                                meanResult = mxnodata.mean()
                            else:
                                # 'cumulate'
                                meanResult = mxnodata.sum()

                        # Scale to physical value
                        finalvalue = (meanResult * scale_factor + scale_offset)

                    # Assign results
                    single_result['filename'] = infile
                    single_result['meanvalue_noscaling'] = meanResult
                    single_result['meanvalue'] = finalvalue

                except Exception as e:
                    logger.debug('ERROR: clipping - %s' % (e))
                    # sys.exit (1)
            else:
                logger.debug('ERROR: raster file does not exist - %s' % infile)
                # sys.exit (1)

            uniqueFilesValues.append(single_result)

        # Define a dictionary to associate filenames/values
        files_to_values_dict = dict(
            (x['filename'], x['meanvalue']) for x in uniqueFilesValues)

        # Prepare array for result
        resultDatesValues = []

        # Returns a list of 'filenames', 'dates', 'values'
        for mydate in dates_list:
            # my_result = {'date': datetime.date.today(), 'filename':'', 'meanvalue':nodata}
            my_result = {'date': datetime.date.today(), 'meanvalue': nodata}

            # Assign the date
            my_result['date'] = mydate
            # Assign the filename
            my_filename = dates_to_files_dict[mydate]
            # my_result['filename'] = my_filename
            # Map from array of Values
            my_result['meanvalue'] = files_to_values_dict[my_filename]

            # Map from array of dates
            resultDatesValues.append(my_result)

        return resultDatesValues
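The masking chain above merges the clip mask with the nodata mask before aggregating. Conceptually, with plain numpy.ma and illustrative values:

    import numpy as np
    import numpy.ma as ma

    mx = ma.masked_array(np.array([[5, 0], [7, 0]]),
                         mask=[[False, True], [False, True]])   # clip mask
    nodata_masked = ma.masked_equal(mx, 0)                      # nodata mask
    merged = ma.mask_or(ma.getmask(mx), ma.getmask(nodata_masked))
    mxnodata = ma.masked_array(ma.getdata(mx), merged)          # valid pixels only
    print(mxnodata.mean())                                      # 6.0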
Example #14
    def __init__(self, mapset_code):
        self.mapset_code = mapset_code
        kwargs = {'mapsetcode': self.mapset_code}
        self._mapset = querydb.get_mapset(**kwargs)
        if not self._mapset:
            raise NoMapsetFound(kwargs)
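NoMapsetFound is not defined in any of these snippets; a minimal sketch of what such an exception might look like (hypothetical, for illustration only):

    class NoMapsetFound(Exception):
        """Raised when querydb.get_mapset returns no record for the given kwargs."""
        pass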
Example #17
    def test_data_management(self):
        import json

        db_products = querydb.get_products(activated=True)

        if len(db_products) > 0:
            products_dict_all = []
            # loop the products list
            for row in db_products:
                prod_dict = functions.row2dict(row)
                productcode = prod_dict['productcode']
                version = prod_dict['version']

                p = Product(product_code=productcode, version=version)
                # print productcode
                # does the product have mapsets AND subproducts?
                all_prod_mapsets = p.mapsets
                all_prod_subproducts = p.subproducts
                if len(all_prod_mapsets) > 0 and len(all_prod_subproducts) > 0:
                    prod_dict['productmapsets'] = []
                    for mapset in all_prod_mapsets:
                        mapset_dict = []
                        # print mapset
                        mapset_info = querydb.get_mapset(mapsetcode=mapset,
                                                         allrecs=False)
                        # if mapset_info.__len__() > 0:
                        mapset_dict = functions.row2dict(mapset_info)
                        # else:
                        #   mapset_dict['mapsetcode'] = mapset
                        mapset_dict['mapsetdatasets'] = []
                        all_mapset_datasets = p.get_subproducts(mapset=mapset)
                        for subproductcode in all_mapset_datasets:
                            # print 'productcode: ' + productcode
                            # print 'version: ' + version
                            # print 'subproductcode: ' + subproductcode
                            dataset_info = querydb.get_subproduct(
                                productcode=productcode,
                                version=version,
                                subproductcode=subproductcode)
                            # print dataset_info
                            # dataset_info = querydb.db.product.get(productcode, version, subproductcode)
                            # dataset_dict = {}
                            if dataset_info is not None:
                                dataset_dict = functions.row2dict(dataset_info)
                                # dataset_dict = dataset_info.__dict__
                                # del dataset_dict['_labels']
                                if hasattr(dataset_info, 'frequency_id'):
                                    if dataset_info.frequency_id in ('e15minute', 'e30minute'):
                                        dataset_dict['nodisplay'] = 'no_minutes_display'
                                    # To be implemented in dataset.py
                                    elif dataset_info.frequency_id == 'e1year':
                                        dataset_dict['nodisplay'] = 'no_minutes_display'
                                    else:
                                        dataset = p.get_dataset(
                                            mapset=mapset,
                                            sub_product_code=subproductcode)
                                        completeness = dataset.get_dataset_normalized_info()
                                        dataset_dict['datasetcompleteness'] = completeness
                                        dataset_dict['nodisplay'] = 'false'

                                    dataset_dict['mapsetcode'] = mapset_dict['mapsetcode']
                                    dataset_dict['mapset_descriptive_name'] = mapset_dict['descriptive_name']

                                    mapset_dict['mapsetdatasets'].append(dataset_dict)
                        prod_dict['productmapsets'].append(mapset_dict)
                products_dict_all.append(prod_dict)

            prod_json = json.dumps(products_dict_all,
                                   ensure_ascii=False,
                                   sort_keys=True,
                                   indent=4,
                                   separators=(', ', ': '))

            datamanagement_json = '{"success":"true", "total":'\
                                  + str(len(db_products))\
                                  + ',"products":'+prod_json+'}'

        else:
            datamanagement_json = '{"success":false, "error":"No data sets defined!"}'
Example #18
def push_data_ftp(dry_run=False,
                  user=None,
                  psw=None,
                  url=None,
                  trg_dir=None,
                  masked=True):

    #   Synchronize data towards an ftp server (JRC only).
    #   Since the new srv-ies-ftp.jrc.it ftp was set up, it replaces the bash script mirror_to_ftp.sh.
    #   Configuration:  it looks at all 'non-masked' products and pushes them.
    #                   For the mapsets, find what is in the filesystem, and push only the 'largest'.
    #   It uses a command like:
    #       lftp -e "mirror -RLe /data/processing/vgt-ndvi/sv2-pv2.1/SPOTV-Africa-1km/derived/10dmax-linearx2/
    #                            /narma/eStation_2.0/processing/vgt-ndvi/sv2-pv2.1/SPOTV-Africa-1km/derived/10dmax-linearx2/;exit"
    #                            -u narma:JRCVRw2960 sftp://srv-ies-ftp.jrc.it"" >> /eStation2/log/push_data_ftp.log
    #

    spec_logger = log.my_logger('apps.es2system.push_data_ftp')

    try:
        from config import server_ftp
    except ImportError:
        logger.warning('Configuration file for ftp sync not found. Exit')
        return 1

    if user is None:
        user = server_ftp.server['user']
    if psw is None:
        psw = server_ftp.server['psw']
    if url is None:
        url = server_ftp.server['url']
    if trg_dir is None:
        trg_dir = server_ftp.server['data_dir']

    # Create an ad-hoc file for the lftp command output (beside the standard logger)
    logfile = es_constants.es2globals['log_dir'] + 'push_data_ftp.log'
    message = time.strftime(
        "%Y-%m-%d %H:%M") + ' INFO: Running the ftp sync now ... \n'

    logger.debug("Entering routine %s" % 'push_data_ftp')

    # Loop over 'not-masked' products
    products = querydb.get_products(masked=False)
    # products = products[21:23]           # test a subset
    for row in products:

        prod_dict = functions.row2dict(row)
        productcode = prod_dict['productcode']
        version = prod_dict['version']
        spec_logger.info('Working on product {}/{}'.format(
            productcode, version))

        # TEMP - For testing only
        # if productcode!='vgt-ndvi' or version !='sv2-pv2.2':
        #     continue

        # Check it if is in the list of 'exclusions' defined in ./config/server_ftp.py
        key = '{}/{}'.format(productcode, version)
        skip = False
        if key in server_ftp.exclusions:
            skip = True
            logger.debug('Do not sync for {}/{}'.format(productcode, version))

        p = Product(product_code=productcode, version=version)

        all_prod_mapsets = p.mapsets
        all_prod_subproducts = p.subproducts

        # Check there is at least one mapset and one subproduct
        if len(all_prod_mapsets) > 0 and len(all_prod_subproducts) > 0 and not skip:

            # In case of several mapsets, check if there is a 'larger' one
            if len(all_prod_mapsets) > 1:
                mapset_to_use = []
                for my_mapset in all_prod_mapsets:
                    mapset_info = querydb.get_mapset(mapsetcode=my_mapset,
                                                     allrecs=False)
                    if hasattr(mapset_info, "mapsetcode"):
                        my_mapobj = MapSet()
                        my_mapobj.assigndb(my_mapset)

                        larger_mapset = my_mapobj.get_larger_mapset()
                        if larger_mapset is not None:
                            if larger_mapset not in mapset_to_use:
                                mapset_to_use.append(larger_mapset)
                        else:
                            if my_mapset not in mapset_to_use:
                                mapset_to_use.append(my_mapset)
            else:
                mapset_to_use = all_prod_mapsets
            # Loop over existing mapset
            for mapset in mapset_to_use:
                all_mapset_datasets = p.get_subproducts(mapset=mapset)

                # Loop over existing subproducts
                for subproductcode in all_mapset_datasets:
                    # Get info - and ONLY for NOT masked products
                    dataset_info = querydb.get_subproduct(
                        productcode=productcode,
                        version=version,
                        subproductcode=subproductcode,
                        masked=masked)  # -> TRUE means only NOT masked sprods

                    if dataset_info is not None:
                        dataset_dict = functions.row2dict(dataset_info)
                        dataset_dict['mapsetcode'] = mapset

                        logger.debug('Working on {}/{}/{}/{}'.format(
                            productcode, version, mapset, subproductcode))

                        subdir = functions.set_path_sub_directory(
                            productcode, subproductcode,
                            dataset_dict['product_type'], version, mapset)
                        source = data_dir + subdir
                        target = trg_dir + subdir

                        # command = 'lftp -e "mirror -RLe {} {};exit" -u {}:{} {}"" >> {}'.format(source,target,user,psw,url,logfile)
                        command = 'lftp -e "mirror -RLe {} {};exit" -u {}:{} {}"" >> /dev/null'.format(
                            source, target, user, psw, url)
                        logger.debug("Executing %s" % command)
                        spec_logger.info(
                            'Working on mapset/subproduct {}/{} \n'.format(
                                mapset, subproductcode))

                        # return
                        try:
                            status = os.system(command)
                            if status:
                                logger.error("Error in executing %s" % command)
                                spec_logger.error("Error in executing %s" %
                                                  command)
                        except Exception:
                            logger.error(
                                'Error in executing command: {}'.format(
                                    command))
                            spec_logger.error(
                                'Error in executing command: {}'.format(
                                    command))
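A hedged invocation sketch; per the body above, omitted credentials fall back to the values in config/server_ftp.py:

    # Push all non-masked products to the configured ftp target;
    # returns 1 when the ftp configuration module cannot be imported.
    push_data_ftp(dry_run=False, masked=True)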