def data_ids(self, data_ids):
        """
        Constrain a query by data IDs.

        Args:
            data_ids (list of bytes): The data IDs to constrain by.
        Returns:
            A `pygw.query.query_constraints.QueryConstraints` with the given data IDs.
        """
        byte_array_class = JavaClass("[B", java_gateway._gateway_client)
        j_data_ids = java_gateway.new_array(byte_array_class, len(data_ids))
        for idx, data_id in enumerate(data_ids):
            j_data_ids[idx] = _pbat.to_java(data_id)
        j_qc = self._java_ref.dataIds(j_data_ids)
        return QueryConstraints(j_qc)
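
A hedged usage sketch for `data_ids`: the `QueryBuilder`/`constraints_factory` entry points below follow typical pygw usage but are assumptions, not verified against a specific pygw release.

# Hypothetical usage -- entry points are assumptions; check the pygw docs.
from pygw.query import QueryBuilder  # assumed import path

qb = QueryBuilder()
constraints = qb.constraints_factory().data_ids([b"feature-1", b"feature-2"])
query = qb.constraints(constraints).build()
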
    def jdbc(self, url, table, mode=None, properties=None):
        """Saves the content of the :class:`DataFrame` to an external database table via JDBC.

        .. note:: Don't create too many partitions in parallel on a large cluster; \
        otherwise Spark might crash your external database systems.

        :param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
        :param table: Name of the table in the external database.
        :param mode: specifies the behavior of the save operation when data already exists.

            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param properties: a dictionary of JDBC database connection arguments. Normally at
                           least properties "user" and "password" with their corresponding values.
                           For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
        """
        if properties is None:
            properties = dict()
        jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        self._jwrite.mode(mode).jdbc(url, table, jprop)
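
For reference, a minimal call against the writer above; the URL, table name, and credentials are placeholders, and the matching JDBC driver must be on the Spark classpath.

df.write.jdbc(
    url="jdbc:postgresql://localhost:5432/mydb",
    table="public.events",
    mode="append",
    properties={"user": "SYSTEM", "password": "mypassword"},
)
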
Example #3
    def convert(self, object, gateway_client):
        JavaRasterizerOptions = JavaClass("geotrellis.raster.rasterize.Rasterizer$Options$", gateway_client)
        if (object.sampleType == 'PixelIsPoint'):
            sample = JavaClass("geotrellis.raster.PixelIsPoint$", gateway_client)
        elif (object.sampleType == 'PixelIsArea'):
            sample = JavaClass("geotrellis.raster.PixelIsArea$", gateway_client)
        else:
            raise TypeError("Could not convert {} to geotrellis.raster.PixelSampleType".format(object.sampleType))

        sample_instance = sample.__getattr__("MODULE$")
        return JavaRasterizerOptions().apply(object.includePartial, sample_instance)
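
This `convert` method follows py4j's input-converter protocol: an object exposing `can_convert(object)` and `convert(object, gateway_client)`, registered via `py4j.protocol.register_input_converter`, is applied automatically whenever a matching Python argument crosses the gateway. A minimal sketch, assuming the method above lives on a class named `RasterizerOptionsConverter` (the class name is hypothetical):

from py4j.protocol import register_input_converter

class RasterizerOptionsConverter(object):
    def can_convert(self, object):
        # Claim only objects that look like rasterizer options.
        return hasattr(object, 'sampleType') and hasattr(object, 'includePartial')

    def convert(self, object, gateway_client):
        ...  # body as shown above

register_input_converter(RasterizerOptionsConverter())
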
Example #4
def _generate_procedural_method_code(gateway, client, mapop, name, signatures,
                                     instance):
    methods = _generate_methods(instance, signatures)

    # print("working on " + name)

    if (name == "add"):
        pass

    jvm = gateway.jvm
    cls = JavaClass(mapop, gateway_client=client)

    is_export = is_remote() and is_instance_of(gateway, cls, jvm.ExportMapOp)

    if len(methods) == 0:
        return None

    signature, self_method = _generate_proc_signature(methods)

    generator = CodeGenerator()
    # Signature
    generator.write("def " + name + "(" + signature + "):", post_indent=True)

    # code += "    print('" + name + "')\n"
    _generate_imports(generator, mapop, is_export)
    _generate_calls(generator, methods, is_export=is_export)
    _generate_run(generator, instance, is_export)
    # print(code)

    code = generator.generate()
    code = code.replace("self", self_method)

    generator.begin()
    for line in code.split("\n"):
        generator.write(line)
    return {name: generator}
Example #5
    def jdbc(self,
             url,
             table,
             column=None,
             lowerBound=None,
             upperBound=None,
             numPartitions=None,
             predicates=None,
             properties=None):
        """
        Construct a :class:`DataFrame` representing the database table named ``table``
        accessible via JDBC URL ``url`` and connection ``properties``.

        Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
        are needed when ``column`` is specified.

        If both ``column`` and ``predicates`` are specified, ``column`` will be used.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        table : str
            the name of the table
        column : str, optional
            alias of ``partitionColumn`` option. Refer to ``partitionColumn`` in
            `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html#data-source-option>`_
            in the version you use.
        predicates : list, optional
            a list of expressions suitable for inclusion in WHERE clauses;
            each one defines one partition of the :class:`DataFrame`
        properties : dict, optional
            a dictionary of JDBC database connection arguments. Normally at
            least properties "user" and "password" with their corresponding values.
            For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }

        Other Parameters
        ----------------
        Extra options
            For the extra options, refer to
            `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html#data-source-option>`_
            in the version you use.

            .. # noqa

        Notes
        -----
        Don't create too many partitions in parallel on a large cluster;
        otherwise Spark might crash your external database systems.

        Returns
        -------
        :class:`DataFrame`
        """
        if properties is None:
            properties = dict()
        jprop = JavaClass("java.util.Properties",
                          self._spark._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        if column is not None:
            assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
            assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
            assert numPartitions is not None, \
                "numPartitions can not be None when ``column`` is specified"
            return self._df(
                self._jreader.jdbc(url, table, column, int(lowerBound),
                                   int(upperBound), int(numPartitions), jprop))
        if predicates is not None:
            gateway = self._spark._sc._gateway
            jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String,
                                         predicates)
            return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
        return self._df(self._jreader.jdbc(url, table, jprop))
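
A minimal read against the method above, splitting the table into eight partitions on a numeric column; URL, table, bounds, and credentials are placeholders.

df = spark.read.jdbc(
    url="jdbc:postgresql://localhost:5432/mydb",
    table="public.events",
    column="id",
    lowerBound=1,
    upperBound=1000000,
    numPartitions=8,
    properties={"user": "SYSTEM", "password": "mypassword"},
)
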
Example #6
    def convert(self, object, gateway_client):
        JavaSet = JavaClass("java.util.HashSet", gateway_client)
        java_set = JavaSet()
        for element in object:
            java_set.add(element)
        return java_set
    def jdbc(self,
             url,
             table,
             column=None,
             lowerBound=None,
             upperBound=None,
             numPartitions=None,
             predicates=None,
             properties=None):
        """
        Construct a :class:`DataFrame` representing the database table named ``table``
        accessible via JDBC URL ``url`` and connection ``properties``.

        Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
        are needed when ``column`` is specified.

        If both ``column`` and ``predicates`` are specified, ``column`` will be used.

        .. note:: Don't create too many partitions in parallel on a large cluster; \
        otherwise Spark might crash your external database systems.

        :param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
        :param table: the name of the table
        :param column: the name of an integer column that will be used for partitioning;
                       if this parameter is specified, then ``numPartitions``, ``lowerBound``
                       (inclusive), and ``upperBound`` (exclusive) will form partition strides
                       for generated WHERE clause expressions used to split the column
                       ``column`` evenly
        :param lowerBound: the minimum value of ``column`` used to decide partition stride
        :param upperBound: the maximum value of ``column`` used to decide partition stride
        :param numPartitions: the number of partitions
        :param predicates: a list of expressions suitable for inclusion in WHERE clauses;
                           each one defines one partition of the :class:`DataFrame`
        :param properties: a dictionary of JDBC database connection arguments. Normally at
                           least properties "user" and "password" with their corresponding values.
                           For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
        :return: a DataFrame
        """
        if properties is None:
            properties = dict()
        jprop = JavaClass("java.util.Properties",
                          self._spark._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        if column is not None:
            assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
            assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
            assert numPartitions is not None, \
                "numPartitions can not be None when ``column`` is specified"
            return self._df(
                self._jreader.jdbc(url, table, column, int(lowerBound),
                                   int(upperBound), int(numPartitions), jprop))
        if predicates is not None:
            gateway = self._spark._sc._gateway
            jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String,
                                         predicates)
            return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
        return self._df(self._jreader.jdbc(url, table, jprop))
Example #8
    def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
             predicates=None, properties=None):
        """
        Construct a :class:`DataFrame` representing the database table named ``table``
        accessible via JDBC URL ``url`` and connection ``properties``.

        Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
        are needed when ``column`` is specified.

        If both ``column`` and ``predicates`` are specified, ``column`` will be used.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        url : str
            a JDBC URL of the form ``jdbc:subprotocol:subname``
        table : str
            the name of the table
        column : str, optional
            the name of a column of numeric, date, or timestamp type
            that will be used for partitioning;
            if this parameter is specified, then ``numPartitions``, ``lowerBound``
            (inclusive), and ``upperBound`` (exclusive) will form partition strides
            for generated WHERE clause expressions used to split the column
            ``column`` evenly
        lowerBound : str or int, optional
            the minimum value of ``column`` used to decide partition stride
        upperBound : str or int, optional
            the maximum value of ``column`` used to decide partition stride
        numPartitions : int, optional
            the number of partitions
        predicates : list, optional
            a list of expressions suitable for inclusion in WHERE clauses;
            each one defines one partition of the :class:`DataFrame`
        properties : dict, optional
            a dictionary of JDBC database connection arguments. Normally at
            least properties "user" and "password" with their corresponding values.
            For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }

        Notes
        -----
        Don't create too many partitions in parallel on a large cluster;
        otherwise Spark might crash your external database systems.

        Returns
        -------
        :class:`DataFrame`
        """
        if properties is None:
            properties = dict()
        jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        if column is not None:
            assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
            assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
            assert numPartitions is not None, \
                "numPartitions can not be None when ``column`` is specified"
            return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
                                               int(numPartitions), jprop))
        if predicates is not None:
            gateway = self._spark._sc._gateway
            jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
            return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
        return self._df(self._jreader.jdbc(url, table, jprop))
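
The `predicates` path above yields one partition per expression; a sketch with placeholder connection details, where the two WHERE clauses cover disjoint slices of the table.

df = spark.read.jdbc(
    url="jdbc:postgresql://localhost:5432/mydb",
    table="public.events",
    predicates=[
        "created_at <  DATE '2020-01-01'",
        "created_at >= DATE '2020-01-01'",
    ],
    properties={"user": "SYSTEM", "password": "mypassword"},
)
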
Example #9
def generate(mrgeo, gateway, gateway_client):
    global _initialized

    if _initialized:
        # Make sure the objects have the proper code in them, in case a new mrgeo object has been created somewhere.
        for method_name, code in _mapop_code.items():
            if not hasattr(mrgeo, method_name):
                setattr(mrgeo, method_name,
                        code.compile(method_name).get(method_name))
        for method_name, code in _rastermapop_code.items():
            if not hasattr(RasterMapOp, method_name):
                setattr(RasterMapOp, method_name,
                        code.compile(method_name).get(method_name))
        for method_name, code in _vectormapop_code.items():
            if not hasattr(VectorMapOp, method_name):
                setattr(VectorMapOp, method_name,
                        code.compile(method_name).get(method_name))

        return

    jvm = gateway.jvm
    client = gateway_client
    java_import(jvm, "org.mrgeo.job.*")
    java_import(jvm, "org.mrgeo.mapalgebra.MapOpFactory")
    java_import(jvm, "org.mrgeo.mapalgebra.raster.RasterMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.vector.VectorMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.raster.MrsPyramidMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.IngestImageMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.ExportMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.PointsMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.MapOp")
    java_import(jvm, "org.mrgeo.utils.SparkUtils")
    java_import(jvm, "org.mrgeo.hdfs.utils.HadoopFileUtils")

    java_import(jvm, "org.mrgeo.data.*")

    mapops = jvm.MapOpFactory.getMapOpClasses()

    for rawmapop in mapops:
        mapop = str(rawmapop.getCanonicalName().rstrip('$'))

        # Skip IngestImageMapOp because there is an explicit method defined in
        # MrGeo class for ingesting an image, and _get_instance_type will raise
        # an exception when run against that map op.
        if not mapop.endswith(".IngestImageMapOp") and not mapop.endswith(
                ".InlineCsvMapOp"):
            java_import(jvm, mapop)

            cls = JavaClass(mapop, gateway_client=client)

            signatures = jvm.MapOpFactory.getSignatures(mapop)
            instance = _get_instance_type(signatures, gateway, cls, mapop)
            # for s in signatures:
            #     print("signature: " + s)

            for method in cls.register():
                ooCodes = None
                procCodes = None
                if method is not None:
                    name = method.strip().lower()
                    if len(name) > 0:
                        if name in _reserved:
                            # print("reserved: " + name)
                            continue
                        elif name in _operators:
                            # print("operator: " + name)
                            ooCodes = _generate_operator_code(
                                mapop, name, signatures, instance)
                        else:
                            # print("method: " + name)
                            ooCodes = _generate_oo_method_code(
                                gateway, client, mapop, name, signatures,
                                instance)
                            procCodes = _generate_procedural_method_code(
                                gateway, client, mapop, name, signatures,
                                instance)

                if ooCodes is not None:
                    for method_name, code in ooCodes.items():
                        # if method_name == "export":
                        #     print(code.generate(), file=sys.stderr)

                        if instance == 'RasterMapOp':
                            _rastermapop_code[method_name] = code
                            setattr(RasterMapOp, method_name,
                                    code.compile(method_name).get(method_name))
                        elif instance == "VectorMapOp":
                            _vectormapop_code[method_name] = code
                            setattr(VectorMapOp, method_name,
                                    code.compile(method_name).get(method_name))
                        elif is_instance_of(gateway, cls, jvm.MapOp):
                            # _mapop_code[method_name] = code
                            _rastermapop_code[method_name] = code
                            setattr(RasterMapOp, method_name,
                                    code.compile(method_name).get(method_name))
                            _vectormapop_code[method_name] = code
                            setattr(VectorMapOp, method_name,
                                    code.compile(method_name).get(method_name))

                if procCodes is not None:
                    for method_name, code in procCodes.items():
                        print(method_name)
                        _mapop_code[method_name] = code
                        setattr(mrgeo, method_name,
                                code.compile(method_name).get(method_name))

    _initialized = True
    print("add: " + str(hasattr(mrgeo, "add")))
Example #10
    def convert(self, object, gateway_client):
        ArrayList = JavaClass("java.util.ArrayList", gateway_client)
        java_list = ArrayList()
        for element in object:
            java_list.add(element)
        return java_list
Example #11
    def convert(self, obj, gateway_client):
        name = obj.value

        if name == 'NearestNeighbor':
            sample = JavaClass("geotrellis.raster.resample.NearestNeighbor$",
                               gateway_client)
        elif name == 'Bilinear':
            sample = JavaClass("geotrellis.raster.resample.Bilinear$",
                               gateway_client)
        elif name == 'CubicConvolution':
            sample = JavaClass("geotrellis.raster.resample.CubicConvolution$",
                               gateway_client)
        elif name == 'CubicSpline':
            sample = JavaClass("geotrellis.raster.resample.CubicSpline$",
                               gateway_client)
        elif name == 'Lanczos':
            sample = JavaClass("geotrellis.raster.resample.Lanczos$",
                               gateway_client)
        elif name == 'Average':
            sample = JavaClass("geotrellis.raster.resample.Average$",
                               gateway_client)
        elif name == 'Mode':
            sample = JavaClass("geotrellis.raster.resample.Mode$",
                               gateway_client)
        elif name == 'Median':
            sample = JavaClass("geotrellis.raster.resample.Median$",
                               gateway_client)
        elif name == 'Max':
            sample = JavaClass("geotrellis.raster.resample.Max$",
                               gateway_client)
        elif name == 'Min':
            sample = JavaClass("geotrellis.raster.resample.Min$",
                               gateway_client)
        else:
            raise TypeError(
                "{} could not be converted to a GeoTrellis ResampleMethod.".format(name))

        return sample.__getattr__("MODULE$")
Example #12
    def convert(self, obj, gateway_client):
        Date = JavaClass("java.sql.Date", gateway_client)
        return Date.valueOf(obj.strftime("%Y-%m-%d"))
Example #13
    def convert(self, obj, gateway_client):
        JavaCellType = JavaClass("geotrellis.raster.CellType", gateway_client)
        return JavaCellType.fromName(obj.value)
Example #14
    def convert(self, obj, gateway_client):
        ScalaSourceInfo = JavaClass("geopyspark.geotrellis.vlm.SourceInfo",
                                    gateway_client)

        return ScalaSourceInfo.apply(obj.source, obj.source_to_target_band)
Example #15
    def convert(self, obj, gateway_client):

        ScalaSpatialStrategy = JavaClass(
            "geopyspark.geotrellis.SpatialPartitionStrategy", gateway_client)

        return ScalaSpatialStrategy.apply(obj.num_partitions, obj.bits)
Example #16
    def convert(self, obj, gateway_client):

        ScalaHashStrategy = JavaClass(
            "geopyspark.geotrellis.HashPartitionStrategy", gateway_client)

        return ScalaHashStrategy.apply(obj.num_partitions)
Example #17
def convert(object, gateway_client):
    ArrayList = JavaClass("java.util.ArrayList", gateway_client)
    java_list = ArrayList()
    java_list.addAll(object)
    return java_list
Example #18
    def convert(self, x, gateway_client):
        """We assume that a class with the same name exists in jnegmas."""
        class_name = f'j{x.__class__.__module__}.{x.__class__.__name__}'
        java_object = JavaClass(class_name, gateway_client)()
        java_object.fill(x.to_java())
        return java_object
Example #19
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession, SQLContext
from py4j.java_gateway import JavaClass
import json
import sys
import os
import uuid

if __name__ == "__main__":
    print("*" * 80)
    spark = SparkSession.builder.appName(
        "PySpark Predix Eventhub basic read example").config(
            "spark.some.config.option", "some-value").getOrCreate()
    sqlContext = SQLContext(spark)
    sparkContext = spark.sparkContext
    properties = json.load(open('predix-eventhub-read-example.properties'))
    jprop = JavaClass("java.util.Properties",
                      sparkContext._gateway._gateway_client)()
    for k in properties:
        jprop.setProperty(k, properties[k])

    javaEventHubReceiver = JavaClass(
        "com.ge.predix.arf.connector.rtcommon.stream.JavaEventHubReceiver",
        sparkContext._gateway._gateway_client)(jprop)
    durationObject = JavaClass("org.apache.spark.streaming.Duration",
                               sparkContext._gateway._gateway_client)(1000)
    jssc = JavaClass(
        "org.apache.spark.streaming.api.java.JavaStreamingContext",
        sparkContext._gateway._gateway_client)(sparkContext._jsc,
                                               durationObject)
    jreceiver = jssc.receiverStream(javaEventHubReceiver)
    transformerObject = JavaClass(
        "com.ge.predix.arf.connector.stream.StreamTSTransformerObject",
Example #20
def scala_companion(class_name, gateway_client=None):
    """Returns a reference to a Scala companion object."""
    gateway_client = (gateway_client or
                      get_spark_context()._gateway._gateway_client)
    return JavaClass(class_name + "$", gateway_client).__getattr__("MODULE$")
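
Usage is then a single call; the class name below is a placeholder for any Scala object reachable on the JVM classpath.

functions = scala_companion("org.apache.spark.sql.functions")
print(functions.toString())
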
Example #21
    def _load_mapops(self):
        jvm = self.gateway.jvm
        client = self.gateway._gateway_client
        java_import(jvm, "org.mrgeo.job.*")
        java_import(jvm, "org.mrgeo.mapalgebra.MapOpFactory")
        java_import(jvm, "org.mrgeo.mapalgebra.raster.RasterMapOp")
        java_import(jvm, "org.mrgeo.mapalgebra.raster.MrsPyramidMapOp")
        java_import(jvm, "org.mrgeo.mapalgebra.ExportMapOp")
        java_import(jvm, "org.mrgeo.mapalgebra.vector.VectorMapOp")
        java_import(jvm, "org.mrgeo.mapalgebra.MapOp")
        java_import(jvm, "org.mrgeo.utils.SparkUtils")

        java_import(jvm, "org.mrgeo.data.*")

        mapops = jvm.MapOpFactory.getMapOpClasses()

        for rawmapop in mapops:
            mapop = str(rawmapop.getCanonicalName().rstrip('$'))

            java_import(jvm, mapop)

            cls = JavaClass(mapop, gateway_client=client)

            if self.is_instance_of(cls, jvm.RasterMapOp):
                instance = 'RasterMapOp'
            elif self.is_instance_of(cls, jvm.VectorMapOp):
                instance = 'VectorMapOp'
            elif self.is_instance_of(cls, jvm.MapOp):
                instance = "MapOp"
            else:
                # raise Exception("mapop (" + mapop + ") is not a RasterMapOp, VectorMapOp, or MapOp")
                print("mapop (" + mapop +
                      ") is not a RasterMapOp, VectorMapOp, or MapOp")
                continue

            signatures = jvm.MapOpFactory.getSignatures(mapop)

            for method in cls.register():
                codes = None
                if method is not None:
                    name = method.strip().lower()
                    if len(name) > 0:
                        if name in self.reserved:
                            # print("reserved: " + name)
                            continue
                        elif name in self.operators:
                            # print("operator: " + name)
                            codes = self._generate_operator_code(
                                mapop, name, signatures, instance)
                        else:
                            # print("method: " + name)
                            codes = self._generate_method_code(
                                mapop, name, signatures, instance)

                if codes is not None:
                    for method_name, code in codes.items():
                        # print(code)

                        compiled = {}
                        exec(code, compiled)

                        if instance == 'RasterMapOp':
                            setattr(RasterMapOp, method_name,
                                    compiled.get(method_name))
                        elif instance == "VectorMapOp":
                            #  setattr(VectorMapOp, method_name, compiled.get(method_name))
                            pass
                        elif self.is_instance_of(cls, jvm.MapOp):
                            setattr(RasterMapOp, method_name,
                                    compiled.get(method_name))
Example #22
    def convert(self, obj, gateway_client):
        Timestamp = JavaClass("java.sql.Timestamp", gateway_client)
        return Timestamp(
            int(time.mktime(obj.timetuple())) * 1000 + obj.microsecond // 1000)
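
The arithmetic above builds the millisecond value `java.sql.Timestamp` expects: `timetuple()` drops sub-second precision, so the microseconds are added back at millisecond resolution. A quick check (local-timezone dependent, as `time.mktime` is):

import time
from datetime import datetime

dt = datetime(2020, 1, 1, 12, 0, 0, 250500)
millis = int(time.mktime(dt.timetuple())) * 1000 + dt.microsecond // 1000
# millis ends in 250: the 250500 microseconds contribute 250 ms.
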
Example #23
    def eexport(self, name, singleFile=False, zoom=-1, numTiles=-1, mosaic=-1,
                format="tif", randomTiles=False, tms=False, colorscale="",
                tileids="", bounds="", allLevels=False,
                overridenodata=float('-inf')):
        import copy
        import time
        from numbers import Number
        import base64
        import numpy
        from osgeo import gdal_array
        import zlib
        from osgeo import gdal
        from py4j.java_gateway import JavaClass
        cls = JavaClass('org.mrgeo.mapalgebra.ExportMapOp', gateway_client=self.gateway._gateway_client)
        local_name = name
        name = 'In-Memory'
        if (hasattr(self, 'mapop') and
                self.is_instance_of(self.mapop, 'org.mrgeo.mapalgebra.raster.RasterMapOp') and
                type(name) is str and
                isinstance(singleFile, (int, float, str)) and
                isinstance(zoom, (int, float)) and
                isinstance(numTiles, (int, float)) and
                isinstance(mosaic, (int, float)) and
                type(format) is str and
                isinstance(randomTiles, (int, float, str)) and
                isinstance(tms, (int, float, str)) and
                type(colorscale) is str and
                type(tileids) is str and
                type(bounds) is str and
                isinstance(allLevels, (int, float, str)) and
                isinstance(overridenodata, (int, float))):
            op = cls.create(self.mapop, str(name), True if singleFile else False,
                            int(zoom), int(numTiles), int(mosaic), str(format),
                            True if randomTiles else False, True if tms else False,
                            str(colorscale), str(tileids), str(bounds),
                            True if allLevels else False, float(overridenodata))
        else:
            raise Exception('input types differ (TODO: expand this message!)')
        if (op.setup(self.job, self.context.getConf()) and
                op.execute(self.context) and
                op.teardown(self.job, self.context.getConf())):
            new_resource = copy.copy(self)
            new_resource.mapop = op
            gdalutils = JavaClass('org.mrgeo.utils.GDALUtils', gateway_client=self.gateway._gateway_client)
            java_image = op.image()
            width = java_image.getRasterXSize()
            height = java_image.getRasterYSize()
            options = []
            if format in ('jpg', 'jpeg'):
                driver_name = 'jpeg'
                extension = 'jpg'
            elif format in ('tif', 'tiff', 'geotif', 'geotiff', 'gtif', 'gtiff'):
                driver_name = 'GTiff'
                options.append('INTERLEAVE=BAND')
                options.append('COMPRESS=DEFLATE')
                options.append('PREDICTOR=1')
                options.append('ZLEVEL=6')
                options.append('TILES=YES')
                if width < 2048:
                    options.append('BLOCKXSIZE=' + str(width))
                else:
                    options.append('BLOCKXSIZE=2048')
                if height < 2048:
                    options.append('BLOCKYSIZE=' + str(height))
                else:
                    options.append('BLOCKYSIZE=2048')

                extension = 'tif'

            else:
                driver_name = format
                extension = format

            datatype = java_image.GetRasterBand(1).getDataType()

            if not local_name.endswith(extension):
                local_name += "." + extension

            driver = gdal.GetDriverByName(driver_name)
            local_image = driver.Create(local_name, width, height, java_image.getRasterCount(), datatype, options)
            local_image.SetProjection(str(java_image.GetProjection()))
            local_image.SetGeoTransform(java_image.GetGeoTransform())

            java_nodatas = gdalutils.getnodatas(java_image)

            for i in range(1, local_image.RasterCount + 1):
                start = time.time()
                raw_data = gdalutils.getRasterDataAsCompressedBase64(java_image, i, 0, 0, width, height)
                print('compressed/encoded data ' + str(len(raw_data)))

                decoded_data = base64.b64decode(raw_data)
                print('decoded data ' + str(len(decoded_data)))

                decompressed_data = zlib.decompress(decoded_data, 16 + zlib.MAX_WBITS)
                print('decompressed data ' + str(len(decompressed_data)))

                byte_data = numpy.frombuffer(decompressed_data, dtype='b')
                print('byte data ' + str(len(byte_data)))

                image_data = byte_data.view(gdal_array.GDALTypeCodeToNumericTypeCode(datatype))
                print('gdal-type data ' + str(len(image_data)))

                image_data = image_data.reshape((-1, width))
                #print('reshaped ' + str(len(byte_data)))
                #print(byte_data)

                #for j in xrange(0, 10):
                #    print(byte_data[j])
                print('reshaped ' + str(len(image_data)) + " x " + str(len(image_data[0])))
                band = local_image.GetRasterBand(i)

                print('writing local image')
                band.WriteArray(image_data)
                print('done')
                end = time.time()

                print("elapsed time: " + str(end - start) + " sec.")

                band.SetNoDataValue(java_nodatas[i - 1])

            local_image.FlushCache()
            print('flushed cache')

            return new_resource
        return None
Example #24
    def __init__(self, subtype, py_type=(list, tuple)):
        self.subtype = subtype
        self._j_class = JavaClass(self.subtype.binding,
                                  java_gateway._gateway_client)
        super().__init__("[L%s;" % self.subtype.binding, py_type)
Example #25
    def convert(self, object, gateway_client):
        HashMap = JavaClass("java.util.HashMap", gateway_client)
        java_map = HashMap()
        for key in object.keys():
            java_map[key] = object[key]
        return java_map
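
py4j ships equivalents of the hand-rolled collection converters in these examples (`py4j.java_collections.MapConverter`, `ListConverter`, `SetConverter`); a gateway created with auto-conversion enabled applies them without any manual registration.

from py4j.java_gateway import JavaGateway

gateway = JavaGateway(auto_convert=True)
# A Python dict passed to a Java method now arrives as a java.util.HashMap.
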