Example #1
def to_avro(data):
    """
    Converts a column into binary of Avro format.

    Note: Avro is a built-in but external data source module since Spark 2.4. Please deploy the
    application as per the deployment section of the "Apache Avro Data Source Guide".

    :param data: the data column.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.avro.functions import to_avro
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_avro(df.value).alias("avro")).collect()
    [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))]
    """

    sc = SparkContext._active_spark_context
    try:
        jc = sc._jvm.org.apache.spark.sql.avro.functions.to_avro(_to_java_column(data))
    except TypeError as e:
        if str(e) == "'JavaPackage' object is not callable":
            _print_missing_jar("Avro", "avro", "avro", sc.version)
        raise
    return Column(jc)
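
A typical use of this variant is serializing a struct column into Avro bytes before handing it to a binary sink such as Kafka. The sketch below is illustrative only: it assumes an active SparkSession named spark, the spark-avro package deployed as described in the docstring, and a hypothetical Kafka broker and topic.

# Sketch: pack two columns into a struct, encode it with to_avro, and write
# the resulting bytes as the Kafka record value. The broker address and topic
# name ("localhost:9092", "users") are placeholders.
from pyspark.sql.functions import struct
from pyspark.sql.avro.functions import to_avro

df = spark.createDataFrame([(1, "Alice", 2)], ("key", "name", "age"))
avro_df = df.select(to_avro(struct("name", "age")).alias("value"))
(avro_df.write.format("kafka")
    .option("kafka.bootstrap.servers", "localhost:9092")
    .option("topic", "users")
    .save())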
Example #2
def to_avro(data, jsonFormatSchema=""):
    """
    Converts a column into binary of Avro format.

    Note: Avro is a built-in but external data source module since Spark 2.4. Please deploy the
    application as per the deployment section of the "Apache Avro Data Source Guide".

    :param data: the data column.
    :param jsonFormatSchema: user-specified output avro schema in JSON string format.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.avro.functions import to_avro
    >>> data = ['SPADES']
    >>> df = spark.createDataFrame(data, "string")
    >>> df.select(to_avro(df.value).alias("suite")).collect()
    [Row(suite=bytearray(b'\\x00\\x0cSPADES'))]
    >>> jsonFormatSchema = '''["null", {"type": "enum", "name": "value",
    ...     "symbols": ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]}]'''
    >>> df.select(to_avro(df.value, jsonFormatSchema).alias("suite")).collect()
    [Row(suite=bytearray(b'\\x02\\x00'))]
    """

    sc = SparkContext._active_spark_context
    try:
        if jsonFormatSchema == "":
            jc = sc._jvm.org.apache.spark.sql.avro.functions.to_avro(
                _to_java_column(data))
        else:
            jc = sc._jvm.org.apache.spark.sql.avro.functions.to_avro(
                _to_java_column(data), jsonFormatSchema)
    except TypeError as e:
        if str(e) == "'JavaPackage' object is not callable":
            _print_missing_jar("Avro", "avro", "avro", sc.version)
        raise
    return Column(jc)
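
The two doctest outputs above show what the schema argument changes: without it the value is written out as a full Avro string (b'\x00\x0cSPADES'), whereas the nullable enum schema encodes only the union branch and symbol index (b'\x02\x00'). A minimal side-by-side sketch, assuming an active SparkSession named spark:

# Sketch: compare the default string encoding with a user-supplied enum schema.
from pyspark.sql.avro.functions import to_avro

df = spark.createDataFrame(["SPADES"], "string")
enum_schema = '''["null", {"type": "enum", "name": "value",
    "symbols": ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]}]'''
df.select(
    to_avro(df.value).alias("as_string"),             # full string payload
    to_avro(df.value, enum_schema).alias("as_enum"),  # union branch + symbol index
).collect()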
Example #3
def to_avro(data):
    """
    Converts a column into binary of Avro format.

    Note: Avro is a built-in but external data source module since Spark 2.4. Please deploy the
    application as per the deployment section of the "Apache Avro Data Source Guide".

    :param data: the data column.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.avro.functions import to_avro
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_avro(df.value).alias("avro")).collect()
    [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))]
    """

    sc = SparkContext._active_spark_context
    try:
        jc = sc._jvm.org.apache.spark.sql.avro.functions.to_avro(
            _to_java_column(data))
    except TypeError as e:
        if str(e) == "'JavaPackage' object is not callable":
            _print_missing_jar("Avro", "avro", "avro", sc.version)
        raise
    return Column(jc)
Example #4
def from_avro(data: "ColumnOrName",
              jsonFormatSchema: str,
              options: Optional[Dict[str, str]] = None) -> Column:
    """
    Converts a binary column of Avro format into its corresponding catalyst value.
    The specified schema must match the read data; otherwise the behavior is undefined:
    it may fail or return an arbitrary result.
    To deserialize the data with a compatible and evolved schema, the expected Avro schema can be
    set via the option avroSchema.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    data : :class:`~pyspark.sql.Column` or str
        the binary column.
    jsonFormatSchema : str
        the avro schema in JSON string format.
    options : dict, optional
        options to control how the Avro record is parsed.

    Notes
    -----
    Avro is a built-in but external data source module since Spark 2.4. Please deploy the
    application as per the deployment section of the "Apache Avro Data Source Guide".

    Examples
    --------
    >>> from pyspark.sql import Row
    >>> from pyspark.sql.avro.functions import from_avro, to_avro
    >>> data = [(1, Row(age=2, name='Alice'))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> avroDf = df.select(to_avro(df.value).alias("avro"))
    >>> avroDf.collect()
    [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))]

    >>> jsonFormatSchema = '''{"type":"record","name":"topLevelRecord","fields":
    ...     [{"name":"avro","type":[{"type":"record","name":"value","namespace":"topLevelRecord",
    ...     "fields":[{"name":"age","type":["long","null"]},
    ...     {"name":"name","type":["string","null"]}]},"null"]}]}'''
    >>> avroDf.select(from_avro(avroDf.avro, jsonFormatSchema).alias("value")).collect()
    [Row(value=Row(avro=Row(age=2, name='Alice')))]
    """

    sc = SparkContext._active_spark_context
    assert sc is not None and sc._jvm is not None
    try:
        jc = sc._jvm.org.apache.spark.sql.avro.functions.from_avro(
            _to_java_column(data), jsonFormatSchema, options or {})
    except TypeError as e:
        if str(e) == "'JavaPackage' object is not callable":
            _print_missing_jar("Avro", "avro", "avro", sc.version)
        raise
    return Column(jc)
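
In practice from_avro is usually applied to payloads read from an external system rather than to bytes produced in the same job. A hedged sketch, assuming the spark-avro and Kafka source packages are on the classpath and that a hypothetical topic "users" carries records produced by to_avro with the record schema used above:

# Sketch: decode Avro record values from a hypothetical Kafka topic.
# Broker address and topic name are placeholders.
from pyspark.sql.avro.functions import from_avro

user_schema = '''{"type": "record", "name": "value",
    "fields": [{"name": "age",  "type": ["long", "null"]},
               {"name": "name", "type": ["string", "null"]}]}'''

kafka_df = (spark.read.format("kafka")
            .option("kafka.bootstrap.servers", "localhost:9092")
            .option("subscribe", "users")
            .load())
decoded = kafka_df.select(from_avro(kafka_df.value, user_schema).alias("user"))
decoded.select("user.name", "user.age").show()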
Example #5
def from_avro(data, jsonFormatSchema, options={}):
    """
    Converts a binary column of Avro format into its corresponding catalyst value. If a schema is
    provided via the option actualSchema, a different (but compatible) schema can be used for
    reading. If no actualSchema option is provided, the specified schema must match the read data;
    otherwise the behavior is undefined: it may fail or return an arbitrary result.

    Note: Avro is a built-in but external data source module since Spark 2.4. Please deploy the
    application as per the deployment section of the "Apache Avro Data Source Guide".

    :param data: the binary column.
    :param jsonFormatSchema: the avro schema in JSON string format.
    :param options: options to control how the Avro record is parsed.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.avro.functions import from_avro, to_avro
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> avroDf = df.select(to_avro(df.value).alias("avro"))
    >>> avroDf.collect()
    [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))]
    >>> jsonFormatSchema = '''{"type":"record","name":"topLevelRecord","fields":
    ...     [{"name":"avro","type":[{"type":"record","name":"value","namespace":"topLevelRecord",
    ...     "fields":[{"name":"age","type":["long","null"]},
    ...     {"name":"name","type":["string","null"]}]},"null"]}]}'''
    >>> avroDf.select(from_avro(avroDf.avro, jsonFormatSchema).alias("value")).collect()
    [Row(value=Row(avro=Row(age=2, name=u'Alice')))]
    """

    sc = SparkContext._active_spark_context
    try:
        jc = sc._jvm.org.apache.spark.sql.avro.functions.from_avro(
            _to_java_column(data), jsonFormatSchema, options)
    except TypeError as e:
        if str(e) == "'JavaPackage' object is not callable":
            _print_missing_jar("Avro", "avro", "avro", sc.version)
        raise
    return Column(jc)
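
The options argument is passed through to the Avro deserializer. One commonly cited option is "mode", which in Spark versions that support it returns null for malformed records instead of raising; treat the exact option names below as assumptions and check the Apache Avro Data Source Guide for your release. The sketch reuses avroDf and jsonFormatSchema from the docstring example:

# Sketch: pass parser options to from_avro. The "mode"/"PERMISSIVE" pair is an
# assumption about the supported options, not something this snippet confirms.
from pyspark.sql.avro.functions import from_avro

parsed = avroDf.select(
    from_avro(avroDf.avro, jsonFormatSchema, {"mode": "PERMISSIVE"}).alias("value"))
parsed.collect()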
Example #6
def from_avro(data, jsonFormatSchema, options={}):
    """
    Converts a binary column of Avro format into its corresponding catalyst value. The specified
    schema must match the read data; otherwise the behavior is undefined: it may fail or return
    an arbitrary result.

    Note: Avro is a built-in but external data source module since Spark 2.4. Please deploy the
    application as per the deployment section of the "Apache Avro Data Source Guide".

    :param data: the binary column.
    :param jsonFormatSchema: the avro schema in JSON string format.
    :param options: options to control how the Avro record is parsed.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.avro.functions import from_avro, to_avro
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> avroDf = df.select(to_avro(df.value).alias("avro"))
    >>> avroDf.collect()
    [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))]
    >>> jsonFormatSchema = '''{"type":"record","name":"topLevelRecord","fields":
    ...     [{"name":"avro","type":[{"type":"record","name":"value","namespace":"topLevelRecord",
    ...     "fields":[{"name":"age","type":["long","null"]},
    ...     {"name":"name","type":["string","null"]}]},"null"]}]}'''
    >>> avroDf.select(from_avro(avroDf.avro, jsonFormatSchema).alias("value")).collect()
    [Row(value=Row(avro=Row(age=2, name=u'Alice')))]
    """

    sc = SparkContext._active_spark_context
    try:
        jc = sc._jvm.org.apache.spark.sql.avro.functions.from_avro(
            _to_java_column(data), jsonFormatSchema, options)
    except TypeError as e:
        if str(e) == "'JavaPackage' object is not callable":
            _print_missing_jar("Avro", "avro", "avro", sc.version)
        raise
    return Column(jc)
Example #7
    def createStream(
        ssc: StreamingContext,
        kinesisAppName: str,
        streamName: str,
        endpointUrl: str,
        regionName: str,
        initialPositionInStream: str,
        checkpointInterval: int,
        metricsLevel: int = MetricsLevel.DETAILED,
        storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_2,
        awsAccessKeyId: Optional[str] = None,
        awsSecretKey: Optional[str] = None,
        decoder: Union[
            Callable[[Optional[bytes]], T], Callable[[Optional[bytes]], Optional[str]]
        ] = utf8_decoder,
        stsAssumeRoleArn: Optional[str] = None,
        stsSessionName: Optional[str] = None,
        stsExternalId: Optional[str] = None,
    ) -> Union["DStream[Union[T, Optional[str]]]", "DStream[T]"]:
        """
        Create an input stream that pulls messages from a Kinesis stream. This uses the
        Kinesis Client Library (KCL) to pull messages from Kinesis.

        Parameters
        ----------
        ssc : :class:`StreamingContext`
            StreamingContext object
        kinesisAppName : str
            Kinesis application name used by the Kinesis Client Library (KCL) to
            update DynamoDB
        streamName : str
            Kinesis stream name
        endpointUrl : str
            URL of the Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
        regionName : str
            Name of region used by the Kinesis Client Library (KCL) to update
            DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
        initialPositionInStream : int
            In the absence of Kinesis checkpoint info, this is the
            worker's initial starting position in the stream. The
            values are either the beginning of the stream per Kinesis'
            limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
            the tip of the stream (InitialPositionInStream.LATEST).
        checkpointInterval : int
            Checkpoint interval (in seconds) for Kinesis checkpointing. See the Kinesis
            Spark Streaming documentation for more details on the different
            types of checkpoints.
        metricsLevel : int
            Level of CloudWatch PutMetrics.
            Can be set to either DETAILED, SUMMARY, or NONE. (default is DETAILED)
        storageLevel : :class:`pyspark.StorageLevel`, optional
            Storage level to use for storing the received objects (default is
            StorageLevel.MEMORY_AND_DISK_2)
        awsAccessKeyId : str, optional
            AWS AccessKeyId (default is None. If None, will use
            DefaultAWSCredentialsProviderChain)
        awsSecretKey : str, optional
            AWS SecretKey (default is None. If None, will use
            DefaultAWSCredentialsProviderChain)
        decoder : function, optional
            A function used to decode value (default is utf8_decoder)
        stsAssumeRoleArn : str, optional
            ARN of IAM role to assume when using STS sessions to read from
            the Kinesis stream (default is None).
        stsSessionName : str, optional
            Name to uniquely identify STS sessions used to read from Kinesis
            stream, if STS is being used (default is None).
        stsExternalId : str, optional
            External ID that can be used to validate against the assumed IAM
            role's trust policy, if STS is being used (default is None).

        Returns
        -------
        A DStream object

        Notes
        -----
        The given AWS credentials will get saved in DStream checkpoints if checkpointing
        is enabled. Make sure that your checkpoint directory is secure.
        """
        jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
        jduration = ssc._jduration(checkpointInterval)

        jvm = ssc._jvm
        assert jvm is not None

        try:
            helper = jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
        except TypeError as e:
            if str(e) == "'JavaPackage' object is not callable":
                _print_missing_jar(
                    "Streaming's Kinesis",
                    "streaming-kinesis-asl",
                    "streaming-kinesis-asl-assembly",
                    ssc.sparkContext.version,
                )
            raise
        jstream = helper.createStream(
            ssc._jssc,
            kinesisAppName,
            streamName,
            endpointUrl,
            regionName,
            initialPositionInStream,
            jduration,
            metricsLevel,
            jlevel,
            awsAccessKeyId,
            awsSecretKey,
            stsAssumeRoleArn,
            stsSessionName,
            stsExternalId,
        )
        stream: DStream = DStream(jstream, ssc, NoOpSerializer())
        return stream.map(lambda v: decoder(v))
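
A minimal end-to-end usage sketch for this variant, assuming the streaming-kinesis-asl-assembly jar is on the classpath; the application name, stream name, endpoint, and region are placeholders, and credentials fall back to DefaultAWSCredentialsProviderChain:

# Sketch: consume a hypothetical Kinesis stream and print each batch.
from pyspark import SparkContext, StorageLevel
from pyspark.streaming import StreamingContext
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream

sc = SparkContext(appName="KinesisExample")
ssc = StreamingContext(sc, batchDuration=2)

lines = KinesisUtils.createStream(
    ssc, "my-kcl-app", "my-stream",
    "https://kinesis.us-east-1.amazonaws.com", "us-east-1",
    InitialPositionInStream.LATEST, checkpointInterval=2,
    storageLevel=StorageLevel.MEMORY_AND_DISK_2)

lines.pprint()
ssc.start()
ssc.awaitTermination()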
Example #8
    def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName,
                     initialPositionInStream, checkpointInterval,
                     storageLevel=StorageLevel.MEMORY_AND_DISK_2,
                     awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder,
                     stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None):
        """
        Create an input stream that pulls messages from a Kinesis stream. This uses the
        Kinesis Client Library (KCL) to pull messages from Kinesis.

        .. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
            is enabled. Make sure that your checkpoint directory is secure.

        :param ssc:  StreamingContext object
        :param kinesisAppName:  Kinesis application name used by the Kinesis Client Library (KCL) to
                                update DynamoDB
        :param streamName:  Kinesis stream name
        :param endpointUrl:  URL of the Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
        :param regionName:  Name of region used by the Kinesis Client Library (KCL) to update
                            DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
        :param initialPositionInStream:  In the absence of Kinesis checkpoint info, this is the
                                         worker's initial starting position in the stream. The
                                         values are either the beginning of the stream per Kinesis'
                                         limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
                                         the tip of the stream (InitialPositionInStream.LATEST).
        :param checkpointInterval:  Checkpoint interval for Kinesis checkpointing. See the Kinesis
                                    Spark Streaming documentation for more details on the different
                                    types of checkpoints.
        :param storageLevel:  Storage level to use for storing the received objects (default is
                              StorageLevel.MEMORY_AND_DISK_2)
        :param awsAccessKeyId:  AWS AccessKeyId (default is None. If None, will use
                                DefaultAWSCredentialsProviderChain)
        :param awsSecretKey:  AWS SecretKey (default is None. If None, will use
                              DefaultAWSCredentialsProviderChain)
        :param decoder:  A function used to decode value (default is utf8_decoder)
        :param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
                                 the Kinesis stream (default is None).
        :param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
                               stream, if STS is being used (default is None).
        :param stsExternalId: External ID that can be used to validate against the assumed IAM
                              role's trust policy, if STS is being used (default is None).
        :return: A DStream object
        """
        jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
        jduration = ssc._jduration(checkpointInterval)

        try:
            # Use KinesisUtilsPythonHelper to access Scala's KinesisUtils
            helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
        except TypeError as e:
            if str(e) == "'JavaPackage' object is not callable":
                _print_missing_jar(
                    "Streaming's Kinesis",
                    "streaming-kinesis-asl",
                    "streaming-kinesis-asl-assembly",
                    ssc.sparkContext.version)
            raise
        jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl,
                                      regionName, initialPositionInStream, jduration, jlevel,
                                      awsAccessKeyId, awsSecretKey, stsAssumeRoleArn,
                                      stsSessionName, stsExternalId)
        stream = DStream(jstream, ssc, NoOpSerializer())
        return stream.map(lambda v: decoder(v))
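
The decoder argument controls how raw record bytes become Python objects; by default utf8_decoder yields strings. A short sketch of a custom JSON decoder, assuming a running StreamingContext named ssc and the KinesisUtils / InitialPositionInStream imports from the previous sketch; the stream and application names remain placeholders:

# Sketch: decode JSON-encoded Kinesis records instead of plain UTF-8 strings.
import json

def json_decoder(raw_bytes):
    # Mirror utf8_decoder's handling of empty records, then parse the payload.
    if raw_bytes is None:
        return None
    return json.loads(raw_bytes.decode("utf-8"))

events = KinesisUtils.createStream(
    ssc, "my-kcl-app", "my-stream",
    "https://kinesis.us-east-1.amazonaws.com", "us-east-1",
    InitialPositionInStream.TRIM_HORIZON, checkpointInterval=2,
    decoder=json_decoder)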
Example #9
    def createStream(ssc,
                     kinesisAppName,
                     streamName,
                     endpointUrl,
                     regionName,
                     initialPositionInStream,
                     checkpointInterval,
                     storageLevel=StorageLevel.MEMORY_AND_DISK_2,
                     awsAccessKeyId=None,
                     awsSecretKey=None,
                     decoder=utf8_decoder,
                     stsAssumeRoleArn=None,
                     stsSessionName=None,
                     stsExternalId=None):
        """
        Create an input stream that pulls messages from a Kinesis stream. This uses the
        Kinesis Client Library (KCL) to pull messages from Kinesis.

        .. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
            is enabled. Make sure that your checkpoint directory is secure.

        :param ssc:  StreamingContext object
        :param kinesisAppName:  Kinesis application name used by the Kinesis Client Library (KCL) to
                                update DynamoDB
        :param streamName:  Kinesis stream name
        :param endpointUrl:  URL of the Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
        :param regionName:  Name of region used by the Kinesis Client Library (KCL) to update
                            DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
        :param initialPositionInStream:  In the absence of Kinesis checkpoint info, this is the
                                         worker's initial starting position in the stream. The
                                         values are either the beginning of the stream per Kinesis'
                                         limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
                                         the tip of the stream (InitialPositionInStream.LATEST).
        :param checkpointInterval:  Checkpoint interval for Kinesis checkpointing. See the Kinesis
                                    Spark Streaming documentation for more details on the different
                                    types of checkpoints.
        :param storageLevel:  Storage level to use for storing the received objects (default is
                              StorageLevel.MEMORY_AND_DISK_2)
        :param awsAccessKeyId:  AWS AccessKeyId (default is None. If None, will use
                                DefaultAWSCredentialsProviderChain)
        :param awsSecretKey:  AWS SecretKey (default is None. If None, will use
                              DefaultAWSCredentialsProviderChain)
        :param decoder:  A function used to decode value (default is utf8_decoder)
        :param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
                                 the Kinesis stream (default is None).
        :param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
                               stream, if STS is being used (default is None).
        :param stsExternalId: External ID that can be used to validate against the assumed IAM
                              role's trust policy, if STS is being used (default is None).
        :return: A DStream object
        """
        jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
        jduration = ssc._jduration(checkpointInterval)

        try:
            # Use KinesisUtilsPythonHelper to access Scala's KinesisUtils
            helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
        except TypeError as e:
            if str(e) == "'JavaPackage' object is not callable":
                _print_missing_jar("Streaming's Kinesis",
                                   "streaming-kinesis-asl",
                                   "streaming-kinesis-asl-assembly",
                                   ssc.sparkContext.version)
            raise
        jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName,
                                      endpointUrl, regionName,
                                      initialPositionInStream, jduration,
                                      jlevel, awsAccessKeyId, awsSecretKey,
                                      stsAssumeRoleArn, stsSessionName,
                                      stsExternalId)
        stream = DStream(jstream, ssc, NoOpSerializer())
        return stream.map(lambda v: decoder(v))