Example #1
def app_startup():
    """Run initial startup commands, then launch the worker threads."""

    # Access and create Stream Manager components
    # First wait until LocalDataStream is available
    logger.info("Creating and accessing Stream Manager components")
    try:
        while True:
            stream_names = client.list_streams()
            if LOCAL_STREAM in stream_names:
                break
            logger.warning(
                f"Target consumer stream {LOCAL_STREAM} not found, pausing 1 second..."
            )
            sleep(1)
        logger.info(f"Found target consumer stream {LOCAL_STREAM}")
    except Exception as e:
        logger.error(f"Error while waiting for stream {LOCAL_STREAM}: {e}")
    # Create AggregateDataStream to Kinesis
    try:
        # The Aggregate data stream is a high priority source for aggregate data
        # sent to Kinesis Data Streams.
        client.create_message_stream(
            MessageStreamDefinition(
                name=AGGREGATE_STREAM,  # Required.
                # max_size=268435456,  # Default is 256 MB.
                # stream_segment_size=16777216,  # Default is 16 MB.
                # time_to_live_millis=None,  # By default, no TTL is enabled.
                strategy_on_full=StrategyOnFull.OverwriteOldestData,  # Required.
                # persistence=Persistence.File,  # Default is File.
                # flush_on_write=False,  # Default is false.
                export_definition=ExportDefinition(kinesis=[
                    KinesisConfig(
                        identifier="AggregateData",
                        kinesis_stream_name=kinesis_data_stream,
                        # Highest priority
                        priority=1,
                        batch_size=1,
                    )
                ]),
            ))
        logger.info(
            f"Created aggregate producer stream {AGGREGATE_STREAM} with target Kinesis Data Stream: {kinesis_data_stream}"
        )
    except StreamManagerException as e:
        logger.error(f"Error creating message stream: {e}")
    except Exception as e:
        logger.error(f"General exception error: {e}")

    # Create and start threads
    logger.info("Starting Stream Manager Thread")
    stream_manager_thread = Thread(target=stream_manager_worker)
    stream_manager_thread.start()
    logger.info("Starting Flask API Thread")
    api_server_thread = Thread(target=api_server_worker)
    api_server_thread.start()
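The worker functions themselves are not part of this example. As a rough
sketch only: a hypothetical stream_manager_worker could drain LocalDataStream,
aggregate the readings, and append the result to AggregateDataStream for the
Kinesis export configured above. The aggregation (a running average of 4-byte
integers) and the polling loop below are assumptions, not the original code.

from time import sleep
from stream_manager import NotEnoughMessagesException, ReadMessagesOptions

def stream_manager_worker():
    """Hypothetical worker: read from LOCAL_STREAM, aggregate, re-append."""
    next_seq = 0
    while True:
        try:
            messages = client.read_messages(
                LOCAL_STREAM,
                ReadMessagesOptions(
                    desired_start_sequence_number=next_seq,
                    min_message_count=1,
                    read_timeout_millis=1000,
                ),
            )
            # Assumed payload format: 4-byte big-endian signed integers.
            values = [int.from_bytes(m.payload, "big", signed=True) for m in messages]
            client.append_message(
                AGGREGATE_STREAM, str(sum(values) / len(values)).encode("utf-8"))
            next_seq = messages[-1].sequence_number + 1
        except NotEnoughMessagesException:
            pass  # Nothing new to aggregate yet.
        sleep(1)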
Example #2
def set_stream_manager_client(self) -> None:
    """
    Sets the Stream Manager client only when a Kinesis stream name is provided.
    Failure of the Stream Manager client does not block any other actions.

    :raises Exception: when any exception other than `InvalidRequestException` occurs.
    """
    if self.kinesis_stream_name:
        if not self.stream_manager_client.connected:
            self.stream_manager_client = StreamManagerClient()

        try:
            exports = ExportDefinition(kinesis=[
                KinesisConfig(identifier="KinesisExport",
                              kinesis_stream_name=self.kinesis_stream_name,
                              batch_size=1)
            ])

            self.stream_manager_client.create_message_stream(
                MessageStreamDefinition(
                    name=self.stream_name,
                    strategy_on_full=StrategyOnFull.OverwriteOldestData,
                    export_definition=exports))
        except InvalidRequestException:
            # One centralized stream manager is used to send data to the Kinesis
            # Data Stream, so `InvalidRequestException` happens when a new job
            # is deployed and the stream already exists.
            pass
        except Exception as err:
            self.logger.error(
                "Unknown error happened, so your Stream Manager might not be working: %s",
                str(err))
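For context, this method reads several attributes from `self` that its class
defines elsewhere. A minimal hypothetical skeleton (the class name and
constructor arguments are assumptions, not the original):

import logging
from stream_manager import StreamManagerClient

class StreamHandler:  # Hypothetical name; the original class is not shown.
    def __init__(self, stream_name, kinesis_stream_name=""):
        self.stream_name = stream_name
        self.kinesis_stream_name = kinesis_stream_name
        self.stream_manager_client = StreamManagerClient()
        self.logger = logging.getLogger(__name__)

    # set_stream_manager_client from the example above would live here.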
Example #3
import asyncio
import time

from stream_manager import (
    ExportDefinition,
    IoTSiteWiseConfig,
    MessageStreamDefinition,
    ResourceNotFoundException,
    StrategyOnFull,
    StreamManagerClient,
)
from stream_manager.util import Util

def main(logger):
    client = None  # Ensure `client` is bound even if construction fails.
    try:
        stream_name = "SomeStream"
        client = StreamManagerClient()

        # Try deleting the stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=stream_name)
        except ResourceNotFoundException:
            pass

        exports = ExportDefinition(
            iot_sitewise=[IoTSiteWiseConfig(identifier="IoTSiteWiseExport" + stream_name, batch_size=5)]
        )
        client.create_message_stream(
            MessageStreamDefinition(
                name=stream_name, strategy_on_full=StrategyOnFull.OverwriteOldestData, export_definition=exports
            )
        )

        logger.info("Now going to start writing random IoTSiteWise entries to the stream")
        # Now start putting in random SiteWise entries.
        while True:
            logger.debug("Appending new random IoTSiteWiseEntry to stream")
            client.append_message(stream_name, Util.validate_and_serialize_to_json_bytes(get_random_site_wise_entry()))
            time.sleep(1)
    except asyncio.TimeoutError:
        print("Timed out")
    except Exception as e:
        print(e)
        print(type(e))
    finally:
        if client:
            client.close()
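The helper get_random_site_wise_entry is not shown above. A minimal sketch,
modeled on the Stream Manager SDK's SiteWise sample; the property alias and
value ranges are placeholders:

import random
import time
import uuid

from stream_manager import (
    AssetPropertyValue,
    PutAssetPropertyValueEntry,
    Quality,
    TimeInNanos,
    Variant,
)

def get_random_site_wise_entry():
    # One asset property value with a random double payload and a
    # nanosecond-resolution timestamp, keyed by a unique entry id.
    return PutAssetPropertyValueEntry(
        entry_id=str(uuid.uuid4()),
        property_alias="SomePropertyAlias",  # Placeholder alias.
        property_values=[
            AssetPropertyValue(
                value=Variant(double_value=random.random()),
                quality=Quality.GOOD,
                timestamp=TimeInNanos(
                    time_in_seconds=int(time.time()),
                    offset_in_nanos=random.randint(0, 10000),
                ),
            )
        ],
    )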
Example #4
from stream_manager import (
    ExportDefinition,
    IoTAnalyticsConfig,
    MessageStreamDefinition,
    Persistence,
    StrategyOnFull,
    StreamManagerClient,
    StreamManagerException,
)

# `logger` and `channel_name` (the target IoT Analytics channel) are assumed
# to be defined elsewhere in the component.
client = StreamManagerClient()

try:
    # The LocalDataStream is low priority source for incoming sensor data and
    # aggregator function.
    client.create_message_stream(
        MessageStreamDefinition(
            name="LocalDataStream",  # Required.
            max_size=268435456,  # Default is 256 MB.
            stream_segment_size=16777216,  # Default is 16 MB.
            time_to_live_millis=None,  # By default, no TTL is enabled.
            strategy_on_full=StrategyOnFull.OverwriteOldestData,  # Required.
            persistence=Persistence.File,  # Default is File.
            flush_on_write=False,  # Default is false.
            export_definition=ExportDefinition(iot_analytics=[
                IoTAnalyticsConfig(
                    identifier="RawData",
                    iot_channel=channel_name,
                    # iot_msg_id_prefix="test",
                    # batch_size=1,
                    # batch_interval_millis=1000,
                    # priority=1
                )
            ]),
        ))
except StreamManagerException as e:
    logger.error(f"Error creating message stream: {e}")
except Exception as e:
    logger.error(f"General exception error: {e}")
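Once LocalDataStream exists, producers simply append payloads and Stream
Manager handles the export to the IoT Analytics channel. A brief sketch of
such a producer loop; the JSON payload shape is an assumption:

import json
import random
import time

while True:
    # Invented payload shape, for illustration only.
    reading = {"sensor_id": "sensor-1", "value": random.uniform(0.0, 100.0)}
    try:
        seq = client.append_message(
            "LocalDataStream", json.dumps(reading).encode("utf-8"))
        logger.debug(f"Appended reading with sequence number {seq}")
    except StreamManagerException as e:
        logger.error(f"Error appending to stream: {e}")
    time.sleep(1)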
Example #5
import asyncio
import time

from stream_manager import (
    ExportDefinition,
    MessageStreamDefinition,
    ReadMessagesOptions,
    ResourceNotFoundException,
    S3ExportTaskDefinition,
    S3ExportTaskExecutorConfig,
    Status,
    StatusConfig,
    StatusLevel,
    StatusMessage,
    StrategyOnFull,
    StreamManagerClient,
    StreamManagerException,
)
from stream_manager.util import Util

def main(logger):
    client = None  # Ensure `client` is bound even if construction fails.
    try:
        stream_name = "SomeStream"
        status_stream_name = "SomeStatusStreamName"
        bucket_name = "SomeBucket"
        key_name = "SomeKey"
        file_url = "file:/path/to/some/file.someExtension"
        client = StreamManagerClient()

        # Try deleting the status stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=status_stream_name)
        except ResourceNotFoundException:
            pass

        # Try deleting the stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=stream_name)
        except ResourceNotFoundException:
            pass

        exports = ExportDefinition(s3_task_executor=[
            S3ExportTaskExecutorConfig(
                identifier="S3TaskExecutor" + stream_name,  # Required
                # Optional. Add an export status stream to add statuses for all S3 upload tasks.
                status_config=StatusConfig(
                    status_level=StatusLevel.INFO,  # Default is INFO level statuses.
                    # Status Stream should be created before specifying in S3 Export Config.
                    status_stream_name=status_stream_name,
                ),
            )
        ])

        # Create the Status Stream.
        client.create_message_stream(
            MessageStreamDefinition(
                name=status_stream_name,
                strategy_on_full=StrategyOnFull.OverwriteOldestData))

        # Create the message stream with the S3 Export definition.
        client.create_message_stream(
            MessageStreamDefinition(
                name=stream_name,
                strategy_on_full=StrategyOnFull.OverwriteOldestData,
                export_definition=exports))

        # Append a S3 Task definition and print the sequence number
        s3_export_task_definition = S3ExportTaskDefinition(input_url=file_url,
                                                           bucket=bucket_name,
                                                           key=key_name)
        logger.info(
            "Successfully appended S3 Task Definition to stream with sequence number %d",
            client.append_message(
                stream_name,
                Util.validate_and_serialize_to_json_bytes(
                    s3_export_task_definition)),
        )

        # Read the statuses from the export status stream
        is_file_uploaded_to_s3 = False
        while not is_file_uploaded_to_s3:
            try:
                messages_list = client.read_messages(
                    status_stream_name,
                    ReadMessagesOptions(min_message_count=1,
                                        read_timeout_millis=1000))
                for message in messages_list:
                    # Deserialize the status message first.
                    status_message = Util.deserialize_json_bytes_to_obj(
                        message.payload, StatusMessage)

                    # Check the status of the status message. If the status is "Success",
                    # the file was successfully uploaded to S3.
                    # If the status was either "Failure" or "Cancelled", the server was unable to upload the file to S3.
                    # We will print the message for why the upload to S3 failed from the status message.
                    # If the status was "InProgress", the status indicates that the server has started uploading
                    # the S3 task.
                    if status_message.status == Status.Success:
                        logger.info("Successfully uploaded file at path " +
                                    file_url + " to S3.")
                        is_file_uploaded_to_s3 = True
                    elif status_message.status == Status.Failure or status_message.status == Status.Canceled:
                        logger.info("Unable to upload file at path " +
                                    file_url + " to S3. Message: " +
                                    status_message.message)
                        is_file_uploaded_to_s3 = True
                time.sleep(5)
            except StreamManagerException:
                logger.exception("Exception while running")
    except asyncio.TimeoutError:
        logger.exception("Timed out while executing")
    except Exception:
        logger.exception("Exception while running")
    finally:
        if client:
            client.close()
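Note that file_url must point to a file that already exists on the core device
before the task definition is appended. A small hypothetical helper for local
testing; the path and contents are placeholders:

import pathlib

def make_upload_file(path="/tmp/some_file.txt"):
    # Write a throwaway local file and return the "file:" input URL that
    # S3ExportTaskDefinition expects.
    p = pathlib.Path(path)
    p.write_text("example payload")
    return "file:" + str(p)

main() could then use file_url = make_upload_file() instead of the hard-coded placeholder path.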
Example #6
import asyncio
import random
import time

from stream_manager import (
    ExportDefinition,
    KinesisConfig,
    MessageStreamDefinition,
    ReadMessagesOptions,
    ResourceNotFoundException,
    StrategyOnFull,
    StreamManagerClient,
)

def main(logger):
    client = None  # Ensure `client` is bound even if construction fails.
    try:
        stream_name = "SomeStream"
        kinesis_stream_name = "MyKinesisStream"

        # Create a client for the StreamManager
        client = StreamManagerClient()

        # Try deleting the stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=stream_name)
        except ResourceNotFoundException:
            pass

        exports = ExportDefinition(kinesis=[
            KinesisConfig(identifier="KinesisExport" + stream_name,
                          kinesis_stream_name=kinesis_stream_name)
        ])
        client.create_message_stream(
            MessageStreamDefinition(
                name=stream_name,
                strategy_on_full=StrategyOnFull.OverwriteOldestData,
                export_definition=exports))

        # Append 2 messages and print their sequence numbers
        logger.info(
            "Successfully appended message to stream with sequence number %d",
            client.append_message(stream_name,
                                  "ABCDEFGHIJKLMNO".encode("utf-8")),
        )
        logger.info(
            "Successfully appended message to stream with sequence number %d",
            client.append_message(stream_name, "PQRSTUVWXYZ".encode("utf-8")),
        )

        # Try reading the 2 messages we just appended and print them out
        logger.info(
            "Successfully read 2 messages: %s",
            client.read_messages(
                stream_name,
                ReadMessagesOptions(min_message_count=2,
                                    read_timeout_millis=1000)),
        )

        logger.info(
            "Now going to start writing random integers between 0 and 1000 to the stream"
        )
        # Now start putting in random data between 0 and 1000 to emulate device sensor input
        while True:
            logger.debug("Appending new random integer to stream")
            client.append_message(
                stream_name,
                random.randint(0, 1000).to_bytes(length=4,
                                                 signed=True,
                                                 byteorder="big"))
            time.sleep(1)

    except asyncio.TimeoutError:
        logger.exception("Timed out while executing")
    except Exception:
        logger.exception("Exception while running")
    finally:
        # Always close the client to avoid resource leaks
        if client:
            client.close()
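As a companion sketch (not part of the original example), the integers can be
read back and decoded with the same width and byte order used when appending.
The stream name and message-count defaults below are assumptions:

from stream_manager import ReadMessagesOptions, StreamManagerClient

def read_back(stream_name="SomeStream", count=5):
    client = StreamManagerClient()
    try:
        messages = client.read_messages(
            stream_name,
            ReadMessagesOptions(min_message_count=1,
                                max_message_count=count,
                                read_timeout_millis=1000),
        )
        # Decode with the same parameters used by to_bytes() above.
        return [int.from_bytes(m.payload, "big", signed=True) for m in messages]
    finally:
        client.close()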