Code example #1
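All of these snippets use the AWS IoT Greengrass Stream Manager SDK for Python (the `stream_manager` package) together with standard-library modules such as `asyncio`, `json`, `random`, and `time`. As a rough sketch of the import preamble they assume (the exact set of names varies per example and is not part of the original snippets):

import asyncio
import logging
import random
import time

from stream_manager import (
    ExportDefinition,
    IoTSiteWiseConfig,
    MessageStreamDefinition,
    ReadMessagesOptions,
    ResourceNotFoundException,
    StrategyOnFull,
    StreamManagerClient,
)
from stream_manager.util import Util  # Helpers for (de)serializing SDK objects to/from JSON bytes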
def main(logger):
    client = None
    try:
        stream_name = "SomeStream"
        client = StreamManagerClient()

        # Try deleting the stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=stream_name)
        except ResourceNotFoundException:
            pass

        exports = ExportDefinition(
            iot_sitewise=[IoTSiteWiseConfig(identifier="IoTSiteWiseExport" + stream_name, batch_size=5)]
        )
        client.create_message_stream(
            MessageStreamDefinition(
                name=stream_name, strategy_on_full=StrategyOnFull.OverwriteOldestData, export_definition=exports
            )
        )

        logger.info("Now going to start writing random IoTSiteWiseEntry to the stream")
        # Now start putting in random site wise entries.
        while True:
            logger.debug("Appending new random IoTSiteWiseEntry to stream")
            client.append_message(stream_name, Util.validate_and_serialize_to_json_bytes(get_random_site_wise_entry()))
            time.sleep(1)
    except asyncio.TimeoutError:
        logger.exception("Timed out while executing")
    except Exception:
        logger.exception("Exception while running")
    finally:
        if client:
            client.close()
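Example #1 calls get_random_site_wise_entry(), which is not shown above. A minimal sketch of that helper, assuming the SDK's SiteWise data classes and a placeholder property alias ("SomePropertyAlias"), could look like this:

import random
import time
import uuid

from stream_manager import (
    AssetPropertyValue,
    PutAssetPropertyValueEntry,
    Quality,
    TimeInNanos,
    Variant,
)


def get_random_site_wise_entry():
    # Build one asset property value entry with a random double value and the current timestamp.
    # "SomePropertyAlias" is a placeholder; replace it with a real IoT SiteWise property alias.
    return PutAssetPropertyValueEntry(
        entry_id=str(uuid.uuid4()),
        property_alias="SomePropertyAlias",
        property_values=[
            AssetPropertyValue(
                value=Variant(double_value=random.random()),
                quality=Quality.GOOD,
                timestamp=TimeInNanos(
                    time_in_seconds=int(round(time.time())),
                    offset_in_nanos=random.randint(0, 10000),
                ),
            )
        ],
    )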
Code example #2
def read_from_stream_aggregate_and_publish(client: StreamManagerClient):
    """Read the higher precision local data stream, aggregate, and publish to the aggregate stream"""
    global aggregate_values
    raw_stream_data = read_from_stream(
        # Source data is approx 20 messages-per-second, so read 5 seconds worth (100)
        client=client,
        msg_count=100,
        read_timeout_millis=5000,
    )

    result = []
    # Collect the distinct device ids present in this batch of readings
    distinct_ids = set(v['id'] for v in raw_stream_data)
    for id in distinct_ids:
        values = [(v['voltage'], v['timestamp']) for v in raw_stream_data
                  if v['id'] == id]
        voltages, timestamps = zip(*values)

        num_values = len(voltages)
        latest_v = voltages[num_values - 1]

        # If the voltage dropped since the previous reading, extrapolate linearly
        # to estimate the time remaining until the voltage reaches 3 V
        if num_values > 1 and voltages[num_values - 2] > latest_v:
            lost_v = voltages[num_values - 2] - latest_v
            timespan = timestamps[num_values - 2] - timestamps[num_values - 1]
            factor = (3 - latest_v) / lost_v
            timeleft = factor * timespan
        else:
            timeleft = 'N/A'

        result.append({
            'id': id,
            'current_voltage': latest_v,
            'time_left': timeleft
        })

    # Update aggregate values
    lock.acquire()
    aggregate_values = result
    lock.release()

    retries = 3
    backoff = 0.2
    # Try appending data up to 3 times. If that fails, then just move on.
    for tryNum in range(retries):
        try:
            sequence_number = client.append_message(
                AGGREGATE_STREAM,
                json.dumps(result).encode("utf-8"))
            logger.info(
                "Successfully appended aggregated data as sequence number %d",
                sequence_number,
            )
            break
        except Exception:
            logger.warning(
                "Exception while trying to append aggregated data. Try %d of %d.",
                tryNum + 1,
                retries,
                exc_info=True,
            )
            sleep(backoff)
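This example and example #3 below rely on a read_from_stream() helper, plus module-level lock, logger, and stream-name globals, that are not shown. A minimal sketch of the helper, assuming the higher-precision source stream is named by a LOCAL_STREAM constant, might look like this:

def read_from_stream(client: StreamManagerClient, msg_count: int, read_timeout_millis: int) -> list:
    # Read up to msg_count messages from the local source stream and decode the JSON payloads.
    messages = client.read_messages(
        LOCAL_STREAM,  # Assumed module-level constant naming the high-precision source stream
        ReadMessagesOptions(
            min_message_count=1,
            max_message_count=msg_count,
            read_timeout_millis=read_timeout_millis,
        ),
    )
    data = []
    for message in messages:
        record = json.loads(message.payload.decode("utf-8"))
        # Keep the sequence number so callers can record how far they have read (see example #3)
        record["sequence_number"] = message.sequence_number
        data.append(record)
    return data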
Code example #3
def read_from_stream_aggregate_and_publish(client: StreamManagerClient):
    """Read the higher precision local data stream, aggregate, and publish to the aggregate stream"""
    global aggregate_values
    raw_stream_data = read_from_stream(
        # Source data is approx 20 messages-per-second, so read 5 seconds worth (100)
        client=client,
        msg_count=100,
        read_timeout_millis=5000,
    )
    aggregated_data = {
        "avg_temperature": mean(map(lambda m: m["temperature"], raw_stream_data)),
        "avg_hertz": mean(map(lambda m: m["hertz"], raw_stream_data)),
        "timestamp": max(map(lambda m: m["timestamp"], raw_stream_data)),
        "last_sequence_number": max(map(lambda m: m["sequence_number"], raw_stream_data)),
    }
    # Update aggregate values
    lock.acquire()
    aggregate_values["avg_temperature"] = aggregated_data["avg_temperature"]
    aggregate_values["avg_hertz"] = aggregated_data["avg_hertz"]
    aggregate_values["timestamp"] = aggregated_data["timestamp"]
    lock.release()

    retries = 3
    backoff = 0.2
    # Try appending data up to 3 times. If that fails, then just move on.
    for tryNum in range(retries):
        try:
            sequence_number = client.append_message(
                AGGREGATE_STREAM,
                json.dumps(aggregated_data).encode("utf-8"))
            logger.info(
                "Successfully appended aggregated data as sequence number %d",
                sequence_number,
            )
            break
        except Exception:
            logger.warning(
                "Exception while trying to append aggregated data. Try %d of %d.",
                tryNum + 1,
                retries,
                exc_info=True,
            )
            sleep(backoff)
Code example #4
def main(logger):
    client = None
    try:
        stream_name = "SomeStream"
        status_stream_name = "SomeStatusStreamName"
        bucket_name = "SomeBucket"
        key_name = "SomeKey"
        file_url = "file:/path/to/some/file.someExtension"
        client = StreamManagerClient()

        # Try deleting the status stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=status_stream_name)
        except ResourceNotFoundException:
            pass

        # Try deleting the stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=stream_name)
        except ResourceNotFoundException:
            pass

        exports = ExportDefinition(s3_task_executor=[
            S3ExportTaskExecutorConfig(
                identifier="S3TaskExecutor" + stream_name,  # Required
                # Optional. Add an export status stream to add statuses for all S3 upload tasks.
                status_config=StatusConfig(
                    status_level=StatusLevel.INFO,  # Default is INFO level statuses.
                    # Status Stream should be created before specifying in S3 Export Config.
                    status_stream_name=status_stream_name,
                ),
            )
        ])

        # Create the Status Stream.
        client.create_message_stream(
            MessageStreamDefinition(
                name=status_stream_name,
                strategy_on_full=StrategyOnFull.OverwriteOldestData))

        # Create the message stream with the S3 Export definition.
        client.create_message_stream(
            MessageStreamDefinition(
                name=stream_name,
                strategy_on_full=StrategyOnFull.OverwriteOldestData,
                export_definition=exports))

        # Append an S3 export task definition and print the sequence number
        s3_export_task_definition = S3ExportTaskDefinition(input_url=file_url,
                                                           bucket=bucket_name,
                                                           key=key_name)
        logger.info(
            "Successfully appended S3 Task Definition to stream with sequence number %d",
            client.append_message(
                stream_name,
                Util.validate_and_serialize_to_json_bytes(
                    s3_export_task_definition)),
        )

        # Read the statuses from the export status stream
        is_file_uploaded_to_s3 = False
        while not is_file_uploaded_to_s3:
            try:
                messages_list = client.read_messages(
                    status_stream_name,
                    ReadMessagesOptions(min_message_count=1,
                                        read_timeout_millis=1000))
                for message in messages_list:
                    # Deserialize the status message first.
                    status_message = Util.deserialize_json_bytes_to_obj(
                        message.payload, StatusMessage)

                    # Check the status of the status message. If the status is "Success",
                    # the file was successfully uploaded to S3.
                    # If the status is "Failure" or "Canceled", the server was unable to upload the file to S3,
                    # and the status message explains why the upload failed.
                    # If the status is "InProgress", the server has started processing the S3 upload task.
                    if status_message.status == Status.Success:
                        logger.info("Successfully uploaded file at path " +
                                    file_url + " to S3.")
                        is_file_uploaded_to_s3 = True
                    elif status_message.status == Status.Failure or status_message.status == Status.Canceled:
                        logger.info("Unable to upload file at path " +
                                    file_url + " to S3. Message: " +
                                    status_message.message)
                        is_file_uploaded_to_s3 = True
                time.sleep(5)
            except StreamManagerException:
                logger.exception("Exception while running")
    except asyncio.TimeoutError:
        logger.exception("Timed out while executing")
    except Exception:
        logger.exception("Exception while running")
    finally:
        if client:
            client.close()
Code example #5
class ConnectorClient:
    """
    Creates a client for the connector.

    :param kinesis_stream_name: The Kinesis Data Stream name to which Stream Manager messages are sent.
    :param connection_retry: The number of times to retry connecting to Stream Manager (default: 10).
    """

    CONFIGURATION_PATH = "/m2c2/job"

    def __init__(self, kinesis_stream_name: str, connection_retry: int = 10):
        # class general variables
        self.has_started = False
        self.is_running = False
        self.kinesis_stream_name = kinesis_stream_name

        # Logging
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.INFO)

        # Stream Manager client and Greengrass IoT data client
        self.stream_name = "m2c2-stream"
        self.iot_client = greengrasssdk.client("iot-data")

        # Connection retry if Stream Manager is not ready
        for i in range(connection_retry):
            try:
                self.stream_manager_client = StreamManagerClient()
                break
            except Exception as err:
                if i == connection_retry - 1:
                    self.logger.error(
                        "Unable to connect to Stream Manager. Error: %s",
                        str(err))
                    self.stream_manager_client = None
                    break

                time.sleep(i + 1)

    def set_kinesis_stream_name(self, kinesis_stream_name: str) -> None:
        """
        Sets the Kinesis Stream name.

        :param kinesis_stream_name: The Kinesis Data Stream name to which Stream Manager messages are sent.
        """
        self.kinesis_stream_name = kinesis_stream_name

    def start_client(self, job_name: str, job_configuration: dict) -> None:
        """
        Starts the connector client. It sets up the Stream Manager client and writes the local job configuration file.
        """
        self.set_stream_manager_client()
        self.write_local_job_configuration_file(
            job_name=job_name, job_configuration=job_configuration)
        self.has_started = True
        self.is_running = True

    def stop_client(self) -> None:
        """
        Stops the connector client. It closes the Stream Manager connection.
        """
        self.close_stream_manager_client()
        self.is_running = False

    def set_stream_manager_client(self) -> None:
        """
        Sets the Stream Manager client only when a Kinesis Data Stream name is provided.
        Failure of the Stream Manager client does not block any other actions;
        any exception other than `InvalidRequestException` is logged instead of raised.
        """
        if self.kinesis_stream_name:
            # The client can be None if the connection attempts in __init__ failed.
            if self.stream_manager_client is None or not self.stream_manager_client.connected:
                self.stream_manager_client = StreamManagerClient()

            try:
                exports = ExportDefinition(kinesis=[
                    KinesisConfig(identifier="KinesisExport",
                                  kinesis_stream_name=self.kinesis_stream_name,
                                  batch_size=1)
                ])

                self.stream_manager_client.create_message_stream(
                    MessageStreamDefinition(
                        name=self.stream_name,
                        strategy_on_full=StrategyOnFull.OverwriteOldestData,
                        export_definition=exports))
            except InvalidRequestException:
                # One centralized Stream Manager stream is used to send data to the Kinesis Data Stream,
                # so an `InvalidRequestException` occurs when a new job is deployed and the stream already exists.
                pass
            except Exception as err:
                self.logger.error(
                    "Unknown error happened, so your Stream Manager might not be working: %s",
                    str(err))

    def close_stream_manager_client(self) -> None:
        """
        Closes the Stream Manager client.
        """
        try:
            self.stream_manager_client.close()
        except Exception:
            pass

    def publish_message_to_iot_topic(self, topic: str, payload: dict) -> None:
        """
        Publishes a message to the IoT topic.

        :param topic: The IoT topic to publish the payload.
        :param payload: The payload to publish.
        """
        try:
            self.iot_client.publish(topic=topic,
                                    qos=1,
                                    payload=json.dumps(payload))
        except Exception as err:
            self.logger.error(
                "Failed to publish telemetry data to the IoT topic. Error: %s",
                str(err))

    def append_stream_manager_message(self, message: dict) -> None:
        """
        Appends a message to the Stream Manager.

        :param message: The message to append.
        """
        try:
            self.stream_manager_client.append_message(
                stream_name=self.stream_name,
                data=json.dumps(message).encode("utf-8"))
        except Exception as err:
            self.logger.error(
                "Failed to append telemetry data to the Stream Manager. Error: %s",
                str(err))

    def read_local_job_configuration(self, job_name: str) -> dict:
        """
        Reads the local job configuration file.

        :param job_name: The job name to get the local job configuration.
        :return: The local configuration dictionary for the job. If the file does not exist, return an empty dictionary.
        :raises: :err:`Exception` when any exception happens.
        """
        try:
            file_name = "{path}/{job_name}.json".format(
                path=self.CONFIGURATION_PATH, job_name=job_name)

            if os.path.exists(file_name):
                with open(file_name) as file:
                    return json.load(file)
            else:
                return {}
        except Exception as err:
            self.logger.error("Failed to read the file: %s", str(err))
            raise Exception("Failed to read the file: {}".format(err))

    def write_local_job_configuration_file(self, job_name: str,
                                           job_configuration: dict) -> None:
        """
        Writes the local job configuration file.

        :param job_name: The job name to write the local job configuration.
        :param job_configuration: The job configuration to write locally.
        :raises: :err:`Exception` when any exception happens.
        """
        try:
            job_configuration["job"]["_last-update-timestamp_"] = datetime.now(
            ).strftime("%Y-%m-%d %H:%M:%S.%f")
            file_name = "{path}/{job_name}.json".format(
                path=self.CONFIGURATION_PATH, job_name=job_name)

            with open(file_name, "w+") as file:
                json.dump(job_configuration, file, indent=2)
        except Exception as err:
            self.logger.error("Failed to write to file: %s", str(err))
            raise Exception("Failed to write to file: {}".format(err))
Code example #6
def main(logger):
    client = None
    try:
        stream_name = "SomeStream"
        kinesis_stream_name = "MyKinesisStream"

        # Create a client for the StreamManager
        client = StreamManagerClient()

        # Try deleting the stream (if it exists) so that we have a fresh start
        try:
            client.delete_message_stream(stream_name=stream_name)
        except ResourceNotFoundException:
            pass

        exports = ExportDefinition(kinesis=[
            KinesisConfig(identifier="KinesisExport" + stream_name,
                          kinesis_stream_name=kinesis_stream_name)
        ])
        client.create_message_stream(
            MessageStreamDefinition(
                name=stream_name,
                strategy_on_full=StrategyOnFull.OverwriteOldestData,
                export_definition=exports))

        # Append 2 messages and print their sequence numbers
        logger.info(
            "Successfully appended message to stream with sequence number %d",
            client.append_message(stream_name,
                                  "ABCDEFGHIJKLMNO".encode("utf-8")),
        )
        logger.info(
            "Successfully appended message to stream with sequence number %d",
            client.append_message(stream_name, "PQRSTUVWXYZ".encode("utf-8")),
        )

        # Try reading the 2 messages we just appended and print them out
        logger.info(
            "Successfully read 2 messages: %s",
            client.read_messages(
                stream_name,
                ReadMessagesOptions(min_message_count=2,
                                    read_timeout_millis=1000)),
        )

        logger.info(
            "Now going to start writing random integers between 0 and 1000 to the stream"
        )
        # Now start putting in random data between 0 and 1000 to emulate device sensor input
        while True:
            logger.debug("Appending new random integer to stream")
            client.append_message(
                stream_name,
                random.randint(0, 1000).to_bytes(length=4,
                                                 signed=True,
                                                 byteorder="big"))
            time.sleep(1)

    except asyncio.TimeoutError:
        logger.exception("Timed out while executing")
    except Exception:
        logger.exception("Exception while running")
    finally:
        # Always close the client to avoid resource leaks
        if client:
            client.close()
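Example #6 appends each random integer as a 4-byte, signed, big-endian payload, so a reader of that stream must decode the bytes the same way. A minimal sketch of such a reader (message counts and timeout are illustrative):

# Read back up to 10 of the binary messages appended above and decode them.
for message in client.read_messages(
        stream_name,
        ReadMessagesOptions(min_message_count=1,
                            max_message_count=10,
                            read_timeout_millis=1000)):
    value = int.from_bytes(message.payload, byteorder="big", signed=True)
    logger.info("Read value %d at sequence number %d", value, message.sequence_number)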