def __init__(self, kinesis_stream_name: str, connection_retry: int = 10):
        # class general variables
        self.has_started = False
        self.is_running = False
        self.kinesis_stream_name = kinesis_stream_name

        # Logging
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.INFO)

        # Stream Manager client and Greengrass IoT data client
        self.stream_name = "m2c2-stream"
        self.iot_client = greengrasssdk.client("iot-data")

        # Connection retry if Stream Manager is not ready
        for i in range(connection_retry):
            try:
                self.stream_manager_client = StreamManagerClient()
                break
            except Exception as err:
                if i == connection_retry - 1:
                    self.logger.error(
                        "Unable to connect to Stream Manager. Error: %s",
                        str(err))
                    self.stream_manager_client = None
                    break

                time.sleep(i + 1)
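
A minimal usage sketch for the connector above (the stream name, retry budget, and logging setup are illustrative assumptions, not part of the original snippet):

import logging

logging.basicConfig(level=logging.INFO)

connector = ConnectorClient(kinesis_stream_name="my-kinesis-stream",
                            connection_retry=3)
if connector.stream_manager_client is None:
    # The retry loop gave up; callers must handle the degraded state.
    connector.logger.error("Stream Manager unavailable; not forwarding data.")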
Example 2
def greengrass_hello_world_run():
    # Create the Greengrass client so that we can send messages to the IoT console
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])

    # Stream configuration, name and retention
    # Note that the name will appear as deeplens-myStream
    stream_name = 'myStream'
    retention = 2  #hours

    # Amount of time to stream
    wait_time = 60 * 60 * 5  #seconds

    # Use the boto session API to grab credentials
    session = Session()
    creds = session.get_credentials()

    # Create producer and stream.
    producer = dkv.createProducer(creds.access_key, creds.secret_key,
                                  creds.token, "us-east-1")
    client.publish(topic=iot_topic, payload="Producer created")
    kvs_stream = producer.createStream(stream_name, retention)
    client.publish(topic=iot_topic,
                   payload="Stream {} created".format(stream_name))

    # Start putting data into the KVS stream
    kvs_stream.start()
    client.publish(topic=iot_topic, payload="Stream started")
    time.sleep(wait_time)
    # Stop putting data into the KVS stream
    kvs_stream.stop()
    client.publish(topic=iot_topic, payload="Stream stopped")
Example 3
    def loadGreengrassClient(self, config):
        logger.info("Loading Greengrass client...")

        # Return a client only when running inside Greengrass; otherwise there is nothing to do
        if is_greengrass_enabled():
            return greengrasssdk.client("iot-data")
        return None
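
is_greengrass_enabled() is not shown in this listing; a plausible minimal implementation keys off an environment variable that is set inside the Greengrass container (an assumption, not the project's actual helper):

import os

def is_greengrass_enabled():
    # Hypothetical check: Greengrass Lambdas run with AWS_IOT_THING_NAME set.
    return "AWS_IOT_THING_NAME" in os.environ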
Example 4
def function_handler(event, context):
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    client = greengrasssdk.client("iot-data")

    data = event

    toggle = None
    if ("toggle" in data):
        toggle = data["toggle"]

    if (toggle == None):
        logger.info("toggle was never passed to Lambda, exiting.")
        return

    if (toggle.lower() == "on"):
        logger.info("Sending \"On\" signal to coffee machine.")
        topic = "$aws/things/Home_IoT_Devices_Coffee/shadow/update"
        payload = '{"state":{"desired":{"property": "on"}}}'
        client.publish(topic=topic, payload=payload)
    elif (toggle.lower() == "off"):
        logger.info("Sending \"Off\" signal to coffee machine.")
        topic = "$aws/things/Home_IoT_Devices_Coffee/shadow/update"
        payload = '{"state":{"desired":{"property": "off"}}}'
        client.publish(topic=topic, payload=payload)
    else:
        logger.info("Invalid toggle, options are \"on\" or \"off.\"")
Example 5
def write_image_to_s3(img, output, time, file_name, devices, resized_img):
    # Create an IoT client for sending messages to the cloud.
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
    session = Session()
    s3 = session.create_client('s3')
    device = devices[os.environ['AWS_IOT_THING_NAME']]
    record = 'json/record_' + device + '_' + time + '.json'
    #path to a resized image
    resized_image = 'frames_resized/resized_' + device + '_' + time + '.jpg'
    #latest record uploaded to its own directory
    latest = 'latest/latest.json'
    # You can control the size and quality of the image
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)
    _, resized_data = cv2.imencode('.jpg', resized_img, encode_param)
    response = s3.put_object(Body=jpg_data.tobytes(),
                             Bucket='YOUR-BUCKET-NAME',
                             Key=file_name)
    response2 = s3.put_object(Body=json.dumps(output),
                              Bucket='YOUR-BUCKET-NAME',
                              Key=record)
    response3 = s3.put_object(Body=json.dumps(output),
                              Bucket='YOUR-BUCKET-NAME',
                              Key=latest)
    response4 = s3.put_object(Body=resized_data.tobytes(),
                              Bucket='YOUR-BUCKET-NAME',
                              Key=resized_image)

    #client.publish(topic=iot_topic, payload="Response: {}".format(response))
    client.publish(topic=iot_topic, payload="Response: {}".format(response2))
    client.publish(topic=iot_topic, payload="Data pushed to S3")

    image_url = 'https://s3.amazonaws.com/YOUR-BUCKET-NAME/' + file_name
    return image_url
def init_greengrass():

    # This face detection model is implemented as single shot detector (ssd).
    model_type = 'ssd'
    output_map = {1: 'face'}
    # Create an IoT client for sending messages to the cloud.
    client = greengrasssdk.client('iot-data')

    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
    # Create a local display instance that will dump the image bytes to a FIFO
    # file that the image can be rendered locally.
    local_display = LocalDisplay('480p')
    local_display.start()
    # The sample projects come with optimized artifacts, hence only the artifact
    # path is required.
    model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml'
    # Load the model onto the GPU.
    client.publish(topic=iot_topic, payload='Loading face detection model')
    model = awscam.Model(model_path, {'GPU': 1})
    client.publish(topic=iot_topic, payload='Face detection model loaded')
    # Set the threshold for detection
    detection_threshold = 0.5
    # The height and width of the training set images
    input_height = 300
    input_width = 300

    return model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width
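
Consuming the ten-value return is verbose; this is the unpacking a caller would need (names as above). A small dict or config object would be easier to keep in sync:

(model_type, output_map, client, iot_topic, local_display,
 model_path, model, detection_threshold,
 input_height, input_width) = init_greengrass()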
def infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This cat-dog model is implemented as a binary classifier. Since the
        # number of labels is small, create a dictionary that converts the
        # machine labels to human-readable labels.
        model_type = 'classification'
        output_map = {0: 'dog', 1: 'cat'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading action cat-dog model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Cat-Dog model loaded')
        # Since this is a binary classifier only retrieve 2 classes.
        num_top_k = 2
        # The height and width of the training set images
        input_height = 224
        input_width = 224
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is a classification model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(model_type,
                                                         model.doInference(frame_resize))
            # Get top k results with highest probabilities
            top_k = parsed_inference_results[model_type][0:num_top_k]
            # Add the label of the top result to the frame used by local display.
            # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
            # for more information about the cv2.putText method.
            # Method signature: image, text, origin, font face, font scale, color, and thickness
            cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send the top k results to the IoT console via MQTT
            cloud_output = {}
            for obj in top_k:
                cloud_output[output_map[obj['label']]] = obj['prob']
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))
Example 8
def main():

    client = greengrasssdk.client('iot-data')

    p1_meter = dsmr4_reader.Meter(p1_port, simulate=False)
    glob_p1_data = p1_meter.get_telegram()

    if glob_p1_data is not None:

        # Ugly hack for now, as AWS IoT Analytics requires underscores in field names
        p1_data = json.dumps(glob_p1_data).replace("-", "_")

        # power_data['p1'] = p1_data
        # print "**********************"
        # print power_data
        #
        # client.publish(topic + "/timestamp", str(int(time.time())))
        # logger.debug(p1_data)

        client.publish(topic=topic + "/p1", payload=p1_data, qos=1)

        # for k, v in glob_p1_data.iteritems():
        #     logger.debug("Topic: {0}/p1 Key: {1} Value: {2}"
        #                     .format(topic, k.replace("-", "_"), v))
        #     # client.publish(topic + "/" + k, v)

    sunspec_client = sunspec_modbus_tcp_reader.SunSpecModBusTcpClient(
        pv_address, 502, 2)
    glob_pv_data = sunspec_client.get_sunspec_data()

    if glob_pv_data is not None:

        pv_data = json.dumps(glob_pv_data)
        logger.debug(pv_data)

        client.publish(topic=topic + "/pv", payload=pv_data, qos=1)

        # client.publish(topic + "/timestamp", str(int(time.time())))
        # for k, v in glob_pv_data.iteritems():

        # logger.debug("Topic: {0}/pv Key: {1} Value: {2}"
        # .format(topic, k, v))
        # client.publish(topic + "/" + k, v)
    else:
        logger.warning('No PV Data! Sun down? or Logger Down')

    # Asynchronously schedule this function to be run again in 10 seconds
    Timer(poll_interval, main).start()
Example 9
    def __init__(self, admin, main, thing):
        self.admin = admin
        self.main = main
        self.thing = thing

        if platform.system() != 'Darwin':
            GGC = greengrasssdk.client('iot-data')
            def debug(topic=self.admin, payload=None):
                # Avoid a shared mutable default; copy before annotating.
                payload = dict(payload or {})
                payload['thing'] = self.thing
                GGC.publish(topic=topic, payload=json.dumps(payload))
            self.publish = debug
        else:
            def debug(topic=self.admin, payload=None):
                payload = dict(payload or {})
                payload['thing'] = self.thing
                print(topic, json.dumps(payload))
            self.publish = debug
    def __init__(self, client=None):
        # If the Greengrass SDK cannot be loaded, fall back to boto3 so the
        # code can still run (this also makes greengrasssdk easy to mock).
        if client is None:
            try:
                import greengrasssdk
                iotdata = greengrasssdk.client('iot-data')
                logger.info('Use greengrasssdk.')
            except Exception:
                import boto3
                iotdata = boto3.client('iot-data')
                logger.info('Use boto3.')

            self.__client = iotdata
        else:
            self.__client = client
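
Because the constructor accepts an injected client, tests can bypass the greengrasssdk/boto3 fallback entirely. A sketch with a stub (the wrapper's class name is not shown here, so MessageClient is hypothetical):

class StubIotData:
    def publish(self, topic, payload, **kwargs):
        print("publish:", topic, payload)

wrapper = MessageClient(client=StubIotData())  # hypothetical class name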
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        print("About to greengrass_infinite_infer_run()")
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The height and width of the training set images
        input_height = 300
        input_width = 300

        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            #frame_resize = cv2.resize(frame, (input_height, input_width))

            # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
            # for more information about the cv2.rectangle method.
            # Method signature: image, point1, point2, color, and thickness.
            frame = cv2.rectangle(frame, (0, 0), (40, 20), (255, 165, 20), 10)
            # Amount to offset the label/probability text above the bounding box.
            #text_offset = 15
            # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
            # for more information about the cv2.putText method.
            # Method signature: image, text, origin, font face, font scale, color,
            # and thickness
            #cv2.putText(frame, "Project Stream"),
            #            (0, text_offset),
            #            cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud. cloud_output is not populated in this
            # trimmed example, so publish an empty payload.
            cloud_output = {}
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        client.publish(
            topic=iot_topic,
            payload='Error in object detection lambda: {}'.format(ex))
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client("iot-data")
        iot_topic = "$aws/things/{}/infer".format(
            os.environ["AWS_IOT_THING_NAME"])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay("480p")
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = "/opt/awscam/artifacts/mxnet_style_FP32_FUSE.xml"
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload="Loading style transfer model")
        model = awscam.Model(model_path, {"GPU": 1})
        client.publish(topic=iot_topic, payload="Style transfer model loaded")
        # The style model is classified as a segmentation model, in the sense that the output is
        # an image.
        model_type = "segmentation"
        # The height and width of the training set images
        input_height = 224
        input_width = 224
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception("Failed to get frame from the stream")
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results
            infer_output = model.doInference(frame_resize)
            parsed_results = infer_output["Convolution_last_conv"]
            parsed_results = np.reshape(parsed_results[::-1],
                                        (input_height, input_width, 3),
                                        order="F")
            parsed_results = np.rot90(parsed_results)
            # Set the next frame in the local display stream.
            local_display.set_frame_data(parsed_results * 2)

    except Exception as ex:
        print("Error", ex)
        client.publish(topic=iot_topic,
                       payload="Error in style transfer lambda: {}".format(ex))
Example 13
def write_training_data(raw_file_name, raw_img, train_annotation):
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
    session = Session()
    s3 = session.create_client('s3')
    # path to raw_image
    raw_image_path = 'raw_images/' + raw_file_name + '.jpg'
    #path to json annotation
    json_annotation_path = 'training_annotation/' + raw_file_name + '.json'

    # You can control the size and quality of the image
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    _, raw_data = cv2.imencode('.jpg', raw_img, encode_param)
    raw_image_response = s3.put_object(Body=raw_data.tobytes(),
                                       Bucket='YOUR-BUCKET-NAME',
                                       Key=raw_image_path)
    annotation_response = s3.put_object(Body=json.dumps(train_annotation),
                                        Bucket='YOUR-BUCKET-NAME',
                                        Key=json_annotation_path)
Example 14
    def __init__(self, thing='default', prefix='sputnik', dummy=False):
        self.thing = thing
        self.prefix = prefix
        self.topicPrefix = self.prefix + '/' + self.thing + '/'
        self.topicLogger = self.topicPrefix + 'logger'

        if not dummy:
            import greengrasssdk
            ggsdk = greengrasssdk.client('iot-data')

            def prepPublish(topic=self.topicLogger, payload={}):
                ggsdk.publish(topic=topic, payload=json.dumps(payload))

            def prepUpdateShadow(thing=self.thing, payload={}):
                ggsdk.update_thing_shadow(thingName=thing, payload=json.dumps(payload))

            def prepGetShadow(thingName=self.thing):
                response = ggsdk.get_thing_shadow(thingName=thingName)
                payloadDict = json.loads(response['payload'])
                return payloadDict

            self.publish = prepPublish
            self.updateThingShadow = prepUpdateShadow
            self.getThingShadow = prepGetShadow

        else:
            logging.warning("Setting up GGSDK in dummy mode")

            def debug(topic=self.topicLogger, payload={}):
                logging.debug(topic + ': ' + json.dumps(payload))

            def debugUpdateShadow(thing=self.thing, payload={}):
                logging.debug("ggsdk.updateThingShadow: " + thing + ": " + json.dumps(payload))

            def debugGetShadow(thing=self.thing, payload={}):
                logging.debug("ggsdk.getThingShadow: " + thing + ": {}")
                return {}

            self.publish = debug
            self.updateThingShadow = debugUpdateShadow
            self.getThingShadow = debugGetShadow
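
Dummy mode makes the wrapper usable off-device. A usage sketch (the class name is not shown in this listing, so GGSDKWrapper is hypothetical):

sdk = GGSDKWrapper(thing='bench-01', dummy=True)  # hypothetical class name
sdk.publish(payload={'status': 'ok'})             # logged, not published
sdk.updateThingShadow(payload={'state': {'reported': {'ok': True}}})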
Example 15
import json
import logging
import os
import platform
import sys
import time
from threading import Timer

from redis import Redis

import greengrasssdk

#Configuration
TOPIC = os.environ['topic']  #"car1/sensors"
FREQUENCY = int(os.environ['frequency'])  # 5

# Setup logging to stdout
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

# Creating a greengrass core sdk client
client = greengrasssdk.client("iot-data")

my_platform = platform.platform()


def upload_to_cloud():
    try:
        cli = Redis('localhost')
        ultrasonic_M = int(cli.get('ultrasonic_M'))
        infrared_LMR = int(cli.get('infrared_LMR'))
        #battery_level = int(cli.get('battery_level'))
        #motor_1 = int(cli.get('motor_1'))
        #motor_2 = int(cli.get('motor_2'))
        #motor_3 = int(cli.get('motor_3'))
        #motor_4 = int(cli.get('motor_4'))
        #rfid_pos = int(cli.get('rfid_pos'))

        # The listing is truncated here; publishing the two readings is a
        # minimal, assumed completion so the try block is well formed.
        payload = json.dumps({'ultrasonic_M': ultrasonic_M,
                              'infrared_LMR': infrared_LMR})
        client.publish(topic=TOPIC, payload=payload)
    except Exception as err:
        logger.error("Failed to read sensors from Redis: %s", err)

# Site name from Greengrass Lambda Environment variables
SITE_NAME = os.environ["sitename"]
# Area from Greengrass Lambda Environment variables
AREA = os.environ["area"]
# Process from Greengrass Lambda Environment variables
PROCESS = os.environ["process"]
# Machine name from Greengrass Lambda Environment variables
MACHINE_NAME = os.environ["machinename"]
# Kinesis Data Stream name
KINESIS_STREAM_NAME = os.environ["kinesisstream"]
# Connection retry count
CONNECTION_RETRY = 10
# Error retry count
ERROR_RETRY = 5

# Clients and logging
opcda_iot_client = greengrasssdk.client("iot-data")
connector_client = ConnectorClient(kinesis_stream_name=KINESIS_STREAM_NAME,
                                   connection_retry=CONNECTION_RETRY)
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def post_to_user(post_type, message):
    """Post messages to users through the IoT topic and Stream Manager."""

    user_message = {
        "_id_": str(uuid.uuid4()),
        "_timestamp_": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    }

    if version:  # 'version' is defined elsewhere in the original module
        # The listing is truncated here.
        pass
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This face detection model is implemented as single shot detector (ssd).
        model_type = 'ssd'
        output_map = {1: 'face'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        thing_name = os.environ['AWS_IOT_THING_NAME']
        iot_topic = '$aws/things/{}/infer'.format(thing_name)
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # Create an S3 background uploader
        session = Session()
        s3 = session.create_client('s3',
                                   region_name=os.getenv(
                                       'REGION_NAME', 'ap-southeast-2'))
        bucket = os.getenv('FRAMES_BUCKET',
                           'virtual-concierge-frames-ap-southeast-2')
        uploader = ImageUploader(s3, bucket, client, iot_topic)
        uploader.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_dir = '/opt/awscam/artifacts/'
        model_path = model_dir + 'mxnet_deploy_ssd_FP16_FUSED.xml'
        # Load the model onto the GPU.
        msg = 'Loading face detection model for {}'.format(thing_name)
        client.publish(topic=iot_topic, payload=msg)
        model_start = time.time()
        model = awscam.Model(model_path, {'GPU': 1})
        msg = 'Face detection model loaded in {}s'.format(time.time() -
                                                          model_start)
        client.publish(topic=iot_topic, payload=msg)
        # Attempt to load the scorer library. Start from None so the
        # 'if scorer:' check below is safe when loading fails.
        scorer = None
        try:
            model_start = time.time()
            scorer = Scorer(model_dir)
            msg = 'Image classification model loaded {} in {}s'.format(
                scorer.vecs.shape[0],
                time.time() - model_start)
            client.publish(topic=iot_topic, payload=msg)
        except Exception as e:
            print('Failed to load scorer', e)
        # Set the threshold for detection
        detection_threshold = float(os.getenv('DETECT_THRESHOLD', '0.7'))
        # This is the similarity threshold; SIM_THRESHOLD is an assumed
        # variable name (the snippet reused DETECT_THRESHOLD here).
        sim_threshold = float(os.getenv('SIM_THRESHOLD', '0.99'))
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        while True:
            # get thing shadow state, to see if we should register
            cloud_output = {}
            # Get a frame from the video stream
            cloud_output["frame_start"] = time.time()
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Future integrate the shadow callback
            if False:
                cloud_output["shadow_start"] = time.time()
                shadow = client.get_thing_shadow(thingName=thing_name)
                jsonState = json.loads(shadow["payload"])
                register = jsonState['state']['desired'].get('register')
                cloud_output["shadow_register"] = register
                cloud_output["shadow_latency"] = time.time(
                ) - cloud_output["shadow_start"]
            # Resize frame to the same size as the training set.
            cloud_output["detect_start"] = time.time()
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is an SSD model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            cloud_output["detect_latency"] = time.time(
            ) - cloud_output["detect_start"]
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            # Get the detected faces and probabilities
            for i, obj in enumerate(parsed_inference_results[model_type]):
                if obj['prob'] > detection_threshold:
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width/2) + input_width/2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2) + input_width/2)
                    ymax = int(yscale * obj['ymax'])
                    # Set the default title and color
                    title = '{:.2f}%'.format(obj['prob'] * 100)
                    color = (255, 0, 0)  # blue
                    upload = False
                    if scorer:
                        try:
                            # Attempt to find similar face
                            cloud_output['classify_start'] = time.time()
                            bbox = [xmin, ymin, xmax, ymax]
                            vec = scorer.vectorize(frame, bbox)
                            sim, z_score, prob, name = scorer.similar(vec)
                            if prob >= sim_threshold:
                                title = name
                                if round(prob, 3) < 1.0:
                                    title += ' ({:.2f}%)'.format(prob)
                                color = (0, 255, 0)  # green
                                upload = True
                            cloud_output['classify'] = {
                                'name': name,
                                'sim': float(sim),
                                'zscore': float(z_score),
                                'prob': float(prob)
                            }
                            cloud_output['classify_latency'] = time.time(
                            ) - cloud_output['classify_start']
                        except Exception as e:
                            msg = "Face similarity error: " + str(e)
                            client.publish(topic=iot_topic, payload=msg)
                    if upload:
                        try:
                            metadata = {
                                'ThingName': thing_name,
                                'FullName': title,
                                'Confidence': str(obj['prob']),
                                'Similarity': str(cloud_output['classify']['sim']),
                                'Probability': str(cloud_output['classify']['prob']),
                                # Height spans y, width spans x.
                                'FaceHeight': str(ymax - ymin),
                                'FaceWidth': str(xmax - xmin),
                            }
                            crop_img = uploader.crop(frame, xmin, ymin, xmax,
                                                     ymax)
                            item = uploader.upload(crop_img,
                                                   i,
                                                   metadata=metadata)
                            if item:
                                cloud_output['upload_key'] = item['key']
                            else:
                                cloud_output['upload_skip'] = True
                        except Exception as e:
                            msg = "Upload error: " + str(e)
                            client.publish(topic=iot_topic, payload=msg)
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cloud_output["draw_start"] = time.time()
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 12
                    cv2.putText(frame, title, (xmin, ymin - text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, color, 6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']
                    cloud_output["draw_latency"] = time.time(
                    ) - cloud_output["draw_start"]
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            cloud_output["frame_end"] = time.time()
            cloud_output["frame_latency"] = cloud_output[
                "frame_end"] - cloud_output["frame_start"]
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        print('Error in face detection lambda: {}'.format(ex))
Example 18
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
# This sample is used in the AWS IoT Greengrass Developer Guide: 
# https://docs.aws.amazon.com/greengrass/latest/developerguide/secrets-console.html
#
# snippet-start:[greengrass.python.secret-resource-access.complete]
import greengrasssdk
 
# Create SDK clients.
secrets_client = greengrasssdk.client('secretsmanager')
message_client = greengrasssdk.client('iot-data')
message = ''

# This handler is called when the function is invoked.
# It uses the 'secretsmanager' client to get the value of the test secret using the secret name.
# The test secret is a text type, so the SDK returns a string. 
# For binary secret values, the SDK returns a base64-encoded string.
def function_handler(event, context):
    response = secrets_client.get_secret_value(SecretId='greengrass-TestSecret')
    secret_value = response.get('SecretString')
    if secret_value is None:
        message = 'Failed to retrieve secret.'
    else:
        message = 'Success! Retrieved secret.'

    # The listing is truncated here; the topic name below is an assumption.
    message_client.publish(topic='secrets/output', payload=message)
# snippet-end:[greengrass.python.secret-resource-access.complete]
Example 19
    # Requires: from typing import Any, Dict, List
    def __init__(self, ipc_topic: str, fields: List[str], option: Dict[str, Any]) -> None:
        self.ipc_topic = ipc_topic
        self._channel = greengrasssdk.client('iot-data')
        self._buffer = []
        self._fields = fields
        self._message = {}
Example 20
def infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This object detection model is implemented as a single shot detector
        # (SSD). Since the number of labels is small, we create a dictionary
        # that converts the machine labels to human-readable labels.
        model_type = "ssd"
        output_map = {
            1: "aeroplane",
            2: "bicycle",
            3: "bird",
            4: "boat",
            5: "bottle",
            6: "bus",
            7: "car",
            8: "cat",
            9: "chair",
            10: "cow",
            11: "dinning table",
            12: "dog",
            13: "horse",
            14: "motorbike",
            15: "person",
            16: "pottedplant",
            17: "sheep",
            18: "sofa",
            19: "train",
            20: "tvmonitor",
        }
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client("iot-data")
        iot_topic = "$aws/things/{}/infer".format(os.environ["AWS_IOT_THING_NAME"])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay("480p")
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = (
            "/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml"
        )
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload="Loading object detection model")
        model = awscam.Model(model_path, {"GPU": 1})
        client.publish(topic=iot_topic, payload="Object detection model loaded")
        # Set the threshold for detection
        detection_threshold = 0.5
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception("Failed to get frame from the stream")
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is an SSD model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize)
            )
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Get the detected objects and probabilities
            for obj in parsed_inference_results[model_type]:
                if (
                    obj["prob"] > detection_threshold
                    and output_map[obj["label"]] == "person"
                ):
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj["xmin"])
                    ymin = int(yscale * obj["ymin"])
                    xmax = int(xscale * obj["xmax"])
                    ymax = int(yscale * obj["ymax"])
                    # if a person was found, upload the target area to S3 for further inspection
                    # get the person image
                    try:
                        person = frame[ymin:ymax, xmin:xmax]
                        # create a s3 file key
                        filename = (
                            datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S.%f")
                            + ".jpg"
                        )
                        _, jpg_data = cv2.imencode(".jpg", person)
                        key = "incoming/{}".format(filename)
                        res = s3.put_object(
                            ACL="public-read",
                            Body=jpg_data.tobytes(),
                            Bucket=storage_name,
                            Key=key,
                        )
                        print(res)
                        client.publish(topic=iot_topic, payload=res)
                    except Exception as ex:
                        print("Error", ex)
                        client.publish(
                            topic=iot_topic,
                            payload="Error in s3 put lambda: {}".format(ex),
                        )
            # Get the detected objects and probabilities
            for obj in parsed_inference_results[model_type]:
                if (
                    obj["prob"] > detection_threshold
                    and output_map[obj["label"]] == "person"
                ):
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj["xmin"])
                    ymin = int(yscale * obj["ymin"])
                    xmax = int(xscale * obj["xmax"])
                    ymax = int(yscale * obj["ymax"])
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 5)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness
                    cv2.putText(
                        frame,
                        "{}: {:.2f}%".format(
                            output_map[obj["label"]], obj["prob"] * 100
                        ),
                        (xmin, ymin - text_offset),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        2.5,
                        (255, 165, 20),
                        5,
                    )
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj["label"]]] = obj["prob"]

            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        print("Error", ex)
        client.publish(
            topic=iot_topic, payload="Error in object detection lambda: {}".format(ex)
        )
Example 21
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        lapse = 0
        interval = 10
        # This object detection model is implemented as a single shot detector
        # (SSD). Since the number of labels is small, we create a dictionary
        # that converts the machine labels to human-readable labels.
        model_type = 'ssd'
        output_map = {1: 'thumbs-up'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_name = 'model_algo_1'
        # aux_inputs = {'--epoch': 30}
        # error, model_path = mo.optimize(model_name, 512, 512, 'MXNet', aux_inputs)
        # if not error:
        #     raise Exception('Failed to optimize model')
        model_path = '/opt/awscam/artifacts/model_algo_1.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic,
                       payload='Loading object detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic,
                       payload='Object detection model loaded')
        # Debugging AWS_IOT_THING_NAME
        client.publish(topic=iot_topic,
                       payload=os.environ['AWS_IOT_THING_NAME'])
        # Set the threshold for detection
        detection_threshold = 0.90
        # The height and width of the training set images
        input_height = 512
        input_width = 512
        # A dictionary to identify device id's
        devices = {}
        devices['deeplens_HoVip9KQTXiC3UFub47lJA'] = "Seattle01"
        devices['deeplens_bmTWwitIRUi_mASjZASUHA'] = "Chicago01"
        devices['deeplens_Hgs6kj_yQASF2x-3fOxCHA'] = "Chicago02"

        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))

            ##Store a raw resized image before inference to be used for retraining later
            raw_training = cv2.resize(frame, (input_height, input_width))

            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is an SSD model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Index to keep track of multiple detections in a frame
            index = 0
            # Get the detected objects and probabilities
            # A dictionary containing data for a single detection in a frame
            frame_time = time.strftime("%Y%m%d-%H%M%S")
            detection = {}
            detection["device_id"] = devices[os.environ['AWS_IOT_THING_NAME']]
            detection["timestamp"] = frame_time
            # A list that will contain the information of the objects detected in the frame
            objects_det = []
            # A boolean recording if a detection was made or not
            detection_made = False

            ##Set a list of annotations to be outputted in json file for training
            annotations = []

            for obj in parsed_inference_results[model_type]:
                # A dictionary to contain the info from an object detected
                object = {}

                # A dictionary containing annotation info for retraining
                annotation = {}

                if obj['prob'] > detection_threshold:

                    # Set the annotation data for retraining
                    annotation['class_id'] = 0
                    annotation['left'] = int(obj['xmin'])
                    annotation['top'] = int(obj['ymin'])
                    annotation['width'] = abs(
                        int(obj['xmax']) - int(obj['xmin']))
                    annotation['height'] = abs(
                        int(obj['ymax']) - int(obj['ymin']))
                    ## append to the list of annotations
                    annotations.append(annotation)

                    #Set detection_made to true
                    detection_made = True
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                          + int((obj['xmin'] - input_width/2))
                    ymin = int(yscale * obj['ymin']) \
                           + int((obj['ymin'] - input_height/2))
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2))
                    ymax = int(yscale * obj['ymax']) \
                           + int((obj['ymax'] - input_height/2))
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness
                    cv2.putText(
                        frame, "{}: {:.2f}%".format(output_map[obj['label']],
                                                    obj['prob'] * 100),
                        (xmin, ymin - text_offset), cv2.FONT_HERSHEY_SIMPLEX,
                        2.5, (255, 165, 20), 6)
                    ##This is putting bounding boxes for images resized to 512
                    cv2.rectangle(frame_resize,
                                  (int(obj['xmin']), int(obj['ymin'])),
                                  (int(obj['xmax']), int(obj['ymax'])),
                                  (255, 165, 20), 10)
                    cv2.putText(
                        frame_resize,
                        "{}: {:.2f}%".format(output_map[obj['label']],
                                             obj['prob'] * 100),
                        (int(obj['xmin']), int(obj['ymin']) - text_offset),
                        cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)
                    # Store label and probability to send to cloud
                    #cloud_output[output_map[obj['label']]] = obj['prob']
                    ##Add to the dictionary of cloudoutput the index of the detection
                    #cloud_output['index'] = index
                    # set detection data for the object
                    object["index"] = index
                    object["object"] = output_map[obj['label']]
                    object["confidence"] = obj['prob']
                    # append the object to the list of detections for the frame
                    objects_det.append(object)
                    index += 1
            # Add the detections to the dictionary for the frame
            detection["objects"] = objects_det

            # add a link to the image to detections
            img_file_name = 'images/image_' + detection[
                "device_id"] + '_' + frame_time + '.jpg'
            link = 'https://s3.amazonaws.com/thumbs-up-output-3/' + img_file_name
            detection["link_to_img"] = link

            # a filename for the raw image to be used for training later
            raw_file_name = 'retrain_' + detection[
                "device_id"] + '_' + frame_time

            # Upload to S3 to allow viewing the image in the browser, only if a detection was made
            if detection_made and lapse > interval:
                #Create the json for retraining
                training_json = create_training_json(raw_file_name,
                                                     annotations)
                #Upload the retraining data
                write_training_data(raw_file_name, raw_training, training_json)
                #Upload the inference data
                image_url = write_image_to_s3(frame, detection, frame_time,
                                              img_file_name, devices,
                                              frame_resize)
                #Publish success messages
                client.publish(topic=iot_topic,
                               payload='{{"img":"{}"}}'.format(image_url))
                client.publish(topic=iot_topic, payload=json.dumps(detection))
                lapse = 0
            else:
                client.publish(topic=iot_topic, payload="NO DETECTIONS MADE")
                lapse += 1
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
    except Exception as ex:
        client.publish(
            topic=iot_topic,
            payload='Error in object detection lambda: {}'.format(ex))
Example 22
# When this function runs, you should be able to see 'Invoked successfully'
# returned by the 'invokee' lambda. A lambda function can support a non-JSON
# payload, which is a new feature introduced in GGC version 1.5.

import base64
import json
import logging
import sys

import greengrasssdk

# Setup logging to stdout
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

client = greengrasssdk.client("lambda")


def handler(event, context):
    client_context = json.dumps({"custom": "custom text"})

    try:
        response = client.invoke(
            ClientContext=base64.b64encode(client_context.encode("utf-8")),
            FunctionName=
            "arn:aws:lambda:<region>:<accountId>:function:<targetFunctionName>:<targetFunctionQualifier>",
            InvocationType="RequestResponse",
            Payload="Non-JSON Data",
            Qualifier="1",
        )
        # The listing is truncated here; logging the response payload is a
        # minimal, assumed completion.
        logger.info(response["Payload"].read())
    except Exception as e:
        logger.error("Failed to invoke Lambda: %s", e)

def infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This face detection model is implemented as single shot detector (ssd).
        model_type = "ssd"
        output_map = {1: "face"}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client("iot-data")
        iot_topic = "$aws/things/{}/infer".format(os.environ["AWS_IOT_THING_NAME"])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay("480p")
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = "/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml"
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload="Loading face detection model")
        model = awscam.Model(model_path, {"GPU": 1})
        client.publish(topic=iot_topic, payload="Face detection model loaded")
        # Set the threshold for detection
        detection_threshold = 0.25
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception("Failed to get frame from the stream")
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is an SSD model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize)
            )
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Get the detected faces and probabilities
            for obj in parsed_inference_results[model_type]:
                if obj["prob"] > detection_threshold:
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj["xmin"])
                    ymin = int(yscale * obj["ymin"])
                    xmax = int(xscale * obj["xmax"])
                    ymax = int(yscale * obj["ymax"])
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness
                    cv2.putText(
                        frame,
                        "{:.2f}%".format(obj["prob"] * 100),
                        (xmin, ymin - text_offset),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        2.5,
                        (255, 165, 20),
                        6,
                    )
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj["label"]]] = obj["prob"]
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        client.publish(
            topic=iot_topic, payload="Error in face detection lambda: {}".format(ex)
        )
import datetime
import json
import os
from collections import OrderedDict

import greengrasssdk
from openvino.inference_engine import IENetwork, IEPlugin

# Specify the delta in seconds between each report
reporting_interval = 1.0

# Parameters for IoT Cloud
enable_iot_cloud_output = True

# Parameters for jpeg output on local disk
enable_local_jpeg_output = True

# Create a Greengrass Core SDK client for publishing messages to AWS Cloud
client = greengrasssdk.client("iot-data")


# Read environment variables set by Lambda function configuration
PARAM_MODEL_XML = os.environ.get("PARAM_MODEL_XML")
PARAM_INPUT_SOURCE = os.environ.get("PARAM_INPUT_SOURCE")
PARAM_DEVICE = os.environ.get("PARAM_DEVICE")
PARAM_OUTPUT_DIRECTORY = os.environ.get("PARAM_OUTPUT_DIRECTORY")
PARAM_CPU_EXTENSION_PATH = os.environ.get("PARAM_CPU_EXTENSION_PATH")
PARAM_LABELMAP_FILE = os.environ.get("PARAM_LABELMAP_FILE")
PARAM_TOPIC_NAME = os.environ.get("PARAM_TOPIC_NAME", "intel/faas/ssd")

def report(res_json, frame):
    now = datetime.datetime.now()
    date_prefix = str(now).replace(" ", "_")
    if enable_iot_cloud_output:
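        # The original snippet is truncated here; the lines below are an
        # assumed reconstruction based on the flags and parameters above.
        # res_json is assumed to already be a JSON-encoded string.
        client.publish(topic=PARAM_TOPIC_NAME, payload=res_json)
    if enable_local_jpeg_output:
        # Write the annotated frame to the output directory (assumed behavior).
        cv2.imwrite(
            os.path.join(PARAM_OUTPUT_DIRECTORY, date_prefix + ".jpg"), frame)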
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#snippet-start:[greengrass.python.secret-resource-access-staging-label.complete]
import greengrasssdk

# Creating a greengrass core sdk client
client = greengrasssdk.client('secretsmanager')


# This handler is called when the function is invoked
# It uses the secretsmanager client to get the value of a secret
def function_handler(event, context):
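    # The snippet description mentions accessing a specific secret version;
    # get_secret_value also accepts VersionStage (a staging label), e.g.
    # VersionStage='MyTargetLabel'. Without it, the current value is returned.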
    response = client.get_secret_value(SecretId='greengrass-MySecret-abc')
    raw_secret = response.get('SecretString')


#snippet-end:[greengrass.python.secret-resource-access-staging-label.complete]
#
#snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
#snippet-sourcedescription:[Accesses a specific version of a secret on the core. https://docs.aws.amazon.com/greengrass/latest/developerguide/secrets-using.html ]
#snippet-keyword:[Python]
#snippet-keyword:[Code Sample]
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This face detection model is implemented as a single shot detector (SSD).
        model_type = 'ssd'
        output_map = {1: 'face'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading face detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Face detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.25
        # The height and width of the training set images
        input_height = 300
        input_width = 300

        #schedule.every(30).seconds.do(upload_tracked_people_result)

        # Do inference until the lambda is killed.
        while True:

            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API. Note that it is possible to take the output of doInference
            # and parse it manually, but since this is an SSD model,
            # a simple parsing API is provided.
            raw_inference_result = model.doInference(frame_resize)
            # {
            #     'SoftMax_67': array(
            #         [
            #             2.41881448e-08,
            #             3.57339691e-09,
            #             1.00263861e-07,
            #             5.40415579e-09,
            #             4.37702547e-04,
            #             6.16787545e-08
            #         ],
            #         dtype=float32)
            # }

            parsed_inference_results = model.parseResult(
                model_type, raw_inference_result)
            # {
            #     "ssd": [
            #         {"label": "318", "xmin": 124, "xmax": 245, "ymin": 10, "ymax": 142, "prob": 0.5},
            #         {"label": "277", "xmin": 89, "xmax": 166, "ymin": 233, "ymax": 376, "prob": 0.3},
            #                 ...,
            #         {"label": "433", "xmin": 355, "xmax": 468, "ymin": 210, "ymax": 266, "prob": 0.001}
            #     ]
            # }

            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Get the detected faces and probabilities
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:
                    image_url = write_image_to_s3(frame)

                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin'])
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax'])
                    ymax = int(yscale * obj['ymax'])
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness
                    cv2.putText(frame, '{:.2f}%'.format(obj['prob'] * 100),
                                (xmin, ymin - text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20),
                                6)

                    # Upload to S3 to allow viewing the image in the browser
                    cropped = imcrop(frame, (xmin, ymin, xmax, ymax))
                    write_image_to_s3(cropped)

                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        client.publish(topic=iot_topic,
                       payload='Error in face detection lambda: {}'.format(ex))
def catcritter_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # captured ssd image info for training
        ssd_image_list = []

        capture_class_images = False

        class_labels = {
            0: 'background',
            1: 'buddy',
            2: 'jade',
            3: 'lucy',
            4: 'tim'
        }

        # This object detection model is implemented as a single shot detector (SSD).
        # Since the number of labels is small, we create a dictionary that will help
        # us convert the machine labels to human-readable labels.
        output_map = {
            1: 'aeroplane',
            2: 'bicycle',
            3: 'bird',
            4: 'boat',
            5: 'bottle',
            6: 'bus',
            7: 'car',
            8: 'cat',
            9: 'chair',
            10: 'cow',
            11: 'dining table',
            12: 'dog',
            13: 'horse',
            14: 'motorbike',
            15: 'person',
            16: 'pottedplant',
            17: 'sheep',
            18: 'sofa',
            19: 'train',
            20: 'tvmonitor'
        }
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()

        # The height and width of the training set images
        class_input_height = 100
        class_input_width = 100
        ssd_input_height = 300
        ssd_input_width = 300

        ssd_model_type = "ssd"
        class_model_type = "classification"
        class_model_name = "image-classification"

        client.publish(topic=iot_topic, payload='optimizing model')
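        # mo.optimize converts the trained model artifacts into a graph
        # optimized for the DeepLens inference engine; it returns an error
        # flag and the path to the optimized model file.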
        error, class_model_path = mo.optimize(class_model_name,
                                              class_input_width,
                                              class_input_height,
                                              aux_inputs={'--epoch': 100})

        # The '--epoch' aux input selects the checkpoint of the trained model
        # artifacts; in this case it is epoch 100.
        # Load model to GPU (use {"GPU": 0} for CPU)
        mcfg = {"GPU": 1}

        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        ssd_model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml'
        # Load the model onto the GPU
        client.publish(topic=iot_topic,
                       payload='Loading object detection model')
        ssd_model = awscam.Model(ssd_model_path, mcfg)
        class_model = awscam.Model(class_model_path, mcfg)
        client.publish(topic=iot_topic,
                       payload='Object detection model loaded')

        # Set the threshold for detection
        detection_threshold = 0.25

        counter = 1
        ssd_counter = 1
        irand = randrange(0, 1000)
        num_classes = 4

        # prepare training csv
        if not os.path.isdir("/tmp/cats"):
            os.mkdir("/tmp/cats")
            os.mkdir("/tmp/cats/train")
            os.chmod("/tmp/cats", stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            os.chmod("/tmp/cats/train",
                     stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

        if not os.path.isfile("/tmp/cats/train/train.csv"):
            with open('/tmp/cats/train/train.csv', 'a') as outcsv:
                writer = csv.writer(outcsv)
                writer.writerow(
                    ['frame', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'])

        today = datetime.datetime.now().strftime("%Y%m%d")

        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')

            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame,
                                      (ssd_input_height, ssd_input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API. Note that it is possible to take the output of doInference
            # and parse it manually, but since this is an SSD model,
            # a simple parsing API is provided.
            parsed_inference_results = ssd_model.parseResult(
                ssd_model_type, ssd_model.doInference(frame_resize))
            #client.publish(topic=iot_topic, payload='ssd infer complete')

            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(ssd_input_height)
            xscale = float(frame.shape[1]) / float(ssd_input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}

            image_saved = False

            # Get the detected objects and probabilities
            for obj in parsed_inference_results[ssd_model_type]:
                if obj['prob'] > detection_threshold:

                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin'])
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax'])
                    ymax = int(yscale * obj['ymax'])

                    if xmin < 0:
                        xmin = 0
                    if ymin < 0:
                        ymin = 0

                    # if we found a cat, dog, or person, then save the image
                    # to a file and run the classification model on the crop
                    if obj['label'] in (8, 12, 15):
                        # save the ssd image
                        if not image_saved:
                            frame_filename = "{}_{:03d}_{}_{:03d}".format(
                                today, ssd_counter, 'cats', irand)
                            frame_path = "/tmp/cats/train/" + frame_filename + '.jpg'
                            cv2.imwrite(frame_path, frame_resize)
                            ssd_counter += 1
                            image_saved = True

                        crop = frame[ymin:ymax, xmin:xmax].copy()
                        crop_resize = cv2.resize(
                            crop, (class_input_height, class_input_width))

                        # Run model inference on the cropped frame
                        inferOutput = class_model.doInference(crop_resize)
                        #client.publish(topic=iot_topic, payload='classification infer complete')

                        class_inference_results = class_model.parseResult(
                            class_model_type, inferOutput)
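                        # Classification results come back sorted by
                        # probability, so the first num_classes entries are
                        # the top predictions.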
                        top_k = class_inference_results[class_model_type][
                            0:num_classes]
                        first = top_k[0]

                        if first['prob'] > detection_threshold:
                            #client.publish(topic=iot_topic, payload='found {}, saving file'.format(labels[first['label']]))

                            if capture_class_images:
                                class_dir = "/tmp/cats/train/" + frame_filename
                                if not os.path.isdir(class_dir):
                                    os.mkdir(class_dir)
                                    os.chmod(
                                        class_dir, stat.S_IRWXU | stat.S_IRWXG
                                        | stat.S_IRWXO)

                            the_label = class_labels[first['label']]
                            the_class = first['label']

                            if (obj['label'] == 15 and the_class < 3):
                                the_label = 'person'
                                the_class = 3

                            path = "/tmp/cats/train/{}/train_{}_{:03d}_{}_{:03d}_{:03d}_{:03d}_{:03d}_{:03d}.jpg".format(
                                frame_filename, today, counter, the_label,
                                irand, int(round(obj['xmin'])),
                                int(round(obj['xmax'])),
                                int(round(obj['ymin'])),
                                int(round(obj['ymax'])))

                            if capture_class_images:
                                cv2.imwrite(path, crop)
                                os.chmod(
                                    path, stat.S_IRUSR | stat.S_IWUSR
                                    | stat.S_IRGRP | stat.S_IWGRP
                                    | stat.S_IROTH | stat.S_IWOTH)
                                counter += 1

                            msg = '{'
                            prob_num = 0
                            for kitty in top_k:
                                if prob_num == num_classes - 1:
                                    msg += '"{}": {:.2f}'.format(
                                        class_labels[kitty["label"]],
                                        kitty["prob"] * 100)
                                else:
                                    msg += '"{}": {:.2f},'.format(
                                        class_labels[kitty["label"]],
                                        kitty["prob"] * 100)
                                # increment inside the loop so only the last
                                # entry is emitted without a trailing comma
                                prob_num += 1
                            msg += "}"

                            # Send results to the cloud
                            #client.publish(topic=iot_topic, payload=json.dumps(msg))

                        else:
                            the_class = 0
                            the_label = class_labels[the_class]

                            path = "/tmp/cats/train/{}/train_{}_{:03d}_{}_{:03d}_{:03d}_{:03d}_{:03d}_{:03d}.jpg".format(
                                frame_filename, today, counter, the_label,
                                irand, int(round(obj['xmin'])),
                                int(round(obj['xmax'])),
                                int(round(obj['ymin'])),
                                int(round(obj['ymax'])))

                            if capture_class_images:
                                cv2.imwrite(path, crop)
                                os.chmod(
                                    path, stat.S_IRUSR | stat.S_IWUSR
                                    | stat.S_IRGRP | stat.S_IWGRP
                                    | stat.S_IROTH | stat.S_IWOTH)
                                counter += 1

                        # create ssd entry
                        ssd_image_desc = [
                            frame_filename + ".jpg",
                            int(round(obj['xmin'])),
                            int(round(obj['xmax'])),
                            int(round(obj['ymin'])),
                            int(round(obj['ymax'])), the_class
                        ]
                        ssd_image_list.append(ssd_image_desc)

                    if obj['label'] < 20:
                        # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                        # for more information about the cv2.rectangle method.
                        # Method signature: image, point1, point2, color, and thickness.
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                      (255, 165, 20), 10)
                        # Amount to offset the label/probability text above the bounding box.
                        text_offset = 15
                        # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                        # for more information about the cv2.putText method.
                        # Method signature: image, text, origin, font face, font scale, color,
                        # and thickness
                        cv2.putText(
                            frame,
                            "{}: {:.2f}%".format(output_map[obj['label']],
                                                 obj['prob'] * 100),
                            (xmin, ymin - text_offset),
                            cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)

            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)

            if image_saved:
                with open('/tmp/cats/train/train.csv', 'a') as outcsv:
                    writer = csv.writer(outcsv)
                    writer.writerows(ssd_image_list)
                    ssd_image_list = []

    except Exception as ex:
        client.publish(
            topic=iot_topic,
            payload='Error in cat-plus-critter lambda: {}'.format(ex))
from threading import Thread, Event, Timer
import os
import json
import numpy as np
import greengrasssdk
import sys
import datetime
import time
import awscam
import cv2
import urllib
import zipfile
import mo

# Create a greengrass core sdk client
client = greengrasssdk.client('iot-data')

# The information exchanged between IoT and cloud has a topic and a
# message body. This is the topic used to send messages to the cloud.
iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])

client.publish(topic=iot_topic, payload='At start of lambda function')

# boto3 is not installed on device by default. Install it if it is not
# already present.

boto_dir = '/tmp/boto_dir'
if not os.path.exists(boto_dir):
    os.mkdir(boto_dir)
    urllib.urlretrieve('https://s3.amazonaws.com/dear-demo/boto_3_dist.zip',
                        '/tmp/boto_3_dist.zip')
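    # The original snippet is truncated here; an assumed continuation unpacks
    # the archive so that boto3 can be imported from boto_dir.
    with zipfile.ZipFile('/tmp/boto_3_dist.zip') as zf:
        zf.extractall(boto_dir)
sys.path.append(boto_dir)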
Example n. 29
def greengrass_infer_image_run():
    """ Entry point of the lambda function"""
    try:
        # This object detection model is implemented as a single shot detector (SSD).
        # Since the number of labels is small, we create a dictionary that will help
        # us convert the machine labels to human-readable labels.
        model_type = 'ssd'
        output_map = {1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat', 5: 'bottle', 6: 'bus',
                      7 : 'car', 8 : 'cat', 9 : 'chair', 10 : 'cow', 11 : 'dining table',
                      12 : 'dog', 13 : 'horse', 14 : 'motorbike', 15 : 'person',
                      16 : 'pottedplant', 17 : 'sheep', 18 : 'sofa', 19 : 'train',
                      20 : 'tvmonitor'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading object detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Object detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.25
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Unlike the streaming samples, this function runs inference on a single frame.

        # Get a frame from the video stream
        ret, frame = awscam.getLastFrame()
        if not ret:
            raise Exception('Failed to get frame from the stream')
        # Resize frame to the same size as the training set.
        frame_resize = cv2.resize(frame, (input_height, input_width))
        # Run the images through the inference engine and parse the results using
        # the parser API. Note that it is possible to take the output of doInference
        # and parse it manually, but since this is an SSD model,
        # a simple parsing API is provided.
        parsed_inference_results = model.parseResult(model_type, model.doInference(frame_resize))
        # Compute the scale in order to draw bounding boxes on the full resolution
        # image.
        yscale = float(frame.shape[0]) / float(input_height)
        xscale = float(frame.shape[1]) / float(input_width)
        # Dictionary to be filled with labels and probabilities for MQTT
        cloud_output = {}
        # Get the detected objects and probabilities
        for obj in parsed_inference_results[model_type]:
            if obj['prob'] > detection_threshold:
                # Add bounding boxes to full resolution frame
                xmin = int(xscale * obj['xmin'])
                ymin = int(yscale * obj['ymin'])
                xmax = int(xscale * obj['xmax'])
                ymax = int(yscale * obj['ymax'])
                # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                # for more information about the cv2.rectangle method.
                # Method signature: image, point1, point2, color, and thickness.
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 10)
                # Amount to offset the label/probability text above the bounding box.
                text_offset = 15
                cv2.putText(frame, "{}: {:.2f}%".format(output_map[obj['label']], obj['prob'] * 100),
                                (xmin, ymin-text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)
                # Store label and probability to send to cloud
                cloud_output[output_map[obj['label']]] = obj['prob']
        # Set the next frame in the local display stream.
        local_display.set_frame_data(frame)
        # Send results to the cloud
        client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        client.publish(topic=iot_topic, payload='Error in object detection lambda: {}'.format(ex))
Example n. 30
import json
import logging as log
import time

import redis
import pandas as pd
import numpy as np

import greengrasssdk

# Initialize redis connection
pool = redis.ConnectionPool(host='localhost',
                            port=6379,
                            db=0,
                            decode_responses=True)
conn = redis.StrictRedis(connection_pool=pool)
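# With decode_responses=True the client returns str values instead of bytes.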

ggdevice_client = greengrasssdk.client('iot-data')
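# Silence pandas' SettingWithCopyWarning for in-place DataFrame edits.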
pd.options.mode.chained_assignment = None


def handler(event, context):
    log.getLogger().setLevel(log.INFO)
    global aws_request_id

    aws_request_id = event['upstream-request-id']
    topic = context.client_context.custom['subject']

    log.info('Received message from topic: {0} with payload:\n {1}'.format(
        topic, json.dumps(event, indent=4)))

    log.info('The upstream request id is: {}'.format(aws_request_id))
Example n. 31
#
# Copyright Amazon AWS DeepLens, 2017.
#

import os
import greengrasssdk
from threading import Timer
import time
import awscam
import cv2
from threading import Thread
import base64

# Creating a greengrass core sdk client
client = greengrasssdk.client('iot-data')

# The information exchanged between IoT and cloud has
# a topic and a message body.
# This is the topic that this code uses to send messages to the cloud.
iotTopic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])


def cropFace(img, x, y, w, h):

    #Crop face
    cimg = img[y:y + h, x:x + w]

    #Convert to jpeg
    ret, jpeg = cv2.imencode('.jpg', cimg)
    face = base64.b64encode(jpeg.tobytes())
    return face
def infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This object detection model is implemented as a single shot detector (SSD).
        # Since the number of labels is small, we create a dictionary that will help
        # us convert the machine labels to human-readable labels.
        model_type = "ssd"
        output_map = {
            1: "aeroplane",
            2: "bicycle",
            3: "bird",
            4: "boat",
            5: "bottle",
            6: "bus",
            7: "car",
            8: "cat",
            9: "chair",
            10: "cow",
            11: "dinning table",
            12: "dog",
            13: "horse",
            14: "motorbike",
            15: "person",
            16: "pottedplant",
            17: "sheep",
            18: "sofa",
            19: "train",
            20: "tvmonitor",
        }
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client("iot-data")
        iot_topic = "$aws/things/{}/infer".format(os.environ["AWS_IOT_THING_NAME"])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay("480p")
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = (
            "/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml"
        )
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload="Loading object detection model")
        model = awscam.Model(model_path, {"GPU": 1})
        client.publish(topic=iot_topic, payload="Object detection model loaded")
        # Set the threshold for detection
        detection_threshold = 0.25
        # The height and width of the training set images
        input_height = 300
        input_width = 300

        """extra part of code for rekognition"""

        # model trained in us east 2
        # projectVersionArn = "arn:aws:rekognition:us-east-2:510335724440:project/PPE_detection_May_2020/version/PPE_detection_May_2020.2020-06-01T23.33.22/1591025603184"
        # model trained in us east 1, version 1
        # projectVersionArn = "arn:aws:rekognition:us-east-1:510335724440:project/ppe-detection-deeplens/version/ppe-detection-deeplens.2020-06-12T14.25.57/1591943158364"
        # model trained in us east 1, version 2
        projectVersionArn = "arn:aws:rekognition:us-east-1:510335724440:project/ppe-detection-deeplens/version/ppe-detection-deeplens.2020-06-17T14.28.47/1592375328862"

        rekognition = boto3.client("rekognition")
        customLabels = []

        s3 = boto3.client("s3")

        iterator = 0
        """extra part of code for rekognition"""
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception("Failed to get frame from the stream")
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API. Note that it is possible to take the output of doInference
            # and parse it manually, but since this is an SSD model,
            # a simple parsing API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize)
            )
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Get the detected objects and probabilities
            # for obj in parsed_inference_results[model_type]:
            #     if obj["prob"] > detection_threshold:
            #         # Add bounding boxes to full resolution frame
            #         xmin = int(xscale * obj["xmin"])
            #         ymin = int(yscale * obj["ymin"])
            #         xmax = int(xscale * obj["xmax"])
            #         ymax = int(yscale * obj["ymax"])
            #         # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
            #         # for more information about the cv2.rectangle method.
            #         # Method signature: image, point1, point2, color, and thickness.

            #         # comment out the drawing part to avoid the results of two models all on one frame
            #         cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 10)
            #         # Amount to offset the label/probability text above the bounding box.
            #         text_offset = 15
            #         # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
            #         # for more information about the cv2.putText method.
            #         # Method signature: image, text, origin, font face, font scale, color,
            #         # and thickness
            #         cv2.putText(
            #             frame,
            #             "{}: {:.2f}%".format(
            #                 output_map[obj["label"]], obj["prob"] * 100
            #             ),
            #             (xmin, ymin - text_offset),
            #             cv2.FONT_HERSHEY_SIMPLEX,
            #             2.5,
            #             (255, 165, 20),
            #             6,
            #         )
            #         # Store label and probability to send to cloud
            #         cloud_output[output_map[obj["label"]]] = obj["prob"]
            # # Set the next frame in the local display stream.
            # local_display.set_frame_data(frame)
            # # Send results to the cloud
            # client.publish(topic=iot_topic, payload=json.dumps(cloud_output))

            """extra part of code for rekognition"""
            hasFrame, imageBytes = cv2.imencode(".jpg", frame)
            client.publish(topic=iot_topic, payload="import done")
            if hasFrame:
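                # detect_custom_labels requires the Custom Labels model version
                # to be running (started via start_project_version) and AWS
                # credentials available to this Lambda's Greengrass group role.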
                response = rekognition.detect_custom_labels(
                    Image={"Bytes": imageBytes.tobytes(),},
                    ProjectVersionArn=projectVersionArn,
                )
            client.publish(topic=iot_topic, payload="analyse done")                    

            # image = Img.fromarray(frame)
            # imgWidth, imgHeight = image.size
            # draw = ImageDraw.Draw(image)
            imgHeight, imgWidth, c = frame.shape
            image = frame

            ppe = 0
            person = 0

            for elabel in response["CustomLabels"]:
                # elabel["Timestamp"] = (frameId/frameRate)*1000
                customLabels.append(elabel)

                print("Label " + str(elabel["Name"]))
                print("Confidence " + str(elabel["Confidence"]))
                
                if str(elabel["Name"]) == "PPE":
                    ppe = ppe + 1
                else if str(elabel["Name"]) == "person"
                    person = person + 1
                
                if "Geometry" in elabel:
                    box = elabel["Geometry"]["BoundingBox"]
                    left = imgWidth * box["Left"]
                    top = imgHeight * box["Top"]
                    width = imgWidth * box["Width"]
                    height = imgHeight * box["Height"]
                    
                    if str(elabel["Name"]) == "person":
                        cv2.putText(
                            image,
                            elabel["Name"],
                            (int(left), int(top)),
                            cv2.FONT_HERSHEY_COMPLEX,
                            1,
                            (0, 255, 0),
                            1,
                        )
                    else:
                        cv2.putText(
                            image,
                            elabel["Name"],
                            (int(left), int(top)),
                            cv2.FONT_HERSHEY_COMPLEX,
                            1,
                            (255, 0, 0),
                            1,
                        )

                    print("Left: " + "{0:.0f}".format(left))
                    print("Top: " + "{0:.0f}".format(top))
                    print("Label Width: " + "{0:.0f}".format(width))
                    print("Label Height: " + "{0:.0f}".format(height))

                    # points = (
                    #     (left, top),
                    #     (left + width, top),
                    #     (left + width, top + height),
                    #     (left, top + height),
                    #     (left, top),
                    # )
                    # if str(elabel["Name"]) == "person":
                    #     draw.line(points, fill="#00d400", width=3)
                    # else:
                    #     draw.line(points, fill="#800000", width=3)
                    if str(elabel["Name"]) == "person":
                        cv2.rectangle(
                            image,
                            (int(left), int(top)),
                            (int(left + width), int(top + height)),
                            (0, 255, 0),
                            2,
                        )
                    else:
                        cv2.rectangle(
                            image,
                            (int(left), int(top)),
                            (int(left + width), int(top + height)),
                            (255, 0, 0),
                            2,
                        )
            # save the image locally and then upload them into s3
            client.publish(topic=iot_topic, payload="drawing done")
            # cv2.imwrite("frame" + format(iterator) + ".jpg", image)
            # image.save("frame" + format(iterator) + ".jpg")
            # don't save it to the disk anymore
            #client.publish(topic=iot_topic, payload="image saving done")
            iterator = iterator + 1

            # upload as bytes
            # imageBytes = image.tobytes()
            # with io.BytesIO(imageBytes) as f:
            #     s3.upload_fileobj(
            #         f,
            #         "custom-labels-console-us-east-1-5e4c514f5b",
            #         "frameID: " + format(iterator) + ".jpg",
            #     )

            # write the metadata
            # metadata = {"NumberOfPersons": str(person), "NumberOfPPEs": str(ppe)}
            
            # upload the encoded JPEG bytes as the object body
            img_str = cv2.imencode('.jpg', image)[1].tobytes()
            s3.put_object(
                Bucket="custom-labels-console-us-east-1-5e4c514f5b",
                Key="frameID: " + format(iterator) + ".jpg",
                Body=img_str,
                ACL="public-read",
                Metadata={"NumberOfPersons": str(person), "NumberOfPPEs": str(ppe)}
            )
            client.publish(topic=iot_topic, payload="send to s3 done")

            # to retrieve metadata in s3
            # $ aws s3api head-object --bucket custom-labels-console-us-east-1-5e4c514f5b --key testImage.jpg

            # print(customLabels)

            """extra part of code for rekognition"""

        # cap.release() from the original sample is not needed here; awscam manages the stream

    except Exception as ex:
        client.publish(
            topic=iot_topic, payload="Error in object detection lambda: {}".format(ex)
        )