Code Example #1
def run(self):
    # Stream encoded JPEG frames into a named pipe so a local viewer
    # (e.g. mplayer) can render them. The globals client, iotTopic, jpeg,
    # and Write_To_FIFO are defined elsewhere (see Code Example #8).
    fifo_path = "/tmp/results.mjpeg"
    if not os.path.exists(fifo_path):
        os.mkfifo(fifo_path)
    # Open in binary mode: jpeg.tobytes() yields raw bytes, not text.
    f = open(fifo_path, 'wb')
    client.publish(topic=iotTopic, payload="Opened Pipe")
    while Write_To_FIFO:
        try:
            # Skip until the first frame has been encoded.
            if jpeg is not None:
                f.write(jpeg.tobytes())
        except IOError:
            # The viewer may have disconnected (broken pipe); keep retrying.
            continue
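This run() is presumably the body of the FIFO_Thread class that Code Example #7 starts via FIFO_Thread(); the class itself is not part of the excerpt. A minimal sketch of what that wrapper might look like, with the daemon flag as an assumption:

import threading

class FIFO_Thread(threading.Thread):
    # Hypothetical wrapper for the run() above; the real class is not shown.
    def __init__(self):
        super(FIFO_Thread, self).__init__()
        self.daemon = True  # assumption: do not block process shutdown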
Code Example #2
def Email_Trigger(FILE_NAME_PREFIX):
    # Publish a late-submission alert to an SNS topic subscribed by email.
    response = client.publish(
        TopicArn='arn:aws:sns:us-east-1:125810979284:Today_File_Present_Check',
        Message='This is a late submission alert for the ' + FILE_NAME_PREFIX +
                ' file. Please contact the source.',
        Subject='Late Sub Alert For: ' + FILE_NAME_PREFIX + ' File'
    )
    print("Email Sent")
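Email_Trigger assumes a boto3 SNS client named client already exists in the enclosing scope. A minimal, hedged setup and call, where the file prefix is a made-up placeholder:

import boto3

client = boto3.client('sns', region_name='us-east-1')  # region matches the TopicArn
Email_Trigger('EXAMPLE_PREFIX')  # hypothetical prefix; prints "Email Sent" on success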
Code Example #3
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This object detection model is a single-shot detector (SSD). Since
        # the number of labels is small, we create a dictionary that maps the
        # machine labels to human-readable labels.
        model_type = 'ssd'
        output_map = {
            1: 'aeroplane',
            2: 'bicycle',
            3: 'bird',
            4: 'boat',
            5: 'bottle',
            6: 'bus',
            7: 'car',
            8: 'cat',
            9: 'chair',
            10: 'cow',
            11: 'dining table',
            12: 'dog',
            13: 'horse',
            14: 'motorbike',
            15: 'person',
            16: 'pottedplant',
            17: 'sheep',
            18: 'sofa',
            19: 'train',
            20: 'tvmonitor'
        }
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a
        # FIFO file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic,
                       payload='Loading object detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic,
                       payload='Object detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.8
        # The height and width of the training set images
        input_height = 300
        input_width = 300

        lastmatch = datetime.utcnow()

        # Do inference until the lambda is killed.
        while True:

            # Pull Polly messages from the queue
            try:
                for message in polly_queue.receive_messages():
                    client.publish(topic=iot_topic,
                                   payload="Loading Polly Message: %s" %
                                   message.body)
                    polly_text = message.body
                    polly_client = boto3.client('polly',
                                                region_name=aws_region)
                    polly_response = polly_client.synthesize_speech(
                        OutputFormat='mp3',
                        Text=polly_text,
                        TextType='text',
                        VoiceId='Joanna')

                    if "AudioStream" in polly_response:
                        with closing(polly_response["AudioStream"]) as stream:
                            # Write the MP3 to a temporary file (binary mode,
                            # since the audio stream is raw bytes).
                            polly_data = stream.read()
                            polly_filename = "/tmp/polly_file.mp3"
                            polly_file = open(polly_filename, "wb")
                            polly_file.write(polly_data)
                            polly_file.close()

                            client.publish(topic=iot_topic,
                                           payload="Polly: Playing audio")
                            os.system('mplayer ' + polly_filename)

                    client.publish(
                        topic=iot_topic,
                        payload="Polly: Deleting message from queue")
                    message.delete()

            except Exception as ex:
                #[Errno 2] No such file or directory
                client.publish(
                    topic=iot_topic,
                    payload='Error in polly integration: {}'.format(ex))

            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')

            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the image through the inference engine and parse the results
            # using the parser API. Note that it is possible to take the output
            # of doInference and parse it manually, but since this is an SSD
            # model a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            # Compute the scale in order to draw bounding boxes on the full
            # resolution image. Use float division so the scales are not
            # truncated by integer division.
            yscale = float(frame.shape[0]) / input_height
            xscale = float(frame.shape[1]) / input_width
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Get the detected objects and probabilities
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:
                    # Add bounding boxes to the full resolution frame. Note the
                    # second term of xmin/xmax reduces to int(obj['xmin']) and
                    # int(obj['xmax']) respectively.
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width/2) + input_width/2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2) + input_width/2)
                    ymax = int(yscale * obj['ymax'])

                    # Hold off between matches to let the system catch up
                    if datetime.utcnow() > (lastmatch + timedelta(seconds=1)):

                        try:
                            # if a person was found, upload the target area to S3 for further inspection
                            if output_map[obj['label']] == 'person':

                                # get the person image
                                person = frame[ymin:ymax, xmin:xmax]

                                face_cascade = cv2.CascadeClassifier(
                                    'haarcascade_frontalface_default.xml')
                                gray = cv2.cvtColor(person, cv2.COLOR_BGR2GRAY)
                                faces = face_cascade.detectMultiScale(
                                    gray, 1.1, 5)

                                if len(faces) != 1:
                                    client.publish(
                                        topic=iot_topic,
                                        payload="Skipping, no faces")
                                    continue

                                #image = base64.b64encode(person)

                                #resp = client.search_faces_by_image(
                                #    CollectionId=rekognition_collection_id,
                                #    Image=image,
                                #    MaxFaces=1,
                                #    FaceMatchThreshold=70)

                                #if len(resp['FaceMatches']) > 0:
                                #    client.publish(topic=iot_topic, payload=resp)

                                # create a nice s3 file key
                                s3_key = datetime.utcnow().strftime(
                                    '%Y-%m-%d_%H_%M_%S.%f') + '.jpg'
                                encode_param = [
                                    int(cv2.IMWRITE_JPEG_QUALITY), 90
                                ]  # 90% should be more than enough
                                _, jpg_data = cv2.imencode(
                                    '.jpg', person, encode_param)
                                filename = "incoming/%s" % s3_key  # the guess lambda function is listening here
                                response = s3.put_object(
                                    ACL='public-read',
                                    Body=jpg_data.tobytes(),
                                    Bucket=s3_bucket,
                                    Key=filename)

                                # reset the timer for the next match
                                lastmatch = datetime.utcnow()
                        except Exception as ex:
                            client.publish(
                                topic=iot_topic,
                                payload='Error in person finder to S3 block: {}'
                                .format(ex))

                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15

                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness.
                    cv2.putText(
                        frame, "{}: {:.2f}%".format(output_map[obj['label']],
                                                    obj['prob'] * 100),
                        (xmin, ymin - text_offset), cv2.FONT_HERSHEY_SIMPLEX,
                        2.5, (255, 165, 20), 6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud

            if cloud_output:
                client.publish(topic=iot_topic,
                               payload=json.dumps(cloud_output))

    except Exception as ex:
        client.publish(
            topic=iot_topic,
            payload='Error in object detection lambda: {}'.format(ex))
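The loop above consumes polly_queue, which the excerpt never defines. Its receive_messages(), message.body, and message.delete() calls match a boto3 SQS Queue resource, so one plausible setup is the following sketch; the queue name is hypothetical:

import boto3

sqs = boto3.resource('sqs', region_name=aws_region)
polly_queue = sqs.get_queue_by_name(QueueName='polly-messages')  # hypothetical name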
Code Example #4
import urlparse  # not used in this excerpt
import json
import time

import boto3

count = 0
while True:

    # The message data we will publish to the topic
    data = {
        "job_id": job_id,
        "link": "https://zhuoyuzhu.ucmpcs.org/annotations/" + job_id,
        "email": email_addr,
        "user_person": user_person,
        "user_role": user_role,
        "completion_time": int(completion_time),
        "result_file": item['s3_key_result_file']
    }

    # Crude back-off: after more than ten publishes, pause for two minutes
    # and partially reset the counter.
    if count > 10:
        time.sleep(120)
        count = count - 8

    client = boto3.client('sns')
    response_notification = client.publish(
        TopicArn='arn:aws:sns:us-east-1:127134666975:zhuoyuzhu_job_request_notify',
        Message=json.dumps(data)
    )
    count = count + 1
    time.sleep(25)
Code Example #5
          "input_file_name": filename,
          "s3_inputs_bucket": bucket_name,
          "s3_key_input_file": s3key,
          "submit_time": int(time.time()),
          "job_status": "PENDING",
		      "user_email_addr": auth.current_user.email_addr,
		      "user_role": auth.current_user.role
  }

  # Insert the new data into data table
  ann_table.put_item(Item=data)

  # Publish a notification message to the SNS topic
  client = boto3.client('sns', region_name = region_name)
  response_notification = client.publish(
          TopicArn = job_request_topic,
          Message = json.dumps(data)
  )
  
  # Render the upload_confirm template
  return template(request.app.config['mpcs.env.templates'] + 'upload_confirm', auth=auth, job_id=job_id, alert=False)


'''
*******************************************************************************
List all annotations for the user
*******************************************************************************
'''
@route('/annotations', method='GET', name="annotations_list")
def get_annotations_list():
  # Check that the user is authenticated
  auth.require(fail_redirect='/login?redirect_url=' + request.url)
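ann_table is never defined in this excerpt; the ann_table.put_item call above and the get_item in Code Example #6 match a boto3 DynamoDB Table resource. A hedged setup sketch, with a hypothetical table name:

import boto3

dynamodb = boto3.resource('dynamodb', region_name=region_name)
ann_table = dynamodb.Table('annotations')  # hypothetical table name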
Code Example #6
        # The DynamoDB update succeeded
        print("UpdateItem succeeded:")

        # Clean up (delete) local job files
        shutil.rmtree(prefix)

        # Look up the job record to build the notification payload
        DBresponse = ann_table.get_item(Key={'job_id': job_id})
        item = DBresponse['Item']
        email_addr = item['user_email_addr']
        user_person = item['username']
        user_role = item['user_role']
        completion_time = item['complete_time']
        # The message data we will publish to the topic
        data = {
            "job_id": job_id,
            "link": "https://zhuoyuzhu.ucmpcs.org/annotations/" + job_id,
            "email": email_addr,
            "user_person": user_person,
            "user_role": user_role,
            "completion_time": int(completion_time),
            "result_file": item['s3_key_result_file']
        }
        client = boto3.client('sns')
        response_notification = client.publish(
            TopicArn=request.app.config['mpcs.aws.sns.job_complete_topic'],
            Message=json.dumps(data))

    else:
        print('A valid .vcf file must be provided as input to this program.')
Code Example #7
def greengrass_infinite_infer_run():
    logger.info('first run {}'.format(FIRST_RUN))
    if FIRST_RUN:
        firstRunFunc()
    try:
        results_thread = FIFO_Thread()
        results_thread.start()
        client.publish(topic=iot_topic, payload="Inference starting")
        ret, frame = awscam.getLastFrame()
        if not ret:
            raise Exception("Failed to get frame from the stream")
        # Use float division so the scales are not truncated.
        yscale = float(frame.shape[0]) / input_height
        xscale = float(frame.shape[1]) / input_width
        occurs = 0
        doInfer = True
        while doInfer:
            frameContainsText = False
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            # Raise an exception if we fail to get a frame
            if not ret:
                raise Exception("Failed to get frame from the stream")

            # Resize frame to fit model input requirement
            frameResize = cv2.resize(frame, (input_width, input_height))
            # Run model inference on the resized frame
            inferOutput = model.doInference(frameResize)
            # Output inference result to the fifo file so it can be viewed with mplayer
            parsed_results = model.parseResult(model_type,
                                               inferOutput)[model_type]
            label = ''
            # Materialize a list (filter() returns an iterator on Python 3)
            text_blocks = [x for x in parsed_results
                           if x['prob'] > prob_thresh]
            # We only want the single most probable text block, so sort by
            # probability and keep the last (highest) entry.
            if len(text_blocks) > 0:
                logger.debug('number of text blocks detected: [{}]'.format(
                    len(text_blocks)))
                text_blocks.sort(key=lambda x: x['prob'])
                text_blocks = text_blocks[-1:]
                obj = text_blocks[0]
                frameContainsText = True
                occurs += 1
                logger.debug('text blocks detected')
                xmin = int(xscale * obj['xmin']) + int(
                    (obj['xmin'] - input_width / 2) + input_width / 2)
                ymin = int(yscale * obj['ymin'])
                xmax = int(xscale * obj['xmax']) + int(
                    (obj['xmax'] - input_width / 2) + input_width / 2)
                ymax = int(yscale * obj['ymax'])
                logger.debug('xmin {} xmax {} ymin {} ymax {}'.format(
                    xmin, xmax, ymin, ymax))
                label_show = "{}: conseq: {}:    {:.2f}%".format(
                    outMap[obj['label']], occurs, obj['prob'] * 100)
                if occurs >= occursThreshold:
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (121, 255, 20), 4)
                    cv2.putText(frame, label_show, (xmin, ymin - 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (121, 255, 20),
                                4)
                    speak.playAudioFile(
                        os.path.join('staticfiles', 'chime.mp3'))
                    try:
                        occurs = 0
                        logger.debug('ocr image')
                        tb = ip.getRoi(frame, xmin, xmax, ymin, ymax)
                        tb = ip.correctSkew(tb)
                        clean = ip.cleanUpTextArea(tb)
                        txt = ip.ocrImage(clean,
                                          extractBadChars=True,
                                          spellCheck=False)
                        if txt == '':
                            speak.speak(
                                'Sorry, I am unable to read the page. \nPlease try again.'
                            )
                        else:
                            logger.info(txt)
                            speak.speak(txt)

                    except Exception as e:
                        msg = "ocr failed: " + str(e)
                        cv2.imwrite(
                            os.path.join(
                                os.path.abspath(os.sep), 'tmp',
                                '{}-{}-{}-{}-{}.jpg'.format(
                                    time.strftime("%Y-%m-%d %H:%M:%S"), xmin,
                                    xmax, ymin, ymax)), tb)
                        client.publish(topic=iot_topic, payload=msg)

                else:
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (255, 165, 20), 4)
                    cv2.putText(frame, label_show, (xmin, ymin - 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 20),
                                4)

            else:
                occurs = 0

            global jpeg
            shp = frame.shape
            # Integer-divide so cv2.resize receives int dimensions
            smallerFrame = cv2.resize(frame, (shp[1] // 3, shp[0] // 3))
            ret, jpeg = cv2.imencode('.jpg', smallerFrame)
            #ret, jpeg = cv2.imencode('.jpg', frame)

    except Exception as e:
        msg = "Lambda function failed: " + str(e)
        logger.info(msg)
        speak.speak("I'm sorry, I wasn't able to read that for some reason.")

    # Asynchronously schedule this function to be run again in 15 seconds
    Timer(15, greengrass_infinite_infer_run).start()
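Nothing in the excerpt actually invokes greengrass_infinite_infer_run(). In the AWS DeepLens samples this pattern is kicked off once at module load, with a no-op Lambda handler; a hedged sketch of that tail:

# Start inference when the Lambda container loads (DeepLens sample pattern).
greengrass_infinite_infer_run()

def function_handler(event, context):
    # Greengrass requires a handler, but all the work happens in the loop above.
    return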
Code Example #8
# Messages exchanged with AWS IoT carry a topic and a message body.
# This is the topic this code uses to send messages to the cloud.
iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
ret, frame = awscam.getLastFrame()
Write_To_FIFO = True
FIRST_RUN = True
PLAY_INTRO = False
input_width = 300
input_height = 300
prob_thresh = 0.55
outMap = {1: 'text_block'}
model_name = "read-to-me"
occursThreshold = 10
error, model_path = mo.optimize(model_name, input_width, input_height)
model = awscam.Model(model_path, {"GPU": 1})
# IoT client for status messages (created the same way as in Code Example #3)
client = greengrasssdk.client('iot-data')
client.publish(topic=iot_topic, payload="Model loaded.")
model_type = "ssd"
jpeg = None


def firstRunFunc():
    logger.info('first run')
    global FIRST_RUN
    try:
        if PLAY_INTRO:
            speak.playAudioFile(os.path.join('staticfiles', 'intro.mp3'))
            sleep(0.5)
            speak.playAudioFile(os.path.join('staticfiles', 'dir1.mp3'))
            speak.playAudioFile(os.path.join('staticfiles', 'chime.mp3'))
            speak.playAudioFile(os.path.join('staticfiles', 'dir2.mp3'))
            sleep(1)
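        # Hedged continuation: the excerpt cuts off above. Given the global
        # FIRST_RUN declaration, the function presumably ends by clearing the
        # flag so later calls skip the intro; the except clause below is an
        # assumption.
        FIRST_RUN = False
    except Exception as e:
        logger.info('first run failed: {}'.format(e))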
Code Example #9
def greengrass_infinite_infer_run():
    logger.info('starting lambda')
    global FIRST_RUN
    logger.info('first run {}'.format(FIRST_RUN))
    if FIRST_RUN:
        firstRunFunc()
    try:
        input_width = 300
        input_height = 300
        prob_thresh = 0.65
        results_thread = FIFO_Thread()
        results_thread.start()

        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        # Send a starting message to IoT console
        client.publish(topic=iotTopic, payload="Text detection starts now")

        ret, frame = awscam.getLastFrame()
        if not ret:
            raise Exception("Failed to get frame from the stream")

        # Use float division so the scales are not truncated.
        yscale = float(frame.shape[0]) / input_height
        xscale = float(frame.shape[1]) / input_width
        with detection_graph.as_default():
            with tf.Session(graph=detection_graph) as sess:
                while True:
                    # Get a frame from the video stream
                    ret, frame = awscam.getLastFrame()
                    # Raise an exception if we fail to get a frame
                    if not ret:
                        raise Exception("Failed to get frame from the stream")

                    # Add a batch dimension; the graph expects [1, H, W, 3].
                    image_np_expanded = np.expand_dims(frame, axis=0)
                    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                    # Each box represents a part of the image where a particular object was detected.
                    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                    # Each score represents the confidence for each detected
                    # object, shown on the result image with the class label.
                    scores = detection_graph.get_tensor_by_name('detection_scores:0')
                    classes = detection_graph.get_tensor_by_name('detection_classes:0')
                    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                    # Actual detection.
                    (boxes, scores, classes, num_detections) = sess.run(
                        [boxes, scores, classes, num_detections],
                        feed_dict={image_tensor: image_np_expanded})
                    # Visualization of the results of a detection.
                    # TODO see test script to get this working
                    print("boxes: {}".format(len(boxes)))

                    # Update the frame served to the FIFO viewer on each
                    # iteration.
                    global jpeg
                    ret, jpeg = cv2.imencode('.jpg', frame)

    except Exception as e:
        msg = "OCR failed: " + str(e)
        speak.speak("I'm sorry, I wasn't able to read that for some reason.")
        client.publish(topic=iotTopic, payload=msg)

    # Asynchronously schedule this function to be run again in 15 seconds
    Timer(15, greengrass_infinite_infer_run).start()
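This example leans on names defined elsewhere (PATH_TO_CKPT, client, iotTopic, FIFO_Thread, speak, logger), and tf.GraphDef/tf.gfile are TensorFlow 1.x APIs. A minimal, hedged sketch of the missing graph-path setup, assuming a frozen graph exported with the TensorFlow Object Detection API:

import tensorflow as tf  # TensorFlow 1.x

PATH_TO_CKPT = '/opt/models/frozen_inference_graph.pb'  # hypothetical path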