Example #1
    def parse(self, device_ID: str, message: Dict[str, str]) -> None:
        if not self.validate_message(message):
            logging.error(f'{self.name}.parse: invalid message={message}')
            return

        # Old clients still send chunked images; route them through the
        # deprecated handler.
        if self.messageType_Image == self.get_message_type(message):
            deprecated = DeprecatedImageChunking()
            deprecated.save_old_chunked_image(message, device_ID)
            return

        # New way of handling images that have already been uploaded.
        if self.messageType_ImageUpload == self.get_message_type(message):
            self.save_uploaded_image(message, device_ID)
            return

        # The device sent a recipe event (start, stop, or end); republish a
        # notification message to the notifications topic using our
        # NotificationMessaging class.
        if self.messageType_RecipeEvent == self.get_message_type(message):
            action = message.get(self.recipeAction_KEY)
            message_type = None
            name = message.get(self.recipeName_KEY)
            if action == 'start':
                message_type = NotificationMessaging.recipe_start
                self.runs.start(device_ID, name)
            elif action == 'stop':
                message_type = NotificationMessaging.recipe_stop
                self.runs.stop(device_ID)
            elif action == 'end':
                message_type = NotificationMessaging.recipe_end
                self.runs.stop(device_ID)
            if message_type is None:
                logging.error(f'{self.name}.parse: invalid recipe event '
                        f'action={action}')
                return
            # TODO: Re-enable this when notifications are turned back on
            #   (after removing the saving of runs() data from the
            #   notification service).
            # self.notification_messaging.publish(device_ID, message_type, name)
            return

        # Save the most recent data as properties on the Device entity in the
        # datastore.
        self.save_data_to_Device(message, device_ID)

        # Also insert into BQ (Env vars and command replies)
        rowsList = []
        if self.makeBQRowList(message, device_ID, rowsList):
            bigquery.data_insert(rowsList)
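
The chain of if-checks in parse() grows with every message type. A minimal
sketch of the same routing as a dispatch table; every name except
_handle_recipe_event (a hypothetical wrapper around the recipe branch above)
comes from the code above:

    def parse(self, device_ID: str, message: Dict[str, str]) -> None:
        if not self.validate_message(message):
            logging.error(f'{self.name}.parse: invalid message={message}')
            return

        # Map each message type to its handler.
        handlers = {
            self.messageType_Image: lambda:
                DeprecatedImageChunking().save_old_chunked_image(
                    message, device_ID),
            self.messageType_ImageUpload: lambda:
                self.save_uploaded_image(message, device_ID),
            self.messageType_RecipeEvent: lambda:
                self._handle_recipe_event(message, device_ID),  # hypothetical
        }
        handler = handlers.get(self.get_message_type(message))
        if handler is not None:
            handler()
            return

        # Default: save the latest data on the Device entity and insert
        # env vars / command replies into BigQuery.
        self.save_data_to_Device(message, device_ID)
        rowsList = []
        if self.makeBQRowList(message, device_ID, rowsList):
            bigquery.data_insert(rowsList)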
Example #2
    def save_uploaded_image(self, pydict, deviceId):
        try:
            if self.messageType_ImageUpload != self.get_message_type(pydict):
                logging.error("save_uploaded_image: invalid message type")
                return

            # each received image message must have these fields
            if not utils.key_in_dict(pydict, self.varName_KEY) or \
                    not utils.key_in_dict(pydict, self.fileName_KEY):
                logging.error('save_uploaded_image: missing key(s) in dict.')
                return

            var_name =  pydict.get(self.varName_KEY)
            file_name = pydict.get(self.fileName_KEY)

            start = datetime.now()
            # elapsed time since we started waiting for the upload
            delta = datetime.now() - start

            # keep checking for image curl upload for 5 minutes
            while delta.total_seconds() <= 5 * 60:

                # Has this image already been handled?
                # (this can happen since google pub-sub is "at least once" 
                # message delivery, the same message can get delivered again)
                if storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_bucket):
                    logging.info(f'save_uploaded_image: file {file_name} '
                        f'already handled.')
                    break

                # Check if the file is in the upload bucket.
                if not storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_upload_bucket):
                    time.sleep(10)
                    delta = datetime.now() - start
                    logging.debug(f'save_uploaded_image: waited '
                            f'{delta.total_seconds()} secs for '
                            f'upload of {file_name}')
                    continue

                # Move image from one gstorage bucket to another:
                #   openag-public-image-uploads > openag-v1-images
                publicURL = storage.moveFileBetweenBuckets( 
                        env_vars.cs_upload_bucket, 
                        env_vars.cs_bucket, file_name)
                if publicURL is None:
                    logging.warning(f'save_uploaded_image: '
                        f'image already moved: {file_name}')
                    break

                # Put the URL in the datastore for the UI to use.
                datastore.saveImageURL(deviceId, publicURL, var_name)

                # Put the URL as an env. var in BQ.
                message_obj = {}
                # keep old message type, UI code may depend on it
                message_obj[self.messageType_KEY] = self.messageType_Image
                message_obj[self.var_KEY] = var_name
                valuesJson = "{'values':["
                valuesJson += "{'name':'URL', 'type':'str', 'value':'%s'}" % \
                    (publicURL)
                valuesJson += "]}"
                message_obj[self.values_KEY] = valuesJson
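                # NOTE: the values payload above is single-quoted pseudo-JSON
                # rather than strict JSON; makeBQRowList and the UI presumably
                # expect this legacy format, so it is kept verbatim.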

                # Generate the data that will be sent to BigQuery for insertion.
                # Each value must be a row that matches the table schema.
                rowsList = []
                if self.makeBQRowList(message_obj, deviceId, rowsList):
                    bigquery.data_insert(rowsList)

                delta = datetime.now() - start
                logging.info(f"save_uploaded_image: Done with {file_name} "
                        f"in {delta.total_seconds()} secs")
                break
    
            # Remove any files in the uploads bucket that are over 2 hours old
            storage.delete_files_over_two_hours_old(env_vars.cs_upload_bucket)

        except Exception as e:
            logging.critical(f"Exception in save_uploaded_image(): {e}")
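
The five-minute wait in save_uploaded_image() is a poll-with-timeout loop. A
standalone sketch of that pattern as a reusable helper (wait_for is an
illustrative name, not part of the codebase):

    import time
    from datetime import datetime

    def wait_for(predicate, timeout_secs=5 * 60, poll_secs=10):
        """Poll predicate() until it returns True or the timeout elapses.
        Returns True on success, False on timeout."""
        start = datetime.now()
        while (datetime.now() - start).total_seconds() <= timeout_secs:
            if predicate():
                return True
            time.sleep(poll_secs)
        return False

    # Usage, mirroring the loop above:
    # wait_for(lambda: storage.isUploadedImageInBucket(
    #         file_name, env_vars.cs_upload_bucket))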
Example #3
    def save_uploaded_image(self, pydict, deviceId):
        try:
            if self.messageType_ImageUpload != self.get_message_type(pydict):
                logging.error("save_uploaded_image: invalid message type")
                return

            # each received image message must have these fields
            if not utils.key_in_dict(pydict, self.varName_KEY) or \
                    not utils.key_in_dict(pydict, self.fileName_KEY):
                logging.error('save_uploaded_image: missing key(s) in dict.')
                return

            var_name =  pydict.get(self.varName_KEY)
            file_name = pydict.get(self.fileName_KEY)

            start = datetime.now()
            # elapsed time since we started waiting for the upload
            delta = datetime.now() - start

            # keep checking for image curl upload for 5 minutes
            while delta.total_seconds() <= 5 * 60:

                # Has this image already been handled?
                # (this can happen since google pub-sub is "at least once" 
                # message delivery, the same message can get delivered again)
                if storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_bucket):
                    logging.info(f'save_uploaded_image: file {file_name} '
                        f'already handled.')
                    break

                # Check if the file is in the upload bucket.
                if not storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_upload_bucket):
                    time.sleep(10)
                    delta = datetime.now() - start
                    logging.debug(f'save_uploaded_image: waited '
                            f'{delta.total_seconds()} secs for '
                            f'upload of {file_name}')
                    continue

                # Move image from one gstorage bucket to another:
                #   openag-public-image-uploads > openag-v1-images
                publicURL = storage.moveFileBetweenBuckets( 
                        env_vars.cs_upload_bucket, 
                        env_vars.cs_bucket, file_name)
                if publicURL is None:
                    logging.warning(f'save_uploaded_image: '
                        f'image already moved: {file_name}')
                    break

                # use named temporary files to download and resize the image
                f_split = os.path.splitext(file_name)
                with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                        as downloaded_image_fp:

                    downloaded = storage.downloadFile(downloaded_image_fp, 
                            env_vars.cs_bucket, file_name)
                    if not downloaded:
                        logging.error(f'save_uploaded_image: '
                                f'image not downloaded: {file_name}')
                        break  # nothing to resize; skip to the cleanup below

                    # save a medium sized version of the image
                    downloaded_image_fp.seek(0) # rewind to start of stream
                    with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                            as smaller_image_fp:

                        # halves each dimension by default
                        images.resize(downloaded_image_fp.name, 
                                smaller_image_fp.name)

                        fn = f_split[0] + '_medium' + f_split[1]
                        storage.uploadFile(smaller_image_fp, 
                                env_vars.cs_bucket, fn)

                    # save a small sized version of the image
                    downloaded_image_fp.seek(0) # rewind to start of stream
                    with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                            as smaller_image_fp:

                        images.resize(downloaded_image_fp.name, 
                                smaller_image_fp.name,
                                (640, 480)) # good for ani gif

                        fn = f_split[0] + '_small' + f_split[1]
                        storage.uploadFile(smaller_image_fp, 
                                env_vars.cs_bucket, fn)

                    # save a thumbnail sized version of the image
                    downloaded_image_fp.seek(0) # rewind to start of stream
                    with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                            as smaller_image_fp:

                        images.resize(downloaded_image_fp.name, 
                                smaller_image_fp.name,
                                (128, 128)) # thumbnail size

                        fn = f_split[0] + '_thumbnail' + f_split[1]
                        storage.uploadFile(smaller_image_fp, 
                                env_vars.cs_bucket, fn)

                # Put the URL in the datastore for the UI to use.
                datastore.saveImageURL(deviceId, publicURL, var_name)

                # Put the URL as an env. var in BQ.
                message_obj = {}
                # keep old message type, UI code may depend on it
                message_obj[self.messageType_KEY] = self.messageType_Image
                message_obj[self.var_KEY] = var_name
                valuesJson = "{'values':["
                valuesJson += "{'name':'URL', 'type':'str', 'value':'%s'}" % \
                    (publicURL)
                valuesJson += "]}"
                message_obj[self.values_KEY] = valuesJson

                # Generate the data that will be sent to BigQuery for insertion.
                # Each value must be a row that matches the table schema.
                rowsList = []
                if self.makeBQRowList(message_obj, deviceId, rowsList):
                    bigquery.data_insert(rowsList)

                delta = datetime.now() - start
                logging.info(f"save_uploaded_image: Done with {file_name} "
                        f"in {delta.total_seconds()} secs")
                break
    
            # Remove any files in the uploads bucket that are over 2 hours old
            storage.delete_files_over_two_hours_old(env_vars.cs_upload_bucket)

        except Exception as e:
            logging.critical(f"Exception in save_uploaded_image(): {e}")
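
Example #3 repeats the resize-and-upload block three times with different
suffixes and sizes. A sketch of the same work as a data-driven loop, assuming
the images.resize and storage.uploadFile calls used above (the helper name
save_resized_variants is illustrative):

    import os
    import tempfile

    # (suffix, target size); None uses images.resize's default, which
    # halves each dimension.
    RESIZE_VARIANTS = [
        ('_medium', None),
        ('_small', (640, 480)),      # good for animated gifs
        ('_thumbnail', (128, 128)),  # thumbnail size
    ]

    def save_resized_variants(downloaded_image_fp, file_name, bucket):
        base, ext = os.path.splitext(file_name)
        for suffix, size in RESIZE_VARIANTS:
            downloaded_image_fp.seek(0)  # rewind to start of stream
            with tempfile.NamedTemporaryFile(suffix=ext) as smaller_image_fp:
                if size is None:
                    images.resize(downloaded_image_fp.name,
                            smaller_image_fp.name)
                else:
                    images.resize(downloaded_image_fp.name,
                            smaller_image_fp.name, size)
                storage.uploadFile(smaller_image_fp, bucket,
                        base + suffix + ext)
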
    def save_old_chunked_image(self, pydict, deviceId):
        try:
            # each received image message must have these fields
            if not utils.key_in_dict( pydict, self.varName_KEY ) or \
                    not utils.key_in_dict( pydict, self.imageType_KEY ) or \
                    not utils.key_in_dict( pydict, self.chunk_KEY ) or \
                    not utils.key_in_dict( pydict, self.totalChunks_KEY ) or \
                    not utils.key_in_dict( pydict, self.imageChunk_KEY ) or \
                    not utils.key_in_dict( pydict, self.messageID_KEY ):
                logging.error(
                    'save_old_chunked_image: Missing key(s) in dict.')
                return

            messageId = pydict[self.messageID_KEY]
            varName = pydict[self.varName_KEY]
            imageType = pydict[self.imageType_KEY]
            chunkNum = pydict[self.chunk_KEY]
            totalChunks = pydict[self.totalChunks_KEY]
            imageChunk = pydict[self.imageChunk_KEY]

            # Get rid of all chunks if we receive one bad chunk - so we don't
            # make bad partial images.
            if len(imageChunk) == 0:
                logging.error(
                    'save_old_chunked_image: received empty imageChunk from '
                    '{}, cleaning up turds'.format(deviceId))
                self.deleteImageChunksFromDatastore(deviceId, messageId)
                self.saveTurd(deviceId, messageId)
                return

            # Clean up any smelly old turds from previous images (if they don't
            # match the current messageId from this device).
            turds = self.getTurds(deviceId)
            for badImage in turds:
                badMessageId = badImage['messageId']
                if badMessageId != messageId:
                    self.deleteImageChunksFromDatastore(deviceId, badMessageId)
                    self.deleteTurd(deviceId, badMessageId)

            # Save this chunk to the datastore cache.
            self.saveImageChunkToDatastore(deviceId, messageId, varName,
                                           imageType, chunkNum, totalChunks,
                                           imageChunk)

            # For every message received, check data store to see if we can
            # assemble chunks.  Messages will probably be received out of order.

            # Start with one "received" flag per expected chunk:
            listOfChunksReceived = [False] * totalChunks

            # What chunks have we already received?
            oldChunks = self.getImageChunksFromDatastore(deviceId, messageId)
            for oc in oldChunks:
                listOfChunksReceived[oc['chunkNum']] = True
                logging.debug('save_old_chunked_image: received {} of {} '
                              'for messageId={}'.format(
                                  oc['chunkNum'], totalChunks, messageId))

            # Do we have all chunks?
            for index, received in enumerate(listOfChunksReceived):
                logging.debug(
                    'save_old_chunked_image: listOfChunksReceived[{}]={}'
                    .format(index, received))
            haveAllChunks = all(listOfChunksReceived)
            logging.debug('save_old_chunked_image: haveAllChunks={}'.format(
                haveAllChunks))

            # No, so return and wait for more chunks (this one is already
            # cached in the datastore).
            if not haveAllChunks:
                logging.debug(
                    'save_old_chunked_image: returning to wait for more chunks'
                )
                return

            # YES! We have all our chunks, so reassemble the binary image.

            # Delete the temporary datastore cache for the chunks
            self.deleteImageChunksFromDatastore(deviceId, messageId)
            self.deleteTurd(deviceId, messageId)

            # Sort the chunks by chunkNum (we get messages out of order)
            oldChunks = sorted(oldChunks, key=lambda k: k['chunkNum'])

            # Reassemble the b64 chunks into one string (in order).
            b64str = ''
            for oc in oldChunks:
                b64str += oc['imageChunk']
                logging.debug(
                    'save_old_chunked_image: assemble {} of {}'.format(
                        oc['chunkNum'], oc['totalChunks']))

            # Now convert our base64 string into binary image bytes
            imageBytes = base64.b64decode(b64str)

            # Put the image bytes in cloud storage as a file, and get a URL
            publicURL = storage.saveFile(varName, imageType, imageBytes,
                                         deviceId)

            # Put the URL in the datastore for the UI to use.
            datastore.saveImageURL(deviceId, publicURL, varName)

            # Put the URL as an env. var in BQ.
            message_obj = {}
            message_obj[self.messageType_KEY] = self.messageType_Image
            message_obj[self.var_KEY] = varName
            valuesJson = "{'values':["
            valuesJson += "{'name':'URL', 'type':'str', 'value':'%s'}" % \
                (publicURL)
            valuesJson += "]}"
            message_obj[self.values_KEY] = valuesJson
            rowsList = []
            self.makeBQEnvVarRowList(message_obj, deviceId, rowsList)
            if rowsList:  # guard against inserting an empty row list
                bigquery.data_insert(rowsList)

        except Exception as e:
            logging.critical("Exception in save_old_chunked_image(): %s" % e)
            traceback.print_exc(file=sys.stdout)
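
The reassembly at the end of save_old_chunked_image() reduces to: sort by
chunk number, concatenate the base64 pieces, decode. A standalone sketch of
that core step (reassemble_chunks is an illustrative name):

    import base64

    def reassemble_chunks(chunks):
        """chunks: dicts with 'chunkNum' and 'imageChunk' (a base64 string),
        shaped like the records getImageChunksFromDatastore() returns.
        Returns the binary image bytes."""
        ordered = sorted(chunks, key=lambda c: c['chunkNum'])
        return base64.b64decode(''.join(c['imageChunk'] for c in ordered))

    # Two out-of-order chunks of base64('hello') reassemble correctly:
    # reassemble_chunks([{'chunkNum': 1, 'imageChunk': 'bG8='},
    #                    {'chunkNum': 0, 'imageChunk': 'aGVs'}]) == b'hello'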