Code example #1
    def makeBQEnvVarRowList(self, valueDict, deviceId, rowsList):
        # each received EnvVar type message must have these fields
        if not utils.key_in_dict(valueDict, self.var_KEY ) or \
           not utils.key_in_dict(valueDict, self.values_KEY ):
            logging.error('makeBQEnvVarRowList: Missing key(s) in dict.')
            return

        idKey = 'Env'
        varName = valueDict[self.var_KEY]
        values = valueDict[self.values_KEY]

        # clean / scrub / check the values.
        deviceId = deviceId.replace('~', '')
        varName = varName.replace('~', '')

        # NEW ID format:  <KEY>~<valName>~<created UTC TS>~<deviceId>
        ID = idKey + '~{}~{}~' + deviceId

        row = (
            ID.format(varName, time.strftime('%FT%XZ',
                                             time.gmtime())),  # id column
            values,
            0,
            0)  # values column, with zero for X, Y

        rowsList.append(row)
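
The row tuple above packs the composite ID, the raw values string, and two zero placeholders for the X and Y columns. A minimal standalone sketch of the same ID construction follows; the helper name make_env_row and the sample argument values are hypothetical, everything else mirrors the method above:

    import time

    def make_env_row(var_name, device_id, values):
        # ID format: <KEY>~<varName>~<created UTC TS>~<deviceId>
        # '%FT%XZ' typically renders as e.g. '2024-01-31T12:34:56Z'
        ts = time.strftime('%FT%XZ', time.gmtime())
        row_id = 'Env~{}~{}~{}'.format(var_name.replace('~', ''), ts,
                                       device_id.replace('~', ''))
        return (row_id, values, 0, 0)  # id, values, X, Y placeholder columns

    # e.g. make_env_row('air_temperature_celsius', 'EDU-0001', "{'values':[]}")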
Code example #2
    def save_data_to_Device(self, pydict, deviceId):
        try:
            message_type = self.get_message_type(pydict)
            if message_type != self.messageType_EnvVar and \
                    message_type != self.messageType_CommandReply:
                logging.debug(f"save_data_to_Device: invalid message type {message_type}")
                return

            # each received EnvVar type message must have these fields
            if not utils.key_in_dict(pydict, self.var_KEY ) or \
                not utils.key_in_dict(pydict, self.values_KEY ):
                logging.error('save_data_to_Device: Missing key(s) in dict.')
                return
            varName = pydict[ self.var_KEY ]

            value = self.__string_to_value( pydict[ self.values_KEY ] )
            name = self.__string_to_name( pydict[ self.values_KEY ] )
            valueToSave = { 
                'timestamp': str( time.strftime( '%FT%XZ', time.gmtime())),
                'name': str( name ),
                'value': str( value ) }

            datastore.save_device_data(deviceId, varName, valueToSave)

        except Exception as e:
            logging.critical(f"Exception in save_data_to_Device(): {e}")
Code example #3
 def validate_message(self, message: Dict[str, str]) -> bool:
     if not utils.key_in_dict(message, self.messageType_KEY):
         return False
     message_type = self.get_message_type(message)
     if not (message_type == self.messageType_EnvVar or \
             message_type == self.messageType_CommandReply or \
             message_type == self.messageType_Image or \
             message_type == self.messageType_ImageUpload or \
             message_type == self.messageType_RecipeEvent):
         return False
     if message_type == self.messageType_EnvVar or \
             message_type == self.messageType_CommandReply:
         # mandatory keys for msg types 'EnvVar' and 'CommandReply'
         if not (utils.key_in_dict(message, self.var_KEY) and \
                 utils.key_in_dict(message, self.values_KEY)):
             return False
     if message_type == self.messageType_Image or \
             message_type == self.messageType_ImageUpload:
         # mandatory keys for image messages
         if not (utils.key_in_dict(message, self.varName_KEY) and \
                 utils.key_in_dict(message, self.imageType_KEY) and \
                 utils.key_in_dict(message, self.fileName_KEY)):
             return False
     if message_type == self.messageType_RecipeEvent:
         # mandatory keys for recipe event messages
         if not (utils.key_in_dict(message, self.recipeAction_KEY) and \
                 utils.key_in_dict(message, self.recipeName_KEY)):
             return False
     return True
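
A quick usage sketch of the validator above. The class name MessageHandler, the literal key strings ('type', 'var', 'values'), and the type value 'EnvVar' are hypothetical stand-ins for the self.*_KEY and self.messageType_* attributes, whose actual values do not appear in this excerpt:

    # Hypothetical smoke test; real key names come from the class attributes.
    handler = MessageHandler()   # assumed owner class of validate_message
    good = {'type': 'EnvVar', 'var': 'air_temp', 'values': "{'values':[]}"}
    bad = {'type': 'EnvVar'}     # missing the mandatory var / values keys
    assert handler.validate_message(good)
    assert not handler.validate_message(bad)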
Code example #4
 def validate_message(self, message: Dict[str, str]) -> bool:
     if not utils.key_in_dict(message, self.device_ID_key):
         return False
     if not utils.key_in_dict(message, self.message_type_key):
         return False
     if not utils.key_in_dict(message, self.message_key):
         return False
     message_type = message.get(self.message_type_key)
     if not (message_type == self.recipe_start or \
             message_type == self.recipe_stop or \
             message_type == self.recipe_end or \
             message_type == self.set_testing_hours):
         return False
     return True
Code example #5
    def get_message_type(self, message):
        if not utils.key_in_dict(message, self.messageType_KEY):
            logging.error('Missing key %s' % self.messageType_KEY)
            return None

        if self.messageType_EnvVar == message.get(self.messageType_KEY):
            return self.messageType_EnvVar

        if self.messageType_CommandReply == message.get(self.messageType_KEY):
            return self.messageType_CommandReply

        # deprecated
        if self.messageType_Image == message.get(self.messageType_KEY):
            return self.messageType_Image

        if self.messageType_ImageUpload == message.get(self.messageType_KEY):
            return self.messageType_ImageUpload

        if self.messageType_RecipeEvent == message.get(self.messageType_KEY):
            return self.messageType_RecipeEvent

        logging.error('get_message_type: Invalid value {} for key {}'.format(
            message.get(self.messageType_KEY), self.messageType_KEY ))
        return None
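
Since get_message_type is just a whitelist lookup on the message's type field, the if-chain could be collapsed into a set membership test. A sketch of that equivalent form, reusing only names that appear in the method above:

    # Sketch only: same semantics as the if-chain above.
    def get_message_type(self, message):
        if not utils.key_in_dict(message, self.messageType_KEY):
            logging.error('Missing key %s' % self.messageType_KEY)
            return None
        message_type = message.get(self.messageType_KEY)
        valid_types = {
            self.messageType_EnvVar,
            self.messageType_CommandReply,
            self.messageType_Image,        # deprecated
            self.messageType_ImageUpload,
            self.messageType_RecipeEvent,
        }
        if message_type in valid_types:
            return message_type
        logging.error('get_message_type: Invalid value {} for key {}'.format(
            message_type, self.messageType_KEY))
        return None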
Code example #6
    def save_uploaded_image(self, pydict, deviceId):
        try:
            if self.messageType_ImageUpload != self.get_message_type(pydict):
                logging.error("save_uploaded_image: invalid message type")
                return

            # each received image message must have these fields
            if not utils.key_in_dict(pydict, self.varName_KEY) or \
                    not utils.key_in_dict(pydict, self.fileName_KEY):
                logging.error('save_uploaded_image: missing key(s) in dict.')
                return

            var_name =  pydict.get(self.varName_KEY)
            file_name = pydict.get(self.fileName_KEY)

            start = datetime.now()
            # get a timedelta of the difference
            delta = datetime.now() - start

            # keep checking for image curl upload for 5 minutes
            while delta.total_seconds() <= 5 * 60:

                # Has this image already been handled?
                # (this can happen since google pub-sub is "at least once" 
                # message delivery, the same message can get delivered again)
                if storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_bucket):
                    logging.info(f'save_uploaded_image: file {file_name} '
                        f'already handled.')
                    break

                # Check if the file is in the upload bucket.
                if not storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_upload_bucket):
                    time.sleep(10)
                    delta = datetime.now() - start
                    logging.debug(f'save_uploaded_image: waited '
                            f'{delta.total_seconds()} secs for '
                            f'upload of {file_name}')
                    continue

                # Move image from one gstorage bucket to another:
                #   openag-public-image-uploads > openag-v1-images
                publicURL = storage.moveFileBetweenBuckets( 
                        env_vars.cs_upload_bucket, 
                        env_vars.cs_bucket, file_name)
                if publicURL is None:
                    logging.warning(f'save_uploaded_image: '
                        f'image already moved: {file_name}')
                    break

                #img_in_bucket = storage.isUploadedImageInBucket(file_name, 
                #        env_vars.cs_bucket)
                #logging.info(f'save_uploaded_image: {img_in_bucket} '
                #        f'file {file_name} is in {env_vars.cs_bucket}')

                # use named temporary files to download and resize the image
                f_split = os.path.splitext(file_name)
                with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                        as downloaded_image_fp:

                    downloaded = storage.downloadFile(downloaded_image_fp, 
                            env_vars.cs_bucket, file_name)
                    if not downloaded:
                        logging.error(f'save_uploaded_image: '
                                f'image not downloaded: {file_name}')

                    # save a medium sized version of the image
                    downloaded_image_fp.seek(0) # rewind to start of stream
                    with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                            as smaller_image_fp:

                        # halves each dimension by default
                        images.resize(downloaded_image_fp.name, 
                                smaller_image_fp.name)

                        fn = f_split[0] + '_medium' + f_split[1]
                        storage.uploadFile(smaller_image_fp, 
                                env_vars.cs_bucket, fn)

                    # save a small sized version of the image
                    downloaded_image_fp.seek(0) # rewind to start of stream
                    with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                            as smaller_image_fp:

                        images.resize(downloaded_image_fp.name, 
                                smaller_image_fp.name,
                                (640, 480)) # good for ani gif

                        fn = f_split[0] + '_small' + f_split[1]
                        storage.uploadFile(smaller_image_fp, 
                                env_vars.cs_bucket, fn)

                    # save a thumbnail sized version of the image
                    downloaded_image_fp.seek(0) # rewind to start of stream
                    with tempfile.NamedTemporaryFile(suffix=f_split[1]) \
                            as smaller_image_fp:

                        images.resize(downloaded_image_fp.name, 
                                smaller_image_fp.name,
                                (128, 128)) # thumbnail size

                        fn = f_split[0] + '_thumbnail' + f_split[1]
                        storage.uploadFile(smaller_image_fp, 
                                env_vars.cs_bucket, fn)

                # Put the URL in the datastore for the UI to use.
                datastore.saveImageURL(deviceId, publicURL, var_name)

                # Put the URL as an env. var in BQ.
                message_obj = {}
                # keep old message type, UI code may depend on it
                message_obj[ self.messageType_KEY ] = self.messageType_Image
                message_obj[ self.var_KEY ] = var_name
                valuesJson = "{'values':["
                valuesJson += "{'name':'URL', 'type':'str', 'value':'%s'}" % \
                    (publicURL)
                valuesJson += "]}"
                message_obj[ self.values_KEY ] = valuesJson

                # Generate the data that will be sent to BigQuery for insertion.
                # Each value must be a row that matches the table schema.
                rowsList = []
                if self.makeBQRowList(message_obj, deviceId, rowsList):
                    bigquery.data_insert(rowsList)

                delta = datetime.now() - start
                logging.info(f"save_uploaded_image: Done with {file_name} "
                        f"in {delta.total_seconds()} secs")
                break
    
            # Remove any files in the uploads bucket that are over 2 hours old
            storage.delete_files_over_two_hours_old(env_vars.cs_upload_bucket)

        except Exception as e:
            logging.critical(f"Exception in save_uploaded_image(): {e}")
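
The three resize-and-upload passes above differ only in the target size and the filename suffix, so they could be table-driven. A hedged sketch of that consolidation, assuming we are still inside the with-block that holds downloaded_image_fp and using the same images.resize and storage.uploadFile helpers; the suffix/size pairs are taken from the comments above:

    # Hypothetical consolidation of the medium / small / thumbnail passes.
    variants = [
        ('_medium', None),          # images.resize default: halve each dimension
        ('_small', (640, 480)),     # good for animated gifs
        ('_thumbnail', (128, 128)), # thumbnail size
    ]
    base, ext = os.path.splitext(file_name)
    for suffix, size in variants:
        downloaded_image_fp.seek(0)  # rewind before each resize pass
        with tempfile.NamedTemporaryFile(suffix=ext) as smaller_image_fp:
            if size is None:
                images.resize(downloaded_image_fp.name, smaller_image_fp.name)
            else:
                images.resize(downloaded_image_fp.name, smaller_image_fp.name, size)
            storage.uploadFile(smaller_image_fp, env_vars.cs_bucket,
                               base + suffix + ext)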
Code example #7
    def save_uploaded_image(self, pydict, deviceId):
        try:
            if self.messageType_ImageUpload != self.get_message_type(pydict):
                logging.error("save_uploaded_image: invalid message type")
                return

            # each received image message must have these fields
            if not utils.key_in_dict(pydict, self.varName_KEY) or \
                    not utils.key_in_dict(pydict, self.fileName_KEY):
                logging.error('save_uploaded_image: missing key(s) in dict.')
                return

            var_name =  pydict.get(self.varName_KEY)
            file_name = pydict.get(self.fileName_KEY)

            start = datetime.now()
            # get a timedelta of the difference
            delta = datetime.now() - start

            # keep checking for image curl upload for 5 minutes
            while delta.total_seconds() <= 5 * 60:

                # Has this image already been handled?
                # (this can happen since google pub-sub is "at least once" 
                # message delivery, the same message can get delivered again)
                if storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_bucket):
                    logging.info(f'save_uploaded_image: file {file_name} '
                        f'already handled.')
                    break

                # Check if the file is in the upload bucket.
                if not storage.isUploadedImageInBucket(file_name, 
                        env_vars.cs_upload_bucket):
                    time.sleep(10)
                    delta = datetime.now() - start
                    logging.debug(f'save_uploaded_image: waited '
                            f'{delta.total_seconds()} secs for '
                            f'upload of {file_name}')
                    continue

                # Move image from one gstorage bucket to another:
                #   openag-public-image-uploads > openag-v1-images
                publicURL = storage.moveFileBetweenBuckets( 
                        env_vars.cs_upload_bucket, 
                        env_vars.cs_bucket, file_name)
                if publicURL is None:
                    logging.warning(f'save_uploaded_image: '
                        f'image already moved: {file_name}')
                    break

                # Put the URL in the datastore for the UI to use.
                datastore.saveImageURL(deviceId, publicURL, var_name)

                # Put the URL as an env. var in BQ.
                message_obj = {}
                # keep old message type, UI code may depend on it
                message_obj[ self.messageType_KEY ] = self.messageType_Image
                message_obj[ self.var_KEY ] = var_name
                valuesJson = "{'values':["
                valuesJson += "{'name':'URL', 'type':'str', 'value':'%s'}" % \
                    (publicURL)
                valuesJson += "]}"
                message_obj[ self.values_KEY ] = valuesJson

                # Generate the data that will be sent to BigQuery for insertion.
                # Each value must be a row that matches the table schema.
                rowsList = []
                if self.makeBQRowList(message_obj, deviceId, rowsList):
                    bigquery.data_insert(rowsList)

                delta = datetime.now() - start
                logging.info(f"save_uploaded_image: Done with {file_name} "
                        f"in {delta.total_seconds()} secs")
                break
    
            # Remove any files in the uploads bucket that are over 2 hours old
            storage.delete_files_over_two_hours_old(env_vars.cs_upload_bucket)

        except Exception as e:
            logging.critical(f"Exception in save_uploaded_image(): {e}")
Code example #8
    def save_old_chunked_image(self, pydict, deviceId):
        try:
            # each received image message must have these fields
            if not utils.key_in_dict( pydict, self.varName_KEY ) or \
                    not utils.key_in_dict( pydict, self.imageType_KEY ) or \
                    not utils.key_in_dict( pydict, self.chunk_KEY ) or \
                    not utils.key_in_dict( pydict, self.totalChunks_KEY ) or \
                    not utils.key_in_dict( pydict, self.imageChunk_KEY ) or \
                    not utils.key_in_dict( pydict, self.messageID_KEY ):
                logging.error(
                    'save_old_chunked_image: Missing key(s) in dict.')
                return

            messageId = pydict[self.messageID_KEY]
            varName = pydict[self.varName_KEY]
            imageType = pydict[self.imageType_KEY]
            chunkNum = pydict[self.chunk_KEY]
            totalChunks = pydict[self.totalChunks_KEY]
            imageChunk = pydict[self.imageChunk_KEY]

            # Get rid of all chunks if we receive one bad chunk - so we don't
            # make bad partial images.
            if 0 == len(imageChunk):
                logging.error(
                    "save_old_chunked_image: received empty imageChunk from {}, cleaning up turds"
                    .format(deviceId))
                self.deleteImageChunksFromDatastore(deviceId, messageId)
                self.saveTurd(deviceId, messageId)
                return

            # Clean up any smelly old turds from previous images (if they don't
            # match the current messageId from this device).
            turds = self.getTurds(deviceId)
            for badImage in turds:
                badMessageId = badImage['messageId']
                if badMessageId != messageId:
                    self.deleteImageChunksFromDatastore(deviceId, badMessageId)
                    self.deleteTurd(deviceId, badMessageId)

            # Save this chunk to the datastore cache.
            self.saveImageChunkToDatastore(deviceId, messageId, varName,
                                           imageType, chunkNum, totalChunks,
                                           imageChunk)

            # For every message received, check data store to see if we can
            # assemble chunks.  Messages will probably be received out of order.

            # Start with one "received" flag per expected chunk:
            listOfChunksReceived = [False] * totalChunks

            # What chunks have we already received?
            oldChunks = self.getImageChunksFromDatastore(deviceId, messageId)
            for oc in oldChunks:
                listOfChunksReceived[oc['chunkNum']] = True
                logging.debug('save_old_chunked_image: received {} of {} '
                              'for messageId={}'.format(
                                  oc['chunkNum'], totalChunks, messageId))

            # Do we have all chunks?
            haveAllChunks = True
            chunkCount = 0
            for c in listOfChunksReceived:
                logging.debug(
                    'save_old_chunked_image: listOfChunksReceived [{}]={}'.
                    format(chunkCount, c))
                chunkCount += 1
                if not c:
                    haveAllChunks = False
            logging.debug('save_old_chunked_image: haveAllChunks={}'.format(
                haveAllChunks))

            # No, so just add this chunk to the datastore and return
            if not haveAllChunks:
                logging.debug(
                    'save_old_chunked_image: returning to wait for more chunks'
                )
                return

            # YES! We have all our chunks, so reassemble the binary image.

            # Delete the temporary datastore cache for the chunks
            self.deleteImageChunksFromDatastore(deviceId, messageId)
            self.deleteTurd(deviceId, messageId)

            # Sort the chunks by chunkNum (we get messages out of order)
            oldChunks = sorted(oldChunks, key=lambda k: k['chunkNum'])

            # Reassemble the b64 chunks into one string (in order).
            b64str = ''
            for oc in oldChunks:
                b64str += oc['imageChunk']
                logging.debug(
                    'save_old_chunked_image: assemble {} of {}'.format(
                        oc['chunkNum'], oc['totalChunks']))

            # Now convert our base64 string into binary image bytes
            imageBytes = base64.b64decode(b64str)

            # Put the image bytes in cloud storage as a file, and get a URL
            publicURL = storage.saveFile(varName, imageType, imageBytes,
                                         deviceId)

            # Put the URL in the datastore for the UI to use.
            datastore.saveImageURL(deviceId, publicURL, varName)

            # Put the URL as an env. var in BQ.
            message_obj = {}
            message_obj[self.messageType_KEY] = self.messageType_Image
            message_obj[self.var_KEY] = varName
            valuesJson = "{'values':["
            valuesJson += "{'name':'URL', 'type':'str', 'value':'%s'}" % \
                                ( publicURL )
            valuesJson += "]}"
            message_obj[self.values_KEY] = valuesJson
            rowsList = []
            self.makeBQEnvVarRowList(message_obj, deviceId, rowsList)
            bigquery.data_insert(rowsList)

        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            logging.critical("Exception in save_old_chunked_image(): %s" % e)
            traceback.print_tb(exc_traceback, file=sys.stdout)
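
The heart of the reassembly step is ordering the cached chunks and decoding the concatenated base64 payload. A self-contained miniature of just that part, using only the standard library; the function name reassemble_chunks is hypothetical, but the 'chunkNum' and 'imageChunk' fields mirror the chunk dictionaries used above:

    import base64

    def reassemble_chunks(chunks):
        """chunks: list of dicts with 'chunkNum' and 'imageChunk' (base64 pieces)."""
        # Chunks can arrive and be cached out of order, so sort by chunkNum first.
        ordered = sorted(chunks, key=lambda c: c['chunkNum'])
        b64str = ''.join(c['imageChunk'] for c in ordered)
        return base64.b64decode(b64str)  # raw binary image bytes

    # usage, given the oldChunks list from the method above:
    #   imageBytes = reassemble_chunks(oldChunks)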