def test_sas_delete(self):
    """Verify that an entity can be deleted through a SAS-authenticated service.

    SAS URL is calculated from the storage key, so this test runs live only.
    """
    # BUGFIX: original called TestMode.need_recordingfile (no underscore),
    # which raises AttributeError; the sibling copy of this test and the
    # azure test framework use need_recording_file.
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: create a throwaway entity and a delete-only SAS token valid 1h.
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.DELETE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act: delete via a service authenticated solely by the SAS token.
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    service.delete_entity(self.table_name, entity.PartitionKey, entity.RowKey)

    # Assert: the entity is gone when read back with the account-key service.
    with self.assertRaises(AzureMissingResourceHttpError):
        self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey)
def delete_extra_ips_from_storage_table(storage_table_name):
    """Delete rows from the storage table whose IP is no longer present on
    the scale set's NICs.

    Parameters:
        storage_table_name: name of the Azure storage table to prune.

    Relies on module-level names: authenticate, client_id, key, tenant_id,
    table_key, get_all_rows, get_all_ips, get_new_ips, resource_group_name,
    vm_ss_name.
    """
    credentials_dict = authenticate(client_id, key, tenant_id)
    table_service = TableService(account_name='progaccount',
                                 account_key=table_key)
    table_rows = get_all_rows(storage_table_name)
    # Python 3 print() calls (original used Python-2 print statements).
    print('LIST of table rows:\n', table_rows)
    print('list of NIC rows:\n', get_all_ips(resource_group_name, vm_ss_name))
    # BUGFIX: original passed undefined name `table_name` here (NameError);
    # the parameter is `storage_table_name`. Also hoisted out of the loop:
    # the set of live IPs does not change per iteration.
    live_ips = get_new_ips(get_all_rows(storage_table_name),
                           get_all_ips(resource_group_name, vm_ss_name))
    for curr_storage_table_ip in table_rows['ip']:
        if curr_storage_table_ip not in live_ips:
            # Look up the row's partition key once instead of repeating
            # the O(n) .index() scan three times.
            row_index = table_rows['ip'].index(curr_storage_table_ip)
            partition_key = table_rows['partition_key'][row_index]
            print('Deleting Entity:', partition_key,
                  ' Current storage table IP: ', curr_storage_table_ip)
            table_service.delete_entity(storage_table_name,
                                        partition_key,
                                        curr_storage_table_ip)
def test_sas_delete(self):
    """Delete an entity using a SAS-token-authenticated TableService."""
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange: insert an entity and mint a delete-only SAS valid for one hour.
    entity = self._insert_random_entity()
    expiry = datetime.utcnow() + timedelta(hours=1)
    sas = self.ts.generate_table_shared_access_signature(
        self.table_name, TablePermissions.DELETE, expiry)

    # Act: a second service object authenticates with the SAS token alone.
    sas_service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME, sas_token=sas)
    self._set_service_options(sas_service, self.settings)
    sas_service.delete_entity(
        self.table_name, entity.PartitionKey, entity.RowKey)

    # Assert: reading back through the key-authenticated service fails.
    with self.assertRaises(AzureMissingResourceHttpError):
        self.ts.get_entity(
            self.table_name, entity.PartitionKey, entity.RowKey)
class StorageTableContext():
    """Repository over Azure Table Storage for StorageTableModel instances.

    Initializes the repository with the specified settings dict.
    Required settings in config dict are:
    - AZURE_STORAGE_NAME
    - AZURE_STORAGE_KEY
    Optional:
    - AZURE_REQUIRE_ENCRYPTION, AZURE_KEY_IDENTIFIER, SECRET_KEY
      (enable client-side encryption of registered properties)
    """

    # Class-level defaults. __init__ rebinds the mutable ones per instance
    # so state is never shared between contexts.
    _models = []
    _encryptproperties = False
    _encrypted_properties = []
    _tableservice = None
    _storage_key = ''
    _storage_name = ''

    def __init__(self, **kwargs):
        self._storage_name = kwargs.get('AZURE_STORAGE_NAME', '')
        self._storage_key = kwargs.get('AZURE_STORAGE_KEY', '')

        # service init
        self._models = []
        # BUGFIX: give each instance its own list. The original let
        # register_model's '+=' mutate the shared class-level list, leaking
        # encrypted-property names across instances.
        self._encrypted_properties = []
        if self._storage_key != '' and self._storage_name != '':
            self._tableservice = TableService(account_name=self._storage_name,
                                              account_key=self._storage_key,
                                              protocol='https')

        # client-side encryption setup
        if kwargs.get('AZURE_REQUIRE_ENCRYPTION', False):
            # Create the KEK used for encryption.
            # KeyWrapper is the provided sample implementation, but the user
            # may use their own object as long as it implements the expected
            # interface.
            kek = KeyWrapper(kwargs.get('AZURE_KEY_IDENTIFIER', 'otrrentapi'),
                             kwargs.get('SECRET_KEY', 'super-duper-secret'))
            # Create the key resolver used for decryption. KeyResolver is the
            # provided sample implementation.
            key_resolver = KeyResolver()
            key_resolver.put_key(kek)
            # Set the KEK and key resolver on the service object.
            self._encryptproperties = True
            self._tableservice.key_encryption_key = kek
            # BUGFIX: attribute name was misspelled 'key_resolver_funcion',
            # so the resolver was silently never registered and decryption
            # could not work.
            self._tableservice.key_resolver_function = key_resolver.resolve_key
            self._tableservice.encryption_resolver_function = self.__encryptionresolver__

    def __createtable__(self, tablename) -> bool:
        """Create the table if a service is configured; True on success."""
        if self._tableservice is not None:
            try:
                self._tableservice.create_table(tablename)
                return True
            except AzureException as e:
                log.error('failed to create {} with error {}'.format(tablename, e))
                return False
        else:
            # No service configured: treat as a no-op success (original behavior).
            return True

    # Define the encryption resolver_function.
    def __encryptionresolver__(self, pk, rk, property_name):
        """Return True when the given property must be encrypted client side."""
        return property_name in self._encrypted_properties

    def register_model(self, storagemodel: object):
        """Create the model's table (once) and register its encrypted fields."""
        modelname = storagemodel.__class__.__name__
        if isinstance(storagemodel, StorageTableModel):
            if modelname not in self._models:
                self.__createtable__(storagemodel._tablename)
                self._models.append(modelname)
                # set properties to be encrypted client side
                if self._encryptproperties:
                    self._encrypted_properties += storagemodel._encryptedproperties
                log.info('model {} registered successfully. Models are {!s}. Encrypted fields are {!s} '.format(modelname, self._models, self._encrypted_properties))

    def table_isempty(self, tablename, PartitionKey='', RowKey='') -> bool:
        """Return True when no entity matches the given keys (or on error)."""
        if self._tableservice is not None:
            query_filter = "PartitionKey eq '{}'".format(PartitionKey) if PartitionKey != '' else ''
            if query_filter == '':
                query_filter = "RowKey eq '{}'".format(RowKey) if RowKey != '' else ''
            else:
                # BUGFIX: original concatenated "...'and RowKey..." with no
                # separating space, producing an invalid OData filter string.
                query_filter = query_filter + (" and RowKey eq '{}'".format(RowKey) if RowKey != '' else '')
            try:
                entities = list(self._tableservice.query_entities(tablename, filter=query_filter, select='PartitionKey', num_results=1))
                return len(entities) != 1
            except AzureMissingResourceHttpError as e:
                log.debug('failed to query {} with error {}'.format(tablename, e))
                return True
        else:
            return True

    def exists(self, storagemodel) -> bool:
        """Return True when the entity exists in storage (cached on the model)."""
        exists = False
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if modelname in self._models:
                if storagemodel._exists is None:
                    # Not yet probed: one GET decides and caches the answer.
                    try:
                        entity = self._tableservice.get_entity(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey)
                        storagemodel._exists = True
                        exists = True
                    except AzureMissingResourceHttpError:
                        storagemodel._exists = False
                else:
                    exists = storagemodel._exists
            else:
                log.debug('please register model {} first'.format(modelname))
        return exists

    def get(self, storagemodel) -> StorageTableModel:
        """ load entity data from storage to vars in self """
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if modelname in self._models:
                try:
                    entity = self._tableservice.get_entity(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey)
                    storagemodel._exists = True
                    # sync with entity values: copy every public, non-key
                    # attribute the entity provides onto the model.
                    for key, default in vars(storagemodel).items():
                        if not key.startswith('_') and key not in ['', 'PartitionKey', 'RowKey']:
                            value = getattr(entity, key, None)
                            if value is not None:
                                setattr(storagemodel, key, value)
                except AzureMissingResourceHttpError as e:
                    log.debug('can not get table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
                    storagemodel._exists = False
                except Exception as e:
                    log.debug('can not get table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
                    storagemodel._exists = False
            else:
                log.debug('please register model {} first to {!s}'.format(modelname, self._models))
            return storagemodel
        else:
            return None

    def insert(self, storagemodel) -> StorageTableModel:
        """ insert model into storage """
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if modelname in self._models:
                try:
                    self._tableservice.insert_or_replace_entity(storagemodel._tablename, storagemodel.entity())
                    storagemodel._exists = True
                except AzureMissingResourceHttpError as e:
                    log.debug('can not insert or replace table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
            else:
                log.debug('please register model {} first'.format(modelname))
            return storagemodel
        else:
            return None

    def merge(self, storagemodel) -> StorageTableModel:
        """ try to merge entry """
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if modelname in self._models:
                try:
                    self._tableservice.insert_or_merge_entity(storagemodel._tablename, storagemodel.entity())
                    storagemodel._exists = True
                except AzureMissingResourceHttpError as e:
                    log.debug('can not insert or merge table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
            else:
                log.debug('please register model {} first'.format(modelname))
            return storagemodel
        else:
            return None

    def delete(self, storagemodel):
        """ delete existing Entity """
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if modelname in self._models:
                try:
                    self._tableservice.delete_entity(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey)
                    storagemodel._exists = False
                except AzureMissingResourceHttpError as e:
                    log.debug('can not delete table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
            else:
                log.debug('please register model {} first'.format(modelname))
            return storagemodel
        else:
            return None

    def __changeprimarykeys__(self, PartitionKey='', RowKey=''):
        """ Change Entity Primary Keys into new instance:

            - PartitionKey and/or
            - RowKey

        NOTE(review): reads self._PartitionKey / self._RowKey, which are not
        defined on this class — this method (and moveto/copyto below) looks
        like it belongs on StorageTableModel. Left unchanged; verify callers.
        """
        PartitionKey = PartitionKey if PartitionKey != '' else self._PartitionKey
        RowKey = RowKey if RowKey != '' else self._RowKey
        # change Primary Keys only if different to existing ones
        if (PartitionKey != self._PartitionKey) or (RowKey != self._RowKey):
            return True, PartitionKey, RowKey
        else:
            return False, PartitionKey, RowKey

    def moveto(self, PartitionKey='', RowKey=''):
        """ Change Entity Primary Keys and move in Storage:

            - PartitionKey and/or
            - RowKey
        """
        changed, PartitionKey, RowKey = self.__changeprimarykeys__(PartitionKey, RowKey)
        if changed:
            # sync self
            new = self.copyto(PartitionKey, RowKey)
            new.save()
            # delete Entity if exists in Storage
            self.delete()

    def copyto(self, PartitionKey='', RowKey='') -> object:
        """ Change Entity Primary Keys and copy to new Instance:

            - PartitionKey and/or
            - RowKey

        NOTE(review): uses self.load() and self._PartitionKey, neither defined
        on this class, and 'new = self' aliases rather than copies — confirm
        intent against StorageTableModel before relying on this.
        """
        changed, PartitionKey, RowKey = self.__changeprimarykeys__(PartitionKey, RowKey)
        self.load()
        new = self
        new._PartitionKey = PartitionKey
        new._RowKey = RowKey
        new.load()
        return new

    def query(self, storagecollection) -> StorageTableCollection:
        """Fill the collection with entities matching its own filter."""
        if isinstance(storagecollection, StorageTableCollection):
            try:
                storagecollection.extend(self._tableservice.query_entities(storagecollection._tablename, storagecollection._filter))
            except AzureMissingResourceHttpError as e:
                log.debug('can not query table {} with filters {} because {!s}'.format(storagecollection._tablename, storagecollection._filter, e))
            return storagecollection
        else:
            return None
}
# Second task for the batch insert below.
task11 = {
    'PartitionKey': 'tasksSeattle',
    'RowKey': '11',
    'description': 'Clean the bathroom',
    'priority': 100
}
# Insert both entities in a single batch (same partition key required).
table_service.begin_batch()
table_service.insert_entity('tasktable', task10)
table_service.insert_entity('tasktable', task11)
table_service.commit_batch()
# Read a single entity back by partition key + row key.
task = table_service.get_entity('tasktable', 'tasksSeattle', '1')
print(task.description)
print(task.priority)
# Query all entities in the partition.
tasks = table_service.query_entities('tasktable', "PartitionKey eq 'tasksSeattle'")
for task in tasks:
    print(task.description)
    print(task.priority)
# Same query, projecting only the 'description' property.
tasks = table_service.query_entities('tasktable', "PartitionKey eq 'tasksSeattle'", 'description')
for task in tasks:
    print(task.description)
# Clean up: remove one entity, then drop the whole table.
table_service.delete_entity('tasktable', 'tasksSeattle', '1')
table_service.delete_table('tasktable')
class TableStorage():
    """Thin wrapper around an Azure Table Storage account."""

    def __init__(self, CONNECTION_STRING):
        """Constructor. Expects the Azure Storage Account connection string,
        obtained from the Storage resource -> Access Keys.

        Parameters:
            CONNECTION_STRING = string including AccountName, AccountKey and
                                EndpointSuffix
        """
        self.CONNECTION_STRING = CONNECTION_STRING
        # Split the connection string into its key=value parts
        # (split on '=' at most once so base64 keys with '=' stay intact).
        Config = dict(
            s.split('=', 1) for s in CONNECTION_STRING.split(';') if s)
        # Extract the storage account name and the EndpointSuffix
        self.AccountName = Config.get('AccountName')
        self.EndPointSuffix = Config.get('EndpointSuffix')

    def CreateTableServices(self):
        """Initialize a TableService instance used to talk to the storage
        account in Azure.

        NOTE(review): passes account_name, connection_string AND
        endpoint_suffix together; the connection string alone normally
        carries all three — confirm against the SDK version in use.
        """
        self.TableService = TableService(
            account_name=self.AccountName,
            connection_string=self.CONNECTION_STRING,
            endpoint_suffix=self.EndPointSuffix)

    def createTable(self, TableName):
        """Create the table if it does not already exist; otherwise report
        that it exists.

        Parameters:
            TableName = name of the table to create
        """
        print('\nCreate a table with name - ' + TableName)
        if (self.TableService.exists(TableName) != True):
            self.TableService.create_table(TableName)
            print("Table created succesfully!")
        else:
            print('Error creating table, ' + TableName + ' check if it already exists')

    def insertEntity(self, TableName, Entity):
        """Insert (or merge) an entity into the given table.

        Parameters:
            TableName = name of the target table
            Entity = the entity object to add
        """
        print('\nInserting a new entity into table - ' + TableName)
        # NOTE: insert_or_merge semantics — an existing entity with the same
        # keys is merged, not replaced.
        self.TableService.insert_or_merge_entity(TableName, Entity)
        print('Successfully inserted the new entity')

    def getEntity(self, TableName, PartitionKey, RowKey):
        """Fetch the full entity by Partition Key and Row Key.

        Returns the entity object directly; no json.loads() needed.

        Parameters:
            TableName = name of the table to read from
            PartitionKey = partition key of the desired entity
            RowKey = row key of the desired entity
        """
        print('\nGetting entity.')
        Entity = self.TableService.get_entity(TableName, PartitionKey, RowKey)
        return Entity

    def updateEntity(self, TableName, NewEntity):
        """Take the object with updated values and update it in table storage.

        Parameters:
            TableName = name of the table to update
            NewEntity = the entity object carrying the new values
        """
        print('\nUpdating entity. PK: ' + NewEntity.PartitionKey + ' RK: ' + NewEntity.RowKey)
        self.TableService.update_entity(TableName, NewEntity)

    def deleteEntity(self, TableName, PartitionKey, RowKey):
        """Delete the entity matching Partition Key and Row Key.

        Parameters:
            TableName = name of the table to delete from
            PartitionKey = partition key of the entity
            RowKey = row key of the entity
        """
        print('\nDeleting entity')
        self.TableService.delete_entity(TableName, PartitionKey, RowKey)

    def deleteTable(self, TableName):
        """Delete the table if it exists; otherwise report that it does not.

        Parameters:
            TableName = name of the table to delete
        """
        print('\nDeleting the table.')
        if (self.TableService.exists(TableName)):
            self.TableService.delete_table(TableName)
            print('Successfully deleted the table')
        else:
            print('The table does not exists')
class TableStorageHandlerTest(_TestCase):
    """Integration tests for a logging handler that writes to Azure Table
    Storage. Order of operations inside each test is significant."""

    def _divide_key(self, key):
        # Split a '-'-joined key into its components, treating the hostname
        # as one atomic component even if it itself contains '-'.
        divided = []
        hostname = gethostname()
        if key.find(hostname) >= 0:
            preceding, hostname, remaining = key.rpartition(hostname)
            preceding = preceding[:-1] if preceding.endswith(
                '-') else preceding
            divided.extend(preceding.split('-'))
            divided.append(hostname)
            remaining = remaining[1:] if remaining.startswith(
                '-') else remaining
            divided.extend(remaining.split('-'))
        else:
            divided.extend(key.split('-'))
        return iter(divided)

    def _get_formatter_name(self, handler_name, formatter_type):
        # Resolve a 'cfg://formatters.<name>' reference to the bare name.
        name = _get_handler_config_value(handler_name, formatter_type)
        if name:
            if name.startswith('cfg://formatters.'):
                name = name.split('.')[1]
        return name

    def _get_partition_key_formatter_name(self, handler_name):
        return self._get_formatter_name(handler_name,
                                        'partition_key_formatter')

    def _get_row_key_formatter_name(self, handler_name):
        return self._get_formatter_name(handler_name, 'row_key_formatter')

    def setUp(self):
        # NOTE(review): positional TableService(ACCOUNT_NAME, ACCOUNT_KEY) and
        # query_tables() here, while the sibling copy of this class uses
        # keyword args + is_emulated and list_tables() — confirm which SDK
        # version this file targets.
        self.service = TableService(ACCOUNT_NAME, ACCOUNT_KEY)
        # ensure that there's no entity in the table before each test
        tables = set()
        for cfg in LOGGING['handlers'].values():
            if 'table' in cfg:
                tables.add(cfg['table'])
        for table in self.service.query_tables():
            if table.name in tables:
                for entity in self.service.query_entities(table.name):
                    self.service.delete_entity(table.name,
                                               entity.PartitionKey,
                                               entity.RowKey)

    def test_logging(self):
        # get the logger for the test
        logger_name = 'table'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging, bracketing it with timestamps so key checks can
        # tolerate a minute/second rollover between start and finish
        log_text = 'logging test'
        logging_started = datetime.now()
        logger.info(log_text)
        logging_finished = datetime.now()
        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)
        # confirm that the entity has the default partition key
        fmt = '%Y%m%d%H%M'
        try:
            self.assertEqual(entity.PartitionKey,
                             logging_started.strftime(fmt))
        except AssertionError:
            # key may have been stamped after a minute rollover
            if logging_started == logging_finished:
                raise
            self.assertEqual(entity.PartitionKey,
                             logging_finished.strftime(fmt))
        # confirm that the entity has the default row key
        # (timestamp+millis, hostname, pid, '00' sequence suffix)
        divided = self._divide_key(entity.RowKey)
        timestamp = next(divided)
        fmt = '%Y%m%d%H%M%S'
        self.assertGreaterEqual(timestamp[:-3], logging_started.strftime(fmt))
        self.assertLessEqual(timestamp[:-3], logging_finished.strftime(fmt))
        self.assertRegex(timestamp[-3:], '^[0-9]{3}$')
        self.assertEqual(next(divided), gethostname())
        self.assertEqual(int(next(divided)), os.getpid())
        self.assertEqual(next(divided), '00')
        with self.assertRaises(StopIteration):
            next(divided)
        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)

    @unittest.skipIf(_EMULATED,
                     "Azure Storage Emulator doesn't support batch operation.")
    def test_batch(self):
        # get the logger for the test
        logger_name = 'batch'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging and execute the first batch: log 1.5x batch_size
        # records so exactly one full batch commits now
        batch_size = _get_handler_config_value(handler_name, 'batch_size')
        log_text = 'batch logging test'
        for i in range(batch_size + int(batch_size / 2)):
            logger.info('%s#%02d' % (log_text, i))
        # confirm that only batch_size entities are committed at this point
        table = _get_handler_config_value(handler_name, 'table')
        entities = list(iter(self.service.query_entities(table)))
        self.assertEqual(len(entities), batch_size)
        rowno_found = set()
        seq_found = set()
        for entity in entities:
            # partition key
            self.assertEqual(entity.PartitionKey,
                             'batch-%s' % gethostname())
            # row key: trailing component is the row number within the batch
            rowno = entity.RowKey.split('-')[-1]
            self.assertLess(int(rowno), batch_size)
            self.assertNotIn(rowno, rowno_found)
            rowno_found.add(rowno)
            # message carries the original sequence number after '#'
            message, seq = entity.message.split('#')
            self.assertEqual(message, 'INFO %s' % log_text)
            self.assertLess(int(seq), batch_size)
            self.assertNotIn(seq, seq_found)
            seq_found.add(seq)
        # remove currently created entities before the next batch
        for entity in entities:
            self.service.delete_entity(table,
                                       entity.PartitionKey,
                                       entity.RowKey)
        # perform logging again and execute the next batch
        # (i carries over from the loop above)
        for j in range(i + 1, int(batch_size / 2) + i + 1):
            logger.info('%s#%02d' % (log_text, j))
        # confirm that the remaining entities are committed in the next batch
        entities = list(iter(self.service.query_entities(table)))
        self.assertEqual(len(entities), batch_size)
        rowno_found.clear()
        for entity in entities:
            # partition key
            self.assertEqual(entity.PartitionKey,
                             'batch-%s' % gethostname())
            # row key
            rowno = entity.RowKey.split('-')[-1]
            self.assertLess(int(rowno), batch_size)
            self.assertNotIn(rowno, rowno_found)
            rowno_found.add(rowno)
            # message: second batch carries sequence numbers in
            # [batch_size, 2*batch_size)
            message, seq = entity.message.split('#')
            self.assertEqual(message, 'INFO %s' % log_text)
            self.assertGreaterEqual(int(seq), batch_size)
            self.assertLess(int(seq), batch_size * 2)
            self.assertNotIn(seq, seq_found)
            seq_found.add(seq)

    def test_extra_properties(self):
        # get the logger for the test
        logger_name = 'extra_properties'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging
        log_text = 'extra properties test'
        logger.info(log_text)
        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)
        # confirm that the extra properties have correct values
        entity = next(iter(self.service.query_entities(table)))
        self.assertEqual(entity.hostname, gethostname())
        self.assertEqual(entity.levelname, 'INFO')
        self.assertEqual(int(entity.levelno), logging.INFO)
        self.assertEqual(entity.module,
                         os.path.basename(__file__).rpartition('.')[0])
        self.assertEqual(entity.name, logger_name)
        self.assertEqual(int(entity.process), os.getpid())
        self.assertEqual(int(entity.thread), current_thread().ident)
        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)

    def test_custom_key_formatters(self):
        # get the logger for the test
        logger_name = 'custom_keys'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging
        log_text = 'custom key formatters test'
        logging_started = datetime.now()
        logger.info(log_text)
        logging_finished = datetime.now()
        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)
        # confirm that the entity has a custom partition key
        divided = self._divide_key(entity.PartitionKey)
        self.assertEqual(next(divided), 'mycustompartitionkey')
        self.assertEqual(next(divided), gethostname())
        formatter_name = self._get_partition_key_formatter_name(handler_name)
        fmt = _get_formatter_config_value(formatter_name, 'datefmt')
        asctime = next(divided)
        try:
            self.assertEqual(asctime, logging_started.strftime(fmt))
        except AssertionError:
            # accept the finish timestamp if a rollover happened mid-test
            if logging_started == logging_finished:
                raise
            self.assertEqual(asctime, logging_finished.strftime(fmt))
        with self.assertRaises(StopIteration):
            next(divided)
        # confirm that the entity has a custom row key
        divided = self._divide_key(entity.RowKey)
        self.assertEqual(next(divided), 'mycustomrowkey')
        self.assertEqual(next(divided), gethostname())
        formatter_name = self._get_row_key_formatter_name(handler_name)
        fmt = _get_formatter_config_value(formatter_name, 'datefmt')
        asctime = next(divided)
        try:
            self.assertEqual(asctime, logging_started.strftime(fmt))
        except AssertionError:
            if logging_started == logging_finished:
                raise
            self.assertEqual(asctime, logging_finished.strftime(fmt))
        with self.assertRaises(StopIteration):
            next(divided)
        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)
class TableStorageHandlerTest(_TestCase):
    """Integration tests for a logging handler that writes to Azure Table
    Storage. This copy uses keyword-argument TableService construction with
    is_emulated and list_tables()."""

    def _divide_key(self, key):
        # Split a '-'-joined key into its components, treating the hostname
        # as one atomic component even if it itself contains '-'.
        divided = []
        hostname = gethostname()
        if key.find(hostname) >= 0:
            preceding, hostname, remaining = key.rpartition(hostname)
            preceding = preceding[:-1] if preceding.endswith('-') else preceding
            divided.extend(preceding.split('-'))
            divided.append(hostname)
            remaining = remaining[1:] if remaining.startswith('-') else remaining
            divided.extend(remaining.split('-'))
        else:
            divided.extend(key.split('-'))
        return iter(divided)

    def _get_formatter_name(self, handler_name, formatter_type):
        # Resolve a 'cfg://formatters.<name>' reference to the bare name.
        name = _get_handler_config_value(handler_name, formatter_type)
        if name:
            if name.startswith('cfg://formatters.'):
                name = name.split('.')[1]
        return name

    def _get_partition_key_formatter_name(self, handler_name):
        return self._get_formatter_name(handler_name, 'partition_key_formatter')

    def _get_row_key_formatter_name(self, handler_name):
        return self._get_formatter_name(handler_name, 'row_key_formatter')

    def setUp(self):
        self.service = TableService(account_name=ACCOUNT_NAME,
                                    account_key=ACCOUNT_KEY,
                                    is_emulated=_EMULATED)
        # ensure that there's no entity in the table before each test
        tables = set()
        for cfg in LOGGING['handlers'].values():
            if 'table' in cfg:
                tables.add(cfg['table'])
        for table in self.service.list_tables():
            if table.name in tables:
                for entity in self.service.query_entities(table.name):
                    self.service.delete_entity(table.name,
                                               entity.PartitionKey,
                                               entity.RowKey)

    def test_logging(self):
        # get the logger for the test
        logger_name = 'table'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging, bracketing with timestamps so key checks can
        # tolerate a minute/second rollover during the test
        log_text = 'logging test'
        logging_started = datetime.now()
        logger.info(log_text)
        logging_finished = datetime.now()
        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)
        # confirm that the entity has the default partition key
        fmt = '%Y%m%d%H%M'
        try:
            self.assertEqual(entity.PartitionKey, logging_started.strftime(fmt))
        except AssertionError:
            # key may have been stamped after a minute rollover
            if logging_started == logging_finished:
                raise
            self.assertEqual(entity.PartitionKey, logging_finished.strftime(fmt))
        # confirm that the entity has the default row key
        # (timestamp+millis, hostname, pid, '00' sequence suffix)
        divided = self._divide_key(entity.RowKey)
        timestamp = next(divided)
        fmt = '%Y%m%d%H%M%S'
        self.assertGreaterEqual(timestamp[:-3], logging_started.strftime(fmt))
        self.assertLessEqual(timestamp[:-3], logging_finished.strftime(fmt))
        self.assertRegex(timestamp[-3:], '^[0-9]{3}$')
        self.assertEqual(next(divided), gethostname())
        self.assertEqual(int(next(divided)), os.getpid())
        self.assertEqual(next(divided), '00')
        with self.assertRaises(StopIteration):
            next(divided)
        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)

    @unittest.skipIf(_EMULATED,
                     "Azure Storage Emulator doesn't support batch operation.")
    def test_batch(self):
        # get the logger for the test
        logger_name = 'batch'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging and execute the first batch: log 1.5x batch_size
        # records so exactly one full batch commits now
        batch_size = _get_handler_config_value(handler_name, 'batch_size')
        log_text = 'batch logging test'
        for i in range(batch_size + int(batch_size/2)):
            logger.info('%s#%02d' % (log_text, i))
        # confirm that only batch_size entities are committed at this point
        table = _get_handler_config_value(handler_name, 'table')
        entities = list(iter(self.service.query_entities(table)))
        self.assertEqual(len(entities), batch_size)
        rowno_found = set()
        seq_found = set()
        for entity in entities:
            # partition key
            self.assertEqual(entity.PartitionKey, 'batch-%s' % gethostname())
            # row key: trailing component is the row number within the batch
            rowno = entity.RowKey.split('-')[-1]
            self.assertLess(int(rowno), batch_size)
            self.assertNotIn(rowno, rowno_found)
            rowno_found.add(rowno)
            # message carries the original sequence number after '#'
            message, seq = entity.message.split('#')
            self.assertEqual(message, 'INFO %s' % log_text)
            self.assertLess(int(seq), batch_size)
            self.assertNotIn(seq, seq_found)
            seq_found.add(seq)
        # remove currently created entities before the next batch
        for entity in entities:
            self.service.delete_entity(table,
                                       entity.PartitionKey,
                                       entity.RowKey)
        # perform logging again and execute the next batch
        # (i carries over from the loop above)
        for j in range(i+1, int(batch_size/2)+i+1):
            logger.info('%s#%02d' % (log_text, j))
        # confirm that the remaining entities are committed in the next batch
        entities = list(iter(self.service.query_entities(table)))
        self.assertEqual(len(entities), batch_size)
        rowno_found.clear()
        for entity in entities:
            # partition key
            self.assertEqual(entity.PartitionKey, 'batch-%s' % gethostname())
            # row key
            rowno = entity.RowKey.split('-')[-1]
            self.assertLess(int(rowno), batch_size)
            self.assertNotIn(rowno, rowno_found)
            rowno_found.add(rowno)
            # message: second batch carries sequence numbers in
            # [batch_size, 2*batch_size)
            message, seq = entity.message.split('#')
            self.assertEqual(message, 'INFO %s' % log_text)
            self.assertGreaterEqual(int(seq), batch_size)
            self.assertLess(int(seq), batch_size*2)
            self.assertNotIn(seq, seq_found)
            seq_found.add(seq)

    def test_extra_properties(self):
        # get the logger for the test
        logger_name = 'extra_properties'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging
        log_text = 'extra properties test'
        logger.info(log_text)
        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)
        # confirm that the extra properties have correct values
        entity = next(iter(self.service.query_entities(table)))
        self.assertEqual(entity.hostname, gethostname())
        self.assertEqual(entity.levelname, 'INFO')
        self.assertEqual(int(entity.levelno), logging.INFO)
        self.assertEqual(entity.module,
                         os.path.basename(__file__).rpartition('.')[0])
        self.assertEqual(entity.name, logger_name)
        self.assertEqual(int(entity.process), os.getpid())
        self.assertEqual(int(entity.thread), current_thread().ident)
        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)

    def test_custom_key_formatters(self):
        # get the logger for the test
        logger_name = 'custom_keys'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)
        # perform logging
        log_text = 'custom key formatters test'
        logging_started = datetime.now()
        logger.info(log_text)
        logging_finished = datetime.now()
        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)
        # confirm that the entity has a custom partition key
        divided = self._divide_key(entity.PartitionKey)
        self.assertEqual(next(divided), 'mycustompartitionkey')
        self.assertEqual(next(divided), gethostname())
        formatter_name = self._get_partition_key_formatter_name(handler_name)
        fmt = _get_formatter_config_value(formatter_name, 'datefmt')
        asctime = next(divided)
        try:
            self.assertEqual(asctime, logging_started.strftime(fmt))
        except AssertionError:
            # accept the finish timestamp if a rollover happened mid-test
            if logging_started == logging_finished:
                raise
            self.assertEqual(asctime, logging_finished.strftime(fmt))
        with self.assertRaises(StopIteration):
            next(divided)
        # confirm that the entity has a custom row key
        divided = self._divide_key(entity.RowKey)
        self.assertEqual(next(divided), 'mycustomrowkey')
        self.assertEqual(next(divided), gethostname())
        formatter_name = self._get_row_key_formatter_name(handler_name)
        fmt = _get_formatter_config_value(formatter_name, 'datefmt')
        asctime = next(divided)
        try:
            self.assertEqual(asctime, logging_started.strftime(fmt))
        except AssertionError:
            if logging_started == logging_finished:
                raise
            self.assertEqual(asctime, logging_finished.strftime(fmt))
        with self.assertRaises(StopIteration):
            next(divided)
        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)
table_service.update_entity('tasktable', 'tasksSeattle', '1', task) # task = {'description' : 'Take out the garbage again', 'priority' : 250} table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '1', task) task = {'description' : 'Buy detergent', 'priority' : 300} table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '3', task) task10 = {'PartitionKey': 'tasksSeattle', 'RowKey': '10', 'description' : 'Go grocery shopping', 'priority' : 400} task11 = {'PartitionKey': 'tasksSeattle', 'RowKey': '11', 'description' : 'Clean the bathroom', 'priority' : 100} table_service.begin_batch() table_service.insert_entity('tasktable', task10) table_service.insert_entity('tasktable', task11) table_service.commit_batch() task = table_service.get_entity('tasktable', 'tasksSeattle', '1') print(task.description) print(task.priority) tasks = table_service.query_entities('tasktable', "PartitionKey eq 'tasksSeattle'") for task in tasks: print(task.description) print(task.priority) tasks = table_service.query_entities('tasktable', "PartitionKey eq 'tasksSeattle'", 'description') for task in tasks: print(task.description) table_service.delete_entity('tasktable', 'tasksSeattle', '1') table_service.delete_table('tasktable')
class PersistentSubscriber(subscriber.AzureSubscriber):
    """Abstract Azure subscriber that persists every received message as an
    entity in an Azure storage table.

    Child classes implement dictToEntity() to translate a message body
    (a dict) into a table entity.
    """
    __metaclass__ = ABCMeta

    def __init__(self, tableName, topicName, subscriptionName,
                 ruleName=None, rule=None, table_cred=None):
        # Call super class constructor
        subscriber.AzureSubscriber.__init__(self, topicName, subscriptionName,
                                            ruleName, rule)
        # Table Service and operations
        self.tableName = tableName
        if table_cred is None:
            table_cred = azurehook.table_cred
        self.table = TableService(account_name=table_cred['account_name'],
                                  account_key=table_cred['mykey'])
        if not self.table.exists(tableName):
            self.table.create_table(tableName)
        self.dump = False

    # Specify behavior on message received (from subscription).
    # Default is insert entity.
    def onNewMessage(self, dic):
        entity = self.dictToEntity(dic)
        self.table.insert_entity(self.tableName, entity)

    # Wrapper function for querying the table
    # Azure limitation: only a maximum of 1000 entities can be retrieved per query
    #
    # Reference:
    # http://stackoverflow.com/questions/28019437/python-querying-all-rows-of-azure-table
    def queryTable(self, query_string):
        # BUG FIX: this method referenced self.TABLE, an attribute that is
        # never assigned (__init__ stores the name in self.tableName), so
        # every call raised AttributeError.
        if not self.table.exists(self.tableName):
            # BUG FIX: the table name is now formatted into the message;
            # previously it was passed as a stray second ValueError argument.
            raise ValueError('Table %s does not exist' % self.tableName)
        results = []
        # NOTE(review): only the first page (up to 1000 entities) is fetched;
        # follow the continuation marker to page through larger result sets.
        entities = self.table.query_entities(
            self.tableName, query_string, marker=None, num_results=1000)
        for entity in entities:
            results.append(entity)
        return results

    # Retrieve all entities from a given partition (i.e. that match a given
    # partition key)
    def retrievePartition(self, partitionKey):
        return self.queryTable("PartitionKey eq '%s'" % partitionKey)

    # Flush all entities from a given partition (i.e. that match a given
    # partition key)
    def flushPartition(self, partitionKey):
        if not self.table.exists(self.tableName):
            raise ValueError("Given table does not exist")
        entities = self.retrievePartition(partitionKey)
        for entity in entities:
            self.table.delete_entity(self.tableName,
                                     entity.PartitionKey, entity.RowKey)

    # To be implemented by child classes: return an entity given the body
    # of the message (as a dictionary).
    @abstractmethod
    def dictToEntity(self, dic):
        pass
class AzureTableDatabase(object):
    """Data-access layer for the call-tracking table.

    Each request is stored as one entity whose PartitionKey and RowKey are
    both the request id; the Status column drives the processing pipeline
    (new -> calling -> recording_ready -> transcribing -> extracting -> done,
    per the project-defined `Statuses`).
    """

    def __init__(self):
        self.connection = TableService(account_name=storage_account,
                                       account_key=table_connection_string)
        self.table_name = table_name

    def _update_entity(self, record):
        # Stamp every write so reset_stale_calls can detect abandoned rows.
        record.LastModified = datetime.now()
        self.connection.update_entity(self.table_name, record)

    def create_table(self):
        self.connection.create_table(self.table_name)

    def raw_table(self, limit=100):
        """Retrieve a list of rows in the table."""
        return self.connection.query_entities(self.table_name, num_results=limit)

    def list_calls(self, limit=100, select='PartitionKey'):
        """Retrieve the partition keys of up to `limit` records."""
        calls = self.connection.query_entities(self.table_name,
                                               num_results=limit, select=select)
        return [c.PartitionKey for c in calls]

    def reset_stale_calls(self, time_limit):
        """Reset calls that are not done and whose last modified time is
        older than `time_limit`; returns the number of records reset.

        Raises NoRecordsToProcessError when nothing is stale.
        """
        records = self.connection.query_entities(
            self.table_name,
            filter="LastModified lt datetime'{0}' and Status ne '{1}'".format(
                time_limit.date(), Statuses.extracting_done))
        if not records.items:
            raise NoRecordsToProcessError()
        num_records = len(records.items)

        for record in records:
            if 'LastErrorStep' in record:
                # Restore the step that failed before mapping it backwards.
                record.Status = record.LastErrorStep
                del record.LastErrorStep
            record.Status = Statuses.reset_map.get(record.Status, record.Status)
            self._update_entity(record)

        return num_records

    def retrieve_next_record_for_call(self):
        """Claim one record in the 'new' state, mark it 'calling', and
        return its partition key."""
        records = self.connection.query_entities(
            self.table_name,
            num_results=1,
            filter="Status eq '{0}'".format(Statuses.new))

        # Consistency fix: use the same emptiness test as the sibling
        # retrieve_next_record_* methods (was `len(records.items) == 0`).
        if not records.items:
            raise NoRecordsToProcessError()

        record = records.items[0]
        record.Status = Statuses.calling
        self._update_entity(record)
        return record.PartitionKey

    def set_error(self, partition_key, step):
        """Mark a row as errored, remembering the step that failed."""
        record = self.connection.get_entity(self.table_name, partition_key,
                                            partition_key)
        record.Status = Statuses.error
        record['LastErrorStep'] = step
        self._update_entity(record)

    def retrieve_next_record_for_transcribing(self):
        """Claim one record whose recording is ready; returns
        (upload_url, partition_key)."""
        records = self.connection.query_entities(
            self.table_name,
            num_results=1,
            filter="Status eq '{0}'".format(Statuses.recording_ready),
        )

        if not records.items:
            raise NoRecordsToProcessError()

        record = records.items[0]
        record.Status = Statuses.transcribing
        self._update_entity(record)
        return record.CallUploadUrl, record.PartitionKey

    def update_transcript(self, partition_key, transcript, status):
        """Store a transcription result, or record the failure mode."""
        record = self.connection.get_entity(
            self.table_name,
            partition_key,
            partition_key,
        )
        if status == TranscriptionStatus.success:
            record.CallTranscript = transcript
            record.Status = Statuses.transcribing_done
            record.TranscribeTimestamp = datetime.now()
            self._update_entity(record)
        elif status == TranscriptionStatus.request_error:
            # Request errors keep the row in an error state tagged with the
            # transcribing step so reset_stale_calls can retry it.
            self.set_error(partition_key, Statuses.transcribing)
        else:
            record.Status = Statuses.transcribing_failed
            self._update_entity(record)

    def change_status(self, original_status, new_status):
        """Bulk-move every record from one status to another."""
        records = self.connection.query_entities(
            self.table_name,
            filter="Status eq '{0}'".format(original_status),
        )
        if not records.items:
            return
        for record in records.items:
            record.Status = new_status
            # NOTE(review): bypasses _update_entity, so LastModified is not
            # refreshed during bulk moves — confirm this is intended.
            self.connection.update_entity(self.table_name, record)

    def query(self, column, value, limit=1):
        """Return up to `limit` records where `column` equals `value`
        (value is quoted, i.e. compared as a string)."""
        return self.connection.query_entities(
            self.table_name,
            num_results=limit,
            filter="{0} eq '{1}'".format(column, value))

    def retrieve_next_record_for_extraction(self):
        """Claim one transcribed record; returns (transcript, partition_key)."""
        records = self.connection.query_entities(
            self.table_name,
            num_results=1,
            filter="Status eq '{0}'".format(Statuses.transcribing_done))

        if not records.items:
            raise NoRecordsToProcessError()

        record = records.items[0]
        record.Status = Statuses.extracting
        self._update_entity(record)
        return record.CallTranscript, record.PartitionKey

    def update_location_date(self, partition_key, location_dict, date_dict):
        """Attach extracted location/date fields; both dicts must be
        non-empty, otherwise the row is marked failed_to_return_info."""
        record = self.connection.get_entity(self.table_name, partition_key,
                                            partition_key)
        if location_dict and date_dict:
            record.update(**location_dict)
            record.update(**date_dict)
            record.Status = Statuses.extracting_done
        else:
            record.Status = Statuses.failed_to_return_info
        self._update_entity(record)

    def upload_new_requests(self, request_ids):
        """Upload new request ids to the database."""
        for request_id in request_ids:
            record = {
                'PartitionKey': request_id,
                'RowKey': request_id,
                'Status': Statuses.new,
                'LastModified': datetime.now()
            }
            try:
                self.connection.insert_entity(self.table_name, record)
            except AzureConflictHttpError:
                pass  # already exists. silently ignore.

    def update_call_id(self, alien_registration_id, call_id):
        """Record the outbound call id and move the row to 'calling'."""
        record = self.connection.get_entity(self.table_name,
                                            alien_registration_id,
                                            alien_registration_id)
        record.CallID = call_id
        record.Status = Statuses.calling
        record.CallTimestamp = datetime.now()
        self._update_entity(record)

    def update_azure_path(self, alien_registration_id, azure_path):
        """Record where the call recording was uploaded and mark it ready."""
        record = self.connection.get_entity(self.table_name,
                                            alien_registration_id,
                                            alien_registration_id)
        record.Status = Statuses.recording_ready
        record.CallUploadUrl = azure_path
        self._update_entity(record)

    def delete_ain(self, ain):
        return self.connection.delete_entity(self.table_name, ain, ain)

    def get_ain(self, ain):
        return self.connection.get_entity(self.table_name, ain, ain)
class az(object):
    """Convenience wrapper around the legacy azure TableService for storing
    road-segment entities (two lat/long points plus a color key)."""

    def __init__(self, default_table_name=DEFAULT_TABLE, partitionKey='default'):
        self.TABLE_STORAGE_KEY = os.getenv('AZURE_STORAGE_KEY')
        self.STORAGE_NAME = os.getenv('STORAGE_NAME')
        self.default_table_name = default_table_name
        self.default_partition = partitionKey
        if self.TABLE_STORAGE_KEY is None:  # idiom fix: was `== None`
            # No environment credentials: fall back to the local tokens module.
            from tokens import TABLE_STORAGE_ACCESS_KEY, STORAGE_ACCOUNT_NAME
            self.TABLE_STORAGE_KEY = TABLE_STORAGE_ACCESS_KEY
            self.STORAGE_NAME = STORAGE_ACCOUNT_NAME
        self.table_service = TableService(account_name=self.STORAGE_NAME,
                                          account_key=self.TABLE_STORAGE_KEY)
        #create_table_if_does_not_exists(self.default_table_name)

    def insert_or_replace_entity_to_azure(self, rowKey, entry, t_name=DEFAULT_TABLE):
        '''
        Upload one segment dict (keys latA/longA/latB/longB/color) to azure
        table storage under the default partition; the row key is the
        zero-padded rowKey.
        '''
        segment = Entity()
        segment.PartitionKey = self.default_partition
        segment.RowKey = str(rowKey).zfill(8)
        segment.latA = str(entry['latA'])
        segment.longA = str(entry['longA'])
        segment.latB = str(entry['latB'])
        segment.longB = str(entry['longB'])
        segment.colorKey = str(entry['color'])
        #print segment.colorKey
        # NOTE(review): the Windows branch appears to target an older SDK
        # whose insert_or_replace_entity took explicit partition/row keys.
        if os.name == 'nt':
            self.table_service.insert_or_replace_entity(
                t_name, self.default_partition, str(rowKey).zfill(8), segment)
        else:
            self.table_service.insert_or_replace_entity(t_name, segment)

    def create_table(self, name):
        return self.table_service.create_table(name)

    def delete_table(self, name):
        return self.table_service.delete_table(name)

    def delete_entity_by_rowKey(self, rowKey, table_name=DEFAULT_TABLE):
        return self.table_service.delete_entity(table_name,
                                                self.default_partition, rowKey)

    def does_table_exist(self, table_name):
        # Older (Windows-era) SDKs expose query_tables, newer ones list_tables.
        if os.name == 'nt':
            tables = self.table_service.query_tables()
        else:
            tables = self.table_service.list_tables()
        for i in tables:
            if i.name == table_name:
                return True
        return False

    def list_tables(self):
        if os.name == 'nt':
            for j in self.table_service.query_tables():
                print(j.name)
        else:
            for j in self.table_service.list_tables():
                print(j.name)

    def create_table_if_does_not_exist(self, table_name=DEFAULT_TABLE):
        if self.does_table_exist(table_name):
            return 'already exists'
        else:
            self.table_service.create_table(table_name)

    def create_entry(self, latA, lonA, latB, lonB, bumpiness):
        """Build a segment dict from two points and a bumpiness color."""
        x = {
            'latA': latA,
            'longA': lonA,
            'latB': latB,
            'longB': lonB,
            'color': bumpiness
        }
        return x

    def create_random_entry(self):
        """Build a random segment in the Bay Area bounding box."""
        x = {
            'latA': random.uniform(37, 38),
            'longA': random.uniform(-122, -123),
            'latB': random.uniform(37, 38),
            'longB': random.uniform(-122, -123),
            'color': random.randint(0, 7)
        }
        return x

    def create_and_insert_or_replace_entity_azure(self, latA, lonA, latB, lonB,
                                                  bumpiness, rowKey,
                                                  table_name=DEFAULT_TABLE):
        # BUG FIX: create_entry is a method; the original called the bare
        # name `create_entry(...)`, which raised NameError at runtime.
        return self.insert_or_replace_entity_to_azure(
            rowKey,
            self.create_entry(latA, lonA, latB, lonB, bumpiness),
            table_name)
class az(object):
    """Convenience wrapper around the legacy azure TableService for storing
    road-segment entities.

    NOTE(review): this is a duplicate definition of class `az` — at import
    time it shadows the earlier one; the two copies should be deduplicated.
    """

    def __init__(self, default_table_name=DEFAULT_TABLE, partitionKey='default'):
        self.TABLE_STORAGE_KEY = os.getenv('AZURE_STORAGE_KEY')
        self.STORAGE_NAME = os.getenv('STORAGE_NAME')
        self.default_table_name = default_table_name
        self.default_partition = partitionKey
        if self.TABLE_STORAGE_KEY is None:  # idiom fix: was `== None`
            # No environment credentials: fall back to the local tokens module.
            from tokens import TABLE_STORAGE_ACCESS_KEY, STORAGE_ACCOUNT_NAME
            self.TABLE_STORAGE_KEY = TABLE_STORAGE_ACCESS_KEY
            self.STORAGE_NAME = STORAGE_ACCOUNT_NAME
        self.table_service = TableService(account_name=self.STORAGE_NAME,
                                          account_key=self.TABLE_STORAGE_KEY)
        #create_table_if_does_not_exists(self.default_table_name)

    def insert_or_replace_entity_to_azure(self, rowKey, entry, t_name=DEFAULT_TABLE):
        '''
        Upload one segment dict (keys latA/longA/latB/longB/color) to azure
        table storage under the default partition; the row key is the
        zero-padded rowKey.
        '''
        segment = Entity()
        segment.PartitionKey = self.default_partition
        segment.RowKey = str(rowKey).zfill(8)
        segment.latA = str(entry['latA'])
        segment.longA = str(entry['longA'])
        segment.latB = str(entry['latB'])
        segment.longB = str(entry['longB'])
        segment.colorKey = str(entry['color'])
        #print segment.colorKey
        # NOTE(review): the Windows branch appears to target an older SDK
        # whose insert_or_replace_entity took explicit partition/row keys.
        if os.name == 'nt':
            self.table_service.insert_or_replace_entity(
                t_name, self.default_partition, str(rowKey).zfill(8), segment)
        else:
            self.table_service.insert_or_replace_entity(t_name, segment)

    def create_table(self, name):
        return self.table_service.create_table(name)

    def delete_table(self, name):
        return self.table_service.delete_table(name)

    def delete_entity_by_rowKey(self, rowKey, table_name=DEFAULT_TABLE):
        return self.table_service.delete_entity(table_name,
                                                self.default_partition, rowKey)

    def does_table_exist(self, table_name):
        # Older (Windows-era) SDKs expose query_tables, newer ones list_tables.
        if os.name == 'nt':
            tables = self.table_service.query_tables()
        else:
            tables = self.table_service.list_tables()
        for i in tables:
            if i.name == table_name:
                return True
        return False

    def list_tables(self):
        if os.name == 'nt':
            for j in self.table_service.query_tables():
                print(j.name)
        else:
            for j in self.table_service.list_tables():
                print(j.name)

    def create_table_if_does_not_exist(self, table_name=DEFAULT_TABLE):
        if self.does_table_exist(table_name):
            return 'already exists'
        else:
            self.table_service.create_table(table_name)

    def create_entry(self, latA, lonA, latB, lonB, bumpiness):
        """Build a segment dict from two points and a bumpiness color."""
        x = {
            'latA': latA,
            'longA': lonA,
            'latB': latB,
            'longB': lonB,
            'color': bumpiness
        }
        return x

    def create_random_entry(self):
        """Build a random segment in the Bay Area bounding box."""
        x = {
            'latA': random.uniform(37, 38),
            'longA': random.uniform(-122, -123),
            'latB': random.uniform(37, 38),
            'longB': random.uniform(-122, -123),
            'color': random.randint(0, 7)
        }
        return x

    def create_and_insert_or_replace_entity_azure(self, latA, lonA, latB, lonB,
                                                  bumpiness, rowKey,
                                                  table_name=DEFAULT_TABLE):
        # BUG FIX: create_entry is a method; the original called the bare
        # name `create_entry(...)`, which raised NameError at runtime.
        return self.insert_or_replace_entity_to_azure(
            rowKey,
            self.create_entry(latA, lonA, latB, lonB, bumpiness),
            table_name)