def receiver():
    bus_service = ServiceBusService(
        service_namespace='comp6905',
        shared_access_key_name='RootManageSharedAccessKey',
        shared_access_key_value='rK2FMzVKvCjpad7xVSj1AB3hDimhxZq3WtEE4y28yaM=')
    table_service = TableService(
        account_name='comp6905kirk',
        account_key='H1YuP8hBxJ2PKw2hoW4Dr+DMAMvKZ/nGhstHw+87mE+OSBTb23cBxhkUvILgKOHWHA3hi3oaoohwVkp6lOXOlA==')
    while True:
        msg = bus_service.receive_queue_message('queue1', peek_lock=False)
        msg1 = msg.body.decode("utf-8")
        print(msg1)
        parsed_json = json.loads(msg1)
        # print(parsed_json['UserId'])
        task = {
            'PartitionKey': 'Zanko',
            'RowKey': parsed_json['TransactionID'],
            'UserId': parsed_json['UserId'],
            'SellerId': parsed_json['SellerID'],
            'ProductName': parsed_json['Product Name'],
            'SalePrice': parsed_json['Sale Price'],
            'TransactionDate': parsed_json['Transaction Date'],
        }
        table_service.insert_entity('Requests', task)
def test_sas_upper_case_table_name(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()

    # Table names are case insensitive, so simply upper case our existing table name to test
    token = self.ts.generate_table_shared_access_signature(
        self.table_name.upper(),
        TablePermissions.QUERY,
        datetime.utcnow() + timedelta(hours=1),
        datetime.utcnow() - timedelta(minutes=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    entities = list(service.query_entities(
        self.table_name,
        filter="PartitionKey eq '{}'".format(entity['PartitionKey'])))

    # Assert
    self.assertEqual(len(entities), 1)
    self._assert_default_entity(entities[0])
def __init__(self, **kwargs):
    """ service init """
    self._storage_name = kwargs.get('AZURE_STORAGE_NAME', '')
    self._storage_key = kwargs.get('AZURE_STORAGE_KEY', '')
    self._models = []
    if self._storage_key != '' and self._storage_name != '':
        self._tableservice = TableService(account_name=self._storage_name,
                                          account_key=self._storage_key,
                                          protocol='https')
        # encrypt table service
        if kwargs.get('AZURE_REQUIRE_ENCRYPTION', False):
            # Create the KEK used for encryption.
            # KeyWrapper is the provided sample implementation, but the user may use
            # their own object as long as it implements the interface above.
            kek = KeyWrapper(kwargs.get('AZURE_KEY_IDENTIFIER', 'otrrentapi'),  # Key identifier
                             kwargs.get('SECRET_KEY', 'super-duper-secret'))
            # Create the key resolver used for decryption.
            # KeyResolver is the provided sample implementation, but the user may use
            # whatever implementation they choose so long as the function set on the
            # service object behaves appropriately.
            key_resolver = KeyResolver()
            key_resolver.put_key(kek)
            # Set require encryption, the KEK and the key resolver on the service object.
            self._encryptproperties = True
            self._tableservice.key_encryption_key = kek
            self._tableservice.key_resolver_function = key_resolver.resolve_key
            self._tableservice.encryption_resolver_function = self.__encryptionresolver__
def update(self, area, selector, manifest, hash):
    assert area is not None, 'area is none; should already be validated'
    area_config = config.load_area(area)
    tracking_config = config.load_tracking(area_config['tracking'])
    table_service = TableService(account_name=tracking_config['name'],
                                 account_key=tracking_config['key1'])
    area = area.lower()
    entity = {
        'PartitionKey': area,
        'RowKey': selector.replace('/', '_'),
        'selector': selector,
        'hash': hash,
    }
    for key, value in [(key, value) for key, value in manifest.iteritems() if key[0] != '_']:
        if key in ('PartitionKey', 'RowKey', 'selector', 'hash'):
            continue
        entity[key] = value
    table_service.insert_or_replace_entity(self._get_table(area), entity)
def test_sas_update(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.UPDATE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    updated_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
    resp = service.update_entity(self.table_name, updated_entity)

    # Assert
    received_entity = self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey)
    self._assert_updated_entity(received_entity)
class TableBase(object):
    """docstring for TableBase"""

    def __init__(self):
        super(TableBase, self).__init__()
        self.table_service = TableService(
            account_name='boburstorage',
            account_key='wRgukLsyhLtnI7qEk8mSGnIBC+IsiTTXEDF1/xnmBGDudJLSeYdtyuVzuSN5/cplJz88AJPyoVyjCmL9N1ECXw==')

    def add_empty_row(self, table, partition_key, row_key, status):
        print('adding empty row to table...\n')
        row = {
            'PartitionKey': partition_key,
            'RowKey': row_key,
            'result': '',
            'status': status
        }
        self.table_service.insert_or_replace_entity(table, row)

    def update_row_with_result(self, table, partition_key, row_key, sum, status):
        print('updating table row with result...\n')
        xml = '<?xml version="1.0"?><sum>' + str(sum) + '</sum>'
        row = {
            'PartitionKey': partition_key,
            'RowKey': row_key,
            'result': xml,
            'status': status
        }
        self.table_service.update_entity(table, row)
def __init__(self):
    super(TableBase, self).__init__()
    self.table_service = TableService(
        account_name='boburstorage',
        account_key='wRgukLsyhLtnI7qEk8mSGnIBC+IsiTTXEDF1/xnmBGDudJLSeYdtyuVzuSN5/cplJz88AJPyoVyjCmL9N1ECXw==')
def get_last_run_id():
    table_service = TableService(account_name=STORAGE_ACCOUNT_NAME,
                                 account_key=STORAGE_ACCOUNT_KEY)
    databricks_cluster_details_entries = table_service.query_entities(
        'databricks', filter="PartitionKey eq 'pdm'")
    databricks_cluster_details = list(databricks_cluster_details_entries)
    if databricks_cluster_details:
        return databricks_cluster_details[0]['run_id']
    return None
def test_sas_add_outside_range(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.ADD,
        datetime.utcnow() + timedelta(hours=1),
        start_pk='test', start_rk='test1',
        end_pk='test', end_rk='test1',
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    with self.assertRaises(AzureHttpError):
        entity = self._create_random_entity_dict()
        service.insert_entity(self.table_name, entity)
def test_sas_add(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.ADD,
        datetime.utcnow() + timedelta(hours=1),
        datetime.utcnow() - timedelta(minutes=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    entity = self._create_random_entity_dict()
    service.insert_entity(self.table_name, entity)

    # Assert
    resp = self.ts.get_entity(self.table_name, entity['PartitionKey'], entity['RowKey'])
    self._assert_default_entity(resp)
def __init__(self):
    super(TableBase, self).__init__()
    self.table_service = TableService(
        account_name='bobur',
        account_key='6e60FZapOXAmUbFBw0SpE1lHRP3RkXOMYRaalWmRBoz4+xI5tvjaJzxXuYyt+yfWxjPXpz5X3PmyIFiQmSkjbw==')
def test_sas_update(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.UPDATE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    updated_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
    resp = service.update_entity(self.table_name, updated_entity)

    # Assert
    received_entity = self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey)
    self._assert_updated_entity(received_entity)
def test_sas_delete(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.DELETE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    service.delete_entity(self.table_name, entity.PartitionKey, entity.RowKey)

    # Assert
    with self.assertRaises(AzureMissingResourceHttpError):
        self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey)
def test_sas_add_inside_range(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.ADD,
        datetime.utcnow() + timedelta(hours=1),
        start_pk='test', start_rk='test1',
        end_pk='test', end_rk='test1',
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    entity = self._create_random_entity_dict('test', 'test1')
    service.insert_entity(self.table_name, entity)

    # Assert
    resp = self.ts.get_entity(self.table_name, 'test', 'test1')
    self._assert_default_entity(resp)
def test_account_sas(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    table_name = self._create_table()
    entity = {"PartitionKey": "test", "RowKey": "test1", "text": "hello"}
    self.ts.insert_entity(table_name, entity)
    entity["RowKey"] = "test2"
    self.ts.insert_entity(table_name, entity)

    token = self.ts.generate_account_shared_access_signature(
        ResourceTypes.OBJECT,
        AccountPermissions.READ,
        datetime.utcnow() + timedelta(hours=1),
        datetime.utcnow() - timedelta(minutes=1),
    )

    # Act
    service = TableService(account_name=self.settings.STORAGE_ACCOUNT_NAME,
                           sas_token=token)
    self._set_service_options(service, self.settings)
    entities = list(service.query_entities(table_name))

    # Assert
    self.assertEqual(len(entities), 2)
    self.assertEqual(entities[0].text, "hello")
    self.assertEqual(entities[1].text, "hello")
def __init__(self, vm_count=0, sku_type='standard_d2_v2', username='******', password='******'):
    self.vm_count = int(vm_count)
    self.sku_type = sku_type
    self.username = username
    self.password = password

    self.BATCH_ACCOUNT_NAME = os.environ['BATCH_ACCOUNT_NAME']
    BATCH_ACCOUNT_KEY = os.environ['BATCH_ACCOUNT_KEY']
    BATCH_SERVICE_URL = os.environ['BATCH_ACCOUNT_URL']
    STORAGE_ACCOUNT_SUFFIX = 'core.windows.net'
    self.STORAGE_ACCOUNT_NAME = os.environ['STORAGE_ACCOUNT_NAME']
    self.STORAGE_ACCOUNT_KEY = os.environ['STORAGE_ACCOUNT_KEY']

    self.secrets_config = aztk.spark.models.SecretsConfiguration(
        shared_key=aztk.models.SharedKeyConfiguration(
            batch_account_name=self.BATCH_ACCOUNT_NAME,
            batch_account_key=BATCH_ACCOUNT_KEY,
            batch_service_url=BATCH_SERVICE_URL,
            storage_account_name=self.STORAGE_ACCOUNT_NAME,
            storage_account_key=self.STORAGE_ACCOUNT_KEY,
            storage_account_suffix=STORAGE_ACCOUNT_SUFFIX),
        ssh_pub_key="")

    self.table_service = TableService(
        account_name=self.STORAGE_ACCOUNT_NAME,
        account_key=self.STORAGE_ACCOUNT_KEY)
def get_keywords():
    # get table service reference
    account_name = getenv('STORAGE_ACCOUNT')
    account_key = getenv('STORAGE_KEY')
    keyword_table = getenv('KEYWORD_TABLE_NAME')
    table_service = TableService(account_name=account_name, account_key=account_key)

    # query all keyword entities
    keywords = table_service.query_entities(keyword_table, filter="PartitionKey eq 'Keyword'")

    # separate each keyword by language
    arKeywords = {}
    enKeywords = {}
    for keyword in keywords:
        # map each keyword by its canonical form (currently lowercase English)
        canonicalKeyword = keyword.en_term.lower()

        # pre-compile regex for each keyword
        arKeywordRegex = create_keyword_regex(keyword.ar_term)
        enKeywordRegex = create_keyword_regex(keyword.en_term)

        arKeywords[canonicalKeyword] = arKeywordRegex
        enKeywords[canonicalKeyword] = enKeywordRegex

    return {'ar': arKeywords, 'en': enKeywords}
def test_sas_upper_case_table_name(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()

    # Table names are case insensitive, so simply upper case our existing table name to test
    token = self.ts.generate_table_shared_access_signature(
        self.table_name.upper(),
        TablePermissions.QUERY,
        datetime.utcnow() + timedelta(hours=1),
        datetime.utcnow() - timedelta(minutes=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    entities = list(service.query_entities(
        self.table_name,
        filter="PartitionKey eq '{}'".format(entity['PartitionKey'])))

    # Assert
    self.assertEqual(len(entities), 1)
    self._assert_default_entity(entities[0])
class RgbAzureRepository:
    """Performs storage/retrieval functions for RGB sequences"""

    def __init__(self, azure_account_name, azure_account_key):
        self.table_service = TableService(account_name=azure_account_name,
                                          account_key=azure_account_key)

    def create_rgb_sequence_table(self):
        self.table_service.create_table('rgbsequences')

    #def add_color_to_sequence(sequenceName, red, green, blue):
    #    color = Entity()
    #    color.PartitionKey = sequenceName
    #    color.RowKey = '1'
    #    color.Red = red
    #    color.Green = green
    #    color.Blue = blue
    #
    #    table_service.insert_entity('rgbsequences', color)

    def get_sequence(self, sequence_name):
        colors = self.table_service.query_entities(
            'rgbsequences', "PartitionKey eq '%s'" % sequence_name)
        sequence = ColorSequence(sequence_name)
        for color in colors:
            rgb = RgbColor(color.red, color.green, color.blue)
            sequence.add_color(rgb)
        return sequence
def account_sas(self):
    table_name = self._create_table()
    entity = {
        'PartitionKey': 'test',
        'RowKey': 'test1',
        'text': 'hello world',
    }
    self.service.insert_entity(table_name, entity)

    # Access to all entities in all the tables
    # Expires in an hour
    token = self.service.generate_account_shared_access_signature(
        ResourceTypes.OBJECT,
        AccountPermissions.READ,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Create a service and use the SAS
    sas_service = TableService(
        account_name=self.account.account_name,
        sas_token=token,
    )

    entities = list(sas_service.query_entities(table_name))
    for entity in entities:
        print(entity.text)  # hello world

    self.service.delete_table(table_name)
def table_sas(self):
    table_name = self._create_table()
    entity = {
        'PartitionKey': 'test',
        'RowKey': 'test1',
        'text': 'hello world',
    }
    self.service.insert_entity(table_name, entity)

    # Access only to the entities in the given table
    # Query permissions to access entities
    # Expires in an hour
    token = self.service.generate_table_shared_access_signature(
        table_name,
        TablePermissions.QUERY,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Create a service and use the SAS
    sas_service = TableService(
        account_name=self.account.account_name,
        sas_token=token,
    )

    entities = sas_service.query_entities(table_name)
    for entity in entities:
        print(entity.text)  # hello world

    self.service.delete_table(table_name)
def sas_with_signed_identifiers(self):
    table_name = self._create_table()
    entity = {
        'PartitionKey': 'test',
        'RowKey': 'test1',
        'text': 'hello world',
    }
    self.service.insert_entity(table_name, entity)

    # Set access policy on table
    access_policy = AccessPolicy(permission=TablePermissions.QUERY,
                                 expiry=datetime.utcnow() + timedelta(hours=1))
    identifiers = {'id': access_policy}
    acl = self.service.set_table_acl(table_name, identifiers)

    # Wait 30 seconds for acl to propagate
    time.sleep(30)

    # Indicates to use the access policy set on the table
    token = self.service.generate_table_shared_access_signature(
        table_name,
        id='id'
    )

    # Create a service and use the SAS
    sas_service = TableService(
        account_name=self.account.account_name,
        sas_token=token,
    )

    entities = list(sas_service.query_entities(table_name))
    for entity in entities:
        print(entity.text)  # hello world

    self.service.delete_table(table_name)
def __init__(self,
             account_name=None,
             account_key=None,
             protocol='https',
             table='logs',
             batch_size=0,
             extra_properties=None,
             partition_key_formatter=None,
             row_key_formatter=None,
             is_emulated=False,
             ):
    """
    Initialize the handler.
    """
    logging.Handler.__init__(self)
    self.service = TableService(account_name=account_name,
                                account_key=account_key,
                                is_emulated=is_emulated,
                                protocol=protocol)
    self.meta = {'hostname': gethostname(), 'process': os.getpid()}
    self.table = _formatName(table, self.meta)
    self.ready = False
    self.rowno = 0
    if not partition_key_formatter:
        # default format for partition keys
        fmt = '%(asctime)s'
        datefmt = '%Y%m%d%H%M'
        partition_key_formatter = logging.Formatter(fmt, datefmt)
    self.partition_key_formatter = partition_key_formatter
    if not row_key_formatter:
        # default format for row keys
        fmt = '%(asctime)s%(msecs)03d-%(hostname)s-%(process)d-%(rowno)02d'
        datefmt = '%Y%m%d%H%M%S'
        row_key_formatter = logging.Formatter(fmt, datefmt)
    self.row_key_formatter = row_key_formatter
    # extra properties and formatters for them
    self.extra_properties = extra_properties
    if extra_properties:
        self.extra_property_formatters = {}
        self.extra_property_names = {}
        for extra in extra_properties:
            if _PY3:
                f = logging.Formatter(fmt=extra, style=extra[0])
            else:
                f = logging.Formatter(fmt=extra)
            self.extra_property_formatters[extra] = f
            self.extra_property_names[extra] = self._getFormatName(extra)
    # the storage emulator doesn't support batch operations
    if batch_size <= 1 or is_emulated:
        self.batch = None
    else:
        self.batch = TableBatch()
        if batch_size > TableStorageHandler.MAX_BATCH_SIZE:
            self.batch_size = TableStorageHandler.MAX_BATCH_SIZE
        else:
            self.batch_size = batch_size
    if self.batch:
        self.current_partition_key = None
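A minimal usage sketch for the table-logging handler initialized above, assuming the class is exposed as TableStorageHandler; the account name and key shown are placeholders, not taken from the original code:

import logging

# Assumption: TableStorageHandler is the handler class whose __init__ appears above.
handler = TableStorageHandler(account_name='mystorageaccount',      # hypothetical account
                              account_key='<storage-account-key>',  # placeholder key
                              table='logs')
logger = logging.getLogger('example')
logger.addHandler(handler)
logger.warning('logged to Azure Table storage')  # each record becomes a table entity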
def get_rows(credentials, result_set):
    ts = TableService(account_name=credentials.account_name,
                      account_key=credentials.account_key)
    for entity in ts.query_entities(result_set.table, filter=result_set.filter):
        row = entity
        row['account_name'] = credentials.account_name
        yield row
def buscar_meeting_code_processado(meeting_code):
    table_service = TableService(account_name=ACCOUNT_NAME, account_key=ACCOUNT_KEY)
    records = table_service.query_entities(
        TABLE_TRACKING,
        filter="PartitionKey eq 'audio-analysis' and RowKey eq '" + meeting_code + "'")
    return len(records.items) > 0
def main(req: func.HttpRequest) -> func.HttpResponse:
    try:
        logging.info("Trigger started")
        ret = {}
        if "code" not in req.params:
            logging.info("Invalid code")
            ret["message"] = "The parameter code is not present in the request."
            ret["status"] = False
            return func.HttpResponse(json.dumps(ret), headers=headers)
        else:
            code = req.params.get('code')
            logging.info("Processing " + str(code) + "...")
            table_service = TableService(account_name=ACCOUNT_NAME, account_key=ACCOUNT_KEY)
            records = table_service.query_entities(
                TABLE_NAME_TRACKING,
                filter="PartitionKey eq 'tracking-analysis' and RowKey eq '" + code + "'")
            if len(records.items) == 0:
                ret["message"] = "Meeting code not found"
                ret["status"] = False
                logging.info("Code not found.")
                return func.HttpResponse(json.dumps(ret), headers=headers)
            else:
                additional_stop_words = table_service.get_entity(
                    TABLE_NAME_PARAMETERS, "stopwords", "general").Value
                record = records.items[0]
                freq_dist = json.loads(record["FreqDist"])
                words = []
                for word in freq_dist:
                    if freq_dist[word] > 1 and len(word) > 2 and word not in additional_stop_words:
                        words.append({"name": word, "weight": freq_dist[word]})
                ret["message"] = "Code found at the database"
                ret["status"] = True
                ret["words"] = words
                logging.info("Code successfully processed.")
                return func.HttpResponse(json.dumps(ret), headers=headers)
    except Exception as error:
        logging.error(error)
        return func.HttpResponse(str(error), status_code=400, headers=headers)
def table_service(self):
    if not self._table_service:
        self._table_service = TableService(
            self.storage_account, self.access_key_result.keys[0].value)
    if not self._table_service.exists(table_name=self.public_key_storage_table_name):
        self._table_service.create_table(self.public_key_storage_table_name)
    return self._table_service
def set_last_run_id(run_id):
    table_service = TableService(account_name=STORAGE_ACCOUNT_NAME,
                                 account_key=STORAGE_ACCOUNT_KEY)
    databricks_details = {
        'PartitionKey': 'pdm',
        'RowKey': 'pdm',
        'run_id': str(run_id)
    }
    table_service.insert_or_replace_entity('databricks', databricks_details)
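For context, a short round-trip sketch using the two helpers above (set_last_run_id and get_last_run_id); note that the value comes back as a string because it is stored with str(run_id):

set_last_run_id(42)
last = get_last_run_id()  # returns '42' (stored as a string), or None if no row exists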
def CreateTableServices(self):
    """ Initialize a Table Service instance to communicate with Azure Storage """
    self.TableService = TableService(
        account_name=self.AccountName,
        connection_string=self.CONNECTION_STRING,
        endpoint_suffix=self.EndPointSuffix)
def init_table():
    table_service = TableService(account_name=os.environ['STORAGE_ACCOUNT_NAME'],
                                 account_key=os.environ['STORAGE_ACCOUNT_KEY'])
    table_name = os.environ['TABLE_NAME']
    table_service.create_table(table_name)
    pk = socket.gethostname()
    rkroot = str(uuid.uuid4())
    return {'service': table_service, 'name': table_name, 'pk': pk, 'rk': rkroot}
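A hedged sketch of how the dict returned by init_table above might be consumed when writing a row; the 'status' property and the row-key suffix are illustrative assumptions, not part of the original code:

ctx = init_table()
entity = {
    'PartitionKey': ctx['pk'],    # hostname is the partition key chosen above
    'RowKey': ctx['rk'] + '-0',   # assumed suffix to keep row keys unique per write
    'status': 'started',          # illustrative property
}
ctx['service'].insert_entity(ctx['name'], entity)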
def updateTask(cp, pk, rk, data):
    tablesvc = TableService(account_name=cp.get('storage_account', 'account_name'),
                            account_key=cp.get('storage_account', 'account_key'),
                            endpoint_suffix=cp.get('storage_account', 'endpoint_suffix'))
    task = {'PartitionKey': pk, 'RowKey': rk}
    task.update(data)
    tablesvc.merge_entity(cp.get('storage_account', 'task_table'), task)
def get_keywords():
    # get table service reference
    account_name = getenv('STORAGE_ACCOUNT')
    account_key = getenv('STORAGE_KEY')
    keyword_table = getenv('KEYWORD_TABLE_NAME')
    site_name = getenv('SITE_NAME')
    table_service = TableService(account_name=account_name, account_key=account_key)
    filter_str = "PartitionKey eq '%s'" % (site_name)

    # query all keyword entities
    keywords = table_service.query_entities(keyword_table, filter=filter_str)

    # TODO: automate the language detection
    # separate each keyword by language
    deKeywords = {}
    urKeywords = {}
    arKeywords = {}
    enKeywords = {}
    idKeywords = {}
    for keyword in keywords:
        canonicalKeyword = keyword.name.encode('UTF-8').lower()
        if hasattr(keyword, 'name_de'):
            # pre-compile regex for each keyword
            deKeywordRegex = create_keyword_regex(keyword.name_de.encode('UTF-8'))
            deKeywords[canonicalKeyword] = deKeywordRegex
        if hasattr(keyword, 'name_ur'):
            # pre-compile regex for each keyword
            urKeywordRegex = create_keyword_regex(keyword.name_ur.encode('UTF-8'))
            urKeywords[canonicalKeyword] = urKeywordRegex
        if hasattr(keyword, 'name_ar'):
            # pre-compile regex for each keyword
            arKeywordRegex = create_keyword_regex(keyword.name_ar.encode('UTF-8'))
            arKeywords[canonicalKeyword] = arKeywordRegex
        if hasattr(keyword, 'name_id'):
            # pre-compile regex for each keyword
            idKeywordRegex = create_keyword_regex(keyword.name_id.encode('UTF-8'))
            idKeywords[canonicalKeyword] = idKeywordRegex
        if hasattr(keyword, 'name_en'):
            # pre-compile regex for each keyword
            enKeywordRegex = create_keyword_regex(keyword.name_en.encode('UTF-8'))
            enKeywords[canonicalKeyword] = enKeywordRegex

    return {
        'de': deKeywords,
        'en': enKeywords,
        'id': idKeywords,
        'ur': urKeywords,
        'ar': arKeywords
    }
def get_video_ids_by_term(search_term):
    table_service = TableService(account_name=storage_acc_name,
                                 account_key=storage_acc_key)
    vid_ids = table_service.query_entities(
        table_name='CorpusInvertedIndex',
        filter="PartitionKey eq '" + search_term + "'",
        select='RowKey')
    if not vid_ids.items or len(vid_ids.items) == 0:
        return []
    video_ids = {record['RowKey'] for record in vid_ids.items}
    return video_ids
def prepare_storage_account(storage_account_name, storage_access_key, endpoint_suffix, protocol="https"):
    blob_service = AppendBlobService(account_name=storage_account_name,
                                     account_key=storage_access_key,
                                     endpoint_suffix=endpoint_suffix,
                                     protocol=protocol)
    blob_service.create_container('bosh')
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )

    # Prepare the table for storing metadata of storage account and stemcells
    table_service = TableService(account_name=storage_account_name,
                                 account_key=storage_access_key,
                                 endpoint_suffix=endpoint_suffix,
                                 protocol=protocol)
    table_service.create_table('stemcells')
def setUp(self):
    self.service = TableService(ACCOUNT_NAME, ACCOUNT_KEY)
    # ensure that there's no entity in the table before each test
    tables = set()
    for cfg in LOGGING['handlers'].values():
        if 'table' in cfg:
            tables.add(cfg['table'])
    for table in self.service.query_tables():
        if table.name in tables:
            for entity in self.service.query_entities(table.name):
                self.service.delete_entity(table.name, entity.PartitionKey, entity.RowKey)
def prepare(self, area):
    assert area is not None, 'area is none; should already be validated'
    area_config = config.load_area(area)
    tracking_config = config.load_tracking(area_config['tracking'])
    table_service = TableService(account_name=tracking_config['name'],
                                 account_key=tracking_config['key1'])
    trackingContainer = self._get_table(area)
    table_service.create_table(trackingContainer)
class MonitorManager(object):
    def __init__(self):
        # Please first create an Azure Storage account and obtain your account name and key
        self._tableService = TableService(account_name='YOUR_ACCOUNT_NAME',
                                          account_key='YOUR_ACCOUNT_KEY')
        self._tableService.create_table('sensordata')

    def Insert(self, distance, currentTime):
        distanceData = Entity()
        distanceData.PartitionKey = 'sensorKey'
        distanceData.RowKey = str(uuid.uuid1())
        distanceData.distance = str(distance)
        distanceData.time = str(currentTime)
        self._tableService.insert_entity('sensordata', distanceData)
def __init__(self, tableName, topicName, subscriptionName, ruleName=None, rule=None, table_cred=None):
    # Call super class constructor
    subscriber.AzureSubscriber.__init__(self, topicName, subscriptionName, ruleName, rule)

    # Table Service and operations
    self.tableName = tableName
    if table_cred is None:
        table_cred = azurehook.table_cred
    self.table = TableService(account_name=table_cred['account_name'],
                              account_key=table_cred['mykey'])
    if not self.table.exists(tableName):
        self.table.create_table(tableName)
    self.dump = False
def get_keyword_filters():
    # get table service reference
    account_name = getenv('STORAGE_ACCOUNT')
    account_key = getenv('STORAGE_KEY')
    filter_table = getenv('FILTER_TABLE_NAME')
    table_service = TableService(account_name=account_name, account_key=account_key)

    # query all entities
    rows = table_service.query_entities(filter_table)

    # create a list of conjunct regexes
    return [[create_keyword_regex(term) for term in json.loads(row.filteredTerms)]
            for row in rows]
def __init__(self, default_table_name=DEFAULT_TABLE, partitionKey='default'):
    self.TABLE_STORAGE_KEY = os.getenv('AZURE_STORAGE_KEY')
    self.STORAGE_NAME = os.getenv('STORAGE_NAME')
    self.default_table_name = default_table_name
    self.default_partition = partitionKey
    if self.TABLE_STORAGE_KEY is None:
        from tokens import TABLE_STORAGE_ACCESS_KEY, STORAGE_ACCOUNT_NAME
        self.TABLE_STORAGE_KEY = TABLE_STORAGE_ACCESS_KEY
        self.STORAGE_NAME = STORAGE_ACCOUNT_NAME
    self.table_service = TableService(account_name=self.STORAGE_NAME,
                                      account_key=self.TABLE_STORAGE_KEY)
def prepare_storage(settings):
    default_storage_account_name = settings["DEFAULT_STORAGE_ACCOUNT_NAME"]
    storage_access_key = settings["STORAGE_ACCESS_KEY"]

    blob_service = AppendBlobService(default_storage_account_name, storage_access_key)
    blob_service.create_container('bosh')
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )

    # Prepare the table for storing metadata of the storage account and stemcells
    table_service = TableService(default_storage_account_name, storage_access_key)
    table_service.create_table('stemcells')
class SummaryTable:
    def __init__(self, account_name, account_key, table_name="summary"):
        """Initialize a table to store summary data. Values must be provided for
        'account_name' and 'account_key' which are values associated with the
        Azure Storage account. 'table_name' is optional and is the name of the
        table used (and created if necessary) in the storage account.
        """
        self.log = Log()
        self.account_name = account_name
        self.account_key = account_key
        self.table_name = table_name
        self.createAzureTable()

    def createAzureTable(self):
        """
        Create an Azure Table in which to store the summary results.
        """
        self.table_service = TableService(self.account_name, self.account_key)
        self.table_service.create_table(self.table_name)

    def deleteTable(self, name):
        """
        Delete a table in which summary results have been stored.
        """
        self.table_service.delete_table(name, False)

    def writeCount(self, count_type, count):
        entry = {'PartitionKey': "count", "RowKey": count_type, 'total_count': count}
        self.table_service.insert_entity(self.table_name, entry)

    def updateCount(self, count_type, count):
        entry = {'total_count': count}
        self.table_service.update_entity(self.table_name, "count", count_type, entry)

    def getCount(self, event_type):
        """
        Get the total number of events of a given type.
        """
        count = 0
        entries = self.table_service.query_entities(
            self.table_name,
            "PartitionKey eq 'count' and RowKey eq '" + event_type + "'")
        if len(entries) == 0:
            self.writeCount(event_type, 0)
        elif len(entries) > 1:
            raise Exception('We have more than one summary entry for ' + event_type)
        else:
            count = entries[0].total_count
        return count
def get_table_details(creds, resource_group_name, account_name, table_name,
                      next_partition_key=None, next_row_key=None):
    keys = _get_storage_account_keys(creds, resource_group_name, account_name)
    table_service = TableService(account_name, keys.key1)
    model = StorageAccountTableDetails()
    model.table_name = table_name
    model.entities = table_service.query_entities(
        table_name,
        top=3,  # small to demonstrate continuations
        next_partition_key=next_partition_key,
        next_row_key=next_row_key,
    )
    model.custom_fields = _get_entities_custom_fields(model.entities)
    return model
def __init__(self, accountName, namePrefix, config=None, jobChunkSize=maxAzureTablePropertySize):
    self.jobChunkSize = jobChunkSize
    self.keyPath = None
    self.account_key = _fetchAzureAccountKey(accountName)
    self.accountName = accountName
    # Table names have strict requirements in Azure
    self.namePrefix = self._sanitizeTableName(namePrefix)
    logger.debug("Creating job store with name prefix '%s'" % self.namePrefix)

    # These are the main API entrypoints.
    self.tableService = TableService(account_key=self.account_key, account_name=accountName)
    self.blobService = BlobService(account_key=self.account_key, account_name=accountName)

    exists = self._jobStoreExists()
    self._checkJobStoreCreation(config is not None, exists, accountName + ":" + self.namePrefix)

    # Serialized jobs table
    self.jobItems = self._getOrCreateTable(self.qualify('jobs'))
    # Job<->file mapping table
    self.jobFileIDs = self._getOrCreateTable(self.qualify('jobFileIDs'))
    # Container for all shared and unshared files
    self.files = self._getOrCreateBlobContainer(self.qualify('files'))
    # Stats and logging strings
    self.statsFiles = self._getOrCreateBlobContainer(self.qualify('statsfiles'))
    # File IDs that contain stats and logging strings
    self.statsFileIDs = self._getOrCreateTable(self.qualify('statsFileIDs'))

    super(AzureJobStore, self).__init__(config=config)

    if self.config.cseKey is not None:
        self.keyPath = self.config.cseKey
def read(self, area, selector):
    assert area is not None, 'area is none; should already be validated'
    area_config = config.load_area(area)
    tracking_config = config.load_tracking(area_config['tracking'])
    table_service = TableService(account_name=tracking_config['name'],
                                 account_key=tracking_config['key1'])
    area = area.lower()
    item = table_service.query_entity(self._get_table(area), area, selector.replace('/', '_'))
    item = item[0] if len(item) > 0 else {}
    return item
def post_table_log(json_str):
    from azure.storage.table import TableService, Entity
    table_service = TableService(account_name=pkey.azure_storage_id,
                                 account_key=pkey.azure_storage_key)
    table_service.create_table("facebooklog")

    def get_table_timestamp_key():
        import time
        current_time = time.gmtime()
        start = time.mktime(current_time)
        last = time.mktime(time.struct_time((2070, 1, 1, 0, 0, 0, 3, 100, -1)))
        return str(int(last - start))

    task = Entity()
    task.PartitionKey = 'feedlog'
    task.RowKey = get_table_timestamp_key()
    task.json = json_str
    table_service.insert_entity('facebooklog', task)
def __init__(self,
             account_name=None,
             account_key=None,
             protocol='https',
             table='logs',
             batch_size=0,
             extra_properties=None,
             partition_key_formatter=None,
             row_key_formatter=None,
             is_emulated=False,
             ):
    """
    Initialize the handler.
    """
    logging.Handler.__init__(self)
    self.service = TableService(account_name=account_name,
                                account_key=account_key,
                                is_emulated=is_emulated,
                                protocol=protocol)
    self.meta = {'hostname': gethostname(), 'process': os.getpid()}
    self.table = _formatName(table, self.meta)
    self.ready = False
    self.rowno = 0
    if not partition_key_formatter:
        # default format for partition keys
        fmt = '%(asctime)s'
        datefmt = '%Y%m%d%H%M'
        partition_key_formatter = logging.Formatter(fmt, datefmt)
    self.partition_key_formatter = partition_key_formatter
    if not row_key_formatter:
        # default format for row keys
        fmt = '%(asctime)s%(msecs)03d-%(hostname)s-%(process)d-%(rowno)02d'
        datefmt = '%Y%m%d%H%M%S'
        row_key_formatter = logging.Formatter(fmt, datefmt)
    self.row_key_formatter = row_key_formatter
    # extra properties and formatters for them
    self.extra_properties = extra_properties
    if extra_properties:
        self.extra_property_formatters = {}
        self.extra_property_names = {}
        for extra in extra_properties:
            if _PY3:
                f = logging.Formatter(fmt=extra, style=extra[0])
            else:
                f = logging.Formatter(fmt=extra)
            self.extra_property_formatters[extra] = f
            self.extra_property_names[extra] = self._getFormatName(extra)
    # the storage emulator doesn't support batch operations
    if batch_size <= 1 or is_emulated:
        self.batch = None
    else:
        self.batch = TableBatch()
        if batch_size > TableStorageHandler.MAX_BATCH_SIZE:
            self.batch_size = TableStorageHandler.MAX_BATCH_SIZE
        else:
            self.batch_size = batch_size
    if self.batch:
        self.current_partition_key = None
def SendAzure():
    table_service = TableService(account_name='[NAMEHERE]', account_key='[KEYHERE]')
    table_name = 'tempData'
    partition_key = 'central'
    table_service.create_table(table_name, False)

    date = datetime.datetime.now()
    iso_date = date.isoformat()
    tempRecord = ReadTemp()
    result = ""

    if tempRecord < 70:
        entry = {'PartitionKey': partition_key, 'RowKey': iso_date, 'Temperature': tempRecord}
        table_service.insert_entity(table_name, entry)
        result = "SENT " + str(tempRecord)
    else:
        result = "ERROR " + str(tempRecord)

    return result
def prepare_storage(settings):
    default_storage_account_name = settings["DEFAULT_STORAGE_ACCOUNT_NAME"]
    storage_access_key = settings["DEFAULT_STORAGE_ACCESS_KEY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]
    protocol = "https"
    if settings["ENVIRONMENT"] == "AzureStack":
        protocol = "http"

    blob_service = AppendBlobService(account_name=default_storage_account_name,
                                     account_key=storage_access_key,
                                     endpoint_suffix=endpoint_suffix,
                                     protocol=protocol)
    blob_service.create_container('bosh')
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )

    # Prepare the table for storing metadata of the storage account and stemcells
    table_service = TableService(account_name=default_storage_account_name,
                                 account_key=storage_access_key,
                                 endpoint_suffix=endpoint_suffix,
                                 protocol=protocol)
    table_service.create_table('stemcells')
def __init__(self, settings):
    """Initialise UOTD repository with the given settings dict.

    Required settings:
    STORAGE_NAME -- the Azure Storage account name
    STORAGE_KEY -- an access key for the Storage account
    STORAGE_TABLE_UOTD -- the name of the table
    """
    self.service = TableService(settings["STORAGE_NAME"], settings["STORAGE_KEY"])
    self.uotd_table = settings["STORAGE_TABLE_UOTD"]
    self.service.create_table(self.uotd_table)
    self.partition_key_format = "%Y%m"
    self.row_key_format = "%d"
def test_sas_delete(self):
    # SAS URL is calculated from storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.DELETE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_service_options(service, self.settings)
    service.delete_entity(self.table_name, entity.PartitionKey, entity.RowKey)

    # Assert
    with self.assertRaises(AzureMissingResourceHttpError):
        self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey)
def __init__(self, settings):
    """Initializes the repository with the specified settings dict.

    Required settings are:
    - STORAGE_NAME
    - STORAGE_KEY
    - STORAGE_TABLE_POLL
    - STORAGE_TABLE_CHOICE
    """
    self.name = 'Azure Table Storage'
    self.storage_name = settings['STORAGE_NAME']
    self.storage_key = settings['STORAGE_KEY']
    self.poll_table = settings['STORAGE_TABLE_POLL']
    self.choice_table = settings['STORAGE_TABLE_CHOICE']

    self.svc = TableService(self.storage_name, self.storage_key)
    self.svc.create_table(self.poll_table)
    self.svc.create_table(self.choice_table)
def prepare_storage(settings):
    default_storage_account_name = settings["DEFAULT_STORAGE_ACCOUNT_NAME"]
    storage_access_key = settings["STORAGE_ACCESS_KEY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]

    blob_service = AppendBlobService(account_name=default_storage_account_name,
                                     account_key=storage_access_key,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )

    # Prepare the table for storing metadata of the storage account and stemcells
    table_service = TableService(account_name=default_storage_account_name,
                                 account_key=storage_access_key,
                                 endpoint_suffix=endpoint_suffix)
    table_service.create_table('stemcells')

    # For secondary
    default_storage_account_name_secondary = settings["DEFAULT_STORAGE_ACCOUNT_NAME_SECONDARY"]
    default_storage_access_key_secondary = settings["DEFAULT_STORAGE_ACCESS_KEY_SECONDARY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]

    blob_service = AppendBlobService(account_name=default_storage_account_name_secondary,
                                     account_key=default_storage_access_key_secondary,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )

    # Prepare the table for storing metadata of the storage account and stemcells
    table_service = TableService(account_name=default_storage_account_name_secondary,
                                 account_key=default_storage_access_key_secondary,
                                 endpoint_suffix=endpoint_suffix)
    table_service.create_table('stemcells')

    # Prepare primary premium storage account
    storage_account_name_primary = settings["STORAGE_ACCOUNT_NAME_PRIMARY"]
    storage_access_key_primary = settings["STORAGE_ACCESS_KEY_PRIMARY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]

    blob_service = AppendBlobService(account_name=storage_account_name_primary,
                                     account_key=storage_access_key_primary,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    blob_service.create_container('stemcell')

    # Prepare secondary premium storage account
    storage_account_name_secondary = settings["STORAGE_ACCOUNT_NAME_SECONDARY"]
    storage_access_key_secondary = settings["STORAGE_ACCESS_KEY_SECONDARY"]
    endpoint_suffix = settings["SERVICE_HOST_BASE"]

    blob_service = AppendBlobService(account_name=storage_account_name_secondary,
                                     account_key=storage_access_key_secondary,
                                     endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    blob_service.create_container('stemcell')
def __init__(self, locator, jobChunkSize=maxAzureTablePropertySize):
    super(AzureJobStore, self).__init__()
    accountName, namePrefix = locator.split(":", 1)
    if "--" in namePrefix:
        raise ValueError("Invalid name prefix '%s'. Name prefixes may not contain %s."
                         % (namePrefix, self.nameSeparator))
    if not self.containerNameRe.match(namePrefix):
        raise ValueError("Invalid name prefix '%s'. Name prefixes must contain only digits, "
                         "hyphens or lower-case letters and must not start or end in a "
                         "hyphen." % namePrefix)
    # Reserve 13 for separator and suffix
    if len(namePrefix) > self.maxContainerNameLen - self.maxNameLen - len(self.nameSeparator):
        raise ValueError("Invalid name prefix '%s'. Name prefixes may not be longer than 50 "
                         "characters." % namePrefix)
    self.locator = locator
    self.jobChunkSize = jobChunkSize
    self.accountKey = _fetchAzureAccountKey(accountName)
    self.accountName = accountName
    # Table names have strict requirements in Azure
    self.namePrefix = self._sanitizeTableName(namePrefix)
    # These are the main API entry points.
    self.tableService = TableService(account_key=self.accountKey, account_name=accountName)
    self.blobService = BlobService(account_key=self.accountKey, account_name=accountName)
    # Serialized jobs table
    self.jobItems = None
    # Job<->file mapping table
    self.jobFileIDs = None
    # Container for all shared and unshared files
    self.files = None
    # Stats and logging strings
    self.statsFiles = None
    # File IDs that contain stats and logging strings
    self.statsFileIDs = None
class Repository(object):
    """Azure Table storage repository for UOTD."""

    def __init__(self, settings):
        """Initialise UOTD repository with the given settings dict.

        Required settings:
        STORAGE_NAME -- the Azure Storage account name
        STORAGE_KEY -- an access key for the Storage account
        STORAGE_TABLE_UOTD -- the name of the table
        """
        self.service = TableService(settings["STORAGE_NAME"], settings["STORAGE_KEY"])
        self.uotd_table = settings["STORAGE_TABLE_UOTD"]
        self.service.create_table(self.uotd_table)
        self.partition_key_format = "%Y%m"
        self.row_key_format = "%d"

    def get_uotd(self):
        """Get the UUID for the current day.

        If the UUID does not yet exist then it will be created.
        """
        partition_key = date.today().strftime(self.partition_key_format)
        row_key = date.today().strftime(self.row_key_format)
        try:
            uotd_entity = self.service.get_entity(self.uotd_table, partition_key, row_key)
            uuid = uotd_entity.uuid
        except AzureMissingResourceHttpError:
            uuid = str(uuid4())
            uotd_entity = {
                "PartitionKey": partition_key,
                "RowKey": row_key,
                "uuid": uuid
            }
            self.service.insert_entity(self.uotd_table, uotd_entity)
        return uuid

    def get_uuids(self, partition_key):
        """Get all the UUIDs in a given partition."""
        filter = "PartitionKey eq '{0}'".format(partition_key)
        entities = self.service.query_entities(self.uotd_table, filter=filter)
        return entities