def test_sas_delete(self):
    # SAS URL is calculated from the storage key, so this test runs live only
    if TestMode.need_recording_file(self.test_mode):
        return

    # Arrange
    entity = self._insert_random_entity()
    token = self.ts.generate_table_shared_access_signature(
        self.table_name,
        TablePermissions.DELETE,
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act
    service = TableService(
        account_name=self.settings.STORAGE_ACCOUNT_NAME,
        sas_token=token,
    )
    self._set_test_proxy(service, self.settings)
    service.delete_entity(self.table_name, entity.PartitionKey, entity.RowKey)

    # Assert
    with self.assertRaises(AzureMissingResourceHttpError):
        self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey)
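
# Usage sketch (an assumption, not part of the original tests): the same SAS
# pattern with a read-only token, assuming the azure-cosmosdb-table package.
# The account name, table name, and filter value are placeholders; `ts` is a
# TableService authenticated with the account key, as in the test above.
from datetime import datetime, timedelta

from azure.cosmosdb.table import TableService, TablePermissions


def example_query_with_sas(ts, account_name, table_name):
    # Issue a token that only allows querying for the next hour
    token = ts.generate_table_shared_access_signature(
        table_name,
        TablePermissions.QUERY,
        datetime.utcnow() + timedelta(hours=1),
    )
    # Build a client that authenticates with the SAS token instead of the key
    readonly = TableService(account_name=account_name, sas_token=token)
    return list(readonly.query_entities(table_name,
                                        filter="PartitionKey eq 'pk'"))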
class ShoppingCartServiceCloud:
    """Shopping cart methods called from the API to interact with the DB."""

    def __init__(self, shards=1):
        self.shards = shards
        self.table_name = "ShoppingCartTable"
        try:
            self.db = TableService(
                endpoint_suffix="table.cosmos.azure.com",
                connection_string=os.getenv("AZURE_COSMOS_CONNECTION_STRING"),
            )
        except ValueError:
            raise Exception(
                "Please initialize $AZURE_COSMOS_CONNECTION_STRING")
        try:
            self.db.create_table(self.table_name, fail_on_exist=True)
        except AzureConflictHttpError:
            # Accept the error only if the table already exists
            pass

    def get_product_items(self, customer_id):
        row_key = utils.hash_key(customer_id)
        partition_key = 'ShoppingCart' + str(row_key % self.shards).zfill(3)
        # Get entity
        try:
            items = self.db.get_entity(self.table_name, partition_key,
                                       str(row_key))
            product_items = json.loads(items.ProductItems)
        except AzureMissingResourceHttpError:
            product_items = []
        return product_items

    def update_product_items(self, customer_id, product_items):
        row_key = utils.hash_key(customer_id)
        partition_key = 'ShoppingCart' + str(row_key % self.shards).zfill(3)
        product_items = [
            item for item in product_items if item["unitCount"] > 0
        ]
        # Insert or update items
        items = Entity()
        items.PartitionKey = partition_key
        items.RowKey = str(row_key)
        items.CustomerId = customer_id
        items.ProductItems = json.dumps(product_items)
        self.db.insert_or_replace_entity(self.table_name, items)

    def delete_shopping_cart(self, customer_id):
        row_key = utils.hash_key(customer_id)
        partition_key = 'ShoppingCart' + str(row_key % self.shards).zfill(3)
        # Get items to check out before deleting
        try:
            items = self.db.get_entity(self.table_name, partition_key,
                                       str(row_key))
            checkout_items = json.loads(items.ProductItems)
        except AzureMissingResourceHttpError:
            checkout_items = []
        self.db.delete_entity(self.table_name, partition_key, str(row_key))
        return checkout_items
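
# Usage sketch (hypothetical, not from the original module): a typical
# add-then-checkout flow against ShoppingCartServiceCloud. Requires
# $AZURE_COSMOS_CONNECTION_STRING to be set; the customer id and the
# "productId" field are placeholders, only "unitCount" is required above.
def example_shopping_cart_flow():
    cart = ShoppingCartServiceCloud(shards=1)
    customer_id = "customer-42"

    # Replace the cart contents; items with unitCount <= 0 are dropped
    cart.update_product_items(customer_id, [
        {"productId": "sku-1", "unitCount": 2},
        {"productId": "sku-2", "unitCount": 0},
    ])

    # Read back what was stored ([] if the entity does not exist)
    print(cart.get_product_items(customer_id))

    # Delete the cart and get the items that were in it at checkout time
    print(cart.delete_shopping_cart(customer_id))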
class EventRepository:
    events_by_date_table = "eventsByDate"
    event_duplicates_table = "eventDuplicates"

    def __init__(self, connection_string=None):
        if not connection_string:
            connection_string = (
                "AccountName=devstoreaccount1;"
                "AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;"
                "DefaultEndpointsProtocol=http;"
                "BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
                "QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;"
                "TableEndpoint=http://127.0.0.1:10002/devstoreaccount1;")
        self.table_client = TableService(connection_string=connection_string,
                                         is_emulated=True)

    def list_events_by_date(self, dt: datetime.date) -> List[dict]:
        pk = self._date_to_pk(dt)
        for event in self.table_client.query_entities(
                self.events_by_date_table,
                filter="PartitionKey eq '%s'" % (pk, )):
            if 'place' in event:
                event['place'] = json.loads(event['place'])
            if 'dates' in event:
                event['dates'] = json.loads(event['dates'])
            if 'raw_dates' in event:
                event['raw_dates'] = event['raw_dates'].split('\n')
            if 'tags' in event:
                event['tags'] = event['tags'].split(',')
            if 'type' in event:
                event['type'] = event['type'].split(',')
            if 'cost' in event:
                event['cost'] = event['cost'].split(',')
            yield event

    def remove_rows(self, dt, row_keys):
        pk = self._date_to_pk(dt)
        for key in row_keys:
            self.table_client.delete_entity(self.events_by_date_table, pk, key)

    def save_events_by_date(self,
                            events: List[dict],
                            dt: datetime.date,
                            table_name=events_by_date_table):
        partition_keys = set()
        for event in events:
            if 'PartitionKey' not in event:
                if dt:
                    event['PartitionKey'] = self._date_to_pk(dt)
                else:
                    event['PartitionKey'] = str(datetime.date.today().year)
            if 'RowKey' not in event:
                full_text = (event['title'] + "\n" +
                             event['short_description'] + "\n" +
                             event['description'])
                event['RowKey'] = str(hash(full_text))
            event['place'] = json.dumps(event['place'], ensure_ascii=False)
            event['dates'] = json.dumps(event['dates'])
            event['tags'] = ",".join(event['tags'])
            if 'type' in event:
                event['type'] = ",".join(event['type'])
            if 'raw_dates' in event:
                event['raw_dates'] = "\n".join(event['raw_dates'])
            if 'cost' in event and event['cost']:
                event['cost'] = ",".join(str(c) for c in event['cost'])
            else:
                event['cost'] = None
            self.table_client.insert_or_replace_entity(table_name, event)
            partition_keys.add(event['PartitionKey'])
        for pk in partition_keys:
            self.table_client.insert_or_replace_entity(table_name, {
                "PartitionKey": "PARTITIONS",
                "RowKey": pk
            })

    def save_events_json(self, events: List[dict]):
        grouped_events = group_by_dates(events)
        for dt, events in grouped_events.items():
            self.save_events_by_date(events, dt)

    def save_verified_events(self, events: List[Event]):
        pk = datetime.datetime.now().timestamp() % 255
        for event in events:
            event_description = event.to_str()
            event_hash = hash(event_description)
            self.table_client.insert_or_replace_entity(
                "verifiedEvents", {
                    "PartitionKey": str(pk),
                    "RowKey": str(event_hash),
                    "Text": event_description,
                    "Labels": ",".join(event.event_tags)
                })

    @staticmethod
    def _date_to_pk(dt: datetime.date):
        return "%d_%d_%d" % (dt.year, dt.month, dt.day)
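
# Usage sketch (hypothetical, not from the original module): saving one
# minimal event for a date and listing it back through the emulator-backed
# repository. The field values are placeholders; the keys mirror what
# save_events_by_date expects.
import datetime


def example_event_repository_roundtrip():
    repo = EventRepository()  # falls back to the local storage emulator
    today = datetime.date.today()
    repo.save_events_by_date([{
        "title": "Concert",
        "short_description": "An evening concert",
        "description": "Longer description of the concert",
        "place": {"name": "City Hall"},
        "dates": [],
        "tags": ["music"],
        "cost": ["free"],
    }], today)
    for event in repo.list_events_by_date(today):
        print(event["title"], event["tags"])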
class StorageAccount(object):
    json_serializer = TaggedJSONSerializer()

    def __init__(self, connection_str: str, table_name: str,
                 partition_key: str, create_table_if_not_exists: bool):
        self.table_name = table_name
        self.partition_key = partition_key
        self.create_table_if_not_exists = create_table_if_not_exists
        self.table_service = TableService(connection_string=connection_str)

    def write(self, key: str, data: dict, encryption_key: bytes) -> None:
        """Serializes and encrypts the passed dict object and writes it to
        the storage."""
        data = self.json_serializer.dumps(data)
        encoded_data, tag, nonce = self.encrypt(data, encryption_key)
        entity = {
            "PartitionKey": self.partition_key,
            "RowKey": key,
            "Data": encoded_data,
            "Tag": tag,
            "Nonce": nonce
        }
        try:
            self.table_service.insert_or_merge_entity(self.table_name, entity)
        except AzureMissingResourceHttpError:
            if not self.create_table_if_not_exists:
                raise
            self.table_service.create_table(self.table_name)
            self.table_service.insert_or_merge_entity(self.table_name, entity)

    def read(self, key: str, app_key: bytes) -> Union[List[Dict], None]:
        """Reads encrypted data from storage, then decrypts and deserializes
        it. Returns None if no data was found or decryption failed."""
        try:
            data = self.table_service.get_entity(self.table_name,
                                                 self.partition_key, key)
            decoded = self.decrypt(data["Data"], data["Tag"], data["Nonce"],
                                   app_key)
            if decoded is not None:
                return self.json_serializer.loads(decoded)
            return None
        except AzureMissingResourceHttpError:
            return None

    def delete(self, key: str) -> None:
        """Removes an element from storage if it exists."""
        try:
            self.table_service.delete_entity(self.table_name,
                                             self.partition_key, key)
        except AzureMissingResourceHttpError:
            pass

    @staticmethod
    def encrypt(data: str, secret_text: bytes) -> Tuple[str, str, str]:
        """Encrypts the passed data with the secret text.

        :return: a tuple of three elements: encrypted data, verification tag
            and nonce. All elements are base64-encoded strings.
        """
        cipher = AES.new(secret_text, AES.MODE_EAX)
        ciphertext, tag = cipher.encrypt_and_digest(data.encode("utf-8"))
        return (base64.b64encode(ciphertext).decode("ascii"),
                base64.b64encode(tag).decode("ascii"),
                base64.b64encode(cipher.nonce).decode("ascii"))

    @staticmethod
    def decrypt(encrypted_data: str, verification_tag: str, nonce: str,
                secret_text: bytes) -> Union[str, None]:
        """Decrypts encrypted data using the passed secret_text.

        :param encrypted_data: as base64-encoded string or byte array
        :param verification_tag: as base64-encoded string or byte array
        :param nonce: as base64-encoded string or byte array
        :param secret_text: the same secret text with which the element was
            encrypted
        :return: the plaintext on success, None if the data could not be
            decoded or verified
        """
        nonce = base64.b64decode(nonce)
        cipher = AES.new(secret_text, AES.MODE_EAX, nonce=nonce)
        data = base64.b64decode(encrypted_data)
        plaintext = cipher.decrypt(data)
        tag = base64.b64decode(verification_tag)
        try:
            cipher.verify(tag)
            return plaintext.decode("utf-8")
        except ValueError:
            return None
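
# Usage sketch (hypothetical, not from the original module): a write/read
# round trip through StorageAccount. The connection string, table name, and
# row key are placeholders; the 16-byte secret is a valid AES-128 key for
# AES.new(..., AES.MODE_EAX) as used above.
import os


def example_storage_account_roundtrip(connection_str):
    store = StorageAccount(connection_str,
                           table_name="sessions",
                           partition_key="sessions",
                           create_table_if_not_exists=True)
    secret = os.urandom(16)  # 24- or 32-byte keys also work with AES
    store.write("session-1", {"user": "alice", "roles": ["admin"]}, secret)
    print(store.read("session-1", secret))          # decrypts and deserializes
    print(store.read("session-1", os.urandom(16)))  # wrong key -> None
    store.delete("session-1")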
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self, cred, subs_id, hub, vmss_rg_name, vmss_name, storage,
                 pan_handle, logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = hub
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                hub, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        # Start -> List out all RGs and identify new spokes to mark them with
        # tags. Look for Resource Groups (RGs) which do not have tags or do
        # not have a tag named "PANORAMA_PROGRAMMED".
        # potential_new_spokes = [x.name for x in self.resource_client.resource_groups.list()
        #                         if not x.tags or not x.tags.get(self.RG_RULE_PROGRAMMED_TAG, None)]

        # If the RG has a VMSS which has a tag named "PanoramaManaged" with a
        # value of the Hub Resource Group name, then we know that this is a
        # new spoke launched and managed by the Hub and not yet programmed
        # for the NAT/Azure Instrumentation key.
        # for rg in potential_new_spokes:
        #     fw_vm_list = [x for x in self.resource_client.resources.list_by_resource_group(rg)
        #                   if x.type == self.VMSS_TYPE and self.filter_vmss(rg, x.name)]
        #     if fw_vm_list:
        #         rg_params = {'location': self.resource_client.resource_groups.get(rg).location}
        #         rg_params.update(tags={
        #             self.RG_RULE_PROGRAMMED_TAG: 'No',
        #             self.HUB_MANAGED_TAG: self.hub_name
        #         })
        #         self.resource_client.resource_groups.create_or_update(rg, rg_params)
        #         self.logger.info("RG %s marked as a spoke managed by this hub %s" % (rg, self.hub_name))
        # End -> List out all RGs and identify new spokes to mark them with tags.

        # Populate the list of spokes managed by this Azure hub.
        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []
        # for rg in rg_list:
        #     if rg.tags and rg.tags.get(self.HUB_MANAGED_TAG, None) == self.hub_name:
        #         self.managed_spokes.append(rg.name)
        #         if rg.tags.get(self.RG_RULE_PROGRAMMED_TAG, 'Yes') == 'No':
        #             self.new_spokes.append(rg.name)
        # self.logger.debug('%s identified as spokes managed by %s' % (self.managed_spokes, self.hub_name))
        # if self.new_spokes:
        #     self.logger.info('%s identified as new spokes to be programmed by %s' % (self.new_spokes, self.hub_name))

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def get_ilb_ip(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the ILB IP address from the spoke. The ILB name is always
            # hardcoded to be myPrivateILB.
            if resource.name == self.ILB_NAME and resource.type == self.ILB_TYPE:
                ilb_obj = self.network_client.load_balancers.get(
                    spoke, resource.name)
                ilb_frontend_cfg = ilb_obj.frontend_ip_configurations
                try:
                    ilb_private_ip = ilb_frontend_cfg[0].private_ip_address
                except IndexError:
                    self.logger.info("ILB is not setup yet in RG %s." % spoke)
                    return None
                return ilb_private_ip
        return None

    def get_appinsights_instr_key(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the AppInsights instance where the custom metrics are being
            # published.
            if resource.type == self.APPINSIGHTS_TYPE and 'appinsights' in resource.name:
                appinsights_obj = self.resource_client.resources.get_by_id(
                    resource.id, '2014-04-01')
                instr_key = appinsights_obj.properties.get(
                    'InstrumentationKey', '')
                if not instr_key:
                    self.logger.info("InstrKey is not setup yet in %s." %
                                     spoke)
                    return None
                return instr_key
        return None

    def set_spoke_as_programmed(self, spoke):
        spoke_params = {
            'location':
            self.resource_client.resource_groups.get(spoke).location
        }
        spoke_tags = self.resource_client.resource_groups.get(spoke).tags
        spoke_tags[self.RG_RULE_PROGRAMMED_TAG] = 'Yes'
        spoke_params.update(tags=spoke_tags)
        self.resource_client.resource_groups.create_or_update(
            spoke, spoke_params)
        self.logger.info(
            "RG %s marked as programmed and spoke managed by this hub %s" %
            (spoke, self.hub_name))

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB table if it does not already exist
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):
        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]
        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vmss_in_spoke(self, spoke):
        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]
        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):
        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):
        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception:
            self.logger.error("Querying for %s failed" % vm_hostname)
            return None
        else:
            # If possible, update status. TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              vm_hostname)
            return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):
        vm = Entity()
        # PartitionKey is nothing but the spoke name
        vm.PartitionKey = spoke
        # RowKey is nothing but the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):
        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None
        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey,
                                 'serial': x.serial_no,
                                 'name': x.name}
                                for x in db_vms_list
                                if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):
        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
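
# Usage sketch (hypothetical, not from the original module): wiring up the
# Azure helper and recording a firewall VM in the storage-backed VMSS table.
# The credential, resource group, storage account, and vm_details values are
# all placeholders; pan_handle is left as None because it is not used here.
import logging


def example_track_fw_vm(cred, subscription_id):
    azure = Azure(cred, subscription_id,
                  hub='hub-rg',
                  vmss_rg_name='spoke-rg',
                  vmss_name='fw-vmss',
                  storage='hubstorageacct',
                  pan_handle=None,
                  logger=logging.getLogger(__name__))
    azure.create_new_cosmos_table(azure.vmss_table_name)
    azure.create_db_entity('spoke-rg', {
        'name': 'fw-1',
        'hostname': 'fw-1-host',
        'serial': '0070000001',
        'ip-address': '10.0.0.4',
        'connected': 'yes',
        'deactivated': 'no',
    })
    print(azure.get_fw_vms_in_cosmos_db('spoke-rg'))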
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self, cred, subs_id, my_storage_rg, vmss_rg_name, vmss_name,
                 storage, pan_handle, logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = vmss_rg_name
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                my_storage_rg, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB table if it does not already exist
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):
        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]
        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vmss_in_spoke(self, spoke):
        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]
        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):
        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):
        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception:
            self.logger.error("Querying for %s failed" % vm_hostname)
            return None
        else:
            # If possible, update status. TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              vm_hostname)
            return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):
        vm = Entity()
        # PartitionKey is nothing but the spoke name
        vm.PartitionKey = spoke
        # RowKey is nothing but the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):
        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None
        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey,
                                 'serial': x.serial_no,
                                 'name': x.name}
                                for x in db_vms_list
                                if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):
        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)