Example #1
    def test_sas_add(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        token = self.ts.generate_table_shared_access_signature(
            self.table_name,
            TablePermissions.ADD,
            datetime.utcnow() + timedelta(hours=1),
            datetime.utcnow() - timedelta(minutes=1),
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)

        entity = self._create_random_entity_dict()
        service.insert_entity(self.table_name, entity)

        # Assert
        resp = self.ts.get_entity(self.table_name, entity['PartitionKey'],
                                  entity['RowKey'])
        self._assert_default_entity(resp)
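
The same pattern works outside the test harness. A minimal sketch, assuming the azure-cosmosdb-table package and hypothetical account credentials: a client built from the account key issues a table SAS limited to ADD, and a second client authenticated only with that SAS inserts an entity.

from datetime import datetime, timedelta

from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import TablePermissions

# Hypothetical storage account and key, for illustration only.
key_client = TableService(account_name='mystorageaccount', account_key='<account-key>')
sas_token = key_client.generate_table_shared_access_signature(
    'mytable',
    TablePermissions.ADD,
    datetime.utcnow() + timedelta(hours=1),    # expiry
    datetime.utcnow() - timedelta(minutes=1),  # start (allow for clock skew)
)

# This client can only add entities to 'mytable'.
sas_client = TableService(account_name='mystorageaccount', sas_token=sas_token)
sas_client.insert_entity('mytable', {'PartitionKey': 'pk1', 'RowKey': 'rk1'})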
Example #2
    def test_sas_add_outside_range(self):
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange

        token = self.ts.generate_table_shared_access_signature(
            self.table_name,
            TablePermissions.ADD,
            datetime.utcnow() + timedelta(hours=1),
            start_pk='test',
            start_rk='test1',
            end_pk='test',
            end_rk='test1',
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        with self.assertRaises(AzureHttpError):
            entity = self._create_random_entity_dict()
            service.insert_entity(self.table_name, entity)
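
For contrast, a hedged sketch (reusing the hypothetical key_client and names from the earlier sketch) in which the insert stays inside the PartitionKey/RowKey range granted by the SAS, so the service accepts it:

token = key_client.generate_table_shared_access_signature(
    'mytable',
    TablePermissions.ADD,
    datetime.utcnow() + timedelta(hours=1),
    start_pk='test', start_rk='test1',
    end_pk='test', end_rk='test1',
)
sas_client = TableService(account_name='mystorageaccount', sas_token=token)
# PartitionKey/RowKey fall inside the granted range, so this insert succeeds.
sas_client.insert_entity('mytable', {'PartitionKey': 'test', 'RowKey': 'test1'})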
Example #3
def process_event(table_client: azuretable.TableService, table_name: str,
                  source: str, event: str, ts: float, message: str) -> None:
    """Process event
    :param azure.cosmosdb.table.TableService table_client: table client
    :param str table_name: table name
    :param str source: source
    :param str event: event
    :param float ts: time stamp
    :param str message: message
    """
    entity = {
        'PartitionKey': _PARTITION_KEY,
        'RowKey': str(ts),
        'Event': '{}:{}'.format(source, event),
        'NodeId': _NODEID,
        'Message': message,
    }
    while True:
        try:
            table_client.insert_entity(table_name, entity)
            break
        except azure.common.AzureConflictHttpError:
            if not isinstance(ts, float):
                ts = float(ts)
            ts += 0.000001
            entity['RowKey'] = str(ts)
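
Two calls with an identical timestamp illustrate the conflict handling above: the second insert collides on RowKey, so the loop nudges the timestamp by one microsecond and retries until no existing row conflicts. A hypothetical call; table_client and the module globals _PARTITION_KEY and _NODEID are assumed to be set up by the surrounding module:

# Both events carry the same timestamp; the second ends up under a
# slightly larger RowKey after the AzureConflictHttpError retry.
process_event(table_client, 'events', 'cascade', 'pull-start', 1500000000.0, 'first')
process_event(table_client, 'events', 'cascade', 'pull-start', 1500000000.0, 'second')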
Example #4
def _merge_resource(table_client: azuretable.TableService, resource: str,
                    nglobalresources: int) -> None:
    """Merge resource to the image table
    :param azuretable.TableService table_client: table client
    :param str resource: resource to add to the image table
    :param int nglobalresources: number of global resources
    """
    # merge resource to the image table
    entity = {
        'PartitionKey': _PARTITION_KEY,
        'RowKey': compute_resource_hash(resource),
        'Resource': resource,
        'VmList0': _NODEID,
    }
    logger.debug('merging entity {} to the image table'.format(entity))
    try:
        table_client.insert_entity(_STORAGE_CONTAINERS['table_images'],
                                   entity=entity)
    except azure.common.AzureConflictHttpError:
        while True:
            entity = table_client.get_entity(
                _STORAGE_CONTAINERS['table_images'], entity['PartitionKey'],
                entity['RowKey'])
            # merge VmList into entity
            evms = []
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                if prop in entity:
                    evms.extend(entity[prop].split(','))
            if _NODEID in evms:
                break
            evms.append(_NODEID)
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                start = i * _MAX_VMLIST_IDS_PER_PROPERTY
                end = start + _MAX_VMLIST_IDS_PER_PROPERTY
                if end > len(evms):
                    end = len(evms)
                if start < end:
                    entity[prop] = ','.join(evms[start:end])
                else:
                    entity[prop] = None
            etag = entity['etag']
            entity.pop('etag')
            try:
                table_client.merge_entity(_STORAGE_CONTAINERS['table_images'],
                                          entity=entity,
                                          if_match=etag)
                break
            except azure.common.AzureHttpError as ex:
                if ex.status_code != 412:
                    raise
    logger.info('entity {} merged to the image table'.format(entity))
    global _GR_DONE
    if not _GR_DONE:
        try:
            entities = table_client.query_entities(
                _STORAGE_CONTAINERS['table_images'],
                filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
        except azure.common.AzureMissingResourceHttpError:
            entities = []
        count = 0
        for entity in entities:
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                mode_prefix = _CONTAINER_MODE.name.lower() + ':'
                if (prop in entity and _NODEID in entity[prop]
                        and entity['Resource'].startswith(mode_prefix)):
                    count += 1
        if count == nglobalresources:
            _record_perf('gr-done',
                         'nglobalresources={}'.format(nglobalresources))
            _GR_DONE = True
            logger.info(
                'all {} global resources of container mode "{}" loaded'.format(
                    nglobalresources, _CONTAINER_MODE.name.lower()))
        else:
            logger.info(
                '{}/{} global resources of container mode "{}" loaded'.format(
                    count, nglobalresources, _CONTAINER_MODE.name.lower()))
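
The inner loop above is a standard optimistic-concurrency pattern: read the entity, modify the copy, and pass its etag as if_match; a 412 response means another node merged first, so the read-modify-merge cycle repeats. A stripped-down sketch with placeholder table and key names:

import azure.common

while True:
    entity = table_client.get_entity('imagestable', 'pk', 'rk')
    entity['VmList0'] = 'new-value'   # modify the copy that was read
    etag = entity.pop('etag')         # etag of the version that was read
    try:
        table_client.merge_entity('imagestable', entity=entity, if_match=etag)
        break                         # accepted: nobody wrote in between
    except azure.common.AzureHttpError as ex:
        if ex.status_code != 412:
            raise                     # anything but 412 Precondition Failed is fatal
        # 412: another writer updated the entity first; loop and retry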
Example #5
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 hub,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = hub
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                hub, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        # Start -> List out all RGs and identify new spokes to mark them with tags.
        # Look for Resource Groups (RGs) that either have no tags or do not have
        # a tag named "PANORAMA_PROGRAMMED".
        # potential_new_spokes = [x.name for x in self.resource_client.resource_groups.list()\
        #                  if not x.tags or not x.tags.get(self.RG_RULE_PROGRAMMED_TAG, None)]

        # If the RG has a VMSS with a tag named "PanoramaManaged" whose value is the
        # Hub Resource Group name, then this is a new spoke launched and managed by
        # the Hub that has not yet been programmed for NAT/the Azure Instrumentation
        # key.
        # for rg in potential_new_spokes:
        #     fw_vm_list = [x for x in self.resource_client.resources.list_by_resource_group(rg)
        #                   if x.type == self.VMSS_TYPE and self.filter_vmss(rg, x.name)]
        #     if fw_vm_list:
        #         rg_params = {'location': self.resource_client.resource_groups.get(rg).location}
        #         rg_params.update(tags={
        #                                  self.RG_RULE_PROGRAMMED_TAG : 'No',
        #                                  self.HUB_MANAGED_TAG        : self.hub_name
        #                               })
        #         self.resource_client.resource_groups.create_or_update(rg, rg_params)
        #         self.logger.info("RG %s marked as a spoke managed by this hub %s" % (rg, self.hub_name))
        # End -> List out all RGs and identify new spokes to mark them with tags.

        # Populate the list of spokes managed by this Azure hub.
        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []
        # for rg in rg_list:
        #     if rg.tags and rg.tags.get(self.HUB_MANAGED_TAG, None) == self.hub_name:
        #         self.managed_spokes.append(rg.name)
        #         if rg.tags.get(self.RG_RULE_PROGRAMMED_TAG, 'Yes') == 'No':
        #             self.new_spokes.append(rg.name)
        # self.logger.debug('%s identified as spokes managed by %s' % (self.managed_spokes, self.hub_name))
        # if self.new_spokes:
        #     self.logger.info('%s identified as new spokes to be programmed by %s' % (self.new_spokes, self.hub_name))
        #
        #

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def get_ilb_ip(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the ILB IP address from the spoke. The ILB name is always
            # hardcoded to be myPrivateLB (ILB_NAME).
            if resource.name == self.ILB_NAME and resource.type == self.ILB_TYPE:
                ilb_obj = self.network_client.load_balancers.get(
                    spoke, resource.name)
                ilb_frontend_cfg = ilb_obj.frontend_ip_configurations
                try:
                    ilb_private_ip = ilb_frontend_cfg[0].private_ip_address
                except IndexError as e:
                    self.logger.info("ILB is not setup yet in RG %s." % spoke)
                    return None
                return ilb_private_ip
        return None

    def get_appinsights_instr_key(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the Appinsights instance where the custom metrics are being
            # published.
            if resource.type == self.APPINSIGHTS_TYPE and 'appinsights' in resource.name:
                appinsights_obj = self.resource_client.resources.get_by_id(
                    resource.id, '2014-04-01')
                instr_key = appinsights_obj.properties.get(
                    'InstrumentationKey', '')
                if not instr_key:
                    self.logger.info("InstrKey is not setup yet in %s." %
                                     spoke)
                    return None
                return instr_key
        return None

    def set_spoke_as_programmed(self, spoke):
        spoke_params = {
            'location':
            self.resource_client.resource_groups.get(spoke).location
        }
        spoke_tags = self.resource_client.resource_groups.get(spoke).tags
        spoke_tags[self.RG_RULE_PROGRAMMED_TAG] = 'Yes'
        spoke_params.update(tags=spoke_tags)
        self.resource_client.resource_groups.create_or_update(
            spoke, spoke_params)
        self.logger.info(
            "RG %s marked as programmed and spoke managed by this hub %s" %
            (spoke, self.hub_name))

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed with error %s" %
                              (vm_hostname, e))
            return None
        else:
            # IF possible update status TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):

        vm = Entity()

        # PartitionKey is nothing but the spoke name
        vm.PartitionKey = spoke
        # RowKey is nothing but the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name} \
                            for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
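
A hedged instantiation sketch for the helper class above, assuming the older azure.common.credentials service-principal flow used by these management clients; every name and the Panorama handle are placeholders:

import logging

from azure.common.credentials import ServicePrincipalCredentials

cred = ServicePrincipalCredentials(client_id='<app-id>',
                                   secret='<client-secret>',
                                   tenant='<tenant-id>')
azure_helper = Azure(cred,
                     '<subscription-id>',
                     'hub-rg',            # hub: resource group holding the storage account
                     'spoke-vmss-rg',     # vmss_rg_name
                     'fw-vmss',           # vmss_name
                     'hubstorageacct',    # storage account name
                     panorama_handle,     # pan_handle: Panorama API handle (placeholder)
                     logger=logging.getLogger(__name__))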
Example #6
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 my_storage_rg,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = vmss_rg_name
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                my_storage_rg, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed with error %s" %
                              (vm_hostname, e))
            return None
        else:
            # IF possible update status TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):

        vm = Entity()

        # PartitionKey is nothing but the spoke name
        vm.PartitionKey = spoke
        # RowKey is nothing but the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name} \
                            for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
Example #7
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity

tablename = 'tasktable'
task = {
    'PartitionKey': 'tasksSeattle',
    'RowKey': '001',
    'description': 'Take out the trash',
    'priority': 200
}

#Blob Storage - Table Service
print("Demo / Blob Storage - Table Service ... Start")
Blob_table_service = TableService(account_name='BlobStorageAccountName',
                                  account_key='BlobStorageAccountKey')
Blob_table_service.create_table(tablename)
Blob_table_service.insert_entity(tablename, task)
# get_table_service_stats()
# Retrieves statistics related to replication for the Table service.
# IMPORTANT NOTE:
# It is only available when read-access geo-redundant replication is enabled for the storage account.
# ref: https://docs.microsoft.com/en-us/python/api/azure-cosmosdb-table/azure.cosmosdb.table.tableservice.tableservice?view=azure-python#get-table-service-stats-timeout-none-
Blob_table_service.get_table_service_stats()
print("Demo / Blob Storage - Table Service ... End")

#Cosmos DB - Table API
print("Demo / Cosmos DB - Table API ... Start")
connstring = 'DefaultEndpointsProtocol=https;AccountName=CosmosDBAccountName;AccountKey=CosmosDBAccountKey;TableEndpoint=https://CosmosDBAccountName.table.cosmos.azure.com:443/;'
Cosmos_table_service = TableService(endpoint_suffix="table.cosmos.azure.com",
                                    connection_string=connstring)
Cosmos_table_service.create_table(tablename)
Cosmos_table_service.insert_entity(tablename, task)
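
To verify the round trip, either client can read the task back; a short sketch using the Cosmos DB client and the keys from the task dict defined above:

task_read = Cosmos_table_service.get_entity(tablename, 'tasksSeattle', '001')
print(task_read.description, task_read.priority)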