def table_sas(self):
        """Demonstrate read access to a single table via a table-level SAS."""
        table_name = self._create_table()
        self.service.insert_entity(table_name, {
            'PartitionKey': 'test',
            'RowKey': 'test1',
            'text': 'hello world',
        })

        # Token scoped to this one table: query permission only, one-hour expiry.
        token = self.service.generate_table_shared_access_signature(
            table_name,
            TablePermissions.QUERY,
            datetime.utcnow() + timedelta(hours=1),
        )

        # Build a fresh client that authenticates with nothing but the SAS token.
        sas_service = TableService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        for row in sas_service.query_entities(table_name):
            print(row.text)  # hello world

        self.service.delete_table(table_name)
    def account_sas(self):
        """Demonstrate read access to table entities via an account-level SAS."""
        table_name = self._create_table()
        self.service.insert_entity(table_name, {
            'PartitionKey': 'test',
            'RowKey': 'test1',
            'text': 'hello world',
        })

        # Account-wide token: object-level READ across all tables, one-hour expiry.
        token = self.service.generate_account_shared_access_signature(
            ResourceTypes.OBJECT,
            AccountPermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
        )

        # New client authenticated solely by the SAS token.
        sas_service = TableService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        for row in list(sas_service.query_entities(table_name)):
            print(row.text)  # hello world

        self.service.delete_table(table_name)
Пример #3
0
    def test_logging(self):
        """End-to-end check that records emitted through the handler land in
        table storage with LevelName/Message columns that match."""
        # unique marker so this session's rows can be told apart
        session_id = str(uuid4())

        # module logger at DEBUG so every severity below is emitted
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)

        # route records into Azure table storage
        log.addHandler(AzureTableStorageHandler(
            account_name=account_name,
            account_key=account_key,
            table_name=table_name
        ))

        # one record per severity level
        for level in ("debug", "info", "warning", "error", "critical"):
            getattr(log, level)(f"{level.upper()}: {session_id}")

        # read the rows back and verify the stored message matches its level
        ts = TableService(account_name=account_name, account_key=account_key)
        for ent in ts.query_entities(table_name=table_name, filter="PartitionKey eq '__main__'"):
            self.assertEqual(ent["LevelName"] + ": " + session_id, ent["Message"])
    def sas_with_signed_identifiers(self):
        """Demonstrate a SAS whose rights come from a stored access policy."""
        table_name = self._create_table()
        self.service.insert_entity(table_name, {
            'PartitionKey': 'test',
            'RowKey': 'test1',
            'text': 'hello world',
        })

        # Store a query-only, one-hour policy on the table under identifier 'id'.
        policy = AccessPolicy(permission=TablePermissions.QUERY,
                              expiry=datetime.utcnow() + timedelta(hours=1))
        acl = self.service.set_table_acl(table_name, {'id': policy})

        # Give the ACL time to propagate before relying on it.
        time.sleep(30)

        # The token carries no rights of its own; it references the stored policy.
        token = self.service.generate_table_shared_access_signature(table_name,
                                                                    id='id')

        # Client authenticated only by the SAS token.
        sas_service = TableService(
            account_name=self.account.account_name,
            sas_token=token,
        )

        for row in list(sas_service.query_entities(table_name)):
            print(row.text)  # hello world

        self.service.delete_table(table_name)
Пример #5
0
    def test_sas_upper_case_table_name(self):
        """A SAS signed with the upper-cased table name must still authorize
        queries, since table names are case insensitive."""
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange
        entity = self._insert_random_entity()

        # Sign for the upper-cased name; window is (now - 1 min, now + 1 h).
        token = self.ts.generate_table_shared_access_signature(
            self.table_name.upper(),
            TablePermissions.QUERY,
            datetime.utcnow() + timedelta(hours=1),
            datetime.utcnow() - timedelta(minutes=1),
        )

        # Act
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        pk_filter = "PartitionKey eq '{}'".format(entity['PartitionKey'])
        entities = list(service.query_entities(self.table_name,
                                               filter=pk_filter))

        # Assert
        self.assertEqual(len(entities), 1)
        self._assert_default_entity(entities[0])
Пример #6
0
class AzureTableConnection:
    """Minimal wrapper binding a TableService client to one table name."""

    def __init__(self, tableName):
        # Credentials come from the environment; raises KeyError if unset.
        self.tableName = tableName
        self.tableService = TableService(
            account_name=os.environ['STORAGEACCOUNTNAME'],
            account_key=os.environ['STORAGEACCOUNTKEY'])

    def commitBatch(self, batch):
        """Apply a prepared batch of operations to the bound table."""
        self.tableService.commit_batch(self.tableName, batch)

    def getData(self, partitionKey, rowKey):
        """Query entities in *partitionKey* with RowKey strictly between
        '<rowKey>_0' and '<rowKey>_9999' (lexicographic comparison).

        NOTE(review): the backslash continuations inside the filter literal
        embed the source indentation as spaces in the query string;
        presumably the service's filter parser tolerates the extra
        whitespace -- confirm before reformatting these lines.
        """
        startRowKey = '{0}_0'.format(rowKey)
        endRowKey = '{0}_9999'.format(rowKey)
        filterExpression = "PartitionKey eq '{0}' and \
                        RowKey gt '{1}' and \
                        RowKey lt '{2}'"     \
                            .format(partitionKey, startRowKey, endRowKey)
        return self.tableService.query_entities(self.tableName,
                                                filter=filterExpression)
Пример #7
0
def _unmerge_resources(table_client: azuretable.TableService) -> None:
    """Remove node from the image table
    :param azuretable.TableService table_client: table client

    Scans the image-table partition and hands every entity whose 'Resource'
    carries the current container-mode prefix to _unmerge_resource.
    """
    logger.debug(
        'removing node {} from the image table for container mode {}'.format(
            _NODEID, _CONTAINER_MODE.name.lower()))
    try:
        entities = table_client.query_entities(
            _STORAGE_CONTAINERS['table_images'],
            filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
    except azure.common.AzureMissingResourceHttpError:
        # table/partition missing: nothing to unmerge
        entities = []
    # only entities belonging to the current container mode are touched
    mode_prefix = _CONTAINER_MODE.name.lower() + ':'
    for entity in entities:
        if entity['Resource'].startswith(mode_prefix):
            _unmerge_resource(table_client, entity)
    logger.info(
        'node {} removed from the image table for container mode {}'.format(
            _NODEID, _CONTAINER_MODE.name.lower()))
Пример #8
0
def distribute_global_resources(loop: asyncio.BaseEventLoop,
                                blob_client: azureblob.BlockBlobService,
                                table_client: azuretable.TableService) -> None:
    """Distribute global services/resources
    :param asyncio.BaseEventLoop loop: event loop
    :param azureblob.BlockBlobService blob_client: blob client
    :param azuretable.TableService table_client: table client

    Enqueues every global resource matching the current container mode onto
    _DIRECTDL_QUEUE, then runs download_monitor_async until completion.
    """
    # remove node from the image table because cascade relies on it to know
    # when its work is done
    _unmerge_resources(table_client)
    # get globalresources from table
    try:
        entities = table_client.query_entities(
            _STORAGE_CONTAINERS['table_globalresources'],
            filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
    except azure.common.AzureMissingResourceHttpError:
        # table/partition missing: treat as no global resources
        entities = []
    nentities = 0
    for ent in entities:
        resource = ent['Resource']
        grtype, image = get_container_image_name_from_resource(resource)
        if grtype == _CONTAINER_MODE.name.lower():
            nentities += 1
            # queue the resource for direct download
            _DIRECTDL_QUEUE.put(resource)
            # remember the signing-key fingerprint for this image, if any
            key_fingerprint = ent.get('KeyFingerprint', None)
            if key_fingerprint is not None:
                _DIRECTDL_KEY_FINGERPRINT_DICT[image] = key_fingerprint
        else:
            logger.info('skipping resource {}:'.format(resource) +
                        'not matching container mode "{}"'.format(
                            _CONTAINER_MODE.name.lower()))
    if nentities == 0:
        logger.info('no global resources specified')
        return
    logger.info('{} global resources matching container mode "{}"'.format(
        nentities, _CONTAINER_MODE.name.lower()))
    # run async func in loop
    loop.run_until_complete(
        download_monitor_async(loop, blob_client, table_client, nentities))
Пример #9
0
class AzureStorage():
    """Accessor for the logo-detection table and blob storage account."""

    def __init__(self, container=None):
        # NOTE(review): the `container` argument is accepted but unused; the
        # container name is fixed to "input" below -- confirm intent.
        self.AZURE_STORAGE_ACCOUNT = 'logodetectionstorage'
        # SECURITY: hard-coded storage account key checked into source; it
        # should be rotated and loaded from configuration/environment.
        self.AZURE_STORAGE_KEY  = 'jPJyzct+8WD1lKU5M+ZwDflWUGRu+YBpH8n/3Z6qR7WD7uc3HV2U1rtiQKesLRq2tU3jtXIe26RklAYdKzoydA=='
        self.table_service = TableService(account_name=self.AZURE_STORAGE_ACCOUNT,
                                          account_key=self.AZURE_STORAGE_KEY)
        self.blob_service = BlockBlobService(account_name=self.AZURE_STORAGE_ACCOUNT,
                                             account_key=self.AZURE_STORAGE_KEY)
        self.container = "input"
        self.table_list = []  # everything fetched from the table for this logo
        self.logo = ""        # table name of the last successful retrieve_table

    def query(self, tableName, partitionKey, rowKey):
        """Return the single entity identified by partition and row key."""
        return self.table_service.get_entity(tableName, partitionKey, rowKey)

    def retrieve_table(self, tableName):
        """Fetch all entities of *tableName* sorted by 'has_logo' descending.

        Returns None when the query fails (best-effort, e.g. missing table);
        otherwise appends into (and returns) self.table_list.
        """
        try:
            tasks = self.table_service.query_entities(tableName)
        except Exception:  # narrowed from bare except; still best-effort
            return None
        self.logo = tableName
        self.table_list.extend(tasks)
        self.table_list = sorted(self.table_list,
                                 key=lambda k: k['has_logo'], reverse=True)
        return self.table_list

    def download_blob(self, path, logoName):
        """Download the image blob for *logoName* to the local file test.jpeg.

        NOTE(review): the incoming *path* argument is immediately overwritten
        and the destination filename is hard-coded -- confirm intent.
        """
        path = "images/" + logoName
        self.blob_service.get_blob_to_path(self.container, path, "test.jpeg")

    def exists(self, name):
        """Return True if blob *name* exists in the container."""
        try:
            self.blob_service.get_blob_properties(self.container, name)
            return True
        except Exception:  # narrowed from bare except; missing blob -> False
            return False
Пример #10
0
    def test_account_sas(self):
        """An account-level READ SAS must be able to query table entities."""
        # SAS URL is calculated from storage key, so this test runs live only
        if TestMode.need_recording_file(self.test_mode):
            return

        # Arrange: two rows sharing a partition and text value
        table_name = self._create_table()
        entity = {
            'PartitionKey': 'test',
            'RowKey': 'test1',
            'text': 'hello',
        }
        self.ts.insert_entity(table_name, entity)

        entity['RowKey'] = 'test2'
        self.ts.insert_entity(table_name, entity)

        # object-level READ token valid from one minute ago to one hour ahead
        token = self.ts.generate_account_shared_access_signature(
            ResourceTypes.OBJECT,
            AccountPermissions.READ,
            datetime.utcnow() + timedelta(hours=1),
            datetime.utcnow() - timedelta(minutes=1),
        )

        # Act: query through a client that holds only the SAS token
        service = TableService(
            account_name=self.settings.STORAGE_ACCOUNT_NAME,
            sas_token=token,
        )
        self._set_test_proxy(service, self.settings)
        entities = list(service.query_entities(table_name))

        # Assert: both rows visible with their original text
        self.assertEqual(len(entities), 2)
        self.assertEqual(entities[0].text, 'hello')
        self.assertEqual(entities[1].text, 'hello')
Пример #11
0
def coalesce_data(table_client: azuretable.TableService) -> tuple:
    """Coalesce perf data from table
    :param azure.cosmosdb.table.TableService table_client: table client
    :rtype: tuple
    :return: (data, sizes, offer, sku) -- note the first element is the full
        per-node event dict (each node carrying a 'start' timestamp and a
        'timing' dict), not just the timings
    """
    print('graphing data from {} with pk={}'.format(_TABLE_NAME,
                                                    _PARTITION_KEY))
    entities = table_client.query_entities(
        _TABLE_NAME, filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
    data = {}
    # process events: bucket rows as data[nodeid][event] -> list of
    # {'timestamp', 'message'}; RowKey holds a unix timestamp string
    for ent in entities:
        nodeid = ent['NodeId']
        event = ent['Event']
        if nodeid not in data:
            data[nodeid] = {}
        if event not in data[nodeid]:
            data[nodeid][event] = []
        ev = {
            'timestamp': datetime.datetime.fromtimestamp(float(ent['RowKey'])),
        }
        try:
            ev['message'] = _parse_message(event, ent['Message'])
        except KeyError:
            # unparseable/unknown message payload: keep the event, drop payload
            ev['message'] = None
        data[nodeid][event].append(ev)
    del entities
    sizes = {}
    offer = None
    sku = None
    for nodeid in data:
        # offer/sku taken from the first node seen; assumes they are uniform
        # across the pool -- TODO confirm
        if offer is None:
            offer = data[nodeid]['nodeprep:start'][0]['message']['offer']
            sku = data[nodeid]['nodeprep:start'][0]['message']['sku']
        # calculate dt timings
        timing = {
            'nodeprep':
            _compute_delta_t(data, nodeid, 'nodeprep:start', 0, 'nodeprep:end',
                             0),
            'global_resources_loaded':
            _compute_delta_t(data, nodeid, 'cascade:start', 0,
                             'cascade:gr-done', 0),
        }
        try:
            timing['docker_install'] = _compute_delta_t(
                data, nodeid, 'nodeprep:start', 0, 'privateregistry:start', 0)
        except KeyError:
            # when no private registry setup exists, install time is
            # equivalent to nodeprep time
            timing['docker_install'] = timing['nodeprep']
        try:
            timing['private_registry_setup'] = _compute_delta_t(
                data, nodeid, 'privateregistry:start', 0,
                'privateregistry:end', 0)
        except KeyError:
            timing['private_registry_setup'] = 0
        try:
            timing['docker_shipyard_container_pull'] = _compute_delta_t(
                data, nodeid, 'shipyard:pull-start', 0, 'shipyard:pull-end', 0)
        except KeyError:
            timing['docker_shipyard_container_pull'] = 0
        # record the node's start time, then drop the raw events that have
        # been folded into `timing`
        data[nodeid]['start'] = data[nodeid]['nodeprep:start'][0][
            'timestamp'].timestamp()
        data[nodeid].pop('nodeprep:start')
        data[nodeid].pop('nodeprep:end')
        data[nodeid].pop('privateregistry:start', None)
        data[nodeid].pop('privateregistry:end', None)
        data[nodeid].pop('shipyard:pull-start', None)
        data[nodeid].pop('shipyard:pull-end', None)
        data[nodeid].pop('cascade:start')
        data[nodeid].pop('cascade:gr-done')
        # pair up cascade start/end events into per-resource timings/sizes
        for event in data[nodeid]:
            # print(event, data[nodeid][event])
            if event == 'cascade:pull-start':
                _diff_events(data, nodeid, event, 'cascade:pull-end', timing,
                             'pull:', sizes)
            elif event == 'cascade:save-start':
                _diff_events(data, nodeid, event, 'cascade:save-end', timing,
                             'save:', sizes)
            elif event == 'cascade:torrent-start':
                _diff_events(data, nodeid, event, 'cascade:load-start', timing,
                             'torrent:')
            elif event == 'cascade:load-start':
                _diff_events(data, nodeid, event, 'cascade:load-end', timing,
                             'load:', sizes)
        data[nodeid].pop('cascade:pull-start', None)
        data[nodeid].pop('cascade:pull-end', None)
        data[nodeid].pop('cascade:save-start', None)
        data[nodeid].pop('cascade:save-end', None)
        data[nodeid].pop('cascade:torrent-start', None)
        data[nodeid].pop('cascade:load-start', None)
        data[nodeid].pop('cascade:load-end', None)
        data[nodeid]['timing'] = timing
    return data, sizes, offer, sku
Пример #12
0
class EventRepository:
    """Table-storage repository for scraped events, partitioned by date."""

    events_by_date_table = "eventsByDate"
    event_duplicates_table = "eventDuplicates"

    def __init__(self, connection_string=None):
        # Defaults to the local storage emulator; the embedded account key is
        # the well-known public emulator key, not a secret.
        if not connection_string:
            connection_string = "AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;DefaultEndpointsProtocol=http;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;TableEndpoint=http://127.0.0.1:10002/devstoreaccount1;"
        self.table_client = TableService(connection_string=connection_string,
                                         is_emulated=True)

    def list_events_by_date(self, dt: datetime.date) -> List[dict]:
        """Yield events stored under *dt*'s partition, unpacking the
        JSON/CSV/newline-packed columns back into Python structures.

        NOTE(review): despite the List[dict] annotation this is a generator
        function (it yields); consider Iterator[dict].
        """
        pk = self._date_to_pk(dt)
        for event in self.table_client.query_entities(
                self.events_by_date_table,
                filter="PartitionKey eq '%s'" % (pk, )):
            if 'place' in event:
                event['place'] = json.loads(event['place'])
            if 'dates' in event:
                event['dates'] = json.loads(event['dates'])
            if 'raw_dates' in event:
                event['raw_dates'] = event['raw_dates'].split('\n')
            if 'tags' in event:
                event['tags'] = event['tags'].split(',')
            if 'type' in event:
                event['type'] = event['type'].split(',')
            if 'cost' in event:
                event['cost'] = event['cost'].split(',')
            yield event

    def remove_rows(self, dt, row_keys):
        """Delete each of *row_keys* from *dt*'s partition, one call per key."""
        pk = self._date_to_pk(dt)
        for key in row_keys:
            self.table_client.delete_entity(self.events_by_date_table, pk, key)

    def save_events_by_date(self,
                            events: List[dict],
                            dt: datetime.date,
                            table_name=events_by_date_table):
        """Upsert *events* into *table_name*, packing structured fields into
        strings; also records each partition key under the 'PARTITIONS' row.

        Note: mutates the passed event dicts in place (packing and key
        assignment).  NOTE(review): the RowKey fallback uses hash(), which is
        salted per process (PYTHONHASHSEED), so the same event gets a new row
        on each run -- confirm whether that duplication is acceptable.
        """
        partition_keys = set()
        for event in events:
            if 'PartitionKey' not in event:
                if dt:
                    event['PartitionKey'] = self._date_to_pk(dt)
                else:
                    event['PartitionKey'] = str(datetime.date.today().year)

            if 'RowKey' not in event:
                full_text = event['title'] + "\n" + event[
                    'short_description'] + "\n" + event['description']
                event['RowKey'] = str(hash(full_text))

            # pack structured fields into storable string columns
            event['place'] = json.dumps(event['place'], ensure_ascii=False)
            event['dates'] = json.dumps(event['dates'])
            event['tags'] = ",".join(event['tags'])
            if 'type' in event:
                event['type'] = ",".join(event['type'])
            if "raw_dates" in event:
                event['raw_dates'] = "\n".join(event['raw_dates'])
            if 'cost' in event and event['cost']:
                event['cost'] = ",".join(str(c) for c in event['cost'])
            else:
                event['cost'] = None
            self.table_client.insert_or_replace_entity(table_name, event)
            partition_keys.add(event['PartitionKey'])

        # index of all partitions seen, stored under a sentinel partition
        for pk in partition_keys:
            self.table_client.insert_or_replace_entity(table_name, {
                "PartitionKey": "PARTITIONS",
                "RowKey": pk
            })

    def save_events_json(self, events: List[dict]):
        """Group raw events by date and persist each group."""
        grouped_events = group_by_dates(events)
        for dt, events in grouped_events.items():
            self.save_events_by_date(events, dt)

    def save_verified_events(self, events: List[Event]):
        """Store manually verified events keyed by text hash.

        NOTE(review): pk is a float (timestamp % 255) stringified, and the
        RowKey again relies on process-salted hash() -- confirm stability
        requirements.
        """
        pk = datetime.datetime.now().timestamp() % 255
        for event in events:
            event_description = event.to_str()
            event_hash = hash(event_description)
            self.table_client.insert_or_replace_entity(
                "verifiedEvents", {
                    "PartitionKey": str(pk),
                    "RowKey": str(event_hash),
                    "Text": event_description,
                    "Labels": ",".join(event.event_tags)
                })

    @staticmethod
    def _date_to_pk(dt: datetime.date):
        """Map a date to its partition key, e.g. 2020-01-02 -> '2020_1_2'."""
        return "%d_%d_%d" % (dt.year, dt.month, dt.day)
Пример #13
0
def _merge_resource(table_client: azuretable.TableService, resource: str,
                    nglobalresources: int) -> None:
    """Merge resource to the image table
    :param azuretable.TableService table_client: table client
    :param str resource: resource to add to the image table
    :param int nglobalresources: number of global resources

    Adds this node to the resource's VmList via optimistic concurrency
    (etag + retry on HTTP 412), then checks whether all global resources for
    the current container mode are loaded and, if so, sets _GR_DONE.
    """
    # merge resource to the image table
    entity = {
        'PartitionKey': _PARTITION_KEY,
        'RowKey': compute_resource_hash(resource),
        'Resource': resource,
        'VmList0': _NODEID,
    }
    logger.debug('merging entity {} to the image table'.format(entity))
    try:
        table_client.insert_entity(_STORAGE_CONTAINERS['table_images'],
                                   entity=entity)
    except azure.common.AzureConflictHttpError:
        # entity already exists: merge our node id into its VmList under
        # optimistic concurrency
        while True:
            entity = table_client.get_entity(
                _STORAGE_CONTAINERS['table_images'], entity['PartitionKey'],
                entity['RowKey'])
            # merge VmList into entity
            evms = []
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                if prop in entity:
                    evms.extend(entity[prop].split(','))
            if _NODEID in evms:
                # another writer already recorded this node
                break
            evms.append(_NODEID)
            # redistribute the id list across the fixed VmListN properties
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                start = i * _MAX_VMLIST_IDS_PER_PROPERTY
                end = start + _MAX_VMLIST_IDS_PER_PROPERTY
                if end > len(evms):
                    end = len(evms)
                if start < end:
                    entity[prop] = ','.join(evms[start:end])
                else:
                    entity[prop] = None
            # etag must be supplied separately, not as an entity property
            etag = entity['etag']
            entity.pop('etag')
            try:
                table_client.merge_entity(_STORAGE_CONTAINERS['table_images'],
                                          entity=entity,
                                          if_match=etag)
                break
            except azure.common.AzureHttpError as ex:
                # 412 Precondition Failed: concurrent update, re-read and retry
                if ex.status_code != 412:
                    raise
    logger.info('entity {} merged to the image table'.format(entity))
    global _GR_DONE
    if not _GR_DONE:
        # count how many global resources of our mode this node has loaded
        try:
            entities = table_client.query_entities(
                _STORAGE_CONTAINERS['table_images'],
                filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
        except azure.common.AzureMissingResourceHttpError:
            entities = []
        count = 0
        for entity in entities:
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                mode_prefix = _CONTAINER_MODE.name.lower() + ':'
                if (prop in entity and _NODEID in entity[prop]
                        and entity['Resource'].startswith(mode_prefix)):
                    count += 1
        if count == nglobalresources:
            _record_perf('gr-done',
                         'nglobalresources={}'.format(nglobalresources))
            _GR_DONE = True
            logger.info(
                'all {} global resources of container mode "{}" loaded'.format(
                    nglobalresources, _CONTAINER_MODE.name.lower()))
        else:
            logger.info(
                '{}/{} global resources of container mode "{}" loaded'.format(
                    count, nglobalresources, _CONTAINER_MODE.name.lower()))
class Azure:
    """Azure management-plane and table-storage helpers for a hub that
    tracks firewall VMs in VM scale sets across spoke resource groups.
    Per-VM records are persisted in a table (referred to here as the
    Cosmos/VMSS table)."""

    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 hub,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        """Create management clients and the table-service client.

        :param cred: Azure credentials for the management clients
        :param subs_id: subscription id
        :param hub: hub resource group name (used to look up storage keys)
        :param vmss_rg_name: spoke resource group containing the VMSS
        :param vmss_name: VMSS name; its table name is derived below
        :param storage: storage account name hosting the VMSS table
        :param pan_handle: Panorama handler (opaque to this class)
        :param logger: optional logger; NOTE(review): the except path calls
            self.logger.error even when logger is None -- confirm callers
            always pass one.
        """
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = hub
        self.storage_name = storage
        self.panorama_handler = pan_handle
        # table names only allow alphanumerics; strip everything else
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            # first account key authenticates the table service
            store_keys = self.store_client.storage_accounts.list_keys(
                hub, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        # Start -> List out all RGs and identify new spokes to mark them with tags.
        # Look for Resource Groups (RGs) which do not have tags or does not have a
        # a tag named "PANORAMA_PROGRAMMED".
        # potential_new_spokes = [x.name for x in self.resource_client.resource_groups.list()\
        #                  if not x.tags or not x.tags.get(self.RG_RULE_PROGRAMMED_TAG, None)]

        # If the RG has a VMSS which has a tag named "PanoramaManaged" with a value
        # as Hub Resource Group name then we know that this is a new spoke that is
        # launched managed by the Hub and not yet programmed for NAT/Azure Instrumentation
        # key.
        # for rg in potential_new_spokes:
        #     fw_vm_list = [x for x in self.resource_client.resources.list_by_resource_group(rg)
        #                   if x.type == self.VMSS_TYPE and self.filter_vmss(rg, x.name)]
        #     if fw_vm_list:
        #         rg_params = {'location': self.resource_client.resource_groups.get(rg).location}
        #         rg_params.update(tags={
        #                                  self.RG_RULE_PROGRAMMED_TAG : 'No',
        #                                  self.HUB_MANAGED_TAG        : self.hub_name
        #                               })
        #         self.resource_client.resource_groups.create_or_update(rg, rg_params)
        #         self.logger.info("RG %s marked as a spoke managed by this hub %s" % (rg, self.hub_name))
        # End -> List out all RGs and identify new spokes to mark them with tags.

        # Populate the list of spokes managed by this Azure hub.
        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []
        # for rg in rg_list:
        #     if rg.tags and rg.tags.get(self.HUB_MANAGED_TAG, None) == self.hub_name:
        #         self.managed_spokes.append(rg.name)
        #         if rg.tags.get(self.RG_RULE_PROGRAMMED_TAG, 'Yes') == 'No':
        #             self.new_spokes.append(rg.name)
        # self.logger.debug('%s identified as spokes managed by %s' % (self.managed_spokes, self.hub_name))
        # if self.new_spokes:
        #     self.logger.info('%s identified as new spokes to be programmed by %s' % (self.new_spokes, self.hub_name))
        #
        #

    def filter_vmss(self, spoke, vmss_name):
        """Return True if the VMSS carries the hub-managed tag set to our hub."""
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def get_ilb_ip(self, spoke):
        """Return the private IP of the spoke's internal LB, or None if the
        ILB (named ILB_NAME) or its frontend config is not present yet."""
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the ILB IP Address from the spoke. The ILB address is always
            # hardcoded to be myPrivateILB.
            if resource.name == self.ILB_NAME and resource.type == self.ILB_TYPE:
                ilb_obj = self.network_client.load_balancers.get(
                    spoke, resource.name)
                ilb_frontend_cfg = ilb_obj.frontend_ip_configurations
                try:
                    ilb_private_ip = ilb_frontend_cfg[0].private_ip_address
                except IndexError as e:
                    self.logger.info("ILB is not setup yet in RG %s." % spoke)
                    return None
                return ilb_private_ip
        return None

    def get_appinsights_instr_key(self, spoke):
        """Return the instrumentation key of the spoke's AppInsights instance
        (any component with 'appinsights' in its name), or None."""
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the Appinsights instance where the custom metrics are being
            # published.
            if resource.type == self.APPINSIGHTS_TYPE and 'appinsights' in resource.name:
                appinsights_obj = self.resource_client.resources.get_by_id(
                    resource.id, '2014-04-01')
                instr_key = appinsights_obj.properties.get(
                    'InstrumentationKey', '')
                if not instr_key:
                    self.logger.info("InstrKey is not setup yet in %s." %
                                     spoke)
                    return None
                return instr_key
        return None

    def set_spoke_as_programmed(self, spoke):
        """Flip the spoke RG's PANORAMA_PROGRAMMED tag to 'Yes', preserving
        its other tags and location."""
        spoke_params = {
            'location':
            self.resource_client.resource_groups.get(spoke).location
        }
        spoke_tags = self.resource_client.resource_groups.get(spoke).tags
        spoke_tags[self.RG_RULE_PROGRAMMED_TAG] = 'Yes'
        spoke_params.update(tags=spoke_tags)
        self.resource_client.resource_groups.create_or_update(
            spoke, spoke_params)
        self.logger.info(
            "RG %s marked as programmed and spoke managed by this hub %s" %
            (spoke, self.hub_name))

    def create_worker_ready_tag(self, worker_name):
        """Tag a worker VM with WORKER_READY=Yes.

        NOTE(review): reads self.vmss_rg, which is never assigned in
        __init__ (only self.vmss_rg_name is) -- calling this as written
        raises AttributeError; confirm intended attribute.
        """
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        """Create the table if missing; return True on success/exists."""
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created succesfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        """Drop the whole table (all per-VM records)."""
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):
        """Return *vmss_name* if a VMSS of that name exists in *spoke*,
        else log and return None."""

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):
        """Return the first hub-managed VMSS name in *spoke*, or None."""

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):
        """Return the iterator of VM instances in the given VMSS."""

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):
        """Look up a VM record by (spoke, hostname); None if missing or on
        query failure."""

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed" % vm_hostname)
            return None
        else:
            # IF possible update status TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):
        """Insert a per-VM record; PartitionKey=spoke, RowKey=hostname.
        Returns True on success, False on insert failure (logged)."""

        vm = Entity()

        # PartitionKey is nothing but the spoke name
        vm.PartitionKey = spoke
        # RowKey is nothing but the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):
        """Return VM records; when *spoke* is given, a list of
        hostname/serial/name dicts for that spoke, else the raw query result."""

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name} \
                            for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):
        """Remove the per-VM record identified by (spoke, vm_name)."""

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
Пример #15
0
class Azure:
    """Helpers around the Azure management SDKs and a VMSS tracking table.

    Wraps resource/compute/network/storage clients plus a TableService used
    as a Cosmos-backed store of firewall VMs, keyed by spoke (PartitionKey)
    and VM hostname (RowKey).
    """

    # Tags used
    # NOTE(review): RG_RULE_PROGRAMMED_TAG is not referenced in this part of
    # the file — presumably consumed by rule-programming code elsewhere.
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    # A VMSS carrying this tag with value == hub name is managed by this hub;
    # see filter_vmss().
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types (as reported by the Azure resource listing API)
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    # Characters NOT allowed in table names; stripped out in __init__.
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 my_storage_rg,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        """Build the Azure management clients and the table-service handle.

        :param cred: Azure credentials object passed to every SDK client.
        :param subs_id: subscription id the clients operate on.
        :param my_storage_rg: resource group holding *storage* (only used to
            fetch the storage-account keys).
        :param vmss_rg_name: resource group of the VMSS; also used as the
            hub name and seeded into managed_spokes.
        :param vmss_name: VMSS name; used to derive the table name.
        :param storage: storage-account name backing the table store.
        :param pan_handle: Panorama handler kept for use by other methods.
        :param logger: logger instance. NOTE(review): the except clause below
            calls self.logger.error, so passing None will raise on failure —
            confirm callers always supply a logger.
        :raises Exception: re-raises any failure while constructing the
            clients or fetching the storage keys.
        """
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = vmss_rg_name
        self.storage_name = storage
        self.panorama_handler = pan_handle
        # Table names must be purely alphanumeric; strip everything else.
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                my_storage_rg, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        # Spokes managed by this hub; the hub's own RG is always included.
        # (Removed an unused `rg_list = resource_groups.list()` fetch whose
        # result was never consumed.)
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created succesfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed" % vm_hostname)
            return None
        else:
            # IF possible update status TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):
        """Insert a table entity describing one firewall VM.

        PartitionKey is the spoke name and RowKey is the VM hostname, so a
        VM is unique per spoke. Returns True on success, False when the
        insert fails (the failure is logged, not raised).
        """
        entity = Entity()
        entity.PartitionKey = spoke              # spoke name
        entity.RowKey = vm_details['hostname']   # VM name
        entity.name = vm_details['name']
        entity.serial_no = vm_details['serial']
        entity.ip_addr = vm_details['ip-address']
        entity.connected = vm_details['connected']
        entity.deactivated = vm_details['deactivated']
        entity.subs_id = self.subscription_id
        entity.delicensed_on = 'not applicable'
        entity.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, entity)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name} \
                            for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)