Example #1
File: table.py  Project: skymysky/onefuzz
def get_client(table: Optional[str] = None,
               account_id: Optional[str] = None) -> TableService:
    if account_id is None:
        account_id = os.environ["ONEFUZZ_FUNC_STORAGE"]

    logging.debug("getting table account: (account_id: %s)", account_id)
    name, key = get_storage_account_name_key(account_id)
    client = TableService(account_name=name, account_key=key)

    if table and not client.exists(table):
        logging.info("creating missing table %s", table)
        client.create_table(table, fail_on_exist=False)
    return client
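A minimal usage sketch for the helper above (the table name "Heartbeat" is hypothetical; assumes ONEFUZZ_FUNC_STORAGE points at a valid storage account):

# Lazily creates the table on first use, then writes a row keyed by
# (PartitionKey, RowKey). "Heartbeat" is a hypothetical table name.
client = get_client(table="Heartbeat")
client.insert_or_replace_entity("Heartbeat", {
    "PartitionKey": "proxy",       # logical grouping of rows
    "RowKey": "machine_001",       # unique within the partition
    "state": "running",
})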
Example #2
class azure_table:
    def __init__(self, table_name='HemoniDataTable'):
        connection_string = "**"

        self.table_client = TableService(connection_string=connection_string)
        self.table_name = table_name
        if not self.table_client.exists(table_name):
            self.table_client.create_table(table_name=table_name)

    def delete_table(self):
        self.table_client.delete_table(table_name=self.table_name)

    def insert_entity(self, entity):
        """
        When inserting an entity into a table, you must specify values for the
        PartitionKey and RowKey system properties. Together, these properties
        form the primary key and must be unique within the table. Both the
        PartitionKey and RowKey values must be string values; each key value may
        be up to 64 KB in size. If you are using an integer value for the key
        value, you should convert the integer to a fixed-width string, because
        they are canonically sorted. For example, you should convert the value
        1 to 0000001 to ensure proper sorting.
        :param entity:The entity to insert. Could be a dict or an entity object.
            Must contain a PartitionKey and a RowKey.
        :return: null
        """
        self.table_client.insert_or_replace_entity(table_name=self.table_name,
                                                   entity=entity)

    def get_entity(self, partition, row):
        """
        Get an entity from the specified table. Throws if the entity does not exist.
        :param partition:  The PartitionKey of the entity.
        :param row: The RowKey of the entity.
        :return:
        """
        return self.table_client.get_entity(self.table_name,
                                            partition_key=partition,
                                            row_key=row)
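A small illustration of the fixed-width key advice in the docstring above, assuming the elided connection string has been filled in (partition and field names are hypothetical):

# Zero-pad integer keys so that lexical ordering matches numeric ordering.
def make_row_key(n, width=7):
    return str(n).zfill(width)   # 1 -> '0000001'

table = azure_table(table_name='HemoniDataTable')
table.insert_entity({
    'PartitionKey': 'sensor-a',
    'RowKey': make_row_key(1),   # '0000001' sorts before '0000010'
    'value': 42,
})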
Example #3
class AzureJobStore(AbstractJobStore):
    """
    A job store that uses Azure's blob store for file storage and Table Service to store job info
    with strong consistency.
    """

    # Dots in container names should be avoided because container names are used in HTTPS bucket
    # URLs where they may interfere with the certificate common name. Since table names must be
    # alphanumeric, we use the string 'xx' as a separator instead.
    #
    containerNameRe = re.compile(r'^[a-z0-9][a-z0-9-]+[a-z0-9]$')

    # See https://msdn.microsoft.com/en-us/library/azure/dd135715.aspx
    #
    minContainerNameLen = 3
    maxContainerNameLen = 63
    maxNameLen = 10
    nameSeparator = 'xx'  # Table names must be alphanumeric
    # Length of a jobID - used to test if a stats file has been read already or not
    jobIDLength = len(str(uuid.uuid4()))

    def __init__(self, locator, jobChunkSize=maxAzureTablePropertySize):
        super(AzureJobStore, self).__init__()
        accountName, namePrefix = locator.split(':', 1)
        if self.nameSeparator in namePrefix:
            raise ValueError("Invalid name prefix '%s'. Name prefixes may not contain %s."
                             % (namePrefix, self.nameSeparator))
        if not self.containerNameRe.match(namePrefix):
            raise ValueError("Invalid name prefix '%s'. Name prefixes must contain only digits, "
                             "hyphens or lower-case letters and must not start or end in a "
                             "hyphen." % namePrefix)
        # Reserve room for the separator and the longest name suffix
        maxPrefixLen = self.maxContainerNameLen - self.maxNameLen - len(self.nameSeparator)
        if len(namePrefix) > maxPrefixLen:
            raise ValueError("Invalid name prefix '%s'. Name prefixes may not be longer than %i "
                             "characters." % (namePrefix, maxPrefixLen))
        self.locator = locator
        self.jobChunkSize = jobChunkSize
        self.accountKey = _fetchAzureAccountKey(accountName)
        self.accountName = accountName
        # Table names have strict requirements in Azure
        self.namePrefix = self._sanitizeTableName(namePrefix)
        # These are the main API entry points.
        self.tableService = TableService(account_key=self.accountKey, account_name=accountName)
        self.blobService = BlockBlobService(account_key=self.accountKey, account_name=accountName)
        # Serialized jobs table
        self.jobItems = None
        # Job<->file mapping table
        self.jobFileIDs = None
        # Container for all shared and unshared files
        self.files = None
        # Stats and logging strings
        self.statsFiles = None
        # File IDs that contain stats and logging strings
        self.statsFileIDs = None

    @property
    def keyPath(self):
        return self.config.cseKey

    def initialize(self, config):
        if self._jobStoreExists():
            raise JobStoreExistsException(self.locator)
        logger.debug("Creating job store at '%s'" % self.locator)
        self._bind(create=True)
        super(AzureJobStore, self).initialize(config)

    def resume(self):
        if not self._jobStoreExists():
            raise NoSuchJobStoreException(self.locator)
        logger.debug("Using existing job store at '%s'" % self.locator)
        self._bind(create=False)
        super(AzureJobStore, self).resume()

    def destroy(self):
        self._bind()
        for name in 'jobItems', 'jobFileIDs', 'files', 'statsFiles', 'statsFileIDs':
            resource = getattr(self, name)
            if resource is not None:
                if isinstance(resource, AzureTable):
                    resource.delete_table()
                elif isinstance(resource, AzureBlobContainer):
                    resource.delete_container()
                else:
                    assert False
                setattr(self, name, None)

    def _jobStoreExists(self):
        """
        Checks if job store exists by querying the existence of the statsFileIDs table. Note that
        this is the last component that is deleted in :meth:`.destroy`.
        """
        for attempt in retry_azure():
            with attempt:
                try:
                    exists = self.tableService.exists(table_name=self._qualify('statsFileIDs'))
                except AzureMissingResourceHttpError as e:
                    if e.status_code == 404:
                        return False
                    else:
                        raise
                else:
                    return exists

    def _bind(self, create=False):
        table = self._bindTable
        container = self._bindContainer
        for name, binder in (('jobItems', table),
                             ('jobFileIDs', table),
                             ('files', container),
                             ('statsFiles', container),
                             ('statsFileIDs', table)):
            if getattr(self, name) is None:
                setattr(self, name, binder(self._qualify(name), create=create))

    def _qualify(self, name):
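        # E.g. with namePrefix 'myprefix', _qualify('jobItems') returns
        # 'myprefixxxjobitems' ('xx' being the separator defined above).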
        return self.namePrefix + self.nameSeparator + name.lower()

    def jobs(self):

        # How many jobs have we done?
        total_processed = 0

        for jobEntity in self.jobItems.query_entities():
            # Process the items in the page
            yield AzureJob.fromEntity(jobEntity)
            total_processed += 1

            if total_processed % 1000 == 0:
                # Produce some feedback for the user, because this can take
                # a long time on, for example, Azure
                logger.debug("Processed %d total jobs" % total_processed)

        logger.debug("Processed %d total jobs" % total_processed)

    def create(self, jobNode):
        jobStoreID = self._newJobID()
        job = AzureJob.fromJobNode(jobNode, jobStoreID, self._defaultTryCount())
        entity = job.toEntity(chunkSize=self.jobChunkSize)
        self.jobItems.insert_entity(entity=entity)
        return job

    def exists(self, jobStoreID):
        return self.jobItems.get_entity(row_key=str(jobStoreID)) is not None

    def load(self, jobStoreID):
        jobEntity = self.jobItems.get_entity(row_key=str(jobStoreID))
        if jobEntity is None:
            raise NoSuchJobException(jobStoreID)
        return AzureJob.fromEntity(jobEntity)

    def update(self, job):
        self.jobItems.update_entity(entity=job.toEntity(chunkSize=self.jobChunkSize))

    def delete(self, jobStoreID):
        try:
            self.jobItems.delete_entity(row_key=str(jobStoreID))
        except AzureMissingResourceHttpError:
            # Job deletion is idempotent, and this job has been deleted already
            return
        filterString = "PartitionKey eq '%s'" % jobStoreID
        for fileEntity in self.jobFileIDs.query_entities(filter=filterString):
            jobStoreFileID = fileEntity.RowKey
            self.deleteFile(jobStoreFileID)

    def getEnv(self):
        return dict(AZURE_ACCOUNT_KEY=self.accountKey)

    class BlobInfo(namedtuple('BlobInfo', ('account', 'container', 'name'))):
        @property
        @memoize
        def service(self):
            return BlockBlobService(account_name=self.account,
                                    account_key=_fetchAzureAccountKey(self.account))

    @classmethod
    def getSize(cls, url):
        blobInfo = cls._parseWasbUrl(url)
        blob = blobInfo.service.get_blob_properties(blobInfo.container, blobInfo.name)
        return blob.properties.content_length

    @classmethod
    def _readFromUrl(cls, url, writable):
        blob = cls._parseWasbUrl(url)
        for attempt in retry_azure():
            with attempt:
                blob.service.get_blob_to_stream(container_name=blob.container,
                                                blob_name=blob.name,
                                                stream=writable)

    @classmethod
    def _writeToUrl(cls, readable, url):
        blob = cls._parseWasbUrl(url)
        blob.service.create_blob_from_stream(container_name=blob.container,
                                             blob_name=blob.name,
                                             max_connections=1,
                                             stream=readable)

    @classmethod
    def _parseWasbUrl(cls, url):
        """
        :param urlparse.ParseResult url: the parsed wasb:// or wasbs:// URL
        :rtype: AzureJobStore.BlobInfo
        """
        assert url.scheme in ('wasb', 'wasbs')
        try:
            container, account = url.netloc.split('@')
        except ValueError:
            raise InvalidImportExportUrlException(url)
        suffix = '.blob.core.windows.net'
        if account.endswith(suffix):
            account = account[:-len(suffix)]
        else:
            raise InvalidImportExportUrlException(url)
        assert url.path[0] == '/'
        return cls.BlobInfo(account=account, container=container, name=url.path[1:])

    @classmethod
    def _supportsUrl(cls, url, export=False):
        return url.scheme.lower() in ('wasb', 'wasbs')

    def writeFile(self, localFilePath, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        self.updateFile(jobStoreFileID, localFilePath)
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    def updateFile(self, jobStoreFileID, localFilePath):
        with open(localFilePath, 'rb') as read_fd:
            with self._uploadStream(jobStoreFileID, self.files) as write_fd:
                while True:
                    buf = read_fd.read(self._maxAzureBlockBytes)
                    write_fd.write(buf)
                    if len(buf) == 0:
                        break

    def readFile(self, jobStoreFileID, localFilePath, symlink=False):
        try:
            with self._downloadStream(jobStoreFileID, self.files) as read_fd:
                with open(localFilePath, 'wb') as write_fd:
                    while True:
                        buf = read_fd.read(self._maxAzureBlockBytes)
                        write_fd.write(buf)
                        if not buf:
                            break
        except AzureMissingResourceHttpError:
            raise NoSuchFileException(jobStoreFileID)

    def deleteFile(self, jobStoreFileID):
        try:
            self.files.delete_blob(blob_name=str(jobStoreFileID))
            self._dissociateFileFromJob(jobStoreFileID)
        except AzureMissingResourceHttpError:
            pass

    def fileExists(self, jobStoreFileID):
        # As Azure doesn't have a blob_exists method (at least in the
        # python API) we just try to download the metadata, and hope
        # the metadata is small so the call will be fast.
        try:
            self.files.get_blob_metadata(blob_name=str(jobStoreFileID))
            return True
        except AzureMissingResourceHttpError:
            return False

    @contextmanager
    def writeFileStream(self, jobStoreID=None):
        # TODO: this (and all stream methods) should probably use the
        # Append Blob type, but that is not currently supported by the
        # Azure Python API.
        jobStoreFileID = self._newFileID()
        with self._uploadStream(jobStoreFileID, self.files) as fd:
            yield fd, jobStoreFileID
        self._associateFileWithJob(jobStoreFileID, jobStoreID)

    @contextmanager
    def updateFileStream(self, jobStoreFileID):
        with self._uploadStream(jobStoreFileID, self.files, checkForModification=True) as fd:
            yield fd

    def getEmptyFileStoreID(self, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        with self._uploadStream(jobStoreFileID, self.files) as _:
            pass
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    @contextmanager
    def readFileStream(self, jobStoreFileID):
        if not self.fileExists(jobStoreFileID):
            raise NoSuchFileException(jobStoreFileID)
        with self._downloadStream(jobStoreFileID, self.files) as fd:
            yield fd

    @contextmanager
    def writeSharedFileStream(self, sharedFileName, isProtected=None):
        assert self._validateSharedFileName(sharedFileName)
        sharedFileID = self._newFileID(sharedFileName)
        with self._uploadStream(sharedFileID, self.files, encrypted=isProtected) as fd:
            yield fd

    @contextmanager
    def readSharedFileStream(self, sharedFileName):
        assert self._validateSharedFileName(sharedFileName)
        sharedFileID = self._newFileID(sharedFileName)
        if not self.fileExists(sharedFileID):
            raise NoSuchFileException(sharedFileID)
        with self._downloadStream(sharedFileID, self.files) as fd:
            yield fd

    def writeStatsAndLogging(self, statsAndLoggingString):
        # TODO: would be a great use case for the append blobs, once implemented in the Azure SDK
        jobStoreFileID = self._newFileID()
        encrypted = self.keyPath is not None
        if encrypted:
            statsAndLoggingString = encryption.encrypt(statsAndLoggingString, self.keyPath)
        self.statsFiles.create_blob_from_text(blob_name=str(jobStoreFileID),
                                              text=statsAndLoggingString,
                                              metadata=dict(encrypted=str(encrypted)))
        self.statsFileIDs.insert_entity(entity={'RowKey': jobStoreFileID})

    def readStatsAndLogging(self, callback, readAll=False):
        suffix = '_old'
        numStatsFiles = 0
        for attempt in retry_azure():
            with attempt:
                for entity in self.statsFileIDs.query_entities():
                    jobStoreFileID = entity.RowKey
                    hasBeenRead = len(jobStoreFileID) > self.jobIDLength
                    if not hasBeenRead:
                        with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
                            callback(fd)
                        # Mark this entity as read by appending the suffix
                        self.statsFileIDs.insert_entity(entity={'RowKey': jobStoreFileID + suffix})
                        self.statsFileIDs.delete_entity(row_key=str(jobStoreFileID))
                        numStatsFiles += 1
                    elif readAll:
                        # Strip the suffix to get the original ID
                        jobStoreFileID = jobStoreFileID[:-len(suffix)]
                        with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
                            callback(fd)
                        numStatsFiles += 1
        return numStatsFiles

    _azureTimeFormat = "%Y-%m-%dT%H:%M:%SZ"

    def getPublicUrl(self, jobStoreFileID):
        try:
            self.files.get_blob_properties(blob_name=str(jobStoreFileID))
        except AzureMissingResourceHttpError:
            raise NoSuchFileException(jobStoreFileID)
        startTime = (datetime.utcnow() - timedelta(minutes=5))
        endTime = datetime.utcnow() + self.publicUrlExpiration
        sas_token = self.files.generate_blob_shared_access_signature(blob_name=str(jobStoreFileID),
                                                                     permission=BlobPermissions.READ,
                                                                     start=startTime,
                                                                     expiry=endTime)
        return self.files.make_blob_url(blob_name=str(jobStoreFileID)) + '?' + sas_token

    def getSharedPublicUrl(self, sharedFileName):
        jobStoreFileID = self._newFileID(sharedFileName)
        return self.getPublicUrl(jobStoreFileID)

    def _newJobID(self):
        # raw UUIDs don't work for Azure property names because the '-' character is disallowed.
        return str(uuid.uuid4()).replace('-', '_')

    # A dummy job ID under which all shared files are stored.
    sharedFileJobID = uuid.UUID('891f7db6-e4d9-4221-a58e-ab6cc4395f94')

    def _newFileID(self, sharedFileName=None):
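        # Anonymous files get a random uuid4; shared file names map
        # deterministically to IDs via uuid5, so the same name always
        # yields the same file ID.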
        if sharedFileName is None:
            ret = str(uuid.uuid4())
        else:
            ret = str(uuid.uuid5(self.sharedFileJobID, sharedFileName))
        return ret.replace('-', '_')

    def _associateFileWithJob(self, jobStoreFileID, jobStoreID=None):
        if jobStoreID is not None:
            self.jobFileIDs.insert_entity(entity={'PartitionKey': EntityProperty('Edm.String', jobStoreID),
                                                  'RowKey': EntityProperty('Edm.String', jobStoreFileID)})

    def _dissociateFileFromJob(self, jobStoreFileID):
        entities = list(self.jobFileIDs.query_entities(filter="RowKey eq '%s'" % jobStoreFileID))
        if entities:
            assert len(entities) == 1
            jobStoreID = entities[0].PartitionKey
            self.jobFileIDs.delete_entity(partition_key=str(jobStoreID), row_key=str(jobStoreFileID))

    def _bindTable(self, tableName, create=False):
        for attempt in retry_azure():
            with attempt:
                try:
                    exists = self.tableService.exists(table_name=tableName)
                except AzureMissingResourceHttpError as e:
                    if e.status_code != 404:
                        raise
                else:
                    if exists:
                        return AzureTable(self.tableService, tableName)
                if create:
                    self.tableService.create_table(tableName)
                    return AzureTable(self.tableService, tableName)
                else:
                    return None

    def _bindContainer(self, containerName, create=False):
        for attempt in retry_azure():
            with attempt:
                try:
                    self.blobService.get_container_properties(containerName)
                except AzureMissingResourceHttpError as e:
                    if e.status_code == 404:
                        if create:
                            self.blobService.create_container(containerName)
                        else:
                            return None
                    else:
                        raise
        return AzureBlobContainer(self.blobService, containerName)

    def _sanitizeTableName(self, tableName):
        """
        Azure table names must start with a letter and be alphanumeric.

        This will never cause a collision if uuids are used, but
        otherwise may not be safe.
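
        E.g. 'job-store-1' becomes 'ajobstore1': non-alphanumeric characters
        are dropped and 'a' is prepended so the name starts with a letter.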
        """
        return 'a' + ''.join([x for x in tableName if x.isalnum()])

    # Maximum bytes that can be in any block of an Azure block blob
    # https://github.com/Azure/azure-storage-python/blob/4c7666e05a9556c10154508335738ee44d7cb104/azure/storage/blob/blobservice.py#L106
    _maxAzureBlockBytes = 4 * 1024 * 1024

    @contextmanager
    def _uploadStream(self, jobStoreFileID, container, checkForModification=False, encrypted=None):
        """
        :param encrypted: True to enforce encryption (will raise exception unless key is set),
        False to prevent encryption or None to encrypt if key is set.
        """
        if checkForModification:
            try:
                expectedVersion = container.get_blob_properties(blob_name=str(jobStoreFileID)).properties.etag
            except AzureMissingResourceHttpError:
                expectedVersion = None

        if encrypted is None:
            encrypted = self.keyPath is not None
        elif encrypted:
            if self.keyPath is None:
                raise RuntimeError('Encryption requested but no key was provided')

        maxBlockSize = self._maxAzureBlockBytes
        if encrypted:
            # There is a small overhead for encrypted data.
            maxBlockSize -= encryption.overhead

        store = self

        class UploadPipe(WritablePipe):

            def readFrom(self, readable):
                blocks = []
                try:
                    while True:
                        buf = readable.read(maxBlockSize)
                        if len(buf) == 0:
                            # We're safe to break here even if we never read anything, since
                            # putting an empty block list creates an empty blob.
                            break
                        if encrypted:
                            buf = encryption.encrypt(buf, store.keyPath)
                        blockID = store._newFileID()
                        container.put_block(blob_name=str(jobStoreFileID),
                                            block=buf,
                                            block_id=blockID)
                        blocks.append(BlobBlock(blockID))
                except:
                    with panic(log=logger):
                        # This is guaranteed to delete any uncommitted blocks.
                        container.delete_blob(blob_name=str(jobStoreFileID))

                if checkForModification and expectedVersion is not None:
                    # Acquire a (60-second) write lock,
                    leaseID = container.acquire_blob_lease(blob_name=str(jobStoreFileID),
                                                           lease_duration=60)
                    # check for modification,
                    blob = container.get_blob_properties(blob_name=str(jobStoreFileID))
                    if blob.properties.etag != expectedVersion:
                        container.release_blob_lease(blob_name=str(jobStoreFileID), lease_id=leaseID)
                        raise ConcurrentFileModificationException(jobStoreFileID)
                    # commit the file,
                    container.put_block_list(blob_name=str(jobStoreFileID),
                                             block_list=blocks,
                                             lease_id=leaseID,
                                             metadata=dict(encrypted=str(encrypted)))
                    # then release the lock.
                    container.release_blob_lease(blob_name=str(jobStoreFileID), lease_id=leaseID)
                else:
                    # No need to check for modification, just blindly write over whatever
                    # was there.
                    container.put_block_list(blob_name=str(jobStoreFileID),
                                             block_list=blocks,
                                             metadata=dict(encrypted=str(encrypted)))

        with UploadPipe() as writable:
            yield writable

    @contextmanager
    def _downloadStream(self, jobStoreFileID, container):
        # The reason this is not in the writer is so we catch non-existent blobs early.

        blob = container.get_blob_properties(blob_name=str(jobStoreFileID))

        encrypted = strict_bool(blob.metadata['encrypted'])
        if encrypted and self.keyPath is None:
            raise AssertionError('Content is encrypted but no key was provided.')

        outer_self = self

        class DownloadPipe(ReadablePipe):
            def writeTo(self, writable):
                chunkStart = 0
                fileSize = blob.properties.content_length
                while chunkStart < fileSize:
                    chunkEnd = chunkStart + outer_self._maxAzureBlockBytes - 1
                    buf = container.get_blob_to_bytes(blob_name=str(jobStoreFileID),
                                                      start_range=chunkStart,
                                                      end_range=chunkEnd).content
                    if encrypted:
                        buf = encryption.decrypt(buf, outer_self.keyPath)
                    writable.write(buf)
                    chunkStart = chunkEnd + 1

        with DownloadPipe() as readable:
            yield readable
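For reference, a short sketch of the URL shape _parseWasbUrl expects (account, container, and blob names here are hypothetical; urlparse is the Python 2 module this code targets):

from urlparse import urlparse

# wasb(s)://<container>@<account>.blob.core.windows.net/<blob name>
url = urlparse('wasbs://files@myaccount.blob.core.windows.net/stats/job_42')
info = AzureJobStore._parseWasbUrl(url)
assert info.account == 'myaccount'
assert info.container == 'files'
assert info.name == 'stats/job_42'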
Example #4
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 hub,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = hub
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                hub, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        # Start -> List all RGs and identify new spokes to mark them with tags.
        # Look for Resource Groups (RGs) which have no tags or do not have a
        # tag named "PANORAMA_PROGRAMMED".
        # potential_new_spokes = [x.name for x in self.resource_client.resource_groups.list()\
        #                  if not x.tags or not x.tags.get(self.RG_RULE_PROGRAMMED_TAG, None)]

        # If the RG has a VMSS with a tag named "PanoramaManaged" whose value
        # is the Hub Resource Group name, then we know this is a new spoke,
        # launched and managed by the Hub, that has not yet been programmed
        # with the NAT/Azure Instrumentation key.
        # for rg in potential_new_spokes:
        #     fw_vm_list = [x for x in self.resource_client.resources.list_by_resource_group(rg)
        #                   if x.type == self.VMSS_TYPE and self.filter_vmss(rg, x.name)]
        #     if fw_vm_list:
        #         rg_params = {'location': self.resource_client.resource_groups.get(rg).location}
        #         rg_params.update(tags={
        #                                  self.RG_RULE_PROGRAMMED_TAG : 'No',
        #                                  self.HUB_MANAGED_TAG        : self.hub_name
        #                               })
        #         self.resource_client.resource_groups.create_or_update(rg, rg_params)
        #         self.logger.info("RG %s marked as a spoke managed by this hub %s" % (rg, self.hub_name))
        # End -> List out all RGs and identify new spokes to mark them with tags.

        # Populate the list of spokes managed by this Azure hub.
        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []
        # for rg in rg_list:
        #     if rg.tags and rg.tags.get(self.HUB_MANAGED_TAG, None) == self.hub_name:
        #         self.managed_spokes.append(rg.name)
        #         if rg.tags.get(self.RG_RULE_PROGRAMMED_TAG, 'Yes') == 'No':
        #             self.new_spokes.append(rg.name)
        # self.logger.debug('%s identified as spokes managed by %s' % (self.managed_spokes, self.hub_name))
        # if self.new_spokes:
        #     self.logger.info('%s identified as new spokes to be programmed by %s' % (self.new_spokes, self.hub_name))

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def get_ilb_ip(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the ILB IP address from the spoke. The ILB name is always
            # hardcoded to myPrivateLB (see ILB_NAME above).
            if resource.name == self.ILB_NAME and resource.type == self.ILB_TYPE:
                ilb_obj = self.network_client.load_balancers.get(
                    spoke, resource.name)
                ilb_frontend_cfg = ilb_obj.frontend_ip_configurations
                try:
                    ilb_private_ip = ilb_frontend_cfg[0].private_ip_address
                except IndexError:
                    self.logger.info("ILB is not setup yet in RG %s." % spoke)
                    return None
                return ilb_private_ip
        return None

    def get_appinsights_instr_key(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the Appinsights instance where the custom metrics are being
            # published.
            if resource.type == self.APPINSIGHTS_TYPE and 'appinsights' in resource.name:
                appinsights_obj = self.resource_client.resources.get_by_id(
                    resource.id, '2014-04-01')
                instr_key = appinsights_obj.properties.get(
                    'InstrumentationKey', '')
                if not instr_key:
                    self.logger.info("InstrKey is not setup yet in %s." %
                                     spoke)
                    return None
                return instr_key
        return None

    def set_spoke_as_programmed(self, spoke):
        spoke_params = {
            'location':
            self.resource_client.resource_groups.get(spoke).location
        }
        spoke_tags = self.resource_client.resource_groups.get(spoke).tags
        spoke_tags[self.RG_RULE_PROGRAMMED_TAG] = 'Yes'
        spoke_params.update(tags=spoke_tags)
        self.resource_client.resource_groups.create_or_update(
            spoke, spoke_params)
        self.logger.info(
            "RG %s marked as programmed and spoke managed by this hub %s" %
            (spoke, self.hub_name))

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed with error %s" %
                              (vm_hostname, e))
            return None
        else:
            # TODO: update status if possible
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):

        vm = Entity()

        # PartitionKey is nothing but the spoke name
        vm.PartitionKey = spoke
        # RowKey is nothing but the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name}
                                for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
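A minimal sketch of the PartitionKey filter used by get_fw_vms_in_cosmos_db above, against the legacy azure.storage.table SDK (account, key, table, and spoke names are hypothetical):

from azure.storage.table import TableService

ts = TableService(account_name='mystorageacct', account_key='<key>')
# OData filter: all rows belonging to one spoke (one partition).
rows = ts.query_entities('vmsstable', filter="PartitionKey eq 'spoke-rg-1'")
for row in rows:
    print(row.RowKey, row.serial_no)  # hostname and serial, per create_db_entity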
Example #5
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 my_storage_rg,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = vmss_rg_name
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                my_storage_rg, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed with error %s" %
                              (vm_hostname, e))
            return None
        else:
            # TODO: update status if possible
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):

        vm = Entity()

        # PartitionKey is nothing but the spoke name
        vm.PartitionKey = spoke
        # RowKey is nothing but the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name}
                                for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
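Finally, a sketch of the entity round-trip implied by create_db_entity and get_vm_in_cosmos_db above (legacy azure.storage.table SDK; all names are hypothetical):

from azure.storage.table import Entity, TableService

ts = TableService(account_name='mystorageacct', account_key='<key>')

vm = Entity()
vm.PartitionKey = 'spoke-rg-1'   # spoke (resource group) name
vm.RowKey = 'fw-vm-001'          # VM hostname
vm.serial_no = '0012345'
ts.insert_entity('vmsstable', vm)

# Look up by the composite primary key (PartitionKey, RowKey).
fetched = ts.get_entity('vmsstable', 'spoke-rg-1', 'fw-vm-001')
assert fetched.serial_no == '0012345'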