Example No. 1
class azure_table:
    def __init__(self, table_name='HemoniDataTable'):
        connection_string = "**"

        self.table_client = TableService(connection_string=connection_string)
        self.table_name = table_name
        if not self.table_client.exists(table_name):
            self.table_client.create_table(table_name=table_name)

    def delete_table(self):
        self.table_client.delete_table(table_name=self.table_name)

    def insert_entity(self, entity):
        """
        When inserting an entity into a table, you must specify values for the
        PartitionKey and RowKey system properties. Together, these properties
        form the primary key and must be unique within the table. Both the
        PartitionKey and RowKey values must be string values; each key value may
        be up to 64 KB in size. If you are using an integer value for the key
        value, you should convert the integer to a fixed-width string, because
        they are canonically sorted. For example, you should convert the value
        1 to 0000001 to ensure proper sorting.
        :param entity:The entity to insert. Could be a dict or an entity object.
            Must contain a PartitionKey and a RowKey.
        :return: null
        """
        self.table_client.insert_or_replace_entity(table_name=self.table_name,
                                                   entity=entity)

    def get_entity(self, partition, row):
        """
        Get an entity from the specified table. Throws if the entity does not exist.
        :param partition:  The PartitionKey of the entity.
        :param row: The RowKey of the entity.
        :return:
        """
        return self.table_client.get_entity(self.table_name,
                                            partition_key=partition,
                                            row_key=row)
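A minimal usage sketch for this wrapper (connection string, table, and entity values are placeholders), zero-padding the integer row key as the docstring recommends:

table = azure_table(table_name='HemoniDataTable')
entity = {
    'PartitionKey': 'sensors',      # placeholder partition
    'RowKey': str(1).zfill(7),      # '0000001': fixed-width string keeps keys sorted
    'Temperature': 21.5,
}
table.insert_entity(entity)
fetched = table.get_entity('sensors', '0000001')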
Example No. 2
class AzureStorage():

    def __init__(self, container=None):
        self.AZURE_STORAGE_ACCOUNT = 'logodetectionstorage'
        self.AZURE_STORAGE_KEY = '**'  # account key redacted; load secrets from configuration, do not hardcode them
        self.table_service = TableService(account_name=self.AZURE_STORAGE_ACCOUNT, account_key=self.AZURE_STORAGE_KEY)
        self.blob_service = BlockBlobService(account_name=self.AZURE_STORAGE_ACCOUNT, account_key=self.AZURE_STORAGE_KEY)
        self.container = container or "input"
        self.table_list = [] #everything in the table for this logo
        self.logo = ""

    def query(self, tableName, partitionKey, rowKey):
        task = self.table_service.get_entity(tableName, partitionKey, rowKey)
        return task

    def retrieve_table(self, tableName):
        #tasks = table_service.query_entities(tableName, filter="PartitionKey eq 'tasksSeattle'", select='description')
        try:
            tasks = self.table_service.query_entities(tableName)
        except Exception:
            return None
        self.logo = tableName
        for task in tasks:
            self.table_list.append(task)
        self.table_list = sorted(self.table_list, key=lambda k: k['has_logo'], reverse=True) 
        return self.table_list
    
    def download_blob(self, path, logoName):
        # Download the logo image blob ("images/<logoName>") to the local file at `path`.
        blob_name = "images/" + logoName

        self.blob_service.get_blob_to_path(self.container, blob_name, path)

    def exists(self, name):
        try:
            self.blob_service.get_blob_properties(self.container, name)
            return True
        except Exception:
            return False
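A hypothetical usage sketch for this class (the 'contoso' table and blob names are placeholders):

storage = AzureStorage()
rows = storage.retrieve_table('contoso')      # all entities for the 'contoso' logo, sorted by 'has_logo'
if rows is not None and storage.exists('images/contoso.jpeg'):
    # downloads blob 'images/contoso.jpeg' to the local file 'contoso_local.jpeg'
    storage.download_blob('contoso_local.jpeg', 'contoso.jpeg')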
Example No. 3
def _unmerge_resource(table_client: azuretable.TableService,
                      entity: dict) -> None:
    """Remove node from entity
    :param azuretable.TableService table_client: table client
    """
    while True:
        entity = table_client.get_entity(_STORAGE_CONTAINERS['table_images'],
                                         entity['PartitionKey'],
                                         entity['RowKey'])
        # collect the current VmList entries from the entity
        evms = []
        for i in range(0, _MAX_VMLIST_PROPERTIES):
            prop = 'VmList{}'.format(i)
            if prop in entity:
                evms.extend(entity[prop].split(','))
        if _NODEID in evms:
            evms.remove(_NODEID)
        for i in range(0, _MAX_VMLIST_PROPERTIES):
            prop = 'VmList{}'.format(i)
            start = i * _MAX_VMLIST_IDS_PER_PROPERTY
            end = start + _MAX_VMLIST_IDS_PER_PROPERTY
            if end > len(evms):
                end = len(evms)
            if start < end:
                entity[prop] = ','.join(evms[start:end])
            else:
                entity[prop] = None
        etag = entity['etag']
        entity.pop('etag')
        try:
            table_client.update_entity(_STORAGE_CONTAINERS['table_images'],
                                       entity=entity,
                                       if_match=etag)
            break
        except azure.common.AzureHttpError as ex:
            if ex.status_code != 412:
                raise
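The loop above repacks the node list into a fixed set of VmListN string properties; a standalone sketch of that packing, with the size constants assumed for illustration:

_MAX_VMLIST_PROPERTIES = 4
_MAX_VMLIST_IDS_PER_PROPERTY = 3

def pack_vmlist(entity: dict, node_ids: list) -> None:
    # Spread node ids across VmList0..VmListN-1, each holding a bounded CSV chunk.
    for i in range(_MAX_VMLIST_PROPERTIES):
        start = i * _MAX_VMLIST_IDS_PER_PROPERTY
        end = min(start + _MAX_VMLIST_IDS_PER_PROPERTY, len(node_ids))
        entity['VmList{}'.format(i)] = ','.join(node_ids[start:end]) if start < end else None

entity = {}
pack_vmlist(entity, ['node{}'.format(n) for n in range(5)])
# -> VmList0='node0,node1,node2', VmList1='node3,node4', VmList2=None, VmList3=None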
Example No. 4
class ShoppingCartServiceCloud:
    """Shopping Cart Methods called from the API to interact with the DB."""
    def __init__(self, shards=1):
        self.shards = shards
        self.table_name = "ShoppingCartTable"
        try:
            self.db = TableService(
                endpoint_suffix="table.cosmos.azure.com",
                connection_string=os.getenv("AZURE_COSMOS_CONNECTION_STRING"),
            )
        except ValueError:
            raise Exception(
                "Please initialize $AZURE_COSMOS_CONNECTION_STRING")
        try:
            self.db.create_table(self.table_name, fail_on_exist=True)
        except AzureConflictHttpError:
            # Accept error only if already exists
            pass

    def get_product_items(self, customer_id):
        row_key = utils.hash_key(customer_id)
        partition_key = 'ShoppingCart' + str(row_key % self.shards).zfill(3)

        # Get Entity
        try:
            items = self.db.get_entity(self.table_name, partition_key,
                                       str(row_key))
            product_items = json.loads(items.ProductItems)
        except AzureMissingResourceHttpError:
            product_items = []
        return product_items

    def update_product_items(self, customer_id, product_items):
        row_key = utils.hash_key(customer_id)
        partition_key = 'ShoppingCart' + str(row_key % self.shards).zfill(3)
        product_items = [
            item for item in product_items if item["unitCount"] > 0
        ]

        # Insert or Update Items
        items = Entity()
        items.PartitionKey = partition_key
        items.RowKey = str(row_key)
        items.CustomerId = customer_id
        items.ProductItems = json.dumps(product_items)

        self.db.insert_or_replace_entity(self.table_name, items)

    def delete_shopping_cart(self, customer_id):
        row_key = utils.hash_key(customer_id)
        partition_key = 'ShoppingCart' + str(row_key % self.shards).zfill(3)

        # Get Items to Checkout before Delete
        try:
            items = self.db.get_entity(self.table_name, partition_key,
                                       str(row_key))
            checkout_items = json.loads(items.ProductItems)
        except AzureMissingResourceHttpError:
            checkout_items = []

        self.db.delete_entity(self.table_name, partition_key, str(row_key))
        return checkout_items
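A hypothetical usage sketch (assumes $AZURE_COSMOS_CONNECTION_STRING is set and that utils.hash_key returns an integer):

cart = ShoppingCartServiceCloud(shards=10)
cart.update_product_items('customer-42', [
    {'productId': 'sku-1', 'unitCount': 2},
    {'productId': 'sku-2', 'unitCount': 0},   # dropped: only items with unitCount > 0 are stored
])
print(cart.get_product_items('customer-42'))  # -> [{'productId': 'sku-1', 'unitCount': 2}]
checked_out = cart.delete_shopping_cart('customer-42')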
Example No. 5
def _merge_resource(table_client: azuretable.TableService, resource: str,
                    nglobalresources: int) -> None:
    """Merge resource to the image table
    :param azuretable.TableService table_client: table client
    :param str resource: resource to add to the image table
    :param int nglobalresources: number of global resources
    """
    # merge resource to the image table
    entity = {
        'PartitionKey': _PARTITION_KEY,
        'RowKey': compute_resource_hash(resource),
        'Resource': resource,
        'VmList0': _NODEID,
    }
    logger.debug('merging entity {} to the image table'.format(entity))
    try:
        table_client.insert_entity(_STORAGE_CONTAINERS['table_images'],
                                   entity=entity)
    except azure.common.AzureConflictHttpError:
        while True:
            entity = table_client.get_entity(
                _STORAGE_CONTAINERS['table_images'], entity['PartitionKey'],
                entity['RowKey'])
            # merge VmList into entity
            evms = []
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                if prop in entity:
                    evms.extend(entity[prop].split(','))
            if _NODEID in evms:
                break
            evms.append(_NODEID)
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                start = i * _MAX_VMLIST_IDS_PER_PROPERTY
                end = start + _MAX_VMLIST_IDS_PER_PROPERTY
                if end > len(evms):
                    end = len(evms)
                if start < end:
                    entity[prop] = ','.join(evms[start:end])
                else:
                    entity[prop] = None
            etag = entity['etag']
            entity.pop('etag')
            try:
                table_client.merge_entity(_STORAGE_CONTAINERS['table_images'],
                                          entity=entity,
                                          if_match=etag)
                break
            except azure.common.AzureHttpError as ex:
                if ex.status_code != 412:
                    raise
    logger.info('entity {} merged to the image table'.format(entity))
    global _GR_DONE
    if not _GR_DONE:
        try:
            entities = table_client.query_entities(
                _STORAGE_CONTAINERS['table_images'],
                filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
        except azure.common.AzureMissingResourceHttpError:
            entities = []
        count = 0
        for entity in entities:
            for i in range(0, _MAX_VMLIST_PROPERTIES):
                prop = 'VmList{}'.format(i)
                mode_prefix = _CONTAINER_MODE.name.lower() + ':'
                if (prop in entity and _NODEID in entity[prop]
                        and entity['Resource'].startswith(mode_prefix)):
                    count += 1
        if count == nglobalresources:
            _record_perf('gr-done',
                         'nglobalresources={}'.format(nglobalresources))
            _GR_DONE = True
            logger.info(
                'all {} global resources of container mode "{}" loaded'.format(
                    nglobalresources, _CONTAINER_MODE.name.lower()))
        else:
            logger.info(
                '{}/{} global resources of container mode "{}" loaded'.format(
                    count, nglobalresources, _CONTAINER_MODE.name.lower()))
Example No. 6
class StorageAccount(object):
    json_serializer = TaggedJSONSerializer()

    def __init__(self, connection_str: str, table_name: str,
                 partition_key: str, create_table_if_not_exists: bool):
        self.table_name = table_name
        self.partition_key = partition_key
        self.create_table_if_not_exists = create_table_if_not_exists
        self.table_service = TableService(connection_string=connection_str)

    def write(self, key: str, data: dict, encryption_key: bytes) -> None:
        """
        serializes and encrypts the passed dict object and writes it to storage
        """

        data = self.json_serializer.dumps(data)
        encoded_data, tag, nonce = self.encrypt(data, encryption_key)
        entity = {
            "PartitionKey": self.partition_key,
            "RowKey": key,
            "Data": encoded_data,
            "Tag": tag,
            "Nonce": nonce
        }
        try:
            self.table_service.insert_or_merge_entity(self.table_name, entity)
        except AzureMissingResourceHttpError:
            if not self.create_table_if_not_exists:
                raise
            self.table_service.create_table(self.table_name)
            self.table_service.insert_or_merge_entity(self.table_name, entity)

    def read(self, key: str, app_key: bytes) -> Union[List[Dict], None]:
        """
        Reads encrypted data from storage, then decrypts and deserializes it.
        Returns None if no data was found or decryption failed.
        """
        try:
            data = self.table_service.get_entity(self.table_name,
                                                 self.partition_key, key)
            decoded = self.decrypt(data["Data"], data["Tag"], data["Nonce"],
                                   app_key)
            if decoded is not None:
                return self.json_serializer.loads(decoded)
            return None
        except AzureMissingResourceHttpError:
            return None

    def delete(self, key: str) -> None:
        """
        Removes an element from storage if it exists
        """
        try:
            self.table_service.delete_entity(self.table_name,
                                             self.partition_key, key)
        except AzureMissingResourceHttpError:
            pass

    @staticmethod
    def encrypt(data: str, secret_text: bytes) -> Tuple[str, str, str]:
        """
        encrypts the passed data with the secret text.
        :return: a tuple of three elements: encrypted data, verification_tag and nonce element.
        All elements are base64 encoded strings
        """
        cipher = AES.new(secret_text, AES.MODE_EAX)
        ciphertext, tag = cipher.encrypt_and_digest(data.encode("utf-8"))
        return (base64.b64encode(ciphertext).decode("ascii"),
                base64.b64encode(tag).decode("ascii"),
                base64.b64encode(cipher.nonce).decode("ascii"))

    @staticmethod
    def decrypt(encrypted_data: str, verification_tag: str, nonce: str,
                secret_text: bytes) -> Union[str, None]:
        """
        Decrypts encoded data using the passed secret_text
        :param encrypted_data:  as base64 encoded string or byte array
        :param verification_tag: as base64 encoded string or byte array
        :param nonce: as base64 encoded string or byte array
        :param secret_text: the same secret text with which the element was encrypted
        :return: the plaintext on success, None if the data could not be decoded or verified
        """
        nonce = base64.b64decode(nonce)
        cipher = AES.new(secret_text, AES.MODE_EAX, nonce=nonce)
        data = base64.b64decode(encrypted_data)
        plaintext = cipher.decrypt(data)
        tag = base64.b64decode(verification_tag)
        try:
            cipher.verify(tag)
            return plaintext.decode("utf-8")
        except ValueError:
            return None
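A round-trip sketch for the two static helpers above (PyCryptodome AES-EAX; the 16-byte key is generated on the spot for illustration):

import os

key = os.urandom(16)                                      # AES-128 key
ciphertext, tag, nonce = StorageAccount.encrypt('{"cart": []}', key)
assert StorageAccount.decrypt(ciphertext, tag, nonce, key) == '{"cart": []}'
# A different key fails the EAX tag verification, so decrypt returns None.
assert StorageAccount.decrypt(ciphertext, tag, nonce, os.urandom(16)) is None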
Example No. 7
    storage_account = sys.argv[1]
    storage_key = sys.argv[2]
    batch_account = sys.argv[3]
    batch_key = sys.argv[4]
    batch_url = sys.argv[5]
    table_name = sys.argv[6]
    job_id = sys.argv[7]
    entity_pk = sys.argv[8]
    entity_rk = sys.argv[9]

    table_service = TableService(account_name=storage_account,
                                 account_key=storage_key)
    blob_service = BlockBlobService(account_name=storage_account,
                                    account_key=storage_key)
    credentials = batchauth.SharedKeyCredentials(batch_account, batch_key)
    batch_client = batch.BatchServiceClient(credentials, base_url=batch_url)
    entity = table_service.get_entity(table_name, entity_pk, entity_rk)

    wait_for_tasks_to_complete(table_service, batch_client, table_name, entity,
                               job_id)

    if table_name == 'DatabaseEntity':
        container_name = sys.argv[10]
        files = 0
        total_size = 0
        db_type = 'Nucleotide'
        generator = blob_service.list_blobs(container_name,
                                            prefix=entity_rk + '.')
        for blob in generator:
            files += 1
            total_size += blob.properties.content_length
            extension = blob.name.split(".")[-1]
Example No. 8
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 hub,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = hub
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                hub, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        # Start -> List out all RGs and identify new spokes to mark them with tags.
        # Look for Resource Groups (RGs) which do not have tags or do not have a
        # tag named "PANORAMA_PROGRAMMED".
        # potential_new_spokes = [x.name for x in self.resource_client.resource_groups.list()\
        #                  if not x.tags or not x.tags.get(self.RG_RULE_PROGRAMMED_TAG, None)]

        # If the RG has a VMSS which has a tag named "PanoramaManaged" whose value
        # is the Hub Resource Group name, then we know that this is a new spoke,
        # launched and managed by the Hub, that is not yet programmed for the
        # NAT/Azure Instrumentation key.
        # for rg in potential_new_spokes:
        #     fw_vm_list = [x for x in self.resource_client.resources.list_by_resource_group(rg)
        #                   if x.type == self.VMSS_TYPE and self.filter_vmss(rg, x.name)]
        #     if fw_vm_list:
        #         rg_params = {'location': self.resource_client.resource_groups.get(rg).location}
        #         rg_params.update(tags={
        #                                  self.RG_RULE_PROGRAMMED_TAG : 'No',
        #                                  self.HUB_MANAGED_TAG        : self.hub_name
        #                               })
        #         self.resource_client.resource_groups.create_or_update(rg, rg_params)
        #         self.logger.info("RG %s marked as a spoke managed by this hub %s" % (rg, self.hub_name))
        # End -> List out all RGs and identify new spokes to mark them with tags.

        # Populate the list of spokes managed by this Azure hub.
        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []
        # for rg in rg_list:
        #     if rg.tags and rg.tags.get(self.HUB_MANAGED_TAG, None) == self.hub_name:
        #         self.managed_spokes.append(rg.name)
        #         if rg.tags.get(self.RG_RULE_PROGRAMMED_TAG, 'Yes') == 'No':
        #             self.new_spokes.append(rg.name)
        # self.logger.debug('%s identified as spokes managed by %s' % (self.managed_spokes, self.hub_name))
        # if self.new_spokes:
        #     self.logger.info('%s identified as new spokes to be programmed by %s' % (self.new_spokes, self.hub_name))
        #
        #

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def get_ilb_ip(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the ILB IP address from the spoke. The ILB name is always
            # hardcoded to be myPrivateLB.
            if resource.name == self.ILB_NAME and resource.type == self.ILB_TYPE:
                ilb_obj = self.network_client.load_balancers.get(
                    spoke, resource.name)
                ilb_frontend_cfg = ilb_obj.frontend_ip_configurations
                try:
                    ilb_private_ip = ilb_frontend_cfg[0].private_ip_address
                except IndexError as e:
                    self.logger.info("ILB is not setup yet in RG %s." % spoke)
                    return None
                return ilb_private_ip
        return None

    def get_appinsights_instr_key(self, spoke):
        for resource in self.resource_client.resources.list_by_resource_group(
                spoke):
            # Get the Appinsights instance where the custom metrics are being
            # published.
            if resource.type == self.APPINSIGHTS_TYPE and 'appinsights' in resource.name:
                appinsights_obj = self.resource_client.resources.get_by_id(
                    resource.id, '2014-04-01')
                instr_key = appinsights_obj.properties.get(
                    'InstrumentationKey', '')
                if not instr_key:
                    self.logger.info("InstrKey is not setup yet in %s." %
                                     spoke)
                    return None
                return instr_key
        return None

    def set_spoke_as_programmed(self, spoke):
        spoke_params = {
            'location':
            self.resource_client.resource_groups.get(spoke).location
        }
        spoke_tags = self.resource_client.resource_groups.get(spoke).tags
        spoke_tags[self.RG_RULE_PROGRAMMED_TAG] = 'Yes'
        spoke_params.update(tags=spoke_tags)
        self.resource_client.resource_groups.create_or_update(
            spoke, spoke_params)
        self.logger.info(
            "RG %s marked as programmed and spoke managed by this hub %s" %
            (spoke, self.hub_name))

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed" % vm_hostname)
            return None
        else:
            # IF possible update status TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):

        vm = Entity()

        # The PartitionKey is the spoke name.
        vm.PartitionKey = spoke
        # The RowKey is the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name} \
                            for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
Example No. 9
class Azure:
    # Tags used
    RG_RULE_PROGRAMMED_TAG = 'PANORAMA_PROGRAMMED'
    HUB_MANAGED_TAG = 'PanoramaManaged'

    # Resource types
    VMSS_TYPE = 'Microsoft.Compute/virtualMachineScaleSets'
    ILB_TYPE = 'Microsoft.Network/loadBalancers'
    APPINSIGHTS_TYPE = 'Microsoft.Insights/components'

    # Hardcoded names used for internal Azure resources
    ILB_NAME = 'myPrivateLB'
    ALPHANUM = r'[^A-Za-z0-9]+'

    def __init__(self,
                 cred,
                 subs_id,
                 my_storage_rg,
                 vmss_rg_name,
                 vmss_name,
                 storage,
                 pan_handle,
                 logger=None):
        self.credentials = cred
        self.subscription_id = subs_id
        self.logger = logger
        self.hub_name = vmss_rg_name
        self.storage_name = storage
        self.panorama_handler = pan_handle
        self.vmss_table_name = re.sub(self.ALPHANUM, '',
                                      vmss_name + 'vmsstable')
        self.vmss_rg_name = vmss_rg_name

        try:
            self.resource_client = ResourceManagementClient(cred, subs_id)
            self.compute_client = ComputeManagementClient(cred, subs_id)
            self.network_client = NetworkManagementClient(cred, subs_id)
            self.store_client = StorageManagementClient(cred, subs_id)
            store_keys = self.store_client.storage_accounts.list_keys(
                my_storage_rg, storage).keys[0].value
            self.table_service = TableService(account_name=storage,
                                              account_key=store_keys)
        except Exception as e:
            self.logger.error("Getting Azure Infra handlers failed %s" %
                              str(e))
            raise e

        rg_list = self.resource_client.resource_groups.list()
        self.managed_spokes = []
        self.managed_spokes.append(vmss_rg_name)
        self.new_spokes = []

    def filter_vmss(self, spoke, vmss_name):
        vmss = self.compute_client.virtual_machine_scale_sets.get(
            spoke, vmss_name)
        if vmss.tags and vmss.tags.get(self.HUB_MANAGED_TAG,
                                       None) == self.hub_name:
            return True
        return False

    def create_worker_ready_tag(self, worker_name):
        self.compute_client.virtual_machines.create_or_update(
            self.vmss_rg_name, worker_name, {
                'location':
                self.resource_client.resource_groups.get(
                    self.vmss_rg_name).location,
                'tags': {
                    'WORKER_READY': 'Yes'
                }
            })

    def create_new_cosmos_table(self, table_name):
        # Create the Cosmos DB if it does not exist already
        if not self.table_service.exists(table_name):
            try:
                ok = self.table_service.create_table(table_name)
                if not ok:
                    self.logger.error('Creating VMSS table failed')
                    return False
                self.logger.info('VMSS Table %s created successfully' %
                                 table_name)
            except Exception as e:
                self.logger.error('Creating VMSS table failed ' + str(e))
                return False
        return True

    def clear_cosmos_table(self, table_name):
        self.table_service.delete_table(table_name)

    def get_vmss_by_name(self, spoke, vmss_name):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and x.name == vmss_name
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
        return None

    def get_vmss_in_spoke(self, spoke):

        vmss_list = [
            x.name for x in
            self.resource_client.resources.list_by_resource_group(spoke)
            if x.type == self.VMSS_TYPE and self.filter_vmss(spoke, x.name)
        ]

        if vmss_list:
            return vmss_list[0]
        else:
            self.logger.error("No VMSS found in Resource Group %s" % spoke)
            return None

    def get_vms_in_vmss(self, spoke, vmss_name):

        return self.compute_client.virtual_machine_scale_set_vms.list(
            spoke, vmss_name)

    def get_vm_in_cosmos_db(self, spoke, vm_hostname):

        try:
            db_vm_info = self.table_service.get_entity(self.vmss_table_name,
                                                       spoke, vm_hostname)
        except AzureMissingResourceHttpError:
            self.logger.info("New VM %s found in spoke %s" %
                             (vm_hostname, spoke))
            return None
        except Exception as e:
            self.logger.error("Querying for %s failed" % vm_hostname)
            return None
        else:
            # IF possible update status TODO
            self.logger.debug("VM %s is available in VMSS, Pan and DB" %
                              (vm_hostname))

        return db_vm_info

    # 'name'       : global_device['@name'],
    # 'hostname'   : global_device['hostname'],
    # 'serial'     : global_device['serial'],
    # 'ip-address' : global_device['ip-address'],
    # 'connected'  : global_device['connected'],
    # 'deactivated': global_device['deactivated']
    def create_db_entity(self, spoke, vm_details):

        vm = Entity()

        # The PartitionKey is the spoke name.
        vm.PartitionKey = spoke
        # The RowKey is the VM name itself.
        vm.RowKey = vm_details['hostname']
        vm.name = vm_details['name']
        vm.serial_no = vm_details['serial']
        vm.ip_addr = vm_details['ip-address']
        vm.connected = vm_details['connected']
        vm.deactivated = vm_details['deactivated']
        vm.subs_id = self.subscription_id
        vm.delicensed_on = 'not applicable'
        vm.is_delicensed = 'No'
        try:
            self.table_service.insert_entity(self.vmss_table_name, vm)
            self.logger.info("VM %s with serial no. %s in db" %
                             (vm_details['hostname'], vm_details['serial']))
        except Exception as e:
            self.logger.info("Insert entry to db for %s failed with error %s" %
                             (vm_details['hostname'], e))
            return False
        return True

    def get_fw_vms_in_cosmos_db(self, spoke=None):

        if spoke:
            filter_str = "PartitionKey eq '%s'" % spoke
        else:
            filter_str = None

        db_vms_list = self.table_service.query_entities(self.vmss_table_name,
                                                        filter=filter_str)
        if spoke:
            db_hostname_list = [{'hostname': x.RowKey, 'serial': x.serial_no, 'name': x.name} \
                            for x in db_vms_list if x.PartitionKey == spoke]
            return db_hostname_list
        else:
            return db_vms_list

    def delete_vm_from_cosmos_db(self, spoke, vm_name):

        self.table_service.delete_entity(self.vmss_table_name, spoke, vm_name)
Example No. 10
import sys
from azure.cosmosdb.table import TableService

if __name__ == '__main__':
    storage_account = sys.argv[1]
    storage_key = sys.argv[2]
    entity_pk = sys.argv[3]
    entity_rk = sys.argv[4]
    state = sys.argv[5]
    error = None
    if len(sys.argv) == 7:
        error = sys.argv[6]

    table_service = TableService(account_name=storage_account,
                                 account_key=storage_key)

    entity = table_service.get_entity('SearchEntity', entity_pk, entity_rk)

    try:
        entity._State = state
        if error:
            entity.Errors = error
        table_service.update_entity('SearchEntity',
                                    entity,
                                    if_match=entity.etag)
    except Exception as e:
        print('Error updating entity: {}'.format(e))
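If another writer updates the entity between the read and the update, the if_match ETag check fails with HTTP 412; a retry loop in the style of the earlier examples could be sketched like this (reusing the names from the snippet above):

from azure.common import AzureHttpError

while True:
    entity = table_service.get_entity('SearchEntity', entity_pk, entity_rk)
    entity._State = state
    if error:
        entity.Errors = error
    try:
        table_service.update_entity('SearchEntity', entity,
                                    if_match=entity.etag)
        break
    except AzureHttpError as ex:
        if ex.status_code != 412:
            raise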