def __init__(self, default_resource_group_name: str, default_region: str = 'eastus', profile_name: Optional[str] = None) -> None:
    """Initialize the AZAccount class.

    Args:
      default_resource_group_name (str): The default resource group in which to
          create new resources in. If the resource group does not exist, it
          will be automatically created.
      default_region (str): Optional. The default region to create new
          resources in. Default is eastus.
      profile_name (str): Optional. The name of the profile to use for Azure
          operations. For more information on profiles, see GetCredentials()
          in libcloudforensics.providers.azure.internal.common.py. Default
          does not use profiles and will authenticate to Azure using
          environment variables.
    """
    self.subscription_id, self.credentials = common.GetCredentials(profile_name)
    self.default_region = default_region
    # One management client per Azure service we talk to, all sharing the
    # same credentials and subscription.
    for attribute, client_class in (
        ('compute_client', azure_compute.ComputeManagementClient),
        ('storage_client', storage.StorageManagementClient),
        ('resource_client', resource.ResourceManagementClient),
        ('network_client', network.NetworkManagementClient)):
      setattr(self, attribute,
              client_class(self.credentials, self.subscription_id))
    # Resolving (and possibly creating) the default resource group needs
    # self.resource_client, so it must run after the loop above.
    self.default_resource_group_name = self._GetOrCreateResourceGroup(
        default_resource_group_name)
def discover_resources(**kwargs):
    """Sync Azure blob containers into CloudBolt for every ARM handler.

    For each handler, lists all storage accounts, then for each known
    resource group lists the blob containers and creates/updates a CB
    ``Resource`` per container (saving the account key and group metadata).

    Returns:
        list: Always empty. Resources are created/updated in place via
        ``get_or_create``; callers appear to rely on those side effects
        rather than the return value, so the contract is preserved.
    """
    containers = []
    resource_type = ResourceType.objects.get(name__iexact='storage')
    blue_print = ServiceBlueprint.objects.get(
        name__iexact='azure storage container')
    for handler in AzureARMHandler.objects.all():
        set_progress('Connecting to Azure for handler: {}'.format(handler))
        # getting all storage accounts
        credentials = ServicePrincipalCredentials(
            client_id=handler.client_id,
            secret=handler.secret,
            tenant=handler.tenant_id,
        )
        client = storage.StorageManagementClient(credentials,
                                                 handler.serviceaccount)
        # BUG FIX: storage_accounts.list() returns a one-shot paged iterator.
        # Re-iterating it inside the resource-group loop meant only the FIRST
        # resource group ever saw any accounts; every later group silently
        # iterated an exhausted generator. Materialize the accounts once.
        accounts = list(client.storage_accounts.list())
        for resource_group in ARMResourceGroup.objects.all():
            for account in accounts:
                for container in client.blob_containers.list(
                        resource_group_name=resource_group.name,
                        account_name=account.name).value:
                    try:
                        # Account name is the 5th-from-last segment of the
                        # container's ARM resource id.
                        storage_account = container.as_dict().get('id').split(
                            '/')[-5]
                        resource, _ = Resource.objects.get_or_create(
                            name=container.name,
                            defaults={
                                'blueprint': blue_print,
                                'resource_type': resource_type,
                                'group': Group.objects.get(
                                    name__icontains='unassigned'),
                                'parent_resource': Resource.objects.filter(
                                    name__icontains=account.name).first(),
                            })
                        resource_g_name = resource_group.name
                        # Get and save account key
                        res = client.storage_accounts.list_keys(
                            resource_g_name, storage_account)
                        keys = res.keys
                        resource.lifecycle = "ACTIVE"
                        resource.azure_rh_id = handler.id
                        resource.azure_container_name = container.name
                        resource.azure_account_key = keys[0].value
                        resource.resource_group_name = resource_g_name
                        resource.azure_account_name = account.name
                        resource.save()
                    except Exception as e:
                        # Best-effort sync: log and move on to the next
                        # container rather than aborting the whole discovery.
                        set_progress('Azure ClientError: {}'.format(e))
                        continue
    return containers
def discover_resources(**kwargs):
    """Discover Azure 'BlobStorage'-kind storage accounts for all handlers.

    Returns:
        list of dict: one record per unique BlobStorage account, with the
        keys CloudBolt expects (name, resource group, location, handler id).
    """
    discovered_az_block_storages = []
    discovered_az_block_storage_names = []
    for handler in AzureARMHandler.objects.all():
        set_progress('Connecting to Azure Block Storage \
            for handler: {}'.format(handler))
        credentials = ServicePrincipalCredentials(client_id=handler.client_id,
                                                  secret=handler.secret,
                                                  tenant=handler.tenant_id)
        azure_blob_client = storage.StorageManagementClient(
            credentials, handler.serviceaccount)
        # NOTE: a ResourceManagementClient was previously constructed here but
        # never used; the dead local has been removed.
        set_progress("Connection to Azure established")
        for st in azure_blob_client.storage_accounts.list():
            # Keep only BlobStorage-kind accounts, deduplicated across
            # handlers by account name.
            if (st.kind == 'BlobStorage'
                    and st.name not in discovered_az_block_storage_names):
                discovered_az_block_storage_names.append(st.name)
                discovered_az_block_storages.append({
                    'name': st.name,
                    'azure_storage_blob_name': st.name,
                    # Resource group is segment 4 of the ARM id:
                    # /subscriptions/<sub>/resourceGroups/<rg>/...
                    'resource_group_name': st.id.split('/')[4],
                    'azure_location': st.primary_location,
                    'azure_rh_id': handler.id
                })
    return discovered_az_block_storages
def _get_client(handler):
    """Return a storage management client for the given resource handler.

    On CloudBolt newer than 9.2.2 the client comes from the handler's API
    wrapper, which transparently honors CB-wide settings such as proxy and
    SSL verification. On older versions the client is built by hand from the
    handler's service-principal credentials, without those extras.
    """
    import settings
    from common.methods import is_version_newer

    if is_version_newer(settings.VERSION_INFO["VERSION"], "9.2.2"):
        return handler.get_api_wrapper().storage_client

    # TODO: Remove once versions <= 9.2.2 are no longer supported.
    sp_credentials = ServicePrincipalCredentials(client_id=handler.client_id,
                                                 secret=handler.secret,
                                                 tenant=handler.tenant_id)
    legacy_client = storage.StorageManagementClient(
        sp_credentials, handler.serviceaccount)
    set_progress("Connection to Azure established")
    return legacy_client
def __init__(self, az_account: 'account.AZAccount') -> None:
    """Initialize the Azure storage class.

    Args:
      az_account (AZAccount): An Azure account object; supplies the
          credentials and subscription id used to build the storage client.
    """
    self.az_account = az_account
    self.storage_client = storage.StorageManagementClient(
        az_account.credentials, az_account.subscription_id)
def run(job, **kwargs):
    """Create an Azure Blob storage account and record it on the CB resource.

    Reads the target environment, resource group, account name, SKU and
    access tier from template variables, provisions the account, verifies it
    exists, then saves the identifying fields on the resource.
    """
    resource = kwargs.get('resource')
    create_custom_fields_as_needed()

    # Inputs from the blueprint action.
    env_id = '{{ env_id }}'
    resource_group = '{{ resource_group }}'
    account_name = '{{ account_name }}'
    sku_name = '{{ sku_name }}'
    access_tier = '{{ access_tier }}'

    env = Environment.objects.get(id=env_id)
    rh = env.resource_handler.cast()
    location = env.node_location

    set_progress("Connecting To Azure Storage Service...")
    sp_credentials = ServicePrincipalCredentials(
        client_id=rh.client_id,
        secret=rh.secret,
        tenant=rh.tenant_id,
    )
    storage_client = storage.StorageManagementClient(sp_credentials,
                                                     rh.serviceaccount)
    set_progress("Connection to Azure storage established")

    create_params = models.StorageAccountCreateParameters(
        sku=models.Sku(name=sku_name),
        kind='BlobStorage',
        location=location,
        access_tier=access_tier)

    set_progress("Creating Blob storage %s..." % account_name)
    poller = storage_client.storage_accounts.create(
        resource_group,
        account_name,
        create_params,
    )
    # Poll until the long-running create operation completes.
    while not poller.done():
        set_progress("Waiting for Blob storage to be created...")
        poller.wait(20)

    # Verify that the blob storage was created.
    set_progress("Verifying the blob storage was created...")
    try:
        storage_client.storage_accounts.get_properties(resource_group,
                                                       account_name)
    except Exception:
        return "FAILURE", "Failed to create the blob storage", ""

    # Persist identifying metadata on the CB resource.
    resource.name = account_name
    resource.azure_storage_blob_name = account_name
    resource.resource_group_name = resource_group
    resource.azure_location = location
    resource.azure_rh_id = rh.id
    resource.save()
    set_progress("Blob Storage %s has been created." % account_name)
def get_azure_storage_client(handler) -> StorageManagementClient:
    """Return an Azure storage client with the Resource Handler details."""
    sp_credentials = ServicePrincipalCredentials(
        client_id=handler.client_id,
        secret=handler.secret,
        tenant=handler.tenant_id,
    )
    return storage.StorageManagementClient(sp_credentials,
                                           handler.serviceaccount)
def provision_check(self, provision_request):
    """Checks a ProvisionRequest object for validity with the Batch backend.

    Validates that (a) any existing resource group is in the requested
    location, (b) the storage account name is available or already owned in
    the requested resource group, and (c) the batch account name is
    available or already owned in the requested resource group.

    Returns:
        True when all checks pass.

    Raises:
        tesmodels.CloudError: when any check fails (Azure-specific errors
            are converted in the except clause below).
    """
    try:
        credentials = azcredentials.ServicePrincipalCredentials(
            client_id=provision_request.service_principal.client_id,
            secret=provision_request.service_principal.secret,
            tenant=provision_request.service_principal.tenant)
        resource_client = azresource_mgmt.ResourceManagementClient(
            credentials, provision_request.subscription_id)
        storage_client = azstorage_mgmt.StorageManagementClient(
            credentials, provision_request.subscription_id)
        batch_client = azbatch_mgmt.BatchManagementClient(
            credentials, provision_request.subscription_id)

        rg_check_result = resource_client.resource_groups.check_existence(
            provision_request.resource_group)
        if rg_check_result is True:
            # BUG FIX: resource_groups.get() returns a plain ResourceGroup
            # model, which is NOT a context manager -- the original
            # `with ... as resource_group:` raised AttributeError at runtime.
            resource_group = resource_client.resource_groups.get(
                provision_request.resource_group)
            if not resource_group.location == provision_request.location:
                raise AzCloudError(
                    'Resource group exists but in different provision_request.location than provided.')

        storage_check_result = storage_client.storage_accounts.check_name_availability(
            name=provision_request.storage_account_name)
        if not storage_check_result.name_available:
            if not storage_check_result.reason == 'AlreadyExists':
                # Raise AzCloudError here (not tesmodels.CloudError directly)
                # for consistency with the batch branch; the except clause
                # converts it for callers.
                raise AzCloudError(storage_check_result.message)
            else:
                storage_client.storage_accounts.get_properties(  # <-- will throw exception if in different RG
                    resource_group_name=provision_request.resource_group,
                    account_name=provision_request.storage_account_name)

        batch_check_result = batch_client.location.check_name_availability(
            location_name=provision_request.location,
            name=provision_request.batch_account_name)
        if not batch_check_result.name_available:
            if not batch_check_result.reason.value == 'AlreadyExists':
                raise AzCloudError(batch_check_result.message)
            else:
                batch_client.batch_account.get(  # <-- will throw exception if in different RG
                    resource_group_name=provision_request.resource_group,
                    account_name=provision_request.batch_account_name)
    except AzCloudError as err:
        # Return non-azure specific exception instead
        raise tesmodels.CloudError(err)
    return True
def generate_options_for_storage_account(server=None, **kwargs):
    """Return the name of every storage account visible to each ARM handler."""
    account_names = []
    for handler in AzureARMHandler.objects.all():
        set_progress('Connecting to Azure Storage \
            for handler: {}'.format(handler))
        sp_credentials = ServicePrincipalCredentials(
            client_id=handler.client_id,
            secret=handler.secret,
            tenant=handler.tenant_id)
        mgmt_client = storage.StorageManagementClient(sp_credentials,
                                                      handler.serviceaccount)
        set_progress("Connection to Azure established")
        account_names.extend(
            account.name for account in mgmt_client.storage_accounts.list())
    return account_names
def discover_resources(**kwargs):
    """Discover Azure storage queues across all ARM handlers.

    For each handler and each resource group, lists storage accounts, fetches
    their access keys, and enumerates the queues in each account.

    Returns:
        list of dict: one record per queue with the fields CloudBolt expects
        (name, resource group, handler id, account name and keys).
    """
    discovered_azure_queues = []
    for handler in AzureARMHandler.objects.all():
        set_progress("Connecting to Azure sql \
            DB for handler: {}".format(handler))
        credentials = ServicePrincipalCredentials(client_id=handler.client_id,
                                                  secret=handler.secret,
                                                  tenant=handler.tenant_id)
        azure_client = storage.StorageManagementClient(credentials,
                                                       handler.serviceaccount)
        azure_resources_client = resources.ResourceManagementClient(
            credentials, handler.serviceaccount)
        for resource_group in azure_resources_client.resource_groups.list():
            try:
                # NOTE(review): _get_next() is a private pager API that
                # fetches only the first page of accounts as raw JSON dicts
                # (hence the st["name"] dict access below). Accounts beyond
                # the first page are presumably missed -- confirm before
                # relying on this for large subscriptions.
                for st in (
                        azure_client.storage_accounts.list_by_resource_group(
                            resource_group.name)._get_next().json()["value"]):
                    try:
                        res = azure_client.storage_accounts.list_keys(
                            resource_group.name, st["name"])
                        keys = res.keys
                        # Queues are listed with the secondary key (keys[1]);
                        # the primary key (keys[0]) is what gets recorded.
                        for queue in QueueService(
                                account_name=st["name"],
                                account_key=keys[1].value).list_queues():
                            discovered_azure_queues.append({
                                "name": queue.name,
                                "azure_queue_name": "Azure queues - " + queue.name,
                                "resource_group_name": resource_group.name,
                                "azure_rh_id": handler.id,
                                "azure_storage_account_name": st["name"],
                                "azure_account_key": keys[0].value,
                                "azure_account_key_fallback": keys[1].value,
                            })
                    # Deliberate best-effort: any failure on one account
                    # (key access denied, queue service disabled, ...) skips
                    # that account rather than aborting discovery.
                    except:  # noqa: E722
                        continue
            except CloudError as e:
                set_progress("Azure CloudError: {}".format(e))
                continue
    return discovered_azure_queues
def _create_storage_account(self, credentials, subscription_id,
                            resource_group, name, sku, location):
    """Creates requested storage account on Azure.

    Blocks until the long-running create operation completes, then fetches
    the account's access keys.

    Returns:
        tuple: (storage account id, account name, primary access key).
    """
    from azure.mgmt.storage.models import StorageAccountCreateParameters, Kind, Sku

    mgmt_client = azstorage_mgmt.StorageManagementClient(
        credentials, subscription_id)
    create_params = StorageAccountCreateParameters(
        sku=Sku(name=sku), kind=Kind.storage, location=location)
    # .result() waits for the async create operation to finish.
    storage_account = mgmt_client.storage_accounts.create(
        resource_group_name=resource_group,
        account_name=name,
        parameters=create_params).result()
    storage_keys = mgmt_client.storage_accounts.list_keys(
        resource_group_name=resource_group, account_name=name)
    return (storage_account.id,
            storage_account.name,
            storage_keys.keys[0].value)
def discover_resources(**kwargs):
    """Discover files in Azure storage file shares across all ARM handlers.

    Returns:
        list of dict: one record per file found in any share of any storage
        account, with the fields CloudBolt expects.
    """
    discovered_azure_sql = []
    for handler in AzureARMHandler.objects.all():
        set_progress('Connecting to Azure storage \
            files for handler: {}'.format(handler))
        credentials = ServicePrincipalCredentials(
            client_id=handler.client_id,
            secret=handler.secret,
            tenant=handler.tenant_id
        )
        azure_client = storage.StorageManagementClient(
            credentials, handler.serviceaccount)
        azure_resources_client = resources.ResourceManagementClient(
            credentials, handler.serviceaccount)
        for resource_group in azure_resources_client.resource_groups.list():
            try:
                # NOTE(review): _get_next() is a private pager API returning
                # only the first page of accounts as raw JSON dicts (hence
                # st['name'] dict access); accounts past page one are
                # presumably missed -- confirm for large subscriptions.
                for st in azure_client.storage_accounts.list_by_resource_group(resource_group.name)._get_next().json()['value']:
                    res = azure_client.storage_accounts.list_keys(
                        resource_group.name, st['name'])
                    keys = res.keys
                    # Shares are browsed with the secondary key (keys[1]);
                    # the primary key (keys[0]) is what gets recorded.
                    file_service = FileService(
                        account_name=st['name'],
                        account_key=keys[1].value)
                    for share in file_service.list_shares():
                        for file in file_service.list_directories_and_files(share_name=share.name).items:
                            # Only File entries are recorded; Directory
                            # entries from the same listing are skipped.
                            if type(file) is File:
                                discovered_azure_sql.append(
                                    {
                                        'name': share.name + '-' + file.name,
                                        'azure_storage_file_name': file.name,
                                        'azure_file_identifier': share.name + '-' + file.name,
                                        'azure_storage_file_share_name': share.name,
                                        'resource_group_name': resource_group.name,
                                        'azure_rh_id': handler.id,
                                        'azure_storage_account_name': st['name'],
                                        'azure_account_key': keys[0].value,
                                        'azure_account_key_fallback': keys[1].value
                                    }
                                )
            # Deliberate best-effort: any failure inside one resource group
            # (key access denied, file service disabled, ...) skips the whole
            # group rather than aborting discovery.
            except:
                continue
    return discovered_azure_sql
def generate_options_for_storage_accounts(control_value=None, **kwargs):
    """Return names of storage accounts whose keys the handler can list.

    ``control_value`` is the id of the selected ARMResourceGroup; an empty
    selection yields no options.
    """
    if control_value is None or control_value == "":
        return []
    group = ARMResourceGroup.objects.get(id=control_value)
    handler = group.handler
    sp_credentials = ServicePrincipalCredentials(
        client_id=handler.client_id,
        secret=handler.secret,
        tenant=handler.tenant_id,
    )
    mgmt_client = storage.StorageManagementClient(sp_credentials,
                                                  handler.serviceaccount)
    options = []
    for account in mgmt_client.storage_accounts.list():
        # Probe key access; accounts we cannot list keys for are skipped.
        try:
            mgmt_client.storage_accounts.list_keys(group.name, account.name)
        except Exception:
            continue
        options.append(account.name)
    return options
def run(job, **kwargs):
    """Delete the Azure blob storage account backing this CB resource."""
    resource = kwargs.pop('resources').first()
    account_name = resource.attributes.get(
        field__name='azure_storage_blob_name').value
    resource_group = resource.attributes.get(
        field__name='resource_group_name').value
    rh = AzureARMHandler.objects.get(
        id=resource.attributes.get(field__name='azure_rh_id').value)

    set_progress("Connecting to azure storage service...")
    sp_credentials = ServicePrincipalCredentials(
        client_id=rh.client_id,
        secret=rh.secret,
        tenant=rh.tenant_id
    )
    storage_client = storage.StorageManagementClient(sp_credentials,
                                                     rh.serviceaccount)
    set_progress("Connection to azure Storage established")

    set_progress("Deleting blob storage account %s..." % account_name)
    storage_client.storage_accounts.delete(resource_group, account_name)
    return "Success", "The blob storage account has been deleted", ""
def run(job, *args, **kwargs):
    """Create an Azure blob container and record it on the CB resource.

    Reads the environment, resource group, storage account, access
    permission and container name from template variables, fetches the
    account key, creates the container and optionally sets its public
    access level.
    """
    create_custom_fields_as_needed()
    resource = kwargs.get('resource')
    env_id = '{{ env_id }}'
    resource_group = "{{ resource_group }}"
    storage_account = "{{ storage_accounts }}"
    permission = "{{ permissions }}"
    # BUG FIX: Azure container names must be lowercase. Normalize once so the
    # created container, the ACL call, and the saved metadata all agree --
    # previously the container was created with .lower() but the ACL was set
    # (and the resource saved) using the original-case name, which breaks for
    # mixed-case input.
    container_name = "{{container_name}}".lower()
    env = Environment.objects.get(id=env_id)
    rh = env.resource_handler.cast()
    location = env.node_location
    set_progress('Location: %s' % location)
    credentials = ServicePrincipalCredentials(
        client_id=rh.client_id,
        secret=rh.secret,
        tenant=rh.tenant_id,
    )
    client = storage.StorageManagementClient(credentials, rh.serviceaccount)
    resource_g = ARMResourceGroup.objects.get(id=resource_group)
    resource_g_name = resource_g.name

    # Persist identifying metadata on the CB resource.
    resource.name = container_name
    resource.azure_account_name = storage_account
    resource.azure_container_name = container_name
    resource.resource_group_name = resource_g_name
    resource.azure_location = location
    resource.lifecycle = "ACTIVE"
    resource.azure_rh_id = rh.id
    # Get and save accountkey
    res = client.storage_accounts.list_keys(resource_g_name, storage_account)
    keys = res.keys
    resource.azure_account_key = keys[0].value
    resource.save()

    azure_account_key = resource.azure_account_key
    if not azure_account_key:
        return "FAILURE", f"You don't have the account key for '{storage_account}'.", ""

    block_blob_service = BlockBlobService(account_name=storage_account,
                                          account_key=azure_account_key)
    set_progress(f"Creating container named '{container_name}' ...")
    # create_container returns False when the container already exists.
    created = block_blob_service.create_container(container_name)
    if not created:
        return "FAILURE", f"'{container_name}' already exists.", ""

    # PublicAccess.OFF is the default so act if this is not what has been selected.
    if permission != PublicAccess.OFF:
        set_progress(f"Setting access permissions for '{container_name}'")
        set_progress(permission)
        block_blob_service.set_container_acl(container_name,
                                             public_access=permission)
    return "SUCCESS", f"'{container_name}' created successfully", ""
def setUpClass(cls):
    """Provision a fresh Azure resource group + storage account for the tests.

    Skips the class when the Azure SDK pieces or required environment
    variables are missing. Before provisioning, best-effort deletes stray
    resource groups left by previous (non-gracefully terminated) CI runs.
    """
    if identity is None:
        raise unittest.SkipTest('missing azure-identity library')
    if resource is None or resource_models is None:
        raise unittest.SkipTest('missing azure-mgmt-resource library')
    if storage is None or storage_models is None:
        raise unittest.SkipTest('missing azure-mgmt-storage library')
    config = {
        key: os.getenv(key)
        for key in (
            'AZURE_TENANT_ID',
            'AZURE_SUBSCRIPTION_ID',
            'AZURE_CLIENT_ID',
            'AZURE_CLIENT_SECRET',
        )
    }
    for key, value in config.items():
        if not value:
            raise unittest.SkipTest('missing environment variable %s' % key)
    credentials = identity.ClientSecretCredential(
        tenant_id=config['AZURE_TENANT_ID'],
        client_id=config['AZURE_CLIENT_ID'],
        client_secret=config['AZURE_CLIENT_SECRET'],
    )
    resource_client = resource.ResourceManagementClient(
        credentials,
        config['AZURE_SUBSCRIPTION_ID'],
    )
    storage_client = storage.StorageManagementClient(
        credentials,
        config['AZURE_SUBSCRIPTION_ID'],
    )
    location = os.getenv('AZURE_LOCATION', DEFAULT_AZURE_LOCATION)
    name = RESOURCE_GROUP_NAME_PREFIX
    name += random_string(MAX_STORAGE_ACCOUNT_NAME_LENGTH - len(name))
    timeout = float(os.getenv('AZURE_TIMEOUT_SECONDS', DEFAULT_TIMEOUT_SECONDS))

    # We clean up any left over resource groups from previous runs on
    # setUpClass. If tests on CI get terminated non-gracefully, old resources
    # will be left laying around and we want to clean those up to ensure we
    # dont hit any limits.
    # To avoid deleting groups from concurrent runs, we only delete resources
    # older than a couple (6) of hours.
    print("Checking and cleaning up any old stray resource groups...")
    resource_groups = resource_client.resource_groups.list()
    now_ts = int(time.time())
    delete_threshold_ts = now_ts - int(datetime.timedelta(hours=6).total_seconds())
    for resource_group in resource_groups:
        # BUG FIX: groups created outside this suite can have tags=None,
        # which made the original `.tags.get(...)` raise AttributeError and
        # abort the whole setup. Treat missing tags as an empty dict.
        tags = resource_group.tags or {}
        resource_create_ts = int(tags.get('create_ts', now_ts))
        if resource_group.name.startswith(RESOURCE_GROUP_NAME_PREFIX) and \
                resource_group.location.lower() == location.lower() and \
                'test' in tags and resource_create_ts <= delete_threshold_ts:
            print("Deleting old stray resource group: %s..." % (resource_group.name))
            try:
                resource_client.resource_groups.begin_delete(resource_group.name)
            except Exception as e:
                print("Failed to delete resource group: %s" % (str(e)), file=sys.stderr)

    group = resource_client.resource_groups.create_or_update(
        resource_group_name=name,
        parameters=resource_models.ResourceGroup(
            location=location,
            tags={
                'test': cls.__name__,
                'create_ts': str(now_ts),
                'gh_run_id': os.getenv('GITHUB_RUN_ID', 'unknown'),
                'gh_job_id': os.getenv('GITHUB_JOB_ID', 'unknown'),
                'gh_sha': os.getenv('GITHUB_SHA', 'unknown'),
                'gh_ref': os.getenv('GITHUB_REF', 'unknown'),
            },
        ),
        timeout=timeout,
    )
    # Tear the group (and everything in it) down when the class finishes.
    cls.addClassCleanup(lambda: resource_client.resource_groups
                        .begin_delete(group.name)
                        .result(timeout))
    account = storage_client.storage_accounts.begin_create(
        resource_group_name=group.name,
        account_name=name,
        parameters=storage_models.StorageAccountCreateParameters(
            sku=storage_models.Sku(name=storage_models.SkuName.STANDARD_LRS),
            access_tier=cls.access_tier,
            kind=cls.kind,
            location=location,
        ),
    ).result(timeout)
    keys = storage_client.storage_accounts.list_keys(
        resource_group_name=group.name,
        account_name=account.name,
        timeout=timeout,
    )
    cls.account = account.name
    cls.secret = keys.keys[0].value
def setUpClass(cls):
    """Provision a fresh Azure resource group + storage account for the tests.

    Skips the class when any optional Azure SDK piece or required
    environment variable is missing; registers a class-level cleanup that
    deletes the resource group afterwards.
    """
    if identity is None:
        raise unittest.SkipTest('missing azure-identity library')
    if resource is None or resource_models is None:
        raise unittest.SkipTest('missing azure-mgmt-resource library')
    if storage is None or storage_models is None:
        raise unittest.SkipTest('missing azure-mgmt-storage library')
    # All four variables must be set, otherwise the class is skipped.
    config = {
        key: os.getenv(key)
        for key in (
            'AZURE_TENANT_ID',
            'AZURE_SUBSCRIPTION_ID',
            'AZURE_CLIENT_ID',
            'AZURE_CLIENT_SECRET',
        )
    }
    for key, value in config.items():
        if not value:
            raise unittest.SkipTest('missing environment variable %s' % key)
    credentials = identity.ClientSecretCredential(
        tenant_id=config['AZURE_TENANT_ID'],
        client_id=config['AZURE_CLIENT_ID'],
        client_secret=config['AZURE_CLIENT_SECRET'],
    )
    resource_client = resource.ResourceManagementClient(
        credentials,
        config['AZURE_SUBSCRIPTION_ID'],
    )
    storage_client = storage.StorageManagementClient(
        credentials,
        config['AZURE_SUBSCRIPTION_ID'],
    )
    location = os.getenv('AZURE_LOCATION', DEFAULT_AZURE_LOCATION)
    # Randomized name shared by the resource group and the storage account,
    # padded to the maximum storage-account name length.
    name = 'libcloud'
    name += random_string(MAX_STORAGE_ACCOUNT_NAME_LENGTH - len(name))
    timeout = float(
        os.getenv('AZURE_TIMEOUT_SECONDS', DEFAULT_TIMEOUT_SECONDS))
    group = resource_client.resource_groups.create_or_update(
        resource_group_name=name,
        parameters=resource_models.ResourceGroup(
            location=location,
            # Tag with the test class and CI run id so stray groups can be
            # traced back to their origin.
            tags={
                'test': cls.__name__,
                'run': os.getenv('GITHUB_RUN_ID', '-'),
            },
        ),
        timeout=timeout,
    )
    # Tear the group (and everything in it) down when the class finishes.
    cls.addClassCleanup(lambda: resource_client.resource_groups.
                        begin_delete(group.name).result(timeout))
    # .result() blocks until the long-running create completes.
    account = storage_client.storage_accounts.begin_create(
        resource_group_name=group.name,
        account_name=name,
        parameters=storage_models.StorageAccountCreateParameters(
            sku=storage_models.Sku(
                name=storage_models.SkuName.STANDARD_LRS),
            access_tier=cls.access_tier,
            kind=cls.kind,
            location=location,
        ),
    ).result(timeout)
    keys = storage_client.storage_accounts.list_keys(
        resource_group_name=group.name,
        account_name=account.name,
        timeout=timeout,
    )
    # Credentials the test methods use to reach the provisioned account.
    cls.account = account.name
    cls.secret = keys.keys[0].value