def run_example():
    """Provision a load-balanced pair of VMs behind one public IP.

    Builds, in order: resource group, public IP, load balancer (frontend
    IP config, backend pool, health probe, LB rule, two inbound NAT
    rules), VNet + subnet, two NICs, an availability set, a storage
    account and two VMs. Prints login info for each VM, then deletes the
    whole resource group once the user presses enter.
    """
    # All management clients share one service-principal credential.
    subscription_id = os.environ.get(
        'AZURE_SUBSCRIPTION_ID',
        '11111111-1111-1111-1111-111111111111')  # your Azure Subscription Id
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    resource_client = ResourceManagementClient(credentials, subscription_id)
    compute_client = ComputeManagementClient(credentials, subscription_id)
    storage_client = StorageManagementClient(credentials, subscription_id)
    network_client = NetworkManagementClient(credentials, subscription_id)

    # Resource group that will contain every resource created below.
    print('Create Resource Group')
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {'location': LOCATION})

    # Static public IP with a DNS label; the LB frontend binds to it.
    print('Create Public IP')
    pip_poller = network_client.public_ip_addresses.create_or_update(
        GROUP_NAME,
        PUBLIC_IP_NAME,
        {
            'location': LOCATION,
            'public_ip_allocation_method': 'static',
            'dns_settings': {
                'domain_name_label': DOMAIN_LABEL_NAME
            },
            'idle_timeout_in_minutes': 4
        })
    public_ip_info = pip_poller.result()

    # Frontend IP configuration referencing the public IP above.
    print('Create FrontEndIpPool configuration')
    frontend_ip_configurations = [{
        'name': FIP_NAME,
        'private_ip_allocation_method': 'Dynamic',
        'public_ip_address': {'id': public_ip_info.id}
    }]

    # Backend pool the VM NICs will join.
    print('Create BackEndAddressPool configuration')
    backend_address_pools = [{'name': ADDRESS_POOL_NAME}]

    # HTTP health probe used by the LB rule.
    print('Create HealthProbe configuration')
    probes = [{
        'name': PROBE_NAME,
        'protocol': 'Http',
        'port': 80,
        'interval_in_seconds': 15,
        'number_of_probes': 4,
        'request_path': 'healthprobe.aspx'
    }]

    # Port-80 load-balancing rule tying frontend, backend pool and probe
    # together (ids are constructed, since the LB does not exist yet).
    print('Create LoadBalancerRule configuration')
    load_balancing_rules = [{
        'name': LB_RULE_NAME,
        'protocol': 'tcp',
        'frontend_port': 80,
        'backend_port': 80,
        'idle_timeout_in_minutes': 4,
        'enable_floating_ip': False,
        'load_distribution': 'Default',
        'frontend_ip_configuration': {'id': construct_fip_id(subscription_id)},
        'backend_address_pool': {'id': construct_bap_id(subscription_id)},
        'probe': {'id': construct_probe_id(subscription_id)}
    }]

    # Two NAT rules, one per VM, mapping distinct frontend ports to the
    # same backend port.
    print('Create InboundNATRule1 configuration')
    inbound_nat_rules = [{
        'name': NETRULE_NAME_1,
        'protocol': 'tcp',
        'frontend_port': FRONTEND_PORT_1,
        'backend_port': BACKEND_PORT,
        'enable_floating_ip': False,
        'idle_timeout_in_minutes': 4,
        'frontend_ip_configuration': {'id': construct_fip_id(subscription_id)}
    }]
    print('Create InboundNATRule2 configuration')
    inbound_nat_rules.append({
        'name': NETRULE_NAME_2,
        'protocol': 'tcp',
        'frontend_port': FRONTEND_PORT_2,
        'backend_port': BACKEND_PORT,
        'enable_floating_ip': False,
        'idle_timeout_in_minutes': 4,
        'frontend_ip_configuration': {'id': construct_fip_id(subscription_id)}
    })

    print('Creating Load Balancer')
    lb_poller = network_client.load_balancers.create_or_update(
        GROUP_NAME,
        LB_NAME,
        {
            'location': LOCATION,
            'frontend_ip_configurations': frontend_ip_configurations,
            'backend_address_pools': backend_address_pools,
            'probes': probes,
            'load_balancing_rules': load_balancing_rules,
            'inbound_nat_rules': inbound_nat_rules
        })
    lb_info = lb_poller.result()

    ##############################################################
    # From here, we create the VM and link the LB inside the NIC #
    ##############################################################

    print('Create Vnet')
    network_client.virtual_networks.create_or_update(
        GROUP_NAME,
        VNET_NAME,
        {
            'location': LOCATION,
            'address_space': {'address_prefixes': ['10.0.0.0/16']}
        }).wait()

    subnet_info = network_client.subnets.create_or_update(
        GROUP_NAME, VNET_NAME, SUBNET_NAME,
        {'address_prefix': '10.0.0.0/24'}).result()

    # NICs: each joins the LB backend pool and gets its own NAT rule.
    back_end_address_pool_id = lb_info.backend_address_pools[0].id
    print('Creating NetworkInterface 1')
    nic1_poller = network_client.network_interfaces.create_or_update(
        GROUP_NAME,
        VMS_INFO[1]['nic_name'],
        create_nic_parameters(subnet_info.id, back_end_address_pool_id,
                              lb_info.inbound_nat_rules[0].id))
    print('Creating NetworkInterface 2')
    nic2_poller = network_client.network_interfaces.create_or_update(
        GROUP_NAME,
        VMS_INFO[2]['nic_name'],
        create_nic_parameters(subnet_info.id, back_end_address_pool_id,
                              lb_info.inbound_nat_rules[1].id))
    nic1_info = nic1_poller.result()
    nic2_info = nic2_poller.result()

    # Availability set shared by both VMs.
    print('Create availability set')
    availability_set_info = compute_client.availability_sets.create_or_update(
        GROUP_NAME, AVAILABILITY_SET_NAME, {'location': LOCATION})

    # Storage account backing the VM disks.
    print('Create a storage account')
    storage_client.storage_accounts.create(
        GROUP_NAME,
        STORAGE_ACCOUNT_NAME,
        {
            'sku': {'name': 'standard_lrs'},
            'kind': 'storage',
            'location': LOCATION
        }).wait()

    # VMs are created serially; each wait() blocks until provisioned.
    print('Creating Virtual Machine 1')
    compute_client.virtual_machines.create_or_update(
        GROUP_NAME,
        VMS_INFO[1]['name'],
        create_vm_parameters(nic1_info.id, availability_set_info.id,
                             VMS_INFO[1])).wait()
    print('Creating Virtual Machine 2')
    compute_client.virtual_machines.create_or_update(
        GROUP_NAME,
        VMS_INFO[2]['name'],
        create_vm_parameters(nic2_info.id, availability_set_info.id,
                             VMS_INFO[2])).wait()

    provide_vm_login_info_to_user(1, public_ip_info, FRONTEND_PORT_1,
                                  VMS_INFO[1])
    provide_vm_login_info_to_user(2, public_ip_info, FRONTEND_PORT_2,
                                  VMS_INFO[2])

    input("Press enter to delete this Resource Group.")

    # Tear everything down by deleting the containing resource group.
    print('Delete Resource Group')
    resource_client.resource_groups.delete(GROUP_NAME).wait()
    print("\nDeleted: {}".format(GROUP_NAME))
def deploy_template():
    """Deploy the full lab: resource group, NSGs, VNet/subnets, public IPs,
    three VMs (DC, SQL, SP) and their DSC extensions.

    Independent Azure calls are parallelised with thread pools; subnet
    creation is deliberately serial because parallel subnet creation races
    on the VNet.
    """
    import copy  # local import: used only for the per-worker VM dict copies

    credentials, subscription_id = get_credentials()
    resource_client = ResourceManagementClient(credentials, subscription_id)
    compute_client = ComputeManagementClient(credentials, subscription_id)
    network_client = NetworkManagementClient(credentials, subscription_id)

    # Resource group (create_or_update is idempotent).
    print(f'Check Resource Group {GROUP_NAME}')
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {'location': LOCATION})

    # Network security groups, one per subnet, created in parallel.
    create_network_security_groups_parameters = [
        [network_client, NSG_PARAMETER["nsg-VNet-DC-Name"]],
        [network_client, NSG_PARAMETER["nsg-VNet-SQL-Name"]],
        [network_client, NSG_PARAMETER["nsg-VNet-SP-Name"]],
    ]
    pool = ThreadPool(3)
    network_security_groups_info = pool.starmap(
        create_network_security_groups,
        create_network_security_groups_parameters)
    pool.close()
    pool.join()

    # VNet: reuse it when it already exists.
    try:
        vnet_info = network_client.virtual_networks.get(
            GROUP_NAME, NETWORK_REFERENCE['vNetPrivateName'])
        print(f"\nFound Vnet {vnet_info.name}")
    except CloudError:
        print(f"\nCreate Vnet {NETWORK_REFERENCE['vNetPrivateName']}")
        vnet_creation = network_client.virtual_networks.create_or_update(
            GROUP_NAME,
            NETWORK_REFERENCE['vNetPrivateName'],
            {
                'location': LOCATION,
                'address_space': {
                    'address_prefixes': [NETWORK_REFERENCE['vNetPrivatePrefix']]
                }
            }
        )
        vnet_info = vnet_creation.result()

    def _nsg_named(name):
        # First created NSG whose name matches; StopIteration if missing.
        return next(nsg_info for nsg_info in network_security_groups_info
                    if nsg_info.name == name)

    create_subnet_parameters = [
        [network_client, vnet_info.name,
         _nsg_named(NSG_PARAMETER["nsg-VNet-DC-Name"]),
         NETWORK_REFERENCE["dc"]],
        [network_client, vnet_info.name,
         _nsg_named(NSG_PARAMETER["nsg-VNet-SQL-Name"]),
         NETWORK_REFERENCE["sql"]],
        [network_client, vnet_info.name,
         _nsg_named(NSG_PARAMETER["nsg-VNet-SP-Name"]),
         NETWORK_REFERENCE["sp"]],
    ]
    # Only 1 worker on purpose: creating subnets in parallel fails, see
    # https://github.com/terraform-providers/terraform-provider-azurerm/issues/3780
    pool = ThreadPool(1)
    subnets_info = pool.starmap(create_subnet, create_subnet_parameters)
    pool.close()
    pool.join()

    # Public IP addresses, one per VM, in parallel.
    # BUGFIX: the original copied each VM dict via
    # str(...).replace("'", '"') + json.loads, which corrupts values
    # containing quotes and fails outright on Python booleans (True/False
    # are not valid JSON). copy.deepcopy gives each worker a safe,
    # independent copy.
    create_pip_parameters = [
        [network_client, copy.deepcopy(VM_REFERENCE[vm])]
        for vm in VM_REFERENCE
    ]
    pool = ThreadPool(3)
    pips_info = pool.starmap(create_pip, create_pip_parameters)
    pool.close()
    pool.join()

    def _subnet_id(ref):
        # Resource id of the subnet created for this role.
        return next(subnet_info for subnet_info in subnets_info
                    if subnet_info.name == ref["vNetPrivateSubnetName"]).id

    def _pip_named(name):
        # Public IP object created above for this VM.
        return next(pip_info for pip_info in pips_info
                    if pip_info.name == name)

    # Virtual machines: the DC gets a static private IP, SQL/SP dynamic.
    create_vm_parameters = [
        [
            compute_client, network_client, VM_REFERENCE["dc"],
            {
                'location': LOCATION,
                'ip_configurations': [{
                    'name': 'ipconfig1',
                    'private_ip_allocation_method': 'Static',
                    'private_ip_address': '10.0.1.4',
                    'subnet': {'id': _subnet_id(NETWORK_REFERENCE["dc"])},
                    'public_ip_address':
                        _pip_named(VM_REFERENCE["dc"]['vmPublicIPName'])
                }]
            },
            True, VM_DSC_REFERENCE["dc"]
        ],
        [
            compute_client, network_client, VM_REFERENCE["sql"],
            {
                'location': LOCATION,
                'ip_configurations': [{
                    'name': 'ipconfig1',
                    'private_ip_allocation_method': 'Dynamic',
                    'subnet': {'id': _subnet_id(NETWORK_REFERENCE["sql"])},
                    'public_ip_address':
                        _pip_named(VM_REFERENCE["sql"]['vmPublicIPName'])
                }]
            },
            False, VM_DSC_REFERENCE["sql"]
        ],
        [
            compute_client, network_client, VM_REFERENCE["sp"],
            {
                'location': LOCATION,
                'ip_configurations': [{
                    'name': 'ipconfig1',
                    'private_ip_allocation_method': 'Dynamic',
                    'subnet': {'id': _subnet_id(NETWORK_REFERENCE["sp"])},
                    'public_ip_address':
                        _pip_named(VM_REFERENCE["sp"]['vmPublicIPName'])
                }]
            },
            False, VM_DSC_REFERENCE["sp"]
        ],
    ]
    pool = ThreadPool(3)
    pool.starmap(create_vm, create_vm_parameters)
    pool.close()
    pool.join()

    # Start the SQL and SP DSC extensions and wait for them to finish.
    create_vm_extension_parameters = [
        [compute_client, VM_REFERENCE["sql"]["vmName"], VM_DSC_REFERENCE["sql"]],
        [compute_client, VM_REFERENCE["sp"]["vmName"], VM_DSC_REFERENCE["sp"]],
    ]
    pool = ThreadPool(2)
    # create_vm_extension returns a tuple per VM:
    #   [0] LROPoller when the DSC extension was (re)created during this
    #       execution of the script, otherwise None
    #   [1] the name of the VM
    vmExtensions_status = list(pool.starmap(create_vm_extension,
                                            create_vm_extension_parameters))
    pool.close()
    pool.join()

    # Keep only the items whose poller is still live (isinstance rather
    # than `type(...) is` — same result here, more idiomatic).
    vmExtensions_status = [vmExtension_status
                           for vmExtension_status in vmExtensions_status
                           if isinstance(vmExtension_status[0], LROPoller)]
    if len(vmExtensions_status) > 0:
        print(f'\nWaiting for {len(vmExtensions_status)} '
              'DSC configuration to complete...')
        pool = ThreadPool(len(vmExtensions_status))
        pool.starmap(check_vm_extension_status, vmExtensions_status)
        pool.close()
        pool.join()
def check_subscription(tenant_id, tenant_name, sub_id, sub_name, creds):
    """Scan one subscription for publicly accessible storage containers.

    Lists every storage account in every resource group, resolves each
    account's 'key1', and checks its containers for public access. One
    CSV row per public container is written to
    'public-containers-<date>.csv'. Aborts (without writing the CSV) if
    the caller lacks permission to list storage account keys.

    Args:
        tenant_id / tenant_name: AAD tenant identifiers for the report.
        sub_id / sub_name: subscription id and display name.
        creds: credential object accepted by the management clients.
    """
    print("\n\t[*] Checking subscription {}:".format(sub_name), flush=True)
    storage_client = StorageManagementClient(creds, sub_id)
    # Obtain the management object for resources
    resource_client = ResourceManagementClient(creds, sub_id)

    # Retrieve the list of resource groups
    group_list = resource_client.resource_groups.list()
    resource_groups = [group.name for group in group_list]
    print("\t\t[+] Found {} resource groups".format(len(resource_groups)),
          flush=True)

    # Map: resource group -> {storage account name -> account key}.
    group_to_names_dict = {group: dict() for group in resource_groups}
    accounts_counter = 0
    for group in resource_groups:
        for item in storage_client.storage_accounts.list_by_resource_group(
                group):
            accounts_counter += 1
            group_to_names_dict[group][item.name] = ''
    print("\t\t[+] Found {} storage accounts".format(accounts_counter),
          flush=True)

    # Resolve 'key1' for every account; abort the scan on permission error.
    for group in resource_groups:
        for account in group_to_names_dict[group]:
            try:
                storage_keys = storage_client.storage_accounts.list_keys(
                    group, account)
                storage_keys = {v.key_name: v.value
                                for v in storage_keys.keys}
                group_to_names_dict[group][account] = storage_keys['key1']
            except azure.core.exceptions.HttpResponseError:
                print(
                    "\t\t[-] User do not have permissions to retrieve storage accounts keys in the given"
                    " subscription", flush=True)
                print("\t\t Can not scan storage accounts", flush=True)
                return

    # Enumerate public containers and collect one report row per container.
    output_list = list()
    for group in resource_groups:
        for account, key in group_to_names_dict[group].items():
            public_containers = check_storage_account(account, key)
            for cont in public_containers:
                access_level = cont.public_access
                container_client = ContainerClient(
                    ENDPOINT_URL.format(account), cont.name, credential=key)
                files = [f.name for f in container_client.list_blobs()]
                ext_dict = count_files_extensions(files, EXTENSIONS)
                row = [
                    tenant_id, tenant_name, sub_id, sub_name, group, account,
                    cont.name, access_level,
                    CONTAINER_URL.format(account, cont.name),
                    len(files)
                ]
                # One extra column per tracked extension (plus "others").
                row.extend(ext_dict.values())
                output_list.append(row)
    print("\t\t[+] Scanned all storage accounts successfully", flush=True)

    if len(output_list) > 0:
        print("\t\t[+] Found {} PUBLIC containers".format(len(output_list)),
              flush=True)
    else:
        # BUGFIX: flush=True added for consistency with every other
        # progress line in this function.
        print("\t\t[+] No PUBLIC containers found", flush=True)

    header = [
        "Tenant ID", "Tenant Name", "Subscription ID", "Subscription Name",
        "Resource Group", "Storage Account", "Container",
        "Public Access Level", "URL", "Total Files"
    ]
    for ext in EXTENSIONS:
        header.append(ext)
    header.append("others")
    write_csv('public-containers-{}.csv'.format(date.today()), header,
              output_list)
# NOTE(review): these values apparently arrive as 1-element lists
# (argparse-style); unwrap them here — confirm against the caller.
subscription_id = subscription_id[0]
source = source[0]
target = target[0]

# This program creates this resource group. If it's an existing resource
# group, comment out the code that creates the resource group
#rg_name = 'poc-westeurope-gp-data-rg'
rg_name = 'poc-westeurope-gp-data-rg'

# The data factory name. It must be globally unique.
#df_name = 'poc-westeurope-gp-data-df-atradius'
df_name = 'poc-westeurope-gp-data-df'

# Authenticate with the Azure CLI's cached login; returns a
# (credentials, subscription_id) tuple consumed positionally below.
#credentials = ServicePrincipalCredentials(client_id=client_id, secret=secret, tenant=tenant)
credentials = get_azure_cli_credentials()
resource_client = ResourceManagementClient(credentials[0], credentials[1])
adf_client = DataFactoryManagementClient(credentials[0], credentials[1])

# Common parameters for the resource group and data factory creation.
rg_params = {'location':'westeurope'}
df_params = {'location':'westeurope'}

# Create database linked service
ls_tgt_name = 'tgtazuresqldb'  # name of the Azure SQL DB linked service (copy target)

# Create an Azure Storage linked service
ls_src_name = 'srcgrpblob'  # name of the blob storage linked service (copy source)

# Parameter File with list of tables to copy.
with open('param_tables.json') as json_param_file:
    table_name = json.load(json_param_file)
def main():
    """End-to-end ACR pipeline sample.

    Creates a resource group and a Premium container registry, then
    creates, reads and deletes an import pipeline and an export pipeline,
    and finally deletes the resource group.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    PIPELINE = "pipelinexxyyzz"
    REGISTRIES = "registriesxxyyzz"

    # Create client
    #
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    containerregistry_client = ContainerRegistryManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID,
        api_version="2019-12-01-preview")

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {"location": "eastus"})

    # - init depended resources -
    # Import/export pipelines require a Premium-SKU registry.
    registries = containerregistry_client.registries.begin_create(
        GROUP_NAME,
        REGISTRIES,
        {
            "location": "eastus",
            "tags": {"key": "value"},
            "sku": {"name": "Premium"},
            "admin_user_enabled": True
        }).result()
    # - end -

    # Create import pipeline
    pipeline = containerregistry_client.import_pipelines.begin_create(
        GROUP_NAME,
        REGISTRIES,
        PIPELINE,
        {
            "location": "eastus",
            "identity": {
                "type": "SystemAssigned"
                # "user_assigned_identities": {}
            },
            "source": {
                "type": "AzureStorageBlobContainer",
                "uri": "https://accountname.blob.core.windows.net/containername",
                "key_vault_uri": "https://myvault.vault.azure.net/secrets/acrimportsas"
            },
            "options": ["OverwriteTags", "DeleteSourceBlobOnSuccess", "ContinueOnErrors"]
        }).result()
    print("Create import pipeline:\n{}".format(pipeline))

    # Create export pipeline
    pipeline = containerregistry_client.export_pipelines.begin_create(
        GROUP_NAME,
        REGISTRIES,
        PIPELINE,
        {
            "location": "eastus",
            "identity": {"type": "SystemAssigned"},
            "target": {
                "type": "AzureStorageBlobContainer",
                "uri": "https://accountname.blob.core.windows.net/containername",
                "key_vault_uri": "https://myvault.vault.azure.net/secrets/acrexportsas"
            },
            "options": ["OverwriteBlobs"]
        }).result()
    # BUGFIX: this message previously said "Create import pipeline"
    # (copy-paste error) even though it reports the export pipeline.
    print("Create export pipeline:\n{}".format(pipeline))

    # Get import pipeline
    pipeline = containerregistry_client.import_pipelines.get(
        GROUP_NAME, REGISTRIES, PIPELINE)
    print("Get import pipeline:\n{}".format(pipeline))

    # Get export pipeline
    pipeline = containerregistry_client.export_pipelines.get(
        GROUP_NAME, REGISTRIES, PIPELINE)
    print("Get export pipeline:\n{}".format(pipeline))

    # Delete import pipeline
    containerregistry_client.import_pipelines.begin_delete(
        GROUP_NAME, REGISTRIES, PIPELINE).result()
    print("Delete import pipeline.\n")

    # Delete export pipeline
    containerregistry_client.export_pipelines.begin_delete(
        GROUP_NAME, REGISTRIES, PIPELINE).result()
    print("Delete export pipeline.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def main():
    """Automation webhook sample.

    Creates an automation account and a runbook, then creates, gets,
    updates and deletes a webhook for that runbook, and finally deletes
    the resource group.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    WEBHOOK = "webhookxxyyzz"
    AUTOMATION_ACCOUNT = "automationaccountxxyyzz"
    RUNBOOK = "Get-AzureVMTutorial"

    # Create client
    #
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    automation_client = AutomationClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {"location": "eastus"})

    # - init depended resources -
    # Create automation account
    automation_account = automation_client.automation_account.create_or_update(
        GROUP_NAME,
        AUTOMATION_ACCOUNT,
        {
            "sku": {"name": "Free"},
            "name": AUTOMATION_ACCOUNT,
            "location": "East US 2"
        })
    print("Create automation account:\n{}".format(automation_account))

    # Create runbook; content is published from the quickstart repo and
    # pinned by SHA256.
    runbook = automation_client.runbook.create_or_update(
        GROUP_NAME,
        AUTOMATION_ACCOUNT,
        RUNBOOK,
        {
            "log_verbose": False,
            "log_progress": True,
            "runbook_type": "PowerShellWorkflow",
            "publish_content_link": {
                "uri": "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/0.0.0.3/101-automation-runbook-getvms/Runbooks/Get-AzureVMTutorial.ps1",
                "content_hash": {
                    "algorithm": "SHA256",
                    "value": "4fab357cab33adbe9af72ae4b1203e601e30e44de271616e376c08218fd10d1c"
                },
            },
            "description": "Description of the Runbook",
            "log_activity_trace": "1",
            "name": RUNBOOK,
            "location": "East US 2",
            "tags": {
                "tag01": "value01",
                "tag02": "value02"
            }
        })
    print("Create runbook:\n{}".format(runbook))
    # - end -

    # Create webhook
    # CONSISTENCY FIX: the body's "name" previously said "TestWebhook",
    # which did not match the WEBHOOK path parameter used by the
    # get/update/delete calls below.
    webhook = automation_client.webhook.create_or_update(
        GROUP_NAME,
        AUTOMATION_ACCOUNT,
        WEBHOOK,
        {
            "name": WEBHOOK,
            "is_enabled": True,
            "uri": "https://s1events.azure-automation.net/webhooks?token=7u3KfQvM1vUPWaDMFRv2%2fAA4Jqx8QwS8aBuyO6Xsdcw%3d",
            "expiry_time": "2021-03-29T22:18:13.7002872Z",
            "runbook": {"name": RUNBOOK}
        })
    print("Create webhook:\n{}".format(webhook))

    # Get webhook
    webhook = automation_client.webhook.get(
        GROUP_NAME, AUTOMATION_ACCOUNT, WEBHOOK)
    print("Get webhook:\n{}".format(webhook))

    # Update webhook
    webhook = automation_client.webhook.update(
        GROUP_NAME,
        AUTOMATION_ACCOUNT,
        WEBHOOK,
        {
            "name": WEBHOOK,
            "is_enabled": False,
            "description": "updated webhook"
        })
    print("Update webhook:\n{}".format(webhook))

    # Delete webhook
    automation_client.webhook.delete(GROUP_NAME, AUTOMATION_ACCOUNT, WEBHOOK)
    print("Delete webhook.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def run_example():
    """Azure NetApp Files SDK management example.

    Creates an ANF account configured for SMB (joined to Active
    Directory), a capacity pool and an SMB volume. When SHOULD_CLEANUP is
    true, tears everything down again, innermost resources first.
    """
    print_header("Azure NetAppFiles Python SDK Sample - Sample "
                 "project that creates a SMB Volume with Azure NetApp "
                 "Files SDK with Python")

    # Getting Active Directory Identity's password
    domain_join_user_password = getpass(
        ("Please type Active Directory's user password that will "
         "domain join ANF's SMB server and press [ENTER]:"))

    if len(domain_join_user_password) == 0:
        # Message typo fixed: "ocurred" -> "occurred".
        console_output('An error occurred. Password cannot be empty string')
        raise Exception('Password cannot be empty string')

    # Creating the Azure NetApp Files Client with an Application
    # (service principal) token provider
    credentials, subscription_id = sample_utils.get_credentials()
    anf_client = AzureNetAppFilesManagementClient(credentials,
                                                  subscription_id)

    # Checking if vnet/subnet information leads to a valid resource
    resources_client = ResourceManagementClient(credentials, subscription_id)
    SUBNET_ID = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
        subscription_id, VNET_RESOURCE_GROUP_NAME, VNET_NAME, SUBNET_NAME)
    result = resource_exists(resources_client, SUBNET_ID,
                             VIRTUAL_NETWORKS_SUBNET_API_VERSION)
    if not result:
        # Message fixed: previously read "Subnet not with id {} not found".
        console_output("ERROR: Subnet with id {} not found".format(SUBNET_ID))
        raise Exception(
            "Subnet not found error. Subnet Id {}".format(SUBNET_ID))

    # Creating an Azure NetApp Account.
    # Note: active_directories is a list, but only one active directory
    # configuration is supported per subscription and region at the time
    # this sample was first published.
    active_directories = [
        ActiveDirectory(dns=DNS_LIST,
                        domain=AD_FQDN,
                        username=DOMAIN_JOIN_USERNAME,
                        password=domain_join_user_password,
                        smb_server_name=SMB_SERVERNAME_PREFIX)
    ]

    console_output('Creating Azure NetApp Files account ...')
    account = None
    try:
        account = create_account(anf_client, RESOURCE_GROUP_NAME,
                                 ANF_ACCOUNT_NAME, LOCATION,
                                 active_directories)
        console_output(
            '\tAccount successfully created, resource id: {}'.format(
                account.id))
    except CloudError as ex:
        console_output('An error occurred. Error details: {}'.format(
            ex.message))
        raise

    # Creating a Capacity Pool
    console_output('Creating Capacity Pool ...')
    capacity_pool = None
    try:
        capacity_pool = create_capacitypool(anf_client, RESOURCE_GROUP_NAME,
                                            account.name, CAPACITYPOOL_NAME,
                                            CAPACITYPOOL_SERVICE_LEVEL,
                                            CAPACITYPOOL_SIZE, LOCATION)
        console_output(
            '\tCapacity Pool successfully created, resource id: {}'.format(
                capacity_pool.id))
    except CloudError as ex:
        console_output('An error occurred. Error details: {}'.format(
            ex.message))
        raise

    # Creating a Volume.
    # Note: with the exception of Accounts, the Name property of every
    # resource is a relative path up to the name ("account/pool"); only
    # the leaf name may be passed to other methods (Get, Delete, ...), so
    # it is parsed out of the resource id first.
    console_output('Creating a Volume ...')
    volume = None
    try:
        # NOTE(review): this call uses get_anf_capacity_pool while the
        # cleanup code below uses get_anf_capacitypool — confirm which
        # spelling resource_uri_utils actually exports; one of the two is
        # likely a typo and would raise AttributeError.
        pool_name = resource_uri_utils.get_anf_capacity_pool(capacity_pool.id)
        volume = create_volume(anf_client, RESOURCE_GROUP_NAME, account.name,
                               pool_name, VOLUME_NAME, VOLUME_USAGE_QUOTA,
                               CAPACITYPOOL_SERVICE_LEVEL, SUBNET_ID,
                               LOCATION)
        console_output('\tVolume successfully created, resource id: {}'.format(
            volume.id))
        console_output('\t====> SMB Server FQDN: {}'.format(
            volume.mount_targets[0].additional_properties["smbServerFQDN"]))
    except CloudError as ex:
        console_output('An error occurred. Error details: {}'.format(
            ex.message))
        raise

    # Cleaning up — enabled via the SHOULD_CLEANUP variable. Deletion must
    # start from the innermost resources down the hierarchy chain:
    # Snapshots -> Volumes -> Capacity Pools -> Accounts. Volume deletion
    # operations at the RP level are executed serially.
    if SHOULD_CLEANUP:
        console_output('Cleaning up...')

        console_output("\tDeleting Volumes...")
        try:
            volume_ids = [volume.id]
            for volume_id in volume_ids:
                console_output("\t\tDeleting {}".format(volume_id))
                anf_client.volumes.delete(
                    RESOURCE_GROUP_NAME,
                    account.name,
                    resource_uri_utils.get_anf_capacitypool(capacity_pool.id),
                    resource_uri_utils.get_anf_volume(volume_id)).wait()
                # ARM workaround to wait for the deletion to complete and
                # propagate.
                sample_utils.wait_for_no_anf_resource(anf_client, volume_id)
                console_output('\t\tDeleted Volume: {}'.format(volume_id))
        except CloudError as ex:
            console_output('An error occurred. Error details: {}'.format(
                ex.message))
            raise

        # Cleaning up Capacity Pool
        console_output("\tDeleting Capacity Pool {} ...".format(
            resource_uri_utils.get_anf_capacitypool(capacity_pool.id)))
        try:
            anf_client.pools.delete(
                RESOURCE_GROUP_NAME,
                account.name,
                resource_uri_utils.get_anf_capacitypool(
                    capacity_pool.id)).wait()
            # ARM workaround to wait for the deletion to complete and
            # propagate.
            sample_utils.wait_for_no_anf_resource(anf_client,
                                                  capacity_pool.id)
            console_output('\t\tDeleted Capacity Pool: {}'.format(
                capacity_pool.id))
        except CloudError as ex:
            console_output('An error occurred. Error details: {}'.format(
                ex.message))
            raise

        # Cleaning up Account
        console_output("\tDeleting Account {} ...".format(account.name))
        try:
            anf_client.accounts.delete(RESOURCE_GROUP_NAME, account.name)
            console_output('\t\tDeleted Account: {}'.format(account.id))
        except CloudError as ex:
            console_output('An error occurred. Error details: {}'.format(
                ex.message))
            raise
def resource_mgmt_client(self):
    """Lazily build and cache the ResourceManagementClient.

    BUGFIX: the original tested self.__resource_mgmt_client but assigned
    and returned self.__resoure_mgmt_client (missing 'c'), so the checked
    attribute stayed None and a brand-new client was constructed on every
    call. Both sides now use the same attribute, making the cache work.
    """
    if self.__resource_mgmt_client is None:
        self.__resource_mgmt_client = ResourceManagementClient(
            self.sp_credentials(), self.subscription())
    return self.__resource_mgmt_client
        'template': template,
        'parameters': params
    }
    resource_client.deployments.create_or_update(
        server_id, "deployment_" + server_id, properties)


def get_all_tks_resource_groups():
    # Return [{"server_id": ..., "service_type": ...}] for every resource
    # group carrying a 'service_type' tag.
    # NOTE(review): relies on the module-level `resource_client` defined
    # at the bottom of this block; names resolve at call time, so the
    # definition order is fine.
    paged = resource_client.resource_groups.list(
        filter="tagName eq 'service_type'")
    return [{
        "server_id": g.name,
        "service_type": g.tags["service_type"]
    } for g in paged]


def get_resource_service_type(server_id: str):
    # Map the resource group's 'service_type' tag to a ServiceType member;
    # falls through (returning None implicitly) when the group or tag is
    # missing.
    print("get service_type for " + server_id)
    group = resource_client.resource_groups.get(server_id)
    # NOTE(review): if group.tags is None this membership test raises
    # TypeError — confirm groups always carry tags here.
    if group is not None and 'service_type' in group.tags:
        tagValue = group.tags['service_type']
        print("value is " + tagValue)
        return ServiceType[tagValue]
    print("group is None or no service_type tag")


# Shared ARM client used by the helpers above.
resource_client = ResourceManagementClient(AuthInfo.credentials,
                                           AuthInfo.subscription_id)
def main():
    """Activity-log-alert sample: create, get, patch and delete an
    activity log alert, then delete the resource group.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    ACTIVITY_LOG_ALERT_NAME = "activitylogalertx"

    # Create client
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    # BUGFIX: the azure-identity class is DefaultAzureCredential (no
    # trailing 's'); DefaultAzureCredentials raised NameError at runtime.
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    monitor_client = MonitorClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {"location": "eastus"})

    # Create activity log alert
    # BUGFIX: ARM scopes are full resource ids and start with '/';
    # "subscriptions/..." without the leading slash is not a valid scope.
    log_alert = monitor_client.activity_log_alerts.create_or_update(
        GROUP_NAME,
        ACTIVITY_LOG_ALERT_NAME,
        {
            "location": "Global",
            "scopes": ["/subscriptions/" + SUBSCRIPTION_ID],
            "enabled": True,
            "condition": {
                "all_of": [
                    {"field": "category", "equals": "Administrative"},
                    {"field": "level", "equals": "Error"}
                ]
            },
            "actions": {"action_groups": []},
            "description": "Sample activity log alert description"
        })
    print("Create activity log alert:\n{}".format(log_alert))

    # Get activity log alert
    log_alert = monitor_client.activity_log_alerts.get(
        GROUP_NAME, ACTIVITY_LOG_ALERT_NAME)
    print("Get activity log alert:\n{}".format(log_alert))

    # Patch activity log alert
    # NOTE(review): nesting "enabled" under "properties" may be ignored by
    # the SDK's flattened patch model — confirm against the installed
    # azure-mgmt-monitor version.
    log_alert = monitor_client.activity_log_alerts.update(
        GROUP_NAME,
        ACTIVITY_LOG_ALERT_NAME,
        {
            "tags": {"key1": "value1", "key2": "value2"},
            "properties": {"enabled": False}
        })
    print("Update activity log alert:\n{}".format(log_alert))

    # Delete activity log alert
    monitor_client.activity_log_alerts.delete(
        GROUP_NAME, ACTIVITY_LOG_ALERT_NAME)
    print("Delete activity log alert.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def run_example(config):
    """Resource Group management example against an Azure Stack cloud.

    Authenticates a service principal against the stamp's ARM endpoint,
    lists/creates/updates a resource group, creates a Key Vault via the
    generic resources API, exports the group template, and always
    attempts to delete the group on the way out.

    Args:
        config: dict with 'resourceManagerUrl', 'subscriptionId',
            'location', 'clientId', 'clientSecret' and 'tenantId' keys.
    """
    # BUGFIX: 'client' must exist before the try block; otherwise an
    # exception raised before its assignment made the finally clause fail
    # with UnboundLocalError, masking the original error.
    client = None
    try:
        logging.basicConfig(level=logging.ERROR)

        mystack_cloud = get_cloud_from_metadata_endpoint(
            config['resourceManagerUrl'])
        subscription_id = config['subscriptionId']
        # Azure stack location
        location = config['location']

        credentials = ClientSecretCredential(
            client_id=config['clientId'],
            client_secret=config['clientSecret'],
            tenant_id=config['tenantId'],
            authority=mystack_cloud.endpoints.active_directory
        )

        # Azure Stack requires the 2020-09-01 hybrid API profile.
        KnownProfiles.default.use(KnownProfiles.v2020_09_01_hybrid)
        scope = ("openid profile offline_access" + " "
                 + mystack_cloud.endpoints.active_directory_resource_id
                 + "/.default")
        client = ResourceManagementClient(
            credentials, subscription_id,
            base_url=mystack_cloud.endpoints.resource_manager,
            credential_scopes=[scope])

        #
        # Managing resource groups
        #
        resource_group_params = {'location': location}

        # List Resource Groups
        print('List Resource Groups')
        for item in client.resource_groups.list():
            print_item(item)

        # Create Resource group
        print('Create Resource Group')
        print_item(client.resource_groups.create_or_update(
            GROUP_NAME, resource_group_params))

        # Modify the Resource group
        print('Modify Resource Group')
        resource_group_params.update(tags={'hello': 'world'})
        print_item(client.resource_groups.create_or_update(
            GROUP_NAME, resource_group_params))

        # Create a Key Vault in the Resource Group
        print('Create a Key Vault via a Generic Resource Put')
        key_vault_params = {
            'location': location,
            'properties': {
                'sku': {'family': 'A', 'name': 'standard'},
                'tenantId': config['tenantId'],
                'accessPolicies': [],
                'enabledForDeployment': True,
                'enabledForTemplateDeployment': True,
                'enabledForDiskEncryption': True
            }
        }
        client.resources.begin_create_or_update(
            resource_group_name=GROUP_NAME,
            resource_provider_namespace="Microsoft.KeyVault",
            parent_resource_path="",
            resource_type="vaults",
            # Time suffix keeps the vault name unique across runs.
            resource_name='azureSampleVault' + datetime.utcnow().strftime("-%H%M%S"),
            parameters=key_vault_params,
            api_version="2016-10-01"
        ).result()

        # List Resources within the group
        print('List all of the resources within the group')
        for item in client.resources.list_by_resource_group(GROUP_NAME):
            print_item(item)

        # Export the Resource group template
        print('Export Resource Group Template')
        BODY = {'resources': ['*']}
        rgTemplate = client.resource_groups.begin_export_template(
            GROUP_NAME, BODY).result()
        print(rgTemplate.template)
        print('\n\n')

    finally:
        # Delete Resource group and everything in it — but only when the
        # client was actually constructed.
        if client is not None:
            print('Delete Resource Group')
            client.resource_groups.begin_delete(GROUP_NAME).result()
            print("\nDeleted: {}".format(GROUP_NAME))
def main():
    """SQL elastic-jobs example: provision a server, database and job agent,
    then create a job credential, target group, job, job step and job
    execution; finally fetch and delete the job and remove the group.

    Requires the SUBSCRIPTION_ID and PASSWORD environment variables.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    PASSWORD = os.environ.get("PASSWORD", None)
    GROUP_NAME = "testgroupx"
    JOB = "jobxxyyzz"
    SERVER = "serverxxyz"
    DATABASE = "databasexxyz"
    JOB_AGENT = "jobagentxx"
    CREDENTIAL = "credentialxx"
    JOB_STEP = "jobstepxx"
    TARGET_GROUP = "targetgroupxx"
    JOB_EXECUTION_ID = "622ffff7-c4be-4c62-8098-3867c5db6427"

    # Create client
    #
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    sql_client = SqlManagementClient(credential=DefaultAzureCredential(),
                                     subscription_id=SUBSCRIPTION_ID)

    # Create resource group
    resource_client.resource_groups.create_or_update(GROUP_NAME,
                                                     {"location": "eastus"})

    # - init depended resources -
    # Create Server
    server = sql_client.servers.begin_create_or_update(
        GROUP_NAME, SERVER, {
            "location": "eastus",
            "administrator_login": "******",
            "administrator_login_password": PASSWORD
        }).result()
    print("Create server:\n{}".format(server))

    # Create database
    database = sql_client.databases.begin_create_or_update(
        GROUP_NAME, SERVER, DATABASE, {
            "location": "eastus",
            "read_scale": "Disabled"
        }).result()
    print("Create database:\n{}".format(database))
    # - end -

    # Create job agent (backed by the database created above)
    agent = sql_client.job_agents.begin_create_or_update(
        GROUP_NAME, SERVER, JOB_AGENT, {
            "location": "eastus",
            "database_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + GROUP_NAME + "/providers/Microsoft.Sql/servers/" + SERVER + "/databases/" + DATABASE
        }).result()
    print("Create job agent:\n{}".format(agent))

    # Create job credential
    credential = sql_client.job_credentials.create_or_update(
        GROUP_NAME, SERVER, JOB_AGENT, CREDENTIAL, {
            "username": "******",
            "password": "******"
        })
    print("Create job credential:\n{}".format(credential))

    # Create job target group (empty member list)
    group = sql_client.job_target_groups.create_or_update(
        GROUP_NAME, SERVER, JOB_AGENT, TARGET_GROUP,
        {"members": []})
    print("Create job target group:\n{}".format(group))

    # Create job with a recurring 5-minute schedule
    job = sql_client.jobs.create_or_update(
        GROUP_NAME, SERVER, JOB_AGENT, JOB, {
            "description": "my favourite job",
            "schedule": {
                "start_time": "2020-10-24T18:30:01Z",
                "end_time": "2020-10-24T23:59:59Z",
                "type": "Recurring",
                "interval": "PT5M",
                "enabled": True
            }
        })
    print("Create job:\n{}".format(job))

    # Create job step referencing the target group and credential by resource id
    step = sql_client.job_steps.create_or_update(
        GROUP_NAME, SERVER, JOB_AGENT, JOB, JOB_STEP, {
            "target_group": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + GROUP_NAME + "/providers/Microsoft.Sql/servers/" + SERVER + "/jobAgents/" + JOB_AGENT + "/targetGroups/" + TARGET_GROUP,
            "credential": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + GROUP_NAME + "/providers/Microsoft.Sql/servers/" + SERVER + "/jobAgents/" + JOB_AGENT + "/credentials/" + CREDENTIAL,
            "action": {
                "value": "select 1"
            }
        })
    print("Create job step:\n{}".format(step))

    # Create job execution
    execution = sql_client.job_executions.begin_create_or_update(
        GROUP_NAME, SERVER, JOB_AGENT, JOB, JOB_EXECUTION_ID).result()
    print("Create execution:\n{}".format(execution))

    # Get job
    job = sql_client.jobs.get(GROUP_NAME, SERVER, JOB_AGENT, JOB)
    print("Get job:\n{}".format(job))

    # Delete job
    job = sql_client.jobs.delete(GROUP_NAME, SERVER, JOB_AGENT, JOB)
    print("Delete job.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def run_example():
    """Storage management example (Azure Stack hybrid profile).

    Creates a resource group and storage account, then exercises the storage
    API: name availability, properties, listing, key listing/regeneration,
    usage — and deletes everything at the end.
    """
    #
    # Create the Resource Manager Client with an Application (service principal) token provider
    #

    # By Default, use AzureStack supported profile
    KnownProfiles.default.use(KnownProfiles.v2018_03_01_hybrid)
    logging.basicConfig(level=logging.ERROR)
    credentials, subscription_id, mystack_cloud = get_credentials()
    resource_client = ResourceManagementClient(
        credentials, subscription_id,
        base_url=mystack_cloud.endpoints.resource_manager)
    storage_client = StorageManagementClient(
        credentials, subscription_id,
        base_url=mystack_cloud.endpoints.resource_manager)

    # You MIGHT need to add Storage as a valid provider for these credentials
    # If so, this operation has to be done only once for each credentials
    resource_client.providers.register('Microsoft.Storage')

    # Create Resource group
    print('Create Resource Group')
    resource_group_params = {'location': LOCATION}
    print_item(
        resource_client.resource_groups.create_or_update(
            GROUP_NAME, resource_group_params))

    # Check availability (a deliberately invalid name, to show the failure path)
    print('Check name availability')
    bad_account_name = 'invalid-or-used-name'
    availability = storage_client.storage_accounts.check_name_availability(
        bad_account_name)
    print('The account {} is available: {}'.format(
        bad_account_name, availability.name_available))
    print('Reason: {}'.format(availability.reason))
    print('Detailed message: {}'.format(availability.message))
    print('\n\n')

    # Create a storage account (long-running; .result() blocks until done)
    print('Create a storage account')
    storage_async_operation = storage_client.storage_accounts.create(
        GROUP_NAME, STORAGE_ACCOUNT_NAME, {
            'sku': {
                'name': 'standard_lrs'
            },
            'kind': 'storage',
            'location': LOCATION
        })
    storage_account = storage_async_operation.result()
    print_item(storage_account)
    print('\n\n')

    # Get storage account properties
    print('Get storage account properties')
    storage_account = storage_client.storage_accounts.get_properties(
        GROUP_NAME, STORAGE_ACCOUNT_NAME)
    print_item(storage_account)
    print("\n\n")

    # List Storage accounts
    print('List storage accounts')
    for item in storage_client.storage_accounts.list():
        print_item(item)
    print("\n\n")

    # List Storage accounts by resource group
    print('List storage accounts by resource group')
    for item in storage_client.storage_accounts.list_by_resource_group(
            GROUP_NAME):
        print_item(item)
    print("\n\n")

    # Get the account keys
    print('Get the account keys')
    storage_keys = storage_client.storage_accounts.list_keys(
        GROUP_NAME, STORAGE_ACCOUNT_NAME)
    # Re-shape the key list into a {key_name: value} dict for easy lookup.
    storage_keys = {v.key_name: v.value for v in storage_keys.keys}
    print('\tKey 1: {}'.format(storage_keys['key1']))
    print('\tKey 2: {}'.format(storage_keys['key2']))
    print("\n\n")

    # Regenerate the account key 1
    print('Regenerate the account key 1')
    storage_keys = storage_client.storage_accounts.regenerate_key(
        GROUP_NAME, STORAGE_ACCOUNT_NAME, 'key1')
    storage_keys = {v.key_name: v.value for v in storage_keys.keys}
    print('\tNew key 1: {}'.format(storage_keys['key1']))
    print("\n\n")

    print_item(storage_account)
    print("\n\n")

    # Delete the storage account
    print('Delete the storage account')
    storage_client.storage_accounts.delete(GROUP_NAME, STORAGE_ACCOUNT_NAME)
    print("\n\n")

    # Delete Resource group and everything in it
    print('Delete Resource Group')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()
    print("Deleted: {}".format(GROUP_NAME))
    print("\n\n")

    # List usage
    print('List usage')
    for usage in storage_client.usage.list():
        print('\t{}'.format(usage.name.value))
def main(): SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None) GROUP_NAME = "testgroupx" NAMESPACE = "namespacexxyyzz" # Create client # # For other authentication approaches, please see: https://pypi.org/project/azure-identity/ resource_client = ResourceManagementClient( credential=DefaultAzureCredential(), subscription_id=SUBSCRIPTION_ID ) notificationhubs_client = NotificationHubsManagementClient( credential=DefaultAzureCredential(), subscription_id=SUBSCRIPTION_ID ) # Create resource group resource_client.resource_groups.create_or_update( GROUP_NAME, {"location": "eastus"} ) # Create namespace namespace = notificationhubs_client.namespaces.create_or_update( GROUP_NAME, NAMESPACE, { "location": "eastus" } ) print("Create namespace:\n{}".format(namespace)) # Get namespace namespace = notificationhubs_client.namespaces.get( GROUP_NAME, NAMESPACE ) while namespace.status == "Created": time.sleep(30) namespace = notificationhubs_client.namespaces.get( GROUP_NAME, NAMESPACE, ) print("Get namespace:\n{}".format(namespace)) # Update namespace namespace = notificationhubs_client.namespaces.patch( GROUP_NAME, NAMESPACE, { "enabled": True } ) print("Update namespace:\n{}".format(namespace)) # Delete namespace notificationhubs_client.namespaces.begin_delete( GROUP_NAME, NAMESPACE ).result() print("Delete namespace.\n") # Delete Group resource_client.resource_groups.begin_delete( GROUP_NAME ).result()
def resource_client(self): return ResourceManagementClient(self.credentials, self.subscription_id)
def run_example():
    """EventGrid example: create a resource group, a topic and a webhook
    event subscription, print the topic keys, then (after user confirmation)
    delete everything.

    Requires AZURE_CLIENT_ID / AZURE_CLIENT_SECRET / AZURE_TENANT_ID and
    optionally AZURE_SUBSCRIPTION_ID in the environment.
    """
    #
    # Create the Resource Manager Client with an Application (service principal) token provider
    #
    subscription_id = os.environ.get(
        'AZURE_SUBSCRIPTION_ID',
        '11111111-1111-1111-1111-111111111111')  # your Azure Subscription Id
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    resource_client = ResourceManagementClient(credentials, subscription_id)
    event_grid_client = EventGridManagementClient(credentials, subscription_id)

    # Create Resource group
    print('\nCreating a Resource Group...')
    resource_group = resource_client.resource_groups.create_or_update(
        GROUP_NAME, {'location': LOCATION})
    print_item(resource_group)

    # Create EventGrid topic
    print('\nCreating an EventGrid topic...')
    topic_result_async_poller = event_grid_client.topics.create_or_update(
        resource_group.name, TOPIC_NAME,
        Topic(location=resource_group.location,
              tags={
                  'key1': 'value1',
                  'key2': 'value2'
              }))
    # Blocking call for the Topic to be created
    topic = topic_result_async_poller.result()  # type: Topic
    print_item(topic)

    # Get the keys for the topic
    print('\nGetting the topic keys...')
    keys = event_grid_client.topics.list_shared_access_keys(
        resource_group.name, topic.name)  # type: TopicSharedAccessKeys
    print('The key1 value of topic {} is: {}'.format(topic.name, keys.key1))

    # Create an event subscription
    print('\nCreating an event subscription')
    event_subscription_name = 'EventSubscription1'
    destination = WebHookEventSubscriptionDestination(
        endpoint_url=ENDPOINT_URL)
    # By default, "All" event types are included
    filter = EventSubscriptionFilter(
        is_subject_case_sensitive=False,
        subject_begins_with='',
        subject_ends_with='')
    event_subscription_info = EventSubscription(destination=destination,
                                                filter=filter)
    # The topic resource id is passed as the subscription scope.
    event_subscription_async_poller = event_grid_client.event_subscriptions.create_or_update(
        topic.id,
        event_subscription_name,
        event_subscription_info,
    )
    # Blocking call for the EventSubscription to be created
    event_subscription = event_subscription_async_poller.result(
    )  # type: EventSubscription
    print_item(event_subscription)

    input("Press enter to delete all created resources.")

    # Delete the EventSubscription
    print('\nDeleting the event subscription')
    delete_async_operation = event_grid_client.event_subscriptions.delete(
        topic.id, event_subscription_name)
    delete_async_operation.wait()
    print("\nDeleted: {}".format(event_subscription_name))

    # Delete the topic
    print('\nDeleting the topic')
    delete_async_operation = event_grid_client.topics.delete(
        resource_group.name, topic.name)
    delete_async_operation.wait()
    print("\nDeleted: {}".format(topic.name))

    # Delete Resource group and everything in it
    print('\nDelete Resource Group')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()
    print("\nDeleted: {}".format(GROUP_NAME))
def run(self):
    #----
    """Resource Group management example.

    Creates a resource group, a storage account and a NIC, then exercises the
    Virtual Machines API: create, tag, attach/detach a data disk, deallocate,
    grow the OS disk, start/restart/stop, and list VMs.

    Service-principal credentials are read from AZURE_CLIENT_ID,
    AZURE_CLIENT_SECRET and AZURE_TENANT_ID; the subscription id from
    AZURE_SUBSCRIPTION_ID.
    """
    #
    # Create all clients with an Application (service principal) token provider
    #
    # SECURITY FIX: the client id, secret and tenant were hard-coded literals
    # committed to source (a leaked credential).  Read them from the
    # environment instead, consistent with the other examples in this file.
    subscription_id = os.environ.get(
        'AZURE_SUBSCRIPTION_ID',
        'ef80a466-7372-49e9-b247-57b95886881c')  # your Azure Subscription Id
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])
    resource_client = ResourceManagementClient(credentials, subscription_id)
    compute_client = ComputeManagementClient(credentials, subscription_id)
    storage_client = StorageManagementClient(credentials, subscription_id)
    network_client = NetworkManagementClient(credentials, subscription_id)

    ###########
    # Prepare #
    ###########

    # Create Resource group
    print('\nCreate Resource Group')
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {'location': LOCATION})

    # Create a storage account
    print('\nCreate a storage account')
    storage_async_operation = storage_client.storage_accounts.create(
        GROUP_NAME, STORAGE_ACCOUNT_NAME, {
            'sku': {
                'name': 'standard_lrs'
            },
            'kind': 'storage',
            'location': LOCATION
        })
    storage_async_operation.wait()

    # Create a NIC
    nic = create_nic(network_client)

    #############
    # VM Sample #
    #############

    # Create Linux VM
    print('\nCreating Linux Virtual Machine')
    vm_parameters = create_vm_parameters(nic.id, VM_REFERENCE['linux'])
    async_vm_creation = compute_client.virtual_machines.create_or_update(
        GROUP_NAME, VM_NAME, vm_parameters)
    async_vm_creation.wait()

    # Tag the VM
    print('\nTag Virtual Machine')
    async_vm_update = compute_client.virtual_machines.create_or_update(
        GROUP_NAME, VM_NAME, {
            'location': LOCATION,
            'tags': {
                'who-rocks': 'python',
                'where': 'on azure'
            }
        })
    async_vm_update.wait()

    # Attach data disk
    print('\nAttach Data Disk')
    async_vm_update = compute_client.virtual_machines.create_or_update(
        GROUP_NAME, VM_NAME, {
            'location': LOCATION,
            'storage_profile': {
                'data_disks': [{
                    'name': 'mydatadisk1',
                    'disk_size_gb': 1,
                    'lun': 0,
                    'vhd': {
                        'uri': "http://{}.blob.core.windows.net/vhds/mydatadisk1.vhd"
                        .format(STORAGE_ACCOUNT_NAME)
                    },
                    'create_option': 'Empty'
                }]
            }
        })
    async_vm_update.wait()

    # Get one the virtual machine by name
    print('\nGet Virtual Machine by Name')
    virtual_machine = compute_client.virtual_machines.get(
        GROUP_NAME, VM_NAME)

    # Detach data disk
    print('\nDetach Data Disk')
    data_disks = virtual_machine.storage_profile.data_disks
    # Filter the disk out in place so the PUT below sends the updated list.
    data_disks[:] = [
        disk for disk in data_disks if disk.name != 'mydatadisk1'
    ]
    async_vm_update = compute_client.virtual_machines.create_or_update(
        GROUP_NAME, VM_NAME, virtual_machine)
    virtual_machine = async_vm_update.result()

    # Deallocating the VM (resize prepare)
    print('\nDeallocating the VM (resize prepare)')
    async_vm_deallocate = compute_client.virtual_machines.deallocate(
        GROUP_NAME, VM_NAME)
    async_vm_deallocate.wait()

    # Update OS disk size by 10Gb
    print('\nUpdate OS disk size')
    # Server is not returning the OS Disk size (None), possible bug in server
    if not virtual_machine.storage_profile.os_disk.disk_size_gb:
        print(
            "\tServer is not returning the OS disk size, possible bug in the server?"
        )
        print("\tAssuming that the OS disk size is 256 GB")
        virtual_machine.storage_profile.os_disk.disk_size_gb = 256

    virtual_machine.storage_profile.os_disk.disk_size_gb += 10
    async_vm_update = compute_client.virtual_machines.create_or_update(
        GROUP_NAME, VM_NAME, virtual_machine)
    virtual_machine = async_vm_update.result()

    # Start the VM
    print('\nStart VM')
    async_vm_start = compute_client.virtual_machines.start(
        GROUP_NAME, VM_NAME)
    async_vm_start.wait()

    # Restart the VM
    print('\nRestart VM')
    async_vm_restart = compute_client.virtual_machines.restart(
        GROUP_NAME, VM_NAME)
    async_vm_restart.wait()

    # Stop the VM
    print('\nStop VM')
    async_vm_stop = compute_client.virtual_machines.power_off(
        GROUP_NAME, VM_NAME)
    async_vm_stop.wait()

    # List VMs in subscription
    print('\nList VMs in subscription')
    for vm in compute_client.virtual_machines.list_all():
        print("\tVM: {}".format(vm.name))

    # List VM in resource group
    print('\nList VMs in resource group')
    for vm in compute_client.virtual_machines.list(GROUP_NAME):
        print("\tVM: {}".format(vm.name))

    # Delete VM
    # print('\nDelete VM')
    # async_vm_delete = compute_client.virtual_machines.delete(
    #     GROUP_NAME, VM_NAME)
    # async_vm_delete.wait()

    return
def main():
    """SQL server vulnerability-assessment example: provision the storage
    account, blob container, SQL server and security alert policy the
    assessment depends on, then create, get and delete the assessment and
    remove the resource group.

    Requires the SUBSCRIPTION_ID and PASSWORD environment variables.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    PASSWORD = os.environ.get("PASSWORD", None)
    GROUP_NAME = "testgroupx"
    VULNERABILITY_ASSESSMENT = "vulnerabilityassessmentxxyyzz"
    SECURITY_ALERT_POLICY_NAME = "securityalertpolicy"
    SERVER = "serverxxy"
    ACCOUNT = "accountxxy"
    CONTAINER = "containerxxy"

    # Create client
    #
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    sql_client = SqlManagementClient(credential=DefaultAzureCredential(),
                                     subscription_id=SUBSCRIPTION_ID)
    # - init depended client -
    storage_client = StorageManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    # - end -

    # Create resource group
    resource_client.resource_groups.create_or_update(GROUP_NAME,
                                                     {"location": "eastus"})

    # - init depended resources -
    # Create storage account
    storageaccount = storage_client.storage_accounts.begin_create(
        GROUP_NAME, ACCOUNT, {
            "sku": {
                "name": "Standard_GRS"
            },
            "kind": "StorageV2",
            "location": "eastus",
            "encryption": {
                "services": {
                    "file": {
                        "key_type": "Account",
                        "enabled": True
                    },
                    "blob": {
                        "key_type": "Account",
                        "enabled": True
                    }
                },
                "key_source": "Microsoft.Storage"
            },
            "tags": {
                "key1": "value1",
                "key2": "value2"
            }
        }).result()
    print("Create storage account:\n{}".format(storageaccount))

    # Create blob container
    container = storage_client.blob_containers.create(GROUP_NAME, ACCOUNT,
                                                      CONTAINER, {})
    print("Create blob container:\n{}".format(container))

    # Regenerate storage account key
    key = storage_client.storage_accounts.regenerate_key(
        GROUP_NAME, ACCOUNT, {"key_name": "key2"})
    # The vulnerability assessment below authenticates to the container with
    # this key.
    ACCESS_KEY = key.keys[0].value
    print("Generate key:\n{}".format(key))

    # Create Server
    server = sql_client.servers.begin_create_or_update(
        GROUP_NAME, SERVER, {
            "location": "eastus",
            "administrator_login": "******",
            "administrator_login_password": PASSWORD
        }).result()
    print("Create server:\n{}".format(server))

    # Create server security alert policy
    policy = sql_client.server_security_alert_policies.begin_create_or_update(
        GROUP_NAME, SERVER, SECURITY_ALERT_POLICY_NAME, {
            "state": "Enabled",
            "email_account_admins": True,
            "disabled_alerts": [],
            "email_addresses": []
        }).result()
    print("Create server security alert policy:\n{}".format(policy))
    # - end -

    # Create vulnerability assessment (scan results land in the container)
    vulnerability_assessment = sql_client.server_vulnerability_assessments.create_or_update(
        GROUP_NAME, SERVER, VULNERABILITY_ASSESSMENT, {
            "storage_container_path": "https://" + ACCOUNT + ".blob.core.windows.net/" + CONTAINER + "/",
            "storage_account_access_key": ACCESS_KEY
        })
    print("Create vulnerability assessment:\n{}".format(
        vulnerability_assessment))

    # Get vulnerability assessment
    vulnerability_assessment = sql_client.server_vulnerability_assessments.get(
        GROUP_NAME, SERVER, VULNERABILITY_ASSESSMENT)
    print("Get vulnerability assessment:\n{}".format(vulnerability_assessment))

    # Delete vulnerability assessment
    vulnerability_assessment = sql_client.server_vulnerability_assessments.delete(
        GROUP_NAME, SERVER, VULNERABILITY_ASSESSMENT)
    print("Delete vulnerability assessment.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def main(): SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None) GROUP_NAME = "testgroupx" ACTION_GROUP_NAME = "actiongroupx" # Create client # For other authentication approaches, please see: https://pypi.org/project/azure-identity/ resource_client = ResourceManagementClient( credential=DefaultAzureCredentials(), subscription_id=SUBSCRIPTION_ID) monitor_client = MonitorClient(credential=DefaultAzureCredentials(), subscription_id=SUBSCRIPTION_ID) # Create resource group resource_client.resource_groups.create_or_update(GROUP_NAME, {"location": "eastus"}) # Create action group action_group = monitor_client.action_groups.create_or_update( GROUP_NAME, ACTION_GROUP_NAME, { "location": "Global", "group_short_name": "sample", "enabled": True, "email_receivers": [{ "name": "John Doe's email", "email_address": "*****@*****.**", "use_common_alert_schema": False }], "sms_receivers": [{ "name": "John Doe's mobile", "country_code": "1", "phone_number": "1234567890" }] }) print("Create action group:\n{}".format(action_group)) # Get action group action_group = monitor_client.action_groups.get(GROUP_NAME, ACTION_GROUP_NAME) print("Get action group:\n{}".format(action_group)) # Update action group action_group = monitor_client.action_groups.update( GROUP_NAME, ACTION_GROUP_NAME, { "tags": { "key1": "value1", "key2": "value2" }, "properties": { "enabled": False } }) print("Update action group:\n{}".format(action_group)) # Delete action group monitor_client.action_groups.delete(GROUP_NAME, ACTION_GROUP_NAME) print("Delete action group:\n{}".format(action_group)) # Delete Group resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def run_example():
    """Web Site management example: create a resource group, a Server Farm
    (App Service plan) and a Site hosted in it, list and fetch sites, then
    (after user confirmation) delete the site and the resource group.

    Requires AZURE_CLIENT_ID / AZURE_CLIENT_SECRET / AZURE_TENANT_ID and
    optionally AZURE_SUBSCRIPTION_ID in the environment.
    """
    #
    # Create the Resource Manager Client with an Application (service principal) token provider
    #
    subscription_id = os.environ.get(
        'AZURE_SUBSCRIPTION_ID',
        '11111111-1111-1111-1111-111111111111')  # your Azure Subscription Id
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID']
    )
    resource_client = ResourceManagementClient(credentials, subscription_id)
    web_client = WebSiteManagementClient(credentials, subscription_id)

    # Create Resource group
    print('Create Resource Group')
    resource_group_params = {'location': 'westus'}
    print_item(resource_client.resource_groups.create_or_update(
        GROUP_NAME, resource_group_params))

    #
    # Create a Server Farm for your WebApp
    #
    print('Create a Server Farm for your WebApp')
    server_farm_async_operation = web_client.server_farms.create_or_update_server_farm(
        GROUP_NAME,
        SERVER_FARM_NAME,
        ServerFarmWithRichSku(
            location=WEST_US,
            sku=SkuDescription(
                name='S1',
                capacity=1,
                tier='Standard'
            )
        )
    )
    server_farm = server_farm_async_operation.result()
    print_item(server_farm)

    #
    # Create a Site to be hosted in the Server Farm
    #
    print('Create a Site to be hosted in the Server Farm')
    site_async_operation = web_client.sites.create_or_update_site(
        GROUP_NAME,
        SITE_NAME,
        Site(
            location=WEST_US,
            server_farm_id=server_farm.id
        )
    )
    site = site_async_operation.result()
    print_item(site)

    #
    # List Sites by Resource Group
    #
    print('List Sites by Resource Group')
    for site in web_client.sites.get_sites(GROUP_NAME).value:
        print_item(site)

    #
    # Get a single Site
    #
    print('Get a single Site')
    site = web_client.sites.get_site(GROUP_NAME, SITE_NAME)
    print_item(site)

    print("Your site and server farm have been created. " \
          "You can now go and visit at http://{}/".format(site.default_host_name))
    input("Press enter to delete the site and server farm.")

    #
    # Delete a Site
    #
    print('Deleting the Site')
    web_client.sites.delete_site(GROUP_NAME, SITE_NAME)

    #
    # Delete the Resource Group
    #
    print('Deleting the resource group')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()
def main():
    """Service Bus geo-disaster-recovery example: pair two premium
    namespaces, wait for the pairing to provision, fail over to the partner,
    then delete the pairing and the resource group.

    Requires the SUBSCRIPTION_ID environment variable.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    NAMESPACE = "myNamespacexxyyzzzy"
    NAMESPACE_PRIMARY = "myNamespacexxyyzzzysecond"
    AUTHORIZATION_RULE_NAME = "myAuthorizationRule"
    DISASTER_RECOVERY_CONFIG = "mydisasterrecovercf"

    # Create client
    #
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    servicebus_client = ServiceBusManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)

    # Create resource group
    resource_client.resource_groups.create_or_update(GROUP_NAME,
                                                     {"location": "eastus"})

    # - init depended resources -
    # Create namespace (geo-DR requires the Premium tier)
    namespace = servicebus_client.namespaces.begin_create_or_update(
        GROUP_NAME, NAMESPACE, {
            "sku": {
                "name": "Premium",
                "tier": "Premium"
            },
            "location": "eastus",
            "tags": {
                "tag1": "value1",
                "tag2": "value2"
            }
        }).result()

    # Create namespace primary (the DR partner, in a different region)
    second_namespace = servicebus_client.namespaces.begin_create_or_update(
        GROUP_NAME, NAMESPACE_PRIMARY, {
            "sku": {
                "name": "Premium",
                "tier": "Premium"
            },
            "location": "westus",
            "tags": {
                "tag1": "value1",
                "tag2": "value2"
            }
        }).result()

    # Create namespace authorization rule
    rule = servicebus_client.namespaces.create_or_update_authorization_rule(
        GROUP_NAME, NAMESPACE, AUTHORIZATION_RULE_NAME,
        {"rights": ["Listen", "Send"]})
    # - end -

    # Check name availability
    result = servicebus_client.disaster_recovery_configs.check_name_availability(
        GROUP_NAME, NAMESPACE, {"name": DISASTER_RECOVERY_CONFIG})

    # Create disaster recovery config (pairs NAMESPACE with its partner)
    disaster_recovery_config = servicebus_client.disaster_recovery_configs.create_or_update(
        GROUP_NAME, NAMESPACE, DISASTER_RECOVERY_CONFIG,
        {"partner_namespace": second_namespace.id})
    print("Create disaster recovery config:\n{}".format(
        disaster_recovery_config))

    # Get disaster recovery config
    disaster_recovery_config = servicebus_client.disaster_recovery_configs.get(
        GROUP_NAME, NAMESPACE, DISASTER_RECOVERY_CONFIG)
    # Poll (at most 10 x 30s) until the pairing finishes provisioning.
    count = 0
    while disaster_recovery_config.provisioning_state != "Succeeded" and count < 10:
        time.sleep(30)
        disaster_recovery_config = servicebus_client.disaster_recovery_configs.get(
            GROUP_NAME, NAMESPACE, DISASTER_RECOVERY_CONFIG)
        count += 1
    print("Get disaster recovery config:\n{}".format(disaster_recovery_config))

    # Fail over (issued against the partner namespace)
    result = servicebus_client.disaster_recovery_configs.fail_over(
        GROUP_NAME, NAMESPACE_PRIMARY, DISASTER_RECOVERY_CONFIG)
    print("Fail over disaster recovery config.\n")

    # Delete disaster recovery config
    # Retry (at most 10 x 30s) on HttpResponseError.
    # NOTE(review): presumably the delete is rejected until the failover has
    # completed server-side — confirm against service behavior.
    count = 0
    while count < 10:
        try:
            disaster_recovery_config = servicebus_client.disaster_recovery_configs.delete(
                GROUP_NAME, NAMESPACE_PRIMARY, DISASTER_RECOVERY_CONFIG)
        except HttpResponseError:
            time.sleep(30)
            count += 1
        else:
            break
    print("Delete disaster recovery config.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def run(self, Group_Name='azure-group', Location='Southeast Asia', VM_Name='Linux-VM'):
    """Create a resource group, storage account, vnet/subnet and NICs, then
    create and start one Linux VM and one Windows VM.

    Service-principal credentials are read from AZURE_CLIENT_ID,
    AZURE_CLIENT_SECRET and AZURE_TENANT_ID; the subscription id from
    AZURE_SUBSCRIPTION_ID.

    NOTE(review): the Group_Name/Location/VM_Name parameters are accepted for
    backward compatibility but unused — the body uses the module-level
    GROUP_NAME/LOCATION/VM_NAME constants; confirm whether they should be
    wired through.
    """
    # SECURITY FIX: the client id, secret and tenant were hard-coded literals
    # committed to source (a leaked credential).  Read them from the
    # environment instead, consistent with the other examples in this file.
    subscription_id = os.environ.get(
        'AZURE_SUBSCRIPTION_ID', 'ef80a466-7372-49e9-b247-57b95886881c')
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID']
    )
    resource_client = ResourceManagementClient(credentials, subscription_id)
    compute_client = ComputeManagementClient(credentials, subscription_id)
    storage_client = StorageManagementClient(credentials, subscription_id)
    network_client = NetworkManagementClient(credentials, subscription_id)

    ###########
    # Prepare #
    ###########

    # Create Resource group
    print('\nCreate Resource Group')
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {'location': LOCATION})

    # Create a storage account
    print('\nCreate a storage account')
    storage_async_operation = storage_client.storage_accounts.create(
        GROUP_NAME, STORAGE_ACCOUNT_NAME, {
            'sku': {'name': 'standard_lrs'},
            'kind': 'storage',
            'location': LOCATION
        }
    )
    storage_async_operation.wait()

    create_vnet(network_client)
    create_subnet(network_client)

    # Create a NIC
    nic = create_nic(network_client, NIC_NAME)

    # Create Linux VM
    print('\nCreating Linux Virtual Machine')
    vm_parameters = create_vm_parameters(nic.id, VM_REFERENCE['linux'], VM_NAME)
    async_vm_creation = compute_client.virtual_machines.create_or_update(
        GROUP_NAME, VM_NAME, vm_parameters)
    async_vm_creation.wait()

    # Start the VM
    print('\nStart Linux Virtual Machine')
    async_vm_start = compute_client.virtual_machines.start(GROUP_NAME, VM_NAME)
    async_vm_start.wait()

    # For windows
    # Create a NIC
    nic = create_nic(network_client, W_NIC_NAME)

    print('\nCreating Windows Virtual Machine')
    # Create Windows VM
    vm_parameters = create_vm_parameters(nic.id, VM_REFERENCE['windows'], W_VM_NAME)
    async_w_vm_creation = compute_client.virtual_machines.create_or_update(
        GROUP_NAME, W_VM_NAME, vm_parameters)
    async_w_vm_creation.wait()

    # Start the VM
    print('\nStart Windows Virtual Machine')
    async_w_vm_start = compute_client.virtual_machines.start(GROUP_NAME, W_VM_NAME)
    async_w_vm_start.wait()

    return
def main():
    """SQL virtual-network-rule example: create a server, virtual network and
    subnet, then create, get and delete a virtual network rule on the server
    and remove the resource group.

    Requires the SUBSCRIPTION_ID and PASSWORD environment variables.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    PASSWORD = os.environ.get("PASSWORD", None)
    GROUP_NAME = "testgroupx"
    VIRTUAL_NETWORK_RULE = "virtual_network_rulexxyyzz"
    SERVER = "serverxxy"
    NETWORK = "networkxxy"
    SUBNET = "subnetxxy"

    # Create client
    #
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)
    sql_client = SqlManagementClient(credential=DefaultAzureCredential(),
                                     subscription_id=SUBSCRIPTION_ID)
    network_client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID)

    # Create resource group
    resource_client.resource_groups.create_or_update(GROUP_NAME,
                                                     {"location": "eastus"})

    # - init depended resources -
    # Create Server
    server = sql_client.servers.begin_create_or_update(
        GROUP_NAME, SERVER, {
            "location": "eastus",
            "administrator_login": "******",
            "administrator_login_password": PASSWORD
        }).result()
    print("Create server:\n{}".format(server))

    # Create virtual network
    network = network_client.virtual_networks.begin_create_or_update(
        GROUP_NAME, NETWORK, {
            'location': "eastus",
            'address_space': {
                'address_prefixes': ['10.0.0.0/16']
            }
        }).result()
    print("Create virtual network:\n{}".format(network))

    # Create subnet
    subnet = network_client.subnets.begin_create_or_update(
        GROUP_NAME, NETWORK, SUBNET, {
            'address_prefix': '10.0.0.0/24',
        }).result()
    print("Create subnet:\n{}".format(subnet))
    # - end -

    # Create virtual network rule referencing the subnet by resource id
    virtual_network_rule = sql_client.virtual_network_rules.begin_create_or_update(
        GROUP_NAME, SERVER, VIRTUAL_NETWORK_RULE, {
            "ignore_missing_vnet_service_endpoint": True,
            "virtual_network_subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + GROUP_NAME + "/providers/Microsoft.Network/virtualNetworks/" + NETWORK + "/subnets/" + SUBNET
        }).result()
    print("Create virtual network rule:\n{}".format(virtual_network_rule))

    # Get virtual network rule
    virtual_network_rule = sql_client.virtual_network_rules.get(
        GROUP_NAME, SERVER, VIRTUAL_NETWORK_RULE)
    print("Get virtual network rule:\n{}".format(virtual_network_rule))

    # Delete virtual network rule
    virtual_network_rule = sql_client.virtual_network_rules.begin_delete(
        GROUP_NAME, SERVER, VIRTUAL_NETWORK_RULE).result()
    print("Delete virtual network rule.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
def _json_response(payload, status_code, extra_headers=None):
    """Serialize *payload* as a JSON HTTP response with the given status."""
    headers = {"Content-Type": "application/json"}
    if extra_headers:
        headers.update(extra_headers)
    return func.HttpResponse(
        body=json.dumps(payload),
        headers=headers,
        status_code=status_code,
    )


def main(req: func.HttpRequest) -> func.HttpResponse:
    """Create (or update) and start an Azure Container Instance group.

    The request body is a YAML container-group definition; ``name`` is
    required, ``location``/``apiVersion`` default to the resource group's
    location and the client's API version. With ``?polling=true`` the
    response is a 202 pointing at the status endpoint; otherwise a 200
    is returned once the group reports Pending/Running.
    """
    polling_param = req.params.get("polling")
    polling = polling_param is not None and polling_param.lower() == "true"

    try:
        body_yaml = req.get_body().decode("utf-8")
        cg_definition = yaml.safe_load(body_yaml)
    # Was `except BaseException`, which also swallows KeyboardInterrupt /
    # SystemExit; Exception still covers decode and YAML parse errors.
    except Exception as ex:
        return _json_response({"message": "Error while parsing yaml:\n\n" + str(ex)}, 400)

    try:
        subscription_id = os.environ["AZURE_SUBSCRIPTION_ID"]
        resource_group = os.environ["AZURE_RESOURCE_GROUP"]
    except KeyError as ex:
        return _json_response({"message": "Error loading environment variable: " + str(ex)}, 500)

    credentials = DefaultAzureCredential()
    aci_client = ContainerInstanceManagementClient(
        subscription_id=subscription_id,
        credential=credentials
    )
    res_client = ResourceManagementClient(
        subscription_id=subscription_id,
        credential=credentials
    )

    cg_name = cg_definition.get('name', None)
    # Fall back to the resource group's own location / the SDK's API version
    # when the YAML omits them, then write them back into the definition.
    location = cg_definition.get('location', None) or res_client.resource_groups.get(resource_group).location
    api_version = cg_definition.get('apiVersion', None) or aci_client._config.api_version
    cg_definition['location'] = location
    cg_definition['apiVersion'] = api_version

    if cg_name is None:
        return _json_response({"message": "Name property missing in yaml definition"}, 400)

    try:
        cg = aci_client.container_groups.get(resource_group, cg_name)
        # NOTE(review): assumes instance_view is populated on the GET —
        # confirm; a terminal state means the group may be (re)started.
        if cg.instance_view.state not in ["Succeeded", "Stopped", "Failed"]:
            # Container Group already running
            return _json_response({
                "message": f"Container Group {cg_name} is already running",
                "name": cg_name,
                "state": cg.instance_view.state
            }, 400)
    except AzureError:
        # container group not found, thats fine
        pass

    try:
        # Create or update the container group via the generic resources API
        # so the raw YAML definition can be passed straight through.
        res_client.resources.begin_create_or_update(
            resource_group_name=resource_group,
            resource_provider_namespace="Microsoft.ContainerInstance",
            parent_resource_path="",
            resource_type="containerGroups",
            resource_name=cg_name,
            api_version=api_version,
            parameters=cg_definition
        )
        # Start the container group and poll until it is truly started.
        aci_client.container_groups.begin_start(resource_group, cg_name)
        cg = aci_client.container_groups.get(resource_group, cg_name)
        while cg.instance_view.state not in ["Pending", "Running"]:
            # NOTE(review): re-issues begin_start on every poll iteration —
            # presumably intentional retry behavior; confirm before changing.
            aci_client.container_groups.begin_start(resource_group, cg_name)
            cg = aci_client.container_groups.get(resource_group, cg_name)
            time.sleep(5)
    except AzureError as ex:
        return _json_response({"message": ex.message}, 400)

    if polling:
        # Redirect the client to the status function for long polling.
        return _json_response(
            {
                "message": f"Container Group {cg_name} started",
                "name": cg_name,
                "state": cg.instance_view.state
            },
            202,
            {"Location": f"https://{os.environ['WEBSITE_HOSTNAME']}/api/status/{cg_name}"},
        )
    # Synchronous caller: just report success.
    return _json_response({
        "message": f"Container Group {cg_name} started",
        "name": cg_name,
        "state": cg.instance_view.state
    }, 200)
# Variable configuration # Resource Group Configuration resource_group = os.environ["resource_group"] location = os.environ["location"] storage_account_name = os.environ["storage_account_name"] container_registry_name = os.environ["container_registry_name"] key_vault_name = os.environ["key_vault_name"] app_insights_name = os.environ["app_insights_name"] workspace_name = os.environ["workspace_name"] # Create Azure Credential object credentials = ServicePrincipalCredentials(client_id=client_id, secret=secret, tenant=tenant) client = ResourceManagementClient(credentials, subscription_id) # Create Resource Group resource_group_param = {"location": location} client.resource_groups.create_or_update(resource_group, resource_group_param) # Create Azure Storage Account storage_account_param = StorageAccountCreateParameters( sku=Sku(name=SkuName.standard_ragrs), kind=Kind.storage, location=location) storage_client = StorageManagementClient(credentials, subscription_id) storage_async_operation = storage_client.storage_accounts.create( resource_group, storage_account_name, storage_account_param) storage_account = storage_async_operation.result() # Create Azure Keyvault key_vault_params = {
def make_resource_client(credentials):
    """Build a ``ResourceManagementClient`` from a credentials wrapper.

    ``credentials`` is expected to expose ``get_service_principal()`` and a
    ``subscription_id`` attribute (project convention — confirm with callers).
    """
    service_principal = credentials.get_service_principal()
    subscription = credentials.subscription_id
    return ResourceManagementClient(service_principal, subscription)
def rotate_autoscalers_cloud(configMap, username, **key_args):
    """Rotate SSH keys on running Azure VMs (autoscalers and flight servers).

    Discovers running VMs per region/resource-group/subscription from
    ``key_args['resource_group_subscriptionid']``, then pushes new keys to
    each via ``ssh_server_command``.

    NOTE(review): this block was recovered from a whitespace-collapsed
    source; the nesting of the ``logging.info`` calls and of the
    ``for vm in to_rotate`` loop was reconstructed and should be confirmed
    against the original file.
    """
    # Service-principal auth for the chosen account, wrapped for the
    # (older, msrest-based) management clients.
    auth = configMap['Global']['azure_credentials'][key_args.get('account')]
    credentials = ClientSecretCredential(auth.get('tenant'),
                                         auth.get('client_id'),
                                         auth.get('secret'))
    wrapped_credential = AzureIdentityCredentialAdapter(credentials)
    # Presumably a list of {region: {resource_group: subscription_id}}
    # mappings — verify against the config schema.
    subscriptions = key_args.get('resource_group_subscriptionid')
    for item in subscriptions:
        to_rotate = []
        for key in item:
            region = key
            for resource_group in item.get(region):
                resource_group_name = resource_group
                sub_id = item.get(region).get(resource_group)
                client = ResourceManagementClient(wrapped_credential, sub_id)
                compute_client = ComputeManagementClient(
                    wrapped_credential, sub_id)
                # Despite the name, this lists all resources in the group;
                # VMs are filtered by type below.
                resource_groups = client.resources.list_by_resource_group(
                    resource_group_name)
                for rg in resource_groups:
                    if rg.type == 'Microsoft.Compute/virtualMachines':
                        try:
                            result = compute_client.virtual_machines.get(
                                resource_group_name, rg.name,
                                expand='instanceView')
                            # statuses[1] is assumed to be the power state
                            # (statuses[0] is provisioning) — TODO confirm.
                            if 'running' in result.instance_view.statuses[
                                    1].display_status:
                                to_rotate.append(rg.name)
                            else:
                                logging.warning(
                                    f'{rg.name} Not in RUNNING state - skipping'
                                )
                        except msrestazure.azure_exceptions.CloudError as e:
                            if 'not found' in e.message:
                                logging.warning(
                                    f'{rg.name} Not Found - skipping')
                logging.info(f'Found the following VMs: {to_rotate}')
                # Build dns names
                for vm in to_rotate:
                    # rotate key for server type
                    markers = []
                    commands = []
                    if 'autoscaler' in vm:
                        # Pick the autoscaler host whose name embeds this
                        # region (dashes stripped).
                        for host in key_args.get('autoscalers'):
                            r = region.replace('-', '')
                            if r in host:
                                key_args['hostname'] = host
                                logging.info(' Writing key to ' +
                                             key_args['hostname'])
                        # Matching private key for the region.
                        for pkey in key_args.get('pkeys'):
                            if region.replace('-', '') in pkey:
                                key_args['pkey'] = pkey
                        # Marker -> command pairs for autoscaler hosts.
                        for marker in key_args.get(
                                'autoscaler_markers_commands'):
                            markers.append(marker)
                            commands.append(
                                key_args.get('autoscaler_markers_commands').get(
                                    marker))
                        key_args['commands'] = commands
                        key_args['markers'] = markers
                        ssh_server_command(configMap, username, **key_args)
                    else:
                        # its a flight server
                        # Hostname template with <SERVER>/<REGION>
                        # placeholders substituted in.
                        key_args['hostname'] = key_args.get('f_host').replace(
                            '<SERVER>', vm).replace('<REGION>',
                                                    region.replace('-', ''))
                        logging.info(' Writing key to ' + key_args['hostname'])
                        for pkey in key_args.get('pkeys'):
                            if region.replace('-', '') in pkey:
                                key_args['pkey'] = pkey
                        # Marker -> command pairs for flight-admin hosts.
                        for marker in key_args.get('fadmin_markers_commands'):
                            markers.append(marker)
                            commands.append(
                                key_args.get('fadmin_markers_commands').get(
                                    marker))
                        key_args['commands'] = commands
                        key_args['markers'] = markers
                        ssh_server_command(configMap, username, **key_args)
tenant = "tenant-id" ) return credentials # calling function credentials = get_credentials() # create resources resource_group_client = ResourceManagementClient( credentials, sub-id ) network_client = NetworkManagementClient( credentials, sub-id ) compute_client = ComputeManagementClient( credentials, sub-id ) # create vm def create_resource_group(resource_group_client): resource_group_params = { "location":LOCATION }
# Mixed Reality account provisioning sample: create a resource group, a
# spatial-anchors account and (continuing beyond this span) a
# remote-rendering account.
AZURE_LOCATION = 'eastus'
RESOURCE_GROUP = "myResourceGroup"

# credentials from environment
# (KeyError at import time if any variable is missing — fail-fast.)
SUBSCRIPTION_ID = os.environ['AZURE_SUBSCRIPTION_ID']
TENANT_ID = os.environ['AZURE_TENANT']
CLIENT_ID = os.environ['AZURE_CLIENT_ID']
CLIENT_SECRET = os.environ['AZURE_SECRET']
ACCOUNT_NAME = "myAccount"

# management client
credentials = ServicePrincipalCredentials(client_id=CLIENT_ID,
                                          secret=CLIENT_SECRET,
                                          tenant=TENANT_ID)
mgmt_client = MixedRealityClient(credentials, SUBSCRIPTION_ID)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)

# CREATE RESOURCE GROUP
print("Creating Resource Group")
resource_client.resource_groups.create_or_update(
    resource_group_name=RESOURCE_GROUP,
    parameters={'location': AZURE_LOCATION})

# /SpatialAnchorsAccounts/put/Create spatial anchor account[put]
print("Create spatial anchor account")
result = mgmt_client.spatial_anchors_accounts.create(
    resource_group_name=RESOURCE_GROUP,
    account_name=ACCOUNT_NAME,
    location=AZURE_LOCATION)

# /RemoteRenderingAccounts/put/Create remote rendering account[put]
# NOTE(review): the create call for this step continues beyond the
# visible span of this file.
print("Create remote rendering account")
import os import azure.functions as func import datetime from dateutil.parser import parse from azure.mgmt.compute import ComputeManagementClient from azure.mgmt.resource import ResourceManagementClient import azure.mgmt.resource.resources.models from azure.common.credentials import ServicePrincipalCredentials credentials = ServicePrincipalCredentials( client_id=os.environ['AZURE_CLIENT_ID'], secret=os.environ['AZURE_CLIENT_SECRET'], tenant=os.environ['AZURE_TENANT_ID']) resource_client = ResourceManagementClient(credentials, os.environ['AZURE_SUBSCRIPTION_ID']) compute_client = ComputeManagementClient(credentials, os.environ['AZURE_SUBSCRIPTION_ID']) def main(req: func.HttpRequest) -> func.HttpResponse: logging.info('ResourceCleanupFunction was called (HTTP Trigger).') rgList = resource_client.resource_groups.list() for rg in rgList: print_item(rg) # In case there are no tags or no tag with 'Project' name, tag the resource group as unknown and set an expiration date if rg.tags != None and 'ExpirationDate' in rg.tags: expDate = datetime.datetime.strptime(rg.tags['ExpirationDate'],