def main():
    '''Main routine: delete the resource group named on the command line.'''
    # exactly one argument (the resource group name) is required
    if len(sys.argv) != 2:
        usage()
    rgname = sys.argv[1]

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermconfig.json in current folder')

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    # authenticate with the service principal credentials
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # delete a resource group and show the raw response
    rgreturn = azurerm.delete_resource_group(access_token, subscription_id, rgname)
    print(rgreturn)
def __init__(self, tenant_id, app_id, app_secret, subscription_id):
    """Cache service principal credentials and fetch an AAD access token."""
    # keep the raw credentials on the instance for later re-authentication
    self.sub_id = subscription_id
    self.tenant_id = tenant_id
    self.app_id = app_id
    self.app_secret = app_secret
    # acquire a bearer token up front so the object is immediately usable
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
def setUp(self):
    """Load Azure config, authenticate, generate container resource names,
    and create a test resource group (asserting HTTP 201)."""
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed: message previously said vmssConfig.json, but azurermconfig.json is the file opened
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    # self.location = config_data['location']  # comment out during preview
    self.location = 'westus'

    # create random resource names
    haik = Haikunator()
    self.rgname = haik.haikunate()
    self.container_name = haik.haikunate(delimiter='')
    self.container_name2 = haik.haikunate(delimiter='')
    self.container_group_name = haik.haikunate(delimiter='')

    # create resource group
    print('Creating resource group: ' + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)
def setUp(self):
    """Load Azure config, create a test resource group, and derive vnet/IP names."""
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed: message previously said vmssConfig.json, but azurermconfig.json is the file opened
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data['location']

    # fixed: haikunate is an instance method - calling it on the Haikunator class raised TypeError
    haik = Haikunator()
    # generate resource group name
    self.rgname = haik.haikunate()

    # create resource group
    print('Creating resource group: ' + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)

    # generate vnet name
    self.vnet = haik.haikunate(delimiter='')
    # generate public ip address names
    self.ipname = self.vnet + 'ip'
    self.lbipname = self.vnet + 'lbip'
def setUp(self):
    """Load Azure config, generate names and an SSH key pair, and create a
    test resource group for the container service tests."""
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed: message previously said vmssConfig.json, but azurermconfig.json is the file opened
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data['location']

    # create resource names
    h = Haikunator()
    self.rgname = h.haikunate()
    self.service_name = h.haikunate(delimiter='')
    self.agent_dns = h.haikunate(delimiter='')
    self.master_dns = h.haikunate(delimiter='')

    # generate RSA Key for container service
    key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537,
                                   key_size=2048)
    self.public_key = key.public_key().public_bytes(serialization.Encoding.OpenSSH,
                                                    serialization.PublicFormat.OpenSSH).decode('utf-8')

    # create resource group
    print('Creating resource group: ' + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)
def __init__(self, tenant_id, app_id, app_secret, subscription_id):
    """Store credentials, initialise empty VMSS bookkeeping, and authenticate."""
    # service principal credentials
    self.sub_id = subscription_id
    self.tenant_id = tenant_id
    self.app_id = app_id
    self.app_secret = app_secret
    # VMSS bookkeeping containers start out empty
    self.vmsslist = []
    self.vmssdict = {}
    self.status = ""
    # obtain an AAD bearer token immediately so API calls can follow
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
def setUp(self):
    """Load Azure config, authenticate, and pick a random resource group name."""
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed: message previously said vmssConfig.json, but azurermconfig.json is the file opened
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data['location']
    # fixed: haikunate is an instance method - calling it on the class raised TypeError
    self.rgname = Haikunator().haikunate()
def getAgents(self):
    """ Get a list of all agents in the Pool. """
    # pull the service principal and target group out of the pool config
    cfg = self.config
    tenant_id = cfg.get('Subscription', 'tenant_id')
    app_id = cfg.get('Subscription', 'app_id')
    app_secret = cfg.get('Subscription', 'app_secret')
    subscription_id = cfg.get('Subscription', 'subscription_id')
    rgname = cfg.get('Group', 'name')
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    # TODO: This assumes only a single VMSS in the resource group; this will not always be
    # the case when there are multiple Agent Pools
    scale_sets = azurerm.list_vm_scale_sets(access_token, subscription_id, rgname)['value']
    # self.log.debug("List of VMSS: " + json.dumps(scale_sets, indent=True))
    vmssname = scale_sets[0]['name']
    self.log.debug("Looking up VMs in VMSS called " + vmssname + " (if this is wrong maybe it is because AgentPool.py currently only supports a single VMSS)")
    vms = azurerm.list_vmss_vms(access_token, subscription_id, rgname, vmssname)
    return vms['value']
def main():
    '''Main routine: list the subscription's resource groups.'''
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed: message previously said vmssConfig.json, but azurermconfig.json is the file opened
        sys.exit("Error: Expecting azurermconfig.json in current folder")
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    sub_id = config_data['subscriptionId']

    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # list resource groups
    resource_groups = azurerm.list_resource_groups(access_token, sub_id)
    for rgname in resource_groups['value']:
        print(rgname['name'] + ', ' + rgname['location'])
    # NOTE(review): a dangling ''' followed this function in the source (likely the start of
    # a truncated commented-out section); removed because it left an unterminated string
def scale(self, capacity):
    """ Scale the agent count up or down to the supplied number. """
    # read the service principal and target group from the pool config
    tenant_id = self.config.get('Subscription', 'tenant_id')
    app_id = self.config.get('Subscription', 'app_id')
    app_secret = self.config.get('Subscription', 'app_secret')
    subscription_id = self.config.get('Subscription', 'subscription_id')
    rg_name = self.config.get('Group', 'name')
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    # TODO: This assumes only a single VMSS in the resource group; this will not always be
    # the case when there are multiple Agent Pools
    vmsslist = azurerm.list_vm_scale_sets(access_token, subscription_id, rg_name)['value']
    # self.log.debug("List of VMSS: " + json.dumps(vmsslist, indent=True))
    vmss_name = vmsslist[0]['name']
    # fixed: wrap capacity in str() - concatenating a non-string capacity raised TypeError
    self.log.debug("Scaling " + vmss_name + " to " + str(capacity) + " VMs (if this is the wrong VMSS it is because AgentPool.py currently only supports a single VMSS)")
    # VM size/tier come from config; tier is the size prefix (e.g. Standard)
    size = self.config.get("ACS", "AgentVMSize")
    tier = self.config.get("ACS", "AgentVMSize").split('_')[0]
    scale_result = azurerm.scale_vmss(access_token, subscription_id, rg_name, vmss_name,
                                      size, tier, capacity)
    self.log.warning("We scaled the cluster, but note that any of the features added will not be present on new machines.")
def setUp(self):
    """Load Azure config, create a test resource group, and pick a storage account name."""
    # Load Azure app defaults
    try:
        with open("azurermconfig.json") as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed: message previously said vmssConfig.json, but azurermconfig.json is the file opened
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()
    tenant_id = config_data["tenantId"]
    app_id = config_data["appId"]
    app_secret = config_data["appSecret"]
    self.subscription_id = config_data["subscriptionId"]
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data["location"]

    # fixed: haikunate is an instance method - calling it on the Haikunator class raised TypeError
    haik = Haikunator()
    self.rgname = haik.haikunate()

    # create resource group
    print("Creating resource group: " + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)

    # storage account name (no delimiter: storage accounts disallow dashes)
    self.storage_account = haik.haikunate(delimiter="")
def main():
    '''Main routine: print every Azure location with its display name and coordinates.'''
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit("Error: Expecting azurermconfig.json in current folder")

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # list locations
    locations = azurerm.list_locations(access_token, subscription_id)
    for location in locations['value']:
        # latitude/longitude are returned as strings by the API, so plain formatting works
        print(location['name'] + ', Display Name: ' + location['displayName'] +
              ', Coords: ' + location['latitude'] + ', ' + location['longitude'])
def main():
    '''Main routine: list available image SKU versions.'''
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed typo in the error message: 'azurermonfig.json' -> 'azurermconfig.json'
        sys.exit("Error: Expecting azurermconfig.json in current folder")
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    '''
    pubs = azurerm.list_publishers(access_token, subscription_id, 'southeastasia')
    for pub in pubs:
        # print(json.dumps(pub, sort_keys=False, indent=2, separators=(',', ': ')))
        print(pub['name'])
    offers = azurerm.list_offers(access_token, subscription_id, 'southeastasia', 'rancher')
    for offer in offers:
        print(json.dumps(offer, sort_keys=False, indent=2, separators=(',', ': ')))
    skus = azurerm.list_skus(access_token, subscription_id, 'southeastasia', 'rancher', 'rancheros')
    for sku in skus:
        print(sku['name'])
    '''
    #print('Versions for CoreOS:')
    # versions = azurerm.list_sku_versions(access_token, subscription_id, 'eastasia', 'CoreOS',
    #                                      'CoreOS', 'Stable')
    versions = azurerm.list_sku_versions(access_token, subscription_id, 'eastus2', 'Canonical',
                                         'UbuntuServer', '16.04-LTS')
    for version in versions:
        print(version['name'])
def main():
    '''Main routine: create a Linux VM plus supporting vnet/IP/NSG/NIC resources.'''
    # validate command line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--vmname', '-n', required=True, action='store', help='Name')
    arg_parser.add_argument('--rgname', '-g', required=True, action='store',
                            help='Resource Group Name')
    arg_parser.add_argument('--user', '-u', required=False, action='store', default='azure',
                            help='Optional username')
    arg_parser.add_argument('--password', '-p', required=False, action='store',
                            help='Optional password')
    arg_parser.add_argument('--sshkey', '-k', required=False, action='store',
                            help='SSH public key')
    arg_parser.add_argument('--sshpath', '-s', required=False, action='store',
                            help='SSH public key file path')
    arg_parser.add_argument('--location', '-l', required=False, action='store',
                            help='Location, e.g. eastus')
    arg_parser.add_argument('--vmsize', required=False, action='store', default='Standard_D1_V2',
                            help='VM size, defaults to Standard_D1_V2')
    arg_parser.add_argument('--dns', '-d', required=False, action='store',
                            help='DNS, e.g. myuniquename')
    arg_parser.add_argument('--vnet', required=False, action='store',
                            help='Optional VNET Name (else first VNET in resource group used)')
    arg_parser.add_argument('--nowait', action='store_true', default=False,
                            help='Do not wait for VM to finish provisioning')
    arg_parser.add_argument('--nonsg', action='store_true', default=False,
                            help='Do not create a network security group on the NIC')
    arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
                            help='Print operational details')
    args = arg_parser.parse_args()

    name = args.vmname
    rgname = args.rgname
    vnet = args.vnet
    location = args.location
    username = args.user
    password = args.password
    sshkey = args.sshkey
    sshpath = args.sshpath
    verbose = args.verbose
    dns_label = args.dns
    no_wait = args.nowait
    no_nsg = args.nonsg
    vmsize = args.vmsize

    # make sure all authentication scenarios are handled
    if sshkey is not None and sshpath is not None:
        sys.exit('Error: You can provide an SSH public key, or a public key file path, not both.')
    if password is not None and (sshkey is not None or sshpath is not None):
        sys.exit('Error: provide a password or SSH key (or nothing), not both')

    use_password = False
    if password is not None:
        use_password = True
    else:
        if sshkey is None and sshpath is None:  # no auth parameters were provided
            # look for ~/.ssh/id_rsa.pub
            home = os.path.expanduser('~')
            sshpath = home + os.sep + '.ssh' + os.sep + 'id_rsa.pub'
            if os.path.isfile(sshpath) is False:
                print('Default public key file not found.')
                use_password = True
                password = Haikunator().haikunate(delimiter=',')  # creates random password
                # NOTE(review): this print was redacted in the source file; reconstructed to
                # report the generated password - confirm against project history
                print('Created new password = ' + password)
            else:
                print('Default public key file found')
    if use_password is False:
        print('Reading public key..')
        if sshkey is None:
            # at this point sshpath should have a valid Value
            with open(sshpath, 'r') as pub_ssh_file_fd:
                sshkey = pub_ssh_file_fd.read()

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit("Error: Expecting azurermconfig.json in current folder")
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # if no location parameter was specified now would be a good time to figure out the location
    if location is None:
        try:
            rgroup = azurerm.get_resource_group(access_token, subscription_id, rgname)
            location = rgroup['location']
        except KeyError:
            print('Cannot find resource group ' + rgname + '. Check connection/authorization.')
            print(json.dumps(rgroup, sort_keys=False, indent=2, separators=(',', ': ')))
            sys.exit()
    print('location = ' + location)

    # get VNET
    print('Getting VNet')
    vnet_not_found = False
    if vnet is None:
        print('VNet not set, checking resource group')
        # get first VNET in resource group
        try:
            vnets = azurerm.list_vnets_rg(access_token, subscription_id, rgname)
            # print(json.dumps(vnets, sort_keys=False, indent=2, separators=(',', ': ')))
            vnetresource = vnets['value'][0]
        except IndexError:
            print('No VNET found in resource group.')
            vnet_not_found = True
            vnet = name + 'vnet'
    else:
        print('Getting VNet: ' + vnet)
        vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
        if 'properties' not in vnetresource:
            print('VNet ' + vnet + ' not found in resource group ' + rgname)
            vnet_not_found = True

    if vnet_not_found is True:
        # create a vnet
        print('Creating vnet: ' + vnet)
        rmresource = azurerm.create_vnet(access_token, subscription_id, rgname, vnet, location,
                                         address_prefix='10.0.0.0/16', nsg_id=None)
        if rmresource.status_code != 201:
            # fixed: error details come from the create_vnet response (rmresource), not from
            # vnetresource, which is unset or stale on this path
            print('Error ' + str(rmresource.status_code) + ' creating VNET. ' + rmresource.text)
            sys.exit()
        vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
    try:
        subnet_id = vnetresource['properties']['subnets'][0]['id']
    except KeyError:
        print('Subnet not found for VNet ' + vnet)
        sys.exit()
    if verbose is True:
        print('subnet_id = ' + subnet_id)

    # create public IP address (with DNS label)
    public_ip_name = name + 'ip'
    if dns_label is None:
        dns_label = name + 'dns'
    print('Creating public ipaddr')
    rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name,
                                        dns_label, location)
    if rmreturn.status_code not in [200, 201]:
        print(rmreturn.text)
        sys.exit('Error: ' + str(rmreturn.status_code) + ' from azurerm.create_public_ip()')
    ip_id = rmreturn.json()['id']
    if verbose is True:
        print('ip_id = ' + ip_id)

    print('Waiting for IP provisioning..')
    waiting = True
    while waiting:
        pip = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name)
        if pip['properties']['provisioningState'] == 'Succeeded':
            waiting = False
        time.sleep(1)

    if no_nsg is True:
        nsg_id = None
    else:
        # create NSG
        nsg_name = name + 'nsg'
        print('Creating NSG: ' + nsg_name)
        rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location)
        if rmreturn.status_code not in [200, 201]:
            print('Error ' + str(rmreturn.status_code) + ' creating NSG. ' + rmreturn.text)
            sys.exit()
        nsg_id = rmreturn.json()['id']

        # create NSG rule for ssh, scp
        nsg_rule = 'ssh'
        print('Creating NSG rule: ' + nsg_rule)
        rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name,
                                           nsg_rule, description='ssh rule',
                                           destination_range='22')
        if rmreturn.status_code not in [200, 201]:
            print('Error ' + str(rmreturn.status_code) + ' creating NSG rule. ' + rmreturn.text)
            sys.exit()

    # create NIC
    nic_name = name + 'nic'
    print('Creating NIC: ' + nic_name)
    rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id,
                                  subnet_id, location, nsg_id=nsg_id)
    if rmreturn.status_code not in [200, 201]:
        # fixed: status_code is an int (concatenation raised TypeError) and the failing
        # operation is NIC creation, not an NSG rule
        print('Error ' + str(rmreturn.status_code) + ' creating NIC. ' + rmreturn.text)
        sys.exit()
    nic_id = rmreturn.json()['id']

    print('Waiting for NIC provisioning..')
    waiting = True
    while waiting:
        nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name)
        if nic['properties']['provisioningState'] == 'Succeeded':
            waiting = False
        time.sleep(1)

    # create VM
    vm_name = name
    #publisher = 'CoreOS'
    #offer = 'CoreOS'
    #sku = 'Stable'
    publisher = 'Canonical'
    offer = 'UbuntuServer'
    sku = '16.04-LTS'
    version = 'latest'
    print('Creating VM: ' + vm_name)
    if use_password is True:
        rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
                                     publisher, offer, sku, version, nic_id, location,
                                     username=username, password=password)
    else:
        rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
                                     publisher, offer, sku, version, nic_id, location,
                                     username=username, public_key=sshkey)
    if rmreturn.status_code != 201:
        # fixed: wrap status_code with str() - int + str concatenation raised TypeError
        sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text)

    if no_wait is False:
        print('Waiting for VM provisioning..')
        waiting = True
        while waiting:
            vm_model = azurerm.get_vm(access_token, subscription_id, rgname, vm_name)
            if vm_model['properties']['provisioningState'] == 'Succeeded':
                waiting = False
            time.sleep(5)
        print('VM provisioning complete.')
    print('Connect with:')
    print('ssh ' + dns_label + '.' + location + '.cloudapp.azure.com -l ' + username)
def setUp(self):
    """Provision everything the VMSS tests need: resource group, vnet,
    public IP, load balancer with NAT pool, and a one-instance VMSS."""
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # fixed: message previously said vmssConfig.json, but azurermconfig.json is the file opened
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data['location']

    # generate names for resources
    self.h = Haikunator()
    self.rgname = self.h.haikunate()
    self.vnet = self.h.haikunate(delimiter='')
    self.vmssname = self.h.haikunate(delimiter='')
    self.setting_name = self.h.haikunate(delimiter='')

    # generate RSA Key for compute resources
    key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537,
                                   key_size=2048)
    self.public_key = key.public_key().public_bytes(serialization.Encoding.OpenSSH,
                                                    serialization.PublicFormat.OpenSSH).decode('utf-8')

    # create resource group
    print('Creating resource group: ' + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)

    # create vnet
    print('Creating vnet: ' + self.vnet)
    response = azurerm.create_vnet(self.access_token, self.subscription_id, self.rgname,
                                   self.vnet, self.location, address_prefix='10.0.0.0/16',
                                   nsg_id=None)
    self.assertEqual(response.status_code, 201)
    self.subnet_id = response.json()['properties']['subnets'][0]['id']

    # create public ip address for VMSS LB
    self.ipname2 = self.vnet + 'ip2'
    print('Creating VMSS LB public ip address: ' + self.ipname2)
    dns_label2 = self.vnet + '2'
    response = azurerm.create_public_ip(self.access_token, self.subscription_id, self.rgname,
                                        self.ipname2, dns_label2, self.location)
    self.assertEqual(response.status_code, 201)
    self.ip2_id = response.json()['id']

    # create load balancer with nat pool for VMSS create
    lb_name = self.vnet + 'lb'
    print('Creating load balancer with nat pool: ' + lb_name)
    response = azurerm.create_lb_with_nat_pool(self.access_token, self.subscription_id,
                                               self.rgname, lb_name, self.ip2_id, '50000',
                                               '50100', '22', self.location)
    self.be_pool_id = response.json()['properties']['backendAddressPools'][0]['id']
    self.lb_pool_id = response.json()['properties']['inboundNatPools'][0]['id']

    # create VMSS
    capacity = 1
    vm_size = 'Standard_D1'
    publisher = 'Canonical'
    offer = 'UbuntuServer'
    sku = '16.04-LTS'
    version = 'latest'
    # NOTE(review): the username literal was redacted ('******') in the source file;
    # kept as-is - restore a real admin username before running
    username = '******'
    password = self.h.haikunate(delimiter=',')
    print('Creating VMSS: ' + self.vmssname + ', capacity = ' + str(capacity))
    response = azurerm.create_vmss(self.access_token, self.subscription_id, self.rgname,
                                   self.vmssname, vm_size, capacity, publisher, offer, sku,
                                   version, self.subnet_id, self.be_pool_id, self.lb_pool_id,
                                   self.location, username=username,
                                   public_key=self.public_key)
def vmss_monitor_thread(window_information, panel_information, window_continents, panel_continents):
    """Launch and supervise the dashboard worker threads (log tail, VMSS
    monitor, command interpreter, optional Insights graphs), then block
    until the global `quit` flag is set and join all threads.

    Args:
        window_information: dict of curses windows keyed by role (e.g. 'log', 'exit').
        panel_information: dict of curses panels matching window_information.
        window_continents / panel_continents: per-continent curses windows/panels
            handed to the VMSS monitoring thread.

    NOTE(review): relies on module-level globals (tenant_id, app_id, app_secret,
    logName, quit, insights* settings) defined elsewhere in the file.
    """
    global access_token, insightsOneEnabled, insightsTwoEnabled;
    # run_event stays set while workers should keep running; clearing it signals shutdown
    run_event = threading.Event()
    run_event.set()
    # start a timer in order to refresh the access token in 10 minutes
    start_time = time.time();
    # get an access token for Azure authentication
    access_token = azurerm.get_access_token(str(tenant_id), str(app_id), str(app_secret));

    # ---= ASCii Dashboard THREADS =---
    # Logtail Thread...
    log_thread = threading.Thread(target=tail_in_window, args=(logName, window_information['log'], panel_information['log'], run_event))
    log_thread.start()
    # VMSS Monitoring Thread...
    vmss_thread = threading.Thread(target=get_vmss_properties, args=(access_token, run_event, window_information, panel_information, window_continents, panel_continents))
    vmss_thread.start()
    # start a CMD Interpreter thread
    cmd_thread = threading.Thread(target=get_cmd, args=(access_token, run_event, window_information, panel_information))
    cmd_thread.start()

    # Simple consistency check for the Insights configuration: each enabled graph
    # needs a URL (shared or its own), a title, and a metric, otherwise disable it.
    if (insightsOneEnabled.lower() == "yes"):
        if ((insightsOneUrl == "" and insightsUrl == "") or (insightsOneTitle == "") or (insightsOneMetric == "")):
            logging.warning("Configuration for insightsOne Graph is inconsistent. You need to configure insightsUrl or insightsOneUrl AND insightsOneTitle AND insightsOneMetric");
            insightsOneEnabled = "No";
    if (insightsTwoEnabled.lower() == "yes"):
        if ((insightsTwoUrl == "" and insightsUrl == "") or (insightsTwoTitle == "") or (insightsTwoMetric == "")):
            logging.warning("Configuration for InsightsTwo Graph is inconsistent. You need to configure insightsUrl or insightsTwoUrl AND insightsTwoTitle AND insightsTwoMetric");
            insightsTwoEnabled = "No";

    # Insights Thread... (only started when at least one graph survived the check above)
    if (insightsOneEnabled.lower() == "yes" or insightsTwoEnabled.lower() == "yes"):
        insights_thread = threading.Thread(target=insights_in_window, args=(logName, window_information, run_event))
        insights_thread.start()
    time.sleep(.2);

    # Main wait loop: poll the global quit flag; raising KeyboardInterrupt funnels
    # both Ctrl-C and a programmatic quit through the same shutdown path.
    try:
        while (quit == 0):
            time.sleep(.1);
            if (quit == 1):
                raise KeyboardInterrupt
    except KeyboardInterrupt:
        # show the exit panel, then stop and join every worker thread
        show_panel(panel_information['exit']);
        update_panels();
        doupdate();
        run_event.clear();
        log_thread.join();
        vmss_thread.join();
        cmd_thread.join();
        if (insightsOneEnabled.lower() == "yes" or insightsTwoEnabled.lower() == "yes"):
            insights_thread.join();
        # confirm clean shutdown in the exit window
        wmove(window_information['exit'], 3, 5);
        wclrtoeol(window_information['exit']);
        box(window_information['exit']);
        write_str_color(window_information['exit'], 3, 6, "Console Update threads successfully closed.", 4, 1);
        update_panels();
        doupdate();
def main():
    '''Main routine: deploy an ARM template from a URI, optionally generating
    random values for selected parameters and waiting for completion.'''
    # validate command line arguments
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--uri', '-u', required=True, action='store', help='Template URI')
    argparser.add_argument('--params', '-f', required=True, action='store', help='Parameters json file')
    argparser.add_argument('--location', '-l', required=True, action='store', help='Location, e.g. eastus')
    argparser.add_argument('--rg', '-g', required=False, action='store', help='Resource Group name')
    argparser.add_argument('--sub', '-s', required=False, action='store', help='Subscription ID')
    argparser.add_argument('--genparams', '-p', required=False, action='store', help='Comma separated list of parameters to generate strings for')
    argparser.add_argument('--wait', '-w', required=False, action='store_true', default=False, help='Wait for deployment to complete and time it')
    argparser.add_argument('--debug', '-d', required=False, action='store_true', default=False, help='Debug mode: print additional deployment')
    args = argparser.parse_args()

    template_uri = args.uri
    params = args.params  # initially the parameters file name; later rebound to the dict
    rgname = args.rg
    location = args.location
    subscription_id = args.sub

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as configfile:
            configdata = json.load(configfile)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermconfig.json in current folder')
    tenant_id = configdata['tenantId']
    app_id = configdata['appId']
    app_secret = configdata['appSecret']
    # command-line subscription takes precedence over the config file
    if subscription_id is None:
        subscription_id = configdata['subscriptionId']

    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # load parameters file
    try:
        with open(params) as params_file:
            param_data = json.load(params_file)
    except FileNotFoundError:
        sys.exit('Error: Expecting ' + params + ' in current folder')

    # prep Haikunator
    haikunator = Haikunator()

    # if there is a genparams argument generate values and merge the list
    if args.genparams is not None:
        newdict = {}
        genlist = args.genparams.split(',')
        for param in genlist:
            # generate a random phrase, include caps and puncs in case it's a passwd
            newval = haikunator.haikunate(delimiter='-').title()
            newdict[param] = {'value': newval}
        # generated values override any same-named entries from the file
        params = {**param_data, **newdict}
    else:
        params = param_data

    # create resource group if not specified
    if rgname is None:
        rgname = haikunator.haikunate()
        ret = azurerm.create_resource_group(access_token, subscription_id, rgname, location)
        print('Creating resource group: ' + rgname + ', location:', location + ', return code:', ret)

    deployment_name = haikunator.haikunate()

    # measure time from beginning of deployment call (after creating resource group etc.)
    start_time = time.time()

    # deploy template and print response
    deploy_return = azurerm.deploy_template_uri(access_token, subscription_id, rgname,
                                                deployment_name, template_uri, params)
    print('Deployment name: ' + deployment_name + ', return code:', deploy_return)
    # crude success test: repr of a requests Response starts with '<Response [2xx]>'
    if 'Response [20' not in str(deploy_return):
        print('Return from deployment: ', deploy_return.text)
        sys.exit('Deployment failed. Exiting.. ')
    if args.debug is True:
        print(json.dumps(deploy_return.json(), sort_keys=False, indent=2, separators=(',', ': ')))

    # show deployment status
    if args.debug is True:
        print('Deployment status:')
        deploy_return = azurerm.show_deployment(access_token, subscription_id, rgname,
                                                deployment_name)
        print(json.dumps(deploy_return, sort_keys=False, indent=2, separators=(',', ': ')))

    # wait for deployment to complete
    if args.wait is True:
        print('Waiting for provisioning to complete..')
        provisioning_state = ''
        try:
            # poll every 10s until the deployment leaves the 'Running' state
            while True:
                time.sleep(10)
                deploy_return = azurerm.show_deployment(access_token, subscription_id, rgname,
                                                        deployment_name)
                provisioning_state = deploy_return['properties']['provisioningState']
                if provisioning_state != 'Running':
                    break
                print('Provisioning state:', provisioning_state)
        except KeyError:
            # response did not have the expected shape - surface it for debugging
            print('Deployment failure:', deploy_return)
        # NOTE(review): elapsed-time reporting placed inside the --wait branch, matching
        # the flag's 'and time it' help text - confirm against original formatting
        elapsed_time = time.time() - start_time
        print('Elapsed time:', elapsed_time)
def main():
    '''Swap the public IP addresses of two Azure load balancers (VIP swap).

    Steps: create a spare "float" public IP, park lb2 on it, move lb2's
    original IP to lb1 (this is the downtime window), move lb1's original
    IP to lb2, then delete the float IP.
    '''
    # create parser
    argParser = argparse.ArgumentParser()

    # arguments: resource group, lb name 1, 2
    argParser.add_argument('--resourcegroup', '-g', required=True, dest='resource_group',
                           action='store', help='Resource group name')
    argParser.add_argument('--lb1', '-1', required=True, action='store',
                           help='Load balancer 1 name')
    argParser.add_argument('--lb2', '-2', required=True, action='store',
                           help='Load balancer 2 name')
    argParser.add_argument('--verbose', '-v', action='store_true', default=False,
                           help='Show additional information')
    argParser.add_argument('-y', dest='noprompt', action='store_true', default=False,
                           help='Do not prompt for confirmation')
    args = argParser.parse_args()

    verbose = args.verbose  # print extra status information when True
    resource_group = args.resource_group
    lb1 = args.lb1
    lb2 = args.lb2

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as configFile:
            configdata = json.load(configFile)
    except FileNotFoundError:
        # FIX: message previously said lbconfig.json, but the file actually
        # opened is azurermconfig.json
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()

    tenant_id = configdata['tenantId']
    app_id = configdata['appId']
    app_secret = configdata['appSecret']
    subscription_id = configdata['subscriptionId']
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # figure out location of resource group and use that for the float ip
    rg = azurerm.get_resource_group(access_token, subscription_id, resource_group)
    location = rg['location']

    # Create a spare public IP address
    ip_name = Haikunator().haikunate(delimiter='')
    dns_label = ip_name + 'dns'
    print('Creating float public IP: ' + ip_name)
    ip_ret = azurerm.create_public_ip(access_token, subscription_id, resource_group,
                                      ip_name, dns_label, location)
    floatip_id = ip_ret.json()['id']
    if verbose is True:
        print('Float ip id = ' + floatip_id)

    # 1. Get lb 2
    lbmodel2 = azurerm.get_load_balancer(access_token, subscription_id, resource_group, lb2)
    lb2_ip_id = lbmodel2['properties']['frontendIPConfigurations'][0]['properties']['publicIPAddress']['id']
    lb2_ip_name = lb2_ip_id.split('publicIPAddresses/', 1)[1]
    if verbose is True:
        print(lb2 + ' ip id: ' + lb2_ip_id)
        print(lb2 + ' model:')
        print(json.dumps(lbmodel2, sort_keys=False, indent=2, separators=(',', ': ')))

    # 2. Assign new ip to lb 2
    print('Updating ' + lb2 + ' ip to float ip: ' + ip_name)
    lbmodel2['properties']['frontendIPConfigurations'][0]['properties']['publicIPAddress']['id'] = floatip_id
    ret = azurerm.update_load_balancer(access_token, subscription_id, resource_group, lb2,
                                       json.dumps(lbmodel2))
    if ret.status_code != 200:
        handle_bad_update("updating " + lb2, ret)
    if verbose is True:
        print('original ip id: ' + lb2_ip_id + ', new ip id: ' + floatip_id)
        # FIX: ret is a requests Response and is not JSON serializable;
        # dump its parsed JSON body instead of the Response object
        print(json.dumps(ret.json(), sort_keys=False, indent=2, separators=(',', ': ')))

    # FIX: message typo 'unnassigned' -> 'unassigned'
    print('Waiting for old ' + lb2 + ' ip: ' + lb2_ip_name + ' to be unassigned')
    start1 = time.time()
    while True:
        lbmodel2 = azurerm.get_load_balancer(access_token, subscription_id, resource_group, lb2)
        # FIX: break before sleeping so we don't wait 3s after success
        if lbmodel2['properties']['provisioningState'] == 'Succeeded':
            break
        time.sleep(3)
    end1 = time.time()
    print('Elapsed time: ' + str(int(end1 - start1)))

    # 3. Get lb 1
    lbmodel1 = azurerm.get_load_balancer(access_token, subscription_id, resource_group, lb1)
    lb1_ip_id = lbmodel1['properties']['frontendIPConfigurations'][0]['properties']['publicIPAddress']['id']
    if verbose is True:
        print(lb1 + ' ip id: ' + lb1_ip_id)
        print(lb1 + ' model:')
        print(json.dumps(lbmodel1, sort_keys=False, indent=2, separators=(',', ': ')))
    lb1_ip_name = lb1_ip_id.split('publicIPAddresses/', 1)[1]

    # 4. Assign old ip 2 to lb 1 — this is where the downtime window starts
    print('Downtime begins: Updating ' + lb1 + ' ip to ' + lb2_ip_name)
    start2 = time.time()
    lbmodel1['properties']['frontendIPConfigurations'][0]['properties']['publicIPAddress']['id'] = lb2_ip_id
    ret = azurerm.update_load_balancer(access_token, subscription_id, resource_group, lb1,
                                       json.dumps(lbmodel1))
    if ret.status_code != 200:
        handle_bad_update("updating " + lb1, ret)
    if verbose is True:
        print(json.dumps(ret.json(), sort_keys=False, indent=2, separators=(',', ': ')))

    print('Waiting for old ' + lb1 + ' ip: ' + lb1_ip_name + ' to be unassigned')
    while True:
        lbmodel1 = azurerm.get_load_balancer(access_token, subscription_id, resource_group, lb1)
        if lbmodel1['properties']['provisioningState'] == 'Succeeded':
            break
        time.sleep(3)
    end2 = time.time()
    print('Staging IP ' + lb2_ip_name + ' now points to old production LB ' + lb1)
    print('Elapsed time: ' + str(int(end2 - start1)))

    # 5. Assign old ip 1 to lb 2
    print('Updating ' + lb2 + ' ip to ' + lb1_ip_name)
    lbmodel2['properties']['frontendIPConfigurations'][0]['properties']['publicIPAddress']['id'] = lb1_ip_id
    ret = azurerm.update_load_balancer(access_token, subscription_id, resource_group, lb2,
                                       json.dumps(lbmodel2))
    if ret.status_code != 200:
        handle_bad_update("updating " + lb2, ret)
    if verbose is True:
        print('Original ip id: ' + lb2_ip_id + ', new ip id: ' + lb1_ip_id)
        print(json.dumps(ret.json(), sort_keys=False, indent=2, separators=(',', ': ')))

    print('Waiting for ' + lb2 + ' provisioning to complete')
    while True:
        lbmodel2 = azurerm.get_load_balancer(access_token, subscription_id, resource_group, lb2)
        if lbmodel2['properties']['provisioningState'] == 'Succeeded':
            break
        time.sleep(3)
    end3 = time.time()

    # 6. Delete floatip
    print('VIP swap complete')
    print('Downtime: ' + str(int(end3 - start2)) + '. Total elapsed time: ' +
          str(int(end3 - start1)))
    print('Deleting float ip: ' + ip_name)
    azurerm.delete_public_ip(access_token, subscription_id, resource_group, ip_name)
def main():
    '''Plot average CPU percentage over time for a VM scale set.

    Loads service principal credentials from azurermconfig.json, queries
    Azure Monitor host metrics for the named scale set, and displays a
    matplotlib graph of the 'Percentage CPU' metric.
    '''
    # create argument parser
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--vmss', '-n', required=True, action='store',
                           help='Scale set name')
    argParser.add_argument('--resourcegroup', '-g', required=True, dest='resource_group',
                           action='store', help='Resource group name')
    argParser.add_argument('--verbose', '-v', action='store_true', default=False,
                           help='Print verbose metrics output to console')
    args = argParser.parse_args()

    verbose = args.verbose  # dump metrics JSON output to console when True
    vmssname = args.vmss
    rgname = args.resource_group

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as configFile:
            configData = json.load(configFile)
    except FileNotFoundError:
        # FIX: message previously had a typo ("azurermonfig.json")
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()

    tenant_id = configData['tenantId']
    app_id = configData['appId']
    app_secret = configData['appSecret']
    subscription_id = configData['subscriptionId']

    # get Azure access token
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # get host metrics for the named VM scale set
    provider = 'Microsoft.Compute'
    resource_type = 'virtualMachineScaleSets'
    metrics = azurerm.get_metrics_for_resource(access_token, subscription_id, rgname,
                                               provider, resource_type, vmssname)

    if verbose:  # FIX: idiomatic truth test instead of '== True'
        print(json.dumps(metrics, sort_keys=False, indent=2, separators=(',', ': ')))

    # sort metrics into CPU and timestamp lists for easy plotting
    # FIX: initialize the lists before the loop so a response without a
    # 'Percentage CPU' metric cannot cause a NameError at plot time
    timestamp_list = []
    cpu_list = []
    for metric in metrics['value']:
        if metric['name']['value'] == 'Percentage CPU':
            for data in metric['data']:
                if 'average' in data:
                    cpu_list.append(data['average'])
                    # convert timestamp from 2016-11-26T06:26:00Z to python datetime format
                    dt = datetime.datetime.strptime(data['timeStamp'], '%Y-%m-%dT%H:%M:%SZ')
                    timestamp_list.append(dt)
            break

    # set figure title and graph style
    fig = pyplot.gcf()
    fig.canvas.set_window_title('Host metrics graph')
    pyplot.style.use('ggplot')

    # plot values
    pyplot.plot(timestamp_list, cpu_list)
    pyplot.gcf().autofmt_xdate()
    pyplot.ylim([0, 100])

    # label axis and graph
    pyplot.ylabel('CPU Percentage')
    pyplot.xlabel('Time stamp (UTC)')
    pyplot.title('Avg CPU over time for scale set: ' + vmssname)

    # display
    pyplot.show()
def main():
    '''Plot average CPU percentage over time for a VM scale set.

    Loads service principal credentials from vmssconfig.json, queries Azure
    Monitor host metrics for the named scale set, and displays a matplotlib
    graph of the 'Percentage CPU' metric.
    '''
    # create argument parser
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--vmss', '-s', required=True, action='store',
                           help='Scale set name')
    argParser.add_argument('--resourcegroup', '-g', required=True, dest='resource_group',
                           action='store', help='Resource group name')
    argParser.add_argument('--verbose', '-v', action='store_true', default=False,
                           help='Print verbose metrics output to console')
    args = argParser.parse_args()

    verbose = args.verbose  # dump metrics JSON output to console when True
    vmssname = args.vmss
    rgname = args.resource_group

    # Load Azure app defaults
    try:
        with open('vmssconfig.json') as configFile:
            configData = json.load(configFile)
    except FileNotFoundError:
        # FIX: message now matches the filename actually opened
        print("Error: Expecting vmssconfig.json in current folder")
        sys.exit()

    tenant_id = configData['tenantId']
    app_id = configData['appId']
    app_secret = configData['appSecret']
    subscription_id = configData['subscriptionId']

    # get Azure access token
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # get host metrics for the named VM scale set
    provider = 'Microsoft.Compute'
    resource_type = 'virtualMachineScaleSets'
    metrics = azurerm.get_metrics_for_resource(access_token, subscription_id, rgname,
                                               provider, resource_type, vmssname)

    if verbose:  # FIX: idiomatic truth test instead of '== True'
        print(json.dumps(metrics, sort_keys=False, indent=2, separators=(',', ': ')))

    # sort metrics into CPU and timestamp lists for easy plotting
    # FIX: initialize the lists before the loop so a response without a
    # 'Percentage CPU' metric cannot cause a NameError at plot time
    timestamp_list = []
    cpu_list = []
    for metric in metrics['value']:
        if metric['name']['value'] == 'Percentage CPU':
            for data in metric['data']:
                if 'average' in data:
                    cpu_list.append(data['average'])
                    # convert timestamp from 2016-11-26T06:26:00Z to python datetime format
                    dt = datetime.datetime.strptime(data['timeStamp'], '%Y-%m-%dT%H:%M:%SZ')
                    timestamp_list.append(dt)
            break

    # set figure title and graph style
    fig = pyplot.gcf()
    fig.canvas.set_window_title('Host metrics graph')
    pyplot.style.use('ggplot')

    # plot values
    pyplot.plot(timestamp_list, cpu_list)
    pyplot.gcf().autofmt_xdate()
    pyplot.ylim([0, 100])

    # label axis and graph
    pyplot.ylabel('CPU Percentage')
    pyplot.xlabel('Time stamp (UTC)')
    pyplot.title('Avg CPU over time for scale set: ' + vmssname)

    # display
    pyplot.show()
def main():
    '''Roll an image upgrade through a VM Scale Set.

    Updates the scale set model to a new platform image version
    (--newversion) or a new custom image URI (--customuri), then issues a
    manual upgrade for a chosen group of VMs: an update domain
    (--updatedomain), a single VM id (--vmid), or an explicit JSON list of
    ids (--vmlist). Unless --nowait is given, polls the scale set instance
    view until provisioning reports success.
    '''
    # create parser
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--vmssname', '-s', required=True, action='store',
                           help='VM Scale Set name')
    argParser.add_argument('--resourcegroup', '-r', required=True, dest='resource_group',
                           action='store', help='Resource group name')
    argParser.add_argument('--newversion', '-n', dest='newversion', action='store',
                           help='New platform image version string')
    argParser.add_argument('--customuri', '-c', dest='customuri', action='store',
                           help='New custom image URI string')
    argParser.add_argument('--updatedomain', '-u', dest='updatedomain', action='store',
                           type=int, help='Update domain (int)')
    argParser.add_argument('--vmid', '-i', dest='vmid', action='store', type=int,
                           help='Single VM ID (int)')
    argParser.add_argument('--vmlist', '-l', dest='vmlist', action='store',
                           help='List of VM IDs e.g. "["1", "2"]"')
    argParser.add_argument('--nowait', '-w', action='store_true', default=False,
                           help='Start upgrades and then exit without waiting')
    argParser.add_argument('--verbose', '-v', action='store_true', default=False,
                           help='Show additional information')
    argParser.add_argument('-y', dest='noprompt', action='store_true', default=False,
                           help='Do not prompt for confirmation')
    args = argParser.parse_args()

    # switches to determine program behavior
    noprompt = args.noprompt  # go ahead and upgrade without waiting for confirmation when True
    nowait = args.nowait  # don't loop waiting for upgrade provisioning to complete when True
    verbose = args.verbose  # print extra status information when True

    vmssname = args.vmssname
    resource_group = args.resource_group
    # exactly one image source is required: a platform version or a custom URI
    if args.newversion is not None:
        newversion = args.newversion
        storagemode = 'platform'
    elif args.customuri is not None:
        customuri = args.customuri
        storagemode = 'custom'
    else:
        argParser.error(
            'You must specify a new version for platform images or a custom uri for custom images'
        )
    # exactly one VM-selection mode is required: update domain, vm id, or vm list
    if args.updatedomain is not None:
        updatedomain = args.updatedomain
        upgrademode = 'updatedomain'
    elif args.vmid is not None:
        vmid = args.vmid
        upgrademode = 'vmid'
    elif args.vmlist is not None:
        vmlist = args.vmlist
        upgrademode = 'vmlist'
    else:
        argParser.error(
            'You must specify an update domain, a vm id, or a vm list')

    # Load Azure app defaults
    try:
        with open('vmssconfig.json') as configFile:
            configdata = json.load(configFile)
    except FileNotFoundError:
        print("Error: Expecting vmssconfig.json in current folder")
        sys.exit()
    tenant_id = configdata['tenantId']
    app_id = configdata['appId']
    app_secret = configdata['appSecret']
    subscription_id = configdata['subscriptionId']
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # get the vmss model
    vmssmodel = azurerm.get_vmss(access_token, subscription_id, resource_group, vmssname)
    # print(json.dumps(vmssmodel, sort_keys=False, indent=2, separators=(',', ': ')))
    if storagemode == 'platform':
        # check current version
        imagereference = vmssmodel['properties']['virtualMachineProfile'][
            'storageProfile']['imageReference']
        print('Current image reference in Scale Set model:')
        print(json.dumps(imagereference, sort_keys=False, indent=2, separators=(',', ': ')))
        # compare current version with new version
        if imagereference['version'] == newversion:
            # nothing to do — model already on the requested version
            print('Scale Set model version is already set to ' + newversion +
                  ', skipping model update.')
        else:
            if not noprompt:
                # interactive confirmation unless -y was given
                response = input('Confirm version upgrade to: ' + newversion + ' (y/n)')
                if response.lower() != 'y':
                    sys.exit(1)
            # change the version
            vmssmodel['properties']['virtualMachineProfile']['storageProfile'][
                'imageReference']['version'] = newversion
            # put the vmss model
            updateresult = azurerm.update_vmss(access_token, subscription_id,
                                               resource_group, vmssname,
                                               json.dumps(vmssmodel))
            if verbose:
                print(updateresult)
            print('OS version updated to ' + newversion +
                  ' in model for VM Scale Set: ' + vmssname)
    else:  # storagemode = custom
        # check current uri
        oldimageuri = vmssmodel['properties']['virtualMachineProfile'][
            'storageProfile']['osDisk']['image']['uri']
        print('Current image URI in Scale Set model:' + oldimageuri)
        # compare current uri with new uri
        if oldimageuri == customuri:
            # nothing to do — model already on the requested URI
            print('Scale Set model version is already set to ' + customuri +
                  ', skipping model update.')
        else:
            if not noprompt:
                # interactive confirmation unless -y was given
                response = input('Confirm uri upgrade to: ' + customuri + ' (y/n)')
                if response.lower() != 'y':
                    sys.exit(1)
            # change the version
            vmssmodel['properties']['virtualMachineProfile']['storageProfile'][
                'osDisk']['image']['uri'] = customuri
            # put the vmss model
            updateresult = azurerm.update_vmss(access_token, subscription_id,
                                               resource_group, vmssname,
                                               json.dumps(vmssmodel))
            if verbose:
                print(updateresult)
            print('Image URI updated to ' + customuri +
                  ' in model for VM Scale Set: ' + vmssname)

    # build the list of VMs to upgrade depending on the upgrademode setting
    if upgrademode == 'updatedomain':
        # list the VMSS VM instance views to determine their update domains
        print('Examining the scale set..')
        udinstancelist = get_vm_ids_by_ud(access_token, subscription_id,
                                          resource_group, vmssname, updatedomain)
        print('VM instances in UD: ' + str(updatedomain) + ' to upgrade:')
        print(udinstancelist)
        vmids = json.dumps(udinstancelist)
        print('Upgrading VMs in UD: ' + str(updatedomain))
    elif upgrademode == 'vmid':
        vmids = json.dumps([str(vmid)])
        print('Upgrading VM ID: ' + str(vmid))
    else:  # upgrademode = vmlist
        vmids = vmlist
        print('Upgrading VM IDs: ' + vmlist)

    # do manualupgrade on the VMs in the list
    upgraderesult = azurerm.upgrade_vmss_vms(access_token, subscription_id,
                                             resource_group, vmssname, vmids)
    print(upgraderesult)

    # now wait for upgrade to complete
    # query VM scale set instance view
    if not nowait:
        updatecomplete = False
        provisioningstate = ''
        while not updatecomplete:
            vmssinstanceview = azurerm.get_vmss_instance_view(
                access_token, subscription_id, resource_group, vmssname)
            # scan all reported statuses; any success code ends the wait
            for status in vmssinstanceview['statuses']:
                provisioningstate = status['code']
                if provisioningstate == 'ProvisioningState/succeeded':
                    updatecomplete = True
            if verbose:
                print(provisioningstate)
            time.sleep(5)
        # NOTE(review): prints the last status seen in the loop above;
        # presumably the final provisioning code — relies on 'statuses'
        # being non-empty, confirm against the API response shape
        print(status['code'])
    else:
        print(
            'Check Scale Set provisioning state to determine when upgrade is complete.'
        )
def setUp(self): # Load Azure app defaults try: with open('azurermconfig.json') as configFile: configData = json.load(configFile) except FileNotFoundError: print("Error: Expecting vmssConfig.json in current folder") sys.exit() tenant_id = configData['tenantId'] app_id = configData['appId'] app_secret = configData['appSecret'] self.subscription_id = configData['subscriptionId'] self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) self.location = configData['location'] # generate names used in tests self.rgname = Haikunator.haikunate() self.vnet = Haikunator.haikunate(delimiter='') self.saname = Haikunator.haikunate(delimiter='') self.vmname = Haikunator.haikunate(delimiter='') self.vmssname = Haikunator.haikunate(delimiter='') # create resource group print('Creating resource group: ' + self.rgname) response = azurerm.create_resource_group(self.access_token, self.subscription_id, \ self.rgname, self.location) self.assertEqual(response.status_code, 201) # create vnet print('Creating vnet: ' + self.vnet) response = azurerm.create_vnet(self.access_token, self.subscription_id, self.rgname, \ self.vnet, self.location, address_prefix='10.0.0.0/16', nsg_id=None) self.assertEqual(response.status_code, 201) self.subnet_id = response.json()['properties']['subnets'][0]['id'] # create public ip address for VM NIC self.ipname = self.vnet + 'ip' print('Creating VM NIC public ip address: ' + self.ipname) dns_label = self.vnet response = azurerm.create_public_ip(self.access_token, self.subscription_id, self.rgname, \ self.ipname, dns_label, self.location) self.assertEqual(response.status_code, 201) self.ip_id = response.json()['id'] # create public ip address for VMSS LB self.ipname2 = self.vnet + 'ip2' print('Creating VMSS LB public ip address: ' + self.ipname2) dns_label2 = self.vnet + '2' response = azurerm.create_public_ip(self.access_token, self.subscription_id, self.rgname, \ self.ipname2, dns_label2, self.location) self.assertEqual(response.status_code, 
201) self.ip2_id = response.json()['id'] # create storage account for VM print('Creating storage account: ' + self.saname) response = azurerm.create_storage_account(self.access_token, self.subscription_id, self.rgname, \ self.saname, self.location, storage_type='Standard_LRS') self.assertEqual(response.status_code, 202) # create 5 storage accounts for vmssname print('Creating storage accounts for scale set') self.container_list = [] for count in range(5): sa_name = ''.join(choice(ascii_lowercase) for i in range(10)) print(sa_name) response = azurerm.create_storage_account(self.access_token, self.subscription_id, \ self.rgname, sa_name, self.location, storage_type='Standard_LRS') self.assertEqual(response.status_code, 202) container = 'https://' + sa_name + '.blob.core.windows.net/' + self.vmssname + 'vhd' self.container_list.append(container) # create NSG nsg_name = self.vnet + 'nsg' print('Creating NSG: ' + nsg_name) response = azurerm.create_nsg(self.access_token, self.subscription_id, self.rgname, \ nsg_name, self.location) self.assertEqual(response.status_code, 201) # print(json.dumps(response.json())) self.nsg_id = response.json()['id'] # create NSG rule nsg_rule = 'ssh' print('Creating NSG rule: ' + nsg_rule) response = azurerm.create_nsg_rule(self.access_token, self.subscription_id, self.rgname, \ nsg_name, nsg_rule, description='ssh rule', destination_range='22') self.assertEqual(response.status_code, 201) # create nic for VM create nic_name = self.vnet + 'nic' print('Creating nic: ' + nic_name) response = azurerm.create_nic(self.access_token, self.subscription_id, self.rgname, \ nic_name, self.ip_id, self.subnet_id, self.location) self.assertEqual(response.status_code, 201) self.nic_id = response.json()['id'] # create load balancer with nat pool for VMSS create lb_name = self.vnet + 'lb' print('Creating load balancer with nat pool: ' + lb_name) response = azurerm.create_lb_with_nat_pool(self.access_token, self.subscription_id, \ self.rgname, lb_name, 
self.ip2_id, '50000', '50100', '22', self.location) self.be_pool_id = response.json()['properties']['backendAddressPools'][0]['id'] self.lb_pool_id = response.json()['properties']['inboundNatPools'][0]['id']
import time import azurerm import pprint import datetime import sys import json import argparse sys.path.insert(0, '/home/negat/credentials/aad/') from aadcredentials import * pp = pprint.PrettyPrinter(indent=2) access_token = azurerm.get_access_token(tenant_id, application_id, application_secret) parser = argparse.ArgumentParser( description='Swap a PIP from one LB to another.') parser.add_argument( '--src_lb_rg', type=str, required=True, help='The resource group name of the LB with the PIP to transfer.') parser.add_argument('--src_lb_name', type=str, required=True, help='The name of the LB with the PIP to transfer.') parser.add_argument( '--dest_lb_rg', type=str, required=True,
print(" \\ -> " + msg) eventName = 'quota-' + provider_name + "-" + quota[ 'metric'] event.fire( cloud_provider.metadata.name, cloud_provider.metadata.namespace, 'secret', eventName, "Quota warning for cloud provider " + provider_name + ": " + msg, 'FullQuota', 'Warning', api_core) elif cloud == "azr": azOSP = json.loads( dc(secret_data['osServicePrincipal.json'])) access_token = azurerm.get_access_token( azOSP['tenantId'], azOSP['clientId'], azOSP['clientSecret']) for region in [ 'centralus', 'eastus', 'eastus2', 'westus', 'westus2', 'southcentralus' ]: compute_usage = azurerm.get_compute_usage( access_token, azOSP['subscriptionId'], region)['value'] compute_usage = compute_usage + azurerm.get_network_usage( access_token, azOSP['subscriptionId'], region)['value'] compute_usage = compute_usage + azurerm.get_storage_usage( access_token, azOSP['subscriptionId'], region)['value'] print("Azure: Processing Cloud Provider: " +
def setUp(self): # Load Azure app defaults try: with open('azurermconfig.json') as configFile: configData = json.load(configFile) except FileNotFoundError: print("Error: Expecting vmssConfig.json in current folder") sys.exit() tenant_id = configData['tenantId'] app_id = configData['appId'] app_secret = configData['appSecret'] self.subscription_id = configData['subscriptionId'] self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) self.location = configData['location'] # generate names for resources self.rgname = Haikunator.haikunate() self.vnet = Haikunator.haikunate(delimiter='') self.vmssname = Haikunator.haikunate(delimiter='') self.setting_name = Haikunator.haikunate(delimiter='') # create resource group print('Creating resource group: ' + self.rgname) response = azurerm.create_resource_group(self.access_token, self.subscription_id, \ self.rgname, self.location) self.assertEqual(response.status_code, 201) # create vnet print('Creating vnet: ' + self.vnet) response = azurerm.create_vnet(self.access_token, self.subscription_id, self.rgname, \ self.vnet, self.location, address_prefix='10.0.0.0/16', nsg_id=None) self.assertEqual(response.status_code, 201) self.subnet_id = response.json()['properties']['subnets'][0]['id'] # create public ip address for VMSS LB self.ipname2 = self.vnet + 'ip2' print('Creating VMSS LB public ip address: ' + self.ipname2) dns_label2 = self.vnet + '2' response = azurerm.create_public_ip(self.access_token, self.subscription_id, self.rgname, \ self.ipname2, dns_label2, self.location) self.assertEqual(response.status_code, 201) self.ip2_id = response.json()['id'] # create 5 storage accounts for vmssname print('Creating storage accounts for scale set') self.container_list = [] for count in range(5): sa_name = ''.join(choice(ascii_lowercase) for i in range(10)) print(sa_name) response = azurerm.create_storage_account(self.access_token, self.subscription_id, \ self.rgname, sa_name, self.location, 
storage_type='Standard_LRS') self.assertEqual(response.status_code, 202) container = 'https://' + sa_name + '.blob.core.windows.net/' + self.vmssname + 'vhd' self.container_list.append(container) # create load balancer with nat pool for VMSS create lb_name = self.vnet + 'lb' print('Creating load balancer with nat pool: ' + lb_name) response = azurerm.create_lb_with_nat_pool(self.access_token, self.subscription_id, \ self.rgname, lb_name, self.ip2_id, '50000', '50100', '22', self.location) self.be_pool_id = response.json()['properties']['backendAddressPools'][0]['id'] self.lb_pool_id = response.json()['properties']['inboundNatPools'][0]['id'] # create VMSS capacity = 1 vm_size = 'Standard_D1' publisher = 'Canonical' offer = 'UbuntuServer' sku = '16.04.0-LTS' version = 'latest' username = '******' password = Haikunator.haikunate(delimiter=',') print('Creating VMSS: ' + self.vmssname + ', capacity = ' + str(capacity)) response = azurerm.create_vmss(self.access_token, self.subscription_id, self.rgname, \ self.vmssname, vm_size, capacity, publisher, offer, sku, version, self.container_list, \ self.subnet_id, self.be_pool_id, self.lb_pool_id, self.location, username=username, \ password=password)
def main():
    '''Main routine.

    Deploys an ARM template from a URI with a parameters file, optionally
    generating random values for selected parameters, creating a resource
    group when none is given, and optionally waiting on provisioning.
    '''
    # command line argument definitions
    parser = argparse.ArgumentParser()
    parser.add_argument('--uri', '-u', required=True, action='store',
                        help='Template URI')
    parser.add_argument('--params', '-f', required=True, action='store',
                        help='Parameters json file')
    parser.add_argument('--location', '-l', required=True, action='store',
                        help='Location, e.g. eastus')
    parser.add_argument('--rg', '-g', required=False, action='store',
                        help='Resource Group name')
    parser.add_argument('--sub', '-s', required=False, action='store',
                        help='Subscription ID')
    parser.add_argument('--genparams', '-p', required=False, action='store',
                        help='Comma separated list of parameters to generate strings for')
    parser.add_argument('--wait', '-w', required=False, action='store_true', default=False,
                        help='Wait for deployment to complete and time it')
    parser.add_argument('--debug', '-d', required=False, action='store_true', default=False,
                        help='Debug mode: print additional deployment')
    args = parser.parse_args()

    template_uri = args.uri
    params = args.params
    rgname = args.rg
    location = args.location
    subscription_id = args.sub

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_fh:
            config = json.load(config_fh)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermconfig.json in current folder')

    tenant_id = config['tenantId']
    app_id = config['appId']
    app_secret = config['appSecret']
    # command line subscription overrides the config file value
    if subscription_id is None:
        subscription_id = config['subscriptionId']

    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # load parameters file
    try:
        with open(params) as params_fh:
            param_data = json.load(params_fh)
    except FileNotFoundError:
        sys.exit('Error: Expecting ' + params + ' in current folder')

    # prep Haikunator
    name_gen = Haikunator()

    # merge generated values into the parameter set when requested;
    # phrases are title-cased with '-' delimiters so they can serve as passwords
    if args.genparams is not None:
        generated = {
            name: {'value': name_gen.haikunate(delimiter='-').title()}
            for name in args.genparams.split(',')
        }
        params = {**param_data, **generated}
    else:
        params = param_data

    # create a resource group when none was specified
    if rgname is None:
        rgname = name_gen.haikunate()
        ret = azurerm.create_resource_group(access_token, subscription_id, rgname, location)
        print('Creating resource group: ' + rgname + ', location:', location +
              ', return code:', ret)

    deployment_name = name_gen.haikunate()
    # measure time from beginning of deployment call (after creating resource group etc.)
    start_time = time.time()

    # deploy template and print response
    deploy_return = azurerm.deploy_template_uri(access_token, subscription_id, rgname,
                                                deployment_name, template_uri, params)
    print('Deployment name: ' + deployment_name + ', return code:', deploy_return)
    if 'Response [20' not in str(deploy_return):
        print('Return from deployment: ', deploy_return.text)
        sys.exit('Deployment failed. Exiting.. ')
    if args.debug is True:
        print(json.dumps(deploy_return.json(), sort_keys=False, indent=2,
                         separators=(',', ': ')))

    # show deployment status
    if args.debug is True:
        print('Deployment status:')
        deploy_return = azurerm.show_deployment(access_token, subscription_id, rgname,
                                                deployment_name)
        print(json.dumps(deploy_return, sort_keys=False, indent=2,
                         separators=(',', ': ')))

    # wait for deployment to complete
    if args.wait is True:
        print('Waiting for provisioning to complete..')
        provisioning_state = ''
        try:
            while True:
                time.sleep(10)
                deploy_return = azurerm.show_deployment(access_token, subscription_id,
                                                        rgname, deployment_name)
                provisioning_state = deploy_return['properties']['provisioningState']
                if provisioning_state != 'Running':
                    break
                print('Provisioning state:', provisioning_state)
        except KeyError:
            # a malformed/partial deployment record means the deployment failed
            print('Deployment failure:', deploy_return)
        elapsed_time = time.time() - start_time
        print('Elapsed time:', elapsed_time)
def main():
    '''Upgrade the OS image of a VM scale set (platform version or custom image URI),
    then start a manual rolling upgrade on a chosen set of VM instances.

    Reads credentials from vmssconfig.json in the current folder.
    Exits via argparse.error() when required mode arguments are missing.
    '''
    # create parser
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--vmssname', '-s', required=True, action='store', help='VM Scale Set name')
    argParser.add_argument('--resourcegroup', '-r', required=True, dest='resource_group', action='store', help='Resource group name')
    argParser.add_argument('--newversion', '-n', dest='newversion', action='store', help='New platform image version string')
    argParser.add_argument('--customuri', '-c', dest='customuri', action='store', help='New custom image URI string')
    argParser.add_argument('--updatedomain', '-u', dest='updatedomain', action='store', type=int, help='Update domain (int)')
    argParser.add_argument('--vmid', '-i', dest='vmid', action='store', type=int, help='Single VM ID (int)')
    argParser.add_argument('--vmlist', '-l', dest='vmlist', action='store', help='List of VM IDs e.g. "["1", "2"]"')
    argParser.add_argument('--nowait', '-w', action='store_true', default=False, help='Start upgrades and then exit without waiting')
    argParser.add_argument('--verbose', '-v', action='store_true', default=False, help='Show additional information')
    argParser.add_argument('-y', dest='noprompt', action='store_true', default=False, help='Do not prompt for confirmation')
    args = argParser.parse_args()
    # switches to determine program behavior
    noprompt = args.noprompt  # go ahead and upgrade without waiting for confirmation when True
    nowait = args.nowait  # don't loop waiting for upgrade provisioning to complete when True
    verbose = args.verbose  # print extra status information when True
    vmssname = args.vmssname
    resource_group = args.resource_group
    # storage mode: platform image version change vs. custom image URI change
    if args.newversion is not None:
        newversion = args.newversion
        storagemode = 'platform'
    elif args.customuri is not None:
        customuri = args.customuri
        storagemode = 'custom'
    else:
        argParser.error('You must specify a new version for platform images or a custom uri for custom images')
    # upgrade scope: one update domain, a single VM id, or an explicit VM id list
    if args.updatedomain is not None:
        updatedomain = args.updatedomain
        upgrademode = 'updatedomain'
    elif args.vmid is not None:
        vmid = args.vmid
        upgrademode = 'vmid'
    elif args.vmlist is not None:
        vmlist = args.vmlist
        upgrademode = 'vmlist'
    else:
        argParser.error('You must specify an update domain, a vm id, or a vm list')
    # Load Azure app defaults
    try:
        with open('vmssconfig.json') as configFile:
            configdata = json.load(configFile)
    except FileNotFoundError:
        print("Error: Expecting vmssconfig.json in current folder")
        sys.exit()
    tenant_id = configdata['tenantId']
    app_id = configdata['appId']
    app_secret = configdata['appSecret']
    subscription_id = configdata['subscriptionId']
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    # get the vmss model
    vmssmodel = azurerm.get_vmss(access_token, subscription_id, resource_group, vmssname)
    # print(json.dumps(vmssmodel, sort_keys=False, indent=2, separators=(',', ': ')))
    if storagemode == 'platform':
        # check current version
        imagereference = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']
        print('Current image reference in Scale Set model:')
        print(json.dumps(imagereference, sort_keys=False, indent=2, separators=(',', ': ')))
        # compare current version with new version
        if imagereference['version'] == newversion:
            print('Scale Set model version is already set to ' + newversion + ', skipping model update.')
        else:
            if not noprompt:
                response = input('Confirm version upgrade to: ' + newversion + ' (y/n)')
                if response.lower() != 'y':
                    sys.exit(1)
            # change the version
            vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['version'] = newversion
            # put the vmss model
            updateresult = azurerm.update_vmss(access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel))
            if verbose:
                print(updateresult)
            print('OS version updated to ' + newversion + ' in model for VM Scale Set: ' + vmssname)
    else:  # storagemode = custom
        # check current uri
        oldimageuri = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri']
        print('Current image URI in Scale Set model:' + oldimageuri)
        # compare current uri with new uri
        if oldimageuri == customuri:
            print('Scale Set model version is already set to ' + customuri + ', skipping model update.')
        else:
            if not noprompt:
                response = input('Confirm uri upgrade to: ' + customuri + ' (y/n)')
                if response.lower() != 'y':
                    sys.exit(1)
            # change the version
            vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] = customuri
            # put the vmss model
            updateresult = azurerm.update_vmss(access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel))
            if verbose:
                print(updateresult)
            print('Image URI updated to ' + customuri + ' in model for VM Scale Set: ' + vmssname)
    # build the list of VMs to upgrade depending on the upgrademode setting
    if upgrademode == 'updatedomain':
        # list the VMSS VM instance views to determine their update domains
        print('Examining the scale set..')
        udinstancelist = get_vm_ids_by_ud(access_token, subscription_id, resource_group, vmssname, updatedomain)
        print('VM instances in UD: ' + str(updatedomain) + ' to upgrade:')
        print(udinstancelist)
        vmids = json.dumps(udinstancelist)
        print('Upgrading VMs in UD: ' + str(updatedomain))
    elif upgrademode == 'vmid':
        vmids = json.dumps([str(vmid)])
        print('Upgrading VM ID: ' + str(vmid))
    else:  # upgrademode = vmlist
        vmids = vmlist
        print('Upgrading VM IDs: ' + vmlist)
    # do manualupgrade on the VMs in the list
    upgraderesult = azurerm.upgrade_vmss_vms(access_token, subscription_id, resource_group, vmssname, vmids)
    print(upgraderesult)
    # now wait for upgrade to complete
    # query VM scale set instance view
    if not nowait:
        updatecomplete = False
        provisioningstate = ''
        while not updatecomplete:
            vmssinstanceview = azurerm.get_vmss_instance_view(access_token, subscription_id, resource_group, vmssname)
            for status in vmssinstanceview['statuses']:
                provisioningstate = status['code']
                if provisioningstate == 'ProvisioningState/succeeded':
                    updatecomplete = True
                if verbose:
                    print(provisioningstate)
            # poll every 5 seconds; prints the last status code seen each pass
            time.sleep(5)
            print(status['code'])
    else:
        print('Check Scale Set provisioning state to determine when upgrade is complete.')
def main():
    '''Attach or detach a managed disk on a single VM scale set VM.

    Reads credentials from azurermconfig.json in the current folder, GETs the
    VMSS VM model, modifies its data-disk list via attach_model()/detach_model(),
    and PUTs the modified model back. Exits with a message on any error.
    '''
    # validate command line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--vmssname', '-n', required=True, action='store',
                            help='Scale set name')
    arg_parser.add_argument('--rgname', '-g', required=True, action='store',
                            help='Resource Group Name')
    arg_parser.add_argument('--operation', '-o', required=True, action='store',
                            help='Operation (attach/detach)')
    arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id')
    arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id')
    # fix: help text previously said 'Optional password' (copy-paste error)
    arg_parser.add_argument('--diskname', '-d', required=False, action='store',
                            help='Disk name (required for attach)')
    args = arg_parser.parse_args()

    vmssname = args.vmssname
    rgname = args.rgname
    operation = args.operation
    vmid = args.vmid
    lun = int(args.lun)
    diskname = args.diskname
    if operation not in ('attach', 'detach'):
        sys.exit('--operation must be attach or detach')
    if diskname is None and operation == 'attach':
        sys.exit('--diskname is required for attach operation.')

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit("Error: Expecting azurermconfig.json in current folder")

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # do a get on the VM
    vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid)

    # build the new model; operation was validated above, so else == 'detach'
    if operation == 'attach':
        new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun)
    else:
        new_model = detach_model(vmssvm_model, lun)

    # temporary fix - remove diskSizeGB (service rejects it on PUT)
    del new_model['properties']['storageProfile']['osDisk']['diskSizeGB']

    # do a put on the VM
    rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid,
                                   new_model)
    if rmreturn.status_code != 201:
        sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text)
    # fix: dump the response body, not the Response object (which json.dumps cannot
    # serialize and would raise TypeError on)
    print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
def auth(self):
    '''Update the authentication token for this subscription.'''
    token = azurerm.get_access_token(self.tenant_id, self.app_id, self.app_secret)
    self.access_token = token
    return token
def main():
    '''Run the pygame event loop that monitors a VM scale set and processes clicks.

    Uses module-level config (tenant_id, app_id, app_secret, subscription_id,
    rgname) and pygame surfaces (screen, background). Spawns a thread that keeps
    vmssProperties/vmssVmProperties updated, and refreshes the Azure access
    token every 10 minutes.
    '''
    global clickLoopCount
    global showScaleIn, showScaleOut
    global ipaddr, dns

    # get an access token for Azure authentication
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # get public ip address for resource group (don't need to query this in a loop)
    # this gets the first ip address - modify this if your RG has multiple ips
    ips = azurerm.list_public_ips(access_token, subscription_id, rgname)
    # robustness fix: guard against a resource group with no public IPs
    # (matches the sibling variant of this script)
    if len(ips['value']) > 0:
        dns = ips['value'][0]['properties']['dnsSettings']['fqdn']
        ipaddr = ips['value'][0]['properties']['ipAddress']

    # start a timer in order to refresh the access token in 10 minutes
    start_time = time.time()

    # start a VMSS monitoring thread
    vmss_thread = threading.Thread(target=get_vmss_properties,
                                   args=(access_token, subscription_id))
    vmss_thread.start()

    # Loop until the user clicks the close button.
    done = False
    clock = pygame.time.Clock()
    pygame.key.set_repeat(1, 2)  # keyboard monitoring [pause before repeat, repeats/sec]
    while not done:
        screen.blit(background, [0, 0])
        # set how many times per second to run the loop - if you reduce this value,
        # then reduce clickLoopCount. reduce it too low and clicking on things
        # becomes less responsive
        clock.tick(20)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            elif event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0]:
                process_click(event.pos, access_token)

        # refresh screen if VMSS and VM details are available
        if len(vmssProperties) > 0:
            draw_vmss()
        if len(vmssVmProperties) > 0:
            draw_vmssvms()

        # loop counters to show transient messages for a few moments
        if showScaleIn:
            clickLoopCount -= 1
            if clickLoopCount < 1:
                showScaleIn = False
        if showScaleOut:
            clickLoopCount -= 1
            if clickLoopCount < 1:
                showScaleOut = False

        # update display
        pygame.display.flip()

        # renew Azure access token before timeout (600 secs), then reset timer
        # bug fix: was int(start_time - time.time()), which is always negative,
        # so the token was never refreshed
        if int(time.time() - start_time) > 600:
            access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
            start_time = time.time()
    pygame.quit()
def setUp(self):
    '''Create the shared Azure network fixtures used by the compute tests:
    resource group, vnet, two public IPs, NSG + ssh rule, NIC, and a load
    balancer with a NAT pool. Stores ids of created resources on self.
    '''
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # bug fix: message previously named vmssConfig.json although the code
        # opens azurermconfig.json; also exit non-zero on this error
        sys.exit("Error: Expecting azurermconfig.json in current folder")

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data['location']

    # generate names used in tests
    self.h = Haikunator()
    self.rgname = self.h.haikunate()
    self.vnet = self.h.haikunate()
    # self.saname = self.h.haikunate(delimiter='')
    self.vmname = self.h.haikunate(delimiter='')
    self.vmssname = self.h.haikunate(delimiter='')
    self.asname = self.h.haikunate()

    # generate RSA Key for compute resources
    key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537,
                                   key_size=2048)
    self.public_key = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH).decode('utf-8')

    # create resource group
    print('Creating resource group: ' + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)

    # create vnet
    print('Creating vnet: ' + self.vnet)
    response = azurerm.create_vnet(self.access_token, self.subscription_id, self.rgname,
                                   self.vnet, self.location,
                                   address_prefix='10.0.0.0/16', nsg_id=None)
    self.assertEqual(response.status_code, 201)
    self.subnet_id = response.json()['properties']['subnets'][0]['id']

    # create public ip address for VM NIC
    self.ipname = self.vnet + 'ip'
    print('Creating VM NIC public ip address: ' + self.ipname)
    dns_label = self.vnet
    response = azurerm.create_public_ip(self.access_token, self.subscription_id,
                                        self.rgname, self.ipname, dns_label,
                                        self.location)
    self.assertEqual(response.status_code, 201)
    self.ip_id = response.json()['id']

    # create public ip address for VMSS LB
    self.ipname2 = self.vnet + 'ip2'
    print('Creating VMSS LB public ip address: ' + self.ipname2)
    dns_label2 = self.vnet + '2'
    response = azurerm.create_public_ip(self.access_token, self.subscription_id,
                                        self.rgname, self.ipname2, dns_label2,
                                        self.location)
    self.assertEqual(response.status_code, 201)
    self.ip2_id = response.json()['id']

    # create NSG
    nsg_name = self.vnet + 'nsg'
    print('Creating NSG: ' + nsg_name)
    response = azurerm.create_nsg(self.access_token, self.subscription_id, self.rgname,
                                  nsg_name, self.location)
    self.assertEqual(response.status_code, 201)
    # print(json.dumps(response.json()))
    self.nsg_id = response.json()['id']

    # create NSG rule
    nsg_rule = 'ssh'
    print('Creating NSG rule: ' + nsg_rule)
    response = azurerm.create_nsg_rule(self.access_token, self.subscription_id,
                                       self.rgname, nsg_name, nsg_rule,
                                       description='ssh rule', destination_range='22')
    self.assertEqual(response.status_code, 201)

    # create nic for VM create
    nic_name = self.vnet + 'nic'
    print('Creating nic: ' + nic_name)
    response = azurerm.create_nic(self.access_token, self.subscription_id, self.rgname,
                                  nic_name, self.ip_id, self.subnet_id, self.location)
    self.assertEqual(response.status_code, 201)
    self.nic_id = response.json()['id']

    # create load balancer with nat pool for VMSS create
    lb_name = self.vnet + 'lb'
    print('Creating load balancer with nat pool: ' + lb_name)
    response = azurerm.create_lb_with_nat_pool(self.access_token, self.subscription_id,
                                               self.rgname, lb_name, self.ip2_id,
                                               '50000', '50100', '22', self.location)
    self.be_pool_id = response.json()['properties']['backendAddressPools'][0]['id']
    self.lb_pool_id = response.json()['properties']['inboundNatPools'][0]['id']
# Load Azure app defaults
try:
    with open('azurermconfig.json') as configFile:
        configData = json.load(configFile)
except FileNotFoundError:
    # bug fix: message previously named vmssConfig.json although the code
    # opens azurermconfig.json
    print("Error: Expecting azurermconfig.json in current folder")
    sys.exit()

tenant_id = configData['tenantId']
app_id = configData['appId']
app_secret = configData['appSecret']
subscription_id = configData['subscriptionId']
resource_group = configData['resourceGroup']  # NOTE(review): loaded but unused below

access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

# loop through resource groups, printing name/location/OS image for every VM
resource_groups = azurerm.list_resource_groups(access_token, subscription_id)
for rg in resource_groups["value"]:
    rgname = rg["name"]
    vmlist = azurerm.list_vms(access_token, subscription_id, rgname)
    for vm in vmlist['value']:
        name = vm['name']
        location = vm['location']
        offer = vm['properties']['storageProfile']['imageReference']['offer']
        sku = vm['properties']['storageProfile']['imageReference']['sku']
        print(''.join(['Name: ', name, ', RG: ', rgname, ', location: ', location,
                       ', OS: ', offer, ' ', sku]))
with open('windows.json', 'r') as windows_file: windows_json_string = windows_file.read() windows_json = json.loads(windows_json_string) linux_images = linux_json["parameters"]["image"]["allowedValues"] windows_images = windows_json["parameters"]["WindowsServerVersion"]["allowedValues"] linux_locations = linux_json["parameters"]["location"]["allowedValues"] windows_locations = windows_json["parameters"]["location"]["allowedValues"] #linux_skus = linux_json["parameters"]["vmSku"]["allowedValues"] #windows_skus = windows_json["parameters"]["vmSku"]["allowedValues"] #vmSkus = ["Standard_A0", "Standard_A10", "Standard_D1", "Standard_D11", "Standard_D1_v2", "Standard_D11_v2", "Standard_DS1", "Standard_DS11"] access_token = azurerm.get_access_token(tenant_id, application_id, application_secret) def test_linux(linux_image, auth_type, local_naming_infix, wl): return_val = False rg_name = local_naming_infix + "rg" dep_name = local_naming_infix[0:20] + "dep" res = azurerm.create_resource_group(access_token, subscription_id, rg_name, 'westus') cur_parameters_json = copy.deepcopy(parameters_json[auth_type]) if wl == "l": cur_parameters_json["image"] = {"value": linux_image} cur_parameters_json["authenticationType"] = {"value": auth_type} json_string = linux_json_string else: cur_parameters_json["WindowsServerVersion"] = {"value": linux_image}
def auth(self):
    '''Refresh the cached Azure access token for this subscription and return it.'''
    self.access_token = azurerm.get_access_token(self.tenant_id, self.app_id, self.app_secret)
    return self.access_token
def main():
    '''Create a VM scale set plus its supporting resources (resource group, NSG,
    vnet, public IP, load balancer with NAT pool), named after --name.

    Reads credentials from azurermconfig.json in the current folder.
    '''
    # validate command line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--name', '-n', required=True, action='store',
                            help='Name of vmss')
    arg_parser.add_argument('--capacity', '-c', required=True, action='store',
                            help='Number of VMs')
    arg_parser.add_argument('--location', '-l', action='store',
                            help='Location, e.g. eastus')
    arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
                            help='Print operational details')
    args = arg_parser.parse_args()

    name = args.name
    location = args.location
    capacity = args.capacity  # NOTE(review): passed through as a string - confirm create_vmss accepts that

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # create resource group
    print('Creating resource group: ' + name)
    rmreturn = azurerm.create_resource_group(access_token, subscription_id, name, location)
    print(rmreturn)

    # create NSG
    nsg_name = name + 'nsg'
    print('Creating NSG: ' + nsg_name)
    rmreturn = azurerm.create_nsg(access_token, subscription_id, name, nsg_name, location)
    nsg_id = rmreturn.json()['id']
    print('nsg_id = ' + nsg_id)

    # create NSG rule
    nsg_rule = 'ssh'
    print('Creating NSG rule: ' + nsg_rule)
    rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, name, nsg_name,
                                       nsg_rule, description='ssh rule',
                                       destination_range='22')
    #print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))

    # create VNET
    vnetname = name + 'vnet'
    print('Creating VNet: ' + vnetname)
    rmreturn = azurerm.create_vnet(access_token, subscription_id, name, vnetname,
                                   location, nsg_id=nsg_id)
    print(rmreturn)
    # print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
    subnet_id = rmreturn.json()['properties']['subnets'][0]['id']
    print('subnet_id = ' + subnet_id)

    # create public IP address
    public_ip_name = name + 'ip'
    dns_label = name + 'ip'
    print('Creating public IP address: ' + public_ip_name)
    rmreturn = azurerm.create_public_ip(access_token, subscription_id, name,
                                        public_ip_name, dns_label, location)
    print(rmreturn)
    ip_id = rmreturn.json()['id']
    print('ip_id = ' + ip_id)

    # create load balancer with nat pool
    lb_name = vnetname + 'lb'
    print('Creating load balancer with nat pool: ' + lb_name)
    rmreturn = azurerm.create_lb_with_nat_pool(access_token, subscription_id, name,
                                               lb_name, ip_id, '50000', '50100', '22',
                                               location)
    be_pool_id = rmreturn.json()['properties']['backendAddressPools'][0]['id']
    lb_pool_id = rmreturn.json()['properties']['inboundNatPools'][0]['id']

    # create VMSS
    vmss_name = name
    vm_size = 'Standard_D1_v2'
    publisher = 'Canonical'
    offer = 'UbuntuServer'
    sku = '16.04-LTS'
    version = 'latest'
    username = '******'  # NOTE(review): placeholder left by secret redaction - set a real admin name
    password = Haikunator().haikunate(delimiter=',')  # creates random password
    # repaired: the two prints below were fused/corrupted by secret redaction
    print('Password = ' + password)
    print('Creating VMSS: ' + vmss_name)
    rmreturn = azurerm.create_vmss(access_token, subscription_id, name, vmss_name,
                                   vm_size, capacity, publisher, offer, sku, version,
                                   subnet_id, be_pool_id, lb_pool_id, location,
                                   username=username, password=password)
    print(rmreturn)
    print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
import azurerm

# Load Azure app defaults
try:
    with open('azurermconfig.json') as config_file:
        CONFIG_DATA = json.load(config_file)
except FileNotFoundError:
    print("Error: Expecting azurermconfig.json in current folder")
    sys.exit()

TENANT_ID = CONFIG_DATA['tenantId']
APP_ID = CONFIG_DATA['appId']
APP_SECRET = CONFIG_DATA['appSecret']
SUB_ID = CONFIG_DATA['subscriptionId']

ACCESS_TOKEN = azurerm.get_access_token(TENANT_ID, APP_ID, APP_SECRET)

print('Enter an existing Azure Resource Group name.')
RG_NAME = input()

# create a Cosmos DB account
print('Enter Cosmos DB account name to create.')
CA_NAME = input()
LOCATION = 'eastus'
RET = azurerm.create_cosmosdb_account(ACCESS_TOKEN, SUB_ID, RG_NAME, CA_NAME, LOCATION,
                                      cosmosdb_kind='GlobalDocumentDB')
print(RET)
# fix: message typos ("likely falls if ran") in the user-facing warning
print('It can take 2-3 minutes to create the Cosmos DB account. The example below is '
      'for reference. This likely fails if run right away.')

# get storage account keys (may not be ready until account creation completes)
KEYS = azurerm.get_cosmosdb_account_keys(ACCESS_TOKEN, SUB_ID, RG_NAME, CA_NAME)
def setUp(self):
    '''Create the Azure fixtures for VMSS tests: resource group, vnet, LB public
    IP, load balancer with NAT pool, and a one-VM scale set.
    '''
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # bug fix: message previously named vmssConfig.json although the code
        # opens azurermconfig.json; also exit non-zero on this error
        sys.exit("Error: Expecting azurermconfig.json in current folder")

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data['location']

    # generate names for resources
    self.h = Haikunator()
    self.rgname = self.h.haikunate()
    self.vnet = self.h.haikunate(delimiter='')
    self.vmssname = self.h.haikunate(delimiter='')
    self.setting_name = self.h.haikunate(delimiter='')

    # generate RSA Key for compute resources
    key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537,
                                   key_size=2048)
    self.public_key = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH).decode('utf-8')

    # create resource group
    print('Creating resource group: ' + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)

    # create vnet
    print('Creating vnet: ' + self.vnet)
    response = azurerm.create_vnet(self.access_token, self.subscription_id, self.rgname,
                                   self.vnet, self.location,
                                   address_prefix='10.0.0.0/16', nsg_id=None)
    self.assertEqual(response.status_code, 201)
    self.subnet_id = response.json()['properties']['subnets'][0]['id']

    # create public ip address for VMSS LB
    self.ipname2 = self.vnet + 'ip2'
    print('Creating VMSS LB public ip address: ' + self.ipname2)
    dns_label2 = self.vnet + '2'
    response = azurerm.create_public_ip(self.access_token, self.subscription_id,
                                        self.rgname, self.ipname2, dns_label2,
                                        self.location)
    self.assertEqual(response.status_code, 201)
    self.ip2_id = response.json()['id']

    # create load balancer with nat pool for VMSS create
    lb_name = self.vnet + 'lb'
    print('Creating load balancer with nat pool: ' + lb_name)
    response = azurerm.create_lb_with_nat_pool(self.access_token, self.subscription_id,
                                               self.rgname, lb_name, self.ip2_id,
                                               '50000', '50100', '22', self.location)
    self.be_pool_id = response.json()['properties']['backendAddressPools'][0]['id']
    self.lb_pool_id = response.json()['properties']['inboundNatPools'][0]['id']

    # create VMSS
    capacity = 1
    vm_size = 'Standard_D1'
    publisher = 'Canonical'
    offer = 'UbuntuServer'
    sku = '16.04-LTS'
    version = 'latest'
    username = '******'  # NOTE(review): placeholder left by secret redaction - set a real admin name
    password = self.h.haikunate(delimiter=',')
    print('Creating VMSS: ' + self.vmssname + ', capacity = ' + str(capacity))
    response = azurerm.create_vmss(self.access_token, self.subscription_id, self.rgname,
                                   self.vmssname, vm_size, capacity, publisher, offer,
                                   sku, version, self.subnet_id, self.be_pool_id,
                                   self.lb_pool_id, self.location, username=username,
                                   public_key=self.public_key)
def main():
    '''Create a single VM plus its supporting resources (NSG, vnet, public IP,
    NIC) in an existing resource group, waiting for network provisioning.

    Reads credentials from azurermconfig.json in the current folder.
    '''
    # validate command line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--name', '-n', required=True, action='store', help='Name')
    arg_parser.add_argument('--rgname', '-g', required=True, action='store',
                            help='Resource Group Name')
    arg_parser.add_argument('--location', '-l', required=True, action='store',
                            help='Location, e.g. eastus')
    arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
                            help='Print operational details')
    args = arg_parser.parse_args()

    name = args.name
    rgname = args.rgname
    location = args.location

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermconfig.json in current folder')

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # initialize haikunator
    hkn = Haikunator()

    # create NSG
    nsg_name = name + 'nsg'
    print('Creating NSG: ' + nsg_name)
    rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name,
                                  location)
    nsg_id = rmreturn.json()['id']
    print('nsg_id = ' + nsg_id)

    # create NSG rule
    nsg_rule = 'ssh'
    print('Creating NSG rule: ' + nsg_rule)
    rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name,
                                       nsg_rule, description='ssh rule',
                                       destination_range='22')
    print(rmreturn)
    print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))

    # create VNET
    vnetname = name + 'vnet'
    print('Creating VNet: ' + vnetname)
    rmreturn = azurerm.create_vnet(access_token, subscription_id, rgname, vnetname,
                                   location, nsg_id=nsg_id)
    print(rmreturn)
    # print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
    subnet_id = rmreturn.json()['properties']['subnets'][0]['id']
    print('subnet_id = ' + subnet_id)

    # create public IP address
    public_ip_name = name + 'ip'
    dns_label = name + 'ip'
    print('Creating public IP address: ' + public_ip_name)
    rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname,
                                        public_ip_name, dns_label, location)
    print(rmreturn)
    ip_id = rmreturn.json()['id']
    print('ip_id = ' + ip_id)

    # poll until the IP finishes provisioning before attaching it to a NIC
    print('Waiting for IP provisioning..')
    waiting = True
    while waiting:
        ipa = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name)
        if ipa['properties']['provisioningState'] == 'Succeeded':
            waiting = False
        time.sleep(1)

    # create NIC
    nic_name = name + 'nic'
    print('Creating NIC: ' + nic_name)
    rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name,
                                  ip_id, subnet_id, location)
    #print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
    nic_id = rmreturn.json()['id']

    # poll until the NIC finishes provisioning before creating the VM
    print('Waiting for NIC provisioning..')
    waiting = True
    while waiting:
        nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name)
        if nic['properties']['provisioningState'] == 'Succeeded':
            waiting = False
        time.sleep(1)

    # create VM
    vm_name = name
    vm_size = 'Standard_D1'
    publisher = 'CoreOS'
    offer = 'CoreOS'
    sku = 'Stable'
    version = 'latest'
    username = '******'  # NOTE(review): placeholder left by secret redaction - set a real admin name
    password = hkn.haikunate(delimiter=',')  # creates random password
    # repaired: the two prints below were fused/corrupted by secret redaction
    print('password = ' + password)
    print('Creating VM: ' + vm_name)
    rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name,
                                 vm_size, publisher, offer, sku, version, nic_id,
                                 location, username=username, password=password)
    print(rmreturn)
    print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
def subidkeepalive():
    '''Background thread loop: refresh the module-level Azure access token
    every 2000 seconds (~33 min) so it never expires. Runs forever.'''
    global access_token
    while True:
        time.sleep(2000)
        access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
def main():
    '''Run the pygame event loop that monitors a VM scale set and processes clicks.

    Uses module-level config (tenant_id, app_id, app_secret, subscription_id,
    rgname) and pygame surfaces (screen, background). Spawns a thread that keeps
    vmssProperties/vmssVmProperties updated, and refreshes the Azure access
    token every 10 minutes.
    '''
    global clickLoopCount
    global showScaleIn, showScaleOut
    global ipaddr, dns

    # get an access token for Azure authentication
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # get public ip address for resource group (don't need to query this in a loop)
    # this gets the first ip address - modify this if your RG has multiple ips
    ips = azurerm.list_public_ips(access_token, subscription_id, rgname)
    # print(ips)
    if len(ips['value']) > 0:
        dns = ips['value'][0]['properties']['dnsSettings']['fqdn']
        ipaddr = ips['value'][0]['properties']['ipAddress']

    # start a timer in order to refresh the access token in 10 minutes
    start_time = time.time()

    # start a VMSS monitoring thread
    vmss_thread = threading.Thread(target=get_vmss_properties,
                                   args=(access_token, subscription_id))
    vmss_thread.start()

    # Loop until the user clicks the close button.
    done = False
    clock = pygame.time.Clock()
    # keyboard monitoring [pause before repeat, repeats/sec]
    pygame.key.set_repeat(1, 2)
    while not done:
        screen.blit(background, [0, 0])
        # set how many times per second to run the loop - if you reduce this value,
        # then reduce clickLoopCount. reduce it too low and clicking on things
        # becomes less responsive
        clock.tick(20)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            elif event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0]:
                process_click(event.pos, access_token)

        # refresh screen if VMSS and VM details are available
        if len(vmssProperties) > 0:
            draw_vmss()
        if len(vmssVmProperties) > 0:
            draw_vmssvms()

        # loop counters to show transient messages for a few moments
        if showScaleIn:
            clickLoopCount -= 1
            if clickLoopCount < 1:
                showScaleIn = False
        if showScaleOut:
            clickLoopCount -= 1
            if clickLoopCount < 1:
                showScaleOut = False

        # update display
        pygame.display.flip()

        # renew Azure access token before timeout (600 secs), then reset timer
        # bug fix: was int(start_time - time.time()), which is always negative,
        # so the token was never refreshed
        if int(time.time() - start_time) > 600:
            access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
            start_time = time.time()
    pygame.quit()
def setUp(self):
    """Create the shared Azure fixtures used by the compute tests.

    Loads credentials from azurermconfig.json (in the current folder),
    generates random resource names and an SSH keypair, then creates a
    resource group, vnet, two public IPs, an NSG with an ssh rule, a NIC,
    and a load balancer with a NAT pool. Resulting ids/names are stored on
    self for use by the test methods.
    """
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit("Error: Expecting azurermconfig.json in current folder")
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    self.subscription_id = config_data['subscriptionId']
    self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    self.location = config_data['location']

    # generate names used in tests
    # delimiter='' drops the dashes (presumably to satisfy stricter naming
    # rules for VM/VMSS names -- TODO confirm)
    self.h = Haikunator()
    self.rgname = self.h.haikunate()
    self.vnet = self.h.haikunate()
    # self.saname = self.h.haikunate(delimiter='')
    self.vmname = self.h.haikunate(delimiter='')
    self.vmssname = self.h.haikunate(delimiter='')
    self.asname = self.h.haikunate()

    # generate RSA Key for compute resources (OpenSSH-format public half
    # is what gets passed to VM/VMSS creation)
    key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537,
                                   key_size=2048)
    self.public_key = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH).decode('utf-8')

    # create resource group; 201 = created
    print('Creating resource group: ' + self.rgname)
    response = azurerm.create_resource_group(self.access_token, self.subscription_id,
                                             self.rgname, self.location)
    self.assertEqual(response.status_code, 201)

    # create vnet and remember its first subnet id for the NIC below
    print('Creating vnet: ' + self.vnet)
    response = azurerm.create_vnet(self.access_token, self.subscription_id, self.rgname,
                                   self.vnet, self.location,
                                   address_prefix='10.0.0.0/16', nsg_id=None)
    self.assertEqual(response.status_code, 201)
    self.subnet_id = response.json()['properties']['subnets'][0]['id']

    # create public ip address for VM NIC
    self.ipname = self.vnet + 'ip'
    print('Creating VM NIC public ip address: ' + self.ipname)
    dns_label = self.vnet
    response = azurerm.create_public_ip(self.access_token, self.subscription_id,
                                        self.rgname, self.ipname, dns_label,
                                        self.location)
    self.assertEqual(response.status_code, 201)
    self.ip_id = response.json()['id']

    # create public ip address for VMSS LB
    self.ipname2 = self.vnet + 'ip2'
    print('Creating VMSS LB public ip address: ' + self.ipname2)
    dns_label2 = self.vnet + '2'
    response = azurerm.create_public_ip(self.access_token, self.subscription_id,
                                        self.rgname, self.ipname2, dns_label2,
                                        self.location)
    self.assertEqual(response.status_code, 201)
    self.ip2_id = response.json()['id']

    # create NSG
    nsg_name = self.vnet + 'nsg'
    print('Creating NSG: ' + nsg_name)
    response = azurerm.create_nsg(self.access_token, self.subscription_id, self.rgname,
                                  nsg_name, self.location)
    self.assertEqual(response.status_code, 201)
    # print(json.dumps(response.json()))
    self.nsg_id = response.json()['id']

    # create NSG rule
    # NOTE(review): fixed 5s delay presumably lets the NSG finish
    # provisioning before the rule is added -- TODO confirm
    time.sleep(5)
    nsg_rule = 'ssh'
    print('Creating NSG rule: ' + nsg_rule)
    response = azurerm.create_nsg_rule(self.access_token, self.subscription_id,
                                       self.rgname, nsg_name, nsg_rule,
                                       description='ssh rule',
                                       destination_range='22')
    # print(json.dumps(response.json()))
    self.assertEqual(response.status_code, 201)

    # create nic for VM create
    # sleep long enough for subnet to finish creating
    time.sleep(10)
    nic_name = self.vnet + 'nic'
    print('Creating nic: ' + nic_name)
    response = azurerm.create_nic(self.access_token, self.subscription_id, self.rgname,
                                  nic_name, self.ip_id, self.subnet_id, self.location)
    self.assertEqual(response.status_code, 201)
    self.nic_id = response.json()['id']

    # create load balancer with nat pool for VMSS create
    # (frontend port range 50000-50100 NATs to backend port 22)
    lb_name = self.vnet + 'lb'
    print('Creating load balancer with nat pool: ' + lb_name)
    response = azurerm.create_lb_with_nat_pool(self.access_token, self.subscription_id,
                                               self.rgname, lb_name, self.ip2_id,
                                               '50000', '50100', '22', self.location)
    self.be_pool_id = response.json()['properties']['backendAddressPools'][0]['id']
    self.lb_pool_id = response.json()['properties']['inboundNatPools'][0]['id']
def _wait_for_lb_provisioning(access_token, subscription_id, resource_group, lb_name):
    '''Poll a load balancer every 3 seconds until provisioningState is
    Succeeded; return the final load balancer model (dict).'''
    while True:
        lb_model = azurerm.get_load_balancer(access_token, subscription_id,
                                             resource_group, lb_name)
        succeeded = lb_model['properties']['provisioningState'] == 'Succeeded'
        time.sleep(3)
        if succeeded:
            return lb_model


def main():
    '''Swap the public IP addresses of two Azure load balancers (VIP swap).

    Steps: create a temporary "float" public IP; move lb2 onto it; give
    lb2's original IP to lb1 (downtime starts here); give lb1's original
    IP to lb2; delete the float IP. Credentials are read from
    azurermconfig.json in the current folder.
    '''
    # create parser
    # arguments: resource group, lb name 1, 2
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--resourcegroup', '-g', required=True,
                            dest='resource_group', action='store',
                            help='Resource group name')
    arg_parser.add_argument('--lb1', '-1', required=True, action='store',
                            help='Load balancer 1 name')
    arg_parser.add_argument('--lb2', '-2', required=True, action='store',
                            help='Load balancer 2 name')
    arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
                            help='Show additional information')
    arg_parser.add_argument('-y', dest='noprompt', action='store_true', default=False,
                            help='Do not prompt for confirmation')
    args = arg_parser.parse_args()

    verbose = args.verbose  # print extra status information when True
    resource_group = args.resource_group
    lb1 = args.lb1
    lb2 = args.lb2

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        # BUG FIX: the original message named lbconfig.json, but the file
        # actually opened is azurermconfig.json
        sys.exit('Error: Expecting azurermconfig.json in current folder')
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # figure out location of resource group and use that for the float ip
    rg = azurerm.get_resource_group(access_token, subscription_id, resource_group)
    location = rg['location']

    # Create a spare public IP address
    ip_name = Haikunator().haikunate(delimiter='')
    dns_label = ip_name + 'dns'
    print('Creating float public IP: ' + ip_name)
    ip_ret = azurerm.create_public_ip(access_token, subscription_id, resource_group,
                                      ip_name, dns_label, location)
    floatip_id = ip_ret.json()['id']
    if verbose is True:
        print('Float ip id = ' + floatip_id)

    # 1. Get lb 2
    lbmodel2 = azurerm.get_load_balancer(access_token, subscription_id,
                                         resource_group, lb2)
    lb2_ip_id = lbmodel2['properties']['frontendIPConfigurations'][0][
        'properties']['publicIPAddress']['id']
    lb2_ip_name = lb2_ip_id.split('publicIPAddresses/', 1)[1]
    if verbose is True:
        print(lb2 + ' ip id: ' + lb2_ip_id)
        print(lb2 + ' model:')
        print(json.dumps(lbmodel2, sort_keys=False, indent=2,
                         separators=(',', ': ')))

    # 2. Assign new ip to lb 2
    print('Updating ' + lb2 + ' ip to float ip: ' + ip_name)
    lbmodel2['properties']['frontendIPConfigurations'][0]['properties'][
        'publicIPAddress']['id'] = floatip_id
    ret = azurerm.update_load_balancer(access_token, subscription_id, resource_group,
                                       lb2, json.dumps(lbmodel2))
    if ret.status_code != 200:
        handle_bad_update("updating " + lb2, ret)
    if verbose is True:
        print('original ip id: ' + lb2_ip_id + ', new ip id: ' + floatip_id)
        # BUG FIX: ret is a response object (see ret.status_code above);
        # json.dumps(ret, ...) raised TypeError - dump its JSON body instead
        print(json.dumps(ret.json(), sort_keys=False, indent=2,
                         separators=(',', ': ')))
    print('Waiting for old ' + lb2 + ' ip: ' + lb2_ip_name + ' to be unnassigned')
    start1 = time.time()
    lbmodel2 = _wait_for_lb_provisioning(access_token, subscription_id,
                                         resource_group, lb2)
    end1 = time.time()
    print('Elapsed time: ' + str(int(end1 - start1)))

    # 3. Get lb 1
    lbmodel1 = azurerm.get_load_balancer(access_token, subscription_id,
                                         resource_group, lb1)
    lb1_ip_id = lbmodel1['properties']['frontendIPConfigurations'][0][
        'properties']['publicIPAddress']['id']
    if verbose is True:
        print(lb1 + ' ip id: ' + lb1_ip_id)
        print(lb1 + ' model:')
        print(json.dumps(lbmodel1, sort_keys=False, indent=2,
                         separators=(',', ': ')))
    lb1_ip_name = lb1_ip_id.split('publicIPAddresses/', 1)[1]

    # 4. Assign old ip 2 to lb 1 - production traffic is down from here
    # until step 5 completes
    print('Downtime begins: Updating ' + lb1 + ' ip to ' + lb2_ip_name)
    start2 = time.time()
    lbmodel1['properties']['frontendIPConfigurations'][0]['properties'][
        'publicIPAddress']['id'] = lb2_ip_id
    ret = azurerm.update_load_balancer(access_token, subscription_id, resource_group,
                                       lb1, json.dumps(lbmodel1))
    if ret.status_code != 200:
        handle_bad_update("updating " + lb1, ret)
    if verbose is True:
        # BUG FIX: dump the response body, not the response object
        print(json.dumps(ret.json(), sort_keys=False, indent=2,
                         separators=(',', ': ')))
    print('Waiting for old ' + lb1 + ' ip: ' + lb1_ip_name + ' to be unnassigned')
    lbmodel1 = _wait_for_lb_provisioning(access_token, subscription_id,
                                         resource_group, lb1)
    end2 = time.time()
    print('Staging IP ' + lb2_ip_name + ' now points to old production LB ' + lb1)
    print('Elapsed time: ' + str(int(end2 - start1)))

    # 5. Assign old ip 1 to lb 2
    print('Updating ' + lb2 + ' ip to ' + lb1_ip_name)
    lbmodel2['properties']['frontendIPConfigurations'][0]['properties'][
        'publicIPAddress']['id'] = lb1_ip_id
    ret = azurerm.update_load_balancer(access_token, subscription_id, resource_group,
                                       lb2, json.dumps(lbmodel2))
    if ret.status_code != 200:
        handle_bad_update("updating " + lb2, ret)
    if verbose is True:
        print('Original ip id: ' + lb2_ip_id + ', new ip id: ' + lb1_ip_id)
        # BUG FIX: dump the response body, not the response object
        print(json.dumps(ret.json(), sort_keys=False, indent=2,
                         separators=(',', ': ')))
    print('Waiting for ' + lb2 + ' provisioning to complete')
    lbmodel2 = _wait_for_lb_provisioning(access_token, subscription_id,
                                         resource_group, lb2)
    end3 = time.time()

    # 6. Delete floatip
    print('VIP swap complete')
    print('Downtime: ' + str(int(end3 - start2)) + '. Total elapsed time: ' +
          str(int(end3 - start1)))
    print('Deleting float ip: ' + ip_name)
    azurerm.delete_public_ip(access_token, subscription_id, resource_group, ip_name)
# NOTE(review): this fragment begins mid-`try` block -- the opening `try:`
# (and this script's imports) lie outside the visible chunk, so the code
# below is kept byte-identical and only annotated.
    with open('azurermconfig.json') as configFile:
        configData = json.load(configFile)
except FileNotFoundError:
    # NOTE(review): message names vmssConfig.json but the code opens
    # azurermconfig.json -- looks wrong; confirm before fixing
    print("Error: Expecting vmssConfig.json in current folder")
    sys.exit()
tenant_id = configData['tenantId']
app_id = configData['appId']
app_secret = configData['appSecret']
subscription_id = configData['subscriptionId']
# resource group is hard-coded; the config-driven line is commented out
#resource_group = configData['resourceGroup']
resource_group = 'guyqlen'
access_token = azurerm.get_access_token(
    tenant_id, app_id, app_secret
)
# create a storage account (name read interactively; location hard-coded)
print('Enter storage account name to create.')
saname = input()
location = 'eastus'
sareturn = azurerm.create_storage_account(access_token, subscription_id,
                                          resource_group, saname, location)
print(sareturn)
# list storage accounts per sub
sa_list = azurerm.list_storage_accounts_sub(access_token, subscription_id)
print(sa_list)
#for rg in resource_groups["value"]:
# print(rg["name"] + ', ' + rg["location"] + ', ' + rg["properties"]["provisioningState"])