def main(argv):
    """Connect to Azure using credentials read from app.conf and print
    the name of every hosted (cloud) service in the subscription.

    ``argv`` is accepted for a conventional entry-point signature but is
    not used by the body.
    """
    config = ConfigParser.ConfigParser()
    # NOTE(review): hard-coded absolute config path; consider deriving it
    # from an environment variable or the application home directory.
    config.read('/root/oovoo/config/app.conf')
    sms = ServiceManagementService(config.get('Azure', 'subscription_id'),
                                   config.get('Azure', 'certificate'))
    for service in sms.list_hosted_services():
        print('Service name:' + service.service_name)
    # Normalized to the function form for consistency with the calls above
    # (identical output for a single argument on Python 2).
    print('Finish...')
class AzureServicesManager:
    """Thin wrapper around the Azure Service Management API for cloud
    services, virtual machines, data disks and storage accounts.
    """

    # Storage
    container = 'vhds'
    windows_blob_url = 'blob.core.windows.net'

    # Linux guest credentials (placeholders in this source)
    linux_user = '******'
    linux_pass = '******'

    # Default region used when creating storage accounts
    location = 'West US'

    def __init__(self, subscription_id, cert_file):
        self.subscription_id = subscription_id
        self.cert_file = cert_file
        # BUGFIX: the original also declared an ``sms`` @property that
        # returned ``self.sms`` (infinite recursion) and made this
        # assignment raise "AttributeError: can't set attribute".  The
        # property was removed; plain attribute access is unchanged for
        # callers.
        self.sms = ServiceManagementService(self.subscription_id,
                                            self.cert_file)

    def list_locations(self):
        """Print every data-center location visible to the subscription."""
        for location in self.sms.list_locations():
            print(location)

    def list_images(self):
        """Return the list of available OS images."""
        return self.sms.list_os_images()

    @utils.resource_not_found_handler
    def get_hosted_service(self, service_name):
        """Return the properties of a cloud service as a plain dict."""
        resp = self.sms.get_hosted_service_properties(service_name)
        return resp.hosted_service_properties.__dict__

    def delete_hosted_service(self, service_name):
        """Delete a cloud service if (and only if) it exists.

        The availability check returns False when the name is already
        taken, i.e. when the service exists and can be deleted.
        """
        res = self.sms.check_hosted_service_name_availability(service_name)
        if not res.result:
            return self.sms.delete_hosted_service(service_name)

    def create_hosted_service(self, os_user, service_name=None,
                              random=False):
        """Create a cloud service with an available name; return the name.

        When ``service_name`` is omitted or collides with an existing
        service, names are generated until an available one is found.
        """
        if not service_name:
            service_name = self.generate_cloud_service_name(os_user, random)
        available = False
        while not available:
            res = self.sms.check_hosted_service_name_availability(
                service_name)
            if not res.result:
                service_name = self.generate_cloud_service_name(
                    os_user, random)
            else:
                available = True
        self.sms.create_hosted_service(service_name=service_name,
                                       label=service_name,
                                       location='West US')
        return service_name

    def create_virtual_machine(self, service_name, vm_name, image_name,
                               role_size):
        """Create a Linux VM deployment.

        Returns a dict with the async operation's ``request_id`` and the
        ``media_link`` of the OS disk blob.
        """
        media_link = self._get_media_link(vm_name)

        # Linux VM configuration
        hostname = '-'.join((vm_name, 'host'))
        linux_config = LinuxConfigurationSet(hostname, self.linux_user,
                                             self.linux_pass, True)

        # Hard disk for the OS
        os_hd = OSVirtualHardDisk(image_name, media_link)

        # Create vm
        result = self.sms.create_virtual_machine_deployment(
            service_name=service_name,
            deployment_name=vm_name,
            deployment_slot='production',
            label=vm_name,
            role_name=vm_name,
            system_config=linux_config,
            os_virtual_hard_disk=os_hd,
            role_size=role_size)
        return {'request_id': result.request_id, 'media_link': media_link}

    def delete_virtual_machine(self, service_name, vm_name):
        """Delete the VM deployment, wait for completion, then delete its
        cloud service; return the delete-service result."""
        resp = self.sms.delete_deployment(service_name, vm_name, True)
        self.sms.wait_for_operation_status(resp.request_id)
        return self.sms.delete_hosted_service(service_name)

    def generate_cloud_service_name(self, os_user=None, random=False):
        """Return a fully random name, or '<os_user>-<random-suffix>'."""
        if random:
            return utils.generate_random_name(10)
        return '-'.join((os_user, utils.generate_random_name(6)))

    @utils.resource_not_found_handler
    def get_virtual_machine_info(self, service_name, vm_name):
        """Return the first role instance of the deployment as a dict,
        or {} when the deployment has no role instances."""
        vm_info = {}
        deploy_info = self.sms.get_deployment_by_name(service_name, vm_name)
        if deploy_info and deploy_info.role_instance_list:
            vm_info = deploy_info.role_instance_list[0].__dict__
        return vm_info

    def list_virtual_machines(self):
        """Return the instance name of the first role instance of every
        hosted service's deployments."""
        vm_list = []
        for service in self.sms.list_hosted_services():
            deploys = service.deployments
            if deploys and deploys.role_instance_list:
                vm_list.append(deploys.role_instance_list[0].instance_name)
        return vm_list

    def power_on(self, service_name, vm_name):
        """Start the role; return the async operation request id."""
        resp = self.sms.start_role(service_name, vm_name, vm_name)
        return resp.request_id

    def power_off(self, service_name, vm_name):
        """Shut the role down; return the async operation request id."""
        resp = self.sms.shutdown_role(service_name, vm_name, vm_name)
        return resp.request_id

    def soft_reboot(self, service_name, vm_name):
        """Gracefully restart the role; return the request id."""
        resp = self.sms.restart_role(service_name, vm_name, vm_name)
        return resp.request_id

    def hard_reboot(self, service_name, vm_name):
        """Reboot the role instance; return the request id."""
        resp = self.sms.reboot_role_instance(service_name, vm_name, vm_name)
        return resp.request_id

    def attach_volume(self, service_name, vm_name, size, lun):
        """Attach a new data disk of ``size`` GB at the given LUN."""
        disk_name = utils.generate_random_name(5, vm_name)
        media_link = self._get_media_link(vm_name, disk_name)
        self.sms.add_data_disk(service_name, vm_name, vm_name, lun,
                               host_caching='ReadWrite',
                               media_link=media_link,
                               disk_name=disk_name,
                               logical_disk_size_in_gb=size)

    def detach_volume(self, service_name, vm_name, lun):
        """Detach and delete the data disk at the given LUN."""
        self.sms.delete_data_disk(service_name, vm_name, vm_name, lun, True)

    def get_available_lun(self, service_name, vm_name):
        """Return the first free LUN in [1, 15]; 0 when the role cannot
        be read; None when every LUN is taken."""
        try:
            role = self.sms.get_role(service_name, vm_name, vm_name)
        except Exception:
            return 0
        # BUGFIX: the original did ``[...].sort()`` which returns None,
        # making the membership test below raise TypeError.
        luns = [disk.lun for disk in role.data_virtual_hard_disks]
        for candidate in range(1, 16):
            if candidate not in luns:
                return candidate
        return None

    def snapshot(self, service_name, vm_name, image_id, snanshot_name):
        """Capture the VM as a 'Specialized' VM image and wait for the
        operation to finish.

        NOTE: the misspelled parameter name ``snanshot_name`` is kept for
        backward compatibility with keyword callers.
        """
        image_desc = 'Snapshot for image %s' % vm_name
        image = CaptureRoleAsVMImage('Specialized', snanshot_name,
                                     image_id, image_desc, 'english')
        resp = self.sms.capture_vm_image(service_name, vm_name, vm_name,
                                         image)
        self.sms.wait_for_operation_status(resp.request_id)

    def _get_media_link(self, vm_name, filename=None, storage_account=None):
        """Build the OS/data-disk blob media link:

            http://<storageAccount>.<blobLink>/<blobContainer>/<blob>.vhd

        (The scheme is http here even though MediaLinks are usually
        documented as https -- preserved as-is.)  When no storage account
        is given one is looked up or created.
        """
        if not storage_account:
            storage_account = self._get_or_create_storage_account()
        filename = vm_name if filename is None else filename
        blob = vm_name + '-' + filename + '.vhd'
        return "http://%s.%s/%s/%s" % (storage_account,
                                       self.windows_blob_url,
                                       self.container, blob)

    def _get_or_create_storage_account(self):
        """Return an existing storage account name, creating one in
        ``self.location`` when the subscription has none."""
        account_list = self.sms.list_storage_accounts()
        if account_list:
            return account_list[-1].service_name
        storage_account = utils.generate_random_name(10)
        description = "Storage account %s description" % storage_account
        label = storage_account + 'label'
        self.sms.create_storage_account(storage_account, description, label,
                                        location=self.location)
        return storage_account

    def _wait_for_operation(self, request_id, timeout=3000,
                            failure_callback=None,
                            failure_callback_kwargs=None):
        """Wait for an async operation; on failure invoke the optional
        callback with its kwargs, then re-raise the original error."""
        try:
            self.sms.wait_for_operation_status(request_id, timeout=timeout)
        except Exception as ex:
            if failure_callback and failure_callback_kwargs:
                failure_callback(**failure_callback_kwargs)
            raise ex
class AzureInventory(object):
    """Build an Ansible dynamic inventory from Windows Azure cloud
    services, with optional file-based caching of the results."""

    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}
        # Per-host connection vars (ansible_ssh_host/port, status, ...)
        self.host_metadata = {}

        # Cache setting defaults.
        # These can be overridden in settings (see `read_settings`).
        cache_dir = os.path.expanduser('~')
        self.cache_path_cache = os.path.join(cache_dir,
                                             '.ansible-azure.cache')
        self.cache_path_index = os.path.join(cache_dir,
                                             '.ansible-azure.index')
        self.cache_max_age = 0

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id,
                                            self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list or self.args.host:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data = json.loads(self.get_inventory_from_cache())
            else:
                data = self.inventory

            if self.args.host:
                data_to_print = self.get_host(self.args.host)
            else:
                # Add the `['_meta']['hostvars']` information.
                hostvars = {}
                if len(data) > 0:
                    for host in set([h for hosts in data.values()
                                     for h in hosts if h]):
                        hostvars[host] = self.get_host(host, jsonify=False)
                data['_meta'] = {'hostvars': hostvars}
                # JSONify the data.
                data_to_print = self.json_format_dict(data, pretty=True)
        print(data_to_print)

    def get_host(self, hostname, jsonify=True):
        """Return information about the given hostname, based on what
        the Windows Azure API provides.

        Returns the metadata dict (JSON-encoded when ``jsonify`` is
        true), or an error string when the host is unknown.
        """
        if hostname not in self.host_metadata:
            # NOTE(review): this dumps the whole metadata map rather than
            # the hostname -- preserved as-is for compatibility.
            return "No host found: %s" % json.dumps(self.host_metadata)
        if jsonify:
            return json.dumps(self.host_metadata[hostname])
        return self.host_metadata[hostname]

    def get_images(self):
        """Return OS images whose label contains the --list-images
        argument (case-insensitive), as JSON-compatible plain data."""
        images = []
        needle = self.args.list_images.lower()
        for image in self.sms.list_os_images():
            if needle in str(image.label).lower():
                images.append(vars(image))
        # Round-trip through JSON to flatten nested SDK objects.
        return json.loads(json.dumps(images, default=lambda o: o.__dict__))

    def is_cache_valid(self):
        """Determines if the cache file has expired, or if it is still
        valid."""
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True
        return False

    def read_settings(self):
        """Reads the settings from the .ini file."""
        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) +
                    '/windows_azure.ini')

        # Credentials related
        if config.has_option('azure', 'subscription_id'):
            self.subscription_id = config.get('azure', 'subscription_id')
        if config.has_option('azure', 'cert_path'):
            self.cert_path = config.get('azure', 'cert_path')

        # Cache related
        if config.has_option('azure', 'cache_path'):
            cache_path = os.path.expandvars(
                os.path.expanduser(config.get('azure', 'cache_path')))
            self.cache_path_cache = os.path.join(cache_path,
                                                 'ansible-azure.cache')
            self.cache_path_index = os.path.join(cache_path,
                                                 'ansible-azure.index')
        if config.has_option('azure', 'cache_max_age'):
            self.cache_max_age = config.getint('azure', 'cache_max_age')

    def read_environment(self):
        """Reads the settings from environment variables."""
        # Credentials
        if os.getenv("AZURE_SUBSCRIPTION_ID"):
            self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
        if os.getenv("AZURE_CERT_PATH"):
            self.cert_path = os.getenv("AZURE_CERT_PATH")

    def parse_cli_args(self):
        """Command line argument processing"""
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on Azure',
        )
        parser.add_argument('--list', action='store_true', default=True,
                            help='List nodes (default: True)')
        parser.add_argument('--list-images', action='store',
                            help='Get all available images.')
        parser.add_argument(
            '--refresh-cache', action='store_true', default=False,
            # Typo fix: was "thecache".
            help='Force refresh of the cache by making API requests to '
                 'Azure (default: False - use cache files)',
        )
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance.')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        """Do API calls, and save data in cache files."""
        self.add_cloud_services()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def add_cloud_services(self):
        """Makes an Azure API call to get the list of cloud services."""
        try:
            for cloud_service in self.sms.list_hosted_services():
                self.add_deployments(cloud_service)
        except Exception as e:
            sys.exit("Error: Failed to access cloud services - {0}".format(
                str(e)))

    def add_deployments(self, cloud_service):
        """Makes an Azure API call to get the list of virtual machines
        associated with a cloud service.
        """
        try:
            for deployment in self.sms.get_hosted_service_properties(
                    cloud_service.service_name,
                    embed_detail=True).deployments.deployments:
                self.add_deployment(cloud_service, deployment)
        except Exception as e:
            sys.exit("Error: Failed to access deployments - {0}".format(
                str(e)))

    def add_deployment(self, cloud_service, deployment):
        """Adds a deployment to the inventory and index"""
        for role in deployment.role_instance_list.role_instances:
            try:
                # Default port 22 unless port found with name 'SSH'
                port = '22'
                for endpoint in role.instance_endpoints.instance_endpoints:
                    if endpoint.name == 'SSH':
                        port = endpoint.public_port
                        break
            except AttributeError:
                # Role exposes no endpoints; keep the default port.
                pass
            finally:
                self.add_instance(role.instance_name, deployment, port,
                                  cloud_service, role.instance_status)

    def add_instance(self, hostname, deployment, ssh_port, cloud_service,
                     status):
        """Adds an instance to the inventory and index"""
        dest = urlparse(deployment.url).hostname

        # Add to index
        self.index[hostname] = deployment.name
        self.host_metadata[hostname] = dict(
            ansible_ssh_host=dest,
            ansible_ssh_port=int(ssh_port),
            instance_status=status,
            private_id=deployment.private_id)

        # List of all azure deployments
        self.push(self.inventory, "azure", hostname)

        # Inventory: Group by service name
        self.push(self.inventory,
                  self.to_safe(cloud_service.service_name), hostname)

        if int(ssh_port) == 22:
            self.push(self.inventory, "Cloud_services", hostname)

        # Inventory: Group by region
        self.push(
            self.inventory,
            self.to_safe(cloud_service.hosted_service_properties.location),
            hostname)

    def push(self, my_dict, key, element):
        """Pushed an element onto an array that may not have been
        defined in the dict."""
        my_dict.setdefault(key, []).append(element)

    def get_inventory_from_cache(self):
        """Reads the inventory from the cache file and returns it as a
        JSON string."""
        # BUGFIX: close the file handle (the original leaked it).
        with open(self.cache_path_cache, 'r') as cache:
            return cache.read()

    def load_index_from_cache(self):
        """Reads the index from the cache file and sets self.index."""
        with open(self.cache_path_index, 'r') as cache:
            self.index = json.loads(cache.read())

    def write_to_cache(self, data, filename):
        """Writes data in JSON format to a file."""
        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        """Escapes any characters that would be invalid in an ansible
        group name."""
        # Raw string fixes the invalid '\-' escape sequence warning.
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """Converts a dict to a JSON object and dumps it as a formatted
        string."""
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        return json.dumps(data)
class AzureInventory(object):
    """Ansible dynamic-inventory builder for Windows Azure cloud
    services (variant using WindowsAzureError for API failures)."""

    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}
        # Per-host connection vars (ansible_ssh_host/port, status, ...)
        self.host_metadata = {}

        # Cache setting defaults.
        # These can be overridden in settings (see `read_settings`).
        cache_dir = os.path.expanduser('~')
        self.cache_path_cache = os.path.join(cache_dir,
                                             '.ansible-azure.cache')
        self.cache_path_index = os.path.join(cache_dir,
                                             '.ansible-azure.index')
        self.cache_max_age = 0

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id,
                                            self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list or self.args.host:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data = json.loads(self.get_inventory_from_cache())
            else:
                data = self.inventory

            if self.args.host:
                data_to_print = self.get_host(self.args.host)
            else:
                # Add the `['_meta']['hostvars']` information.
                hostvars = {}
                if len(data) > 0:
                    for host in set([h for hosts in data.values()
                                     for h in hosts if h]):
                        hostvars[host] = self.get_host(host, jsonify=False)
                data['_meta'] = {'hostvars': hostvars}
                # JSONify the data.
                data_to_print = self.json_format_dict(data, pretty=True)
        print(data_to_print)

    def get_host(self, hostname, jsonify=True):
        """Return information about the given hostname, based on what
        the Windows Azure API provides.

        Returns the metadata dict (JSON-encoded when ``jsonify`` is
        true), or an error string when the host is unknown.
        """
        if hostname not in self.host_metadata:
            # NOTE(review): dumps the whole metadata map, not the
            # hostname -- preserved as-is for compatibility.
            return "No host found: %s" % json.dumps(self.host_metadata)
        if jsonify:
            return json.dumps(self.host_metadata[hostname])
        return self.host_metadata[hostname]

    def get_images(self):
        """Return OS images whose label contains the --list-images
        argument (case-insensitive), as JSON-compatible plain data."""
        images = []
        needle = self.args.list_images.lower()
        for image in self.sms.list_os_images():
            if needle in str(image.label).lower():
                images.append(vars(image))
        # Round-trip through JSON to flatten nested SDK objects.
        return json.loads(json.dumps(images, default=lambda o: o.__dict__))

    def is_cache_valid(self):
        """Determines if the cache file has expired, or if it is still
        valid."""
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True
        return False

    def read_settings(self):
        """Reads the settings from the .ini file."""
        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) +
                    '/windows_azure.ini')

        # Credentials related
        if config.has_option('azure', 'subscription_id'):
            self.subscription_id = config.get('azure', 'subscription_id')
        if config.has_option('azure', 'cert_path'):
            self.cert_path = config.get('azure', 'cert_path')

        # Cache related
        if config.has_option('azure', 'cache_path'):
            cache_path = os.path.expandvars(
                os.path.expanduser(config.get('azure', 'cache_path')))
            self.cache_path_cache = os.path.join(cache_path,
                                                 'ansible-azure.cache')
            self.cache_path_index = os.path.join(cache_path,
                                                 'ansible-azure.index')
        if config.has_option('azure', 'cache_max_age'):
            self.cache_max_age = config.getint('azure', 'cache_max_age')

    def read_environment(self):
        """Reads the settings from environment variables."""
        # Credentials
        if os.getenv("AZURE_SUBSCRIPTION_ID"):
            self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
        if os.getenv("AZURE_CERT_PATH"):
            self.cert_path = os.getenv("AZURE_CERT_PATH")

    def parse_cli_args(self):
        """Command line argument processing"""
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on Azure',
        )
        parser.add_argument('--list', action='store_true', default=True,
                            help='List nodes (default: True)')
        parser.add_argument('--list-images', action='store',
                            help='Get all available images.')
        parser.add_argument(
            '--refresh-cache', action='store_true', default=False,
            # Typo fix: was "thecache".
            help='Force refresh of the cache by making API requests to '
                 'Azure (default: False - use cache files)',
        )
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance.')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        """Do API calls, and save data in cache files."""
        self.add_cloud_services()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def add_cloud_services(self):
        """Makes an Azure API call to get the list of cloud services."""
        try:
            for cloud_service in self.sms.list_hosted_services():
                self.add_deployments(cloud_service)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployments(self, cloud_service):
        """Makes an Azure API call to get the list of virtual machines
        associated with a cloud service.
        """
        try:
            for deployment in self.sms.get_hosted_service_properties(
                    cloud_service.service_name,
                    embed_detail=True).deployments.deployments:
                self.add_deployment(cloud_service, deployment)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployment(self, cloud_service, deployment):
        """Adds a deployment to the inventory and index"""
        for role in deployment.role_instance_list.role_instances:
            try:
                # Default port 22 unless port found with name 'SSH'
                port = '22'
                for endpoint in role.instance_endpoints.instance_endpoints:
                    if endpoint.name == 'SSH':
                        port = endpoint.public_port
                        break
            except AttributeError:
                # Role exposes no endpoints; keep the default port.
                pass
            finally:
                self.add_instance(role.instance_name, deployment, port,
                                  cloud_service, role.instance_status)

    def add_instance(self, hostname, deployment, ssh_port, cloud_service,
                     status):
        """Adds an instance to the inventory and index"""
        dest = urlparse(deployment.url).hostname

        # Add to index
        self.index[hostname] = deployment.name
        self.host_metadata[hostname] = dict(
            ansible_ssh_host=dest,
            ansible_ssh_port=int(ssh_port),
            instance_status=status,
            private_id=deployment.private_id)

        # List of all azure deployments
        self.push(self.inventory, "azure", hostname)

        # Inventory: Group by service name
        self.push(self.inventory,
                  self.to_safe(cloud_service.service_name), hostname)

        if int(ssh_port) == 22:
            self.push(self.inventory, "Cloud_services", hostname)

        # Inventory: Group by region
        self.push(
            self.inventory,
            self.to_safe(cloud_service.hosted_service_properties.location),
            hostname)

    def push(self, my_dict, key, element):
        """Pushed an element onto an array that may not have been
        defined in the dict."""
        my_dict.setdefault(key, []).append(element)

    def get_inventory_from_cache(self):
        """Reads the inventory from the cache file and returns it as a
        JSON string."""
        # BUGFIX: close the file handle (the original leaked it).
        with open(self.cache_path_cache, 'r') as cache:
            return cache.read()

    def load_index_from_cache(self):
        """Reads the index from the cache file and sets self.index."""
        with open(self.cache_path_index, 'r') as cache:
            self.index = json.loads(cache.read())

    def write_to_cache(self, data, filename):
        """Writes data in JSON format to a file."""
        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        """Escapes any characters that would be invalid in an ansible
        group name."""
        # Raw string fixes the invalid '\-' escape sequence warning.
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """Converts a dict to a JSON object and dumps it as a formatted
        string."""
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        return json.dumps(data)
# Create a storage account with a generated name and report on the
# subscription's services and storage accounts.
# (print statements normalized to the function form for consistency with
# the surrounding print() calls; identical output on Python 2.)
storage_acc_name = name_generator()
label = 'mystorageaccount'
location = 'Central US'
desc = 'My storage account description.'
result = sms.create_storage_account(storage_acc_name, desc, label,
                                    location=location)

operation_result = sms.get_operation_status(result.request_id)
print('Operation status: ' + operation_result.status)

print('The following services are now up:')
result = sms.list_hosted_services()
for hosted_service in result:
    print('Service name: ' + hosted_service.service_name)
    print('Management URL: ' + hosted_service.url)
    print('Location: ' + hosted_service.hosted_service_properties.location)
    print('')

print('The following storage accounts are now up:')
result = sms.list_storage_accounts()
for account in result:
    print('Account Service name: ' + account.service_name)
    print('Storage account url: ' + account.url)
    print('Location: ' + account.storage_service_properties.location)
class AzureServicesManager:
    """Manage Azure cloud services, virtual machines, data disks and
    storage accounts via the Service Management API (duplicate of the
    earlier definition; same fixes applied).
    """

    # Storage
    container = 'vhds'
    windows_blob_url = 'blob.core.windows.net'

    # Linux guest credentials (placeholders in this source)
    linux_user = '******'
    linux_pass = '******'

    # Default region used when creating storage accounts
    location = 'West US'

    def __init__(self, subscription_id, cert_file):
        self.subscription_id = subscription_id
        self.cert_file = cert_file
        # BUGFIX: the original also declared an ``sms`` @property that
        # returned ``self.sms`` (infinite recursion) and made this
        # assignment raise "AttributeError: can't set attribute".  The
        # property was removed; plain attribute access is unchanged.
        self.sms = ServiceManagementService(self.subscription_id,
                                            self.cert_file)

    def list_locations(self):
        """Print every data-center location visible to the subscription."""
        for location in self.sms.list_locations():
            print(location)

    def list_images(self):
        """Return the list of available OS images."""
        return self.sms.list_os_images()

    @utils.resource_not_found_handler
    def get_hosted_service(self, service_name):
        """Return the properties of a cloud service as a plain dict."""
        resp = self.sms.get_hosted_service_properties(service_name)
        return resp.hosted_service_properties.__dict__

    def delete_hosted_service(self, service_name):
        """Delete a cloud service if (and only if) it exists.

        The availability check returns False when the name is already
        taken, i.e. when the service exists and can be deleted.
        """
        res = self.sms.check_hosted_service_name_availability(service_name)
        if not res.result:
            return self.sms.delete_hosted_service(service_name)

    def create_hosted_service(self, os_user, service_name=None,
                              random=False):
        """Create a cloud service with an available name; return the name.

        When ``service_name`` is omitted or collides with an existing
        service, names are generated until an available one is found.
        """
        if not service_name:
            service_name = self.generate_cloud_service_name(os_user, random)
        available = False
        while not available:
            res = self.sms.check_hosted_service_name_availability(
                service_name)
            if not res.result:
                service_name = self.generate_cloud_service_name(
                    os_user, random)
            else:
                available = True
        self.sms.create_hosted_service(service_name=service_name,
                                       label=service_name,
                                       location='West US')
        return service_name

    def create_virtual_machine(self, service_name, vm_name, image_name,
                               role_size):
        """Create a Linux VM deployment.

        Returns a dict with the async operation's ``request_id`` and the
        ``media_link`` of the OS disk blob.
        """
        media_link = self._get_media_link(vm_name)

        # Linux VM configuration
        hostname = '-'.join((vm_name, 'host'))
        linux_config = LinuxConfigurationSet(hostname, self.linux_user,
                                             self.linux_pass, True)

        # Hard disk for the OS
        os_hd = OSVirtualHardDisk(image_name, media_link)

        # Create vm
        result = self.sms.create_virtual_machine_deployment(
            service_name=service_name,
            deployment_name=vm_name,
            deployment_slot='production',
            label=vm_name,
            role_name=vm_name,
            system_config=linux_config,
            os_virtual_hard_disk=os_hd,
            role_size=role_size)
        return {'request_id': result.request_id, 'media_link': media_link}

    def delete_virtual_machine(self, service_name, vm_name):
        """Delete the VM deployment, wait for completion, then delete its
        cloud service; return the delete-service result."""
        resp = self.sms.delete_deployment(service_name, vm_name, True)
        self.sms.wait_for_operation_status(resp.request_id)
        return self.sms.delete_hosted_service(service_name)

    def generate_cloud_service_name(self, os_user=None, random=False):
        """Return a fully random name, or '<os_user>-<random-suffix>'."""
        if random:
            return utils.generate_random_name(10)
        return '-'.join((os_user, utils.generate_random_name(6)))

    @utils.resource_not_found_handler
    def get_virtual_machine_info(self, service_name, vm_name):
        """Return the first role instance of the deployment as a dict,
        or {} when the deployment has no role instances."""
        vm_info = {}
        deploy_info = self.sms.get_deployment_by_name(service_name, vm_name)
        if deploy_info and deploy_info.role_instance_list:
            vm_info = deploy_info.role_instance_list[0].__dict__
        return vm_info

    def list_virtual_machines(self):
        """Return the instance name of the first role instance of every
        hosted service's deployments."""
        vm_list = []
        for service in self.sms.list_hosted_services():
            deploys = service.deployments
            if deploys and deploys.role_instance_list:
                vm_list.append(deploys.role_instance_list[0].instance_name)
        return vm_list

    def power_on(self, service_name, vm_name):
        """Start the role; return the async operation request id."""
        resp = self.sms.start_role(service_name, vm_name, vm_name)
        return resp.request_id

    def power_off(self, service_name, vm_name):
        """Shut the role down; return the async operation request id."""
        resp = self.sms.shutdown_role(service_name, vm_name, vm_name)
        return resp.request_id

    def soft_reboot(self, service_name, vm_name):
        """Gracefully restart the role; return the request id."""
        resp = self.sms.restart_role(service_name, vm_name, vm_name)
        return resp.request_id

    def hard_reboot(self, service_name, vm_name):
        """Reboot the role instance; return the request id."""
        resp = self.sms.reboot_role_instance(service_name, vm_name, vm_name)
        return resp.request_id

    def attach_volume(self, service_name, vm_name, size, lun):
        """Attach a new data disk of ``size`` GB at the given LUN."""
        disk_name = utils.generate_random_name(5, vm_name)
        media_link = self._get_media_link(vm_name, disk_name)
        self.sms.add_data_disk(service_name, vm_name, vm_name, lun,
                               host_caching='ReadWrite',
                               media_link=media_link,
                               disk_name=disk_name,
                               logical_disk_size_in_gb=size)

    def detach_volume(self, service_name, vm_name, lun):
        """Detach and delete the data disk at the given LUN."""
        self.sms.delete_data_disk(service_name, vm_name, vm_name, lun, True)

    def get_available_lun(self, service_name, vm_name):
        """Return the first free LUN in [1, 15]; 0 when the role cannot
        be read; None when every LUN is taken."""
        try:
            role = self.sms.get_role(service_name, vm_name, vm_name)
        except Exception:
            return 0
        # BUGFIX: the original did ``[...].sort()`` which returns None,
        # making the membership test below raise TypeError.
        luns = [disk.lun for disk in role.data_virtual_hard_disks]
        for candidate in range(1, 16):
            if candidate not in luns:
                return candidate
        return None

    def snapshot(self, service_name, vm_name, image_id, snanshot_name):
        """Capture the VM as a 'Specialized' VM image and wait for the
        operation to finish.

        NOTE: the misspelled parameter name ``snanshot_name`` is kept for
        backward compatibility with keyword callers.
        """
        image_desc = 'Snapshot for image %s' % vm_name
        image = CaptureRoleAsVMImage('Specialized', snanshot_name,
                                     image_id, image_desc, 'english')
        resp = self.sms.capture_vm_image(service_name, vm_name, vm_name,
                                         image)
        self.sms.wait_for_operation_status(resp.request_id)

    def _get_media_link(self, vm_name, filename=None, storage_account=None):
        """Build the OS/data-disk blob media link:

            http://<storageAccount>.<blobLink>/<blobContainer>/<blob>.vhd

        (The scheme is http here even though MediaLinks are usually
        documented as https -- preserved as-is.)  When no storage account
        is given one is looked up or created.
        """
        if not storage_account:
            storage_account = self._get_or_create_storage_account()
        filename = vm_name if filename is None else filename
        blob = vm_name + '-' + filename + '.vhd'
        return "http://%s.%s/%s/%s" % (storage_account,
                                       self.windows_blob_url,
                                       self.container, blob)

    def _get_or_create_storage_account(self):
        """Return an existing storage account name, creating one in
        ``self.location`` when the subscription has none."""
        account_list = self.sms.list_storage_accounts()
        if account_list:
            return account_list[-1].service_name
        storage_account = utils.generate_random_name(10)
        description = "Storage account %s description" % storage_account
        label = storage_account + 'label'
        self.sms.create_storage_account(storage_account, description, label,
                                        location=self.location)
        return storage_account

    def _wait_for_operation(self, request_id, timeout=3000,
                            failure_callback=None,
                            failure_callback_kwargs=None):
        """Wait for an async operation; on failure invoke the optional
        callback with its kwargs, then re-raise the original error."""
        try:
            self.sms.wait_for_operation_status(request_id, timeout=timeout)
        except Exception as ex:
            if failure_callback and failure_callback_kwargs:
                failure_callback(**failure_callback_kwargs)
            raise ex
class AzureInventory(object):
    """Dynamic Ansible inventory built from Azure cloud services.

    Groups hosts under "azure", by cloud-service name, and by region,
    caching API results on disk between runs.
    """

    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id,
                                            self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        # print(x) with a single value behaves identically under Python 2.
        print(data_to_print)

    def get_images(self):
        """Return OS images whose label contains the --list-images filter."""
        images = []
        for image in self.sms.list_os_images():
            if str(image.label).lower().find(
                    self.args.list_images.lower()) >= 0:
                images.append(vars(image))
        return json.loads(json.dumps(images, default=lambda o: o.__dict__))

    def is_cache_valid(self):
        """Determines if the cache file has expired, or if it is still valid."""
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True
        return False

    def read_settings(self):
        """Reads the settings from the .ini file."""
        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) +
                    '/windows_azure.ini')

        # Credentials related
        if config.has_option('azure', 'subscription_id'):
            self.subscription_id = config.get('azure', 'subscription_id')
        if config.has_option('azure', 'cert_path'):
            self.cert_path = config.get('azure', 'cert_path')

        # Cache related
        if config.has_option('azure', 'cache_path'):
            cache_path = config.get('azure', 'cache_path')
            self.cache_path_cache = cache_path + "/ansible-azure.cache"
            self.cache_path_index = cache_path + "/ansible-azure.index"
        if config.has_option('azure', 'cache_max_age'):
            self.cache_max_age = config.getint('azure', 'cache_max_age')

    def read_environment(self):
        """Reads the settings from environment variables."""
        # Credentials
        if os.getenv("AZURE_SUBSCRIPTION_ID"):
            self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
        if os.getenv("AZURE_CERT_PATH"):
            self.cert_path = os.getenv("AZURE_CERT_PATH")

    def parse_cli_args(self):
        """Command line argument processing"""
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on Azure')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List nodes (default: True)')
        parser.add_argument('--list-images', action='store',
                            help='Get all available images.')
        parser.add_argument('--refresh-cache', action='store_true',
                            default=False,
                            help='Force refresh of cache by making API'
                                 ' requests to Azure (default: False - use'
                                 ' cache files)')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        """Do API calls, and save data in cache files."""
        self.add_cloud_services()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def add_cloud_services(self):
        """Makes an Azure API call to get the list of cloud services."""
        try:
            for cloud_service in self.sms.list_hosted_services():
                self.add_deployments(cloud_service)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployments(self, cloud_service):
        """Makes an Azure API call to get the list of virtual machines
        associated with a cloud service.
        """
        try:
            for deployment in self.sms.get_hosted_service_properties(
                    cloud_service.service_name,
                    embed_detail=True).deployments.deployments:
                if deployment.deployment_slot == "Production":
                    self.add_deployment(cloud_service, deployment)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployment(self, cloud_service, deployment):
        """Adds a deployment to the inventory and index"""
        dest = urlparse(deployment.url).hostname

        # Add to index
        self.index[dest] = deployment.name

        # List of all azure deployments
        self.push(self.inventory, "azure", dest)

        # Inventory: Group by service name
        self.push(self.inventory,
                  self.to_safe(cloud_service.service_name), dest)

        # Inventory: Group by region
        self.push(
            self.inventory,
            self.to_safe(cloud_service.hosted_service_properties.location),
            dest)

    def push(self, my_dict, key, element):
        """Pushed an element onto an array that may not have been defined in
        the dict."""
        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        """Reads the inventory from the cache file and returns it as a JSON
        object."""
        # BUG FIX: context manager closes the handle (original leaked it).
        with open(self.cache_path_cache, 'r') as cache:
            return cache.read()

    def load_index_from_cache(self):
        """Reads the index from the cache file and sets self.index."""
        with open(self.cache_path_index, 'r') as cache:
            self.index = json.loads(cache.read())

    def write_to_cache(self, data, filename):
        """Writes data in JSON format to a file."""
        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        """Escapes any characters that would be invalid in an ansible group
        name."""
        # Raw string avoids the invalid '\-' escape deprecation warning.
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """Converts a dict to a JSON object and dumps it as a formatted
        string."""
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# --- Script-level demo code ---
# NOTE(review): relies on `sms`, `result` and `name_generator` being bound
# earlier in the file (outside this section) -- verify before running alone.

# Poll the status of the previously issued management request.
operation_result = sms.get_operation_status(result.request_id)

# Create a storage account with a generated name in Central US.
storage_acc_name = name_generator()
label = 'mystorageaccount'
location = 'Central US'
desc = 'My storage account description.'
result = sms.create_storage_account(storage_acc_name, desc, label,
                                    location=location)

# Report the status of the create-account request.
operation_result = sms.get_operation_status(result.request_id)
print('Operation status: ' + operation_result.status)

# List every hosted (cloud) service in the subscription.
print "The following services are now up:"
result = sms.list_hosted_services()
for hosted_service in result:
    print('Service name: ' + hosted_service.service_name)
    print('Management URL: ' + hosted_service.url)
    print('Location: ' + hosted_service.hosted_service_properties.location)
    print('')

# List every storage account in the subscription.
print "The following storage accounts are now up:"
result = sms.list_storage_accounts()
for account in result:
    print('Account Service name: ' + account.service_name)
class AzureInventory(object):
    """Ansible dynamic-inventory generator backed by the Azure service
    management API.

    Hosts are grouped under "azure", per cloud-service name, and per region;
    results are cached on disk and reused until the cache expires.
    """

    def __init__(self):
        """Main execution path."""
        # Inventory grouped by display group
        self.inventory = {}
        # Index of deployment name -> host
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Initialize Azure ServiceManagementService
        self.sms = ServiceManagementService(self.subscription_id,
                                            self.cert_path)

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        if self.args.list_images:
            data_to_print = self.json_format_dict(self.get_images(), True)
        elif self.args.list:
            # Display list of nodes for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        # Single-value print(x) is identical under Python 2 and 3.
        print(data_to_print)

    def get_images(self):
        """Return OS images whose label matches the --list-images substring."""
        images = []
        for image in self.sms.list_os_images():
            if str(image.label).lower().find(
                    self.args.list_images.lower()) >= 0:
                images.append(vars(image))
        return json.loads(json.dumps(images, default=lambda o: o.__dict__))

    def is_cache_valid(self):
        """Determines if the cache file has expired, or if it is still valid."""
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True
        return False

    def read_settings(self):
        """Reads the settings from the .ini file."""
        config = ConfigParser.SafeConfigParser()
        config.read(
            os.path.dirname(os.path.realpath(__file__)) +
            '/windows_azure.ini')

        # Credentials related
        if config.has_option('azure', 'subscription_id'):
            self.subscription_id = config.get('azure', 'subscription_id')
        if config.has_option('azure', 'cert_path'):
            self.cert_path = config.get('azure', 'cert_path')

        # Cache related
        if config.has_option('azure', 'cache_path'):
            cache_path = config.get('azure', 'cache_path')
            self.cache_path_cache = cache_path + "/ansible-azure.cache"
            self.cache_path_index = cache_path + "/ansible-azure.index"
        if config.has_option('azure', 'cache_max_age'):
            self.cache_max_age = config.getint('azure', 'cache_max_age')

    def read_environment(self):
        """Reads the settings from environment variables."""
        # Credentials
        if os.getenv("AZURE_SUBSCRIPTION_ID"):
            self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
        if os.getenv("AZURE_CERT_PATH"):
            self.cert_path = os.getenv("AZURE_CERT_PATH")

    def parse_cli_args(self):
        """Command line argument processing"""
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on Azure')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List nodes (default: True)')
        parser.add_argument('--list-images', action='store',
                            help='Get all available images.')
        parser.add_argument(
            '--refresh-cache', action='store_true', default=False,
            help='Force refresh of cache by making API requests to Azure'
                 ' (default: False - use cache files)')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        """Do API calls, and save data in cache files."""
        self.add_cloud_services()
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def add_cloud_services(self):
        """Makes an Azure API call to get the list of cloud services."""
        try:
            for cloud_service in self.sms.list_hosted_services():
                self.add_deployments(cloud_service)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployments(self, cloud_service):
        """Makes an Azure API call to get the list of virtual machines
        associated with a cloud service.
        """
        try:
            for deployment in self.sms.get_hosted_service_properties(
                    cloud_service.service_name,
                    embed_detail=True).deployments.deployments:
                if deployment.deployment_slot == "Production":
                    self.add_deployment(cloud_service, deployment)
        except WindowsAzureError as e:
            print("Looks like Azure's API is down:")
            print("")
            print(e)
            sys.exit(1)

    def add_deployment(self, cloud_service, deployment):
        """Adds a deployment to the inventory and index"""
        dest = urlparse(deployment.url).hostname

        # Add to index
        self.index[dest] = deployment.name

        # List of all azure deployments
        self.push(self.inventory, "azure", dest)

        # Inventory: Group by service name
        self.push(self.inventory,
                  self.to_safe(cloud_service.service_name), dest)

        # Inventory: Group by region
        self.push(
            self.inventory,
            self.to_safe(cloud_service.hosted_service_properties.location),
            dest)

    def push(self, my_dict, key, element):
        """Pushed an element onto an array that may not have been defined in
        the dict."""
        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        """Reads the inventory from the cache file and returns it as a JSON
        object."""
        # BUG FIX: the original left the file handle open; `with` closes it.
        with open(self.cache_path_cache, 'r') as cache:
            return cache.read()

    def load_index_from_cache(self):
        """Reads the index from the cache file and sets self.index."""
        with open(self.cache_path_index, 'r') as cache:
            self.index = json.loads(cache.read())

    def write_to_cache(self, data, filename):
        """Writes data in JSON format to a file."""
        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        """Escapes any characters that would be invalid in an ansible group
        name."""
        # Raw string: '\-' in a normal string is an invalid escape sequence.
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """Converts a dict to a JSON object and dumps it as a formatted
        string."""
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)