def test_get_attribute(self):
    """Defaults.get_attribute returns the value of the named attribute."""
    class X:
        def __init__(self):
            self.name = 'value'
    instance = X()
    # Assert on the return value itself; the original test discarded it
    # and only verified that the attribute was left untouched.
    assert Defaults.get_attribute(instance, 'name') == 'value'
    # reading the attribute must not mutate the instance
    assert instance.name == 'value'
def __decorate_os_image_attributes_for_update(self, image, update_record):
    """
    Apply all non-None entries of update_record to the given image.

    Attributes are applied in sorted key order. Keys containing
    '_date' have their value converted to the Azure date format;
    keys containing 'show_in_gui' are coerced from their string
    form to a boolean.
    """
    for key in sorted(update_record):
        new_value = update_record[key]
        if new_value is None:
            continue
        if '_date' in key:
            new_value = self.__convert_date_to_azure_format(new_value)
        if 'show_in_gui' in key:
            new_value = new_value.lower() in ("yes", "true", "t", "1")
        Defaults.set_attribute(image, key, new_value)
    return image
def test_docopt_for_account_type(self):
    """Each Azure storage account type maps to its docopt option name."""
    expected = {
        'Standard_LRS': '--locally-redundant',
        'Standard_ZRS': '--zone-redundant',
        'Standard_GRS': '--geo-redundant',
        'Standard_RAGRS': '--read-access-geo-redundant',
    }
    for account_type, docopt_name in expected.items():
        assert Defaults.docopt_for_account_type(account_type) == docopt_name
def test_host_caching_for_docopts(self):
    """Each caching docopt flag maps to its Azure host caching value."""
    cases = [
        ('--no-cache', 'None'),
        ('--read-only-cache', 'ReadOnly'),
        ('--read-write-cache', 'ReadWrite'),
    ]
    for option, caching_value in cases:
        docopts = self.__host_caching_docopts(option)
        assert Defaults.host_caching_for_docopts(docopts) == caching_value
def __decorate(self, result):
    """
    Flatten a storage service query result into a plain dict.

    Backup region details are only present for redundancy types
    other than locally-redundant; containers and access keys are
    included only when present on the result.
    """
    props = result.storage_service_properties
    decorated = {
        'name': result.service_name,
        'description': props.description,
        'label': props.label,
        'backup-strategy': Defaults.docopt_for_account_type(
            props.account_type
        ),
        'region': props.geo_primary_region,
        'status': props.status,
        'backup': {
            'status': props.status_of_primary,
        },
        'endpoints': props.endpoints
    }
    if decorated['backup-strategy'] != '--locally-redundant':
        # geo-replicated accounts carry secondary region information
        decorated['backup']['backup-region'] = props.geo_secondary_region
        decorated['backup']['backup-region-status'] = props.status_of_secondary
        decorated['backup']['last-failover'] = props.last_geo_failover_time
    if hasattr(result, 'containers') and result.containers:
        decorated['containers'] = result.containers
    service_keys = result.storage_service_keys
    if service_keys:
        decorated['keys'] = {
            'primary': service_keys.primary,
            'secondary': service_keys.secondary
        }
    return decorated
def add_certificate(self, cloud_service_name, ssh_private_key_file):
    """
    create Azure conform certificate from given ssh private key and
    add the CER formatted public pem file to the cloud service. The
    method returns the certificate fingerprint
    """
    pem_cert = self.get_pem_certificate(ssh_private_key_file)
    pem_cert_file = NamedTemporaryFile()
    pem_cert_file.write(pem_cert)
    pem_cert_file.flush()
    # Azure expects the certificate as a PKCS12 (pfx) container built
    # from the private key and the pem certificate.
    # NOTE(review): the original source here was redacted ('******');
    # reconstructed as an empty export password plus the communicate()
    # call and its error check -- confirm against upstream azurectl.
    openssl = subprocess.Popen(
        [
            'openssl', 'pkcs12', '-export',
            '-inkey', ssh_private_key_file,
            '-in', pem_cert_file.name,
            '-passout', 'pass:'
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    pfx_cert, openssl_error = openssl.communicate()
    if openssl.returncode != 0:
        raise AzureCloudServiceOpenSSLError(
            '%s' % openssl_error
        )
    try:
        add_cert_request = self.service.add_service_certificate(
            cloud_service_name,
            base64.b64encode(pfx_cert).decode(), 'pfx', ''
        )
    except Exception as e:
        raise AzureCloudServiceAddCertificateError(
            '%s: %s' % (type(e).__name__, format(e))
        )
    # Wait for the certficate to be created
    request_result = RequestResult(
        Defaults.unify_id(add_cert_request.request_id)
    )
    request_result.wait_for_request_completion(self.service)
    return self.get_fingerprint(pem_cert)
def usage(command_usage):
    """
    Instead of the docopt way to show the usage information we
    provide an azurectl specific usage information. The usage
    data now always consists of

    * the generic call
      azurectl [global options] service <command> [<args>]

    * the command specific usage defined by the docopt string
      short form by default, long form with -h | --help

    * the global options
    """
    # scrape the 'global options' section out of the docopt string
    # embedded in cli.py
    global_options = '\n'
    collecting = False
    with open(Defaults.project_file('cli.py'), 'r') as cli:
        for code_line in cli.readlines():
            stripped = code_line.rstrip()
            if stripped.startswith('global options'):
                collecting = True
            if stripped == '"""':
                collecting = False
            if collecting:
                global_options += format(code_line)
    print('usage: azurectl [global options] service <command> [<args>]\n')
    print(format(command_usage).replace('usage:', ' '))
    if 'global options' not in command_usage:
        print(format(global_options))
    if not format(command_usage).startswith('usage:'):
        # docopt handed us an error message instead of a usage string;
        # surface its first line
        error_details = format(command_usage).splitlines()[0]
        print(error_details)
def create(self, cloud_service_name, location,
           description='CloudService', label=None):
    """
    create cloud service with specified name and return the request id
    """
    # indicate an existing cloud service by returning request id: 0
    if self.__cloud_service_exists(cloud_service_name):
        return 0
    if self.__cloud_service_url_in_use(cloud_service_name):
        message = ('The cloud service name "%s" '
                   'is already in use. '
                   'Please choose a different name.')
        raise AzureCloudServiceAddressError(message % cloud_service_name)
    service_record = {
        'service_name': cloud_service_name,
        'label': label if label else cloud_service_name,
        'description': description,
        'location': location
    }
    try:
        result = self.service.create_hosted_service(**service_record)
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        raise AzureCloudServiceCreateError(
            '%s: %s' % (type(e).__name__, format(e))
        )
def create(self, name, external_port, internal_port, protocol, idle_timeout):
    """
    Add a new input endpoint to the instance role and return the
    request id of the resulting update_role call.
    """
    try:
        role = self.__get_role()
        network_config = self.__get_network_config_for_role(role)
        endpoint = ConfigurationSetInputEndpoint(
            name=name,
            protocol=protocol,
            port=external_port,
            local_port=internal_port,
            idle_timeout_in_minutes=idle_timeout,
            enable_direct_server_return=False,
        )
        # If there are no endpoints the input_endpoints attribute is
        # None. Thus create and set a new container instance first.
        if not network_config.input_endpoints:
            network_config.input_endpoints = ConfigurationSetInputEndpoints()
        network_config.input_endpoints.input_endpoints.append(endpoint)
        result = self.service.update_role(
            self.cloud_service_name,
            self.cloud_service_name,
            role.role_name,
            os_virtual_hard_disk=role.os_virtual_hard_disk,
            network_config=network_config,
            availability_set_name=role.availability_set_name,
            data_virtual_hard_disks=role.data_virtual_hard_disks
        )
    except Exception as e:
        raise AzureEndpointCreateError(
            '%s: %s' % (type(e).__name__, format(e))
        )
    return Defaults.unify_id(result.request_id)
def delete(self, name):
    """
    Remove the input endpoint matching the given name
    (case-insensitive) from the instance role and return the
    request id of the resulting update_role call.
    """
    try:
        role = self.__get_role()
        network_config = self.__get_network_config_for_role(role)
        if not network_config.input_endpoints:
            raise AzureEndpointDeleteError("No endpoints found.")
        endpoints = network_config.input_endpoints.input_endpoints
        match_index = next(
            (
                position for position, endpoint in enumerate(endpoints)
                if endpoint.name.upper() == name.upper()
            ),
            None
        )
        if match_index is None:
            raise AzureEndpointDeleteError(
                "No endpoint named %s was found." % name
            )
        del endpoints[match_index]
        result = self.service.update_role(
            self.cloud_service_name,
            self.cloud_service_name,
            role.role_name,
            os_virtual_hard_disk=role.os_virtual_hard_disk,
            network_config=network_config,
            availability_set_name=role.availability_set_name,
            data_virtual_hard_disks=role.data_virtual_hard_disks
        )
    except AzureEndpointDeleteError:
        # our own errors pass through unchanged
        raise
    except Exception as e:
        raise AzureEndpointDeleteError(
            '%s: %s' % (type(e).__name__, format(e))
        )
    return Defaults.unify_id(result.request_id)
def __attach(self):
    """
    Handle the data-disk attach command: collect the optional
    docopt arguments, trigger the attach operation and record the
    request under a key naming the attached lun.
    """
    self.validate_at_least_one_argument_is_set(
        ['--disk-name', '--blob-name'])
    optional_args = {}
    if self.command_args['--label']:
        optional_args['label'] = self.command_args['--label']
    if self.command_args['--lun']:
        optional_args['lun'] = int(self.command_args['--lun'])
    if self.command_args['--blob-name']:
        optional_args['blob_name'] = self.command_args['--blob-name']
    caching_flags = ['--no-cache', '--read-only-cache', '--read-write-cache']
    if any(self.command_args[flag] for flag in caching_flags):
        optional_args['host_caching'] = Defaults.host_caching_for_docopts(
            self.command_args)
    request_id = self.data_disk.attach(
        self.command_args['--disk-name'],
        self.command_args['--cloud-service-name'],
        self.command_args['--instance-name'],
        **optional_args
    )
    # instance name falls back to the cloud service name
    instance_name = (
        self.command_args['--instance-name'] or
        self.command_args['--cloud-service-name']
    )
    self.result.add(
        'data-disk attach:%s:%s:%d' % (
            self.command_args['--cloud-service-name'],
            instance_name,
            int(self.data_disk.attached_lun)
        ),
        request_id
    )
    if self.command_args['--wait']:
        self.request_wait(request_id)
    self.out.display()
def update(
    self, name, description, label, account_type,
    regenerate_primary_key=False, regenerate_secondary_key=False
):
    """
    Update the storage account properties and return the request id.

    Arguments passed as a falsy value keep the current value of the
    account. Key regeneration is triggered per flag after the
    property update.
    """
    try:
        current = self.service.get_storage_account_properties(name)
        current_props = current.storage_service_properties
        result = self.service.update_storage_account(
            name,
            description or current_props.description,
            label or current_props.label,
            account_type=account_type or current_props.account_type
        )
        if regenerate_primary_key:
            self.service.regenerate_storage_account_keys(name, 'Primary')
        if regenerate_secondary_key:
            self.service.regenerate_storage_account_keys(name, 'Secondary')
    except Exception as e:
        raise AzureStorageAccountUpdateError(
            '%s: %s' % (type(e).__name__, format(e))
        )
    return Defaults.unify_id(result.request_id)
def create(self, name, blob_name, label=None, container_name=None):
    """
    Register the given blob as an OS image and return the request id.

    The container defaults to the account's storage container and
    the label defaults to the image name. The blob must already
    exist in the container.
    """
    container_name = container_name or self.account.storage_container()
    label = label or name
    try:
        storage = BaseBlobService(
            self.account.storage_name(),
            self.account.storage_key(),
            endpoint_suffix=self.account.get_blob_service_host_base()
        )
        # probe the blob; a failure means it is not usable as image source
        storage.get_blob_properties(container_name, blob_name)
    except Exception:
        raise AzureBlobServicePropertyError(
            '%s not found in container %s' % (blob_name, container_name)
        )
    try:
        media_link = storage.make_blob_url(container_name, blob_name)
        result = self.service.add_os_image(
            label, media_link, name, 'Linux'
        )
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        raise AzureOsImageCreateError(
            '%s: %s' % (type(e).__name__, format(e))
        )
def setup(self):
    """Per-test fixture wiring an Endpoint against a fully mocked account."""
    # construct an account
    account = AzureAccount(
        Config(region_name='East US 2', filename='../data/config'))
    self.service = mock.Mock()
    account.get_management_service = mock.Mock(return_value=self.service)
    account.get_blob_service_host_base = mock.Mock(return_value='test.url')
    account.storage_key = mock.Mock()
    # now that that's done, instantiate an Endpoint with the account
    self.endpoint = Endpoint(account)
    # asynchronous API operations return a request object
    self.my_request = mock.Mock(request_id=Defaults.unify_id(42))
    # variables used in multiple tests
    self.cloud_service_name = 'mockcloudservice'
    self.instance_name = 'mockcloudserviceinstance1'
    self.endpoint_name = 'HTTPS'
    self.port = '443'
    self.idle_timeout = 10
    self.protocol = 'tcp'
    self.udp_endpoint_name = 'SNMP'
    self.udp_port = '131'
    self.instance_port = '10000'
    self.udp_protocol = 'udp'
    # identify the instance for the Endpoint to work on
    self.endpoint.set_instance(self.cloud_service_name, self.instance_name)
    # mock out the get_role function of service
    self.service.get_role = mock.Mock(return_value=self.mock_role())
def delete(self, name):
    """
    Delete the named reserved IP address and return the request id.
    """
    try:
        result = self.service.delete_reserved_ip_address(name)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureReservedIpDeleteError(error_detail)
    return Defaults.unify_id(result.request_id)
def setup(self):
    """Per-test fixture wiring a DataDisk against a fully mocked account."""
    account = AzureAccount(
        Config(region_name='East US 2', filename='../data/config'))
    self.service = mock.Mock()
    account.get_management_service = mock.Mock(return_value=self.service)
    account.get_blob_service_host_base = mock.Mock(return_value='test.url')
    account.storage_key = mock.Mock()
    # now that that's done, instantiate a DataDisk with the account
    self.data_disk = DataDisk(account)
    # asynchronous API operations return a request object
    self.my_request = mock.Mock(request_id=Defaults.unify_id(42))
    # variables used in multiple tests
    self.cloud_service_name = 'mockcloudservice'
    self.instance_name = 'mockcloudserviceinstance1'
    self.lun = 0
    self.host_caching = 'ReadWrite'
    self.disk_filename = 'mockcloudserviceinstance1-data-disk-0.vhd'
    self.disk_name = 'mockcloudserviceinstance1-data-disk-0'
    # expected blob URL for the disk in the account's storage container
    self.disk_url = (
        'https://' + account.storage_name() +
        '.blob.' + account.get_blob_service_host_base() +
        '/' + account.storage_container() +
        '/' + self.disk_filename)
    self.disk_label = 'Mock data disk'
    self.disk_size = 42
    # timestamps are compared in isoformat with ':' replaced by '_'
    self.timestamp = datetime.utcnow()
    self.time_string = datetime.isoformat(self.timestamp).replace(':', '_')
    self.account = account
def show_attached(
    self, cloud_service_name, instance_name=None, at_lun=None
):
    """
    Show details of the data disks attached to the virtual machine.
    If a lun is specified show only details for the disk at the
    specified lun
    """
    if not instance_name:
        instance_name = cloud_service_name
    selected_luns = (
        [at_lun] if at_lun is not None
        else list(range(Defaults.max_vm_luns()))
    )
    disks = []
    for lun in selected_luns:
        try:
            disks.append(
                self.service.get_data_disk(
                    cloud_service_name, cloud_service_name,
                    instance_name, lun
                )
            )
        except Exception as e:
            # a missing disk at a probed lun is expected while scanning
            # all luns; only an explicitly requested lun is an error
            if at_lun is not None:
                raise AzureDataDiskShowError(
                    '%s: %s' % (type(e).__name__, format(e))
                )
    return [self.__decorate_attached_disk(disk) for disk in disks]
def delete(self, name):
    """
    Delete the named storage account and return the request id.
    """
    try:
        result = self.service.delete_storage_account(name)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureStorageAccountDeleteError(error_detail)
    return Defaults.unify_id(result.request_id)
def create(self, name, region):
    """
    Create a reserved IP address in the given region and return the
    request id.
    """
    try:
        result = self.service.create_reserved_ip_address(
            name, location=region
        )
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureReservedIpCreateError(error_detail)
    return Defaults.unify_id(result.request_id)
def publish(self, name, permission):
    """
    Share the named VM image with the given permission and return
    the request id.
    """
    try:
        result = self.service.share_vm_image(name, permission)
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureOsImagePublishError(error_detail)
def unreplicate(self, name):
    """
    Stop replication of the named VM image and return the request id.
    """
    try:
        result = self.service.unreplicate_vm_image(name)
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureOsImageUnReplicateError(error_detail)
def start_instance(self, cloud_service_name, instance_name):
    """
    Start the specified virtual disk image instance.
    """
    try:
        result = self.service.start_role(
            cloud_service_name, cloud_service_name, instance_name
        )
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureVmStartError(error_detail)
def disassociate(self, name, cloud_service_name):
    """
    Detach the reserved IP address from the given cloud service
    deployment and return the request id.
    """
    try:
        result = self.service.disassociate_reserved_ip_address(
            name=name,
            service_name=cloud_service_name,
            deployment_name=cloud_service_name
        )
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureReservedIpDisAssociateError(error_detail)
    return Defaults.unify_id(result.request_id)
def delete(self, name, delete_disk=False):
    """
    Delete the named OS image, optionally also removing the backing
    disk, and return the request id.
    """
    try:
        result = self.service.delete_os_image(name, delete_disk)
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureOsImageDeleteError(error_detail)
def reboot_instance(self, cloud_service_name, instance_name):
    """
    Requests reboot of a virtual disk image instance
    """
    try:
        result = self.service.reboot_role_instance(
            cloud_service_name, cloud_service_name, instance_name
        )
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureVmRebootError(error_detail)
def test_account_type_for_docopts(self):
    """Each redundancy docopt flag maps to its Azure account type."""
    cases = [
        ('--locally-redundant', 'Standard_LRS'),
        ('--zone-redundant', 'Standard_ZRS'),
        ('--geo-redundant', 'Standard_GRS'),
        ('--read-access-geo-redundant', 'Standard_RAGRS'),
    ]
    for option, account_type in cases:
        # reset all flags, then enable exactly one
        self.__set_account_type_docopts()
        self.account_type_docopts[option] = True
        result = Defaults.account_type_for_docopts(self.account_type_docopts)
        assert result == account_type
def delete_instance(self, cloud_service_name, instance_name):
    """
    delete a virtual disk image instance
    """
    try:
        result = self.service.delete_role(
            cloud_service_name, cloud_service_name, instance_name, True
        )
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureVmDeleteError(error_detail)
def update(self, image_name, update_record):
    """
    Update the OS image attributes from update_record and verify the
    service accepted every requested change.

    Raises AzureOsImageUpdateError if fetching or updating the image
    fails, or if any requested attribute did not change -- the latter
    typically means the account is not registered as image publisher.
    """
    try:
        os_image = self.service.get_os_image(image_name)
    except Exception as e:
        raise AzureOsImageUpdateError(
            '%s: %s' % (type(e).__name__, format(e))
        )
    self.__decorate_os_image_attributes_for_update(
        os_image, update_record
    )
    try:
        self.service.update_os_image_from_image_reference(
            image_name, os_image
        )
        os_image_updated = self.service.get_os_image(image_name)
    except Exception as e:
        raise AzureOsImageUpdateError(
            '%s: %s' % (type(e).__name__, format(e))
        )

    def normalized(attribute, value):
        # uri values are compared as normalized paths, which avoids
        # false positives from redundant separators
        return os.path.normpath(value) if '_uri' in attribute else value

    elements_not_changed = [
        attribute for attribute in sorted(update_record.keys())
        if normalized(attribute, Defaults.get_attribute(os_image, attribute))
        != normalized(
            attribute, Defaults.get_attribute(os_image_updated, attribute)
        )
    ]
    if elements_not_changed:
        message = [
            'The element(s) "%s" could not be updated.' %
            ','.join(elements_not_changed),
            'Please check if your account is registered as image publisher'
        ]
        raise AzureOsImageUpdateError(
            ' '.join(message)
        )
def __create(self):
    """
    Handle the storage account create command: trigger the create
    request, optionally wait for completion and record the result.
    """
    account_name = self.command_args['--name']
    request_id = self.storage_account.create(
        account_name,
        self.command_args['--description'],
        self.command_args['--label'],
        Defaults.account_type_for_docopts(self.command_args)
    )
    if self.command_args['--wait']:
        self.request_wait(request_id)
    self.result.add('storage_account:' + account_name, request_id)
def delete(self, cloud_service_name, complete=False):
    """
    delete specified cloud service, if complete is set to true all
    OS/data disks and the source blobs for the disks will be deleted too
    """
    try:
        result = self.service.delete_hosted_service(
            cloud_service_name, complete
        )
        return Defaults.unify_id(result.request_id)
    except Exception as e:
        error_detail = '%s: %s' % (type(e).__name__, format(e))
        raise AzureCloudServiceDeleteError(error_detail)