def build_ip_configuration(ipc):
    '''
    Attempts to construct a proper IP Configuration from a context object

    :params `cloudify.context.RelationshipSubjectContext` ipc:
        IP Configuration context object
    :returns: IP Configuration dict
    :rtype: dict or None
    '''
    # Without a context or any relationships there is nothing to build from
    if not ipc or not ipc.instance.relationships:
        return None
    # Resolve the Subnet this IP Configuration is connected to
    subnet_ref = utils.get_rel_id_reference(
        Subnet,
        constants.REL_IPC_CONNECTED_TO_SUBNET,
        _ctx=ipc)
    # Resolve the Public IP this IP Configuration is connected to
    public_ip_ref = utils.get_rel_id_reference(
        PublicIPAddress,
        constants.REL_IPC_CONNECTED_TO_PUBIP,
        _ctx=ipc)
    # Merge the node's resource_config over the relationship-derived
    # properties, then wrap with the resource name
    merged_properties = utils.dict_update(
        {
            'subnet': subnet_ref,
            'publicIPAddress': public_ip_ref
        },
        utils.get_resource_config(_ctx=ipc))
    return {
        'name': utils.get_resource_name(ipc),
        'properties': merged_properties
    }
def configure(**_): ''' Uses an existing, or creates a new, Network Interface Card .. warning:: The "configure" operation is actually the second half of the "create" operation. This is necessary since IP Configuration nodes are treated as separate, stand-alone types and must be "connected" to the NIC before it's actually created. The actual "create" operation simply assigns a UUID for the node and the "configure" operation creates the object ''' # Create a resource (if necessary) utils.task_resource_create( NetworkInterfaceCard(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)), { 'location': ctx.node.properties.get('location'), 'tags': ctx.node.properties.get('tags'), 'properties': utils.dict_update( utils.get_resource_config(), { 'networkSecurityGroup': get_connected_nsg(), 'ipConfigurations': get_ip_configurations() }) })
def handle_credentials(self, azure_config):
    """
    Gets any Azure API access information from the current node
    properties or a provider context file created during manager
    bootstrapping.

    :returns: Azure credentials and access information
    :rtype: dict
    """
    def read_file_credentials(config_path=constants.CONFIG_PATH):
        """
        Gets Azure API access information from the provider context
        config file

        :returns: Azure credentials and access information
        :rtype: dict
        """
        wanted_keys = [
            'client_id', 'client_secret', 'subscription_id', 'tenant_id',
            'endpoint_resource', 'endpoint_verify',
            'endpoints_resource_manager', 'endpoints_active_directory',
            'certificate', 'thumbprint', 'cloud_environment'
        ]
        parser = SafeConfigParser()
        parser.read(config_path)
        return dict(
            (key, parser.get('Credentials', key)) for key in wanted_keys)

    # Credentials found in the file act as a base layer; anything supplied
    # in azure_config overrides them.
    file_credentials = dict()
    credentials_path = environ.get(constants.CONFIG_PATH_ENV_VAR_NAME,
                                   constants.CONFIG_PATH)
    if path.exists(credentials_path):
        file_credentials = read_file_credentials(credentials_path)
    merged = utils.dict_update(file_credentials, azure_config)
    # Default to verifying TLS endpoints unless explicitly configured
    if 'endpoint_verify' not in merged:
        merged['endpoint_verify'] = True
    return utils.cleanup_empty_params(merged)
def build_ip_configuration(ipc):
    '''
    Attempts to construct a proper IP Configuration from a context object

    :params `cloudify.context.RelationshipSubjectContext` ipc:
        IP Configuration context object
    :returns: IP Configuration dict
    :rtype: dict or None
    '''
    # Nothing to build without a context and at least one relationship
    if not ipc or not ipc.instance.relationships:
        return None
    # Locate the connected Subnet (if any)
    subnet_ref = utils.get_rel_id_reference(
        Subnet,
        constants.REL_IPC_CONNECTED_TO_SUBNET,
        _ctx=ipc)
    # Locate the connected Public IP (if any)
    public_ip_ref = utils.get_rel_id_reference(
        PublicIPAddress,
        constants.REL_IPC_CONNECTED_TO_PUBIP,
        _ctx=ipc)
    # Start from the relationship-derived skeleton, then layer the node's
    # resource_config on top (top-level merge)
    skeleton = {
        'name': utils.get_resource_name(ipc),
        'properties': {
            'subnet': subnet_ref,
            'publicIPAddress': public_ip_ref
        }
    }
    return utils.dict_update(skeleton, utils.get_resource_config(_ctx=ipc))
def configure(**_): '''Uses an existing, or creates a new, Load Balancer''' # Get the Frontend IP Configuration fe_ip_cfg = get_ip_configurations(rel=constants.REL_LB_CONNECTED_TO_IPC) ctx.logger.debug('fe_ip_cfg: {0}'.format(fe_ip_cfg)) if not len(fe_ip_cfg): raise NonRecoverableError( 'At least 1 Frontend IP Configuration must be ' 'associated with the Load Balancer') # Remove the subnet if there's a public IP present for ip_cfg in fe_ip_cfg: if ip_cfg.get('properties', dict()).get('publicIPAddress'): if ip_cfg.get('properties', dict()).get('subnet'): del ip_cfg['properties']['subnet'] # Create a resource (if necessary) utils.task_resource_create( LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)), { 'location': ctx.node.properties.get('location'), 'tags': ctx.node.properties.get('tags'), 'properties': utils.dict_update(utils.get_resource_config(), {'frontendIPConfigurations': fe_ip_cfg}) }) # Get an interface to the Load Balancer lb_iface = LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)) lb_data = lb_iface.get(utils.get_resource_name()) # Get the ID of the Frontend IP Configuration for fe_ipc_data in lb_data.get('properties', dict()).get('frontendIPConfigurations', list()): ipc_iface = IPConfiguration() ipc_id = fe_ipc_data.get('id') if not ipc_id: break ipc_iface.endpoint = '{0}{1}'.format(constants.CONN_API_ENDPOINT, ipc_id) # Get the Frontend private IP address ipc_data = ipc_iface.get() ctx.instance.runtime_properties['ip'] = \ ipc_data.get('properties', dict()).get('privateIPAddress') # Get the ID of the Frontend Public IP Configuration pipc_iface = PublicIPAddress() pipc_id = fe_ipc_data.get('properties', dict()).get('publicIPAddress', dict()).get('id') if not pipc_id: break pipc_iface.endpoint = '{0}{1}'.format(constants.CONN_API_ENDPOINT, pipc_id) # Get the Frontend public IP address pipc_data = pipc_iface.get() ctx.instance.runtime_properties['public_ip'] = \ 
pipc_data.get('properties', dict()).get('ipAddress')
def configure(**_): '''Uses an existing, or creates a new, Load Balancer''' # Get the Frontend IP Configuration fe_ip_cfg = get_ip_configurations(rel=constants.REL_LB_CONNECTED_TO_IPC) ctx.logger.debug('fe_ip_cfg: {0}'.format(fe_ip_cfg)) if not len(fe_ip_cfg): raise NonRecoverableError( 'At least 1 Frontend IP Configuration must be ' 'associated with the Load Balancer') # Remove the subnet if there's a public IP present for ip_cfg in fe_ip_cfg: if ip_cfg.get('properties', dict()).get('publicIPAddress'): if ip_cfg.get('properties', dict()).get('subnet'): del ip_cfg['properties']['subnet'] # Create a resource (if necessary) utils.task_resource_create( LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)), { 'location': ctx.node.properties.get('location'), 'tags': ctx.node.properties.get('tags'), 'properties': utils.dict_update( utils.get_resource_config(), { 'frontendIPConfigurations': fe_ip_cfg }) }) # Get an interface to the Load Balancer lb_iface = LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)) lb_data = lb_iface.get(utils.get_resource_name()) # Get the ID of the Frontend IP Configuration for fe_ipc_data in lb_data.get('properties', dict()).get( 'frontendIPConfigurations', list()): ipc_iface = IPConfiguration() ipc_id = fe_ipc_data.get('id') if not ipc_id: break ipc_iface.endpoint = '{0}{1}'.format( constants.CONN_API_ENDPOINT, ipc_id) # Get the Frontend private IP address ipc_data = ipc_iface.get() ctx.instance.runtime_properties['ip'] = \ ipc_data.get('properties', dict()).get('privateIPAddress') # Get the ID of the Frontend Public IP Configuration pipc_iface = PublicIPAddress() pipc_id = fe_ipc_data.get('properties', dict()).get( 'publicIPAddress', dict()).get('id') if not pipc_id: break pipc_iface.endpoint = '{0}{1}'.format( constants.CONN_API_ENDPOINT, pipc_id) # Get the Frontend public IP address pipc_data = pipc_iface.get() ctx.instance.runtime_properties['public_ip'] = \ 
pipc_data.get('properties', dict()).get('ipAddress')
def create(args=None, **_):
    '''Uses an existing, or creates a new, Virtual Machine

    :param dict args: override values merged into the node's
        resource_config (passed through to utils.get_resource_config)
    '''
    # Generate a resource name (if needed)
    utils.generate_resource_name(
        VirtualMachine(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        generator=vm_name_generator)
    res_cfg = utils.get_resource_config(args=args) or dict()
    # Build storage profile
    osdisk = build_osdisk_profile(
        res_cfg.get('storageProfile', dict()).get('osDisk', dict()))
    datadisks = build_datadisks_profile(
        res_cfg.get('storageProfile', dict()).get('dataDisks', list()))
    storage_profile = {'osDisk': osdisk, 'dataDisks': datadisks}
    # Build the network profile
    network_profile = build_network_profile()
    # Build the OS profile
    os_family = ctx.node.properties.get('os_family', '').lower()
    os_profile = dict()
    # Set defaults for Windows installs to enable WinRM listener
    if os_family == 'windows' and \
            not res_cfg.get('osProfile', dict()).get('windowsConfiguration'):
        os_profile = {
            'windowsConfiguration': {
                # This is required for extension scripts to work
                'provisionVMAgent': True,
                'winRM': {
                    'listeners': [{
                        'protocol': 'Http',
                        'certificateUrl': None
                    }]
                }
            },
            'linuxConfiguration': None
        }
    elif not res_cfg.get('osProfile', dict()).get('linuxConfiguration'):
        os_profile = {
            'linuxConfiguration': {
                'disablePasswordAuthentication': False
            },
            'windowsConfiguration': None
        }
    # Set the computerName if it's not set already
    os_profile['computerName'] = \
        res_cfg.get(
            'osProfile', dict()
        ).get('computerName', utils.get_resource_name())
    resource_create_payload = \
        {
            'location': ctx.node.properties.get('location'),
            'tags': ctx.node.properties.get('tags'),
            'plan': ctx.node.properties.get('plan'),
            'properties': utils.dict_update(
                utils.get_resource_config(args=args),
                {
                    'availabilitySet': utils.get_rel_id_reference(
                        AvailabilitySet, constants.REL_CONNECTED_TO_AS),
                    'networkProfile': network_profile,
                    'storageProfile': storage_profile,
                    'osProfile': os_profile
                }
            )
        }
    # support userdata from args.
    # NOTE(review): b64encode returns bytes on Python 3; the SDK-style
    # create() in this plugin adds .decode('utf-8') here — confirm whether
    # this legacy path only targets Python 2
    os_profile = resource_create_payload['properties']['osProfile']
    userdata = _handle_userdata(os_profile.get('customData'))
    if userdata:
        ctx.logger.warn('Azure customData implementation is dependent on '
                        'Virtual Machine image support.')
        os_profile['customData'] = base64.b64encode(userdata.encode())
    # Remove customData from osProfile if empty to avoid 400 Error.
    elif 'customData' in resource_create_payload['properties']['osProfile']:
        del resource_create_payload['properties']['osProfile']['customData']
    # Create a resource (if necessary)
    utils.task_resource_create(
        VirtualMachine(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        resource_create_payload)
def create(ctx, args=None, **_):
    """Uses an existing, or creates a new, Virtual Machine

    :param ctx: Cloudify operation context
    :param dict args: overrides merged into resource_config
    :raises cloudify.exceptions.NonRecoverableError: when the Azure API
        rejects the create_or_update call
    """
    azure_config = utils.get_client_config(ctx.node.properties)
    name = utils.get_resource_name(ctx)
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_COMPUTE)
    virtual_machine = VirtualMachine(azure_config, ctx.logger, api_version)
    res_cfg = ctx.node.properties.get("resource_config", {})
    # spot_instance is plugin-level sugar, not part of the Azure payload
    spot_instance = res_cfg.pop("spot_instance", None)
    # Build storage profile
    osdisk = build_osdisk_profile(ctx, res_cfg.get(
        'storageProfile', dict()).get('osDisk', dict()))
    datadisks = build_datadisks_profile(ctx, res_cfg.get(
        'storageProfile', dict()).get('dataDisks', list()))
    storage_profile = {
        'os_disk': osdisk,
        'data_disks': datadisks
    }
    # Build the network profile
    network_profile = build_network_profile(ctx)
    # Build the OS profile
    os_family = ctx.node.properties.get('os_family', '').lower()
    os_profile = dict()
    # Set defaults for Windows installs to enable WinRM listener
    if os_family == 'windows' and \
            not res_cfg.get('osProfile', dict()).get('windowsConfiguration'):
        os_profile = {
            'windows_configuration': {
                # This is required for extension scripts to work
                'provision_vm_agent': True,
                'win_rm': {
                    'listeners': [{
                        'protocol': 'Http',
                        'certificate_url': None
                    }]
                }
            },
            'linux_configuration': None
        }
    elif not res_cfg.get('osProfile', dict()).get('linuxConfiguration'):
        os_profile = {
            'linux_configuration': {
                'disable_password_authentication': False
            },
            'windows_configuration': None
        }
    # Set the computerName if it's not set already
    os_profile['computer_name'] = \
        res_cfg.get(
            'osProfile', dict()
        ).get('computerName', name)
    # Resolve an Availability Set reference from relationships
    # NOTE(review): no break — if several relationships match, the last one
    # wins; confirm that is intended
    availability_set = None
    rel_type = constants.REL_CONNECTED_TO_AS
    for rel in ctx.instance.relationships:
        if isinstance(rel_type, tuple):
            if any(x in rel.type_hierarchy for x in rel_type):
                availability_set = {
                    'id': rel.target.instance.runtime_properties.get(
                        "resource_id")
                }
        else:
            if rel_type in rel.type_hierarchy:
                availability_set = {
                    'id': rel.target.instance.runtime_properties.get(
                        "resource_id")
                }
    resource_create_payload = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'plan': ctx.node.properties.get('plan'),
        'availabilitySet': availability_set,
        'networkProfile': network_profile,
        'storageProfile': storage_profile,
        'osProfile': os_profile
    }
    # check if spot_instance
    if spot_instance and spot_instance.get("is_spot_instance"):
        # this is just an indicator, not part of the api
        spot_instance.pop("is_spot_instance")
        # handle the params
        resource_create_payload = \
            utils.dict_update(resource_create_payload, spot_instance)
    resource_create_payload = \
        utils.handle_resource_config_params(resource_create_payload,
                                            utils.get_resource_config(
                                                _ctx=ctx,
                                                args=args))
    # support userdata from args.
    os_profile = resource_create_payload['os_profile']
    userdata = _handle_userdata(ctx, os_profile.get('custom_data'))
    if userdata:
        ctx.logger.warn(
            'Azure customData implementation is dependent on '
            'Virtual Machine image support.')
        resource_create_payload['os_profile']['custom_data'] = \
            base64.b64encode(userdata.encode('utf-8')).decode('utf-8')
    # Remove custom_data from os_profile if empty to avoid Errors.
    elif 'custom_data' in resource_create_payload['os_profile']:
        del resource_create_payload['os_profile']['custom_data']
    # Create a resource (if necessary)
    try:
        result = \
            virtual_machine.create_or_update(resource_group_name, name,
                                             resource_create_payload)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "create virtual_machine '{0}' "
            "failed with this error : {1}".format(name, cr.message)
        )
    ctx.instance.runtime_properties['resource_group'] = resource_group_name
    ctx.instance.runtime_properties['resource'] = result
    ctx.instance.runtime_properties['resource_id'] = result.get("id", "")
def update(self, name, params, force=False):
    '''
    Updates an existing resource

    :param string name: Name of the resource
    :param dict params: Parameters to update the resource with
    :param boolean force: Forces the params to be sent without merging
        with the resources' existing data
    :raises: :exc:`cloudify.exceptions.RecoverableError`,
        :exc:`cloudify.exceptions.NonRecoverableError`,
        :exc:`requests.RequestException`
    '''
    if not force:
        # Get the existing data (since partial updates seem to
        # be in a questionable state on Azure's side of things)
        data = self.get(name)
        # Updating the data with our new data
        params = utils.dict_update(data, params)
    self.log.info('Updating {0} "{1}"'.format(self.name, name))
    # Reset any previously tracked async operation before issuing a new one
    if self.ctx.instance._modifiable:
        self.ctx.instance.runtime_properties['async_op'] = None
    # Sanitize input data
    params = self.sanitize_json_input(params)
    # Make the request
    res = self.client.request(
        method='put',
        url='{0}/{1}'.format(self.endpoint, name),
        json=params)
    # Convert headers from CaseInsensitiveDict to Dict
    headers = dict(res.headers)
    self.log.debug('headers: {0}'.format(
        utils.secure_logging_content(headers)))
    # Check the response
    # HTTP 202 (ACCEPTED) - The operation has started but is asynchronous
    if res.status_code == httplib.ACCEPTED:
        if not headers.get('location'):
            raise RecoverableError(
                'HTTP 202 ACCEPTED but no Location header present')
        if not self.ctx.instance._modifiable:
            self.log.warn(
                'Not retrying async operation, '
                'because NodeInstanceContext is not modifiable. '
                'headers: {0}'.format(headers)
            )
            return
        # Persist the async-op headers so a later poll can resume it
        self.ctx.instance.runtime_properties['async_op'] = headers
        # NOTE(review): uses the module-level `ctx` here while the rest of
        # the method uses `self.ctx` — confirm this is deliberate
        return ctx.operation.retry(
            'Operation: "{0}" started'
            .format(self.get_operation_id(headers)),
            retry_after=self.get_retry_after(headers))
    # HTTP 200 (OK) - The resource already exists
    elif res.status_code == httplib.OK:
        if headers.get('azure-asyncoperation'):
            if not self.ctx.instance._modifiable:
                self.log.warn(
                    'Not retrying async operation, '
                    'because NodeInstanceContext is not modifiable. '
                    'headers: {0}'.format(headers)
                )
                return
            self.ctx.instance.runtime_properties['async_op'] = headers
            return ctx.operation.retry(
                'Operation: "{0}" started'
                .format(self.get_operation_id(headers)),
                retry_after=self.get_retry_after(headers))
        self.log.warn('{0} already exists. Using resource.'
                      .format(self.name))
        return
    # HTTP 400 (BAD_REQUEST) - We're sending bad data
    elif res.status_code == httplib.BAD_REQUEST:
        self.log.info('BAD REQUEST: response: {}'.format(
            utils.secure_logging_content(res.content)))
        raise UnexpectedResponse(
            'Recieved HTTP 400 BAD REQUEST', res.json())
    # HTTP 409 (CONFLICT) - Operation failed
    elif res.status_code == httplib.CONFLICT:
        raise NonRecoverableError(
            'Operation failed. (code={0}, data={1})'
            .format(res.status_code, res.text))
    # All other errors will be treated as recoverable
    elif res.status_code != httplib.CREATED:
        raise RecoverableError(
            'Expected HTTP status code {0}, recieved {1}'
            .format(httplib.CREATED, res.status_code))
def configure(ctx, **_):
    """Uses an existing, or creates a new, Load Balancer

    Gathers the frontend IP configurations from relationships, creates the
    Load Balancer via the SDK, then publishes `ip` / `public_ip` runtime
    properties from the resulting frontend configurations.

    :param ctx: Cloudify operation context
    :raises cloudify.exceptions.NonRecoverableError: when no frontend IP
        configuration is connected, or the Azure API call fails
    """
    # Get the Frontend IP Configuration
    fe_ip_cfg = get_ip_configurations(rel=constants.REL_LB_CONNECTED_TO_IPC)
    ctx.logger.debug('fe_ip_cfg: {0}'.format(fe_ip_cfg))
    if not len(fe_ip_cfg):
        raise cfy_exc.NonRecoverableError(
            'At least 1 Frontend IP Configuration must be '
            'associated with the Load Balancer')
    # Remove the subnet if there's a public IP present
    # (a frontend config may not carry both)
    for ip_cfg in fe_ip_cfg:
        if ip_cfg.get('public_ip_address'):
            if ip_cfg.get('subnet'):
                del ip_cfg['subnet']
    # Create a resource (if necessary)
    azure_config = ctx.node.properties.get('azure_config')
    if not azure_config.get("subscription_id"):
        azure_config = ctx.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    name = ctx.instance.runtime_properties.get('name')
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    load_balancer = LoadBalancer(azure_config, ctx.logger, api_version)
    lb_params = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
    }
    lb_params = \
        utils.handle_resource_config_params(lb_params,
                                            ctx.node.properties.get(
                                                'resource_config', {}))
    lb_params = utils.dict_update(
        lb_params,
        {
            'frontend_ip_configurations': fe_ip_cfg
        }
    )
    # clean empty values from params
    lb_params = \
        utils.cleanup_empty_params(lb_params)
    try:
        result = \
            load_balancer.create_or_update(resource_group_name, name,
                                           lb_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "create load_balancer '{0}' "
            "failed with this error : {1}".format(name, cr.message)
        )
    ctx.instance.runtime_properties['resource_group'] = resource_group_name
    ctx.instance.runtime_properties['resource'] = result
    ctx.instance.runtime_properties['resource_id'] = result.get("id", "")
    ctx.instance.runtime_properties['name'] = name
    # Publish IP runtime properties from the resulting frontend configs
    # (last processed config wins, matching previous behavior)
    for fe_ipc_data in result.get('frontend_ip_configurations', list()):
        ctx.instance.runtime_properties['ip'] = \
            fe_ipc_data.get('private_ip_address')
        public_ip = \
            fe_ipc_data.get('public_ip_address', {}).get('ip_address', None)
        if not public_ip:
            # BUG FIX: previously read `ip_cfg`, the stale leftover loop
            # variable from the subnet-removal loop above, instead of the
            # frontend config currently being processed
            pip_id = \
                fe_ipc_data.get('public_ip_address', {}).get('id')
            if not pip_id:
                # No public IP attached to this frontend config; skip the
                # lookup instead of crashing on a missing id
                continue
            pip = PublicIPAddress(azure_config, ctx.logger)
            pip_name = pip_id.rsplit('/', 1)[1]
            public_ip_data = pip.get(resource_group_name, pip_name)
            public_ip = public_ip_data.get("ip_address")
        ctx.instance.runtime_properties['public_ip'] = public_ip
def update(self, name, params, force=False):
    '''
    Updates an existing resource

    :param string name: Name of the resource
    :param dict params: Parameters to update the resource with
    :param boolean force: Forces the params to be sent without merging
        with the resources' existing data
    :raises: :exc:`cloudify.exceptions.RecoverableError`,
        :exc:`cloudify.exceptions.NonRecoverableError`,
        :exc:`requests.RequestException`
    '''
    if not force:
        # Get the existing data (since partial updates seem to
        # be in a questionable state on Azure's side of things)
        data = self.get(name)
        # Updating the data with our new data
        params = utils.dict_update(data, params)
    self.log.info('Updating {0} "{1}"'.format(self.name, name))
    # Reset any previously tracked async operation before issuing a new one
    if self.ctx.instance._modifiable:
        self.ctx.instance.runtime_properties['async_op'] = None
    # Sanitize input data
    params = self.sanitize_json_input(params)
    # Make the request
    res = self.client.request(
        method='put',
        url='{0}/{1}'.format(self.endpoint, name),
        json=params)
    self.log.debug('headers: {0}'.format(
        utils.secure_logging_content(dict(res.headers))))
    # Normalize header names for the lookups below
    headers = self.lowercase_headers(res.headers)
    # Check the response
    # HTTP 202 (ACCEPTED) - The operation has started but is asynchronous
    if res.status_code == httplib.ACCEPTED:
        if not headers.get('location'):
            raise RecoverableError(
                'HTTP 202 ACCEPTED but no Location header present')
        if not self.ctx.instance._modifiable:
            self.log.warn(
                'Not retrying async operation, '
                'because NodeInstanceContext is not modifiable. '
                'headers: {0}'.format(headers)
            )
            return
        # Persist the async-op headers so a later poll can resume it
        self.ctx.instance.runtime_properties['async_op'] = headers
        # NOTE(review): uses module-level `ctx` while the rest of the method
        # uses `self.ctx` — confirm this is deliberate
        return ctx.operation.retry(
            'Operation: "{0}" started'
            .format(self.get_operation_id(headers)),
            retry_after=self.get_retry_after(headers))
    # HTTP 200 (OK) - The resource already exists
    elif res.status_code == httplib.OK:
        if headers.get('azure-asyncoperation'):
            if not self.ctx.instance._modifiable:
                self.log.warn(
                    'Not retrying async operation, '
                    'because NodeInstanceContext is not modifiable. '
                    'headers: {0}'.format(headers)
                )
                return
            self.ctx.instance.runtime_properties['async_op'] = headers
            return ctx.operation.retry(
                'Operation: "{0}" started'
                .format(self.get_operation_id(headers)),
                retry_after=self.get_retry_after(headers))
        self.log.warn('{0} already exists. Using resource.'
                      .format(self.name))
        return
    # HTTP 400 (BAD_REQUEST) - We're sending bad data
    elif res.status_code == httplib.BAD_REQUEST:
        self.log.info('BAD REQUEST: response: {}'.format(
            utils.secure_logging_content(res.content)))
        raise UnexpectedResponse(
            'Recieved HTTP 400 BAD REQUEST', res.json())
    # HTTP 409 (CONFLICT) - Operation failed
    elif res.status_code == httplib.CONFLICT:
        # A concurrent Azure operation holds the resource: back off and retry
        if "AnotherOperationInProgress" in res.text:
            self.log.warn(
                "Another Operation In Progress, let's wait: {0}"
                .format(headers)
            )
            raise RecoverableError('Another Operation In Progress')
        raise NonRecoverableError(
            'Operation failed. (code={0}, data={1})'
            .format(res.status_code, res.text))
    # All other errors will be treated as recoverable
    elif res.status_code != httplib.CREATED:
        raise RecoverableError(
            'Expected HTTP status code {0}, recieved {1}'
            .format(httplib.CREATED, res.status_code))
def configure(command_to_execute, file_uris, type_handler_version='v2.0',
              **_):
    '''Configures the resource

    On Windows, installs the CustomScriptExtension to run the given
    command; then ensures each connected NIC is attached to this VM and
    publishes `ip` / `public_ip` runtime properties for the agent.

    :param string command_to_execute: command run by the script extension
    :param list file_uris: URIs the extension downloads before executing
    :param string type_handler_version: CustomScriptExtension handler
        version
    '''
    os_family = ctx.node.properties.get('os_family', '').lower()
    if os_family == 'windows':
        utils.task_resource_create(
            VirtualMachineExtension(
                virtual_machine=utils.get_resource_name(),
                api_version=ctx.node.properties.get('api_version',
                                                    constants.API_VER_COMPUTE)
            ),
            {
                'location': ctx.node.properties.get('location'),
                'tags': ctx.node.properties.get('tags'),
                'properties': {
                    'publisher': 'Microsoft.Compute',
                    'type': 'CustomScriptExtension',
                    'typeHandlerVersion': type_handler_version,
                    'settings': {
                        'fileUris': file_uris,
                        'commandToExecute': command_to_execute
                    }
                }
            })
    virtual_machine_name = ctx.instance.runtime_properties.get('name')
    virtual_machine_iface = \
        VirtualMachine(
            api_version=ctx.node.properties.get(
                'api_version',
                constants.API_VER_COMPUTE)).get(
            name=virtual_machine_name)
    # Write the IP address to runtime properties for the agent
    # Get a reference to the NIC
    rel_nics = utils.get_relationships_by_type(
        ctx.instance.relationships,
        constants.REL_CONNECTED_TO_NIC)
    # No NIC? Exit and hope the user doesn't plan to install an agent
    if not rel_nics:
        return
    for rel_nic in rel_nics:
        # Get the NIC data from the API directly (because of IPConfiguration)
        nic_iface = NetworkInterfaceCard(
            _ctx=rel_nic.target,
            api_version=rel_nic.target.node.properties.get(
                'api_version',
                constants.API_VER_NETWORK))
        nic_name = utils.get_resource_name(rel_nic.target)
        nic_data = nic_iface.get(nic_name)
        nic_virtual_machine_id = nic_data.get(
            'properties', dict()).get(
            'virtualMachine', dict()).get('id')
        # Attach the NIC to this VM if it isn't already
        if virtual_machine_name not in nic_virtual_machine_id:
            nic_data['properties'] = \
                utils.dict_update(
                    nic_data.get('properties', {}),
                    {
                        'virtualMachine': {
                            'id': virtual_machine_iface.get('id')
                        }
                    }
                )
            utils.task_resource_update(
                nic_iface, nic_data, _ctx=rel_nic.target)
        # Re-read and retry the whole operation until the attach is visible
        nic_data = nic_iface.get(nic_name)
        if virtual_machine_name not in nic_data.get(
                'properties', dict()).get(
                'virtualMachine', dict()).get('id', str()):
            return ctx.operation.retry(
                message='Waiting for NIC {0} to '
                        'attach to VM {1}..'
                .format(nic_name, virtual_machine_name),
                retry_after=10)
        # Iterate over each IPConfiguration entry
        # NOTE(review): 'ip'/'public_ip' are overwritten per IP config, so
        # the last one processed wins — confirm intended
        creds = utils.get_credentials(_ctx=ctx)
        for ip_cfg in nic_data.get(
                'properties', dict()).get(
                'ipConfigurations', list()):
            # Get the Private IP Address endpoint
            ctx.instance.runtime_properties['ip'] = \
                ip_cfg.get('properties', dict()).get('privateIPAddress')
            # Get the Public IP Address endpoint
            pubip_id = ip_cfg.get(
                'properties', dict()).get(
                'publicIPAddress', dict()).get('id')
            if isinstance(pubip_id, basestring):
                # use the ID to get the data on the public ip
                pubip = PublicIPAddress(
                    _ctx=rel_nic.target,
                    api_version=rel_nic.target.node.properties.get(
                        'api_version',
                        constants.API_VER_NETWORK))
                pubip.endpoint = '{0}{1}'.format(
                    creds.endpoints_resource_manager, pubip_id)
                pubip_data = pubip.get()
                if isinstance(pubip_data, dict):
                    public_ip = \
                        pubip_data.get('properties', dict()).get('ipAddress')
                    # Maintained for backwards compatibility.
                    ctx.instance.runtime_properties['public_ip'] = \
                        public_ip
                    # For consistency with other plugins.
                    ctx.instance.runtime_properties[PUBLIC_IP_PROPERTY] = \
                        public_ip
                    # We should also consider that maybe there will be many
                    # public ip addresses.
                    public_ip_addresses = \
                        ctx.instance.runtime_properties.get(
                            PUBLIC_IP_PROPERTY, [])
                    if public_ip not in public_ip_addresses:
                        public_ip_addresses.append(public_ip)
                    ctx.instance.runtime_properties['public_ip_addresses'] = \
                        public_ip_addresses
    # See if the user wants to use the public IP as primary IP
    if ctx.node.properties.get('use_public_ip') and \
            ctx.instance.runtime_properties.get('public_ip'):
        ctx.instance.runtime_properties['ip'] = \
            ctx.instance.runtime_properties.get('public_ip')
    ctx.logger.info('OUTPUT {0}.{1} = "{2}"'.format(
        ctx.instance.id,
        'ip',
        ctx.instance.runtime_properties.get('ip')))
    ctx.logger.info('OUTPUT {0}.{1} = "{2}"'.format(
        ctx.instance.id,
        'public_ip',
        ctx.instance.runtime_properties.get('public_ip')))
def create(args=None, **_):
    '''Uses an existing, or creates a new, Virtual Machine

    :param dict args: overrides merged into the node's resource_config
        (passed through to utils.get_resource_config)
    '''
    # Generate a resource name (if needed)
    utils.generate_resource_name(
        VirtualMachine(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        generator=vm_name_generator)
    res_cfg = utils.get_resource_config(args=args) or dict()
    # Build storage profile
    osdisk = build_osdisk_profile(
        res_cfg.get('storageProfile', dict()).get('osDisk', dict()))
    datadisks = build_datadisks_profile(
        res_cfg.get('storageProfile', dict()).get('dataDisks', list()))
    storage_profile = {
        'osDisk': osdisk,
        'dataDisks': datadisks
    }
    # Build the network profile
    network_profile = build_network_profile()
    # Build the OS profile
    os_family = ctx.node.properties.get('os_family', '').lower()
    os_profile = dict()
    # Set defaults for Windows installs to enable WinRM listener
    if os_family == 'windows' and \
            not res_cfg.get('osProfile', dict()).get('windowsConfiguration'):
        os_profile = {
            'windowsConfiguration': {
                # This is required for extension scripts to work
                'provisionVMAgent': True,
                'winRM': {
                    'listeners': [{
                        'protocol': 'Http',
                        'certificateUrl': None
                    }]
                }
            },
            'linuxConfiguration': None
        }
    elif not res_cfg.get('osProfile', dict()).get('linuxConfiguration'):
        os_profile = {
            'linuxConfiguration': {
                'disablePasswordAuthentication': False
            },
            'windowsConfiguration': None
        }
    # Set the computerName if it's not set already
    os_profile['computerName'] = \
        res_cfg.get(
            'osProfile', dict()
        ).get('computerName', utils.get_resource_name())
    resource_create_payload = \
        {
            'location': ctx.node.properties.get('location'),
            'tags': ctx.node.properties.get('tags'),
            'plan': ctx.node.properties.get('plan'),
            'properties': utils.dict_update(
                utils.get_resource_config(args=args),
                {
                    'availabilitySet': utils.get_rel_id_reference(
                        AvailabilitySet, constants.REL_CONNECTED_TO_AS),
                    'networkProfile': network_profile,
                    'storageProfile': storage_profile,
                    'osProfile': os_profile
                }
            )
        }
    # support userdata from args.
    # NOTE(review): b64encode returns bytes on Python 3; the SDK-style
    # create() adds .decode('utf-8') — confirm this path targets Python 2
    os_profile = resource_create_payload['properties']['osProfile']
    userdata = _handle_userdata(os_profile.get('customData'))
    if userdata:
        ctx.logger.warn(
            'Azure customData implementation is dependent on '
            'Virtual Machine image support.')
        os_profile['customData'] = base64.b64encode(userdata.encode())
    # Remove customData from osProfile if empty to avoid 400 Error.
    elif 'customData' in resource_create_payload['properties']['osProfile']:
        del resource_create_payload['properties']['osProfile']['customData']
    # Create a resource (if necessary)
    utils.task_resource_create(VirtualMachine(
        api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        resource_create_payload)
def configure(command_to_execute, file_uris, type_handler_version='v2.0',
              **_):
    '''Configures the resource

    On Windows, installs the CustomScriptExtension to run the given
    command; then ensures every connected NIC is attached to this VM and
    publishes `ip` / `public_ip` runtime properties for the agent.

    :param string command_to_execute: command run by the script extension
    :param list file_uris: URIs the extension downloads before executing
    :param string type_handler_version: CustomScriptExtension handler
        version
    '''
    os_family = ctx.node.properties.get('os_family', '').lower()
    if os_family == 'windows':
        utils.task_resource_create(
            VirtualMachineExtension(virtual_machine=utils.get_resource_name(),
                                    api_version=ctx.node.properties.get(
                                        'api_version',
                                        constants.API_VER_COMPUTE)),
            {
                'location': ctx.node.properties.get('location'),
                'tags': ctx.node.properties.get('tags'),
                'properties': {
                    'publisher': 'Microsoft.Compute',
                    'type': 'CustomScriptExtension',
                    'typeHandlerVersion': type_handler_version,
                    'settings': {
                        'fileUris': file_uris,
                        'commandToExecute': command_to_execute
                    }
                }
            })
    virtual_machine_name = ctx.instance.runtime_properties.get('name')
    virtual_machine_iface = \
        VirtualMachine(
            api_version=ctx.node.properties.get(
                'api_version',
                constants.API_VER_COMPUTE)).get(
            name=virtual_machine_name)
    # Write the IP address to runtime properties for the agent
    # Get a reference to the NIC
    rel_nics = utils.get_relationships_by_type(ctx.instance.relationships,
                                               constants.REL_CONNECTED_TO_NIC)
    # No NIC? Exit and hope the user doesn't plan to install an agent
    if not rel_nics:
        return
    for rel_nic in rel_nics:
        # Get the NIC data from the API directly (because of IPConfiguration)
        nic_iface = NetworkInterfaceCard(
            _ctx=rel_nic.target,
            api_version=rel_nic.target.node.properties.get(
                'api_version',
                constants.API_VER_NETWORK))
        nic_name = utils.get_resource_name(rel_nic.target)
        nic_data = nic_iface.get(nic_name)
        nic_virtual_machine_id = nic_data.get('properties', dict()).get(
            'virtualMachine', dict()).get('id')
        # Attach the NIC to this VM if it isn't already
        if virtual_machine_name not in nic_virtual_machine_id:
            nic_data['properties'] = \
                utils.dict_update(
                    nic_data.get('properties', {}),
                    {
                        'virtualMachine': {
                            'id': virtual_machine_iface.get('id')
                        }
                    }
                )
            utils.task_resource_update(nic_iface, nic_data,
                                       _ctx=rel_nic.target)
        # Re-read and retry the whole operation until the attach is visible
        nic_data = nic_iface.get(nic_name)
        if virtual_machine_name not in nic_data.get(
                'properties', dict()).get('virtualMachine',
                                          dict()).get('id', str()):
            return ctx.operation.retry(message='Waiting for NIC {0} to '
                                               'attach to VM {1}..'.format(
                                                   nic_name,
                                                   virtual_machine_name),
                                       retry_after=10)
        # Iterate over each IPConfiguration entry
        # NOTE(review): 'ip'/'public_ip' are overwritten per IP config, so
        # the last one processed wins — confirm intended
        creds = utils.get_credentials(_ctx=ctx)
        for ip_cfg in nic_data.get('properties',
                                   dict()).get('ipConfigurations', list()):
            # Get the Private IP Address endpoint
            ctx.instance.runtime_properties['ip'] = \
                ip_cfg.get('properties', dict()).get('privateIPAddress')
            # Get the Public IP Address endpoint
            pubip_id = ip_cfg.get('properties', dict()).get('publicIPAddress',
                                                            dict()).get('id')
            if isinstance(pubip_id, basestring):
                # use the ID to get the data on the public ip
                pubip = PublicIPAddress(
                    _ctx=rel_nic.target,
                    api_version=rel_nic.target.node.properties.get(
                        'api_version',
                        constants.API_VER_NETWORK))
                pubip.endpoint = '{0}{1}'.format(
                    creds.endpoints_resource_manager, pubip_id)
                pubip_data = pubip.get()
                if isinstance(pubip_data, dict):
                    public_ip = \
                        pubip_data.get('properties', dict()).get('ipAddress')
                    # Maintained for backwards compatibility.
                    ctx.instance.runtime_properties['public_ip'] = \
                        public_ip
                    # For consistency with other plugins.
                    ctx.instance.runtime_properties[PUBLIC_IP_PROPERTY] = \
                        public_ip
                    # We should also consider that maybe there will be many
                    # public ip addresses.
                    public_ip_addresses = \
                        ctx.instance.runtime_properties.get(
                            PUBLIC_IP_PROPERTY, [])
                    if public_ip not in public_ip_addresses:
                        public_ip_addresses.append(public_ip)
                    ctx.instance.runtime_properties['public_ip_addresses'] = \
                        public_ip_addresses
    # See if the user wants to use the public IP as primary IP
    if ctx.node.properties.get('use_public_ip') and \
            ctx.instance.runtime_properties.get('public_ip'):
        ctx.instance.runtime_properties['ip'] = \
            ctx.instance.runtime_properties.get('public_ip')
    ctx.logger.info('OUTPUT {0}.{1} = "{2}"'.format(
        ctx.instance.id,
        'ip',
        ctx.instance.runtime_properties.get('ip')))
    ctx.logger.info('OUTPUT {0}.{1} = "{2}"'.format(
        ctx.instance.id,
        'public_ip',
        ctx.instance.runtime_properties.get('public_ip')))
def create(**_):
    '''
    Uses an existing, or creates a new, Virtual Machine

    Builds the storage, network, and OS profiles from the node's
    ``resource_config`` (plus sane defaults) and delegates the actual
    Azure call to ``utils.task_resource_create``.
    '''
    # Generate a resource name (if needed)
    utils.generate_resource_name(
        VirtualMachine(),
        generator=vm_name_generator)
    res_cfg = utils.get_resource_config() or dict()
    # Build storage profile
    osdisk = build_osdisk_profile(
        res_cfg.get('storageProfile', dict()).get('osDisk', dict()))
    datadisks = build_datadisks_profile(
        res_cfg.get('storageProfile', dict()).get('dataDisks', list()))
    storage_profile = {
        'osDisk': osdisk,
        'dataDisks': datadisks
    }
    # Build the network profile
    network_profile = build_network_profile()
    # Build the OS profile.  Guard against an explicit ``os_family: null``
    # in the blueprint, which would make ``.lower()`` raise.
    os_family = (ctx.node.properties.get('os_family') or '').lower()
    os_profile = dict()
    # Set defaults for Windows installs to enable WinRM listener
    if os_family == 'windows' and \
            not res_cfg.get('osProfile', dict()).get('windowsConfiguration'):
        os_profile = {
            'windowsConfiguration': {
                # This is required for extension scripts to work
                'provisionVMAgent': True,
                'winRM': {
                    'listeners': [{
                        'protocol': 'Http',
                        'certificateUrl': None
                    }]
                }
            },
            'linuxConfiguration': None
        }
    # Only inject Linux defaults for non-Windows installs.  Without the
    # ``os_family`` check, a Windows VM that supplied its own
    # ``windowsConfiguration`` (first branch false) would fall through
    # here and have that configuration clobbered with ``None`` while a
    # Linux configuration was injected by the merge below.
    elif os_family != 'windows' and \
            not res_cfg.get('osProfile', dict()).get('linuxConfiguration'):
        os_profile = {
            'linuxConfiguration': {
                'disablePasswordAuthentication': False
            },
            'windowsConfiguration': None
        }
    # Set the computerName if it's not set already
    os_profile['computerName'] = \
        res_cfg.get(
            'osProfile', dict()
        ).get('computerName', utils.get_resource_name())
    # Create a resource (if necessary)
    utils.task_resource_create(
        VirtualMachine(),
        {
            'location': ctx.node.properties.get('location'),
            'tags': ctx.node.properties.get('tags'),
            'plan': ctx.node.properties.get('plan'),
            'properties': utils.dict_update(
                utils.get_resource_config(),
                {
                    'availabilitySet': utils.get_rel_id_reference(
                        AvailabilitySet, constants.REL_CONNECTED_TO_AS),
                    'networkProfile': network_profile,
                    'storageProfile': storage_profile,
                    'osProfile': os_profile
                }
            )
        })
def configure(ctx, **_):
    """
    Uses an existing, or creates a new, Network Interface Card

    .. warning::
        The "configure" operation is actually the second half of
        the "create" operation. This is necessary since IP
        Configuration nodes are treated as separate, stand-alone
        types and must be "connected" to the NIC before it's
        actually created.  The actual "create" operation simply
        assigns a UUID for the node and the "configure" operation
        creates the object

    :raises cloudify.exceptions.NonRecoverableError:
        if the Azure API rejects the create-or-update request
    """
    # Resolve credentials.  ``azure_config`` may be absent from the node
    # properties entirely, so default to an empty dict — otherwise the
    # ``.get("subscription_id")`` below raises AttributeError on None
    # instead of falling back to ``client_config``.
    azure_config = ctx.node.properties.get('azure_config') or {}
    if not azure_config.get("subscription_id"):
        azure_config = ctx.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    name = ctx.instance.runtime_properties.get('name')
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    network_interface_card = NetworkInterfaceCard(azure_config, ctx.logger,
                                                  api_version)
    nic_params = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'primary': ctx.node.properties.get('primary'),
    }
    nic_params = \
        utils.handle_resource_config_params(nic_params,
                                            ctx.node.properties.get(
                                                'resource_config', {}))
    # Special Case network_security_group instead of networkSecurityGroups
    nic_params['network_security_group'] = \
        nic_params.pop('network_security_groups', None)
    # clean empty values from params
    nic_params = \
        utils.cleanup_empty_params(nic_params)
    # Merge in the NSG and IP configurations gathered from relationships
    nic_params = utils.dict_update(
        nic_params, {
            'network_security_group': get_connected_nsg(ctx),
            'ip_configurations': get_ip_configurations(ctx)
        })
    # clean empty values from params (the merge above may have
    # re-introduced empty entries)
    nic_params = \
        utils.cleanup_empty_params(nic_params)
    try:
        result = \
            network_interface_card.create_or_update(
                resource_group_name, name, nic_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "configure nic '{0}' "
            "failed with this error : {1}".format(name, cr.message))

    ctx.instance.runtime_properties['resource_group'] = resource_group_name
    ctx.instance.runtime_properties['resource'] = result
    ctx.instance.runtime_properties['resource_id'] = result.get("id", "")
    ctx.instance.runtime_properties['name'] = name