def create_probe(**_):
    '''Uses an existing, or creates a new, Load Balancer Probe'''
    props = ctx.node.properties
    # An external resource must always be referenced by name
    if props.get('use_external_resource', False) and not props.get('name'):
        raise NonRecoverableError(
            '"use_external_resource" specified without a resource "name"')
    api_version = props.get('api_version', constants.API_VER_NETWORK)
    # Make sure the probe has a (possibly generated) name
    utils.generate_resource_name(Probe(api_version=api_version))
    # Locate the parent Load Balancer through the contained-in relationship
    parent_rel = utils.get_relationship_by_type(
        ctx.instance.relationships, constants.REL_CONTAINED_IN_LB)
    balancer_name = utils.get_resource_name(parent_rel.target)
    balancer = LoadBalancer(api_version=api_version)
    # Append the new probe to whatever probes already exist
    existing_probes = balancer.get(balancer_name).get(
        'properties', dict()).get('probes', list())
    existing_probes.append({
        'name': utils.get_resource_name(),
        'properties': utils.get_resource_config()
    })
    # Push the updated probe list back to the Load Balancer
    utils.task_resource_update(
        balancer,
        {'properties': {'probes': existing_probes}},
        name=balancer_name)
def delete_data_disk(**_):
    '''
    Deletes a Data Disk

    Removes the backing page blob when ``force_delete`` is set in the
    resource config and the disk is not an external resource, then
    clears the runtime properties set during create.

    :raises NonRecoverableError: if deletion is requested but the disk
        name or container name is missing from runtime properties
    '''
    res_cfg = utils.get_resource_config() or dict()
    disk_name = ctx.instance.runtime_properties.get('name')
    disk_container = ctx.instance.runtime_properties.get('container')
    # If we're not deleting the disk, skip the lifecycle operation
    if ctx.node.properties.get('use_external_resource', False) or \
            not res_cfg.get('force_delete', False):
        return
    # Validate the name exists
    if not disk_name or not disk_container:
        raise NonRecoverableError(
            'Attempted to delete Data Disk without a name or '
            'container name specified')
    # Get the storage account
    csa = get_cloud_storage_account()
    # Get an interface to the Page Blob Service
    pageblobsvc = csa.create_page_blob_service()
    # Delete the blob
    ctx.logger.info('Deleting Data Disk "{0}/{1}"'
                    .format(disk_container, disk_name))
    pageblobsvc.delete_blob(disk_container, disk_name)
    # Clear the runtime properties set during create
    for prop in ['name', 'diskSizeGB', 'container', 'uri']:
        try:
            del ctx.instance.runtime_properties[prop]
        # BUGFIX: deleting a missing dict key raises KeyError, not
        # IndexError, so the original except clause never matched
        except KeyError:
            ctx.logger.debug(
                'Attempted to delete property {0} but failed.'.format(
                    prop))
def configure(**_):
    '''
    Uses an existing, or creates a new, Network Interface Card

    .. warning::
        The "configure" operation is actually the second half of
        the "create" operation. This is necessary since IP
        Configuration nodes are treated as separate, stand-alone
        types and must be "connected" to the NIC before it's
        actually created.  The actual "create" operation simply
        assigns a UUID for the node and the "configure" operation
        creates the object
    '''
    nic = NetworkInterfaceCard(api_version=ctx.node.properties.get(
        'api_version', constants.API_VER_NETWORK))
    # Merge the connected NSG and IP Configurations over the resource config
    nic_properties = utils.dict_update(
        utils.get_resource_config(),
        {
            'networkSecurityGroup': get_connected_nsg(),
            'ipConfigurations': get_ip_configurations()
        })
    # Create a resource (if necessary)
    utils.task_resource_create(
        nic,
        {
            'location': ctx.node.properties.get('location'),
            'tags': ctx.node.properties.get('tags'),
            'properties': nic_properties
        })
def create_probe(**_):
    '''Uses an existing, or creates a new, Load Balancer Probe'''
    # An external resource must always be referenced by name
    use_external = ctx.node.properties.get('use_external_resource', False)
    if use_external and not ctx.node.properties.get('name'):
        raise NonRecoverableError(
            '"use_external_resource" specified without a resource "name"')
    api_version = ctx.node.properties.get(
        'api_version', constants.API_VER_NETWORK)
    # Generate a name if it doesn't exist
    utils.generate_resource_name(Probe(api_version=api_version))
    # Find the Load Balancer this probe is contained in
    lb_rel = utils.get_relationship_by_type(
        ctx.instance.relationships, constants.REL_CONTAINED_IN_LB)
    lb_name = utils.get_resource_name(lb_rel.target)
    lb_iface = LoadBalancer(api_version=api_version)
    # Extend the current probe list with the new probe
    probes = lb_iface.get(lb_name).get(
        'properties', dict()).get('probes', list())
    probes.append({
        'name': utils.get_resource_name(),
        'properties': utils.get_resource_config()
    })
    # Update the Load Balancer with the new probe
    utils.task_resource_update(
        lb_iface, {'properties': {'probes': probes}}, name=lb_name)
def build_ip_configuration(ipc):
    '''
    Attempts to construct a proper IP Configuration from a
    context object

    :params `cloudify.context.RelationshipSubjectContext` ipc:
        IP Configuration context object
    :returns: IP Configuration dict
    :rtype: dict or None
    '''
    # Nothing can be built without a context object and relationships
    if not ipc or not ipc.instance.relationships:
        return None
    # Find a referenced Subnet
    subnet_ref = utils.get_rel_id_reference(
        Subnet, constants.REL_IPC_CONNECTED_TO_SUBNET, _ctx=ipc)
    # Find a referenced Public IP
    pubip_ref = utils.get_rel_id_reference(
        PublicIPAddress, constants.REL_IPC_CONNECTED_TO_PUBIP, _ctx=ipc)
    result = {
        'name': utils.get_resource_name(ipc),
        'properties': {
            'subnet': subnet_ref,
            'publicIPAddress': pubip_ref
        }
    }
    # Overlay the node's resource config onto the "properties" section
    result['properties'] = utils.dict_update(
        result['properties'], utils.get_resource_config(_ctx=ipc))
    return result
def build_ip_configuration(ipc):
    '''
    Attempts to construct a proper IP Configuration from a
    context object

    :params `cloudify.context.RelationshipSubjectContext` ipc:
        IP Configuration context object
    :returns: IP Configuration dict
    :rtype: dict or None
    '''
    if not ipc or not ipc.instance.relationships:
        return None
    # Find a referenced Subnet
    subnet = utils.get_rel_id_reference(
        Subnet, constants.REL_IPC_CONNECTED_TO_SUBNET, _ctx=ipc)
    # Find a referenced Public IP
    pubip = utils.get_rel_id_reference(
        PublicIPAddress, constants.REL_IPC_CONNECTED_TO_PUBIP, _ctx=ipc)
    base_cfg = {
        'name': utils.get_resource_name(ipc),
        'properties': {
            'subnet': subnet,
            'publicIPAddress': pubip
        }
    }
    # NOTE: the resource config is merged over the whole configuration
    # dict here (not just its "properties" section)
    return utils.dict_update(base_cfg, utils.get_resource_config(_ctx=ipc))
def configure(**_): '''Uses an existing, or creates a new, Load Balancer''' # Get the Frontend IP Configuration fe_ip_cfg = get_ip_configurations(rel=constants.REL_LB_CONNECTED_TO_IPC) ctx.logger.debug('fe_ip_cfg: {0}'.format(fe_ip_cfg)) if not len(fe_ip_cfg): raise NonRecoverableError( 'At least 1 Frontend IP Configuration must be ' 'associated with the Load Balancer') # Remove the subnet if there's a public IP present for ip_cfg in fe_ip_cfg: if ip_cfg.get('properties', dict()).get('publicIPAddress'): if ip_cfg.get('properties', dict()).get('subnet'): del ip_cfg['properties']['subnet'] # Create a resource (if necessary) utils.task_resource_create( LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)), { 'location': ctx.node.properties.get('location'), 'tags': ctx.node.properties.get('tags'), 'properties': utils.dict_update(utils.get_resource_config(), {'frontendIPConfigurations': fe_ip_cfg}) }) # Get an interface to the Load Balancer lb_iface = LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)) lb_data = lb_iface.get(utils.get_resource_name()) # Get the ID of the Frontend IP Configuration for fe_ipc_data in lb_data.get('properties', dict()).get('frontendIPConfigurations', list()): ipc_iface = IPConfiguration() ipc_id = fe_ipc_data.get('id') if not ipc_id: break ipc_iface.endpoint = '{0}{1}'.format(constants.CONN_API_ENDPOINT, ipc_id) # Get the Frontend private IP address ipc_data = ipc_iface.get() ctx.instance.runtime_properties['ip'] = \ ipc_data.get('properties', dict()).get('privateIPAddress') # Get the ID of the Frontend Public IP Configuration pipc_iface = PublicIPAddress() pipc_id = fe_ipc_data.get('properties', dict()).get('publicIPAddress', dict()).get('id') if not pipc_id: break pipc_iface.endpoint = '{0}{1}'.format(constants.CONN_API_ENDPOINT, pipc_id) # Get the Frontend public IP address pipc_data = pipc_iface.get() ctx.instance.runtime_properties['public_ip'] = \ 
pipc_data.get('properties', dict()).get('ipAddress')
def create(**_):
    '''Uses an existing, or creates a new, Virtual Network'''
    # Assemble the request payload up front for readability
    payload = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'properties': utils.get_resource_config()
    }
    # Create a resource (if necessary)
    utils.task_resource_create(VirtualNetwork(), payload)
def create(**_):
    '''Uses an existing, or creates a new, Public IP Address'''
    node_props = ctx.node.properties
    # Create a resource (if necessary)
    utils.task_resource_create(
        PublicIPAddress(),
        {
            'location': node_props.get('location'),
            'tags': node_props.get('tags'),
            'properties': utils.get_resource_config()
        })
def create(**_):
    '''Uses an existing, or creates a new, Availability Set'''
    # Collect the standard create parameters from the node properties
    params = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'properties': utils.get_resource_config()
    }
    # Create a resource (if necessary)
    utils.task_resource_create(AvailabilitySet(), params)
def configure(**_): '''Uses an existing, or creates a new, Load Balancer''' # Get the Frontend IP Configuration fe_ip_cfg = get_ip_configurations(rel=constants.REL_LB_CONNECTED_TO_IPC) ctx.logger.debug('fe_ip_cfg: {0}'.format(fe_ip_cfg)) if not len(fe_ip_cfg): raise NonRecoverableError( 'At least 1 Frontend IP Configuration must be ' 'associated with the Load Balancer') # Remove the subnet if there's a public IP present for ip_cfg in fe_ip_cfg: if ip_cfg.get('properties', dict()).get('publicIPAddress'): if ip_cfg.get('properties', dict()).get('subnet'): del ip_cfg['properties']['subnet'] # Create a resource (if necessary) utils.task_resource_create( LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)), { 'location': ctx.node.properties.get('location'), 'tags': ctx.node.properties.get('tags'), 'properties': utils.dict_update( utils.get_resource_config(), { 'frontendIPConfigurations': fe_ip_cfg }) }) # Get an interface to the Load Balancer lb_iface = LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)) lb_data = lb_iface.get(utils.get_resource_name()) # Get the ID of the Frontend IP Configuration for fe_ipc_data in lb_data.get('properties', dict()).get( 'frontendIPConfigurations', list()): ipc_iface = IPConfiguration() ipc_id = fe_ipc_data.get('id') if not ipc_id: break ipc_iface.endpoint = '{0}{1}'.format( constants.CONN_API_ENDPOINT, ipc_id) # Get the Frontend private IP address ipc_data = ipc_iface.get() ctx.instance.runtime_properties['ip'] = \ ipc_data.get('properties', dict()).get('privateIPAddress') # Get the ID of the Frontend Public IP Configuration pipc_iface = PublicIPAddress() pipc_id = fe_ipc_data.get('properties', dict()).get( 'publicIPAddress', dict()).get('id') if not pipc_id: break pipc_iface.endpoint = '{0}{1}'.format( constants.CONN_API_ENDPOINT, pipc_id) # Get the Frontend public IP address pipc_data = pipc_iface.get() ctx.instance.runtime_properties['public_ip'] = \ 
pipc_data.get('properties', dict()).get('ipAddress')
def create(**_):
    '''Uses an existing, or creates a new, Public IP Address'''
    # Honor an overridden API version if the node declares one
    pip = PublicIPAddress(api_version=ctx.node.properties.get(
        'api_version', constants.API_VER_NETWORK))
    payload = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'properties': utils.get_resource_config()
    }
    # Create a resource (if necessary)
    utils.task_resource_create(pip, payload)
def create(**_):
    '''Uses an existing, or creates a new, Network Security Group'''
    node_props = ctx.node.properties
    # Create a resource (if necessary)
    utils.task_resource_create(
        NetworkSecurityGroup(),
        {
            'location': node_props.get('location'),
            'tags': node_props.get('tags'),
            'properties': utils.get_resource_config()
        })
def create(**_):
    '''Uses an existing, or creates a new, Availability Set'''
    # Honor an overridden API version if the node declares one
    availability_set = AvailabilitySet(api_version=ctx.node.properties.get(
        'api_version', constants.API_VER_COMPUTE))
    payload = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'properties': utils.get_resource_config()
    }
    # Create a resource (if necessary)
    utils.task_resource_create(availability_set, payload)
def create(**_):
    '''Uses an existing, or creates a new, Network Security Group'''
    # Honor an overridden API version if the node declares one
    nsg = NetworkSecurityGroup(api_version=ctx.node.properties.get(
        'api_version', constants.API_VER_NETWORK))
    payload = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'properties': utils.get_resource_config()
    }
    # Create a resource (if necessary)
    utils.task_resource_create(nsg, payload)
def create(**_):
    '''Uses an existing, or creates a new, Storage Account'''
    # Make sure the account has a (possibly generated) name first
    utils.generate_resource_name(StorageAccount(),
                                 generator=sa_name_generator)
    payload = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'properties': utils.get_resource_config()
    }
    # Create a resource (if necessary)
    utils.task_resource_create(StorageAccount(), payload)
def create_rule(**_): '''Uses an existing, or creates a new, Load Balancer Rule''' # Check if invalid external resource if ctx.node.properties.get('use_external_resource', False) and \ not ctx.node.properties.get('name'): raise NonRecoverableError( '"use_external_resource" specified without a resource "name"') # Generate a name if it doesn't exist utils.generate_resource_name(LoadBalancerRule( api_version=ctx.node.properties.get('api_version', constants.API_VER_NETWORK))) # Get the resource config res_cfg = utils.get_resource_config() # Get an interface to the Load Balancer lb_rel = utils.get_relationship_by_type( ctx.instance.relationships, constants.REL_CONTAINED_IN_LB) lb_name = utils.get_resource_name(lb_rel.target) lb_iface = LoadBalancer(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK)) lb_data = lb_iface.get(lb_name) # Get the Load Balancer Backend Pool lb_be_pool_id = utils.get_rel_id_reference( BackendAddressPool, constants.REL_CONNECTED_TO_LB_BE_POOL) # Get the Load Balancer Probe lb_probe_id = utils.get_rel_id_reference( Probe, constants.REL_CONNECTED_TO_LB_PROBE) # Get the Load Balancer Frontend IP Configuration lb_fe_ipc_name = utils.get_rel_node_name(constants.REL_CONNECTED_TO_IPC) lb_fe_ipc_id = utils.get_full_resource_id( FrontendIPConfiguration(api_version=ctx.node.properties.get( 'api_version', constants.API_VER_NETWORK) ), lb_fe_ipc_name) # Get the existing Load Balancer Rules lb_rules = lb_data.get('properties', dict()).get( 'loadBalancingRules', list()) # Update the resource config res_cfg['backendAddressPool'] = lb_be_pool_id res_cfg['frontendIPConfiguration'] = lb_fe_ipc_id res_cfg['probe'] = lb_probe_id lb_rules.append({ 'name': utils.get_resource_name(), 'properties': res_cfg }) # Update the Load Balancer with the new rule utils.task_resource_update( lb_iface, { 'properties': { 'loadBalancingRules': lb_rules } }, name=lb_name)
def create(**_):
    '''Uses an existing, or creates a new, Storage Account'''
    node_props = ctx.node.properties
    # Generate a resource name (if needed)
    utils.generate_resource_name(
        StorageAccount(), generator=sa_name_generator)
    # Create a resource (if necessary)
    utils.task_resource_create(
        StorageAccount(),
        {
            'location': node_props.get('location'),
            'tags': node_props.get('tags'),
            'properties': utils.get_resource_config()
        })
def create_rule(**_): '''Uses an existing, or creates a new, Load Balancer Rule''' # Check if invalid external resource if ctx.node.properties.get('use_external_resource', False) and \ not ctx.node.properties.get('name'): raise NonRecoverableError( '"use_external_resource" specified without a resource "name"') # Generate a name if it doesn't exist utils.generate_resource_name(LoadBalancerRule()) # Get the resource config res_cfg = utils.get_resource_config() # Get an interface to the Load Balancer lb_rel = utils.get_relationship_by_type( ctx.instance.relationships, constants.REL_CONTAINED_IN_LB) lb_name = utils.get_resource_name(lb_rel.target) lb_iface = LoadBalancer() lb_data = lb_iface.get(lb_name) # Get the Load Balancer Backend Pool lb_be_pool_id = utils.get_rel_id_reference( BackendAddressPool, constants.REL_CONNECTED_TO_LB_BE_POOL) # Get the Load Balancer Probe lb_probe_id = utils.get_rel_id_reference( Probe, constants.REL_CONNECTED_TO_LB_PROBE) # Get the Load Balancer Frontend IP Configuration lb_fe_ipc_name = utils.get_rel_node_name(constants.REL_CONNECTED_TO_IPC) lb_fe_ipc_id = utils.get_full_resource_id( FrontendIPConfiguration(), lb_fe_ipc_name) # Get the existing Load Balancer Rules lb_rules = lb_data.get('properties', dict()).get( 'loadBalancingRules', list()) # Update the resource config res_cfg['backendAddressPool'] = lb_be_pool_id res_cfg['frontendIPConfiguration'] = lb_fe_ipc_id res_cfg['probe'] = lb_probe_id lb_rules.append({ 'name': utils.get_resource_name(), 'properties': res_cfg }) # Update the Load Balancer with the new rule utils.task_resource_update( lb_iface, { 'properties': { 'loadBalancingRules': lb_rules } }, name=lb_name)
def create(**_):
    '''Uses an existing, or creates a new, Storage Account'''
    node_props = ctx.node.properties
    api_version = node_props.get('api_version', constants.API_VER_STORAGE)
    # Generate a resource name (if needed)
    utils.generate_resource_name(
        StorageAccount(api_version=api_version),
        generator=sa_name_generator)
    params = {
        'location': node_props.get('location'),
        'tags': node_props.get('tags'),
        'properties': utils.get_resource_config()
    }
    # The SKU is optional; only send it when the node declares one
    sku = node_props.get('sku')
    if sku:
        params['sku'] = sku
    # Create a resource (if necessary)
    utils.task_resource_create(
        StorageAccount(api_version=api_version), params)
def create_incoming_nat_rule(**_):
    '''Uses an existing, or creates a new, Load Balancer Incoming NAT Rule'''
    # An external resource must always be referenced by name
    if ctx.node.properties.get('use_external_resource', False) and \
            not ctx.node.properties.get('name'):
        raise NonRecoverableError(
            '"use_external_resource" specified without a resource "name"')
    # Make sure the rule has a (possibly generated) name
    utils.generate_resource_name(InboundNATRule())
    # Locate the parent Load Balancer
    parent_rel = utils.get_relationship_by_type(
        ctx.instance.relationships, constants.REL_CONTAINED_IN_LB)
    balancer_name = utils.get_resource_name(parent_rel.target)
    balancer = LoadBalancer()
    rule_cfg = utils.get_resource_config()
    # Start from the NAT rules that already exist on the Load Balancer
    nat_rules = balancer.get(balancer_name).get(
        'properties', dict()).get('inboundNatRules', list())
    # Resolve the connected Frontend IP Configuration to a full resource ID
    fe_ipc_name = utils.get_rel_node_name(constants.REL_CONNECTED_TO_IPC)
    fe_ipc_id = utils.get_full_resource_id(
        FrontendIPConfiguration(), fe_ipc_name)
    rule_cfg['frontendIPConfiguration'] = fe_ipc_id
    nat_rules.append({
        'name': utils.get_resource_name(),
        'properties': rule_cfg
    })
    # Push the updated NAT rule list back to the Load Balancer
    utils.task_resource_update(
        balancer,
        {'properties': {'inboundNatRules': nat_rules}},
        name=balancer_name)
def create_data_disk(**_):
    '''
    Uses an existing, or creates a new, Data Disk placeholder

    Sets the runtime properties ``name``, ``diskSizeGB``, ``container``
    and ``uri`` describing the page blob that backs the disk.

    :raises NonRecoverableError: if "use_external_resource" is set
        without a resource "name" or "container_name", or if a unique
        disk name could not be generated
    '''
    res_cfg = utils.get_resource_config() or dict()
    disk_name = ctx.node.properties.get('name')
    disk_container = res_cfg.get('container_name')
    # Validation: an external resource must be fully specified
    if ctx.node.properties.get('use_external_resource', False):
        if not disk_name:
            raise NonRecoverableError(
                '"use_external_resource" specified without '
                'a resource "name"')
        if not disk_container:
            raise NonRecoverableError(
                '"use_external_resource" specified without '
                'a resource "container_name"')
    # Get the storage account
    csa = get_cloud_storage_account()
    # Get an interface to the Page Blob Service
    pageblobsvc = csa.create_page_blob_service()
    # Generate a VHD Data Disk name if needed
    if not disk_name:
        ctx.logger.info('Generating a new Data Disk name')
        for _ in xrange(0, 10):
            tmpname = disk_name_generator()
            if not data_disk_exists(pageblobsvc, disk_container, tmpname):
                disk_name = tmpname
                break
        # BUGFIX: fail loudly instead of silently continuing with a
        # None name, which would publish an invalid blob URI below
        # (mirrors the behavior of create_file_share)
        if not disk_name:
            raise NonRecoverableError(
                'Error generating a new Data Disk name. Failed '
                'after 10 tries.')
    # Set the runtime properties
    ctx.instance.runtime_properties['name'] = disk_name
    ctx.instance.runtime_properties['diskSizeGB'] = \
        res_cfg.get('size')
    ctx.instance.runtime_properties['container'] = \
        disk_container
    ctx.instance.runtime_properties['uri'] = (
        'https://{0}.blob.{1}/{2}/{3}'.format(
            csa.account_name, constants.CONN_STORAGE_ENDPOINT,
            disk_container, disk_name)
    )
def create(**_): '''Uses an existing, or creates a new, Virtual Machine''' # Generate a resource name (if needed) utils.generate_resource_name( VirtualMachine(), generator=vm_name_generator) res_cfg = utils.get_resource_config() or dict() # Build storage profile osdisk = build_osdisk_profile( res_cfg.get('storageProfile', dict()).get('osDisk', dict())) datadisks = build_datadisks_profile( res_cfg.get('storageProfile', dict()).get('dataDisks', list())) storage_profile = { 'osDisk': osdisk, 'dataDisks': datadisks } # Build the network profile network_profile = build_network_profile() # Build the OS profile os_family = ctx.node.properties.get('os_family', '').lower() os_profile = dict() # Set defaults for Windows installs to enable WinRM listener if os_family == 'windows' and \ not res_cfg.get('osProfile', dict()).get('windowsConfiguration'): os_profile = { 'windowsConfiguration': { # This is required for extension scripts to work 'provisionVMAgent': True, 'winRM': { 'listeners': [{ 'protocol': 'Http', 'certificateUrl': None }] } }, 'linuxConfiguration': None } elif not res_cfg.get('osProfile', dict()).get('linuxConfiguration'): os_profile = { 'linuxConfiguration': { 'disablePasswordAuthentication': False }, 'windowsConfiguration': None } # Set the computerName if it's not set already os_profile['computerName'] = \ res_cfg.get( 'osProfile', dict() ).get('computerName', utils.get_resource_name()) # Create a resource (if necessary) utils.task_resource_create( VirtualMachine(), { 'location': ctx.node.properties.get('location'), 'tags': ctx.node.properties.get('tags'), 'plan': ctx.node.properties.get('plan'), 'properties': utils.dict_update( utils.get_resource_config(), { 'availabilitySet': utils.get_rel_id_reference( AvailabilitySet, constants.REL_CONNECTED_TO_AS), 'networkProfile': network_profile, 'storageProfile': storage_profile, 'osProfile': os_profile } ) })
def create(args=None, **_):
    '''
    Uses an existing, or creates a new, Virtual Machine

    Builds the storage, network and OS profiles from the resource
    config (optionally overridden via ``args``) and connected nodes,
    base64-encodes any userdata into osProfile.customData, then issues
    the create request.

    :param dict args: optional overrides merged into the resource config
    '''
    # Generate a resource name (if needed)
    utils.generate_resource_name(
        VirtualMachine(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        generator=vm_name_generator)
    res_cfg = utils.get_resource_config(args=args) or dict()
    # Build storage profile
    osdisk = build_osdisk_profile(
        res_cfg.get('storageProfile', dict()).get('osDisk', dict()))
    datadisks = build_datadisks_profile(
        res_cfg.get('storageProfile', dict()).get('dataDisks', list()))
    storage_profile = {'osDisk': osdisk, 'dataDisks': datadisks}
    # Build the network profile
    network_profile = build_network_profile()
    # Build the OS profile
    os_family = ctx.node.properties.get('os_family', '').lower()
    os_profile = dict()
    # Set defaults for Windows installs to enable WinRM listener
    # NOTE(review): when os_family is 'windows' and the user supplied
    # a windowsConfiguration, the elif below may still fire (no
    # linuxConfiguration present) and set linux defaults — confirm
    # this is the intended fallthrough
    if os_family == 'windows' and \
            not res_cfg.get('osProfile', dict()).get('windowsConfiguration'):
        os_profile = {
            'windowsConfiguration': {
                # This is required for extension scripts to work
                'provisionVMAgent': True,
                'winRM': {
                    'listeners': [{
                        'protocol': 'Http',
                        'certificateUrl': None
                    }]
                }
            },
            'linuxConfiguration': None
        }
    elif not res_cfg.get('osProfile', dict()).get('linuxConfiguration'):
        os_profile = {
            'linuxConfiguration': {
                'disablePasswordAuthentication': False
            },
            'windowsConfiguration': None
        }
    # Set the computerName if it's not set already
    os_profile['computerName'] = \
        res_cfg.get(
            'osProfile', dict()
        ).get('computerName', utils.get_resource_name())
    resource_create_payload = \
        {
            'location': ctx.node.properties.get('location'),
            'tags': ctx.node.properties.get('tags'),
            'plan': ctx.node.properties.get('plan'),
            'properties': utils.dict_update(
                utils.get_resource_config(args=args),
                {
                    'availabilitySet': utils.get_rel_id_reference(
                        AvailabilitySet, constants.REL_CONNECTED_TO_AS),
                    'networkProfile': network_profile,
                    'storageProfile': storage_profile,
                    'osProfile': os_profile
                }
            )
        }
    # support userdata from args.
    # NOTE: this os_profile is the merged copy inside the payload, not
    # the local dict built above
    os_profile = resource_create_payload['properties']['osProfile']
    userdata = _handle_userdata(os_profile.get('customData'))
    if userdata:
        ctx.logger.warn('Azure customData implementation is dependent on '
                        'Virtual Machine image support.')
        os_profile['customData'] = base64.b64encode(userdata.encode())
    # Remove customData from osProfile if empty to avoid 400 Error.
    elif 'customData' in resource_create_payload['properties']['osProfile']:
        del resource_create_payload['properties']['osProfile']['customData']
    # Create a resource (if necessary)
    utils.task_resource_create(
        VirtualMachine(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        resource_create_payload)
def create_file_share(**_): '''Creates an Azure File Share''' # Get resource config values res_cfg = utils.get_resource_config() or dict() share_name = ctx.node.properties.get('name') metadata = res_cfg.get('metadata') quota = res_cfg.get('quota') fail_on_exist = res_cfg.get('fail_on_exist', False) # Check if invalid external resource if ctx.node.properties.get('use_external_resource', False) and \ not share_name: raise NonRecoverableError( '"use_external_resource" specified without a resource "name"') # Get the storage account storage_account = utils.get_parent(ctx.instance, rel_type=constants.REL_CONTAINED_IN_SA) storage_account_name = utils.get_resource_name(_ctx=storage_account) # Get the storage account keys keys = StorageAccount(_ctx=storage_account).list_keys() if not isinstance(keys, list) or len(keys) < 1: raise RecoverableError( 'StorageAccount reported no usable authentication keys') # Get an interface to the Storage Account storage_account_key = keys[0].get('key') storageacct = CloudStorageAccount(account_name=storage_account_name, account_key=storage_account_key) # Get an interface to the File Service filesvc = storageacct.create_file_service() if ctx.node.properties.get('use_external_resource', False): # Attempt to use an existing File Share (if specified) ctx.logger.debug( 'Checking for existing File Share "{0}"'.format(share_name)) try: share = filesvc.get_share_properties(share_name) metadata = share.get('metadata', dict()) quota = share.get('properties', dict()).get('quota') created = False except Exception as ex: ctx.logger.error( 'File Share "{0}" does not exist and ' '"use_external_resource" is set to true'.format(share_name)) raise NonRecoverableError(ex) else: # Generate a new File Share name if needed if not share_name: ctx.logger.info('Generating a new File Share name') for _ in xrange(0, 10): tmpname = file_share_name_generator() if not file_share_exists(filesvc, tmpname): share_name = tmpname break # Handle name error if not share_name: 
raise NonRecoverableError( 'Error generating a new File Share name. Failed ' 'after 10 tries.') # Attempt to create the File Share ctx.logger.debug('Creating File Share "{0}"'.format(share_name)) created = filesvc.create_share(share_name=share_name, metadata=metadata, quota=quota, fail_on_exist=False) if not created: ctx.logger.warn('File Share already exists') if fail_on_exist: raise NonRecoverableError( 'File Share already exists in the storage account and ' '"fail_on_exist" set to True') # Set run-time properties ctx.instance.runtime_properties['name'] = share_name ctx.instance.runtime_properties['quota'] = quota ctx.instance.runtime_properties['metadata'] = metadata ctx.instance.runtime_properties['created'] = created ctx.instance.runtime_properties['storage_account'] = storage_account_name ctx.instance.runtime_properties['username'] = storage_account_name ctx.instance.runtime_properties['password'] = storage_account_key ctx.instance.runtime_properties['uri'] = '{0}.{1}/{2}'.format( storage_account_name, constants.CONN_STORAGE_FILE_ENDPOINT, share_name)
def create(args=None, **_):
    '''
    Uses an existing, or creates a new, Virtual Machine

    Builds the storage, network and OS profiles from the resource
    config (optionally overridden via ``args``) and connected nodes,
    base64-encodes any userdata into osProfile.customData, then issues
    the create request.

    :param dict args: optional overrides merged into the resource config
    '''
    # Generate a resource name (if needed)
    utils.generate_resource_name(
        VirtualMachine(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        generator=vm_name_generator)
    res_cfg = utils.get_resource_config(args=args) or dict()
    # Build storage profile
    osdisk = build_osdisk_profile(
        res_cfg.get('storageProfile', dict()).get('osDisk', dict()))
    datadisks = build_datadisks_profile(
        res_cfg.get('storageProfile', dict()).get('dataDisks', list()))
    storage_profile = {
        'osDisk': osdisk,
        'dataDisks': datadisks
    }
    # Build the network profile
    network_profile = build_network_profile()
    # Build the OS profile
    os_family = ctx.node.properties.get('os_family', '').lower()
    os_profile = dict()
    # Set defaults for Windows installs to enable WinRM listener
    # NOTE(review): when os_family is 'windows' and the user supplied
    # a windowsConfiguration, the elif below may still fire (no
    # linuxConfiguration present) and set linux defaults — confirm
    # this is the intended fallthrough
    if os_family == 'windows' and \
            not res_cfg.get('osProfile', dict()).get('windowsConfiguration'):
        os_profile = {
            'windowsConfiguration': {
                # This is required for extension scripts to work
                'provisionVMAgent': True,
                'winRM': {
                    'listeners': [{
                        'protocol': 'Http',
                        'certificateUrl': None
                    }]
                }
            },
            'linuxConfiguration': None
        }
    elif not res_cfg.get('osProfile', dict()).get('linuxConfiguration'):
        os_profile = {
            'linuxConfiguration': {
                'disablePasswordAuthentication': False
            },
            'windowsConfiguration': None
        }
    # Set the computerName if it's not set already
    os_profile['computerName'] = \
        res_cfg.get(
            'osProfile', dict()
        ).get('computerName', utils.get_resource_name())
    resource_create_payload = \
        {
            'location': ctx.node.properties.get('location'),
            'tags': ctx.node.properties.get('tags'),
            'plan': ctx.node.properties.get('plan'),
            'properties': utils.dict_update(
                utils.get_resource_config(args=args),
                {
                    'availabilitySet': utils.get_rel_id_reference(
                        AvailabilitySet, constants.REL_CONNECTED_TO_AS),
                    'networkProfile': network_profile,
                    'storageProfile': storage_profile,
                    'osProfile': os_profile
                }
            )
        }
    # support userdata from args.
    # NOTE: this os_profile is the merged copy inside the payload, not
    # the local dict built above
    os_profile = resource_create_payload['properties']['osProfile']
    userdata = _handle_userdata(os_profile.get('customData'))
    if userdata:
        ctx.logger.warn(
            'Azure customData implementation is dependent on '
            'Virtual Machine image support.')
        os_profile['customData'] = base64.b64encode(userdata.encode())
    # Remove customData from osProfile if empty to avoid 400 Error.
    elif 'customData' in resource_create_payload['properties']['osProfile']:
        del resource_create_payload['properties']['osProfile']['customData']
    # Create a resource (if necessary)
    utils.task_resource_create(VirtualMachine(
        api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_COMPUTE)),
        resource_create_payload)
def create_file_share(**_):
    '''
    Creates an Azure File Share (or adopts an existing one).

    Resolves the parent Storage Account through the
    ``REL_CONTAINED_IN_SA`` relationship, authenticates with its first
    listed key, and either validates an externally-managed share or
    creates a new one (generating a unique name when none is given).
    Connection details are stored in the instance runtime properties.

    :raises NonRecoverableError: if ``use_external_resource`` is set
        without a name, the external share is missing, name generation
        fails, or the share already exists with ``fail_on_exist``
    :raises RecoverableError: if the Storage Account reports no keys
    '''
    # Get resource config values
    res_cfg = utils.get_resource_config() or dict()
    share_name = ctx.node.properties.get('name')
    metadata = res_cfg.get('metadata')
    quota = res_cfg.get('quota')
    fail_on_exist = res_cfg.get('fail_on_exist', False)
    # Check if invalid external resource
    if ctx.node.properties.get('use_external_resource', False) and \
            not share_name:
        raise NonRecoverableError(
            '"use_external_resource" specified without a resource "name"')
    # Get the storage account
    storage_account = utils.get_parent(
        ctx.instance,
        rel_type=constants.REL_CONTAINED_IN_SA)
    storage_account_name = utils.get_resource_name(_ctx=storage_account)
    # Get the storage account keys
    keys = StorageAccount(_ctx=storage_account).list_keys()
    if not isinstance(keys, list) or len(keys) < 1:
        raise RecoverableError(
            'StorageAccount reported no usable authentication keys')
    # Get an interface to the Storage Account
    storage_account_key = keys[0].get('key')
    storageacct = CloudStorageAccount(
        account_name=storage_account_name,
        account_key=storage_account_key)
    # Get an interface to the File Service
    filesvc = storageacct.create_file_service()
    if ctx.node.properties.get('use_external_resource', False):
        # Attempt to use an existing File Share (if specified)
        ctx.logger.debug('Checking for existing File Share "{0}"'
                         .format(share_name))
        try:
            share = filesvc.get_share_properties(share_name)
            metadata = share.get('metadata', dict())
            quota = share.get('properties', dict()).get('quota')
            created = False
        except Exception as ex:
            ctx.logger.error('File Share "{0}" does not exist and '
                             '"use_external_resource" is set to true'
                             .format(share_name))
            raise NonRecoverableError(ex)
    else:
        # Generate a new File Share name if needed
        if not share_name:
            ctx.logger.info('Generating a new File Share name')
            # FIX: xrange is Python-2-only (NameError on Python 3);
            # range behaves identically for this small bounded loop.
            for _ in range(0, 10):
                tmpname = file_share_name_generator()
                if not file_share_exists(filesvc, tmpname):
                    share_name = tmpname
                    break
        # Handle name error
        if not share_name:
            raise NonRecoverableError(
                'Error generating a new File Share name. Failed '
                'after 10 tries.')
        # Attempt to create the File Share
        ctx.logger.debug('Creating File Share "{0}"'.format(share_name))
        created = filesvc.create_share(
            share_name=share_name,
            metadata=metadata,
            quota=quota,
            fail_on_exist=False)
        if not created:
            ctx.logger.warn('File Share already exists')
            if fail_on_exist:
                raise NonRecoverableError(
                    'File Share already exists in the storage account and '
                    '"fail_on_exist" set to True')
    # Set run-time properties
    ctx.instance.runtime_properties['name'] = share_name
    ctx.instance.runtime_properties['quota'] = quota
    ctx.instance.runtime_properties['metadata'] = metadata
    ctx.instance.runtime_properties['created'] = created
    ctx.instance.runtime_properties['storage_account'] = storage_account_name
    ctx.instance.runtime_properties['username'] = storage_account_name
    ctx.instance.runtime_properties['password'] = storage_account_key
    ctx.instance.runtime_properties['uri'] = '{0}.file.{1}/{2}'.format(
        storage_account_name,
        constants.CONN_STORAGE_FILE_ENDPOINT,
        share_name
    )
def create(ctx, args=None, **_):
    """Uses an existing, or creates a new, Virtual Machine.

    SDK-based implementation: builds storage/network/OS profiles in
    azure-sdk snake_case key style, merges optional spot-instance
    parameters and the node's resource config, then calls
    ``VirtualMachine.create_or_update()``.  Stores the resulting
    resource, its id, and the resource group in runtime properties.

    :param ctx: Cloudify context carrying node properties, instance
        relationships, and runtime properties
    :param args: optional overrides passed to
        ``utils.get_resource_config()``
    :raises cfy_exc.NonRecoverableError: when the Azure call fails
        with a ``CloudError``
    """
    azure_config = utils.get_client_config(ctx.node.properties)
    name = utils.get_resource_name(ctx)
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_COMPUTE)
    virtual_machine = VirtualMachine(azure_config, ctx.logger, api_version)
    res_cfg = ctx.node.properties.get("resource_config", {})
    # NOTE(review): pop() mutates the node's resource_config in place so
    # that spot_instance is absent when utils.get_resource_config() is
    # merged into the payload below -- presumably intentional, but it
    # means the mutation persists on ctx.node.properties; confirm this
    # is safe across operation retries.
    spot_instance = res_cfg.pop("spot_instance", None)
    # Build storage profile
    osdisk = build_osdisk_profile(ctx, res_cfg.get(
        'storageProfile', dict()).get('osDisk', dict()))
    datadisks = build_datadisks_profile(ctx, res_cfg.get(
        'storageProfile', dict()).get('dataDisks', list()))
    # SDK-style snake_case keys (unlike the REST-style camelCase used
    # elsewhere in this file).
    storage_profile = {
        'os_disk': osdisk,
        'data_disks': datadisks
    }
    # Build the network profile
    network_profile = build_network_profile(ctx)
    # Build the OS profile
    os_family = ctx.node.properties.get('os_family', '').lower()
    os_profile = dict()
    # Set defaults for Windows installs to enable WinRM listener
    if os_family == 'windows' and \
            not res_cfg.get('osProfile', dict()).get('windowsConfiguration'):
        os_profile = {
            'windows_configuration': {
                # This is required for extension scripts to work
                'provision_vm_agent': True,
                'win_rm': {
                    'listeners': [{
                        'protocol': 'Http',
                        'certificate_url': None
                    }]
                }
            },
            'linux_configuration': None
        }
    elif not res_cfg.get('osProfile', dict()).get('linuxConfiguration'):
        os_profile = {
            'linux_configuration': {
                'disable_password_authentication': False
            },
            'windows_configuration': None
        }
    # Set the computerName if it's not set already
    os_profile['computer_name'] = \
        res_cfg.get(
            'osProfile', dict()
        ).get('computerName', name)
    # Resolve an availability set from a connected-to relationship, if
    # any.  The isinstance check allows REL_CONNECTED_TO_AS to be
    # declared either as a single type string or a tuple of types.
    # NOTE(review): no break after a match -- the last matching
    # relationship wins if several exist.
    availability_set = None
    rel_type = constants.REL_CONNECTED_TO_AS
    for rel in ctx.instance.relationships:
        if isinstance(rel_type, tuple):
            if any(x in rel.type_hierarchy for x in rel_type):
                availability_set = {
                    'id': rel.target.instance.runtime_properties.get(
                        "resource_id")
                }
        else:
            if rel_type in rel.type_hierarchy:
                availability_set = {
                    'id': rel.target.instance.runtime_properties.get(
                        "resource_id")
                }
    resource_create_payload = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'plan': ctx.node.properties.get('plan'),
        'availabilitySet': availability_set,
        'networkProfile': network_profile,
        'storageProfile': storage_profile,
        'osProfile': os_profile
    }
    # check if spot_instance
    if spot_instance and spot_instance.get("is_spot_instance"):
        # this is just an indacator not part of the api
        spot_instance.pop("is_spot_instance")
        # handle the params
        resource_create_payload = \
            utils.dict_update(resource_create_payload, spot_instance)
    resource_create_payload = \
        utils.handle_resource_config_params(resource_create_payload,
                                            utils.get_resource_config(
                                                _ctx=ctx, args=args))
    # support userdata from args.
    os_profile = resource_create_payload['os_profile']
    userdata = _handle_userdata(ctx, os_profile.get('custom_data'))
    if userdata:
        ctx.logger.warn(
            'Azure customData implementation is dependent on '
            'Virtual Machine image support.')
        # Azure requires customData to be base64-encoded text; decode
        # back to str so the payload stays serializable.
        resource_create_payload['os_profile']['custom_data'] = \
            base64.b64encode(userdata.encode('utf-8')).decode('utf-8')
    # Remove custom_data from os_profile if empty to avoid Errors.
    elif 'custom_data' in resource_create_payload['os_profile']:
        del resource_create_payload['os_profile']['custom_data']
    # Create a resource (if necessary)
    try:
        result = \
            virtual_machine.create_or_update(resource_group_name, name,
                                             resource_create_payload)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "create virtual_machine '{0}' "
            "failed with this error : {1}".format(name, cr.message)
        )
    ctx.instance.runtime_properties['resource_group'] = resource_group_name
    ctx.instance.runtime_properties['resource'] = result
    ctx.instance.runtime_properties['resource_id'] = result.get("id", "")