def ForwardingRule(deployment, region, port, cidr, net_name, sub_name):
    """Builds the internal forwarding-rule resource for this deployment.

    Args:
        deployment: The name of this deployment.
        region: The region that this backend service will be deployed to.
        port: The port of the traffic to forward.
        cidr: string representing the cidr in a.b.c.d/x form.
        net_name: The name of the network that this backend service will
            operate on.
        sub_name: The name of the subnet that this backend service will
            operate on.

    Returns:
        The definition of the forwarding rule.
    """
    # The rule routes traffic on `port` at the application IP to the
    # internal backend service.
    properties = {
        default.PORTS: [port],
        default.IP_ADDRESS: utils.ApplicationIp(cidr),
        default.NETWORK: common.Ref(net_name),
        default.SUBNETWORK: common.Ref(sub_name),
        default.REGION: region,
        'backendService': common.Ref(_BackendServiceName(deployment)),
        default.LB_SCHEME: 'INTERNAL',
    }
    return {
        default.NAME: _ForwardRuleName(deployment),
        default.TYPE: default.FORWARDING_RULE,
        default.PROPERTIES: properties,
    }
def BackendService(region, deployment, net_name, num_cluster_nodes):
    """Builds the regional internal TCP backend service definition.

    Args:
        region: The region that this backend service will be deployed to.
        deployment: The name of this deployment.
        net_name: The name of the network that this backend service will
            operate on.
        num_cluster_nodes: number of cluster nodes in the deployment.

    Returns:
        A definition of a backend service.
    """
    # One backend entry per instance group, one group per zone in use.
    backends = [
        {'group': common.Ref(utils.InstanceGroupName(deployment, zone))}
        for zone in utils.GetZoneSet(region, num_cluster_nodes)
    ]
    return {
        default.NAME: _BackendServiceName(deployment),
        default.TYPE: default.REGION_BACKEND_SERVICE,
        default.PROPERTIES: {
            default.REGION: region,
            default.NETWORK: common.Ref(net_name),
            default.BACKENDS: backends,
            default.HEALTH_CHECKS: [
                common.Ref(utils.HealthCheckName(deployment))
            ],
            default.PROTOCOL: 'TCP',
            default.LB_SCHEME: 'INTERNAL',
        },
    }
def GenerateBackendService(context):
    """Generates one backendService resource plus its health check."""
    prop = context.properties
    service_port = prop[default.PORT]
    request_path = prop[default.HEALTH_PATH]
    port_name = prop[default.SERVICE]
    # setdefault keeps a shared, mutable dict on the properties so generated
    # values are visible to the caller.
    generated = prop.setdefault(GENERATED_PROP, dict())
    be_name = common.AutoName(context.env['name'], default.BACKEND_SERVICE)
    hc_name = common.AutoName(context.env['name'], default.HEALTHCHECK)
    health_check = {
        'name': hc_name,
        'type': default.HEALTHCHECK,
        'properties': {
            'port': service_port,
            'requestPath': request_path,
        },
    }
    backend_service = {
        'name': be_name,
        'type': default.BACKEND_SERVICE,
        'properties': {
            'port': service_port,
            'portName': port_name,
            'backends': GenerateBackends(context),
            'healthChecks': [common.Ref(hc_name)],
            'generatedProperties': generated,
        },
    }
    return [health_check, backend_service]
def GenerateDisks(context, disk_list, new_disks):
    """Generates as many disks as passed in the disk_list.

    Disks that carry a 'source' attach as existing disks; all others become
    new top-level disk resources appended to new_disks.

    Args:
        context: deployment context; supplies the project and the zone
            property (defaulted to DEFAULT_ZONE).
        disk_list: list of disk property dicts to attach to the vm.
        new_disks: list that newly created disk resources are appended to.

    Returns:
        A (sourced_disks, new_disks) tuple: the attachment entries for the
        vm and the top-level disk resources to create.

    Raises:
        common.Error: if a disk has neither a deviceName nor a diskName.
    """
    project = context.env[default.PROJECT]
    prop = context.properties
    zone = prop.setdefault(ZONE, DEFAULT_ZONE)
    sourced_disks = []
    disk_names = []
    for disk in disk_list:
        # Bug fix: validate before any name lookup. Previously the d_name
        # lookup below raised an opaque KeyError first, making this intended
        # error unreachable.
        if default.DEVICE_NAME not in disk and default.DISK_NAME not in disk:
            raise common.Error(
                'deviceName or diskName is needed for each disk in '
                'this module implemention of multiple disks per vm.')
        d_name = (disk[default.DEVICE_NAME] if default.DISK_NAME not in disk
                  else disk[default.DISK_NAME])
        if default.DISK_SOURCE in disk:
            # Existing disk, expect disk api link
            source = disk[default.DISK_SOURCE]
        else:
            # New disk: created separately as a top-level resource.
            disk_init = disk.setdefault(default.INITIALIZEP, dict())
            disk_size = disk_init.setdefault(default.DISK_SIZE,
                                             DEFAULT_DATADISKSIZE)
            passed_disk_type = disk_init.setdefault(default.TYPE,
                                                    DEFAULT_DISKTYPE)
            disk_type = common.LocalComputeLink(project, zone, 'diskTypes',
                                                passed_disk_type)
            new_disks.append({
                'name': d_name,
                'type': default.DISK,
                'properties': {
                    'type': disk_type,
                    'sizeGb': disk_size,
                    'zone': zone
                }
            })
            disk_names.append(d_name)
            source = common.Ref(d_name)
        sourced_disks.append({
            'deviceName': d_name,
            'autoDelete': True,
            'boot': False,
            'source': source,
            'type': DEFAULT_PERSISTENT,
        })
    # Surface the created disk names to the vm via instance metadata.
    items = prop[METADATA].setdefault('items', list())
    items.append({'key': ATTACHED_DISKS, 'value': ','.join(disk_names)})
    return sourced_disks, new_disks
def GenerateDisks(context, disk_list, new_disks):
    """Generates as many disks as passed in the disk_list."""
    prop = context.properties
    zone = prop.setdefault(ZONE, DEFAULT_ZONE)
    sourced_disks = []
    disk_names = []
    for disk in disk_list:
        if default.DISK_SOURCE in disk or disk[default.TYPE] == SCRATCH:
            # Pre-existing and scratch disks attach as-is; no separate
            # resource is created for them.
            sourced_disks.append(disk)
            continue
        # Everything else becomes its own disk resource; pull its
        # parameters out of initializeParams.
        disk_init = disk[default.INITIALIZEP]
        if default.DEVICE_NAME in disk:
            attach_name = disk[default.DEVICE_NAME]
        elif default.DISK_NAME in disk_init:
            attach_name = disk_init[default.DISK_NAME]
        else:
            raise common.Error(
                'deviceName or diskName is needed for each disk in '
                'this module implemention of multiple disks per vm.')
        new_disks.append({
            'name': attach_name,
            'type': default.DISK,
            'properties': {
                'type': disk_init[default.DISKTYPE],
                'sizeGb': disk_init[default.DISK_SIZE],
                'zone': zone
            }
        })
        disk_names.append(attach_name)
        sourced_disks.append({
            'deviceName': attach_name,
            'autoDelete': disk[default.AUTO_DELETE],
            'boot': False,
            'source': common.Ref(attach_name),
            'type': disk[default.TYPE],
        })
    # Record the created disk names in the vm metadata.
    items = prop[METADATA].setdefault('items', list())
    items.append({'key': ATTACHED_DISKS, 'value': ','.join(disk_names)})
    return sourced_disks, new_disks
def GenerateDisks(context, disk_list, new_disks):
    """Generates as many disks as passed in the disk_list."""
    prop = context.properties
    zone = prop.setdefault(ZONE, DEFAULT_ZONE)
    sourced_disks = []
    disk_names = []
    for spec in disk_list:
        if default.DISK_SOURCE in spec or spec[default.TYPE] == SCRATCH:
            # These disks do not need to be created as separate resources.
            sourced_disks.append(spec)
        else:
            # Extract disk parameters and create as a separate resource.
            init = spec[default.INITIALIZEP]
            if default.DEVICE_NAME in spec:
                name = spec[default.DEVICE_NAME]
            elif default.DISK_NAME in init:
                name = init[default.DISK_NAME]
            else:
                raise common.Error(
                    "deviceName or diskName is needed for each disk in "
                    "this module implemention of multiple disks per vm.")
            resource = {
                "name": name,
                "type": default.DISK,
                "properties": {
                    "type": init[default.DISKTYPE],
                    "sizeGb": init[default.DISK_SIZE],
                    "zone": zone,
                },
            }
            attachment = {
                "deviceName": name,
                "autoDelete": spec[default.AUTO_DELETE],
                "boot": False,
                "source": common.Ref(name),
                "type": spec[default.TYPE],
            }
            new_disks.append(resource)
            disk_names.append(name)
            sourced_disks.append(attachment)
    # Record the created disk names in the vm metadata.
    items = prop[METADATA].setdefault("items", list())
    items.append({"key": ATTACHED_DISKS, "value": ",".join(disk_names)})
    return sourced_disks, new_disks
def VmDisk(vm_name, disk_type, disk_num):
    """Builds a vm-level disk attachment entry.

    The definition is not top-level, and should be embedded within a disk
    definition.

    Args:
        vm_name: name of the VM that this disk will be on.
        disk_type: type of the disk.
        disk_num: the number for this disk. Must correspond to the
            top-level disk.

    Returns:
        A vm disk definition.
    """
    name = DiskName(vm_name, disk_type, disk_num)
    attachment = {default.DEVICE_NAME: name}
    # Link to the top-level disk resource with the same name.
    attachment[default.DISK_SOURCE] = common.Ref(name)
    attachment[default.AUTO_DELETE] = True
    return attachment
def GenerateAutscaledGroup(context, zone_dict):
    """Generate one autoscaled_group resource Dict with a passed zone.

    Produces a managed instance group plus an autoscaler targeting it; both
    names include the shortened zone so multiple zones can coexist.

    Args:
        context: deployment context (supplies name, project, properties).
        zone_dict: dict holding the zone, the target size, and the maximum
            replica count for this group.

    Returns:
        List of two resources: the IGM and its autoscaler.
    """
    name = context.env['name']
    prop = context.properties
    zone = zone_dict[default.ZONE]
    zone_abbrv = common.ShortenZoneName(zone)
    as_name = common.AutoName(name, default.AUTOSCALER, zone_abbrv)
    base_name = name + '-' + default.AKA[default.INSTANCE]
    igm_name = common.AutoName(name, default.IGM, zone_abbrv)
    max_num = zone_dict[MAX_NUM]
    # Bug fix: `project` was assigned twice (context.env[PROJECT], then
    # immediately overwritten by context.env[default.PROJECT]); keep only
    # the assignment whose value was actually used.
    project = context.env[default.PROJECT]
    size = zone_dict[SIZE]
    vm_template = prop[VM_TEMPLATE]
    # pyformat: disable
    resource = [{
        'name': igm_name,
        'type': default.IGM,
        'properties': {
            'project': project,
            'zone': zone,
            'targetSize': size,
            'baseInstanceName': base_name,
            'instanceTemplate': vm_template
        }
    }, {
        'name': as_name,
        'type': default.AUTOSCALER,
        'properties': {
            'project': project,
            'zone': zone,
            'target': common.Ref(igm_name),
            'autoscalingPolicy': {
                'maxNumReplicas': max_num
            }
        }
    }]
    # pyformat: enable
    return resource
def FirewallRule(name, net_name, protocol, deployment, sources, ports=None):
    """Creates a Firewall Rule definition.

    Returns a firewall definition based on arguments that is compatible
    with the gcloud deployment manager.

    Args:
        name: string name of the firewall rule.
        net_name: string name of the network that this rule will apply to.
        protocol: The network protocol, e.g. 'ICMP', 'TCP', 'UDP'.
        deployment: name of this deployment.
        sources: list of strings cidrs of traffic to be allowed.
        ports: the TCP or UDP ports that this firewall rule will apply to.

    Returns:
        Firewall Rule definition compatible with gcloud deployment launcher.
    """
    allowed = {default.IP_PROTO: protocol}
    if ports:
        allowed.update({default.PORTS: [ports]})
    properties = {
        # Bug fix: the spurious `.format(net_name)` chained onto the ref has
        # been removed -- it was at best a no-op, and would mangle the ref
        # (or raise) if the ref string ever contained format braces.
        default.NETWORK: common.Ref(net_name),
        default.ALLOWED: [allowed],
        default.SRC_RANGES: sources
    }
    firewall_rule_name = "{deployment}-{name}".format(deployment=deployment,
                                                      name=name)
    return {
        default.NAME: firewall_rule_name,
        default.TYPE: default.FIREWALL,
        default.PROPERTIES: properties
    }
def generate_config(context):
    """Builds the autoscaling security-gateway deployment.

    This method will:
      1. Create an instance template for a security GW
         (with a tag for the managing security server).
      2. Create a managed instance group
         (based on the instance template and zones list provided by the user).
      3. Configure autoscaling
         (based on min, max & policy settings provided by the user).
    """
    prop = context.properties
    prop['deployment'] = context.env['deployment']
    prop['project'] = context.env['project']
    prop['templateName'] = TEMPLATE_NAME
    prop['templateVersion'] = TEMPLATE_VERSION
    # Normalize the boolean to the lowercase string form the template expects.
    prop['allowUploadDownload'] = str(prop['allowUploadDownload']).lower()
    prop['hasInternet'] = 'true'  # via Google Private Access
    prop['installationType'] = 'AutoScale'
    prop['resources'] = []
    prop['outputs'] = []
    prop['gw_dependencies'] = []
    prop['computed_sic_key'] = password.GeneratePassword(12, False)
    prop['gatewayExternalIP'] = (
        prop['mgmtNIC'] == 'Ephemeral Public IP (eth0)')
    # First token of the version string, suffixed "-GW", keys into VERSIONS.
    version_chosen = prop['autoscalingVersion'].split(' ')[0] + "-GW"
    nics = create_nics(context)
    gw_template = create_instance_template(
        context, prop['deployment'], nics,
        depends_on=prop['gw_dependencies'],
        gw_version=VERSIONS[version_chosen])
    prop['resources'] += [gw_template]
    # Chain dependencies: template -> IGM -> autoscaler.
    prop['igm_dependencies'] = [gw_template['name']]
    igm = GenerateAutscaledGroup(context, prop['deployment'],
                                 gw_template['name'],
                                 prop['igm_dependencies'])
    prop['resources'] += [igm]
    prop['autoscaler_dependencies'] = [igm['name']]
    # NOTE(review): cpu_usage may be None when "cpuUsage" is absent, yet the
    # 'cpuUsagePercentage' output below calls int(prop['cpuUsage'])
    # unconditionally -- confirm the schema makes cpuUsage required.
    cpu_usage = prop.get("cpuUsage")
    autoscaler = CreateAutscaler(context, prop['deployment'], igm['name'],
                                 cpu_usage, prop['autoscaler_dependencies'])
    prop['resources'] += [autoscaler]
    prop['outputs'] += [
        {
            'name': 'deployment',
            'value': prop['deployment']
        },
        {
            'name': 'project',
            'value': prop['project']
        },
        {
            'name': 'instanceTemplateName',
            'value': gw_template['name']
        },
        {
            'name': 'InstanceTemplateLink',
            'value': common.Ref(gw_template['name'])
        },
        {
            'name': 'IGMname',
            'value': igm['name']
        },
        {
            'name': 'IGMLink',
            'value': common.RefGroup(igm['name'])
        },
        {
            'name': 'cpuUsagePercentage',
            'value': str(int(prop['cpuUsage'])) + '%'
        },
        {
            'name': 'minInstancesInt',
            'value': str(int(prop['minInstances']))
        },
        {
            'name': 'maxInstancesInt',
            'value': str(int(prop['maxInstances']))
        },
    ]
    return common.MakeResource(prop['resources'], prop['outputs'])
def GenerateDisks(context, disk_list, new_disks):
    """Generates as many disks as passed in the disk_list.

    Disks with a 'source' attach as existing disks; scratch (local-ssd)
    disks are normalized in place; anything else becomes a new top-level
    disk resource appended to new_disks.

    Args:
        context: deployment context; supplies the project and the zone
            property (defaulted to DEFAULT_ZONE).
        disk_list: list of disk property dicts to attach to the vm.
        new_disks: list that newly created disk resources are appended to.

    Returns:
        A (sourced_disks, new_disks) tuple: the attachment entries for the
        vm and the top-level disk resources to create.

    Raises:
        common.Error: if a non-scratch disk has neither a deviceName nor a
            diskName.
    """
    project = context.env[default.PROJECT]
    prop = context.properties
    zone = prop.setdefault(ZONE, DEFAULT_ZONE)
    sourced_disks = []
    disk_names = []
    for disk in disk_list:
        d_autodelete = (True if default.AUTO_DELETE not in disk
                        else disk[default.AUTO_DELETE])
        d_type = (DEFAULT_PERSISTENT if default.TYPE not in disk
                  else disk[default.TYPE])
        # Bug fix: handle scratch disks before any name lookup -- they never
        # use a device name, so they must not fail when one is missing.
        if default.DISK_SOURCE not in disk and d_type == SCRATCH:
            if default.INITIALIZEP in disk:
                disk_type = disk[default.INITIALIZEP][DISKTYPE]
            else:
                disk_type = 'local-ssd'
            # Replace initializeParams with the fully-qualified type link.
            disk[default.INITIALIZEP] = {
                DISKTYPE: common.LocalComputeLink(project, zone, 'diskTypes',
                                                  disk_type)
            }
            sourced_disks.append(disk)
            continue
        # Bug fix: validate before the name lookup so a missing name raises
        # the intended common.Error instead of an opaque KeyError.
        if default.DEVICE_NAME not in disk and default.DISK_NAME not in disk:
            raise common.Error(
                'deviceName or diskName is needed for each disk in '
                'this module implemention of multiple disks per vm.')
        d_name = (disk[default.DEVICE_NAME] if default.DISK_NAME not in disk
                  else disk[default.DISK_NAME])
        if default.DISK_SOURCE in disk:
            # Existing disk, expect disk api link
            source = disk[default.DISK_SOURCE]
        else:
            # In the Instance API reference, size and type live within
            # initializeParams; they may also be passed directly.
            if default.INITIALIZEP in disk:
                disk_init = disk[default.INITIALIZEP]
                disk_size = disk_init.setdefault(default.DISK_SIZE,
                                                 DEFAULT_DATADISKSIZE)
                passed_disk_type = disk_init.setdefault(DISKTYPE,
                                                        DEFAULT_DISKTYPE)
            else:
                disk_size = disk.setdefault(default.DISK_SIZE,
                                            DEFAULT_DATADISKSIZE)
                passed_disk_type = disk.setdefault(DISKTYPE, DEFAULT_DISKTYPE)
            disk_type = common.LocalComputeLink(project, zone, 'diskTypes',
                                                passed_disk_type)
            new_disks.append({
                'name': d_name,
                'type': default.DISK,
                'properties': {
                    'type': disk_type,
                    'sizeGb': disk_size,
                    'zone': zone
                }
            })
            disk_names.append(d_name)
            source = common.Ref(d_name)
        sourced_disks.append({
            'deviceName': d_name,
            'autoDelete': d_autodelete,
            'boot': False,
            'source': source,
            'type': d_type,
        })
    # Surface the created disk names to the vm via instance metadata.
    items = prop[METADATA].setdefault('items', list())
    items.append({'key': ATTACHED_DISKS, 'value': ','.join(disk_names)})
    return sourced_disks, new_disks
def GenerateConfig(context):
    """Generates the config for the VMs in the deployment.

    Returns a dictionary with the configs constructed for the backend nodes
    and the ad node in the deployment. There is also an instance group that
    the backends are added to.

    Args:
        context: Context of the deployment.

    Returns:
        List of resources that the gcloud deployment-manager is to create.
    """
    ValidateVmContext(context)
    deployment = context.env["deployment"]
    num_cluster_nodes = context.properties["num_cluster_nodes"]
    region = context.properties["region"]
    net_name = utils.NetworkName(deployment)
    # list of top level resources to be returned to the gcloud deployment
    # orchestrator. Generate passwords for use in the windows apps install.
    resources = [{
        default.NAME: "service-password",
        default.TYPE: "password.py",
        default.PROPERTIES: {
            "length": 8,
            "includeSymbols": False,
        }
    }, {
        default.NAME: "safe-password",
        default.TYPE: "password.py",
        default.PROPERTIES: {
            "length": 14,
            "includeSymbols": True,
        }
    }]
    # list of instance names to be put in the instance group
    # NOTE(review): `instances` is appended to below but never referenced in
    # the returned config -- possibly vestigial; confirm before removing.
    instances = []

    def _GetImagePath(family, image):
        # NOTE(review): the first path segment is "projects/<family>" --
        # callers pass an image project constant here despite the parameter
        # name; confirm the naming.
        return "{}projects/{}/global/images/{}".format(
            default.COMPUTE_URL_BASE, family, image)

    def _GetMachinePath(zone):
        # Full machine-type URL for this project/zone.
        machine_type = utils.ConvertMachineTypeString(
            context.properties["machine_type"])
        return "{}projects/{}/zones/{}/machineTypes/{}".format(
            default.COMPUTE_URL_BASE, context.env["project"], zone,
            machine_type)

    # One cluster VM per node, each placed in its region-derived zone.
    for node_num in xrange(num_cluster_nodes):
        zone = utils.GetNodeZoneFromRegion(region, node_num)
        machine_type = _GetMachinePath(zone)
        vm = ClusterVm(context,
                       _GetImagePath(_SQL_FCI_PUBLIC_FAMILY, _SQL_SVR_2016),
                       machine_type, zone, node_num)
        resources.extend(vm)
        instances.append(common.Ref(utils.NodeName(deployment, node_num)))
    default_zone = utils.GetDefaultZoneFromRegion(region)
    resources.append(
        AdVm(context, _GetMachinePath(default_zone), default_zone,
             _GetImagePath(_SQL_FCI_PUBLIC_FAMILY, _WINDOWS_2016)))
    # The instance groups will be used by the load balancer.
    for instance_group_zone in utils.GetZoneSet(region, num_cluster_nodes):
        resources.append({
            default.NAME:
                utils.InstanceGroupName(deployment, instance_group_zone),
            default.TYPE: default.INSTANCE_GROUP,
            default.PROPERTIES: {
                default.ZONE: instance_group_zone,
                default.NETWORK: common.Ref(net_name),
            }
        })
    return {
        "resources": resources,
    }
def AdVm(context, machine_type, zone, image):
    """Builds the VM definition for the AD node.

    This VM only has a single boot disk because it will not be a part of
    the cluster.

    Args:
        context: context of the deployment.
        machine_type: full path of the machine type.
        zone: The zone where the VM will reside.
        image: full path of the image to boot.

    Returns:
        definition of AD VM.
    """
    project = context.env["project"]
    deployment = context.env["deployment"]
    sql_cidr = context.properties.get("sql_cidr",
                                      utils.DEFAULT_DEPLOYMENT_CIDR)
    ad_node_name = utils.AdNodeName(deployment)
    network_interface = {
        default.ACCESS_CONFIGS: [{
            default.NAME: "external-nat",
            default.TYPE: default.ONE_NAT,
        }],
        default.NETWORK: common.Ref(utils.NetworkName(deployment)),
        default.SUBNETWORK: common.Ref(utils.SubnetName(deployment)),
        default.NETWORKIP: utils.AdNodeIp(sql_cidr),
    }
    boot_disk = BootDisk(ad_node_name, "cntlr-boot-disk", project, zone,
                         image)
    # Scopes grant access to Google APIs used by the install scripts.
    service_account = {
        "email": "default",
        "scopes": [
            "https://www.googleapis.com/auth/cloud-platform",
            "https://www.googleapis.com/auth/userinfo.email",
            "https://www.googleapis.com/auth/cloudruntimeconfig",
        ],
    }
    properties = {
        default.ZONE: zone,
        default.MACHINETYPE: machine_type,
        default.SERVICE_ACCOUNTS: [service_account],
        default.DISKS: [boot_disk],
        default.NETWORK_INTERFACES: [network_interface],
        default.METADATA: BuildAdNodeInstanceMetadata(context, zone),
    }
    return {
        default.NAME: ad_node_name,
        default.TYPE: default.INSTANCE,
        default.PROPERTIES: properties,
    }
def ClusterVm(context, image, machine_type, zone, node_num):
    """Generates the config for a single cluster VM.

    Creates and returns a VM definition for a cluster node. The number of
    disks is determined by the user. The network configuration is
    determined through this node's "node_num", which is a unique
    identifier for this node and so guarantees a unique IP address.

    Args:
        context: context of the deployment.
        image: full path name of the image to use to boot.
        machine_type: full path of the machine type.
        zone: the zone where the VM will reside.
        node_num: unique identifier of this VM.

    Raises:
        utils.VmInputValidationError: if the volume size is unsupported or
            the node name is too long.

    Returns:
        VM definition of the cluster node.
    """
    project = context.env["project"]
    deployment = context.env["deployment"]
    sql_cidr = context.properties.get("sql_cidr",
                                      utils.DEFAULT_DEPLOYMENT_CIDR)
    net_name = utils.NetworkName(deployment)
    sub_name = utils.SubnetName(deployment)
    volume_size_gb = utils.ConvertVolumeSizeString(
        context.properties["volume_size_gb"])
    if volume_size_gb not in _VALID_VOLUME_SIZES:
        # Bug fix: the message previously contained a literal, unformatted
        # "{valid_sizes}" placeholder because .format() was never called.
        raise utils.VmInputValidationError(
            "volume size unsupported. Volume size must be one of "
            "{valid_sizes}".format(valid_sizes=_VALID_VOLUME_SIZES))
    vm_name = utils.NodeName(deployment, node_num)
    if len(vm_name) > _MAX_NAME_LEN:
        raise utils.VmInputValidationError(
            "Deployment name is too long. Node names are based on deployment"
            " name and total length must be no longer than {} characters."
            .format(_MAX_NAME_LEN))
    resources = []
    disks = [BootDisk(vm_name, "cluster-boot-disk", project, zone, image)]
    # For each requested disk we need two disk definitions:
    # 1) A top level disk definition to go into the "resources". This is
    #    used to create the disk.
    # 2) A description of the disk to go in the instance definition. This
    #    is used to link the VM to a specific disk.
    for disk_num in xrange(_NUM_PD_SSD_DISKS):
        resources.append(
            Disk(project, zone, vm_name, _PD_SSD_DISK, disk_num,
                 _VOLUME_TO_DISK_SIZE[volume_size_gb]))
        disks.append(VmDisk(vm_name, _PD_SSD_DISK, disk_num))
    nic = {
        default.ACCESS_CONFIGS: [{
            default.NAME: "external-nat",
            default.TYPE: default.ONE_NAT
        }],
        default.NETWORK: common.Ref(net_name),
        default.SUBNETWORK: common.Ref(sub_name),
        default.NETWORKIP: utils.NodeIp(sql_cidr, node_num)
    }
    instance = {
        default.ZONE: zone,
        # The service account is necessary for the VM to have access to
        # google's API. This will be used in scripts that set/get data
        # related to runtime watchers and configs.
        default.SERVICE_ACCOUNTS: [{
            "email": "default",
            "scopes": [
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/userinfo.email",
                "https://www.googleapis.com/auth/cloudruntimeconfig"
            ]
        }],
        default.MACHINETYPE: machine_type,
        default.DISKS: disks,
        default.NETWORK_INTERFACES: [nic],
        default.METADATA: BuildClusterInstanceMetadata(context, zone,
                                                       node_num)
    }
    # We want the master node (node 0) to come up first so that it is
    # guaranteed to be the master. We do this by making all non-master
    # nodes depend on node 0.
    deps = [] if node_num == 0 else [utils.NodeName(deployment, 0)]
    resources.append({
        default.NAME: vm_name,
        default.TYPE: default.INSTANCE,
        default.METADATA: {
            "dependsOn": deps
        },
        default.PROPERTIES: instance
    })
    return resources
def GenerateConfig(context):
    """Generates the network configuration for the gcloud deployment.

    Args:
        context: context of the deployment.

    Returns:
        List of resources that the deployment manager will create.
    """
    region = context.properties["region"]
    sql_cidr = context.properties.get("sql_cidr",
                                      utils.DEFAULT_DEPLOYMENT_CIDR)
    deployment = context.env["deployment"]
    net_name = utils.NetworkName(deployment)
    sub_name = utils.SubnetName(deployment)
    # Bug fix: the property defaults to the *string* "false", which is
    # truthy, so the world-open SQL firewall rule below was being added
    # even when dev_mode was off. Interpret the flag explicitly instead of
    # relying on truthiness.
    is_test = str(
        context.properties.get("dev_mode", "false")).lower() == "true"
    resources = [
        {
            default.NAME: net_name,
            default.TYPE: default.NETWORK_TYPE,
            default.PROPERTIES: {
                default.AUTO_CREATE_SUBNETWORKS: False,
            }
        },
        {
            default.NAME: sub_name,
            default.TYPE: default.SUBNETWORK_TYPE,
            default.PROPERTIES: {
                default.NETWORK: common.Ref(net_name),
                default.REGION: region,
                default.IP_CIDR_RANGE: sql_cidr
            }
        },
        # Allow ICMP for debugging
        FirewallRule("allow-all-icmp", net_name, "ICMP", deployment,
                     sources=[sql_cidr]),
        # Allow RDP, SQL, and Load Balancer Health Check from anywhere
        FirewallRule("allow-rdp-port", net_name, "TCP", deployment,
                     sources=["0.0.0.0/0"], ports="3389"),
        FirewallRule(
            "allow-health-check-port", net_name, "TCP", deployment,
            # The Google ILB health check service IP ranges.
            sources=["130.211.0.0/22", "35.191.0.0/16"],
            ports=utils.HEALTH_CHECK_PORT),
        # Allow ALL TCP and UDP traffic from within the same network. We
        # should only have cluster and AD nodes on this network so the
        # traffic is trusted.
        FirewallRule("allow-all-udp", net_name, "UDP", deployment,
                     sources=[sql_cidr], ports="0-65535"),
        FirewallRule("allow-all-tcp", net_name, "TCP", deployment,
                     sources=[sql_cidr], ports="0-65535"),
    ]
    if is_test:
        # Dev-mode only: expose the SQL port to the world for testing.
        resources.append(
            FirewallRule("allow-sql-port", net_name, "TCP", deployment,
                         sources=["0.0.0.0/0"],
                         ports=utils.APPLICATION_PORT))
    return {"resources": resources}