def get_vcn_stack(network_client, compartment_id, vcn_id):
    stack = {}
    vcn = get(network_client, "get_vcn", vcn_id)
    if not vcn:
        return stack

    gateways = {
        gateway.id: gateway
        for gateway in list_entities(
            network_client,
            "list_internet_gateways",
            compartment_id,
            vcn_id=vcn.id,
        )
    }
    subnets = {
        subnet.id: subnet
        for subnet in list_entities(
            network_client, "list_subnets", compartment_id, vcn_id=vcn.id
        )
    }
    stack = {
        "id": vcn.id,
        "vcn": vcn,
        "internet_gateways": gateways,
        "subnets": subnets,
    }
    return stack
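# Usage sketch (assumptions): `network_client` is an authenticated
# oci.core.VirtualNetworkClient and the OCIDs are placeholders. The returned
# stack maps gateway and subnet OCIDs to their full SDK models, e.g.:
#
#   stack = get_vcn_stack(network_client, compartment_id, vcn_id)
#   if stack:
#       for subnet_id, subnet in stack["subnets"].items():
#           print(subnet_id, subnet.display_name)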
def get_cluster_stack(
    container_engine_client, compartment_id, cluster_id, node_kwargs=None
):
    if not node_kwargs:
        node_kwargs = dict()

    stack = dict(id=None, cluster=None, node_pools=[])
    cluster = get(container_engine_client, "get_cluster", cluster_id)
    if not cluster:
        return stack

    stack["cluster"] = cluster
    if hasattr(cluster, "id"):
        stack["id"] = cluster.id

    # Get node pools (NodePoolSummaries)
    node_pool_summaries = list_entities(
        container_engine_client,
        "list_node_pools",
        compartment_id,
        cluster_id=cluster.id,
        **node_kwargs,
    )
    for summary in node_pool_summaries:
        node_pool = get(container_engine_client, "get_node_pool", summary.id)
        if node_pool:
            stack["node_pools"].append(node_pool)
    return stack
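# Usage sketch (assumptions): `container_engine_client` is an authenticated
# oci.container_engine.ContainerEngineClient and `cluster_id` is a known OCID.
# Extra filters for the node pool listing can be passed via `node_kwargs`, e.g.:
#
#   stack = get_cluster_stack(container_engine_client, compartment_id, cluster_id)
#   if stack["cluster"]:
#       print(stack["id"], len(stack["node_pools"]))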
def get_subnet_by_name(network_client, compartment_id, vcn_id, display_name, **kwargs):
    subnets = list_entities(
        network_client, "list_subnets", compartment_id, vcn_id=vcn_id, **kwargs
    )
    for subnet in subnets:
        if subnet.display_name == display_name:
            return subnet
    return None
def get_route_table_by_name(network_client, compartment_id, vcn_id, display_name, **kwargs):
    route_tables = list_entities(
        network_client, "list_route_tables", compartment_id, vcn_id=vcn_id, **kwargs
    )
    for route_table in route_tables:
        if route_table.display_name == display_name:
            return route_table
    return None
def delete_compartment_vcns(network_client, compartment_id, **kwargs):
    removed_vcns = []
    vcns = list_entities(network_client, "list_vcns", compartment_id, **kwargs)
    for vcn in vcns:
        removed_stack = delete_vcn_stack(network_client, compartment_id, vcn_id=vcn.id)
        deleted = stack_was_deleted(removed_stack)
        removed_vcns.append(deleted)
    return removed_vcns
def list_instances(compute_client, compartment_id, kwargs=None):
    if not kwargs:
        kwargs = {}
    # Default to only listing running instances
    if "lifecycle_state" not in kwargs:
        kwargs.update(dict(lifecycle_state=Instance.LIFECYCLE_STATE_RUNNING))
    return list_entities(
        compute_client, "list_instances", compartment_id=compartment_id, **kwargs
    )
def get_internet_gateway_by_name(network_client, compartment_id, vcn_id, display_name, **kwargs):
    internet_gateways = list_entities(
        network_client,
        "list_internet_gateways",
        compartment_id,
        vcn_id=vcn_id,
        **kwargs,
    )
    for internet_gateway in internet_gateways:
        if internet_gateway.display_name == display_name:
            return internet_gateway
    return None
def list_clusters(container_engine_client, compartment_id, cluster_kwargs=None):
    if not cluster_kwargs:
        cluster_kwargs = {}
    # Default to only listing active clusters
    if "lifecycle_state" not in cluster_kwargs:
        cluster_kwargs.update(dict(lifecycle_state=[Cluster.LIFECYCLE_STATE_ACTIVE]))
    # Returns ClusterSummaries
    return list_entities(
        container_engine_client,
        "list_clusters",
        compartment_id=compartment_id,
        **cluster_kwargs,
    )
def tearDown(self):
    # Delete all VCNs that carry the test display_name
    vcns = list_entities(
        self.network_client,
        "list_vcns",
        self.options["profile"]["compartment_id"],
        display_name=self.vcn_options["display_name"],
    )
    for vcn in vcns:
        deleted_stack = delete_vcn_stack(
            self.network_client,
            self.options["profile"]["compartment_id"],
            vcn=vcn,
        )
        self.assertTrue(stack_was_deleted(deleted_stack))
def get_instance_endpoints(compute_client, network_client, compartment_id, instance_id):
    vnic_attachments = list_entities(
        compute_client, "list_vnic_attachments", compartment_id, instance_id=instance_id
    )
    instance_endpoints = []
    for vnic_attach in vnic_attachments:
        vnic = get(network_client, "get_vnic", vnic_attach.vnic_id)
        endpoints = {}
        if hasattr(vnic, "public_ip") and vnic.public_ip:
            endpoints["public_ip"] = vnic.public_ip
        if hasattr(vnic, "private_ip") and vnic.private_ip:
            endpoints["private_ip"] = vnic.private_ip
        instance_endpoints.append(endpoints)
    return instance_endpoints
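# Usage sketch (assumptions): `compute_client` and `network_client` are
# authenticated oci.core clients; each entry in the returned list describes one
# VNIC attached to the instance, e.g.:
#
#   endpoints = get_instance_endpoints(
#       compute_client, network_client, compartment_id, instance.id
#   )
#   public_ips = [e["public_ip"] for e in endpoints if "public_ip" in e]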
def setup(self, resource_config=None, credentials=None):
    # Ensure we have a VCN stack ready
    vcn_stack = self._get_vcn_stack()
    if not vcn_stack:
        vcn_stack = self._new_vcn_stack()

    if not self._valid_vcn_stack(vcn_stack):
        vcn_stack = self._ensure_vcn_stack()

    if not self._valid_vcn_stack(vcn_stack):
        raise RuntimeError(
            "A valid VCN stack could not be found: {}".format(vcn_stack)
        )
    self.vcn_stack = vcn_stack

    # Find the selected subnet in the VCN
    subnet = get_subnet_in_vcn_stack(
        self.vcn_stack,
        subnet_kwargs=self.options["subnet"],
        optional_value_kwargs=["id", "display_name"],
    )
    if not subnet:
        # Create a new subnet and attach it to the vcn_stack
        create_subnet_details = prepare_details(
            CreateSubnetDetails,
            compartment_id=self.options["profile"]["compartment_id"],
            vcn_id=self.vcn_stack["id"],
            route_table_id=self.vcn_stack["vcn"].default_route_table_id,
            **self.options["subnet"],
        )
        subnet = create_subnet(
            self.network_client, create_subnet_details, self.vcn_stack["id"]
        )
        self.vcn_stack = self._ensure_vcn_stack()

    if not subnet:
        raise RuntimeError(
            "Failed to find a subnet with the name: {} in vcn: {}".format(
                self.options["subnet"]["display_name"], self.vcn_stack["vcn"].id
            )
        )

    # Available images
    available_images = list_entities(
        self.compute_client,
        "list_images",
        self.options["profile"]["compartment_id"],
        **self.options["cluster"]["node"]["image"],
    )
    if not available_images:
        raise ValueError(
            "No valid image could be found with options: {}".format(
                self.options["cluster"]["node"]["image"]
            )
        )

    if len(available_images) > 1:
        raise ValueError(
            "More than 1 image was found with options: {}".format(
                self.options["cluster"]["node"]["image"]
            )
        )

    image = available_images[0]
    cluster_details = gen_cluster_stack_details(
        self.vcn_stack["id"],
        self.vcn_stack["subnets"],
        image,
        **self.options,
    )
    cluster = get_cluster_by_name(
        self.container_engine_client,
        self.options["profile"]["compartment_id"],
        self.options["cluster"]["name"],
    )
    if not cluster:
        self._is_ready = False
        # Ensure that we don't change the state options
        cluster_stack = new_cluster_stack(
            self.container_engine_client,
            cluster_details["create_cluster"],
            cluster_details["create_node_pool"],
        )
        if valid_cluster_stack(cluster_stack):
            self.resource_id, self.cluster_stack = (
                cluster_stack["id"],
                cluster_stack,
            )
        else:
            raise ValueError(
                "The new cluster stack: {} is not valid".format(cluster_stack)
            )
    else:
        cluster_stack = get_cluster_stack(
            self.container_engine_client,
            self.options["profile"]["compartment_id"],
            cluster.id,
        )
        # Existing cluster without a node pool -> create one and attach it
        if cluster_stack["cluster"] and not cluster_stack["node_pools"]:
            cluster_details["create_node_pool"].cluster_id = cluster_stack[
                "cluster"
            ].id
            node_pool = create_node_pool(
                self.container_engine_client, cluster_details["create_node_pool"]
            )
            if node_pool:
                cluster_stack["node_pools"].append(node_pool)

        if valid_cluster_stack(cluster_stack):
            self.resource_id, self.cluster_stack = (
                cluster_stack["id"],
                cluster_stack,
            )

    if self.cluster_stack and self.resource_id:
        self._is_ready = True
def make_resource_config(
    cls,
    provider,
    provider_profile=None,
    provider_kwargs=None,
    cpu=None,
    memory=None,
    gpus=None,
):
    if not provider_profile:
        provider_profile = {}
    if not provider_kwargs:
        provider_kwargs = {}

    availability_domain = ""
    if "availability_domain" in provider_kwargs:
        availability_domain = provider_kwargs["availability_domain"]
    else:
        # Try to load it from the configuration
        availability_domain = load_from_env_or_config(
            {"instance": {"availability_domain": {}}},
            prefix=gen_config_provider_prefix((provider,)),
        )

    # TODO, load OCI environment variables
    compute_client = new_compute_client(name=provider_profile["name"])
    resource_config = {}
    available_shapes = list_entities(
        compute_client,
        "list_shapes",
        provider_profile["compartment_id"],
        availability_domain=availability_domain,
    )

    # Override the name that is assigned to the instance
    if "display_name" in provider_kwargs:
        resource_config["display_name"] = provider_kwargs["display_name"]

    # Subset selection
    if cpu:
        cpu_shapes = []
        for shape in available_shapes:
            # Either dynamic or fixed ocpu count
            if (
                hasattr(shape, "ocpu_options")
                and shape.ocpu_options
                and shape.ocpu_options.max >= cpu
                and shape.ocpu_options.min <= cpu
            ):
                # Flexible shape, requires a shape config
                shape.ocpus = cpu
                cpu_shapes.append(shape)
            else:
                if shape.ocpus >= cpu:
                    cpu_shapes.append(shape)
        available_shapes = cpu_shapes

    if memory:
        memory_shapes = []
        for shape in available_shapes:
            if (
                hasattr(shape, "memory_options")
                and shape.memory_options
                and shape.memory_options.max_in_g_bs >= memory
                and shape.memory_options.min_in_g_bs <= memory
            ):
                # Dynamic memory range
                # HACK, since the dynamic memory amount can't be set directly yet,
                # rank the flexible shapes by how much total memory would be
                # assigned to the instance
                shape.memory_in_gbs = (
                    shape.memory_options.default_per_ocpu_in_g_bs * shape.ocpus
                )
                memory_shapes.append(shape)
            else:
                if shape.memory_in_gbs >= memory:
                    memory_shapes.append(shape)
        available_shapes = memory_shapes

    if gpus:
        gpu_shapes = []
        for shape in available_shapes:
            if hasattr(shape, "gpus") and shape.gpus >= gpus:
                gpu_shapes.append(shape)
        available_shapes = gpu_shapes

    # TODO, Minimum shape available
    if available_shapes:
        # Sort on cpu and memory to select the smallest shape that satisfies the request
        minimum_shape = sorted(
            available_shapes, key=lambda shape: (shape.ocpus, shape.memory_in_gbs)
        )[0]
        # If a dynamic resource instance -> needs a shape_config
        if (
            hasattr(minimum_shape, "ocpu_options")
            and minimum_shape.ocpu_options
            and hasattr(minimum_shape, "memory_options")
            and minimum_shape.memory_options
        ):
            # Pass the shape values on to the shape config
            instance_shape_details = {}
            attributes = minimum_shape.attribute_map
            for k, v in attributes.items():
                if hasattr(InstanceShapeConfig, k):
                    instance_shape_details[k] = getattr(minimum_shape, k)
            # shape_config = InstanceShapeConfig(**instance_shape_details)
            resource_config["shape_config"] = instance_shape_details
        resource_config["shape"] = minimum_shape.shape
    return resource_config
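# Usage sketch (assumptions): the enclosing class is not shown in this excerpt,
# so `Provider` below is a placeholder, and the profile/availability domain
# values are examples. The returned dict carries the cheapest matching shape,
# plus a shape_config when a flexible shape was selected, e.g.:
#
#   config = Provider.make_resource_config(
#       "oci",
#       provider_profile={"name": "DEFAULT", "compartment_id": compartment_id},
#       provider_kwargs={"availability_domain": "Uocm:PHX-AD-1"},
#       cpu=2,
#       memory=16,
#   )
#   # e.g. {"shape": "VM.Standard.E3.Flex", "shape_config": {...}}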
def setup(self, resource_config=None, credentials=None):
    # If a shape is given in resource_config, it overrides the general options
    options = copy.deepcopy(self.options)
    if not resource_config:
        resource_config = {}
    if not credentials:
        credentials = []

    # TODO, check isinstance dict resource_config
    if "shape" in resource_config:
        options["instance"]["shape"] = resource_config["shape"]
    if "shape_config" in resource_config:
        options["instance"]["shape_config"] = resource_config["shape_config"]
    if "display_name" in resource_config:
        options["instance"]["display_name"] = resource_config["display_name"]

    if "instance_metadata" not in options:
        options["instance_metadata"] = {}

    for credential in credentials:
        if hasattr(credential, "public_key") and getattr(credential, "public_key"):
            if "ssh_authorized_keys" not in options["instance_metadata"]:
                options["instance_metadata"]["ssh_authorized_keys"] = []
            options["instance_metadata"]["ssh_authorized_keys"].append(
                getattr(credential, "public_key")
            )

    # Ensure we have a VCN stack ready
    vcn_stack = self._get_vcn_stack()
    if not vcn_stack:
        vcn_stack = self._new_vcn_stack()

    if not self._valid_vcn_stack(vcn_stack):
        vcn_stack = self._ensure_vcn_stack()

    if not self._valid_vcn_stack(vcn_stack):
        raise RuntimeError(
            "A valid VCN stack could not be found: {}".format(vcn_stack)
        )
    self.vcn_stack = vcn_stack

    # Find the selected subnet in the VCN
    subnet = get_subnet_in_vcn_stack(
        self.vcn_stack,
        subnet_kwargs=options["subnet"],
        optional_value_kwargs=["id", "display_name"],
    )
    if not subnet:
        # Create a new subnet and attach it to the vcn_stack
        create_subnet_details = prepare_details(
            CreateSubnetDetails,
            compartment_id=options["profile"]["compartment_id"],
            vcn_id=self.vcn_stack["id"],
            route_table_id=self.vcn_stack["vcn"].default_route_table_id,
            **options["subnet"],
        )
        subnet = create_subnet(
            self.network_client, create_subnet_details, self.vcn_stack["id"]
        )
        self.vcn_stack = self._ensure_vcn_stack()

    if not subnet:
        raise RuntimeError(
            "Failed to find a subnet with the name: {} in vcn: {}".format(
                options["subnet"]["display_name"], self.vcn_stack["vcn"].id
            )
        )

    # Available images and shapes
    available_images = list_entities(
        self.compute_client, "list_images", options["profile"]["compartment_id"]
    )
    available_shapes = list_entities(
        self.compute_client,
        "list_shapes",
        options["profile"]["compartment_id"],
        availability_domain=options["instance"]["availability_domain"],
    )

    instance_details = _gen_instance_stack_details(
        self.vcn_stack["vcn"].id,
        subnet.id,
        available_images,
        available_shapes,
        **options,
    )

    if "display_name" not in options["instance"]:
        raise RuntimeError("Missing required unique value to create the resource")

    instance = get_instance_by_name(
        self.compute_client,
        options["profile"]["compartment_id"],
        options["instance"]["display_name"],
        kwargs={
            "availability_domain": options["instance"]["availability_domain"]
        },
    )
    if not instance:
        self._is_ready = False
        instance = create_instance(
            self.compute_client, instance_details["launch_instance"]
        )
        if valid_instance(instance):
            self.resource_id, self.instance = instance.id, instance
        else:
            raise ValueError("The new instance: {} is not valid".format(instance))
    else:
        if valid_instance(instance):
            self.resource_id, self.instance = instance.id, instance

    if self.instance and self.resource_id:
        # The instance has been assigned a unique id
        self._is_ready = True
def delete_vcn_stack(network_client, compartment_id, vcn_id=None, vcn=None):
    if not vcn_id and not vcn:
        raise ValueError("Either vcn_id or vcn must be provided")

    if vcn_id:
        vcn = get(network_client, "get_vcn", vcn_id)

    remove_stack = {
        "id": False,
        "vcn": False,
        "subnets": {},
        "route_tables": [],
        "internet_gateways": {},
        "security_lists": [],
        "dhcp_options": [],
        "local_peering_gateways": [],
        "nat_gateways": [],
        "service_gateways": [],
    }

    if vcn:
        vcn_subnets = list_entities(
            network_client, "list_subnets", compartment_id, vcn_id=vcn.id
        )
        for subnet in vcn_subnets:
            deleted = delete(
                network_client,
                "delete_subnet",
                subnet.id,
                wait_for_states=[Subnet.LIFECYCLE_STATE_TERMINATED],
                retry_strategy=DEFAULT_RETRY_STRATEGY,
            )
            remove_stack["subnets"][subnet.id] = deleted

        # Delete all the routes (and disable the gateway target
        # if they are the default, which means that they can't be deleted)
        routes = list_entities(
            network_client,
            "list_route_tables",
            compartment_id,
            vcn_id=vcn.id,
            sort_by="TIMECREATED",
            sort_order="ASC",
        )
        # Disable routes on the default route table
        if routes:
            # Disable all routes
            for route in routes:
                update_details = UpdateRouteTableDetails(route_rules=[])
                update(
                    network_client,
                    "update_route_table",
                    route.id,
                    update_details,
                    wait_for_states=[RouteTable.LIFECYCLE_STATE_AVAILABLE],
                    retry_strategy=DEFAULT_RETRY_STRATEGY,
                )

            # Delete all non-default route tables
            if len(routes) > 1:
                for route in routes[1:]:
                    deleted = delete(
                        network_client,
                        "delete_route_table",
                        route.id,
                        wait_for_states=[RouteTable.LIFECYCLE_STATE_TERMINATED],
                        retry_strategy=DEFAULT_RETRY_STRATEGY,
                    )
                    remove_stack["route_tables"].append(deleted)

        # Delete all gateways (can't delete the default)
        gateways = list_entities(
            network_client,
            "list_internet_gateways",
            compartment_id,
            vcn_id=vcn.id,
            sort_by="TIMECREATED",
            sort_order="ASC",
        )
        for gateway in gateways:
            deleted = delete(
                network_client,
                "delete_internet_gateway",
                gateway.id,
                wait_for_states=[InternetGateway.LIFECYCLE_STATE_TERMINATED],
                retry_strategy=DEFAULT_RETRY_STRATEGY,
            )
            remove_stack["internet_gateways"][gateway.id] = deleted

        # Delete all security lists
        securities = list_entities(
            network_client,
            "list_security_lists",
            compartment_id,
            vcn_id=vcn.id,
            sort_by="TIMECREATED",
            sort_order="ASC",
        )
        # Can't delete the default security list
        if len(securities) > 1:
            for security in securities[1:]:
                deleted = delete(
                    network_client,
                    "delete_security_list",
                    security.id,
                    wait_for_states=[SecurityList.LIFECYCLE_STATE_TERMINATED],
                    retry_strategy=DEFAULT_RETRY_STRATEGY,
                )
                remove_stack["security_lists"].append(deleted)

        # Delete all DHCP options (can't delete the default)
        dhcp_options = list_entities(
            network_client,
            "list_dhcp_options",
            compartment_id,
            vcn_id=vcn.id,
            sort_by="TIMECREATED",
            sort_order="ASC",
        )
        if len(dhcp_options) > 1:
            for dhcp_option in dhcp_options[1:]:
                deleted = delete(
                    network_client,
                    "delete_dhcp_options",
                    dhcp_option.id,
                    wait_for_states=[DhcpOptions.LIFECYCLE_STATE_TERMINATED],
                    retry_strategy=DEFAULT_RETRY_STRATEGY,
                )
                remove_stack["dhcp_options"].append(deleted)

        # Delete local peering gateways
        local_peering_gateways = list_entities(
            network_client, "list_local_peering_gateways", compartment_id, vcn_id=vcn.id
        )
        for local_peering_gateway in local_peering_gateways:
            deleted = delete(
                network_client,
                "delete_local_peering_gateway",
                local_peering_gateway.id,
                wait_for_states=[LocalPeeringGateway.LIFECYCLE_STATE_TERMINATED],
                retry_strategy=DEFAULT_RETRY_STRATEGY,
            )
            remove_stack["local_peering_gateways"].append(deleted)

        # Delete all NAT gateways
        nat_gateways = list_entities(
            network_client, "list_nat_gateways", compartment_id, vcn_id=vcn.id
        )
        for gateway in nat_gateways:
            deleted = delete(
                network_client,
                "delete_nat_gateway",
                gateway.id,
                wait_for_states=[NatGateway.LIFECYCLE_STATE_TERMINATED],
                retry_strategy=DEFAULT_RETRY_STRATEGY,
            )
            remove_stack["nat_gateways"].append(deleted)

        # Delete service gateways
        service_gateways = list_entities(
            network_client, "list_service_gateways", compartment_id, vcn_id=vcn.id
        )
        for service_gateway in service_gateways:
            deleted = delete(
                network_client,
                "delete_service_gateway",
                service_gateway.id,
                wait_for_states=[ServiceGateway.LIFECYCLE_STATE_TERMINATED],
                retry_strategy=DEFAULT_RETRY_STRATEGY,
            )
            remove_stack["service_gateways"].append(deleted)

        # The delete_vcn defaults internally to succeed_on_not_found
        # https://github.com/oracle/oci-python-sdk/blob/bafa4f0d68be097568772cd3cda250e60cb61a0c/src/oci/core/virtual_network_client_composite_operations.py#L1758
        deleted = delete(
            network_client,
            "delete_vcn",
            vcn.id,
            wait_for_states=[Vcn.LIFECYCLE_STATE_TERMINATED],
            retry_strategy=DEFAULT_RETRY_STRATEGY,
        )
        remove_stack["id"] = vcn.id
        remove_stack["vcn"] = deleted
    return remove_stack
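# Usage sketch (assumptions): tearing down a whole VCN stack and checking the
# result with the module's `stack_was_deleted` helper, as the tests above do,
# e.g.:
#
#   removed = delete_vcn_stack(network_client, compartment_id, vcn_id=vcn.id)
#   if not stack_was_deleted(removed):
#       raise RuntimeError("Failed to delete VCN stack: {}".format(removed))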
def get_vcn_by_name(network_client, compartment_id, display_name, **kwargs):
    vcns = list_entities(network_client, "list_vcns", compartment_id, **kwargs)
    for vcn in vcns:
        if vcn.display_name == display_name:
            return vcn
    return None