def setUp(self):
    """Load the example cluster fixture and build the stateful set we expect
    the operator to generate for it."""
    super().setUp()
    self.cluster_dict = getExampleClusterDefinition()
    self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
    self.name = self.cluster_object.metadata.name
    self.namespace = self.cluster_object.metadata.namespace

    # Expose the pod's IP to the mongod process via the downward API.
    pod_ip_env = V1EnvVar(
        name="POD_IP",
        value_from=V1EnvVarSource(field_ref=V1ObjectFieldSelector(
            api_version="v1", field_path="status.podIP")))

    mongo_container = V1Container(
        name="mongodb",
        env=[pod_ip_env],
        command=[
            "mongod", "--replSet", self.name, "--bind_ip", "0.0.0.0",
            "--smallfiles", "--noprealloc"
        ],
        image="mongo:3.6.4",
        ports=[
            V1ContainerPort(name="mongodb",
                            container_port=27017,
                            protocol="TCP")
        ],
        volume_mounts=[
            V1VolumeMount(name="mongo-storage",
                          read_only=False,
                          mount_path="/data/db")
        ],
        resources=V1ResourceRequirements(limits={
            "cpu": "100m",
            "memory": "64Mi"
        },
                                         requests={
                                             "cpu": "100m",
                                             "memory": "64Mi"
                                         }))

    storage_claim = V1PersistentVolumeClaim(
        metadata=V1ObjectMeta(name="mongo-storage"),
        spec=V1PersistentVolumeClaimSpec(
            access_modes=["ReadWriteOnce"],
            resources=V1ResourceRequirements(requests={"storage": "30Gi"})))

    self.stateful_set = V1beta1StatefulSet(
        metadata=self._createMeta(self.name),
        spec=V1beta1StatefulSetSpec(
            replicas=3,
            service_name=self.name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    labels=KubernetesResources.createDefaultLabels(self.name)),
                spec=V1PodSpec(containers=[mongo_container])),
            volume_claim_templates=[storage_claim],
        ),
    )
def get_sidecar_containers(
        self, system_paasta_config: SystemPaastaConfig) -> List[V1Container]:
    """Return the sidecar containers (currently only hacheck) for this service."""
    registrations = " ".join(self.get_registrations())

    # s_m_j currently asserts that services are healthy in smartstack before
    # continuing a bounce. this readiness check lets us achieve the same thing
    readiness_probe: Optional[V1Probe] = None
    if system_paasta_config.get_enable_nerve_readiness_check():
        check_command = [
            system_paasta_config.get_nerve_readiness_check_script(),
        ] + self.get_registrations()
        readiness_probe = V1Probe(
            _exec=V1ExecAction(command=check_command),
            initial_delay_seconds=10,
            period_seconds=10,
        )

    # On shutdown: hadown the registrations, then wait out propagation.
    drain_lifecycle = V1Lifecycle(pre_stop=V1Handler(_exec=V1ExecAction(
        command=[
            "/bin/sh",
            "-c",
            f"/usr/bin/hadown {registrations}; sleep 31",
        ])))

    hacheck_sidecar = V1Container(
        image=system_paasta_config.get_hacheck_sidecar_image_url(),
        lifecycle=drain_lifecycle,
        name="hacheck",
        env=self.get_kubernetes_environment(),
        ports=[V1ContainerPort(container_port=6666)],
        readiness_probe=readiness_probe,
    )
    return [hacheck_sidecar]
def enable_exporter_port(cli_arguments, kube_apis,
                         ingress_controller_prerequisites,
                         ingress_controller) -> None:
    """
    Set containerPort for Prometheus Exporter.

    :param cli_arguments: context (uses "deployment-type" to pick the patch API)
    :param kube_apis: client apis
    :param ingress_controller_prerequisites: holds the target namespace
    :param ingress_controller: IC name
    :return:
    """
    namespace = ingress_controller_prerequisites.namespace
    # Use keyword arguments: the positional field order of generated k8s
    # models is not a stable API and silently breaks if fields are reordered.
    port = V1ContainerPort(container_port=9113,
                           name="prometheus",
                           protocol="TCP")
    print(
        "------------------------- Enable 9113 port in IC -----------------------------------"
    )
    body = kube_apis.apps_v1_api.read_namespaced_deployment(
        ingress_controller, namespace)
    body.spec.template.spec.containers[0].ports.append(port)
    if cli_arguments["deployment-type"] == "deployment":
        kube_apis.apps_v1_api.patch_namespaced_deployment(
            ingress_controller, namespace, body)
    else:
        kube_apis.apps_v1_api.patch_namespaced_daemon_set(
            ingress_controller, namespace, body)
    wait_until_all_pods_are_ready(kube_apis.v1, namespace)
def get_kubernetes_containers(
    self,
    docker_volumes: Sequence[DockerVolume],
    system_paasta_config: SystemPaastaConfig,
    aws_ebs_volumes: Sequence[AwsEbsVolume],
    service_namespace_config: ServiceNamespaceConfig,
) -> Sequence[V1Container]:
    """Assemble the main service container plus its sidecars for the pod spec."""
    # Give in-flight requests time to drain before the container is killed.
    graceful_stop = V1Lifecycle(pre_stop=V1Handler(_exec=V1ExecAction(
        command=["/bin/sh", "-c", "sleep 30"])))
    service_container = V1Container(
        image=self.get_docker_url(),
        command=self.get_cmd(),
        args=self.get_args(),
        env=self.get_container_env(),
        resources=self.get_resource_requirements(),
        lifecycle=graceful_stop,
        name=self.get_sanitised_deployment_name(),
        liveness_probe=self.get_liveness_probe(service_namespace_config),
        ports=[V1ContainerPort(container_port=8888)],
        volume_mounts=self.get_volume_mounts(
            docker_volumes=docker_volumes,
            aws_ebs_volumes=aws_ebs_volumes,
            persistent_volumes=self.get_persistent_volumes(),
        ),
    )
    return [service_container] + self.get_sidecar_containers(
        system_paasta_config=system_paasta_config)
def generate_delaying_proxy_deployment(concourse_cfg: ConcourseConfig):
    """Render the single-replica Deployment for the delaying proxy."""
    ensure_not_none(concourse_cfg)
    external_url = concourse_cfg.external_url()

    selector_labels = {'app': 'delaying-proxy'}
    proxy_container = V1Container(
        image='eu.gcr.io/gardener-project/cc/github-enterprise-proxy:0.1.0',
        image_pull_policy='IfNotPresent',
        name='delaying-proxy',
        ports=[V1ContainerPort(container_port=8080)],
        # Probe the serving port directly; no HTTP endpoint required.
        liveness_probe=V1Probe(
            tcp_socket=V1TCPSocketAction(port=8080),
            initial_delay_seconds=10,
            period_seconds=10,
        ),
        env=[V1EnvVar(name='CONCOURSE_URL', value=external_url)],
    )
    return V1Deployment(
        kind='Deployment',
        metadata=V1ObjectMeta(name='delaying-proxy'),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=selector_labels),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=selector_labels),
                spec=V1PodSpec(containers=[proxy_container]))))
def _create_kube_container(self,
                           op_inst,
                           shm_size=None,
                           image="romilb/nomad_client:latest"):
    """Build the V1Container for an operator instance's pod."""
    name = op_inst.guid + "-container"
    assert (isinstance(name, str))
    env_vars = op_inst.get_envs()
    assert (isinstance(env_vars, dict))
    # env_vars = merge_dicts(op_inst.get_envs(), op_inst.custom_envs)
    logger.info("Creating container with image %s, envs = %s" %
                (image, str(env_vars)))
    container_envs = [V1EnvVar(k, str(v)) for k, v in env_vars.items()]
    container_ports = [V1ContainerPort(container_port=30000, name="rpc")]

    # Elevated privileges: ptrace-based tooling needs these capabilities.
    caps = V1Capabilities()
    caps.add = ['SYS_PTRACE', 'SYS_ADMIN']
    security_context = V1SecurityContext(allow_privilege_escalation=True,
                                         privileged=True,
                                         capabilities=caps)

    # Mount /dev/shm from the 'dshm' volume when a shared-memory size was
    # requested (the volume itself is presumably declared on the pod spec
    # elsewhere — TODO confirm).
    volume_mounts = []
    if shm_size and isinstance(shm_size, int):
        volume_mounts.append(
            client.V1VolumeMount(mount_path='/dev/shm', name='dshm'))
    if not volume_mounts:
        volume_mounts = None

    return client.V1Container(name=name,
                              image=image,
                              tty=True,
                              env=container_envs,
                              ports=container_ports,
                              volume_mounts=volume_mounts,
                              security_context=security_context,
                              image_pull_policy="Always")
def get_reference_object(self) -> V1Deployment:
    """Get deployment object for outpost"""
    # Translate the controller's deployment ports into V1ContainerPort objects.
    container_ports = [
        V1ContainerPort(
            container_port=port.port,
            name=port.name,
            protocol=port.protocol.upper(),
        ) for port in self.controller.deployment_ports
    ]
    meta = self.get_object_meta(name=self.name)
    secret_name = f"authentik-outpost-{self.controller.outpost.uuid.hex}-api"
    image_prefix = CONFIG.y("outposts.docker_image_base")

    def secret_env(var_name: str, key: str) -> V1EnvVar:
        # Every outpost setting is sourced from the same API secret.
        return V1EnvVar(
            name=var_name,
            value_from=V1EnvVarSource(secret_key_ref=V1SecretKeySelector(
                name=secret_name,
                key=key,
            )),
        )

    outpost_container = V1Container(
        name=str(self.outpost.type),
        image=f"{image_prefix}-{self.outpost.type}:{__version__}",
        ports=container_ports,
        env=[
            secret_env("AUTHENTIK_HOST", "authentik_host"),
            secret_env("AUTHENTIK_TOKEN", "token"),
            secret_env("AUTHENTIK_INSECURE", "authentik_host_insecure"),
        ],
    )
    return V1Deployment(
        metadata=meta,
        spec=V1DeploymentSpec(
            replicas=self.outpost.config.kubernetes_replicas,
            selector=V1LabelSelector(match_labels=self.get_pod_meta()),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=self.get_pod_meta()),
                spec=V1PodSpec(containers=[outpost_container]),
            ),
        ),
    )
def test_batcher_custom_port():
    """E2E: sklearn predictor with batching enabled on a custom container port.

    Fires concurrent requests and asserts they are folded into one batch
    (all responses carry the same batchId).
    """
    service_name = 'isvc-sklearn-batcher-custom'
    predictor = V1beta1PredictorSpec(
        # Batch up to 32 requests or 5000ms latency, whichever comes first.
        batcher=V1beta1Batcher(
            max_batch_size=32,
            max_latency=5000,
        ),
        min_replicas=1,
        sklearn=V1beta1SKLearnSpec(
            args=["--http_port=5000"],
            storage_uri="gs://kfserving-examples/models/sklearn/1.0/model",
            resources=V1ResourceRequirements(
                requests={
                    "cpu": "100m",
                    "memory": "256Mi"
                },
                limits={
                    "cpu": "100m",
                    "memory": "256Mi"
                },
            ),
            # Container port must match the --http_port argument above.
            ports=[V1ContainerPort(container_port=5000, protocol='TCP')]),
    )
    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KSERVE_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )
    kserve_client.create(isvc)
    try:
        kserve_client.wait_isvc_ready(service_name,
                                      namespace=KSERVE_TEST_NAMESPACE)
    except RuntimeError as e:
        # Dump the knative service and pods for debugging, then re-raise.
        print(
            kserve_client.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KSERVE_TEST_NAMESPACE, "services",
                service_name + "-predictor-default"))
        pods = kserve_client.core_api.list_namespaced_pod(
            KSERVE_TEST_NAMESPACE,
            label_selector='serving.kserve.io/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            print(pod)
        raise e
    with futures.ThreadPoolExecutor(max_workers=4) as executor:
        # BUGFIX: the previous `executor.submit(lambda: ...)` captured `item`
        # late-bound, so every future could end up sending the LAST item.
        # Passing the args to submit() evaluates json.dumps(item) eagerly.
        future_res = [
            executor.submit(predict_str, service_name, json.dumps(item))
            for item in json_array
        ]
    results = [f.result()["batchId"] for f in future_res]
    # All concurrent requests should land in the same batch.
    assert (all(x == results[0] for x in results))
    kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
def enable_prometheus_port(
    cli_arguments, kube_apis, ingress_controller_prerequisites, crd_ingress_controller_with_ap
) -> None:
    """Append the 9113 Prometheus exporter port to the nginx-ingress deployment
    and wait for the rollout to complete."""
    namespace = ingress_controller_prerequisites.namespace
    # Use keyword arguments: the positional field order of generated k8s
    # models is not a stable API and silently breaks if fields are reordered.
    port = V1ContainerPort(container_port=9113,
                           name="prometheus",
                           protocol="TCP")
    print("------------------------- Enable 9113 port in IC ----------------------------")
    body = kube_apis.apps_v1_api.read_namespaced_deployment("nginx-ingress", namespace)
    body.spec.template.spec.containers[0].ports.append(port)
    kube_apis.apps_v1_api.patch_namespaced_deployment("nginx-ingress", namespace, body)
    wait_until_all_pods_are_ready(kube_apis.v1, namespace)
def __create_app_deployment(self, labels):
    """Create a single-replica Deployment for the app container."""
    # DB connection settings are injected from the shared infra config map.
    env_sources = [
        V1EnvFromSource(
            config_map_ref=V1ConfigMapEnvSource(name=INFRA_DB_CONFIG))
    ]
    app_container = V1Container(
        name=self.container_name,
        image=self.image_name,
        image_pull_policy='IfNotPresent',
        ports=[V1ContainerPort(container_port=self.container_port)],
        env_from=env_sources)
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(name=self.container_name, labels=labels),
        spec=V1PodSpec(containers=[app_container]))
    deployment = V1Deployment(
        metadata=V1ObjectMeta(name=self.container_name),
        spec=V1DeploymentSpec(replicas=1,
                              selector=V1LabelSelector(match_labels=labels),
                              template=pod_template))
    self.appsApi.create_namespaced_deployment(namespace=TODO_APP_NAMESPACE,
                                              body=deployment)
def ensure_whoami(api_apps_v1, api_core_v1, api_custom, domain):
    """Deploy the 'whoami' echo service and route it through the ingress."""
    name = 'whoami'
    port_name = 'web'
    whoami_container = V1Container(
        name=name,
        image='containous/whoami',
        ports=[V1ContainerPort(name=port_name, container_port=8000)])
    ensure_single_container_deployment(api_apps_v1, whoami_container, name,
                                       'default')
    # Service port 80 forwards to the container's 8000.
    ensure_ingress_routed_svc(api_core_v1, api_custom, domain, name, name,
                              name, 'default', port_name, 80, 8000)
def test_torchserve_grpc():
    """E2E: serve an MNIST TorchServe model over gRPC and verify one prediction."""
    service_name = "mnist-grpc"
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        pytorch=V1beta1TorchServeSpec(
            storage_uri=
            "gs://kfserving-examples/models/torchserve/image_classifier/v1",
            # Port named "h2c" (HTTP/2 cleartext) so gRPC traffic is routed —
            # presumably required by the serving layer; confirm against docs.
            ports=[
                V1ContainerPort(container_port=7070, name="h2c", protocol="TCP")
            ],
            resources=V1ResourceRequirements(
                requests={
                    "cpu": "100m",
                    "memory": "1Gi"
                },
                limits={
                    "cpu": "1",
                    "memory": "1Gi"
                },
            ),
        ),
    )
    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KSERVE_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )
    kserve_client = KServeClient(
        config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
    kserve_client.create(isvc)
    kserve_client.wait_isvc_ready(service_name, namespace=KSERVE_TEST_NAMESPACE)

    # Send a raw-bytes payload through the gRPC stub.
    with open("./data/torchserve_input.json", 'rb') as f:
        data = f.read()
    input_data = {'data': data}
    stub = grpc_stub(service_name, KSERVE_TEST_NAMESPACE)
    response = stub.Predictions(
        inference_pb2.PredictionsRequest(model_name='mnist', input=input_data))
    # The prediction comes back as the raw bytes of a JSON document.
    prediction = response.prediction.decode('utf-8')
    json_output = json.loads(prediction)
    print(json_output)
    assert (json_output["predictions"][0][0] == 2)
    kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
def __init__(self) -> None:
    """Define (but do not create) a single-replica postgres StatefulSet."""
    metadata = V1ObjectMeta(name="postgres", labels={"app": "postgres"})
    selector = V1LabelSelector(match_labels={"app": "postgres"})

    postgres_container = V1Container(
        name="postgres",
        image="postgres:14.3",
        # Trust auth keeps the database password-free.
        env=[V1EnvVar(name="POSTGRES_HOST_AUTH_METHOD", value="trust")],
        ports=[V1ContainerPort(container_port=5432, name="sql")],
        volume_mounts=[
            V1VolumeMount(name="data", mount_path="/data"),
            # Init scripts placed here run automatically on first start.
            V1VolumeMount(
                name="postgres-init", mount_path="/docker-entrypoint-initdb.d"
            ),
        ],
    )
    init_volume = V1Volume(
        name="postgres-init",
        config_map=V1ConfigMapVolumeSource(
            name="postgres-init",
        ),
    )
    pod_template = V1PodTemplateSpec(
        metadata=metadata,
        spec=V1PodSpec(containers=[postgres_container],
                       volumes=[init_volume]),
    )
    data_claim = V1PersistentVolumeClaim(
        metadata=V1ObjectMeta(name="data"),
        spec=V1PersistentVolumeClaimSpec(
            access_modes=["ReadWriteOnce"],
            resources=V1ResourceRequirements(requests={"storage": "1Gi"}),
        ),
    )
    self.stateful_set = V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=metadata,
        spec=V1StatefulSetSpec(
            service_name="postgres",
            replicas=1,
            selector=selector,
            template=pod_template,
            volume_claim_templates=[data_claim],
        ),
    )
def init_pod(namespace, pod_name, image, image_name, ports, command=None):
    """Build (but do not submit) a V1Pod with one container.

    :param ports: iterable of "hostPort:containerPort" strings
    :param command: accepted for interface compatibility; currently unused
    :return: the constructed client.V1Pod
    """
    pod = client.V1Pod()
    pod.metadata = client.V1ObjectMeta(name=pod_name, namespace=namespace)
    container_ports = []
    for port in ports:
        # Parse each "host:container" entry once instead of splitting twice.
        host_str, container_str = port.split(':')[:2]
        container_ports.append(
            V1ContainerPort(host_port=int(host_str),
                            container_port=int(container_str),
                            protocol='TCP'))
    container = client.V1Container(name=image_name,
                                   image=image,
                                   ports=container_ports)
    print(f'正在创建pod,主机名为{pod_name}')
    pod.spec = client.V1PodSpec(containers=[container], hostname=pod_name)
    return pod
def _createStatefulSet(self) -> V1beta1StatefulSet:
    """Build the expected stateful set for the example Mongo cluster."""
    # Expose the pod's IP to the mongod process via the downward API.
    pod_ip_env = V1EnvVar(
        name="POD_IP",
        value_from=V1EnvVarSource(field_ref=V1ObjectFieldSelector(
            api_version="v1", field_path="status.podIP")))
    mongo_container = V1Container(
        name="mongodb",
        env=[pod_ip_env],
        command=[
            "mongod", "--wiredTigerCacheSizeGB", "0.25", "--replSet",
            self.name, "--bind_ip", "0.0.0.0", "--smallfiles", "--noprealloc"
        ],
        image="mongo:3.6.4",
        ports=[
            V1ContainerPort(name="mongodb",
                            container_port=27017,
                            protocol="TCP")
        ],
        volume_mounts=[
            V1VolumeMount(name="mongo-storage",
                          read_only=False,
                          mount_path="/data/db")
        ],
        resources=self._createResourceLimits())
    storage_claim = V1PersistentVolumeClaim(
        metadata=V1ObjectMeta(name="mongo-storage"),
        spec=V1PersistentVolumeClaimSpec(
            access_modes=["ReadWriteOnce"],
            resources=V1ResourceRequirements(requests={"storage": "30Gi"})))
    return V1beta1StatefulSet(
        metadata=self._createMeta(self.name),
        spec=V1beta1StatefulSetSpec(
            replicas=3,
            service_name=self.name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    labels=KubernetesResources.createDefaultLabels(self.name)),
                spec=V1PodSpec(containers=[mongo_container])),
            volume_claim_templates=[storage_claim],
        ),
    )
def get_kubernetes_containers(
    self,
    volumes: Sequence[DockerVolume],
    system_paasta_config: SystemPaastaConfig,
) -> Sequence[V1Container]:
    """Assemble the main service container plus its sidecars."""
    # Give in-flight requests time to drain before the container is killed.
    graceful_stop = V1Lifecycle(pre_stop=V1Handler(_exec=V1ExecAction(
        command=["/bin/sh", "-c", "sleep 30"])))
    health_check = V1Probe(
        failure_threshold=10,
        http_get=V1HTTPGetAction(path="/status", port=8888),
        initial_delay_seconds=15,
        period_seconds=10,
        timeout_seconds=5,
    )
    service_container = V1Container(
        image=self.get_docker_url(),
        command=self.get_cmd(),
        args=self.get_args(),
        env=self.get_container_env(),
        lifecycle=graceful_stop,
        name="{service}-{instance}".format(
            service=self.get_sanitised_service_name(),
            instance=self.get_sanitised_instance_name(),
        ),
        liveness_probe=health_check,
        ports=[V1ContainerPort(container_port=8888)],
        volume_mounts=self.get_volume_mounts(volumes=volumes),
    )
    return [service_container] + self.get_sidecar_containers(
        system_paasta_config=system_paasta_config)
def send_create_pod_request(self,
                            namespace,
                            name,
                            image,
                            args,
                            ports=None,
                            requests=None,
                            limits=None,
                            probe="",
                            probe_idelay=3,
                            probe_period=3,
                            node_selector=None,
                            node_name=None,
                            labels=None):
    """Create a never-restarting pod with one container.

    :param ports: mapping of container_port -> port name
    :param requests/limits: resource dicts passed to V1ResourceRequirements
    :param probe: optional exec liveness probe, given as a whitespace-separated
        command line; empty string disables the probe
    :return: the created pod, as returned by create_namespaced_pod
    """
    # BUGFIX: the previous defaults `ports={}, requests={}, limits={}` were
    # mutable default arguments, shared across all calls; use None sentinels.
    ports = {} if ports is None else ports
    requests = {} if requests is None else requests
    limits = {} if limits is None else limits

    metadata = V1ObjectMeta(name=name, namespace=namespace, labels=labels)
    port_objects = [
        V1ContainerPort(container_port=p, name=n) for p, n in ports.items()
    ]
    probe_object = None
    if probe:
        probe_action = V1ExecAction(re.split(r" +", probe))
        probe_object = V1Probe(probe_action,
                               initial_delay_seconds=probe_idelay,
                               period_seconds=probe_period)
    container = V1Container(args=args.split(),
                            image=image,
                            name=name,
                            ports=port_objects,
                            resources=V1ResourceRequirements(
                                requests=requests, limits=limits),
                            liveness_probe=probe_object)
    spec = V1PodSpec(containers=[container],
                     node_selector=node_selector,
                     node_name=node_name,
                     restart_policy="Never")
    # {"kubernetes.io/hostname": "10.19.137.148"})
    pod = V1Pod(spec=spec, metadata=metadata)
    return self.apiV1.create_namespaced_pod(namespace, body=pod)
def create_ports(ports: dict) -> List[V1ContainerPort]:
    """Translate a {port: host_port} mapping into V1ContainerPort objects."""
    container_ports = []
    for port, host_port in ports.items():
        # _parse_port yields the keyword arguments for one V1ContainerPort.
        container_ports.append(V1ContainerPort(**_parse_port(port, host_port)))
    return container_ports
def ensure_traefik(api_core_v1, api_ext_v1_beta1, api_apps_v1, api_custom,
                   api_rbac_auth_v1_b1, admin_email, domain, static_ip,
                   oauth_client_id, oauth_client_secret, oauth_domain,
                   oauth_secret):
    """Install the traefik v2 ingress stack into the 'default' namespace.

    Creates (idempotently, via the ensure_* helpers): the traefik CRDs, RBAC
    role + binding, the LoadBalancer service on the given static IP, the
    traefik deployment (with ACME TLS for admin_email), the
    traefik-forward-auth OAuth deployment/service/route, and the whoami
    demo service.
    """
    # --- CRDs required by traefik's kubernetes provider ---------------------
    ensure_crd(api=api_ext_v1_beta1,
               name='ingressroutes.traefik.containo.us',
               group='traefik.containo.us',
               kind='IngressRoute',
               plural='ingressroutes',
               singular='ingressroute',
               scope='Namespaced')
    ensure_crd(api=api_ext_v1_beta1,
               name='ingressroutetcps.traefik.containo.us',
               group='traefik.containo.us',
               kind='IngressRouteTCP',
               plural='ingressroutetcps',
               singular='ingressroutetcp',
               scope='Namespaced')
    ensure_crd(api=api_ext_v1_beta1,
               name='middlewares.traefik.containo.us',
               group='traefik.containo.us',
               kind='Middleware',
               plural='middlewares',
               singular='middleware',
               scope='Namespaced')
    ensure_crd(api=api_ext_v1_beta1,
               name='tlsoptions.traefik.containo.us',
               group='traefik.containo.us',
               kind='TLSOption',
               plural='tlsoptions',
               singular='tlsoption',
               scope='Namespaced')
    # --- RBAC: cluster role + binding for the traefik service account ------
    ensure_role(api=api_rbac_auth_v1_b1,
                role=V1ClusterRole(
                    api_version='rbac.authorization.k8s.io/v1beta1',
                    kind='ClusterRole',
                    metadata=V1ObjectMeta(name='traefik-ingress-controller'),
                    rules=[
                        V1PolicyRule(
                            api_groups=[''],
                            resources=['services', 'endpoints', 'secrets'],
                            verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['extensions'],
                                     resources=['ingresses'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['extensions'],
                                     resources=['ingresses/status'],
                                     verbs=['update']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['middlewares'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['ingressroutes'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['ingressroutetcps'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['tlsoptions'],
                                     verbs=['get', 'list', 'watch'])
                    ]),
                name='traefik-ingress-controller')
    ensure_role_binding(
        api=api_rbac_auth_v1_b1,
        role_binding=V1ClusterRoleBinding(
            api_version='rbac.authorization.k8s.io/v1beta1',
            kind='ClusterRoleBinding',
            metadata=V1ObjectMeta(name='traefik-ingress-controller'),
            role_ref=V1RoleRef(api_group='rbac.authorization.k8s.io',
                               kind='ClusterRole',
                               name='traefik-ingress-controller'),
            subjects=[
                V1Subject(kind='ServiceAccount',
                          name='traefik-ingress-controller',
                          namespace='default')
            ]),
        name='traefik-ingress-controller')
    # --- LoadBalancer service on the reserved static IP (HTTPS only) -------
    ensure_service(
        api=api_core_v1,
        service=V1Service(
            api_version="v1",
            metadata=V1ObjectMeta(name='traefik'),
            spec=V1ServiceSpec(
                type='LoadBalancer',
                load_balancer_ip=static_ip,
                ports=[
                    # V1ServicePort(
                    #     protocol='TCP',
                    #     port=80,
                    #     name='web'
                    # ),
                    V1ServicePort(protocol='TCP', port=443, name='websecure'),
                ],
                selector={'app': 'traefik'})),
        name='traefik',
        namespace='default')
    ensure_service_account(
        api=api_core_v1,
        account=V1ServiceAccount(
            api_version="v1",
            metadata=V1ObjectMeta(name='traefik-ingress-controller'),
        ),
        name='traefik-ingress-controller',
        namespace='default')
    # --- traefik deployment: CRD provider + ACME TLS-challenge resolver ----
    ensure_deployment(
        api=api_apps_v1,
        deployment=V1Deployment(
            api_version="apps/v1",
            metadata=V1ObjectMeta(name='traefik', labels={'app': 'traefik'}),
            spec=V1DeploymentSpec(
                replicas=1,
                selector=V1LabelSelector(match_labels={'app': 'traefik'}),
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(name='traefik',
                                          labels={'app': 'traefik'}),
                    spec=V1PodSpec(
                        service_account_name='traefik-ingress-controller',
                        containers=[
                            V1Container(
                                name='traefik',
                                image='traefik:v2.0',
                                args=[
                                    '--api.insecure',
                                    '--accesslog',
                                    '--entrypoints.web.Address=:80',
                                    '--entrypoints.websecure.Address=:443',
                                    '--providers.kubernetescrd',
                                    '--certificatesresolvers.default.acme.tlschallenge',
                                    f'--certificatesresolvers.default.acme.email={admin_email}',
                                    '--certificatesresolvers.default.acme.storage=acme.json',
                                    # '--certificatesresolvers.default.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory',
                                ],
                                ports=[
                                    V1ContainerPort(name='web',
                                                    container_port=8000),
                                    V1ContainerPort(name='websecure',
                                                    container_port=4443),
                                    V1ContainerPort(name='admin',
                                                    container_port=8080),
                                ])
                        ])))),
        name='traefik',
        namespace='default')
    # --- traefik-forward-auth: Google OAuth gate in front of the routes ----
    ensure_deployment(
        api=api_apps_v1,
        deployment=V1Deployment(
            api_version="apps/v1",
            metadata=V1ObjectMeta(name='traefik-forward-auth',
                                  labels={'app': 'traefik-forward-auth'}),
            spec=V1DeploymentSpec(
                replicas=1,
                selector=V1LabelSelector(
                    match_labels={'app': 'traefik-forward-auth'}),
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(
                        name='traefik-forward-auth',
                        labels={'app': 'traefik-forward-auth'}),
                    spec=V1PodSpec(containers=[
                        V1Container(
                            name='traefik-forward-auth',
                            image='thomseddon/traefik-forward-auth:2',
                            ports=[
                                V1ContainerPort(name='auth',
                                                container_port=4181),
                            ],
                            env=[
                                V1EnvVar(name='PROVIDERS_GOOGLE_CLIENT_ID',
                                         value=oauth_client_id),
                                # V1EnvVar(name='LOG_LEVEL', value='trace'),
                                V1EnvVar(name='PROVIDERS_GOOGLE_CLIENT_SECRET',
                                         value=oauth_client_secret),
                                V1EnvVar(name='SECRET', value=oauth_secret),
                                V1EnvVar(name='DOMAIN', value=oauth_domain),
                                V1EnvVar(name='COOKIE_DOMAIN', value=domain),
                                V1EnvVar(name='AUTH_HOST',
                                         value=f'auth.{domain}'),
                            ])
                    ])))),
        name='traefik-forward-auth',
        namespace='default')
    # IngressRoute for the auth host itself, protected by its own middleware.
    ensure_custom_object(api=api_custom,
                         custom_object={
                             'apiVersion': 'traefik.containo.us/v1alpha1',
                             'kind': 'IngressRoute',
                             'metadata': {
                                 'name': 'traefik-forward-auth',
                             },
                             'spec': {
                                 'entryPoints': ['websecure'],
                                 'routes': [{
                                     'match': f'Host(`auth.{domain}`)',
                                     'kind': 'Rule',
                                     'services': [{
                                         'name': 'traefik-forward-auth',
                                         'port': 4181
                                     }],
                                     'middlewares': [{
                                         'name': 'traefik-forward-auth'
                                     }]
                                 }],
                                 'tls': {
                                     'certResolver': 'default'
                                 }
                             }
                         },
                         group='traefik.containo.us',
                         plural='ingressroutes',
                         version='v1alpha1',
                         name='traefik-forward-auth',
                         namespace='default')
    # Middleware delegating auth decisions to the forward-auth service.
    ensure_custom_object(api=api_custom,
                         custom_object={
                             'apiVersion': 'traefik.containo.us/v1alpha1',
                             'kind': 'Middleware',
                             'metadata': {
                                 'name': 'traefik-forward-auth',
                             },
                             'spec': {
                                 'forwardAuth': {
                                     'address':
                                     'http://traefik-forward-auth:4181',
                                     'authResponseHeaders':
                                     ['X-Forwarded-User'],
                                 }
                             }
                         },
                         group='traefik.containo.us',
                         plural='middlewares',
                         version='v1alpha1',
                         name='traefik-forward-auth',
                         namespace='default')
    ensure_service(api=api_core_v1,
                   service=V1Service(
                       api_version="v1",
                       metadata=V1ObjectMeta(name='traefik-forward-auth'),
                       spec=V1ServiceSpec(
                           type='ClusterIP',
                           ports=[
                               V1ServicePort(protocol='TCP',
                                             port=4181,
                                             name='auth'),
                           ],
                           selector={'app': 'traefik-forward-auth'})),
                   name='traefik-forward-auth',
                   namespace='default')
    # Demo service to verify the routed/authenticated path end to end.
    ensure_whoami(api_apps_v1, api_core_v1, api_custom, domain)
def test_get_kubernetes_containers(self):
    """get_kubernetes_containers() should return the main service container
    followed by whatever get_sidecar_containers() yields."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_docker_url',
        autospec=True,
    ) as mock_get_docker_url, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_cmd',
        autospec=True,
    ) as mock_get_cmd, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_args',
        autospec=True,
    ) as mock_get_args, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_container_env',
        autospec=True,
    ) as mock_get_container_env, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_service_name',
        autospec=True,
        return_value='kurupt',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_instance_name',
        autospec=True,
        return_value='fm',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volume_mounts',
        autospec=True,
    ) as mock_get_volume_mounts, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sidecar_containers',
        autospec=True,
        return_value=['mock_sidecar'],
    ):
        mock_system_config = mock.Mock()
        mock_docker_volumes: Sequence[DockerVolume] = []
        mock_aws_ebs_volumes: Sequence[AwsEbsVolume] = []
        # Expected container mirrors get_kubernetes_containers() exactly:
        # mocked getters feed straight through, name is service-instance,
        # pre-stop sleep drains requests, liveness probes /status on 8888.
        expected = [
            V1Container(
                args=mock_get_args.return_value,
                command=mock_get_cmd.return_value,
                env=mock_get_container_env.return_value,
                image=mock_get_docker_url.return_value,
                lifecycle=V1Lifecycle(pre_stop=V1Handler(
                    _exec=V1ExecAction(command=[
                        '/bin/sh',
                        '-c',
                        'sleep 30',
                    ], ), ), ),
                liveness_probe=V1Probe(
                    failure_threshold=10,
                    http_get=V1HTTPGetAction(
                        path='/status',
                        port=8888,
                    ),
                    initial_delay_seconds=15,
                    period_seconds=10,
                    timeout_seconds=5,
                ),
                name='kurupt-fm',
                ports=[V1ContainerPort(container_port=8888)],
                volume_mounts=mock_get_volume_mounts.return_value,
            ),
            'mock_sidecar',
        ]
        assert self.deployment.get_kubernetes_containers(
            docker_volumes=mock_docker_volumes,
            system_paasta_config=mock_system_config,
            aws_ebs_volumes=mock_aws_ebs_volumes,
        ) == expected
def test_get_sidecar_containers(self):
    """The hacheck sidecar should gain a readiness probe only when the nerve
    readiness check is enabled in the system config."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_registrations',
        autospec=True,
        return_value=['universal.credit'],
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_environment',
        autospec=True,
        return_value={},
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name',
        autospec=True,
        return_value='sane-name',
    ):
        # Case 1: readiness check disabled -> no readiness_probe on the sidecar.
        mock_system_config = mock.Mock(
            get_enable_nerve_readiness_check=mock.Mock(return_value=False),
            get_nerve_readiness_check_script=mock.Mock(
                return_value='/nail/blah.sh'),
            get_hacheck_sidecar_image_url=mock.Mock(
                return_value='some-docker-image'),
        )
        ret = self.deployment.get_sidecar_containers(mock_system_config)
        expected = [
            V1Container(
                env={},
                image='some-docker-image',
                lifecycle=V1Lifecycle(pre_stop=V1Handler(
                    _exec=V1ExecAction(command=[
                        '/bin/sh',
                        '-c',
                        '/usr/bin/hadown '
                        'universal.credit; sleep '
                        '31',
                    ], ), ), ),
                name='hacheck',
                ports=[V1ContainerPort(container_port=6666)],
            ),
        ]
        assert ret == expected
        # Case 2: readiness check enabled -> exec probe runs the configured
        # script against the registrations.
        mock_system_config = mock.Mock(
            get_enable_nerve_readiness_check=mock.Mock(return_value=True),
            get_nerve_readiness_check_script=mock.Mock(
                return_value='/nail/blah.sh'),
            get_hacheck_sidecar_image_url=mock.Mock(
                return_value='some-docker-image'),
        )
        ret = self.deployment.get_sidecar_containers(mock_system_config)
        expected = [
            V1Container(
                env={},
                image='some-docker-image',
                lifecycle=V1Lifecycle(pre_stop=V1Handler(
                    _exec=V1ExecAction(command=[
                        '/bin/sh',
                        '-c',
                        '/usr/bin/hadown '
                        'universal.credit; sleep '
                        '31',
                    ], ), ), ),
                name='hacheck',
                ports=[V1ContainerPort(container_port=6666)],
                readiness_probe=V1Probe(
                    _exec=V1ExecAction(
                        command=['/nail/blah.sh', 'universal.credit'], ),
                    initial_delay_seconds=10,
                    period_seconds=10,
                ),
            ),
        ]
        assert ret == expected
def test_triton_runtime():
    """E2E: torchscript model on the kserve-tritonserver runtime behind a
    gRPC-v2 transformer; verifies one cifar10 prediction."""
    service_name = 'isvc-triton-runtime'
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        model=V1beta1ModelSpec(
            model_format=V1beta1ModelFormat(name="pytorch", ),
            runtime="kserve-tritonserver",
            storage_uri='gs://kfserving-examples/models/torchscript',
            # Port named "h2c" (HTTP/2 cleartext) for the gRPC endpoint.
            ports=[
                V1ContainerPort(name="h2c", protocol="TCP",
                                container_port=9000)
            ]))
    transformer = V1beta1TransformerSpec(
        min_replicas=1,
        containers=[
            V1Container(
                # NOTE(review): assumes PULL_BASE_SHA is set by CI; if it is
                # missing, this string concatenation raises TypeError.
                image=
                '809251082950.dkr.ecr.us-west-2.amazonaws.com/kserve/image-transformer:'
                + os.environ.get("PULL_BASE_SHA"),
                name='kserve-container',
                resources=V1ResourceRequirements(requests={
                    'cpu': '100m',
                    'memory': '1Gi'
                },
                                                 limits={
                                                     'cpu': '100m',
                                                     'memory': '1Gi'
                                                 }),
                args=["--model_name", "cifar10", "--protocol", "grpc-v2"])
        ])
    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KSERVE_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor,
                                         transformer=transformer))
    kserve_client = KServeClient(
        config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
    kserve_client.create(isvc)
    try:
        kserve_client.wait_isvc_ready(service_name,
                                      namespace=KSERVE_TEST_NAMESPACE)
    except RuntimeError as e:
        # Dump the knative service and deployments for debugging, then re-raise.
        print(
            kserve_client.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KSERVE_TEST_NAMESPACE, "services",
                service_name + "-predictor-default"))
        deployments = kserve_client.app_api. \
            list_namespaced_deployment(KSERVE_TEST_NAMESPACE,
                                       label_selector='serving.kserve.io/'
                                       'inferenceservice={}'.
                                       format(service_name))
        for deployment in deployments.items:
            print(deployment)
        raise e
    res = predict(service_name, "./data/image.json", model_name='cifar10')
    assert (np.argmax(res.get("predictions")[0]) == 5)
    kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
def generate_secrets_server_deployment(
    secrets_server_config: SecretsServerConfig,
):
    """Render the Deployment serving concourse secrets over plain HTTP (8080)
    inside the cluster, using the configured service name for its labels."""
    service_name = secrets_server_config.service_name()
    secret_name = secrets_server_config.secrets().concourse_secret_name()
    # We need to ensure that the labels and selectors match for both the deployment and the service,
    # therefore we base them on the configured service name.
    labels = {'app': service_name}
    return V1Deployment(
        kind='Deployment',
        metadata=V1ObjectMeta(name=service_name, labels=labels),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=labels),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=labels),
                spec=V1PodSpec(containers=[
                    V1Container(
                        image='eu.gcr.io/gardener-project/cc/job-image:latest',
                        image_pull_policy='IfNotPresent',
                        name='secrets-server',
                        resources=V1ResourceRequirements(
                            requests={
                                'cpu': '50m',
                                'memory': '50Mi'
                            },
                            limits={
                                'cpu': '50m',
                                'memory': '50Mi'
                            },
                        ),
                        # Inline shell script: stage secrets under /secrets
                        # and expose them via python's builtin http server.
                        command=['bash'],
                        args=[
                            '-c',
                            '''
# chdir to secrets dir; create if absent
mkdir -p /secrets && cd /secrets
# make Kubernetes serviceaccount secrets available by default
cp -r /var/run/secrets/kubernetes.io/serviceaccount serviceaccount
# store Kubernetes service endpoint env as file for consumer
env | grep KUBERNETES_SERVICE > serviceaccount/env
# launch secrets server serving secrets dir contents on all IFs
python3 -m http.server 8080
'''
                        ],
                        ports=[
                            V1ContainerPort(container_port=8080),
                        ],
                        liveness_probe=V1Probe(
                            tcp_socket=V1TCPSocketAction(port=8080),
                            initial_delay_seconds=10,
                            period_seconds=10,
                        ),
                        volume_mounts=[
                            V1VolumeMount(
                                name=secret_name,
                                mount_path='/secrets/concourse-secrets',
                                read_only=True,
                            ),
                        ],
                    ),
                ],
                               # Pin to the control-plane worker group.
                               node_selector={
                                   "worker.garden.sapcloud.io/group":
                                   "cc-control"
                               },
                               volumes=[
                                   V1Volume(name=secret_name,
                                            secret=V1SecretVolumeSource(
                                                secret_name=secret_name, ))
                               ]))))
def test_sdk_e2e():
    """E2E: run a 1x1x1 scheduler/server/worker MXJob training MNIST and
    require it to succeed."""
    job_name = "mxjob-mnist-ci-test"

    def _mx_container(command=None, args=None) -> V1Container:
        # All three roles share the same image and RPC port.
        return V1Container(
            name="mxnet",
            image="docker.io/johnugeorge/mxnet:1.9.1_cpu_py3",
            command=command,
            args=args,
            ports=[V1ContainerPort(container_port=9991, name="mxjob-port")])

    def _replica(container: V1Container) -> V1ReplicaSpec:
        # Single replica, never restarted.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="Never",
            template=V1PodTemplateSpec(
                spec=V1PodSpec(containers=[container])))

    worker = _replica(
        _mx_container(
            command=["/usr/local/bin/python3"],
            args=[
                "incubator-mxnet/example/image-classification/train_mnist.py",
                "--num-epochs", "5", "--num-examples", "1000", "--kv-store",
                "dist_sync"
            ]))
    server = _replica(_mx_container())
    scheduler = _replica(_mx_container())

    mxjob = KubeflowOrgV1MXJob(
        api_version="kubeflow.org/v1",
        kind="MXJob",
        metadata=V1ObjectMeta(name=job_name, namespace=SDK_TEST_NAMESPACE),
        spec=KubeflowOrgV1MXJobSpec(
            job_mode="MXTrain",
            run_policy=V1RunPolicy(clean_pod_policy="None"),
            mx_replica_specs={
                "Scheduler": scheduler,
                "Server": server,
                "Worker": worker,
            }))

    MX_CLIENT.create(mxjob)
    MX_CLIENT.wait_for_job(job_name, namespace=SDK_TEST_NAMESPACE)
    if not MX_CLIENT.is_job_succeeded(job_name, namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The MXJob is not succeeded.")
    MX_CLIENT.get_logs(job_name, namespace=SDK_TEST_NAMESPACE, master=False)
    MX_CLIENT.delete(job_name, namespace=SDK_TEST_NAMESPACE)
def _make_pod(
    name: Optional[str] = None,
    generate_name: Optional[str] = None,
    cmd: Optional[List[str]] = None,
    port: Optional[int] = None,
    image: Optional[str] = None,
    run_as_uid: Optional[int] = None,
    run_as_gid: Optional[int] = None,
    run_privileged: bool = False,
    allow_privilege_escalation: bool = True,
    env: Optional[Dict[str, str]] = None,
    labels: Optional[Dict[str, str]] = None,
    service_account: Optional[str] = None,
    priority_class_name: Optional[str] = None,
) -> V1Pod:
    """Assemble a V1Pod running the "orbit-runner" notebook container.

    Fix: ``priority_class_name`` used to be assigned to the pod spec twice
    (once right after the spec was created and again just before returning);
    the redundant second assignment has been removed.

    :param name: explicit pod name (use either this or ``generate_name``)
    :param generate_name: prefix for a server-generated pod name
    :param cmd: container command line, passed as container ``args``
    :param port: container port exposed under the name "notebook-port"
    :param image: container image reference
    :param run_as_uid: UID for the container security context, if any
    :param run_as_gid: GID for the container security context, if any
    :param run_privileged: run the container in privileged mode
    :param allow_privilege_escalation: when False, explicitly forbid escalation
    :param env: plain name/value environment variables for the container
    :param labels: pod labels; copied so the caller's dict is never mutated
    :param service_account: service account name; when None the pod is built
        with ``automount_service_account_token = False``
    :param priority_class_name: optional pod priority class
    :return: fully populated V1Pod object (not submitted to any API here)
    """
    pod = V1Pod()
    pod.kind = "Pod"
    pod.api_version = "v1"
    pod.metadata = V1ObjectMeta(
        name=name, generate_name=generate_name, labels=(labels or {}).copy()
    )

    pod.spec = V1PodSpec(containers=[])
    pod.spec.restart_policy = "OnFailure"
    if priority_class_name:
        pod.spec.priority_class_name = priority_class_name

    pod_security_context = V1PodSecurityContext()
    # Only clutter pod spec with actual content
    if not all([e is None for e in pod_security_context.to_dict().values()]):
        pod.spec.security_context = pod_security_context

    container_security_context = V1SecurityContext()
    if run_as_uid is not None:
        container_security_context.run_as_user = int(run_as_uid)
    if run_as_gid is not None:
        container_security_context.run_as_group = int(run_as_gid)
    if run_privileged:
        container_security_context.privileged = True
    if not allow_privilege_escalation:
        container_security_context.allow_privilege_escalation = False
    # Only clutter container spec with actual content
    if all([e is None for e in container_security_context.to_dict().values()]):
        container_security_context = None

    prepared_env = [V1EnvVar(name=k, value=v) for k, v in (env or {}).items()]

    notebook_container = V1Container(
        name="orbit-runner",
        image=image,
        ports=[V1ContainerPort(name="notebook-port", container_port=port)],
        env=prepared_env,
        args=cmd,
        resources=V1ResourceRequirements(),
        security_context=container_security_context,
    )

    if service_account is None:
        # This makes sure that we don't accidentally give access to the whole
        # kubernetes API to the users in the spawned pods.
        pod.spec.automount_service_account_token = False
    else:
        pod.spec.service_account_name = service_account

    # Requests/limits intentionally left empty; callers fill them in later.
    notebook_container.resources.requests = {}
    notebook_container.resources.limits = {}

    pod.spec.containers.append(notebook_container)
    pod.spec.volumes = []
    return pod
def __init__(self, name, container_port, port, protocol="TCP"):
    """Pair a service port with its matching container port.

    :param name: shared port name applied to both objects
    :param container_port: port the pod's container listens on
    :param port: port exposed by the service
    :param protocol: service protocol, "TCP" by default
    """
    self.pod_port = V1ContainerPort(name=name, container_port=container_port)
    self.service_port = V1ServicePort(name=name, port=port, protocol=protocol)
def manage_deployment(self, logger):
    """Create or update the bookbag Deployment for this workshop.

    If the Deployment already exists, patch the AUTH_USERNAME,
    AUTH_PASSWORD and WORKSHOP_VARS env vars when they drift from the
    desired values. If it does not exist (404), create it from scratch,
    optionally wiring up an OpenShift image-stream trigger.

    BUG FIX: the drift branches previously assigned a misspelled
    ``updated_required`` variable while ``update_required`` was checked,
    so detected drift was never written back via
    ``replace_namespaced_deployment``. The misspellings are corrected.

    :param logger: kopf/operator logger used for informational messages
    :return: the (possibly newly created) Deployment, or the read one
    """
    # Canonical, key-sorted serialization so equality comparison is stable.
    serialized_vars = json.dumps(self.vars, sort_keys=True, separators=(',', ':'))
    create_deployment = False
    try:
        deployment = apps_v1_api.read_namespaced_deployment(
            self.deployment_name, self.deployment_namespace)
        update_required = False
        for envvar in deployment.spec.template.spec.containers[0].env:
            if envvar.name == 'AUTH_USERNAME':
                if envvar.value != self.auth_username:
                    envvar.value = self.auth_username
                    update_required = True
            elif envvar.name == 'AUTH_PASSWORD':
                if envvar.value != self.auth_password:
                    envvar.value = self.auth_password
                    update_required = True
            elif envvar.name == 'WORKSHOP_VARS':
                if envvar.value != serialized_vars:
                    envvar.value = serialized_vars
                    update_required = True
        if update_required:
            apps_v1_api.replace_namespaced_deployment(
                self.deployment_name, self.deployment_namespace, deployment)
    except ApiException as e:
        if e.status == 404:
            # Not found: fall through to creation below.
            create_deployment = True
        else:
            raise

    if create_deployment:
        logger.info(
            f"Creating Deployment {self.deployment_name} in {self.deployment_namespace}"
        )
        deployment = V1Deployment(
            metadata=V1ObjectMeta(
                annotations={
                    owner_annotation: self.make_owner_annotation(),
                },
                labels={owner_uid_label: self.uid},
                name=self.deployment_name,
            ),
            spec=V1DeploymentSpec(
                replicas=1,
                selector=V1LabelSelector(
                    match_labels={"name": self.deployment_name}),
                # Recreate: bookbag keeps no state worth a rolling update.
                strategy=V1DeploymentStrategy(type="Recreate"),
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(
                        labels={"name": self.deployment_name}),
                    spec=V1PodSpec(
                        containers=[
                            V1Container(
                                name="bookbag",
                                env=[
                                    V1EnvVar(
                                        name="APPLICATION_NAME",
                                        value=self.deployment_name,
                                    ),
                                    V1EnvVar(
                                        name="AUTH_USERNAME",
                                        value=self.auth_username,
                                    ),
                                    V1EnvVar(
                                        name="AUTH_PASSWORD",
                                        value=self.auth_password,
                                    ),
                                    # The value-less vars below are defined so
                                    # the image's entrypoint can resolve them.
                                    V1EnvVar(name="CLUSTER_SUBDOMAIN", ),
                                    V1EnvVar(
                                        name="OAUTH_SERVICE_ACCOUNT",
                                        value=self.deployment_name,
                                    ),
                                    V1EnvVar(
                                        name="WORKSHOP_VARS",
                                        value=serialized_vars,
                                    ),
                                    V1EnvVar(name="DOWNLOAD_URL", ),
                                    V1EnvVar(name="WORKSHOP_FILE", ),
                                    V1EnvVar(name="OC_VERSION", ),
                                    V1EnvVar(name="ODO_VERSION", ),
                                    V1EnvVar(name="KUBECTL_VERSION", ),
                                ],
                                image=self.get_image(),
                                image_pull_policy="Always",
                                ports=[
                                    V1ContainerPort(container_port=10080)
                                ],
                            )
                        ],
                        service_account_name=self.deployment_name,
                    )),
            ),
        )
        if self.image_stream_name:
            # OpenShift image trigger: retag of <stream>:latest redeploys the
            # "bookbag" container automatically.
            deployment.metadata.annotations[
                'image.openshift.io/triggers'] = json.dumps([{
                    "fieldPath":
                    'spec.template.spec.containers[?(@.name=="bookbag")].image',
                    "from": {
                        "kind": "ImageStreamTag",
                        "name": f"{self.image_stream_name}:latest",
                        "namespace": self.image_stream_namespace,
                    },
                }])
        deployment = apps_v1_api.create_namespaced_deployment(
            self.deployment_namespace, deployment)
    return deployment
def create_nifi_instances(api_apps_v1, api_core_v1, api_custom, domain):
    """Provision every NifiInstance currently pending creation.

    For each instance in state PENDING_CREATE: deploy NiFi (StatefulSet +
    ingress-routed service), then — depending on the instance's flags —
    MongoDB with mongo-express, Kafka with Zookeeper, Prometheus, Jupyter,
    and finally any custom instance types attached to it. The instance ends
    in state RUNNING on success, or stays CREATE_FAILED if any step raises.

    :param api_apps_v1: Kubernetes AppsV1Api client
    :param api_core_v1: Kubernetes CoreV1Api client
    :param api_custom: Kubernetes CustomObjectsApi client (for ingress routes)
    :param domain: base DNS domain used to build ingress hostnames
    """
    for instance in NifiInstance.objects.filter(state='PENDING_CREATE'):
        instance.state = 'CREATING'
        instance.save()
        port_name = 'web'
        # Pessimistically assume failure; only flip to RUNNING at the very
        # end. The finally-block persists whichever state was reached.
        instance.state = 'CREATE_FAILED'
        try:
            namespace = 'default'
            if instance.namespace is not None and instance.namespace != 'default':
                namespace = instance.namespace
                ensure_namespace(api_core_v1, namespace)
            else:
                instance.namespace = 'default'
            # deploy nifi
            # Tuples are (volume name, mount path, size, storage class), one
            # per NiFi repository.
            nifi_volume_paths = [
                ('db-repo', '/opt/nifi/nifi-current/database_repository',
                 '20Gi', 'standard'),
                ('flowfile-repo', '/opt/nifi/nifi-current/flowfile_repository',
                 '20Gi', 'standard'),
                ('provenance-repo', '/opt/nifi/nifi-current/provenance_repository',
                 '20Gi', 'standard'),
                ('content-repo', '/opt/nifi/nifi-current/content_repository',
                 '20Gi', 'standard'),
            ]
            ensure_statefulset_with_containers(
                api_apps_v1=api_apps_v1,
                name=instance.hostname,
                namespace=namespace,
                replicas=1,
                containers=[
                    V1Container(name='nifi',
                                image=instance.image,
                                env=[
                                    V1EnvVar(name='NIFI_WEB_HTTP_HOST',
                                             value='0.0.0.0')
                                ],
                                ports=[V1ContainerPort(container_port=8080)],
                                volume_mounts=[
                                    V1VolumeMount(name=path[0],
                                                  mount_path=path[1])
                                    for path in nifi_volume_paths
                                ])
                ],
                # The repositories must be writable by NiFi's uid/gid 1000
                # before the main container starts.
                init_containers=[
                    V1Container(name='init-permissions',
                                image='busybox',
                                command=[
                                    'sh', '-c',
                                    'chown -R 1000:1000 /opt/nifi/nifi-current'
                                ],
                                volume_mounts=[
                                    V1VolumeMount(name=path[0],
                                                  mount_path=path[1])
                                    for path in nifi_volume_paths
                                ])
                ],
                volume_paths=nifi_volume_paths)
            ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                                      api_custom=api_custom,
                                      domain=domain,
                                      hostname=instance.hostname,
                                      name=instance.hostname,
                                      target_name=instance.hostname,
                                      namespace=namespace,
                                      port_name=port_name,
                                      svc_port=80,
                                      target_port=8080)
            # deploy mongo
            if instance.deploy_mongo:
                mongo_volume_paths = [
                    ('db', '/data/db', '20Gi', 'standard'),
                ]
                ensure_statefulset_with_containers(
                    api_apps_v1=api_apps_v1,
                    name='mongo',
                    namespace=namespace,
                    replicas=1,
                    containers=[
                        V1Container(
                            name='mongo',
                            image='mongo',
                            env=[
                                V1EnvVar(name='MONGO_INITDB_ROOT_USERNAME',
                                         value='admin'),
                                V1EnvVar(name='MONGO_INITDB_ROOT_PASSWORD',
                                         value='admin')
                            ],
                            ports=[
                                V1ContainerPort(name='mongo',
                                                container_port=27017)
                            ],
                            volume_mounts=[
                                V1VolumeMount(name=path[0],
                                              mount_path=path[1])
                                for path in mongo_volume_paths
                            ])
                    ],
                    volume_paths=mongo_volume_paths)
                ensure_service(api=api_core_v1,
                               service=V1Service(
                                   api_version="v1",
                                   metadata=V1ObjectMeta(name='mongo'),
                                   spec=V1ServiceSpec(
                                       type='ClusterIP',
                                       ports=[
                                           V1ServicePort(protocol='TCP',
                                                         port=27017,
                                                         name='mongo',
                                                         target_port=27017),
                                       ],
                                       selector={'app': 'mongo'})),
                               name='mongo',
                               namespace=namespace)
                # NOTE(review): containerPort is declared as 8000 here but the
                # ingress below targets 8081 (mongo-express's usual default).
                # Looks inconsistent — confirm which port the image listens on.
                ensure_single_container_deployment(
                    api_apps_v1,
                    V1Container(
                        name='mongo-express',
                        image='mongo-express',
                        env=[
                            V1EnvVar(name='ME_CONFIG_MONGODB_ADMINUSERNAME',
                                     value='admin'),
                            V1EnvVar(name='ME_CONFIG_MONGODB_ADMINPASSWORD',
                                     value='admin')
                        ],
                        ports=[V1ContainerPort(container_port=8000)]),
                    'mongo-express', instance.namespace)
                ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                                          api_custom=api_custom,
                                          domain=domain,
                                          hostname="mongo-" + instance.hostname,
                                          name="mongo-" + instance.hostname,
                                          target_name="mongo-express",
                                          namespace=namespace,
                                          port_name=port_name,
                                          svc_port=80,
                                          target_port=8081)
            if instance.deploy_kafka:
                # deploy zookeeper
                ensure_single_container_deployment(
                    api_apps_v1,
                    V1Container(name='zookeeper',
                                image='wurstmeister/zookeeper',
                                env=[],
                                ports=[V1ContainerPort(container_port=2181)]),
                    'zookeeper', instance.namespace)
                ensure_service(api=api_core_v1,
                               service=V1Service(
                                   api_version="v1",
                                   metadata=V1ObjectMeta(name='zookeeper'),
                                   spec=V1ServiceSpec(
                                       type='ClusterIP',
                                       ports=[
                                           V1ServicePort(protocol='TCP',
                                                         port=2181,
                                                         name='zookeeper',
                                                         target_port=2181),
                                       ],
                                       selector={'app': 'zookeeper'})),
                               name='zookeeper',
                               namespace=namespace)
                # deploy kafka
                ensure_single_container_deployment(
                    api_apps_v1,
                    V1Container(name='kafka',
                                image='wurstmeister/kafka',
                                env=[
                                    V1EnvVar(name='KAFKA_ADVERTISED_HOST_NAME',
                                             value='kafka'),
                                    V1EnvVar(name='KAFKA_ZOOKEEPER_CONNECT',
                                             value='zookeeper:2181'),
                                    V1EnvVar(name='KAFKA_PORT',
                                             value='9092')
                                ],
                                ports=[V1ContainerPort(container_port=9092)]),
                    'kafka', instance.namespace)
                ensure_service(api=api_core_v1,
                               service=V1Service(
                                   api_version="v1",
                                   metadata=V1ObjectMeta(name='kafka'),
                                   spec=V1ServiceSpec(
                                       type='ClusterIP',
                                       ports=[
                                           V1ServicePort(protocol='TCP',
                                                         port=9092,
                                                         name='kafka',
                                                         target_port=9092),
                                       ],
                                       selector={'app': 'kafka'})),
                               name='kafka',
                               namespace=namespace)
            if instance.deploy_prometheus:
                # deploy prometheus
                ensure_single_container_deployment(
                    api_apps_v1,
                    V1Container(name='prometheus',
                                image='prom/prometheus',
                                env=[],
                                ports=[V1ContainerPort(container_port=9090)]),
                    'prometheus', instance.namespace)
                ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                                          api_custom=api_custom,
                                          domain=domain,
                                          hostname="prometheus-" + instance.hostname,
                                          name="prometheus",
                                          target_name="prometheus",
                                          namespace=namespace,
                                          port_name=port_name,
                                          svc_port=9090,
                                          target_port=9090)
            if instance.deploy_jupyter:
                # deploy jupyter
                # A fresh token is generated per instance and stored on the
                # model so the UI can surface it to the user.
                instance.jupyter_token = str(uuid.uuid1())
                ensure_single_container_deployment(
                    api_apps_v1,
                    V1Container(
                        name='jupyter',
                        image='jupyter/datascience-notebook',
                        command=[
                            'start-notebook.sh',
                            '--NotebookApp.token=' + instance.jupyter_token
                        ],
                        env=[],
                        ports=[V1ContainerPort(container_port=8888)]),
                    'jupyter', instance.namespace)
                ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                                          api_custom=api_custom,
                                          domain=domain,
                                          hostname="jupyter-" + instance.hostname,
                                          name="jupyter",
                                          target_name="jupyter",
                                          namespace=namespace,
                                          port_name=port_name,
                                          svc_port=8888,
                                          target_port=8888)
            # deploy custom instance types
            custom_instances = Instance.objects.filter(parent=instance)
            for ci in custom_instances:
                inst_type: InstanceType = ci.instance_type
                # Env vars and ports for a custom type come from its DB rows.
                env_vars = [
                    V1EnvVar(name=e.name, value=e.default_value)
                    for e in InstanceTypeEnvVar.objects.filter(
                        instance_type=inst_type)
                ]
                ports = [
                    V1ContainerPort(container_port=p.internal)
                    for p in InstanceTypePort.objects.filter(
                        instance_type=inst_type)
                ]
                ensure_single_container_deployment(
                    api_apps_v1,
                    V1Container(name=inst_type.container_name,
                                image=inst_type.image,
                                env=env_vars,
                                ports=ports),
                    inst_type.container_name, instance.namespace)
                for svc in InstanceTypeIngressRoutedService.objects.filter(
                        instance_type=inst_type):
                    ensure_ingress_routed_svc(
                        api_core_v1=api_core_v1,
                        api_custom=api_custom,
                        domain=domain,
                        hostname=svc.service_name + '-' + instance.hostname,
                        name=svc.service_name + '-' + instance.hostname,
                        target_name=inst_type.container_name,
                        namespace=namespace,
                        port_name=port_name,
                        svc_port=svc.svc_port,
                        target_port=svc.target_port)
            instance.state = 'RUNNING'
        finally:
            instance.save()
def perform_cloud_ops():
    """Run the full cloud reconciliation pass.

    Reads required configuration from environment variables, authenticates
    against GCP and the GKE cluster, ensures the Traefik ingress stack, the
    admin web UI (with a docker-in-docker sidecar), the NiFi registry, and
    finally kicks off NiFi/build/mirror reconciliation.

    NOTE(review): configuration is validated with bare ``assert`` — these
    are stripped under ``python -O``; raising would be safer. Also, several
    ``logger.info`` calls below print secret values (oauth secret, Django
    secret key) to the logs — consider redacting.
    """
    # set GOOGLE_APPLICATION_CREDENTIALS env to credentials file
    # set GOOGLE_CLOUD_PROJECT env to project id
    domain = os.getenv('DOMAIN')
    assert domain
    logger.info(f'using domain: {domain}')
    static_ip = os.getenv('STATIC_IP')
    assert static_ip
    logger.info(f'using static IP: {static_ip}')
    admin_email = os.getenv('ADMIN_EMAIL')
    assert admin_email
    logger.info(f'using ACME admin email: {admin_email}')
    oauth_client_id = os.getenv('OAUTH_CLIENT_ID')
    assert oauth_client_id
    logger.info(f'using oauth client id: {oauth_client_id}')
    oauth_client_secret = os.getenv('OAUTH_CLIENT_SECRET')
    assert oauth_client_secret
    logger.info(f'using oauth client secret: {oauth_client_secret}')
    oauth_secret = os.getenv('OAUTH_SECRET')
    assert oauth_secret
    logger.info(f'using oauth secret: {oauth_secret}')
    oauth_domain = os.getenv('OAUTH_DOMAIN')
    assert oauth_domain
    logger.info(f'using domain: {oauth_domain}')
    django_secret_key = os.getenv('DJANGO_SECRET_KEY')
    assert django_secret_key
    logger.info(f'using DJANGO_SECRET_KEY: {django_secret_key}')

    # Application-default credentials; project comes from the environment.
    credentials, project = google.auth.default()
    gcloud_client = container_v1.ClusterManagerClient(credentials=credentials)
    scan_clusters(gcloud_client, project)
    # FIXME add the k8s cert to a trust store
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    auth_gcloud_k8s(credentials)
    api_core_v1 = client.CoreV1Api()
    api_apps_v1 = client.AppsV1Api()
    api_storage_v1 = client.StorageV1Api()
    api_custom = client.CustomObjectsApi()
    api_extensions_v1_beta1 = client.ExtensionsV1beta1Api()
    api_ext_v1_beta1 = client.ApiextensionsV1beta1Api()
    api_rbac_auth_v1_b1 = client.RbacAuthorizationV1beta1Api()
    ensure_traefik(api_core_v1, api_ext_v1_beta1, api_apps_v1, api_custom,
                   api_rbac_auth_v1_b1, admin_email, domain, static_ip,
                   oauth_client_id, oauth_client_secret, oauth_domain,
                   oauth_secret)
    # Re-publish the gcloud service-account key as an in-cluster secret so
    # the web UI pod can authenticate to GCP.
    with open(os.getenv('GOOGLE_APPLICATION_CREDENTIALS'), 'rb') as f:
        gcloud_credentials_b64 = b64encode(f.read()).decode('UTF-8')
    ensure_secret(api=api_core_v1,
                  name='webui-credentials',
                  namespace='default',
                  secret=V1Secret(
                      metadata=client.V1ObjectMeta(name='webui-credentials'),
                      data={'gcloud-credentials': gcloud_credentials_b64}))
    # Tuples are (volume name, mount path, size, storage class).
    webui_volume_paths = [
        ('data', '/opt/nipyapi/data', '20Gi', 'standard'),
    ]
    webui_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in webui_volume_paths
    ]
    webui_volume_mounts.append(
        V1VolumeMount(name='webui-credentials',
                      mount_path='/root/webui',
                      read_only=True))
    dind_volume_paths = [
        ('docker', '/var/lib/docker', '200Gi', 'standard'),
    ]
    dind_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in dind_volume_paths
    ]
    # The docker socket is shared between the web UI and the dind sidecar
    # via an emptyDir volume mounted into both containers.
    shared_volume_mounts = [
        V1VolumeMount(name='dind-socket', mount_path='/var/run-shared')
    ]
    ensure_statefulset_with_containers(
        api_apps_v1=api_apps_v1,
        name='admin',
        namespace='default',
        replicas=1,
        containers=[
            V1Container(
                name='webui',
                image='aichrist/nipyapi-ds:latest',
                env=[
                    # FIXME use k8s secrets for these values
                    V1EnvVar(name='DOMAIN', value=domain),
                    V1EnvVar(name='STATIC_IP', value=static_ip),
                    V1EnvVar(name='ADMIN_EMAIL', value=admin_email),
                    V1EnvVar(name='OAUTH_CLIENT_ID', value=oauth_client_id),
                    V1EnvVar(name='OAUTH_CLIENT_SECRET',
                             value=oauth_client_secret),
                    V1EnvVar(name='OAUTH_SECRET', value=oauth_secret),
                    V1EnvVar(name='OAUTH_DOMAIN', value=oauth_domain),
                    V1EnvVar(name='DJANGO_SECRET_KEY',
                             value=django_secret_key),
                    V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                             value='/root/webui/gcloud_credentials.json'),
                    V1EnvVar(name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                             value='/root/webui/gcloud_credentials.json'),
                    V1EnvVar(name='GOOGLE_CLOUD_PROJECT',
                             value=os.getenv('GOOGLE_CLOUD_PROJECT')),
                    V1EnvVar(name='DOCKER_HOST',
                             value='unix:///var/run-shared/docker.sock'),
                ],
                ports=[V1ContainerPort(container_port=8000)],
                volume_mounts=webui_volume_mounts + shared_volume_mounts),
            # Docker-in-docker sidecar; must be privileged to run dockerd.
            V1Container(
                name='dind',
                image='docker:19-dind',
                security_context=V1SecurityContext(privileged=True),
                command=[
                    'dockerd', '-H', 'unix:///var/run-shared/docker.sock'
                ],
                volume_mounts=dind_volume_mounts + shared_volume_mounts)
        ],
        volumes=[
            V1Volume(name='dind-socket', empty_dir={}),
            V1Volume(name='webui-credentials',
                     projected=V1ProjectedVolumeSource(sources=[
                         V1VolumeProjection(secret=V1SecretProjection(
                             name='webui-credentials',
                             items=[
                                 V1KeyToPath(key='gcloud-credentials',
                                             path='gcloud_credentials.json')
                             ]))
                     ]))
        ],
        volume_paths=webui_volume_paths + dind_volume_paths)
    ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                              api_custom=api_custom,
                              domain=domain,
                              hostname='admin',
                              name='admin',
                              target_name='admin',
                              namespace='default',
                              port_name='web',
                              svc_port=80,
                              target_port=8000)
    reg_volume_paths = [
        ('database', '/opt/nifi-registry/nifi-registry-current/database',
         '10Gi', 'standard'),
        ('flow-storage',
         '/opt/nifi-registry/nifi-registry-current/flow_storage', '20Gi',
         'standard'),
    ]
    reg_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in reg_volume_paths
    ]
    ensure_statefulset_with_containers(
        api_apps_v1=api_apps_v1,
        name='registry',
        namespace='default',
        replicas=1,
        containers=[
            V1Container(name='registry',
                        image='apache/nifi-registry:latest',
                        env=[
                            V1EnvVar(name='NIFI_REGISTRY_WEB_HTTP_PORT',
                                     value='19090'),
                        ],
                        ports=[V1ContainerPort(container_port=19090)],
                        volume_mounts=reg_volume_mounts),
        ],
        # Registry storage must be writable by uid/gid 1000 before start.
        init_containers=[
            V1Container(
                name='init-permissions',
                image='busybox',
                command=[
                    'sh', '-c',
                    'chown -R 1000:1000 /opt/nifi-registry/nifi-registry-current'
                ],
                volume_mounts=[
                    V1VolumeMount(name=path[0], mount_path=path[1])
                    for path in reg_volume_paths
                ])
        ],
        volumes=[],
        volume_paths=reg_volume_paths)
    ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                              api_custom=api_custom,
                              domain=domain,
                              hostname='registry',
                              name='registry',
                              target_name='registry',
                              namespace='default',
                              port_name='web',
                              svc_port=80,
                              target_port=19090)
    # Downstream reconciliation: NiFi instances, then background build and
    # mirror operations.
    perform_nifi_ops(api_apps_v1, api_core_v1, api_custom, domain)
    perform_build_ops_bg()
    perform_mirror_ops_bg()
def _prepare_app_container(
    name,
    image,
    start_command,
    environment_variables,
    compute_spec,
    internal_ports,
    external_ports,
):
    """Prepare app container spec.

    Fix: the function previously ended in two near-duplicate ``V1Container``
    constructions differing only in the ``resources`` argument; they are
    merged into a single construction with ``resources`` attached only when
    a compute spec is provided (matching the old behavior exactly).

    :param str start_command:
    :param Dict[str, str] environment_variables:
    :param ApplicationImage image:
    :param AppComputeSpecKubernetes compute_spec:
    :param str name:
    :param List[int] internal_ports:
    :param List[int] external_ports:
    :rtype: V1Container
    """
    # Internal and external ports differ only by their name prefix.
    container_ports = [
        V1ContainerPort(
            name="{}{}".format(TagsService.INTERNAL_PORT_PREFIX, port),
            container_port=port,
        )
        for port in internal_ports
    ] + [
        V1ContainerPort(
            name="{}{}".format(TagsService.EXTERNAL_PORT_PREFIX, port),
            container_port=port,
        )
        for port in external_ports
    ]

    env_list = (
        [
            V1EnvVar(name=key, value=value)
            for key, value in environment_variables.items()
        ]
        if environment_variables
        else []
    )

    command = None
    args = None
    if start_command:
        # e.g. ["while true; do sleep 30; done;"] runs a task that never finishes
        command = ["/bin/bash", "-c", "--"]
        args = [start_command]

    # Omit the tag for "latest"/empty so the registry default applies.
    if image.tag == "latest" or image.tag == "":
        full_image_name = image.name
    else:
        full_image_name = "{name}:{tag}".format(name=image.name, tag=image.tag)

    container_kwargs = dict(
        name=name,
        image=full_image_name,
        command=command,
        args=args,
        ports=container_ports,
        env=env_list,
    )
    if compute_spec:
        # Resource requests/limits only apply when a compute spec is given;
        # otherwise the container is created without a resources field.
        container_kwargs["resources"] = (
            KubernetesDeploymentService._prepare_resource_request(compute_spec)
        )
    return V1Container(**container_kwargs)