def get_from_experiment_config_map(self, key_name):
    """Build a V1EnvVar that reads *key_name* from this experiment's config map.

    The config map name is derived from the experiment UUID via
    ``constants.CONFIG_MAP_NAME``.
    """
    cm_name = constants.CONFIG_MAP_NAME.format(
        experiment_uuid=self.experiment_uuid)
    return client.V1EnvVar(
        name=key_name,
        value_from=client.V1EnvVarSource(
            config_map_key_ref=client.V1ConfigMapKeySelector(
                name=cm_name, key=key_name)))
def build_env_list_for_pod(env_vars):
    """Translate a dict of environment specs into a list of ``V1EnvVar``.

    *env_vars* maps env-var names to either:

    * a plain string value, or
    * a reference dict with keys ``"type"`` (``"configmap"`` or
      ``"secret"``, case-insensitive), ``"name"`` (the referenced object)
      and ``"key"`` (the entry inside it).

    Entries with a ``None`` name or value, incomplete reference dicts, and
    values of any other type are silently skipped.  ``"field"`` and
    ``"resource_field"`` reference types are recognized but not implemented
    yet, so they are skipped as well.

    :param env_vars: mapping of env-var name -> str value or reference dict
    :return: list of ``client.V1EnvVar`` objects
    """
    env_list = []
    for env_name, env_value in env_vars.items():
        if env_name is None or env_value is None:
            continue
        if isinstance(env_value, str):
            # Plain key/value pair.
            env_list.append(client.V1EnvVar(name=env_name, value=env_value))
        elif isinstance(env_value, dict):
            # Reference to a config map or secret entry; all three fields
            # must be present for the reference to be usable.  (This also
            # subsumes the old `len(keys) < 3` guard.)
            if not {"type", "name", "key"} <= env_value.keys():
                continue
            ref_type = env_value["type"].lower()
            ref_name = env_value["name"]
            ref_key = env_value["key"]
            env_var_source = None
            if ref_type == "configmap":
                env_var_source = client.V1EnvVarSource(
                    config_map_key_ref=client.V1ConfigMapKeySelector(
                        key=ref_key, name=ref_name))
            elif ref_type == "secret":
                env_var_source = client.V1EnvVarSource(
                    secret_key_ref=client.V1SecretKeySelector(
                        key=ref_key, name=ref_name))
            # "field" / "resource_field" fall through with no source.
            if env_var_source is not None:
                env_list.append(
                    client.V1EnvVar(name=env_name, value_from=env_var_source))
    return env_list
def init_envs(self, container_props, name):
    """Create one config-map-backed V1EnvVar per key of the container config.

    Every key of the pre-processed config becomes an env var whose value is
    read from the config map called *name* under that same key.
    """
    processed = self.pre_process_config(container_props.config)
    return [
        client.V1EnvVar(
            name=key,
            value_from=client.V1EnvVarSource(
                config_map_key_ref=client.V1ConfigMapKeySelector(
                    key=key, name=name)))
        for key in processed
    ]
def get_from_config_map(key_name, cm_key_name, config_map_ref_name=None):
    """Build a V1EnvVar named *key_name* sourced from a config-map entry.

    Falls back to the app-wide config map
    (``settings.POLYAXON_K8S_APP_CONFIG_NAME``) when *config_map_ref_name*
    is falsy.
    """
    cm_name = config_map_ref_name or settings.POLYAXON_K8S_APP_CONFIG_NAME
    return client.V1EnvVar(
        name=key_name,
        value_from=client.V1EnvVarSource(
            config_map_key_ref=client.V1ConfigMapKeySelector(
                name=cm_name, key=cm_key_name)))
def create_job_object(name: str,
                      container_image: str,
                      env_list: dict,
                      command: List[str],
                      command_args: List[str],
                      volumes: List[Dict],
                      init_containers: List[Dict],
                      output: Output,
                      namespace: str = "stackl",
                      container_name: str = "jobcontainer",
                      api_version: str = "batch/v1",
                      image_pull_policy: str = "Always",
                      ttl_seconds_after_finished: int = 3600,
                      restart_policy: str = "Never",
                      backoff_limit: int = 0,
                      active_deadline_seconds: int = 3600,
                      service_account: str = "stackl-agent-stackl-agent",
                      image_pull_secrets: List[str] = None,
                      labels=None) -> tuple:  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
    """Creates a Job object using the Kubernetes client

    :param name: Job name affix (a random id is appended)
    :type name: str
    :param container_image: automation container image
    :type container_image: str
    :param env_list: Dict with key/values for the environment inside the
                     automation container; a dict value may itself be a dict
                     carrying a ``config_map_key_ref`` or ``field_ref``
    :type env_list: dict
    :param command: entrypoint command
    :type command: List[str]
    :param command_args: command arguments
    :type command_args: List[str]
    :param volumes: volumes and volumemounts
    :type volumes: List[Dict]
    :param init_containers: list with init_containers
    :type init_containers: List[Dict]
    :param output: output Object
    :type output: Output
    :param namespace: Kubernetes namespace, defaults to "stackl"
    :type namespace: str, optional
    :param container_name: name of automation container, defaults to "jobcontainer"
    :type container_name: str, optional
    :param api_version: Job api version, defaults to "batch/v1"
    :type api_version: str, optional
    :param image_pull_policy: always pull latest images, defaults to "Always"
    :type image_pull_policy: str, optional
    :param ttl_seconds_after_finished: Remove jobs after execution with ttl, defaults to 3600
    :type ttl_seconds_after_finished: int, optional
    :param restart_policy: Restart the pod on the same node after failure, defaults to "Never"
    :type restart_policy: str, optional
    :param backoff_limit: Retries after failure, defaults to 0
    :type backoff_limit: int, optional
    :param active_deadline_seconds: Timeout on a job, defaults to 3600 seconds
    :type active_deadline_seconds: int, optional
    :param service_account: Kubernetes service account, defaults to "stackl-agent-stackl-agent"
    :type service_account: str, optional
    :param image_pull_secrets: secrets to pull images, defaults to no secrets
    :type image_pull_secrets: List[str], optional
    :param labels: metadata labels, defaults to None
    :type labels: dict, optional
    :return: tuple of the automation Job object and the list of config maps
             created for its config_map volumes
    :rtype: (client.V1Job, list)
    """
    # NOTE: the old signature used a mutable default (`image_pull_secrets=[]`),
    # which is shared across calls; None-plus-fallback is the safe equivalent.
    if image_pull_secrets is None:
        image_pull_secrets = []
    # Make the job name unique so repeated invocations don't collide.
    id_job = id_generator()
    name = name + "-" + id_job
    body = client.V1Job(api_version=api_version, kind="Job")
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    body.status = client.V1JobStatus()
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    k8s_volumes = []
    cms = []
    logging.debug(f"volumes: {volumes}")
    # create a k8s volume for each element in volumes
    for vol in volumes:
        vol_name = name + "-" + vol["name"]
        k8s_volume = client.V1Volume(name=vol_name)
        if vol["type"] == "config_map":
            config_map = client.V1ConfigMapVolumeSource()
            config_map.name = vol_name
            k8s_volume.config_map = config_map
            # The backing ConfigMap is created alongside the job and
            # returned to the caller for cleanup.
            cms.append(create_cm(vol_name, namespace, vol['data']))
            vol['name'] = vol_name
        if vol["type"] == "empty_dir":
            k8s_volume.empty_dir = client.V1EmptyDirVolumeSource(
                medium="Memory")
            vol['name'] = vol_name
        k8s_volumes.append(k8s_volume)
    logging.debug(f"Volumes created for job {name}: {k8s_volumes}")
    # create a volume mount for each element in volumes
    k8s_volume_mounts = []
    for vol in volumes:
        if vol["mount_path"]:
            volume_mount = client.V1VolumeMount(name=vol["name"],
                                                mount_path=vol["mount_path"])
            if "sub_path" in vol:
                volume_mount.sub_path = vol["sub_path"]
            k8s_volume_mounts.append(volume_mount)
    logging.debug(f"Volume mounts created for job {name}: {k8s_volume_mounts}")
    # create an environment list; dict values describe indirections
    # (config map keys / downward-API fields), plain values are literals
    k8s_env_list = []
    if env_list:
        for key, value in env_list.items():
            if isinstance(value, dict):
                if 'config_map_key_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            config_map_key_ref=client.V1ConfigMapKeySelector(
                                name=value['config_map_key_ref']["name"],
                                key=value['config_map_key_ref']["key"])))
                    k8s_env_list.append(k8s_env_from)
                elif 'field_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path=value['field_ref'])))
                    k8s_env_list.append(k8s_env_from)
            else:
                k8s_env = client.V1EnvVar(name=key, value=value)
                k8s_env_list.append(k8s_env)
    # env_from support is not implemented yet; an empty list keeps the
    # container spec well-formed.
    k8s_env_from_list = []
    logging.debug(f"Environment list created for job {name}: {k8s_env_list}")
    container = client.V1Container(name=container_name,
                                   image=container_image,
                                   env=k8s_env_list,
                                   volume_mounts=k8s_volume_mounts,
                                   image_pull_policy=image_pull_policy,
                                   command=command,
                                   args=command_args,
                                   env_from=k8s_env_from_list)
    # init containers share the main container's mounts and environment
    k8s_init_containers = []
    logging.debug(f"Init containers for job {name}: {init_containers}")
    for c in init_containers:
        k8s_c = client.V1Container(name=c['name'],
                                   image=c['image'],
                                   volume_mounts=k8s_volume_mounts,
                                   env=k8s_env_list)
        if 'args' in c:
            k8s_c.args = c['args']
        k8s_init_containers.append(k8s_c)
    k8s_secrets = []
    for secret in image_pull_secrets:
        k8s_secrets.append(client.V1LocalObjectReference(name=secret))
    logging.debug(f"Secret list created for job {name}: {k8s_secrets}")
    containers = [container]
    if output:
        # Output sidecars reuse the job's mounts and environment.
        output.volume_mounts = k8s_volume_mounts
        output.env = k8s_env_list
        output_containers = output.containers
        containers = containers + output_containers
    template.template.metadata = client.V1ObjectMeta(labels=labels)
    template.template.spec = client.V1PodSpec(
        containers=containers,
        restart_policy=restart_policy,
        image_pull_secrets=k8s_secrets,
        volumes=k8s_volumes,
        init_containers=k8s_init_containers,
        service_account_name=service_account)
    template.template = client.V1PodTemplateSpec(
        metadata=template.template.metadata, spec=template.template.spec)
    body.spec = client.V1JobSpec(
        ttl_seconds_after_finished=ttl_seconds_after_finished,
        template=template.template,
        backoff_limit=backoff_limit,
        active_deadline_seconds=active_deadline_seconds)
    return body, cms
def test_sanitize_k8s_container_attribute(self):
    """End-to-end check of sanitize_k8s_object on a ContainerOp's container.

    Three phases:
      1. build a container whose every field is deliberately the wrong type
         (ints where strings belong, strings where bools/ints belong) and
         verify sanitization coerces each field to its declared type;
      2. verify the concrete values produced by the coercion;
      3. verify invalid inputs raise ValueError.
    """
    # test cases for implicit type sanitization(conversion)
    op = dsl.ContainerOp(name='echo', image='image', command=['sh', '-c'],
                         arguments=['echo test | tee /tmp/message.txt'],
                         file_outputs={'merged': '/tmp/message.txt'})
    op.container \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path='/secret/gcp-credentials',
            name='gcp-credentials')) \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value=80)) \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value_from=k8s_client.V1EnvVarSource(
                config_map_key_ref=k8s_client.V1ConfigMapKeySelector(
                    key=80, name=8080, optional='False'),
                field_ref=k8s_client.V1ObjectFieldSelector(
                    api_version=80, field_path=8080),
                resource_field_ref=k8s_client.V1ResourceFieldSelector(
                    container_name=80, divisor=8080, resource=8888),
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    key=80, name=8080, optional='False')))) \
        .add_env_from(k8s_client.V1EnvFromSource(
            config_map_ref=k8s_client.V1ConfigMapEnvSource(
                name=80, optional='True'),
            prefix=999)) \
        .add_env_from(k8s_client.V1EnvFromSource(
            secret_ref=k8s_client.V1SecretEnvSource(name=80, optional='True'),
            prefix=888)) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path=111,
            mount_propagation=222,
            name=333,
            read_only='False',
            sub_path=444,
            sub_path_expr=555)) \
        .add_volume_devices(k8s_client.V1VolumeDevice(
            device_path=111,
            name=222)) \
        .add_port(k8s_client.V1ContainerPort(
            container_port='8080',
            host_ip=111,
            host_port='8888',
            name=222,
            protocol=333)) \
        .set_security_context(k8s_client.V1SecurityContext(
            allow_privilege_escalation='True',
            capabilities=k8s_client.V1Capabilities(add=[11, 22],
                                                   drop=[33, 44]),
            privileged='False',
            proc_mount=111,
            read_only_root_filesystem='False',
            run_as_group='222',
            run_as_non_root='True',
            # NOTE(review): this value looks redacted ('******'); the value
            # assertion further below expects run_as_user == 333 -- confirm
            # against the original test source.
            run_as_user='******',
            se_linux_options=k8s_client.V1SELinuxOptions(
                level=11, role=22, type=33, user=44),
            windows_options=k8s_client.V1WindowsSecurityContextOptions(
                gmsa_credential_spec=11, gmsa_credential_spec_name=22))) \
        .set_stdin(stdin='False') \
        .set_stdin_once(stdin_once='False') \
        .set_termination_message_path(termination_message_path=111) \
        .set_tty(tty='False') \
        .set_readiness_probe(readiness_probe=k8s_client.V1Probe(
            _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
            failure_threshold='111',
            http_get=k8s_client.V1HTTPGetAction(
                host=11,
                http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                path=44,
                port='55',
                scheme=66),
            initial_delay_seconds='222',
            period_seconds='333',
            success_threshold='444',
            tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
            timeout_seconds='777')) \
        .set_liveness_probe(liveness_probe=k8s_client.V1Probe(
            _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
            failure_threshold='111',
            http_get=k8s_client.V1HTTPGetAction(
                host=11,
                http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                path=44,
                port='55',
                scheme=66),
            initial_delay_seconds='222',
            period_seconds='333',
            success_threshold='444',
            tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
            timeout_seconds='777')) \
        .set_lifecycle(lifecycle=k8s_client.V1Lifecycle(
            post_start=k8s_client.V1Handler(
                _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                http_get=k8s_client.V1HTTPGetAction(
                    host=11,
                    http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                    path=44,
                    port='55',
                    scheme=66),
                tcp_socket=k8s_client.V1TCPSocketAction(host=555,
                                                        port='666')),
            pre_stop=k8s_client.V1Handler(
                _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                http_get=k8s_client.V1HTTPGetAction(
                    host=11,
                    http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                    path=44,
                    port='55',
                    scheme=66),
                tcp_socket=k8s_client.V1TCPSocketAction(host=555,
                                                        port='666'))))

    # Run the sanitizer under test; all assertions below inspect its output.
    sanitize_k8s_object(op.container)

    # env: names/values and every nested selector must come back as the
    # type the k8s API declares (str keys/names, bool optionals).
    for e in op.container.env:
        self.assertIsInstance(e.name, str)
        if e.value:
            self.assertIsInstance(e.value, str)
        if e.value_from:
            if e.value_from.config_map_key_ref:
                self.assertIsInstance(e.value_from.config_map_key_ref.key,
                                      str)
                if e.value_from.config_map_key_ref.name:
                    self.assertIsInstance(
                        e.value_from.config_map_key_ref.name, str)
                if e.value_from.config_map_key_ref.optional:
                    self.assertIsInstance(
                        e.value_from.config_map_key_ref.optional, bool)
            if e.value_from.field_ref:
                self.assertIsInstance(e.value_from.field_ref.field_path, str)
                if e.value_from.field_ref.api_version:
                    self.assertIsInstance(e.value_from.field_ref.api_version,
                                          str)
            if e.value_from.resource_field_ref:
                self.assertIsInstance(e.value_from.resource_field_ref.resource,
                                      str)
                if e.value_from.resource_field_ref.container_name:
                    self.assertIsInstance(
                        e.value_from.resource_field_ref.container_name, str)
                if e.value_from.resource_field_ref.divisor:
                    self.assertIsInstance(
                        e.value_from.resource_field_ref.divisor, str)
            if e.value_from.secret_key_ref:
                self.assertIsInstance(e.value_from.secret_key_ref.key, str)
                if e.value_from.secret_key_ref.name:
                    self.assertIsInstance(e.value_from.secret_key_ref.name,
                                          str)
                if e.value_from.secret_key_ref.optional:
                    self.assertIsInstance(
                        e.value_from.secret_key_ref.optional, bool)

    # env_from: prefixes become str, optionals become bool.
    for e in op.container.env_from:
        if e.prefix:
            self.assertIsInstance(e.prefix, str)
        if e.config_map_ref:
            if e.config_map_ref.name:
                self.assertIsInstance(e.config_map_ref.name, str)
            if e.config_map_ref.optional:
                self.assertIsInstance(e.config_map_ref.optional, bool)
        if e.secret_ref:
            if e.secret_ref.name:
                self.assertIsInstance(e.secret_ref.name, str)
            if e.secret_ref.optional:
                self.assertIsInstance(e.secret_ref.optional, bool)

    # volume mounts
    for e in op.container.volume_mounts:
        if e.mount_path:
            self.assertIsInstance(e.mount_path, str)
        if e.mount_propagation:
            self.assertIsInstance(e.mount_propagation, str)
        if e.name:
            self.assertIsInstance(e.name, str)
        if e.read_only:
            self.assertIsInstance(e.read_only, bool)
        if e.sub_path:
            self.assertIsInstance(e.sub_path, str)
        if e.sub_path_expr:
            self.assertIsInstance(e.sub_path_expr, str)

    # volume devices
    for e in op.container.volume_devices:
        if e.device_path:
            self.assertIsInstance(e.device_path, str)
        if e.name:
            self.assertIsInstance(e.name, str)

    # ports: port numbers must be ints, everything else str.
    for e in op.container.ports:
        if e.container_port:
            self.assertIsInstance(e.container_port, int)
        if e.host_ip:
            self.assertIsInstance(e.host_ip, str)
        if e.host_port:
            self.assertIsInstance(e.host_port, int)
        if e.name:
            self.assertIsInstance(e.name, str)
        if e.protocol:
            self.assertIsInstance(e.protocol, str)

    # security context: mixed bool/int/str field types.
    if op.container.security_context:
        e = op.container.security_context
        if e.allow_privilege_escalation:
            self.assertIsInstance(e.allow_privilege_escalation, bool)
        if e.capabilities:
            for a in e.capabilities.add:
                self.assertIsInstance(a, str)
            for d in e.capabilities.drop:
                self.assertIsInstance(d, str)
        if e.privileged:
            self.assertIsInstance(e.privileged, bool)
        if e.proc_mount:
            self.assertIsInstance(e.proc_mount, str)
        if e.read_only_root_filesystem:
            self.assertIsInstance(e.read_only_root_filesystem, bool)
        if e.run_as_group:
            self.assertIsInstance(e.run_as_group, int)
        if e.run_as_non_root:
            self.assertIsInstance(e.run_as_non_root, bool)
        if e.run_as_user:
            self.assertIsInstance(e.run_as_user, int)
        if e.se_linux_options:
            if e.se_linux_options.level:
                self.assertIsInstance(e.se_linux_options.level, str)
            if e.se_linux_options.role:
                self.assertIsInstance(e.se_linux_options.role, str)
            if e.se_linux_options.type:
                self.assertIsInstance(e.se_linux_options.type, str)
            if e.se_linux_options.user:
                self.assertIsInstance(e.se_linux_options.user, str)
        if e.windows_options:
            if e.windows_options.gmsa_credential_spec:
                self.assertIsInstance(e.windows_options.gmsa_credential_spec,
                                      str)
            if e.windows_options.gmsa_credential_spec_name:
                self.assertIsInstance(
                    e.windows_options.gmsa_credential_spec_name, str)

    # scalar container flags
    if op.container.stdin:
        self.assertIsInstance(op.container.stdin, bool)
    if op.container.stdin_once:
        self.assertIsInstance(op.container.stdin_once, bool)
    if op.container.termination_message_path:
        self.assertIsInstance(op.container.termination_message_path, str)
    if op.container.tty:
        self.assertIsInstance(op.container.tty, bool)

    # probes: thresholds/periods become ints; http_get port may stay
    # either str or int (named vs numeric port).
    for e in [op.container.readiness_probe, op.container.liveness_probe]:
        if e:
            if e._exec:
                for c in e._exec.command:
                    self.assertIsInstance(c, str)
            if e.failure_threshold:
                self.assertIsInstance(e.failure_threshold, int)
            if e.http_get:
                if e.http_get.host:
                    self.assertIsInstance(e.http_get.host, str)
                if e.http_get.http_headers:
                    for h in e.http_get.http_headers:
                        if h.name:
                            self.assertIsInstance(h.name, str)
                        if h.value:
                            self.assertIsInstance(h.value, str)
                if e.http_get.path:
                    self.assertIsInstance(e.http_get.path, str)
                if e.http_get.port:
                    self.assertIsInstance(e.http_get.port, (str, int))
                if e.http_get.scheme:
                    self.assertIsInstance(e.http_get.scheme, str)
            if e.initial_delay_seconds:
                self.assertIsInstance(e.initial_delay_seconds, int)
            if e.period_seconds:
                self.assertIsInstance(e.period_seconds, int)
            if e.success_threshold:
                self.assertIsInstance(e.success_threshold, int)
            if e.tcp_socket:
                if e.tcp_socket.host:
                    self.assertIsInstance(e.tcp_socket.host, str)
                if e.tcp_socket.port:
                    self.assertIsInstance(e.tcp_socket.port, (str, int))
            if e.timeout_seconds:
                self.assertIsInstance(e.timeout_seconds, int)

    # lifecycle handlers follow the same rules as probes.
    if op.container.lifecycle:
        for e in [op.container.lifecycle.post_start,
                  op.container.lifecycle.pre_stop]:
            if e:
                if e._exec:
                    for c in e._exec.command:
                        self.assertIsInstance(c, str)
                if e.http_get:
                    if e.http_get.host:
                        self.assertIsInstance(e.http_get.host, str)
                    if e.http_get.http_headers:
                        for h in e.http_get.http_headers:
                            if h.name:
                                self.assertIsInstance(h.name, str)
                            if h.value:
                                self.assertIsInstance(h.value, str)
                    if e.http_get.path:
                        self.assertIsInstance(e.http_get.path, str)
                    if e.http_get.port:
                        self.assertIsInstance(e.http_get.port, (str, int))
                    if e.http_get.scheme:
                        self.assertIsInstance(e.http_get.scheme, str)
                if e.tcp_socket:
                    if e.tcp_socket.host:
                        self.assertIsInstance(e.tcp_socket.host, str)
                    if e.tcp_socket.port:
                        self.assertIsInstance(e.tcp_socket.port, (str, int))

    # test cases for checking value after sanitization
    check_value_op = dsl.ContainerOp(
        name='echo', image='image', command=['sh', '-c'],
        arguments=['echo test | tee /tmp/message.txt'],
        file_outputs={'merged': '/tmp/message.txt'})
    check_value_op.container \
        .add_env_variable(k8s_client.V1EnvVar(
            name=80,
            value=8080)) \
        .set_security_context(k8s_client.V1SecurityContext(
            allow_privilege_escalation='true',
            capabilities=k8s_client.V1Capabilities(add=[11, 22],
                                                   drop=[33, 44]),
            privileged='false',
            proc_mount=111,
            read_only_root_filesystem='False',
            run_as_group='222',
            run_as_non_root='True',
            # NOTE(review): redacted input ('******'); the run_as_user
            # assertion below expects 333 -- confirm original value.
            run_as_user='******',
            se_linux_options=k8s_client.V1SELinuxOptions(
                level=11, role=22, type=33, user=44),
            windows_options=k8s_client.V1WindowsSecurityContextOptions(
                gmsa_credential_spec=11, gmsa_credential_spec_name=22)))
    sanitize_k8s_object(check_value_op.container)
    self.assertEqual(check_value_op.container.env[0].name, '80')
    self.assertEqual(check_value_op.container.env[0].value, '8080')
    self.assertEqual(
        check_value_op.container.security_context.allow_privilege_escalation,
        True)
    self.assertEqual(
        check_value_op.container.security_context.capabilities.add[0], '11')
    self.assertEqual(
        check_value_op.container.security_context.capabilities.add[1], '22')
    self.assertEqual(
        check_value_op.container.security_context.capabilities.drop[0], '33')
    self.assertEqual(
        check_value_op.container.security_context.capabilities.drop[1], '44')
    self.assertEqual(check_value_op.container.security_context.privileged,
                     False)
    self.assertEqual(check_value_op.container.security_context.proc_mount,
                     '111')
    self.assertEqual(
        check_value_op.container.security_context.read_only_root_filesystem,
        False)
    self.assertEqual(check_value_op.container.security_context.run_as_group,
                     222)
    self.assertEqual(
        check_value_op.container.security_context.run_as_non_root, True)
    self.assertEqual(check_value_op.container.security_context.run_as_user,
                     333)
    self.assertEqual(
        check_value_op.container.security_context.se_linux_options.level,
        '11')
    self.assertEqual(
        check_value_op.container.security_context.se_linux_options.role, '22')
    self.assertEqual(
        check_value_op.container.security_context.se_linux_options.type, '33')
    self.assertEqual(
        check_value_op.container.security_context.se_linux_options.user, '44')
    self.assertEqual(
        check_value_op.container.security_context.windows_options
        .gmsa_credential_spec, '11')
    self.assertEqual(
        check_value_op.container.security_context.windows_options
        .gmsa_credential_spec_name, '22')

    # test cases for exception
    with self.assertRaises(ValueError,
                           msg='Invalid boolean string 2. Should be boolean.'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                allow_privilege_escalation=1))
        sanitize_k8s_object(exception_op.container)

    with self.assertRaises(
            ValueError,
            msg='Invalid boolean string Test. Should be "true" or "false".'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                allow_privilege_escalation='Test'))
        sanitize_k8s_object(exception_op.container)

    with self.assertRaises(ValueError,
                           msg='Invalid test. Should be integer.'):
        exception_op = dsl.ContainerOp(name='echo', image='image')
        exception_op.container \
            .set_security_context(k8s_client.V1SecurityContext(
                run_as_group='test',))
        sanitize_k8s_object(exception_op.container)
def get_from_experiment_config_map(name, key_name):
    """Return a V1EnvVar that reads *key_name* out of the config map *name*."""
    return client.V1EnvVar(
        name=key_name,
        value_from=client.V1EnvVarSource(
            config_map_key_ref=client.V1ConfigMapKeySelector(
                name=name, key=key_name)))
def kubernetes_deployment(_localega, config, ns, fake_cega):
    """Wrap all the kubernetes settings.

    Drives a full LocalEGA deployment into namespace *ns*: reads credentials
    from ``config/trace.ini``, creates secrets and config maps, provisions
    volumes/claims, then launches the pods and services via
    ``LocalEGADeploy``.  Optionally deploys a fake CentralEGA at the end.

    NOTE(review): the *config* parameter is never used inside this function,
    and is later shadowed by ``with open(...) as config`` -- confirm whether
    it can be dropped from the signature.
    """
    _here = Path(__file__).parent
    trace_file = Path(_here / 'config/trace.ini')
    assert trace_file.exists(), "No trace file!"
    trace_config = configparser.ConfigParser()
    trace_config.read(trace_file)
    deploy_lega = LocalEGADeploy(_localega, ns)
    # Setting ENV variables and Volumes
    env_cega_api = client.V1EnvVar(name="CEGA_ENDPOINT",
                                   value=f"{_localega['cega']['endpoint']}")
    env_inbox_mq = client.V1EnvVar(
        name="BROKER_HOST",
        value=f"{_localega['services']['broker']}.{ns}")
    env_inbox_port = client.V1EnvVar(name="INBOX_PORT", value="2222")
    env_db_data = client.V1EnvVar(name="PGDATA",
                                  value="/var/lib/postgresql/data/pgdata")
    # Secret/config-map backed env vars; the referenced secrets and config
    # maps are created further below before any pod uses them.
    env_cega_mq = client.V1EnvVar(
        name="CEGA_CONNECTION",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='cega-connection',
                                                      key="address")))
    env_cega_creds = client.V1EnvVar(
        name="CEGA_ENDPOINT_CREDS",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='cega-creds',
                                                      key="credentials")))
    env_acc_minio = client.V1EnvVar(
        name="MINIO_ACCESS_KEY",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='s3-keys',
                                                      key="access")))
    env_sec_minio = client.V1EnvVar(
        name="MINIO_SECRET_KEY",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='s3-keys',
                                                      key="secret")))
    env_acc_s3 = client.V1EnvVar(
        name="S3_ACCESS_KEY",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='s3-keys',
                                                      key="access")))
    env_sec_s3 = client.V1EnvVar(
        name="S3_SECRET_KEY",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='s3-keys',
                                                      key="secret")))
    env_db_pass = client.V1EnvVar(
        name="POSTGRES_PASSWORD",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(
                name='lega-db-secret', key="postgres_password")))
    env_db_user = client.V1EnvVar(
        name="POSTGRES_USER",
        value_from=client.V1EnvVarSource(
            config_map_key_ref=client.V1ConfigMapKeySelector(
                name='lega-db-config', key="user")))
    env_db_name = client.V1EnvVar(
        name="POSTGRES_DB",
        value_from=client.V1EnvVarSource(
            config_map_key_ref=client.V1ConfigMapKeySelector(
                name='lega-db-config', key="dbname")))
    env_lega_pass = client.V1EnvVar(
        name="LEGA_PASSWORD",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='lega-password',
                                                      key="password")))
    env_keys_pass = client.V1EnvVar(
        name="KEYS_PASSWORD",
        value_from=client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name='keys-password',
                                                      key="password")))
    # Volume mounts shared by the pods below.
    mount_config = client.V1VolumeMount(name="config", mount_path='/etc/ega')
    mount_inbox = client.V1VolumeMount(name="inbox", mount_path='/ega/inbox')
    mount_mq_temp = client.V1VolumeMount(name="mq-temp", mount_path='/temp')
    mount_mq_rabbitmq = client.V1VolumeMount(name="rabbitmq",
                                             mount_path='/etc/rabbitmq')
    mount_mq_script = client.V1VolumeMount(name="mq-entrypoint",
                                           mount_path='/script')
    mount_db_data = client.V1VolumeMount(name="data",
                                         mount_path='/var/lib/postgresql/data',
                                         read_only=False)
    # NOTE(review): "mound_db_init" looks like a typo for "mount_db_init";
    # kept as-is since it is referenced by that name below.
    mound_db_init = client.V1VolumeMount(
        name="initsql", mount_path='/docker-entrypoint-initdb.d')
    mount_minio = client.V1VolumeMount(name="data", mount_path='/data')
    # Projected volume pieces combined later into the keyserver volume.
    pmap_ini_conf = client.V1VolumeProjection(
        config_map=client.V1ConfigMapProjection(
            name="lega-config",
            items=[client.V1KeyToPath(key="conf.ini", path="conf.ini",
                                      mode=0o744)]))
    pmap_ini_keys = client.V1VolumeProjection(
        config_map=client.V1ConfigMapProjection(
            name="lega-keyserver-config",
            items=[client.V1KeyToPath(key="keys.ini", path="keys.ini",
                                      mode=0o744)]))
    sec_keys = client.V1VolumeProjection(
        secret=client.V1SecretProjection(
            name="keyserver-secret",
            items=[client.V1KeyToPath(key="key1.sec", path="pgp/key.1"),
                   client.V1KeyToPath(key="ssl.cert", path="ssl.cert"),
                   client.V1KeyToPath(key="ssl.key", path="ssl.key")]))
    deploy_lega.create_namespace()
    deploy_lega.config_secret('cega-creds',
                              {'credentials':
                               trace_config['secrets']['cega_creds']})
    # Create Secrets
    deploy_lega.config_secret(
        'cega-connection',
        {'address': trace_config['secrets']['cega_address']})
    deploy_lega.config_secret(
        'lega-db-secret',
        {'postgres_password': trace_config['secrets']['postgres_password']})
    deploy_lega.config_secret('s3-keys',
                              {'access': trace_config['secrets']['s3_access'],
                               'secret': trace_config['secrets']['s3_secret']})
    deploy_lega.config_secret(
        'lega-password',
        {'password': trace_config['secrets']['lega_password']})
    deploy_lega.config_secret(
        'keys-password',
        {'password': trace_config['secrets']['keys_password']})
    # Load PGP key and TLS material for the keyserver secret.
    with open(_here / 'config/key.1.sec') as key_file:
        key1_data = key_file.read()
    with open(_here / 'config/ssl.cert') as cert:
        ssl_cert = cert.read()
    with open(_here / 'config/ssl.key') as key:
        ssl_key = key.read()
    deploy_lega.config_secret('keyserver-secret', {'key1.sec': key1_data,
                                                   'ssl.cert': ssl_cert,
                                                   'ssl.key': ssl_key})
    # Read conf from files
    with open(_here / 'extras/db.sql') as sql_init:
        init_sql = sql_init.read()
    with open(_here / 'extras/mq.sh') as mq_init:
        init_mq = mq_init.read()
    with open(_here / 'config/conf.ini') as conf_file:
        data_conf = conf_file.read()
    with open(_here / 'config/keys.ini') as keys_file:
        data_keys = keys_file.read()
    # NOTE(review): `config` here shadows the function parameter.
    with open(_here / 'config/rabbitmq.config') as config:
        config_mq = config.read()
    with open(_here / 'config/defs.json') as defs:
        defs_mq = defs.read()
    # secret = deploy_lega.read_secret('keys-password')
    # enc_keys = conf.aes_encrypt(b64decode(secret.to_dict()['data']['password'].encode('utf-8')), data_keys.encode('utf-8'), md5)
    # with open(_here / 'config/keys.ini.enc', 'w') as enc_file:
    #     enc_file.write(b64encode(enc_keys).decode('utf-8'))
    # Upload Configuration Maps
    deploy_lega.config_map('initsql', {'db.sql': init_sql})
    deploy_lega.config_map('mq-config', {'defs.json': defs_mq,
                                         'rabbitmq.config': config_mq})
    deploy_lega.config_map('mq-entrypoint', {'mq.sh': init_mq})
    deploy_lega.config_map('lega-config', {'conf.ini': data_conf})
    deploy_lega.config_map('lega-keyserver-config', {'keys.ini': data_keys})
    # NOTE(review): the db user value appears redacted ('******') -- confirm
    # the intended value from the original source.
    deploy_lega.config_map('lega-db-config', {'user': '******',
                                              'dbname': 'lega'})
    # Volumes
    deploy_lega.persistent_volume("postgres", "0.5Gi",
                                  accessModes=["ReadWriteMany"])
    deploy_lega.persistent_volume("rabbitmq", "0.5Gi")
    deploy_lega.persistent_volume("inbox", "0.5Gi",
                                  accessModes=["ReadWriteMany"])
    deploy_lega.persistent_volume_claim("db-storage", "postgres", "0.5Gi",
                                        accessModes=["ReadWriteMany"])
    deploy_lega.persistent_volume_claim("mq-storage", "rabbitmq", "0.5Gi")
    deploy_lega.persistent_volume_claim("inbox", "inbox", "0.5Gi",
                                        accessModes=["ReadWriteMany"])
    volume_db = client.V1Volume(
        name="data",
        persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
            claim_name="db-storage"))
    volume_rabbitmq = client.V1Volume(
        name="rabbitmq",
        persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
            claim_name="mq-storage"))
    volume_db_init = client.V1Volume(
        name="initsql",
        config_map=client.V1ConfigMapVolumeSource(name="initsql"))
    volume_mq_temp = client.V1Volume(
        name="mq-temp",
        config_map=client.V1ConfigMapVolumeSource(name="mq-config"))
    volume_mq_script = client.V1Volume(
        name="mq-entrypoint",
        config_map=client.V1ConfigMapVolumeSource(name="mq-entrypoint",
                                                  default_mode=0o744))
    volume_config = client.V1Volume(
        name="config",
        config_map=client.V1ConfigMapVolumeSource(name="lega-config"))
    # volume_ingest = client.V1Volume(name="ingest-conf", config_map=client.V1ConfigMapVolumeSource(name="lega-config"))
    volume_inbox = client.V1Volume(
        name="inbox",
        persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
            claim_name="inbox"))
    volume_keys = client.V1Volume(
        name="config",
        projected=client.V1ProjectedVolumeSource(
            sources=[pmap_ini_conf, pmap_ini_keys, sec_keys]))
    pvc_minio = client.V1PersistentVolumeClaim(
        metadata=client.V1ObjectMeta(name="data"),
        spec=client.V1PersistentVolumeClaimSpec(
            access_modes=["ReadWriteOnce"],
            resources=client.V1ResourceRequirements(
                requests={"storage": "10Gi"})))
    # Deploy LocalEGA Pods
    deploy_lega.deployment('mapper', 'nbisweden/ega-base:latest',
                           ["ega-id-mapper"], [], [mount_config],
                           [volume_config], patch=True)
    deploy_lega.deployment('keys', 'nbisweden/ega-base:latest',
                           ["ega-keyserver", "--keys", "/etc/ega/keys.ini"],
                           [env_lega_pass, env_keys_pass], [mount_config],
                           [volume_keys], ports=[8443], patch=True)
    deploy_lega.deployment('db', 'postgres:9.6', None,
                           [env_db_pass, env_db_user, env_db_name,
                            env_db_data],
                           [mount_db_data, mound_db_init],
                           [volume_db, volume_db_init], ports=[5432])
    deploy_lega.deployment('ingest', 'nbisweden/ega-base:latest',
                           ["ega-ingest"],
                           [env_lega_pass, env_acc_s3, env_sec_s3,
                            env_db_pass],
                           [mount_config, mount_inbox],
                           [volume_config, volume_inbox])
    deploy_lega.stateful_set('minio', 'minio/minio:latest', None,
                             [env_acc_minio, env_sec_minio], [mount_minio],
                             None, args=["server", "/data"],
                             vol_claims=[pvc_minio], ports=[9000])
    deploy_lega.stateful_set('verify', 'nbisweden/ega-base:latest',
                             ["ega-verify"],
                             [env_acc_s3, env_sec_s3, env_lega_pass,
                              env_db_pass],
                             [mount_config], [volume_config])
    deploy_lega.stateful_set('mq', 'rabbitmq:3.6.14-management',
                             ["/script/mq.sh"], [env_cega_mq],
                             [mount_mq_temp, mount_mq_script,
                              mount_mq_rabbitmq],
                             [volume_mq_temp, volume_mq_script,
                              volume_rabbitmq],
                             ports=[15672, 5672, 4369, 25672])
    deploy_lega.stateful_set('inbox', 'nbisweden/ega-mina-inbox:latest', None,
                             [env_inbox_mq, env_cega_api, env_cega_creds,
                              env_inbox_port],
                             [mount_inbox], [volume_inbox], ports=[2222])
    # Ports
    ports_db = [client.V1ServicePort(protocol="TCP", port=5432,
                                     target_port=5432)]
    ports_inbox = [client.V1ServicePort(protocol="TCP", port=2222,
                                        target_port=2222)]
    ports_s3 = [client.V1ServicePort(name="web", protocol="TCP", port=9000)]
    ports_keys = [client.V1ServicePort(protocol="TCP", port=8443,
                                       target_port=8443)]
    ports_mq_management = [client.V1ServicePort(name="http", protocol="TCP",
                                                port=15672,
                                                target_port=15672)]
    ports_mq = [client.V1ServicePort(name="amqp", protocol="TCP", port=5672,
                                     target_port=5672),
                client.V1ServicePort(name="epmd", protocol="TCP", port=4369,
                                     target_port=4369),
                client.V1ServicePort(name="rabbitmq-dist", protocol="TCP",
                                     port=25672, target_port=25672)]
    # Deploy Services
    deploy_lega.service('db', ports_db)
    deploy_lega.service('mq-management', ports_mq_management, pod_name="mq",
                        type="NodePort")
    deploy_lega.service('mq', ports_mq)
    deploy_lega.service('keys', ports_keys)
    deploy_lega.service('inbox', ports_inbox, type="NodePort")
    deploy_lega.service('minio', ports_s3)
    # Headless
    deploy_lega.service('minio-service', ports_s3, pod_name="minio",
                        type="LoadBalancer")
    # Autoscale the ingest deployment on CPU utilization.
    metric_cpu = client.V2beta1MetricSpec(
        type="Resource",
        resource=client.V2beta1ResourceMetricSource(
            name="cpu", target_average_utilization=50))
    deploy_lega.horizontal_scale("ingest", "ingest", "Deployment", 5,
                                 [metric_cpu])
    if fake_cega:
        deploy_fake_cega(deploy_lega)
def _parse_pairs(items):
    """Turn an iterable of ``key=value`` strings into a dict.

    Splits on the first ``=`` only, so values may themselves contain ``=``
    (the original labels/selectors parsing used an unbounded split and
    crashed on such values).
    """
    return dict(item.split('=', 1) for item in items)


def _build_metadata(data):
    """Build job-level metadata from ``data``.

    Returns ``(meta, labels, annotations)`` — labels/annotations are also
    reused verbatim on the pod-template metadata, as in the original.
    """
    meta = client.V1ObjectMeta(name=data["name"], namespace=data["namespace"])
    labels = None
    if "labels" in data:
        labels = _parse_pairs(data["labels"].split(','))
        meta.labels = labels
    annotations = None
    if "annotations" in data:
        # NOTE(review): annotations arrive as YAML while labels arrive as
        # 'k=v,k=v' — asymmetric, but preserved as part of the input contract.
        annotations = yaml.full_load(data["annotations"])
        meta.annotations = annotations
    return meta, labels, annotations


def _secret_source(ref_name, ref_key):
    """valueFrom source pointing at key ``ref_key`` of Secret ``ref_name``."""
    return client.V1EnvVarSource(
        secret_key_ref=client.V1SecretKeySelector(key=ref_key, name=ref_name))


def _config_map_source(ref_name, ref_key):
    """valueFrom source pointing at key ``ref_key`` of ConfigMap ``ref_name``."""
    return client.V1EnvVarSource(
        config_map_key_ref=client.V1ConfigMapKeySelector(key=ref_key,
                                                         name=ref_name))


def _ref_envs(data, data_key, make_source):
    """Parse ``ENV=name:key`` lines under ``data[data_key]`` into env vars.

    Lines whose value contains no ``:`` are silently skipped (original
    behavior). Only ``value_from`` is set — the original also set a
    conflicting ``value=""``, dropped here for consistency with the other
    valueFrom-style builders in this file.
    """
    envs = []
    if data_key not in data:
        return envs
    for env_name, value in _parse_pairs(data[data_key].splitlines()).items():
        if ":" not in value:
            continue
        parts = value.split(':')
        ref_name, ref_key = parts[0], parts[1]
        envs.append(client.V1EnvVar(name=env_name,
                                    value_from=make_source(ref_name, ref_key)))
    return envs


def _build_envs(data):
    """Assemble the container env list.

    Order is preserved from the original: plain/fieldRef entries from
    ``environments`` first, then secret refs, then config-map refs.
    """
    envs = []
    if "environments" in data:
        for item in yaml.full_load(data["environments"]):
            if 'value' in item.keys():
                envs.append(client.V1EnvVar(name=item['name'],
                                            value=item['value']))
            elif 'valueFrom' in item.keys():
                # Only fieldRef sources are supported here; other valueFrom
                # variants are ignored, as before.
                if 'fieldRef' in item['valueFrom'].keys():
                    field_path = item['valueFrom']['fieldRef']['fieldPath']
                    envs.append(client.V1EnvVar(
                        name=item['name'],
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path=field_path))))
    envs.extend(_ref_envs(data, "environments_secrets", _secret_source))
    envs.extend(_ref_envs(data, "environments_configs", _config_map_source))
    return envs


def _build_container(data, envs):
    """Build the single job container: command, args, resources, mounts,
    env and envFrom sources."""
    container = client.V1Container(
        name=data["container_name"],
        image=data["container_image"],
        image_pull_policy=data["image_pull_policy"])
    if "container_command" in data:
        container.command = data["container_command"].split(' ')
    if "container_args" in data:
        container.args = data["container_args"].splitlines()
    if "resources_requests" in data:
        container.resources = client.V1ResourceRequirements(
            requests=_parse_pairs(data["resources_requests"].split(',')))
    if "volume_mounts" in data:
        container.volume_mounts = common.create_volume_mount_yaml(data)
    container.env = envs
    if "env_from" in data:
        env_from = []
        for src in yaml.full_load(data["env_from"]):
            if 'configMapRef' in src:
                env_from.append(client.V1EnvFromSource(
                    config_map_ref=client.V1ConfigMapEnvSource(
                        name=src['configMapRef']['name'])))
            elif 'secretRef' in src:
                env_from.append(client.V1EnvFromSource(
                    secret_ref=client.V1SecretEnvSource(
                        name=src['secretRef']['name'])))
        container.env_from = env_from
    return container


def _build_pod_spec(data, container):
    """Build the pod spec: volumes, pull secrets, tolerations, node selector."""
    pod_spec = client.V1PodSpec(containers=[container],
                                restart_policy=data["job_restart_policy"])
    if "volumes" in data:
        volumes_data = yaml.safe_load(data["volumes"])
        volumes = []
        if isinstance(volumes_data, list):
            for volume_data in volumes_data:
                volume = common.create_volume(volume_data)
                if volume:
                    volumes.append(volume)
        else:
            volume = common.create_volume(volumes_data)
            if volume:
                volumes.append(volume)
        pod_spec.volumes = volumes
    if "image_pull_secrets" in data:
        pod_spec.image_pull_secrets = [
            client.V1LocalObjectReference(name=image)
            for image in data["image_pull_secrets"].split(',')]
    if "tolerations" in data:
        tolerations = []
        for toleration_data in yaml.safe_load(data["tolerations"]):
            toleration = common.create_toleration(toleration_data)
            if toleration:
                tolerations.append(toleration)
        pod_spec.tolerations = tolerations
    if "node_selector" in data:
        # FIX: nodeSelector is a PodSpec field. The original assigned
        # ``spec.nodeSelector`` on V1JobSpec — an attribute the client model
        # does not declare, so it was silently dropped during serialization
        # and the selector never reached the API server.
        pod_spec.node_selector = _parse_pairs(data["node_selector"].split(','))
    return pod_spec


def _build_job_spec(data, template):
    """Build the V1JobSpec: completions, selector, parallelism, deadlines."""
    spec = client.V1JobSpec(template=template)
    if "completions" in data:
        spec.completions = int(data["completions"])
    if "selectors" in data:
        # FIX: V1JobSpec.selector expects a LabelSelector object; the
        # original assigned a bare dict, which serializes into an invalid
        # Job manifest (the API expects ``selector.matchLabels``).
        spec.selector = client.V1LabelSelector(
            match_labels=_parse_pairs(data["selectors"].split(',')))
    if "parallelism" in data:
        spec.parallelism = int(data["parallelism"])
    if "active_deadline_seconds" in data:
        spec.active_deadline_seconds = int(data["active_deadline_seconds"])
    if "backoff_limit" in data:
        spec.backoff_limit = int(data["backoff_limit"])
    return spec


def create_job_object(data):
    """Build a ``client.V1Job`` from the flat ``data`` mapping.

    Required keys: ``name``, ``namespace``, ``container_name``,
    ``container_image``, ``image_pull_policy``, ``job_restart_policy``,
    ``api_version``.

    Optional keys (all strings): ``labels``/``selectors``/``node_selector``
    as ``k=v,k=v``; ``annotations``/``environments``/``env_from`` as YAML;
    ``environments_secrets``/``environments_configs`` as ``ENV=name:key``
    lines; ``container_command`` (space separated), ``container_args``
    (one per line), ``resources_requests`` (``k=v,k=v``), ``volumes`` /
    ``volume_mounts`` / ``tolerations`` (delegated to ``common`` helpers),
    ``image_pull_secrets`` (comma separated), plus integer-valued
    ``completions``, ``parallelism``, ``active_deadline_seconds`` and
    ``backoff_limit``.

    Returns the assembled ``client.V1Job`` (not submitted to any cluster).
    """
    meta, labels, annotations = _build_metadata(data)
    envs = _build_envs(data)
    container = _build_container(data, envs)
    pod_spec = _build_pod_spec(data, container)
    # The pod template reuses the job's name/labels/annotations verbatim.
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=data["name"],
                                     labels=labels,
                                     annotations=annotations),
        spec=pod_spec)
    return client.V1Job(api_version=data["api_version"],
                        kind='Job',
                        metadata=meta,
                        spec=_build_job_spec(data, template))