def create_secret(
    self, name: str, namespace: str, secret_type: SecretType, token: Token
) -> None:
    """Store a token in a newly created Kubernetes secret.

    The token is always placed under the ``token`` key of the secret's
    data field.

    Parameters
    ----------
    name : `str`
        Name of secret to create.
    namespace : `str`
        Namespace in which to create the secret.
    secret_type : `SecretType`
        Type of token stored in the secret.
    token : `gafaelfawr.models.token.Token`
        The token to store.
    """
    labels = {KUBERNETES_TOKEN_TYPE_LABEL: secret_type.value}
    body = V1Secret(
        api_version="v1",
        data={"token": self._encode_token(token)},
        metadata=V1ObjectMeta(labels=labels, name=name, namespace=namespace),
        type="Opaque",
    )
    self._api.create_namespaced_secret(namespace, body)
def put_secret(self, name: str, data: dict, namespace: str = 'default'):
    '''Create or replace the named secret.

    The secret's contents are taken from a dictionary of scalar values.
    Each value is converted with ``str`` and the result is UTF-8 encoded
    before base64 encoding, so callers must pass raw (not pre-encoded)
    values.
    '''
    ne = not_empty
    meta = V1ObjectMeta(name=ne(name), namespace=ne(namespace))
    encoded = {
        key: base64.b64encode(str(value).encode('utf-8')).decode('utf-8')
        for key, value in data.items()
    }
    body = V1Secret(metadata=meta, data=encoded)
    # Probe for an existing secret to decide between replace and create.
    try:
        self.core_api.read_namespaced_secret(name=name, namespace=namespace)
        exists = True
    except ApiException as ae:
        # only a 404 ("not found") is expected here
        if ae.status != 404:
            raise ae
        exists = False
    if exists:
        self.core_api.replace_namespaced_secret(
            name=name, namespace=namespace, body=body)
    else:
        self.core_api.create_namespaced_secret(namespace=namespace, body=body)
def process(self, module_idx: int) -> None:
    """Ensure a Secret named "secret" holding DATADOG_API_KEY exists in the
    layer's namespace, then expose the key via ``self.module.data``."""
    if self.layer.is_stateless_mode() is True:
        # no k8s available, skip injecting secret
        super(DatadogProcessor, self).process(module_idx)
        return
    set_kube_config(self.layer)
    load_opta_kube_config()
    v1 = CoreV1Api()
    # Update the secrets: create the layer's namespace first if missing.
    namespaces = v1.list_namespace(
        field_selector=f"metadata.name={self.layer.name}")
    if len(namespaces.items) == 0:
        v1.create_namespace(
            body=V1Namespace(metadata=V1ObjectMeta(name=self.layer.name))
        )
    try:
        secret = v1.read_namespaced_secret("secret", self.layer.name)
        if (
            "DATADOG_API_KEY" not in secret.data
            or secret.data["DATADOG_API_KEY"] == ""
        ):
            # Key slot missing/empty: have create_secret populate it.
            api_key = self.create_secret(v1)
        else:
            api_key = base64.b64decode(secret.data["DATADOG_API_KEY"]).decode("utf-8")
    except ApiException:
        # Secret not readable (presumably a 404): create a placeholder secret,
        # then populate the key. NOTE(review): this catches *every*
        # ApiException, not just 404 — confirm that is intended.
        v1.create_namespaced_secret(
            namespace=self.layer.name,
            body=V1Secret(
                metadata=V1ObjectMeta(name="secret"),
                string_data={"DATADOG_API_KEY": ""},
            ),
        )
        api_key = self.create_secret(v1)
    self.module.data["api_key"] = api_key
    super(DatadogProcessor, self).process(module_idx)
def hub_pod_ssl(kube_client, kube_ns, ssl_app):
    """Start a hub pod with internal_ssl enabled.

    Packs the internal certs directory into an in-memory tarball, stores it
    in a secret, exposes the hub through a ClusterIP service, and starts the
    hub pod with ssl enabled.
    """
    # load ssl dir to tarfile
    buf = io.BytesIO()
    tf = tarfile.TarFile(fileobj=buf, mode="w")
    tf.add(ssl_app.internal_certs_location, arcname="internal-ssl", recursive=True)
    # BUG FIX: TarFile only writes the end-of-archive blocks on close(); the
    # buffer was previously read before closing, storing a truncated tarball.
    tf.close()

    # store tarfile in a secret
    b64_certs = base64.b64encode(buf.getvalue()).decode("ascii")
    secret_name = "hub-ssl-secret"
    secret_manifest = V1Secret(
        metadata={"name": secret_name}, data={"internal-ssl.tar": b64_certs}
    )
    create_resource(kube_client, kube_ns, "secret", secret_manifest)

    name = "hub-ssl"
    service_manifest = V1Service(
        metadata=dict(name=name),
        spec=V1ServiceSpec(
            type="ClusterIP",
            ports=[V1ServicePort(port=8081, target_port=8081)],
            selector={"hub-name": name},
        ),
    )
    create_resource(kube_client, kube_ns, "service", service_manifest)

    return create_hub_pod(
        kube_client,
        kube_ns,
        pod_name=name,
        ssl=True,
    )
def hub_pod_ssl(kube_client, kube_ns, ssl_app):
    """Start a hub pod with internal_ssl enabled, storing certs in a secret."""
    # NOTE(review): ssl_config is read but never used below — confirm whether
    # this read is only meant to assert that the config file exists.
    with open(jupyterhub_ssl_config_py) as f:
        ssl_config = f.read()

    # load ssl dir to tarfile
    buf = io.BytesIO()
    tf = tarfile.TarFile(fileobj=buf, mode="w")
    tf.add(ssl_app.internal_certs_location, arcname="internal-ssl", recursive=True)
    # BUG FIX: finalize the archive before reading the buffer; without
    # close() the end-of-archive blocks are missing and the stored tarball
    # is truncated.
    tf.close()

    # store tarfile in a secret
    b64_certs = base64.b64encode(buf.getvalue()).decode("ascii")
    print("Creating hub ssl secret")
    secret_name = "hub-ssl-secret"
    ensure_not_exists(kube_client, kube_ns, secret_name, "secret")
    secret_manifest = V1Secret(
        metadata={"name": secret_name}, data={"internal-ssl.tar": b64_certs}
    )
    kube_client.create_namespaced_secret(body=secret_manifest, namespace=kube_ns)
    return create_hub_pod(
        kube_client,
        kube_ns,
        pod_name="hub-ssl",
        ssl=True,
    )
def create_gcr_secret(self, namespace: str, name: str, password: str, email: str,
                      user_name: str = '_json_key', server_url: str = 'https://eu.gcr.io'):
    """Create a ``kubernetes.io/dockercfg`` secret with GCR pull credentials."""
    # dockercfg "auth" field is the base64 of "<user>:<password>"
    auth = '{user}:{gcr_secret}'.format(user=user_name, gcr_secret=password)
    registry_entry = {
        'username': user_name,
        'email': email,
        'password': password,
        'auth': base64.b64encode(auth.encode('utf-8')).decode('utf-8'),
    }
    dockercfg_b64 = base64.b64encode(
        json.dumps({server_url: registry_entry}).encode('utf-8')).decode('utf-8')
    secret = V1Secret(metadata=V1ObjectMeta(name=name, namespace=namespace))
    secret.data = {'.dockercfg': dockercfg_b64}
    secret.type = 'kubernetes.io/dockercfg'
    self.core_api.create_namespaced_secret(namespace=namespace, body=secret)
def _save_private_ssh_key_in_k8s_secret(self, private_key: str, namespace: str):
    """Store *private_key* in a secret named ``git-secret`` in *namespace*.

    BUG FIX: the previous ``base64.encodebytes`` call inserts a line break
    every 76 characters plus a trailing newline; the Kubernetes API server
    decodes secret ``data`` values with a strict base64 decoder and rejects
    embedded newlines. ``b64encode`` produces the required single-line value.
    """
    base64_private_key = base64.b64encode(
        bytes(private_key, encoding=_encoding)).decode(_encoding)
    private_key_secret = V1Secret(
        data={'private_key': base64_private_key},
        metadata=V1ObjectMeta(name='git-secret'))
    create_secret(namespace=namespace, secret_body=private_key_secret)
def test_delete_secret_bad_resource_version(credstash_get_secret_mock):
    """A stale resource version must make delete_secret bail out untouched."""
    controller = CredStashController("none", "none", "none", "none", "none")
    controller.v1core = MagicMock()
    annotations = {
        "credstash-fully-managed": "true",
        "credstash-resourceversion": "2",
    }
    meta = V1ObjectMeta(name="bobo", namespace="default", annotations=annotations)
    existing = V1Secret("v1", {}, "Secret", meta)
    controller.v1core.read_namespaced_secret = MagicMock(return_value=existing)
    credstash_secret = {
        "metadata": {"namespace": "test", "name": "boom"},
        "spec": [{"from": "ba", "name": "lala", "version": "0001"}],
    }
    # Stored resourceversion (2) is newer than the requested one (1).
    assert controller.delete_secret(credstash_secret, resource_version=1) is None
    credstash_get_secret_mock.assert_not_called()
    controller.v1core.patch_namespaced_secret.assert_not_called()
    controller.v1core.delete_namespaced_secret.assert_not_called()
def create_secret_if_not_exists(namespace: str, secret_name: str) -> None:
    """Create an empty secret in ``namespace`` unless one already exists."""
    load_opta_kube_config()
    core_api = CoreV1Api()
    matching: V1SecretList = core_api.list_namespaced_secret(
        namespace, field_selector=f"metadata.name={secret_name}")
    if not matching.items:
        body = V1Secret(metadata=V1ObjectMeta(name=secret_name))
        core_api.create_namespaced_secret(namespace, body=body)
def _get_secret_manifest(self, data):
    """Build the V1Secret manifest that will hold the user's token."""
    metadata = V1ObjectMeta(
        name=self.token_secret_name,
        labels=self._build_common_labels({}),
        annotations=self._build_common_annotations({}),
    )
    return V1Secret(metadata=metadata, type="Opaque", data=data)
def test_updateSecret(self, client_mock):
    """updateSecret reads the secret, then patches it with the new data."""
    service = KubernetesService()
    client_mock.reset_mock()
    client_mock.CoreV1Api.return_value.read_namespaced_secret.return_value = \
        V1Secret(kind="unit")
    secret_data = {"username": "******", "password": "******"}
    expected_body = V1Secret(kind="unit", string_data=secret_data)
    result = service.updateSecret(self.name, self.namespace, secret_data)
    self.assertEqual([
        call.CoreV1Api().read_namespaced_secret(self.name, self.namespace),
        call.CoreV1Api().patch_namespaced_secret(self.name, self.namespace,
                                                 expected_body),
    ], client_mock.mock_calls)
    self.assertEqual(
        client_mock.CoreV1Api.return_value.patch_namespaced_secret.return_value,
        result)
def setUp(self):
    """Wire up a BackupChecker against a mocked Kubernetes service."""
    self.cluster_dict = getExampleClusterDefinition()
    self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
    self.kubernetes_service = MagicMock()
    self.checker = BackupChecker(self.kubernetes_service)
    credentials_json = json.dumps({"user": "******"}).encode()
    self.dummy_credentials = b64encode(credentials_json)
    self.kubernetes_service.getSecret.return_value = V1Secret(
        data={"json": self.dummy_credentials})
def __init__(self, api_client, name, secret_type, variables=None, namespace='default'):
    """Render the secret template and initialise the V1Secret state.

    Parameters
    ----------
    api_client : Kubernetes API client used to build the CoreV1Api handle.
    name : name of the secret.
    secret_type : secret type rendered into the template.
    variables : optional list of template variables (defaults to empty).
    namespace : namespace the secret lives in.
    """
    # BUG FIX: a mutable default argument ([]) is shared across every call;
    # use a None sentinel and build a fresh list per call instead.
    if variables is None:
        variables = []
    self.api = client.CoreV1Api(api_client)
    self.name = name
    self.namespace = namespace
    self._secret_type = secret_type
    self._variables = variables
    context = {'name': name, 'type': secret_type, 'variables': variables}
    config = yaml.safe_load(
        self.generate_template('secret.yaml.j2', context))
    # Explicit V1Secret.__init__ call kept (not super()) to preserve the
    # original initialisation path of the parent class.
    V1Secret.__init__(self,
                      api_version=config['apiVersion'],
                      kind=config['kind'],
                      metadata=config['metadata'],
                      data=config['data'],
                      type=config['type'])
async def test_ignore(setup: SetupTest, mock_kubernetes: MockCoreV1Api) -> None:
    """Secrets we do not own must pass through the sync untouched."""
    assert setup.config.kubernetes
    kubernetes_service = setup.factory.create_kubernetes_service()

    # A secret carrying no token-type label at all.
    unlabeled = V1Secret(
        api_version="v1",
        data={"foo": "bar"},
        metadata=V1ObjectMeta(name="secret-one", namespace="mobu"),
        type="Opaque",
    )
    mock_kubernetes.create_namespaced_secret("mobu", unlabeled)

    # A secret with the label but a value that is not ours.
    mislabeled = V1Secret(
        api_version="v1",
        data={"token": token_as_base64(Token())},
        metadata=V1ObjectMeta(
            labels={KUBERNETES_TOKEN_TYPE_LABEL: "other"},
            name="secret-two",
            namespace="elsewhere",
        ),
        type="Opaque",
    )
    mock_kubernetes.create_namespaced_secret("elsewhere", mislabeled)

    # Synchronize; neither foreign secret may be modified.
    await kubernetes_service.update_service_secrets()
    objects = mock_kubernetes.get_all_objects_for_test()
    assert unlabeled in objects
    assert mislabeled in objects

    # Remove the foreign secrets and verify the managed ones are correct.
    mock_kubernetes.delete_namespaced_secret("secret-one", "mobu")
    mock_kubernetes.delete_namespaced_secret("secret-two", "elsewhere")
    await assert_kubernetes_secrets_match_config(setup, mock_kubernetes)
def create_secret(v1: CoreV1Api, name: str, data: Dict[str, str], typ: str = 'from-literal'):
    """Create secret ``name`` in the default namespace unless it exists."""
    if name in list_secret_names(v1):
        logger.info(f"Secret {name} already exists. Skipping.")
        return
    logger.info(f"Creating {name}...")
    body = V1Secret(
        api_version='v1',
        kind='Secret',
        metadata=V1ObjectMeta(name=name, namespace='default'),
        type=typ,
        data=data,
    )
    return v1.create_namespaced_secret(namespace='default', body=body)
def get_reference_object(self) -> V1Secret:
    """Get deployment object for outpost"""
    config = self.controller.outpost.config
    payload = {
        "authentik_host": b64string(config.authentik_host),
        "authentik_host_insecure": b64string(str(config.authentik_host_insecure)),
        "token": b64string(self.controller.outpost.token.key),
    }
    return V1Secret(
        metadata=self.get_object_meta(name=self.name),
        data=payload,
    )
def test_createSecret(self, client_mock):
    """createSecret posts the new secret body and returns the API response."""
    service = KubernetesService()
    client_mock.reset_mock()
    secret_data = {"username": "******", "password": "******"}
    result = service.createSecret("secret-name", self.namespace, secret_data)
    expected_body = V1Secret(metadata=self._createMeta("secret-name"),
                             string_data=secret_data)
    self.assertEqual(
        [call.CoreV1Api().create_namespaced_secret(self.namespace, expected_body)],
        client_mock.mock_calls)
    self.assertEqual(
        client_mock.CoreV1Api.return_value.create_namespaced_secret.return_value,
        result)
def setUp(self):
    """Prepare a RestoreHelper wired to a mocked Kubernetes service."""
    self.cluster_dict = getExampleClusterDefinitionWithRestore()
    self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
    self.kubernetes_service = MagicMock()
    self.restore_helper = RestoreHelper(self.kubernetes_service)
    self.dummy_credentials = b64encode(json.dumps({"user": "******"}).encode())
    self.kubernetes_service.getSecret.return_value = V1Secret(
        data={"json": self.dummy_credentials})
    self.expected_cluster_members = [
        "mongo-cluster-0.mongo-cluster.mongo-operator-cluster.svc.cluster.local",
        "mongo-cluster-1.mongo-cluster.mongo-operator-cluster.svc.cluster.local",
        "mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local",
    ]
def test_createSecret_exists(self, client_mock):
    """A 409 conflict from the API makes createSecret return None."""
    service = KubernetesService()
    client_mock.reset_mock()
    conflict = ApiException(status=409)
    conflict.body = "{}"
    client_mock.CoreV1Api.return_value.create_namespaced_secret.side_effect = conflict
    secret_data = {"username": "******", "password": "******"}
    result = service.createSecret(self.name, self.namespace, secret_data)
    expected_body = V1Secret(metadata=self._createMeta(self.name),
                             string_data=secret_data)
    self.assertEqual(
        [call.CoreV1Api().create_namespaced_secret(self.namespace, expected_body)],
        client_mock.mock_calls)
    self.assertIsNone(result)
def test_configures_logging_format_and_logs_errors_to_datadog(
        self, mock_log_config, mock_dd_init, mock_log_errors,
        mock_k8s_api_class, mock_k8s_config, mock_gethostname):
    """configure_logging must set the log format, read environment/API-key
    data from the cluster, initialise datadog, and tag error logging with
    environment, service (namespace), and pod role."""
    mock_k8s_api = mock_k8s_api_class.return_value
    mock_gethostname.return_value = "podname.domain"
    # Cluster-side secret provides the environment name and datadog API key.
    mock_k8s_api.read_namespaced_secret.return_value = V1Secret(
        data={
            "environment": base64.b64encode("dev".encode("utf8")),
            "datadog-api-key": base64.b64encode("dd-api-key".encode(
                "utf8")),
        })
    # Pod labels supply the "role" tag.
    mock_k8s_api.read_namespaced_pod.return_value = V1Pod(
        metadata=V1ObjectMeta(labels={"role": "my-role"}))
    # The namespace file read is mocked to return the service name "my-app".
    with mock.patch("ustack_logging.logging_configuration.open",
                    mock.mock_open(read_data="my-app"),
                    create=True) as mock_open:
        configure_logging()
    mock_log_config.assert_called_once_with(
        format="%(asctime)s %(levelname)s:%(module)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S%z",
        level=logging.INFO)
    mock_k8s_config.assert_called_once_with()
    mock_k8s_api.read_namespaced_secret.assert_called_once_with(
        "environment-info", "ustudio-system")
    mock_dd_init.assert_called_once_with(api_key="dd-api-key")
    mock_open.assert_called_once_with(
        "/var/run/secrets/kubernetes.io/serviceaccount/namespace")
    mock_gethostname.assert_called_once_with()
    # Pod name is derived from the hostname's first dot-separated component.
    mock_k8s_api.read_namespaced_pod.assert_called_once_with(
        "podname", "my-app")
    mock_log_errors.assert_called_once_with(
        tags=["environment:dev", "service:my-app", "role:my-role"])
def to_kubesecret(self, client: CoreV1Api) -> Generator[Tuple[str, V1Secret], None, None]:
    """Yield (namespace, V1Secret) pairs for every target namespace."""
    manifest = V1Secret(
        api_version="v1",
        data=self.data,
        kind="Secret",
        type=self.k8s_type,
        metadata=V1ObjectMeta(annotations=self.annotations,
                              name=self.k8s_secret_name),
    )
    target_namespaces = self.k8s_namespaces
    every_namespace = [ns.metadata.name for ns in client.list_namespace().items]
    # A "*" entry means the secret goes to every namespace.
    if "*" in target_namespaces:
        target_namespaces = every_namespace
    # TODO handle namespace globs
    for namespace in target_namespaces:
        yield namespace, manifest
def write_secret(self, secret_name, data, key_name=None, notebook_namespace=None):
    """Replace (or, on 404, create) a jupyterhub-labelled secret."""
    if notebook_namespace is None:
        notebook_namespace = self.namespace
    secret = V1Secret()
    secret.metadata = V1ObjectMeta(name=secret_name, labels={'app': 'jupyterhub'})
    # stringData (rather than data) makes kubernetes accept the value as a
    # plain string without base64 encoding.
    if key_name:
        secret.string_data = {key_name: yaml.dump(data, default_flow_style=False)}
    else:
        secret.string_data = data
    try:
        api_response = self.api_client.replace_namespaced_secret(
            secret_name, notebook_namespace, secret)
    except ApiException as e:
        if e.status != 404:
            raise
        # Secret did not exist yet: fall back to creating it.
        try:
            api_response = self.api_client.create_namespaced_secret(
                notebook_namespace, secret)
        except ApiException as e:
            _LOGGER.error("Exception when calling CoreV1Api->create_namespaced_secret: %s\n" % e)
async def assert_kubernetes_secrets_match_config(
    setup: SetupTest, mock_kubernetes: MockCoreV1Api, is_fresh: bool = True
) -> None:
    """Check every configured service secret exists and holds a valid token."""
    assert setup.config.kubernetes
    token_service = setup.factory.create_token_service()

    # The mock store must contain exactly one secret per configured service.
    expected = []
    for s in setup.config.kubernetes.service_secrets:
        expected.append(V1Secret(
            api_version="v1",
            data={"token": ANY},
            metadata=V1ObjectMeta(
                labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"},
                name=s.secret_name,
                namespace=s.secret_namespace,
            ),
            type="Opaque",
        ))
    assert_kubernetes_objects_are(mock_kubernetes, expected)

    # Each stored token must decode to the expected service token data.
    for service_secret in setup.config.kubernetes.service_secrets:
        secret = mock_kubernetes.read_namespaced_secret(
            service_secret.secret_name, service_secret.secret_namespace)
        data = await token_data_from_secret(token_service, secret)
        expected_data = TokenData(
            token=data.token,
            username=service_secret.service,
            token_type=TokenType.service,
            scopes=service_secret.scopes,
            created=data.created,
            expires=None,
            name=None,
            uid=None,
            groups=None,
        )
        assert data == expected_data
        if is_fresh:
            now = current_datetime()
            assert now - timedelta(seconds=5) <= data.created <= now
def _create_token_for_sa(
    core_api: CoreV1Api,
    service_account: V1ServiceAccount,
) -> str:
    """Create a service-account token secret and return its generated name."""
    sa_name = service_account.metadata.name
    sa_namespace = service_account.metadata.namespace
    secret_body = V1Secret(
        api_version='v1',
        kind='Secret',
        metadata=V1ObjectMeta(
            generate_name=f'{sa_name}-token-',
            annotations={
                'kubernetes.io/service-account.name': sa_name
            },
        ),
        type='kubernetes.io/service-account-token',
    )
    token = core_api.create_namespaced_secret(
        namespace=sa_namespace,
        body=secret_body,
    )
    # not all required values are set on the returned object yet. Return only
    # the name (generated by the kube-apiserver) so it can be fetched later.
    return token.metadata.name
async def test_create_not_ours(setup: SetupTest, mock_kubernetes: MockCoreV1Api,
                               caplog: LogCaptureFixture) -> None:
    """A pre-existing secret without our annotation must not be overwritten."""
    assert setup.config.kubernetes
    assert len(setup.config.kubernetes.service_secrets) >= 1
    service_secret = setup.config.kubernetes.service_secrets[-1]
    kubernetes_service = setup.factory.create_kubernetes_service()

    # Plant a secret at the expected location, but lacking our annotation.
    intruder = V1Secret(
        api_version="v1",
        data={"token": token_as_base64(Token())},
        metadata=V1ObjectMeta(
            name=service_secret.secret_name,
            namespace=service_secret.secret_namespace,
        ),
        type="Opaque",
    )
    mock_kubernetes.create_namespaced_secret(
        service_secret.secret_namespace, intruder)

    # Synchronize; the foreign secret must survive and an error be logged.
    await kubernetes_service.update_service_secrets()
    assert intruder in mock_kubernetes.get_all_objects_for_test()
    assert json.loads(caplog.record_tuples[-1][2]) == {
        "event": (f"Creating {service_secret.secret_namespace}"
                  f"/{service_secret.secret_name} failed"),
        "error": (f"Kubernetes API error: (500)\n"
                  f"Reason: {service_secret.secret_namespace}"
                  f"/{service_secret.secret_name} exists\n"),
        "level": "error",
        "logger": "gafaelfawr",
    }
def perform_cloud_ops():
    """Provision the GKE-hosted stack: traefik ingress, admin webui (+ dind),
    and a NiFi registry, then kick off NiFi/build/mirror operations.

    Requires environment: DOMAIN, STATIC_IP, ADMIN_EMAIL, OAUTH_CLIENT_ID,
    OAUTH_CLIENT_SECRET, OAUTH_SECRET, OAUTH_DOMAIN, DJANGO_SECRET_KEY,
    GOOGLE_APPLICATION_CREDENTIALS, GOOGLE_CLOUD_PROJECT.
    """
    # set GOOGLE_APPLICATION_CREDENTIALS env to credentials file
    # set GOOGLE_CLOUD_PROJECT env to project id
    domain = os.getenv('DOMAIN')
    assert domain
    logger.info(f'using domain: {domain}')
    static_ip = os.getenv('STATIC_IP')
    assert static_ip
    logger.info(f'using static IP: {static_ip}')
    admin_email = os.getenv('ADMIN_EMAIL')
    assert admin_email
    logger.info(f'using ACME admin email: {admin_email}')
    oauth_client_id = os.getenv('OAUTH_CLIENT_ID')
    assert oauth_client_id
    logger.info(f'using oauth client id: {oauth_client_id}')
    oauth_client_secret = os.getenv('OAUTH_CLIENT_SECRET')
    assert oauth_client_secret
    # NOTE(review): the following lines log secret material (client secret,
    # oauth secret, django key) at INFO level — confirm this is acceptable.
    logger.info(f'using oauth client secret: {oauth_client_secret}')
    oauth_secret = os.getenv('OAUTH_SECRET')
    assert oauth_secret
    logger.info(f'using oauth secret: {oauth_secret}')
    oauth_domain = os.getenv('OAUTH_DOMAIN')
    assert oauth_domain
    logger.info(f'using domain: {oauth_domain}')
    django_secret_key = os.getenv('DJANGO_SECRET_KEY')
    assert django_secret_key
    logger.info(f'using DJANGO_SECRET_KEY: {django_secret_key}')
    # Authenticate against GCP, then against the cluster's Kubernetes API.
    credentials, project = google.auth.default()
    gcloud_client = container_v1.ClusterManagerClient(credentials=credentials)
    scan_clusters(gcloud_client, project)
    # FIXME add the k8s cert to a trust store
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    auth_gcloud_k8s(credentials)
    api_core_v1 = client.CoreV1Api()
    api_apps_v1 = client.AppsV1Api()
    api_storage_v1 = client.StorageV1Api()
    api_custom = client.CustomObjectsApi()
    api_extensions_v1_beta1 = client.ExtensionsV1beta1Api()
    api_ext_v1_beta1 = client.ApiextensionsV1beta1Api()
    api_rbac_auth_v1_b1 = client.RbacAuthorizationV1beta1Api()
    # Ingress controller with ACME + oauth forward-auth.
    ensure_traefik(api_core_v1, api_ext_v1_beta1, api_apps_v1, api_custom,
                   api_rbac_auth_v1_b1, admin_email, domain, static_ip,
                   oauth_client_id, oauth_client_secret, oauth_domain,
                   oauth_secret)
    # Mirror the GCP service-account credentials into a cluster secret so the
    # webui pod can use them.
    with open(os.getenv('GOOGLE_APPLICATION_CREDENTIALS'), 'rb') as f:
        gcloud_credentials_b64 = b64encode(f.read()).decode('UTF-8')
    ensure_secret(api=api_core_v1,
                  name='webui-credentials',
                  namespace='default',
                  secret=V1Secret(
                      metadata=client.V1ObjectMeta(name='webui-credentials'),
                      data={'gcloud-credentials': gcloud_credentials_b64}))
    # Admin webui statefulset: app container plus a docker-in-docker sidecar
    # sharing a socket via an emptyDir volume.
    webui_volume_paths = [
        ('data', '/opt/nipyapi/data', '20Gi', 'standard'),
    ]
    webui_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in webui_volume_paths
    ]
    webui_volume_mounts.append(
        V1VolumeMount(name='webui-credentials', mount_path='/root/webui',
                      read_only=True))
    dind_volume_paths = [
        ('docker', '/var/lib/docker', '200Gi', 'standard'),
    ]
    dind_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in dind_volume_paths
    ]
    shared_volume_mounts = [
        V1VolumeMount(name='dind-socket', mount_path='/var/run-shared')
    ]
    ensure_statefulset_with_containers(
        api_apps_v1=api_apps_v1,
        name='admin',
        namespace='default',
        replicas=1,
        containers=[
            V1Container(
                name='webui',
                image='aichrist/nipyapi-ds:latest',
                env=[
                    # FIXME use k8s secrets for these values
                    V1EnvVar(name='DOMAIN', value=domain),
                    V1EnvVar(name='STATIC_IP', value=static_ip),
                    V1EnvVar(name='ADMIN_EMAIL', value=admin_email),
                    V1EnvVar(name='OAUTH_CLIENT_ID', value=oauth_client_id),
                    V1EnvVar(name='OAUTH_CLIENT_SECRET', value=oauth_client_secret),
                    V1EnvVar(name='OAUTH_SECRET', value=oauth_secret),
                    V1EnvVar(name='OAUTH_DOMAIN', value=oauth_domain),
                    V1EnvVar(name='DJANGO_SECRET_KEY', value=django_secret_key),
                    V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                             value='/root/webui/gcloud_credentials.json'),
                    V1EnvVar(name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                             value='/root/webui/gcloud_credentials.json'),
                    V1EnvVar(name='GOOGLE_CLOUD_PROJECT',
                             value=os.getenv('GOOGLE_CLOUD_PROJECT')),
                    V1EnvVar(name='DOCKER_HOST',
                             value='unix:///var/run-shared/docker.sock'),
                ],
                ports=[V1ContainerPort(container_port=8000)],
                volume_mounts=webui_volume_mounts + shared_volume_mounts),
            V1Container(
                name='dind',
                image='docker:19-dind',
                security_context=V1SecurityContext(privileged=True),
                command=[
                    'dockerd',
                    '-H', 'unix:///var/run-shared/docker.sock'
                ],
                volume_mounts=dind_volume_mounts + shared_volume_mounts)
        ],
        volumes=[
            V1Volume(name='dind-socket', empty_dir={}),
            V1Volume(name='webui-credentials',
                     projected=V1ProjectedVolumeSource(sources=[
                         V1VolumeProjection(secret=V1SecretProjection(
                             name='webui-credentials',
                             items=[
                                 V1KeyToPath(key='gcloud-credentials',
                                             path='gcloud_credentials.json')
                             ]))
                     ]))
        ],
        volume_paths=webui_volume_paths + dind_volume_paths)
    ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                              api_custom=api_custom,
                              domain=domain,
                              hostname='admin',
                              name='admin',
                              target_name='admin',
                              namespace='default',
                              port_name='web',
                              svc_port=80,
                              target_port=8000)
    # NiFi registry statefulset; an init container fixes volume ownership
    # before the registry starts.
    reg_volume_paths = [
        ('database', '/opt/nifi-registry/nifi-registry-current/database',
         '10Gi', 'standard'),
        ('flow-storage', '/opt/nifi-registry/nifi-registry-current/flow_storage',
         '20Gi', 'standard'),
    ]
    reg_volume_mounts = [
        V1VolumeMount(name=path[0], mount_path=path[1])
        for path in reg_volume_paths
    ]
    ensure_statefulset_with_containers(
        api_apps_v1=api_apps_v1,
        name='registry',
        namespace='default',
        replicas=1,
        containers=[
            V1Container(name='registry',
                        image='apache/nifi-registry:latest',
                        env=[
                            V1EnvVar(name='NIFI_REGISTRY_WEB_HTTP_PORT',
                                     value='19090'),
                        ],
                        ports=[V1ContainerPort(container_port=19090)],
                        volume_mounts=reg_volume_mounts),
        ],
        init_containers=[
            V1Container(
                name='init-permissions',
                image='busybox',
                command=[
                    'sh', '-c',
                    'chown -R 1000:1000 /opt/nifi-registry/nifi-registry-current'
                ],
                volume_mounts=[
                    V1VolumeMount(name=path[0], mount_path=path[1])
                    for path in reg_volume_paths
                ])
        ],
        volumes=[],
        volume_paths=reg_volume_paths)
    ensure_ingress_routed_svc(api_core_v1=api_core_v1,
                              api_custom=api_custom,
                              domain=domain,
                              hostname='registry',
                              name='registry',
                              target_name='registry',
                              namespace='default',
                              port_name='web',
                              svc_port=80,
                              target_port=19090)
    # Remaining operations: NiFi provisioning, then background build/mirror.
    perform_nifi_ops(api_apps_v1, api_core_v1, api_custom, domain)
    perform_build_ops_bg()
    perform_mirror_ops_bg()
def launch(self, name, docker_config: DockerConfig, mounts, env, blocking: bool = True):
    """Run an update job in kubernetes for the given service.

    Sets up an optional registry pull secret, removes any previous job with
    the same name, then creates a one-shot Job. If ``blocking`` is True,
    polls until the job finishes and then deletes it.
    """
    name = (self.prefix + 'update-' + name.lower()).replace('_', '-')
    # If we have been given a username or password for the registry, we have to
    # update it, if we haven't been, make sure its been cleaned up in the system
    # so we don't leave passwords lying around
    pull_secret_name = f'{name}-job-pull-secret'
    use_pull_secret = False
    try:
        # Check if there is already a username/password defined for this job
        current_pull_secret = self.api.read_namespaced_secret(
            pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)
    except ApiException as error:
        if error.status != 404:
            raise
        current_pull_secret = None
    if docker_config.registry_username or docker_config.registry_password:
        use_pull_secret = True
        # Build the secret we want to make
        new_pull_secret = V1Secret(
            metadata=V1ObjectMeta(name=pull_secret_name, namespace=self.namespace),
            type='kubernetes.io/dockerconfigjson',
            string_data={
                '.dockerconfigjson': create_docker_auth_config(
                    image=docker_config.image,
                    username=docker_config.registry_username,
                    password=docker_config.registry_password,
                )
            }
        )
        # Send it to the server
        if current_pull_secret:
            self.api.replace_namespaced_secret(
                pull_secret_name, namespace=self.namespace,
                body=new_pull_secret, _request_timeout=API_TIMEOUT)
        else:
            self.api.create_namespaced_secret(
                namespace=self.namespace, body=new_pull_secret,
                _request_timeout=API_TIMEOUT)
    elif current_pull_secret:
        # If there is a password set in kubernetes, but not in our configuration clear it out
        self.api.delete_namespaced_secret(
            pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)
    # Delete any leftover job with the same name, and poll until it is gone
    # (the read raises once the deletion completes, breaking the loop).
    try:
        self.batch_api.delete_namespaced_job(
            name=name, namespace=self.namespace,
            propagation_policy='Background', _request_timeout=API_TIMEOUT)
        while True:
            self.batch_api.read_namespaced_job(
                namespace=self.namespace, name=name,
                _request_timeout=API_TIMEOUT)
            time.sleep(1)
    except ApiException:
        pass
    # Build pod volumes/mounts from the requested persistent-claim mounts.
    volumes = []
    volume_mounts = []
    for index, mnt in enumerate(mounts):
        volumes.append(V1Volume(
            name=f'mount-{index}',
            persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                claim_name=mnt['volume'],
                read_only=False
            ),
        ))
        volume_mounts.append(V1VolumeMount(
            name=f'mount-{index}',
            mount_path=mnt['dest_path'],
            sub_path=mnt['source_path'],
            read_only=False,
        ))
    # Optionally mount the shared configuration configmap as config.yml.
    if CONFIGURATION_CONFIGMAP:
        volumes.append(V1Volume(
            name='mount-configuration',
            config_map=V1ConfigMapVolumeSource(
                name=CONFIGURATION_CONFIGMAP
            ),
        ))
        volume_mounts.append(V1VolumeMount(
            name='mount-configuration',
            mount_path='/etc/assemblyline/config.yml',
            sub_path="config",
            read_only=True,
        ))
    section = 'service'
    labels = {
        'app': 'assemblyline',
        'section': section,
        'privilege': 'core',
        'component': 'update-script',
    }
    labels.update(self.extra_labels)
    metadata = V1ObjectMeta(
        name=name,
        labels=labels
    )
    # Environment: docker_config values, caller-supplied env, inherited host
    # variables, then the log level.
    environment_variables = [V1EnvVar(name=_e.name, value=_e.value)
                             for _e in docker_config.environment]
    environment_variables.extend([V1EnvVar(name=k, value=v)
                                  for k, v in env.items()])
    environment_variables.extend([V1EnvVar(name=k, value=os.environ[k])
                                  for k in INHERITED_VARIABLES if k in os.environ])
    environment_variables.append(V1EnvVar(name="LOG_LEVEL", value=self.log_level))
    cores = docker_config.cpu_cores
    memory = docker_config.ram_mb
    memory_min = min(docker_config.ram_mb_min, memory)
    container = V1Container(
        name=name,
        image=docker_config.image,
        command=docker_config.command,
        env=environment_variables,
        image_pull_policy='Always',
        volume_mounts=volume_mounts,
        resources=V1ResourceRequirements(
            limits={'cpu': cores, 'memory': f'{memory}Mi'},
            # requests a quarter of the cpu limit and the minimum memory
            requests={'cpu': cores / 4, 'memory': f'{memory_min}Mi'},
        )
    )
    pod = V1PodSpec(
        volumes=volumes,
        restart_policy='Never',
        containers=[container],
        priority_class_name=self.priority_class,
    )
    if use_pull_secret:
        pod.image_pull_secrets = [V1LocalObjectReference(name=pull_secret_name)]
    job = V1Job(
        metadata=metadata,
        spec=V1JobSpec(
            backoff_limit=1,
            completions=1,
            template=V1PodTemplateSpec(
                metadata=metadata,
                spec=pod
            )
        )
    )
    status = self.batch_api.create_namespaced_job(
        namespace=self.namespace, body=job,
        _request_timeout=API_TIMEOUT).status
    if blocking:
        # Poll until the job reports success or failure, then clean it up.
        try:
            while not (status.failed or status.succeeded):
                time.sleep(3)
                status = self.batch_api.read_namespaced_job(
                    namespace=self.namespace, name=name,
                    _request_timeout=API_TIMEOUT).status
            self.batch_api.delete_namespaced_job(
                name=name, namespace=self.namespace,
                propagation_policy='Background', _request_timeout=API_TIMEOUT)
        except ApiException as error:
            # 404 means the job vanished underneath us, which is fine here.
            if error.status != 404:
                raise
def _create_deployment(self, service_name: str, deployment_name: str,
                       docker_config: DockerConfig, shutdown_seconds: int,
                       scale: int, labels: dict[str, str] = None,
                       volumes: list[V1Volume] = None,
                       mounts: list[V1VolumeMount] = None,
                       core_mounts: bool = False, change_key: str = ''):
    """Create or replace the kubernetes Deployment for a service, skipping
    the API call entirely when nothing relevant has changed."""
    # Build a cache key to check for changes, just trying to only patch what changed
    # will still potentially result in a lot of restarts due to different kubernetes
    # systems returning differently formatted data
    lbls = sorted((labels or {}).items())
    svc_env = sorted(self._service_limited_env[service_name].items())
    change_key = (f"n={deployment_name}{change_key}dc={docker_config}ss={shutdown_seconds}"
                  f"l={lbls}v={volumes}m={mounts}cm={core_mounts}senv={svc_env}")
    # Check if a deployment already exists, and if it does check if it has the same change key set
    replace = None
    try:
        replace = self.apps_api.read_namespaced_deployment(
            deployment_name, namespace=self.namespace,
            _request_timeout=API_TIMEOUT)
        if replace.metadata.annotations.get(CHANGE_KEY_NAME) == change_key:
            # Nothing changed except possibly the replica count.
            if replace.spec.replicas != scale:
                self.set_target(service_name, scale)
            return
    except ApiException as error:
        if error.status != 404:
            raise
    # If we have been given a username or password for the registry, we have to
    # update it, if we haven't been, make sure its been cleaned up in the system
    # so we don't leave passwords lying around
    pull_secret_name = f'{deployment_name}-container-pull-secret'
    use_pull_secret = False
    try:
        current_pull_secret = self.api.read_namespaced_secret(
            pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)
    except ApiException as error:
        if error.status != 404:
            raise
        current_pull_secret = None
    if docker_config.registry_username or docker_config.registry_password:
        use_pull_secret = True
        # Build the secret we want to make
        new_pull_secret = V1Secret(
            metadata=V1ObjectMeta(name=pull_secret_name, namespace=self.namespace),
            type='kubernetes.io/dockerconfigjson',
            string_data={
                '.dockerconfigjson': create_docker_auth_config(
                    image=docker_config.image,
                    username=docker_config.registry_username,
                    password=docker_config.registry_password,
                )
            }
        )
        # Send it to the server
        if current_pull_secret:
            self.api.patch_namespaced_secret(
                pull_secret_name, namespace=self.namespace,
                body=new_pull_secret, _request_timeout=API_TIMEOUT)
        else:
            self.api.create_namespaced_secret(
                namespace=self.namespace, body=new_pull_secret,
                _request_timeout=API_TIMEOUT)
    elif current_pull_secret:
        # No registry credentials configured: remove any stale secret.
        self.api.delete_namespaced_secret(
            pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)
    all_labels = dict(self._labels)
    all_labels['component'] = service_name
    if core_mounts:
        all_labels['privilege'] = 'core'
    all_labels.update(labels or {})
    # Build set of volumes, first the global mounts, then the core specific ones,
    # then the ones specific to this container only
    all_volumes: list[V1Volume] = []
    all_mounts: list[V1VolumeMount] = []
    all_volumes.extend(self.config_volumes.values())
    all_mounts.extend(self.config_mounts.values())
    if core_mounts:
        all_volumes.extend(self.core_config_volumes.values())
        all_mounts.extend(self.core_config_mounts.values())
    all_volumes.extend(volumes or [])
    all_mounts.extend(mounts or [])
    # Build metadata
    metadata = V1ObjectMeta(name=deployment_name, labels=all_labels,
                            annotations={CHANGE_KEY_NAME: change_key})
    pod = V1PodSpec(
        volumes=all_volumes,
        containers=self._create_containers(service_name, deployment_name,
                                           docker_config, all_mounts,
                                           core_container=core_mounts),
        priority_class_name=self.priority,
        termination_grace_period_seconds=shutdown_seconds,
        security_context=V1PodSecurityContext(fs_group=1000)
    )
    if use_pull_secret:
        pod.image_pull_secrets = [V1LocalObjectReference(name=pull_secret_name)]
    template = V1PodTemplateSpec(
        metadata=metadata,
        spec=pod,
    )
    spec = V1DeploymentSpec(
        replicas=int(scale),
        revision_history_limit=0,
        selector=V1LabelSelector(match_labels=all_labels),
        template=template,
    )
    deployment = V1Deployment(
        kind="Deployment",
        metadata=metadata,
        spec=spec,
    )
    if replace:
        self.logger.info("Requesting kubernetes replace deployment info for: " + metadata.name)
        try:
            self.apps_api.replace_namespaced_deployment(
                namespace=self.namespace, body=deployment,
                name=metadata.name, _request_timeout=API_TIMEOUT)
            return
        except ApiException as error:
            if error.status == 422:
                # Replacement of an immutable field (ie. labels); Delete and re-create
                self.stop_containers(labels=dict(component=service_name))
            # NOTE(review): non-422 errors are swallowed here and execution
            # falls through to the create call below — confirm intended.
    else:
        self.logger.info("Requesting kubernetes create deployment info for: " + metadata.name)
    self.apps_api.create_namespaced_deployment(
        namespace=self.namespace, body=deployment,
        _request_timeout=API_TIMEOUT)
def test_update_secret_valid_key_existing_not_managed(
        credstash_get_secret_mock):
    """When the existing secret is not fully managed, update_secret must
    merge the new key into the existing data (keeping unmanaged keys) and
    stamp the requested resourceversion annotation."""
    cont = CredStashController("none", "none", "none", "none", "none")
    cont.v1core = MagicMock()
    # Existing secret explicitly marked as NOT fully managed.
    metadata = V1ObjectMeta(
        name="bobo",
        namespace="default",
        annotations={"credstash-fully-managed": "false"},
    )
    mock_secret = V1Secret("v1", {"secret": "poo"}, "Secret", metadata)
    cont.v1core.read_namespaced_secret = MagicMock(return_value=mock_secret)
    credstash_secret = {
        "metadata": {
            "namespace": "test",
            "name": "boom"
        },
        "spec": [{
            "from": "ba",
            "name": "lala",
            "version": "0001"
        }],
    }
    cont.update_secret(credstash_secret, resource_version=1)
    # Patch is addressed by the credstash resource name ("boom").
    assert (
        cont.v1core.patch_namespaced_secret.call_args_list[0][0][0] == "boom")
    # The patched body keeps the pre-existing "secret" key and adds "lala"
    # ("MTIz" is base64 of "123" from the mocked credstash lookup).
    assert cont.v1core.patch_namespaced_secret.call_args_list[0][0][2].to_dict(
    ) == {
        "api_version": "v1",
        "data": {
            "lala": "MTIz",
            "secret": "poo"
        },
        "kind": "Secret",
        "metadata": {
            "annotations": {
                "credstash-fully-managed": "false",
                "credstash-resourceversion": "1",
            },
            "cluster_name": None,
            "creation_timestamp": None,
            "deletion_grace_period_seconds": None,
            "deletion_timestamp": None,
            "finalizers": None,
            "generate_name": None,
            "generation": None,
            "initializers": None,
            "labels": None,
            "name": "bobo",
            "namespace": "default",
            "owner_references": None,
            "resource_version": None,
            "self_link": None,
            "uid": None,
        },
        "string_data": None,
        "type": None,
    }
    # The secret value must have been fetched from credstash exactly once.
    credstash_get_secret_mock.assert_called_once_with(
        aws_access_key_id="none",
        aws_secret_access_key="none",
        name="ba",
        region="none",
        table="none",
        version="0001",
    )
def _save_admin_token(self, admin_secret: V1Secret, token: str,
                      user_namespace: str) -> V1Secret:
    """Write the admin token into the secret and push the updated secret."""
    admin_secret.data['token'] = token
    return update_secret(namespace=user_namespace, secret=admin_secret)