def get_deis_instance_credentials(instance_id, is_datastore=False, is_datastore_readonly=False, required=True, with_db_prefix=False):
    """Fetch DB credentials for a deis-style instance.

    Returns ``[user, password, db_name]`` (plus a db prefix element when
    ``with_db_prefix`` is set) on success, or a tuple of ``None`` values of the
    matching length when the instance/secret is missing or incomplete and
    ``required`` is falsy.  When ``required`` is truthy, missing values trigger
    an AssertionError instead.
    """
    empty = (None,) * (4 if with_db_prefix else 3)
    kind = ckan_manager.instance_kind()
    instance = kubectl.get(f'{kind} {instance_id}', required=required)
    if not instance:
        return empty
    raw_secret = kubectl.get(f'secret {instance_id}-annotations', namespace=instance_id, required=required)
    if not raw_secret:
        return empty
    decoded = kubectl.decode_secret(raw_secret)
    use_datastore = is_datastore or is_datastore_readonly
    # db_name doubles as the default user name for both db and datastore
    db_name = instance['spec'].get('datastore' if use_datastore else 'db', {}).get('name')
    user = db_name
    if is_datastore_readonly:
        user = decoded.get('datastoreReadonlyUser')
        # NOTE: the 'datatastore...' spelling is kept on purpose — it matches
        # the key name actually written to the annotations secret elsewhere
        # in this codebase; "fixing" it would break reads of existing secrets.
        password = decoded.get('datatastoreReadonlyPassword')
    elif use_datastore:
        password = decoded.get('datastorePassword')
    else:
        password = decoded.get('databasePassword')
    creds = [user, password, db_name]
    if not all(creds):
        # NOTE(review): assert is stripped under `python -O`; callers appear
        # to rely on AssertionError here so the behavior is preserved.
        assert not required, 'missing some db values'
        return empty
    if with_db_prefix:
        creds.append(
            get_deis_instance_db_prefix_from_instance(instance, use_datastore))
    return creds
def get(self):
    """Report readiness of the instance's private registry secret.

    Returns ``{'ready': bool}`` — ready means the ``<id>-registry`` secret
    exists and decodes to at least one key — or ``{'ready': False,
    'error': <kubectl output>}`` when the kubectl call fails.
    """
    status, output = subprocess.getstatusoutput(
        f'kubectl -n {self.instance.id} get secret/{self.instance.id}-registry -o yaml'
    )
    if status != 0:
        return {'ready': False, 'error': output}
    # NOTE(review): Loader-less yaml.load warns on PyYAML>=5.1 and fails on
    # 6.x — confirm which yaml module is actually in scope here.
    decoded = kubectl.decode_secret(yaml.load(output))
    return {'ready': len(decoded) > 0}
def get(self, full=False):
    """Report readiness of the instance's ckan-envvars secret.

    Ready means the secret exists and contains ``CKAN_SITE_URL``.  With
    ``full=True`` the decoded envvars are included under ``'envvars'``.
    On kubectl failure returns ``{'ready': False, 'error': <output>}``.
    """
    status, output = subprocess.getstatusoutput(f'kubectl -n {self.instance.id} get secret/ckan-envvars -o yaml')
    if status != 0:
        return {'ready': False, 'error': output}
    envvars = kubectl.decode_secret(yaml.load(output))
    result = {'ready': 'CKAN_SITE_URL' in envvars}
    if full:
        result['envvars'] = envvars
    return result
def delete_key(key, secret_name=None, namespace=None):
    """Remove a single key from a secret by re-applying it without that key.

    Reads the current secret, drops ``key``, re-encodes the remaining values
    and applies the rewritten Secret manifest in place.
    """
    current = kubectl.decode_secret(
        kubectl.get('secret', secret_name, namespace=namespace))
    # NOTE(review): the `and value` filter also drops keys whose decoded value
    # is empty/falsy, not just `key` — confirm this is intended.
    remaining = {
        name: base64.b64encode(value.encode()).decode()
        for name, value in current.items()
        if name != key and value
    }
    kubectl.apply({
        'apiVersion': 'v1',
        'kind': 'Secret',
        'metadata': {
            'name': secret_name,
            'namespace': namespace
        },
        'type': 'Opaque',
        'data': remaining
    })
def get_instance_env(old_site_id, path_to_old_cluster_kubeconfig):
    """Reconstruct an old instance's environment from its deployment.

    Reads the ``<site>-cmd`` deployment on the old cluster, splits its env
    entries into literal values and secret references (asserting that all
    references point at a single secret), fetches that secret, and returns the
    merged ``{name: value}`` dict.
    """
    raw = subprocess.check_output(
        f'KUBECONFIG={path_to_old_cluster_kubeconfig} '
        f'kubectl -n {old_site_id} get deployment {old_site_id}-cmd -o yaml',
        shell=True)
    deployment = yaml.load(raw)
    containers = deployment['spec']['template']['spec']['containers']
    assert len(containers) == 1, f'invalid number of containers {len(containers)}'
    secret_name_seen = None
    secret_keys_by_env = {}
    literal_values = {}
    for env_entry in containers[0]['env']:
        env_name = env_entry.pop('name')
        if 'valueFrom' in env_entry:
            value_from = env_entry.pop('valueFrom')
            # each pop + length assert proves the entry had no extra fields
            assert len(env_entry) == 0
            ref = value_from.pop('secretKeyRef')
            assert len(value_from) == 0
            ref_key = ref.pop('key')
            ref_name = ref.pop('name')
            assert len(ref) == 0
            # all secretKeyRefs must target the same secret
            assert not secret_name_seen or secret_name_seen == ref_name
            secret_name_seen = ref_name
            secret_keys_by_env[env_name] = ref_key
        else:
            literal = env_entry.pop('value')
            assert len(env_entry) == 0
            literal_values[env_name] = literal
    raw = subprocess.check_output(
        f'KUBECONFIG={path_to_old_cluster_kubeconfig} '
        f'kubectl -n {old_site_id} get secret {secret_name_seen} -o yaml',
        shell=True)
    decoded = kubectl.decode_secret(yaml.load(raw))
    resolved = {name: decoded[ref_key] for name, ref_key in secret_keys_by_env.items()}
    # literal values win over secret-derived ones on key collision
    return dict(resolved, **literal_values)
def get_kubeconfig(cluster_name, service_account_namespace, service_account_name, cluster_spec=None):
    """Build a kubeconfig dict that authenticates as a service account.

    The token is taken from the service account's first associated secret;
    the cluster server is taken from the current ``kubectl config view``
    unless an explicit ``cluster_spec`` is provided.
    """
    account = kubectl.get(f'ServiceAccount {service_account_name}',
                          namespace=service_account_namespace)
    # NOTE(review): relies on an auto-created SA token secret being listed
    # under .secrets — not the case on Kubernetes >= 1.24; confirm cluster
    # version or that token secrets are created explicitly.
    token_secret_name = account['secrets'][0]['name']
    token_secret = kubectl.decode_secret(
        kubectl.get(f'secret {token_secret_name}',
                    namespace=service_account_namespace))
    config = kubectl.get('config view', get_cmd='')
    assert len(config['clusters']) == 1
    spec = cluster_spec or {"server": config['clusters'][0]['cluster']['server']}
    return {
        "apiVersion": "v1",
        "kind": "Config",
        "users": [{
            "name": service_account_name,
            "user": {"token": token_secret['token']}
        }],
        "clusters": [{
            "name": cluster_name,
            "cluster": spec
        }],
        "contexts": [{
            "name": cluster_name,
            "context": {
                "cluster": cluster_name,
                "user": service_account_name
            }
        }],
        "current-context": cluster_name
    }
def _update(self):
    """Regenerate and apply the instance's 'ckan-envvars' secret.

    Gathers DB/datastore credentials from the instance annotations, solr and
    storage endpoints from their provider managers, base envvars from either a
    secret or GitLab, composes the full CKAN environment and writes it to the
    'ckan-envvars' secret in the instance namespace.
    """
    spec = self.instance.spec
    db_name = spec.db['name']
    db_password = self.instance.annotations.get_secret('databasePassword')
    datastore_name = spec.datastore['name']
    datastore_password = self.instance.annotations.get_secret('datastorePassword')
    datastore_ro_user = self.instance.annotations.get_secret('datastoreReadonlyUser')
    # the 'datatastore...' key typo is intentional — it matches the key name
    # actually stored in the annotations secret (see the reader/writer pair
    # elsewhere in this codebase)
    datastore_ro_password = self.instance.annotations.get_secret('datatastoreReadonlyPassword')
    db_no_db_proxy = spec.db.get('no-db-proxy') == 'yes'
    datastore_no_db_proxy = spec.datastore.get('no-db-proxy') == 'yes'
    # proxy bypass is all-or-nothing: both db and datastore must opt out together
    if db_no_db_proxy or datastore_no_db_proxy:
        assert db_no_db_proxy and datastore_no_db_proxy, 'must set both DB and datastore with no-db-proxy'
        no_db_proxy = True
    else:
        no_db_proxy = False
    # local import — presumably avoids a module-level import cycle; TODO confirm
    from ckan_cloud_operator.providers.solr import manager as solr_manager
    solr_http_endpoint = solr_manager.get_internal_http_endpoint()
    solr_collection_name = spec.solrCloudCollection['name']
    # base envvars come from exactly one source: a secret or GitLab
    if 'fromSecret' in spec.envvars:
        envvars = kubectl.get(f'secret {spec.envvars["fromSecret"]}')
        envvars = yaml.load(kubectl.decode_secret(envvars, 'envvars.yaml'))
    elif 'fromGitlab' in spec.envvars:
        envvars = CkanGitlab().get_envvars(spec.envvars['fromGitlab'])
    else:
        raise Exception(f'invalid envvars spec: {spec.envvars}')
    assert envvars['CKAN_SITE_ID'] and envvars['CKAN_SITE_URL'] and envvars['CKAN_SQLALCHEMY_URL']
    from ckan_cloud_operator.providers.storage import manager as storage_manager
    storage_hostname, storage_access_key, storage_secret_key = storage_manager.get_provider().get_credentials()
    # spec.storage['path'] is '<bucket>/<path...>'; split into the two parts
    storage_path_parts = spec.storage['path'].strip('/').split('/')
    storage_bucket = storage_path_parts[0]
    storage_path = '/'.join(storage_path_parts[1:])
    if no_db_proxy:
        postgres_host, postgres_port = db_manager.get_internal_unproxied_db_host_port()
        logs.info(f'Bypassing db proxy, connecting to DB directly: {postgres_host}:{postgres_port}')
    else:
        postgres_host, postgres_port = db_manager.get_internal_proxy_host_port()
        logs.info(f'Connecting to DB proxy: {postgres_host}:{postgres_port}')
    # composed values override anything with the same key from the base envvars
    envvars.update(
        CKAN_SQLALCHEMY_URL=f"postgresql://{db_name}:{db_password}@{postgres_host}:{postgres_port}/{db_name}",
        CKAN___BEAKER__SESSION__URL=f"postgresql://{db_name}:{db_password}@{postgres_host}:{postgres_port}/{db_name}",
        CKAN__DATASTORE__READ_URL=f"postgresql://{datastore_ro_user}:{datastore_ro_password}@{postgres_host}:{postgres_port}/{datastore_name}",
        CKAN__DATASTORE__WRITE_URL=f"postgresql://{datastore_name}:{datastore_password}@{postgres_host}:{postgres_port}/{datastore_name}",
        CKAN_SOLR_URL=f"{solr_http_endpoint}/{solr_collection_name}",
        CKANEXT__S3FILESTORE__AWS_STORAGE_PATH=storage_path,
        CKANEXT__S3FILESTORE__AWS_ACCESS_KEY_ID=storage_access_key,
        CKANEXT__S3FILESTORE__AWS_SECRET_ACCESS_KEY=storage_secret_key,
        CKANEXT__S3FILESTORE__AWS_BUCKET_NAME=storage_bucket,
        CKANEXT__S3FILESTORE__HOST_NAME=f'https://{storage_hostname}/',
        CKANEXT__S3FILESTORE__REGION_NAME='us-east-1',
        CKANEXT__S3FILESTORE__SIGNATURE_VERSION='s3v4',
        CKAN__DATAPUSHER__URL=datapusher.get_datapusher_url(envvars.get('CKAN__DATAPUSHER__URL')),
    )
    # print(yaml.dump(envvars, default_flow_style=False))
    self._apply_instance_envvars_overrides(envvars)
    # secrets cannot hold None values — normalize to empty strings
    envvars = {
        k: ('' if v is None else v)
        for k, v in envvars.items()
    }
    kubectl.update_secret('ckan-envvars', envvars, namespace=self.instance.id)
    self.site_url = envvars.get('CKAN_SITE_URL')
def get(cls):
    """Return the decoded 'ckan-infra' secret, or an empty dict when absent."""
    infra_secret = kubectl.get('secret ckan-infra', required=False)
    return kubectl.decode_secret(infra_secret) if infra_secret else {}
def __init__(self, required=True):
    """Load infrastructure configuration from the 'ckan-infra' secret.

    With ``required=False`` the secret is not fetched at all and every
    attribute is initialized to ``None`` (except the one attribute with a
    non-None default, see below).
    """
    if required:
        values = kubectl.decode_secret(kubectl.get('secret ckan-infra', required=required), required=required)
    else:
        values = {}
    # All plain attributes default to None when the key is missing.
    keys = (
        # Database
        'POSTGRES_HOST', 'POSTGRES_USER', 'POSTGRES_PASSWORD',
        'GCLOUD_SQL_INSTANCE_NAME', 'GCLOUD_SQL_PROJECT',
        # Solr
        'SOLR_HTTP_ENDPOINT', 'SOLR_HTTP_ENDPOINT_SIMPLE', 'SOLR_USER',
        'SOLR_PASSWORD', 'SOLR_NUM_SHARDS', 'SOLR_REPLICATION_FACTOR',
        # Private Docker Registry
        'DOCKER_REGISTRY_SERVER', 'DOCKER_REGISTRY_USERNAME',
        'DOCKER_REGISTRY_PASSWORD', 'DOCKER_REGISTRY_EMAIL',
        # GitLab
        'GITLAB_TOKEN_USER', 'GITLAB_TOKEN_PASSWORD',
        # Migration from old cluster
        'DEIS_KUBECONFIG', 'GCLOUD_SQL_DEIS_IMPORT_BUCKET',
        # Gcloud credentials / general details
        'GCLOUD_SERVICE_ACCOUNT_JSON', 'GCLOUD_SERVICE_ACCOUNT_EMAIL',
        'GCLOUD_AUTH_PROJECT', 'GCLOUD_COMPUTE_ZONE', 'GCLOUD_CLUSTER_NAME',
        # Gcloud Storage
        'GCLOUD_STORAGE_BUCKET', 'GCLOUD_STORAGE_ACCESS_KEY_ID',
        'GCLOUD_STORAGE_SECRET_ACCESS_KEY', 'GCLOUD_STORAGE_HOST_NAME',
        'GCLOUD_STORAGE_REGION_NAME', 'GCLOUD_STORAGE_SIGNATURE_VERSION',
        # Routers / Load Balancing
        'ROUTERS_ENV_ID', 'ROUTERS_DEFAULT_ROOT_DOMAIN',
        'ROUTERS_DEFAULT_CLOUDFLARE_EMAIL', 'ROUTERS_DEFAULT_CLOUDFLARE_AUTH_KEY',
        # Monitoring
        'CKAN_STATUSCAKE_API_KEY', 'CKAN_STATUSCAKE_API_USER',
        'CKAN_STATUSCAKE_GROUP',
    )
    for key in keys:
        setattr(self, key, values.get(key))
    # Cluster Storage — the only attribute with a non-None fallback
    self.MULTI_USER_STORAGE_CLASS_NAME = values.get('MULTI_USER_STORAGE_CLASS_NAME', 'cca-ckan')
def _fetch_secret(secret_name, namespace):
    """Return the decoded secret dict, or None when the secret does not exist."""
    raw = kubectl.get(f'secret {secret_name}', required=False, namespace=namespace)
    if not raw:
        return None
    return kubectl.decode_secret(raw)
def admin_credentials(self):
    """Return the instance sysadmin name/password from its ckan-envvars secret.

    Raises KeyError if either sysadmin envvar is missing from the secret.
    """
    envvars = kubectl.decode_secret(
        kubectl.get('secret', 'ckan-envvars', namespace=self.instance.id))
    return {
        'sysadmin-name': envvars['CKAN_SYSADMIN_NAME'],
        'sysadmin-password': envvars['CKAN_SYSADMIN_PASSWORD'],
    }
def update(instance_id_or_name, override_spec=None, persist_overrides=False, wait_ready=False, skip_deployment=False, skip_route=False, force=False, dry_run=False):
    """Update a CKAN instance: deployment, routes, admin user and secrets.

    Resolves the instance, runs the deployment pre-update hook, optionally
    persists spec overrides, updates the deployment, creates the default
    subdomain route, and ensures a CKAN admin user exists.  With
    ``dry_run=True`` only logs what would be done.
    """
    instance_id, instance_type, instance = _get_instance_id_and_type(
        instance_id_or_name, required=not dry_run)
    if dry_run:
        logs.info('update instance', instance_id=instance_id,
                  instance_id_or_name=instance_id_or_name,
                  override_spec=override_spec,
                  persist_overrides=persist_overrides,
                  wait_ready=wait_ready,
                  skip_deployment=skip_deployment,
                  skip_route=skip_route,
                  force=force,
                  dry_run=dry_run)
    else:
        pre_update_hook_data = deployment_manager.pre_update_hook(
            instance_id, instance_type, instance, override_spec, skip_route)
        # cloud-native storage: copy the instance's bucket credentials into
        # its namespace when the feature flag is enabled
        bucket_credentials = instance['spec'].get('ckanStorageBucket', {}).get(
            get_storage_provider_id())
        use_cloud_storage = bucket_credentials and config_manager.get(
            'use-cloud-native-storage', secret_name=CONFIG_NAME)
        if use_cloud_storage:
            # NOTE(review): cluster_provider_id and literal are assigned but
            # never used in this visible code — possibly leftovers; confirm.
            cluster_provider_id = cluster_manager.get_provider_id()
            if bucket_credentials:
                literal = []
                config_manager.set(values=bucket_credentials,
                                   secret_name='bucket-credentials',
                                   namespace=instance_id)
        # operatorCopySecrets: JSON mapping of target secret name ->
        # {fromName, fromNamespace?} with __INSTANCE_NAME__ placeholders
        if instance['spec'].get('operatorCopySecrets'):
            for target_secret_name, source_secret_config in json.loads(
                    instance['spec']['operatorCopySecrets']).items():
                # value replacement in place is safe: keys are not added/removed
                for k, v in source_secret_config.items():
                    source_secret_config[k] = v.replace(
                        "__INSTANCE_NAME__", instance_id_or_name)
                kubectl.update_secret(
                    target_secret_name,
                    kubectl.decode_secret(
                        kubectl.get('secret',
                                    source_secret_config["fromName"],
                                    namespace=source_secret_config.get(
                                        "fromNamespace", "ckan-cloud"))),
                    namespace=instance_id)
        if persist_overrides:
            logs.info('Persisting overrides')
            kubectl.apply(instance)
        if not skip_deployment:
            deployment_manager.update(instance_id, instance_type, instance,
                                      force=force)
            if wait_ready:
                wait_instance_ready(instance_id_or_name)
        # default route: only created when the pre-update hook produced a
        # sub-domain and routing was not explicitly skipped
        if not skip_route and pre_update_hook_data.get('sub-domain'):
            root_domain = pre_update_hook_data.get('root-domain')
            sub_domain = pre_update_hook_data['sub-domain']
            assert root_domain == routers_manager.get_default_root_domain(
            ), 'invalid domain, must use default root domain'
            logs.info(
                f'adding instance default route to {sub_domain}.{root_domain}')
            routers_manager.create_subdomain_route(
                'instances-default', {
                    'target-type': 'ckan-instance',
                    'ckan-instance-id': instance_id,
                    'root-domain': root_domain,
                    'sub-domain': sub_domain
                })
            logs.info(f'updating routers_manager wait_ready: {wait_ready}')
            routers_manager.update('instances-default', wait_ready)
        else:
            logs.info('skipping route creation', skip_route=skip_route,
                      sub_domain=pre_update_hook_data.get('sub-domain'))
        # admin user creation, unless the spec opts out
        if not instance['spec'].get('skipCreateCkanAdmin', False):
            logs.info('creating ckan admin')
            ckan_admin_email = instance['spec'].get(
                'ckanAdminEmail', pre_update_hook_data.get('ckan-admin-email'))
            ckan_admin_password = pre_update_hook_data.get(
                'ckan-admin-password')
            ckan_admin_name = instance['spec'].get(
                'ckanAdminName',
                pre_update_hook_data.get('ckan-admin-name', 'admin'))
            res = create_ckan_admin_user(instance_id, ckan_admin_name,
                                         ckan_admin_email,
                                         ckan_admin_password)
            logs.info(**res)
        logs.info('Instance is ready', instance_id=instance_id,
                  instance_name=(instance_id_or_name
                                 if instance_id_or_name != instance_id
                                 else None))