def deploy():
    """Deploys a proxy inside the cluster which allows to access the centralized solr without authentication"""
    app_labels = {'app': 'ckan-cloud-solrcloud-proxy'}
    # Solr endpoint carries basic-auth credentials in the URL: user:password@host
    endpoint = parse_url(solr_manager.get_internal_http_endpoint())
    proto = endpoint.scheme
    host = endpoint.hostname
    port = endpoint.port
    solr_user, solr_password = endpoint.auth.split(':')
    if not port:
        # Fall back to the conventional port for the scheme.
        port = '443' if proto == 'https' else '8983'
    # The proxy pod reads the upstream solr location and credentials from this secret.
    kubectl.update_secret('solrcloud-proxy', {
        'SOLR_URL': f'{proto}://{host}:{port}',
        'SOLR_USER': solr_user,
        'SOLR_PASSWORD': solr_password
    })
    proxy_container = {
        'name': 'solrcloud-proxy',
        'image': 'viderum/ckan-cloud-operator-solrcloud-proxy',
        'envFrom': [{'secretRef': {'name': 'solrcloud-proxy'}}],
        'ports': [{'containerPort': 8983}],
    }
    deployment_spec = {
        'replicas': 1,
        'revisionHistoryLimit': 10,
        'strategy': {'type': 'RollingUpdate', },
        'template': {
            'metadata': {
                'labels': app_labels,
                'annotations': {
                    # Timestamp changes every run, so applying triggers a fresh rollout.
                    'ckan-cloud/operator-timestamp': str(datetime.datetime.now())
                }
            },
            'spec': {'containers': [proxy_container]}
        }
    }
    kubectl.apply(kubectl.get_deployment('solrcloud-proxy', app_labels, deployment_spec))
    # Expose the proxy inside the cluster on port 8983.
    service = kubectl.get_resource('v1', 'Service', 'solrcloud-proxy', app_labels)
    service['spec'] = {
        'ports': [{'name': '8983', 'port': 8983}],
        'selector': app_labels
    }
    kubectl.apply(service)
def _get_deployment_spec(router_name, router_type, annotations, image=None):
    """Build the kubernetes Deployment spec for a Traefik router.

    Mounts the router configmap and the shared multi-user volume (for ACME
    state); when the letsencryptCloudflareEnabled annotation flag is set,
    also opens port 443 and injects Cloudflare credentials via a secret.
    """
    volume_spec = cluster_manager.get_or_create_multi_user_volume_claim(
        get_label_suffixes(router_name, router_type))
    traefik_container = {
        'name': 'traefik',
        'image': image or 'traefik:1.6-alpine',
        'ports': [{'containerPort': 80}],
        'volumeMounts': [
            {'name': 'etc-traefik', 'mountPath': '/etc-traefik'},
            {'name': 'traefik-acme',
             'mountPath': '/traefik-acme',
             # Each router gets its own subpath on the shared claim.
             'subPath': f'router-traefik-{router_name}'},
        ],
        'args': ['--configFile=/etc-traefik/traefik.toml'],
    }
    deployment_spec = {
        'replicas': 1,
        'revisionHistoryLimit': 5,
        'template': {
            'metadata': {
                'labels': get_labels(router_name, router_type, for_deployment=True)
            },
            'spec': {
                'containers': [traefik_container],
                'volumes': [
                    {'name': 'etc-traefik',
                     'configMap': {'name': f'router-traefik-{router_name}'}},
                    dict(volume_spec, name='traefik-acme'),
                ],
            },
        },
    }
    if annotations.get_flag('letsencryptCloudflareEnabled'):
        # SSL support: expose HTTPS and give Traefik Cloudflare credentials
        # for the ACME DNS challenge.
        traefik_container['ports'].append({'containerPort': 443})
        cloudflare_email, cloudflare_api_key = get_cloudflare_credentials()
        secret_name = f'ckancloudrouter-{router_name}-cloudflare'
        kubectl.update_secret(secret_name, {
            'CLOUDFLARE_EMAIL': cloudflare_email,
            'CLOUDFLARE_API_KEY': cloudflare_api_key,
        }, labels=get_labels(router_name, router_type))
        traefik_container['envFrom'] = [{'secretRef': {'name': secret_name}}]
    return deployment_spec
def _save_secret(values, secret_name, namespace, extra_operator_labels):
    """Persist *values* as a kubernetes secret, labeled for operator bookkeeping."""
    secret_labels = _get_labels(secret_name=secret_name, namespace=namespace,
                                extra_operator_labels=extra_operator_labels)
    return kubectl.update_secret(secret_name, values,
                                 namespace=namespace, labels=secret_labels)
def deploy_ckan_infra_solr_proxy():
    """Deploys a proxy inside the cluster which allows to access the centralized solr without authentication"""
    labels = {'app': 'ckan-cloud-solrcloud-proxy'}
    # NOTE(review): `cls` is not bound in this scope — this is presumably a
    # @classmethod whose decorator/class header is outside this view; confirm,
    # otherwise this line raises NameError at runtime.
    infra = cls()
    solr_url = urlparse(infra.SOLR_HTTP_ENDPOINT)
    scheme = solr_url.scheme
    hostname = solr_url.hostname
    port = solr_url.port
    # Fall back to the conventional port for the scheme when none is given.
    if not port:
        port = '443' if scheme == 'https' else '8983'
    # The proxy pod reads the upstream solr location and credentials from this secret.
    kubectl.update_secret('solrcloud-proxy', {
        'SOLR_URL': f'{scheme}://{hostname}:{port}',
        'SOLR_USER': infra.SOLR_USER,
        'SOLR_PASSWORD': infra.SOLR_PASSWORD
    })
    kubectl.apply(kubectl.get_deployment('solrcloud-proxy', labels, {
        'replicas': 1,
        'revisionHistoryLimit': 10,
        'strategy': {'type': 'RollingUpdate', },
        'template': {
            'metadata': {
                'labels': labels,
                'annotations': {
                    # Timestamp changes every run, so applying triggers a fresh rollout.
                    'ckan-cloud/operator-timestamp': str(datetime.datetime.now())
                }
            },
            'spec': {
                'containers': [
                    {
                        'name': 'solrcloud-proxy',
                        'image': 'orihoch/ckan-cloud-operator-solrcloud-proxy',
                        'envFrom': [{'secretRef': {'name': 'solrcloud-proxy'}}],
                        'ports': [{'containerPort': 8983}],
                    }
                ]
            }
        }
    }))
    # Expose the proxy inside the cluster on port 8983.
    service = kubectl.get_resource('v1', 'Service', 'solrcloud-proxy', labels)
    service['spec'] = {
        'ports': [
            {'name': '8983', 'port': 8983}
        ],
        'selector': labels
    }
    kubectl.apply(service)
def set(cls, set_type, *args):
    """Store infra credentials of the given *set_type* into the ckan-infra secret.

    Supported set types: 'key-value', 'gcloud', 'docker-registry'.
    Raises NotImplementedError for any other set_type.
    """
    print(f'Setting {set_type} infra secrets')
    if set_type == 'key-value':
        key, value = args
        secret_values = {key: value}
    elif set_type == 'gcloud':
        service_account_json, service_account_email, auth_project = args
        # The service account file is read as bytes and stored decoded.
        with open(service_account_json, 'rb') as f:
            service_account_json_content = f.read().decode()
        secret_values = {
            'GCLOUD_SERVICE_ACCOUNT_JSON': service_account_json_content,
            'GCLOUD_SERVICE_ACCOUNT_EMAIL': service_account_email,
            'GCLOUD_AUTH_PROJECT': auth_project
        }
    elif set_type == 'docker-registry':
        server, username, password, email = args
        secret_values = {
            'DOCKER_REGISTRY_SERVER': server,
            'DOCKER_REGISTRY_USERNAME': username,
            'DOCKER_REGISTRY_PASSWORD': password,
            'DOCKER_REGISTRY_EMAIL': email
        }
    else:
        raise NotImplementedError(
            f'Invalid infra set spec: {set_type}={args}')
    # All variants write into the same shared 'ckan-infra' secret.
    kubectl.update_secret('ckan-infra', secret_values)
def deploy_gcs_minio_proxy(router_name):
    """Deploys a minio proxy (AKA gateway) for access to google storage"""
    app_labels = {'app': 'ckan-cloud-gcsminio-proxy'}
    # Generate minio credentials once; reuse them on subsequent deploys.
    if not kubectl.get('secret gcsminio-proxy-credentials', required=False):
        print('Creating minio credentials')
        kubectl.update_secret('gcsminio-proxy-credentials', {
            'MINIO_ACCESS_KEY': binascii.hexlify(os.urandom(8)).decode(),
            'MINIO_SECRET_KEY': binascii.hexlify(os.urandom(12)).decode(),
        })
    minio_container = {
        'name': 'minio',
        'image': 'orihoch/ckan-cloud-operator-gcsminio-proxy',
        'env': [{
            'name': 'GOOGLE_APPLICATION_CREDENTIALS',
            'value': '/gcloud-credentials/credentials.json'
        }],
        'envFrom': [{'secretRef': {'name': 'gcsminio-proxy-credentials'}}],
        'ports': [{'containerPort': 9000}],
        'volumeMounts': [
            # Mounts only the GCLOUD_SERVICE_ACCOUNT_JSON key of the
            # ckan-infra secret as a credentials file.
            {'name': 'gcloud-credentials',
             'mountPath': '/gcloud-credentials/credentials.json',
             'subPath': 'GCLOUD_SERVICE_ACCOUNT_JSON'},
        ],
    }
    deployment_spec = {
        'replicas': 1,
        'revisionHistoryLimit': 10,
        'strategy': {'type': 'RollingUpdate', },
        'template': {
            'metadata': {
                'labels': app_labels,
                'annotations': {
                    # Timestamp changes every run, so applying triggers a fresh rollout.
                    'ckan-cloud/operator-timestamp': str(datetime.datetime.now())
                }
            },
            'spec': {
                'containers': [minio_container],
                'volumes': [
                    {'name': 'gcloud-credentials',
                     'secret': {'secretName': 'ckan-infra'}},
                ]
            }
        }
    }
    kubectl.apply(kubectl.get_deployment('gcsminio-proxy', app_labels, deployment_spec))
    # Expose the gateway inside the cluster on port 9000.
    service = kubectl.get_resource('v1', 'Service', 'gcsminio-proxy', app_labels)
    service['spec'] = {
        'ports': [{'name': '9000', 'port': 9000}],
        'selector': app_labels
    }
    kubectl.apply(service)
    # Register a default route for the gateway the first time it is deployed.
    if not routers_manager.get_backend_url_routes('gcs-minio'):
        routers_manager.create_subdomain_route(router_name, {
            'target-type': 'backend-url',
            'target-resource-id': 'gcs-minio',
            'backend-url': 'http://gcsminio-proxy.ckan-cloud:9000',
            'sub-domain': 'default',
            'root-domain': 'default',
        })
        routers_manager.update(router_name, wait_ready=True)
def _get_deployment_spec(router_name, router_type, annotations, image=None,
                         httpauth_secrets=None, dns_provider=None):
    """Build the kubernetes Deployment spec for a Traefik router.

    Mounts the router configmap, the shared multi-user volume (ACME state),
    and one volume per http-auth secret.  SSL support is configured either
    via AWS Route53 (when dns_provider == 'route53') or Cloudflare (when the
    letsencryptCloudflareEnabled annotation flag is set); otherwise no SSL.
    """
    volume_spec = cluster_manager.get_or_create_multi_user_volume_claim(
        get_label_suffixes(router_name, router_type))
    httpauth_secrets_volume_mounts, httpauth_secrets_volumes = [], []
    if httpauth_secrets:
        # De-duplicate while preserving order: each secret becomes one volume
        # mounted at /httpauth-<secret-name>.
        added_secrets = []
        for httpauth_secret in httpauth_secrets:
            if httpauth_secret in added_secrets:
                continue
            added_secrets.append(httpauth_secret)
            httpauth_secrets_volumes.append({
                'name': httpauth_secret,
                'secret': {
                    'secretName': httpauth_secret
                }
            })
            httpauth_secrets_volume_mounts.append({
                'name': httpauth_secret,
                'mountPath': f'/httpauth-{httpauth_secret}'
            })
    # Optional JSON blob from a per-router configmap, merged into the
    # Traefik container spec (e.g. resource limits).
    container_spec_overrides = config_manager.get(
        'container-spec-overrides',
        configmap_name=f'traefik-router-{router_name}-deployment',
        required=False, default=None)
    deployment_spec = {
        'replicas': 1,
        'revisionHistoryLimit': 5,
        'template': {
            'metadata': {
                'labels': get_labels(router_name, router_type, for_deployment=True)
            },
            'spec': {
                'containers': [
                    {
                        'name': 'traefik',
                        'image': image or 'traefik:1.6-alpine',
                        'ports': [{'containerPort': 80}],
                        'volumeMounts': [
                            {'name': 'etc-traefik', 'mountPath': '/etc-traefik'},
                            {'name': 'traefik-acme', 'mountPath': '/traefik-acme',
                             'subPath': f'router-traefik-{router_name}'},
                            *httpauth_secrets_volume_mounts,
                        ],
                        'args': ['--configFile=/etc-traefik/traefik.toml'],
                        # Overrides are unpacked last so they win over the defaults above.
                        **(json.loads(container_spec_overrides) if container_spec_overrides else {})
                    }
                ],
                'volumes': [
                    {'name': 'etc-traefik', 'configMap': {'name': f'router-traefik-{router_name}'}},
                    dict(volume_spec, name='traefik-acme'),
                    *httpauth_secrets_volumes,
                ]
            }
        }
    }
    if dns_provider == 'route53':
        logs.info('Traefik deployment: adding SSL support using AWS Route53')
        container = deployment_spec['template']['spec']['containers'][0]
        container['ports'].append({'containerPort': 443})
        aws_credentials = cluster_manager.get_provider().get_aws_credentials()
        secret_name = f'ckancloudrouter-{router_name}-route53'
        # Traefik reads the AWS credentials from env for the ACME DNS challenge.
        kubectl.update_secret(
            secret_name, {
                'AWS_ACCESS_KEY_ID': aws_credentials['access'],
                'AWS_SECRET_ACCESS_KEY': aws_credentials['secret'],
                'AWS_REGION': aws_credentials['region']
            }, labels=get_labels(router_name, router_type))
        container['envFrom'] = [{'secretRef': {'name': secret_name}}]
    elif annotations.get_flag('letsencryptCloudflareEnabled'):
        logs.info('Traefik deployment: adding SSL support using Cloudflare')
        container = deployment_spec['template']['spec']['containers'][0]
        container['ports'].append({'containerPort': 443})
        cloudflare_email, cloudflare_api_key = get_cloudflare_credentials()
        secret_name = f'ckancloudrouter-{router_name}-cloudflare'
        kubectl.update_secret(secret_name, {
            'CLOUDFLARE_EMAIL': cloudflare_email,
            'CLOUDFLARE_API_KEY': cloudflare_api_key,
        }, labels=get_labels(router_name, router_type))
        container['envFrom'] = [{'secretRef': {'name': secret_name}}]
    else:
        logs.info('Not configuring SSL support for Traefik deployment')
    return deployment_spec
def _update(self):
    """Sync the instance's 'ckan-envvars' secret from its spec and annotations.

    Resolves DB/datastore connection strings (direct or via the db proxy),
    the solr endpoint, storage credentials, and the base envvars (from a
    secret or from gitlab), then writes everything into the instance
    namespace's 'ckan-envvars' secret.
    """
    spec = self.instance.spec
    db_name = spec.db['name']
    db_password = self.instance.annotations.get_secret('databasePassword')
    datastore_name = spec.datastore['name']
    datastore_password = self.instance.annotations.get_secret('datastorePassword')
    datastore_ro_user = self.instance.annotations.get_secret('datastoreReadonlyUser')
    # NOTE(review): 'datatastore...' is misspelled but must match the key the
    # secret was stored under elsewhere — do not "fix" without migrating data.
    datastore_ro_password = self.instance.annotations.get_secret('datatastoreReadonlyPassword')
    db_no_db_proxy = spec.db.get('no-db-proxy') == 'yes'
    datastore_no_db_proxy = spec.datastore.get('no-db-proxy') == 'yes'
    if db_no_db_proxy or datastore_no_db_proxy:
        # Proxy bypass is all-or-nothing across both databases.
        assert db_no_db_proxy and datastore_no_db_proxy, 'must set both DB and datastore with no-db-proxy'
        no_db_proxy = True
    else:
        no_db_proxy = False
    from ckan_cloud_operator.providers.solr import manager as solr_manager
    solr_http_endpoint = solr_manager.get_internal_http_endpoint()
    solr_collection_name = spec.solrCloudCollection['name']
    if 'fromSecret' in spec.envvars:
        envvars = kubectl.get(f'secret {spec.envvars["fromSecret"]}')
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the secret is operator-managed here, but consider
        # yaml.safe_load.
        envvars = yaml.load(kubectl.decode_secret(envvars, 'envvars.yaml'))
    elif 'fromGitlab' in spec.envvars:
        envvars = CkanGitlab().get_envvars(spec.envvars['fromGitlab'])
    else:
        raise Exception(f'invalid envvars spec: {spec.envvars}')
    assert envvars['CKAN_SITE_ID'] and envvars['CKAN_SITE_URL'] and envvars['CKAN_SQLALCHEMY_URL']
    from ckan_cloud_operator.providers.storage import manager as storage_manager
    storage_hostname, storage_access_key, storage_secret_key = storage_manager.get_provider().get_credentials()
    # First path component is the bucket, the remainder is the in-bucket path.
    storage_path_parts = spec.storage['path'].strip('/').split('/')
    storage_bucket = storage_path_parts[0]
    storage_path = '/'.join(storage_path_parts[1:])
    if no_db_proxy:
        postgres_host, postgres_port = db_manager.get_internal_unproxied_db_host_port()
        logs.info(f'Bypassing db proxy, connecting to DB directly: {postgres_host}:{postgres_port}')
    else:
        postgres_host, postgres_port = db_manager.get_internal_proxy_host_port()
        logs.info(f'Connecting to DB proxy: {postgres_host}:{postgres_port}')
    envvars.update(
        CKAN_SQLALCHEMY_URL=f"postgresql://{db_name}:{db_password}@{postgres_host}:{postgres_port}/{db_name}",
        CKAN___BEAKER__SESSION__URL=f"postgresql://{db_name}:{db_password}@{postgres_host}:{postgres_port}/{db_name}",
        CKAN__DATASTORE__READ_URL=f"postgresql://{datastore_ro_user}:{datastore_ro_password}@{postgres_host}:{postgres_port}/{datastore_name}",
        CKAN__DATASTORE__WRITE_URL=f"postgresql://{datastore_name}:{datastore_password}@{postgres_host}:{postgres_port}/{datastore_name}",
        CKAN_SOLR_URL=f"{solr_http_endpoint}/{solr_collection_name}",
        CKANEXT__S3FILESTORE__AWS_STORAGE_PATH=storage_path,
        CKANEXT__S3FILESTORE__AWS_ACCESS_KEY_ID=storage_access_key,
        CKANEXT__S3FILESTORE__AWS_SECRET_ACCESS_KEY=storage_secret_key,
        CKANEXT__S3FILESTORE__AWS_BUCKET_NAME=storage_bucket,
        CKANEXT__S3FILESTORE__HOST_NAME=f'https://{storage_hostname}/',
        CKANEXT__S3FILESTORE__REGION_NAME='us-east-1',
        CKANEXT__S3FILESTORE__SIGNATURE_VERSION='s3v4',
        CKAN__DATAPUSHER__URL=datapusher.get_datapusher_url(envvars.get('CKAN__DATAPUSHER__URL')),
    )
    # print(yaml.dump(envvars, default_flow_style=False))
    self._apply_instance_envvars_overrides(envvars)
    # Kubernetes secret values cannot be None — normalize to empty strings.
    envvars = {
        k: ('' if v is None else v)
        for k, v in envvars.items()
    }
    kubectl.update_secret('ckan-envvars', envvars, namespace=self.instance.id)
    self.site_url = envvars.get('CKAN_SITE_URL')
def create(cls, *args, **kwargs):
    """Create a Deis CKAN instance resource and apply it to the cluster.

    args[0] is the creation type ('from-gitlab' or 'from-gcloud-envvars'),
    args[-1] is the instance id; the remaining positional args depend on the
    creation type.  Recognized kwargs include db_prefix, storage_path,
    from_db_backups, use_private_gitlab_repo, solr_collection, rerun, force,
    recreate_dbs and db_migration_name.

    Returns a cls(...) wrapper around the applied instance resource.
    Raises NotImplementedError for an unknown creation type.
    """
    create_type = args[0]
    instance_id = args[-1]
    from ckan_cloud_operator.providers.db.manager import get_default_db_prefix
    db_prefix = kwargs['db_prefix'] if kwargs.get('db_prefix') else get_default_db_prefix()
    if create_type == 'from-gitlab':
        gitlab_repo = args[1]
        solr_config = args[2]
        db_name = instance_id
        datastore_name = f'{instance_id}-datastore'
        storage_path = kwargs.get('storage_path') or f'/ckan/{instance_id}'
        from_db_backups = kwargs.get('from_db_backups')
        logs.info(f'Creating Deis CKAN instance {instance_id}',
                  gitlab_repo=gitlab_repo, solr_config=solr_config,
                  db_name=db_name, datastore_name=datastore_name,
                  storage_path=storage_path, from_db_backups=from_db_backups)
        if kwargs.get('use_private_gitlab_repo'):
            # NOTE(review): this span was redacted (secret-scrubbed) in the
            # reviewed source; reconstructed from the surrounding calls —
            # confirm against version control.
            deploy_token_server = input('Gitlab registry url [default: registry.gitlab.com]: ') or 'registry.gitlab.com'
            deploy_token_username = input('Gitlab deploy token username: ')
            deploy_token_password = input('Gitlab deploy token password: ')
            # Recreate the pull secret so updated credentials take effect.
            kubectl.call('delete secret private-gitlab-registry', namespace=instance_id)
            kubectl.call(
                f'create secret docker-registry private-gitlab-registry '
                f'--docker-server={deploy_token_server} '
                f'--docker-username={deploy_token_username} '
                f'--docker-password={deploy_token_password}',
                namespace=instance_id)
        if from_db_backups:
            # Import both DBs from backups before creating the instance;
            # the migration events stream reports progress and the final status.
            db_import_url, datastore_import_url = from_db_backups.split(',')
            migration_name = None
            success = False
            for event in ckan_db_migration_manager.migrate_deis_dbs(
                    None, db_name, datastore_name,
                    db_import_url=db_import_url,
                    datastore_import_url=datastore_import_url,
                    rerun=kwargs.get('rerun'),
                    force=kwargs.get('force'),
                    recreate_dbs=kwargs.get('recreate_dbs'),
                    db_prefix=db_prefix):
                migration_name = ckan_db_migration_manager.get_event_migration_created_name(event) or migration_name
                success = ckan_db_migration_manager.print_event_exit_on_complete(
                    event,
                    f'DBs import {from_db_backups} -> {db_name}, {datastore_name}',
                    soft_exit=True)
                if success is not None:
                    break
            assert success, f'Invalid DB migration success value ({success})'
        else:
            migration_name = None
        spec = {
            'ckanPodSpec': {},
            'ckanContainerSpec': {'imageFromGitlab': gitlab_repo},
            'envvars': {'fromGitlab': gitlab_repo},
            'solrCloudCollection': {
                'name': kwargs.get('solr_collection') or instance_id,
                'configName': solr_config
            },
            'db': {
                'name': db_name,
                **({'fromDbMigration': migration_name} if migration_name else {}),
                **({'dbPrefix': db_prefix} if db_prefix else {})
            },
            'datastore': {
                'name': datastore_name,
                **({'fromDbMigration': migration_name} if migration_name else {}),
                **({'dbPrefix': db_prefix} if db_prefix else {})
            },
            'storage': {
                'path': storage_path,
            }
        }
        if kwargs.get('use_private_gitlab_repo'):
            spec['ckanContainerSpec']['imagePullSecrets'] = [{'name': 'private-gitlab-registry'}]
    elif create_type == 'from-gcloud-envvars':
        print(f'Creating Deis CKAN instance {instance_id} from gcloud envvars import')
        instance_env_yaml, image, solr_config, storage_path, instance_id = args[1:]
        db_migration_name = kwargs.get('db_migration_name')
        assert db_migration_name, 'creating from gcloud envvars without a db migration is not supported yet'
        # fix: use isinstance instead of type(...) == str
        if isinstance(instance_env_yaml, str):
            logs.info(f'Creating {instance_id}-envvars secret from file: {instance_env_yaml}')
            subprocess.check_call(
                f'kubectl -n ckan-cloud create secret generic {instance_id}-envvars --from-file=envvars.yaml={instance_env_yaml}',
                shell=True
            )
        else:
            logs.info(f'Creating {instance_id}-envvars secret from inline string')
            kubectl.update_secret(
                f'{instance_id}-envvars',
                {'envvars.yaml': yaml.dump(instance_env_yaml, default_flow_style=False)})
        spec = {
            'ckanPodSpec': {},
            'ckanContainerSpec': {'image': image},
            'envvars': {'fromSecret': f'{instance_id}-envvars'},
            'solrCloudCollection': {
                'name': instance_id,
                'configName': solr_config
            },
            'db': {
                'name': instance_id,
                'fromDbMigration': db_migration_name,
                **({'dbPrefix': db_prefix} if db_prefix else {})
            },
            'datastore': {
                'name': f'{instance_id}-datastore',
                'fromDbMigration': db_migration_name,
                **({'dbPrefix': db_prefix} if db_prefix else {})
            },
            'storage': {
                'path': storage_path
            }
        }
    else:
        raise NotImplementedError(f'invalid create type: {create_type}')
    instance_kind = ckan_manager.instance_kind()
    instance = {
        # fix: was an f-string with no placeholders
        'apiVersion': 'stable.viderum.com/v1',
        'kind': instance_kind,
        'metadata': {
            'name': instance_id,
            'namespace': 'ckan-cloud',
            # Finalizer lets the operator run cleanup before deletion completes.
            'finalizers': ['finalizer.stable.viderum.com']
        },
        'spec': spec
    }
    subprocess.run('kubectl apply -f -', input=yaml.dump(instance).encode(), shell=True, check=True)
    return cls(instance_id, values=instance)
def update(instance_id_or_name, override_spec=None, persist_overrides=False,
           wait_ready=False, skip_deployment=False, skip_route=False,
           force=False, dry_run=False):
    """Update a CKAN cloud instance: storage secrets, copied secrets,
    deployment, default route and admin user.

    With dry_run=True only logs the call arguments and does nothing else.
    """
    instance_id, instance_type, instance = _get_instance_id_and_type(
        instance_id_or_name, required=not dry_run)
    if dry_run:
        logs.info('update instance', instance_id=instance_id,
                  instance_id_or_name=instance_id_or_name,
                  override_spec=override_spec,
                  persist_overrides=persist_overrides,
                  wait_ready=wait_ready,
                  skip_deployment=skip_deployment,
                  skip_route=skip_route,
                  force=force,
                  dry_run=dry_run)
    else:
        pre_update_hook_data = deployment_manager.pre_update_hook(
            instance_id, instance_type, instance, override_spec, skip_route)
        bucket_credentials = instance['spec'].get('ckanStorageBucket', {}).get(
            get_storage_provider_id())
        use_cloud_storage = bucket_credentials and config_manager.get(
            'use-cloud-native-storage', secret_name=CONFIG_NAME)
        if use_cloud_storage:
            # NOTE(review): cluster_provider_id and literal are assigned but
            # never used below — possibly leftovers from removed/redacted
            # code; confirm against version control.
            cluster_provider_id = cluster_manager.get_provider_id()
            if bucket_credentials:
                literal = []
                config_manager.set(values=bucket_credentials,
                                   secret_name='bucket-credentials',
                                   namespace=instance_id)
        if instance['spec'].get('operatorCopySecrets'):
            # Copy the secrets listed in operatorCopySecrets (a JSON mapping)
            # into the instance namespace, substituting __INSTANCE_NAME__ in
            # the source config values first.
            for target_secret_name, source_secret_config in json.loads(
                    instance['spec']['operatorCopySecrets']).items():
                for k, v in source_secret_config.items():
                    source_secret_config[k] = v.replace(
                        "__INSTANCE_NAME__", instance_id_or_name)
                kubectl.update_secret(
                    target_secret_name,
                    kubectl.decode_secret(
                        kubectl.get('secret', source_secret_config["fromName"],
                                    namespace=source_secret_config.get(
                                        "fromNamespace", "ckan-cloud"))),
                    namespace=instance_id)
        if persist_overrides:
            logs.info('Persisting overrides')
            kubectl.apply(instance)
        if not skip_deployment:
            deployment_manager.update(instance_id, instance_type, instance,
                                      force=force)
            if wait_ready:
                wait_instance_ready(instance_id_or_name)
        if not skip_route and pre_update_hook_data.get('sub-domain'):
            root_domain = pre_update_hook_data.get('root-domain')
            sub_domain = pre_update_hook_data['sub-domain']
            # Only the default root domain is supported for instance routes.
            assert root_domain == routers_manager.get_default_root_domain(
            ), 'invalid domain, must use default root domain'
            logs.info(
                f'adding instance default route to {sub_domain}.{root_domain}')
            routers_manager.create_subdomain_route(
                'instances-default', {
                    'target-type': 'ckan-instance',
                    'ckan-instance-id': instance_id,
                    'root-domain': root_domain,
                    'sub-domain': sub_domain
                })
            logs.info(f'updating routers_manager wait_ready: {wait_ready}')
            routers_manager.update('instances-default', wait_ready)
        else:
            logs.info('skipping route creation', skip_route=skip_route,
                      sub_domain=pre_update_hook_data.get('sub-domain'))
        if not instance['spec'].get('skipCreateCkanAdmin', False):
            logs.info('creating ckan admin')
            # Spec values take precedence over the pre-update hook's values.
            ckan_admin_email = instance['spec'].get(
                'ckanAdminEmail', pre_update_hook_data.get('ckan-admin-email'))
            ckan_admin_password = pre_update_hook_data.get(
                'ckan-admin-password')
            ckan_admin_name = instance['spec'].get(
                'ckanAdminName',
                pre_update_hook_data.get('ckan-admin-name', 'admin'))
            res = create_ckan_admin_user(instance_id, ckan_admin_name,
                                         ckan_admin_email, ckan_admin_password)
            logs.info(**res)
        logs.info('Instance is ready', instance_id=instance_id,
                  instance_name=(instance_id_or_name
                                 if instance_id_or_name != instance_id
                                 else None))