def deploy():
    """Deploys a proxy inside the cluster which allows to access the centralized solr without authentication"""
    labels = {'app': 'ckan-cloud-solrcloud-proxy'}
    solr_url = parse_url(solr_manager.get_internal_http_endpoint())
    scheme = solr_url.scheme
    hostname = solr_url.hostname
    port = solr_url.port
    # Split only on the first ':' so a password containing ':' is preserved intact
    # (previously a plain split(':') raised ValueError on such passwords).
    solr_user, solr_password = solr_url.auth.split(':', 1)
    if not port:
        # No explicit port in the endpoint: fall back to the scheme default
        # (443 for https, solr's default 8983 otherwise).
        port = '443' if scheme == 'https' else '8983'
    # Store the upstream solr coordinates where the proxy container reads them (envFrom below)
    kubectl.update_secret(
        'solrcloud-proxy', {
            'SOLR_URL': f'{scheme}://{hostname}:{port}',
            'SOLR_USER': solr_user,
            'SOLR_PASSWORD': solr_password
        })
    kubectl.apply(
        kubectl.get_deployment(
            'solrcloud-proxy', labels, {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'template': {
                    'metadata': {
                        'labels': labels,
                        'annotations': {
                            # timestamp annotation forces a rollout on every apply
                            'ckan-cloud/operator-timestamp': str(datetime.datetime.now())
                        }
                    },
                    'spec': {
                        'containers': [{
                            'name': 'solrcloud-proxy',
                            'image': 'viderum/ckan-cloud-operator-solrcloud-proxy',
                            'envFrom': [{
                                'secretRef': {
                                    'name': 'solrcloud-proxy'
                                }
                            }],
                            'ports': [{
                                'containerPort': 8983
                            }],
                        }]
                    }
                }
            }))
    # Expose the proxy inside the cluster on solr's standard port
    service = kubectl.get_resource('v1', 'Service', 'solrcloud-proxy', labels)
    service['spec'] = {
        'ports': [{
            'name': '8983',
            'port': 8983
        }],
        'selector': labels
    }
    kubectl.apply(service)
def _apply_deployment():
    """Apply the adminer Deployment: one replica, RollingUpdate, port 8080."""
    name = _get_resource_name()
    labels = _get_resource_labels(for_deployment=True)
    spec = {
        'replicas': 1,
        'revisionHistoryLimit': 2,
        'strategy': {
            'type': 'RollingUpdate',
        },
        'template': {
            'metadata': {
                'labels': labels,
                'annotations': _get_resource_annotations(),
            },
            'spec': {
                'containers': [{
                    'name': 'adminer',
                    'image': 'adminer',
                    'ports': [{'containerPort': 8080}],
                }],
            },
        },
    }
    kubectl.apply(kubectl.get_deployment(name, labels, spec))
def _apply_deployment():
    """Deploy the pgbouncer connection pooler: a single-replica Deployment
    listening on port 5432, with its configuration mounted read-only from a
    secret-backed volume."""
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(),
            _get_resource_labels(for_deployment=True),
            {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'template': {
                    'metadata': {
                        'labels': _get_resource_labels(for_deployment=True),
                        'annotations': _get_resource_annotations()
                    },
                    'spec': {
                        'containers': [{
                            'name': 'pgbouncer',
                            'image': 'viderum/ckan-cloud-operator:pgbouncer',
                            'ports': [{
                                'containerPort': 5432
                            }],
                            'volumeMounts': [
                                {
                                    'name': 'config',
                                    'mountPath': '/var/local/pgbouncer',
                                    'readOnly': True
                                },
                            ],
                            # fail fast: a single failed TCP probe marks the pod unready
                            'readinessProbe': {
                                'failureThreshold': 1,
                                'initialDelaySeconds': 5,
                                'periodSeconds': 5,
                                'successThreshold': 1,
                                'tcpSocket': {
                                    'port': 5432
                                },
                                'timeoutSeconds': 5
                            },
                            'resources': {
                                'limits': {
                                    'memory': '2Gi',
                                },
                                'requests': {
                                    'cpu': '0.1',
                                    'memory': '0.2Gi',
                                }
                            }
                        }],
                        'volumes': [
                            # config volume is populated from a secret
                            _config_get_volume_spec('config', is_secret=True),
                        ]
                    }
                }
            }))
def _apply_deployment(db_prefix=None):
    """Deploy a single-replica PostGIS (postgres) database, optionally
    namespaced by ``db_prefix`` (used as a suffix for resource names/labels)."""
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(suffix=db_prefix),
            _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
            {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'selector': {
                    'matchLabels': _get_resource_labels(for_deployment=True, suffix=db_prefix or '')
                },
                'template': {
                    'metadata': {
                        'labels': _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
                        'annotations': _get_resource_annotations(suffix=db_prefix or '')
                    },
                    'spec': {
                        'containers': [{
                            'name': 'postgres',
                            'image': 'mdillon/postgis',
                            # NOTE(review): hardcoded default credentials -
                            # presumably only reachable inside the cluster;
                            # confirm before exposing this service externally.
                            'env': [
                                {
                                    'name': 'POSTGRES_PASSWORD',
                                    'value': 'postgres'
                                },
                                {
                                    'name': 'POSTGRES_USER',
                                    'value': 'postgres'
                                },
                            ],
                            'ports': [{
                                'containerPort': 5432
                            }],
                            'resources': {
                                'limits': {
                                    'memory': '1Gi',
                                },
                                'requests': {
                                    'cpu': '0.1',
                                    'memory': '0.2Gi',
                                }
                            }
                        }],
                    }
                }
            }))
def update(name):
    """Refresh the deployment for the named CkanCloudDatapusher resource."""
    _update_registry_secret()
    resource = kubectl.get(f'CkanCloudDatapusher {name}')
    target = get_deployment_name(name)
    datapusher_labels = _get_labels(name)
    deployment_spec = _get_deployment_spec(datapusher_labels, resource['spec'])
    print(f'Updating CkanCloudDatapusher {name} (deployment_name={target})')
    kubectl.apply(kubectl.get_deployment(target, datapusher_labels, deployment_spec))
def _apply_deployment(db_prefix=None):
    """Deploy a TCP proxy that forwards port 5432 to the Azure SQL host
    taken from the azuresql credentials secret."""
    db_host = config_manager.get(
        secret_name='ckan-cloud-provider-db-azuresql-credentials'
    )['azuresql-host']
    resource_label = _get_resource_labels(for_deployment=True, suffix=db_prefix or '')
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(suffix=db_prefix),
            resource_label,
            {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'selector': {
                    'matchLabels': resource_label
                },
                'template': {
                    'metadata': {
                        'labels': resource_label,
                        'annotations': _get_resource_annotations(suffix=db_prefix or '')
                    },
                    'spec': {
                        'containers': [{
                            'name': 'proxy',
                            'image': 'viderum/docker-tcp-proxy:latest',
                            # LISTEN/TALK: proxy listens on :5432 and forwards to the Azure SQL host
                            'env': [{
                                'name': 'LISTEN',
                                'value': ':5432'
                            }, {
                                'name': 'TALK',
                                'value': f'{db_host}:5432',
                            }],
                            'ports': [{
                                'containerPort': 5432
                            }],
                            'resources': {
                                'limits': {
                                    'memory': '0.3Gi',
                                },
                                'requests': {
                                    'cpu': '0.1',
                                    'memory': '0.2Gi',
                                }
                            }
                        }],
                    }
                }
            }))
def _apply_zoonavigator_deployment(dry_run=False):
    """Deploy zoonavigator (zookeeper web UI) and return the deployment name.

    Resource requests/limits are read from the ``solr-config`` secret
    (``zn-cpu``/``zn-mem``/``zn-cpu-limit``/``zn-mem-limit``).
    """
    cpu_req = config_manager.get('zn-cpu', secret_name='solr-config')
    mem_req = config_manager.get('zn-mem', secret_name='solr-config')
    cpu_lim = config_manager.get('zn-cpu-limit', secret_name='solr-config')
    mem_lim = config_manager.get('zn-mem-limit', secret_name='solr-config')
    suffix = 'zoonavigator'
    deployment_name = _get_resource_name(suffix)
    kubectl.apply(kubectl.get_deployment(
        deployment_name,
        _get_resource_labels(for_deployment=True, suffix=suffix),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            'selector': {
                'matchLabels': _get_resource_labels(for_deployment=True, suffix=suffix),
            },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix=suffix),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    'containers': [{
                        # the bundled web UI talks to its API over localhost:9000
                        'env': [{
                            'name': 'API_HOST',
                            'value': 'localhost'
                        }, {
                            'name': 'API_PORT',
                            'value': '9000'
                        }, {
                            'name': 'WEB_HTTP_PORT',
                            'value': '8000'
                        }],
                        'image': 'elkozmon/zoonavigator:0.7.1',
                        'name': 'zoonavigator',
                        'ports': [{
                            'containerPort': 8000,
                            'name': '8000tcp02',
                            'protocol': 'TCP'
                        }],
                        # bug fix: the configured requests/limits were fetched above
                        # but never applied (the container had 'resources': {}).
                        'resources': {
                            'requests': {
                                'cpu': cpu_req,
                                'memory': mem_req
                            },
                            'limits': {
                                'cpu': cpu_lim,
                                'memory': mem_lim
                            }
                        }
                    }]
                }
            }
        }), dry_run=dry_run)
    return deployment_name
def _apply_deployment(db_prefix=None):
    """Deploy a TCP proxy that forwards port 5432 to the RDS host taken
    from the rds credentials secret."""
    rds_host = config_manager.get(
        secret_name='ckan-cloud-provider-db-rds-credentials')['rds-host']
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(suffix=db_prefix),
            _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
            {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'template': {
                    'metadata': {
                        'labels': _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
                        'annotations': _get_resource_annotations(suffix=db_prefix or '')
                    },
                    'spec': {
                        'containers': [{
                            'name': 'proxy',
                            # https://github.com/OriHoch/docker-tcp-proxy
                            'image': 'viderum/docker-tcp-proxy:latest',
                            # LISTEN/TALK: proxy listens on :5432 and forwards to the RDS host
                            'env': [{
                                'name': 'LISTEN',
                                'value': ':5432'
                            }, {
                                'name': 'TALK',
                                'value': f'{rds_host}:5432',
                            }],
                            'ports': [{
                                'containerPort': 5432
                            }],
                            'resources': {
                                'limits': {
                                    'memory': '0.5Gi',
                                },
                                'requests': {
                                    'cpu': '0.1',
                                    'memory': '0.2Gi',
                                }
                            }
                        }]
                    }
                }
            }))
def _apply_deployment(volume_spec, storage_suffix=None, dry_run=False):
    """Deploy a single-replica minio server backed by the given volume spec.

    Supports an optional ``nodeSelector`` (popped out of ``volume_spec``) and
    optional JSON container-spec overrides merged over the default container.
    """
    # NOTE: pop mutates the caller's volume_spec - the nodeSelector must not
    # leak into the pod volume definition used below.
    node_selector = volume_spec.pop('nodeSelector', None)
    if node_selector:
        pod_scheduling = {'nodeSelector': node_selector}
    else:
        pod_scheduling = {}
    container_spec_overrides = _config_get('container-spec-overrides', required=False, default=None, suffix=storage_suffix)
    kubectl.apply(kubectl.get_deployment(
        _get_resource_name(suffix=storage_suffix),
        _get_resource_labels(for_deployment=True, suffix=storage_suffix),
        {
            'replicas': 1,
            'revisionHistoryLimit': 10,
            # Recreate: the data volume should not be attached to two pods at once
            'strategy': {'type': 'Recreate', },
            'selector': {
                'matchLabels': _get_resource_labels(for_deployment=True, suffix=storage_suffix)
            },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix=storage_suffix),
                    'annotations': _get_resource_annotations(suffix=storage_suffix)
                },
                'spec': {
                    **pod_scheduling,
                    'containers': [
                        {
                            'name': 'minio',
                            'image': 'minio/minio',
                            'args': ['server', '/export'],
                            # credentials come from the resource's secret
                            'envFrom': [{'secretRef': {'name': _get_resource_name(suffix=storage_suffix)}}],
                            'ports': [{'containerPort': 9000}],
                            'volumeMounts': [
                                {
                                    'name': 'minio-data',
                                    'mountPath': '/export',
                                }
                            ],
                            # overrides (JSON string) take precedence over the defaults above
                            **(json.loads(container_spec_overrides) if container_spec_overrides else {})
                        }
                    ],
                    'volumes': [
                        dict(volume_spec, name='minio-data')
                    ]
                }
            }
        }
    ), dry_run=dry_run)
def _apply_deployment():
    """Deploy the efs-provisioner into the 'default' namespace.

    EFS settings come from the resource configmap; the EFS filesystem is
    mounted via NFS at /persistentvolumes.
    """
    labels = _get_resource_labels(for_deployment=True)
    # add the app label expected by the selector below
    labels = dict(labels, app='efs-provisioner')
    configmap_name = _get_resource_name()
    fs_id = _config_get('file.system.id', namespace='default')
    aws_region = _config_get('aws.region', namespace='default')
    kubectl.apply(
        kubectl.get_deployment(
            'efs-provisioner',
            labels,
            dict(
                replicas=1,
                strategy=dict(type='Recreate'),
                selector=dict(matchLabels=labels),
                template=dict(
                    metadata=dict(labels=labels),
                    spec=dict(
                        containers=[
                            dict(
                                name='efs-provisioner',
                                image='quay.io/external_storage/efs-provisioner:latest',
                                # map configmap keys to env vars; only DNS_NAME is optional
                                env=[
                                    dict(name=name,
                                         valueFrom=dict(configMapKeyRef=dict(
                                             name=configmap_name, key=key, optional=optional)))
                                    for name, key, optional in [
                                        ('FILE_SYSTEM_ID', 'file.system.id', False),
                                        ('AWS_REGION', 'aws.region', False),
                                        ('DNS_NAME', 'dns.name', True),
                                        ('PROVISIONER_NAME', 'provisioner.name', False),
                                    ]
                                ],
                                volumeMounts=[
                                    dict(name='pv-volume', mountPath='/persistentvolumes')
                                ])
                        ],
                        volumes=[
                            dict(
                                name='pv-volume',
                                nfs=dict(
                                    server=f'{fs_id}.efs.{aws_region}.amazonaws.com',
                                    path='/'))
                        ]))),
            namespace='default'))
def deploy_ckan_infra_solr_proxy():
    """Deploys a proxy inside the cluster which allows to access the centralized solr without authentication"""
    labels = {'app': 'ckan-cloud-solrcloud-proxy'}
    # NOTE(review): `cls` is not defined in this function's visible scope -
    # this appears to rely on an enclosing class or closure; confirm before reuse.
    infra = cls()
    solr_url = urlparse(infra.SOLR_HTTP_ENDPOINT)
    scheme = solr_url.scheme
    hostname = solr_url.hostname
    port = solr_url.port
    if not port:
        # no explicit port: fall back to scheme default (443 for https, solr's 8983 otherwise)
        port = '443' if scheme == 'https' else '8983'
    # store upstream solr coordinates where the proxy container reads them (envFrom below)
    kubectl.update_secret('solrcloud-proxy', {
        'SOLR_URL': f'{scheme}://{hostname}:{port}',
        'SOLR_USER': infra.SOLR_USER,
        'SOLR_PASSWORD': infra.SOLR_PASSWORD
    })
    kubectl.apply(kubectl.get_deployment('solrcloud-proxy', labels, {
        'replicas': 1,
        'revisionHistoryLimit': 10,
        'strategy': {'type': 'RollingUpdate', },
        'template': {
            'metadata': {
                'labels': labels,
                'annotations': {
                    # timestamp annotation forces a rollout on every apply
                    'ckan-cloud/operator-timestamp': str(datetime.datetime.now())
                }
            },
            'spec': {
                'containers': [
                    {
                        'name': 'solrcloud-proxy',
                        'image': 'orihoch/ckan-cloud-operator-solrcloud-proxy',
                        'envFrom': [{'secretRef': {'name': 'solrcloud-proxy'}}],
                        'ports': [{'containerPort': 8983}],
                    }
                ]
            }
        }
    }))
    # expose the proxy inside the cluster on solr's standard port
    service = kubectl.get_resource('v1', 'Service', 'solrcloud-proxy', labels)
    service['spec'] = {
        'ports': [
            {'name': '8983', 'port': 8983}
        ],
        'selector': labels
    }
    kubectl.apply(service)
def _apply_deployment(volume_spec, storage_suffix=None):
    """Deploy a single-replica minio server backed by the given volume spec."""
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(suffix=storage_suffix),
            _get_resource_labels(for_deployment=True, suffix=storage_suffix),
            {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                # Recreate: the data volume should not be attached to two pods at once
                'strategy': {
                    'type': 'Recreate',
                },
                'template': {
                    'metadata': {
                        'labels': _get_resource_labels(for_deployment=True, suffix=storage_suffix),
                        'annotations': _get_resource_annotations(suffix=storage_suffix)
                    },
                    'spec': {
                        'containers': [{
                            'name': 'minio',
                            'image': 'minio/minio',
                            'args': ['server', '/export'],
                            # credentials come from the resource's secret
                            'envFrom': [{
                                'secretRef': {
                                    'name': _get_resource_name(suffix=storage_suffix)
                                }
                            }],
                            'ports': [{
                                'containerPort': 9000
                            }],
                            'volumeMounts': [{
                                'name': 'minio-data',
                                'mountPath': '/export',
                            }],
                        }],
                        'volumes': [dict(volume_spec, name='minio-data')]
                    }
                }
            }))
def _apply_zoonavigator_deployment():
    """Deploy zoonavigator (web + api containers) for browsing zookeeper and
    return the deployment name."""
    suffix = 'zoonavigator'
    deployment_name = _get_resource_name(suffix)
    kubectl.apply(kubectl.get_deployment(
        deployment_name,
        _get_resource_labels(for_deployment=True, suffix=suffix),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix=suffix),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    'containers': [
                        {
                            # web UI container talks to the api container over localhost:9000
                            'env': [
                                {'name': 'API_HOST', 'value': 'localhost'},
                                {'name': 'API_PORT', 'value': '9000'},
                                {'name': 'WEB_HTTP_PORT', 'value': '8000'}
                            ],
                            'image': 'elkozmon/zoonavigator-web:0.5.0',
                            'name': 'zoonavigator-web',
                            'ports': [
                                {'containerPort': 8000, 'name': '8000tcp02', 'protocol': 'TCP'}
                            ],
                            'resources': {}
                        },
                        {
                            'env': [
                                {'name': 'API_HTTP_PORT', 'value': '9000'}
                            ],
                            'image': 'elkozmon/zoonavigator-api:0.5.0',
                            'name': 'zoonavigator-api',
                            'resources': {'requests': {'cpu': '0.01', 'memory': '0.01Gi'}, 'limits': {'memory': '0.5Gi'}},
                        }
                    ],
                }
            }
        }
    ))
    return deployment_name
def _update(router_name, spec, annotations, routes):
    """Update a traefik router: its configmap, load balancer service, DNS
    records and deployment.

    :param router_name: name of the router resource
    :param spec: router spec dict (type, external-domains, dns-provider, ...)
    :param annotations: annotations passed through to the deployment spec
    :param routes: route resources served by this router
    """
    resource_name = _get_resource_name(router_name)
    router_type = spec['type']
    cloudflare_email, cloudflare_auth_key = get_cloudflare_credentials()
    external_domains = spec.get('external-domains')
    dns_provider = spec.get('dns-provider', 'cloudflare')
    logs.info('updating traefik deployment',
              resource_name=resource_name,
              router_type=router_type,
              cloudflare_email=cloudflare_email,
              # log only the key length, never the key itself
              cloudflare_auth_key_len=len(cloudflare_auth_key) if cloudflare_auth_key else 0,
              external_domains=external_domains,
              dns_provider=dns_provider)
    # render traefik.toml for all routes into the router's configmap
    kubectl.apply(
        kubectl.get_configmap(
            resource_name, get_labels(router_name, router_type), {
                'traefik.toml': toml.dumps(
                    traefik_router_config.get(
                        routes,
                        cloudflare_email,
                        enable_access_log=bool(spec.get('enable-access-log')),
                        wildcard_ssl_domain=spec.get('wildcard-ssl-domain'),
                        external_domains=external_domains,
                        dns_provider=dns_provider,
                        force=True))
            }))
    domains = {}
    httpauth_secrets = []
    for route in routes:
        root_domain, sub_domain = routes_manager.get_domain_parts(route)
        domains.setdefault(root_domain, []).append(sub_domain)
        routes_manager.pre_deployment_hook(
            route, get_labels(router_name, router_type))
        # collect distinct httpauth secrets so the deployment can mount them
        if route['spec'].get('httpauth-secret') and route['spec'][
                'httpauth-secret'] not in httpauth_secrets:
            httpauth_secrets.append(route['spec']['httpauth-secret'])
    load_balancer = kubectl.get_resource('v1', 'Service',
                                         f'loadbalancer-{resource_name}',
                                         get_labels(router_name, router_type))
    load_balancer['spec'] = {
        'ports': [
            {
                'name': '80',
                'port': 80
            },
            {
                'name': '443',
                'port': 443
            },
        ],
        'selector': {
            'app': get_labels(router_name, router_type, for_deployment=True)['app']
        },
        'type': 'LoadBalancer'
    }
    kubectl.apply(load_balancer)
    load_balancer_ip = get_load_balancer_ip(router_name)
    print(f'load balancer ip: {load_balancer_ip}')
    from ckan_cloud_operator.providers.routers import manager as routers_manager
    if external_domains:
        # fix: removed a duplicate local import of routers_manager here - it is
        # already imported unconditionally just above the if statement.
        # external-domains routers get a single cc-<env>-<router> A record
        external_domains_router_root_domain = routers_manager.get_default_root_domain()
        env_id = routers_manager.get_env_id()
        assert router_name.startswith(
            'prod-'), f'invalid external domains router name: {router_name}'
        external_domains_router_sub_domain = f'cc-{env_id}-{router_name}'
        routers_manager.update_dns_record(dns_provider,
                                          external_domains_router_sub_domain,
                                          external_domains_router_root_domain,
                                          load_balancer_ip, cloudflare_email,
                                          cloudflare_auth_key)
    else:
        # otherwise, one DNS record per route sub-domain
        for root_domain, sub_domains in domains.items():
            for sub_domain in sub_domains:
                routers_manager.update_dns_record(dns_provider, sub_domain,
                                                  root_domain,
                                                  load_balancer_ip,
                                                  cloudflare_email,
                                                  cloudflare_auth_key)
    kubectl.apply(
        kubectl.get_deployment(
            resource_name, get_labels(router_name, router_type, for_deployment=True),
            _get_deployment_spec(
                router_name,
                router_type,
                annotations,
                # traefik:1.7 is required for external domains / httpauth support
                image=('traefik:1.7' if
                       (external_domains or len(httpauth_secrets) > 0) else None),
                httpauth_secrets=httpauth_secrets,
                dns_provider=dns_provider)))
def _apply_solrcloud_deployment(suffix, volume_spec, configmap_name, log_configmap_name, headless_service_name, pause_deployment, dry_run=False):
    """Deploy a single solrcloud node (pod hostname = ``suffix``) behind the
    headless service.

    ``pause_deployment`` replaces the solr entrypoint with a long sleep (for
    debugging); JSON ``container-spec-overrides`` from the sc configmap, when
    present, replace the default resource requests/limits entirely.
    """
    cpu_req = config_manager.get('sc-cpu', secret_name='solr-config')
    mem_req = config_manager.get('sc-mem', secret_name='solr-config')
    cpu_lim = config_manager.get('sc-cpu-limit', secret_name='solr-config')
    mem_lim = config_manager.get('sc-mem-limit', secret_name='solr-config')
    namespace = cluster_manager.get_operator_namespace_name()
    container_spec_overrides = config_manager.get(
        'container-spec-overrides',
        configmap_name='ckan-cloud-provider-solr-solrcloud-sc-config',
        required=False,
        default=None)
    # default resources are dropped entirely when overrides are provided
    resources = {
        'requests': {
            'cpu': cpu_req,
            'memory': mem_req
        },
        'limits': {
            'cpu': cpu_lim,
            'memory': mem_lim
        }
    } if not container_spec_overrides else {}
    kubectl.apply(kubectl.get_deployment(
        _get_resource_name(suffix),
        _get_resource_labels(for_deployment=True, suffix='sc'),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            'strategy': {'type': 'Recreate', },
            'selector': {
                'matchLabels': _get_resource_labels(for_deployment=True, suffix='sc'),
            },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix='sc'),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    # stable pod DNS: <suffix>.<headless_service_name>.<namespace>.svc.cluster.local
                    'hostname': suffix,
                    'subdomain': headless_service_name,
                    **_get_volume_pod_scheduling(
                        volume_spec,
                        _get_resource_labels(for_deployment=True, suffix='sc')['app']
                    ),
                    'initContainers': [
                        {
                            # seeds solr.xml on the data volume and fixes ownership
                            'name': 'init',
                            'image': 'alpine',
                            'command': [
                                "sh", "-c",
                                f""" if [ -e /data/solr/solr.xml ]; then echo /data/solr/solr.xml already exists, will not recreate else echo creating /data/solr/solr.xml &&\ mkdir -p /data/solr &&\ echo \'{SOLR_CONFIG_XML}\' > /data/solr/solr.xml fi &&\ echo Setting permissions to solr user/group 8983:8983 on /data/solr &&\ chown -R 8983:8983 /data/solr &&\ echo init completed successfully """
                            ],
                            'securityContext': {
                                # run as root so the init script can chown the volume
                                'runAsUser': 0
                            },
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                            ]
                        }
                    ],
                    'containers': [
                        {
                            'name': 'sc',
                            'envFrom': [{'configMapRef': {'name': configmap_name}}],
                            'env': [
                                {'name': 'SOLR_HOST', 'value': f'{suffix}.{headless_service_name}.{namespace}.svc.cluster.local'}
                            ],
                            # pause mode disables probes and keeps the container idle
                            **({
                                'command': ['sh', '-c', 'sleep 86400']
                            } if pause_deployment else {
                                'livenessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                                'readinessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                            }),
                            'image': 'solr:5.5.5',
                            'ports': [
                                {'containerPort': 8983, 'name': 'solr', 'protocol': 'TCP'},
                                {'containerPort': 7983, 'name': 'stop', 'protocol': 'TCP'},
                                {'containerPort': 18983, 'name': 'rmi', 'protocol': 'TCP'}
                            ],
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                                {'mountPath': '/logconfig', 'name': 'logconfig'}
                            ],
                            **({'resources': resources} if resources else {}),
                            # JSON overrides win over the defaults above
                            **(json.loads(container_spec_overrides) if container_spec_overrides else {})
                        }
                    ],
                    'volumes': [
                        {'configMap': {'defaultMode': 420, 'name': log_configmap_name}, 'name': 'logconfig'},
                        dict(volume_spec, name='datadir')
                    ]
                }
            }
        },
        with_timestamp=False
    ), dry_run=dry_run)
def _apply_zookeeper_deployment(suffix, volume_spec, zookeeper_configmap_name, headless_service_name, dry_run=False):
    """Deploy a single zookeeper node (pod hostname = ``suffix``) behind the
    headless service, with resources read from the ``solr-config`` secret."""
    cpu_req = config_manager.get('zk-cpu', secret_name='solr-config')
    mem_req = config_manager.get('zk-mem', secret_name='solr-config')
    cpu_lim = config_manager.get('zk-cpu-limit', secret_name='solr-config')
    mem_lim = config_manager.get('zk-mem-limit', secret_name='solr-config')
    kubectl.apply(kubectl.get_deployment(
        _get_resource_name(suffix),
        _get_resource_labels(
            for_deployment=True, suffix='zk'),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            'strategy': {
                'type': 'Recreate',
            },
            'selector': {
                'matchLabels': _get_resource_labels(for_deployment=True, suffix='zk'),
            },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix='zk'),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    # stable pod DNS: <suffix>.<headless_service_name>...
                    'hostname': suffix,
                    'subdomain': headless_service_name,
                    **_get_volume_pod_scheduling(
                        volume_spec,
                        _get_resource_labels(for_deployment=True, suffix='zk')['app']),
                    'containers': [{
                        'name': 'zk',
                        'command': [
                            'sh', '-c',
                            'zkGenConfig.sh && zkServer.sh start-foreground'
                        ],
                        'envFrom': [{
                            'configMapRef': {
                                'name': zookeeper_configmap_name
                            }
                        }],
                        # NOTE(review): env var is named SOLR_HOST but carries
                        # the pod IP - confirm the intended consumer.
                        'env': [{
                            'name': 'SOLR_HOST',
                            'valueFrom': {
                                'fieldRef': {
                                    'apiVersion': 'v1',
                                    'fieldPath': 'status.podIP'
                                }
                            }
                        }],
                        'image': 'gcr.io/google_samples/k8szk:v3',
                        'livenessProbe': {
                            'exec': {
                                'command': ['zkOk.sh']
                            },
                            'failureThreshold': 3,
                            'initialDelaySeconds': 15,
                            'periodSeconds': 10,
                            'successThreshold': 1,
                            'timeoutSeconds': 5
                        },
                        'ports': [{
                            'containerPort': 2181,
                            'name': 'client',
                            'protocol': 'TCP'
                        }, {
                            'containerPort': 2888,
                            'name': 'server',
                            'protocol': 'TCP'
                        }, {
                            'containerPort': 3888,
                            'name': 'leader-election',
                            'protocol': 'TCP'
                        }],
                        'readinessProbe': {
                            'exec': {
                                'command': ['zkOk.sh']
                            },
                            'failureThreshold': 3,
                            'initialDelaySeconds': 15,
                            'periodSeconds': 10,
                            'successThreshold': 1,
                            'timeoutSeconds': 5
                        },
                        'resources': {
                            'requests': {
                                'cpu': cpu_req,
                                'memory': mem_req
                            },
                            'limits': {
                                'cpu': cpu_lim,
                                'memory': mem_lim
                            }
                        },
                        'volumeMounts': [
                            {
                                'mountPath': '/var/lib/zookeeper',
                                'name': 'datadir'
                            },
                        ],
                    }],
                    'volumes': [dict(volume_spec, name='datadir')]
                }
            }
        },
        with_timestamp=False), dry_run=dry_run)
def _apply_solrcloud_deployment(suffix, volume_spec, configmap_name, log_configmap_name, headless_service_name, pause_deployment):
    """Deploy a single solrcloud node (pod hostname = ``suffix``) behind the
    headless service.

    ``pause_deployment`` replaces the solr entrypoint with a long sleep (for
    debugging).
    """
    namespace = cluster_manager.get_operator_namespace_name()
    kubectl.apply(kubectl.get_deployment(
        _get_resource_name(suffix),
        _get_resource_labels(for_deployment=True, suffix='sc'),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            'strategy': {'type': 'Recreate', },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix='sc'),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    # stable pod DNS: <suffix>.<headless_service_name>...
                    'hostname': suffix,
                    'subdomain': headless_service_name,
                    # keep solrcloud pods on distinct nodes
                    'affinity': {
                        'podAntiAffinity': {'requiredDuringSchedulingIgnoredDuringExecution': [
                            {
                                'labelSelector': {'matchExpressions': [
                                    {'key': 'app', 'operator': 'In', 'values': [
                                        _get_resource_labels(for_deployment=True, suffix='sc')['app']
                                    ]}
                                ]},
                                'topologyKey': 'kubernetes.io/hostname'
                            }
                        ]}},
                    'initContainers': [
                        {
                            # seeds solr.xml on the data volume and fixes ownership
                            'name': 'init',
                            'image': 'alpine',
                            'command': [
                                "sh", "-c",
                                f""" if [ -e /data/solr/solr.xml ]; then echo /data/solr/solr.xml already exists, will not recreate else echo creating /data/solr/solr.xml &&\ mkdir -p /data/solr &&\ echo \'{SOLR_CONFIG_XML}\' > /data/solr/solr.xml fi &&\ echo Setting permissions to solr user/group 8983:8983 on /data/solr &&\ chown -R 8983:8983 /data/solr &&\ echo init completed successfully """
                            ],
                            'securityContext': {
                                # run as root so the init script can chown the volume
                                'runAsUser': 0
                            },
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                            ]
                        }
                    ],
                    'containers': [
                        {
                            'name': 'sc',
                            'envFrom': [{'configMapRef': {'name': configmap_name}}],
                            'env': [
                                {'name': 'SOLR_HOST', 'value': f'{suffix}.{headless_service_name}.{namespace}.svc.cluster.local'}
                            ],
                            # pause mode disables probes and keeps the container idle
                            **({
                                'command': ['sh', '-c', 'sleep 86400']
                            } if pause_deployment else {
                                'livenessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                                'readinessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                            }),
                            'image': 'solr:5.5.5',
                            'ports': [
                                {'containerPort': 8983, 'name': 'solr', 'protocol': 'TCP'},
                                {'containerPort': 7983, 'name': 'stop', 'protocol': 'TCP'},
                                {'containerPort': 18983, 'name': 'rmi', 'protocol': 'TCP'}
                            ],
                            'resources': {'requests': {'cpu': '1', 'memory': '4Gi'}, 'limits': {'cpu': '2.5', 'memory': '8Gi'}},
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                                {'mountPath': '/logconfig', 'name': 'logconfig'}
                            ],
                        }
                    ],
                    'volumes': [
                        {'configMap': {'defaultMode': 420, 'name': log_configmap_name}, 'name': 'logconfig'},
                        dict(volume_spec, name='datadir')
                    ]
                }
            }
        },
        with_timestamp=False
    ))
def _apply_zookeeper_deployment(suffix, volume_spec, zookeeper_configmap_name, headless_service_name):
    """Deploy a single zookeeper node (pod hostname = ``suffix``) behind the
    headless service, with hardcoded resource requests/limits."""
    kubectl.apply(kubectl.get_deployment(
        _get_resource_name(suffix),
        _get_resource_labels(for_deployment=True, suffix='zk'),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            'strategy': {'type': 'Recreate', },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix='zk'),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    # stable pod DNS: <suffix>.<headless_service_name>...
                    'hostname': suffix,
                    'subdomain': headless_service_name,
                    # keep zookeeper pods on distinct nodes
                    'affinity': {
                        'podAntiAffinity': {'requiredDuringSchedulingIgnoredDuringExecution': [
                            {
                                'labelSelector': {'matchExpressions': [
                                    {'key': 'app', 'operator': 'In', 'values': [
                                        _get_resource_labels(for_deployment=True, suffix='zk')['app']
                                    ]}
                                ]},
                                'topologyKey': 'kubernetes.io/hostname'
                            }
                        ]}},
                    'containers': [
                        {
                            'name': 'zk',
                            'command': ['sh', '-c', 'zkGenConfig.sh && zkServer.sh start-foreground'],
                            'envFrom': [{'configMapRef': {'name': zookeeper_configmap_name}}],
                            # NOTE(review): env var is named SOLR_HOST but carries
                            # the pod IP - confirm the intended consumer.
                            'env': [
                                {'name': 'SOLR_HOST', 'valueFrom': {'fieldRef': {'apiVersion': 'v1', 'fieldPath': 'status.podIP'}}}
                            ],
                            'image': 'gcr.io/google_samples/k8szk:v3',
                            'livenessProbe': {
                                'exec': {'command': ['zkOk.sh']},
                                'failureThreshold': 3,
                                'initialDelaySeconds': 15,
                                'periodSeconds': 10,
                                'successThreshold': 1,
                                'timeoutSeconds': 5
                            },
                            'ports': [
                                {'containerPort': 2181, 'name': 'client', 'protocol': 'TCP'},
                                {'containerPort': 2888, 'name': 'server', 'protocol': 'TCP'},
                                {'containerPort': 3888, 'name': 'leader-election', 'protocol': 'TCP'}
                            ],
                            'readinessProbe': {
                                'exec': {'command': ['zkOk.sh']},
                                'failureThreshold': 3,
                                'initialDelaySeconds': 15,
                                'periodSeconds': 10,
                                'successThreshold': 1,
                                'timeoutSeconds': 5
                            },
                            'resources': {'requests': {'cpu': '0.5', 'memory': '1Gi'}, 'limits': {'memory': '2Gi'}},
                            'volumeMounts': [
                                {'mountPath': '/var/lib/zookeeper', 'name': 'datadir'},
                            ],
                        }
                    ],
                    'volumes': [
                        dict(volume_spec, name='datadir')
                    ]
                }
            }
        },
        with_timestamp=False
    ))
def _update(router_name, spec, annotations, routes):
    """Update a traefik router: its configmap, load balancer service,
    cloudflare DNS records and deployment."""
    resource_name = _get_resource_name(router_name)
    router_type = spec['type']
    cloudflare_email, cloudflare_auth_key = get_cloudflare_credentials()
    external_domains = spec.get('external-domains')
    # render traefik.toml for all routes into the router's configmap
    kubectl.apply(
        kubectl.get_configmap(
            resource_name, get_labels(router_name, router_type), {
                'traefik.toml': toml.dumps(
                    traefik_router_config.get(
                        routes,
                        cloudflare_email,
                        wildcard_ssl_domain=spec.get('wildcard-ssl-domain'),
                        external_domains=external_domains))
            }))
    domains = {}
    for route in routes:
        root_domain, sub_domain = routes_manager.get_domain_parts(route)
        domains.setdefault(root_domain, []).append(sub_domain)
        routes_manager.pre_deployment_hook(
            route, get_labels(router_name, router_type))
    load_balancer = kubectl.get_resource('v1', 'Service',
                                         f'loadbalancer-{resource_name}',
                                         get_labels(router_name, router_type))
    load_balancer['spec'] = {
        'ports': [
            {
                'name': '80',
                'port': 80
            },
            {
                'name': '443',
                'port': 443
            },
        ],
        'selector': {
            'app': get_labels(router_name, router_type, for_deployment=True)['app']
        },
        'type': 'LoadBalancer'
    }
    kubectl.apply(load_balancer)
    load_balancer_ip = get_load_balancer_ip(router_name)
    print(f'load balancer ip: {load_balancer_ip}')
    if external_domains:
        from ckan_cloud_operator.providers.routers import manager as routers_manager
        # external-domains routers get a single cc-<env>-<router> A record
        external_domains_router_root_domain = routers_manager.get_default_root_domain()
        env_id = routers_manager.get_env_id()
        assert router_name.startswith(
            'prod-'), f'invalid external domains router name: {router_name}'
        external_domains_router_sub_domain = f'cc-{env_id}-{router_name}'
        cloudflare.update_a_record(
            cloudflare_email, cloudflare_auth_key,
            external_domains_router_root_domain,
            f'{external_domains_router_sub_domain}.{external_domains_router_root_domain}',
            load_balancer_ip)
    else:
        # otherwise, one A record per route sub-domain
        for root_domain, sub_domains in domains.items():
            for sub_domain in sub_domains:
                cloudflare.update_a_record(cloudflare_email,
                                           cloudflare_auth_key, root_domain,
                                           f'{sub_domain}.{root_domain}',
                                           load_balancer_ip)
    kubectl.apply(
        kubectl.get_deployment(
            resource_name, get_labels(router_name, router_type, for_deployment=True),
            _get_deployment_spec(
                router_name, router_type, annotations,
                # traefik:1.7 is required for external domains support
                image='traefik:1.7' if external_domains else None)))
def deploy_gcs_minio_proxy(router_name):
    """Deploys a minio proxy (AKA gateway) for access to google storage"""
    labels = {'app': 'ckan-cloud-gcsminio-proxy'}
    # generate credentials only once; later deploys reuse the existing secret
    if not kubectl.get('secret gcsminio-proxy-credentials', required=False):
        print('Creating minio credentials')
        # os.urandom is a CSPRNG, so these hex tokens are usable as credentials
        minio_access_key = binascii.hexlify(os.urandom(8)).decode()
        minio_secret_key = binascii.hexlify(os.urandom(12)).decode()
        kubectl.update_secret(
            'gcsminio-proxy-credentials', {
                'MINIO_ACCESS_KEY': minio_access_key,
                'MINIO_SECRET_KEY': minio_secret_key,
            })
    kubectl.apply(
        kubectl.get_deployment(
            'gcsminio-proxy', labels, {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'template': {
                    'metadata': {
                        'labels': labels,
                        'annotations': {
                            # timestamp annotation forces a rollout on every apply
                            'ckan-cloud/operator-timestamp': str(datetime.datetime.now())
                        }
                    },
                    'spec': {
                        'containers': [{
                            'name': 'minio',
                            'image': 'orihoch/ckan-cloud-operator-gcsminio-proxy',
                            'env': [{
                                'name': 'GOOGLE_APPLICATION_CREDENTIALS',
                                'value': '/gcloud-credentials/credentials.json'
                            }],
                            'envFrom': [{
                                'secretRef': {
                                    'name': 'gcsminio-proxy-credentials'
                                }
                            }],
                            'ports': [{
                                'containerPort': 9000
                            }],
                            'volumeMounts': [
                                {
                                    # mounts only the service account json key
                                    # out of the ckan-infra secret
                                    'name': 'gcloud-credentials',
                                    'mountPath': '/gcloud-credentials/credentials.json',
                                    'subPath': 'GCLOUD_SERVICE_ACCOUNT_JSON'
                                },
                            ],
                        }],
                        'volumes': [
                            {
                                'name': 'gcloud-credentials',
                                'secret': {
                                    'secretName': 'ckan-infra'
                                }
                            },
                        ]
                    }
                }
            }))
    service = kubectl.get_resource('v1', 'Service', 'gcsminio-proxy', labels)
    service['spec'] = {
        'ports': [{
            'name': '9000',
            'port': 9000
        }],
        'selector': labels
    }
    kubectl.apply(service)
    # register a route for the proxy if one does not exist yet, then refresh the router
    if not routers_manager.get_backend_url_routes('gcs-minio'):
        routers_manager.create_subdomain_route(
            router_name, {
                'target-type': 'backend-url',
                'target-resource-id': 'gcs-minio',
                'backend-url': 'http://gcsminio-proxy.ckan-cloud:9000',
                'sub-domain': 'default',
                'root-domain': 'default',
            })
    routers_manager.update(router_name, wait_ready=True)
def _apply_deployment(db_prefix=None):
    """Deploy the gcloud SQL proxy (cloudsql-docker gce-proxy) forwarding
    port 5432 to the configured Cloud SQL instance."""
    config = config_manager.get(
        configmap_name='ckan-cloud-provider-cluster-gcloud')
    project_id = config['project-id']
    # region = first two dash-separated parts of the compute zone
    # (e.g. europe-west1-b -> europe-west1)
    location = '-'.join(config['cluster-compute-zone'].split('-')[:2])
    if db_prefix:
        db_config = config_manager.get(
            secret_name=
            f'ckan-cloud-provider-db-gcloudsql-{db_prefix}-credentials')
    else:
        db_config = config_manager.get(
            secret_name='ckan-cloud-provider-db-gcloudsql-credentials')
    db_instance_name = db_config['gcloud-sql-instance-name']
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(suffix=db_prefix),
            _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
            {
                'selector': {
                    'matchLabels': _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
                },
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'template': {
                    'metadata': {
                        'labels': _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
                        'annotations': _get_resource_annotations(suffix=db_prefix or '')
                    },
                    'spec': {
                        'containers': [{
                            'name': 'proxy',
                            'image': 'gcr.io/cloudsql-docker/gce-proxy:1.11',
                            'args': [
                                '/cloud_sql_proxy',
                                f'-instances={project_id}:{location}:{db_instance_name}=tcp:5432'
                            ],
                            'env': [{
                                'name': 'GOOGLE_APPLICATION_CREDENTIALS',
                                'value': '/infra/creds.json'
                            }],
                            'ports': [{
                                'containerPort': 5432
                            }],
                            'volumeMounts': [
                                {
                                    # only the service-account-json key of the
                                    # secret is mounted, read-only
                                    'name': 'service-account',
                                    'mountPath': '/infra/creds.json',
                                    'readOnly': True,
                                    'subPath': 'service-account-json'
                                },
                            ],
                            'resources': {
                                'limits': {
                                    'memory': '1Gi',
                                },
                                'requests': {
                                    'cpu': '0.1',
                                    'memory': '0.2Gi',
                                }
                            }
                        }],
                        'volumes': [{
                            'name': 'service-account',
                            'secret': {
                                'secretName': 'ckan-cloud-provider-cluster-gcloud',
                            }
                        }]
                    }
                }
            }))
def post_deploy_hook(instance_id, instance, deploy_kwargs):
    """Post-deploy hook: applies the Jenkins JNLP agent deployment into the
    instance's namespace.

    ``instance`` and ``deploy_kwargs`` are part of the hook interface and are
    not used here. (fix: removed a dead trailing ``pass`` statement.)
    """
    kubectl.apply(
        kubectl.get_deployment(
            name='jnlp-kube-prod-1',
            labels={},
            spec={
                'minReadySeconds': 15,
                'progressDeadlineSeconds': 1200,
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'selector': {
                    'matchLabels': {
                        'app': 'deployment-jenkins-jnlp-kube-prod-1'
                    }
                },
                'strategy': {
                    'type': 'Recreate'
                },
                'template': {
                    'metadata': {
                        'labels': {
                            'app': 'deployment-jenkins-jnlp-kube-prod-1'
                        }
                    },
                    'spec': {
                        'containers': [{
                            'command': [
                                'bash',
                                '/home/jenkins/ckan-cloud-operator/entrypoint-jnlp.sh'
                            ],
                            'env': [{
                                'name': 'CKAN_CLOUD_OPERATOR_SCRIPTS',
                                'value': '/usr/src/ckan-cloud-operator/scripts'
                            }, {
                                'name': 'CKAN_CLOUD_USER_NAME',
                                'value': 'jenkins-admin'
                            }, {
                                'name': 'HOME',
                                'value': '/home/jenkins/agent'
                            }],
                            'envFrom': [{
                                'secretRef': {
                                    'name': 'jnlp-slave-kube-prod-1',
                                    'optional': False
                                }
                            }],
                            'image': 'viderum/ckan-cloud-operator:jnlp-v0.2.7',
                            'imagePullPolicy': 'Always',
                            'name': 'jnlp-kube-prod-1',
                            'resources': {},
                            'volumeMounts': [{
                                'mountPath': '/etc/ckan-cloud',
                                'name': 'vol1'
                            }, {
                                'mountPath': '/home/jenkins/agent',
                                'name': 'workspace-volume'
                            }],
                            'workingDir': '/home/jenkins/agent'
                        }],
                        # give in-flight jenkins jobs time to finish on shutdown
                        'terminationGracePeriodSeconds': 120,
                        'volumes': [{
                            'name': 'vol1',
                            'secret': {
                                'defaultMode': 511,
                                'optional': False,
                                'secretName': 'etc-ckan-cloud-jenkins-admin'
                            }
                        }, {
                            'emptyDir': {},
                            'name': 'workspace-volume'
                        }]
                    }
                }
            },
            namespace=instance_id,
        ))