def _apply_deployment():
    """Apply the adminer Deployment: a single replica updated via RollingUpdate."""
    name = _get_resource_name()
    labels = _get_resource_labels(for_deployment=True)
    annotations = _get_resource_annotations()
    container = {
        'name': 'adminer',
        'image': 'adminer',
        'ports': [{'containerPort': 8080}],
    }
    pod_template = {
        'metadata': {
            'labels': labels,
            'annotations': annotations,
        },
        'spec': {
            'containers': [container],
        },
    }
    deployment_spec = {
        'replicas': 1,
        'revisionHistoryLimit': 2,
        'strategy': {'type': 'RollingUpdate'},
        'template': pod_template,
    }
    kubectl.apply(kubectl.get_deployment(name, labels, deployment_spec))
def create_volume(disk_size_gb, labels, use_existing_disk_name=None, zone=None):
    """Create (or reuse) a GCE persistent disk and expose it via a PV + PVC pair.

    :param disk_size_gb: disk size in GB
    :param labels: dict of labels applied to the created GCE disk
    :param use_existing_disk_name: reuse this disk instead of creating a new one
    :param zone: GCE zone for the new disk; defaults to the project's zone
    :return: volume spec referencing the claim, usable in a pod spec
    """
    disk_id = use_existing_disk_name or 'cc' + _generate_password(12)
    if use_existing_disk_name:
        logs.info(f'using existing persistent disk {disk_id}')
    else:
        logs.info(f'creating persistent disk {disk_id} with size {disk_size_gb}')
        # Fix: the zone argument was previously ignored and always overwritten
        # with the project default zone; now it is only used as a fallback.
        if not zone:
            _, zone = get_project_zone()
        labels = ','.join([
            '{}={}'.format(k.replace('/', '_'), v.replace('/', '_'))
            for k, v in labels.items()
        ])
        gcloud_driver.check_call(
            *get_project_zone(),
            f'compute disks create {disk_id} --size={disk_size_gb}GB --zone={zone} --labels={labels}'
        )
    # Pre-bound PV/PVC pair (empty storageClassName disables dynamic provisioning)
    kubectl.apply({
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {'name': disk_id, 'namespace': 'ckan-cloud'},
        'spec': {
            'storageClassName': '',
            'capacity': {'storage': f'{disk_size_gb}G'},
            'accessModes': ['ReadWriteOnce'],
            'gcePersistentDisk': {'pdName': disk_id}
        }
    })
    kubectl.apply({
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {'name': disk_id, 'namespace': 'ckan-cloud'},
        'spec': {
            'storageClassName': '',
            'volumeName': disk_id,
            'accessModes': ['ReadWriteOnce'],
            'resources': {'requests': {'storage': f'{disk_size_gb}G'}}
        }
    })
    return {'persistentVolumeClaim': {'claimName': disk_id}}
def update(instance_id_or_name, override_spec=None, persist_overrides=False, wait_ready=False, skip_deployment=False, skip_route=False):
    """Update a CKAN instance: apply spec overrides, redeploy, and manage its route.

    :param instance_id_or_name: instance id or its registered name
    :param override_spec: dict of spec keys to override before updating
    :param persist_overrides: persist the overridden spec back to the cluster
    :param wait_ready: block until the instance reports ready
    :param skip_deployment: skip the deployment-manager update step
    :param skip_route: skip router/subdomain handling
    """
    instance_id, instance_type, instance = _get_instance_id_and_type(instance_id_or_name)
    if override_spec:
        for k, v in override_spec.items():
            logs.info(f'Applying override spec {k}={v}')
            instance['spec'][k] = v
    # only instances on centralized infrastructure are handled by this code path
    assert instance['spec'].get('useCentralizedInfra'), 'non-centralized instances are not supported'
    # full domain to route to the instance
    instance_domain = instance['spec'].get('domain')
    # instance is added to router only if this is true, as all routers must use SSL and may use sans SSL too
    with_sans_ssl = instance['spec'].get('withSansSSL')
    # subdomain to register on the default root domain
    register_subdomain = instance['spec'].get('registerSubdomain')
    if persist_overrides:
        logs.info('Persisting overrides')
        kubectl.apply(instance)
    if not skip_deployment:
        deployment_manager.update(instance_id, instance_type, instance)
    if wait_ready:
        wait_instance_ready(instance_id_or_name)
    if not skip_route:
        if instance_domain:
            # the requested domain must be <register_subdomain>.<default root domain>
            assert with_sans_ssl, 'withSansSSL must be set to true to add routes'
            assert '.'.join(instance_domain.split('.')[1:]) == routers_manager.get_default_root_domain(), f'invalid root domain ({instance_domain})'
            assert instance_domain.split('.')[0] == register_subdomain, f'invalid register_subdomain ({register_subdomain})'
            logs.info(f'adding instance route to {instance_domain}')
            routers_manager.create_subdomain_route('instances-default', {
                'target-type': 'ckan-instance',
                'ckan-instance-id': instance_id,
                'root-domain': routers_manager.get_default_root_domain(),
                'sub-domain': register_subdomain
            })
            routers_manager.update('instances-default', wait_ready)
        else:
            assert not register_subdomain, 'subdomain registration is only supported with instance_domain'
def create(instance_type, instance_id=None, instance_name=None, values=None, values_filename=None, exists_ok=False, dry_run=False):
    """Create an instance CRD resource.

    :param instance_type: stored as the instance-type label suffix
    :param instance_id: explicit id; generated from name or randomly when absent
    :param instance_name: optional friendly name, also registered via set_name
    :param values: spec dict (mutually exclusive with values_filename)
    :param values_filename: YAML file to load the spec from
    :param exists_ok: allow overwriting an existing instance
    :param dry_run: don't actually apply the resource
    :return: the instance id
    :raises Exception: if the instance already exists and exists_ok is False
    """
    if not instance_id:
        if instance_name:
            instance_id = '{}-{}'.format(instance_name, _generate_password(6))
        else:
            instance_id = _generate_password(12)
    if values_filename:
        assert values is None
        with open(values_filename) as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input - consider yaml.safe_load if the file may be untrusted
            values = yaml.load(f.read())
    if values is None:
        # Fix: previously crashed with AttributeError when called with neither
        # values nor values_filename; fall back to an empty spec instead.
        values = {}
    if not exists_ok and crds_manager.get(INSTANCE_CRD_SINGULAR, name=instance_id, required=False):
        raise Exception('instance already exists')
    values_id = values.get('id')
    if values_id:
        assert values_id == instance_id, f'instance spec has conflicting instance_id ({values_id} != {instance_id})'
    values['id'] = instance_id
    logs.info('Creating instance', instance_id=instance_id)
    kubectl.apply(crds_manager.get_resource(
        INSTANCE_CRD_SINGULAR, instance_id,
        extra_label_suffixes={'instance-type': instance_type},
        spec=values
    ), dry_run=dry_run)
    if instance_name:
        set_name(instance_id, instance_name, dry_run=dry_run)
    return instance_id
def create_volume(disk_size_gb, labels, use_existing_disk_name=None, zone=0):
    """Create a PersistentVolumeClaim on the cca-ckan storage class.

    :param disk_size_gb: requested storage size in GB
    :param labels: accepted for interface compatibility only - the previous
        implementation computed a label string from it and discarded it, so
        labels were never applied to the claim
    :param use_existing_disk_name: reuse this claim name instead of generating one
    :param zone: deprecated, ignored (a warning is logged when set)
    :return: volume spec referencing the claim, usable in a pod spec
    """
    if zone != 0:
        logs.warning(f'variable zone for create_volume has been deprecated.')
    disk_id = use_existing_disk_name or 'cc' + _generate_password(12)
    if use_existing_disk_name:
        logs.info(f'using existing persistent disk {disk_id}')
    else:
        logs.info(
            f'creating persistent disk {disk_id} with size {disk_size_gb}GB')
    # Fix: removed the dead labels-join computation - its result was never used.
    kubectl.apply({
        "kind": "PersistentVolumeClaim",
        "apiVersion": "v1",
        "metadata": {
            "name": disk_id,
            "namespace": "ckan-cloud"
        },
        "spec": {
            "accessModes": ["ReadWriteOnce"],
            "resources": {
                "requests": {
                    "storage": f'{disk_size_gb}G'
                }
            },
            "storageClassName": "cca-ckan"
        }
    })
    return {'persistentVolumeClaim': {'claimName': disk_id}}
def update(instance_id_or_name, override_spec=None, persist_overrides=False, wait_ready=False, skip_deployment=False, skip_route=False, force=False, dry_run=False):
    """Update an app instance: run pre-update hooks, redeploy, and register its route.

    :param instance_id_or_name: instance id or its registered name
    :param override_spec: dict of spec overrides (handled by the pre-update hook)
    :param persist_overrides: persist the overridden spec back to the cluster
    :param wait_ready: block until the instance is ready
    :param skip_deployment: skip the deployment-manager update step
    :param skip_route: skip router/subdomain handling
    :param force: passed through in the dry-run log; effect on the real path not
        visible here - presumably consumed by callers/hooks (TODO confirm)
    :param dry_run: only log what would be done
    """
    instance_id, instance = _get_instance(instance_id_or_name, required=not dry_run)
    if dry_run:
        logs.info('update instance', instance_id=instance_id,
                  instance_id_or_name=instance_id_or_name,
                  override_spec=override_spec, persist_overrides=persist_overrides,
                  wait_ready=wait_ready, skip_deployment=skip_deployment,
                  skip_route=skip_route, force=force, dry_run=dry_run)
    else:
        # the hook applies override_spec and returns routing info (sub/root domain)
        pre_update_hook_data = deployment_manager.pre_update_hook(instance_id, instance, override_spec, skip_route)
        if persist_overrides:
            logs.info('Persisting overrides')
            kubectl.apply(instance)
        if not skip_deployment:
            deployment_manager.update(instance_id, instance)
        if wait_ready:
            wait_instance_ready(instance_id_or_name)
        if not skip_route and pre_update_hook_data.get('sub-domain'):
            root_domain = pre_update_hook_data.get('root-domain')
            sub_domain = pre_update_hook_data['sub-domain']
            # only routes on the default root domain are supported
            assert root_domain == routers_manager.get_default_root_domain(), \
                'invalid domain, must use default root domain'
            logs.info(f'adding instance default route to {sub_domain}.{root_domain}')
            routers_manager.create_subdomain_route('instances-default', {
                'target-type': 'app-instance',
                'app-instance-id': instance_id,
                'root-domain': root_domain,
                'sub-domain': sub_domain
            })
            routers_manager.update('instances-default', wait_ready)
        else:
            logs.info('skipping route creation', skip_route=skip_route,
                      sub_domain=pre_update_hook_data.get('sub-domain'))
        logs.info('Instance is ready', instance_id=instance_id,
                  instance_name=(instance_id_or_name if instance_id_or_name != instance_id else None))
def deploy():
    """Deploys a proxy inside the cluster which allows to access the centralized solr without authentication"""
    labels = {'app': 'ckan-cloud-solrcloud-proxy'}
    solr_url = parse_url(solr_manager.get_internal_http_endpoint())
    scheme = solr_url.scheme
    hostname = solr_url.hostname
    port = solr_url.port
    # assumes the internal endpoint embeds basic-auth credentials as user:password
    # - TODO confirm; this raises if auth is missing or has no ':'
    solr_user, solr_password = solr_url.auth.split(':')
    if not port:
        # default port by scheme when the endpoint URL omits it
        port = '443' if scheme == 'https' else '8983'
    # the proxy container reads its upstream target and credentials from this secret
    kubectl.update_secret(
        'solrcloud-proxy', {
            'SOLR_URL': f'{scheme}://{hostname}:{port}',
            'SOLR_USER': solr_user,
            'SOLR_PASSWORD': solr_password
        })
    kubectl.apply(
        kubectl.get_deployment(
            'solrcloud-proxy', labels, {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'template': {
                    'metadata': {
                        'labels': labels,
                        'annotations': {
                            # changes every apply, forcing a pod restart on each deploy
                            'ckan-cloud/operator-timestamp': str(datetime.datetime.now())
                        }
                    },
                    'spec': {
                        'containers': [{
                            'name': 'solrcloud-proxy',
                            'image': 'viderum/ckan-cloud-operator-solrcloud-proxy',
                            'envFrom': [{
                                'secretRef': {
                                    'name': 'solrcloud-proxy'
                                }
                            }],
                            'ports': [{
                                'containerPort': 8983
                            }],
                        }]
                    }
                }
            }))
    # expose the proxy pods on the standard solr port inside the cluster
    service = kubectl.get_resource('v1', 'Service', 'solrcloud-proxy', labels)
    service['spec'] = {
        'ports': [{
            'name': '8983',
            'port': 8983
        }],
        'selector': labels
    }
    kubectl.apply(service)
def _create_storage_classes():
    """Create the AWS storage classes: cca-ckan (EFS) and cca-storage (EBS gp2)."""
    efs_class = {
        'apiVersion': 'storage.k8s.io/v1',
        'kind': 'StorageClass',
        'metadata': {'name': 'cca-ckan'},
        'provisioner': 'example.com/aws-efs',
        'reclaimPolicy': 'Delete',
        'volumeBindingMode': 'Immediate',
    }
    ebs_class = {
        'apiVersion': 'storage.k8s.io/v1',
        'kind': 'StorageClass',
        'metadata': {'name': 'cca-storage'},
        'provisioner': 'kubernetes.io/aws-ebs',
        'reclaimPolicy': 'Delete',
        'volumeBindingMode': 'Immediate',
        'parameters': {
            'encrypted': 'false',
            'type': 'gp2',
        },
    }
    for storage_class in (efs_class, ebs_class):
        kubectl.apply(storage_class)
def _create_storage_classes():
    """Create the Azure storage classes: cca-ckan and cca-storage (azure-disk)."""
    disk_parameters = {
        'skuName': 'Standard_LRS',
        'location': _config_get('azure-default-location'),
    }
    ckan_class = {
        'apiVersion': 'storage.k8s.io/v1',
        'kind': 'StorageClass',
        'metadata': {'name': 'cca-ckan'},
        'parameters': dict(disk_parameters),
        'provisioner': 'kubernetes.io/azure-disk',
        'reclaimPolicy': 'Delete',
        'volumeBindingMode': 'Immediate',
    }
    # note: cca-storage intentionally mirrors the original, which sets no
    # explicit reclaimPolicy here
    storage_class = {
        'apiVersion': 'storage.k8s.io/v1',
        'kind': 'StorageClass',
        'metadata': {'name': 'cca-storage'},
        'provisioner': 'kubernetes.io/azure-disk',
        'volumeBindingMode': 'Immediate',
        'parameters': dict(disk_parameters),
    }
    kubectl.apply(ckan_class)
    kubectl.apply(storage_class)
def create_volume(disk_size_gb, labels, use_existing_disk_name=None, zone=0):
    """Create a PersistentVolumeClaim on the cca-ckan Azure storage class.

    :param disk_size_gb: requested storage size in GB
    :param labels: accepted for interface compatibility only - the previous
        implementation computed a label string from it and discarded it
    :param use_existing_disk_name: reuse this claim name instead of generating one
    :param zone: accepted for interface compatibility; not used by the claim
    :return: volume spec referencing the claim, usable in a pod spec
    """
    disk_id = use_existing_disk_name or 'cc' + _generate_password(12)
    if use_existing_disk_name:
        logs.info(f'using existing persistent disk {disk_id}')
    else:
        logs.info(
            f'creating persistent disk {disk_id} with size {disk_size_gb}GB')
    # Fix: removed dead code - `rg`, `location` and the joined labels string
    # were computed but never used, and the `_, zone = get_project_zone()` call
    # was a GCP copy-paste leftover that raised NameError on the create path.
    kubectl.apply({
        "kind": "PersistentVolumeClaim",
        "apiVersion": "v1",
        "metadata": {
            "name": disk_id,
            "namespace": "ckan-cloud"
        },
        "spec": {
            "accessModes": ["ReadWriteOnce"],
            "resources": {
                "requests": {
                    "storage": f'{disk_size_gb}G'
                }
            },
            "storageClassName": "cca-ckan"
        }
    })
    return {'persistentVolumeClaim': {'claimName': disk_id}}
def create(instance_type, instance_id=None, instance_name=None, values=None, values_filename=None, exists_ok=False, dry_run=False, update_=False, wait_ready=False, skip_deployment=False, skip_route=False, force=False):
    """Create an app-instance CRD resource, optionally running update() afterwards.

    Returns the instance id. Raises if the instance exists and exists_ok is False.
    """
    if not instance_id:
        instance_id = ('{}-{}'.format(instance_name, _generate_password(6))
                       if instance_name else _generate_password(12))
    if values_filename:
        assert values is None
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input - consider yaml.safe_load
        if values_filename == '-':
            values = yaml.load(sys.stdin.read())
        else:
            with open(values_filename) as f:
                values = yaml.load(f.read())
    if not exists_ok and crds_manager.get(INSTANCE_CRD_SINGULAR, name=instance_id, required=False):
        raise Exception('instance already exists')
    spec_id = values.get('id')
    if spec_id and spec_id != instance_id:
        logs.warning(f'changing instance id in spec from {spec_id} to the instance id {instance_id}')
    values['id'] = instance_id
    logs.info('Creating instance', instance_id=instance_id)
    resource = crds_manager.get_resource(
        INSTANCE_CRD_SINGULAR, instance_id,
        extra_label_suffixes={'instance-type': instance_type},
        spec=values)
    kubectl.apply(resource, dry_run=dry_run)
    if instance_name:
        set_name(instance_id, instance_name, dry_run=dry_run)
    if update_:
        update(instance_id, wait_ready=wait_ready, skip_deployment=skip_deployment,
               skip_route=skip_route, force=force, dry_run=dry_run)
    return instance_id
def create(router):
    """Create an nginx router backed by the cloudflare DNS provider.

    Validates the spec, strips the cloudflare credentials out of it (they are
    stored as secrets instead), applies the router resource and enables the
    letsencrypt-cloudflare integration. Returns the router resource.
    """
    router_name = router['metadata']['name']
    spec = router['spec']
    cloudflare = spec.get('cloudflare', {})
    email = cloudflare.get('email')
    api_key = cloudflare.get('api-key')
    root_domain = spec.get('default-root-domain')
    provider = spec.get('dns-provider')
    assert provider == 'cloudflare'
    spec['dns-provider'] = provider
    assert all([root_domain, provider]), f'invalid nginx router spec: {spec}'
    assert email and api_key, 'invalid nginx router spec, missing cloudflare email or api key'
    # cloudflare credentials are stored in a secret, not in the spec
    spec.pop('cloudflare', None)
    kubectl.apply(router)
    annotations = CkanRoutersAnnotations(router_name, router)
    annotations.update_flag(
        'letsencryptCloudflareEnabled',
        lambda: annotations.set_secrets({
            'LETSENCRYPT_CLOUDFLARE_EMAIL': email,
            'LETSENCRYPT_CLOUDFLARE_API_KEY': api_key
        }),
        force_update=True)
    return router
def update_instance(instance_id, override_spec=None, persist_overrides=False, wait_ready=False, skip_deployment=False):
    """Update an instance and optionally poll until it reports ready.

    :param instance_id: id of the instance to update
    :param override_spec: dict of spec keys to override before updating
    :param persist_overrides: persist the overridden spec back to the cluster
    :param wait_ready: poll get_instance() until data['ready'] is truthy,
        printing the not-yet-ready components on each iteration
    :param skip_deployment: skip the deployment-manager update step
    """
    instance, instance_type = _get_instance_and_type(instance_id)
    if override_spec:
        for k, v in override_spec.items():
            instance['spec'][k] = v
    if persist_overrides:
        kubectl.apply(instance)
    if not skip_deployment:
        deployment_manager.update(instance_id, instance_type, instance)
    if wait_ready:
        print('Waiting for ready status')
        time.sleep(3)
        while True:
            data = get_instance(instance_id)
            if data.get('ready'):
                print(yaml.dump(data, default_flow_style=False))
                break
            else:
                # show only the namespace plus components that are not ready yet
                print(
                    yaml.dump(
                        {
                            k: v
                            for k, v in data.items()
                            # Fix: use isinstance() instead of type(v) == dict
                            if (k not in ['ready'] and isinstance(v, dict)
                                and not v.get('ready')) or k == 'namespace'
                        },
                        default_flow_style=False))
                time.sleep(2)
def set_name(instance_id, instance_name, dry_run=False):
    """Register instance_name -> instance_id in the app-name CRD.

    Updates the existing name resource when present (tracking the latest
    instance id and an added timestamp per instance id), otherwise creates a
    fresh one. With dry_run the resource is printed instead of applied.
    """
    resource = crds_manager.get(APP_NAME_CRD_SINGULAR, name=instance_name, required=False)
    if resource:
        spec = resource['spec']
        spec['latest-instance-id'] = instance_id
        instance_ids = spec['instance-ids']
        if not instance_ids.get(instance_id):
            instance_ids[instance_id] = {'added': datetime.datetime.now()}
    else:
        resource = crds_manager.get_resource(
            APP_NAME_CRD_SINGULAR,
            instance_name,
            spec={
                'name': instance_name,
                'latest-instance-id': instance_id,
                'instance-ids': {
                    instance_id: {'added': datetime.datetime.now()}
                },
            })
    if dry_run:
        logs.print_yaml_dump(resource)
    else:
        kubectl.apply(resource)
def _apply_solrcloud_headless_service(dry_run=False):
    """Apply the SolrCloud headless Service (solr/stop/rmi ports) and return its name."""
    service_name = _get_resource_name('sc-headless')
    # each port maps the same containerPort/targetPort
    ports = [
        {'name': port_name, 'port': port, 'protocol': 'TCP', 'targetPort': port}
        for port_name, port in (('solr', 8983), ('stop', 7983), ('rmi', 18983))
    ]
    spec = {
        'clusterIP': 'None',
        'ports': ports,
        'selector': {
            'app': _get_resource_labels(for_deployment=True, suffix='sc')['app']
        },
    }
    kubectl.apply(
        kubectl.get_resource('v1', 'Service', service_name,
                             _get_resource_labels(suffix='sc-headless'), spec=spec),
        dry_run=dry_run)
    return service_name
def _init_namespace(instance_id, dry_run=False):
    """Ensure the instance namespace exists and set up its operator RBAC.

    Creates the namespace when missing, then a service account plus a role
    limited to secrets/pods operations and a binding between the two.
    """
    logs.debug('Initializing helm-based instance deployment namespace', namespace=instance_id)
    if kubectl.get('ns', instance_id, required=False):
        logs.info(f'instance namespace already exists ({instance_id})')
    else:
        logs.info(f'creating instance namespace ({instance_id})')
        kubectl.apply(kubectl.get_resource('v1', 'Namespace', instance_id, {}), dry_run=dry_run)
    service_account_name = f'ckan-{instance_id}-operator'
    logs.debug('Creating service account', service_account_name=service_account_name)
    if not dry_run:
        kubectl_rbac_driver.update_service_account(f'ckan-{instance_id}-operator', {}, namespace=instance_id)
    role_name = f'ckan-{instance_id}-operator-role'
    logs.debug('Creating role and binding to the service account', role_name=role_name)
    if not dry_run:
        # allow the operator to manage secrets and interact with pods in the namespace
        role_rules = [{
            "apiGroups": ["*"],
            "resources": ['secrets', 'pods', 'pods/exec', 'pods/portforward'],
            "verbs": ["list", "get", "create"],
        }]
        kubectl_rbac_driver.update_role(role_name, {}, role_rules, namespace=instance_id)
        kubectl_rbac_driver.update_role_binding(
            name=f'ckan-{instance_id}-operator-rolebinding',
            role_name=f'ckan-{instance_id}-operator-role',
            namespace=instance_id,
            service_account_name=f'ckan-{instance_id}-operator',
            labels={})
def get_or_create_multi_user_volume_claim(label_suffixes):
    """Return a pod volume spec for a shared ReadWriteMany claim matching the labels.

    Reuses the single existing claim identified by the labels when found,
    otherwise creates a new one on the multi-user storage class.
    """
    assert len(label_suffixes) > 0, 'must provide some labels to identify the volume'
    claim_labels = labels_manager.get_resource_labels(label_suffixes=dict(
        label_suffixes, **_get_cluster_volume_label_suffixes()
    ))
    existing_claims = kubectl.get_items_by_labels('PersistentVolumeClaim', claim_labels, required=False)
    if existing_claims:
        # labels must identify exactly one claim
        assert len(existing_claims) == 1
        claim_name = existing_claims[0]['metadata']['name']
        return {'persistentVolumeClaim': {'claimName': claim_name}}
    storage_class_name = get_multi_user_storage_class_name()
    claim_name = 'cc' + _generate_password(12)
    logs.info(f'Creating persistent volume claim: {claim_name}')
    logs.info(f'labels: {claim_labels}')
    claim_spec = {
        'storageClassName': storage_class_name,
        'accessModes': ['ReadWriteMany'],
        # minimal request; shared storage classes typically ignore the size
        'resources': {'requests': {'storage': '1Mi'}},
    }
    kubectl.apply(kubectl.get_persistent_volume_claim(claim_name, claim_labels, claim_spec))
    return {'persistentVolumeClaim': {'claimName': claim_name}}
def _apply_service(db_prefix=None):
    """Apply the postgres Service (port 5432) selecting the deployment's app label."""
    suffix = db_prefix or ''
    selector_app = _get_resource_labels(for_deployment=True, suffix=suffix)['app']
    service = kubectl.get_service(
        _get_resource_name(suffix=suffix),
        _get_resource_labels(suffix=suffix),
        [5432],
        {'app': selector_app})
    kubectl.apply(service)
def update_role_binding(name, role_name, namespace, service_account_name, labels):
    """Apply (reconcile) a RoleBinding from a service account to a namespaced Role."""
    subject = {
        'kind': 'ServiceAccount',
        'name': service_account_name,
        'namespace': namespace,
    }
    role_ref = {
        'kind': 'Role',
        'name': role_name,
        'namespace': namespace,
        'apiGroup': 'rbac.authorization.k8s.io',
    }
    binding = {
        'apiVersion': 'rbac.authorization.k8s.io/v1',
        'kind': 'RoleBinding',
        'metadata': {
            'name': name,
            'namespace': namespace,
            'labels': labels,
        },
        'subjects': [subject],
        'roleRef': role_ref,
    }
    kubectl.apply(binding, reconcile=True)
def _apply_zookeeper_headless_service(dry_run=False):
    """Apply the ZooKeeper headless Service (client/server/leader-election ports)."""
    service_name = _get_resource_name('zk-headless')
    # each port maps the same containerPort/targetPort
    ports = [
        {'name': port_name, 'port': port, 'protocol': 'TCP', 'targetPort': port}
        for port_name, port in (('client', 2181), ('server', 2888), ('leader-election', 3888))
    ]
    spec = {
        'clusterIP': 'None',
        'ports': ports,
        'selector': {
            'app': _get_resource_labels(for_deployment=True, suffix='zk')['app']
        },
    }
    kubectl.apply(
        kubectl.get_resource('v1', 'Service', service_name,
                             _get_resource_labels(suffix='zk-headless'), spec=spec),
        dry_run=dry_run)
    return service_name
def update_cluster_role_binding(name, service_account_namespace, service_account_name, cluster_role_name, labels):
    """Apply (reconcile) a ClusterRoleBinding granting a cluster role to a service account.

    The subject is expressed as a User with the
    ``system:serviceaccount:<namespace>:<name>`` identity.
    """
    subject = {
        'kind': 'User',
        'name': f'system:serviceaccount:{service_account_namespace}:{service_account_name}',
        'apiGroup': 'rbac.authorization.k8s.io',
    }
    binding = {
        'apiVersion': 'rbac.authorization.k8s.io/v1',
        'kind': 'ClusterRoleBinding',
        'metadata': {
            'name': name,
            'labels': labels,
        },
        'subjects': [subject],
        'roleRef': {
            'kind': 'ClusterRole',
            'name': cluster_role_name,
            'apiGroup': 'rbac.authorization.k8s.io',
        },
    }
    kubectl.apply(binding, reconcile=True)
def create(router):
    """Create a traefik router resource.

    Picks the DNS provider (route53 on AWS clusters, cloudflare otherwise)
    unless the spec specifies one, validates the spec, strips cloudflare
    credentials into secrets, applies the router and returns it.
    """
    router_name = router['metadata']['name']
    router_spec = router['spec']
    cloudflare_spec = router_spec.get('cloudflare', {})
    cloudflare_email = cloudflare_spec.get('email')
    cloudflare_api_key = cloudflare_spec.get('api-key')
    default_root_domain = router_spec.get('default-root-domain')
    dns_provider = router_spec.get('dns-provider')
    # local import - presumably avoids a circular import at module load time
    from ckan_cloud_operator.providers.cluster import manager as cluster_manager
    default_dns_provider = 'route53' if cluster_manager.get_provider_id(
    ) == 'aws' else 'cloudflare'
    logs.info(dns_provider=dns_provider, default_dns_provider=default_dns_provider)
    if not dns_provider:
        dns_provider = default_dns_provider
    router_spec['dns-provider'] = dns_provider
    assert all([default_root_domain, dns_provider]), f'invalid traefik router spec: {router_spec}'
    if dns_provider == 'cloudflare':
        # cloudflare requires credentials for letsencrypt DNS challenges
        assert cloudflare_email and cloudflare_api_key, 'invalid traefik router spec for cloudflare dns provider'
    # cloudflare credentials are stored in a secret, not in the spec
    if 'cloudflare' in router_spec:
        del router_spec['cloudflare']
    kubectl.apply(router)
    annotations = CkanRoutersAnnotations(router_name, router)
    if dns_provider == 'cloudflare':
        annotations.update_flag(
            'letsencryptCloudflareEnabled',
            lambda: annotations.set_secrets(
                {
                    'LETSENCRYPT_CLOUDFLARE_EMAIL': cloudflare_email,
                    'LETSENCRYPT_CLOUDFLARE_API_KEY': cloudflare_api_key
                }),
            force_update=True)
    return router
def create(name, role):
    """Create a user CRD resource with the spec and labels for the given role."""
    logs.info(f'Creating user {name} (role={role})')
    user_resource = crds_manager.get_resource(
        CRD_SINGULAR,
        name,
        spec=_get_spec(name, role),
        extra_label_suffixes=get_user_labels(name, role),
    )
    kubectl.apply(user_resource)
def _pre_update_hook_modify_spec(instance_id, instance, callback, dry_run=False):
    """Run callback on the in-memory instance and on the latest persisted spec.

    The persisted copy is then re-applied so the change survives beyond this
    update cycle.
    """
    # applies changes to both the non-persistent spec and persists the changes on latest instance spec
    resource_name = crds_manager.get_resource_name(INSTANCE_CRD_SINGULAR, instance_id)
    latest_instance = crds_manager.get(INSTANCE_CRD_SINGULAR, resource_name, required=True)
    for target in (instance, latest_instance):
        callback(target)
    kubectl.apply(latest_instance, dry_run=dry_run)
def _apply_deployment():
    """Apply the pgbouncer Deployment: one replica behind RollingUpdate, with the
    pgbouncer config mounted read-only from a secret volume and a TCP readiness
    probe on the postgres port."""
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(),
            _get_resource_labels(for_deployment=True),
            {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'template': {
                    'metadata': {
                        'labels': _get_resource_labels(for_deployment=True),
                        'annotations': _get_resource_annotations()
                    },
                    'spec': {
                        'containers': [{
                            'name': 'pgbouncer',
                            'image': 'viderum/ckan-cloud-operator:pgbouncer',
                            'ports': [{
                                'containerPort': 5432
                            }],
                            # config comes from the secret-backed volume defined below
                            'volumeMounts': [
                                {
                                    'name': 'config',
                                    'mountPath': '/var/local/pgbouncer',
                                    'readOnly': True
                                },
                            ],
                            # mark unready as soon as a single TCP probe fails
                            'readinessProbe': {
                                'failureThreshold': 1,
                                'initialDelaySeconds': 5,
                                'periodSeconds': 5,
                                'successThreshold': 1,
                                'tcpSocket': {
                                    'port': 5432
                                },
                                'timeoutSeconds': 5
                            },
                            'resources': {
                                'limits': {
                                    'memory': '2Gi',
                                },
                                'requests': {
                                    'cpu': '0.1',
                                    'memory': '0.2Gi',
                                }
                            }
                        }],
                        'volumes': [
                            _config_get_volume_spec('config', is_secret=True),
                        ]
                    }
                }
            }))
def create_volume(disk_size_gb, labels, use_existing_disk_name=None):
    """Create an EBS volume and expose it via a pre-bound PV + PVC pair.

    :param disk_size_gb: volume size in GB
    :param labels: accepted for interface compatibility; not applied to the
        created volume by this implementation
    :param use_existing_disk_name: not supported - asserts when provided
    :return: pod volume spec referencing the claim, plus a nodeSelector pinning
        pods to the volume's availability zone (EBS volumes are zone-local)
    """
    assert not use_existing_disk_name, 'using existing disk name is not supported yet'
    availability_zone = get_storage_availability_zone()
    logs.info(
        f'creating persistent disk with size {disk_size_gb} in availability zone {availability_zone}'
    )
    data = json.loads(
        aws_check_output(
            f'ec2 create-volume -- --size {disk_size_gb} --availability-zone {availability_zone}'
        ))
    volume_id = data['VolumeId']
    logs.info(f'volume_id={volume_id}')
    # empty storageClassName disables dynamic provisioning; the PVC below binds
    # to this PV explicitly via volumeName
    kubectl.apply({
        'apiVersion': 'v1',
        'kind': 'PersistentVolume',
        'metadata': {
            'name': volume_id,
            'namespace': 'ckan-cloud'
        },
        'spec': {
            'storageClassName': '',
            'capacity': {
                'storage': f'{disk_size_gb}G'
            },
            'accessModes': ['ReadWriteOnce'],
            'awsElasticBlockStore': {
                'volumeID': volume_id
            }
        }
    })
    kubectl.apply({
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {
            'name': volume_id,
            'namespace': 'ckan-cloud'
        },
        'spec': {
            'storageClassName': '',
            'volumeName': volume_id,
            'accessModes': ['ReadWriteOnce'],
            'resources': {
                'requests': {
                    'storage': f'{disk_size_gb}G'
                }
            }
        }
    })
    return {
        'persistentVolumeClaim': {
            'claimName': volume_id
        },
        'nodeSelector': {
            'failure-domain.beta.kubernetes.io/zone': availability_zone
        }
    }
def _apply_deployment(db_prefix=None):
    """Apply a single-replica PostGIS Deployment for the (optionally prefixed) db.

    :param db_prefix: optional name suffix used to derive resource
        names/labels/annotations, allowing multiple independent db deployments
    """
    kubectl.apply(
        kubectl.get_deployment(
            _get_resource_name(suffix=db_prefix),
            _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
            {
                'replicas': 1,
                'revisionHistoryLimit': 10,
                'strategy': {
                    'type': 'RollingUpdate',
                },
                'selector': {
                    'matchLabels': _get_resource_labels(for_deployment=True, suffix=db_prefix or '')
                },
                'template': {
                    'metadata': {
                        'labels': _get_resource_labels(for_deployment=True, suffix=db_prefix or ''),
                        'annotations': _get_resource_annotations(suffix=db_prefix or '')
                    },
                    'spec': {
                        'containers': [{
                            'name': 'postgres',
                            'image': 'mdillon/postgis',
                            # NOTE(review): hard-coded default postgres/postgres
                            # credentials - acceptable only for internal/dev use
                            'env': [
                                {
                                    'name': 'POSTGRES_PASSWORD',
                                    'value': 'postgres'
                                },
                                {
                                    'name': 'POSTGRES_USER',
                                    'value': 'postgres'
                                },
                            ],
                            'ports': [{
                                'containerPort': 5432
                            }],
                            'resources': {
                                'limits': {
                                    'memory': '1Gi',
                                },
                                'requests': {
                                    'cpu': '0.1',
                                    'memory': '0.2Gi',
                                }
                            }
                        }],
                    }
                }
            }))
def create(name, image, config, router_name=None):
    """Create a CkanCloudDatapusher resource, optionally routed via a subdomain."""
    labels = _get_labels(name)
    resource = kubectl.get_resource('stable.viderum.com/v1', 'CkanCloudDatapusher', name, labels)
    resource['spec'] = {'image': image, 'config': config}
    kubectl.apply(resource)
    if router_name:
        routers_manager.create_subdomain_route(router_name, {
            'target-type': 'datapusher',
            'datapusher-name': name,
        })
def update(name):
    """Refresh the registry secret and re-apply the datapusher's Deployment."""
    _update_registry_secret()
    datapusher = kubectl.get(f'CkanCloudDatapusher {name}')
    deployment_name = get_deployment_name(name)
    labels = _get_labels(name)
    deployment_spec = _get_deployment_spec(labels, datapusher['spec'])
    print(
        f'Updating CkanCloudDatapusher {name} (deployment_name={deployment_name})'
    )
    kubectl.apply(kubectl.get_deployment(deployment_name, labels, deployment_spec))
def update_cluster_role(name, rules, labels):
    """Apply (reconcile) a ClusterRole with the given name, labels and rules."""
    cluster_role = {
        'apiVersion': 'rbac.authorization.k8s.io/v1',
        'kind': 'ClusterRole',
        'metadata': {
            'name': name,
            'labels': labels,
        },
        'rules': rules,
    }
    kubectl.apply(cluster_role, reconcile=True)