def list_configs(namespace=None, full=False, show_secrets=False):
    """Yield operator config resources (configmaps, optionally secrets) in a namespace.

    :param namespace: namespace to query; defaults to the operator namespace
    :param full: when True, also fetch each config's values via ``get``
    :param show_secrets: when True, include Secret resources alongside ConfigMaps
    :yields: dicts of the form ``{'kind': ..., 'name': ..., ['values': ...]}``
    """
    label_prefix = labels_manager.get_label_prefix()
    if not namespace:
        namespace = cluster_manager.get_operator_namespace_name()
    what = 'configmaps'
    if show_secrets:
        what += ',secrets'
    configs = kubectl.get(
        what, '-l', f'{label_prefix}/operator-config-namespace={namespace}',
        required=False)
    if configs:
        for config in configs.get('items', []):
            kind = config['kind']
            name = config.get('metadata', {}).get('name')
            # reuse the locals instead of re-extracting from the raw object
            data = {'kind': kind, 'name': name}
            if full:
                if name:
                    data['values'] = get(
                        secret_name=name if kind == 'Secret' else None,
                        configmap_name=name if kind == 'ConfigMap' else None,
                        namespace=namespace,
                        required=False)
                else:
                    # resource without a metadata.name: nothing to look up
                    data['values'] = None
            yield data
def initialize_zookeeper(interactive=False, dry_run=False):
    """Provision the zookeeper headless service, volumes, configmap and deployments.

    Volumes are spread across zones by enumerating the zk suffixes.
    Returns the list of ``host:2181`` connection endpoints for the ensemble.
    """
    headless_service_name = _apply_zookeeper_headless_service(dry_run=dry_run)
    instances = {}
    for zone, suffix in enumerate(_get_zk_suffixes()):
        instances[suffix] = {
            'host_name': suffix,
            'volume_spec': _get_or_create_volume(
                suffix, disk_size_gb=20, dry_run=dry_run, zone=zone),
        }
    host_names = [instance['host_name'] for instance in instances.values()]
    configmap_name = _apply_zookeeper_configmap(host_names)
    for instance_suffix, instance in instances.items():
        _apply_zookeeper_deployment(
            instance_suffix, instance['volume_spec'], configmap_name,
            headless_service_name, dry_run=dry_run)
    namespace = cluster_manager.get_operator_namespace_name()
    return [
        f'{host}.{headless_service_name}.{namespace}.svc.cluster.local:2181'
        for host in host_names
    ]
def initialize_zookeeper(interactive=False, dry_run=False):
    """Provision zookeeper service/volumes/configmap; update deployments interactively.

    In interactive mode each deployment update is confirmed on stdin one at a
    time; otherwise deployments are left untouched (a warning is logged).
    Returns the list of ``host:2181`` connection endpoints for the ensemble.
    """
    headless_service_name = _apply_zookeeper_headless_service(dry_run=dry_run)
    instances = {}
    for suffix in _get_zk_suffixes():
        instances[suffix] = {
            'host_name': suffix,
            'volume_spec': _get_or_create_volume(
                suffix, disk_size_gb=20, dry_run=dry_run),
        }
    host_names = [instance['host_name'] for instance in instances.values()]
    configmap_name = _apply_zookeeper_configmap(host_names)
    if not interactive:
        logs.warning('deployments are not updated in non-interactive mode')
    else:
        logs.info('Starting interactive update of zookeeper deployments')
        print(
            '\nDeployments will be done one by one, you should check if deployment succeeded before moving on to next one'
        )
        for zk_suffix, instance in instances.items():
            answer = input(f'Update zookeeper deployment {zk_suffix}? [y/n]: ')
            if answer == 'y':
                _apply_zookeeper_deployment(
                    zk_suffix, instance['volume_spec'], configmap_name,
                    headless_service_name, dry_run=dry_run)
    namespace = cluster_manager.get_operator_namespace_name()
    return [
        f'{host}.{headless_service_name}.{namespace}.svc.cluster.local:2181'
        for host in host_names
    ]
def start_port_forward():
    """Starts a local proxy to the cloud SQL instance.

    Runs ``kubectl port-forward`` in the foreground and blocks until
    interrupted; raises CalledProcessError if kubectl exits non-zero.
    """
    print("\nKeep this running in the background\n")
    namespace = cluster_manager.get_operator_namespace_name()
    deployment_name = _get_resource_name()
    # argv list with shell=False: no shell interpolation of resource names
    subprocess.check_call([
        'kubectl', '-n', namespace, 'port-forward',
        f'deployment/{deployment_name}', '5432'
    ])
def start_zoonavigator_port_forward():
    """Port-forward local 8000 to the zoonavigator deployment and print usage info.

    Blocks until interrupted; raises CalledProcessError if kubectl exits non-zero.
    """
    # safe_load: the bare yaml.load is unsafe on untrusted input and requires
    # an explicit Loader on PyYAML >= 6 (TypeError otherwise)
    connection_string = ','.join(yaml.safe_load(_config_get('zk-host-names')))
    print("\nStarting port forward to zoonavigator\n"
          "\nUse the following connection string:\n"
          f"\n {connection_string}\n"
          "\nhttp://localhost:8000/\n")
    namespace = cluster_manager.get_operator_namespace_name()
    deployment_name = _get_resource_name('zoonavigator')
    # argv list with shell=False: no shell interpolation of resource names
    subprocess.check_call([
        'kubectl', '-n', namespace, 'port-forward',
        f'deployment/{deployment_name}', '8000'
    ])
def get_internal_proxy_host_port(db_prefix=None):
    """Return the in-cluster (host, port) pair for the DB proxy service.

    :param db_prefix: optional prefix selecting a specific database service
    """
    ns = cluster_manager.get_operator_namespace_name()
    svc = _get_resource_name(suffix=db_prefix if db_prefix else '')
    return f'{svc}.{ns}', 5432
def _apply_solrcloud_deployment(suffix, volume_spec, configmap_name, log_configmap_name, headless_service_name, pause_deployment, dry_run=False):
    """Create or update a single-replica solrcloud Deployment for the given pod suffix.

    :param suffix: pod identifier, also used as the pod's hostname
    :param volume_spec: volume definition mounted at /data (named 'datadir')
    :param configmap_name: configmap injected into the solr container via envFrom
    :param log_configmap_name: configmap mounted at /logconfig
    :param headless_service_name: headless service used as the pod's subdomain
    :param pause_deployment: if truthy, replaces the container command with a
        24h sleep and omits the liveness/readiness probes (debug mode)
    :param dry_run: passed through to kubectl.apply
    """
    # resource requests/limits come from the 'solr-config' secret
    cpu_req = config_manager.get('sc-cpu', secret_name='solr-config')
    mem_req = config_manager.get('sc-mem', secret_name='solr-config')
    cpu_lim = config_manager.get('sc-cpu-limit', secret_name='solr-config')
    mem_lim = config_manager.get('sc-mem-limit', secret_name='solr-config')
    namespace = cluster_manager.get_operator_namespace_name()
    # optional JSON blob merged into the container spec; when present it
    # replaces the computed resources section entirely (resources becomes {})
    container_spec_overrides = config_manager.get(
        'container-spec-overrides',
        configmap_name='ckan-cloud-provider-solr-solrcloud-sc-config',
        required=False, default=None)
    resources = {
        'requests': {'cpu': cpu_req, 'memory': mem_req},
        'limits': {'cpu': cpu_lim, 'memory': mem_lim}
    } if not container_spec_overrides else {}
    kubectl.apply(kubectl.get_deployment(
        _get_resource_name(suffix),
        _get_resource_labels(for_deployment=True, suffix='sc'),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            # Recreate strategy: old pod must release the data volume before
            # the new one starts
            'strategy': {'type': 'Recreate', },
            'selector': {
                'matchLabels': _get_resource_labels(for_deployment=True, suffix='sc'),
            },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix='sc'),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    # hostname + subdomain give the pod a stable DNS name under
                    # the headless service
                    'hostname': suffix,
                    'subdomain': headless_service_name,
                    **_get_volume_pod_scheduling(
                        volume_spec,
                        _get_resource_labels(for_deployment=True, suffix='sc')['app']
                    ),
                    'initContainers': [
                        {
                            # seeds solr.xml (if missing) and chowns /data/solr to
                            # the solr uid/gid before the main container starts
                            'name': 'init',
                            'image': 'alpine',
                            'command': [
                                "sh", "-c",
                                f"""
if [ -e /data/solr/solr.xml ]; then
  echo /data/solr/solr.xml already exists, will not recreate
else
  echo creating /data/solr/solr.xml &&\
  mkdir -p /data/solr &&\
  echo \'{SOLR_CONFIG_XML}\' > /data/solr/solr.xml
fi &&\
echo Setting permissions to solr user/group 8983:8983 on /data/solr &&\
chown -R 8983:8983 /data/solr &&\
echo init completed successfully
"""
                            ],
                            'securityContext': {
                                # root is required to chown the mounted volume
                                'runAsUser': 0
                            },
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                            ]
                        }
                    ],
                    'containers': [
                        {
                            'name': 'sc',
                            'envFrom': [{'configMapRef': {'name': configmap_name}}],
                            'env': [
                                # SOLR_HOST: the pod's stable in-cluster DNS name
                                {'name': 'SOLR_HOST', 'value': f'{suffix}.{headless_service_name}.{namespace}.svc.cluster.local'}
                            ],
                            # pause mode keeps the container alive without running solr
                            **({
                                'command': ['sh', '-c', 'sleep 86400']
                            } if pause_deployment else {
                                'livenessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                                'readinessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                            }),
                            'image': 'solr:5.5.5',
                            'ports': [
                                {'containerPort': 8983, 'name': 'solr', 'protocol': 'TCP'},
                                {'containerPort': 7983, 'name': 'stop', 'protocol': 'TCP'},
                                {'containerPort': 18983, 'name': 'rmi', 'protocol': 'TCP'}
                            ],
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                                {'mountPath': '/logconfig', 'name': 'logconfig'}
                            ],
                            # computed resources unless overrides were configured
                            **({'resources': resources} if resources else {}),
                            # raw JSON overrides are merged last so they win
                            **(json.loads(container_spec_overrides) if container_spec_overrides else {})
                        }
                    ],
                    'volumes': [
                        {'configMap': {'defaultMode': 420, 'name': log_configmap_name}, 'name': 'logconfig'},
                        dict(volume_spec, name='datadir')
                    ]
                }
            }
        },
        with_timestamp=False
    ), dry_run=dry_run)
def get_internal_http_endpoint():
    """Return the in-cluster base URL of the main solrcloud HTTP endpoint."""
    host = _config_get('sc-main-host-name', required=True)
    ns = cluster_manager.get_operator_namespace_name()
    return f'http://{host}.{ns}.svc.cluster.local:8983/solr'
def start_solrcloud_port_forward(suffix='sc-0'):
    """Port-forward local 8983 to the solrcloud deployment with the given suffix.

    :param suffix: deployment suffix selecting the solrcloud pod (default 'sc-0')

    Blocks until interrupted; raises CalledProcessError if kubectl exits non-zero.
    """
    namespace = cluster_manager.get_operator_namespace_name()
    deployment_name = _get_resource_name(suffix)
    # argv list with shell=False: no shell interpolation of resource names
    subprocess.check_call([
        'kubectl', '-n', namespace, 'port-forward',
        f'deployment/{deployment_name}', '8983'
    ])
def _apply_solrcloud_deployment(suffix, volume_spec, configmap_name, log_configmap_name, headless_service_name, pause_deployment):
    """Create or update a single-replica solrcloud Deployment for the given pod suffix.

    This variant uses hard-coded resource requests/limits and required
    pod anti-affinity (one solrcloud pod per node).

    :param suffix: pod identifier, also used as the pod's hostname
    :param volume_spec: volume definition mounted at /data (named 'datadir')
    :param configmap_name: configmap injected into the solr container via envFrom
    :param log_configmap_name: configmap mounted at /logconfig
    :param headless_service_name: headless service used as the pod's subdomain
    :param pause_deployment: if truthy, replaces the container command with a
        24h sleep and omits the liveness/readiness probes (debug mode)
    """
    namespace = cluster_manager.get_operator_namespace_name()
    kubectl.apply(kubectl.get_deployment(
        _get_resource_name(suffix),
        _get_resource_labels(for_deployment=True, suffix='sc'),
        {
            'replicas': 1,
            'revisionHistoryLimit': 2,
            # Recreate strategy: old pod must release the data volume before
            # the new one starts
            'strategy': {'type': 'Recreate', },
            'template': {
                'metadata': {
                    'labels': _get_resource_labels(for_deployment=True, suffix='sc'),
                    'annotations': _get_resource_annotations()
                },
                'spec': {
                    # hostname + subdomain give the pod a stable DNS name under
                    # the headless service
                    'hostname': suffix,
                    'subdomain': headless_service_name,
                    # required anti-affinity: never co-schedule two pods with the
                    # same 'app' label on one node
                    'affinity': {
                        'podAntiAffinity': {'requiredDuringSchedulingIgnoredDuringExecution': [
                            {
                                'labelSelector': {'matchExpressions': [
                                    {'key': 'app', 'operator': 'In', 'values': [
                                        _get_resource_labels(for_deployment=True, suffix='sc')['app']
                                    ]}
                                ]},
                                'topologyKey': 'kubernetes.io/hostname'
                            }
                        ]}},
                    'initContainers': [
                        {
                            # seeds solr.xml (if missing) and chowns /data/solr to
                            # the solr uid/gid before the main container starts
                            'name': 'init',
                            'image': 'alpine',
                            'command': [
                                "sh", "-c",
                                f"""
if [ -e /data/solr/solr.xml ]; then
  echo /data/solr/solr.xml already exists, will not recreate
else
  echo creating /data/solr/solr.xml &&\
  mkdir -p /data/solr &&\
  echo \'{SOLR_CONFIG_XML}\' > /data/solr/solr.xml
fi &&\
echo Setting permissions to solr user/group 8983:8983 on /data/solr &&\
chown -R 8983:8983 /data/solr &&\
echo init completed successfully
"""
                            ],
                            'securityContext': {
                                # root is required to chown the mounted volume
                                'runAsUser': 0
                            },
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                            ]
                        }
                    ],
                    'containers': [
                        {
                            'name': 'sc',
                            'envFrom': [{'configMapRef': {'name': configmap_name}}],
                            'env': [
                                # SOLR_HOST: the pod's stable in-cluster DNS name
                                {'name': 'SOLR_HOST', 'value': f'{suffix}.{headless_service_name}.{namespace}.svc.cluster.local'}
                            ],
                            # pause mode keeps the container alive without running solr
                            **({
                                'command': ['sh', '-c', 'sleep 86400']
                            } if pause_deployment else {
                                'livenessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                                'readinessProbe': {
                                    'exec': {'command': ['/opt/solr/bin/solr', 'status']},
                                    'failureThreshold': 3,
                                    'initialDelaySeconds': 15,
                                    'periodSeconds': 10,
                                    'successThreshold': 1,
                                    'timeoutSeconds': 5
                                },
                            }),
                            'image': 'solr:5.5.5',
                            'ports': [
                                {'containerPort': 8983, 'name': 'solr', 'protocol': 'TCP'},
                                {'containerPort': 7983, 'name': 'stop', 'protocol': 'TCP'},
                                {'containerPort': 18983, 'name': 'rmi', 'protocol': 'TCP'}
                            ],
                            # fixed sizing for this variant
                            'resources': {'requests': {'cpu': '1', 'memory': '4Gi'},
                                          'limits': {'cpu': '2.5', 'memory': '8Gi'}},
                            'volumeMounts': [
                                {'mountPath': '/data', 'name': 'datadir'},
                                {'mountPath': '/logconfig', 'name': 'logconfig'}
                            ],
                        }
                    ],
                    'volumes': [
                        {'configMap': {'defaultMode': 420, 'name': log_configmap_name}, 'name': 'logconfig'},
                        dict(volume_spec, name='datadir')
                    ]
                }
            }
        },
        with_timestamp=False
    ))
def get_internal_proxy_host_port():
    """Return the in-cluster (host, port) pair for the DB proxy service."""
    ns = cluster_manager.get_operator_namespace_name()
    svc = _get_resource_name()
    return f'{svc}.{ns}', 5432