def _init_ckan_infra_secret(instance_id, dry_run=False):
    """Ensure the 'ckan-infra' secret exists in the instance namespace.

    If the secret is already present it is left untouched; otherwise it is
    created from the admin DB credentials and the internal (unproxied) DB
    host/port.

    :param instance_id: namespace / instance identifier the secret lives in
    :param dry_run: when True, the secret write is only simulated
    """
    logs.debug('Initializing ckan infra secret', instance_id=instance_id)
    ckan_infra = config_manager.get(secret_name='ckan-infra', namespace=instance_id, required=False)
    if ckan_infra:
        logs.info('ckan-infra secret already exists')
    else:
        admin_user, admin_password, db_name = db_manager.get_admin_db_credentials()
        db_host, db_port = db_manager.get_internal_unproxied_db_host_port()
        # only the default postgres port is supported by the chart values below
        assert int(db_port) == 5432
        # SECURITY fix: admin_password was previously included in this debug log;
        # credentials must never be written to logs
        logs.debug('Creating ckan-infra secret',
                   admin_user=admin_user, db_name=db_name,
                   db_host=db_host, db_port=db_port)
        config_manager.set(values={
            'POSTGRES_HOST': db_host,
            'POSTGRES_PASSWORD': admin_password,
            'POSTGRES_USER': admin_user
        }, secret_name='ckan-infra', namespace=instance_id, dry_run=dry_run)
def set(key=None, value=None, values=None, secret_name=None, configmap_name=None,
        namespace=None, extra_operator_labels=None, from_file=False, dry_run=False):
    """Save configuration values into a secret or configmap.

    Accepts either a single key/value pair or a values dict (mutually
    exclusive). With from_file=True, `value` is treated as a path and the file
    contents become the stored value.

    :param key: single config key (requires value, excludes values)
    :param value: single config value, or a file path when from_file is True
    :param values: dict of values to save (excludes key/value)
    :param secret_name: target secret name
    :param configmap_name: target configmap name
    :param namespace: target namespace
    :param extra_operator_labels: extra labels passed through to _save
    :param from_file: read the value from the file at `value`
    :param dry_run: when True, the save is only simulated
    :return: result of _save
    """
    log_kwargs = {'func': 'config/set', 'secret': secret_name, 'configmap': configmap_name, 'namespace': namespace}
    cache_key = _get_cache_key(secret_name, configmap_name, namespace)
    if secret_name is None and configmap_name is None and namespace is None:
        # hack to set label-prefix to bootstrap the environment
        if key == 'label-prefix' and not values:
            label_prefix = value
        elif not key and values and 'label-prefix' in values:
            # bugfix: guard `values` before the membership test -
            # previously `'label-prefix' in values` raised TypeError when
            # both key and values were None
            label_prefix = values['label-prefix']
        else:
            label_prefix = None
        if label_prefix:
            __CACHED_VALUES.setdefault(cache_key, {})['label-prefix'] = label_prefix
    logs.debug('start', **log_kwargs)
    if from_file:
        assert key and value and not values
        with open(value) as f:
            value = f.read()
        values = {key: value}
    elif key or value:
        assert key and value and not values, 'Invalid arguments: must specify both key and value args and not specify values arg'
        values = {key: value}
    assert values, 'Invalid arguments: no values to save'
    return _save(cache_key, values, extra_operator_labels, dry_run=dry_run)
def _helm_deploy(values, tiller_namespace_name, ckan_helm_chart_repo, ckan_helm_chart_version, ckan_helm_release_name, instance_id, dry_run=False):
    """Render the helm values to a temporary YAML file and deploy the ckan chart release."""
    logs.debug(f'Deploying helm chart {ckan_helm_chart_repo} {ckan_helm_chart_version} to release {ckan_helm_release_name} (instance_id={instance_id})')
    with tempfile.NamedTemporaryFile('w') as values_file:
        # helm consumes values from a file, so persist them for the duration of the deploy
        yaml.dump(values, values_file, default_flow_style=False)
        values_file.flush()
        helm_driver.deploy(tiller_namespace_name,
                           ckan_helm_chart_repo,
                           'ckan-cloud/ckan',
                           ckan_helm_chart_version,
                           ckan_helm_release_name,
                           values_file.name,
                           instance_id,
                           dry_run=dry_run)
def update(router_name, wait_ready, spec, annotations, routes, dry_run=False):
    """Delegate an nginx router update to the nginx deployment driver and return its result."""
    logs.debug(f'updating nginx router: {router_name}')
    logs.debug_verbose(router_name=router_name, spec=spec, routes=routes)
    result = nginx_deployment.update(router_name, wait_ready, spec, annotations,
                                     routes, dry_run=dry_run)
    return result
def update(instance_id, instance, force=False, dry_run=False):
    """Deploy or update a helm-based ckan instance.

    New instances are first deployed with a single replica and probes/jobs
    disabled, then (after instance events complete) redeployed with the full
    spec and restarted.

    :param instance_id: instance identifier (also used as namespace and release suffix)
    :param instance: instance CRD dict; its 'spec' drives the helm values
    :param force: accepted for interface compatibility; not used in this body
    :param dry_run: when True, deploys are simulated and event-waiting is skipped
    """
    tiller_namespace_name = _get_resource_name()
    logs.debug('Updating helm-based instance deployment',
               instance_id=instance_id, tiller_namespace_name=tiller_namespace_name)
    _create_private_container_registry_secret(instance_id)
    _init_ckan_infra_secret(instance_id, dry_run=dry_run)
    # chart location defaults to the ViderumGlobal charts repository
    ckan_helm_chart_repo = instance['spec'].get(
        "ckanHelmChartRepo",
        "https://raw.githubusercontent.com/ViderumGlobal/ckan-cloud-helm/master/charts_repository"
    )
    ckan_helm_chart_version = instance['spec'].get("ckanHelmChartVersion", "")
    ckan_helm_release_name = f'ckan-cloud-{instance_id}'
    solr_schema = instance['spec'].get("ckanSolrSchema", "ckan_default")
    solr_host, solr_port = _init_solr(
        instance_id,
        solr_schema,
        dry_run=dry_run,
    )
    logs.debug(ckan_helm_chart_repo=ckan_helm_chart_repo,
               ckan_helm_chart_version=ckan_helm_chart_version,
               ckan_helm_release_name=ckan_helm_release_name,
               solr_host=solr_host, solr_port=solr_port)
    # the chart reads the centralized solr endpoint from the spec
    instance['spec']['centralizedSolrHost'], instance['spec'][
        'centralizedSolrPort'] = solr_host, solr_port
    if annotations_manager.get_status(instance, 'helm', 'created'):
        logs.info('Updating existing instance')
        values = instance['spec']
    else:
        logs.info(
            'New instance, deploying first with 1 replica and disabled probes and jobs'
        )
        # conservative first deploy: single replica, no probes/jobs until events complete
        values = {
            **instance['spec'],
            "replicas": 1,
            "nginxReplicas": 1,
            "disableJobs": True,
            "noProbes": True,
            "enableHarvesterNG": False
        }
    _helm_deploy(values, tiller_namespace_name, ckan_helm_chart_repo,
                 ckan_helm_chart_version, ckan_helm_release_name, instance_id,
                 dry_run=dry_run)
    if not dry_run:
        _wait_instance_events(instance_id)
        # re-fetch the CRD: events processing may have changed annotations
        instance = crds_manager.get(INSTANCE_CRD_SINGULAR, name=instance_id)
        if not annotations_manager.get_status(instance, 'helm', 'created'):
            annotations_manager.set_status(instance, 'helm', 'created')
            # second deploy uses the full spec (probes/jobs re-enabled)
            _helm_deploy(instance['spec'], tiller_namespace_name,
                         ckan_helm_chart_repo, ckan_helm_chart_version,
                         ckan_helm_release_name, instance_id)
            # NOTE(review): nesting of the restart below is inferred from the
            # flattened source - it appears to run only on the first (creation)
            # deploy; confirm against the original file's indentation
            _scale_down_scale_up(namespace=instance_id, replicas=values.get('replicas', 1))
def update(instance_id, instance, dry_run=False):
    """Deploy or update a generic helm-based app instance.

    Resolves chart coordinates from the instance spec (asserting required
    attributes), persists them back via the pre-update hook, then runs the
    app-type pre-deploy hook, the helm deploy, and the post-deploy hook.

    :param instance_id: instance identifier
    :param instance: instance CRD dict; 'spec' must contain chart-repo-name,
        chart-name, and (unless repo is 'stable') chart-repo
    :param dry_run: forwarded to the helm deploy
    :raises AssertionError: when a required chart spec attribute is missing
    """
    # helm3 deployments have no tiller; a None tiller namespace signals helm3
    if instance['spec'].get('helm3'):
        tiller_namespace_name = None
    else:
        tiller_namespace_name = _get_tiller_namespace_name(instance_id, instance)
    if tiller_namespace_name:
        logs.debug('Updating helm-based instance deployment',
                   instance_id=instance_id,
                   tiller_namespace_name=tiller_namespace_name)
    else:
        logs.debug('Updating helm3 based instance deployment',
                   instance_id=instance_id)
    chart_repo_name = instance['spec'].get("chart-repo-name")
    assert chart_repo_name, 'missing spec attribute: chart-repo-name'
    logs.info(chart_repo_name=chart_repo_name)
    chart_repo = instance['spec'].get("chart-repo")
    # the 'stable' repo is well-known, so an explicit chart-repo URL is optional for it
    assert chart_repo or chart_repo_name in [
        'stable'
    ], 'missing spec attribute: chart-repo'
    logs.info(chart_repo=chart_repo)
    chart_name = instance['spec'].get('chart-name')
    assert chart_name, 'missing spec attribute: chart-name'
    logs.info(chart_name=chart_name)
    chart_version = instance['spec'].get("chart-version", "")
    logs.info(chart_version=chart_version)
    release_name = _get_helm_release_name(instance_id, instance)
    logs.info(release_name=release_name, )
    # persist the resolved chart coordinates back into the instance spec
    _pre_update_hook_modify_spec(
        instance_id, instance, lambda i: i['spec'].update(
            **{
                'release-name': release_name,
                'chart-version': chart_version,
                'chart-name': chart_name,
                'chart-repo': chart_repo,
                'chart-repo-name': chart_repo_name,
            }))
    deploy_kwargs = dict(values=instance['spec'].get('values', {}),
                         tiller_namespace_name=tiller_namespace_name,
                         chart_repo=chart_repo,
                         chart_version=chart_version,
                         chart_name=chart_name,
                         release_name=release_name,
                         instance_id=instance_id,
                         dry_run=dry_run,
                         chart_repo_name=chart_repo_name)
    app_type = instance['spec'].get('app-type')
    # app-type hooks may mutate deploy_kwargs before/after the deploy
    if app_type:
        _get_app_type_manager(app_type).pre_deploy_hook(
            instance_id, instance, deploy_kwargs)
    _helm_deploy(**deploy_kwargs)
    if app_type:
        _get_app_type_manager(app_type).post_deploy_hook(
            instance_id, instance, deploy_kwargs)
def _add_route(config, domains, route, enable_ssl_redirect):
    """Register a single route's backend and frontend in the traefik config.

    Mutates `config` (adds entries under 'backends' and 'frontends') and
    `domains` (appends the route's sub domain under its root domain).
    """
    route_name = routes_manager.get_name(route)
    logs.debug(f'adding route to traefik config: {route_name}')
    logs.debug_verbose(config=config, domains=domains, route=route,
                       enable_ssl_redirect=enable_ssl_redirect)
    backend_url = routes_manager.get_backend_url(route)
    frontend_hostname = routes_manager.get_frontend_hostname(route)
    root_domain, sub_domain = routes_manager.get_domain_parts(route)
    domains.setdefault(root_domain, []).append(sub_domain)
    # extra hostnames that are routed but have no DNS records managed for them
    no_dns_subdomains = route['spec'].get('extra-no-dns-subdomains')
    extra_hostnames = ''
    if no_dns_subdomains:
        extra_hostnames = ',' + ','.join(f'{s}.{root_domain}' for s in no_dns_subdomains)
    logs.debug(route_name=route_name, backend_url=backend_url,
               frontend_hostname=frontend_hostname, root_domain=root_domain,
               sub_domain=sub_domain, domains=domains,
               extra_hostnames=extra_hostnames)
    config['backends'][route_name] = {'servers': {'server1': {'url': backend_url}}}
    frontend = {
        'backend': route_name,
        'passHostHeader': True,
        'headers': {'SSLRedirect': bool(enable_ssl_redirect)},
        'routes': {'route1': {'rule': f'Host:{frontend_hostname}{extra_hostnames}'}},
    }
    # optional basic-auth, backed by an htpasswd file mounted from a secret
    httpauth_secret = route['spec'].get('httpauth-secret')
    if httpauth_secret:
        frontend['auth'] = {
            'basic': {'usersFile': '/httpauth-' + httpauth_secret + '/.htpasswd'}
        }
    config['frontends'][route_name] = frontend
def _wait_instance_events(instance_id, force_update_events=False):
    """Poll every 15 seconds until no instance events are missing.

    :raises Exception: if events have not completed within 10 minutes
    """
    timeout_seconds = 600
    start_time = datetime.datetime.now()
    logs.info('Waiting for instance events', start_time=start_time)
    while True:
        logs.debug('sleeping 15 seconds')
        time.sleep(15)
        missing = _check_instance_events(instance_id, force_update_events)
        if not missing:
            logs.info('All instance events completed successfully')
            break
        elapsed = (datetime.datetime.now() - start_time).total_seconds()
        if elapsed > timeout_seconds:
            raise Exception('time out waiting for instance events')
def _get_instance_id_and_type(instance_id_or_name=None, instance_id=None, required=True):
    """Resolve an instance id, its type label and its CRD object.

    Lookup order: explicit instance_id; then instance_id_or_name as an
    instance id; then instance_id_or_name as an instance *name* whose
    'latest-instance-id' points at the actual instance.

    :param instance_id_or_name: instance id or instance name to resolve
    :param instance_id: explicit instance id (skips name resolution)
    :param required: when True, assert that a typed instance was found
    :return: (instance_id, instance_type, instance) tuple; members may be
        None when required is False and nothing was found
    """
    if instance_id:
        logs.debug(f'Getting instance type using instance_id', instance_id=instance_id)
        instance = crds_manager.get(INSTANCE_CRD_SINGULAR, name=instance_id, required=False)
        instance_name = None
    else:
        logs.debug(f'Attempting to get instance type using id', instance_id_or_name=instance_id_or_name)
        instance = crds_manager.get(INSTANCE_CRD_SINGULAR, name=instance_id_or_name, required=False)
        if instance:
            instance_id = instance_id_or_name
            instance_name = None
        else:
            # fall back: treat the argument as an instance *name* CRD that
            # records the latest instance id
            logs.debug(f'Attempting to get instance type from instance name', instance_id_or_name=instance_id_or_name)
            instance_name = crds_manager.get(INSTANCE_NAME_CRD_SINGULAR, name=instance_id_or_name, required=False)
            if instance_name:
                instance_id = instance_name['spec'].get('latest-instance-id')
                logs.debug(instance_id=instance_id)
                instance = crds_manager.get(INSTANCE_CRD_SINGULAR, name=instance_id, required=False)
                # from here on, instance_name is the name string (for logging)
                instance_name = instance_id_or_name
            else:
                instance_name = None
    if instance:
        # instance type is stored as a prefixed label on the CRD
        instance_type = instance['metadata']['labels'].get('{}/instance-type'.format(labels_manager.get_label_prefix()))
    else:
        instance_type = None
    logs.debug_yaml_dump(instance_name=instance_name, instance_id=instance_id, instance_type=instance_type, instance=bool(instance))
    if required:
        # len(instance) > 2 appears to distinguish a real CRD from a stub - TODO confirm
        assert instance_id and instance_type and len(instance) > 2, f'Failed to find instance (instance_id_or_name={instance_id_or_name}, instance_id={instance_id})'
    return instance_id, instance_type, instance
def _get_instance(instance_id_or_name=None, instance_id=None, required=True):
    """Resolve an app instance id and its CRD object.

    Lookup order: explicit instance_id; then instance_id_or_name as an
    instance id; then instance_id_or_name as an instance *name* whose
    'latest-instance-id' points at the actual instance.

    :param instance_id_or_name: instance id or instance name to resolve
    :param instance_id: explicit instance id (skips name resolution)
    :param required: when True, assert that an instance was found
    :return: (instance_id, instance) tuple; members may be None when
        required is False and nothing was found
    """
    if instance_id:
        logs.debug(f'Getting instance using instance_id', instance_id=instance_id)
        instance = crds_manager.get(APP_CRD_SINGULAR, name=instance_id, required=False)
        instance_name = None
    else:
        logs.debug(f'Attempting to get instance using id', instance_id_or_name=instance_id_or_name)
        instance = crds_manager.get(APP_CRD_SINGULAR, name=instance_id_or_name, required=False)
        if instance:
            instance_id = instance_id_or_name
            instance_name = None
        else:
            # fall back: treat the argument as an app *name* CRD that records
            # the latest instance id
            logs.debug(f'Attempting to get instance from instance name', instance_id_or_name=instance_id_or_name)
            instance_name = crds_manager.get(APP_NAME_CRD_SINGULAR, name=instance_id_or_name, required=False)
            if instance_name:
                instance_id = instance_name['spec'].get('latest-instance-id')
                logs.debug(instance_id=instance_id)
                instance = crds_manager.get(APP_CRD_SINGULAR, name=instance_id, required=False)
                # from here on, instance_name is the name string (for logging)
                instance_name = instance_id_or_name
            else:
                instance_name = None
    logs.debug_yaml_dump(instance_name=instance_name, instance_id=instance_id, instance=bool(instance))
    if required:
        # len(instance) > 2 appears to distinguish a real CRD from a stub - TODO confirm
        assert instance_id and len(instance) > 2, \
            f'Failed to find instance (instance_id_or_name={instance_id_or_name}, instance_id={instance_id})'
    return instance_id, instance
def spec(self):
    """Initialize the spec object, fetch values from kubernetes if not provided

    :return: DeisCkanInstanceSpec
    """
    if not getattr(self, '_spec', None):
        spec_obj = DeisCkanInstanceSpec(self.values['spec'], self._override_spec)
        self._spec = spec_obj
        # when overrides were applied and persistence is enabled,
        # write the merged spec back to the cluster
        if self._persist_overrides and spec_obj.num_applied_overrides > 0:
            logs.info('persisting overrides')
            logs.debug(f'saving spec for instance id {self.id}: {spec_obj.spec}')
            instance = kubectl.get(f'{self.kind} {self.id}')
            instance['spec'] = spec_obj.spec
            logs.debug_yaml_dump(instance)
            kubectl.apply(instance)
    return self._spec
def _wait_instance_events(instance_id):
    """Poll every 15 seconds until no instance events are missing,
    logging whenever the set of missing events changes.

    :raises Exception: if events have not completed within 20 minutes
    """
    start_time = datetime.datetime.now()
    logs.info('Waiting for instance events', start_time=start_time)
    last_reported = None
    while True:
        logs.debug('sleeping 15 seconds')
        time.sleep(15)
        now_missing = _check_instance_events(instance_id)
        if len(now_missing) == 0:
            logs.info('All instance events completed successfully')
            break
        # only log when the missing set actually changed, to avoid log spam
        if now_missing != last_reported:
            last_reported = now_missing
            logs.info('Still waiting for', repr(sorted(last_reported)))
        elapsed = (datetime.datetime.now() - start_time).total_seconds()
        if elapsed > 1200:
            raise Exception('time out waiting for instance events')
def interactive_set(default_values, secret_name=None, configmap_name=None, namespace=None, from_file=False, extra_operator_labels=None, interactive=True):
    """Collect configuration values (interactively or from presets/saved values)
    and persist them via set().

    For each key in default_values the value is resolved in this order:
    preset answer, interactive prompt (with saved/default fallback), or the
    saved value / default when interactive is False.

    :param default_values: dict of key -> default value to collect
    :param secret_name: target secret name
    :param configmap_name: target configmap name
    :param namespace: target namespace
    :param from_file: prompt for file paths and store file contents as values
    :param extra_operator_labels: extra labels forwarded to set()
    :param interactive: when False, never prompt; use saved or default values
    :return: result of set() with the collected values
    """
    log_kwargs = {'func': 'config/interactive_set', 'secret': secret_name, 'configmap': configmap_name, 'namespace': namespace}
    logs.debug('start', **log_kwargs)
    set_values = {}
    for key, default_value in default_values.items():
        saved_value = get(key, secret_name=secret_name, configmap_name=configmap_name, namespace=namespace)
        preset_value = get_preset_answer(namespace, configmap_name, secret_name, key)
        if preset_value:
            # presets win over both saved values and interactive input
            set_values[key] = preset_value
        elif interactive:
            # a saved value becomes the default shown at the prompt
            if saved_value:
                if from_file:
                    msg = ', leave empty to use the saved value'
                else:
                    msg = f', leave empty to use the saved value: {saved_value}'
                default_value = saved_value
            elif default_value is not None:
                assert not from_file
                msg = f', leave empty to use the default value: {default_value}'
            else:
                msg = ' (required)'
            if from_file:
                print(f'Enter the path to a file containing the value for {key}{msg}')
                source_path = input(f'{key} path: ')
                if source_path:
                    with open(source_path) as f:
                        set_values[key] = f.read()
                elif saved_value:
                    set_values[key] = saved_value
                else:
                    raise Exception('file path is required')
            else:
                # booleans are stored as 'y'/'n' strings
                if default_value in [True, False]:
                    print(f'Enter a boolean value for {key}{msg}')
                    entered_value = input(f'{key} [y/n]: ')
                    bool_value = default_value if entered_value == '' else (entered_value == 'y')
                    set_values[key] = 'y' if bool_value else 'n'
                else:
                    print(f'Enter a value for {key}{msg}')
                    entered_value = input(f'{key}: ')
                    # NOTE(review): when input is empty and default_value is None
                    # this stores the literal string 'None' - confirm intended
                    set_values[key] = str(entered_value or default_value)
        else:
            set_values[key] = saved_value if saved_value is not None else default_value
    logs.debug('set', **log_kwargs)
    # `set` here is the module-level config set() function, not the builtin
    return set(values=set_values, secret_name=secret_name, configmap_name=configmap_name, namespace=namespace, extra_operator_labels=extra_operator_labels)
def curl(auth_email, auth_key, urlpart, data=None, method='GET'):
    """Call the Cloudflare v4 API via the `curl` binary and return parsed JSON.

    :param auth_email: Cloudflare account email (X-Auth-Email header)
    :param auth_key: Cloudflare API key (X-Auth-Key header)
    :param urlpart: path appended to https://api.cloudflare.com/client/v4/
    :param data: optional JSON-serializable request body
    :param method: HTTP method, defaults to GET
    :return: decoded JSON response
    :raises json.JSONDecodeError: when the response is not valid JSON
    :raises subprocess.CalledProcessError: when curl exits non-zero
    """
    logs.info(f'Running Cloudflare curl: {urlpart} {data} {method}')
    # SECURITY fix: the full API key was previously written to the debug log
    # (both directly and via the command line); log only a short prefix
    logs.debug(f'{auth_email} / {auth_key[:4]}***')
    cmd = [
        'curl', '-s', '-X', method,
        f'https://api.cloudflare.com/client/v4/{urlpart}'
    ]
    cmd += [
        '-H', f'X-Auth-Email: {auth_email}',
        '-H', f'X-Auth-Key: {auth_key}',
        '-H', 'Content-Type: application/json'
    ]
    if data:
        cmd += ['--data', json.dumps(data)]
    # mask the auth key header before logging the command line
    safe_cmd = [('X-Auth-Key: ***' if arg.startswith('X-Auth-Key:') else arg) for arg in cmd]
    logs.debug(*safe_cmd)
    output = subprocess.check_output(cmd)
    try:
        return json.loads(output)
    except Exception:
        logs.critical(f'Got invalid data from cloudflare curl: {output}')
        raise
def pre_update_hook(instance_id, instance, res, sub_domain, root_domain, modify_spec_callback):
    """Fill in missing jenkins chart-level and values-level defaults on the
    instance spec, both in-memory and via the modify-spec callback."""
    logs.info('jenkins pre_update_hook', instance_id=instance_id,
              instance_spec=instance.get('spec'))
    spec = instance['spec']
    # only add defaults for keys the spec does not already set
    chart_update_kwargs = {}
    for key, default in DEFAULT_CHART_VALUES.items():
        if not spec.get(key):
            chart_update_kwargs[key] = default
    values_update_kwargs = {}
    for key, default in DEFAULT_VALUES.items():
        if not spec.get('values', {}).get(key):
            values_update_kwargs[key] = default
    logs.debug(chart_update_kwargs=chart_update_kwargs)
    logs.debug(values_update_kwargs=values_update_kwargs)
    spec.update(**chart_update_kwargs)
    spec.setdefault('values', {}).update(**values_update_kwargs)
    # persist the same defaults through the callback
    modify_spec_callback(lambda i: i.update(**chart_update_kwargs))
    modify_spec_callback(
        lambda i: i.setdefault('values', {}).update(**values_update_kwargs))
def _check_instance_events(instance_id):
    """Return the set of expected ckan-cloud events not yet observed in the
    instance status. Tuple entries are alternatives (any one satisfies)."""
    status = get(instance_id)
    errors = []
    ckan_cloud_logs = []
    ckan_cloud_events = set()
    pod_names = []
    # walk every item of every app status and collect observed events
    for app, app_status in status.get('helm_app_statuses', {}).items():
        for kind, kind_items in app_status.items():
            for item in kind_items:
                item_name = item.get("name")
                for error in item.get("errors", []):
                    errors.append(dict(error, kind=kind, app=app, name=item_name))
                for logdata in item.get("ckan-cloud-logs", []):
                    ckan_cloud_logs.append(dict(logdata, kind=kind, app=app, name=item_name))
                    if "event" in logdata:
                        ckan_cloud_events.add(logdata["event"])
                if kind == "pods":
                    pod_names.append(item["name"])
    expected_events = {("ckan-env-vars-created", "ckan-env-vars-exists"),
                       ("ckan-secrets-created", "ckan-secrets-exists"),
                       "got-ckan-secrets",
                       "ckan-entrypoint-initialized",
                       "ckan-entrypoint-db-init-success",
                       "ckan-entrypoint-extra-init-success"}
    missing = set()
    for events in expected_events:
        alternatives = (events, ) if isinstance(events, str) else events
        if not any(e in ckan_cloud_events for e in alternatives):
            missing.add('/'.join(alternatives))
    logs.debug(ckan_cloud_events=ckan_cloud_events)
    return missing
def _init_namespace(instance_id, dry_run=False):
    """Create the instance namespace plus an operator service account,
    role, and role binding for it."""
    logs.debug('Initializing helm-based instance deployment namespace',
               namespace=instance_id)
    if kubectl.get('ns', instance_id, required=False):
        logs.info(f'instance namespace already exists ({instance_id})')
    else:
        logs.info(f'creating instance namespace ({instance_id})')
        kubectl.apply(kubectl.get_resource('v1', 'Namespace', instance_id, {}),
                      dry_run=dry_run)
    service_account_name = f'ckan-{instance_id}-operator'
    logs.debug('Creating service account',
               service_account_name=service_account_name)
    if not dry_run:
        kubectl_rbac_driver.update_service_account(
            f'ckan-{instance_id}-operator', {}, namespace=instance_id)
    role_name = f'ckan-{instance_id}-operator-role'
    logs.debug('Creating role and binding to the service account',
               role_name=role_name)
    if not dry_run:
        # operator needs read/create access to secrets and pod exec/portforward
        role_rules = [{
            "apiGroups": ["*"],
            "resources": ['secrets', 'pods', 'pods/exec', 'pods/portforward'],
            "verbs": ["list", "get", "create"]
        }]
        kubectl_rbac_driver.update_role(role_name, {}, role_rules,
                                        namespace=instance_id)
        kubectl_rbac_driver.update_role_binding(
            name=f'ckan-{instance_id}-operator-rolebinding',
            role_name=f'ckan-{instance_id}-operator-role',
            namespace=instance_id,
            service_account_name=f'ckan-{instance_id}-operator',
            labels={})
def _check_instance_events(instance_id, force_update_events=False):
    """Return the set of expected ckan-cloud events not yet observed for the instance.

    The expected set differs for creation vs. update: an instance already
    annotated as helm/created (or with force_update_events) expects the
    '*-exists' variants; a new instance expects the '*-created' variants plus
    the db initialization events.

    :param instance_id: instance identifier
    :param force_update_events: treat the instance as already created
    :return: set of missing event names (empty when all arrived)
    """
    status = get(instance_id)
    errors = []
    ckan_cloud_logs = []
    ckan_cloud_events = set()
    pod_names = []
    # walk every item of every app status and collect the observed events
    for app, app_status in status.get('helm_app_statuses', {}).items():
        for kind, kind_items in app_status.items():
            for item in kind_items:
                for error in item.get("errors", []):
                    errors.append(dict(error, kind=kind, app=app, name=item.get("name")))
                for logdata in item.get("ckan-cloud-logs", []):
                    ckan_cloud_logs.append(dict(logdata, kind=kind, app=app, name=item.get("name")))
                    if "event" in logdata:
                        ckan_cloud_events.add(logdata["event"])
                if kind == "pods":
                    pod_names.append(item["name"])
    instance = crds_manager.get(INSTANCE_CRD_SINGULAR, name=instance_id)
    if force_update_events or annotations_manager.get_status(instance, 'helm', 'created'):
        logs.debug('expecting update events')
        expected_events = {
            "ckan-env-vars-exists", "ckan-secrets-exists", "got-ckan-secrets",
            "ckan-entrypoint-initialized", "ckan-entrypoint-db-init-success",
            "ckan-entrypoint-extra-init-success"
        }
    else:
        logs.debug('expecting create events')
        expected_events = {
            "ckan-env-vars-created", "ckan-secrets-created", "got-ckan-secrets",
            "ckan-db-initialized", "ckan-datastore-db-initialized",
            "ckan-entrypoint-initialized", "ckan-entrypoint-db-init-success",
            "ckan-entrypoint-extra-init-success"
        }
    logs.debug(ckan_cloud_events=ckan_cloud_events)
    return expected_events.difference(ckan_cloud_events)
def __init__(self, spec, override_spec):
    """Build an instance spec by applying override_spec on a deep copy of spec.

    Supported overrides: 'envvars' (replaces envvars.overrides wholesale);
    'name' under db/datastore/solrCloudCollection; 'configName' under
    solrCloudCollection; 'no-db-proxy' under db/datastore. Anything else
    raises NotImplementedError.

    :param spec: base instance spec dict (deep-copied, not mutated)
    :param override_spec: optional dict of overrides to apply
    :raises NotImplementedError: on an unsupported override key
    """
    self.spec = copy.deepcopy(spec)
    self.num_applied_overrides = 0
    if override_spec:
        for k, v in override_spec.items():
            if k == 'envvars':
                logs.info('Applying overrides to instance spec envvars')
                logs.debug(f"spec['envvars']['overrides'] = {v}")
                self.spec['envvars']['overrides'] = v
                self.num_applied_overrides += 1
            elif k in ['db', 'datastore', 'solrCloudCollection']:
                for kk, vv in v.items():
                    if kk == 'name':
                        print(f'Overriding instance {k} spec name')
                        # bugfix: setdefault(k) with no default returns None
                        # when the section is missing, which raised TypeError
                        # on item assignment; default to an empty dict
                        self.spec.setdefault(k, {})[kk] = vv
                        self.num_applied_overrides += 1
                    elif k == 'solrCloudCollection' and kk == 'configName':
                        print(f'Overriding instance solr spec config name')
                        self.spec.setdefault(k, {})[kk] = vv
                        self.num_applied_overrides += 1
                    elif k in ['db', 'datastore'] and kk == 'no-db-proxy':
                        print(f'Overriding instance {k} spec {kk}={vv}')
                        self.spec[k][kk] = vv
                        self.num_applied_overrides += 1
                    else:
                        raise NotImplementedError(
                            f'Unsupported {k} spec override: {kk}={vv}')
            else:
                raise NotImplementedError(
                    f'Unsupported instance spec override: {k}: {v}')
    self._validate()
    # convenience attributes mirroring the validated spec sections
    self.envvars = self.spec['envvars']
    self.db = self.spec['db']
    self.datastore = self.spec['datastore']
    self.solrCloudCollection = self.spec['solrCloudCollection']
    self.storage = self.spec['storage']
def _init_solr(instance_id, dry_run=False):
    """Ensure a solr collection exists for the instance and return its (host, port)."""
    logs.debug('Initializing solr', instance_id=instance_id)
    solr_status = solr_manager.get_collection_status(instance_id)
    logs.debug_yaml_dump(solr_status)
    if solr_status['ready']:
        logs.info(f'collection already exists ({instance_id})')
    else:
        logs.info('Creating solr collection', collection_name=instance_id,
                  solr_config='ckan_28_default')
        if not dry_run:
            solr_manager.create_collection(instance_id, 'ckan_28_default')
    solr_url = solr_status['solr_http_endpoint']
    logs.debug(solr_url=solr_url)
    assert solr_url.startswith('http') and solr_url.endswith('/solr'), f'invalid solr_url ({solr_url})'
    # strip scheme and the trailing /solr path to leave "host:port"
    endpoint = solr_url.replace('https://', '').replace('http://', '').replace('/solr', '')
    host, port = endpoint.split(':')
    logs.debug('Solr initialization completed successfully', host=host, port=port)
    return host, port
def update(instance_id, instance, dry_run=False):
    """Deploy a none-based instance by delegating to its app-type manager."""
    logs.debug('Updating none-based instance deployment', instance_id=instance_id)
    manager = _get_app_type_manager(instance['spec'].get('app-type'))
    # NOTE(review): dry_run is accepted for interface parity but not
    # forwarded to deploy() - confirm whether deploy supports it
    manager.deploy(instance_id, instance)
def get(instance_id, instance=None):
    """Aggregate a detailed status dict for a helm-based ckan instance.

    Inspects all resources in the instance namespace, groups their detailed
    statuses per app, extracts ckan-cloud structured log events from ckan pod
    logs, and determines overall readiness of the ckan deployment.

    :param instance_id: instance identifier (also the namespace)
    :param instance: accepted for interface compatibility; not used here
    :return: dict combining the ckan Deployment status with readiness,
        pod statuses, image, latest pod info, per-app statuses and metadata
    """
    image = None
    latest_operator_timestamp, latest_pod_name, latest_pod_status = None, None, None
    item_app_statuses = {}
    ckan_deployment_status = None
    ckan_deployment_ready = None
    ckan_deployment_status_pods = []
    logs.debug('Getting all namespace resources', namespace=instance_id)
    all_resources = kubectl.get('all', namespace=instance_id, required=False)
    num_resource_items = len(all_resources.get('items'))
    logs.debug(num_resource_items=num_resource_items)
    if num_resource_items > 0:
        for item in all_resources['items']:
            item_kind = item['kind']
            try:
                # pods/replicasets carry the app in a label; services/deployments use their name
                if item_kind in ("Pod", "ReplicaSet"):
                    item_app = item["metadata"]["labels"]["app"]
                elif item_kind in ("Service", "Deployment"):
                    item_app = item["metadata"]["name"]
                else:
                    item_app = None
            except:
                # best-effort: unlabeled items fall into the "unknown" bucket
                logging.exception('Failed to extract item_app from %r', item)
                item_app = None
            logs.debug(item_kind=item_kind, item_app=item_app)
            if item_app in ["ckan", "jobs-db", "redis", "nginx", "jobs"]:
                app_status = item_app_statuses.setdefault(item_app, {})
            else:
                app_status = item_app_statuses.setdefault("unknown", {})
            item_status = kubectl.get_item_detailed_status(item)
            app_status.setdefault("{}s".format(item_kind.lower()), []).append(item_status)
            if item_app == 'ckan':
                if item_kind == 'Deployment':
                    # ready when the deployment's detailed status has no errors
                    ckan_deployment_status = item_status
                    ckan_deployment_ready = len(item_status.get('error', [])) == 0
                    logs.debug(ckan_deployment_ready=ckan_deployment_ready)
                elif item_kind == 'Pod':
                    pod = item
                    pod_status = item_status
                    pod_operator_timestamp = pod['metadata']['creationTimestamp']
                    # track the newest pod by creation timestamp
                    if not latest_operator_timestamp or latest_operator_timestamp < pod_operator_timestamp:
                        latest_operator_timestamp = pod_operator_timestamp
                        latest_pod_name = pod['metadata']['name']
                    for container in ["secrets", "ckan"]:
                        status_code, output = subprocess.getstatusoutput(
                            f'kubectl -n {instance_id} logs {pod["metadata"]["name"]} -c {container}',
                        )
                        container_logs = output if status_code == 0 else None
                        logs.debug(len_container_logs=len(container_logs) if container_logs else 0)
                        if container == 'ckan':
                            pod_status['logs'] = output
                        if container_logs:
                            # ckan-cloud emits structured JSON between START/END markers
                            for logline in container_logs.split(
                                    "--START_CKAN_CLOUD_LOG--")[1:]:
                                logdata = json.loads(
                                    logline.split("--END_CKAN_CLOUD_LOG--")[0])
                                pod_status.setdefault("ckan-cloud-logs", []).append(logdata)
                    if not image:
                        image = pod["spec"]["containers"][0]["image"]
                    else:
                        # pods running different images means a rollout is in progress
                        if image != pod["spec"]["containers"][0]["image"]:
                            ckan_deployment_ready = False
                            image = pod["spec"]["containers"][0]["image"]
                    ckan_deployment_status_pods.append(pod_status)
                    if latest_pod_name == pod_status['name']:
                        latest_pod_status = pod_status
        # NOTE(review): nesting of this final check is inferred from the
        # flattened source - it appears to run once after the resource loop
        if not latest_pod_status or len(latest_pod_status.get(
                'errors', [])) > 0 or latest_pod_status['logs'] is None:
            ckan_deployment_ready = False
    else:
        ckan_deployment_ready = False
    # NOTE(review): if no ckan Deployment was found, ckan_deployment_status is
    # None and the **-unpack below raises TypeError - confirm callers guarantee it
    return {
        **ckan_deployment_status,
        'ready': ckan_deployment_ready,
        'pods': ckan_deployment_status_pods,
        'image': image,
        'latest_pod_name': latest_pod_name,
        'latest_operator_timestamp': latest_operator_timestamp,
        'helm_app_statuses': item_app_statuses,
        'helm_metadata': {
            'ckan_instance_id': instance_id,
            'namespace': instance_id,
            'status_generated_at': datetime.datetime.now(),
            'status_generated_from': subprocess.check_output(["hostname"]).decode().strip(),
        }
    }
def update(router_name, wait_ready, spec, annotations, routes):
    """Delegate a traefik router update to the traefik deployment driver and return its result."""
    logs.debug(f'updating traefik router: {router_name}')
    logs.debug_verbose(router_name=router_name, spec=spec, routes=routes)
    update_result = traefik_deployment.update(router_name, wait_ready, spec,
                                              annotations, routes)
    return update_result