def update(self, wait_ready=False, skip_solr=False, skip_deployment=False):
    """Ensure the instance is updated to latest spec.

    Updates every sub-component in order (namespace, db, datastore, solr,
    storage, registry, envvars, deployment), waits for the Kubernetes
    deployment generation to advance, optionally waits until the instance
    reports ready, then updates CKAN itself, sets datastore read-only
    permissions (best-effort) and refreshes uptime monitoring.

    :param wait_ready: block until ``self.get()`` reports ``ready`` (only
                       meaningful when the deployment is updated)
    :param skip_solr: skip updating the Solr sub-component
    :param skip_deployment: skip updating the Kubernetes deployment (and the
                            generation / readiness wait)
    :raises Exception: if the deployment generation advances to an
                       unexpected value
    """
    old_deployment = kubectl.get(f'deployment {self.id}', required=False, namespace=self.id)
    if old_deployment:
        old_deployment_generation = old_deployment.get('metadata', {}).get('generation')
    else:
        old_deployment_generation = None
    # A spec change bumps metadata.generation by exactly 1; a brand new
    # deployment starts at generation 1.
    if old_deployment_generation:
        expected_new_deployment_generation = old_deployment_generation + 1
    else:
        expected_new_deployment_generation = 1
    print(f'old deployment generation = {old_deployment_generation}')
    DeisCkanInstanceNamespace(self).update()
    DeisCkanInstanceDb(self, 'db').update()
    DeisCkanInstanceDb(self, 'datastore').update()
    if not skip_solr:
        DeisCkanInstanceSolr(self).update()
    DeisCkanInstanceStorage(self).update()
    DeisCkanInstanceRegistry(self).update()
    # keep the envvars object: its site_url is needed for uptime monitoring below
    envvars = DeisCkanInstanceEnvvars(self)
    envvars.update()
    if not skip_deployment:
        DeisCkanInstanceDeployment(self).update()
        # Poll until the API server reflects the new deployment generation.
        # NOTE(review): loops forever if the deployment never appears —
        # presumably acceptable for operator CLI usage; confirm.
        while True:
            time.sleep(.2)
            new_deployment = kubectl.get(f'deployment {self.id}', required=False, namespace=self.id)
            if not new_deployment:
                continue
            new_deployment_generation = new_deployment.get('metadata', {}).get('generation')
            if not new_deployment_generation:
                continue
            if new_deployment_generation == old_deployment_generation:
                continue
            if new_deployment_generation != expected_new_deployment_generation:
                # fixed: the message was missing the closing parenthesis
                raise Exception(f'Invalid generation: {new_deployment_generation} '
                                f'(expected: {expected_new_deployment_generation})')
            print(f'new deployment generation: {new_deployment_generation}')
            break
        if wait_ready:
            print('Waiting for ready status')
            time.sleep(3)
            while True:
                data = self.get()
                if data.get('ready'):
                    print(yaml.dump(data, default_flow_style=False))
                    break
                else:
                    # print only the components that are not ready yet
                    # (plus the namespace, for context)
                    print(yaml.dump(
                        {
                            k: v for k, v in data.items()
                            if (k not in ['ready'] and isinstance(v, dict) and not v.get('ready'))
                            or k == 'namespace'
                        },
                        default_flow_style=False)
                    )
                    time.sleep(2)
    self.ckan.update()
    # best-effort: datastore permission setup failures should not abort the update
    try:
        DeisCkanInstanceDb(self, 'datastore').set_datastore_readonly_permissions()
    except Exception:
        logs.warning('Setting datastore permissions failed, continuing anyway')
    # Create/Update uptime monitoring after everything else is ready
    DeisCkanInstanceUptime(self).update(envvars.site_url)
def get(self, attr=None, exclude_attr=None):
    """Get detailed information about the instance and related components.

    :param attr: name of a single component (e.g. ``'db'``, ``'solr'``);
                 when given, return only that component's status dict
    :param exclude_attr: iterable of component names to skip when
                         aggregating all components
    :return: a single component's status when ``attr`` is given, otherwise a
             dict with one entry per component plus an aggregate ``'ready'``
             flag and the instance ``'id'``
    """
    # lazy callables so only the requested components are actually fetched
    gets = {
        'annotations': lambda: DeisCkanInstanceAnnotations(self).get(),
        'db': lambda: DeisCkanInstanceDb(self, 'db').get(),
        'datastore': lambda: DeisCkanInstanceDb(self, 'datastore').get(),
        'deployment': lambda: DeisCkanInstanceDeployment(self).get(),
        'envvars': lambda: DeisCkanInstanceEnvvars(self).get(),
        'namespace': lambda: DeisCkanInstanceNamespace(self).get(),
        'registry': lambda: DeisCkanInstanceRegistry(self).get(),
        'solr': lambda: DeisCkanInstanceSolr(self).get(),
        'storage': lambda: DeisCkanInstanceStorage(self).get(),
    }
    if exclude_attr:
        gets = {k: v for k, v in gets.items() if k not in exclude_attr}
    if attr:
        return gets[attr]()
    else:
        ret = {'ready': True}
        for k, v in gets.items():
            ret[k] = v()
            # aggregate readiness: any non-ready component marks the
            # whole instance as not ready (isinstance replaces type()==dict)
            if isinstance(ret[k], dict) and not ret[k].get('ready'):
                ret['ready'] = False
        ret['id'] = self.id
        return ret
def post_create_checks(instance_id):
    """Run post-creation sanity checks for a newly created instance.

    Fetches the instance's full envvars, verifies they are ready, and
    delegates per-variable validation to ``check_envvars``.

    :param instance_id: id of the DeisCkanInstance to check
    :raises Exception: if the instance's envvars are not ready
    """
    # deferred imports, presumably to avoid circular imports at module load
    from ckan_cloud_operator.deis_ckan.instance import DeisCkanInstance
    from ckan_cloud_operator.deis_ckan.envvars import DeisCkanInstanceEnvvars
    instance = DeisCkanInstance(instance_id)
    envvars = DeisCkanInstanceEnvvars(instance).get(full=True)
    # explicit raise instead of assert: assert is stripped under `python -O`
    if not envvars['ready']:
        raise Exception(f'envvars are not ready for instance {instance_id}')
    envvars = envvars['envvars']
    check_envvars(envvars, instance)
def delete(self, force=False, wait_deleted=False):
    """
    Can run delete multiple time until successful deletion of all components.
    Uses Kubernetes finalizers to ensure deletion is complete before applying the deletion.

    :param force: remove finalizers (allowing Kubernetes to drop the object)
                  even if some sub-component deletions failed
    :param wait_deleted: sleep after deletion to give Kubernetes time to
                         finish removing the object
    :raises Exception: when a sub-component deletion failed and ``force``
                       is not set
    """
    print(f'Deleting {self.kind} {self.id}')
    try:
        assert self.spec
        has_spec = True
    except Exception:
        has_spec = False
    # this updates deletion timestamp but doesn't delete the object until all finalizers are removed
    subprocess.call(f'kubectl -n ckan-cloud delete --wait=false {self.kind} {self.id}', shell=True)
    num_exceptions = 0
    if has_spec:
        # delete every sub-component, collecting (not raising) failures so
        # repeated runs can make incremental progress
        for delete_id, delete_code in {
            'deployment': lambda: DeisCkanInstanceDeployment(self).delete(),
            'envvars': lambda: DeisCkanInstanceEnvvars(self).delete(),
            'registry': lambda: DeisCkanInstanceRegistry(self).delete(),
            'solr': lambda: DeisCkanInstanceSolr(self).delete(),
            'storage': lambda: DeisCkanInstanceStorage(self).delete(),
            'namespace': lambda: DeisCkanInstanceNamespace(self).delete(),
            'envvars-secret': lambda: kubectl.check_call(f'delete --ignore-not-found secret/{self.id}-envvars'),
            'routes': lambda: routers_manager.delete_routes(deis_instance_id=self.id),
            'uptime-monitoring': lambda: DeisCkanInstanceUptime(self).delete(self.id)
        }.items():
            try:
                delete_code()
            except Exception:
                logs.critical(f'deletion failed for instance {self.id}, submodule: {delete_id}')
                num_exceptions += 1
    else:
        # no spec: only the routes can (and need to) be cleaned up
        try:
            routers_manager.delete_routes(deis_instance_id=self.id)
        except Exception:
            logs.critical(f'deletion failed for instance {self.id}, submodule: routes')
            # fixed: num_exceptions was incremented twice for a single failure
            num_exceptions += 1
    if num_exceptions != 0 and not force:
        raise Exception('instance was not deleted, run with --force to force deletion with risk of remaining infra')
    else:
        # clearing the finalizers lets Kubernetes actually remove the object
        print(f'Removing finalizers from {self.kind} {self.id}')
        try:
            subprocess.check_call(
                f'kubectl -n ckan-cloud patch {self.kind} {self.id} -p \'{{"metadata":{{"finalizers":[]}}}}\' --type=merge',
                shell=True
            )
        except Exception:
            logs.critical(f'failed to remove finalizers: {self.id}')
            num_exceptions += 1
            if not force:
                raise
    if wait_deleted and has_spec:
        logs.info('Waiting 30 seconds for instance to be deleted...')
        time.sleep(30)