def handle(self, tiller): known_release_names = [release[0] for release in tiller.list_charts()] if self.releases: target_releases = [ r.strip() for r in self.releases.split(',') if r.strip() in known_release_names ] if not target_releases: self.logger.info("There's no release to delete.") return if not self.ctx.obj.get('api', False): for r in target_releases: self.logger.info("Deleting release %s", r) tiller.delete_release(r, purge=self.purge) else: raise NotImplementedError() if self.manifest: target_deletes = [] with open(self.manifest) as f: documents = list(yaml.safe_load_all(f.read())) try: armada_obj = Manifest(documents).get_manifest() prefix = armada_obj.get(const.KEYWORD_DATA).get( const.KEYWORD_PREFIX) for group in armada_obj.get(const.KEYWORD_DATA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_DATA).get( const.KEYWORD_CHARTS): chart = ch.get(const.KEYWORD_DATA) release_name = release_prefixer( prefix, chart.get('release')) if release_name in known_release_names: target_deletes.append((chart, release_name)) except yaml.YAMLError as e: mark = e.problem_mark self.logger.info( "While parsing the manifest file, %s. " "Error position: (%s:%s)", e.problem, mark.line + 1, mark.column + 1) if not target_deletes: self.logger.info("There's no release to delete.") return if not self.ctx.obj.get('api', False): for chart, release in target_deletes: chart_delete = ChartDelete(chart, release, tiller, purge=self.purge) chart_delete.delete() else: raise NotImplementedError()
def invoke(self): tiller = Tiller(tiller_host=self.tiller_host, tiller_port=self.tiller_port) known_release_names = [release[0] for release in tiller.list_charts()] if self.releases: target_releases = [ r.strip() for r in self.releases.split(',') if r.strip() in known_release_names ] if not target_releases: self.logger.info("There's no release to delete.") return if not self.ctx.obj.get('api', False): for r in target_releases: self.logger.info("Deleting release %s", r) tiller.uninstall_release(r, purge=self.purge) else: raise NotImplementedError() if self.manifest: target_releases = [] with open(self.manifest) as f: documents = yaml.safe_load_all(f.read()) try: armada_obj = Manifest(documents).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_PREFIX) for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): release_name = release_prefix( prefix, ch.get('chart').get('chart_name')) if release_name in known_release_names: target_releases.append(release_name) except yaml.YAMLError as e: mark = e.problem_mark self.logger.info( "While parsing the manifest file, %s. " "Error position: (%s:%s)", e.problem, mark.line + 1, mark.column + 1) if not target_releases: self.logger.info("There's no release to delete.") return if not self.ctx.obj.get('api', False): for r in target_releases: self.logger.info("Deleting release %s", r) tiller.uninstall_release(r, purge=self.purge) else: raise NotImplementedError()
def on_post(self, req, resp): try: opts = req.params tiller = Tiller(tiller_host=opts.get('tiller_host', None), tiller_port=opts.get('tiller_port', None)) documents = self.req_yaml(req) armada_obj = Manifest(documents).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_PREFIX) known_releases = [release[0] for release in tiller.list_charts()] message = { 'tests': { 'passed': [], 'skipped': [], 'failed': [] } } for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): release_name = release_prefix( prefix, ch.get('chart').get('chart_name')) if release_name in known_releases: self.logger.info('RUNNING: %s tests', release_name) resp = tiller.testing_release(release_name) if not resp: continue test_status = getattr( resp.info.status, 'last_test_suite_run', 'FAILED') if test_status.results[0].status: self.logger.info("PASSED: %s", release_name) message['test']['passed'].append(release_name) else: self.logger.info("FAILED: %s", release_name) message['test']['failed'].append(release_name) else: self.logger.info( 'Release %s not found - SKIPPING', release_name) message['test']['skipped'].append(release_name) resp.status = falcon.HTTP_200 resp.body = json.dumps(message) resp.content_type = 'application/json' except Exception as e: err_message = 'Failed to test manifest: {}'.format(e) self.error(req.context, err_message) self.return_error( resp, falcon.HTTP_500, message=err_message)
def handle(self, req, resp, tiller): try: documents = self.req_yaml(req, default=[]) except yaml.YAMLError: err_message = 'Documents must be valid YAML.' return self.return_error( resp, falcon.HTTP_400, message=err_message) target_manifest = req.get_param('target_manifest', None) is_valid = self._validate_documents(req, resp, documents) if not is_valid: return armada_obj = Manifest( documents, target_manifest=target_manifest).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get(const.KEYWORD_PREFIX) known_releases = [release[0] for release in tiller.list_charts()] message = {'tests': {'passed': [], 'skipped': [], 'failed': []}} for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): chart = ch['chart'] release_name = release_prefixer(prefix, chart.get('release')) if release_name in known_releases: cleanup = req.get_param_as_bool('cleanup') enable_all = req.get_param_as_bool('enable_all') cg_test_charts = group.get('test_charts') test_values = chart.get('test', {}) test_handler = Test( release_name, tiller, cg_test_charts=cg_test_charts, cleanup=cleanup, enable_all=enable_all, test_values=test_values) if test_handler.test_enabled: success = test_handler.test_release_for_success() if success: message['test']['passed'].append(release_name) else: message['test']['failed'].append(release_name) else: self.logger.info('Release %s not found - SKIPPING', release_name) message['test']['skipped'].append(release_name) resp.status = falcon.HTTP_200 resp.body = json.dumps(message) resp.content_type = 'application/json'
def handle(self, helm): release_ids = helm.list_release_ids() if self.release: if not self.ctx.obj.get('api', False): release_id = HelmReleaseId(self.namespace, self.release) test_handler = Test({}, release_id, helm) test_handler.test_release_for_success() else: client = self.ctx.obj.get('CLIENT') resp = client.get_test_release(release=self.release) self.logger.info(resp.get('result')) self.logger.info(resp.get('message')) if self.file: if not self.ctx.obj.get('api', False): documents = list(yaml.safe_load_all(open(self.file).read())) armada_obj = Manifest( documents, target_manifest=self.target_manifest).get_manifest() prefix = armada_obj.get(const.KEYWORD_DATA).get( const.KEYWORD_PREFIX) for group in armada_obj.get(const.KEYWORD_DATA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): chart = ch['chart'] release_id = HelmReleaseId( chart['namespace'], release_prefixer(prefix, chart['release'])) if release_id in release_ids: test_handler = Test(chart, release_id, helm, enable_all=self.enable_all) if test_handler.test_enabled: test_handler.test_release_for_success() else: self.logger.info('Release %s not found - SKIPPING', release_id) else: client = self.ctx.obj.get('CLIENT') with open(self.filename, 'r') as f: resp = client.get_test_manifest(manifest=f.read()) for test in resp.get('tests'): self.logger.info('Test State: %s', test) for item in test.get('tests').get(test): self.logger.info(item) self.logger.info(resp)
def testService(args): tiller = Tiller(tiller_host=args.tiller_host, tiller_port=args.tiller_port) known_release_names = [release[0] for release in tiller.list_charts()] if args.release: LOG.info("RUNNING: %s tests", args.release) resp = tiller.testing_release(args.release) if not resp: LOG.info("FAILED: %s", args.release) return test_status = getattr(resp.info.status, 'last_test_suite_run', 'FAILED') if test_status.results[0].status: LOG.info("PASSED: %s", args.release) else: LOG.info("FAILED: %s", args.release) if args.file: documents = yaml.safe_load_all(open(args.file).read()) armada_obj = Manifest(documents).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get(const.KEYWORD_PREFIX) for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): release_name = release_prefix( prefix, ch.get('chart').get('chart_name')) if release_name in known_release_names: LOG.info('RUNNING: %s tests', release_name) resp = tiller.testing_release(release_name) if not resp: continue test_status = getattr(resp.info.status, 'last_test_suite_run', 'FAILED') if test_status.results[0].status: LOG.info("PASSED: %s", release_name) else: LOG.info("FAILED: %s", release_name) else: LOG.info('Release %s not found - SKIPPING', release_name)
def handle(self, req, resp, helm): try: documents = self.req_yaml(req, default=[]) except yaml.YAMLError: err_message = 'Documents must be valid YAML.' return self.return_error(resp, falcon.HTTP_400, message=err_message) target_manifest = req.get_param('target_manifest', None) is_valid = self._validate_documents(req, resp, documents) if not is_valid: return armada_obj = Manifest(documents, target_manifest=target_manifest).get_manifest() prefix = armada_obj[const.KEYWORD_DATA][const.KEYWORD_PREFIX] release_ids = helm.list_release_ids() message = {'tests': {'passed': [], 'skipped': [], 'failed': []}} for group in armada_obj.get(const.KEYWORD_DATA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): chart = ch['chart'] release_id = helm.HelmReleaseId( ch['namespace'], release_prefixer(prefix, ch['release'])) if release_id in release_ids: enable_all = req.get_param_as_bool('enable_all') cg_test_charts = group.get('test_charts') test_handler = Test(chart, release_id, helm, cg_test_charts=cg_test_charts, enable_all=enable_all) if test_handler.test_enabled: success = test_handler.test_release_for_success() if success: message['test']['passed'].append(release_id) else: message['test']['failed'].append(release_id) else: self.logger.info('Release %s not found - SKIPPING', release_id) message['test']['skipped'].append(release_id) resp.status = falcon.HTTP_200 resp.text = json.dumps(message) resp.content_type = 'application/json'
class Armada(object): ''' This is the main Armada class handling the Armada workflows ''' def __init__(self, documents, disable_update_pre=False, disable_update_post=False, enable_chart_cleanup=False, dry_run=False, set_ovr=None, force_wait=False, timeout=None, tiller_host=None, tiller_port=None, tiller_namespace=None, values=None, target_manifest=None, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1): ''' Initialize the Armada engine and establish a connection to Tiller. :param List[dict] documents: Armada documents. :param bool disable_update_pre: Disable pre-update Tiller operations. :param bool disable_update_post: Disable post-update Tiller operations. :param bool enable_chart_cleanup: Clean up unmanaged charts. :param bool dry_run: Run charts without installing them. :param bool force_wait: Force Tiller to wait until all charts are deployed, rather than using each chart's specified wait policy. :param int timeout: Specifies overall time in seconds that Tiller should wait for charts until timing out. :param str tiller_host: Tiller host IP. Default is None. :param int tiller_port: Tiller host port. Default is ``CONF.tiller_port``. :param str tiller_namespace: Tiller host namespace. Default is ``CONF.tiller_namespace``. :param str target_manifest: The target manifest to run. Useful for specifying which manifest to run when multiple are available. :param int k8s_wait_attempts: The number of times to attempt waiting for pods to become ready. :param int k8s_wait_attempt_sleep: The time in seconds to sleep between attempts. ''' tiller_port = tiller_port or CONF.tiller_port tiller_namespace = tiller_namespace or CONF.tiller_namespace self.enable_chart_cleanup = enable_chart_cleanup self.dry_run = dry_run self.force_wait = force_wait # TODO: Use dependency injection i.e. pass in a Tiller instead of # creating it here. self.tiller = Tiller(tiller_host=tiller_host, tiller_port=tiller_port, tiller_namespace=tiller_namespace, dry_run=dry_run) try: self.documents = Override(documents, overrides=set_ovr, values=values).update_manifests() except (validate_exceptions.InvalidManifestException, override_exceptions.InvalidOverrideValueException): raise self.manifest = Manifest( self.documents, target_manifest=target_manifest).get_manifest() self.cloned_dirs = set() self.chart_deploy = ChartDeploy(disable_update_pre, disable_update_post, self.dry_run, k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, self.tiller) def pre_flight_ops(self): """Perform a series of checks and operations to ensure proper deployment. 
""" LOG.info("Performing pre-flight operations.") # Ensure Tiller is available and manifest is valid if not self.tiller.tiller_status(): raise tiller_exceptions.TillerServicesUnavailableException() # Clone the chart sources repos = {} manifest_data = self.manifest.get(const.KEYWORD_ARMADA, {}) for group in manifest_data.get(const.KEYWORD_GROUPS, []): for ch in group.get(const.KEYWORD_CHARTS, []): self.tag_cloned_repo(ch, repos) for dep in ch.get('chart', {}).get('dependencies', []): self.tag_cloned_repo(dep, repos) def tag_cloned_repo(self, ch, repos): chart = ch.get('chart', {}) chart_source = chart.get('source', {}) location = chart_source.get('location') ct_type = chart_source.get('type') subpath = chart_source.get('subpath', '.') if ct_type == 'local': chart['source_dir'] = (location, subpath) elif ct_type == 'tar': LOG.info('Downloading tarball from: %s', location) if not CONF.certs: LOG.warn('Disabling server validation certs to extract charts') tarball_dir = source.get_tarball(location, verify=False) else: tarball_dir = source.get_tarball(location, verify=CONF.cert) chart['source_dir'] = (tarball_dir, subpath) elif ct_type == 'git': reference = chart_source.get('reference', 'master') repo_branch = (location, reference) if repo_branch not in repos: auth_method = chart_source.get('auth_method') proxy_server = chart_source.get('proxy_server') logstr = 'Cloning repo: {} from branch: {}'.format( *repo_branch) if proxy_server: logstr += ' proxy: {}'.format(proxy_server) if auth_method: logstr += ' auth method: {}'.format(auth_method) LOG.info(logstr) repo_dir = source.git_clone(*repo_branch, proxy_server=proxy_server, auth_method=auth_method) self.cloned_dirs.add(repo_dir) repos[repo_branch] = repo_dir chart['source_dir'] = (repo_dir, subpath) else: chart['source_dir'] = (repos.get(repo_branch), subpath) else: chart_name = chart.get('chart_name') raise source_exceptions.ChartSourceException(ct_type, chart_name) def sync(self): ''' Synchronize Helm with the Armada Config(s) ''' if self.dry_run: LOG.info('Armada is in DRY RUN mode, no changes being made.') msg = { 'install': [], 'upgrade': [], 'diff': [], 'purge': [], 'protected': [] } # TODO: (gardlt) we need to break up this func into # a more cleaner format self.pre_flight_ops() known_releases = self.tiller.list_releases() manifest_data = self.manifest.get(const.KEYWORD_ARMADA, {}) prefix = manifest_data.get(const.KEYWORD_PREFIX) for chartgroup in manifest_data.get(const.KEYWORD_GROUPS, []): cg_name = chartgroup.get('name', '<missing name>') cg_desc = chartgroup.get('description', '<missing description>') cg_sequenced = chartgroup.get('sequenced', False) or self.force_wait LOG.info('Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name, cg_desc, cg_sequenced, ' (forced)' if self.force_wait else '') # TODO(MarshM): Deprecate the `test_charts` key cg_test_all_charts = chartgroup.get('test_charts') if isinstance(cg_test_all_charts, bool): LOG.warn('The ChartGroup `test_charts` key is deprecated, ' 'and support for this will be removed. See the ' 'Chart `test` key for more information.') else: # This key defaults to True. 
Individual charts must # explicitly disable helm tests if they choose cg_test_all_charts = True cg_charts = chartgroup.get(const.KEYWORD_CHARTS, []) charts = map(lambda x: x.get('chart', {}), cg_charts) def deploy_chart(chart): set_current_chart(chart) try: return self.chart_deploy.execute(chart, cg_test_all_charts, prefix, known_releases) finally: set_current_chart(None) results = [] failures = [] # Returns whether or not there was a failure def handle_result(chart, get_result): name = chart['chart_name'] try: result = get_result() except Exception as e: LOG.error('Chart deploy [%s] failed: %s', name, e) failures.append(name) return True else: results.append(result) return False if cg_sequenced: for chart in charts: if (handle_result(chart, lambda: deploy_chart(chart))): break else: with ThreadPoolExecutor( max_workers=len(cg_charts)) as executor: future_to_chart = { executor.submit(deploy_chart, chart): chart for chart in charts } for future in as_completed(future_to_chart): chart = future_to_chart[future] handle_result(chart, future.result) if failures: LOG.error('Chart deploy(s) failed: %s', failures) raise armada_exceptions.ChartDeployException(failures) for result in results: for k, v in result.items(): msg[k].append(v) # End of Charts in ChartGroup LOG.info('All Charts applied in ChartGroup %s.', cg_name) self.post_flight_ops() if self.enable_chart_cleanup: self._chart_cleanup( prefix, self.manifest[const.KEYWORD_ARMADA][const.KEYWORD_GROUPS], msg) LOG.info('Done applying manifest.') return msg def post_flight_ops(self): ''' Operations to run after deployment process has terminated ''' LOG.info("Performing post-flight operations.") # Delete temp dirs used for deployment for cloned_dir in self.cloned_dirs: LOG.debug('Removing cloned temp directory: %s', cloned_dir) source.source_cleanup(cloned_dir) def _chart_cleanup(self, prefix, charts, msg): LOG.info('Processing chart cleanup to remove unspecified releases.') valid_releases = [] for gchart in charts: for chart in gchart.get(const.KEYWORD_CHARTS, []): valid_releases.append( release_prefixer(prefix, chart.get('chart', {}).get('release'))) actual_releases = [x.name for x in self.tiller.list_releases()] release_diff = list(set(actual_releases) - set(valid_releases)) for release in release_diff: if release.startswith(prefix): LOG.info('Purging release %s as part of chart cleanup.', release) self.tiller.uninstall_release(release) msg['purge'].append(release)
def handle(self, tiller): known_release_names = [release[0] for release in tiller.list_charts()] if self.release: if not self.ctx.obj.get('api', False): test_handler = Test(self.release, tiller, cleanup=self.cleanup) test_handler.test_release_for_success() else: client = self.ctx.obj.get('CLIENT') query = { 'tiller_host': self.tiller_host, 'tiller_port': self.tiller_port, 'tiller_namespace': self.tiller_namespace } resp = client.get_test_release(release=self.release, query=query) self.logger.info(resp.get('result')) self.logger.info(resp.get('message')) if self.file: if not self.ctx.obj.get('api', False): documents = list(yaml.safe_load_all(open(self.file).read())) armada_obj = Manifest( documents, target_manifest=self.target_manifest).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_PREFIX) for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): chart = ch['chart'] release_name = release_prefixer( prefix, chart.get('release')) if release_name in known_release_names: test_values = chart.get('test', {}) test_handler = Test(release_name, tiller, cleanup=self.cleanup, enable_all=self.enable_all, test_values=test_values) if test_handler.test_enabled: test_handler.test_release_for_success() else: self.logger.info('Release %s not found - SKIPPING', release_name) else: client = self.ctx.obj.get('CLIENT') query = { 'tiller_host': self.tiller_host, 'tiller_port': self.tiller_port, 'tiller_namespace': self.tiller_namespace } with open(self.filename, 'r') as f: resp = client.get_test_manifest(manifest=f.read(), query=query) for test in resp.get('tests'): self.logger.info('Test State: %s', test) for item in test.get('tests').get(test): self.logger.info(item) self.logger.info(resp)
def on_post(self, req, resp): # TODO(fmontei): Validation Content-Type is application/x-yaml. target_manifest = req.get_param('target_manifest', None) try: tiller = Tiller( tiller_host=req.get_param('tiller_host'), tiller_port=req.get_param_as_int( 'tiller_port') or CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace)) # TODO(fmontei): Provide more sensible exception(s) here. except Exception: err_message = 'Failed to initialize Tiller handler.' self.error(req.context, err_message) return self.return_error( resp, falcon.HTTP_500, message=err_message) try: documents = self.req_yaml(req, default=[]) except yaml.YAMLError: err_message = 'Documents must be valid YAML.' return self.return_error( resp, falcon.HTTP_400, message=err_message) is_valid = self._validate_documents(req, resp, documents) if not is_valid: return resp armada_obj = Manifest( documents, target_manifest=target_manifest).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_PREFIX) known_releases = [release[0] for release in tiller.list_charts()] message = { 'tests': { 'passed': [], 'skipped': [], 'failed': [] } } for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): release_name = release_prefix( prefix, ch.get('chart').get('chart_name')) if release_name in known_releases: self.logger.info('RUNNING: %s tests', release_name) resp = tiller.testing_release(release_name) if not resp: continue test_status = getattr( resp.info.status, 'last_test_suite_run', 'FAILED') if test_status.results[0].status: self.logger.info("PASSED: %s", release_name) message['test']['passed'].append(release_name) else: self.logger.info("FAILED: %s", release_name) message['test']['failed'].append(release_name) else: self.logger.info( 'Release %s not found - SKIPPING', release_name) message['test']['skipped'].append(release_name) resp.status = falcon.HTTP_200 resp.body = json.dumps(message) resp.content_type = 'application/json'
class Armada(object): ''' This is the main Armada class handling the Armada workflows ''' def __init__(self, documents, disable_update_pre=False, disable_update_post=False, enable_chart_cleanup=False, dry_run=False, set_ovr=None, force_wait=False, timeout=0, tiller_host=None, tiller_port=None, tiller_namespace=None, values=None, target_manifest=None, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1): ''' Initialize the Armada engine and establish a connection to Tiller. :param List[dict] documents: Armada documents. :param bool disable_update_pre: Disable pre-update Tiller operations. :param bool disable_update_post: Disable post-update Tiller operations. :param bool enable_chart_cleanup: Clean up unmanaged charts. :param bool dry_run: Run charts without installing them. :param bool force_wait: Force Tiller to wait until all charts are deployed, rather than using each chart's specified wait policy. :param int timeout: Specifies overall time in seconds that Tiller should wait for charts until timing out. :param str tiller_host: Tiller host IP. Default is None. :param int tiller_port: Tiller host port. Default is ``CONF.tiller_port``. :param str tiller_namespace: Tiller host namespace. Default is ``CONF.tiller_namespace``. :param str target_manifest: The target manifest to run. Useful for specifying which manifest to run when multiple are available. :param int k8s_wait_attempts: The number of times to attempt waiting for pods to become ready. :param int k8s_wait_attempt_sleep: The time in seconds to sleep between attempts. ''' tiller_port = tiller_port or CONF.tiller_port tiller_namespace = tiller_namespace or CONF.tiller_namespace self.disable_update_pre = disable_update_pre self.disable_update_post = disable_update_post self.enable_chart_cleanup = enable_chart_cleanup self.dry_run = dry_run self.force_wait = force_wait self.timeout = timeout self.tiller = Tiller(tiller_host=tiller_host, tiller_port=tiller_port, tiller_namespace=tiller_namespace) self.documents = Override(documents, overrides=set_ovr, values=values).update_manifests() self.k8s_wait_attempts = k8s_wait_attempts self.k8s_wait_attempt_sleep = k8s_wait_attempt_sleep self.manifest = Manifest( self.documents, target_manifest=target_manifest).get_manifest() def find_release_chart(self, known_releases, name): ''' Find a release given a list of known_releases and a release name ''' for chart_name, _, chart, values, _ in known_releases: if chart_name == name: return chart, values def pre_flight_ops(self): """Perform a series of checks and operations to ensure proper deployment. 
""" LOG.info("Performing pre-flight operations.") # Ensure Tiller is available and manifest is valid if not self.tiller.tiller_status(): raise tiller_exceptions.TillerServicesUnavailableException() valid, details = validate.validate_armada_documents(self.documents) if details: for msg in details: if msg.get('error', False): LOG.error(msg.get('message', 'Unknown validation error.')) else: LOG.debug(msg.get('message', 'Validation succeeded.')) if not valid: raise validate_exceptions.InvalidManifestException( error_messages=details) result, msg_list = validate.validate_armada_manifests(self.documents) if not result: raise validate_exceptions.InvalidArmadaObjectException( details=','.join([m.get('message') for m in msg_list])) # Purge known releases that have failed and are in the current yaml manifest_data = self.manifest.get(KEYWORD_ARMADA, {}) prefix = manifest_data.get(KEYWORD_PREFIX, '') failed_releases = self.get_releases_by_status(STATUS_FAILED) for release in failed_releases: for group in manifest_data.get(KEYWORD_GROUPS, []): for ch in group.get(KEYWORD_CHARTS, []): ch_release_name = release_prefix( prefix, ch.get('chart', {}).get('chart_name')) if release[0] == ch_release_name: LOG.info( 'Purging failed release %s ' 'before deployment', release[0]) self.tiller.uninstall_release(release[0]) # Clone the chart sources # # We only support a git source type right now, which can also # handle git:// local paths as well repos = {} for group in manifest_data.get(KEYWORD_GROUPS, []): for ch in group.get(KEYWORD_CHARTS, []): self.tag_cloned_repo(ch, repos) for dep in ch.get('chart', {}).get('dependencies', []): self.tag_cloned_repo(dep, repos) def tag_cloned_repo(self, ch, repos): chart = ch.get('chart', {}) chart_source = chart.get('source', {}) location = chart_source.get('location') ct_type = chart_source.get('type') subpath = chart_source.get('subpath', '.') if ct_type == 'local': chart['source_dir'] = (location, subpath) elif ct_type == 'tar': LOG.info('Downloading tarball from: %s', location) if not CONF.certs: LOG.warn('Disabling server validation certs to extract charts') tarball_dir = source.get_tarball(location, verify=False) else: tarball_dir = source.get_tarball(location, verify=CONF.cert) chart['source_dir'] = (tarball_dir, subpath) elif ct_type == 'git': reference = chart_source.get('reference', 'master') repo_branch = (location, reference) if repo_branch not in repos: auth_method = chart_source.get('auth_method') proxy_server = chart_source.get('proxy_server') logstr = 'Cloning repo: {} from branch: {}'.format( *repo_branch) if proxy_server: logstr += ' proxy: {}'.format(proxy_server) if auth_method: logstr += ' auth method: {}'.format(auth_method) LOG.info(logstr) repo_dir = source.git_clone(*repo_branch, proxy_server=proxy_server, auth_method=auth_method) repos[repo_branch] = repo_dir chart['source_dir'] = (repo_dir, subpath) else: chart['source_dir'] = (repos.get(repo_branch), subpath) else: chart_name = chart.get('chart_name') raise source_exceptions.ChartSourceException(ct_type, chart_name) def get_releases_by_status(self, status): ''' :params status - status string to filter releases on Return a list of current releases with a specified status ''' filtered_releases = [] known_releases = self.tiller.list_charts() for release in known_releases: if release[4] == status: filtered_releases.append(release) return filtered_releases def sync(self): ''' Synchronize Helm with the Armada Config(s) ''' msg = {'install': [], 'upgrade': [], 'diff': []} # TODO: (gardlt) we need to break 
up this func into # a more cleaner format self.pre_flight_ops() # extract known charts on tiller right now known_releases = self.tiller.list_charts() manifest_data = self.manifest.get(KEYWORD_ARMADA, {}) prefix = manifest_data.get(KEYWORD_PREFIX, '') for chartgroup in manifest_data.get(KEYWORD_GROUPS, []): cg_name = chartgroup.get('name', '<missing name>') cg_desc = chartgroup.get('description', '<missing description>') LOG.info('Processing ChartGroup: %s (%s)', cg_name, cg_desc) cg_sequenced = chartgroup.get('sequenced', False) cg_test_all_charts = chartgroup.get('test_charts', False) namespaces_seen = set() tests_to_run = [] cg_charts = chartgroup.get(KEYWORD_CHARTS, []) # Track largest Chart timeout to stop the ChartGroup at the end cg_max_timeout = 0 for chart_entry in cg_charts: chart = chart_entry.get('chart', {}) namespace = chart.get('namespace') release = chart.get('release') values = chart.get('values', {}) pre_actions = {} post_actions = {} wait_timeout = self.timeout wait_labels = {} release_name = release_prefix(prefix, release) # Retrieve appropriate timeout value if wait_timeout <= 0: # TODO(MarshM): chart's `data.timeout` should be deprecated chart_timeout = chart.get('timeout', 0) # Favor data.wait.timeout over data.timeout, until removed wait_values = chart.get('wait', {}) wait_timeout = wait_values.get('timeout', chart_timeout) wait_labels = wait_values.get('labels', {}) this_chart_should_wait = (cg_sequenced or self.force_wait or wait_timeout > 0 or len(wait_labels) > 0) if this_chart_should_wait and wait_timeout <= 0: LOG.warn('No Chart timeout specified, using default: %ss', DEFAULT_CHART_TIMEOUT) wait_timeout = DEFAULT_CHART_TIMEOUT # Track namespaces + labels touched namespaces_seen.add((namespace, tuple(wait_labels.items()))) # Naively take largest timeout to apply at end # TODO(MarshM) better handling of timeout/timer cg_max_timeout = max(wait_timeout, cg_max_timeout) # Chart test policy can override ChartGroup, if specified test_this_chart = chart.get('test', cg_test_all_charts) chartbuilder = ChartBuilder(chart) protoc_chart = chartbuilder.get_helm_chart() deployed_releases = [x[0] for x in known_releases] # Begin Chart timeout deadline deadline = time.time() + wait_timeout # TODO(mark-burnett): It may be more robust to directly call # tiller status to decide whether to install/upgrade rather # than checking for list membership. 
if release_name in deployed_releases: # indicate to the end user what path we are taking LOG.info("Upgrading release %s in namespace %s", release_name, namespace) # extract the installed chart and installed values from the # latest release so we can compare to the intended state apply_chart, apply_values = self.find_release_chart( known_releases, release_name) upgrade = chart.get('upgrade', {}) disable_hooks = upgrade.get('no_hooks', False) LOG.info("Checking Pre/Post Actions") if upgrade: upgrade_pre = upgrade.get('pre', {}) upgrade_post = upgrade.get('post', {}) if not self.disable_update_pre and upgrade_pre: pre_actions = upgrade_pre if not self.disable_update_post and upgrade_post: post_actions = upgrade_post # Show delta for both the chart templates and the chart # values # TODO(alanmeadows) account for .files differences # once we support those LOG.info('Checking upgrade chart diffs.') upgrade_diff = self.show_diff(chart, apply_chart, apply_values, chartbuilder.dump(), values, msg) if not upgrade_diff: LOG.info("There are no updates found in this chart") continue # TODO(MarshM): Add tiller dry-run before upgrade and # consider deadline impacts # do actual update timer = int(round(deadline - time.time())) LOG.info('Beginning Upgrade, wait=%s, timeout=%ss', this_chart_should_wait, timer) tiller_result = self.tiller.update_release( protoc_chart, release_name, namespace, pre_actions=pre_actions, post_actions=post_actions, dry_run=self.dry_run, disable_hooks=disable_hooks, values=yaml.safe_dump(values), wait=this_chart_should_wait, timeout=timer) if this_chart_should_wait: self.tiller.k8s.wait_until_ready( release=release_name, labels=wait_labels, namespace=namespace, k8s_wait_attempts=self.k8s_wait_attempts, k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep, timeout=timer) LOG.info('Upgrade completed with results from Tiller: %s', tiller_result.__dict__) msg['upgrade'].append(release_name) # process install else: LOG.info("Installing release %s in namespace %s", release_name, namespace) timer = int(round(deadline - time.time())) LOG.info('Beginning Install, wait=%s, timeout=%ss', this_chart_should_wait, timer) tiller_result = self.tiller.install_release( protoc_chart, release_name, namespace, dry_run=self.dry_run, values=yaml.safe_dump(values), wait=this_chart_should_wait, timeout=timer) if this_chart_should_wait: self.tiller.k8s.wait_until_ready( release=release_name, labels=wait_labels, namespace=namespace, k8s_wait_attempts=self.k8s_wait_attempts, k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep, timeout=timer) LOG.info('Install completed with results from Tiller: %s', tiller_result.__dict__) msg['install'].append(release_name) # Sequenced ChartGroup should run tests after each Chart timer = int(round(deadline - time.time())) if test_this_chart and cg_sequenced: LOG.info('Running sequenced test, timeout remaining: %ss.', timer) if timer <= 0: reason = ('Timeout expired before testing sequenced ' 'release %s' % release_name) LOG.error(reason) raise ArmadaTimeoutException(reason) self._test_chart(release_name, timer) # Un-sequenced ChartGroup should run tests at the end elif test_this_chart: # Keeping track of time remaining tests_to_run.append((release_name, timer)) # End of Charts in ChartGroup LOG.info('All Charts applied.') # After all Charts are applied, we should wait for the entire # ChartGroup to become healthy by looking at the namespaces seen # TODO(MarshM): Need to restrict to only releases we processed # TODO(MarshM): Need to determine a better timeout # (not 
cg_max_timeout) if cg_max_timeout <= 0: cg_max_timeout = DEFAULT_CHART_TIMEOUT deadline = time.time() + cg_max_timeout for (ns, labels) in namespaces_seen: labels_dict = dict(labels) timer = int(round(deadline - time.time())) LOG.info( 'Final wait for healthy namespace (%s), label=(%s), ' 'timeout remaining: %ss.', ns, labels_dict, timer) if timer <= 0: reason = ('Timeout expired waiting on namespace: %s, ' 'label: %s' % (ns, labels_dict)) LOG.error(reason) raise ArmadaTimeoutException(reason) self.tiller.k8s.wait_until_ready( namespace=ns, labels=labels_dict, k8s_wait_attempts=self.k8s_wait_attempts, k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep, timeout=timer) # After entire ChartGroup is healthy, run any pending tests for (test, test_timer) in tests_to_run: self._test_chart(test, test_timer) LOG.info("Performing Post-Flight Operations") self.post_flight_ops() if self.enable_chart_cleanup: self.tiller.chart_cleanup( prefix, self.manifest[KEYWORD_ARMADA][KEYWORD_GROUPS]) return msg def post_flight_ops(self): ''' Operations to run after deployment process has terminated ''' # Delete temp dirs used for deployment for group in self.manifest.get(KEYWORD_ARMADA, {}).get(KEYWORD_GROUPS, []): for ch in group.get(KEYWORD_CHARTS, []): chart = ch.get('chart', {}) if chart.get('source', {}).get('type') == 'git': source_dir = chart.get('source_dir') if isinstance(source_dir, tuple) and source_dir: source.source_cleanup(source_dir[0]) def _test_chart(self, release_name, timeout): # TODO(MarshM): Fix testing, it's broken, and track timeout resp = self.tiller.testing_release(release_name, timeout=timeout) status = getattr(resp.info.status, 'last_test_suite_run', 'FAILED') LOG.info("Test INFO: %s", status) if resp: LOG.info("PASSED: %s", release_name) return True else: LOG.info("FAILED: %s", release_name) return False def show_diff(self, chart, installed_chart, installed_values, target_chart, target_values, msg): '''Produce a unified diff of the installed chart vs our intention''' # TODO(MarshM) This gives decent output comparing values. Would be # nice to clean it up further. Are \\n or \n\n ever valid diffs? # Can these be cleanly converted to dicts, for easier compare? def _sanitize_diff_str(str): return str.replace('\\n', '\n').replace('\n\n', '\n').split('\n') source = _sanitize_diff_str(str(installed_chart.SerializeToString())) target = _sanitize_diff_str(str(target_chart)) chart_diff = list(difflib.unified_diff(source, target, n=0)) chart_release = chart.get('release', None) if len(chart_diff) > 0: LOG.info("Found diff in Chart (%s)", chart_release) diff_msg = [] for line in chart_diff: diff_msg.append(line) msg['diff'].append({'chart': diff_msg}) pretty_diff = '\n'.join(diff_msg) LOG.debug(pretty_diff) source = _sanitize_diff_str(installed_values) target = _sanitize_diff_str(yaml.safe_dump(target_values)) values_diff = list(difflib.unified_diff(source, target, n=0)) if len(values_diff) > 0: LOG.info("Found diff in values (%s)", chart_release) diff_msg = [] for line in values_diff: diff_msg.append(line) msg['diff'].append({'values': diff_msg}) pretty_diff = '\n'.join(diff_msg) LOG.debug(pretty_diff) result = (len(chart_diff) > 0) or (len(values_diff) > 0) return result
def on_post(self, req, resp): # TODO(fmontei): Validation Content-Type is application/x-yaml. target_manifest = req.get_param('target_manifest', None) try: tiller = Tiller(tiller_host=req.get_param('tiller_host'), tiller_port=req.get_param_as_int('tiller_port') or CONF.tiller_port, tiller_namespace=req.get_param( 'tiller_namespace', default=CONF.tiller_namespace)) # TODO(fmontei): Provide more sensible exception(s) here. except Exception: err_message = 'Failed to initialize Tiller handler.' self.error(req.context, err_message) return self.return_error(resp, falcon.HTTP_500, message=err_message) try: documents = self.req_yaml(req, default=[]) except yaml.YAMLError: err_message = 'Documents must be valid YAML.' return self.return_error(resp, falcon.HTTP_400, message=err_message) is_valid = self._validate_documents(req, resp, documents) if not is_valid: return resp armada_obj = Manifest(documents, target_manifest=target_manifest).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get(const.KEYWORD_PREFIX) known_releases = [release[0] for release in tiller.list_charts()] message = {'tests': {'passed': [], 'skipped': [], 'failed': []}} for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): chart = ch['chart'] release_name = release_prefixer(prefix, chart.get('release')) cleanup = req.get_param_as_bool('cleanup') if cleanup is None: test_chart_override = chart.get('test', {}) if isinstance(test_chart_override, bool): self.logger.warn( 'Boolean value for chart `test` key is deprecated ' 'and will be removed. Use `test.enabled` instead.') # Use old default value. cleanup = True else: cleanup = test_chart_override.get('options', {}).get( 'cleanup', False) if release_name in known_releases: self.logger.info('RUNNING: %s tests', release_name) success = test_release_for_success(tiller, release_name, cleanup=cleanup) if success: self.logger.info("PASSED: %s", release_name) message['test']['passed'].append(release_name) else: self.logger.info("FAILED: %s", release_name) message['test']['failed'].append(release_name) else: self.logger.info('Release %s not found - SKIPPING', release_name) message['test']['skipped'].append(release_name) resp.status = falcon.HTTP_200 resp.body = json.dumps(message) resp.content_type = 'application/json'
def invoke(self): tiller = Tiller( tiller_host=self.tiller_host, tiller_port=self.tiller_port, tiller_namespace=self.tiller_namespace) known_release_names = [release[0] for release in tiller.list_charts()] if self.release: if not self.ctx.obj.get('api', False): self.logger.info("RUNNING: %s tests", self.release) success = test_release_for_success( tiller, self.release, cleanup=self.cleanup) if success: self.logger.info("PASSED: %s", self.release) else: self.logger.info("FAILED: %s", self.release) else: client = self.ctx.obj.get('CLIENT') query = { 'tiller_host': self.tiller_host, 'tiller_port': self.tiller_port, 'tiller_namespace': self.tiller_namespace } resp = client.get_test_release( release=self.release, query=query) self.logger.info(resp.get('result')) self.logger.info(resp.get('message')) if self.file: if not self.ctx.obj.get('api', False): documents = list(yaml.safe_load_all(open(self.file).read())) armada_obj = Manifest( documents, target_manifest=self.target_manifest).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_PREFIX) for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): chart = ch['chart'] release_name = release_prefixer( prefix, chart.get('release')) if release_name in known_release_names: cleanup = self.cleanup if cleanup is None: test_chart_override = chart.get('test', {}) if isinstance(test_chart_override, bool): self.logger.warn( 'Boolean value for chart `test` key is' ' deprecated and support for this will' ' be removed. Use `test.enabled` ' 'instead.') # Use old default value. cleanup = True else: cleanup = test_chart_override.get( 'options', {}).get('cleanup', False) self.logger.info('RUNNING: %s tests', release_name) success = test_release_for_success( tiller, release_name, cleanup=cleanup) if success: self.logger.info("PASSED: %s", release_name) else: self.logger.info("FAILED: %s", release_name) else: self.logger.info('Release %s not found - SKIPPING', release_name) else: client = self.ctx.obj.get('CLIENT') query = { 'tiller_host': self.tiller_host, 'tiller_port': self.tiller_port, 'tiller_namespace': self.tiller_namespace } with open(self.filename, 'r') as f: resp = client.get_test_manifest( manifest=f.read(), query=query) for test in resp.get('tests'): self.logger.info('Test State: %s', test) for item in test.get('tests').get(test): self.logger.info(item) self.logger.info(resp)
class Armada(object): ''' This is the main Armada class handling the Armada workflows ''' def __init__(self, documents, helm, disable_update_pre=False, disable_update_post=False, enable_chart_cleanup=False, set_ovr=None, force_wait=False, timeout=None, values=None, target_manifest=None, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1): ''' Initialize the Armada engine. :param List[dict] documents: Armada documents. :param bool disable_update_pre: Disable pre-update operations. :param bool disable_update_post: Disable post-update operations. :param bool enable_chart_cleanup: Clean up unmanaged charts. :param bool force_wait: Force to wait until all charts are deployed, rather than using each chart's specified wait policy. :param int timeout: Specifies overall time in seconds t to wait for charts until timing out. :param str target_manifest: The target manifest to run. Useful for specifying which manifest to run when multiple are available. :param int k8s_wait_attempts: The number of times to attempt waiting for pods to become ready. :param int k8s_wait_attempt_sleep: The time in seconds to sleep between attempts. ''' self.enable_chart_cleanup = enable_chart_cleanup self.force_wait = force_wait self.helm = helm try: self.documents = Override(documents, overrides=set_ovr, values=values).update_manifests() except (validate_exceptions.InvalidManifestException, override_exceptions.InvalidOverrideValueException): raise self.manifest = Manifest( self.documents, target_manifest=target_manifest).get_manifest() self.chart_download = ChartDownload() self.chart_deploy = ChartDeploy(self.manifest, disable_update_pre, disable_update_post, k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, self.helm) def pre_flight_ops(self): """Perform a series of checks and operations to ensure proper deployment. """ LOG.info("Performing pre-flight operations.") # Clone the chart sources manifest_data = self.manifest.get(const.KEYWORD_DATA, {}) for group in manifest_data.get(const.KEYWORD_GROUPS, []): for ch in group.get(const.KEYWORD_DATA).get( const.KEYWORD_CHARTS, []): self.chart_download.get_chart(ch, manifest=self.manifest) def sync(self): ''' Synchronize Helm with the Armada Config(s) ''' manifest_name = self.manifest['metadata']['name'] with metrics.APPLY.get_context(manifest_name): return self._sync() def _sync(self): msg = { 'install': [], 'upgrade': [], 'diff': [], 'purge': [], 'protected': [] } # TODO: (gardlt) we need to break up this func into # a more cleaner format self.pre_flight_ops() manifest_data = self.manifest.get(const.KEYWORD_DATA, {}) prefix = manifest_data.get(const.KEYWORD_PREFIX) for cg in manifest_data.get(const.KEYWORD_GROUPS, []): chartgroup = cg.get(const.KEYWORD_DATA) cg_name = cg.get('metadata').get('name') cg_desc = chartgroup.get('description', '<missing description>') cg_sequenced = chartgroup.get('sequenced', False) or self.force_wait LOG.info('Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name, cg_desc, cg_sequenced, ' (forced)' if self.force_wait else '') # TODO: Remove when v1 doc support is removed. 
cg_test_all_charts = chartgroup.get('test_charts') cg_charts = chartgroup.get(const.KEYWORD_CHARTS, []) def deploy_chart(chart, concurrency): set_current_chart(chart) try: return self.chart_deploy.execute(chart, cg_test_all_charts, prefix, concurrency) finally: set_current_chart(None) results = [] failures = [] # Returns whether or not there was a failure def handle_result(chart, get_result): name = chart['metadata']['name'] try: result = get_result() except Exception: LOG.exception('Chart deploy [{}] failed'.format(name)) failures.append(name) return True else: results.append(result) return False if cg_sequenced: for chart in cg_charts: if (handle_result(chart, lambda: deploy_chart(chart, 1))): break else: with ThreadPoolExecutor( max_workers=len(cg_charts)) as executor: future_to_chart = { executor.submit(deploy_chart, chart, len(cg_charts)): chart for chart in cg_charts } for future in as_completed(future_to_chart): chart = future_to_chart[future] handle_result(chart, future.result) if failures: LOG.error('Chart deploy(s) failed: %s', failures) raise armada_exceptions.ChartDeployException(failures) for result in results: for k, v in result.items(): msg[k].append(v) # End of Charts in ChartGroup LOG.info('All Charts applied in ChartGroup %s.', cg_name) self.post_flight_ops() if self.enable_chart_cleanup: self._chart_cleanup( prefix, self.manifest[const.KEYWORD_DATA][const.KEYWORD_GROUPS], msg) LOG.info('Done applying manifest.') return msg def post_flight_ops(self): ''' Operations to run after deployment process has terminated ''' LOG.info("Performing post-flight operations.") self.chart_download.cleanup() def _chart_cleanup(self, prefix, chart_groups, msg): LOG.info('Processing chart cleanup to remove unspecified releases.') valid_release_ids = [] for group in chart_groups: group_data = group.get(const.KEYWORD_DATA, {}) for chart in group_data.get(const.KEYWORD_CHARTS, []): chart_data = chart.get(const.KEYWORD_DATA, {}) valid_release_ids.append( HelmReleaseId( chart_data['namespace'], release_prefixer(prefix, chart_data['release']))) actual_release_ids = self.helm.list_release_ids() release_diff = list(set(actual_release_ids) - set(valid_release_ids)) for release_id in release_diff: if release_id.name.startswith(prefix): LOG.info('Purging release %s as part of chart cleanup.', release_id) self.helm.uninstall_release(release_id) msg['purge'].append('{}'.format(release_id))
class Armada(object): ''' This is the main Armada class handling the Armada workflows ''' def __init__(self, documents, tiller, disable_update_pre=False, disable_update_post=False, enable_chart_cleanup=False, set_ovr=None, force_wait=False, timeout=None, values=None, target_manifest=None, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1): ''' Initialize the Armada engine and establish a connection to Tiller. :param List[dict] documents: Armada documents. :param tiller: Tiller instance to use. :param bool disable_update_pre: Disable pre-update Tiller operations. :param bool disable_update_post: Disable post-update Tiller operations. :param bool enable_chart_cleanup: Clean up unmanaged charts. :param bool force_wait: Force Tiller to wait until all charts are deployed, rather than using each chart's specified wait policy. :param int timeout: Specifies overall time in seconds that Tiller should wait for charts until timing out. :param str target_manifest: The target manifest to run. Useful for specifying which manifest to run when multiple are available. :param int k8s_wait_attempts: The number of times to attempt waiting for pods to become ready. :param int k8s_wait_attempt_sleep: The time in seconds to sleep between attempts. ''' self.enable_chart_cleanup = enable_chart_cleanup self.force_wait = force_wait self.tiller = tiller try: self.documents = Override(documents, overrides=set_ovr, values=values).update_manifests() except (validate_exceptions.InvalidManifestException, override_exceptions.InvalidOverrideValueException): raise self.manifest = Manifest( self.documents, target_manifest=target_manifest).get_manifest() self.chart_cache = {} self.chart_deploy = ChartDeploy(self.manifest, disable_update_pre, disable_update_post, k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, self.tiller) def pre_flight_ops(self): """Perform a series of checks and operations to ensure proper deployment. 
""" LOG.info("Performing pre-flight operations.") # Ensure Tiller is available and manifest is valid if not self.tiller.tiller_status(): raise tiller_exceptions.TillerServicesUnavailableException() # Clone the chart sources manifest_data = self.manifest.get(const.KEYWORD_DATA, {}) for group in manifest_data.get(const.KEYWORD_GROUPS, []): for ch in group.get(const.KEYWORD_DATA).get( const.KEYWORD_CHARTS, []): self.get_chart(ch) def get_chart(self, ch): manifest_name = self.manifest['metadata']['name'] chart_name = ch['metadata']['name'] with metrics.CHART_DOWNLOAD.get_context(manifest_name, chart_name): return self._get_chart(ch) def _get_chart(self, ch): chart = ch.get(const.KEYWORD_DATA) chart_source = chart.get('source', {}) location = chart_source.get('location') ct_type = chart_source.get('type') subpath = chart_source.get('subpath', '.') proxy_server = chart_source.get('proxy_server') if ct_type == 'local': chart['source_dir'] = (location, subpath) elif ct_type == 'tar': source_key = (ct_type, location) if source_key not in self.chart_cache: LOG.info("Downloading tarball from: %s / proxy %s", location, proxy_server or "not set") if not CONF.certs: LOG.warn( 'Disabling server validation certs to extract charts') tarball_dir = source.get_tarball(location, verify=False, proxy_server=proxy_server) else: tarball_dir = source.get_tarball(location, verify=CONF.certs, proxy_server=proxy_server) self.chart_cache[source_key] = tarball_dir chart['source_dir'] = (self.chart_cache.get(source_key), subpath) elif ct_type == 'git': reference = chart_source.get('reference', 'master') source_key = (ct_type, location, reference) if source_key not in self.chart_cache: auth_method = chart_source.get('auth_method') logstr = 'Cloning repo: {} from branch: {}'.format( location, reference) if proxy_server: logstr += ' proxy: {}'.format(proxy_server) if auth_method: logstr += ' auth method: {}'.format(auth_method) LOG.info(logstr) repo_dir = source.git_clone(location, reference, proxy_server=proxy_server, auth_method=auth_method) self.chart_cache[source_key] = repo_dir chart['source_dir'] = (self.chart_cache.get(source_key), subpath) else: name = ch['metadata']['name'] raise source_exceptions.ChartSourceException(ct_type, name) for dep in ch.get(const.KEYWORD_DATA, {}).get('dependencies', []): self.get_chart(dep) def sync(self): ''' Synchronize Helm with the Armada Config(s) ''' manifest_name = self.manifest['metadata']['name'] with metrics.APPLY.get_context(manifest_name): return self._sync() def _sync(self): msg = { 'install': [], 'upgrade': [], 'diff': [], 'purge': [], 'protected': [] } # TODO: (gardlt) we need to break up this func into # a more cleaner format self.pre_flight_ops() known_releases = self.tiller.list_releases() manifest_data = self.manifest.get(const.KEYWORD_DATA, {}) prefix = manifest_data.get(const.KEYWORD_PREFIX) for cg in manifest_data.get(const.KEYWORD_GROUPS, []): chartgroup = cg.get(const.KEYWORD_DATA) cg_name = cg.get('metadata').get('name') cg_desc = chartgroup.get('description', '<missing description>') cg_sequenced = chartgroup.get('sequenced', False) or self.force_wait LOG.info('Processing ChartGroup: %s (%s), sequenced=%s%s', cg_name, cg_desc, cg_sequenced, ' (forced)' if self.force_wait else '') # TODO: Remove when v1 doc support is removed. 
cg_test_all_charts = chartgroup.get('test_charts') cg_charts = chartgroup.get(const.KEYWORD_CHARTS, []) def deploy_chart(chart, concurrency): set_current_chart(chart) try: return self.chart_deploy.execute(chart, cg_test_all_charts, prefix, known_releases, concurrency) finally: set_current_chart(None) results = [] failures = [] # Returns whether or not there was a failure def handle_result(chart, get_result): name = chart['metadata']['name'] try: result = get_result() except Exception: LOG.exception('Chart deploy [{}] failed'.format(name)) failures.append(name) return True else: results.append(result) return False if cg_sequenced: for chart in cg_charts: if (handle_result(chart, lambda: deploy_chart(chart, 1))): break else: with ThreadPoolExecutor( max_workers=len(cg_charts)) as executor: future_to_chart = { executor.submit(deploy_chart, chart, len(cg_charts)): chart for chart in cg_charts } for future in as_completed(future_to_chart): chart = future_to_chart[future] handle_result(chart, future.result) if failures: LOG.error('Chart deploy(s) failed: %s', failures) raise armada_exceptions.ChartDeployException(failures) for result in results: for k, v in result.items(): msg[k].append(v) # End of Charts in ChartGroup LOG.info('All Charts applied in ChartGroup %s.', cg_name) self.post_flight_ops() if self.enable_chart_cleanup: self._chart_cleanup( prefix, self.manifest[const.KEYWORD_DATA][const.KEYWORD_GROUPS], msg) LOG.info('Done applying manifest.') return msg def post_flight_ops(self): ''' Operations to run after deployment process has terminated ''' LOG.info("Performing post-flight operations.") # Delete temp dirs used for deployment for chart_dir in self.chart_cache.values(): LOG.debug('Removing temp chart directory: %s', chart_dir) source.source_cleanup(chart_dir) def _chart_cleanup(self, prefix, chart_groups, msg): LOG.info('Processing chart cleanup to remove unspecified releases.') valid_releases = [] for group in chart_groups: group_data = group.get(const.KEYWORD_DATA, {}) for chart in group_data.get(const.KEYWORD_CHARTS, []): chart_data = chart.get(const.KEYWORD_DATA, {}) valid_releases.append( release_prefixer(prefix, chart_data.get('release'))) actual_releases = [x.name for x in self.tiller.list_releases()] release_diff = list(set(actual_releases) - set(valid_releases)) for release in release_diff: if release.startswith(prefix): LOG.info('Purging release %s as part of chart cleanup.', release) self.tiller.uninstall_release(release) msg['purge'].append(release)
def invoke(self): tiller = Tiller( tiller_host=self.tiller_host, tiller_port=self.tiller_port) known_release_names = [release[0] for release in tiller.list_charts()] if self.release: if not self.ctx.obj.get('api', False): self.logger.info("RUNNING: %s tests", self.release) resp = tiller.testing_release(self.release) if not resp: self.logger.info("FAILED: %s", self.release) return test_status = getattr(resp.info.status, 'last_test_suite_run', 'FAILED') if test_status.results[0].status: self.logger.info("PASSED: %s", self.release) else: self.logger.info("FAILED: %s", self.release) else: client = self.ctx.obj.get('CLIENT') resp = client.get_test_release(release=self.release) self.logger.info(resp.get('result')) self.logger.info(resp.get('message')) if self.file: if not self.ctx.obj.get('api', False): documents = yaml.safe_load_all(open(self.file).read()) armada_obj = Manifest(documents).get_manifest() prefix = armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_PREFIX) for group in armada_obj.get(const.KEYWORD_ARMADA).get( const.KEYWORD_GROUPS): for ch in group.get(const.KEYWORD_CHARTS): release_name = release_prefix( prefix, ch.get('chart').get('chart_name')) if release_name in known_release_names: self.logger.info('RUNNING: %s tests', release_name) resp = tiller.testing_release(release_name) if not resp: continue test_status = getattr( resp.info.status, 'last_test_suite_run', 'FAILED') if test_status.results[0].status: self.logger.info("PASSED: %s", release_name) else: self.logger.info("FAILED: %s", release_name) else: self.logger.info( 'Release %s not found - SKIPPING', release_name) else: client = self.ctx.obj.get('CLIENT') with open(self.filename, 'r') as f: resp = client.get_test_manifest(manifest=f.read()) for test in resp.get('tests'): self.logger.info('Test State: %s', test) for item in test.get('tests').get(test): self.logger.info(item) self.logger.info(resp)
class Armada(object):
    '''
    This is the main Armada class handling the Armada workflows
    '''

    def __init__(self,
                 documents,
                 disable_update_pre=False,
                 disable_update_post=False,
                 enable_chart_cleanup=False,
                 dry_run=False,
                 set_ovr=None,
                 force_wait=False,
                 timeout=0,
                 tiller_host=None,
                 tiller_port=None,
                 tiller_namespace=None,
                 values=None,
                 target_manifest=None,
                 k8s_wait_attempts=1,
                 k8s_wait_attempt_sleep=1):
        '''
        Initialize the Armada engine and establish a connection to Tiller.

        :param List[dict] documents: Armada documents.
        :param bool disable_update_pre: Disable pre-update Tiller operations.
        :param bool disable_update_post: Disable post-update Tiller
            operations.
        :param bool enable_chart_cleanup: Clean up unmanaged charts.
        :param bool dry_run: Run charts without installing them.
        :param bool force_wait: Force Tiller to wait until all charts are
            deployed, rather than using each chart's specified wait policy.
        :param int timeout: Specifies overall time in seconds that Tiller
            should wait for charts until timing out.
        :param str tiller_host: Tiller host IP. Default is None.
        :param int tiller_port: Tiller host port. Default is
            ``CONF.tiller_port``.
        :param str tiller_namespace: Tiller host namespace. Default is
            ``CONF.tiller_namespace``.
        :param str target_manifest: The target manifest to run. Useful for
            specifying which manifest to run when multiple are available.
        :param int k8s_wait_attempts: The number of times to attempt waiting
            for pods to become ready.
        :param int k8s_wait_attempt_sleep: The time in seconds to sleep
            between attempts.
        '''
        tiller_port = tiller_port or CONF.tiller_port
        tiller_namespace = tiller_namespace or CONF.tiller_namespace

        self.disable_update_pre = disable_update_pre
        self.disable_update_post = disable_update_post
        self.enable_chart_cleanup = enable_chart_cleanup
        self.dry_run = dry_run
        self.force_wait = force_wait
        self.timeout = timeout
        # TODO: Use dependency injection i.e. pass in a Tiller instead of
        #       creating it here.
        self.tiller = Tiller(
            tiller_host=tiller_host,
            tiller_port=tiller_port,
            tiller_namespace=tiller_namespace,
            dry_run=dry_run)
        try:
            self.documents = Override(
                documents, overrides=set_ovr,
                values=values).update_manifests()
        except (validate_exceptions.InvalidManifestException,
                override_exceptions.InvalidOverrideValueException):
            raise
        self.k8s_wait_attempts = k8s_wait_attempts
        self.k8s_wait_attempt_sleep = k8s_wait_attempt_sleep
        self.manifest = Manifest(
            self.documents, target_manifest=target_manifest).get_manifest()
        self.cloned_dirs = set()

    def find_release_chart(self, known_releases, release_name):
        '''
        Find a release given a list of known_releases and a release name
        '''
        for release, _, chart, values, _ in known_releases:
            if release == release_name:
                return chart, values
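    # NOTE (editorial sketch): minimal illustration of driving this class
    # directly. `manifest.yaml` and the keyword arguments below are
    # illustrative only; `docs` is assumed to be a list of parsed Armada
    # documents as described in the constructor docstring:
    #
    #     with open('manifest.yaml') as f:
    #         docs = list(yaml.safe_load_all(f))
    #     armada = Armada(docs, enable_chart_cleanup=True, timeout=600)
    #     result = armada.sync()  # returns the install/upgrade/purge summary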
""" LOG.info("Performing pre-flight operations.") # Ensure Tiller is available and manifest is valid if not self.tiller.tiller_status(): raise tiller_exceptions.TillerServicesUnavailableException() # Clone the chart sources repos = {} manifest_data = self.manifest.get(const.KEYWORD_ARMADA, {}) for group in manifest_data.get(const.KEYWORD_GROUPS, []): for ch in group.get(const.KEYWORD_CHARTS, []): self.tag_cloned_repo(ch, repos) for dep in ch.get('chart', {}).get('dependencies', []): self.tag_cloned_repo(dep, repos) def tag_cloned_repo(self, ch, repos): chart = ch.get('chart', {}) chart_source = chart.get('source', {}) location = chart_source.get('location') ct_type = chart_source.get('type') subpath = chart_source.get('subpath', '.') if ct_type == 'local': chart['source_dir'] = (location, subpath) elif ct_type == 'tar': LOG.info('Downloading tarball from: %s', location) if not CONF.certs: LOG.warn('Disabling server validation certs to extract charts') tarball_dir = source.get_tarball(location, verify=False) else: tarball_dir = source.get_tarball(location, verify=CONF.cert) chart['source_dir'] = (tarball_dir, subpath) elif ct_type == 'git': reference = chart_source.get('reference', 'master') repo_branch = (location, reference) if repo_branch not in repos: auth_method = chart_source.get('auth_method') proxy_server = chart_source.get('proxy_server') logstr = 'Cloning repo: {} from branch: {}'.format( *repo_branch) if proxy_server: logstr += ' proxy: {}'.format(proxy_server) if auth_method: logstr += ' auth method: {}'.format(auth_method) LOG.info(logstr) repo_dir = source.git_clone(*repo_branch, proxy_server=proxy_server, auth_method=auth_method) self.cloned_dirs.add(repo_dir) repos[repo_branch] = repo_dir chart['source_dir'] = (repo_dir, subpath) else: chart['source_dir'] = (repos.get(repo_branch), subpath) else: chart_name = chart.get('chart_name') raise source_exceptions.ChartSourceException(ct_type, chart_name) def _get_releases_by_status(self): ''' Return a list of current releases with DEPLOYED or FAILED status ''' deployed_releases = [] failed_releases = [] known_releases = self.tiller.list_charts() for release in known_releases: if release[4] == const.STATUS_DEPLOYED: deployed_releases.append(release) elif release[4] == const.STATUS_FAILED: failed_releases.append(release) else: # tiller.list_charts() only looks at DEPLOYED/FAILED so # this should be unreachable LOG.debug('Ignoring release %s in status %s.', release[0], release[4]) return deployed_releases, failed_releases def sync(self): ''' Synchronize Helm with the Armada Config(s) ''' if self.dry_run: LOG.info('Armada is in DRY RUN mode, no changes being made.') msg = { 'install': [], 'upgrade': [], 'diff': [], 'purge': [], 'protected': [] } # TODO: (gardlt) we need to break up this func into # a more cleaner format self.pre_flight_ops() # extract known charts on tiller right now deployed_releases, failed_releases = self._get_releases_by_status() manifest_data = self.manifest.get(const.KEYWORD_ARMADA, {}) prefix = manifest_data.get(const.KEYWORD_PREFIX) for chartgroup in manifest_data.get(const.KEYWORD_GROUPS, []): cg_name = chartgroup.get('name', '<missing name>') cg_desc = chartgroup.get('description', '<missing description>') cg_sequenced = chartgroup.get('sequenced', False) LOG.info('Processing ChartGroup: %s (%s), sequenced=%s', cg_name, cg_desc, cg_sequenced) # TODO(MarshM): Deprecate the `test_charts` key cg_test_all_charts = chartgroup.get('test_charts') if isinstance(cg_test_all_charts, bool): LOG.warn('The ChartGroup 
            else:
                # This key defaults to True. Individual charts must
                # explicitly disable helm tests if they choose
                cg_test_all_charts = True

            ns_label_set = set()
            tests_to_run = []

            cg_charts = chartgroup.get(const.KEYWORD_CHARTS, [])

            # Track largest Chart timeout to stop the ChartGroup at the end
            cg_max_timeout = 0

            for chart_entry in cg_charts:
                chart = chart_entry.get('chart', {})
                namespace = chart.get('namespace')
                release = chart.get('release')
                release_name = release_prefixer(prefix, release)
                LOG.info('Processing Chart, release=%s', release_name)

                values = chart.get('values', {})
                pre_actions = {}
                post_actions = {}

                protected = chart.get('protected', {})
                p_continue = protected.get('continue_processing', False)

                # Check for existing FAILED release, and purge
                if release_name in [rel[0] for rel in failed_releases]:
                    LOG.info('Purging FAILED release %s before deployment.',
                             release_name)
                    if protected:
                        if p_continue:
                            LOG.warn(
                                'Release %s is `protected`, '
                                'continue_processing=True. Operator must '
                                'handle FAILED release manually.',
                                release_name)
                            msg['protected'].append(release_name)
                            continue
                        else:
                            LOG.error(
                                'Release %s is `protected`, '
                                'continue_processing=False.', release_name)
                            raise armada_exceptions.ProtectedReleaseException(
                                release_name)
                    else:
                        # Purge the release
                        self.tiller.uninstall_release(release_name)
                        msg['purge'].append(release_name)

                # NOTE(MarshM): Calculating `wait_timeout` is unfortunately
                # overly complex. The order of precedence is currently:
                # 1) User provided override via API/CLI (default 0 if not
                #    provided by client/user).
                # 2) Chart's `data.wait.timeout`, or...
                # 3) Chart's `data.timeout` (deprecated).
                # 4) const.DEFAULT_CHART_TIMEOUT, if nothing is ever
                #    specified, for use in waiting for final ChartGroup
                #    health and helm tests, but ignored for the actual
                #    install/upgrade of the Chart.
                # NOTE(MarshM): Not defining a timeout has a side effect of
                # allowing Armada to install charts with a circular
                # dependency defined between components.

                # TODO(MarshM): Deprecated, remove the following block
                deprecated_timeout = chart.get('timeout', None)
                if isinstance(deprecated_timeout, int):
                    LOG.warn('The `timeout` key is deprecated and support '
                             'for this will be removed soon. Use '
                             '`wait.timeout` instead.')

                wait_values = chart.get('wait', {})
                wait_labels = wait_values.get('labels', {})

                wait_timeout = self.timeout
                if wait_timeout <= 0:
                    wait_timeout = wait_values.get('timeout', wait_timeout)

                # TODO(MarshM): Deprecated, remove the following check
                if wait_timeout <= 0:
                    wait_timeout = deprecated_timeout or wait_timeout

                # Determine wait logic
                # NOTE(Dan Kim): The conditions under which a Chart waits are:
                # 1) `sequenced=True` is set on the chart group
                # 2) the `force_wait` param is set
                # 3) the Chart defines `data.wait.timeout`
                # The --timeout param does not set wait=True; it only changes
                # the maximum timeout of the chart's deployment
                # (default: 900).
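                # NOTE (editorial sketch): worked example of the precedence
                # described above; values are illustrative only. With no
                # CLI/API override (self.timeout == 0), a chart defining
                # `wait: {timeout: 300}` and a deprecated `timeout: 600`:
                #     wait_timeout = 0    -> still <= 0, keep resolving
                #     wait_timeout = 300  -> taken from `wait.timeout` (wins)
                #     (deprecated `timeout` and DEFAULT_CHART_TIMEOUT unused)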
                this_chart_should_wait = (cg_sequenced or self.force_wait or
                                          (bool(wait_values) and
                                           (wait_timeout > 0)))

                # If there is still no timeout, we need to use a default
                # (item 4 in note above)
                if wait_timeout <= 0:
                    LOG.warn('No Chart timeout specified, using default: %ss',
                             const.DEFAULT_CHART_TIMEOUT)
                    wait_timeout = const.DEFAULT_CHART_TIMEOUT

                # Naively take largest timeout to apply at end
                # TODO(MarshM) better handling of timeout/timer
                cg_max_timeout = max(wait_timeout, cg_max_timeout)

                test_chart_override = chart.get('test')
                # Use old default value when not using newer `test` key
                test_cleanup = True
                if test_chart_override is None:
                    test_this_chart = cg_test_all_charts
                elif isinstance(test_chart_override, bool):
                    LOG.warn('Boolean value for chart `test` key is '
                             'deprecated and support for this will be '
                             'removed. Use `test.enabled` instead.')
                    test_this_chart = test_chart_override
                else:
                    # NOTE: helm tests are enabled by default
                    test_this_chart = test_chart_override.get('enabled', True)
                    test_cleanup = test_chart_override.get('options', {}).get(
                        'cleanup', False)

                chartbuilder = ChartBuilder(chart)
                new_chart = chartbuilder.get_helm_chart()

                # Begin Chart timeout deadline
                deadline = time.time() + wait_timeout

                # TODO(mark-burnett): It may be more robust to directly call
                # tiller status to decide whether to install/upgrade rather
                # than checking for list membership.
                if release_name in [rel[0] for rel in deployed_releases]:

                    # indicate to the end user what path we are taking
                    LOG.info("Upgrading release %s in namespace %s",
                             release_name, namespace)
                    # extract the installed chart and installed values from
                    # the latest release so we can compare to the intended
                    # state
                    old_chart, old_values_string = self.find_release_chart(
                        deployed_releases, release_name)

                    upgrade = chart.get('upgrade', {})
                    disable_hooks = upgrade.get('no_hooks', False)
                    force = upgrade.get('force', False)
                    recreate_pods = upgrade.get('recreate_pods', False)

                    LOG.info("Checking Pre/Post Actions")
                    if upgrade:
                        upgrade_pre = upgrade.get('pre', {})
                        upgrade_post = upgrade.get('post', {})

                        if not self.disable_update_pre and upgrade_pre:
                            pre_actions = upgrade_pre

                        if not self.disable_update_post and upgrade_post:
                            post_actions = upgrade_post

                    try:
                        old_values = yaml.safe_load(old_values_string)
                    except yaml.YAMLError:
                        chart_desc = '{} (previously deployed)'.format(
                            old_chart.metadata.name)
                        raise armada_exceptions.\
                            InvalidOverrideValuesYamlException(chart_desc)

                    LOG.info('Checking for updates to chart release inputs.')
                    diff = self.get_diff(old_chart, old_values, new_chart,
                                         values)

                    if not diff:
                        LOG.info("Found no updates to chart release inputs")
                        continue

                    LOG.info("Found updates to chart release inputs")
                    LOG.debug("%s", diff)
                    msg['diff'].append({chart['release']: str(diff)})

                    # TODO(MarshM): Add tiller dry-run before upgrade and
                    #               consider deadline impacts

                    # do actual update
                    timer = int(round(deadline - time.time()))
                    LOG.info('Beginning Upgrade, wait=%s, timeout=%ss',
                             this_chart_should_wait, timer)
                    tiller_result = self.tiller.update_release(
                        new_chart,
                        release_name,
                        namespace,
                        pre_actions=pre_actions,
                        post_actions=post_actions,
                        disable_hooks=disable_hooks,
                        values=yaml.safe_dump(values),
                        wait=this_chart_should_wait,
                        timeout=timer,
                        force=force,
                        recreate_pods=recreate_pods)

                    if this_chart_should_wait:
                        self._wait_until_ready(release_name, wait_labels,
                                               namespace, timer)

                    # Track namespace+labels touched by upgrade
                    ns_label_set.add((namespace, tuple(wait_labels.items())))

                    LOG.info('Upgrade completed with results from Tiller: %s',
                             tiller_result.__dict__)
                    msg['upgrade'].append(release_name)

                # process install
                else:
                    LOG.info("Installing release %s in namespace %s",
                             release_name, namespace)

                    timer = int(round(deadline - time.time()))
                    LOG.info('Beginning Install, wait=%s, timeout=%ss',
                             this_chart_should_wait, timer)
                    tiller_result = self.tiller.install_release(
                        new_chart,
                        release_name,
                        namespace,
                        values=yaml.safe_dump(values),
                        wait=this_chart_should_wait,
                        timeout=timer)

                    if this_chart_should_wait:
                        self._wait_until_ready(release_name, wait_labels,
                                               namespace, timer)

                    # Track namespace+labels touched by install
                    ns_label_set.add((namespace, tuple(wait_labels.items())))

                    LOG.info('Install completed with results from Tiller: %s',
                             tiller_result.__dict__)
                    msg['install'].append(release_name)

                # Keeping track of time remaining
                timer = int(round(deadline - time.time()))
                test_chart_args = (release_name, timer, test_cleanup)
                if test_this_chart:
                    # Sequenced ChartGroup should run tests after each Chart
                    if cg_sequenced:
                        LOG.info(
                            'Running sequenced test, timeout remaining: '
                            '%ss.', timer)
                        self._test_chart(*test_chart_args)

                    # Un-sequenced ChartGroup should run tests at the end
                    else:
                        tests_to_run.append(
                            functools.partial(self._test_chart,
                                              *test_chart_args))

            # End of Charts in ChartGroup
            LOG.info('All Charts applied in ChartGroup %s.', cg_name)

            # After all Charts are applied, we should wait for the entire
            # ChartGroup to become healthy by looking at the namespaces seen
            # TODO(MarshM): Need to determine a better timeout
            #               (not cg_max_timeout)
            if cg_max_timeout <= 0:
                cg_max_timeout = const.DEFAULT_CHART_TIMEOUT
            deadline = time.time() + cg_max_timeout
            for (ns, labels) in ns_label_set:
                labels_dict = dict(labels)
                timer = int(round(deadline - time.time()))
                LOG.info(
                    'Final ChartGroup wait for healthy namespace=%s, '
                    'labels=(%s), timeout remaining: %ss.', ns, labels_dict,
                    timer)
                if timer <= 0:
                    reason = ('Timeout expired waiting on namespace: %s, '
                              'labels: (%s)' % (ns, labels_dict))
                    LOG.error(reason)
                    raise armada_exceptions.ArmadaTimeoutException(reason)

                self._wait_until_ready(
                    release_name=None,
                    wait_labels=labels_dict,
                    namespace=ns,
                    timeout=timer)

            # After entire ChartGroup is healthy, run any pending tests
            for callback in tests_to_run:
                callback()

        self.post_flight_ops()

        if self.enable_chart_cleanup:
            self._chart_cleanup(
                prefix,
                self.manifest[const.KEYWORD_ARMADA][const.KEYWORD_GROUPS],
                msg)

        LOG.info('Done applying manifest.')
        return msg

    def post_flight_ops(self):
        '''
        Operations to run after deployment process has terminated
        '''
        LOG.info("Performing post-flight operations.")

        # Delete temp dirs used for deployment
        for cloned_dir in self.cloned_dirs:
            LOG.debug('Removing cloned temp directory: %s', cloned_dir)
            source.source_cleanup(cloned_dir)

    def _wait_until_ready(self, release_name, wait_labels, namespace,
                          timeout):
        if self.dry_run:
            LOG.info(
                'Skipping wait during `dry-run`, would have waited on '
                'namespace=%s, labels=(%s) for %ss.', namespace, wait_labels,
                timeout)
            return

        self.tiller.k8s.wait_until_ready(
            release=release_name,
            labels=wait_labels,
            namespace=namespace,
            k8s_wait_attempts=self.k8s_wait_attempts,
            k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep,
            timeout=timeout)

    def _test_chart(self, release_name, timeout, cleanup):
        if self.dry_run:
            LOG.info(
                'Skipping test during `dry-run`, would have tested '
                'release=%s with timeout %ss.', release_name, timeout)
            return True

        if timeout <= 0:
            reason = ('Timeout expired before testing '
                      'release %s' % release_name)
            LOG.error(reason)
            raise armada_exceptions.ArmadaTimeoutException(reason)

        success = test_release_for_success(
            self.tiller, release_name, timeout=timeout, cleanup=cleanup)
        if success:
            LOG.info("Test passed for release: %s", release_name)
        else:
            LOG.info("Test failed for release: %s", release_name)
            raise tiller_exceptions.TestFailedException(release_name)

    def _chart_cleanup(self, prefix, charts, msg):
        LOG.info('Processing chart cleanup to remove unspecified releases.')

        valid_releases = []
        for gchart in charts:
            for chart in gchart.get(const.KEYWORD_CHARTS, []):
                valid_releases.append(
                    release_prefixer(prefix,
                                     chart.get('chart', {}).get('release')))

        actual_releases = [x.name for x in self.tiller.list_releases()]
        release_diff = list(set(actual_releases) - set(valid_releases))

        for release in release_diff:
            if release.startswith(prefix):
                LOG.info('Purging release %s as part of chart cleanup.',
                         release)
                self.tiller.uninstall_release(release)
                msg['purge'].append(release)

    def get_diff(self, old_chart, old_values, new_chart, values):
        return ReleaseDiff(old_chart, old_values, new_chart,
                           values).get_diff()
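    # NOTE (editorial sketch): worked example of the cleanup set difference
    # above; names are illustrative only. With prefix='armada' and
    #     valid_releases  = ['armada-keystone', 'armada-mariadb']
    #     actual_releases = ['armada-keystone', 'armada-mariadb',
    #                        'armada-orphan', 'unrelated-release']
    # release_diff is ['armada-orphan', 'unrelated-release'], and only
    # 'armada-orphan' is purged because it starts with the manifest prefix.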