def test_deprecated_test_key_false(self):
    """Ensure the deprecated boolean form of a chart's `test` key can
    disable tests.
    """
    handler = test.Test(
        chart={'test': False},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock())
    assert not handler.test_enabled
def test_no_test_values(self):
    """Ensure tests default to enabled when the chart provides no
    `test` values at all.
    """
    handler = test.Test(
        chart={},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock())
    assert handler.test_enabled is True
def test_cg_enabled_test_key_disabled(self):
    """Ensure a chart's deprecated boolean `test: False` wins over a
    chart group that enables tests for all charts.
    """
    handler = test.Test(
        chart={'test': False},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock(),
        cg_test_charts=True)
    assert handler.test_enabled is False
def test_cg_disabled(self):
    """Ensure a chart group can turn off tests for a chart that has no
    `test` values of its own.
    """
    handler = test.Test(
        chart={},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock(),
        cg_test_charts=False)
    assert handler.test_enabled is False
def test_deprecated_test_key_timeout(self):
    """Ensure the default test timeout applies when tests are enabled
    via the deprecated boolean `test` key.
    """
    handler = test.Test(
        chart={'test': True},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock())
    assert handler.timeout == const.DEFAULT_TEST_TIMEOUT
def test_enable_all_test_key_disabled(self):
    """Test that tests are enabled when the `enable_all` parameter is True
    and the deprecated, boolean `test` key is disabled.
    """
    # The chart disables tests via the legacy boolean key; `enable_all`
    # must override it. (Passing {'test': True} here would make the test
    # vacuous — it would pass even without enable_all.)
    test_handler = test.Test(
        chart={'test': False},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock(),
        enable_all=True)
    assert test_handler.test_enabled is True
def test_timeout_value(self):
    """Test that a chart's test timeout value, `test.timeout` overrides
    the default test timeout.
    """
    chart = {'test': {'enabled': True, 'timeout': 800}}
    test_handler = test.Test(
        chart=chart,
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock())
    # Compare by value: `is` on an int (800 is outside CPython's small-int
    # cache) tests object identity, which is an implementation detail.
    assert test_handler.timeout == chart['test']['timeout']
def test_default_timeout_value(self):
    """Test that the default test timeout is used if a test timeout value,
    `test.timeout`, is not provided.
    """
    test_handler = test.Test(
        chart={'test': {'enabled': True}},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock())
    # The Test handler's documented default is the *test* timeout, not the
    # generic helm timeout — consistent with
    # test_deprecated_test_key_timeout above.
    assert test_handler.timeout == const.DEFAULT_TEST_TIMEOUT
def test_tests_disabled(self):
    """Ensure chart values can disable tests via the `test.enabled`
    path.
    """
    chart_values = {'test': {'enabled': False}}
    handler = test.Test(
        chart=chart_values,
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock())
    assert handler.test_enabled is False
def test_enable_all_cg_disabled(self):
    """Ensure `enable_all=True` overrides a chart group that disables
    tests for all of its charts.
    """
    handler = test.Test(
        chart={},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock(),
        cg_test_charts=False,
        enable_all=True)
    assert handler.test_enabled is True
def test_cg_disabled_test_values_enabled(self):
    """Test that tests are enabled when a chart group disables all tests
    and the `test.enabled` key is True.
    """
    # Chart-level `test.enabled: True` takes precedence over the chart
    # group's cg_test_charts=False.
    test_handler = test.Test(chart={'test': {
        'enabled': True
    }},
                             release_id=helm.HelmReleaseId(
                                 'release_ns', 'release'),
                             helm=mock.Mock(),
                             cg_test_charts=False)
    assert test_handler.test_enabled is True
def test_enable_all_test_values_disabled(self):
    """Ensure `enable_all=True` overrides a chart that disables tests
    via the `test.enabled` key.
    """
    handler = test.Test(
        chart={'test': {'enabled': False}},
        release_id=helm.HelmReleaseId('release_ns', 'release'),
        helm=mock.Mock(),
        enable_all=True)
    assert handler.test_enabled is True
def get_unit(self, chart_data, timeout=None, version=2):
    """Build a ChartWait wrapping `chart_data` in a minimal chart
    document of the requested schema version.
    """
    schema = 'armada/Chart/v{}'.format(str(version))
    chart_doc = {
        'schema': schema,
        'metadata': {'name': 'test'},
        const.KEYWORD_DATA: chart_data,
    }
    return wait.ChartWait(
        k8s=mock.MagicMock(),
        release_id=helm.HelmReleaseId('test', 'test-test'),
        chart=chart_doc,
        k8s_wait_attempts=1,
        k8s_wait_attempt_sleep=1,
        timeout=timeout)
def _do_test(mock_test, mock_chartbuilder, MockChartDownload, mock_post_flight):
    """Run Armada.sync() against TEST_YAML with mocked chart download,
    chart build, test handler and post-flight hooks, then verify the
    helm install/upgrade/uninstall calls and Test-handler constructions
    against the enclosing test's expectations.

    NOTE(review): this is a closure — `known_releases`, `diff`,
    `test_success`, `test_failure_to_run` and `self` are taken from the
    enclosing test scope.
    """
    MockChartDownload.return_value.get_chart.side_effect = \
        set_source_dir
    # Instantiate Armada object.
    yaml_documents = list(yaml.safe_load_all(TEST_YAML))
    m_helm = mock.MagicMock()
    armada_obj = armada.Armada(yaml_documents, m_helm)
    prefix = armada_obj.manifest['data']['release_prefix']

    def release_metadata(release_id, **kwargs):
        # Look up a fake existing release by name + namespace; None when
        # the release is not in `known_releases`.
        try:
            return next(
                r for r in known_releases if release_id.name == r['name']
                and release_id.namespace == r['namespace'])
        except StopIteration:
            return None

    m_helm.release_metadata.side_effect = release_metadata
    armada_obj.chart_deploy.get_diff = mock.Mock()

    cg = armada_obj.manifest['data']['chart_groups'][0]
    chart_group = cg['data']
    charts = chart_group['chart_group']
    cg_test_all_charts = chart_group.get('test_charts')

    # Wire the mocked Test handler to succeed, fail, or fail to run,
    # depending on the enclosing test's flags.
    mock_test_release = mock_test.return_value.test_release
    if test_failure_to_run:
        mock_test_release.side_effect = Exception('test failed to run')
    else:
        if not test_success:
            mock_test_release.side_effect = Exception('test failed')
    mock_test.return_value.timeout = const.DEFAULT_TEST_TIMEOUT

    # Stub out irrelevant methods called by `armada.sync()`.
    mock_chartbuilder.get_helm_chart.return_value = None

    # Simulate chart diff, upgrade should only happen if non-empty.
    armada_obj.chart_deploy.get_diff.return_value = diff

    armada_obj.sync()

    # Rebuild the calls sync() should have made, walking the charts the
    # same way chart_deploy does.
    expected_install_release_calls = []
    expected_upgrade_release_calls = []
    expected_uninstall_release_calls = []
    expected_test_constructor_calls = []
    for c in charts:
        chart = c['data']
        release = chart['release']
        release_name = release_prefixer(prefix, release)
        release_id = helm.HelmReleaseId(chart['namespace'], release_name)
        source_dir = chart['source_dir']
        source_directory = os.path.join(*source_dir)
        # Simplified check because the actual code uses logical-or's
        # multiple conditions, so this is enough.
        native_wait_enabled = (chart['wait'].get('native', {}).get(
            'enabled', True))
        if release_name not in [x['name'] for x in known_releases]:
            # Unknown release: a fresh install is expected.
            expected_install_release_calls.append(
                mock.call(source_directory,
                          release_id,
                          values=chart['values'],
                          wait=native_wait_enabled,
                          timeout=mock.ANY))
        else:
            target_release = None
            for known_release in known_releases:
                if known_release['name'] == release_name:
                    target_release = known_release
                    break
            if target_release:
                status = get_release_status(target_release)
                if status == helm.STATUS_FAILED:
                    protected = chart.get('protected', {})
                    if not protected:
                        # Failed and unprotected: purge then reinstall.
                        expected_uninstall_release_calls.append(
                            mock.call(
                                release_id,
                                purge=True,
                                timeout=const.DEFAULT_DELETE_TIMEOUT))
                        expected_install_release_calls.append(
                            mock.call(source_directory,
                                      release_id,
                                      values=chart['values'],
                                      wait=native_wait_enabled,
                                      timeout=mock.ANY))
                    else:
                        p_continue = protected.get(
                            'continue_processing', False)
                        if p_continue:
                            continue
                        else:
                            if chart_group['sequenced']:
                                break
                if status == helm.STATUS_DEPLOYED:
                    if diff:
                        # Deployed with a non-empty diff: an upgrade is
                        # expected.
                        upgrade = chart.get('upgrade', {})
                        disable_hooks = upgrade.get('no_hooks', False)
                        options = upgrade.get('options', {})
                        force = options.get('force', False)
                        expected_upgrade_release_calls.append(
                            mock.call(source_directory,
                                      release_id,
                                      disable_hooks=disable_hooks,
                                      force=force,
                                      values=chart['values'],
                                      wait=native_wait_enabled,
                                      timeout=mock.ANY))
        # A Test handler is constructed once per processed chart.
        expected_test_constructor_calls.append(
            mock.call(chart,
                      release_id,
                      m_helm,
                      cg_test_charts=cg_test_all_charts))

    # Non-sequenced chart groups may deploy in any order.
    any_order = not chart_group['sequenced']
    # Verify that at least 1 release is either installed or updated.
    self.assertTrue(
        len(expected_install_release_calls) >= 1
        or len(expected_upgrade_release_calls) >= 1)
    # Verify that the expected number of non-deployed releases are
    # installed with expected arguments.
    self.assertEqual(len(expected_install_release_calls),
                     m_helm.install_release.call_count)
    m_helm.install_release.assert_has_calls(
        expected_install_release_calls, any_order=any_order)
    # Verify that the expected number of deployed releases are
    # updated with expected arguments.
    self.assertEqual(len(expected_upgrade_release_calls),
                     m_helm.upgrade_release.call_count)
    m_helm.upgrade_release.assert_has_calls(
        expected_upgrade_release_calls, any_order=any_order)
    # Verify that the expected number of deployed releases are
    # uninstalled with expected arguments.
    self.assertEqual(len(expected_uninstall_release_calls),
                     m_helm.uninstall_release.call_count)
    m_helm.uninstall_release.assert_has_calls(
        expected_uninstall_release_calls, any_order=any_order)
    # Verify that the expected number of deployed releases are
    # tested with expected arguments.
    self.assertEqual(len(expected_test_constructor_calls),
                     mock_test.call_count)
    mock_test.assert_has_calls(expected_test_constructor_calls,
                               any_order=True)
def _execute(self, ch, cg_test_all_charts, prefix):
    """Deploy a single chart document: decide between install, upgrade
    or no-op based on the existing release's status and diff, run the
    chosen action under a deadline, wait for resources, and run chart
    tests when enabled.

    :param ch: full chart document (metadata + data).
    :param cg_test_all_charts: chart group's test-all-charts setting,
        forwarded to the Test handler.
    :param prefix: release prefix applied to the chart's release name.
    :returns: dict describing what happened, e.g. ``{'install': id}``,
        ``{'upgrade': id}``, or a purge/protected result.
    """
    manifest_name = self.manifest['metadata']['name']
    chart = ch[const.KEYWORD_DATA]
    chart_name = ch['metadata']['name']
    namespace = chart.get('namespace')
    release = chart.get('release')
    release_name = r.release_prefixer(prefix, release)
    release_id = helm.HelmReleaseId(namespace, release_name)
    source_dir = chart['source_dir']
    source_directory = os.path.join(*source_dir)
    LOG.info('Processing Chart, release=%s', release_id)

    result = {}

    chart_wait = ChartWait(
        self.helm.k8s,
        release_id,
        ch,
        k8s_wait_attempts=self.k8s_wait_attempts,
        k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep,
        timeout=self.timeout)
    wait_timeout = chart_wait.get_timeout()

    # Begin Chart timeout deadline
    deadline = time.time() + wait_timeout

    old_release = self.helm.release_metadata(release_id)
    action = metrics.ChartDeployAction.NOOP

    def noop():
        pass

    # `deploy` is rebound below to install/upgrade once the action is
    # resolved; noop means "leave the release alone and just wait".
    deploy = noop

    # Resolve action
    values = chart.get('values', {})
    pre_actions = {}

    status = None
    if old_release:
        status = r.get_release_status(old_release)

    native_wait_enabled = chart_wait.is_native_enabled()

    chartbuilder = ChartBuilder.from_chart_doc(ch, self.helm)

    if status == helm.STATUS_DEPLOYED:
        # indicate to the end user what path we are taking
        LOG.info("Existing release %s found", release_id)

        # extract the installed chart and installed values from the
        # latest release so we can compare to the intended state
        old_chart = old_release['chart']
        old_values = old_release.get('config', {})

        upgrade = chart.get('upgrade', {})
        options = upgrade.get('options', {})

        # TODO: Remove when v1 doc support is removed.
        # v1 docs keep `no_hooks` directly under `upgrade`; v2 moved it
        # under `upgrade.options`.
        schema_info = get_schema_info(ch['schema'])
        if schema_info.version < 2:
            no_hooks_location = upgrade
        else:
            no_hooks_location = options

        disable_hooks = no_hooks_location.get('no_hooks', False)
        force = options.get('force', False)

        if upgrade:
            upgrade_pre = upgrade.get('pre', {})
            upgrade_post = upgrade.get('post', {})

            if not self.disable_update_pre and upgrade_pre:
                pre_actions = upgrade_pre

            if not self.disable_update_post and upgrade_post:
                # NOTE(review): adjacent string literals concatenate
                # without a space ("...Armada" + "and..."); message
                # reads "by Armadaand" — confirm and fix upstream.
                LOG.warning('Post upgrade actions are ignored by Armada'
                            'and will not affect deployment.')

        LOG.info('Checking for updates to chart release inputs.')
        new_chart = chartbuilder.get_helm_chart(release_id, values)
        diff = self.get_diff(old_chart, old_values, new_chart, values)

        if not diff:
            LOG.info("Found no updates to chart release inputs")
        else:
            action = metrics.ChartDeployAction.UPGRADE
            LOG.info("Found updates to chart release inputs")

            def upgrade():
                # do actual update
                # Remaining time until the chart deadline becomes the
                # helm timeout for this operation.
                timer = int(round(deadline - time.time()))
                PreUpdateActions(self.helm.k8s).execute(
                    pre_actions, release, namespace, chart, disable_hooks,
                    values, timer)
                LOG.info(
                    "Upgrading release=%s, wait=%s, "
                    "timeout=%ss", release_id, native_wait_enabled, timer)
                self.helm.upgrade_release(
                    source_directory,
                    release_id,
                    disable_hooks=disable_hooks,
                    values=values,
                    wait=native_wait_enabled,
                    timeout=timer,
                    force=force)

                LOG.info('Upgrade completed')
                result['upgrade'] = release_id

            deploy = upgrade
    else:

        def install():
            timer = int(round(deadline - time.time()))
            LOG.info(
                "Installing release=%s, wait=%s, "
                "timeout=%ss", release_id, native_wait_enabled, timer)
            self.helm.install_release(
                source_directory,
                release_id,
                values=values,
                wait=native_wait_enabled,
                timeout=timer)

            LOG.info('Install completed')
            result['install'] = release_id

        # Check for release with status other than DEPLOYED
        if status:
            if status != helm.STATUS_FAILED:
                LOG.warn(
                    'Unexpected release status encountered '
                    'release=%s, status=%s', release_id, status)

                # Make best effort to determine whether a deployment is
                # likely pending, by checking if the last deployment
                # was started within the timeout window of the chart.
                last_deployment_age = r.get_last_deployment_age(
                    old_release)
                likely_pending = last_deployment_age <= wait_timeout
                if likely_pending:
                    # We don't take any deploy action and wait for the
                    # release to get deployed.
                    deploy = noop
                    # Shrink the deadline by the time the in-flight
                    # deployment has already consumed.
                    deadline = deadline - last_deployment_age
                else:
                    # Release is likely stuck in an unintended
                    # state. Log and continue on with remediation steps
                    # below.
                    LOG.info(
                        'Old release %s likely stuck in status %s, '
                        '(last deployment age=%ss) >= '
                        '(chart wait timeout=%ss)', release, status,
                        last_deployment_age, wait_timeout)
                    res = self.purge_release(
                        chart, release_id, status, manifest_name,
                        chart_name, result)
                    if isinstance(res, dict):
                        if 'protected' in res:
                            # Protected release: stop here and report.
                            return res
                    action = metrics.ChartDeployAction.INSTALL
                    deploy = install
            else:
                # The chart is in Failed state, hence we purge
                # the chart and attempt to install it again.
                res = self.purge_release(
                    chart, release_id, status, manifest_name, chart_name,
                    result)
                if isinstance(res, dict):
                    if 'protected' in res:
                        return res
                action = metrics.ChartDeployAction.INSTALL
                deploy = install

        if status is None:
            # No existing release at all: plain install.
            action = metrics.ChartDeployAction.INSTALL
            deploy = install

    # Deploy
    with metrics.CHART_DEPLOY.get_context(wait_timeout, manifest_name,
                                          chart_name,
                                          action.get_label_value()):
        deploy()

        # Wait
        timer = int(round(deadline - time.time()))
        chart_wait.wait(timer)

        # Test
        # Re-test when something was just deployed or the last recorded
        # test did not pass.
        just_deployed = ('install' in result) or ('upgrade' in result)
        last_test_passed = old_release and r.get_last_test_result(
            old_release)
        test_handler = Test(
            chart, release_id, self.helm,
            cg_test_charts=cg_test_all_charts)
        run_test = test_handler.test_enabled and (
            just_deployed or not last_test_passed)
        if run_test:
            with metrics.CHART_TEST.get_context(test_handler.timeout,
                                                manifest_name, chart_name):
                self._test_chart(test_handler)

    return result