def test_happy_path_unicode(self, getsize_patch):
    """Verify create-provisioning-artifact output round-trips non-ASCII text."""
    # Arrange: Hebrew name and Latin-supplement description.
    self.args.provisioning_artifact_name = u'\u05d1\u05e8\u05d9\u05e6'
    self.args.provisioning_artifact_description = u'\u00fd\u00a9\u0194'
    service_response = self.get_create_provisioning_artifact_output()
    self.servicecatalog_client.create_provisioning_artifact \
        .return_value = service_response
    expected_pa_detail = self.get_create_provisioning_artifact_output()
    del expected_pa_detail['ResponseMetadata']
    # ensure_ascii=False keeps the unicode characters literal in the
    # expected JSON, matching what the command writes to stdout.
    expected_response_output = json.dumps(
        expected_pa_detail, indent=2, ensure_ascii=False)

    # Act
    with capture_output() as captured:
        result = self.cmd._run_main(self.args, self.global_args)

    # Assert
    self.session.create_client.assert_called_with(
        'servicecatalog',
        region_name=self.global_args.region,
        endpoint_url=None,
        verify=None)
    self.servicecatalog_client.create_provisioning_artifact \
        .assert_called_once_with(
            ProductId=self.args.product_id,
            Parameters=self.get_provisioning_artifact_parameters(
                self.args.provisioning_artifact_name,
                self.args.provisioning_artifact_description,
                self.args.provisioning_artifact_type))
    self.assertEqual(expected_response_output, captured.stdout.getvalue())
    self.assertEqual(0, result)
def _run_cmd(cmd, expected_rc=0):
    """Run *cmd* through a fresh CLI driver; return (stdout, stderr, rc).

    Fails the test if the return code differs from *expected_rc*, including
    the captured stdout/stderr in the failure message.
    """
    logging.debug("Calling cmd: %s", cmd)
    # Drivers do not seem to be reusable since the formatters seem to not
    # clear themselves between runs. This is fine in practice since a driver
    # is only called once, but for tests it means we need a new driver per
    # test, which is far more heavyweight than it needs to be. Might be
    # worth seeing if we can make drivers reusable to speed these up.
    driver = create_clidriver()
    cmdlist = cmd if isinstance(cmd, list) else cmd.split()
    with capture_output() as captured:
        try:
            rc = driver.main(cmdlist)
        except SystemExit as e:
            # Catch SystemExit so we get a proper rc while still presenting
            # stdout/stderr to the test runner for debugging.
            rc = e.code
    stdout = captured.stdout.getvalue()
    stderr = captured.stderr.getvalue()
    assert_equal(
        rc, expected_rc,
        "Unexpected rc (expected: %s, actual: %s) for command: %s\n"
        "stdout:\n%sstderr:\n%s" % (expected_rc, rc, cmd, stdout, stderr))
    return stdout, stderr, rc
def _run_cmd(cmd, expected_rc=0): logging.debug("Calling cmd: %s", cmd) # Drivers do not seem to be reusable since the formatters seem to not clear # themselves between runs. This is fine in practice since a driver is only # called once but for tests it means we need to create a new driver for # each test, which is far more heavyweight than it needs to be. Might be # worth seeing if we can make drivers reusable to speed these up generated # tests. driver = create_clidriver() if not isinstance(cmd, list): cmdlist = cmd.split() else: cmdlist = cmd with capture_output() as captured: try: rc = driver.main(cmdlist) except SystemExit as e: # We need to catch SystemExit so that we # can get a proper rc and still present the # stdout/stderr to the test runner so we can # figure out what went wrong. rc = e.code stderr = captured.stderr.getvalue() stdout = captured.stdout.getvalue() assert_equal( rc, expected_rc, "Unexpected rc (expected: %s, actual: %s) for command: %s\n" "stdout:\n%sstderr:\n%s" % ( expected_rc, rc, cmd, stdout, stderr)) return stdout, stderr, rc
def run(self, cmdline):
    """Execute *cmdline* with a patched os.environ, capturing output.

    Returns the runner result with its ``stdout``/``stderr`` attributes
    filled from the captured streams.
    """
    with mock.patch('os.environ', self.env):
        with capture_output() as output:
            result = self._do_run(cmdline)
    # The capture buffers remain readable after the context exits.
    result.stdout = output.stdout.getvalue()
    result.stderr = output.stderr.getvalue()
    return result
def test_run_main(self, mock_expiration_time):
    """get-token writes an ExecCredential JSON document to stdout."""
    fake_token = 'k8s-aws-v1.aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8='
    token_generator = mock.Mock()
    token_generator.get_token.return_value = fake_token

    args = mock.Mock()
    args.cluster_name = "my-cluster"
    args.role_arn = None
    parsed_globals = mock.Mock()
    parsed_globals.region = 'us-west-2'

    # Expected document: expiration is fixed by the patched expiration
    # time; the token comes straight from the generator.
    expected_stdout = json.dumps({
        "kind": "ExecCredential",
        "apiVersion": "client.authentication.k8s.io/v1alpha1",
        "spec": {},
        "status": {
            "expirationTimestamp": "2019-06-21T22:07:54Z",
            "token": fake_token,
        },
    }) + '\n'

    cmd = GetTokenCommand(self._session)
    with capture_output() as captured:
        cmd._run_main(args, parsed_globals, token_generator)
    self.assertEqual(expected_stdout, captured.stdout.getvalue())
def test_wait_for_deploy_success_default_wait(self):
    """A wait of 0 falls back to the default 30-minute waiting message."""
    mock_id = 'd-1234567XX'
    expected_stdout = self.deployer.MSG_WAITING.format(
        deployment_id=mock_id, wait=30)
    with capture_output() as captured:
        # Pass mock_id instead of re-typing the literal so the id under
        # test and the id in the expected message cannot drift apart.
        self.deployer.wait_for_deploy_success(mock_id, 0)
    self.assertEqual(expected_stdout, captured.stdout.getvalue())
def assert_format(given, expected, query=None):
    """Assert the YAML formatter renders *given* as exactly *expected*.

    If *query* is provided it is compiled with jmespath and applied by
    the formatter before rendering.
    """
    compiled_query = jmespath.compile(query) if query is not None else None
    formatter = YAMLFormatter(
        Namespace(query=compiled_query), DynamoYAMLDumper())
    with capture_output() as output:
        formatter('fake-command-name', given)
    assert_equal(output.stdout.getvalue(), expected)
def test_wait_for_deploy_success_max_wait_exceeded(self):
    """A wait above MAX_WAIT_MIN is clamped to MAX_WAIT_MIN in the message."""
    mock_id = 'd-1234567XX'
    mock_wait = MAX_WAIT_MIN + 15
    expected_stdout = self.deployer.MSG_WAITING.format(
        deployment_id=mock_id, wait=MAX_WAIT_MIN)
    with capture_output() as captured:
        # Pass mock_id instead of re-typing the literal so the id under
        # test and the id in the expected message cannot drift apart.
        self.deployer.wait_for_deploy_success(mock_id, mock_wait)
    self.assertEqual(expected_stdout, captured.stdout.getvalue())
def test_rb_force_does_not_delete_bucket_on_failure(self):
    """`rb --force` must not delete the bucket when listing objects fails."""
    cmd = RbCommand(self.session)
    self.session.register('before-call', self.handler)
    # Make the initial object listing fail with a server error.
    self.responses['ListObjects'] = [(StatusCode(500), {})]
    global_args = FakeArgs(endpoint_url=None, region=None, verify_ssl=None)
    # assertRaisesRegex: assertRaisesRegexp is a deprecated alias
    # (removed in Python 3.12).
    with self.assertRaisesRegex(
            RuntimeError, "Unable to delete all objects in the bucket"):
        with capture_output():
            cmd(['s3://bucket/', '--force'], global_args)
    # Note there's no DeleteObject nor DeleteBucket calls
    # because the ListObjects call failed.
    self.assertEqual(self.method_calls, ['ListObjects'])
def test_happy_path_unicode(self, getsize_patch):
    """Verify create-product handles a non-ASCII name and description."""
    # Arrange: Hebrew product name and Latin-supplement description.
    self.args.product_name = u'\u05d1\u05e8\u05d9\u05e6\u05e7\u05dc\u05d4'
    self.args.support_description = u'\u00fd\u00a9\u0194\u0292'
    actual_product_view_detail = self.get_product_view_detail()
    self.servicecatalog_client.create_product.return_value = \
        actual_product_view_detail
    expected_product_view_detail = self.get_product_view_detail()
    del expected_product_view_detail["ResponseMetadata"]
    expected_response_output = json.dumps(expected_product_view_detail,
                                          indent=2)
    expected_args = self.get_args_dict()

    # Act
    with capture_output() as captured:
        result = self.cmd._run_main(self.args, self.global_args)

    # Assert
    self.session.create_client.assert_called_with(
        'servicecatalog',
        region_name=self.global_args.region,
        endpoint_url=None,
        verify=None)
    self.servicecatalog_client.create_product.assert_called_once_with(
        Name=expected_args['product-name'],
        Owner=expected_args['product-owner'],
        Description=expected_args['product-description'],
        Distributor=expected_args['product-distributor'],
        SupportDescription=expected_args['support-description'],
        SupportEmail=expected_args['support-email'],
        ProductType=expected_args['product-type'],
        Tags=expected_args['tags'],
        ProvisioningArtifactParameters=(
            self.get_provisioning_artifact_parameters(
                self.args.provisioning_artifact_name,
                self.args.provisioning_artifact_description,
                self.args.provisioning_artifact_type)))
    self.assertEqual(expected_response_output, captured.stdout.getvalue())
    # assertEqual: assertEquals is a deprecated alias
    # (removed in Python 3.12).
    self.assertEqual(0, result)
def run_s3_handler(self, s3_handler, tasks):
    """Run *tasks* through *s3_handler*; return (stdout, stderr, rc)."""
    self.patch_make_request()
    with capture_output() as captured:
        try:
            rc = s3_handler.call(tasks)
        except SystemExit as e:
            # Capture the exit code rather than letting SystemExit
            # propagate, so stdout/stderr stay available to the test
            # runner for diagnosing what went wrong.
            rc = e.code
    return (captured.stdout.getvalue(),
            captured.stderr.getvalue(),
            rc)
def test_rb_force_does_not_delete_bucket_on_failure(self):
    """`rb --force` must not delete the bucket when listing objects fails."""
    cmd = RbCommand(self.session)
    self.session.register('before-call', self.handler)
    # Make the initial object listing fail with a server error.
    self.responses['ListObjects'] = [
        (StatusCode(500), {})
    ]
    global_args = FakeArgs(endpoint_url=None, region=None, verify_ssl=None)
    # assertRaisesRegex: assertRaisesRegexp is a deprecated alias
    # (removed in Python 3.12).
    with self.assertRaisesRegex(
            RuntimeError, "Unable to delete all objects in the bucket"):
        with capture_output():
            cmd(['s3://bucket/', '--force'], global_args)
    # Note there's no DeleteObject nor DeleteBucket calls
    # because the ListObjects call failed.
    self.assertEqual(self.method_calls, ['ListObjects'])
def assert_cmd_dry(self, passed_config, env_variable_configs,
                   default_config=os.path.join(".kube", "config"),
                   role=None):
    """
    Run update-kubeconfig with --dry-run and return the captured output.

    Runs directly against the testdata directory, since dry_run won't
    write to file.  The KUBECONFIG environment variable is set to contain
    the configs listed in env_variable_configs (regardless of whether
    they exist), and the default path is patched to default_config.

    :param passed_config: A filename to be passed to --kubeconfig
    :type passed_config: string

    :param env_variable_configs: A list of filenames to put in KUBECONFIG
    :type env_variable_configs: list or None

    :param default_config: A config to be the default path
    :type default_config: string

    :returns: The captured output
    :rtype: CapturedOutput
    """
    env_variable = self.build_temp_environment_variable(
        env_variable_configs)

    args = ["--name", "ExampleCluster", "--dry-run"]
    if passed_config is not None:
        args.extend(["--kubeconfig", get_testdata(passed_config)])
    if role is not None:
        args.extend(["--role-arn", "RoleArn"])

    with capture_output() as captured:
        with mock.patch.dict(os.environ, {'KUBECONFIG': env_variable}):
            with mock.patch(
                    "awscli.customizations.eks.update_kubeconfig.DEFAULT_PATH",
                    get_testdata(default_config)):
                self.command(args, None)

    self.mock_create_client.assert_called_once_with('eks')
    self.client.describe_cluster.assert_called_once_with(
        name='ExampleCluster')
    return captured
def assert_cmd_dry(self, passed_config, env_variable_configs,
                   default_config=os.path.join(".kube", "config"),
                   role=None):
    """
    Run update-kubeconfig using dry-run, assert_cmd_dry runs
    directly referencing the testdata directory, since dry_run
    won't write to file

    The KUBECONFIG environment variable will be set to contain the
    configs listed in env_variable_configs
    (regardless of whether they exist).
    The default path will be set to default_config

    Returns the captured output

    :param passed_config: A filename to be passed to --kubeconfig
    :type passed_config: string

    :param env_variable_configs: A list of filenames to put in KUBECONFIG
    :type env_variable_configs: list or None

    :param default_config: A config to be the default path
    :type default_config: string

    :returns: The captured output
    :rtype: CapturedOutput
    """
    env_variable = self.build_temp_environment_variable(
        env_variable_configs)
    args = ["--name", "ExampleCluster", "--dry-run"]
    if passed_config is not None:
        args += ["--kubeconfig", get_testdata(passed_config)]
    if role is not None:
        args += ["--role-arn", "RoleArn"]
    # Patch both the KUBECONFIG environment variable and the module-level
    # default path so the command resolves only testdata files.
    with capture_output() as captured:
        with mock.patch.dict(os.environ, {'KUBECONFIG': env_variable}):
            with mock.patch(
                    "awscli.customizations.eks.update_kubeconfig.DEFAULT_PATH",
                    get_testdata(default_config)):
                self.command(args, None)
    self.mock_create_client.assert_called_once_with('eks')
    self.client \
        .describe_cluster.assert_called_once_with(name='ExampleCluster')
    return captured
def assert_skeleton_equals(self, arg_value, expected_output,
                           expected_rc=0, input_shape=None):
    """Generate a CLI skeleton and check its stdout and return code.

    When *input_shape* is given, a fresh argument is built for it;
    otherwise the fixture's default argument is used.
    """
    if input_shape is None:
        argument = self.argument
    else:
        argument = self.create_argument(input_shape)
    parsed_args = mock.Mock(generate_cli_skeleton=arg_value)
    with capture_output() as output:
        rc = argument.generate_skeleton(
            service_operation=self.service_operation,
            parsed_args=parsed_args,
            parsed_globals=None,
            call_parameters=None,
        )
    self.assertEqual(rc, expected_rc)
    self.assertEqual(output.stdout.getvalue(), expected_output)
def colorama_text(self, tty=True):
    """Yield captured output while colorama_text is active.

    The captured stdout's ``isatty`` is stubbed to return *tty* so
    colorama behaves as if stdout is (or is not) a terminal.

    NOTE(review): appears to be used as a generator-based context
    manager — confirm it is decorated with @contextmanager at the
    definition site.
    """
    with capture_output() as captured:
        # Stub isatty before entering colorama so it sees the desired
        # terminal state.
        captured.stdout.isatty = lambda: tty
        with colorama.colorama_text(**COLORAMA_KWARGS):
            yield captured