def test_can_provide_lambda_name_for_logs(runner, mock_cli_factory):
    """``chalice logs --name foo`` should resolve foo's lambda ARN."""
    resources = DeployedResources({
        "resources": [
            {"name": "foo",
             "lambda_arn": "arn:aws:lambda::app-dev-foo",
             "resource_type": "lambda_function"},
        ]
    })
    mock_cli_factory.create_config_obj.return_value = FakeConfig(resources)
    retriever = mock.Mock(spec=LogRetriever)
    retriever.retrieve_logs.return_value = []
    mock_cli_factory.create_log_retriever.return_value = retriever
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.logs, ['--name', 'foo'],
            cli_factory=mock_cli_factory,
        )
        assert result.exit_code == 0
        retriever.retrieve_logs.assert_called_with(
            include_lambda_messages=False, max_entries=None)
        # The ARN passed to the retriever must be the one recorded for
        # the 'foo' lambda function above.
        mock_cli_factory.create_log_retriever.assert_called_with(
            mock.sentinel.Session, 'arn:aws:lambda::app-dev-foo'
        )
def test_invoke_does_raise_if_service_error(runner, mock_cli_factory):
    # NOTE(review): a second test with this exact name is defined later in
    # this file.  Under Python semantics the later definition replaces this
    # one at import time, so this version is never collected or run by
    # pytest.  Rename one of the two or delete the duplicate.
    deployed_resources = DeployedResources({"resources": []})
    mock_cli_factory.create_config_obj.return_value = FakeConfig(
        deployed_resources)
    invoke_handler = mock.Mock(spec=LambdaInvokeHandler)
    # Simulate the Lambda service rejecting the invocation.
    invoke_handler.invoke.side_effect = ClientError(
        {
            'Error': {
                'Code': 'LambdaError',
                'Message': 'Error message'
            }
        },
        'Invoke'
    )
    mock_cli_factory.create_lambda_invoke_handler.return_value = invoke_handler
    mock_reader = mock.Mock(spec=PipeReader)
    mock_reader.read.return_value = 'barbaz'
    mock_cli_factory.create_stdin_reader.return_value = mock_reader
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
                                  cli_factory=mock_cli_factory)
        # The service error should surface as a non-zero exit code with
        # the error code and message in the output.
        assert result.exit_code == 1
        assert invoke_handler.invoke.call_args == mock.call('barbaz')
        assert (
            "Error: got 'LambdaError' exception back from Lambda\n"
            "Error message"
        ) in result.output
def test_warning_when_using_deprecated_arg(runner, mock_cli_factory):
    """Passing the stage as a positional arg to deploy warns but works."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.deploy, ['prod'], cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        assert 'is deprecated and will be removed' in result.output
def test_error_when_no_deployed_record(runner, mock_cli_factory):
    """``chalice url`` fails cleanly when nothing has been deployed yet."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.url, [], cli_factory=mock_cli_factory)
        assert result.exit_code == 2
        assert 'not find' in result.output
def test_debug_flag_enables_logging(runner):
    """--debug should produce DEBUG-level log output during `package`."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = runner.invoke(
            cli.cli, ['--debug', 'package', 'outdir'], obj={})
        assert result.exit_code == 0
        # Bug fix: the brackets must be escaped.  Unescaped, '[DEBUG]' is
        # a regex character class matching any single one of those letters,
        # so the old pattern could match output with no debug marker at all.
        assert re.search(
            r'\[DEBUG\].*Creating deployment package',
            result.output) is not None
def test_can_specify_profile_for_logs(runner, mock_cli_factory):
    """--profile on the logs command is forwarded to the CLI factory."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.logs, ['--profile', 'my-profile'],
            cli_factory=mock_cli_factory,
        )
        assert result.exit_code == 0
        assert mock_cli_factory.profile == 'my-profile'
def test_can_package_command(runner):
    """`chalice package outdir` writes a SAM template and deployment zip."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.package, ['outdir'])
        assert result.exit_code == 0, result.output
        assert os.path.isdir('outdir')
        contents = os.listdir('outdir')
        assert 'sam.json' in contents
        assert 'deployment.zip' in contents
def test_can_package_with_single_file(runner):
    """--single-file bundles template and zip into one archive."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.package, ['--single-file', 'package.zip'])
        assert result.exit_code == 0, result.output
        assert os.path.isfile('package.zip')
        with zipfile.ZipFile('package.zip', 'r') as archive:
            assert sorted(archive.namelist()) == [
                'deployment.zip', 'sam.json']
def test_invoke_does_raise_if_no_function_found(runner, mock_cli_factory):
    """invoke exits with code 2 when the named function does not exist."""
    error = factory.NoSuchFunctionError('foo')
    mock_cli_factory.create_lambda_invoke_handler.side_effect = error
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 2
        # The missing function's name should appear in the error output.
        assert 'foo' in result.output
def test_invoke_does_raise_if_read_timeout(runner, mock_cli_factory):
    """A botocore ReadTimeout surfaces as exit code 1 with its message."""
    mock_cli_factory.create_lambda_invoke_handler.side_effect = ReadTimeout(
        'It took too long')
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 1
        assert 'It took too long' in result.output
def test_can_deploy_specify_connection_timeout(runner, mock_cli_factory):
    """--connection-timeout is threaded through to the botocore session."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.deploy, ['--connection-timeout', 100],
            cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        mock_cli_factory.create_botocore_session.assert_called_with(
            connection_timeout=100)
def test_can_specify_chalice_stage_arg(runner, mock_cli_factory,
                                       mock_deployer):
    """--stage selects which chalice stage the deployer targets."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.deploy, ['--stage', 'prod'],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        config = mock_cli_factory.create_config_obj.return_value
        mock_deployer.deploy.assert_called_with(
            config, chalice_stage_name='prod')
def test_can_specify_api_gateway_stage(runner, mock_cli_factory):
    """--api-gateway-stage overrides the default stage in the config."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.deploy, ['--api-gateway-stage', 'notdev'],
            cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        mock_cli_factory.create_config_obj.assert_called_with(
            autogen_policy=None,
            chalice_stage_name='dev',
            api_gateway_stage='notdev',
        )
def test_api_gateway_mutex_with_positional_arg(runner, mock_cli_factory,
                                               mock_deployer):
    """--api-gateway-stage with the deprecated positional stage is an error."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.deploy,
            ['--api-gateway-stage', 'prod', 'prod'],
            cli_factory=mock_cli_factory)
        assert result.exit_code == 2
        assert 'is deprecated' in result.output
        # Nothing should be deployed when the arguments conflict.
        assert not mock_deployer.deploy.called
def test_gen_policy_command_creates_policy(runner):
    """gen-policy emits a JSON document shaped like an IAM policy."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = runner.invoke(cli.cli, ['gen-policy'], obj={})
        assert result.exit_code == 0
        # Output must be valid JSON.  The policy details are validated
        # elsewhere; here we only check the document shape.
        policy = json.loads(result.output)
        assert 'Version' in policy
        assert 'Statement' in policy
def test_can_call_invoke(runner, mock_cli_factory, monkeypatch):
    """invoke reads stdin and passes the payload to the invoke handler."""
    handler = mock.Mock(spec=LambdaInvokeHandler)
    mock_cli_factory.create_lambda_invoke_handler.return_value = handler
    reader = mock.Mock(spec=PipeReader)
    reader.read.return_value = 'barbaz'
    mock_cli_factory.create_stdin_reader.return_value = reader
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        assert handler.invoke.call_args == mock.call('barbaz')
def test_env_vars_set_in_local(runner, mock_cli_factory, monkeypatch):
    """`chalice local` exports configured env vars into the process env."""
    server = mock.Mock(spec=local.LocalDevServer)
    mock_cli_factory.create_local_server.return_value = server
    mock_cli_factory.create_config_obj.return_value = Config.create(
        project_dir='.', environment_variables={'foo': 'bar'})
    # Swap in a fresh dict so we can observe what the command exports
    # without polluting the real environment.
    captured_env = {}
    monkeypatch.setattr(os, 'environ', captured_env)
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        _run_cli_command(runner, cli.local, [],
                         cli_factory=mock_cli_factory)
        assert captured_env['foo'] == 'bar'
def test_can_generate_pipeline_for_all(runner):
    """generate-pipeline writes a valid CloudFormation template file."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.generate_pipeline, ['pipeline.json'])
        assert result.exit_code == 0, result.output
        assert os.path.isfile('pipeline.json')
        with open('pipeline.json', 'r') as f:
            template = json.load(f)
        # Sanity check only; the full template contents are covered by
        # the unit tests for template generation.
        assert "AWSTemplateFormatVersion" in template
        assert "Outputs" in template
def test_no_errors_if_override_codebuild_image(runner):
    """-i overrides the CodeBuild image default in the template."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.generate_pipeline,
            ['-i', 'python:3.6.1', 'pipeline.json'])
        assert result.exit_code == 0, result.output
        assert os.path.isfile('pipeline.json')
        with open('pipeline.json', 'r') as f:
            template = json.load(f)
        # Sanity check only; full template contents are unit tested.
        default_image = template['Parameters']['CodeBuildImage']['Default']
        assert default_image == 'python:3.6.1'
def test_does_deploy_with_default_api_gateway_stage_name(runner,
                                                         mock_cli_factory):
    """The default API gateway stage applies when none is specified."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        # This isn't perfect: we're assuming we know how the deploy()
        # command constructs its config object, but it gives us confidence
        # that the api gateway stage defaults still apply.
        real_factory = factory.CLIFactory('.')
        config = real_factory.create_config_obj(
            chalice_stage_name='dev',
            autogen_policy=None,
            api_gateway_stage=None,
        )
        assert config.api_gateway_stage == DEFAULT_APIGATEWAY_STAGE_NAME
def test_can_extract_buildspec_yaml(runner):
    """--buildspec-file extracts the buildspec into its own file."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.generate_pipeline,
            ['--buildspec-file', 'buildspec.yml',
             '-i', 'python:3.6.1', 'pipeline.json'])
        assert result.exit_code == 0, result.output
        assert os.path.isfile('buildspec.yml')
        with open('buildspec.yml') as f:
            contents = f.read()
        # Basic sanity check; the exact contents are tested elsewhere.
        assert 'chalice package' in contents
def test_invoke_does_raise_if_unhandled_error(runner, mock_cli_factory):
    """An UnhandledLambdaError is reported and invoke exits non-zero."""
    mock_cli_factory.create_config_obj.return_value = FakeConfig(
        DeployedResources({"resources": []}))
    handler = mock.Mock(spec=LambdaInvokeHandler)
    handler.invoke.side_effect = UnhandledLambdaError('foo')
    mock_cli_factory.create_lambda_invoke_handler.return_value = handler
    reader = mock.Mock(spec=PipeReader)
    reader.read.return_value = 'barbaz'
    mock_cli_factory.create_stdin_reader.return_value = reader
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 1
        assert handler.invoke.call_args == mock.call('barbaz')
        assert ('Unhandled exception in Lambda function, details above.'
                in result.output)
def test_error_message_displayed_when_missing_feature_opt_in(runner):
    """Using an experimental feature without opting in fails packaging."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        # Drop any previously imported 'app' module so our rewritten
        # app.py is the one that gets loaded.
        sys.modules.pop('app', None)
        app_py = os.path.join('testproject', 'app.py')
        with open(app_py, 'w') as f:
            # Rather than pick an existing experimental feature, we
            # manually inject a feature flag into our app.  This ensures
            # we don't have to update this test when a feature graduates
            # from trial to accepted.  The '_features_used' is a "package
            # private" var for chalice code.
            f.write(
                'from chalice import Chalice\n'
                'app = Chalice("myapp")\n'
                'app._features_used.add("MYTESTFEATURE")\n'
            )
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.package, ['out'])
        assert isinstance(result.exception, ExperimentalFeatureError)
        assert 'MYTESTFEATURE' in str(result.exception)
def test_can_configure_github(runner):
    """--source github exposes the Github parameters in the template."""
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        # The -i option is provided so we don't have to skip this
        # test on python3.6.
        # Bug fix: '-i' 'python:3.6.1' was an accidental implicit string
        # concatenation yielding the single argument '-ipython:3.6.1'.
        # Click happens to accept the '-ivalue' form, but the separate
        # arguments were clearly intended.
        result = _run_cli_command(
            runner, cli.generate_pipeline,
            ['--source', 'github', '-i', 'python:3.6.1', 'pipeline.json'])
        assert result.exit_code == 0, result.output
        assert os.path.isfile('pipeline.json')
        with open('pipeline.json', 'r') as f:
            template = json.load(f)
        # The template is already tested in the unit tests for template
        # generation; this is just a sanity check that the github source
        # parameters are mapped through.
        assert 'GithubOwner' in template['Parameters']
        assert 'GithubRepoName' in template['Parameters']
def test_can_delete(runner, mock_cli_factory, mock_deployer):
    """A successful delete empties the deployed-values record."""
    existing = {
        'dev': {
            'api_handler_arn': 'foo',
            'rest_api_id': 'bar',
        }
    }
    mock_deployer.delete.return_value = None
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        deployed_file = os.path.join('.chalice', 'deployed.json')
        with open(deployed_file, 'wb') as f:
            f.write(json.dumps(existing).encode('utf-8'))
        result = _run_cli_command(runner, cli.delete, [],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        with open(deployed_file) as f:
            assert json.load(f) == {}
def test_can_deploy(runner, mock_cli_factory, mock_deployer):
    """deploy persists the deployer's returned values to deployed.json."""
    # We don't need to fill in everything here.
    expected = {
        'dev': {
            'api_handler_arn': 'foo',
            'rest_api_id': 'bar',
        }
    }
    mock_deployer.deploy.return_value = expected
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.deploy, [],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        # We should have also created the deployed JSON file.
        deployed_file = os.path.join('.chalice', 'deployed.json')
        assert os.path.isfile(deployed_file)
        with open(deployed_file) as f:
            assert json.load(f) == expected
def test_can_retrieve_url(runner, mock_cli_factory):
    # NOTE(review): this test is shadowed — a later function in this file
    # defines the same name, so Python keeps only the later definition and
    # this version (which uses the old single-file deployed.json schema)
    # is never collected or run.  It likely predates the schema-2.0
    # rewrite of the same test; confirm and then remove or rename it.
    deployed_values = {
        "dev": {
            "rest_api_id": "rest_api_id",
            "chalice_version": "0.7.0",
            "region": "us-west-2",
            "backend": "api",
            "api_handler_name": "helloworld-dev",
            "api_handler_arn": "arn:...",
            "api_gateway_stage": "dev-apig"
        },
        "prod": {
            "rest_api_id": "rest_api_id_prod",
            "chalice_version": "0.7.0",
            "region": "us-west-2",
            "backend": "api",
            "api_handler_name": "helloworld-dev",
            "api_handler_arn": "arn:...",
            "api_gateway_stage": "prod-apig"
        },
    }
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        record_deployed_values(deployed_values,
                               os.path.join('.chalice', 'deployed.json'))
        # Default stage is 'dev'.
        result = _run_cli_command(runner, cli.url, [],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        assert result.output == (
            'https://rest_api_id.execute-api.us-west-2.amazonaws.com'
            '/dev-apig/\n')
        # --stage prod selects the prod deployed record.
        prod_result = _run_cli_command(runner, cli.url, ['--stage', 'prod'],
                                       cli_factory=mock_cli_factory)
        assert prod_result.exit_code == 0
        assert prod_result.output == (
            'https://rest_api_id_prod.execute-api.us-west-2.amazonaws.com'
            '/prod-apig/\n')
def test_can_retrieve_url(runner, mock_cli_factory):
    """`chalice url` prints the rest API URL of the requested stage."""
    dev_values = {
        "schema_version": "2.0",
        "resources": [
            {"rest_api_url": "https://dev-url/",
             "name": "rest_api",
             "resource_type": "rest_api"},
        ]
    }
    prod_values = {
        "schema_version": "2.0",
        "resources": [
            {"rest_api_url": "https://prod-url/",
             "name": "rest_api",
             "resource_type": "rest_api"},
        ]
    }
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        deployed_dir = os.path.join('.chalice', 'deployed')
        os.makedirs(deployed_dir)
        record_deployed_values(
            dev_values, os.path.join(deployed_dir, 'dev.json'))
        record_deployed_values(
            prod_values, os.path.join(deployed_dir, 'prod.json'))
        # The default stage is dev.
        result = _run_cli_command(runner, cli.url, [],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        assert result.output == 'https://dev-url/\n'
        # --stage prod switches to the prod record.
        prod_result = _run_cli_command(
            runner, cli.url, ['--stage', 'prod'],
            cli_factory=mock_cli_factory)
        assert prod_result.exit_code == 0
        assert prod_result.output == 'https://prod-url/\n'
def test_invoke_does_raise_if_service_error(runner, mock_cli_factory):
    """A lambda ClientError is reported and invoke exits non-zero."""
    mock_cli_factory.create_config_obj.return_value = FakeConfig(
        DeployedResources({"resources": []}))
    handler = mock.Mock(spec=LambdaInvokeHandler)
    handler.invoke.side_effect = ClientError(
        {'Error': {'Code': 'LambdaError', 'Message': 'Error message'}},
        'Invoke')
    mock_cli_factory.create_lambda_invoke_handler.return_value = handler
    reader = mock.Mock(spec=PipeReader)
    reader.read.return_value = 'barbaz'
    mock_cli_factory.create_stdin_reader.return_value = reader
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
                                  cli_factory=mock_cli_factory)
        assert result.exit_code == 1
        assert handler.invoke.call_args == mock.call('barbaz')
        expected = ("Error: got 'LambdaError' exception back from Lambda\n"
                    "Error message")
        assert expected in result.output
def test_can_follow_logs_with_option(runner, mock_cli_factory):
    """--follow sets the follow flag on the log retriever."""
    resources = DeployedResources({
        "resources": [{
            "name": "foo",
            "lambda_arn": "arn:aws:lambda::app-dev-foo",
            "resource_type": "lambda_function"
        }]
    })
    mock_cli_factory.create_config_obj.return_value = FakeConfig(resources)
    retriever = mock.Mock(spec=LogRetriever)
    retriever.retrieve_logs.return_value = []
    mock_cli_factory.create_log_retriever.return_value = retriever
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(
            runner, cli.logs, ['--name', 'foo', '--follow'],
            cli_factory=mock_cli_factory)
        assert result.exit_code == 0
        retriever.retrieve_logs.assert_called_with(
            LogRetrieveOptions(include_lambda_messages=False,
                               max_entries=None))
        # The trailing True is the follow flag passed to the factory.
        mock_cli_factory.create_log_retriever.assert_called_with(
            mock.sentinel.Session, 'arn:aws:lambda::app-dev-foo', True)
def app_skeleton(tmpdir, runner):
    """Create a fresh project skeleton under tmpdir and return its path."""
    name = 'deployment-integ-test'
    with cd(str(tmpdir)):
        create_new_project_skeleton(name, None)
    return str(tmpdir.join(name))
def test_can_write_swagger_model(runner):
    # Verifies that `generate-models` on a brand-new project skeleton
    # emits the expected single-route swagger proxy document.  The
    # expected document is compared exactly, so any change to the
    # generated model must be reflected here.
    with runner.isolated_filesystem():
        cli.create_new_project_skeleton('testproject')
        os.chdir('testproject')
        result = _run_cli_command(runner, cli.generate_models, [])
        assert result.exit_code == 0
        # Output must be the swagger JSON document on stdout.
        model = json.loads(result.output)
        assert model == {
            "swagger": "2.0",
            "info": {
                "version": "1.0",
                "title": "testproject"
            },
            "schemes": ["https"],
            "paths": {
                "/": {
                    "get": {
                        "consumes": ["application/json"],
                        "produces": ["application/json"],
                        "responses": {
                            "200": {
                                "description": "200 response",
                                "schema": {
                                    "$ref": "#/definitions/Empty"
                                }
                            }
                        },
                        # Lambda proxy integration for the skeleton's
                        # single GET / route.
                        "x-amazon-apigateway-integration": {
                            "responses": {
                                "default": {
                                    "statusCode": "200"
                                }
                            },
                            "uri": ("arn:aws:apigateway:{region_name}:lambda:"
                                    "path/2015-03-31/functions/"
                                    "{api_handler_lambda_arn}/invocations"),
                            "passthroughBehavior": "when_no_match",
                            "httpMethod": "POST",
                            "contentHandling": "CONVERT_TO_TEXT",
                            "type": "aws_proxy"
                        }
                    }
                }
            },
            "definitions": {
                "Empty": {
                    "type": "object",
                    "title": "Empty Schema"
                }
            },
            "x-amazon-apigateway-binary-media-types": [
                "application/octet-stream",
                "application/x-tar",
                "application/zip",
                "audio/basic",
                "audio/ogg",
                "audio/mp4",
                "audio/mpeg",
                "audio/wav",
                "audio/webm",
                "image/png",
                "image/jpg",
                "image/jpeg",
                "image/gif",
                "video/ogg",
                "video/mpeg",
                "video/webm"
            ]
        }