def test_deploy_delete_cmds(awsclient, simple_cloudformation_stack_folder):
    """Run the kumo 'deploy' and 'delete' commands end to end on the sample stack."""
    tooldata = get_tooldata(awsclient, 'kumo', 'deploy')

    # deploy and verify the stack reached CREATE_COMPLETE
    assert deploy_cmd(False, **tooldata) == 0
    assert _get_stack_state(awsclient.get_client('cloudformation'),
                            'infra-dev-kumo-sample-stack') in ['CREATE_COMPLETE']

    # reuse the tooldata, switching only the command for teardown
    tooldata['context']['command'] = 'delete'
    assert delete_cmd(True, **tooldata) == 0
    # after deletion the stack must be gone (state lookup returns None)
    assert _get_stack_state(awsclient.get_client('cloudformation'),
                            'infra-dev-kumo-sample-stack') is None
def test_wire_unwire_new_events_s3(awsclient, vendored_folder, temp_bucket,
                                   cleanup_lambdas, cleanup_roles):
    """Wire a lambda to S3 ObjectCreated events, verify it fires once per
    upload, then unwire it and verify further uploads do not invoke it."""
    log.info('running test_wire_unwire_new_events_s3')

    # create a counting lambda function plus the role it executes under
    temp_string = utils.random_string()
    lambda_name = 'jenkins_test_%s' % temp_string
    role_name = 'unittest_%s_lambda' % temp_string
    role_arn = create_lambda_role_helper(awsclient, role_name)
    cleanup_roles.append(role_name)
    create_lambda_helper(awsclient, lambda_name, role_arn,
                         './resources/sample_lambda/handler_counter.py',
                         lambda_handler='handler_counter.handle')

    events = [{
        "event_source": {
            "arn": "arn:aws:s3:::" + temp_bucket,
            "events": ['s3:ObjectCreated:*'],
            "suffix": ".gz"
        }
    }]
    cleanup_lambdas.append((lambda_name, events))

    # wire the function with the bucket
    assert wire(awsclient, events, lambda_name) == 0

    # put a file into the bucket; the event must fire exactly once
    awsclient.get_client('s3').put_object(
        ACL='public-read',
        Body=b'this is some content',
        Bucket=temp_bucket,
        Key='test_file.gz',
    )
    time.sleep(20)  # sleep till the event arrived
    assert int(_get_count(awsclient, lambda_name)) == 1

    # unwire the function
    assert unwire(awsclient, events, lambda_name) == 0

    # another upload must NOT trigger the (unwired) function
    awsclient.get_client('s3').put_object(
        ACL='public-read',
        Body=b'this is some content',
        Bucket=temp_bucket,
        Key='test_file_2.gz',
    )
    time.sleep(10)
    assert int(_get_count(awsclient, lambda_name)) == 1
def test_event_source_lifecycle_kinesis(awsclient, vendored_folder,
                                        temp_lambda, temp_kinesis):
    """Add, query and remove a Kinesis event source on a temp lambda."""
    log.info('running test_event_source_lifecycle_kinesis')
    lambda_name = temp_lambda[0]

    # resolve the ARN of the lambda's ACTIVE alias
    lambda_arn = awsclient.get_client('lambda').get_alias(
        FunctionName=lambda_name, Name='ACTIVE')['AliasArn']

    # define event source
    evt_source = {
        # e.g. "arn:aws:dynamodb:us-east-1:1234554:table/YourTable/stream/2016-05-11T00:00:00.000"
        "arn": temp_kinesis[1],
        "starting_position": "TRIM_HORIZON",  # supported: TRIM_HORIZON, LATEST
        "batch_size": 50,  # max: 1000
        "enabled": True  # default is false
    }

    # full lifecycle: add -> status -> remove
    _add_event_source(awsclient, evt_source, lambda_arn)
    status = _get_event_source_status(awsclient, evt_source, lambda_arn)
    assert status['EventSourceArn']
    _remove_event_source(awsclient, evt_source, lambda_arn)
def test_kumo_utils_ensure_ebs_tags(cleanup_stack_ec2, awsclient):
    """Deploy an EC2 stack, tag the instance's EBS volumes via
    ensure_ebs_volume_tags_ec2_instance, and verify every attached volume
    carries every tag."""
    are_credentials_still_valid(awsclient)
    cloudformation_ec2, _ = load_cloudformation_template(
        here('resources/sample_ec2_cloudformation_stack/cloudformation.py')
    )
    assert deploy_stack(awsclient, {}, config_ec2, cloudformation_ec2,
                        override_stack_policy=False) == 0

    # the stack output exposes the instance id we tag
    stack_output = get_outputs_for_stack(awsclient,
                                         _get_stack_name(config_ec2))
    instance_id = stack_output.get('InstanceId', None)
    assert instance_id is not None

    tags = [{'Key': 'kumo-test', 'Value': 'Success'}]
    ensure_ebs_volume_tags_ec2_instance(awsclient, instance_id, tags)

    # every EBS volume attached to the instance must now be tagged
    volumes = awsclient.get_client('ec2').describe_volumes(Filters=[{
        'Name': 'attachment.instance-id',
        'Values': [instance_id]
    }])
    for vol in volumes['Volumes']:
        for tag in tags:
            assert check_volume_tagged(vol, tag)
def test_describe_change_set_on_new_stack(awsclient):
    """Create a CREATE-type change set on a fresh stack and describe it."""
    # create a stack we use for the test lifecycle
    cloudformation_simple_stack, _ = load_cloudformation_template(
        here('resources/simple_cloudformation_stack/cloudformation.py'))

    change_set_name, stackname, change_set_type = create_change_set(
        awsclient, {}, config_simple_stack, cloudformation_simple_stack)
    assert stackname == _get_stack_name(config_simple_stack)
    assert change_set_name != ''
    assert change_set_type == 'CREATE'
    describe_change_set(awsclient, change_set_name, stackname)

    # cleanup: deleting the change set is not enough -- the stack itself is
    # left in "REVIEW_IN_PROGRESS" and must be deleted explicitly
    awsclient.get_client('cloudformation').delete_stack(StackName=stackname)
def _get_count(awsclient, function_name, alias_name='ACTIVE', version=None):
    """Send a count request to a lambda function.

    :param awsclient: AWS client factory used to obtain the lambda client
    :param function_name: name of the lambda function to invoke
    :param alias_name: alias used as qualifier when no version is given
    :param version: optional explicit version qualifier
    :return: count retrieved from lambda call (raw payload bytes)
    """
    client_lambda = awsclient.get_client('lambda')
    payload = '{"ramuda_action": "count"}'
    # an explicit version takes precedence over the alias qualifier
    qualifier = version if version else alias_name
    response = client_lambda.invoke(FunctionName=function_name,
                                    InvocationType='RequestResponse',
                                    Payload=payload,
                                    Qualifier=qualifier)
    # response['Payload'] is a StreamingBody; read() drains it fully
    return response['Payload'].read()
def test_event_source_lifecycle_s3(awsclient, vendored_folder, temp_lambda,
                                   temp_bucket):
    """Add, query and remove an S3 event source on a temp lambda."""
    log.info('running test_event_source_lifecycle_s3')
    lambda_name = temp_lambda[0]

    # resolve the ARN of the lambda's ACTIVE alias
    lambda_arn = awsclient.get_client('lambda').get_alias(
        FunctionName=lambda_name, Name='ACTIVE')['AliasArn']

    # define event source
    evt_source = {
        'arn': 'arn:aws:s3:::' + temp_bucket,
        'events': ['s3:ObjectCreated:*'],
        "suffix": ".gz"
    }

    # full lifecycle: add -> status -> remove
    _add_event_source(awsclient, evt_source, lambda_arn)
    status = _get_event_source_status(awsclient, evt_source, lambda_arn)
    assert status['EventSourceArn']
    _remove_event_source(awsclient, evt_source, lambda_arn)
def test_ec2_instance_stop_start(awsclient, simple_cloudformation_stack_with_ec2):
    """Stop and restart the stack's EC2 instance, checking the state
    transitions running -> stopped -> running."""

    def _get_instance_status(ec2_instance):
        # helper: current state name ('running', 'stopped', ...) of one instance
        return all_pages(
            awsclient.get_client('ec2').describe_instance_status,
            {
                'InstanceIds': [ec2_instance],
                'IncludeAllInstances': True
            },
            lambda r: [i['InstanceState']['Name']
                       for i in r.get('InstanceStatuses', [])],
        )[0]

    # collect the EC2 instances created by the stack
    stack_name = _get_stack_name(config_ec2_stack)
    resources = all_pages(
        awsclient.get_client('cloudformation').list_stack_resources,
        {'StackName': stack_name},
        lambda r: r['StackResourceSummaries'],
    )
    instances = [r['PhysicalResourceId'] for r in resources
                 if r['ResourceType'] == 'AWS::EC2::Instance']

    assert _get_instance_status(instances[0]) == 'running'
    _stop_ec2_instances(awsclient, instances, wait=True)
    assert _get_instance_status(instances[0]) == 'stopped'
    _start_ec2_instances(awsclient, instances, wait=True)
    assert _get_instance_status(instances[0]) == 'running'
def test_s3_upload(cleanup_buckets, awsclient):
    """Upload the generated template to the artifact bucket and verify the
    returned S3 URL.

    NOTE(review): a second ``test_s3_upload`` is defined later in this module;
    the later definition shadows this one, so pytest never collects this test
    (flake8 F811). Consider renaming one of the two.
    """
    # inline config instead of parsing
    # resources/simple_cloudformation_stack/settings_upload_dev.conf
    upload_conf = {
        'stack': {
            'StackName': "infra-dev-kumo-sample-stack",
            'artifactBucket': "unittest-kumo-artifact-bucket"
        },
        'parameters': {
            'InstanceType': "t2.micro"
        }
    }

    region = awsclient.get_client('s3').meta.region_name
    account = os.getenv('ACCOUNT', None)
    if account:
        # add account prefix to artifact bucket config
        upload_conf['stack']['artifactBucket'] = \
            '%s-unittest-kumo-artifact-bucket' % account

    artifact_bucket = _get_artifact_bucket(upload_conf)
    prepare_artifacts_bucket(awsclient, artifact_bucket)
    cleanup_buckets.append(artifact_bucket)

    dest_key = 'kumo/%s/%s-cloudformation.json' % (
        region, _get_stack_name(upload_conf))
    expected_s3url = 'https://s3-%s.amazonaws.com/%s/%s' % (
        region, artifact_bucket, dest_key)

    cloudformation_simple_stack, _ = load_cloudformation_template(
        here('resources/simple_cloudformation_stack/cloudformation.py'))
    actual_s3url = _s3_upload(
        awsclient, upload_conf,
        generate_template({}, upload_conf, cloudformation_simple_stack))
    assert expected_s3url == actual_s3url
def test_pythonic_name(awsclient):
    """xform_name converts CamelCase operation names to snake_case; the
    reverse lookup has to be built (or cached) explicitly, as shown here."""
    operation_names = (awsclient.get_client('s3')
                       .meta.service_model.operation_names)
    mapping = {xform_name(name): name for name in operation_names}
    assert mapping['head_object'] == 'HeadObject'
def test_template_variables_to_dict(awsclient):
    """Without a custom hostname the base path falls back to the target stage
    and no 'apiHostname' entry is produced."""
    api_name = 'apiName'
    api_description = 'apiDescription'
    api_target_stage = 'mock'

    result = _template_variables_to_dict(awsclient.get_client('apigateway'),
                                         api_name, api_description,
                                         api_target_stage)

    assert_equal(result['apiName'], api_name)
    assert_equal(result['apiDescription'], api_description)
    assert_equal(result['apiBasePath'], 'mock')
    assert_not_in('apiHostname', result)
def test_basic_lifecycle_cmds(awsclient, simple_cloudformation_stack_folder):
    """Run deploy -> stop -> start -> delete through the kumo commands.

    This only covers parts of the lifecycle; a more thorough lifecycle test
    using `gcdt-sample-stack` is contained in the gcdt PR builder lifecycle.
    """
    tooldata = get_tooldata(awsclient, 'kumo', 'deploy')
    assert deploy_cmd(False, **tooldata) == 0
    assert _get_stack_state(awsclient.get_client('cloudformation'),
                            'infra-dev-kumo-sample-stack') in ['CREATE_COMPLETE']

    # stop, then start the stack's resources
    tooldata['context']['command'] = 'stop'
    assert stop_cmd(**tooldata) == 0
    tooldata['context']['command'] = 'start'
    assert start_cmd(**tooldata) == 0

    # teardown; afterwards the state lookup must return None
    tooldata['context']['command'] = 'delete'
    assert delete_cmd(True, **tooldata) == 0
    assert _get_stack_state(awsclient.get_client('cloudformation'),
                            'infra-dev-kumo-sample-stack') is None
def _get_instance_status(ec2_instance, awsclient=None):
    """Return the instance state name (e.g. 'running', 'stopped') for
    *ec2_instance*.

    :param ec2_instance: EC2 instance id to query.
    :param awsclient: optional AWS client factory. Fix: the original read
        ``awsclient`` as a free variable that is not defined in this scope,
        which raises NameError unless a module-global of that name exists --
        the explicit parameter is backward compatible and the ``None`` default
        preserves the original module-scope lookup.
    :return: state name of the (single) queried instance.
    """
    if awsclient is None:
        # preserve the original behavior of resolving the name at module scope
        awsclient = globals()['awsclient']
    client_ec2 = awsclient.get_client('ec2')
    # IncludeAllInstances=True also reports instances that are not running
    instances_status = all_pages(
        client_ec2.describe_instance_status,
        {
            'InstanceIds': [ec2_instance],
            'IncludeAllInstances': True
        },
        lambda r: [i['InstanceState']['Name']
                   for i in r.get('InstanceStatuses', [])],
    )[0]
    return instances_status
def test_deprecated_schedule_event_source(awsclient, vendored_folder,
                                          cleanup_lambdas_deprecated,
                                          cleanup_roles):
    """Wire a (deprecated-style) time-schedule event source to a counting
    lambda and verify the schedule fires at least twice within 3 minutes."""
    log.info('running test_schedule_event_source')
    # include reading config from settings file
    config = {
        "lambda": {
            "events": {
                "timeSchedules": [{
                    "ruleName": "unittest-dev-lambda-schedule",
                    "ruleDescription": "run every 1 minute",
                    "scheduleExpression": "rate(1 minute)"
                }]
            }
        }
    }

    # for time_event in time_event_sources:
    # Fix: the fallback default for a missing 'events' key must be a dict
    # ({}), not a list ([]) -- list has no .get() and would raise
    # AttributeError instead of falling through cleanly.
    time_event = config['lambda'].get('events', {}).get('timeSchedules', [])[0]
    rule_name = time_event.get('ruleName')
    rule_description = time_event.get('ruleDescription')
    schedule_expression = time_event.get('scheduleExpression')

    # now, I need a lambda function that registers the calls!!
    temp_string = utils.random_string()
    lambda_name = 'jenkins_test_%s' % temp_string
    role_name = 'unittest_%s_lambda' % temp_string
    role_arn = create_lambda_role_helper(awsclient, role_name)
    cleanup_roles.append(role_name)
    create_lambda_helper(awsclient, lambda_name, role_arn,
                         './resources/sample_lambda/handler_counter.py',
                         lambda_handler='handler_counter.handle')
    cleanup_lambdas_deprecated.append(lambda_name)

    # lookup lambda arn of the ACTIVE alias
    lambda_client = awsclient.get_client('lambda')
    alias_name = 'ACTIVE'
    lambda_arn = lambda_client.get_alias(FunctionName=lambda_name,
                                         Name=alias_name)['AliasArn']

    # create scheduled event source and allow events.amazonaws.com to invoke
    rule_arn = _lambda_add_time_schedule_event_source(
        awsclient, rule_name, rule_description, schedule_expression,
        lambda_arn)
    _lambda_add_invoke_permission(awsclient, lambda_name,
                                  'events.amazonaws.com', rule_arn)

    time.sleep(180)  # wait for at least 2 invocations
    count = _get_count(awsclient, lambda_name)
    assert int(count) >= 2
def test_template_variables_to_dict_custom_hostname(awsclient):
    """With a custom hostname/base path, 'apiHostname' and 'apiBasePath' are
    taken from the custom values instead of the target stage."""
    api_name = 'apiName'
    api_description = 'apiDescription'
    api_target_stage = 'mock'

    result = _template_variables_to_dict(
        awsclient.get_client('apigateway'), api_name, api_description,
        api_target_stage, custom_hostname='chn', custom_base_path='cbp')

    assert_equal(result['apiName'], api_name)
    assert_equal(result['apiDescription'], api_description)
    assert_equal(result['apiBasePath'], 'cbp')
    assert_equal(result['apiHostname'], 'chn')
def test_kumo_utils_ensure_autoscaling_ebs_tags(cleanup_stack_autoscaling,
                                                awsclient):
    """Deploy an autoscaling stack and verify that
    ensure_ebs_volume_tags_autoscaling_group tags all EBS volumes of the
    group's instances, and that retagging replaces the old values."""
    are_credentials_still_valid(awsclient)
    cloudformation_autoscaling, _ = load_cloudformation_template(
        here('resources/sample_autoscaling_cloudformation_stack/cloudformation.py')
    )
    assert deploy_stack(awsclient, {}, config_autoscaling,
                        cloudformation_autoscaling,
                        override_stack_policy=False) == 0

    stack_output = get_outputs_for_stack(
        awsclient, _get_stack_name(config_autoscaling))
    as_group_name = stack_output.get('AutoScalingGroupName', None)
    assert as_group_name is not None

    # first pass: tag all volumes with version1
    tags_v1 = [{'Key': 'kumo-test', 'Value': 'version1'}]
    ensure_ebs_volume_tags_autoscaling_group(awsclient, as_group_name, tags_v1)

    # find the group's instances via the autoscaling group-name tag
    client_ec2 = awsclient.get_client('ec2')
    response = client_ec2.describe_instances(Filters=[{
        'Name': 'tag:aws:autoscaling:groupName',
        'Values': [as_group_name]
    }])

    def _volumes_of(instance):
        # all EBS volumes currently attached to this instance
        return client_ec2.describe_volumes(Filters=[{
            'Name': 'attachment.instance-id',
            'Values': [instance['InstanceId']]
        }])['Volumes']

    for reservation in response['Reservations']:
        for instance in reservation['Instances']:
            for vol in _volumes_of(instance):
                for tag in tags_v1:
                    assert check_volume_tagged(vol, tag)

    # second pass: retag with version2; the v1 values must be gone afterwards
    tags_v2 = [{'Key': 'kumo-test', 'Value': 'version2'}]
    ensure_ebs_volume_tags_autoscaling_group(awsclient, as_group_name, tags_v2)
    for reservation in response['Reservations']:
        for instance in reservation['Instances']:
            for vol in _volumes_of(instance):
                for tag in tags_v2:
                    assert check_volume_tagged(vol, tag)
                for tag in tags_v1:
                    assert not check_volume_tagged(vol, tag)
def test_validate_parameters(awsclient):
    """validate_parameters must report both missing required parameters and
    unknown parameters against the HeadObject input shape."""
    service_model = awsclient.get_client('s3').meta.service_model
    operation_model = service_model.operation_model('HeadObject')

    params = {'foo': 'bar'}
    with pytest.raises(Exception) as einfo:
        validate_parameters(params, operation_model.input_shape)

    # all three problems are reported in one exception message
    assert einfo.match(r'.*Missing required parameter in input: "Bucket".*')
    assert einfo.match(r'.*Missing required parameter in input: "Key".*')
    assert einfo.match(r'.*Unknown parameter in input: "foo".*')
def test_event_source_lifecycle_cloudwatch_pattern(awsclient, vendored_folder,
                                                   cleanup_lambdas_deprecated,
                                                   cleanup_roles):
    """Add, query and remove a CloudWatch event-pattern source on a lambda."""
    log.info('running test_event_source_lifecycle_cloudwatch_pattern')
    lambda_folder = './resources/sample_lambda_event_pattern/'
    temp_string = utils.random_string()
    lambda_name = 'jenkins_test_sample-lambda-event-pattern_' + temp_string
    role_name = 'unittest_%s_lambda' % temp_string
    role_arn = create_lambda_role_helper(awsclient, role_name)

    # create the function
    create_lambda_helper(awsclient, lambda_name, role_arn,
                         here(lambda_folder + 'handler.py'),
                         lambda_handler='handler.handler',
                         folders_from_file=[],
                         runtime='python2.7')
    cleanup_roles.append(role_name)
    cleanup_lambdas_deprecated.append(lambda_name)

    # lookup lambda arn of the ACTIVE alias
    # (us-east-1 is the only region that implements lambda@edge)
    lambda_arn = awsclient.get_client('lambda').get_alias(
        FunctionName=lambda_name, Name='ACTIVE')['AliasArn']

    # define event source
    evt_source = {
        "name": "ssm_parameter_changed",
        "input_path": "$.detail",
        "pattern": {
            "source": ["aws.ssm"],
            "detail-type": ["Parameter Store Change"]
        }
    }

    # full lifecycle: add -> status -> remove
    _add_event_source(awsclient, evt_source, lambda_arn)
    status = _get_event_source_status(awsclient, evt_source, lambda_arn)
    assert status['EventSourceArn']
    _remove_event_source(awsclient, evt_source, lambda_arn)
def test_s3_upload(cleanup_buckets, awsclient):
    """Upload the simple stack template and verify the returned S3 URL.

    NOTE(review): this redefines ``test_s3_upload`` from earlier in the module
    (flake8 F811); only this later definition is collected by pytest. Consider
    renaming one of the two.
    """
    upload_conf = ConfigFactory.parse_file(
        here('resources/simple_cloudformation_stack/settings_upload_dev.conf'))

    region = awsclient.get_client('s3').meta.region_name
    account = os.getenv('ACCOUNT', None)
    if account:
        # add account prefix to artifact bucket config
        upload_conf['cloudformation']['artifactBucket'] = \
            '%s-unittest-kumo-artifact-bucket' % account

    artifact_bucket = _get_artifact_bucket(upload_conf)
    prepare_artifacts_bucket(awsclient, artifact_bucket)
    cleanup_buckets.append(artifact_bucket)

    dest_key = 'kumo/%s/%s-cloudformation.json' % (
        region, _get_stack_name(upload_conf))
    expected_s3url = 'https://s3-%s.amazonaws.com/%s/%s' % (
        region, artifact_bucket, dest_key)

    cloudformation_simple_stack, _ = load_cloudformation_template(
        here('resources/simple_cloudformation_stack/cloudformation.py'))
    actual_s3url = _s3_upload(awsclient, upload_conf,
                              cloudformation_simple_stack)
    assert expected_s3url == actual_s3url
def test_event_source_lifecycle_cloudwatch_schedule(awsclient,
                                                    vendored_folder,
                                                    temp_lambda):
    """Add, query and remove a CloudWatch schedule event source; a freshly
    added schedule must be ENABLED."""
    log.info('running test_event_source_lifecycle_cloudwatch_schedule')
    lambda_name = temp_lambda[0]

    # resolve the ARN of the lambda's ACTIVE alias
    lambda_arn = awsclient.get_client('lambda').get_alias(
        FunctionName=lambda_name, Name='ACTIVE')['AliasArn']

    # define event source
    evt_source = {
        "name": "unittest_execute_backup",
        "schedule": "rate(1 minute)"
    }

    # full lifecycle: add -> status -> remove
    _add_event_source(awsclient, evt_source, lambda_arn)
    status = _get_event_source_status(awsclient, evt_source, lambda_arn)
    assert status['EventSourceArn']
    assert status['State'] == 'ENABLED'
    _remove_event_source(awsclient, evt_source, lambda_arn)
def test_event_source_lifecycle_sns(awsclient, vendored_folder, temp_lambda,
                                    temp_sns_topic):
    """Add, query and remove an SNS event source on a temp lambda."""
    log.info('running test_event_source_lifecycle_sns')
    lambda_name = temp_lambda[0]

    # resolve the ARN of the lambda's ACTIVE alias
    lambda_arn = awsclient.get_client('lambda').get_alias(
        FunctionName=lambda_name, Name='ACTIVE')['AliasArn']

    # define event source
    evt_source = {
        # e.g. "arn:aws:sns:::your-event-topic-arn"
        "arn": temp_sns_topic[1],
        "events": ["sns:Publish"]
    }

    # full lifecycle: add -> status -> remove
    _add_event_source(awsclient, evt_source, lambda_arn)
    status = _get_event_source_status(awsclient, evt_source, lambda_arn)
    assert status['EventSourceArn']
    _remove_event_source(awsclient, evt_source, lambda_arn)
def test_get_stack_state(awsclient, simple_cloudformation_stack):
    """A freshly created stack reports CREATE_IN_PROGRESS or CREATE_COMPLETE."""
    cfn_client = awsclient.get_client('cloudformation')
    state = _get_stack_state(cfn_client, simple_cloudformation_stack)
    assert state in ['CREATE_IN_PROGRESS', 'CREATE_COMPLETE']
def test_call_hook(awsclient, sample_cloudformation_stack_with_hooks):
    """Deploying the hook stack runs its hooks.

    Note: the asserts for hook parameters are located in the hook itself;
    here we only check that the stack reached a create state.
    """
    cfn_client = awsclient.get_client('cloudformation')
    state = _get_stack_state(cfn_client, sample_cloudformation_stack_with_hooks)
    assert state in ['CREATE_IN_PROGRESS', 'CREATE_COMPLETE']
def test_create_update_stack_artifactbucket(awsclient,
                                            temp_cloudformation_policy,
                                            cleanup_roles, cleanup_buckets):
    """Full create -> update -> delete lifecycle of the simple stack via an
    artifact bucket; the update's only change is adding a RoleARN."""
    # create a stack we use for the test lifecycle
    cloudformation_simple_stack, _ = load_cloudformation_template(
        here('resources/simple_cloudformation_stack/cloudformation.py')
    )
    upload_conf = {
        'stack': {
            'StackName': "infra-dev-kumo-sample-stack",
            'artifactBucket': "unittest-kumo-artifact-bucket"
        },
        'parameters': {
            'InstanceType': "t2.micro",
        }
    }

    region = awsclient.get_client('s3').meta.region_name
    account = os.getenv('ACCOUNT', None)
    if account:
        # add account prefix to artifact bucket config
        upload_conf['stack']['artifactBucket'] = \
            '%s-unittest-kumo-artifact-bucket' % account

    artifact_bucket = _get_artifact_bucket(upload_conf)
    prepare_artifacts_bucket(awsclient, artifact_bucket)
    cleanup_buckets.append(artifact_bucket)

    # upload the generated template and verify the resulting URL
    dest_key = 'kumo/%s/%s-cloudformation.json' % (
        region, _get_stack_name(upload_conf))
    expected_s3url = 'https://s3-%s.amazonaws.com/%s/%s' % (
        region, artifact_bucket, dest_key)
    actual_s3url = _s3_upload(
        awsclient, upload_conf,
        generate_template({}, upload_conf, cloudformation_simple_stack))
    assert expected_s3url == actual_s3url

    # create role to use for cloudformation update
    role = create_role_helper(
        awsclient,
        'unittest_%s_kumo' % utils.random_string(),
        policies=[
            temp_cloudformation_policy,
            'arn:aws:iam::aws:policy/AWSCodeDeployReadOnlyAccess',
            'arn:aws:iam::aws:policy/AmazonS3FullAccess'
        ],
        principal_service=['cloudformation.amazonaws.com']
    )
    cleanup_roles.append(role['RoleName'])

    # create
    assert deploy_stack(awsclient, {}, upload_conf,
                        cloudformation_simple_stack,
                        override_stack_policy=False) == 0
    stack_id = get_stack_id(awsclient, upload_conf['stack']['StackName'])
    wait_for_stack_create_complete(awsclient, stack_id)

    # update (as a change we add the RoleARN); no parameters changed
    upload_conf['stack']['RoleARN'] = role['Arn']
    changed = get_parameter_diff(awsclient, upload_conf)
    assert not changed
    assert deploy_stack(awsclient, {}, upload_conf,
                        cloudformation_simple_stack,
                        override_stack_policy=False) == 0
    wait_for_stack_update_complete(awsclient, stack_id)

    # cleanup
    assert delete_stack(awsclient, upload_conf) == 0
    wait_for_stack_delete_complete(awsclient, stack_id)
def test_wire_unwire_lambda_with_s3(awsclient, vendored_folder,
                                    cleanup_lambdas, cleanup_roles,
                                    temp_bucket):
    """Wire a lambda to S3 events via config-style sources, verify exactly one
    invocation per upload, then unwire and verify no further invocations."""
    log.info('running test_wire_unwire_lambda_with_s3')

    # create a counting lambda function
    temp_string = helpers.random_string()
    lambda_name = 'jenkins_test_%s' % temp_string
    role_name = 'unittest_%s_lambda' % temp_string
    role_arn = create_lambda_role_helper(awsclient, role_name)
    cleanup_roles.append(role_name)
    create_lambda_helper(awsclient, lambda_name, role_arn,
                         './resources/sample_lambda/handler_counter.py',
                         lambda_handler='handler_counter.handle')
    cleanup_lambdas.append(lambda_name)

    bucket_name = temp_bucket
    config = {
        "lambda": {
            "events": {
                "s3Sources": [{
                    "bucket": bucket_name,
                    "type": "s3:ObjectCreated:*",
                    "suffix": ".gz"
                }]
            }
        }
    }

    # Fix: the fallback default for a missing 'events' key must be a dict
    # ({}), not a list ([]) -- list has no .get() and would raise
    # AttributeError instead of yielding the empty source lists.
    s3_event_sources = config['lambda'].get('events', {}).get('s3Sources', [])
    time_event_sources = config['lambda'].get('events', {}).get(
        'timeSchedules', [])

    # wire the function with the bucket
    exit_code = wire(awsclient, lambda_name, s3_event_sources,
                     time_event_sources)
    assert_equal(exit_code, 0)

    # put a file into the bucket; the event must fire exactly once
    awsclient.get_client('s3').put_object(
        ACL='public-read',
        Body=b'this is some content',
        Bucket=bucket_name,
        Key='test_file.gz',
    )
    time.sleep(20)  # sleep till the event arrived
    assert_equal(int(_get_count(awsclient, lambda_name)), 1)

    # unwire the function
    exit_code = unwire(awsclient, lambda_name, s3_event_sources,
                       time_event_sources)
    assert_equal(exit_code, 0)

    # another upload must NOT trigger the (unwired) function
    awsclient.get_client('s3').put_object(
        ACL='public-read',
        Body=b'this is some content',
        Bucket=bucket_name,
        Key='test_file_2.gz',
    )
    time.sleep(10)
    assert int(_get_count(awsclient, lambda_name)) == 1