Example #1
0
def _create_cloud_watch_rule_from_meta(name, meta, region):
    """Create a CloudWatch rule in the given region from its meta.

    Returns the rule description; when a rule with the same name already
    exists in the region, the existing rule is described instead.

    :param name: rule name
    :param meta: resource meta, must contain 'rule_type'
    :param region: AWS region name
    :raises AssertionError: when meta contains an unknown rule_type
    """
    # validation depends on rule type: schedule rules also need 'expression'
    required_parameters = ['rule_type']
    rule_type = meta.get('rule_type')
    if rule_type == 'schedule':
        required_parameters.append('expression')
    validate_params(name, meta, required_parameters)

    event_buses = meta.get('event_bus_accounts')
    response = CONN.cw_events(region).get_rule(name)
    if response:
        _LOG.warn('%s rule exists in %s.', name, region)
        return describe_rule(name=name, meta=meta, region=region,
                             response=response)
    try:
        func = RULE_TYPES[rule_type]
    except KeyError:
        # bug fix: the original try block also wrapped rule creation and
        # target attachment, so any unrelated KeyError raised there was
        # misreported as an 'invalid rule type' error
        raise AssertionError(
            'Invalid rule type: {0} for resource {1}. '
            'Please, change rule type with existing: '
            'schedule|ec2|api_call.'.format(rule_type, name))
    func(name, meta, CONN.cw_events(region))
    if event_buses:
        # give the freshly created rule time to propagate before adding
        # cross-account event bus targets
        time.sleep(5)
        _attach_tenant_rule_targets(name, region, event_buses)
    _LOG.info('Created cloud watch rule %s in %s.', name, region)
    response = CONN.cw_events(region).get_rule(name)
    time.sleep(5)
    return describe_rule(name=name, meta=meta, region=region,
                         response=response)
Example #2
0
def _subscribe_lambda_to_sns_topic(lambda_arn, topic_name, region):
    """Subscribe the lambda to an SNS topic and permit SNS to invoke it.

    A ClientError while adding the invocation permission is treated as the
    lambda policy size limit being hit and is only logged.
    """
    sns_conn = CONN.sns(region)
    topic_arn = sns_conn.subscribe(lambda_arn, topic_name, 'lambda')
    try:
        CONN.lambda_conn().add_invocation_permission(
            lambda_arn, 'sns.amazonaws.com', source_arn=topic_arn)
    except ClientError:
        _LOG.warn('The final access policy size for lambda {} is reached. '
                  'The limit is 20480 bytes. '
                  'Invocation permission was not added.'.format(lambda_arn))
Example #3
0
def _create_cloud_watch_trigger_from_meta(topic_name, trigger_meta, region):
    """Attach an SNS topic as the target of a CloudWatch rule."""
    validate_params(topic_name, trigger_meta, ['target_rule'])
    rule_name = trigger_meta['target_rule']

    sns_conn = CONN.sns(region)
    topic_arn = sns_conn.get_topic_arn(topic_name)
    CONN.cw_events(region).add_rule_target(rule_name, topic_arn)
    sns_conn.allow_service_invoke(topic_arn, 'events.amazonaws.com')
    _LOG.info('SNS topic %s subscribed to cloudwatch rule %s', topic_name,
              rule_name)
Example #4
0
def remove_deploy_output(bundle_name, deploy_name):
    """Delete the regular deploy output file from the deploy bucket.

    A missing output file is only logged, not treated as an error.
    """
    from syndicate.core import CONFIG, CONN
    key = _build_output_key(bundle_name=bundle_name,
                            deploy_name=deploy_name,
                            is_regular_output=True)
    bucket = CONFIG.deploy_target_bucket
    if not CONN.s3().is_file_exists(bucket, key):
        _LOG.warn(
            'Output file for deploy {0} does not exist.'.format(deploy_name))
        return
    CONN.s3().remove_object(bucket, key)
Example #5
0
def _create_kinesis_stream_trigger_from_meta(lambda_name, lambda_arn,
                                             role_name, trigger_meta):
    """Subscribe a lambda to the kinesis stream described in trigger meta.

    Attaches an inline policy to the execution role that allows reading
    the stream and invoking the lambda, then registers the event source.

    :param lambda_name: lambda function name (for logging / policy name)
    :param lambda_arn: arn of the lambda to invoke
    :param role_name: execution role that receives the inline policy
    :param trigger_meta: dict with 'target_stream', 'batch_size' and
        'starting_position'
    """
    required_parameters = ['target_stream', 'batch_size', 'starting_position']
    validate_params(lambda_name, trigger_meta, required_parameters)

    stream_name = trigger_meta['target_stream']

    stream = CONN.kinesis().get_stream(stream_name)
    stream_arn = stream['StreamDescription']['StreamARN']
    stream_status = stream['StreamDescription']['StreamStatus']
    # additional waiting for stream activation before wiring the trigger
    if stream_status != 'ACTIVE':
        _LOG.debug('Kinesis stream %s is not in active state,'
                   ' waiting for activation...', stream_name)
        time.sleep(120)

    # TODO policy should be moved to meta
    policy_name = '{0}KinesisTo{1}Lambda'.format(stream_name, lambda_name)
    policy_document = {
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "lambda:InvokeFunction"
                ],
                "Resource": [
                    lambda_arn
                ]
            },
            {
                "Action": [
                    # NOTE(review): 'kinesis:DescribeStreams' is not a real
                    # IAM action (the API is DescribeStream / ListStreams);
                    # kept to preserve the deployed policy document
                    "kinesis:DescribeStreams",
                    "kinesis:DescribeStream",
                    "kinesis:ListStreams",
                    "kinesis:GetShardIterator",
                    # bug fix: action was mis-cased as 'Kinesis:GetRecords'
                    "kinesis:GetRecords"
                ],
                "Effect": "Allow",
                "Resource": stream_arn
            }
        ],
        "Version": "2012-10-17"
    }
    CONN.iam().attach_inline_policy(role_name=role_name,
                                    policy_name=policy_name,
                                    policy_document=policy_document)
    _LOG.debug('Inline policy %s is attached to role %s',
               policy_name, role_name)
    _LOG.debug('Waiting for activation policy %s...', policy_name)
    # IAM policy propagation is eventually consistent
    time.sleep(10)

    _add_kinesis_event_source(lambda_arn, stream_arn, trigger_meta)
    _LOG.info('Lambda %s subscribed to kinesis stream %s', lambda_name,
              stream_name)
Example #6
0
def _remove_cloud_watch_rule(arn, config):
    """Remove a CloudWatch rule; a missing rule is logged, not raised."""
    region = arn.split(':')[3]
    resource_name = config['resource_name']
    try:
        CONN.cw_events(region).remove_rule(resource_name)
        _LOG.info('Rule %s was removed', resource_name)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code != 'ResourceNotFoundException':
            raise e
        _LOG.warn('Rule %s is not found', resource_name)
Example #7
0
def _remove_sns_application(arn, config):
    """Remove an SNS platform application by arn; missing app is logged."""
    region = arn.split(':')[3]
    application_name = config['resource_name']
    try:
        CONN.sns(region).remove_application_by_arn(arn)
        _LOG.info('SNS application %s was removed.', application_name)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code != 'ResourceNotFoundException':
            raise e
        _LOG.warn('SNS application %s is not found', application_name)
def _create_cloud_watch_trigger_from_meta(lambda_name, lambda_arn, role_name,
                                          trigger_meta):
    """Attach the lambda as a target of a CloudWatch rule and allow the
    events service to invoke it."""
    validate_params(lambda_name, trigger_meta, ['target_rule'])
    rule_name = trigger_meta['target_rule']

    events_conn = CONN.cw_events()
    rule_arn = events_conn.get_rule_arn(rule_name)
    events_conn.add_rule_target(rule_name, lambda_arn)
    _LAMBDA_CONN.add_invocation_permission(lambda_arn, 'events.amazonaws.com',
                                           rule_arn)
    _LOG.info('Lambda %s subscribed to cloudwatch rule %s', lambda_name,
              rule_name)
def describe_queue_from_meta(name, meta):
    """Describe an SQS queue from meta; returns {} when it is absent."""
    region = meta.get('region', CONFIG.region)
    resource_name = _build_resource_name(meta.get('fifo_queue', False), name)
    sqs_conn = CONN.sqs(region)
    queue_url = sqs_conn.get_queue_url(resource_name, CONFIG.account_id)
    if not queue_url:
        return {}
    attributes = sqs_conn.get_queue_attributes(queue_url)
    queue_arn = _build_queue_arn(resource_name, region)
    return {queue_arn: build_description_obj(attributes, name, meta)}
Example #10
0
def load_deploy_output(bundle_name, deploy_name):
    """Load and parse the regular deploy output file from S3.

    :raises AssertionError: when the output file does not exist
    """
    from syndicate.core import CONFIG, CONN
    key = _build_output_key(bundle_name=bundle_name,
                            deploy_name=deploy_name,
                            is_regular_output=True)
    bucket = CONFIG.deploy_target_bucket
    if not CONN.s3().is_file_exists(bucket, key):
        raise AssertionError('Deploy name {0} does not exist.'
                             ' Cannot find output file.'.format(deploy_name))
    output_file = CONN.s3().load_file_body(bucket, key)
    return json.loads(output_file)
Example #11
0
def _attach_tenant_rule_targets(rule_name, region, event_buses):
    """Attach the default event bus of each tenant account as a rule target.

    Buses already attached to the rule are skipped.

    :param rule_name: CloudWatch rule to attach targets to
    :param event_buses: iterable of tenant account ids
    """
    cw_conn = CONN.cw_events(region)
    for event_bus in event_buses:
        target_arn = 'arn:aws:events:{0}:{1}:event-bus/default'.format(
            region, event_bus)
        existing_targets = cw_conn.list_targets_by_rule(rule_name=rule_name)
        # bug fix: the original returned from the whole function on the
        # first already-attached bus, silently skipping the remaining ones
        if any(target['Arn'] == target_arn for target in existing_targets):
            _LOG.debug('Target to event bus %s is already attached',
                       target_arn)
            continue
        cw_conn.add_rule_target(rule_name=rule_name, target_arn=target_arn)
Example #12
0
def create_bundles_bucket():
    """Create the deploy target (bundles) bucket unless it already exists."""
    from syndicate.core import CONFIG, CONN
    bucket = CONFIG.deploy_target_bucket
    if CONN.s3().is_bucket_exists(bucket):
        _LOG.info('Bundles bucket {0} already exists'.format(bucket))
        return
    _LOG.info(
        'Bundles bucket {0} does not exist. Creating bucket..'.format(bucket))
    CONN.s3().create_bucket(bucket_name=bucket, location=CONFIG.region)
    _LOG.info('{0} bucket created successfully'.format(bucket))
Example #13
0
def _create_sns_topic_from_meta(name, meta, region):
    """Create an SNS topic with its triggers, or describe an existing one."""
    arn = CONN.sns(region).get_topic_arn(name)
    if arn:
        _LOG.warn('{0} sns topic exists in region {1}.'.format(name, region))
        return describe_sns(name=name, meta=meta, region=region, arn=arn)
    arn = CONN.sns(region).create_topic(name)
    # wire up every configured event source trigger for the new topic
    for trigger_meta in meta.get('event_sources') or []:
        create_trigger = CREATE_TRIGGER[trigger_meta['resource_type']]
        create_trigger(name, trigger_meta, region)
    _LOG.info('SNS topic %s in region %s created.', name, region)
    return describe_sns(name=name, meta=meta, region=region, arn=arn)
Example #14
0
def if_bundle_exist(bundle_name):
    """Return the S3 keys under the bundle folder (truthy when it exists).

    :raises AssertionError: when the bundles bucket itself is missing
    """
    from syndicate.core import CONFIG, CONN
    _assert_bundle_bucket_exists()
    prefix = bundle_name + DEFAULT_SEP
    return CONN.s3().get_keys_by_prefix(CONFIG.deploy_target_bucket, prefix)
def _create_cloud_watch_trigger_from_meta(name, trigger_meta):
    """Attach a CloudWatch rule target that starts the state machine.

    :param name: state machine name
    :param trigger_meta: dict with 'target_rule', 'input' and 'iam_role'
    """
    required_parameters = ['target_rule', 'input', 'iam_role']
    validate_params(name, trigger_meta, required_parameters)
    rule_name = trigger_meta['target_rule']
    # renamed local: 'input' shadowed the builtin of the same name
    sf_input = trigger_meta['input']
    sf_role = trigger_meta['iam_role']

    sf_arn = _build_sm_arn(name, CONFIG.region)
    sf_description = _SF_CONN.describe_state_machine(arn=sf_arn)
    if sf_description.get('status') != 'ACTIVE':
        # previously a silent no-op; make the skipped attachment visible
        _LOG.warn('State machine %s is not active, trigger is not attached',
                  name)
        return
    sf_role_arn = _IAM_CONN.check_if_role_exists(sf_role)
    if not sf_role_arn:
        # previously a silent no-op; make the skipped attachment visible
        _LOG.warn('Role %s is not found, trigger is not attached', sf_role)
        return
    CONN.cw_events().add_rule_sf_target(rule_name, sf_arn, sf_input,
                                        sf_role_arn)
    _LOG.info('State machine %s subscribed to cloudwatch rule %s',
              name, rule_name)
def _enable_autoscaling(autoscaling_config, name):
    """Register autoscaling targets and policies for each config item.

    Items whose IAM role cannot be resolved are skipped with a warning.

    :return: dict with the registered 'targets' and 'policies'
    """
    targets = []
    policies = []
    required = ['resource_name', 'dimension', 'min_capacity', 'max_capacity',
                'role_name']
    for item in autoscaling_config:
        validate_params(name, item, required)
        role_name = item['role_name']
        role_arn = CONN.iam().check_if_role_exists(role_name)
        if not role_arn:
            _LOG.warn('Role %s is not found, skip autoscaling config',
                      role_name)
            continue
        dimension = item['dimension']
        resource_id, sc_targets = register_autoscaling_target(
            dimension, item, role_arn, name)
        targets.extend(sc_targets)
        _LOG.debug('Autoscaling %s is set up for %s', dimension, resource_id)
        autoscaling_policy = item.get('config')
        if autoscaling_policy:
            policy_name = autoscaling_policy['policy_name']
            _LOG.debug('Going to set up autoscaling with '
                       'policy %s', policy_name)
            policies.append(put_autoscaling_policy(
                autoscaling_policy, dimension, policy_name, resource_id))
            _LOG.debug('Policy %s is set up', policy_name)
    return {'targets': targets, 'policies': policies}
Example #17
0
def _assert_bundle_bucket_exists():
    """Fail fast when the deploy target bucket has not been created yet."""
    from syndicate.core import CONFIG, CONN
    if CONN.s3().is_bucket_exists(CONFIG.deploy_target_bucket):
        return
    raise AssertionError("Bundles bucket {0} does not exist."
                         " Please use 'create_deploy_target_bucket' to "
                         "create the bucket."
                         .format(CONFIG.deploy_target_bucket))
Example #18
0
def _create_dynamodb_trigger_from_meta(lambda_name, lambda_arn, role_name,
                                       trigger_meta):
    """Subscribe a lambda to a DynamoDB table stream, enabling it first
    when needed."""
    validate_params(lambda_name, trigger_meta, ['target_table', 'batch_size'])
    table_name = trigger_meta['target_table']

    dynamodb_conn = CONN.dynamodb()
    if not dynamodb_conn.is_stream_enabled(table_name):
        dynamodb_conn.enable_table_stream(table_name)

    stream_arn = dynamodb_conn.get_table_stream_arn(table_name)
    # TODO support another sub type
    # start_position='LATEST' - in case we did not remove tables before
    _LAMBDA_CONN.add_event_source(lambda_arn, stream_arn,
                                  trigger_meta['batch_size'],
                                  start_position='LATEST')
    _LOG.info('Lambda %s subscribed to dynamodb table %s', lambda_name,
              table_name)
Example #19
0
def create_deploy_output(bundle_name, deploy_name, output, success,
                         replace_output=False):
    """Serialize the deploy output and store it in the deploy bucket.

    An existing output file is kept unless replace_output is set.
    """
    from syndicate.core import CONFIG, CONN
    output_str = json.dumps(output, default=_json_serial)
    key = _build_output_key(bundle_name=bundle_name,
                            deploy_name=deploy_name,
                            is_regular_output=success)
    bucket = CONFIG.deploy_target_bucket
    if CONN.s3().is_file_exists(bucket, key) and not replace_output:
        _LOG.warn(
            'Output file for deploy {0} already exists.'.format(deploy_name))
        return
    CONN.s3().put_object(output_str, key, bucket, 'application/json')
    _LOG.info('Output file with name {} has been {}'.format(
        key, 'replaced' if replace_output else 'created'))
def _remove_queue(arn, config):
    """Delete an SQS queue; a missing queue is logged, not raised."""
    region = arn.split(':')[3]
    queue_name = config['resource_name']
    resource_meta = config['resource_meta']
    try:
        resource_name = _build_resource_name(
            resource_meta.get('fifo_queue', False), queue_name)
        queue_url = CONN.sqs(region).get_queue_url(resource_name,
                                                   CONFIG.account_id)
        if not queue_url:
            _LOG.warn('SQS queue %s is not found', queue_name)
            return
        CONN.sqs(region).delete_queue(queue_url)
        _LOG.info('SQS queue %s was removed.', queue_name)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code != 'ResourceNotFoundException':
            raise e
        _LOG.warn('SQS queue %s is not found', queue_name)
Example #21
0
def describe_sns_from_meta(name, meta):
    """Describe the SNS topic in every region where it exists.

    :return: list of {topic_arn: description} dicts
    """
    new_region_args = create_args_for_multi_region([{
        'name': name,
        'meta': meta
    }], ALL_REGIONS)
    found_topics = []
    for arg in new_region_args:
        region = arg['region']
        topic_arn = CONN.sns(region).get_topic_arn(name)
        if not topic_arn:
            continue
        attributes = CONN.sns(region).get_topic_attributes(topic_arn)
        if attributes:
            found_topics.append((topic_arn, attributes))
    return [{arn: build_description_obj(attributes, name, meta)}
            for arn, attributes in found_topics]
Example #22
0
def _create_platform_application_from_meta(name, meta, region):
    """Create an SNS platform application, or describe the existing one.

    :param meta: must contain 'platform' and 'attributes'
    """
    required_parameters = ['platform', 'attributes']
    validate_params(name, meta, required_parameters)
    arn = CONN.sns(region).get_platform_application(name)
    if arn:
        _LOG.warn('{0} SNS platform application exists in region {1}.'.format(
            name, region))
        return describe_sns_application(name, meta, region, arn)
    platform = meta['platform']
    atrbts = meta['attributes']
    try:
        arn = CONN.sns(region).create_platform_application(name=name,
                                                           platform=platform,
                                                           attributes=atrbts)
    except ClientError as e:
        exception_type = e.response['Error']['Code']
        if exception_type != 'InvalidParameterException':
            raise e
        _LOG.warn('SNS application %s is already exist.', name)
        # bug fix: the original left arn as None on this branch and then
        # described the application with an empty arn; resolve it instead
        arn = CONN.sns(region).get_platform_application(name)
    _LOG.info('SNS platform application %s in region %s has been created.',
              name, region)
    return describe_sns_application(name, meta, region, arn)
def _create_sqs_queue_from_meta(name, meta):
    """Create an SQS queue from meta, or describe the existing one.

    :raises AssertionError: when a FIFO queue is requested in a region
        that does not support FIFO queues
    """
    region = meta.get('region', CONFIG.region)
    is_fifo = meta.get('fifo_queue', False)
    resource_name = _build_resource_name(is_fifo, name)
    queue_url = CONN.sqs(region).get_queue_url(resource_name,
                                               CONFIG.account_id)
    if queue_url:
        _LOG.warn('SQS queue %s exists.', name)
        return describe_queue(queue_url, name, meta, resource_name, region)
    if is_fifo and region not in FIFO_REGIONS:
        # bug fix: the message was never formatted - AssertionError was
        # built from a (template, region) tuple instead of the final text
        raise AssertionError(
            'FIFO queue is not available in {0}.'.format(region))
    # all queue attributes are optional; absent ones are passed as None
    params = dict(
        queue_name=resource_name,
        delay_seconds=meta.get('delay_seconds'),
        maximum_message_size=meta.get('maximum_message_size'),
        message_retention_period=meta.get('message_retention_period'),
        policy=meta.get('policy'),
        receive_message_wait_time_seconds=meta.get(
            'receive_message_wait_time_seconds'),
        redrive_policy=meta.get('redrive_policy'),
        visibility_timeout=meta.get('visibility_timeout'),
        kms_master_key_id=meta.get('kms_master_key_id'),
        kms_data_key_reuse_period_seconds=meta.get(
            'kms_data_key_reuse_period_seconds'),
        fifo_queue=is_fifo,
        content_based_deduplication=meta.get('content_based_deduplication'))
    queue_url = CONN.sqs(region).create_queue(**params)['QueueUrl']
    _LOG.info('Created SQS queue %s.', name)
    return describe_queue(queue_url, name, meta, resource_name, region)
Example #24
0
 def real_wrapper(*args, **kwargs):
     # Decorator body: refuses to run the wrapped deploy function when an
     # output file for this bundle/deploy already exists and the caller did
     # not pass --replace_output.
     # NOTE(review): relies on 'func', CONN and CONFIG from the enclosing
     # scope (the decorator factory is not visible in this chunk).
     deploy_name = kwargs.get('deploy_name')
     bundle_name = kwargs.get('bundle_name')
     replace_output = kwargs.get('replace_output')
     if deploy_name and bundle_name and not replace_output:
         output_file_name = '{}/outputs/{}.json'.format(
             bundle_name, deploy_name)
         exists = CONN.s3().is_file_exists(CONFIG.deploy_target_bucket,
                                           key=output_file_name)
         if exists:
             _LOG.warn(
                 'Output file already exists with name {}.'
                 ' If it should be replaced with new one, '
                 'use --replace_output flag.'.format(output_file_name))
             # abort: returns None instead of invoking the wrapped function
             return
     return func(*args, **kwargs)
Example #25
0
def _create_sqs_trigger_from_meta(lambda_name, lambda_arn, role_name,
                                  trigger_meta):
    """Subscribe a lambda to an SQS queue via an event source mapping.

    Silently skips (with a debug log) when the queue does not exist.
    """
    validate_params(lambda_name, trigger_meta,
                    ['target_queue', 'batch_size'])
    target_queue = trigger_meta['target_queue']

    if not CONN.sqs().get_queue_url(target_queue, CONFIG.account_id):
        _LOG.debug('Queue %s does not exist', target_queue)
        return

    queue_arn = 'arn:aws:sqs:{0}:{1}:{2}'.format(CONFIG.region,
                                                 CONFIG.account_id,
                                                 target_queue)
    _LAMBDA_CONN.add_event_source(lambda_arn, queue_arn,
                                  trigger_meta['batch_size'])
    _LOG.info('Lambda %s subscribed to SQS queue %s', lambda_name,
              target_queue)
Example #26
0
    def real_wrapper(*args, **kwargs):
        # Decorator body: refuses to run the wrapped deploy function when an
        # output file for this bundle/deploy already exists and the caller
        # did not pass --replace_output.
        # NOTE(review): relies on 'func' from the enclosing scope (the
        # decorator factory is not visible in this chunk).
        from syndicate.core import CONN
        from syndicate.core import CONFIG
        deploy_name = kwargs.get('deploy_name')
        bundle_name = kwargs.get('bundle_name')
        replace_output = kwargs.get('replace_output')
        if deploy_name and bundle_name and not replace_output:
            output_file_name = '{}/outputs/{}.json'.format(bundle_name,
                                                           deploy_name)

            exists = CONN.s3().is_file_exists(
                CONFIG.deploy_target_bucket,
                key=output_file_name)
            if exists:
                _LOG.warn(f'Output file already exists with name '
                          f'{output_file_name}. If it should be replaced with '
                          f'new one, use --replace_output flag.')
                # abort: returns None instead of invoking the wrapped function
                return
        return func(*args, **kwargs)
Example #27
0
def describe_rule_from_meta(name, meta):
    """Describe the CloudWatch rule in every region where it exists.

    :return: list of {rule_arn: description} dicts
    """
    new_region_args = create_args_for_multi_region(
        [
            {'name': name,
             'meta': meta}
        ],
        ALL_REGIONS)
    found_rules = []
    for arg in new_region_args:
        rule = CONN.cw_events(arg['region']).get_rule(name)
        if rule:
            found_rules.append(rule)

    description = []
    for rule in found_rules:
        # the arn becomes the key, so strip it out of the rule body
        arn = rule.pop('Arn')
        description.append({arn: build_description_obj(rule, name, meta)})
    return description
Example #28
0
def _create_alarm_from_meta(name, meta):
    """ Create alarm resource in AWS Cloud via meta description.

    :type name: str
    :type meta: dict
    """
    validate_params(name, meta, [
        'metric_name', 'namespace', 'period', 'threshold',
        'evaluation_periods', 'comparison_operator', 'statistic'
    ])

    if _CW_METRIC.is_alarm_exists(name):
        _LOG.warn('%s alarm exists.', name)
        return describe_alarm(name, meta)

    params = dict(alarm_name=name,
                  metric_name=meta['metric_name'],
                  namespace=meta['namespace'],
                  period=meta['period'],
                  evaluation_periods=meta['evaluation_periods'],
                  threshold=meta['threshold'],
                  statistic=meta['statistic'],
                  comparison_operator=meta['comparison_operator'])

    # optional alarm actions: resolve every configured SNS topic to its arn
    sns_topic_arns = [CONN.sns().get_topic_arn(topic)
                      for topic in meta.get('sns_topics') or []]
    if sns_topic_arns:
        params['alarm_actions'] = sns_topic_arns

    _CW_METRIC.put_metric_alarm(**params)
    _LOG.info('Created alarm {0}.'.format(name))
    return describe_alarm(name, meta)
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""
from botocore.exceptions import ClientError

from syndicate.commons.log_helper import get_logger
from syndicate.connection.helper import retry
from syndicate.core import CONFIG, CONN
from syndicate.core.helper import create_pool, prettify_json, unpack_kwargs
from syndicate.core.resources.helper import (build_description_obj,
                                             resolve_dynamic_identifier)

# Module-level IAM connection shared by the functions below.
_IAM_CONN = CONN.iam()

# Module logger for the iam_resource module.
_LOG = get_logger('syndicate.core.resources.iam_resource')


def remove_policies(args):
    """Remove the IAM policies described by args using a worker pool."""
    create_pool(_remove_policy, args)


@unpack_kwargs
def _remove_policy(arn, config):
    policy_name = config['resource_name']
    try:
        _IAM_CONN.remove_policy(arn)
        _LOG.info('IAM policy %s was removed.', policy_name)
    except ClientError as e:
        {
            "status_code": "503",
            "error_regex": ".*ERROR_CODE\\\": 503.*",
            'response_templates': {
                'application/json': '#set ($errorMessageObj = $util.parseJson('
                                    '$input.path(\'$.errorMessage\')))'
                                    '{"message" : "$errorMessageObj.message"}'
            }
        }
    ]
}

# CORS response header injected into API Gateway method responses;
# the value is quoted because API Gateway expects a literal mapping value.
_CORS_HEADER_NAME = 'Access-Control-Allow-Origin'
_CORS_HEADER_VALUE = "'*'"

# Module-level connections shared by the functions below.
_API_GATEWAY_CONN = CONN.api_gateway()
_LAMBDA_CONN = CONN.lambda_conn()


def api_resource_identifier(name, output=None):
    """Resolve an API Gateway id from deploy output, or look it up by name.

    :param name: API name used for the lookup when no output is given
    :param output: optional deploy output mapping; its first entry's
        description carries the api id
    """
    if output:
        # api currently is not located in different regions,
        # so only the first output object is inspected
        first_described = next(iter(output.values()))
        return first_described['description']['id']
    # no output provided - resolving by name is the only remaining option
    return _API_GATEWAY_CONN.get_api_id(name)