Exemplo n.º 1
0
    def deliver_sns_message(self, topic, subject, rendered_jinja_body, sqs_message):
        """Publish a rendered notification to an SNS topic, assuming a
        cross-account role when the topic belongs to another account.

        :param topic: SNS topic ARN to publish to.
        :param subject: Message subject; truncated to fit the SNS limit.
        :param rendered_jinja_body: Already-rendered message body.
        :param sqs_message: Originating message dict, used only for logging.
        """
        # Max length of subject in sns is 100 chars
        if len(subject) > 100:
            subject = subject[:97] + '..'
        try:
            # Account id is the 5th field of the ARN
            # (arn:aws:sns:region:account:topic).
            account = topic.split(':')[4]
            if account in self.sns_cache:
                sns = self.sns_cache[account]
            else:
                # if cross_accounts isn't set, we'll try using the current credentials
                if account not in self.config.get('cross_accounts', []):
                    session = Session()
                else:
                    creds = self.aws_sts.assume_role(
                        RoleArn=self.config['cross_accounts'][account],
                        RoleSessionName="CustodianNotification")['Credentials']
                    session = Session(
                        aws_access_key_id=creds['AccessKeyId'],
                        aws_secret_access_key=creds['SecretAccessKey'],
                        aws_session_token=creds['SessionToken'])
                # Cache one SNS client per account so the role is assumed
                # at most once per process.
                self.sns_cache[account] = sns = session.client('sns')

            self.logger.info("Sending account:%s policy:%s sns:%s to %s" % (
                sqs_message.get('account', ''),
                sqs_message['policy']['name'],
                sqs_message['action'].get('template', 'default'),
                topic))
            sns.publish(TopicArn=topic, Subject=subject, Message=rendered_jinja_body)
        except Exception as e:
            # Broad catch is deliberate: a delivery failure is logged and
            # swallowed so one bad topic cannot abort processing.
            self.logger.warning(
                "Error policy:%s account:%s sending sns to %s \n %s" % (
                    sqs_message['policy'], sqs_message.get('account', 'na'), topic, e))
Exemplo n.º 2
0
    def test_delete_vpc(self):
        """Create a VPC through the handler, then delete it and verify that
        no VPC tagged Name=vpc01 remains (under the moto EC2 mock).
        """
        ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24'
        }

        filters = [{'Name': 'tag:Name', 'Values': ['vpc01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.Vpc resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_vpc.VpcWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.Vpc'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the VPC
            h = ec2_vpc.create_handler(ctx, self.credentials)
            h.create_resource()

            vpcs = list(ec2.vpcs.filter(Filters=filters))

            self.assertEqual(len(vpcs), 1)

            # We clear the resource cache to simulate a new
            # program execution with the 'delete' option
            base.BaseHandler._cache.clear()

            # Delete the VPC
            h.delete_resource()

            vpcs = list(ec2.vpcs.filter(Filters=filters))

            self.assertEqual(len(vpcs), 0)
def get_urls_list():
    """Fetch the URL-list object from S3 and return its stripped bytes.

    Bucket and key names come from the ``s3`` section of the config
    object ``c``.
    """
    # Log message (Japanese): "fetch the URL list from S3 using boto3"
    logging.info("=== boto3 を利用して S3 上の URL リストを取得")
    s3 = Session().client('s3')
    response = s3.get_object(Bucket=c.get('s3','bucket_name'), Key=c.get('s3','urls_list'))
    body = response['Body'].read()
    # Return the whole content in one piece (translated from Japanese)
    return body.strip()
Exemplo n.º 4
0
    def generate(self, incident: str, **kwargs) -> List[str]:
        """Generates the commands that will be run on the host."""
        logger.debug("Generating osquery payload.")
        session = Session()

        # TODO check for existence before deployment
        # Run under diffy's own credentials so the on-instance
        # credentials are not polluted.
        credentials = session.get_credentials()

        target_region = kwargs.get("region", CONFIG.get("DIFFY_PAYLOAD_OSQUERY_REGION"))
        payload_key = kwargs.get("key", CONFIG.get("DIFFY_PAYLOAD_OSQUERY_KEY"))

        # Both settings are mandatory; fail fast with a descriptive error.
        if not target_region:
            raise BadArguments(
                "DIFFY_PAYLOAD_OSQUERY_REGION required for use with OSQuery plugin."
            )

        if not payload_key:
            raise BadArguments(
                "DIFFY_PAYLOAD_OSQUERY_KEY required for use with OSQuery plugin."
            )

        command_list: List[str] = [
            f"export AWS_ACCESS_KEY_ID={credentials.access_key}",
            f"export AWS_SECRET_ACCESS_KEY={credentials.secret_key}",
            f"export AWS_SESSION_TOKEN={credentials.token}",
            f"cd $(mktemp -d -t binaries-{incident}-`date +%s`-XXXXXX)",
            f"aws s3 --region {target_region} cp s3://{payload_key} ./latest.tar.bz2 --quiet",
            "tar xvf latest.tar.bz2 &>/dev/null",
        ]

        command_list += CONFIG.get("DIFFY_PAYLOAD_OSQUERY_COMMANDS")
        return command_list
Exemplo n.º 5
0
def get_temp_creds(profile, token):
  """Use STS to retrieve temporary credentials for <profile>.

  :param profile: Name of the configured profile whose role is assumed.
  :param token: Current MFA token code for the profile's MFA device.
  :returns: dict with ``access_key``, ``secret_key``, ``session_token``
      and ``expires`` (epoch seconds as a float).
  """
  from boto3 import Session   #Late import because importing boto3 is slow

  config = get_boto_config()[profile]
  # The 'source_profile' holds the long-lived credentials used to call STS.
  hub_client = Session(profile_name=config['source_profile']).client('sts')

  response = hub_client.assume_role(
    RoleArn = config['role_arn'],
    RoleSessionName = 'alfed-aws-{}@{}'.format(str(uuid.uuid4())[:8], profile),
    DurationSeconds = 3600,
    SerialNumber = config['mfa_serial'],
    TokenCode = token,
  )

  temp_creds = response['Credentials']

  return {
    'access_key': temp_creds['AccessKeyId'],
    'secret_key': temp_creds['SecretAccessKey'],
    'session_token': temp_creds['SessionToken'],
    #'Expiration' is a timezone-aware datetime; aware datetimes convert to
    #epoch seconds directly via .timestamp(), so no manual UTC arithmetic
    #is needed (the previous comment claiming otherwise was incorrect).
    'expires': temp_creds['Expiration'].timestamp(),
  }
Exemplo n.º 6
0
    def test_delete_internet_gateway(self):
        """Create an internet gateway via the handler, then delete it and
        verify no gateway tagged Name=igw01 remains (moto EC2 mock).
        """
        ctx = {
            'name': 'igw01'
        }

        filters = [{'Name': 'tag:Name', 'Values': ['igw01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.InternetGateway resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_igw.InternetGatewayWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.InternetGateway'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the internet gateway
            h = ec2_igw.create_handler(ctx, self.credentials)
            h.create_resource()

            gateways = list(ec2.internet_gateways.filter(Filters=filters))

            self.assertEqual(len(gateways), 1)

            # We clear the resource cache to simulate a new
            # program execution with the 'delete' option
            base.BaseHandler._cache.clear()

            # Delete the internet gateway
            h.delete_resource()

            gateways = list(ec2.internet_gateways.filter(Filters=filters))

            self.assertEqual(len(gateways), 0)
Exemplo n.º 7
0
    def test_create_subnet(self):
        """Create a VPC, then a subnet in it, and verify the subnet's
        name, CIDR, zone, VPC id, public-IP flag and tags (moto EC2 mock).
        """
        vpc_ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24'
        }

        subnet_ctx = {
            'name': 'subnet01a',
            'cidr_block': '10.0.10.0/25',
            'zone': 'us-west-2a',
            'vpc': 'vpc01',
            'tags': {
                'description': 'Test subnet (zone a) for VPC vpc01'
            }
        }

        # Expected AWS-format tags produced from subnet_ctx.
        tags = [
            {
                'Key': 'Name',
                'Value': 'subnet01a'
            },
            {
                'Key': 'Description',
                'Value': 'Test subnet (zone a) for VPC vpc01'
            }
        ]

        vpc_filters = [{'Name': 'tag:Name', 'Values': ['vpc01']}]
        subnet_filters = [{'Name': 'tag:Name', 'Values': ['subnet01a']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.Subnet resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_subnet.SubnetWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.Subnet'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the VPC
            h = ec2_vpc.create_handler(vpc_ctx, self.credentials)
            h.create_resource()

            vpcs = list(ec2.vpcs.filter(Filters=vpc_filters))
            vpc = vpcs[0]

            # Create the subnet
            h = ec2_subnet.create_handler(subnet_ctx, self.credentials)
            h.create_resource()

            subnets = list(ec2.subnets.filter(Filters=subnet_filters))
            subnet = subnets[0]

            self.assertEqual(len(subnets), 1)
            self.assertEqual(subnet.name, 'subnet01a')
            self.assertEqual(subnet.cidr_block, '10.0.10.0/25')
            self.assertEqual(subnet.availability_zone, 'us-west-2a')
            self.assertEqual(subnet.vpc_id, vpc.id)
            self.assertEqual(subnet.map_public_ip_on_launch, False)
            self.assertCountEqual(subnet.tags, tags)
Exemplo n.º 8
0
    def test_create_dhcp_options(self):
        """Create a DHCP options set via the handler and verify its name,
        configurations and tags (moto EC2 mock).
        """
        ctx = {
            'name': 'dhcp01',
            'domain_name': [
                'test01.us-west-2.aws'
            ],
            'domain_name_servers': [
                '10.0.10.2'
            ],
            'tags': {
                'description': 'DHCP options set for VPC vpc01'
            }
        }

        # Expected AWS-format tags produced from ctx.
        tags = [
            {
                'Key': 'Name',
                'Value': 'dhcp01'
            },
            {
                'Key': 'Description',
                'Value': 'DHCP options set for VPC vpc01'
            }
        ]

        # Expected AWS-format DHCP configuration entries.
        dhcp_configurations = [
            {
                'Key': 'domain-name',
                'Values': [{'Value': 'test01.us-west-2.aws'}]
            },
            {
                'Key': 'domain-name-servers',
                'Values': [{'Value': '10.0.10.2'}]
            }
        ]

        filters = [{'Name': 'tag:Name', 'Values': ['dhcp01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.DhcpOptions resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_dhcp.DhcpOptionsWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.DhcpOptions'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the DHCP options set
            h = ec2_dhcp.create_handler(ctx, self.credentials)
            h.create_resource()

            dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters))
            dhcp = dhcp_options_sets[0]

            self.assertEqual(len(dhcp_options_sets), 1)
            self.assertEqual(dhcp.name, 'dhcp01')
            self.assertCountEqual(dhcp.dhcp_configurations, dhcp_configurations)
            self.assertCountEqual(dhcp.tags, tags)
Exemplo n.º 9
0
    def test_delete_route_table_with_association(self):
        """Create VPC + subnet + route table (associated to the subnet),
        then delete the route table and verify it is gone (moto EC2 mock).
        """
        vpc_ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24'
        }

        subnet_ctx = {
            'name': 'subnet01a',
            'cidr_block': '10.0.10.0/25',
            'zone': 'us-west-2a',
            'vpc': 'vpc01'
        }

        rt_ctx = {
            'name': 'rt01',
            'vpc': 'vpc01',
            'subnets': [
                'subnet01a'
            ]
        }

        filters = [{'Name': 'tag:Name', 'Values': ['rt01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.RouteTable resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_rt.RouteTableWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.RouteTable'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the VPC
            h = ec2_vpc.create_handler(vpc_ctx, self.credentials)
            h.create_resource()

            # Create the subnet
            h = ec2_subnet.create_handler(subnet_ctx, self.credentials)
            h.create_resource()

            # Create the route table
            h = ec2_rt.create_handler(rt_ctx, self.credentials)
            h.create_resource()

            route_tables = list(ec2.route_tables.filter(Filters=filters))

            self.assertEqual(len(route_tables), 1)

            # We clear the resource cache to simulate a new
            # program execution with the 'delete' option
            base.BaseHandler._cache.clear()

            # Delete the route table
            h.delete_resource()

            route_tables = list(ec2.route_tables.filter(Filters=filters))

            self.assertEqual(len(route_tables), 0)
Exemplo n.º 10
0
    def test_delete_attached_internet_gateway(self):
        """Verify that deleting an internet gateway that is still attached
        to a VPC leaves the gateway in place (moto EC2 mock).
        """
        igw_ctx = {
            'name': 'igw01'
        }

        vpc_ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24',
            'internet_gateway': 'igw01'
        }

        igw_filters = [{'Name': 'tag:Name', 'Values': ['igw01']}]
        vpc_filters = [{'Name': 'tag:Name', 'Values': ['vpc01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.InternetGateway resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_igw.InternetGatewayWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.InternetGateway'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the internet gateway
            h = ec2_igw.create_handler(igw_ctx, self.credentials)
            h.create_resource()

            gateways = list(ec2.internet_gateways.filter(Filters=igw_filters))
            igw = gateways[0]

            self.assertCountEqual(igw.attachments, [])

            # Create the VPC
            h = ec2_vpc.create_handler(vpc_ctx, self.credentials)
            h.create_resource()

            vpcs = list(ec2.vpcs.filter(Filters=vpc_filters))
            vpc = vpcs[0]

            # Test that the internet gateway has been attached
            igw.reload()
            attachments = [{'VpcId': vpc.id, 'State': 'available'}]
            self.assertCountEqual(igw.attachments, attachments)

            # We clear the resource cache to simulate a new
            # program execution with the 'delete' option
            base.BaseHandler._cache.clear()

            # Delete the internet gateway
            h = ec2_igw.create_handler(igw_ctx, self.credentials)
            h.delete_resource()

            gateways = list(ec2.internet_gateways.filter(Filters=igw_filters))

            # The gateway was not deleted
            self.assertEqual(len(gateways), 1)
Exemplo n.º 11
0
def configure_sqs_client(graph):
    """Build a boto3 SQS client from the graph's ``sqs_consumer`` config.

    Reads ``profile_name``, ``region_name`` and ``endpoint_url`` from
    ``graph.config.sqs_consumer``.
    """
    consumer_config = graph.config.sqs_consumer
    session = Session(profile_name=consumer_config.profile_name)
    return session.client(
        "sqs",
        endpoint_url=consumer_config.endpoint_url,
        region_name=consumer_config.region_name,
    )
Exemplo n.º 12
0
    def test_create_route_table(self):
        """Create a VPC and a route table in it, then verify the table's
        name, VPC id and tags (moto EC2 mock).
        """
        vpc_ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24'
        }

        rt_ctx = {
            'name': 'rt01',
            'vpc': 'vpc01',
            'tags': {
                'description': 'Replace the default route table for VPC vpc01'
            }
        }

        # Expected AWS-format tags produced from rt_ctx.
        tags = [
            {
                'Key': 'Name',
                'Value': 'rt01'
            },
            {
                'Key': 'Description',
                'Value': 'Replace the default route table for VPC vpc01'
            }
        ]

        vpc_filters = [{'Name': 'tag:Name', 'Values': ['vpc01']}]
        rt_filters = [{'Name': 'tag:Name', 'Values': ['rt01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.RouteTable resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_rt.RouteTableWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.RouteTable'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the VPC
            h = ec2_vpc.create_handler(vpc_ctx, self.credentials)
            h.create_resource()

            vpcs = list(ec2.vpcs.filter(Filters=vpc_filters))
            vpc = vpcs[0]

            # Create the route table
            h = ec2_rt.create_handler(rt_ctx, self.credentials)
            h.create_resource()

            route_tables = list(ec2.route_tables.filter(Filters=rt_filters))
            rt = route_tables[0]

            self.assertEqual(len(route_tables), 1)
            self.assertEqual(rt.name, 'rt01')
            self.assertEqual(rt.vpc_id, vpc.id)
            self.assertCountEqual(rt.tags, tags)
Exemplo n.º 13
0
class AWSClient(object):
    """Manages automatically creating and destroying clients to AWS services."""

    def __init__(self, resource, config, credentials=None, region_name=None):
        """Constructor

        :param resource: AWS specific token for resource type. e.g., 's3', 'sqs', etc.
        :type resource: string
        :param config: Resource specific configuration
        :type config: :class:`botocore.client.Config`
        :param credentials: Authentication values needed to access AWS. If no credentials are passed, then IAM
            role-based access is assumed.
        :type credentials: :class:`util.aws.AWSCredentials`
        :param region_name: The AWS region the resource resides in.
        :type region_name: string
        """
        self.credentials = credentials
        self.region_name = region_name
        self._client = None
        self._resource_name = resource
        self._config = config

    def __enter__(self):
        """Callback handles creating a new client for AWS access."""
        logger.debug('Setting up AWS client...')

        # Only pass what was actually configured; boto3 falls back to its
        # default credential/region resolution for anything omitted.
        kwargs = {}
        if self.credentials:
            kwargs['aws_access_key_id'] = self.credentials.access_key_id
            kwargs['aws_secret_access_key'] = self.credentials.secret_access_key
        if self.region_name:
            kwargs['region_name'] = self.region_name

        self._session = Session(**kwargs)
        self._client = self._session.client(self._resource_name, config=self._config)
        self._resource = self._session.resource(self._resource_name, config=self._config)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Callback handles destroying an existing client."""
        # boto3 clients need no explicit teardown; nothing to release here.
        pass

    @staticmethod
    def instantiate_credentials_from_config(config):
        """Build an AWSCredentials object from a config mapping.

        Returns None when no 'credentials' entry is present; raises
        InvalidAWSCredentials when required keys are missing or empty.
        """
        if 'credentials' in config and config['credentials']:
            credentials_dict = config['credentials']
            if not credentials_dict.get('access_key_id'):
                raise InvalidAWSCredentials('"credentials" requires "access_key_id" to be populated')
            if not credentials_dict.get('secret_access_key'):
                raise InvalidAWSCredentials('"credentials" requires "secret_access_key" to be populated')
            return AWSCredentials(credentials_dict['access_key_id'], credentials_dict['secret_access_key'])
Exemplo n.º 14
0
    def test_create_security_group(self):
        """Create a VPC and a security group in it, then verify the group's
        name, group_name, VPC id and tags (moto EC2 mock).
        """
        vpc_ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24'
        }

        sg_ctx = {
            'name': 'sg01a',
            'description': 'Test security group sg01a',
            'vpc': 'vpc01'
        }

        # Expected AWS-format tags produced from sg_ctx.
        tags = [
            {
                'Key': 'Name',
                'Value': 'sg01a'
            }
        ]

        vpc_filters = [{'Name': 'tag:Name', 'Values': ['vpc01']}]
        sg_filters = [{'Name': 'tag:Name', 'Values': ['sg01a']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.SecurityGroup resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_sg.SecurityGroupWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.SecurityGroup'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the VPC
            h = ec2_vpc.create_handler(vpc_ctx, self.credentials)
            h.create_resource()

            vpcs = list(ec2.vpcs.filter(Filters=vpc_filters))
            vpc = vpcs[0]

            # Create the security group
            h = ec2_sg.create_handler(sg_ctx, self.credentials)
            h.create_resource()

            security_groups = list(ec2.security_groups.filter(Filters=sg_filters))
            sg = security_groups[0]

            self.assertEqual(len(security_groups), 1)
            self.assertEqual(sg.name, 'sg01a')
            # Security groups have a dedicated attribute for their name
            self.assertEqual(sg.name, sg.group_name)
            self.assertEqual(sg.vpc_id, vpc.id)
            self.assertCountEqual(sg.tags, tags)
Exemplo n.º 15
0
 def decrypt(self, value, context=None):
     """Decrypt a legacy AES-CTR sealed value using KMS.

     :param value: Object exposing .key, .contents and .hmac attributes.
     :param context: Optional KMS encryption context dict.
     :returns: The decrypted plaintext.
     """
     context = context or {}
     session = Session(profile_name=self.profile_name)
     kms_client = session.client('kms', region_name=self.region)
     service = KeyService(kms_client, self.kms_key, context)
     sealed = {
         'key': value.key,
         'contents': value.contents,
         'hmac': value.hmac,
     }
     return open_aes_ctr_legacy(service, sealed)
Exemplo n.º 16
0
def configure_sns_producer(graph):
    """
    Configure an SNS producer.

    The SNS Producer requires the following collaborators:
        - Opaque from microcosm.opaque for capturing context information
        - an aws sns client, i.e. from boto.
        - pubsub message codecs: see tests for examples.
        - sns topic arns: see tests for examples.

    """
    if graph.metadata.testing:
        from unittest.mock import MagicMock

        # Under test: either the whole producer or just the SNS client
        # is mocked, depending on the mock_sns flag.
        if not graph.config.sns_producer.mock_sns:
            return MagicMock()

        sns_client = MagicMock()
    else:
        endpoint_url = graph.config.sns_producer.endpoint_url
        profile_name = graph.config.sns_producer.profile_name
        region_name = graph.config.sns_producer.region_name
        session = Session(profile_name=profile_name)
        sns_client = session.client(
            "sns",
            endpoint_url=endpoint_url,
            region_name=region_name,
        )
    # Opaque context is optional; fall back to None when not bound.
    try:
        opaque = graph.opaque
    except NotBoundError:
        opaque = None

    if graph.config.sns_producer.skip is None:
        # In development mode, default to not publishing because there's typically
        # not anywhere to publish to (e.g. no SNS topic)
        skip = graph.metadata.debug
    else:
        # If configured explicitly, respect the flag
        skip = strtobool(graph.config.sns_producer.skip)

    return SNSProducer(
        opaque=opaque,
        pubsub_message_schema_registry=graph.pubsub_message_schema_registry,
        sns_client=sns_client,
        sns_topic_arns=graph.sns_topic_arns,
        skip=skip,
        deferred_batch_size=graph.config.sns_producer.deferred_batch_size,
    )
Exemplo n.º 17
0
 def encrypt(self, plaintext, context=None):
     """Seal *plaintext* with legacy AES-CTR encryption via KMS.

     :param plaintext: The data to encrypt.
     :param context: Optional KMS encryption context dict.
     :returns: An EncryptedValue holding key, contents and hmac.
     """
     context = context or {}
     session = Session(profile_name=self.profile_name)
     kms_client = session.client('kms', region_name=self.region)
     service = KeyService(kms_client, self.kms_key, context)
     sealed = seal_aes_ctr_legacy(service, plaintext)
     return EncryptedValue(sealed["key"], sealed["contents"], sealed["hmac"])
Exemplo n.º 18
0
    def __init__(self, bucket):
        """Set up configuration, a boto3 session and an S3 client bound to
        *bucket*.

        :param bucket: Name of the S3 bucket this instance operates on.
        """
        self.config = get_config()
        self.aws_config = self.__aws_config()
        # NOTE(review): self.aws_access_key_id / self.aws_secret_access_key
        # are not assigned in this method — presumably set by __aws_config();
        # confirm before relying on them. Session's first two positional
        # parameters are aws_access_key_id and aws_secret_access_key.
        self.session = Session(self.aws_access_key_id,
                               self.aws_secret_access_key)

        self.bucket = bucket
        self.s3 = self.session.client('s3')
 def __init__(self, bucket_name, file_location, access_key, secret_access_key, region):
     """Bind an S3 bucket handle using explicit credentials.

     :param bucket_name: Target S3 bucket name.
     :param file_location: File path associated with this instance.
     :param access_key: AWS access key id.
     :param secret_access_key: AWS secret access key.
     :param region: AWS region name.
     """
     self.bucket_name = bucket_name
     self.file_location = file_location
     self.session = Session(
         aws_access_key_id=access_key,
         aws_secret_access_key=secret_access_key,
         region_name=region,
     )
     self.s3 = self.session.resource("s3")
     self.bucket = self.s3.Bucket(bucket_name)
Exemplo n.º 20
0
    def test_delete_security_group(self):
        """Create a VPC and a security group, then delete the group and
        verify no group tagged Name=sg01a remains (moto EC2 mock).
        """
        vpc_ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24'
        }

        sg_ctx = {
            'name': 'sg01a',
            'description': 'Test security group sg01a',
            'vpc': 'vpc01'
        }

        filters = [{'Name': 'tag:Name', 'Values': ['sg01a']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.SecurityGroup resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_sg.SecurityGroupWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.SecurityGroup'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the VPC
            h = ec2_vpc.create_handler(vpc_ctx, self.credentials)
            h.create_resource()

            # Create the security group
            h = ec2_sg.create_handler(sg_ctx, self.credentials)
            h.create_resource()

            security_groups = list(ec2.security_groups.filter(Filters=filters))

            self.assertEqual(len(security_groups), 1)

            # We clear the resource cache to simulate a new
            # program execution with the 'delete' option
            base.BaseHandler._cache.clear()

            # Delete the security group
            h.delete_resource()

            security_groups = list(ec2.security_groups.filter(Filters=filters))

            self.assertEqual(len(security_groups), 0)
def openGate(accName, carParkName, bookNum):
    """Synthesize a welcome announcement with Polly and upload it to S3.

    :param accName: Account holder's name, spoken in the greeting.
    :param carParkName: Car park name, spoken in the greeting.
    :param bookNum: Booking number used to build the S3 object key.
    """
    session = Session(region_name="us-east-1")
    polly = session.client('polly')
    s3 = boto3.client('s3')
    # BUG FIX: the original built a tuple ("voice/", bookNum, ".mp3"),
    # stringified it, and then never used it — every call uploaded to the
    # literal key 'voice/filename.mp3'. Build the per-booking key instead.
    filename = "voice/{}.mp3".format(bookNum)
    text = accName + \
        ". Welcome to " + \
        carParkName + \
        ". The gate will now open. Enjoy your last day at S.A. Launch."
    print(text)
    response = polly.synthesize_speech(
        Text=str(text),
        OutputFormat="mp3",
        VoiceId="Joanna")
    # Stream the synthesized audio straight into S3 under the booking key.
    with closing(response["AudioStream"]) as stream:
        s3.put_object(ACL='public-read', Bucket='sa-launch-demo-onedimsum',
                      Key=filename, Body=stream.read())
Exemplo n.º 22
0
    def test_attach_internet_gateway(self):
        """Create an internet gateway, then a VPC referencing it, and verify
        the gateway ends up attached to the VPC (moto EC2 mock).
        """
        igw_ctx = {
            'name': 'igw01'
        }

        vpc_ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24',
            'internet_gateway': 'igw01'
        }

        igw_filters = [{'Name': 'tag:Name', 'Values': ['igw01']}]
        vpc_filters = [{'Name': 'tag:Name', 'Values': ['vpc01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.Vpc resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_vpc.VpcWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.Vpc'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the internet gateway
            h = ec2_igw.create_handler(igw_ctx, self.credentials)
            h.create_resource()

            gateways = list(ec2.internet_gateways.filter(Filters=igw_filters))
            igw = gateways[0]

            self.assertCountEqual(igw.attachments, [])

            # Create the VPC
            h = ec2_vpc.create_handler(vpc_ctx, self.credentials)
            h.create_resource()

            vpcs = list(ec2.vpcs.filter(Filters=vpc_filters))
            vpc = vpcs[0]

            # Test that the internet gateway has been attached
            igw.reload()
            attachments = [{'VpcId': vpc.id, 'State': 'available'}]
            self.assertCountEqual(igw.attachments, attachments)
Exemplo n.º 23
0
    def test_create_vpc(self):
        """Create a VPC via the handler and verify its name, CIDR block
        and tags (moto EC2 mock).
        """
        ctx = {
            'name': 'vpc01',
            'cidr_block': '10.0.10.0/24',
            'tags': {
                'description': 'VPC vpc01 (subnet01a & subnet01b)'
            }
        }

        # Expected AWS-format tags produced from ctx.
        tags = [
            {
                'Key': 'Name',
                'Value': 'vpc01'
            },
            {
                'Key': 'Description',
                'Value': 'VPC vpc01 (subnet01a & subnet01b)'
            }
        ]

        filters = [{'Name': 'tag:Name', 'Values': ['vpc01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.Vpc resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_vpc.VpcWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.Vpc'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the VPC
            h = ec2_vpc.create_handler(ctx, self.credentials)
            h.create_resource()

            vpcs = list(ec2.vpcs.filter(Filters=filters))
            vpc = vpcs[0]

            self.assertEqual(len(vpcs), 1)
            self.assertEqual(vpc.name, 'vpc01')
            self.assertEqual(vpc.cidr_block, '10.0.10.0/24')
            self.assertCountEqual(vpc.tags, tags)
Exemplo n.º 24
0
 def get_session(self):
     """Build a boto3 Session, honoring an optional credential profile,
     region, and STS role assumption.

     Resolution order: use self.profile_name if set (else the default
     credential chain); then, if self.role_arn is set, assume that role
     and return a new Session built from the temporary credentials.

     :returns: A configured boto3 Session.
     :raises: Re-raises any Session construction or assume_role failure
         after logging it at fatal level.
     """
     if self.profile_name:
         self._log.info(
             'using AWS credential profile %s', self.profile_name)
         try:
             kwargs = {'profile_name': self.profile_name}
             if self.region:
                 kwargs['region_name'] = self.region
             session = Session(**kwargs)
         except Exception as ex:
             self._log.fatal(
                 'Could not connect to AWS using profile %s: %s',
                 self.profile_name, ex)
             raise
     else:
         self._log.debug(
             'getting an AWS session with the default provider')
         kwargs = {}
         if self.region:
             kwargs['region_name'] = self.region
         session = Session(**kwargs)
     if self.role_arn:
         self._log.info(
             'attempting to assume STS self.role %s', self.role_arn)
         try:
             # Temporary credentials are valid for one hour; the session
             # name is timestamped to stay unique across invocations.
             self.role_creds = session.client('sts').assume_role(
                 RoleArn=self.role_arn,
                 RoleSessionName='repoman-%s' % time.time(),
                 DurationSeconds=3600)['Credentials']
         except Exception as ex:
             self._log.fatal(
                 'Could not assume self.role %s: %s',
                 self.role_arn, ex)
             raise
         # Replace the profile/default session with one backed by the
         # assumed role's temporary credentials.
         kwargs = {
             'aws_access_key_id': self.role_creds['AccessKeyId'],
             'aws_secret_access_key': self.role_creds['SecretAccessKey'],
             'aws_session_token': self.role_creds['SessionToken']}
         if self.region:
             kwargs['region_name'] = self.region
         session = Session(**kwargs)
     return session
Exemplo n.º 25
0
    def test_create_internet_gateway(self):
        """Create an internet gateway via the handler and verify its name,
        tags and (empty) attachments (moto EC2 mock).
        """
        ctx = {
            'name': 'igw01',
            'tags': {
                'description': 'Internet gateway for VPC vpc01'
            }
        }

        # Expected AWS-format tags produced from ctx.
        tags = [
            {
                'Key': 'Name',
                'Value': 'igw01'
            },
            {
                'Key': 'Description',
                'Value': 'Internet gateway for VPC vpc01'
            }
        ]

        filters = [{'Name': 'tag:Name', 'Values': ['igw01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.InternetGateway resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_igw.InternetGatewayWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.InternetGateway'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the internet gateway
            h = ec2_igw.create_handler(ctx, self.credentials)
            h.create_resource()

            gateways = list(ec2.internet_gateways.filter(Filters=filters))
            igw = gateways[0]

            self.assertEqual(len(gateways), 1)
            self.assertEqual(igw.name, 'igw01')
            self.assertCountEqual(igw.tags, tags)
            self.assertCountEqual(igw.attachments, [])
Exemplo n.º 26
0
    def test_delete_dhcp_options_set(self):
        """Create a DHCP options set via the handler, then delete it and
        verify no set tagged Name=dhcp01 remains (moto EC2 mock).
        """
        ctx = {
            'name': 'dhcp01',
            'domain_name': [
                'test01.us-west-2.aws'
            ],
            'domain_name_servers': [
                '10.0.10.2'
            ]
        }

        filters = [{'Name': 'tag:Name', 'Values': ['dhcp01']}]

        # Injects the project's wrapper class when boto3 builds the
        # ec2.DhcpOptions resource class.
        def _add_wrapper(base_classes, **kwargs):
            base_classes.insert(0, ec2_dhcp.DhcpOptionsWrapper)

        with mock_ec2():
            event = 'creating-resource-class.ec2.DhcpOptions'
            session = Session(**self.credentials)
            session.events.register(event, _add_wrapper)
            ec2 = session.resource('ec2')

            # Create the DHCP options set
            h = ec2_dhcp.create_handler(ctx, self.credentials)
            h.create_resource()

            dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters))

            self.assertEqual(len(dhcp_options_sets), 1)

            # We clear the resource cache to simulate a new
            # program execution with the 'delete' option
            base.BaseHandler._cache.clear()

            # Delete the DHCP options set
            h.delete_resource()

            dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters))

            self.assertEqual(len(dhcp_options_sets), 0)
Exemplo n.º 27
0
class DatabaseResource(object):
    """Falcon resource that lists RDS instances for the configured profile."""

    def __init__(self):
        self.session = Session(profile_name=profile_name, region_name="us-east-1")
        self.content_type = "application/json"

    def on_get(self, request, response):
        """Handle GET: return the raw describe_db_instances() payload as JSON."""
        client = self.session.client('rds')
        result = client.describe_db_instances()
        # datetime values are not JSON-serialisable; stringify them first
        for db in result['DBInstances']:
            db['InstanceCreateTime'] = str(db["InstanceCreateTime"])
            db['LatestRestorableTime'] = str(db["LatestRestorableTime"])
        response.status = falcon.HTTP_200
        response.content_type = self.content_type
        response.body = json.dumps(result)
Exemplo n.º 28
0
    def deliver_sns(self, topic, subject, msg, data):
        """Publish *msg* to the SNS *topic*, using a cached, possibly
        cross-account client.

        The target account is parsed from the topic ARN; if it has not been
        seen yet, a cross-account role from ``self.config`` is assumed and
        the resulting client cached in ``self.sns_cache``.  SNS rejects
        subjects longer than 100 characters, so the subject is truncated
        first (matching deliver_sns_message).
        """
        # SNS imposes a hard 100-character limit on Subject
        if len(subject) > 100:
            subject = subject[:97] + '..'
        account = topic.split(':')[4]  # arn:aws:sns:<region>:<account>:<name>
        if account in self.sns_cache:
            sns = self.sns_cache[account]
        else:
            if account not in self.config['cross_accounts']:
                log.error(
                    "No cross account role for sending sns to %s" % topic)
                return
            creds = self.sts.assume_role(
                RoleArn=self.config['cross_accounts'][account],
                RoleSessionName="CustodianNotification")['Credentials']
            session = Session(
                aws_access_key_id=creds['AccessKeyId'],
                aws_secret_access_key=creds['SecretAccessKey'],
                aws_session_token=creds['SessionToken'])
            self.sns_cache[account] = sns = session.client('sns')

        log.info("Sending account:%s policy:%s sns:%s to %s" % (
            data.get('account', ''),
            data['policy']['name'],
            data['action'].get('template', 'default'),
            topic))
        sns.publish(TopicArn=topic, Subject=subject, Message=msg)
Exemplo n.º 29
0
    def __enter__(self):
        """Create the boto3 session, client and resource on context entry."""
        logger.debug('Setting up AWS client...')

        # Only pass credentials/region that were actually configured;
        # otherwise fall back to boto3's default resolution chain.
        kwargs = {}
        if self.credentials:
            kwargs.update(
                aws_access_key_id=self.credentials.access_key_id,
                aws_secret_access_key=self.credentials.secret_access_key,
            )
        if self.region_name:
            kwargs['region_name'] = self.region_name

        self._session = Session(**kwargs)
        self._client = self._session.client(self._resource_name, config=self._config)
        self._resource = self._session.resource(self._resource_name, config=self._config)
        return self
Exemplo n.º 30
0
class CacheResource(object):
    """Falcon resource that lists ElastiCache clusters, including node info."""

    def __init__(self):
        self.session = Session(profile_name=profile_name, region_name="us-east-1")
        self.content_type = "application/json"

    def on_get(self, request, response):
        """Handle GET: return describe_cache_clusters() as JSON."""
        client = self.session.client('elasticache')
        result = client.describe_cache_clusters(ShowCacheNodeInfo=True)
        # datetime values are not JSON-serialisable; stringify them first
        for cluster in result['CacheClusters']:
            cluster['CacheClusterCreateTime'] = str(cluster['CacheClusterCreateTime'])
            for node in cluster["CacheNodes"]:
                node['CacheNodeCreateTime'] = str(node['CacheNodeCreateTime'])
        response.status = falcon.HTTP_200
        response.content_type = self.content_type
        response.body = json.dumps(result)
Exemplo n.º 31
0
        self.networks[network_id] = ManagedBlockchainNetwork(
            id=network_id,
            name=name,
            framework=self.framework,
            frameworkversion=self.frameworkversion,
            frameworkconfiguration=self.frameworkconfiguration,
            voting_policy=self.voting_policy,
            member_configuration=self.member_configuration,
            region=self.region_name,
            description=self.description,
        )

        # Return the network and member ID
        d = {"NetworkId": network_id, "MemberId": member_id}
        return d

    def list_networks(self):
        # Returns a live dict view over the stored networks (not a list);
        # callers iterating it will see later create/delete operations.
        return self.networks.values()

    def get_network(self, network_id):
        """Return the network with *network_id*.

        Raises:
            ResourceNotFoundException: if no network with that id exists.
        """
        if network_id not in self.networks:
            # Report the operation actually being performed; the original
            # said "CreateNetwork", an apparent copy/paste from the create path.
            raise ResourceNotFoundException(
                "GetNetwork", "Network {0} not found".format(network_id)
            )
        return self.networks.get(network_id)


# One ManagedBlockchainBackend per region in the default ("aws") partition.
managedblockchain_backends = {
    region: ManagedBlockchainBackend(region)
    for region in Session().get_available_regions("managedblockchain")
}
Exemplo n.º 32
0
        # Token used up, might as well bin now, if you call it again your an idiot
        if pagination_token:
            del self._pages[pagination_token]

        return new_token, result

    # These methods will be called from responses.py.
    # They should call a tag function inside of the moto module
    # that governs the resource, that way if the target module
    # changes how tags are delt with theres less to change

    # def tag_resources(self, resource_arn_list, tags):
    #     return failed_resources_map
    #
    # def untag_resources(self, resource_arn_list, tag_keys):
    #     return failed_resources_map


# One ResourceGroupsTaggingAPIBackend per region, covering the standard,
# GovCloud and China partitions.
resourcegroupstaggingapi_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions(
            "resourcegroupstaggingapi", partition_name=_partition):
        resourcegroupstaggingapi_backends[region] = \
            ResourceGroupsTaggingAPIBackend(region)
Exemplo n.º 33
0
 def _fix(cls, session: Session, resource: Dict[str, Any],
          parameters: Dict[str, str]) -> None:
     """Remediate the finding by enabling versioning on the S3 bucket.

     Args:
         session: boto3 session used to build the S3 client.
         resource: description of the offending bucket; only ``Name`` is read.
         parameters: unused here; part of the common fixer signature.
     """
     session.client('s3').put_bucket_versioning(
         Bucket=resource['Name'],
         VersioningConfiguration={'Status': 'Enabled'})
Exemplo n.º 34
0
app = Flask(__name__)
app.secret_key = os.urandom(12)  # Generic key for dev purposes only
fa = FontAwesome(app)

# ======== AWS Polly Setup =========================================================== #
# Mapping possible user browser suported audio formats to their corresponding
# response code for AWS Polly
# (keys are the Polly OutputFormat values, values are the HTTP Content-Type)

AUDIO_FORMATS = {"ogg_vorbis": "audio/ogg",
                 "mp3": "audio/mpeg",
                 "pcm": "audio/wave; codecs=1"}

# Create a client using the credentials and region defined in the adminuser
# section of the AWS credentials and configuration files
# For more information read the READEME.md file
session = Session(profile_name="adminuser")
polly = session.client("polly")


# ======== Simple Exception Handler =========================================================== #
class InvalidUsage(Exception):
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        """Record the message and payload, optionally overriding the
        class-level default status code (400)."""
        Exception.__init__(self)
        self.message = message
        self.payload = payload
        # Keep the class default unless an explicit status was supplied.
        if status_code is not None:
            self.status_code = status_code

    def to_dict(self):
Exemplo n.º 35
0
def get_all_vpcs(session: boto3.Session) -> typing.List[dict]:
    """Return every VPC description visible to *session*."""
    ec2 = session.client('ec2')
    response = ec2.describe_vpcs()
    return response['Vpcs']
Exemplo n.º 36
0
def vault():
    """Return an IAM resource client pointed at the Zenko Vault endpoint."""
    session = Session(profile_name='zenko')
    return session.resource('iam', endpoint_url=conf.ZENKO_VAULT_ENDPOINT)
Exemplo n.º 37
0
        return json.dumps({"Streams": streams})

    def get_shard_iterator(self,
                           arn,
                           shard_id,
                           shard_iterator_type,
                           sequence_number=None):
        """Create a shard iterator for the stream shard of the table at *arn*
        and return its JSON description."""
        table = self._get_table_from_arn(arn)
        # Each table has exactly one stream shard in this backend.
        assert table.stream_shard.id == shard_id

        iterator = ShardIterator(self, table.stream_shard,
                                 shard_iterator_type, sequence_number)
        self.shard_iterators[iterator.arn] = iterator
        return json.dumps(iterator.to_json())

    def get_records(self, iterator_arn, limit):
        """Read up to *limit* records from the iterator and serialise them."""
        iterator = self.shard_iterators[iterator_arn]
        records = iterator.get(limit)
        return json.dumps(records, cls=DynamoJsonEncoder)


# One DynamoDBStreamsBackend per region, covering the standard, GovCloud
# and China partitions.
dynamodbstreams_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions("dynamodbstreams",
                                                  partition_name=_partition):
        dynamodbstreams_backends[region] = DynamoDBStreamsBackend(region)
Exemplo n.º 38
0
            "Endpoints": [
                {
                    "Address": f"ingest.timestream.{self.region_name}.amazonaws.com",
                    "CachePeriodInMinutes": 1440,
                }
            ]
        }

    def reset(self):
        """Wipe all backend state and re-run __init__, keeping only the region."""
        saved_region = self.region_name
        self.__dict__ = {}
        self.__init__(saved_region)


# One TimestreamWriteBackend per region, covering the standard, GovCloud
# and China partitions.
timestreamwrite_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for available_region in Session().get_available_regions(
            "timestream-write", partition_name=_partition):
        timestreamwrite_backends[available_region] = TimestreamWriteBackend(
            available_region
        )
Exemplo n.º 39
0
            return None

    def put_function_concurrency(self, function_name, reserved_concurrency):
        """Set and return the reserved concurrency of *function_name*."""
        function = self.get_function(function_name)
        function.reserved_concurrency = reserved_concurrency
        return function.reserved_concurrency

    def delete_function_concurrency(self, function_name):
        """Clear the reserved concurrency of *function_name*; returns None."""
        function = self.get_function(function_name)
        function.reserved_concurrency = None
        return function.reserved_concurrency

    def get_function_concurrency(self, function_name):
        """Return the reserved concurrency currently set for *function_name*."""
        return self.get_function(function_name).reserved_concurrency


def do_validate_s3():
    """Whether Lambda S3 code locations should be validated (env-controlled).

    Unset/empty, "1" and "true" all enable validation.
    """
    return os.environ.get("VALIDATE_LAMBDA_S3", "") in ("", "1", "true")


# One LambdaBackend per region, covering the standard, GovCloud and China
# partitions.
lambda_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions("lambda",
                                                  partition_name=_partition):
        lambda_backends[region] = LambdaBackend(region)
Exemplo n.º 40
0
class S3ArchiveDiffer(ArchiveDiffer):
    """
    AWS S3 backend for archiving.

    Archives CSV files into a S3 bucket, with keys "{indicator_prefix}/{csv_file_name}".
    Ideally, versioning should be enabled in this bucket to track versions of each CSV file.
    """
    def __init__(
        self,
        cache_dir: str,
        export_dir: str,
        bucket_name: str,
        indicator_prefix: str,
        aws_credentials: Dict[str, str],
    ):
        """
        Initialize a S3ArchiveDiffer.

        See this link for possible aws_credentials kwargs:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session

        Parameters
        ----------
        cache_dir: str
            The directory for storing most recent archived/uploaded CSVs to do start diffing from.
            Usually 'cache'.
        export_dir: str
            The directory with most recent exported CSVs to diff to.
            Usually 'receiving'.
        bucket_name: str
            The S3 bucket to upload files to.
        indicator_prefix: str
            The prefix for S3 keys related to this indicator.
        aws_credentials: Dict[str, str]
            kwargs to create a boto3.Session, containing AWS credentials/profile to use.
        """
        super().__init__(cache_dir, export_dir)
        # One Session/resource per differ; credentials are passed straight through.
        self.s3 = Session(**aws_credentials).resource("s3")
        self.bucket = self.s3.Bucket(bucket_name)
        self.indicator_prefix = indicator_prefix

    def update_cache(self):
        """Make sure cache_dir is updated with all latest files from the S3 bucket."""
        # List all indicator-related objects from S3
        archive_objects = self.bucket.objects.filter(
            Prefix=self.indicator_prefix).all()
        # Only CSV files are archived/diffed; ignore anything else under the prefix.
        archive_objects = [
            obj for obj in archive_objects if obj.key.endswith(".csv")
        ]

        # Check against what we have locally and download missing ones
        cached_files = set(
            basename(f) for f in glob(join(self.cache_dir, "*.csv")))
        for obj in archive_objects:
            archive_file = basename(obj.key)
            cached_file = join(self.cache_dir, archive_file)

            if archive_file not in cached_files:
                print(f"Updating cache with {cached_file}")
                obj.Object().download_file(cached_file)

        self._cache_updated = True

    def archive_exports(
            self,  # pylint: disable=arguments-differ
            exported_files: Files,
            update_cache: bool = True,
            update_s3: bool = True) -> Tuple[Files, Files]:
        """
        Handle actual archiving of files to the S3 bucket.

        Parameters
        ----------
        exported_files: Files
            List of files to be archived. Usually new and changed files.

        Returns
        -------
        (successes, fails): Tuple[Files, Files]
            successes: List of successfully archived files
            fails: List of unsuccessfully archived files
        """
        archive_success = []
        archive_fail = []

        for exported_file in exported_files:
            cached_file = abspath(join(self.cache_dir,
                                       basename(exported_file)))
            # The S3 key mirrors the local filename under the indicator prefix.
            archive_key = join(self.indicator_prefix, basename(exported_file))

            try:
                if update_cache:
                    # Update local cache
                    shutil.copyfile(exported_file, cached_file)

                if update_s3:
                    self.bucket.Object(archive_key).upload_file(exported_file)

                archive_success.append(exported_file)
            except FileNotFoundError:
                # A missing source file is recorded as a failure rather than
                # aborting the whole batch.
                archive_fail.append(exported_file)

        self._exports_archived = True

        return archive_success, archive_fail
Exemplo n.º 41
0
# Route internal service calls directly by adding our own hostnames to
# the (NO_)no_proxy environment variable.
no_proxy = ','.join({LOCALSTACK_HOSTNAME, LOCALHOST, LOCALHOST_IP, '[::1]'})
if os.environ.get('no_proxy'):
    os.environ['no_proxy'] += ',' + no_proxy
elif os.environ.get('NO_PROXY'):
    os.environ['NO_PROXY'] += ',' + no_proxy
else:
    os.environ['no_proxy'] = no_proxy

# additional CLI commands, can be set by plugins
CLI_COMMANDS = {}

# set of valid regions (SNS is available in every region)
VALID_REGIONS = set(Session().get_available_regions('sns'))


def parse_service_ports():
    """ Parses the environment variable $SERVICES with a comma-separated list of services
        and (optional) ports they should run on: 'service1:port1,service2,service3:port3' """
    service_ports = os.environ.get('SERVICES', '').strip()
    if not service_ports:
        return DEFAULT_SERVICE_PORTS
    result = {}
    for service_port in re.split(r'\s*,\s*', service_ports):
        parts = re.split(r'[:=]', service_port)
        service = parts[0]
        key_upper = service.upper().replace('-', '_')
        port_env_name = '%s_PORT' % key_upper
        # (1) set default port number
Exemplo n.º 42
0
import tornado.locale
import markdown
import os
import uuid
import hashlib
import time
import boto3
import botocore

from handlers.util import *
from handlers.base import BaseHandler
from boto3 import Session

# AWS S3 Configuration
BUCKET_NAME = 'chec-static'
# Resolve the ambient credentials once and freeze them for the explicit client.
session = Session()
credentials = session.get_credentials()
current_credentials = credentials.get_frozen_credentials()
s3 = boto3.resource('s3')
s3c = boto3.client('s3',
                   aws_access_key_id=current_credentials.access_key,
                   aws_secret_access_key=current_credentials.secret_key,
                   aws_session_token=current_credentials.token)

# AWS S3 access bucket
myBucket = s3.Bucket(BUCKET_NAME)
# NOTE(review): reaches into the private _client_config attribute to force
# unsigned (anonymous) requests on this client — presumably for public-read
# objects; confirm this is intended and survives botocore upgrades.
config = s3c._client_config
config.signature_version = botocore.UNSIGNED

# NOTE(review): os.path.dirname('./..') evaluates to '.', so this is just
# "./static/members/" relative to the current working directory — verify.
BlogURL = os.path.join(os.path.dirname('./..'), "static/members/")
Exemplo n.º 43
0
def azure_endpoint_resource():
    """Return an S3 resource pointed at the Zenko Azure endpoint."""
    session = Session(aws_access_key_id=conf.ZENKO_ACCESS_KEY,
                      aws_secret_access_key=conf.ZENKO_SECRET_KEY)
    return session.resource('s3', endpoint_url=conf.ZENKO_AZURE_ENDPOINT,
                            verify=conf.VERIFY_CERTIFICATES)
def _print_relevant_environment_vars(environ):
    """Dump boto3 session details (region, profiles, credentials) for debugging."""
    # NOTE(review): this prints live AWS credentials to stdout — make sure the
    # output never reaches shared logs.
    session = Session()
    credentials = session.get_credentials()
    print("Session current region={}".format(session.region_name))
    print("Session available_profiles={}".format(session.available_profiles))
    print("Session access_key={}".format(credentials.access_key))
    print("Session secret_key={}".format(credentials.secret_key))
Exemplo n.º 45
0
import json
import boto3
from boto3 import Session
from datetime import datetime
import os
from pyspark import SparkContext
from pyspark.sql import SparkSession
from datetime import timedelta

sc = SparkContext()
spark = SparkSession.builder.getOrCreate()

s3client = Session().client('s3')
s3 = boto3.resource('s3')
bucket = s3.Bucket('zozo-image-analyze-user-favorite')

# Target the previous hour's S3 prefix (keys are laid out as YYYY/MM/DD/HH).
now = datetime.now()
target_date = now - timedelta(hours=1)
prefix = target_date.strftime("%Y/%m/%d/%H")

response = s3client.list_objects(Bucket='zozo-image-analyze-user-favorite',
                                 Prefix=prefix)

# NOTE(review): `keys` is only bound when the listing is non-empty; any later
# code reading `keys` will raise NameError for an empty hour — confirm intended.
if 'Contents' in response:
    keys = [content['Key'] for content in response['Contents']]

# Timestamped local output paths for the JSON/Parquet results.
timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")

json_file_name = '/home/ec2-user/close-clothes/user-favorite/' + timestamp + '.json'
parquet_file_name = '/home/ec2-user/close-clothes/user-favorite/' + timestamp + '.parquet'
Exemplo n.º 46
0
    print(get_day_number(), index)
    uri = dlna_server_uri + yoga_videos[index]
    print(uri)
    service.SetAVTransportURI(InstanceID=0,
                              CurrentURI=uri,
                              CurrentURIMetaData="")
    time.sleep(5)
    service.Pause(InstanceID=0)
    return "Started yoga video"


# Create a Polly client using the credentials and region defined in the
# "marina" profile of the local AWS credentials and configuration files
from boto3 import Session
import os
session = Session(profile_name="marina", region_name="eu-west-1")
polly = session.client("polly")


@app.route("/play_alarm")
def play_alarm():
    def say_time(time):
        return "<say-as interpret-as='time'>{}</say-as>".format(
            datetime.strftime(time, "%-I:%M %p"))

    def emphasize(text):
        return "<emphasis>{}</emphasis>".format(text)

    def get_greeting():
        bst = pytz.timezone("Europe/London")
        utc = pytz.timezone("UTC")
Exemplo n.º 47
0
class Polly():
    """Amazon Polly class to support functionality of Text-To-Speech.
    Read here: http://docs.aws.amazon.com/polly/latest/dg/what-is.html

    Defaults
    --------
    Output format: mp3
    Text type: ssml (plain text is wrapped in <speak> tags automatically)

    Available
    ----------
    Output Formats: json | mp3 | ogg_vorbis | pcm
    Text types: ssml | text
    """

    output_format = 'mp3'
    text_type = 'ssml'
    language = 'US English'
    gender = 'Female'

    def __init__(self, aws_access_key_id, aws_secret_access_key, region_name):
        """Initialize Amazon Polly client with the provided credentials."""
        self.session = Session(aws_access_key_id=aws_access_key_id,
                               aws_secret_access_key=aws_secret_access_key,
                               region_name=region_name)
        self.client = self.session.client('polly')

    def _get_ssml_text(self, text):
        """Converts plain text to SSML text to keep requests consistent."""
        if not text.startswith('<speak>'):
            text = '{}{}{}'.format('<speak>', text, '</speak>')
        return text

    def _get_synthesized_speech(self, text, voice_id, text_type,
                                output_format):
        """Get audio stream of the specified text using Amazon Polly,
        and return the audiostream itself, without saving it anywhere.

        Raises ValueError when the request fails or no stream is returned.
        """
        text = self._get_ssml_text(text)
        try:
            # Request speech synthesis
            response = self.client.synthesize_speech(
                Text=text,
                VoiceId=voice_id,
                TextType=text_type,
                OutputFormat=output_format)
            if 'AudioStream' in response:
                return response['AudioStream']
            raise ValueError('Audio cannot be streamed.')

        except (BotoCoreError, ClientError) as error:
            # Chain the boto error so the root cause stays visible.
            raise ValueError(error) from error

    def get_speech(self, text, voice_id, text_type, output_format):
        """Get synthesized speech and return its contents."""
        speech = self._get_synthesized_speech(text, voice_id, text_type,
                                              output_format).read()
        return speech

    def get_voice_by_language_and_gender(self, language, gender):
        """Return a voice id for (language, gender), falling back to the
        opposite gender when the requested one has no voice.

        Raises ValueError when the language has no voice at all.
        """
        voice_id = voice[language][gender]
        if voice_id is None:
            # Fixed: compare strings with '==', not identity ('is') — the
            # original relied on CPython string interning.
            gender = 'Female' if gender == 'Male' else 'Male'
            voice_id = voice[language][gender]
        if voice_id is None:
            raise ValueError("No voice to convert text to.")
        return voice_id

    def get_speech_file(self, text, voice_id, filename, text_type,
                        output_format):
        """Write audio stream to a file and return the path to the file."""
        response = self._get_synthesized_speech(text, voice_id, text_type,
                                                output_format)
        with closing(response) as stream:
            filename = '{}.{}'.format(filename, output_format)
            filepath = os.path.join(gettempdir(), filename)
            try:
                with open(filepath, "wb") as file:
                    file.write(stream.read())
                return filepath
            except IOError as error:
                raise ValueError(error) from error

    def remove_speech_file(self, filepath):
        """Delete a previously written speech file, if it exists."""
        if os.path.exists(filepath) and os.path.isfile(filepath):
            os.remove(filepath)
Exemplo n.º 48
0
        self,
        destinations,
        input_devices,
        input_id,
        input_security_groups,
        media_connect_flows,
        name,
        role_arn,
        sources,
    ):
        a_input = self._inputs[input_id]
        a_input.destinations = destinations
        a_input.input_devices = input_devices
        a_input.security_groups = input_security_groups
        a_input.media_connect_flows = media_connect_flows
        a_input.name = name
        a_input.role_arn = role_arn
        a_input.sources = sources
        return a_input


# One MediaLiveBackend per region, covering the standard, GovCloud and
# China partitions.
medialive_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions("medialive",
                                                  partition_name=_partition):
        medialive_backends[region] = MediaLiveBackend()
Exemplo n.º 49
0
def aws_crr_resource():
    """Return an S3 resource authenticated with the backbeat (CRR) keys."""
    session = Session(
        aws_access_key_id=conf.AWS_BACKBEAT_ACCESS_KEY,
        aws_secret_access_key=conf.AWS_BACKBEAT_SECRET_KEY)
    return session.resource('s3')
Exemplo n.º 50
0
def wasabi_resource():
    """Return an S3 resource pointed at the Wasabi endpoint."""
    session = Session(profile_name='wasabi')
    return session.resource('s3', endpoint_url=conf.WASABI_ENDPOINT)
Exemplo n.º 51
0
def get_all_network_acls(session: boto3.Session) -> typing.List[dict]:
    """Return every network ACL description visible to *session*."""
    ec2 = session.client('ec2')
    response = ec2.describe_network_acls()
    return response['NetworkAcls']
Exemplo n.º 52
0
def get_all_rts(session: boto3.Session) -> typing.List[dict]:
    """Return every route table description visible to *session*."""
    ec2 = session.client('ec2')
    response = ec2.describe_route_tables()
    return response['RouteTables']
Exemplo n.º 53
0
def assume_role(accountID, rgn, event):
    """Assume the limit-check role in *accountID* and inspect service limits
    in region *rgn*.

    Checks Trusted Advisor (storing any warnings in the module-global
    ``ta_message``), then EC2 instance count, CloudFormation stack count and
    RDS instance count against their limits; any service at >= 80% usage
    contributes an alert to the returned message string.

    Fixed here: the body mixed tab and space indentation (fatal under
    Python 3 / ``-tt``) — now uniformly 4 spaces; ``print`` statements use
    the parenthesized single-argument form (valid in Python 2 and 3); the
    duplicated StackStatusFilter literal is hoisted; stray semicolon removed.
    """
    ec2_message = ""
    cfn_message = ""
    rds_message = ""

    client = boto3.client('sts')
    response = client.assume_role(
        RoleArn='arn:aws:iam::' + accountID + ':role/' + event['CheckRoleName'],
        RoleSessionName='AWSLimits')

    session = Session(
        aws_access_key_id=response['Credentials']['AccessKeyId'],
        aws_secret_access_key=response['Credentials']['SecretAccessKey'],
        aws_session_token=response['Credentials']['SessionToken'],
        region_name=rgn
    )

    ##############
    # call trusted advisor for the limit checks
    ##############
    # The Support/Trusted Advisor API is only served from us-east-1.
    support_client = session.client('support', region_name='us-east-1')
    response = support_client.describe_trusted_advisor_check_result(
        checkId='eW7HH0l7J9',  # the "Service Limits" check
        language='en'
    )
    print("Contacting Trusted Advisor...")

    # parse the json and find flagged resources that are in warning mode
    flag_list = response['result']['flaggedResources']
    warn_list = []
    for fr in flag_list:
        if fr['metadata'][5] != "Green":
            warn_list.append(fr['metadata'][2]+'\n'+'Region: '+fr['metadata'][0]+'\n------------------------'+'\nResource Limit: '+fr['metadata'][3]+'\n'+'Resource Usage: '+fr['metadata'][4]+'\n')
    if not warn_list:
        print("TA all green")
    else:
        global ta_message
        ta_message = trustedAlert(warn_list)

    ###############
    # call EC2 limits for rgn
    ###############
    ec2_client = session.client('ec2', region_name=rgn)
    response = ec2_client.describe_account_attributes()
    attribute_list = response['AccountAttributes']
    for att in attribute_list:
        if att['AttributeName'] == 'max-instances':
            limit_of_instances = att['AttributeValues'][0]['AttributeValue']
            print("num of limit: " + limit_of_instances)

    response = ec2_client.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running']}])
    reservation_list = response['Reservations']
    num_of_instances = 0
    for rsrv in reservation_list:
        instance_list = rsrv['Instances']
        num_of_instances += len(instance_list)

    print("num of instances: " + str(num_of_instances))

    # calculate if limit is within threshold
    if (float(num_of_instances) / float(limit_of_instances) >= 0.8):
        ec2_message = ec2Alert(limit_of_instances, num_of_instances, rgn)
        print(ec2_message)

    ###############
    # cfn resource limit
    ###############
    cfn_client = session.client('cloudformation', region_name=rgn)
    # grabbing all stacks except for DELETE_COMPLETE
    stack_status_filter = [
        'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE',
        'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE',
        'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'UPDATE_IN_PROGRESS',
        'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_COMPLETE',
        'UPDATE_ROLLBACK_IN_PROGRESS', 'UPDATE_ROLLBACK_FAILED',
        'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
        'UPDATE_ROLLBACK_COMPLETE'
    ]
    cfn_response = cfn_client.list_stacks(StackStatusFilter=stack_status_filter)
    done = False
    aggregated_results = []
    while not done:
        aggregated_results = aggregated_results + cfn_response['StackSummaries']
        next_token = cfn_response.get("NextToken", None)
        if next_token is None:
            done = True
        else:
            cfn_response = cfn_client.list_stacks(
                StackStatusFilter=stack_status_filter,
                NextToken=next_token
            )

    num_of_stacks = len(aggregated_results)
    stack_limit = cfn_client.describe_account_limits()
    if stack_limit['AccountLimits'][0]['Name'] == 'StackLimit':
        limit_of_stacks = stack_limit['AccountLimits'][0]['Value']

    if (float(num_of_stacks) / float(limit_of_stacks)) >= 0.8:
        cfn_message = cloudformationAlert(limit_of_stacks, num_of_stacks, rgn)
        print(cfn_message)

    ################
    # call RDS Limits for rgn
    ################
    rds_client = session.client('rds', region_name=rgn)
    instance_limit = rds_client.describe_account_attributes()
    service_limit = instance_limit['AccountQuotas'][0]['Max']
    service_usage = instance_limit['AccountQuotas'][0]['Used']

    if (float(service_usage) / float(service_limit) >= 0.8):
        rds_message = rdsAlert(service_limit, service_usage, rgn)
        print(rds_message)

    print("Assumed session for " + accountID + " in region " + rgn)

    rgn_message = ec2_message + cfn_message + rds_message

    return rgn_message
Exemplo n.º 54
0
 def _fix(cls, session: Session, resource: Dict[str, Any],
          parameters: Dict[str, str]) -> None:
     """Remediate by enabling a GuardDuty detector in the session's region.

     Args:
         session: boto3 session used to build the GuardDuty client.
         resource: the offending resource description (unused here).
         parameters: must contain 'FindingPublishingFrequency'.
     """
     session.client("guardduty").create_detector(
         Enable=True,
         FindingPublishingFrequency=parameters['FindingPublishingFrequency']
     )
Exemplo n.º 55
0
            raise NotAuthorizedError(access_token)

    def admin_update_user_attributes(self, user_pool_id, username, attributes):
        """Apply *attributes* to *username* in the given user pool.

        Raises ResourceNotFoundError / UserNotFoundError when the pool or
        the user does not exist.
        """
        pool = self.user_pools.get(user_pool_id)
        if not pool:
            raise ResourceNotFoundError(user_pool_id)
        if username not in pool.users:
            raise UserNotFoundError(username)
        pool.users[username].update_attributes(attributes)


# One CognitoIdpBackend per region, covering the standard, GovCloud and
# China partitions.
cognitoidp_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions("cognito-idp",
                                                  partition_name=_partition):
        cognitoidp_backends[region] = CognitoIdpBackend(region)


# Hack to help moto-server process requests on localhost, where the region isn't
# specified in the host header. Some endpoints (change password, confirm forgot
# password) have no authorization header from which to extract the region.
def find_region_by_value(key, value):
    for region in cognitoidp_backends:
        backend = cognitoidp_backends[region]
Exemplo n.º 56
0
        if len(updated_tags) > 50:
            raise TagLimitExceededError

        self.topics[resource_arn]._tags = updated_tags

    def untag_resource(self, resource_arn, tag_keys):
        """Remove *tag_keys* from a topic's tags; missing keys are ignored."""
        if resource_arn not in self.topics:
            raise ResourceNotFoundError

        tags = self.topics[resource_arn]._tags
        for key in tag_keys:
            tags.pop(key, None)


# One SNSBackend per region in the default ("aws") partition.
sns_backends = {
    region: SNSBackend(region)
    for region in Session().get_available_regions('sns')
}


DEFAULT_TOPIC_POLICY = {
    "Version": "2008-10-17",
    "Id": "us-east-1/698519295917/test__default_policy_ID",
    "Statement": [{
        "Effect": "Allow",
        "Sid": "us-east-1/698519295917/test__default_statement_ID",
        "Principal": {
            "AWS": "*"
        },
        "Action": [
            "SNS:GetTopicAttributes",
            "SNS:SetTopicAttributes",
Exemplo n.º 57
0
        raise JsonRESTError("ResourceNotFoundException",
                            "An entity that you specified does not exist.")

    def tag_resource(self, arn, tags):
        """Attach *tags* to the rule named in *arn*; returns {} on success."""
        name = arn.split("/")[-1]
        if name not in self.rules:
            raise JsonRESTError("ResourceNotFoundException",
                                "An entity that you specified does not exist.")
        self.tagger.tag_resource(self.rules[name].arn, tags)
        return {}

    def untag_resource(self, arn, tag_names):
        """Remove *tag_names* from the rule named in *arn*; {} on success."""
        name = arn.split("/")[-1]
        if name not in self.rules:
            raise JsonRESTError("ResourceNotFoundException",
                                "An entity that you specified does not exist.")
        self.tagger.untag_resource_using_names(self.rules[name].arn,
                                               tag_names)
        return {}


# One EventsBackend per region, covering the standard, GovCloud and China
# partitions.
events_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions("events",
                                                  partition_name=_partition):
        events_backends[region] = EventsBackend(region)
Exemplo n.º 58
0
 def configure_session(self, session_input):
     """Return *session_input* if given, else a new Session in self.region."""
     if session_input:
         return session_input
     return Session(region_name=self.region)
Exemplo n.º 59
0
def digital_ocean_resource():
    """Return an S3 resource pointed at the DigitalOcean Spaces endpoint."""
    session = Session(profile_name='do')
    return session.resource('s3', endpoint_url=conf.DO_ENDPOINT)
Exemplo n.º 60
0
def get_all_sgs(session: boto3.Session) -> typing.List[dict]:
    """Return every security group description visible to *session*."""
    ec2 = session.client('ec2')
    response = ec2.describe_security_groups()
    return response['SecurityGroups']