from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing

# Parsing Library
from pybtex.database.input import bibtex

from expand_labels import expand_doc_new_labels
from doc_cleanup import cleanxml_string
from format_master_files import format_master_files

# Internal classes
from conversion_db import ConversionDB
from conversion_parser import ConversionParser

# Creates a session using the AWS credentials of the [default] profile
# (~/.aws/credentials).
session = Session(profile_name='default')

# Service clients shared by the whole module.
polly = session.client("polly")
s3 = session.client("s3")


def check_s3(key):
    """Return True if *key* has already been uploaded to the tex2speech bucket.

    Uses a HEAD request so no object data is transferred. head_object raises
    ClientError both for a missing key (404) and for permission problems
    (403); either way the object is not usable, so False is returned.
    """
    try:
        s3.head_object(Bucket="tex2speech", Key=key)
    except ClientError:
        return False
    return True
# Tail of a get-resource-policy style method: the enclosing def is outside
# this view. The policy below is a fixed placeholder (mock behaviour) with
# example account ARNs, returned for any secret.
resource_policy = {
    "Version": "2012-10-17",
    "Statement": {
        "Effect": "Allow",
        "Principal": {
            "AWS": [
                "arn:aws:iam::111122223333:root",
                "arn:aws:iam::444455556666:root",
            ]
        },
        "Action": ["secretsmanager:GetSecretValue"],
        "Resource": "*",
    },
}
# The policy document is itself embedded as a JSON string inside the
# JSON response body, matching the real API's shape.
return json.dumps({
    "ARN": secret_id,
    "Name": secret_id,
    "ResourcePolicy": json.dumps(resource_policy),
})

# One mock backend instance per region, covering the standard, GovCloud
# and China partitions.
secretsmanager_backends = {}
for region in Session().get_available_regions("secretsmanager"):
    secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)
for region in Session().get_available_regions("secretsmanager", partition_name="aws-us-gov"):
    secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)
for region in Session().get_available_regions("secretsmanager", partition_name="aws-cn"):
    secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
import os
import sys
import subprocess

# Synthesize a Japanese weather announcement with Amazon Polly (Mizuki
# voice, Tokyo region) and store the MP3 where the web server can serve it.
session = Session(region_name="ap-northeast-1")
polly = session.client("polly")

try:
    response = polly.synthesize_speech(
        Text="ただいまの南さいたまの気温は9.6℃、湿度は48.0%くらいです。",
        OutputFormat="mp3",
        VoiceId="Mizuki")
except (BotoCoreError, ClientError) as error:
    # Service/connection failure: report and bail out.
    print(error)
    sys.exit(-1)

if "AudioStream" not in response:
    print("Could not stream audio")
    sys.exit(-1)

output = "/var/www/html/speech.mp3"
with closing(response["AudioStream"]) as stream:
    try:
        with open(output, "wb") as file:
            file.write(stream.read())
    except IOError as error:
        # Disk/permission failure while saving the audio.
        print(error)
        sys.exit(-1)

print("synthesize_speech OK ->>" + output)
# Tail of the delete-member path: the enclosing def is outside this view.
# Once every member of the network is DELETED, the network itself is
# removed and all of its invitations are marked DELETED.
if number_of_members_in_network(self.members, networkid, member_status="DELETED") == len(
        self.members):
    # Set network status to DELETED for all invitations
    for invitation_id in self.invitations:
        if (self.invitations.get(invitation_id).invitation_networkid == networkid):
            self.invitations.get(invitation_id).set_network_status(
                "DELETED")
    # Remove network
    del self.networks[networkid]

def update_member(self, networkid, memberid, logpublishingconfiguration):
    # Validate both the network and the member before touching anything,
    # mirroring the real API's not-found responses.
    # Check if network exists
    if networkid not in self.networks:
        raise ResourceNotFoundException(
            "UpdateMember",
            "Network {0} not found.".format(networkid))
    if memberid not in self.members:
        raise ResourceNotFoundException(
            "UpdateMember",
            "Member {0} not found.".format(memberid))
    self.members.get(memberid).update(logpublishingconfiguration)

# One mock backend per region (standard AWS partition only).
managedblockchain_backends = {}
for region in Session().get_available_regions("managedblockchain"):
    managedblockchain_backends[region] = ManagedBlockchainBackend(region)
import boto3
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
import os
import sys
import subprocess
from tempfile import gettempdir
import pyaudio

# Create a client using the credentials and region defined in the [adminuser]
# section of the AWS credentials file (~/.aws/credentials).
# NOTE(review): the comment above mentions [adminuser] but the code loads
# the "default" profile -- confirm which one is intended.
session = Session(profile_name="default")
polly = session.client("polly")

# Language code -> Polly voice used to speak that language.
pollyVoice = {
    'en': 'Amy',
    'fr': 'Celine',
    'de': 'Vicki',
    'pt': 'Vitoria',
    'es': 'Miguel'
}

# Human-readable language name -> language code (values key into pollyVoice).
# NOTE(review): 'Portugese' is misspelled, but it is a runtime lookup key --
# only fix it together with every caller that uses the key.
languageOptions = {
    'English': 'en',
    'French': 'fr',
    'German': 'de',
    'Portugese': 'pt',
    'Spanish': 'es'
}

# Script entry: ask the user which file to synthesize.
myfile = input("file to be processed: ")
def test_alert_purge(self): session = Session() # Get the service resource dynamodb = session.resource('dynamodb') dynamodb.create_table(TableName='sso-dashboard-alert1', KeySchema=[{ 'AttributeName': 'alert_id', 'KeyType': 'HASH' }], AttributeDefinitions=[{ 'AttributeName': 'alert_id', 'AttributeType': 'S' }], ProvisionedThroughput={ 'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5 }) client = session.client('dynamodb') response = client.update_table( TableName='sso-dashboard-alert1', GlobalSecondaryIndexUpdates=[{ 'Create': { 'IndexName': 'user_id-index', 'KeySchema': [{ 'AttributeName': 'user_id', 'KeyType': 'HASH' }], 'Projection': { 'ProjectionType': 'ALL' }, 'ProvisionedThroughput': { 'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5 } } }], ) assert response is not None self.table = dynamodb.Table('sso-dashboard-alert1') self.table.meta.client.get_waiter('table_exists').wait( TableName='sso-dashboard-alert1') a = alert.Alert() a.dynamodb = self.table alert_dict = { "alert_code": "416c65727447656f6d6f64656c", "alert_id": "1c7c506eb221f6206becb8ef0d96f6", "alert_str_json": "foo", "date": "2018-08-05", "description": "This alert is created based on geo ip information about the last login of a user.", "duplicate": True, "risk": "high", "summary": "Did you recently login from Lewisham, United Kingdom (x.x.x.x)?", "url": "https://mana.mozilla.org/wiki/display/SECURITY/Alert%3A+Change+in+Country", "url_title": "Get Help", "user_id": "ad|Mozilla-LDAP|akrug" } res = a.create(alert_dict=alert_dict) assert res is not None assert res['ResponseMetadata']['HTTPStatusCode'] == 200 res = a.find('ad|Mozilla-LDAP|akrug') for this_alert in res.get('visible_alerts'): if alert_dict.get('alert_id') == this_alert.get('alert_id'): assert 0 else: pass
This file should only contain constants used for the EKS tests.
"""
import re
from enum import Enum

from boto3 import Session
from moto.core import ACCOUNT_ID
from moto.eks import REGION as DEFAULT_REGION

# Generic request/response defaults shared by the EKS tests.
DEFAULT_ENCODING = "utf-8"
DEFAULT_HTTP_HEADERS = {"Content-type": "application/json"}
DEFAULT_NAMESPACE = "namespace_1"
FROZEN_TIME = "2013-11-27T01:42:00Z"
MAX_FARGATE_LABELS = 5
PARTITIONS = Session().get_available_partitions()
# Fall back to the moto default when the local session has no region set.
REGION = Session().region_name or DEFAULT_REGION
SERVICE = "eks"
SUBNET_IDS = ["subnet-12345ab", "subnet-67890cd"]

# Request-field key/value pairs used to build test payloads.
AMI_TYPE_KEY = "amiType"
AMI_TYPE_VALUE = "AL2_x86_64"
CLIENT_REQUEST_TOKEN_KEY = "clientRequestToken"
CLIENT_REQUEST_TOKEN_VALUE = "test_request_token"
DISK_SIZE_KEY = "diskSize"
DISK_SIZE_VALUE = 30
ENCRYPTION_CONFIG_KEY = "encryptionConfig"
ENCRYPTION_CONFIG_VALUE = [{
def tts(self, text='', ssml='', voice_config=None, detail=None):
    '''
    Synthesizes audio data for text via Amazon Polly.

    Args:
        text: string / target to be synthesized(plain text)
        ssml: string / target to be synthesized(SSML)
        voice_config: VoiceConfig / parameters for voice and audio
        detail: dict / detail parameters for voice and audio
    Returns:
        binary audio data, or None if the response carries no AudioStream
    Raises:
        CloudTTSError: no credential configured, or input exceeds the limit
        TypeError: the stored credential is not a PollyCredential
        ValueError: neither text nor ssml supplied
    '''
    # Guard clauses replace the original "pass / else raise" anti-pattern.
    if not self.credential:
        raise CloudTTSError('No Authentication yet')
    if not isinstance(self.credential, PollyCredential):
        raise TypeError('Invalid credential')

    if self.credential.has_access_key():
        sess = Session(
            region_name=self.credential.region_name,
            aws_access_key_id=self.credential.aws_access_key_id,
            aws_secret_access_key=self.credential.aws_secret_access_key)
    else:
        # No explicit keys: rely on the default credential chain.
        sess = Session(region_name=self.credential.region_name)

    if text:
        if len(text) > PollyClient.MAX_TEXT_LENGTH:
            msg = Client.TOO_LONG_DATA_MSG.format(
                PollyClient.MAX_TEXT_LENGTH, len(text))
            raise CloudTTSError(msg)
    elif ssml:
        # The length limit applies to the payload without the <speak> tags.
        _text = re.compile('</?speak>').sub('', ssml)
        if len(_text) > PollyClient.MAX_TEXT_LENGTH:
            msg = Client.TOO_LONG_DATA_MSG.format(
                PollyClient.MAX_TEXT_LENGTH, len(_text))
            raise CloudTTSError(msg)
    else:
        raise ValueError('No text or ssml is passed')

    polly = sess.client('polly')
    params = self._make_params(voice_config, detail)
    response = polly.synthesize_speech(
        Text=ssml if ssml else text,
        TextType='ssml' if ssml else 'text',
        OutputFormat=params['output_format'],
        VoiceId=params['voice_id'],
        SampleRate=params['sample_rate'],
    )

    audio = None
    if 'AudioStream' in response:
        with closing(response['AudioStream']) as stream:
            audio = stream.read()

    return audio
# Tail of the GetDASHStreamingSessionURL handler: the def line and earlier
# parameters are outside this view.
        max_manifest_fragment_results,
):
    # The optional selector parameters are ignored: the mock URL format does
    # not depend on them. NOTE(review): the original comment mentioned
    # hls_url even though this builds the DASH variant.
    api_name = "GET_DASH_STREAMING_SESSION_URL"
    url = self._get_streaming_url(stream_name, stream_arn, api_name)
    return url

def get_clip(self, stream_name, stream_arn, clip_fragment_selector):
    # Validates the stream exists via the per-region kinesisvideo backend
    # (which raises for unknown streams).
    kinesisvideo_backends[self.region_name]._get_stream(stream_name, stream_arn)
    content_type = "video/mp4"  # Fixed content_type as it depends on input stream
    payload = b"sample-mp4-video"
    return content_type, payload

# One mock backend per region, covering all three AWS partitions.
kinesisvideoarchivedmedia_backends = {}
for region in Session().get_available_regions("kinesis-video-archived-media"):
    kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend(
        region
    )
for region in Session().get_available_regions(
    "kinesis-video-archived-media", partition_name="aws-us-gov"
):
    kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend(
        region
    )
for region in Session().get_available_regions(
    "kinesis-video-archived-media", partition_name="aws-cn"
):
    kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend(
        region
    )
# Tail of an access-token lookup: the enclosing def/if are outside this view.
        return None
    else:
        raise NotAuthorizedError(access_token)

def admin_set_user_password(self, user_pool_id, username, password, permanent):
    # Admin-forced password set: a permanent password confirms the user,
    # otherwise they must change it on next sign-in.
    user = self.admin_get_user(user_pool_id, username)
    user.password = password
    if permanent:
        user.status = UserStatus["CONFIRMED"]
    else:
        user.status = UserStatus["FORCE_CHANGE_PASSWORD"]

# One mock backend per region, covering all three AWS partitions.
cognitoidp_backends = {}
for region in Session().get_available_regions("cognito-idp"):
    cognitoidp_backends[region] = CognitoIdpBackend(region)
for region in Session().get_available_regions("cognito-idp", partition_name="aws-us-gov"):
    cognitoidp_backends[region] = CognitoIdpBackend(region)
for region in Session().get_available_regions("cognito-idp", partition_name="aws-cn"):
    cognitoidp_backends[region] = CognitoIdpBackend(region)

# Hack to help moto-server process requests on localhost, where the region isn't
# specified in the host header. Some endpoints (change password, confirm forgot
# password) have no authorization header from which to extract the region.
def find_region_by_value(key, value):
    # (Definition continues beyond this view.)
    for region in cognitoidp_backends:
        backend = cognitoidp_backends[region]
def update_group(self, group_name, description=None):
    """Apply a new description to the named group (when one is given)."""
    if description:
        self.groups.by_name[group_name].description = description
    return self.groups.by_name[group_name]

def update_group_query(self, group_name, resource_query):
    """Validate and install a new resource query, returning the group."""
    self._validate_resource_query(resource_query)
    group = self.groups.by_name[group_name]
    group.resource_query = resource_query
    return group

def get_group_configuration(self, group_name):
    """Return the configuration attached to the named group."""
    return self.groups.by_name.get(group_name).configuration

def put_group_configuration(self, group_name, configuration):
    """Replace the named group's configuration and return the group."""
    target = self.groups.by_name[group_name]
    target.configuration = configuration
    return target

# One mock backend per region; the standard partition is boto3's default
# ("aws"), so all three partitions can share one loop.
resourcegroups_backends = {}
for partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions("resource-groups",
                                                  partition_name=partition):
        resourcegroups_backends[region] = ResourceGroupsBackend(region)
# Closing paren of a Minio(...) call whose opening is outside this view.
)

# Second MinIO client addressed via the external (public-facing) endpoint.
minioExternalClient = Minio(
    MINIO_EXTERNAL_SERVER,
    region=MINIO_REGION,
    access_key=os.environ.get("MINIO_ACCESS_KEY"),
    secret_key=os.environ.get("MINIO_SECRET_KEY"),
    secure=MINIO_SSL,
)

# Make sure the working bucket exists before anything tries to use it.
if not minioClient.bucket_exists(MINIO_BUCKET):
    minioClient.make_bucket(MINIO_BUCKET)

# boto3 session pointed at the same MinIO credentials/region.
session = Session(
    aws_access_key_id=os.environ.get("MINIO_ACCESS_KEY"),
    aws_secret_access_key=os.environ.get("MINIO_SECRET_KEY"),
    region_name=MINIO_REGION,
)
# https://github.com/boto/boto3/blob/develop/boto3/session.py#L185

# S3-compatible client against the MinIO endpoint: SigV4 signing and
# path-style addressing (MinIO-compatible settings); TLS verification off.
s3 = session.client(
    "s3",
    verify=False,
    use_ssl=MINIO_SSL,
    endpoint_url=MINIO_HTTP_PREFIX + MINIO_SERVER,
    region_name=MINIO_REGION,
    config=Config(signature_version="s3v4", s3={"addressing_style": "path"}),
)
# signature_versions
# https://github.com/boto/botocore/blob/master/botocore/auth.py#L846
# We only care about a filter's Region, Resource Name, and Resource ID: resource_region = filters.get('Region') resource_id = [filters['ResourceId'] ] if filters.get('ResourceId') else None resource_name = filters.get('ResourceName') identifiers, new_token = \ RESOURCE_MAP[resource_type].list_config_service_resources(resource_id, resource_name, limit, next_token, resource_region=resource_region) result = { 'ResourceIdentifiers': [{ 'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': identifier['region'], 'ResourceType': identifier['type'], 'ResourceId': identifier['id'], 'ResourceName': identifier['name'] } for identifier in identifiers] } if new_token: result['NextToken'] = new_token return result config_backends = {} boto3_session = Session() for region in boto3_session.get_available_regions('config'): config_backends[region] = ConfigBackend()
def newSession():
    """Interactively build a boto3 Session pinned to us-east-2.

    Prompts for an AWS access key ID and secret access key. The secret is
    read with getpass so it is not echoed to the terminal (reading secrets
    via plain input() leaks them to the screen and scrollback).

    Returns:
        A boto3 Session constructed from the entered credentials.
    """
    from getpass import getpass  # local import: only needed here
    return Session(aws_access_key_id=input("Enter your AWS access key ID: "),
                   aws_secret_access_key=getpass("Enter your AWS secret key: "),
                   region_name="us-east-2")
def get_config_resource(self, resource_id, resource_name=None,
                        backend_region=None, resource_region=None):
    """Return the AWS Config view of the account's Public Access Block.

    Args:
        resource_id: must equal this (mock) account's ID, else no match.
        resource_name: PABs have no name; any non-empty value means no match.
        backend_region: region of the servicing backend; takes precedence
            over resource_region when both are given.
        resource_region: caller-side region filter; must be a valid
            'config' region.
    Returns:
        The AWS Config dict for the PAB, or None when no PAB is configured
        or the filters do not match.
    """
    # Do we even have this defined?
    if not self.backends["global"].public_access_block:
        return None

    # Resource name can only ever be "" if it's supplied:
    if resource_name is not None and resource_name != "":
        return None

    # Are we filtering based on region?
    account_id = get_moto_s3_account_id()
    # Plain copy -- the identity comprehension here was redundant.
    regions = list(Session().get_available_regions("config"))

    # Is the resource ID correct?:
    if account_id == resource_id:
        if backend_region:
            pab_region = backend_region
        # Invalid region?
        elif resource_region not in regions:
            return None
        else:
            pab_region = resource_region
    else:
        return None

    # Format the PAB to the AWS Config format:
    creation_time = datetime.datetime.utcnow()
    config_data = {
        "version": "1.3",
        "accountId": account_id,
        "configurationItemCaptureTime": str(creation_time),
        "configurationItemStatus": "OK",
        "configurationStateId": str(int(time.mktime(
            creation_time.timetuple()))),  # PY2 and 3 compatible
        "resourceType": "AWS::S3::AccountPublicAccessBlock",
        "resourceId": account_id,
        "awsRegion": pab_region,
        "availabilityZone": "Not Applicable",
        "configuration":
            self.backends["global"].public_access_block.to_config_dict(),
        "supplementaryConfiguration": {},
    }

    # The 'configuration' field is also a JSON string:
    config_data["configuration"] = json.dumps(config_data["configuration"])

    return config_data
def test_terragrunt_gets_all_parameters(self, data):
    """Verify that Deploy.run() invokes terragrunt plan/apply with every
    expected -var/-var-file argument derived from the test *data* fixture.
    External process calls, temp files and secret lookups are patched out.
    """
    # Given
    deploy_config = DeployConfig(
        data['team'],
        data['dev_account_id'],
        data['platform_config_file']
    )
    boto_session = Session(
        'dummy-access-key-id',
        'dummy-secret-access-key',
        'dummy-token',
        data['aws_region']
    )
    deploy = Deploy(
        boto_session,
        data['component_name'],
        data['environment_name'],
        data['version'],
        data['ecs_cluster'],
        deploy_config
    )
    # ECR image reference the deploy is expected to pass through.
    image_name = '{}.dkr.ecr.{}.amazonaws.com/{}:{}'.format(
        data['dev_account_id'],
        data['aws_region'],
        data['component_name'],
        data['version']
    )
    secret_file_path = '/mock/file/path'

    # When: patch out the subprocess call, the temp file used for secrets,
    # and the secret retrieval itself.
    with patch(
        'cdflow_commands.plugins.ecs.check_call'
    ) as check_call, patch(
        'cdflow_commands.plugins.ecs.NamedTemporaryFile',
        autospec=True
    ) as NamedTemporaryFile, patch(
        'cdflow_commands.plugins.ecs.get_secrets'
    ) as get_secrets:
        NamedTemporaryFile.return_value.__enter__.return_value.name = \
            secret_file_path
        get_secrets.return_value = {}
        deploy.run()

    # Then: both plan and apply must receive the full argument list.
    args = [
        '-var', 'component={}'.format(data['component_name']),
        '-var', 'env={}'.format(data['environment_name']),
        '-var', 'aws_region={}'.format(data['aws_region']),
        '-var', 'team={}'.format(data['team']),
        '-var', 'image={}'.format(image_name),
        '-var', 'version={}'.format(data['version']),
        '-var', 'ecs_cluster={}'.format(data['ecs_cluster']),
        '-var-file', data['platform_config_file'],
        '-var-file', secret_file_path,
        'infra'
    ]
    check_call.assert_any_call(
        ['terragrunt', 'plan'] + args,
        env=ANY
    )
    check_call.assert_any_call(
        ['terragrunt', 'apply'] + args,
        env=ANY
    )
def get_schemas_client(profile, region):
    """Build an EventBridge Schemas client.

    A truthy *profile* selects that named AWS profile; otherwise the
    default credential chain is used. *region* is applied to the client.
    """
    session = Session(profile_name=profile) if profile else Session()
    return session.client("schemas", region_name=region)
def create(self, name: "str | None" = None) -> Session:
    """Instantiate a boto3 Session from the stored configuration *name*.

    Args:
        name: configuration key to look up; None selects whatever default
              entry ``self.get`` resolves for None. (Annotation fixed:
              the parameter accepts None, so plain ``str`` was wrong.)
    Returns:
        A Session built from the looked-up keyword arguments.
    """
    config = self.get(name)
    return Session(**config)
# Default browser-like User-Agent used when fetching remote images.
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'

try:
    # Prefer Django settings when the app runs inside a Django project.
    from django.conf import settings
    s3_bucket = settings.S3_BUCKET
    profile_name = settings.AWS_PROFILE_NAME
    if hasattr(settings, 'USER_AGENT'):
        user_agent = settings.USER_AGENT
except:
    # NOTE(review): bare except also swallows real import/config errors --
    # consider catching ImportError / ImproperlyConfigured explicitly.
    s3_bucket = os.getenv('S3_BUCKET')
    profile_name = os.getenv('AWS_PROFILE_NAME')

logger = logging.getLogger(__name__)

try:
    session = Session(profile_name=profile_name)
    s3 = session.client('s3')
except:
    # NOTE(review): bare except again; on failure 's3' stays undefined, so
    # later uses raise NameError instead of a clear configuration error.
    logger.error('Error instantiating AWS services. AWS credentials might not be configured correctly. '
                 'http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html')

def transfer_image_url_to_s3(url, make_jpeg=False, max_size=None, thumbnail_size=None, **kwargs):
    # (Definition continues beyond this view.)
    # Already hosted in our bucket/path? Then return the URL untouched.
    if url.startswith('{}/{}/{}'.format(s3.meta.endpoint_url, s3_bucket, kwargs.get('path', ''))):
        # already in s3
        print('url already in s3', url)
        return url
    print('requesting', url)
    request = requests.get(url, headers={'User-Agent': user_agent}, timeout=15)
    content_type = request.headers.get('Content-Type')
    if not content_type:
def update_pipeline(
    self,
    id,
    name,
    input_bucket,
    role,
    aws_kms_key_arn,
    notifications,
    content_config,
    thumbnail_config,
):
    # Only name/input_bucket/role are applied; the remaining arguments are
    # accepted for API-signature compatibility but ignored by this mock.
    pipeline = self.read_pipeline(id)  # presumably raises for unknown ids -- defined elsewhere
    pipeline.update(name, input_bucket, role)
    warnings = []  # the mock never produces warnings
    return pipeline, warnings

def delete_pipeline(self, pipeline_id):
    # Drop the pipeline from the registry (KeyError if it does not exist).
    self.pipelines.pop(pipeline_id)

# One mock backend per region, covering all three AWS partitions.
elastictranscoder_backends = {}
for region in Session().get_available_regions("elastictranscoder"):
    elastictranscoder_backends[region] = ElasticTranscoderBackend(region)
for region in Session().get_available_regions("elastictranscoder", partition_name="aws-us-gov"):
    elastictranscoder_backends[region] = ElasticTranscoderBackend(region)
for region in Session().get_available_regions("elastictranscoder", partition_name="aws-cn"):
    elastictranscoder_backends[region] = ElasticTranscoderBackend(region)
# Token used up, might as well bin now, if you call it again your an idiot if pagination_token: del self._pages[pagination_token] return new_token, result # These methods will be called from responses.py. # They should call a tag function inside of the moto module # that governs the resource, that way if the target module # changes how tags are delt with theres less to change # def tag_resources(self, resource_arn_list, tags): # return failed_resources_map # # def untag_resources(self, resource_arn_list, tag_keys): # return failed_resources_map resourcegroupstaggingapi_backends = {} for region in Session().get_available_regions("resourcegroupstaggingapi"): resourcegroupstaggingapi_backends[ region] = ResourceGroupsTaggingAPIBackend(region) for region in Session().get_available_regions("resourcegroupstaggingapi", partition_name="aws-us-gov"): resourcegroupstaggingapi_backends[ region] = ResourceGroupsTaggingAPIBackend(region) for region in Session().get_available_regions("resourcegroupstaggingapi", partition_name="aws-cn"): resourcegroupstaggingapi_backends[ region] = ResourceGroupsTaggingAPIBackend(region)
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
import os
import sys
import subprocess
import rospy
from std_msgs.msg import String
from tempfile import gettempdir

# Spoken warning once the cumulative character count gets expensive.
overload_str = "You have been saying an awful lot. It is going to cost money"
overload_char_limit = 100000  # characters sent to Polly before warning

# Create a client using the credentials and region defined in the [adminuser]
# section of the AWS credentials file (~/.aws/credentials).
session = Session(profile_name="adminuser")
polly = session.client("polly")

# Running total of characters sent to Polly, compared against the limit.
char_count = 0

def call_polly(words):
    # (Definition continues beyond this view.)
    try:
        # Request speech synthesis
        response = polly.synthesize_speech(Text=words,
                                           OutputFormat="mp3",
                                           VoiceId="Matthew")
    except (BotoCoreError, ClientError) as error:
        # The service returned an error, exit gracefully
        print(error)
        sys.exit(-1)
import tornado
import tornado.locale
import markdown
import os
import boto3
import botocore
from handlers.base import BaseHandler
from handlers.util import *
from handlers.blog.blog import BlogURL
from boto3 import Session
from datetime import datetime

# AWS S3 Configuration
BUCKET_NAME = 'chec-static'

# Freeze the default-chain credentials so they can be passed explicitly
# to the client below.
session = Session()
credentials = session.get_credentials()
current_credentials = credentials.get_frozen_credentials()

s3 = boto3.resource('s3')
s3c = boto3.client('s3',
                   aws_access_key_id=current_credentials.access_key,
                   aws_secret_access_key=current_credentials.secret_key,
                   aws_session_token=current_credentials.token)

# AWS S3 access bucket
myBucket = s3.Bucket(BUCKET_NAME)

# NOTE(review): mutating the private _client_config after construction is
# fragile; prefer passing Config(signature_version=botocore.UNSIGNED) when
# creating the client.
config = s3c._client_config
config.signature_version = botocore.UNSIGNED

# Locations of static documents served alongside the application.
DocURL = os.path.join(os.path.dirname('./..'), "static/documents")
CurriculumURL = os.path.join(DocURL, "HCIcurriculum")
def __init__(self, volume=0.7):
    """Prepare the audio mixer and an Amazon Polly client.

    Args:
        volume: initial playback volume, stored for later use.
    """
    pygame.mixer.init()
    self._volume = volume
    aws_session = Session(profile_name="default")
    self.__polly = aws_session.client("polly")
# Tail of list_job_executions: the enclosing def is outside this view.
    if status is not None:
        # NOTE(review): the 'status in elem["status"]' clause is redundant --
        # the equality test alone already implies it.
        job_executions = list(
            filter(
                lambda elem: status in elem["status"] and elem["status"] == status,
                job_executions,
            ))

    # Offset-based pagination: the token is the stringified start offset.
    # NOTE(review): the length is checked AFTER slicing, so these conditions
    # can never be true and next_token is always None; the pre-slice length
    # should be captured first.
    token = next_token
    if token is None:
        job_executions = job_executions[0:max_results]
        next_token = str(
            max_results) if len(job_executions) > max_results else None
    else:
        token = int(token)
        job_executions = job_executions[token:token + max_results]
        next_token = (str(token + max_results)
                      if len(job_executions) > token + max_results else None)

    return job_executions, next_token

# One mock backend per region, covering all three AWS partitions.
iot_backends = {}
for region in Session().get_available_regions("iot"):
    iot_backends[region] = IoTBackend(region)
for region in Session().get_available_regions("iot", partition_name="aws-us-gov"):
    iot_backends[region] = IoTBackend(region)
for region in Session().get_available_regions("iot", partition_name="aws-cn"):
    iot_backends[region] = IoTBackend(region)
# Tail of a metrics lookup: the enclosing def is outside this view.
    else:
        return None, metrics

class LogGroup(BaseModel):
    def __init__(self, spec):
        # required
        self.name = spec["LogGroupName"]
        # optional
        self.tags = spec.get("Tags", [])

    @classmethod
    def create_from_cloudformation_json(cls, resource_name,
                                        cloudformation_json, region_name):
        # Delegate creation to the logs backend for the target region;
        # every CloudFormation property is forwarded as a keyword argument.
        properties = cloudformation_json["Properties"]
        log_group_name = properties["LogGroupName"]
        tags = properties.get("Tags", {})
        return logs_backends[region_name].create_log_group(
            log_group_name, tags, **properties)

# One mock backend per region, covering all three AWS partitions.
cloudwatch_backends = {}
for region in Session().get_available_regions("cloudwatch"):
    cloudwatch_backends[region] = CloudWatchBackend()
for region in Session().get_available_regions("cloudwatch", partition_name="aws-us-gov"):
    cloudwatch_backends[region] = CloudWatchBackend()
for region in Session().get_available_regions("cloudwatch", partition_name="aws-cn"):
    cloudwatch_backends[region] = CloudWatchBackend()
def import_dynamodb_items_to_es(table_name, aws_secret, aws_access, aws_region,
                                event_source_arn, lambda_f, scan_limit,
                                es_host, es_index):
    """Replay every item of a DynamoDB table into Elasticsearch.

    Scans the table page by page, wraps each item as a synthetic DynamoDB
    INSERT stream record, and ships batches of 100 records to the indexing
    lambda via a worker pool; any final partial batch is sent synchronously.

    Args:
        table_name: source DynamoDB table.
        aws_secret / aws_access / aws_region: credentials and region.
        event_source_arn: ARN stamped onto each synthetic stream record.
        lambda_f: target lambda handed to send_to_eslambda.
        scan_limit: page size for table.scan.
        es_host / es_index: exported via env vars for the helpers.
    """
    global reports
    global partSize
    global object_amount
    logger = logging.getLogger()
    logger.setLevel(logging.ERROR)
    session = Session(aws_access_key_id=aws_access,
                      aws_secret_access_key=aws_secret,
                      region_name=aws_region)
    # Mirror credentials/targets into the environment for the helpers.
    os.environ['AWS_ACCESS_KEY_ID'] = aws_access
    os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret
    os.environ['AWS_REGION'] = aws_region
    os.environ['ES_HOST'] = es_host
    os.environ['ES_INDEX'] = es_index
    dynamodb = session.resource('dynamodb')
    logger.info('dynamodb: %s', dynamodb)
    ddb_table_name = table_name
    table = dynamodb.Table(ddb_table_name)
    logger.info('table: %s', table)
    # Key attribute names are needed to split each item into Keys/NewImage.
    ddb_keys_name = [a['AttributeName'] for a in table.attribute_definitions]
    logger.info('ddb_keys_name: %s', ddb_keys_name)
    response = None
    pool = Pool(8)
    while True:
        if not response:
            response = table.scan(Limit=scan_limit)
        else:
            response = table.scan(
                ExclusiveStartKey=response['LastEvaluatedKey'],
                Limit=scan_limit)
        for i in response["Items"]:
            ddb_keys = {k: i[k] for k in i if k in ddb_keys_name}
            ddb_data = boto3.dynamodb.types.TypeSerializer().serialize(i)["M"]
            ddb_keys = boto3.dynamodb.types.TypeSerializer().serialize(
                ddb_keys)["M"]
            record = {
                "dynamodb": {
                    "SequenceNumber": "0000",
                    "Keys": ddb_keys,
                    "NewImage": ddb_data
                },
                "awsRegion": aws_region,
                "eventName": "INSERT",
                "eventSourceARN": event_source_arn,
                "eventSource": "aws:dynamodb"
            }
            partSize += 1
            object_amount += 1
            logger.info(object_amount)
            reports.append(record)
            if partSize >= 100:
                pool.apply_async(send_to_eslambda, args=(reports, lambda_f))
                # BUGFIX: start a fresh batch. Without this reset the same
                # ever-growing list was resubmitted on every following item.
                # Rebinding (not clearing) leaves the list already handed to
                # the worker untouched.
                reports = []
                partSize = 0
        if 'LastEvaluatedKey' not in response:
            break
    # Flush any final partial batch synchronously.
    if partSize > 0:
        send_to_eslambda(reports, lambda_f)
    pool.close()
    pool.join()
def list_config_service_resources(
    self,
    resource_ids,
    resource_name,
    limit,
    next_token,
    backend_region=None,
    resource_region=None,
    aggregator=None,
):
    """List the account's Public Access Block for AWS Config queries.

    The PAB is account-wide: its resource ID is the AWS account ID, it has
    no resource name, and the single item is replicated across every
    region. Pagination tokens are simply region names.

    Returns:
        (identifiers, next_token): identifiers is a list (at most *limit*
        long) of {'type', 'id', 'region'} dicts; next_token is the region
        to resume from, or None.
    Raises:
        InvalidNextTokenException: next_token is not a known region name.
    """
    # The resource name can only ever be None or an empty string:
    if resource_name is not None and resource_name != "":
        return [], None

    pab = None
    account_id = get_moto_s3_account_id()
    # Plain copy -- the identity comprehension here was redundant.
    regions = list(Session().get_available_regions("config"))

    # If a resource ID was passed in, then filter accordingly:
    if resource_ids:
        for resource_id in resource_ids:
            if account_id == resource_id:
                pab = self.backends["global"].public_access_block
                break
    # Otherwise, just grab the one from the backend:
    if not resource_ids:
        pab = self.backends["global"].public_access_block

    # If it's not present, then return nothing
    if not pab:
        return [], None

    # Filter on regions (and paginate on them as well):
    if backend_region:
        pab_list = [backend_region]
    elif resource_region:
        # Invalid region?
        if resource_region not in regions:
            return [], None
        pab_list = [resource_region]
    # Aggregated query where no regions were supplied so return them all:
    else:
        pab_list = regions

    # Pagination logic:
    sorted_regions = sorted(pab_list)
    new_token = None

    # Get the start:
    if not next_token:
        start = 0
    else:
        # Tokens for this moto feature is just the region-name:
        # For OTHER non-global resource types, it's the region concatenated
        # with the resource ID.
        if next_token not in sorted_regions:
            raise InvalidNextTokenException()
        start = sorted_regions.index(next_token)

    # Get the list of items to collect:
    pab_list = sorted_regions[start:(start + limit)]
    if len(sorted_regions) > (start + limit):
        new_token = sorted_regions[start + limit]

    return (
        [{
            "type": "AWS::S3::AccountPublicAccessBlock",
            "id": account_id,
            "region": region,
        } for region in pab_list],
        new_token,
    )
# set variables no_proxy, i.e., run internal service calls directly no_proxy = ','.join( set((LOCALSTACK_HOSTNAME, HOSTNAME, LOCALHOST, '127.0.0.1', '[::1]'))) if os.environ.get('no_proxy'): os.environ['no_proxy'] += ',' + no_proxy elif os.environ.get('NO_PROXY'): os.environ['NO_PROXY'] += ',' + no_proxy else: os.environ['no_proxy'] = no_proxy # additional CLI commands, can be set by plugins CLI_COMMANDS = {} # set of valid regions VALID_REGIONS = set(Session().get_available_regions('sns')) def parse_service_ports(): """ Parses the environment variable $SERVICES with a comma-separated list of services and (optional) ports they should run on: 'service1:port1,service2,service3:port3' """ service_ports = os.environ.get('SERVICES', '').strip() if not service_ports: return DEFAULT_SERVICE_PORTS result = {} for service_port in re.split(r'\s*,\s*', service_ports): parts = re.split(r'[:=]', service_port) service = parts[0] key_upper = service.upper().replace('-', '_') port_env_name = '%s_PORT' % key_upper # (1) set default port number
# Tail of update_input(...): the def line is outside this view.
    self,
    destinations,
    input_devices,
    input_id,
    input_security_groups,
    media_connect_flows,
    name,
    role_arn,
    sources,
):
    # Look up the stored input and overwrite every mutable field in place,
    # then return the updated object (KeyError for an unknown input_id).
    a_input = self._inputs[input_id]
    a_input.destinations = destinations
    a_input.input_devices = input_devices
    a_input.security_groups = input_security_groups
    a_input.media_connect_flows = media_connect_flows
    a_input.name = name
    a_input.role_arn = role_arn
    a_input.sources = sources
    return a_input

# One mock backend per region, covering all three AWS partitions.
medialive_backends = {}
for region in Session().get_available_regions("medialive"):
    medialive_backends[region] = MediaLiveBackend()
for region in Session().get_available_regions("medialive", partition_name="aws-us-gov"):
    medialive_backends[region] = MediaLiveBackend()
for region in Session().get_available_regions("medialive", partition_name="aws-cn"):
    medialive_backends[region] = MediaLiveBackend()