def setUp(self):
    self.sns_delivery = SnsDelivery(MAILER_CONFIG, boto3.Session(), logger)
    self.sns_topic_example = 'arn:aws:sns:us-east-1:172519456306:cloud-custodian'
def sagemaker_session(region):
    return Session(boto_session=boto3.Session(region_name=region))
filename3 = os.path.join(
    os.path.dirname(__file__), '../cf', 'cf_iot_rule_gen_s3.json'
)

# Open the CloudFormation templates for the DynamoDB setup
with open(filename) as f:
    config = f.read()
with open(filename2) as f:
    config2 = f.read()
with open(filename3) as f:
    config3 = f.read()

# Create a client for CloudFormation
cf = boto3.client('cloudformation', region_name='us-west-2')
session = boto3.Session(region_name='us-west-2')

# Create clients for the other resources
client = boto3.client('s3', region_name='ap-northeast-2')
paginator = client.get_paginator("list_objects")
client2 = boto3.resource('dynamodb', region_name='us-west-2')

origin = 'cf'
destination = 'dynamodb'

# Reference name dictionaries
cloud_env = {
    '702738637364': 'dev',
    '207840868635': 'test',
    '660909071379': 'qc',
    '043951023778': 'tc',
}
users = {uid: '20' for uid in (
    'AIDAIQ5BHWCJHZ3TGTP3E', 'AIDAJERRHDYD3YFNZTHFY',
    'AIDAI5JMJC4T5T744LQHU', 'AIDAIDFHZH4RPIU4WWGRS',
)}
users.update({uid: '50' for uid in (
    'AIDAIF3O2S4QE4R6HJ6YQ', 'AIDAILUP7G25NUSUY3C76',
)})
def connect_aws():
    session = boto3.Session()
    client = session.client('ec2')
    return client
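# A minimal usage sketch, assuming default credentials and region are already
# configured (e.g. via environment variables or ~/.aws/config):
client = connect_aws()
for reservation in client.describe_instances().get('Reservations', []):
    for inst in reservation['Instances']:
        print(inst['InstanceId'], inst['State']['Name'])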
def get_services():
    """Return a list of all service names where listable resources can be present"""
    return [
        service
        for service in boto3.Session().get_available_services()
        if service not in SERVICE_BLACKLIST
    ]
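# SERVICE_BLACKLIST is referenced above but not defined in this snippet; the
# values below are a hypothetical example, not the original contents.
SERVICE_BLACKLIST = {'cloudsearchdomain', 'kinesis-video-media'}  # assumed

for service_name in get_services():
    print(service_name)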
import boto3

translate = boto3.Session(region_name='ap-northeast-1').client('translate')


def translate_en_to_ja(text):
    resp = translate.translate_text(Text=text,
                                    SourceLanguageCode="en",
                                    TargetLanguageCode="ja")
    return resp.get('TranslatedText')
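# Usage sketch; assumes valid credentials for the ap-northeast-1 region.
print(translate_en_to_ja("Hello, world"))  # prints the Japanese translation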
import boto3
import click

session = boto3.Session(profile_name='pythonAutomation')
s3 = session.resource('s3')


@click.group()
def cli():
    "Webotron deploys websites to AWS"
    pass


@cli.command('list-buckets')
def list_buckets():
    "List all S3 buckets"
    for bucket in s3.buckets.all():
        print(bucket)


@cli.command('list-bucket-objects')
@click.argument('bucket')
def list_bucket_objects(bucket):
    "List the contents of an S3 bucket"
    for obj in s3.Bucket(bucket).objects.all():
        print(obj)


if __name__ == '__main__':
    cli()
# coding: utf-8
import boto3

session = boto3.Session(profile_name="default")
s3 = session.resource('s3')
import boto3
from time import sleep
from athena_ddl import *

# Read in the config file
with open("../src/main/resources/application.conf", "r") as ins:
    array = []
    for line in ins:
        array.append(line.replace("\n", '').replace('spark.', '').split("="))
config = dict(array)

# Create the Athena session
session = boto3.Session(aws_access_key_id=config['awskey'],
                        aws_secret_access_key=config['awssecret'])
ath = session.client('athena')

# Global parameters
DATABASE = config['athena.dbname']
ROOT_BUCKET = f"s3://{config['rootbucket']}/"
ATHENA_LOGS_OUTPUT = f"{ROOT_BUCKET}{config['athena.logsoutputbucket']}/"

print(f"Root bucket is: {ROOT_BUCKET}")
print(f"Athena database name is: {DATABASE}")
print(f"Athena logs output path is: {ATHENA_LOGS_OUTPUT}")


def get_query_status_response(query_execution_id):
    response = ath.get_query_execution(QueryExecutionId=query_execution_id)
    return response
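# A hedged helper sketch: poll a query until it reaches a terminal state,
# using the function above. The state names come from the Athena
# GetQueryExecution API response.
def wait_for_query(query_execution_id, poll_seconds=2):
    while True:
        state = get_query_status_response(
            query_execution_id)['QueryExecution']['Status']['State']
        if state in ('SUCCEEDED', 'FAILED', 'CANCELLED'):
            return state
        sleep(poll_seconds)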
# coding: utf-8
import boto3

session = boto3.Session(profile_name='JPNEW')
as_client = session.client('autoscaling')
as_client.execute_policy(AutoScalingGroupName='slack-notify',
                         PolicyName='simple scale up')
def launch_instance(
    ami_id,
    instance_type,
    ec2_key_name=None,
    region="us-west-2",
    user_data=None,
    iam_instance_profile_name=None,
    instance_name="",
):
    """
    Launch an instance

    :param ami_id: AMI ID to be used for the launched instance
    :param instance_type: Instance type of the launched instance
    :param ec2_key_name: Name of the EC2 key pair to attach
    :param region: Region where the instance will be launched
    :param user_data: Script to run when the instance is launched, as a str
    :param iam_instance_profile_name: Name of the EC2 role to be attached
    :param instance_name: Tag to display as Name on the EC2 Console
    :return: <dict> Information about the instance that was launched
    """
    if not ami_id:
        raise Exception("No ami_id provided")
    if not ec2_key_name:
        raise Exception("Ec2 Key name must be provided")
    client = boto3.Session(region_name=region).client("ec2")

    # Construct the dictionary with the arguments for the API call
    arguments_dict = {
        "KeyName": ec2_key_name,
        "ImageId": ami_id,
        "InstanceType": instance_type,
        "MaxCount": 1,
        "MinCount": 1,
        "TagSpecifications": [
            {
                "ResourceType": "instance",
                "Tags": [{"Key": "Name", "Value": f"CI-CD {instance_name}"}],
            },
        ],
        "BlockDeviceMappings": [{
            "DeviceName": "/dev/sda1",
            "Ebs": {
                "VolumeSize": 200,
            },
        }],
    }
    if user_data:
        arguments_dict["UserData"] = user_data
    if iam_instance_profile_name:
        arguments_dict["IamInstanceProfile"] = {"Name": iam_instance_profile_name}

    LOGGER.info(
        f"Launching instance with name: {instance_name}, and key: {ec2_key_name}"
    )
    response = client.run_instances(**arguments_dict)
    if not response or len(response["Instances"]) < 1:
        raise Exception("Unable to launch the instance. Did not return any response")
    LOGGER.info("Instance launched successfully.")
    return response["Instances"][0]
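# Hypothetical invocation of launch_instance; the AMI ID and key name below
# are placeholders, not values from the original code.
instance = launch_instance(
    ami_id="ami-0123456789abcdef0",  # placeholder
    instance_type="t3.medium",
    ec2_key_name="ci-cd-key",        # placeholder
    instance_name="smoke-test",
)
print(instance["InstanceId"])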
def __init__(self):
    self.session = boto3.Session()
    self.client = self.session.client("route53")
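# A minimal sketch of how such a wrapper might be used; the class name
# Route53Helper is an assumption, since the snippet omits the class itself.
helper = Route53Helper()
paginator = helper.client.get_paginator("list_hosted_zones")
for page in paginator.paginate():
    for zone in page["HostedZones"]:
        print(zone["Name"], zone["Id"])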
import boto3
import botocore
import config

# Initialize a session with the AWS S3 service
session = boto3.Session(aws_access_key_id=config.AWS_SERVER_PUBLIC_KEY,
                        aws_secret_access_key=config.AWS_SERVER_SECRET_KEY)
s3 = session.resource('s3')

# Initialize a client to upload/download files
client_s3 = boto3.client("s3",
                         aws_access_key_id=config.AWS_SERVER_PUBLIC_KEY,
                         aws_secret_access_key=config.AWS_SERVER_SECRET_KEY)


def upload_file(file, bucket_name, acl="public-read"):
    '''Upload a binary file to an S3 bucket.'''
    try:
        client_s3.upload_fileobj(file, bucket_name, file.filename,
                                 ExtraArgs={
                                     "ACL": acl,
                                     "ContentType": file.content_type
                                 })
    except Exception as e:
        print("Error occurred: ", e)
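# A hedged companion sketch: download an object with the same client.
# download_file is a standard boto3 S3 client method; this helper is an
# addition for illustration, not part of the original module.
def download_file(bucket_name, key, local_path):
    '''Download an object from an S3 bucket to a local file.'''
    try:
        client_s3.download_file(bucket_name, key, local_path)
    except Exception as e:
        print("Error occurred: ", e)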
# mjlee
from pprint import pprint
import boto3, sys, botocore, re

vSrcProfileName = 'l-ellotte-dev'
vTgtProfileName = 'l-ellotte-tst'
vSrcRegionName = 'ap-northeast-2'
vTgtRegionName = 'ap-northeast-2'

session = boto3.Session(profile_name=vSrcProfileName, region_name=vSrcRegionName)
dynamodb = session.client('dynamodb', region_name=vSrcRegionName)
session2 = boto3.Session(profile_name=vTgtProfileName, region_name=vTgtRegionName)
dynamodb2 = session2.client('dynamodb', region_name=vTgtRegionName)

# Used when you want to append a postfix to the table names.
tblNmPostFix = ""

try:
    word = sys.argv[1]
    print("Table Replication Start : " + word + "%")
except IndexError:
    word = "ALL"
    print("Table Replication Start : ALL MODE")

vYesNo = input("Do you want to start replication(Y/[N])?")
if vYesNo in ("", "N", "n"):
    print("Good Bye")
# coding: utf-8
import boto3

session = boto3.Session(profile_name="autopython")
s3 = session.resource('s3')
def cluster_creation(emr_region: str,
                     cluster_name: str,
                     release_label: str,
                     log_uri: str,
                     step_name: str,
                     step_arg: str,
                     job_flow_role: str = 'EMR_EC2_DefaultRole',
                     service_role: str = 'EMR_DefaultRole',
                     scaledown_behavior: str = 'TERMINATE_AT_TASK_COMPLETION'
                     ) -> None:
    """
    Create an EMR cluster and submit a step to it.

    :param emr_region: region in which to create the cluster
    :param cluster_name: name of the cluster
    :param release_label: EMR release label (e.g. 'emr-6.3.0')
    :param log_uri: S3 URI for the cluster logs
    :param step_name: name of the step to run
    :param step_arg: space-separated command line for the step
    :param job_flow_role: EC2 instance profile for the cluster nodes
    :param service_role: IAM role used by the EMR service
    :param scaledown_behavior: behavior when scaling in instances
    :return: None
    """
    # func init
    dict_path = ConfigPath().PATH
    session = boto3.Session(region_name=emr_region)
    emr = session.client(service_name='emr')
    emr_para = {
        'Name': cluster_name,
        'ReleaseLabel': release_label,
        'LogUri': log_uri,
        'JobFlowRole': job_flow_role,
        'ServiceRole': service_role,
        'ScaleDownBehavior': scaledown_behavior
    }

    # load jsons
    for key, path in dict_path.items():
        with open(path) as f:
            config = json.load(f)
        emr_para.update({key: config})

    # add steps
    emr_para.update({
        'Steps': [
            {
                'Name': 'Setup hadoop debugging',
                'ActionOnFailure': 'TERMINATE_CLUSTER',
                'HadoopJarStep': {
                    'Jar': 'command-runner.jar',
                    'Args': ['state-pusher-script']
                }
            },
            {
                'Name': step_name,
                'ActionOnFailure': 'TERMINATE_CLUSTER',
                'HadoopJarStep': {
                    'Jar': 'command-runner.jar',
                    'Args': step_arg.split(' ')
                }
            }
        ]
    })

    response = emr.run_job_flow(**emr_para)
    print(response)
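# Hypothetical invocation of cluster_creation; every argument value below is
# a placeholder, not taken from the original code.
cluster_creation(
    emr_region='us-east-1',
    cluster_name='example-cluster',
    release_label='emr-6.3.0',
    log_uri='s3://example-bucket/emr-logs/',
    step_name='example-step',
    step_arg='spark-submit s3://example-bucket/job.py',
)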
accounts = [{
    "name": "pdffiller",
    "region": "us-east-1",
    "env": "prod"
    # "access_key_id": "",
    # "secret_access_key": ""
}]

with open('instances.csv', 'w', newline='') as csvfile:
    fieldnames = ['name', 'AZ', 'ID']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()

    for account in accounts:
        session = boto3.Session(profile_name='pdf-dev')
        ec2 = session.resource('ec2', region_name=account['region'])
        cw = session.client('cloudwatch', region_name=account['region'])

        count = 0
        for instance in ec2.instances.filter():
            count += 1
            print('Handle instance ', count, ' id ', instance.id)
            data = {
                "name": "",
                "AZ": instance.placement["AvailabilityZone"],
                "ID": instance.id,
            }
    print('TAG_VALUE is now: ' + tag)
    state['nodes'] = {'N': str(int(state.get('nodes').get('N')) + 1)}

    # Update the JSON with the changed IP for the server
    print(state[tag])
    state[tag] = {'S': ip}
    state['changed'] = {'S': tag}
    print(state)
    return [tag, state]


if __name__ == "__main__":
    # initialise needed variables
    session = boto3.Session(profile_name='terraform')
    client = session.client('dynamodb')
    tablename = 'consul-state'

    # get the AWS values needed to look up the relevant state and ASG data
    valueList = getAWSValues()
    LOCAL_IP = valueList[0]
    INSTANCE_ID = valueList[1]
    TAG_VALUE = valueList[2]
    vmaxInstances = valueList[3]
    instanceList = valueList[4]
    region = valueList[5]

    # get the current details from the DynamoDB table
    data = getStateFile(client, vmaxInstances, TAG_VALUE, tablename)
if __name__ == "__main__":
    SECURITY_GROUP = os.environ.get("SECURITY_GROUP", "")
    AMI = os.environ.get("AMI", "")
    ELASTIC_IP = os.environ.get("ELASTIC_IP", "")
    INSTANCE_SIZE = os.environ.get("INSTANCE_SIZE", "")
    JENKINS_URL = os.environ.get("JENKINS_URL", "")
    AWS_CREDENTIALS_URL = os.environ.get("AWS_CREDENTIALS_URL", "")
    SPOT_PRICE = float(os.environ.get("SPOT_PRICE", "0.0"))

    r = requests.get(AWS_CREDENTIALS_URL)
    creds = json.loads(r.text)
    AWS_KEY_ID = creds["AccessKeyId"]
    AWS_KEY = creds["SecretAccessKey"]
    AWS_SESSION_TOKEN = creds["Token"]

    session = boto3.Session(aws_access_key_id=AWS_KEY_ID,
                            aws_secret_access_key=AWS_KEY,
                            aws_session_token=AWS_SESSION_TOKEN,
                            region_name="us-east-2")
    rs = session.resource('ec2')
    cl = session.client('ec2')

    parser = argparse.ArgumentParser(
        "Spawns instances and checks for instance statuses.")
    parser.add_argument("--spawn-instances",
                        dest="instance_spawner",
                        action="store_true",
                        default=False)
    parser.add_argument("--manage-instances",
                        dest="instance_manager",
                        action="store_true",
                        default=False)
    parser.add_argument("--dry-run",
def collect(arguments):
    logging.getLogger('botocore').setLevel(logging.WARN)
    account_dir = './{}'.format(arguments.account_name)
    summary = []

    if arguments.clean and os.path.exists("account-data/{}".format(account_dir)):
        rmtree("account-data/{}".format(account_dir))

    make_directory("account-data")
    make_directory("account-data/{}".format(account_dir))

    default_region = os.environ.get('AWS_REGION', 'us-east-1')
    session_data = {'region_name': default_region}
    if arguments.profile_name:
        session_data['profile_name'] = arguments.profile_name

    session = boto3.Session(**session_data)

    sts = session.client('sts')
    try:
        sts.get_caller_identity()
    except ClientError as e:
        if 'InvalidClientTokenId' in str(e):
            print(
                "ERROR: sts.get_caller_identity failed with InvalidClientTokenId. Likely cause is no AWS credentials are set.",
                flush=True)
            exit(-1)
        else:
            print(
                "ERROR: Unknown exception when trying to call sts.get_caller_identity: {}".format(e),
                flush=True)
            exit(-1)

    # Ensure we can make iam calls
    iam = session.client('iam')
    try:
        iam.get_user(UserName='******')
    except ClientError as e:
        if 'InvalidClientTokenId' in str(e):
            print(
                "ERROR: AWS doesn't allow you to make IAM calls from a session without MFA, and the collect command gathers IAM data. Please use MFA or don't use a session. With aws-vault, specify `--no-session` on your `exec`.",
                flush=True)
            exit(-1)
        if 'NoSuchEntity' in str(e):
            # Ignore, we're just testing that our creds work
            pass
        else:
            print("ERROR: Ensure your creds are valid.", flush=True)
            print(e, flush=True)
            exit(-1)
    except NoCredentialsError:
        print("ERROR: No AWS credentials configured.", flush=True)
        exit(-1)

    print("* Getting region names", flush=True)
    ec2 = session.client('ec2')
    region_list = ec2.describe_regions()

    with open("account-data/{}/describe-regions.json".format(account_dir), 'w+') as f:
        f.write(json.dumps(region_list, indent=4, sort_keys=True))

    print("* Creating directory for each region name", flush=True)
    for region in region_list['Regions']:
        make_directory('account-data/{}/{}'.format(
            account_dir, region.get('RegionName', 'Unknown')))

    # Services that will only be queried in the default region
    # TODO: Identify these from boto
    universal_services = [
        'sts', 'iam', 'route53', 'route53domains', 's3', 's3control', 'cloudfront'
    ]

    with open("collect_commands.yaml", 'r') as f:
        collect_commands = yaml.safe_load(f)

    for runner in collect_commands:
        print('* Getting {}:{} info'.format(runner['Service'], runner['Request']),
              flush=True)

        parameters = {}
        for region in region_list['Regions']:
            dynamic_parameter = None
            # Only call universal services in the default region
            if runner['Service'] in universal_services:
                if region['RegionName'] != default_region:
                    continue
            elif region['RegionName'] not in session.get_available_regions(runner['Service']):
                print('  Skipping region {}, as {} does not exist there'.format(
                    region['RegionName'], runner['Service']))
                continue

            handler = session.client(runner['Service'], region_name=region['RegionName'])

            filepath = "account-data/{}/{}/{}-{}".format(
                account_dir, region['RegionName'], runner['Service'], runner['Request'])

            method_to_call = snakecase(runner["Request"])

            # Identify any parameters
            if runner.get('Parameters', False):
                for parameter in runner['Parameters']:
                    parameters[parameter['Name']] = parameter['Value']

                    # Look for any dynamic values (ones that jq parse a file)
                    if '|' in parameter['Value']:
                        dynamic_parameter = parameter['Name']

            if dynamic_parameter is not None:
                # Set up directory for the dynamic value
                make_directory(filepath)

                # The dynamic parameter must always be the first value
                parameter_file = parameters[dynamic_parameter].split('|')[0]
                parameter_file = "account-data/{}/{}/{}".format(
                    account_dir, region['RegionName'], parameter_file)

                # Get array if a globbing pattern is used (ex. "*.json")
                parameter_files = glob.glob(parameter_file)

                for parameter_file in parameter_files:
                    if not os.path.isfile(parameter_file):
                        # The file where parameters are obtained from does not exist.
                        # Need to manually add the failure to our list of calls made,
                        # as this failure occurs before the call is attempted.
                        call_summary = {
                            'service': handler.meta.service_model.service_name,
                            'action': method_to_call,
                            'parameters': parameters,
                            'exception': 'Parameter file does not exist: {}'.format(parameter_file)
                        }
                        summary.append(call_summary)
                        print(
                            "  The file where parameters are obtained from does not exist: {}".format(parameter_file),
                            flush=True)
                        continue

                    with open(parameter_file, 'r') as f:
                        parameter_values = json.load(f)

                    pyjq_parse_string = '|'.join(
                        parameters[dynamic_parameter].split('|')[1:])
                    for parameter in pyjq.all(pyjq_parse_string, parameter_values):
                        filename = get_filename_from_parameter(parameter)
                        identifier = get_identifier_from_parameter(parameter)

                        call_parameters = dict(parameters)
                        call_parameters[dynamic_parameter] = identifier

                        outputfile = "{}/{}".format(filepath, filename)

                        call_function(outputfile, handler, method_to_call,
                                      call_parameters, runner.get('Check', None),
                                      summary)
            else:
                filepath = filepath + ".json"
                call_function(filepath, handler, method_to_call, parameters,
                              runner.get('Check', None), summary)

    # Print summary
    print("--------------------------------------------------------------------")
    failures = []
    for call_summary in summary:
        if 'exception' in call_summary:
            failures.append(call_summary)

    print("Summary: {} APIs called. {} errors".format(len(summary), len(failures)))
    if len(failures) > 0:
        print("Failures:")
        for call_summary in failures:
            print("  {}.{}({}): {}".format(call_summary['service'],
                                           call_summary['action'],
                                           call_summary['parameters'],
                                           call_summary['exception']))
import boto3
import click

session = boto3.Session(profile_name='snapshotty')
ec2 = session.resource('ec2')


def filter_instances(project):
    instances = []

    if project:
        filters = [{'Name': 'tag:Project', 'Values': [project]}]
        instances = ec2.instances.filter(Filters=filters)
    else:
        instances = ec2.instances.all()

    return instances


@click.group()
def instances():
    """Commands for instances"""


@instances.command('list')
@click.option('--project', default=None,
              help="Only instances for project (tag Project:<name>)")
def list_instances(project):
    "List EC2 instances"
    instances = filter_instances(project)

    for i in instances:
        tags = {t['Key']: t['Value'] for t in i.tags or []}
        print(', '.join((
            i.id,
if TYPE_CHECKING:
    from mypy_boto3_macie2 import Macie2Client
    from mypy_boto3_macie2.type_defs import ListOrganizationAdminAccountsResponseTypeDef
    from mypy_boto3_organizations import OrganizationsClient
    from mypy_boto3_sns import SNSClient

# Setup Default Logger
LOGGER = logging.getLogger("sra")

# Global variables
SERVICE_NAME = "macie.amazonaws.com"
SLEEP_SECONDS = 20
UNEXPECTED = "Unexpected!"

try:
    MANAGEMENT_ACCOUNT_SESSION = boto3.Session()
    ORG_CLIENT: OrganizationsClient = MANAGEMENT_ACCOUNT_SESSION.client("organizations")
except Exception:
    LOGGER.exception(UNEXPECTED)
    raise ValueError("Unexpected error executing Lambda function. Review CloudWatch logs for details.") from None


def enable_admin_account(admin_account_id: str, response: ListOrganizationAdminAccountsResponseTypeDef) -> bool:
    """Enable admin account.

    Args:
        admin_account_id: Admin Account ID
        response: ListOrganizationAdminAccountsResponseTypeDef

    Returns:
        True or False
import os

import boto3
import argparse

# Input parameters
parser = argparse.ArgumentParser()
parser.add_argument("-file_path", "--file_path",
                    help="path of the file to push to s3")
parser.add_argument("-bucket_name", "--bucket_name",
                    help="name of the s3 bucket")
parser.add_argument("-aws_profile", "--aws_profile",
                    help="profile name of the aws configuration / credentials")
args = parser.parse_args()
file_path = args.file_path
bucket_name = args.bucket_name
aws_profile = args.aws_profile

# Create a session and connect to the s3 resource
session = boto3.Session(profile_name=aws_profile)
s3 = session.resource('s3')

# Upload the file
file_name = os.path.basename(file_path)
with open(file_path, 'rb') as data:
    s3.Bucket(bucket_name).put_object(Key=file_name, Body=data)
import os

import boto3
from botocore.exceptions import ClientError

from lambda_python_powertools.logging import (
    MetricUnit,
    log_metric,
    logger_inject_process_booking_sfn,
    logger_setup,
)
from lambda_python_powertools.tracing import Tracer

logger = logger_setup()
tracer = Tracer()

session = boto3.Session()
dynamodb = session.resource("dynamodb")
table_name = os.getenv("BOOKING_TABLE_NAME", "undefined")
table = dynamodb.Table(table_name)

_cold_start = True


class BookingConfirmationException(Exception):
    def __init__(self, message=None, status_code=None, details=None):
        super(BookingConfirmationException, self).__init__()
        self.message = message or "Booking confirmation failed"
        self.status_code = status_code or 500
        self.details = details or {}
# role = get_execution_role()
#
# client = boto3.client(
#     's3',
#     aws_access_key_id='YOUR_ACCESS_KEY_ID',
#     aws_secret_access_key='YOUR_SECRET_ACCESS_KEY',
#     region_name='us-west-1'
# )

# Never hard-code real credentials; the values below are placeholders.
session = boto3.Session(
    aws_access_key_id='YOUR_ACCESS_KEY_ID',
    aws_secret_access_key='YOUR_SECRET_ACCESS_KEY',
    region_name='us-west-2')
s3 = session.resource('s3')

role = "role"
print(role)

region_name = session.region_name
print(region_name)

containers = {
    'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/blazingtext:latest',
    'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/blazingtext:latest',
    'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/blazingtext:latest',
def write_to_s3(fobj, bucket, key):
    return (
        boto3.Session()
        .resource("s3")
        .Bucket(bucket)
        .Object(key)
        .upload_fileobj(fobj)
    )
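# Usage sketch with an in-memory buffer; the bucket and key below are
# placeholders.
import io

write_to_s3(io.BytesIO(b"hello"), "example-bucket", "path/to/hello.txt")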
def sagemaker_local_session(region):
    return LocalSession(boto_session=boto3.Session(region_name=region))
import collections
import datetime
import sys

import boto3

s = boto3.Session(profile_name='xxxxxxxxxxx')
ec = s.client('ec2', region_name='xxxxxxxxxxxxxx')

days_to_subtract = 7  # assumption: retention window; not given in the original
d = datetime.datetime.today() - datetime.timedelta(days=days_to_subtract)


def delete_ami(ami_id):
    ec.deregister_image(ImageId=ami_id)
    return "Deleted AMI"


def deleted_snapshot(Snapshot_id):
    ec.delete_snapshot(SnapshotId=Snapshot_id)
    return "Deleted Snapshot"


images = ec.describe_images(
    Filters=[
        {
            'Name': 'description',
            'Values': ['* ' + '2020-05-11']
        },
        {
            'Name': 'tag:backup',
            'Values': ['dailyami']
        }
    ]
)["Images"]

count = 0
im = []
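# A hedged sketch of how the helpers above might be driven: deregister each
# matched AMI, then delete its backing snapshots. The BlockDeviceMappings
# traversal follows the standard describe_images response shape.
for image in images:
    delete_ami(image["ImageId"])
    for mapping in image.get("BlockDeviceMappings", []):
        snapshot_id = mapping.get("Ebs", {}).get("SnapshotId")
        if snapshot_id:
            deleted_snapshot(snapshot_id)
    count += 1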
def subscribe(config, accounts, region, merge, debug):
    """subscribe accounts log groups to target account log group destination"""
    config = validate.callback(config)
    subscription = config.get('subscription')

    if subscription is None:
        log.error("config file: logs subscription missing")
        sys.exit(1)

    def converge_destination_policy(client, config):
        destination_name = subscription['destination-arn'].rsplit(':', 1)[-1]
        try:
            extant_destinations = client.describe_destinations(
                DestinationNamePrefix=destination_name).get('destinations')
        except ClientError:
            log.error("Log group destination not found: %s",
                      subscription['destination-arn'])
            sys.exit(1)

        account_ids = set()
        for a in accounts:
            if isinstance(a['role'], list):
                account_ids.add(a['role'][-1].split(':')[4])
            else:
                account_ids.add(a['role'].split(':')[4])

        if merge:
            for d in extant_destinations:
                if d['destinationName'] == destination_name:
                    for s in json.loads(d['accessPolicy']):
                        if s['Sid'] == 'CrossAccountDelivery':
                            account_ids.update(s['Principal']['AWS'])

        client.put_destination_policy(
            destinationName=destination_name,
            accessPolicy=json.dumps({
                'Statement': [{
                    'Action': 'logs:PutSubscriptionFilter',
                    'Effect': 'Allow',
                    'Principal': {'AWS': list(account_ids)},
                    'Resource': subscription['destination-arn'],
                    'Sid': 'CrossAccountDelivery'
                }]
            }))

    def subscribe_account(t_account, subscription, region):
        session = get_session(t_account['role'], region)
        client = session.client('logs')
        distribution = subscription.get('distribution', 'ByLogStream')

        for g in t_account.get('groups'):
            if g.endswith('*'):
                g = g.replace('*', '')
                paginator = client.get_paginator('describe_log_groups')
                allLogGroups = paginator.paginate(
                    logGroupNamePrefix=g).build_full_result()['logGroups']
                for l in allLogGroups:
                    _process_subscribe_group(
                        client, l['logGroupName'], subscription, distribution)
            else:
                _process_subscribe_group(client, g, subscription, distribution)

    if subscription.get('managed-policy'):
        if subscription.get('destination-role'):
            session = get_session(subscription['destination-role'], region)
        else:
            session = boto3.Session()
        converge_destination_policy(session.client('logs'), config)

    executor = debug and MainThreadExecutor or ThreadPoolExecutor

    with executor(max_workers=32) as w:
        futures = {}
        for account in config.get('accounts', ()):
            if accounts and account['name'] not in accounts:
                continue
            futures[w.submit(
                subscribe_account, account, subscription, region)] = account

        for f in as_completed(futures):
            account = futures[f]
            if f.exception():
                log.error("Error on account %s err: %s",
                          account['name'], f.exception())
            log.info("Completed %s", account['name'])
import json
import time
import uuid
import sys

import boto3
import botocore
from docopt import docopt
from pprint import pprint

## LOOK Here!!! You will need to change this
# MEDIALIVE_ARN = 'arn:aws:iam::1234567890:role/AllowMediaLiveAccessRole'  # leah
MEDIALIVE_ARN = 'arn:aws:iam::1234567890:role/MediaLiveAccessRole'  # techmkt

S3_BUCKET = 'nab2018-catfinder5001'
CHANNEL_NAME = 'nab2018-catfinder5001'
REGION = 'us-east-1'

session = boto3.Session(
    # profile_name='ibc'
)

medialive = session.client(
    'medialive',
    region_name=REGION,
)
mediapackage = session.client(
    'mediapackage',
    region_name=REGION,
)
ssm = session.client(
    'ssm',
    region_name=REGION,
)