Example #1
0
def get_token():
    """Fetch OAuth2 credentials from S3 and exchange them for an access token.

    Downloads the client and user credential JSON files from the
    ``mint_bucket`` S3 bucket, then performs a resource-owner
    password-credentials grant against the auth endpoint.

    Returns:
        str: The OAuth2 access token.

    Raises:
        KeyError: If the token response contains no ``access_token``.
    """
    boto3.setup_default_session(profile_name='default')
    client = boto3.client('s3')
    # The S3 client exposes download_file directly; building an S3Transfer
    # by attribute access (boto3.s3.transfer) is fragile because `import
    # boto3` alone does not import that submodule.
    client.download_file('mint_bucket', "ro2key/client.json", "./client.json")
    client.download_file('mint_bucket', 'ro2key/user.json', './user.json')

    with open('./user.json', 'r') as f:
        user_creds = json.load(f)
    application_username = user_creds['application_username']
    application_password = user_creds['application_password']

    with open('./client.json', 'r') as f:
        client_creds = json.load(f)
    client_id = client_creds['client_id']
    client_secret = client_creds['client_secret']

    # NOTE(review): `encoded_scopes` must be defined at module level; it is
    # not created anywhere in this function.
    data = {
        "grant_type": "password",
        "username": application_username,
        "password": application_password,
        "scope": encoded_scopes,
    }
    url = 'https://auth.example.org/oauth2?realm=services'
    req = requests.post(url, data=data, auth=(client_id, client_secret))

    return req.json()['access_token']
Example #2
0
def cli(state, profile, region, access_key, secret_key):
    """Record the chosen profile/region on *state* and rebuild the global
    boto3 default session with the supplied credentials."""
    state.profile = profile
    state.region = region
    session_kwargs = {
        'profile_name': profile,
        'region_name': region,
        'aws_access_key_id': access_key,
        'aws_secret_access_key': secret_key,
    }
    boto3.setup_default_session(**session_kwargs)
Example #3
0
def clean_boto_configuration(monkeypatch):
    """Pin fake AWS credentials and region in the environment and reset the
    boto3 default session so tests never use real credentials."""
    fake_env = {
        'AWS_ACCESS_KEY_ID': 'AK000000000000000000',
        'AWS_SECRET_ACCESS_KEY':
            '0000000000000000000000000000000000000000',
        'AWS_DEFAULT_REGION': 'us-east-1',
    }
    for key, value in fake_env.items():
        monkeypatch.setenv(key, value)
    # Drop any previously created default session so the fake credentials
    # take effect.
    boto3.setup_default_session()
Example #4
0
def get_service(hass, config, discovery_info=None):
    """Get the AWS Lambda notification service."""
    # Serialize the HA config plus user-supplied context into base64 JSON;
    # this is handed to AWSLambda as the invocation context.
    context_str = json.dumps({'hass': hass.config.as_dict(),
                              'custom': config[CONF_CONTEXT]}, cls=JSONEncoder)
    context_b64 = base64.b64encode(context_str.encode('utf-8'))
    context = context_b64.decode('utf-8')

    # pylint: disable=import-error
    import boto3

    # Everything left after stripping platform-specific keys is forwarded
    # verbatim to the boto3 client constructor.
    aws_config = config.copy()

    del aws_config[CONF_PLATFORM]
    del aws_config[CONF_NAME]
    del aws_config[CONF_CONTEXT]

    profile = aws_config.get(CONF_PROFILE_NAME)

    if profile is not None:
        # Profile selection goes through the default session, not the client.
        boto3.setup_default_session(profile_name=profile)
        del aws_config[CONF_PROFILE_NAME]

    lambda_client = boto3.client("lambda", **aws_config)

    return AWSLambda(lambda_client, context)
    def __init__(
        self,
        filter_addresses,
        enable_standby_mode=True,
        log_level="INFO",
        log_format="json",
        log_file=None,
        force=False,
    ):
        """
        Default constructor.

        Sets up logging, records configuration flags, then discovers this
        instance's identity and region from EC2 instance metadata and opens
        the boto/boto3 connections used later.
        """
        self.setup_logging(log_level=log_level, log_format=log_format, log_file=log_file)

        self.filter_addresses = filter_addresses
        self.enable_standby_mode = enable_standby_mode
        self.force = force

        self.instance_metadata = self.get_instance_metadata()
        self.instance_id = self.instance_metadata.get("instance-id")
        # Collect details about this instance
        # NOTE(review): `.values()[0]` only works on Python 2 -- on Python 3
        # dict_values is not subscriptable (needs list(...)[0]). Confirm the
        # intended runtime; the legacy `boto` calls below suggest Python 2.
        vpc_id = self.instance_metadata["network"]["interfaces"]["macs"].values()[0]["vpc-id"]
        # Availability zone minus its trailing letter is the region name.
        region = self.instance_metadata["placement"]["availability-zone"][:-1]
        # Setup boto3
        boto3.setup_default_session(region_name=region)
        # Enable required connections (legacy boto alongside boto3)
        self.ec2_connection = boto.ec2.connect_to_region(region)
        if self.enable_standby_mode:
            self.asg_connection = boto.ec2.autoscale.connect_to_region(region)

        if self.ec2_connection is None:
            self.logger.critical("Critical error getting EC2 conection...exiting")
            self.safe_exit(1)
def runit(profile,rn,rp):
    """Create an IAM role assumable by Lambda and attach an inline policy.

    :param profile: truthy to switch boto3 to a named profile first.
        NOTE(review): the value itself is ignored -- the profile name is
        hard-coded to 'cjk1'; confirm whether `profile` should instead be
        passed through as profile_name.
    :param rn: role name to create.
    :param rp: name for the inline policy (document read from policy.json).
    """
    if profile:
        boto3.setup_default_session(profile_name='cjk1')
    client = boto3.client('iam')

    # Trust policy allowing the Lambda service to assume this role.
    trust_role = {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Sid": "",
          "Effect": "Allow",
          "Principal": {
            "Service": "lambda.amazonaws.com"
          },
          "Action": "sts:AssumeRole"
        }
      ]
    }

    try:
        response = client.create_role(RoleName=rn,AssumeRolePolicyDocument=json.dumps(trust_role))
        print(response['Role']['Arn'])
        print("Success: done creating role {}".format(rn))
    except botocore.exceptions.ClientError as e:
        print("Error: {0}".format(e))

    try:
        with open('policy.json') as json_data:
            response = client.put_role_policy(RoleName=rn,PolicyName=rp,
                PolicyDocument=json.dumps(json.load(json_data))
            )
            print("Success: done adding inline policy to role {}".format(rp))
    except botocore.exceptions.ClientError as e:
        print("Error: {0}".format(e))
    def main(args, evpc=None):
        """
        For every AWS profile + region, dump every VPC to STDOUT.
        :param args: The parsed arguments and flags from the CLI.
        :returns: None
        """
        sessions = get_all_sessions()

        # Assume every profile sees the same region list. This is an
        # optimization and might not always hold.
        regions = get_region_names(sessions[0])

        vpcs = {}
        for session in sessions:
            boto3.setup_default_session(profile_name=session.profile)
            vpcs[session.profile] = {}
            profile_vpcs = vpcs[session.profile]
            for region_name in regions:
                region_vpcs = profile_vpcs.setdefault(region_name, {})
                ec2 = boto3.resource('ec2', region_name=region_name)
                for vpc in ec2.vpcs.all():
                    tags = make_tag_dict(vpc)
                    region_vpcs[tags.get('Name', vpc.id)] = vpc.id

        print(output_formatter(vpcs, args.output_format))
Example #8
0
def _region(name):
    """Temporarily point the boto3 default session at region *name*,
    restoring the previous default session on exit."""
    saved = boto3.DEFAULT_SESSION
    boto3.setup_default_session(region_name=name)
    try:
        yield
    finally:
        # Restore whatever session (possibly None) was active before.
        boto3.DEFAULT_SESSION = saved
Example #9
0
    def test_create_default_session(self):
        """After setup_default_session(), boto3.DEFAULT_SESSION must be the
        instance produced by the (mocked) Session factory."""
        session = self.Session.return_value

        boto3.setup_default_session()

        self.assertEqual(boto3.DEFAULT_SESSION, session,
            'Default session not created properly')
Example #10
0
def handler(event, context):
    """Lambda entry point: publish a message to an SNS topic and log timing.

    :param event: optional dict with 'topic' (ARN), 'subject' and 'msg'.
    :param context: Lambda context; when falsy (local testing) the 'cjk1'
        profile is installed as the default session.
    :returns: a 'TIMER:CALL:<ms>' string with the elapsed time.
    """
    entry = time.time() * 1000
    logger = logging.getLogger()
    if context:
        logger.info('sns::handler: context: {}'.format(context))
        if event:
            logger.info('sns::handler: event: {}'.format(event))
    else:  # calling from main (testing)
        boto3.setup_default_session(profile_name='cjk1')
    sns = boto3.client('sns')
    arn = None
    subj = 'test'
    msg = 'hello world'
    if event:
        # dict.get with the current value as default replaces the manual
        # `in` check + index pattern.
        arn = event.get('topic', arn)
        subj = event.get('subject', subj)
        msg = event.get('msg', msg)

    # Only publish when a topic ARN was supplied.
    if arn:
        sns.publish(
            TopicArn=arn,
            Subject=subj,
            Message=msg
        )

    delta = (time.time() * 1000) - entry
    me_str = 'TIMER:CALL:{}'.format(delta)
    # logger.warn is a deprecated alias of logger.warning.
    logger.warning(me_str)
    return me_str
Example #11
0
def use_profile(aws_profile=Constants.AWS_PROFILE_DEFAULT):
    """Activate *aws_profile* as the boto3 default session.

    :returns: True if the session was created, False if boto3 rejected the
        profile (e.g. it does not exist in the AWS config files).
    """
    try:
        boto3.setup_default_session(profile_name=aws_profile)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        return False
    return True
Example #12
0
def get_data(args: dict) -> list:
    """Load EC2 reservation data, either from a JSON file or live from AWS.

    Args:
        args (dict): Parsed CLI options.

    Returns:
        list: The contents of the response's "Reservations" key.

    Raises:
        ValueError: If the configured method is unknown.
    """
    method = get_option('method', args)

    if method == 'json_file':
        with open(get_option('file', args)) as handle:
            raw_data = json.load(handle)
    elif method == 'live':
        boto3.setup_default_session(profile_name=get_option('aws', args))
        raw_data = boto3.client('ec2').describe_instances()
    else:
        raise ValueError("Invalid get_data method")

    # All data is inside the "Reservations" key
    return raw_data['Reservations']
Example #13
0
    def test_create_default_session_with_args(self):
        """setup_default_session must forward credential kwargs verbatim to
        the (mocked) Session constructor."""
        boto3.setup_default_session(
            aws_access_key_id='key',
            aws_secret_access_key='secret')

        self.Session.assert_called_with(
            aws_access_key_id='key',
            aws_secret_access_key='secret')
Example #14
0
 def _refresh_session(self):
     """
     Rebuild boto3's default session from the context's current region and
     profile. Several properties of a boto3 session are read-only, so when
     Click parses new AWS options (region/profile) we must construct a
     brand-new default session rather than mutate the existing one.
     """
     boto3.setup_default_session(profile_name=self._aws_profile,
                                 region_name=self._aws_region)
Example #15
0
 def __init__(self, instance_id, profile=None):
     """Remember the target instance and build the EC2 client, optionally
     switching boto3 to a non-default credentials profile first."""
     # Basic attributes #
     self.instance_id = instance_id
     self.profile = profile
     # A non-default profile is installed as boto3's default session #
     if self.profile:
         boto3.setup_default_session(profile_name=self.profile)
     # Client used for all subsequent EC2 queries #
     self.ec2 = boto3.client('ec2')
Example #16
0
 def __init__(self, subject, email_from, email_to, credentials):
     """Build an SES-backed email sender using explicit AWS credentials."""
     Email.__init__(self, subject, email_from, email_to)
     session_args = {
         'aws_access_key_id': credentials['AWS_ID'],
         'aws_secret_access_key': credentials['AWS_PASS'],
         'region_name': credentials['region'],
     }
     boto3.setup_default_session(**session_args)
     self.client = boto3.client('ses')
def invokeCLI(event,context,logger):
    """Asynchronously invoke another Lambda function and log timing.

    :param event: dict; may carry 'functionName' (target) and 'eventSource'.
    :param context: Lambda context, or None when run from main (then the
        'cjk1' local profile is installed as the default session).
    :param logger: logger used for all diagnostics.
    :returns: 'TIMER:CALL:<ms>' on success, otherwise a marker explaining
        why no invocation was made.
    """
    #hard code it in case its not available in the context for some reason
    me = 'unknown'
    reqID = 'unknown'
    if not context: #invoking from main
        boto3.setup_default_session(profile_name='cjk1')
    else:
        me = context.invoked_function_arn
        reqID = context.aws_request_id
    lambda_client = boto3.client('lambda')

    fn = None
    if event:
        if 'functionName' in event:
            fn = event['functionName']

    #run_lambda does not support invoke via Payload arg
    invoke_response = None
    # Guard against self-invocation (fn == me) to avoid an infinite loop.
    if fn and fn != me:
        now = time.time() * 1000
        msg = {}
        msg['msg'] = 'from {} at {}'.format(me,now)
        msg['requestId'] = reqID
        if event and 'eventSource' in event and me == 'unknown': 
            msg['eventSource'] = event['eventSource']
        else:
            msg['eventSource'] = 'int:invokeCLI:{}'.format(me)

        #do not set functionName here as you risk getting into an infinite loop!
        payload=json.dumps(msg)
        logger.warn('SpotTemplatePy::handler sending payload to {}: {}'.format(fn,msg))

        invoke_response = lambda_client.invoke(FunctionName=fn,
            InvocationType='Event', Payload=payload) #Event type says invoke asynchronously
        nowtmp = time.time() * 1000
        delta = nowtmp-now
        ms = int(round(delta))
        me_str = 'TIMER:CALL:{}'.format(ms)
    else:
        me_str = 'no_functionName_or_recursive:fn:{}'.format(fn)

    # Surface the downstream request id / trace id and HTTP status when the
    # invoke returned response metadata.
    if invoke_response:
        reqID = 'unknown'
        if 'ResponseMetadata' in invoke_response:
            meta = invoke_response['ResponseMetadata']
            if 'HTTPHeaders' in invoke_response['ResponseMetadata']:
                headers = meta['HTTPHeaders']
                if 'x-amzn-requestid' in headers:
                    reqID = headers['x-amzn-requestid']
                if 'x-amzn-trace-id' in headers:
                    reqID += ':{}'.format(headers['x-amzn-trace-id'])
        status = 'unknown'
        if 'StatusCode' in invoke_response:
            status = invoke_response['StatusCode']
        logger.warn('{} invoke_response: reqId:{} statusCode:{}'.format(me,reqID,status))

    logger.warn('SpotTemplatePy::handler_returning:{}'.format(me_str))
    return me_str
Example #18
0
    def __init__(self, session):
        """Bind boto3 to the given profile name and create the CloudWatch
        and S3 clients this object uses, plus a creation timestamp."""

        # // Set AWS ENV vars in /etc/sysconfig/gmond

        self.session = session
        boto3.setup_default_session(profile_name=self.session)
        # Clients for metric publishing and bucket access.
        self.cw = boto3.client('cloudwatch')
        self.s3client = boto3.client('s3')
        self.now = datetime.datetime.now()
def test_find_instance_ids(mock_resource):
    """
    Test the find_instance_ids function
    """
    # Single fake instance whose .id should be surfaced by the function.
    instance_id = 'abcdef-12345'
    instance = [MagicMock(id=instance_id)]
    # boto3 needs a region configured before clients can be created inside
    # the function under test.
    boto3.setup_default_session(region_name='us-east-1')
    mock_resource.return_value = instance
    assert find_instance_ids('blah') == [instance_id]
 def _get_ec2_client(self, region_name=None):
         """Return an EC2 client, installing a default-session region on
         first failure.

         When boto3 has no region configured it raises NoRegionError; if a
         region_name was supplied we bake it into the default session and
         retry once, so later calls can omit the parameter.

         :raises CreateEC2ClientNoRegionError: no region available at all.
         """
         try:
             return boto3.client('ec2')
         except NoRegionError:
             if not region_name:
                 raise CreateEC2ClientNoRegionError()
             # next class instance could be created without region_name parameter
             boto3.setup_default_session(region_name=region_name)
             return self._get_ec2_client()
def set_up_mock_queue(type):
    """Create and return a mocked SQS notify queue in eu-west-1.

    NOTE: the parameter name shadows the builtin ``type``; it is kept for
    interface compatibility with existing callers.
    """
    boto3.setup_default_session(region_name='eu-west-1')
    conn = boto3.resource('sqs', region_name='eu-west-1')
    # Email queue for 'email', SMS queue for anything else.
    name = ('gov_uk_notify_email_queue' if type == 'email'
            else 'gov_uk_notify_sms_queue')
    return conn.create_queue(QueueName=name)
Example #22
0
def fetch_hcp(subjects):
    """
    Fetch HCP diffusion data.

    Parameters
    ----------
    subjects : list
       Each item is an integer, identifying one of the HCP subjects

    Returns
    -------
    dict with remote and local names of these files.

    Notes
    -----
    To use this function, you need to have a file '~/.aws/credentials', that
    includes a section:

    [hcp]
    AWS_ACCESS_KEY_ID=XXXXXXXXXXXXXXXX
    AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXX

    The keys are credentials that you can get from HCP (see https://wiki.humanconnectome.org/display/PublicData/How+To+Connect+to+Connectome+Data+via+AWS)  # noqa

    Local filenames are changed to match our expected conventions.
    """
    # Credentials come from the [hcp] profile described in the docstring.
    boto3.setup_default_session(profile_name='hcp')
    s3 = boto3.resource('s3')
    bucket = s3.Bucket('hcp-openaccess')
    base_dir = op.join(afq_home, "HCP")
    if not os.path.exists(base_dir):
        os.mkdir(base_dir)
    data_files = {}
    for subject in subjects:
        # Local layout: <afq_home>/HCP/<subject>/sess/{dwi,anat}/...
        sub_dir = op.join(base_dir, '%s' % subject)
        if not os.path.exists(op.join(base_dir, sub_dir)):
            os.mkdir(sub_dir)
            os.mkdir(os.path.join(sub_dir, 'sess'))
            os.mkdir(os.path.join(sub_dir, 'sess', 'dwi'))
            os.mkdir(os.path.join(sub_dir, 'sess', 'anat'))
        # Map local target path -> remote S3 key.
        data_files[op.join(sub_dir, 'sess', 'dwi', 'dwi.bvals')] =\
            'HCP/%s/T1w/Diffusion/bvals' % subject
        data_files[op.join(sub_dir, 'sess', 'dwi', 'dwi.bvecs')] =\
            'HCP/%s/T1w/Diffusion/bvecs' % subject
        data_files[op.join(sub_dir, 'sess', 'dwi', 'dwi.nii.gz')] =\
            'HCP/%s/T1w/Diffusion/data.nii.gz' % subject
        data_files[op.join(sub_dir, 'sess', 'anat', 'T1w_acpc_dc.nii.gz')] =\
            'HCP/%s/T1w/T1w_acpc_dc.nii.gz' % subject
        data_files[op.join(sub_dir, 'sess', 'anat', 'aparc+aseg.nii.gz')] =\
            'HCP/%s/T1w/aparc+aseg.nii.gz' % subject

    # Only download files that are not already present locally.
    for k in data_files.keys():
        if not op.exists(k):
            bucket.download_file(data_files[k], k)

    return data_files
Example #23
0
    def use_as_global(self):
        """
        Set this object to use its current credentials as the global boto3 settings.
        If a role has been assumed, the assumed credentials will be used.
        If a role is set but has not been assumed, the base credentials will be used.
        WARNING: This will affect all calls made to boto3.

        :return awsauthhelper.Credentials:
        """
        # _build_kwargs() resolves which credential set (assumed vs. base)
        # is currently active.
        boto3.setup_default_session(**self._build_kwargs())
Example #24
0
def lambda_handler(event, context):
    """Fetch one hard-coded item from the 'myapps' DynamoDB table.

    NOTE(review): relies on a module-level name `env`; when `env` is not
    defined at all, the NameError branch below is treated as "running on
    AWS".
    """
    # Default variables

    start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M.%S")
    try:
        if env == 'local':
            print('BEGIN - LAMBDA - dynabodb')
            print('BEGIN - START TIME - {0}'.format(start_time))
            print('Running locally')
            boto3.setup_default_session(profile_name='pacheco')
    except NameError:
        print('BEGIN - LAMBDA - dynabodb')
        print('BEGIN - START TIME - {0}'.format(start_time))
        print('Running AWS')

    # Create the resource
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    # Get the client from the resource
    #dynamodb = dynamodb_resource.meta.client


    table = dynamodb.Table('myapps')
    try:
        response = table.get_item(
            Key={
                'id': '02',
                'name': 'Jeff Lee'
            }
        )
    # NOTE(review): bare except also hides non-ClientError bugs, and
    # exit() inside a Lambda handler is unusual -- consider raising.
    except:
        error={'ErrorID': '-1', 'Error':'Key NOT found'}
        print(error)
        exit(error)
    #botocore.exceptions.ClientError: An error occurred (ValidationException)

    #response = table.query(
    #KeyConditionExpression=Key('id').eq('01')
    #)
    try:
        item = response['Item']
        print(item)
    except KeyError:
        error={'ErrorID': '-2', 'Error':'No Item Returned'}
        print(error)
        exit(error)
    #items = response['Items']
    #print(items)

    # Log the end of the Lambda function
    end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M.%S")
    print('END - END TIME - {0}'.format(end_time))
    print('END - LAMBDA - dynabodb')

    return item
Example #25
0
def setUpModule():
    """Install fake AWS credentials and disable outbound HTTP for tests."""
    # Fake keys take precedence over any config-file credentials.
    fake_credentials = {
        'AWS_ACCESS_KEY_ID': 'AK000000000000000000',
        'AWS_SECRET_ACCESS_KEY':
            '0000000000000000000000000000000000000000',
        'AWS_DEFAULT_REGION': 'us-east-1',
    }
    os.environ.update(fake_credentials)

    # Reset previously created default session (for credentials)
    boto3.setup_default_session()

    httpretty.HTTPretty.allow_net_connect = False
Example #26
0
    def send_sqs(self):
        """Serialize the accumulated log and push it onto the configured
        SQS queue.

        Raises:
            ImportError: If boto3 was not importable at module load time.
        """
        if not boto_loaded:
            raise ImportError("boto3 not loaded")

        boto3.setup_default_session(region_name=self._sqs_region)
        sqs = boto3.resource('sqs')
        # Identity comparison with None (PEP 8) instead of `!= None`.
        if self._sqs_aws_account_id is not None:
            queue = sqs.get_queue_by_name(
                QueueName=self._sqs_queue_name,
                QueueOwnerAWSAccountId=self._sqs_aws_account_id)
        else:
            queue = sqs.get_queue_by_name(QueueName=self._sqs_queue_name)
        queue.send_message(MessageBody=json.dumps(self._sendlog))
 def testEC2_create(self):
     """Smoke-test creating an EC2 service resource and dump its public
     callable methods with their signatures."""
     para = {"region_name": "us-west-1"}
     boto3.setup_default_session(**para)
     ec2_s = boto3.resource('ec2')
     class_name = ec2_s.__class__
     print(class_name)
     file = inspect.getfile(class_name)
     print(file)
     methods = [getattr(ec2_s, method) for method in dir(ec2_s) if callable(getattr(ec2_s, method)) and "__" not in str(method)]
     for method in methods:
         # inspect.getargspec was removed in Python 3.11; getfullargspec is
         # the drop-in replacement.
         print(method, " args: ", inspect.getfullargspec(method))
     print(ec2_s.Subnet)
Example #28
0
def main(region, profile='default'):
    """Create/update an API Gateway REST API from an api_schema.raml file
    and deploy it.

    :param region: AWS region to deploy into.
    :param profile: AWS credentials profile name (default 'default').
    """
    project_details = json.load(open('project.json'))
    boto3.setup_default_session(
        profile_name=profile,
        region_name=region
    )
    client = boto3.client('apigateway', region_name=region)
    raml = ramlfications.parse('api_schema.raml')
    api_name = raml.title
    # Reuse an existing API of the same name, otherwise create one.
    api_gateway = get_api_by_name(client, api_name)
    if api_gateway is None:
        api_gateway = client.create_rest_api(name=api_name)
    aws_resources = client.get_resources(restApiId=api_gateway['id'])['items']
    root = grab_root_resource(aws_resources)
    resources = api.transform_resources(raml, raml.resources)
    resources = associate_resources(aws_resources, resources)
    aws_authorizers = client.get_authorizers(restApiId=api_gateway['id'])['items']  # NOQA
    authorizers = associate_authorizers(aws_authorizers, raml.security_schemes)
    create_authorizer = functools.partial(
        create_security_scheme,
        client,
        api_gateway['id'],
        project_details['name']
    )
    # list(...) so the map is evaluated eagerly on Python 3 as well,
    # matching Python 2's list-returning map().
    authorizers = list(map(create_authorizer, authorizers))

    for resource in resources:
        # print statements converted to the function form so the module
        # parses under both Python 2 and Python 3.
        print('Creating Resource')
        create_resource(
            client,
            api_gateway['id'],
            root['id'],
            resource,
            project_details['name'],
            authorizers
        )
    deployment = client.create_deployment(
        restApiId=api_gateway['id'],
        stageName=raml.base_uri
    )
    data = {
        'deployment': deployment['id'],
        'api': api_gateway['id'],
        'uri': 'https://{}.execute-api.{}.amazonaws.com/{}/'.format(
            api_gateway['id'],
            region,
            raml.base_uri
        )
    }
    print(data)
Example #29
0
def save_wm_mask(subject):
    """Create a white-matter mask for one HCP subject and upload it to S3.

    Downloads the subject's diffusion data and aparc+aseg segmentation,
    builds a cerebral-white-matter + corpus-callosum mask, resamples it to
    diffusion space, and uploads the result to the 'hcp-dki' bucket.

    :returns: (subject, True) on success or when the mask already exists;
        (subject, err.args) if any processing step failed.
    """
    s3 = boto3.resource('s3')
    # NOTE(review): the 'cirrus' profile is installed *after* the resource
    # above is created -- confirm the first resource picks up the intended
    # credentials.
    boto3.setup_default_session(profile_name='cirrus')
    bucket = s3.Bucket('hcp-dki')
    path = '%s/%s_white_matter_mask.nii.gz' % (subject, subject)
    if not exists(path, bucket.name):
        bucket = setup_boto()
        with tempfile.TemporaryDirectory() as tempdir:
            try:
                dwi_file = op.join(tempdir, 'data.nii.gz')
                seg_file = op.join(tempdir, 'aparc+aseg.nii.gz')
                data_files = {}
                data_files[dwi_file] = \
                    'HCP_900/%s/T1w/Diffusion/data.nii.gz' % subject
                data_files[seg_file] = \
                    'HCP_900/%s/T1w/aparc+aseg.nii.gz' % subject
                for k in data_files.keys():
                    if not op.exists(k):
                        bucket.download_file(data_files[k], k)

                seg_img = nib.load(seg_file)
                dwi_img = nib.load(dwi_file)
                seg_data_orig = seg_img.get_data()
                # Corpus callosum labels:
                cc_mask = ((seg_data_orig == 251) |
                           (seg_data_orig == 252) |
                           (seg_data_orig == 253) |
                           (seg_data_orig == 254) |
                           (seg_data_orig == 255))

                # Cerebral white matter in both hemispheres + corpus callosum
                wm_mask = ((seg_data_orig == 41) | (seg_data_orig == 2) |
                           (cc_mask))
                dwi_data = dwi_img.get_data()
                # Resample the anatomical-space mask onto the first diffusion
                # volume's grid, then binarize.
                resamp_wm = np.round(reg.resample(wm_mask, dwi_data[..., 0],
                                     seg_img.affine,
                                     dwi_img.affine)).astype(int)
                wm_file = op.join(tempdir, 'wm.nii.gz')
                nib.save(nib.Nifti1Image(resamp_wm.astype(int),
                                         dwi_img.affine),
                         wm_file)
                boto3.setup_default_session(profile_name='cirrus')
                s3 = boto3.resource('s3')
                s3.meta.client.upload_file(
                        wm_file,
                        'hcp-dki',
                        path)
                return subject, True
            except Exception as err:
                return subject, err.args
    else:
        return subject, True
Example #30
0
def create_boto3_default_session(roleARN):
    """
    Invoke STS to Assume the given role and create a default boto3 session
    :param roleARN: The IAM rolename to assume.  TrustedPolicy must include the following principals
                    ["lambda.amazonaws.com", "arn:aws:iam:<YOUR ACCOUNT ID>::user/<YOUR USER>"]
    """
    print("Going to assume role %s" % roleARN)
    sts = boto3.client("sts")
    assumed = sts.assume_role(RoleArn=roleARN, RoleSessionName='emulambda')
    credentials = assumed['Credentials']
    print("Setting up the default session")
    boto3.setup_default_session(
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
        aws_session_token=credentials['SessionToken'])
import boto3
import aws_region
import aws_profile
import threading
import sys
import time
import csv
from common import continue_prompt

sys.stdout.flush()
# Single DynamoDB table that stores all resource tags.
_table_name = "Aws_RscTag_Tbl"

# Get the service resource
boto3.setup_default_session(profile_name=aws_profile.get())
dynamodb = boto3.resource("dynamodb", region_name=aws_region.get())


def create():
    global _table_name
    global dynamodb
    global _continue_printing

    if _table_exists():
        print("Table already exists. Delete the table and try again")
        continue_prompt()
        return
    print(f"Creating {_table_name} table")
    # Create the DynamoDB table.
    table = dynamodb.create_table(
        TableName=_table_name,
        KeySchema=[
Example #32
0
load_dotenv(dotenv_path, override=True)

# Deployment configuration pulled from the environment / .env file.
AWS_PROFILE = env("AWS_PROFILE")
AWS_DEFAULT_REGION = env("AWS_DEFAULT_REGION", "us-east-1")
STACK_NAME = env("STACK_NAME")
OC_CLUSTER_NAME = env("OC_CLUSTER_NAME")
# Stacks whose name contains this marker trigger extra confirmation.
PROD_IDENTIFIER = "prod"
NONINTERACTIVE = env("NONINTERACTIVE")
INGEST_ALLOWED_IPS = env("INGEST_ALLOWED_IPS", "")

LAMBDA_CODE_BUCKET = env("LAMBDA_CODE_BUCKET")
LAMBDA_CODE_URI = f"s3://{LAMBDA_CODE_BUCKET}/{STACK_NAME}"
RECORDINGS_URI = f"s3://{STACK_NAME}-{names.RECORDINGS_BUCKET}"

# Only override boto3's default session when an explicit profile is set.
if AWS_PROFILE is not None:
    boto3.setup_default_session(profile_name=AWS_PROFILE)


@task
def production_failsafe(ctx):
    """
    This is not a standalone task and should not be added to any of the task
    collections. It is meant to be prepended to the execution of other tasks
    to force a confirmation when a task is being executed that could have an
    impact on a production stack
    """
    if not STACK_NAME:
        raise Exit("No STACK_NAME specified")

    if not NONINTERACTIVE and PROD_IDENTIFIER in STACK_NAME.lower():
        print("You are about to run this task on a production system")
        # NOTE(review): the confirmation prompt itself appears to be missing
        # here (only a warning is printed) -- confirm against the full file.
Example #33
0
def setup():
    """Create the boto3 default session from the module-level credentials
    and return an (ec2, elbv2) client pair."""
    credentials = {
        'aws_access_key_id': ACCESS_KEY,
        'aws_secret_access_key': SECRET_KEY,
        'region_name': AWS_REGION,
    }
    boto3.setup_default_session(**credentials)
    return boto3.client('ec2'), boto3.client('elbv2')
import boto3

# All CloudWatch Logs operations in this module use the 'ocp-at' profile.
boto3.setup_default_session(profile_name='ocp-at')
client = boto3.client('logs')


def delete_log_group(logGroupNamePrefix: str):
    """Delete every CloudWatch log group whose name starts with the given
    prefix."""
    paginator = client.get_paginator('describe_log_groups')
    pages = paginator.paginate(logGroupNamePrefix=logGroupNamePrefix)
    for page in pages:
        for group in page['logGroups']:
            client.delete_log_group(logGroupName=group['logGroupName'])


def test_delete_lambda_loggroups():
    """Delete log groups prefixed 'describe_log_groups'.

    NOTE(review): this prefix looks unintended for *lambda* log groups
    (expected something like '/aws/lambda/') -- confirm.
    """
    delete_log_group('describe_log_groups')


def test_delete_api_gateway_execution_logs():
    """Delete all API Gateway execution log groups."""
    delete_log_group('API-Gateway-Execution-Logs')
Example #35
0
def _boto_session():
    """Point boto3's default session at the profile named in the config."""
    aws_profile = get_config().get('aws_profile')
    # Lazy %-style args let logging skip formatting when INFO is disabled.
    LOGGER.info('Using AWS profile %s', aws_profile)
    boto3.setup_default_session(profile_name=aws_profile)
Example #36
0
            f'"ddb_record_mismatch_table": "{args.ddb_record_mismatch_table}", '
            f'"nino": "{nino}')
        dynamodb_record_mismatch_record(dynamo_table, dynamodb_data)

    for row in non_matches:
        logger.info(
            f'Attempting to format into DynamoDb structure", "record": "{row}')
        dynamodb_data = dynamodb_format(nino, take_home_pay, row["ire"],
                                        row["ldn"])

        logger.info(
            f'Recording mismatch record into DynamoDB",'
            f'"ddb_record_mismatch_table": "{args.ddb_record_mismatch_table}", '
            f'"nino": "{nino}')
        dynamodb_record_mismatch_record(dynamo_table, dynamodb_data)


if __name__ == "__main__":
    # Local entry point: load the sample event and run the handler once.
    try:
        args = get_parameters()
        logger = setup_logging("INFO")

        boto3.setup_default_session(region_name=args.aws_region)
        logger.info(os.getcwd())
        # Use a context manager instead of leaking the handle from a bare
        # open(...).read().
        with open("resources/event.json", "r") as event_file:
            json_content = json.loads(event_file.read())
        handler(json_content, None)
    except Exception as err:
        logger.error(
            f'Exception occurred for invocation", "error_message": "{err}')
        # Bare raise preserves the original traceback.
        raise
# All test resource names share a configurable prefix.
PREFIX = CONFIG.get('general', 'resource_prefix')

WINDOWS_AMI_ID = CONFIG.get('windows', 'windows2016.{}'.format(REGION))
INSTANCE_TYPE = CONFIG.get('windows', 'instance_type')
LINUX_AMI_ID = CONFIG.get('linux', 'ami')
LINUX_INSTANCE_TYPE = CONFIG.get('linux', 'instance_type')

SSM_DOC_NAME = PREFIX + 'delete-image'
CFN_STACK_NAME = PREFIX + 'delete-image'
TEST_CFN_STACK_NAME = PREFIX + 'delete-image'

logging.basicConfig(level=CONFIG.get('general', 'log_level').upper())
LOGGER = logging.getLogger(__name__)
# Silence botocore's very chatty default logging.
logging.getLogger('botocore').setLevel(level=logging.WARNING)

boto3.setup_default_session(region_name=REGION)

# Shared clients used by the tests below.
iam_client = boto3.client('iam')
s3_client = boto3.client('s3')
sns_client = boto3.client('sns')
sts_client = boto3.client('sts')

def verify_role_created(role_arn):
    LOGGER.info("Verifying that role exists: " + role_arn)
    # For what ever reason assuming a role that got created too fast fails, so we just wait until we can.
    retry_count = 12
    while True:
        try:
            sts_client.assume_role(RoleArn=role_arn,
                                   RoleSessionName="checking_assume")
Example #38
0
import time
import boto3
import argparse

# Parse argument variables passed via the DeployModel processing step
parser = argparse.ArgumentParser()
parser.add_argument('--model-name', type=str)
parser.add_argument('--region', type=str)
parser.add_argument('--endpoint-instance-type', type=str)
parser.add_argument('--endpoint-name', type=str)
args = parser.parse_args()

region = args.region
boto3.setup_default_session(region_name=region)
sagemaker_boto_client = boto3.client('sagemaker')

# Name truncated per SageMaker length requirements (63 char max).
endpoint_config_name = f'{args.model_name[:56]}-config'
existing_configs = sagemaker_boto_client.list_endpoint_configs(
    NameContains=endpoint_config_name)['EndpointConfigs']

# Only create the endpoint config when none with this name exists yet.
if not existing_configs:
    create_ep_config_response = sagemaker_boto_client.create_endpoint_config(
        EndpointConfigName=endpoint_config_name,
        ProductionVariants=[{
            'InstanceType': args.endpoint_instance_type,
            'InitialVariantWeight': 1,
            'InitialInstanceCount': 1,
            'ModelName': args.model_name,
            'VariantName': 'AllTraffic'
        }])
Example #39
0
def _bind_aws_session_to_chosen_profile(config):
    """Install config.profile as boto3's default session profile.

    A missing profile is ignored on purpose so that other credential
    sources (environment variables, instance roles) still apply.
    """
    try:
        boto3.setup_default_session(profile_name=config.profile)
    except botocore.exceptions.ProfileNotFound:
        pass
Example #40
0
            print(x)
            print(len(x))
            break

            yield from x
            try:
                kwargs['NextToken'] = resp['NextToken']
            except KeyError as err:
                break
    except Exception as err:
        print(err)


if __name__ == '__main__':
    # Change the profile of the default session in code
    boto3.setup_default_session(profile_name='prod')
    database_name = 'internal_reporting_prod'
    table_name = 'dim_parsed_message'
    # Retry aggressively; the Glue API throttles under load.
    CONFIG = Config(retries=dict(max_attempts=10))

    GLUE = boto3.client('glue', config=CONFIG, region_name='eu-west-2')

    # yields one partition at a time
    for parts in get_partitions(database_name, table_name):
        print(parts)
        print(type((parts)))
        print(len((parts)))
        print(parts.keys())
        print(parts.values())
        # Debug run: inspect only the first partition.
        break
Example #41
0
# -*- coding: utf-8 -*-
import boto3
import yaml
from mock import patch
from moto import mock_cloudformation, mock_ec2, mock_sts
from troposphere import Output

import ekscli
from ekscli.stack import ClusterInfo, NodeGroup, ControlPlane, KubeConfig
from ekscli.utils import Status

# Canned control-plane attributes returned by the stubbed EKS helpers below.
cluster_endpoint = 'https://test.sk1.us-east-1.eks.amazonaws.com'
cluster_ca = 'BASE64STR'
# Pin the default session to a fixed region so the mocked AWS clients resolve consistently.
boto3.setup_default_session(region_name='us-east-1')


def skip_cluster(self, cp_sg, role_arn):
    """Test stub for ControlPlane._create_eks_cluster_template.

    Instead of building real EKS resources, register canned
    endpoint/CA outputs on the troposphere template.
    """
    canned_outputs = (
        (self.OUTPUT_CP_ENDPOINT, cluster_endpoint),
        (self.OUTPUT_CP_CA, cluster_ca),
    )
    for output_key, output_value in canned_outputs:
        self.tpl.add_output(Output(output_key, Value=output_value))


def skip_configmap(kubeconf, role):
    """No-op stand-in for the ConfigMap provisioning step in tests."""
    return None


@mock_cloudformation
@mock_ec2
@mock_sts
@patch.object(ekscli.stack.ControlPlane, '_create_eks_cluster_template',
              skip_cluster)
Example #42
0
    mock_dynamodb2,
    mock_lambda,
    mock_iam,
    mock_s3,
    mock_ec2,
    mock_sns,
    mock_logs,
    settings,
    mock_sqs,
)
from moto.sts.models import ACCOUNT_ID
from nose.tools import assert_raises
from botocore.exceptions import ClientError

# Region used for every mocked AWS client in this test module.
_lambda_region = "us-west-2"
boto3.setup_default_session(region_name=_lambda_region)


def _process_lambda(func_str):
    zip_output = io.BytesIO()
    zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
    zip_file.writestr("lambda_function.py", func_str)
    zip_file.close()
    zip_output.seek(0)
    return zip_output.read()


def get_test_zip_file1():
    pfunc = """
def lambda_handler(event, context):
    return event
'''Filter instances based on tag name and value'''
# The @@{...}@@ placeholders are substituted by the runbook/automation engine
# before execution; this snippet is not valid standalone Python as written.
ACCESS_KEY = '@@{cred_aws.username}@@'
SECRET_KEY = '@@{cred_aws.secret}@@'
AWS_REGION = '@@{clusters_geolocation}@@'
TAG_KEY = '@@{tag_name}@@'
TAG_VALUE = '@@{tag_value}@@'

import boto3

# Credentials come from the substituted placeholders above, not a local profile.
boto3.setup_default_session(aws_access_key_id=ACCESS_KEY,
                            aws_secret_access_key=SECRET_KEY,
                            region_name=AWS_REGION)

client = boto3.client('ec2')

try:
    response = client.describe_instances(Filters=[{
        'Name': 'tag:' + TAG_KEY,
        'Values': [TAG_VALUE]
    }])

    if response['Reservations']:
        print('Matching instance(s) with "{}" tag, and "{}" value:'.format(
            TAG_KEY, TAG_VALUE))
        # NOTE(review): only the first instance of each reservation is printed;
        # reservations holding multiple instances would be under-reported.
        for i in response['Reservations']:
            print(i['Instances'][0]['InstanceId'])
    else:
        print('No matching instances with "{}" tag and "{}" value'.format(
            TAG_KEY, TAG_VALUE))
# NOTE(review): ClientError is used here but no botocore import is visible in
# this snippet -- confirm the execution harness provides it.
except ClientError as e:
    print(e)
Example #44
0
def setup_raw_submissions():
    """Create the origin and export S3 buckets and return the S3 client used."""
    boto3.setup_default_session()
    s3 = boto3.client('s3', region_name='us-east-1')
    # Same creation order as before: origin bucket first, then export bucket.
    for bucket_name in (Origin_Bucket_Name, Export_Bucket_Name):
        s3.create_bucket(Bucket=bucket_name)
    return s3
Example #45
0
""" simple boto3 script to launch AWS instances

* 25.10.2017 - Oli Moser (https://twitter.com/moseroli)
"""
import boto3

boto3.setup_default_session(profile_name='codecentric',
                            region_name='eu-west-1')
ec2 = boto3.resource('ec2')

# Look up the single GPU instance by its Name tag.
filters = [{'Name': 'tag:Name', 'Values': ['computer-vision-gpu']}]

instances = ec2.instances.filter(Filters=filters)
# NOTE(review): each list(instances) re-evaluates the filtered collection
# (an extra DescribeInstances call); consider materialising the list once.
num_instances = len(list(instances))

# expecting exactly 1 instance ... else break here
assert num_instances == 1

instance = list(instances).pop()


def start_instance(instance):
    """Start *instance* when it is in the 'stopped' state; otherwise skip it."""
    if instance.state['Name'] != 'stopped':
        print("instance not in stopped state. skipping.")
        return
    print("instances is currently stopped. starting it ... ")
    print(instance.start())


def get_public_ip(instance):
    """Print the public IP address of *instance*."""
    # Fixed "Pubilic" typo in the user-facing message.
    print("Instance Public IP: {0}".format(instance.public_ip_address))
Example #46
0
            tripid, jsonData = generateJson(tripitems)

            # Write the JSON file to S3
            writeFileToS3(filename=tripid, extension="json", content=jsonData)

            print("Processed " + tripid)
        except:
            exportsuccess = False
            traceback.print_exc()
            print('Could not export trip: ' + trip)

        if exportsuccess:
            # Succeeded to export
            markTripExported(tripid)


def lambda_handler(event, context):
    """AWS Lambda entry point: pin the default region, run main(), return 0."""
    # running in lambda, set up AWS without a profile name
    boto3.setup_default_session(region_name=AWS_REGION)

    main()
    return 0


if __name__ == "__main__":
    # running interactively, use a profile
    boto3.setup_default_session(profile_name=AWS_PROFILE,
                                region_name=AWS_REGION)

    # Propagate main()'s result as the process exit status.
    sys.exit(main())
Example #47
0
def handler(event, context):
    """Label an S3-hosted image with Rekognition and store the labels in DynamoDB.

    Three invocation paths are supported:
      * S3 ObjectCreated trigger -- bucket/key extracted from the S3 event record;
      * custom 'ext:invokeCLI' event -- bucket under 'bkt', object key under 'name';
      * direct call from main() with context=None -- uses the 'cjk1' AWS profile.

    Returns a "TIMER:CALL:<ms>" string reporting wall-clock duration, which is
    also logged at warning level.
    """
    entry = time.time() * 1000
    logger = logging.getLogger()
    # Defaults, overridable via the event payload.
    reg = 'us-west-2'
    tablename = 'imageLabels'
    if event:
        if 'region' in event:
            reg = event['region']
        if 'tableName' in event:
            tablename = event['tableName']

    if not context: #calling from main so set the profile we want
        boto3.setup_default_session(profile_name='cjk1')
        session = boto3.Session(profile_name='cjk1')
        dynamodb = session.resource('dynamodb', region_name=reg)
        rekog = boto3.client("rekognition", reg)

    else: #function triggered
        dynamodb = boto3.resource('dynamodb', region_name=reg)
        rekog = boto3.client("rekognition", reg)

    bktname = None
    key = None
    if event:
            #s3 event: {'Records': [{'awsRegion': 'us-west-2', 'eventName': 'ObjectCreated:Put', 'eventSource': 'aws:s3', 'eventTime': '2017-08-30T20:30:35.581Z', 'eventVersion': '2.0', 'requestParameters': {'sourceIPAddress': '98.171.178.234'}, 'responseElements': {'x-amz-id-2': 'xw4/vqjUwiRLOXwqRNAsSBiPcd72QamenQnDI/2sm/IYXm+72A1S+TQIJYjAv2oyiq3TsY6SuYQ=', 'x-amz-request-id': '4D69F866BA76CA70'}, 's3': {'bucket': {'arn': 'arn:aws:s3:::cjktestbkt', 'name': 'cjktestbkt', 'ownerIdentity': {'principalId': 'A13UVRJM0LZTMZ'}}, 'configurationId': '3debbff2-99b6-48d0-92df-6fba9b5ddda5', 'object': {'eTag': '9f2e3e584c7c8ee4866669e2d1694703', 'key': 'imgProc/deer.jpg', 'sequencer': '0059A7206B7A3C594C', 'size': 392689}, 's3SchemaVersion': '1.0'}, 'userIdentity': {'principalId': 'AWS:AIDAJQRLZF5NITGU76JME'}}]}
        if 'Records' in event:
            recs = event['Records']
            obj = recs[0]
            if 'eventSource' in obj and 'aws:s3' in obj['eventSource']:
                #s3 triggered
                assert 's3' in obj
                s3obj = obj['s3']
                assert 'bucket' in s3obj
                bkt = s3obj['bucket']
                assert 'name' in bkt
                bktname = bkt['name']
                assert 'object' in s3obj
                keyobj = s3obj['object']
                assert 'key' in keyobj
                key = keyobj['key']
        elif 'eventSource' in event and 'ext:invokeCLI' in event['eventSource']:
            #fninvoke event: {'bkt': 'cjktestbkt', 'eventSource': 'ext:invokeCLI', 'name': 'prefix/test.jpg'}
            # BUG FIX: per the sample event above, the bucket is carried under
            # 'bkt' and the object key under 'name'; the original read the
            # bucket from 'name' and asserted a nonexistent 'key' field.
            assert 'bkt' in event
            bktname = event['bkt']
            assert 'name' in event
            key = event['name']

    assert bktname is not None and key is not None
    labels = detect_labels(rekog, bktname, key)

    if labels:
        table = dynamodb.Table(tablename) # we assume key is id of type String
        table.put_item( Item={
            'id': key,
            'labels': json.dumps(labels)
            }
        )
    else: 
        print('No labels found!')

    #post to website
    key = str(uuid.uuid4())[:4]
    val = 17
    r = requests.post('http://httpbin.org/post', data = {key:val})
    print('HTTP POST status: {}'.format(r.status_code))

    delta = (time.time() * 1000) - entry
    me_str = 'TIMER:CALL:{}'.format(delta)
    # logger.warn is a deprecated alias of logger.warning.
    logger.warning(me_str)
    return me_str
Example #48
0
def lambda_handler(event, context):
    """AWS Lambda entry point: pin the default region, run main(), return 0."""
    # running in lambda, set up AWS without a profile name
    boto3.setup_default_session(region_name=AWS_REGION)

    main()
    return 0
import boto3
import botocore.exceptions


parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                 description="Gather AWS EC2 KeyPair data")

parser.add_argument('--profile', help='AWS Profile to use from ~/.aws/credentials')

args = parser.parse_args()
profile = args.profile

try:
    # If profile is specified, we use it rather than AWS_PROFILE
    if profile:
        boto3.setup_default_session(profile_name=profile)
    client = boto3.client('iam')
    response = client.list_users()
except botocore.exceptions.ProfileNotFound:
    print(f"ERROR: Profile {profile} not found in your ~/.aws/credentials file")
    # Exit with a non-zero status: the bare `raise SystemExit` of the
    # original reported success (status 0) on this error path.
    raise SystemExit(1)
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed; also fixed the "environemnt" typo in the message.
    print("ERROR: Use AWS_PROFILE environment variable or --profile to specify a valid profile.")
    raise SystemExit(1)

# Report every inactive access key for every IAM user.
for user in response['Users']:
    username = user['UserName']
    access_keys = client.list_access_keys(UserName=username)
    for key in access_keys['AccessKeyMetadata']:
        if key['Status'] == 'Inactive':
            print(f"Inactive Key: {key}")
Example #50
0
def collect_and_download(out_dir,
                         subject,
                         series=SERIES_MAP.keys(),
                         tartasks=False):
    '''
    Function to collect and download images from the HCP_1200 prefix of
    the hcp-openaccess S3 bucket.

    Parameters
    ----------
    out_dir : string
        filepath to a local directory to save files to
    subject : string
        HCP subject identifier used to build the S3 key prefix
    series : list
        the series to download (for functional scans); note: not currently
        referenced by the body, which filters on SERIES_MAP.values() directly
    tartasks : boolean
        if True, tar+gzip each downloaded tfMRI protocol directory under
        MNINonLinear/Results and delete the unpacked copy

    Returns
    -------
    None
        (the original docstring advertised a boolean, but the function
        has no return statement)
    '''
    # Import packages (unused `pandas` and `botocore` imports removed)
    import boto3

    # Init variables
    s3_bucket_name = 'hcp-openaccess'
    s3_prefix = 'HCP_1200'

    # SECURITY: hard-coded AWS credentials below should be revoked and moved
    # into the 'hcp' profile / environment configuration.
    boto3.setup_default_session(
        profile_name='hcp',
        aws_access_key_id='AKIAJPL4P4UUIGBQSSMA',
        aws_secret_access_key='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')

    s3 = boto3.resource('s3')
    bucket = s3.Bucket(s3_bucket_name)

    # Enumerate every key under this subject, then narrow to the T1w series
    # of interest, excluding QC/auxiliary files.
    s3_keys = bucket.objects.filter(Prefix='HCP_1200/%s/' % subject)
    s3_keylist = [key.key for key in s3_keys]

    prefixes = [
        "HCP_1200/%s/T1w/%s/" % (subject, x) for x in SERIES_MAP.values()
    ]
    prefixes = tuple(prefixes)
    s3_keylist = [
        x for x in s3_keylist if x.startswith(prefixes) and (
            not ('eddylogs' in x or 'grad_dev.nii.gz' in x
                 or 'nodif_brain_mask.nii.gz' in x))
    ]

    # remove png and html
    s3_keylist = [x for x in s3_keylist if not x.endswith(('png', 'html'))]

    # If output path doesn't exist, create it
    if not os.path.exists(out_dir):
        print('Could not find %s, creating now...' % out_dir)
        os.makedirs(out_dir)

    # Init a list to store paths.
    print('Collecting images of interest...')

    # And download the items
    total_num_files = len(s3_keylist)
    files_downloaded = len(s3_keylist)
    for path_idx, s3_path in enumerate(s3_keylist):
        # Mirror the bucket layout (minus the HCP_1200 prefix) under out_dir.
        rel_path = s3_path.replace(s3_prefix, '')
        rel_path = rel_path.lstrip('/')
        download_file = os.path.join(out_dir, rel_path)
        download_dir = os.path.dirname(download_file)
        if not os.path.exists(download_dir):
            os.makedirs(download_dir)
        try:
            if not os.path.exists(download_file):
                print('Downloading to: %s' % download_file)
                # BUG FIX: the original wrapped this call in
                # `with open(download_file, 'wb')`, which truncated/created the
                # file only for download_file() to write the path itself.
                bucket.download_file(s3_path, download_file)
                print("FACTS: path: %s, file: %s" % (s3_path, download_file))
                print('%.3f%% percent complete' % \
                      (100*(float(path_idx+1)/total_num_files)))
            else:
                print('File %s already exists, skipping...' % download_file)
                files_downloaded -= 1
        except Exception as exc:
            print('There was a problem downloading %s.\n'\
                  'Check and try again.' % s3_path)
            print(exc)
    if tartasks:
        subdir = os.path.join(out_dir, "%s/MNINonLinear/Results/" % subject)
        if os.path.exists(subdir):
            try:
                protocols = [
                    x for x in os.listdir(subdir)
                    if x.startswith('tfMRI') and not x.endswith('tar.gz')
                ]
            except OSError:
                print("OSError")
            else:
                for protocol in protocols:
                    # BUG FIX: arguments were swapped in the original,
                    # printing the subject where the protocol belonged.
                    print('tarring protocol %s in subject %s' %
                          (protocol, subject))
                    protocoldir = os.path.join(subdir, protocol)
                    with tarfile.open(protocoldir + ".tar.gz", "w:gz") as tar:
                        tar.add(protocoldir,
                                arcname=os.path.basename(protocoldir))
                    shutil.rmtree(protocoldir)
    print('%d files downloaded for subject %s.' % (files_downloaded, subject))

    print('Done!')
Example #51
0
"""

import boto3
import datetime
import json
import logging
import math
import os

from botocore.exceptions import ClientError

# Root logger; APP_DEBUG=true switches on DEBUG verbosity.
_logger = logging.getLogger()
_logger.setLevel(logging.DEBUG if os.getenv('APP_DEBUG', '') == 'true' else logging.INFO)

# Outside Lambda AWS_REGION may be unset; only pin the default session when it is provided.
if os.getenv('AWS_REGION'):
    boto3.setup_default_session(region_name=os.getenv('AWS_REGION'))


def _get_arn(lookup_token):
    if lookup_token is not None:
        client = boto3.client('dynamodb')
        row = client.get_item(TableName=os.getenv('AUTH_TABLE', 'role_perms'),
                              Key={'auth_token': {'S': lookup_token}})
        if row is not None:
            # Resetting/refreshing the last_accessed/expires TTLs
            try:
                now = datetime.datetime.now()
                expires_days_in_seconds = os.getenv('EXPIRES_IN_DAYS', 14) * 86400
                expires_ts = now + datetime.timedelta(seconds=expires_days_in_seconds)
                expires_ttl = math.floor(expires_ts.timestamp())
                last_accessed = math.floor(now.timestamp())
import boto3

# NOTE(review): both the default session and an explicit Session use the same
# 'face' profile; the default-session call appears redundant -- confirm nothing
# else relies on it before removing.
boto3.setup_default_session(profile_name='face')
session = boto3.Session(profile_name="face")
personalize = session.client('personalize')

if __name__ == "__main__":
    schema_name = 'YourSchemaName'

    # Define the schema for your dataset
    schema = {
        "type":
        "record",
        "name":
        "Interactions",
        "namespace":
        "com.amazonaws.personalize.schema",
        "fields": [{
            "name": "USER_ID",
            "type": "string"
        }, {
            "name": "ITEM_ID",
            "type": "string"
        }, {
            "name": "EVENT_VALUE",
            "type": "float"
        }, {
            "name": "EVENT_TYPE",
            "type": "string"
        }, {
            "name": "TIMESTAMP",
Example #53
0
import os
import json

import multiprocessing
import subprocess
import string
from pathlib import Path
import logging
logger = logging.getLogger('ept_tiler')



# Both resolve to None when unset, in which case boto3 falls back to its
# default credential/region resolution.
profile = os.environ.get('AWS_PROFILE')
region = os.environ.get('AWS_REGION')

boto3.setup_default_session(region_name=region,
                            profile_name=profile)


class Process(object):
    """Minimal work-queue base class: accumulates tiles; processing is a no-op."""

    def __init__(self):
        # Tiles queued via put(); subclasses decide what do() does with them.
        self.tasks = []

    def put(self, tile):
        """Queue *tile* for later processing."""
        self.tasks.append(tile)

    def do(self):
        """Process queued tiles; the base implementation does nothing."""
        return None


class SQS(Process):
Example #54
0
import argparse

import boto3

if __name__ == "__main__":
    parser = argparse.ArgumentParser("Delete Redshift Cluster")
    parser.add_argument("--aws_profile", type=str)
    parser.add_argument("--dwh_cluster_id", default="udacity-de", type=str)
    parser.add_argument("--dwh_s3_iam_name", default="redshift_s3_read_only")
    args = parser.parse_args()

    # create new boto3 session with profile credentials
    boto3.setup_default_session(profile_name=args.aws_profile)

    # initialize boto3 clients
    redshift = boto3.client("redshift")
    iam = boto3.client("iam")

    # deleting redshift cluster (no final snapshot -- the data is discarded)
    redshift.delete_cluster(ClusterIdentifier=args.dwh_cluster_id,
                            SkipFinalClusterSnapshot=True)

    # detach role policy and remove role
    # (a role cannot be deleted while policies remain attached)
    iam.detach_role_policy(
        RoleName=args.dwh_s3_iam_name,
        PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")
    iam.delete_role(RoleName=args.dwh_s3_iam_name)
Example #55
0
def aws():
    """Flask view: build a resource report for an app_id via the Resource
    Groups Tagging API, enriching it with RDS/EC2/ELB/ElastiCache/ECS detail.

    Reads `app_id_get`, `region` and up to six `service_type*` fields from the
    request form, then renders Report.html. Heavy use of broad try/except
    means most per-service failures are silently reduced to a printed message.
    """
    service_list = []
    app_id_get = request.form['app_id_get']
    region = request.form['region']
    print(region)
    # Each service_type field is optional in the form; missing ones are ignored.
    try:
        service_list.append(request.form['service_type'])
    except Exception as e:
        print('Error')
    try:
        service_list.append(request.form['service_type1'])
    except Exception as e:
        print('Error')
    try:
        service_list.append(request.form['service_type2'])
    except Exception as e:
        print('Error')
    try:
        service_list.append(request.form['service_type3'])
    except Exception as e:
        print('Error')
    try:
        service_list.append(request.form['service_type4'])
    except Exception as e:
        print('Error')
    try:
        service_list.append(request.form['service_type5'])
    except Exception as e:
        print('Error')
    service_list.append('elasticloadbalancing')
    # NOTE(review): profile_name is not defined in this function -- presumably
    # a module-level global; confirm.
    boto3.setup_default_session(profile_name=profile_name)
    client = boto3.client('resourcegroupstaggingapi', region_name=region)

    paginator = client.get_paginator('get_resources')
    response_iterator = paginator.paginate(
        TagFilters=[
            {
                'Key': 'app_id',
                'Values': [app_id_get]
            },
        ],
        # TagsPerPage=100,
        ResourceTypeFilters=service_list,
        PaginationConfig={
            'MaxItems': 100,
            'PageSize': 20,
            'StartingToken': ''
        })

    lis = []

    print(type(response_iterator))
    # Collect every matching resource ARN (as a one-element list) into lis.
    for pages in response_iterator:
        # print(type(pages))
        # for key in pages:
        a = pages['ResourceTagMappingList']
        index = 0
        while index < len(a):
            # print(a[index]['ResourceARN'])
            lis.append([(a[index]['ResourceARN'])])
            index = index + 1
            # print(lis)
        if not lis:
            print('Nothing')
        else:
            print('hello')

    print('list of items', lis)

    a = (len(lis))
    items = []

    def item_list():
        # Copies lis into items element by element.
        index = 0
        while index < len(lis):
            items.append(lis[index])
            index = index + 1

    try:
        item_list()
    except Exception as e:
        print('error')

    db_details = {}
    ec2_details = {}
    ec2_details_for = {}

    db_resources_details = []
    my_db_storage = []
    my_dbs_hosts = []
    my_db_instance_status = []

    my_db_instance_types = []
    my_multiaza = []
    # print(type(items()
    loadbalancer_details_lb = []
    target_details = []

    def resources():
        # Split each ARN on ':' and route it to the RDS / EC2 / ELB buckets.
        index = 0
        while index < len(items):
            b = items[index]
            # print(type(b))
            c = (', '.join(b))
            d = c.split(":")
            # print(d[2])
            # print(d)
            # print(f)
            # print('assdsdfsfsfdsfsfdsfdfdf', d)
            index = index + 1
            # service_list.append(f)
            if d[2] == 'rds' and d[5] == 'db':
                print(d)
                h = db_details[index] = d[6]
                print(h)
                db_resources_details.append(h)
                # print(resource_details)
            elif d[2] == 'ec2':
                e = (', '.join(d))
                f = e.split("/")
                # print(f[1])
                if f[1].startswith('i-') == True:
                    ec2_details[index] = f[1]
                    ec2_details_for[0] = f[1]
            elif d[2] == 'elasticloadbalancing':
                abc = d[5]
                yyy = abc.split("/")
                xyx = yyy[2]
                #print('THISSSSS YYYYYYY', yyy)
                #print('THISSSSS YYxxxYYYYY', xyx)
                if yyy[0] == 'loadbalancer':
                    loadbalancer_details_lb.append(xyx)
                else:
                    target_details.append(xyx)
                #loadbalancer_details.append(d[5])

    try:
        resources()
    except Exception as e:
        print('Eror')
    # Fetch endpoint/type/AZ/status/storage for every discovered RDS instance.
    try:
        for i in db_resources_details:
            db_instance = i
            source = boto3.client('rds', region_name=region)
            instances = source.describe_db_instances(
                DBInstanceIdentifier=db_instance)
            # print(instances)
            rds_host = instances.get('DBInstances')[0].get('Endpoint').get(
                'Address')
            instance_type = instances.get('DBInstances')[0].get(
                'DBInstanceClass')
            multiaz = instances.get('DBInstances')[0]['MultiAZ']
            # print(multiaz)
            instance_status = instances.get('DBInstances')[0].get(
                'DBInstanceStatus')
            storage = instances.get('DBInstances')[0]['AllocatedStorage']

            my_dbs_hosts.append(rds_host)
            my_db_instance_types.append(instance_type)
            my_multiaza.append(multiaz)
            my_db_instance_status.append(instance_status)
            my_db_storage.append(storage)
        #print(my_dbs_hosts)
        #print(my_db_instance_types)
        #print(my_multiaza)
        #print(my_db_instance_status)
    except Exception as e:
        print('NO DB')

    print('This is Load Balancer', loadbalancer_details_lb)
    print('This is Target Group', target_details)
    a = ec2_details.values()

    cluster_names = []

    repo_names = []
    ec2_instance_details = {}
    image_id = []
    instance_type = []
    IPAddress = []
    running_state = []

    def describe_instances():
        # For each EC2 instance id, collect tags (cluster/repo names), AMI,
        # type, private IP and run state.
        for i in a:
            client = boto3.client('ec2', region_name=region)
            response = client.describe_instances(InstanceIds=[
                i,
            ])
            # index = index + 1
            # print(type(response['Reservations'][0]['Instances'][0]))
            # print(response)
            for key in response:
                instance_b = response['Reservations']
                print('bbbbbbb', instance_b)
                print(len(instance_b))
                instance_c = instance_b[0]
                #print('asdadsadadas', type(c))
                d = instance_c['Instances']
                print('ssssss', d)
                e = d[0]
                f = e['Tags']
                running = e['State']
                state = running['Name']

                # The 'apps' profile tags clusters via ecs_cluster_name;
                # other profiles use the plain Name tag.
                if profile_name == 'apps':
                    index = 0
                    while index < len(f):
                        g = f[index]
                        if "ecs_cluster_name" in g.values():
                            # print(g)
                            cluster_names.append(g['Value'])
                        index = index + 1
                else:
                    index = 0
                    while index < len(f):
                        g = f[index]
                        if "Name" in g.values():
                            # print(g)
                            cluster_names.append(g['Value'])
                        index = index + 1

                        # print(g)
                index = 0
                while index < len(f):
                    g = f[index]
                    if "tf_repo" in g.values():
                        # print(g)
                        repo_names.append(g['Value'])
                    index = index + 1
                imageid = e['ImageId']
                LaunchTime = e['LaunchTime']
                SubnetId = e['SubnetId']
                InstanceType = e['InstanceType']
                PrivateIpAddress = e['PrivateIpAddress']
            image_id.append(imageid)
            instance_type.append(InstanceType)
            IPAddress.append(PrivateIpAddress)
            running_state.append(state)
            # print(f[16])
            # print(g)

            # print(imageid, LaunchTime, SubnetId, InstanceType, PrivateIpAddress)

    try:
        describe_instances()
    except Exception as e:
        print('Error')
    elasticlis = []

    elasticlis_updated = []
    elastic_value_empty = 'No Elastic Cache'
    if not elasticlis:
        elasticlis_updated.append(elastic_value_empty)
    else:
        elasticlis_updated = elasticlis
    cluster_names_values1 = (set(cluster_names))
    print('BEFOR ELAS', cluster_names_values1)

    def elastic():
        boto3.setup_default_session(profile_name=profile_name)
        # NOTE(review): get_paginator() does not appear to accept a
        # region_name kwarg -- this likely raises and is swallowed by the
        # broad except around the elastic() call below; confirm.
        paginator = client.get_paginator('get_resources', region_name=region)
        response_iterator = paginator.paginate(
            TagFilters=[
                {
                    'Key': 'name',
                    'Values': cluster_names_values1
                    # 'Values': ['cart-latest']
                },
            ],
            # TagsPerPage=100,
            ResourceTypeFilters=['elasticache'],
            PaginationConfig={
                'MaxItems': 100,
                'PageSize': 20,
                'StartingToken': ''
            })

        print('eeelasticccccc')
        print(response_iterator)
        for pages in response_iterator:
            # print(type(pages))
            # for key in pages:
            a = pages['ResourceTagMappingList']
            index = 0
            while index < len(a):
                # print(a[index]['ResourceARN'])
                # elasticlis.append([(a[index]['ResourceARN'])])
                b = a[index]['ResourceARN']
                print('elasssticcccccmukkkk', b)
                # d = (', '.join(b))
                e = b.split(":")
                elasticlis.append(e[6])
                index = index + 1
                # print(lis)
            if not elasticlis:
                print('Nothing')
            else:
                print('hello')

    try:
        elastic()
    except Exception as e:
        print('error')

    mydict = {}
    ecs_service_list = []
    cluster_names_values1 = (set(cluster_names))
    print("HEEeEEEEEEE", cluster_names_values1)

    def main_run():
        # Map each ECS cluster name to its status.
        for i in cluster_names_values1:

            def cluster():
                # global response
                client = boto3.client('ecs', region_name=region)
                response = client.describe_clusters(clusters=[i], )
                return response

            exe = cluster()
            a = exe['clusters']
            # mydict[i] = a[0]['clusterName']
            mydict[a[0]['clusterName']] = a[0]['status']

    try:
        main_run()
    except Exception as e:
        print('Error')

    total_cost = {}

    # Cost Explorer lookup with a hard-coded date range; currently disabled
    # (the cost() call below is commented out).
    def cost():
        client = boto3.client('ce')

        response = client.get_cost_and_usage(
            TimePeriod={
                'Start': '2018-10-19',
                'End': '2018-11-19'
            },
            Granularity='MONTHLY',
            Metrics=['UnblendedCost'],
            Filter={'Tags': {
                'Key': 'app_id',
                'Values': [
                    app_id_get,
                ],
            }}
            # GroupBy=[
            #          {'Type': 'DIMENSION',
            #           'Key': 'SERVICE'
            #          }
            # ]
        )
        # print(response)
        # print(type(response))
        print(response)
        a = response['ResultsByTime']
        print(len(a))
        index = 0
        while index < len(a):
            b = a[index]['Total']['UnblendedCost']['Amount']
            # c = b + b
            # total_cost.append(b)
            total_cost['Total Cost in USD'] = b
            index = index + 1

    # cost()
    # print(mydict)

    print('No Of DB', len(db_details), db_details.values())
    print('No Of EC2', len(ec2_details), ec2_details.values())
    number_of_resources = (len(lis))
    # number_of_instances = len(db_details)
    number_of_db_instances = len(ec2_details)
    db_instance__values = ec2_details.values()
    # instance_details = ec2_details.values()
    instance_details = list(ec2_details.values())
    db_details = db_resources_details

    cluster_names_values = (set(cluster_names))
    repo_names_values = (set(repo_names))
    print(type(repo_names_values))
    ecs_cluster_status = mydict
    print(ecs_cluster_status)
    elastic_list = elasticlis_updated

    # id=seperate_id, service_type=service_list, region=region, lang=True)
    return render_template('Report.html',
                           lisa=number_of_resources,
                           ec2_details=instance_details,
                           service_type=service_list,
                           region=region,
                           lang=True,
                           cluster_name_values=cluster_names_values,
                           repo_names_values=repo_names_values,
                           mydict=mydict,
                           ecs_service_list=ecs_service_list,
                           instance_type=instance_type,
                           IPAddress=IPAddress,
                           image_id=image_id,
                           total_cost=total_cost,
                           app_id_get=app_id_get,
                           db_details=db_details,
                           running1=running_state,
                           elasticlis=elastic_list,
                           my_db_instance_types=my_db_instance_types,
                           my_dbs_hosts=my_dbs_hosts,
                           my_db_instance_status=my_db_instance_status,
                           my_multiaza=my_multiaza)
Example #56
0
# Local scratch location for the TensorFlow model inside the Lambda sandbox.
MODEL_LOCAL_PATH = os.path.join(os.sep, 'tmp', 'yolo_tf.pb')

# Bucket name for the DL model store, read from the CloudFormation stack outputs.
DL_S3_BUCKET = list(
    filter(lambda output: output.get('OutputKey') == 'DLModelStore',
           json.load(open('./StackOutput.json'))["Stacks"][0]
           ["Outputs"]))[0]["OutputValue"]

REQ_LOCAL_PATH = os.path.join(os.sep, 'tmp', 'requirements')

# Unpack bundled dependencies once per container and add them to sys.path.
if not os.path.isdir(REQ_LOCAL_PATH):
    zip_ref = zipfile.ZipFile('requirements.zip', 'r')
    zip_ref.extractall(REQ_LOCAL_PATH)
    zip_ref.close()
    sys.path.insert(0, REQ_LOCAL_PATH)

boto3.setup_default_session(region_name='us-west-2')
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
lambda_client = boto3.client('lambda')

# Bucket name for input images, also from the stack outputs.
S3_BUCKET = list(
    filter(lambda output: output.get('OutputKey') == 'ImageStore',
           json.load(open('./StackOutput.json'))["Stacks"][0]
           ["Outputs"]))[0]["OutputValue"]


def handler(event, context):
    from yolo_utils import draw_boxes, eval_image, generate_colors, preprocess_image, load_graph

    file_stream = io.BytesIO()
    bucket = s3.Bucket(DL_S3_BUCKET)
Example #57
0
    mfa_code = sys.argv[-1]
    del sys.argv[-1]

if "AWS_SHARED_CREDENTIALS_FILE" not in os.environ:
    os.environ["AWS_SHARED_CREDENTIALS_FILE"] = os.environ["HOME"]+"/.aws/credentials"

# One positional argument: either a full role ARN, a credentials-file profile
# name, or a bare role name resolved against the caller's account.
if len(sys.argv) == 2:
    if sys.argv[1].startswith("arn:aws:iam:"):
        role_arn = sys.argv[1]
    else:
        import configparser
        config = configparser.ConfigParser()
        config.read([os.environ["AWS_SHARED_CREDENTIALS_FILE"]])
        if config.has_option(sys.argv[1], "source_profile"):
            # Assume-role profile: bind the default session to its source profile.
            source_profile = config.get(sys.argv[1], "source_profile")
            boto3.setup_default_session(profile_name=source_profile)
            role_arn = config.get(sys.argv[1], "role_arn")
            mfa_serial = config.get(sys.argv[1], "mfa_serial", fallback=None)
            region = config.get(sys.argv[1], "region", fallback=None)
            if region:
                # NOTE(review): `dest` is defined outside this snippet -- confirm.
                dest += "?region=" + region
        else:
            # Plain role name: expand to an ARN in the current account.
            response = boto3.client("sts").get_caller_identity()
            role_arn = "arn:aws:iam::"+response["Account"]+":role/"+sys.argv[1]
elif len(sys.argv) == 3:
    role_arn = sys.argv[1]
    mfa_serial = sys.argv[2]
else:
    print("Insufficient arguments.")
    print("Usage: %s <profile> [mfa_code]" % sys.argv[0])
    print("Usage: %s <role_name> [mfa_code]" % sys.argv[0])
#!/usr/bin/python
import os
import json
import boto3
import time
import calendar

# Use the named AWS profile for every boto3 client created below.
boto3.setup_default_session(profile_name='motes-acumera')
# TTL value for the DynamoDB items: now (seconds since epoch, UTC) plus ten
# minutes, stored as a string because DynamoDB attribute values are typed maps.
TIME_SINCE_EPOCH = calendar.timegm(time.gmtime())
TTL = 10 * 60
TTL += TIME_SINCE_EPOCH
VALUE = str(TTL)
# NOTE(review): "clinet" is a typo for "client" -- kept as-is because later
# code (outside this view) may reference this exact name.
dynamodb_clinet = boto3.client('dynamodb')
HOSTNAME = os.uname()[1]
# NOTE(review): this handle is consumed by the loop below and never closed;
# consider a `with` block around the whole read.
PasswdObject = open("/Users/cmotes/python/passwd_to_json/passwd", 'r')
PasswdListofDicts = []
for line in PasswdObject:
    PasswdFields = line.strip().split(':')
    PerLineDict = {
        "Hostname": {
            "S": HOSTNAME
        },
        "UserName": {
            "S": PasswdFields[0]
        },
        "Password": {
            "S": PasswdFields[1]
        },
        "UserId": {
            "S": PasswdFields[2]
        },
Example #59
0
                   'ap-south-1',
                   'ap-southeast-1', 'ap-southeast-2',
                   'ca-central-1',
                   'eu-central-1',
                   'eu-north-1',
                   'eu-west-1', 'eu-west-2', 'eu-west-3',
                   'sa-east-1',
                   'us-east-1', 'us-east-2',
                   'us-west-1', 'us-west-2']

    return aws_regions


if __name__ == '__main__':

    # All Lambda API calls below run under the 'LayerUploader' profile.
    boto3.setup_default_session(profile_name='LayerUploader')
    regions = get_aws_regions()

    for region in regions:
        client = boto3.client('lambda', region_name=region)

        # Page through list_layers using the Marker/NextMarker continuation
        # token, accumulating every layer published in this region.
        kwargs = {}
        layers = []
        while True:
            response = client.list_layers(**kwargs)
            layers.extend(response['Layers'])
            # An absent NextMarker means this was the final page.
            kwargs['Marker'] = response.get('NextMarker', False)
            if not kwargs['Marker']:
                break

        for layer in layers:
Example #60
0
#!/usr/bin/env python3
# Author: Juber Nunes
# Date: 05/03/2019
# Description: Create and List Security Group.

import boto3, sys, os, stat

# module python-dotenv installed for reading environment variables through a .env file (pip install python-dotenv)
# importing file
from dotenv import load_dotenv

# Load file from the path.
# Read key/value pairs from the local .env file into the process environment.
load_dotenv()

# Select the AWS profile named by the .env 'profile' variable; if the
# variable is unset, getenv returns None and boto3 falls back to its
# default credential resolution.
boto3.setup_default_session(profile_name=os.getenv('profile'))


# function taking one parameter which is the name to be created
def creategroup(name):
    # set profile through .env file as the default goes to personal one

    ec2 = boto3.resource('ec2')
    # Create sec group - VPC is being passed through environment variable set on .env
    sec_group = ec2.create_security_group(GroupName=name,
                                          Description='allow web',
                                          VpcId=os.getenv('VpcId'))
    sec_group.authorize_ingress(
        IpPermissions=[{
            'IpRanges': [{
                'CidrIp': '0.0.0.0/0'
            }],