Example #1
import boto3
import logging
import os
import shutil
import sys

from botocore.config import Config
from time import sleep
from logging.handlers import TimedRotatingFileHandler

AWS_CONFIG = Config(retries=dict(max_attempts=100))

# optional check_schedule hook in schedule.py to decide when to send files
# default is to send all pictures to AWS
try:
    from schedule import check_schedule
except ImportError:

    def check_schedule(image_file_name):
        # no schedule module found: send every picture
        return True


def make_dir_if_not_exists(dir):
    # exist_ok avoids a race between the existence check and the creation
    os.makedirs(dir, exist_ok=True)


def detect_hooman(image_file):
    client = boto3.client('rekognition', config=AWS_CONFIG)
Example #2
def s3_etag(url, proxies=None):
    """Check ETag on S3 object."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
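Both s3_etag above and s3_get in Example #27 call a split_s3_path helper that neither snippet shows; a plausible sketch, assuming s3://bucket/key style URLs:

from urllib.parse import urlparse


def split_s3_path(url):
    """Split an s3://bucket/key URL into (bucket_name, s3_path)."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    bucket_name = parsed.netloc
    s3_path = parsed.path.lstrip("/")
    return bucket_name, s3_path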
Example #3
 def create_client(self):
     client_config = Config(signature_version='s3v4')
     return self.session.create_client('s3',
                                       self.region,
                                       config=client_config)
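A hedged companion to Example #3 (and the inverse of the SigV2 test in Example #7): with signature_version='s3v4' and credentials configured, presigned URLs carry the AWS4-HMAC-SHA256 algorithm parameter.

import botocore.session
from botocore.config import Config

session = botocore.session.get_session()
client = session.create_client('s3', 'us-east-1',
                               config=Config(signature_version='s3v4'))
url = client.generate_presigned_url(ClientMethod='list_buckets')
assert 'Algorithm=AWS4-HMAC-SHA256' in url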
Example #4
def get_cloudformation_client(session):
    config = Config(retries=dict(max_attempts=MAX_ATTEMPTS))
    return session.client('cloudformation', config=config)
Example #5
 def route_53_client(self):
     if not hasattr(self, '_route_53_client'):
         self._route_53_client = self.account_ctx.get_aws_client(
             'route53',
             client_config=Config(retries={'max_attempts': 10}))
     return self._route_53_client
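On Python 3.8+, the hasattr-based memoization in Example #5 can be written with functools.cached_property; a minimal sketch under the same assumption that account_ctx exposes get_aws_client (the class name is hypothetical). Note the cached property is then read as an attribute, not called.

from functools import cached_property

from botocore.config import Config


class Route53Clients:
    @cached_property
    def route_53_client(self):
        # evaluated once, then cached on the instance
        return self.account_ctx.get_aws_client(
            'route53',
            client_config=Config(retries={'max_attempts': 10}))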
Example #6
 def createClient(self):
     myConfig = Config(region_name='us-west-2')
     self.client = boto3.client('s3', config=myConfig)
Example #7
 def test_sigv2_presign(self):
     config = Config(signature_version='s3')
     client = self.session.create_client('s3', self.region, config=config)
     url = client.generate_presigned_url(ClientMethod='list_buckets')
     self.assertNotIn('Algorithm=AWS4-HMAC-SHA256', url)
Example #8
def sagemaker_runtime_client(boto_session):
    return boto_session.client("runtime.sagemaker",
                               config=Config(retries={"max_attempts": 10}))
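A hedged usage sketch for the factory above; the endpoint name and payload are hypothetical stand-ins.

import boto3

client = sagemaker_runtime_client(boto3.Session())
response = client.invoke_endpoint(
    EndpointName="my-endpoint",          # hypothetical endpoint
    ContentType="application/json",
    Body=b'{"instances": [[1.0, 2.0]]}',
)
print(response["Body"].read())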
Example #9
import os
from typing import TYPE_CHECKING

from mephisto.abstractions.providers.mturk.mturk_utils import setup_aws_credentials
from mephisto.abstractions.architects.router import build_router

from botocore import client  # type: ignore
from botocore.exceptions import ClientError, ProfileNotFound  # type: ignore
from botocore.config import Config  # type: ignore
from mephisto.utils.logger_core import get_logger

logger = get_logger(name=__name__)

if TYPE_CHECKING:
    from omegaconf import DictConfig  # type: ignore

botoconfig = Config(region_name="us-east-2",
                    retries={
                        "max_attempts": 10,
                        "mode": "standard"
                    })

DEFAULT_AMI_ID = "ami-0f19d220602031aed"
AMI_DEFAULT_USER = "******"
DEFAULT_INSTANCE_TYPE = "m2.micro"
FALLBACK_INSTANCE_TYPE = "t2.nano"
MY_DIR = os.path.abspath(os.path.dirname(__file__))
DEFAULT_KEY_PAIR_DIRECTORY = os.path.join(MY_DIR, "keypairs")
DEFAULT_SERVER_DETAIL_LOCATION = os.path.join(MY_DIR, "servers")
SCRIPTS_DIRECTORY = os.path.join(MY_DIR, "run_scripts")
DEFAULT_FALLBACK_FILE = os.path.join(DEFAULT_SERVER_DETAIL_LOCATION,
                                     "fallback.json")
FALLBACK_SERVER_LOC = os.path.join(MY_DIR, "fallback_server")
KNOWN_HOST_PATH = os.path.expanduser("~/.ssh/known_hosts")
Example #10
    def test_ns_bucket_unsigned_access(self, mcg_obj, bucket_factory,
                                       namespace_store_factory):
        """
        Test that anonymous (unsigned) S3 operations are denied on a Namespace bucket.
        """
        sample_data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Create the namespace bucket
        nss_tup = ("oc", {"aws": [(1, self.DEFAULT_REGION)]})
        ns_store = namespace_store_factory(*nss_tup)[0]
        bucketclass_dict = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Single",
                "namespacestores": [ns_store],
            },
        }
        ns_bucket = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0].name

        # Put and Get object operations done with s3 credentials
        logger.info(f"Put and Get object operations on {ns_bucket}")
        assert bucket_utils.s3_put_object(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=object_key,
            data=sample_data,
        ), "Failed: PutObject"
        assert bucket_utils.s3_get_object(
            s3_obj=mcg_obj, bucketname=ns_bucket,
            object_key=object_key), "Failed: GetObject"

        # Boto3 client with signing disabled
        anon_s3_client = boto3.client(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=mcg_obj.s3_endpoint,
            config=Config(signature_version=UNSIGNED),
        )

        logger.info(
            f"Verifying anonymous access is blocked on namespace bucket: {ns_bucket}"
        )
        try:
            anon_s3_client.get_object(Bucket=ns_bucket, Key=object_key)
        except boto3exception.ClientError as e:
            response = HttpResponseParser(e.response)
            assert response.error["Code"] == "AccessDenied", \
                f"Invalid error code: {response.error['Code']}"
            assert response.status_code == 403, \
                f"Invalid status code: {response.status_code}"
            assert response.error["Message"] == "Access Denied", \
                f"Invalid error message: {response.error['Message']}"
        else:
            assert False, ("GetObject was granted access when it "
                           "should have been blocked")
Example #11
logger.addHandler(handler)
logger.addHandler(f_handler)

logger.info(
    f"Path {folder_path}, credentials: {ACCESS_KEY}, {SECRET_KEY}, {BUCKET}, {ENDPOINT}"
)

session = boto3.session.Session(
    aws_access_key_id=ACCESS_KEY,
    aws_secret_access_key=SECRET_KEY,
)
s3 = session.client(
    "s3",
    endpoint_url=ENDPOINT,
    use_ssl=True,
    config=Config(max_pool_connections=200),
)


def download_file(bucket, k, dest_pathname, count, total):
    session_s = boto3.session.Session(
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=SECRET_KEY,
    )
    client = session_s.client(
        "s3",
        endpoint_url=ENDPOINT,
        use_ssl=True,
        config=Config(max_pool_connections=200),
    )
    if Path(dest_pathname).is_file() and SKIP_EXISTING:
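download_file builds a fresh Session per call because boto3 sessions are not safe to share across threads; a minimal sketch (hypothetical helper) of fanning it out from a thread pool:

from concurrent.futures import ThreadPoolExecutor


def download_all(bucket, keys):
    # each download_file call creates its own session and client,
    # so the worker threads never share boto3 state
    with ThreadPoolExecutor(max_workers=20) as pool:
        total = len(keys)
        for count, k in enumerate(keys, 1):
            pool.submit(download_file, bucket, k, k, count, total)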
Example #12
def get_credentials(short_term_name, lt_key_id, lt_access_key, args, config):
    if args.token:
        logger.debug("Received token as argument")
        mfa_token = '%s' % (args.token)
    else:
        console_input = prompter()
        mfa_token = console_input('Enter AWS MFA code for device [%s] '
                                  '(renewing for %s seconds):' %
                                  (args.device, args.duration))
    if args.real_proxies:
        client = boto3.client(
            'sts',
            aws_access_key_id=lt_key_id,
            aws_secret_access_key=lt_access_key,
            config=Config(proxies=args.real_proxies)
        )
    else:
        client = boto3.client(
            'sts',
            aws_access_key_id=lt_key_id,
            aws_secret_access_key=lt_access_key
        )

    if args.assume_role:

        logger.info("Assuming Role - Profile: %s, Role: %s, Duration: %s",
                    short_term_name, args.assume_role, args.duration)
        if args.role_session_name is None:
            log_error_and_exit(logger, "You must specify a role session name "
                               "via --role-session-name")

        try:
            response = client.assume_role(
                RoleArn=args.assume_role,
                RoleSessionName=args.role_session_name,
                DurationSeconds=args.duration,
                SerialNumber=args.device,
                TokenCode=mfa_token
            )
        except ClientError as e:
            log_error_and_exit(logger,
                               "An error occurred while calling "
                               "assume role: {}".format(e))
        except ParamValidationError:
            log_error_and_exit(logger, "Token must be six digits")

        config.set(
            short_term_name,
            'assumed_role',
            'True',
        )
        config.set(
            short_term_name,
            'assumed_role_arn',
            args.assume_role,
        )
    else:
        logger.info("Fetching Credentials - Profile: %s, Duration: %s",
                    short_term_name, args.duration)
        try:
            response = client.get_session_token(
                DurationSeconds=args.duration,
                SerialNumber=args.device,
                TokenCode=mfa_token
            )
        except ClientError as e:
            log_error_and_exit(
                logger,
                "An error occurred while calling "
                "get session token: {}".format(e))
        except ParamValidationError:
            log_error_and_exit(
                logger,
                "Token must be six digits")

        config.set(
            short_term_name,
            'assumed_role',
            'False',
        )
        config.remove_option(short_term_name, 'assumed_role_arn')

    # aws_session_token and aws_security_token are both added
    # to support boto and boto3
    options = [
        ('aws_access_key_id', 'AccessKeyId'),
        ('aws_secret_access_key', 'SecretAccessKey'),
        ('aws_session_token', 'SessionToken'),
        ('aws_security_token', 'SessionToken'),
    ]

    for option, value in options:
        config.set(
            short_term_name,
            option,
            response['Credentials'][value]
        )
    # Save expiration individually, so it can be manipulated
    config.set(
        short_term_name,
        'expiration',
        response['Credentials']['Expiration'].strftime('%Y-%m-%d %H:%M:%S')
    )
    with open(AWS_CREDS_PATH, 'w') as configfile:
        config.write(configfile)
    logger.info(
        "Success! Your credentials will expire in %s seconds at: %s"
        % (args.duration, response['Credentials']['Expiration']))
    sys.exit(0)
Example #13
#  or in the "license" file accompanying this file. This file is distributed  #
#  on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
#  or implied. See the License for the specific language governing permis-    #
#  sions and limitations under the License.                                   #
###############################################################################
#
# Imports
import re
import json
import inspect
import os
import boto3
from botocore.config import Config
from lib.metrics import Metrics

boto_config = Config(retries={'max_attempts': 10})

# Get AWS region from Lambda environment. If not present then we're not
# running under lambda, so defaulting to us-east-1
AWS_REGION = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
securityhub = boto3.client('securityhub',
                           config=boto_config,
                           region_name=AWS_REGION)

# Classes


class InvalidFindingJson(Exception):
    pass

Example #14
from datetime import datetime, date

# Disable insecure warnings
urllib3.disable_warnings()

AWS_DEFAULT_REGION = demisto.params()['defaultRegion']
AWS_ROLE_ARN = demisto.params()['roleArn']
AWS_ROLE_SESSION_NAME = demisto.params()['roleSessionName']
AWS_ROLE_SESSION_DURATION = demisto.params()['sessionDuration']
AWS_ROLE_POLICY = None
AWS_ACCESS_KEY_ID = demisto.params().get('access_key')
AWS_SECRET_ACCESS_KEY = demisto.params().get('secret_key')
VERIFY_CERTIFICATE = not demisto.params().get('insecure', True)
proxies = handle_proxy(proxy_param_name='proxy', checkbox_default_value=False)
config = Config(connect_timeout=1,
                retries=dict(max_attempts=5),
                proxies=proxies)
"""HELPER FUNCTIONS"""


def aws_session(service='cloudtrail',
                region=None,
                roleArn=None,
                roleSessionName=None,
                roleSessionDuration=None,
                rolePolicy=None):
    kwargs = {}
    if roleArn is not None and roleSessionName is not None:
        kwargs.update({
            'RoleArn': roleArn,
            'RoleSessionName': roleSessionName,
Example #15
ssm_parameter_credentials = os.environ['ssm_parameter_credentials']
checkip_url = os.environ['checkip_url']

# Internal parameters
JobType = "PUT"
MaxRetry = 10  # maximum number of request retries
MaxThread = 50  # maximum number of threads
MaxParallelFile = 1  # not currently used in Lambda
JobTimeout = 900

ResumableThreshold = 5 * 1024 * 1024  # files below this skip resumable upload
CleanUnfinishedUpload = False  # for debugging
ChunkSize = 5 * 1024 * 1024  # for debugging; adjusted automatically at runtime
ifVerifyMD5Twice = False  # for debugging

s3_config = Config(max_pool_connections=30)  # maximum number of connections

# Set environment
logger = logging.getLogger()
logger.setLevel(logging.INFO)

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(table_queue_name)

# Fetch the other account's credentials
ssm = boto3.client('ssm')
logger.info(f'Get ssm_parameter_credentials: {ssm_parameter_credentials}')
credentials = json.loads(
    ssm.get_parameter(Name=ssm_parameter_credentials,
                      WithDecryption=True)['Parameter']['Value'])
credentials_session = boto3.session.Session(
Example #16
def download(profile, output, include_non_default_policy_versions):
    """
    Runs aws iam get-authorization-details on all accounts specified in the aws credentials file, and stores them in
    account-alias.json
    """
    default_region = "us-east-1"
    session_data = {"region_name": default_region}

    if profile:
        session_data["profile_name"] = profile
        output_filename = os.path.join(output, f"{profile}.json")
    else:
        output_filename = os.path.join(output, "default.json")

    session = boto3.Session(**session_data)
    config = Config(connect_timeout=5, retries={"max_attempts": 10})
    iam_client = session.client("iam", config=config)

    results = {
        "UserDetailList": [],
        "GroupDetailList": [],
        "RoleDetailList": [],
        "Policies": [],
    }
    paginator = iam_client.get_paginator("get_account_authorization_details")
    for page in paginator.paginate(Filter=["User"]):
        # Always add inline user policies
        results["UserDetailList"].extend(page["UserDetailList"])
    for page in paginator.paginate(Filter=["Group"]):
        results["GroupDetailList"].extend(page["GroupDetailList"])
    for page in paginator.paginate(Filter=["Role"]):
        results["RoleDetailList"].extend(page["RoleDetailList"])
        # Ignore Service Linked Roles
        for policy in page["Policies"]:
            if policy["Path"] != "/service-role/":
                results["RoleDetailList"].append(policy)
    for page in paginator.paginate(Filter=["LocalManagedPolicy"]):
        # Add customer-managed policies IF they are attached to IAM principals
        for policy in page["Policies"]:
            if policy["AttachmentCount"] > 0:
                results["Policies"].append(policy)
    for page in paginator.paginate(Filter=["AWSManagedPolicy"]):
        # Add AWS-managed policies IF they are attached to IAM principals
        for policy in page["Policies"]:
            if policy["AttachmentCount"] > 0:
                if include_non_default_policy_versions:
                    results["Policies"].append(policy)
                else:
                    policy_version_list = []
                    for policy_version in policy.get("PolicyVersionList"):
                        if policy_version.get("VersionId") == policy.get(
                                "DefaultVersionId"):
                            policy_version_list.append(policy_version)
                            break
                    entry = {
                        "PolicyName": policy.get("PolicyName"),
                        "PolicyId": policy.get("PolicyId"),
                        "Arn": policy.get("Arn"),
                        "Path": policy.get("Path"),
                        "DefaultVersionId": policy.get("DefaultVersionId"),
                        "AttachmentCount": policy.get("AttachmentCount"),
                        "PermissionsBoundaryUsageCount":
                            policy.get("PermissionsBoundaryUsageCount"),
                        "IsAttachable": policy.get("IsAttachable"),
                        "CreateDate": policy.get("CreateDate"),
                        "UpdateDate": policy.get("UpdateDate"),
                        "PolicyVersionList": policy_version_list,
                    }
                    results["Policies"].append(entry)

    if os.path.exists(output_filename):
        os.remove(output_filename)
    with open(output_filename, "w") as file:
        json.dump(results, file, indent=4, default=str)
        print(f"Saved results to {output_filename}")
    return 1
Example #17
import argparse
import re
from contextlib import contextmanager
from functools import partial
from time import time

import boto3
from botocore.config import Config
import s3mothball.commands
from smart_open import open, parse_uri

# set up boto clients
config = Config(retries={'max_attempts': 10, 'mode': 'standard'})
s3_client = boto3.client('s3', config=config)
s3_resource = boto3.resource('s3')

# helpers


def parse_s3_url(url):
    source_path_parsed = parse_uri(url)
    return source_path_parsed.bucket_id, source_path_parsed.key_id


def delete_file(url):
    bucket, key = parse_s3_url(url)
    s3_client.delete_object(Bucket=bucket, Key=key)


def iter_prefixes(url):
    bucket, key = parse_s3_url(url)
Example #18
import boto3
import requests

# IMDSv2: fetch a session token, then pass it on subsequent metadata requests
token = str(
    requests.put('http://169.254.169.254/latest/api/token',
                 headers={
                     "X-aws-ec2-metadata-token-ttl-seconds": "21600"
                 }).content, 'utf-8')
instance_id = str(
    requests.get('http://169.254.169.254/latest/meta-data/instance-id',
                 headers={
                     "X-aws-ec2-metadata-token": token
                 }).content, 'utf-8')
region = str(
    requests.get('http://169.254.169.254/latest/meta-data/placement/region',
                 headers={
                     "X-aws-ec2-metadata-token": token
                 }).content, 'utf-8')

my_config = Config(region_name=region)

ec2 = boto3.client('ec2', config=my_config)

instances = ec2.describe_instances(Filters=[{
    'Name': 'tag:App',
    'Values': ['tags-web-server']
}])

ips = []

for r in instances['Reservations']:
    for i in r['Instances']:
        if i.get('PrivateIpAddress') is not None:
            ips.append(i.get('PrivateIpAddress'))
Example #19
class Organizations:
    """Class used for modeling Organizations
    """

    _config = Config(retries=dict(max_attempts=30))

    def __init__(self, role, account_id=None):
        self.client = role.client('organizations',
                                  config=Organizations._config)
        self.account_id = account_id
        self.account_ids = []

    def get_parent_info(self):
        response = self.list_parents(self.account_id)
        return {
            "ou_parent_id": response.get('Id'),
            "ou_parent_type": response.get('Type')
        }

    def get_account_ids(self):
        for account in paginator(self.client.list_accounts):
            if account.get('Status') == 'ACTIVE':
                self.account_ids.append(account['Id'])

        return self.account_ids

    def get_organization_info(self):
        response = self.client.describe_organization()
        return {
            "organization_master_account_id":
            response.get('Organization').get('MasterAccountId'),
            "organization_id":
            response.get('Organization').get('Id')
        }

    def describe_ou_name(self, ou_id):
        try:
            response = self.client.describe_organizational_unit(
                OrganizationalUnitId=ou_id)
            return response['OrganizationalUnit']['Name']
        except ClientError:
            raise RootOUIDError("OU is the Root of the Organization")

    @staticmethod
    def determine_ou_path(ou_path, ou_child_name):
        return '{0}/{1}'.format(ou_path,
                                ou_child_name) if ou_path else ou_child_name

    def list_parents(self, ou_id):
        return self.client.list_parents(ChildId=ou_id).get('Parents')[0]

    def get_accounts_for_parent(self, parent_id):
        return paginator(self.client.list_accounts_for_parent,
                         ParentId=parent_id)

    def get_child_ous(self, parent_id):
        return paginator(self.client.list_organizational_units_for_parent,
                         ParentId=parent_id)

    def dir_to_ou(self, path):
        p = path.split('/')[1:]
        ou_id = self.client.list_roots().get('Roots')[0].get('Id')

        while p:
            for ou in self.get_child_ous(ou_id):
                if ou['Name'] == p[0]:
                    p.pop(0)
                    ou_id = ou['Id']
                    break
            else:
                raise Exception(
                    "Path {} failed to return a child OU at '{}'".format(
                        path, p[0]))
        else:  # pylint: disable=W0120
            return self.get_accounts_for_parent(ou_id)

    def build_account_path(self, ou_id, account_path, cache):
        """Builds a path tree to the account from the root of the Organization
        """
        current = self.list_parents(ou_id)

        # While not at the root of the Organization
        while current.get('Type') != "ROOT":
            # check cache for ou name of id
            if not cache.check(current.get('Id')):
                cache.add(current.get('Id'),
                          self.describe_ou_name(current.get('Id')))
            ou_name = cache.check(current.get('Id'))
            account_path.append(ou_name)
            return self.build_account_path(current.get('Id'), account_path,
                                           cache)
        return Organizations.determine_ou_path(
            '/'.join(list(reversed(account_path))),
            self.describe_ou_name(self.get_parent_info().get("ou_parent_id")))
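A hypothetical usage sketch for the class above, assuming role exposes a boto3-style client() factory as the constructor expects and that the OU path exists:

org = Organizations(role)
print(org.get_organization_info())
for account in org.dir_to_ou('/deployment/prod'):  # hypothetical OU path
    print(account['Id'], account['Name'])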
Example #20

class InvalidInputError(ValueError):
    pass


# Custom encoder for datetime objects
class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return int(mktime(obj.timetuple()))
        return json.JSONEncoder.default(self, obj)


# limit the number of retries submitted by boto3 because Step Functions will handle the exponential retries more efficiently
config = Config(retries=dict(max_attempts=2))

client = boto3.client('transcribe', config=config)


# Entrypoint for the Lambda function
def lambda_handler(event, context):
    session = boto3.session.Session()
    region = session.region_name

    # Default to unsuccessful
    isSuccessful = "FALSE"

    # Create a random name for the transcription job
    jobname = id_generator()
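The handler calls an id_generator helper the snippet doesn't include; a plausible sketch producing a random job name of the kind Transcribe accepts:

import random
import string


def id_generator(size=12, chars=string.ascii_letters + string.digits):
    # random identifier used by lambda_handler as the transcription job name
    return ''.join(random.choice(chars) for _ in range(size))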
Example #21
 def test_presign_unsigned(self):
     config = Config(signature_version=botocore.UNSIGNED)
     client = self.session.create_client('s3', 'us-east-2', config=config)
     url = client.generate_presigned_url(ClientMethod='list_buckets')
     self.assertEqual('https://s3.us-east-2.amazonaws.com/', url)
Example #22
 def __init__(self, bucket="congo8khz-pnnn"):
     self.bucket = bucket
     self.s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
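A hedged usage sketch for the unsigned client above: anonymous reads against the public bucket, with no credentials configured (the listing limit is arbitrary):

import boto3
from botocore import UNSIGNED
from botocore.config import Config

s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
for obj in s3.list_objects_v2(Bucket="congo8khz-pnnn").get("Contents", [])[:5]:
    print(obj["Key"], obj["Size"])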
Example #23
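This example references imports and two transfer settings its snippet doesn't show; a hedged reconstruction of the missing preamble (the threshold and concurrency values are assumptions):

import time

import boto3
from boto3.s3.transfer import TransferConfig, MB
from botocore.config import Config

# assumed values; the original snippet does not show these settings
multipart_threshold = 5 * MB
max_concurrency = 10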
# The partition size of each multipart chunk
multipart_chunksize = 5 * MB
# Whether use threads
use_threads = True
# Max retry times
retries = 100

# target bucket and key name
region = 'ap-northeast-1'
bucket = 'multipart--upload-andrew-test'
key = 'test/file'
# to upload file name
to_upload = 'bigfile'

# Get the service client
s3 = boto3.client('s3',
                  region,
                  config=Config(retries={'max_attempts': retries}))

# Group things into config
config = TransferConfig(multipart_threshold=multipart_threshold,
                        max_concurrency=max_concurrency,
                        multipart_chunksize=multipart_chunksize,
                        use_threads=use_threads)

# Upload tmp.txt to bucket-name at key-name
before = time.time()
s3.upload_file(to_upload, bucket, key, Config=config)
after = time.time()
print(after - before)
Example #24
# initialise LOGGERs
LOG_LEVEL = os.getenv('log_level', 'info')
LOGGER = Logger(loglevel=LOG_LEVEL)
APPLOGGER = LogHandler(PLAYBOOK) # application LOGGER for CW Logs


# Get AWS region from Lambda environment. If not present then we're not
# running under lambda, so defaulting to us-east-1
AWS_REGION = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
AWS_PARTITION = os.getenv('AWS_PARTITION', 'aws')

# Append region name to LAMBDA_ROLE
LAMBDA_ROLE += '_' + AWS_REGION
BOTO_CONFIG = Config(
    retries={
        'max_attempts': 10
    },
    region_name=AWS_REGION
)
AWS = AWSClient(AWS_PARTITION, AWS_REGION)

#------------------------------------------------------------------------------
# HANDLER
#------------------------------------------------------------------------------
def lambda_handler(event, context):

    LOGGER.debug(event)
    metrics = Metrics(event)
    try:
        for finding_rec in event['detail']['findings']:
            finding = Finding(finding_rec)
            LOGGER.info('FINDING_ID: ' + str(finding.details.get('Id')))
Example #25
    def __init__(  # type: ignore
        self,
        agent_config_id: str = None,
        name: str = None,
        labels: Iterable[str] = None,
        env_vars: dict = None,
        max_polls: int = None,
        agent_address: str = None,
        no_cloud_logs: bool = None,
        launch_type: str = "FARGATE",
        aws_access_key_id: str = None,
        aws_secret_access_key: str = None,
        aws_session_token: str = None,
        region_name: str = None,
        botocore_config: dict = None,
        enable_task_revisions: bool = False,
        use_external_kwargs: bool = False,
        external_kwargs_s3_bucket: str = None,
        external_kwargs_s3_key: str = None,
        **kwargs,
    ) -> None:
        super().__init__(
            agent_config_id=agent_config_id,
            name=name,
            labels=labels,
            env_vars=env_vars,
            max_polls=max_polls,
            agent_address=agent_address,
            no_cloud_logs=no_cloud_logs,
        )

        if not kwargs.pop("_called_from_cli", False):
            warnings.warn(
                "`FargateAgent` is deprecated, please transition to using `ECSAgent` instead"
            )

        from boto3 import client as boto3_client
        from boto3 import resource as boto3_resource
        from botocore.config import Config

        # Config used for boto3 client initialization
        aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
        aws_secret_access_key = aws_secret_access_key or os.getenv(
            "AWS_SECRET_ACCESS_KEY"
        )
        aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
        region_name = region_name or os.getenv("REGION_NAME")
        botocore_config = botocore_config or {}

        # revisions and kwargs configurations
        self.enable_task_revisions = enable_task_revisions
        self.use_external_kwargs = use_external_kwargs
        self.external_kwargs_s3_bucket = external_kwargs_s3_bucket
        self.external_kwargs_s3_key = external_kwargs_s3_key
        self.launch_type = launch_type

        # Parse accepted kwargs for task definition, run, and container definitions key of task
        # definition
        (
            self.task_definition_kwargs,
            self.task_run_kwargs,
            self.container_definitions_kwargs,
        ) = self._parse_kwargs(kwargs, True)

        # Client initialization
        self.boto3_client = boto3_client(
            "ecs",
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
            region_name=region_name,
            config=Config(**botocore_config),
        )
        # fetch external kwargs from s3 if needed
        if self.use_external_kwargs:
            self.logger.info("Use of external S3 kwargs enabled.")
            self.s3_resource = boto3_resource(
                "s3",
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                aws_session_token=aws_session_token,
                region_name=region_name,
            )

        # get boto3 client for resource groups tagging api
        if self.enable_task_revisions:
            self.logger.info("Native ECS task revisions enabled.")
            self.boto3_client_tags = boto3_client(
                "resourcegroupstaggingapi",
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                aws_session_token=aws_session_token,
                region_name=region_name,
                config=Config(**botocore_config),
            )

        self.logger.debug(f"Launch type: {self.launch_type}")
        self.logger.debug(f"Enable task revisions: {self.enable_task_revisions}")
        self.logger.debug(f"Use external kwargs: {self.use_external_kwargs}")
        self.logger.debug(
            f"External kwargs S3 bucket: {self.external_kwargs_s3_bucket}"
        )
        self.logger.debug(f"External kwargs S3 key: {self.external_kwargs_s3_key}")
Example #26
def _resource(name, config):
    boto_config = Config(retries={'max_attempts': BOTO_MAX_RETRIES})
    return boto3.resource(
        name, config["provider"]["region"], config=boto_config)
Example #27
def s3_get(url, temp_file, proxies=None):
    """Pull a file directly from S3."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
Example #28
def jobs_compare(args):

    job_source = args['job-name-source']
    job_target = args['job-name-target']
    region = args['region']
    profile = args['profile']

    boto3.setup_default_session(profile_name=profile)

    region_config = Config(region_name=region)

    glue = boto3.client('glue', config=region_config)

    response_source = glue.get_job(JobName=job_source)
    response_source = response_source['Job']
    del response_source['Name']
    del response_source['CreatedOn']
    del response_source['LastModifiedOn']

    response_target = glue.get_job(JobName=job_target)
    response_target = response_target['Job']
    del response_target['Name']
    del response_target['CreatedOn']
    del response_target['LastModifiedOn']

    diff = {}
    for key in response_source:
        value = response_source[key]
        if isinstance(value, dict):
            diff_dict = {}
            for key_dict in value:
                value_dict = value[key_dict]
                if key_dict in response_target[key]:
                    if str(value_dict) != str(response_target[key][key_dict]):
                        diff_dict[key_dict] = "{} <> {}".format(
                            str(value_dict),
                            str(response_target[key][key_dict]))
                else:
                    diff_dict[key_dict] = "{} <> {}".format(
                        str(value_dict), "")
            if len(diff_dict) > 0:
                diff[key] = diff_dict
        else:
            if key in response_target:
                if str(value) != str(response_target[key]):
                    diff[key] = "{} <> {}".format(str(value),
                                                  str(response_target[key]))
            else:
                diff[key] = "{} <> {}".format(str(value), "")

    for key in response_target:
        value = response_target[key]
        if isinstance(value, dict):
            diff_dict = diff[key] if key in diff else {}
            for key_dict in value:
                value_dict = value[key_dict]
                if key_dict not in response_source[key]:
                    diff_dict[key_dict] = "{} <> {}".format(
                        "", str(value_dict))
            if len(diff_dict) > 0:
                diff[key] = diff_dict
        else:
            if key not in response_source:
                diff[key] = "{} <> {}".format("", str(value))

    if len(diff) > 0:
        print(json.dumps(diff, indent=4))
    else:
        print("Não existem diferenças")
Example #29
 def setUp(self):
     super(TestS3SigV4Client, self).setUp()
     self.client = self.session.create_client(
         's3', self.region, config=Config(signature_version='s3v4'))
Example #30
    def __init__(self, ec2=None, autoscaling=None):
        """ Initializes aws sdk clients """

        self.ec2 = ec2 or boto3.client(
            'ec2', config=Config(retries={'max_attempts': BOTO3_RETRIES}))
        self.asg = autoscaling or boto3.client('autoscaling')