Example #1
    def _run_main(self, parsed_args, parsed_globals):

        register_task_def_kwargs, appspec_obj = \
            self._load_file_args(parsed_args.task_definition,
                                 parsed_args.codedeploy_appspec)

        ecs_client_wrapper = ECSClient(
            self._session, parsed_args, parsed_globals, self.USER_AGENT_EXTRA)

        self.resources = self._get_resource_names(
            parsed_args, ecs_client_wrapper)

        codedeploy_client = self._session.create_client(
            'codedeploy',
            region_name=parsed_globals.region,
            verify=parsed_globals.verify_ssl,
            config=config.Config(user_agent_extra=self.USER_AGENT_EXTRA))

        self._validate_code_deploy_resources(codedeploy_client)

        self.wait_time = self._cd_validator.get_deployment_wait_time()

        self.task_def_arn = self._register_task_def(
            register_task_def_kwargs, ecs_client_wrapper)

        self._create_and_wait_for_deployment(codedeploy_client, appspec_obj)
Example #2
def init():
    return config.Config(region_name=os.environ["AWS_REGION"],
                         retries={
                             "max_attempts": 10,
                             "mode": "standard"
                         },
                         **json.loads(os.environ["AWS_SDK_USER_AGENT"]))
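A minimal usage sketch for the helper above, assuming AWS_REGION is set and AWS_SDK_USER_AGENT holds a JSON object of extra Config keyword arguments (for example a user_agent_extra string); the S3 client is only an illustrative choice:

# Imports assumed by the init() helper above, plus boto3 for the client itself.
import os
import json
import boto3
from botocore import config

s3_client = boto3.client("s3", config=init())  # "s3" is only an example service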
Example #3
    def __init__(self, session, parsed_args, parsed_globals, user_agent_extra):
        self._args = parsed_args
        self._custom_config = config.Config(user_agent_extra=user_agent_extra)
        self._client = session.create_client(
            'ecs',
            region_name=parsed_globals.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl,
            config=self._custom_config)
Example #4
    def __init__(self, access_key: str, secret_key: str, verify: bool,
                 proxy: bool):
        self.access_key = access_key
        self.secret_key = secret_key
        self.verify = verify

        # Set proxy
        if proxy:
            proxies = handle_proxy()

            # Throws a ValueError if Proxy is empty in configuration.
            if not proxies.get('https', True):
                raise ValueError(MESSAGES['BLANK_PROXY_ERROR'] + str(proxies))

            self.config = config.Config(proxies=proxies)
        else:
            self.config = config.Config(proxies={})

        self.s3_client: Any = None
Example #5
def write_to_s3(bucket: str, keyname: str, file_path: str):
    try:
        s3 = boto3.client(
            's3',
            'ap-southeast-1',
            config=config.Config(s3={'addressing_style': 'path'}))
        print("begin to write to S3 : " + bucket + "  key : " + keyname +
              " file_path : " + file_path)
        s3.upload_file(file_path, bucket, keyname)
    except Exception as e:
        print("write to S3 error : " + str(e))
def main(input, output, client=None):
    jsondata = read_in(input)

    provider_config = config.Config(region_name=jsondata['region'])

    if client is None:
        client = boto3.client('backup', config=provider_config)

    response = client.list_backup_plans(
        MaxResults=100,
        IncludeDeleted=False
    )

    for backupPlan in response['BackupPlansList']:
        if jsondata['plan_name'] == backupPlan['BackupPlanName']:
            output.write(json.dumps({"plan_id": backupPlan['BackupPlanId']}))
Example #7
def main():
    shockdb = get_client(CONFIG_MONGO_SHOCK_HOST, CONFIG_MONGO_SHOCK_DATABASE,
                         CONFIG_MONGO_SHOCK_USER,
                         CONFIG_MONGO_SHOCK_PWD)[CONFIG_MONGO_SHOCK_DATABASE]
    bsdb = get_client(
        CONFIG_MONGO_BLOBSTORE_HOST, CONFIG_MONGO_BLOBSTORE_DATABASE,
        CONFIG_MONGO_BLOBSTORE_USER,
        CONFIG_MONGO_BLOBSTORE_PWD)[CONFIG_MONGO_BLOBSTORE_DATABASE]

    s3 = boto3.client('s3',
                      endpoint_url=CONFIG_S3_HOST,
                      aws_access_key_id=CONFIG_S3_ACCESS_KEY,
                      aws_secret_access_key=CONFIG_S3_ACCESS_SECRET,
                      region_name=CONFIG_S3_REGION,
                      config=bcfg.Config(s3={'addressing_style': 'path'}))

    paginator = s3.get_paginator('list_objects_v2')
    seenusers = {}

    # no way to get object count in a bucket other than listing them, apparently

    count = 0
    lastPrint = ''
    for page in paginator.paginate(Bucket=CONFIG_S3_BUCKET):
        nodes = [toUUID(o['Key']) for o in page['Contents']]
        for n in nodes:
            node = shockdb[SHOCK_COL_NODES].find_one({'id': n})
            if not node:
                raise ValueError("Missing shock node " + n)
            bsnode = toBSNode(node, seenusers, shockdb, bsdb)
            bsdb[BS_COL_NODES].update_one({BS_KEY_NODES_ID: n},
                                          {'$set': bsnode},
                                          upsert=True)
            count += 1
            if count % 100 == 0:
                backspace = '\b' * len(lastPrint)
                lastPrint = 'Processed {} records'.format(count)
                print(backspace + lastPrint, end='', flush=True)

    backspace = '\b' * len(lastPrint)
    lastPrint = 'Processed {} records'.format(count)
    print(backspace + lastPrint)
Example #8
def main():
    token = sys.argv[1]

    s3 = boto3.client('s3',
                      endpoint_url=CONFIG_S3_HOST,
                      aws_access_key_id=CONFIG_S3_ACCESS_KEY,
                      aws_secret_access_key=CONFIG_S3_ACCESS_SECRET,
                      region_name=CONFIG_S3_REGION,
                      config=bcfg.Config(s3={'addressing_style': 'path'}))

    for _ in range(RECORD_COUNT):
        ret = requests.post(SHOCK_NODE_URL,
                            data=io.StringIO("foo"),
                            headers={'authorization': 'oauth ' + token})
        j = ret.json()
        nid = j['data']['id']
        key = nid[0:2] + '/' + nid[2:4] + '/' + nid[4:6] + '/' + nid
        _ = s3.put_object(Body=io.BytesIO(b'whee'),
                          Bucket=CONFIG_S3_BUCKET,
                          Key=key)
Example #9
#                                                                                                                    #
#  Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance    #
#  with the License. A copy of the License is located at                                                             #
#                                                                                                                    #
#      http://www.apache.org/licenses/LICENSE-2.0                                                                    #
#                                                                                                                    #
#  or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
#  OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions    #
#  and limitations under the License.                                                                                #
######################################################################################################################

import boto3
import logging
import os
import json
from crhelper import CfnResource
from botocore import config

config = config.Config(**json.loads(os.environ['AWS_SDK_USER_AGENT']))
client_iam = boto3.client('iam', config=config)

helper = CfnResource(json_logging=True, log_level='INFO')
logger = logging.getLogger(__name__)


def _create_iam_resources():
    '''
    This function creates the IAM resources required for a no-ingress EC2 environment.
    https://docs.aws.amazon.com/cloud9/latest/user-guide/ec2-ssm.html#aws-cli-instance-profiles
    '''

    CLOUD9_ROLE = 'AWSCloud9SSMAccessRole'
    CLOUD9_INSTANCE_PROFILE = 'AWSCloud9SSMInstanceProfile'
Example #10
    parser.add_argument("--prefix-list-id", "-pl", type=str, help="Prefix list ID with IPs authorized to access SOCA on port 22/443")
    parser.add_argument("--name", "-n", type=str, help="Friendly name for your SOCA cluster. Must be unique. SOCA will be added as prefix")
    parser.add_argument("--base-os", "-os", choices=["amazonlinux2", "centos7", "rhel7"], type=str, help="The preferred Linux distribution for the scheduler and compute instances")
    parser.add_argument("--debug", action='store_const', const=True, default=False, help="Enable CDK debug mode")
    parser.add_argument("--cdk-cmd", type=str, choices=["deploy", "create", "update", "ls", "list", "synth", "synthesize", "destroy", "bootstrap"], default="deploy")
    parser.add_argument("--skip-config-message", action='store_const', const=True, default=False, help="Skip default_config message")

    args = parser.parse_args()

    # Use script location as current working directory
    install_directory = os.path.dirname(os.path.realpath(__file__))
    os.chdir(install_directory)

    # Append Solution ID to Boto3 Construct
    aws_solution_user_agent = {"user_agent_extra": "AwsSolution/SO0072/2.7.2"}
    boto_extra_config = config.Config(**aws_solution_user_agent)

    print(f"""
            {fg('red')}_____{fg('light_blue')} ____  {fg('magenta')}______{fg('yellow')}___{attr('reset')} 
           {fg('red')}/ ___/{fg('light_blue')}/ __ \{fg('magenta')}/ ____{fg('yellow')}/   |{attr('reset')} 
           {fg('red')}\__ \{fg('light_blue')}/ / / {fg('magenta')}/ /   {fg('yellow')}/ /| |{attr('reset')} 
          {fg('red')}___/{fg('light_blue')} / /_/ {fg('magenta')}/ /___{fg('yellow')}/ ___ |{attr('reset')} 
         {fg('red')}/____/{fg('light_blue')}\____/{fg('magenta')}\____{fg('yellow')}/_/  |_|{attr('reset')}                     
        {fg('red')}Scale{attr('reset')}-{fg('light_blue')}Out{attr('reset')} {fg('magenta')}Computing{attr('reset')} on {fg('yellow')}AWS{attr('reset')}
    ================================
    > Documentation: https://awslabs.github.io/scale-out-computing-on-aws/
    > Source Code: https://github.com/awslabs/scale-out-computing-on-aws/""")

    install_phases = {
        1: "Please provide a cluster name ('soca-' is automatically added as a prefix)",
        2: "Enter the name of an S3 bucket your own",
Example #11
def get_boto_config():
    if SOLUTION_ID and SOLUTION_VERSION:
        return config.Config(
            user_agent_extra=f"AwsSolution/{SOLUTION_ID}/{SOLUTION_VERSION}")
    else:
        return config.Config()
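A brief usage sketch, assuming boto3 is imported and SOLUTION_ID / SOLUTION_VERSION are defined as in the snippet above; DynamoDB is only an illustrative choice of client:

import boto3

# The user_agent_extra string is appended to the User-Agent header of every request.
dynamodb = boto3.client("dynamodb", config=get_boto_config())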
Example #12
IS_OFFLINE = os.environ.get('IS_OFFLINE')


if IS_OFFLINE:
    from botocore import UNSIGNED
    from botocore import config
    db = boto3.resource(
        'dynamodb',
        region_name='localhost',
        endpoint_url='http://localhost:8000'
    )
    s3 = boto3.resource(
        's3',
        region_name='localhost',
        endpoint_url='http://localhost:8001',
        config=config.Config(signature_version=UNSIGNED)  # Otherwise S3-triggered Lambdas fail with 403
    )
else:
    db = boto3.resource('dynamodb')
    s3 = boto3.resource('s3')


def handle_daily_prices(event, context) -> int:
    """
    This function is triggered every time a new price file is available.
    """
    # TODO find out a way to import at module level
    from . import model
    logging.info('****** daily prices triggered with s3 *******')
    for record in event['Records']:
        logging.info('record: %s', str(record))
Example #13
# Boto3
import boto3
import botocore.config as boto_config

# ResourceManagerCommon
from resource_manager_common import stack_info

# Utils
from cgf_utils import aws_utils
from cgf_utils import custom_resource_response
from cgf_utils import properties
from cgf_utils import role_utils

CLOUD_GEM_FRAMEWORK = 'CloudGemFramework'
iam = aws_utils.ClientWrapper(boto3.client('iam'))
cfg = boto_config.Config(read_timeout=70, connect_timeout=70)
s3 = aws_utils.ClientWrapper(boto3.client('s3', config=cfg),
                             do_not_log_args=['Body'])


def get_default_policy(project_service_lambda_arn):
    """
    Gets the default policy to associate with a Lambda Configuration

    To ensure least privileges, we do not attach PutLogEvents or CreateLogStream permissions here. Instead
    these are added as an inline policy on the Lambda's execution role once it's created so they can be correctly scoped.

    :param project_service_lambda_arn:
    :return: The default policy document for the lambda
    """
    policy = {
Example #14
def _boto_config():
    return config.Config(max_pool_connections=MAX_RPC_CONCURRENCY + MAX_PASSTHROUGH_CONCURRENCY)
Example #15
GLUE_OUTPUT_S3_KEY_PREFIX = args["glue_output_s3_key_prefix"]
SOLUTION_ID = args["solution_id"]
SOLUTION_VERSION = args["solution_version"]

# Sets Glue context and logging
spark_context = SparkContext()
glue_context = GlueContext(spark_context)
job = Job(glue_context)

# AWS Clients
config_json = {}
if SOLUTION_ID.strip() != "" and SOLUTION_VERSION.strip() != "":
    config_json[
        "user_agent_extra"] = f"AwsSolution/{SOLUTION_ID}/{SOLUTION_VERSION}"

config = config.Config(**config_json)
s3 = boto3.client('s3', config=config)


class DataCleanupException(Exception):
    """Raised when there is an issue while cleaning previous data from S3"""
    pass


def log_message(msg):
    msg_arr = [f'****** LOG_MSG {datetime.now()} ******']

    if not isinstance(msg, list):
        msg = [msg]

    # Add some preceding whitespace to each line for the log message.
Example #16
import boto3
from botocore import config
import botocore.exceptions as aws_exp

if __name__ == '__main__':
    conf = config.Config(retries={'max_attempts': 10, 'mode': 'standard'})
    client = boto3.client('emr', config=conf)
    paginator = client.get_paginator('list_clusters')
    cluster_paginator = paginator.paginate()
    with open('aws.txt', 'w') as file:
        while 1:
            try:
                for index, item in enumerate(cluster_paginator):
                    print(f'Index: {index}  Item: {item["Clusters"]}')
                    file.write(f'Index: {index}  Item :{item}\n')
            except aws_exp.ClientError:
                continue
Example #17
from datetime import datetime
from typing import Dict, List

import boto3
from botocore import config

from dashboard_api.core.config import DT_FORMAT, BUCKET
from dashboard_api.models.static import IndicatorObservation

s3 = boto3.client("s3")

_lambda = boto3.client(
    "lambda",
    region_name="us-east-1",
    config=config.Config(
        read_timeout=900, connect_timeout=900, retries={"max_attempts": 0}
    ),
)


def invoke_lambda(
    lambda_function_name: str, payload: dict = None, invocation_type="RequestResponse"
):
    """Invokes a lambda function using the boto3 lambda client.

    Params:
    -------
    lambda_function_name (str): name of the lambda to invoke
    payload (Optional[dict]): data to invoke the lambda function with (will be accessible
        in the lambda handler function under the `event` param)
    invocation_type (Optional[str] = ["RequestResponse", "Event", "DryRun"]):
Example #18
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

from elasticsearch import Elasticsearch, RequestsHttpConnection
import base64
import json
import os
import boto3
from requests_aws4auth import AWS4Auth
from botocore import config

es_endpoint = os.environ['EsEndpoint']
dataplane_bucket = os.environ['DataplaneBucket']
boto_config = json.loads(os.environ['botoConfig'])
config = config.Config(**boto_config)

s3 = boto3.client('s3', config=config)

# These names are the lowercase version of OPERATOR_NAME defined in /source/operators/operator-library.yaml
supported_operators = [
    "textdetection", "mediainfo", "transcribeaudio", "transcribevideo",
    "translate", "genericdatalookup", "labeldetection", "celebrityrecognition",
    "facesearch", "contentmoderation", "facedetection", "key_phrases",
    "entities", "shotdetection", "technicalcuedetection"
]


def normalize_confidence(confidence_value):
    converted = float(confidence_value) * 100
    return str(converted)
Example #19
def boto_extra_config():
    aws_solution_user_agent = {
        "user_agent_extra": "AwsSolution/SO0072/__VERSION__"
    }
    return botocore_config.Config(**aws_solution_user_agent)
Example #20
    def s3conf(self):
        return botoconf.Config(
            region_name=Common.Session.region,
            signature_version='s3v4',
        )
Example #21
def get_s3_client():
    import boto3
    from botocore import config
    return boto3.client('s3', boto3.session.Session().region_name,
                        config=config.Config(s3={'addressing_style': 'path'}))
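A short usage sketch for the helper above; the bucket and key names are placeholders:

s3 = get_s3_client()
# With path-style addressing, requests use URLs of the form
# https://s3.<region>.amazonaws.com/<bucket>/<key> instead of virtual-hosted-style.
s3.upload_file("local_file.txt", "example-bucket", "path/to/key.txt")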
Example #22
import boto3
from logging import getLogger, FileHandler, Formatter
from argparse import ArgumentParser
from botocore import exceptions
from botocore import config

logger = getLogger(__name__)
handle = FileHandler("/tmp/restore.log")
logger.setLevel("INFO")
formatter = Formatter('%(asctime)s - %(levelname)s - %(message)s')
handle.setFormatter(formatter)
logger.addHandler(handle)

client_config = config.Config(signature_version="s3",
                              max_pool_connections=5,
                              retries=dict(max_attempts=0),
                              read_timeout=700)

global client

success_list = []
fail_list = []


def get_object_versions(bucket, prefix, max_key, key_marker):
    kwargs = dict(Bucket=bucket,
                  EncodingType='url',
                  MaxKeys=max_key,
                  Prefix=prefix)

    if key_marker:
Example #23
import os
import boto3
import urllib3
import json
from botocore import config
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core import patch_all
from MediaInsightsEngineLambdaHelper import MediaInsightsOperationHelper
from MediaInsightsEngineLambdaHelper import MasExecutionError
from MediaInsightsEngineLambdaHelper import DataPlane

patch_all()

region = os.environ['AWS_REGION']

mie_config = json.loads(os.environ['botoConfig'])
config = config.Config(**mie_config)
transcribe = boto3.client("transcribe", config=config)


def lambda_handler(event, context):
    print("We got this event:\n", event)
    operator_object = MediaInsightsOperationHelper(event)
    # If Transcribe wasn't run due to silent audio, then we're done
    if "Mediainfo_num_audio_tracks" in event["Input"]["MetaData"] and event[
            "Input"]["MetaData"]["Mediainfo_num_audio_tracks"] == "0":
        operator_object.update_workflow_status("Complete")
        return operator_object.return_output_object()
    try:
        job_id = operator_object.metadata["TranscribeJobId"]
        workflow_id = operator_object.workflow_execution_id
        asset_id = operator_object.asset_id
Example #24
import boto3
import os
import json
import base64
import time
from botocore import config

solution_identifier = {"user_agent_extra": "AwsSolution/SO8016/1.0.0"}
config = config.Config(**solution_identifier)

sagemaker_runtime_client = boto3.client('runtime.sagemaker', config=config)
s3 = boto3.client('s3', config=config)


def handler(event, context):
    # parse environment parameters
    sagemaker_endpoint_name = os.environ['SAGEMAKER_ENDPOINT_NAME']
    service_type = os.environ['SERVICE_TYPE']
    deploy_model_name = os.environ['OBJECT_DETECTOR_MODEL_NAME']
    events_s3_bucket_name = os.environ['EVENTS_S3_BUCKET_NAME']
    request_events_snapshot_enabled = os.environ[
        'REQUEST_EVENTS_SNAPSHOT_ENABLED']

    # object detector short size mapping look-up table
    short_size_lut = {
        'ssd_512_resnet50_v1_coco': 512,
        'yolo3_darknet53_coco': 416,
        'yolo3_mobilenet1.0_coco': 416,
        'faster_rcnn_fpn_resnet101_v1d_coco': 600
    }
    short_size = short_size_lut.get(deploy_model_name)