def get_s3_session(self):
     return boto3.client('s3',
                         config=Config(signature_version='s3v4',
                                       region_name=self.S3_REGION))
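A SigV4-signed client like the one returned above is also what presigned URLs need in most newer regions and for SSE-KMS encrypted objects. A minimal, hedged sketch of presigning a download with such a client (the bucket and key are placeholders, not from the original):

import boto3
from botocore.client import Config

# SigV4 client; region and bucket/key below are illustrative placeholders.
s3 = boto3.client('s3', config=Config(signature_version='s3v4', region_name='us-east-1'))

url = s3.generate_presigned_url(
    ClientMethod='get_object',
    Params={'Bucket': 'my-example-bucket', 'Key': 'path/to/object.txt'},
    ExpiresIn=300,  # seconds; the boto3 default is 3600
)
print(url)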
Example #2
def get_tator_store(bucket=None,
                    connect_timeout=5,
                    read_timeout=5,
                    max_attempts=5,
                    upload=False,
                    backup=False) -> TatorStorage:
    """
    Determines the type of object store required by the given bucket and returns it. All returned
    objects are subclasses of the base class TatorStorage.

    :param bucket: The bucket to use for accessing object storage.
    :type bucket: models.Bucket
    :param upload: If True, use the upload bucket; `bucket` must also be None if this is True
    :type upload: bool
    :param backup: If True, use the backup bucket; `bucket` must also be None if this is True
    :type backup: bool
    :param connect_timeout: The number of seconds to wait on connect before timing out.
    :type connect_timeout: float or int
    :param read_timeout: The number of seconds to wait on reading before timing out.
    :type read_timeout: float or int
    :param max_attempts: The max number of retries on any one request.
    :type max_attempts: int
    :rtype: TatorStorage
    """
    if upload and backup:
        raise ValueError("Cannot set both `upload` and `backup` to True")
    if bucket is not None and (upload or backup):
        raise ValueError(
            f"Cannot specify a bucket and set `{'upload' if upload else 'backup'}` to True"
        )

    # Google Cloud Storage uses a different client class, handle this case first
    if getattr(bucket, "gcs_key_info", None):
        gcs_key_info = json.loads(bucket.gcs_key_info)
        gcs_project = gcs_key_info["project_id"]
        client = storage.Client(
            gcs_project, Credentials.from_service_account_info(gcs_key_info))
        return TatorStorage.get_tator_store(ObjectStore.GCP, bucket, client,
                                            bucket.name)

    if bucket is None:
        if upload and os.getenv("UPLOAD_STORAGE_HOST"):
            # Configure for upload
            prefix = "UPLOAD"
            bucket_env_name = "UPLOAD_STORAGE_BUCKET_NAME"
        elif backup:
            # Configure for backup
            prefix = "BACKUP"
            bucket_env_name = "BACKUP_STORAGE_BUCKET_NAME"
        else:
            # Configure for standard use
            prefix = "OBJECT"
            bucket_env_name = "BUCKET_NAME"
        endpoint = os.getenv(f"{prefix}_STORAGE_HOST")
        region = os.getenv(f"{prefix}_STORAGE_REGION_NAME")
        access_key = os.getenv(f"{prefix}_STORAGE_ACCESS_KEY")
        secret_key = os.getenv(f"{prefix}_STORAGE_SECRET_KEY")
        bucket_name = os.getenv(bucket_env_name)
        external_host = os.getenv(f"{prefix}_STORAGE_EXTERNAL_HOST")
    else:
        endpoint = bucket.endpoint_url
        region = bucket.region
        access_key = bucket.access_key
        secret_key = bucket.secret_key
        bucket_name = bucket.name
        external_host = None

    if endpoint:
        # TODO change back to virtual-host-style access when it works again, as path-style access is
        # on delayed deprecation
        # Strip the bucket name from the url to use path-style access
        endpoint = endpoint.replace(f"{bucket_name}.", "")
        config = Config(
            connect_timeout=connect_timeout,
            read_timeout=read_timeout,
            retries={"max_attempts": max_attempts},
        )
        client = boto3.client(
            "s3",
            endpoint_url=f"{endpoint}",
            region_name=region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            config=config,
        )
    else:
        # Client generator will not have env variables defined
        client = boto3.client("s3")

    # Get the type of object store from bucket metadata
    try:
        response = client.head_bucket(Bucket=bucket_name)
    except Exception:
        logger.warning(
            "Failed to retrieve remote bucket information, inferring server type from endpoint"
        )
        if endpoint and "amazonaws" in endpoint:
            server = ObjectStore.AWS
        else:
            server = ObjectStore.MINIO
    else:
        response_server = response["ResponseMetadata"]["HTTPHeaders"]["server"]
        if ObjectStore.AWS.value in response_server:
            server = ObjectStore.AWS
        elif ObjectStore.MINIO.value in response_server:
            server = ObjectStore.MINIO
        else:
            raise ValueError(
                f"Received unhandled server type '{response_server}'")

    return TatorStorage.get_tator_store(server, bucket, client, bucket_name,
                                        external_host)
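A hedged usage sketch of the factory above, assuming the environment variables and bucket model described in the docstring (the `some_bucket` instance is hypothetical):

# Hypothetical usage; relies on the env vars documented above being set.
live_store = get_tator_store()                # default store from OBJECT_STORAGE_* env vars
backup_store = get_tator_store(backup=True)   # backup store from BACKUP_STORAGE_* env vars
upload_store = get_tator_store(upload=True)   # upload store, if UPLOAD_STORAGE_HOST is set
# Or a specific models.Bucket record with tighter timeouts:
custom_store = get_tator_store(bucket=some_bucket, connect_timeout=2, read_timeout=10)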
Example #3
import os
import random

import boto3
from botocore.client import Config

#here = os.path.dirname(os.path.abspath(__file__))

#filename = os.path.join(here, 'example.jpg')

ACCESS_KEY = 'AWS_ACCESS_KEY'
SECRET_KEY = 'AWS_SECRET_KEY'
bucketname = 'hack-the-valley-photo'
#data = open(filename, 'rb')

s3 = boto3.resource('s3',
                    aws_access_key_id=ACCESS_KEY,
                    aws_secret_access_key=SECRET_KEY,
                    config=Config(signature_version='s3v4'))

import cv2, time


def getImage(timer):
    video = cv2.VideoCapture(0)
    a = 0
    while a < timer:
        a = a + 1
        here = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(here, 'example.jpg')
        data = open(filename, 'rb')
        check, frame = video.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gener = random.randint(
Example #4
import json
import boto3
from botocore.client import Config

lambda_client = boto3.client("lambda",
                             config=Config(connect_timeout=930,
                                           read_timeout=930,
                                           retries=dict(max_attempts=0)))


def lambda_invoker(function_name,
                   payload,
                   type="RequestResponse",
                   qualifier="prod"):
    response = lambda_client.invoke(
        FunctionName=function_name,
        InvocationType=type,
        Payload=json.dumps(payload),
        Qualifier=qualifier,
    )
    response["Payload"] = json.load(response["Payload"])
    return response
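A short usage sketch of the helper above; the function name, payload, and alias are placeholders:

# Hypothetical invocation; "my-function" and the payload are illustrative only.
result = lambda_invoker("my-function", {"action": "ping"}, qualifier="prod")
print(result["StatusCode"], result["Payload"])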
Example #5
def lambda_handler(event, context):
    timeofevent = datetime.today() - timedelta(hours=6)
    timeofevent = timeofevent.strftime("%Y-%m-%d-%H-%M-%S")
    #print timeofevent
    s3resource = boto3.resource('s3', config=Config(signature_version='s3v4'), region_name="us-east-1")
    s3resource.meta.client.download_file(bucket, c7n_org_config_template_Key, '/tmp/config.txt')
    f = open('/tmp/config.txt', 'r')
    ConfigTemplateOriginal = f.read()
    
    ConfigFileRegional=[]
    ConfigFileRegional.append("accounts:")
    ConfigFileGlobal= []
    ConfigFileGlobal.append("accounts:")
    
    next_token = ' '
    paginator = orgs.get_paginator('list_accounts_for_parent')
    all_federated_accounts_info = paginator.paginate(
    ParentId=OrgsParentId
    )
    AccountNumbers = []
    AccountNumbersAndNames = []
    #print all_federated_accounts
    for all_federated_accounts in all_federated_accounts_info:
        for this_account in all_federated_accounts['Accounts']:
            ConfigTemplate = ConfigTemplateOriginal
            account_name = this_account['Name']
            account_output = this_account['Id']
            #print 'Account      ----------------------------------------------------- '
            print(json.dumps(account_output, default=json_serial))
            AccountNumbers.append(account_output)
            AccountNumbersAndNames.append(account_output + ':' + account_name)
            ConfigTemplate = ConfigTemplate.replace("<Account_Number>", account_output)
            ConfigTemplate = ConfigTemplate.replace("<Account_Name>", account_name)
            ConfigTemplate = ConfigTemplate.replace("<Role_Name>", RoleName)
            ConfigFileRegional.append(" ")
            ConfigFileRegional.append(ConfigTemplate)
    #print response
    print(AccountNumbersAndNames)
    
    for all_federated_accounts in all_federated_accounts_info:
        for this_account in all_federated_accounts['Accounts']:
            ConfigTemplate = ConfigTemplateOriginal
            account_name = this_account['Name']
            account_output = this_account['Id']
            ConfigTemplate = ConfigTemplate.replace("<Account_Number>", account_output)
            ConfigTemplate = ConfigTemplate.replace("<Account_Name>", account_name)
            ConfigTemplate = ConfigTemplate.replace("<Role_Name>", RoleName)
            ConfigFileGlobal.append(" ")
            ConfigFileGlobal.append(ConfigTemplate)

    
  
    
    s3Client = boto3.client('s3', 
        config=Config(signature_version='s3v4'), 
        region_name="us-east-1",
    )

    response = s3Client.put_object(
        Body=json.dumps(AccountNumbers),
        Bucket=bucket,
        Key=AccountNumbers_Key
    )
    
    response = s3Client.put_object(
        Body=json.dumps(AccountNumbersAndNames),
        Bucket=bucket,
        Key=AccountNumbersAndNames_Key
    )

    #print ConfigFileRegional-Regional
    ConfigFileRegional = '\n'.join(ConfigFileRegional)
    
    response = s3Client.put_object(
        Body=ConfigFileRegional,
        Bucket=bucket,
        Key=custodian_config_key_regional
    )
    
    ConfigFileGlobal = '\n'.join(ConfigFileGlobal)
    
    response = s3Client.put_object(
        Body=ConfigFileGlobal,
        Bucket=bucket,
        Key=custodian_config_key_global
    )
Example #6
def create_cisco_config(bucket_name, bucket_key, s3_url, bgp_asn, ssh):
    log.info("Processing %s/%s", bucket_name, bucket_key)

    #Download the VPN configuration XML document
    s3 = boto3.client('s3',
                      endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'},
                                    signature_version='s3v4'))
    config = s3.get_object(Bucket=bucket_name, Key=bucket_key)

    xmldoc = minidom.parseString(config['Body'].read())
    #Extract transit_vpc_configuration values
    vpn_config = xmldoc.getElementsByTagName("transit_vpc_config")[0]
    account_id = vpn_config.getElementsByTagName(
        "account_id")[0].firstChild.data
    vpn_endpoint = vpn_config.getElementsByTagName(
        "vpn_endpoint")[0].firstChild.data
    vpn_status = vpn_config.getElementsByTagName("status")[0].firstChild.data
    preferred_path = vpn_config.getElementsByTagName(
        "preferred_path")[0].firstChild.data

    #Extract VPN connection information
    vpn_connection = xmldoc.getElementsByTagName('vpn_connection')[0]
    vpn_connection_id = vpn_connection.attributes['id'].value
    customer_gateway_id = vpn_connection.getElementsByTagName(
        "customer_gateway_id")[0].firstChild.data
    vpn_gateway_id = vpn_connection.getElementsByTagName(
        "vpn_gateway_id")[0].firstChild.data
    vpn_connection_type = vpn_connection.getElementsByTagName(
        "vpn_connection_type")[0].firstChild.data

    #Determine the VPN tunnels to work with
    if vpn_status == 'create':
        tunnelId = getNextTunnelId(ssh)
    else:
        tunnelId = getExistingTunnelId(ssh, vpn_connection_id)
        if tunnelId == 0:
            return

    log.info("%s %s with tunnel #%s and #%s.", vpn_status, vpn_connection_id,
             tunnelId, tunnelId + 1)
    # Create or delete the VRF for this connection
    if vpn_status == 'delete':
        ipsec_tunnel = vpn_connection.getElementsByTagName("ipsec_tunnel")[0]
        customer_gateway = ipsec_tunnel.getElementsByTagName(
            "customer_gateway")[0]
        customer_gateway_bgp_asn = customer_gateway.getElementsByTagName(
            "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
        #Remove VPN configuration for both tunnels
        config_text = ['router bgp {}'.format(customer_gateway_bgp_asn)]
        config_text.append(
            '  no address-family ipv4 vrf {}'.format(vpn_connection_id))
        config_text.append('exit')
        config_text.append('no ip vrf {}'.format(vpn_connection_id))
        config_text.append('interface Tunnel{}'.format(tunnelId))
        config_text.append('  shutdown')
        config_text.append('exit')
        config_text.append('no interface Tunnel{}'.format(tunnelId))
        config_text.append('interface Tunnel{}'.format(tunnelId + 1))
        config_text.append('  shutdown')
        config_text.append('exit')
        config_text.append('no interface Tunnel{}'.format(tunnelId + 1))
        config_text.append(
            'no route-map rm-{} permit'.format(vpn_connection_id))
        # Cisco requires waiting 60 seconds before removing the isakmp profile
        config_text.append('WAIT')
        config_text.append('WAIT')
        config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(
            vpn_connection_id, tunnelId))
        config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(
            vpn_connection_id, tunnelId + 1))
        config_text.append('no crypto keyring keyring-{}-{}'.format(
            vpn_connection_id, tunnelId))
        config_text.append('no crypto keyring keyring-{}-{}'.format(
            vpn_connection_id, tunnelId + 1))
    else:
        # Create global tunnel configuration
        config_text = ['ip vrf {}'.format(vpn_connection_id)]
        config_text.append(' rd {}:{}'.format(bgp_asn, tunnelId))
        config_text.append(' route-target export {}:0'.format(bgp_asn))
        config_text.append(' route-target import {}:0'.format(bgp_asn))
        config_text.append('exit')
        # Check to see if a route map is needed for creating a preferred path
        if preferred_path != 'none':
            config_text.append(
                'route-map rm-{} permit'.format(vpn_connection_id))
            # If the preferred path is this transit VPC vpn endpoint, then set a shorter as-path prepend than if it is not
            if preferred_path == vpn_endpoint:
                config_text.append('  set as-path prepend {}'.format(bgp_asn))
            else:
                config_text.append('  set as-path prepend {} {}'.format(
                    bgp_asn, bgp_asn))
            config_text.append('exit')

        # Create tunnel specific configuration
        for ipsec_tunnel in vpn_connection.getElementsByTagName(
                "ipsec_tunnel"):
            customer_gateway = ipsec_tunnel.getElementsByTagName(
                "customer_gateway")[0]
            customer_gateway_tunnel_outside_address = customer_gateway.getElementsByTagName(
                "tunnel_outside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_ip_address = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_mask = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_mask")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_cidr = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_cidr")[0].firstChild.data
            customer_gateway_bgp_asn = customer_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            customer_gateway_bgp_hold_time = customer_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data

            vpn_gateway = ipsec_tunnel.getElementsByTagName("vpn_gateway")[0]
            vpn_gateway_tunnel_outside_address = vpn_gateway.getElementsByTagName(
                "tunnel_outside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_ip_address = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_mask = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_mask")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_cidr = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_cidr")[0].firstChild.data
            vpn_gateway_bgp_asn = vpn_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            vpn_gateway_bgp_hold_time = vpn_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data

            ike = ipsec_tunnel.getElementsByTagName("ike")[0]
            ike_authentication_protocol = ike.getElementsByTagName(
                "authentication_protocol")[0].firstChild.data
            ike_encryption_protocol = ike.getElementsByTagName(
                "encryption_protocol")[0].firstChild.data
            ike_lifetime = ike.getElementsByTagName(
                "lifetime")[0].firstChild.data
            ike_perfect_forward_secrecy = ike.getElementsByTagName(
                "perfect_forward_secrecy")[0].firstChild.data
            ike_mode = ike.getElementsByTagName("mode")[0].firstChild.data
            ike_pre_shared_key = ike.getElementsByTagName(
                "pre_shared_key")[0].firstChild.data

            ipsec = ipsec_tunnel.getElementsByTagName("ipsec")[0]
            ipsec_protocol = ipsec.getElementsByTagName(
                "protocol")[0].firstChild.data
            ipsec_authentication_protocol = ipsec.getElementsByTagName(
                "authentication_protocol")[0].firstChild.data
            ipsec_encryption_protocol = ipsec.getElementsByTagName(
                "encryption_protocol")[0].firstChild.data
            ipsec_lifetime = ipsec.getElementsByTagName(
                "lifetime")[0].firstChild.data
            ipsec_perfect_forward_secrecy = ipsec.getElementsByTagName(
                "perfect_forward_secrecy")[0].firstChild.data
            ipsec_mode = ipsec.getElementsByTagName("mode")[0].firstChild.data
            ipsec_clear_df_bit = ipsec.getElementsByTagName(
                "clear_df_bit")[0].firstChild.data
            ipsec_fragmentation_before_encryption = ipsec.getElementsByTagName(
                "fragmentation_before_encryption")[0].firstChild.data
            ipsec_tcp_mss_adjustment = ipsec.getElementsByTagName(
                "tcp_mss_adjustment")[0].firstChild.data
            ipsec_dead_peer_detection_interval = ipsec.getElementsByTagName(
                "dead_peer_detection")[0].getElementsByTagName(
                    "interval")[0].firstChild.data
            ipsec_dead_peer_detection_retries = ipsec.getElementsByTagName(
                "dead_peer_detection")[0].getElementsByTagName(
                    "retries")[0].firstChild.data

            config_text.append('crypto keyring keyring-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append('  local-address GigabitEthernet1')
            config_text.append('  pre-shared-key address {} key {}'.format(
                vpn_gateway_tunnel_outside_address, ike_pre_shared_key))
            config_text.append('exit')
            config_text.append('crypto isakmp profile isakmp-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append('  local-address GigabitEthernet1')
            config_text.append('  match identity address {}'.format(
                vpn_gateway_tunnel_outside_address))
            config_text.append('  keyring keyring-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append('exit')
            config_text.append('interface Tunnel{}'.format(tunnelId))
            config_text.append(
                '  description {} from {} to {} for account {}'.format(
                    vpn_connection_id, vpn_gateway_id, customer_gateway_id,
                    account_id))
            config_text.append(
                '  ip vrf forwarding {}'.format(vpn_connection_id))
            config_text.append('  ip address {} 255.255.255.252'.format(
                customer_gateway_tunnel_inside_address_ip_address))
            config_text.append('  ip virtual-reassembly')
            config_text.append('  tunnel source GigabitEthernet1')
            config_text.append('  tunnel destination {} '.format(
                vpn_gateway_tunnel_outside_address))
            config_text.append('  tunnel mode ipsec ipv4')
            config_text.append(
                '  tunnel protection ipsec profile ipsec-vpn-aws')
            config_text.append('  ip tcp adjust-mss 1387')
            config_text.append('  no shutdown')
            config_text.append('exit')
            config_text.append(
                'router bgp {}'.format(customer_gateway_bgp_asn))
            config_text.append(
                '  address-family ipv4 vrf {}'.format(vpn_connection_id))
            config_text.append('  neighbor {} remote-as {}'.format(
                vpn_gateway_tunnel_inside_address_ip_address,
                vpn_gateway_bgp_asn))
            if preferred_path != 'none':
                config_text.append('  neighbor {} route-map rm-{} out'.format(
                    vpn_gateway_tunnel_inside_address_ip_address,
                    vpn_connection_id))
            config_text.append('  neighbor {} timers 10 30 30'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('  neighbor {} activate'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('  neighbor {} as-override'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(
                '  neighbor {} soft-reconfiguration inbound'.format(
                    vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('  neighbor {} next-hop-self'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('exit')
            config_text.append('exit')

            #Increment tunnel ID for going onto the next tunnel
            tunnelId += 1

    log.debug("Conversion complete")
    return config_text
Example #7
import logging
import os
import json

import awswrangler as wr

from os.path import dirname, abspath

import boto3

from boto3.dynamodb.conditions import Key
from botocore.client import Config

_logger: logging.Logger = logging.getLogger(__name__)

sfn_client_config = Config(connect_timeout=50, read_timeout=70)
sfn = boto3.client('stepfunctions', config=sfn_client_config)
sts = boto3.client('sts')
crawler = boto3.client('glue')
dynamodb = boto3.resource('dynamodb')

# todo : draft function >> work in progress

_config = {
    "sfn_activity_arn":
    "arn:aws:states:{}:{}:activity:QuickSightRunnerActivity".format(
        os.getenv("REGION", "eu-west-2"),
        sts.get_caller_identity()['Account']),
    "sfn_worker_name":
    "quicksightrunner",
    "ddb_table":
Example #8
import os
import tempfile
import urllib.request

import boto3
from botocore.client import Config


def CardCreator(company, phoneNumber, street, city, region, code, country, website, note):
    
    access_key = os.environ['S3_KEY']
    secret_key = os.environ['S3_SECRET']
    regionS3 = "eu-central-1"
    key = "contactcards/" + str(company) + '.vcf'
    bucket = os.environ['S3_BUCKET']
    file = tempfile.TemporaryFile("w+b")

    file.write("BEGIN:VCARD\nVERSION:3.0\nPRODID:-//Apple Inc.//iPhone OS 13.0//EN\nN:;;;;\n".encode("utf-8"))
    file.write(("FN:" + str(company) + "\n").encode("utf-8"))
    file.write(("ORG:" + str(company) + "\n").encode("utf-8"))
    if note:
        file.write("NOTE:".encode("utf-8"))
        for i in range(0, len(note)):
            if i == len(note) - 1:
                file.write(str(note[i]).encode("utf-8"))
                continue
            file.write((str(note[i]) + "\\n").encode("utf-8"))

    file.write(("\nTEL;type=WORK;type=VOICE;type=pref:" + str(phoneNumber) + "\n").encode("utf-8"))
    file.write(("item1.ADR;type=WORK;type=pref:;;" + str(street) + ";" + str(city) + ";" + str(region) + ";" + str(code) + ";" + str(country) + "\n").encode("utf-8"))
    file.write("item1.X-ABADR:de\n".encode("utf-8"))
    file.write(("item2.URL;type=pref:" + str(website) + "\n").encode("utf-8"))
    file.write("item2.X-ABLabel:_$!<HomePage>!$_\nX-ABShowAs:COMPANY\nEND:VCARD\n".encode("utf-8"))

    file.seek(0)
    s3_client = boto3.client('s3', region_name=regionS3, aws_access_key_id=access_key, aws_secret_access_key=secret_key, config=Config(signature_version='s3v4'))
    s3 = boto3.resource('s3', region_name=regionS3, aws_access_key_id=access_key, aws_secret_access_key=secret_key, config=Config(signature_version='s3v4'))

    s3.Bucket(bucket).put_object(Key=key, Body=file)
    objectUrl = s3_client.generate_presigned_url(ClientMethod='get_object', Params={'Bucket': bucket, 'Key': key})
    response = urllib.request.urlretrieve(str(objectUrl))
    print(response)
    
    return objectUrl
Example #9
import boto3
from botocore.client import Config

s3 = boto3.client('s3', config=Config(signature_version='s3v4'))


def lambda_handler(event, context):

    # link to download certificate

    key = 'certs/' + str(event['params']['path']['id']) + '.json'

    url = s3.generate_presigned_url(ClientMethod='get_object',
                                    Params={
                                        'Bucket': 'fs.blockcert.poc',
                                        'Key': key,
                                        'ResponseContentDisposition':
                                        'attachment'
                                    })

    return {"location": url}
Example #10
def open_portal(context, args):
    project_resources = context.config.project_resources

    if constant.PROJECT_CGP_RESOURCE_NAME not in project_resources:
        raise HandledError(
            'You can not open the Cloud Gem Portal without having the Cloud Gem Portal gem installed in your project.'
        )

    project_config_bucket_id = context.config.configuration_bucket_name
    cgp_s3_resource = project_resources[constant.PROJECT_CGP_RESOURCE_NAME]
    stackid = cgp_s3_resource['StackId']
    bucket_id = cgp_s3_resource['PhysicalResourceId']
    expiration = args.duration_seconds if args.duration_seconds else constant.PROJECT_CGP_DEFAULT_EXPIRATION_SECONDS  # default comes from argparse only on cli, gui call doesn't provide a default expiration
    region = resource_manager.util.get_region_from_arn(stackid)
    s3_client = context.aws.session.client(
        's3', region, config=Config(signature_version='s3v4'))
    user_pool_resource = project_resources[
        constant.PROJECT_RESOURCE_NAME_USER_POOL]
    identity_pool_resource = project_resources[
        constant.PROJECT_RESOURCE_NAME_IDENTITY_POOL]

    if 'CloudGemPortalApp' not in user_pool_resource['UserPoolClients']:
        credentials = context.aws.load_credentials()
        access_key = credentials.get(constant.DEFAULT_SECTION_NAME,
                                     constant.ACCESS_KEY_OPTION)
        raise HandledError(
            'The Cognito user pool \'{}\' is missing the \'CloudGemPortalApp\' app client.  Ensure the Lumberyard user \'{}\' with AWS access key identifier \'{}\' in the Lumberyard Credentials Manager has the policy \'AmazonCognitoReadOnly\' attached and a project stack has been created (Lumberyard -> AWS -> Resource Manager).'
            .format(constant.PROJECT_RESOURCE_NAME_USER_POOL,
                    context.config.user_default_profile, access_key))
    client_id = user_pool_resource['UserPoolClients']['CloudGemPortalApp'][
        'ClientId']
    user_pool_id = user_pool_resource['PhysicalResourceId']
    identity_pool_id = identity_pool_resource['PhysicalResourceId']

    #create an administrator account if one is not present
    output = __validate_administrator_account(context, args)
    admin_account_created = __is_first_time_usage(output)

    #Request the index file
    try:
        s3_index_obj_request = s3_client.get_object(
            Bucket=bucket_id, Key=constant.PROJECT_CGP_ROOT_FILE)
    except ClientError as e:
        raise HandledError(
            "Could not read from the key '{}' in the S3 bucket '{}'.".format(
                constant.PROJECT_CGP_ROOT_FILE, bucket_id), e)

    #Does the user have access to it?
    if s3_index_obj_request['ResponseMetadata']['HTTPStatusCode'] != 200:
        raise HandledError(
            "The user does not have access to the index.html file.  This Cloud Gem Portal site will not load."
        )

    content = s3_index_obj_request['Body'].read().decode('utf-8')

    if args.show_current_configuration:
        try:
            cgp_current_bootstrap_config = s3_client.get_object(
                Bucket=bucket_id, Key=constant.PROJECT_CGP_ROOT_SUPPORT_FILE)
            cgp_current_bootstrap_config = cgp_current_bootstrap_config[
                'Body'].read().decode('utf-8')
            context.view._output_message(
                cgp_current_bootstrap_config.replace(BOOTSTRAP_VARIABLE_NAME,
                                                     ''))
            return
        except ClientError as e:
            raise HandledError(
                "Could not read from the key '{}' in the S3 bucket '{}'.".
                format(constant.PROJECT_CGP_ROOT_SUPPORT_FILE, bucket_id), e)

    cgp_bootstrap_config = {
        "clientId": client_id,
        "userPoolId": user_pool_id,
        "identityPoolId": identity_pool_id,
        "projectConfigBucketId": project_config_bucket_id,
        "region": region,
        "firstTimeUse": admin_account_created,
        "cognitoDev": args.cognito_dev if args.cognito_dev != "''" else None,
        "cognitoProd":
        args.cognito_prod if args.cognito_prod != "''" else None,
        "cognitoTest": args.cognito_test if args.cognito_test != "''" else None
    }

    content = set_presigned_urls(content, bucket_id, s3_client, expiration,
                                 region)
    result = None
    try:
        # TODO: write to an unique name and configure bucket to auto delete these objects after 1 hour
        # the max allowed --duration-seconds value.
        s3_client.put_object(Bucket=bucket_id,
                             Key=constant.PROJECT_CGP_ROOT_SUPPORT_FILE,
                             Body="var bootstrap = {}".format(
                                 json.dumps(cgp_bootstrap_config)),
                             ContentType='text/html')
        result = s3_client.put_object(Bucket=bucket_id,
                                      Key=constant.PROJECT_CGP_ROOT_FILE,
                                      Body=content,
                                      ContentType='text/html')
    except ClientError as e:
        if e.response["Error"]["Code"] in ["AccessDenied"]:
            credentials = context.aws.load_credentials()
            access_key = credentials.get(constant.DEFAULT_SECTION_NAME,
                                         constant.ACCESS_KEY_OPTION)
            context.view._output_message(
                "The Lumberyard user '{0}' associated with AWS IAM access key identifier '{1}' is missing PUT permissions on the S3 bucket '{2}'. Now attempting to use old Cloud Gem Portal pre-signed urls.\nHave the administrator grant the AWS user account with access key '{1}' S3 PUT permissions for bucket '{2}'"
                .format(context.config.user_default_profile, access_key,
                        bucket_id))
        else:
            raise HandledError(
                "Could not write to the key '{}' in the S3 bucket '{}'.".format(
                    constant.PROJECT_CGP_ROOT_FILE, bucket_id), e)

    if result is None or result['ResponseMetadata']['HTTPStatusCode'] == 200:
        if result is not None and not set_bucket_cors(
                context, project_config_bucket_id, region):
            raise HandledError(
                "Warning: the Cross Origin Resource Sharing (CORS) policy could not be set:  Access Denied.  This may prevent the Cloud Gem Portal from accessing the project's project-settings.json file."
            )

        #generate presigned url
        secured_url = __get_presigned_url(s3_client, bucket_id,
                                          constant.PROJECT_CGP_ROOT_FILE,
                                          expiration)

        __updateUserPoolEmailMessage(context, secured_url,
                                     project_config_bucket_id)
        if args.show_configuration:
            context.view._output_message(json.dumps(cgp_bootstrap_config))

        if args.show_url_only:
            context.view._output_message(secured_url)
        else:
            webbrowser.open_new(secured_url)
    else:
        raise HandledError(
            "The index.html could not be set in the S3 bucket '{}'.  This Cloud Gem Portal site will not load."
            .format(bucket_id))

Example #11
import types
import pandas as pd
from botocore.client import Config
import ibm_boto3

def __iter__(self): return 0

# @hidden_cell
# The following code accesses a file in your IBM Cloud Object Storage. It includes your credentials.
# You might want to remove those credentials before you share your notebook.
client_bba26c7923b04a17832943e1b244b58e = ibm_boto3.client(service_name='s3',
    ibm_api_key_id='MTHFdZK_I6-W1zWk4b-hEUyJA2ihmrD9b61PU91_ib9B',
    ibm_auth_endpoint="https://iam.bluemix.net/oidc/token",
    config=Config(signature_version='oauth'),
    endpoint_url='https://s3.eu-geo.objectstorage.service.networklayer.com')

body = client_bba26c7923b04a17832943e1b244b58e.get_object(Bucket='graduateadmissionprediction-donotdelete-pr-8bat18q13wgs5s',Key='Admission_Predict_Ver1.1.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )

dataset = pd.read_csv(body)
dataset.head()


# In[54]:


dataset
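An alternative to patching __iter__ onto the streaming body, assuming the same client and object: buffer the bytes in memory first and hand the buffer to pandas.

import io

# Hedged alternative: read the whole object into an in-memory buffer for pandas.
obj = client_bba26c7923b04a17832943e1b244b58e.get_object(
    Bucket='graduateadmissionprediction-donotdelete-pr-8bat18q13wgs5s',
    Key='Admission_Predict_Ver1.1.csv')
dataset = pd.read_csv(io.BytesIO(obj['Body'].read()))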
Example #12
import argparse
import concurrent.futures
from botocore.client import Config
import boto3
import time
import tempfile

config = Config(connect_timeout=5, read_timeout=5, retries={'max_attempts': 1})

s3 = boto3.client('s3', config=config)

parser = argparse.ArgumentParser(
    description='MegaJQ runs SQL-statements through S3-prefixes parallel')

parser.add_argument("bucket", help="S3 bucket where select will be run")
parser.add_argument("prefix", help="S3 key prefix")
parser.add_argument(
    "query",
    help=
    "SQL-query, see https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference-select.html"
)
parser.add_argument("output", help="Output file")

args = parser.parse_args()
print(args)

BUCKET = args.bucket
PREFIX = args.prefix
query = args.query

OUTPUT = args.output
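The description says the script fans SQL statements out over S3 keys under the prefix; a minimal, hedged sketch of a single S3 Select call that the workers might make (the serialization settings are assumptions, since the rest of the script is not shown):

# Hedged sketch of one S3 Select call; input/output serialization is an assumption.
def run_select(key):
    resp = s3.select_object_content(
        Bucket=BUCKET,
        Key=key,
        Expression=query,
        ExpressionType='SQL',
        InputSerialization={'JSON': {'Type': 'LINES'}},
        OutputSerialization={'JSON': {}},
    )
    records = []
    for event in resp['Payload']:          # event stream of Records/Stats/End events
        if 'Records' in event:
            records.append(event['Records']['Payload'])
    return b''.join(records)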
Example #13
import os

from botocore.client import Config
import numpy as np
from PIL import Image
from tqdm import tqdm
from joblib import Parallel, delayed, cpu_count
import math
import tifffile as tf
from util import (
    tqdm_joblib,
    chunks,
    imgResample,
    upload_file_to_s3,
    S3Url,
    s3_object_exists,
)

config = Config(connect_timeout=5, retries={"max_attempts": 5})


def get_out_path(in_path, outdir):
    head, fname = os.path.split(in_path)
    head_tmp = head.split("/")
    head = f"{outdir}/" + "/".join(head_tmp[-1:])
    idx = fname.find(".")
    fname_new = fname[:idx] + "_corrected.tiff"
    out_path = f"{head}/{fname_new}"
    os.makedirs(head, exist_ok=True)  # succeeds even if directory exists.
    return out_path


def get_all_s3_objects(s3, **base_kwargs):
    continuation_token = None
Example #14
import boto3
from botocore.client import Config
import io
import zipfile
import mimetypes

s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))

portfolio_bucket = s3.Bucket('portfolio.bala.info')
build_bucket = s3.Bucket('portfolio.build.bala.info')

portfolio_zip = io.BytesIO()
build_bucket.download_fileobj('portfoliobuild.zip', portfolio_zip)

with zipfile.ZipFile(portfolio_zip) as myzip:
    for nm in myzip.namelist():
        obj = myzip.open(nm)
        portfolio_bucket.upload_fileobj(
            obj, nm, ExtraArgs={'ContentType': mimetypes.guess_type(nm)[0]})
        portfolio_bucket.Object(nm).Acl().put(ACL='public-read')
Example #15
 def _create_client(self):
     return self._client_creator(
         'sts', config=Config(signature_version=botocore.UNSIGNED))
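The same unsigned-request pattern works for any anonymous call; a small sketch for S3, assuming a public bucket (the bucket name and prefix are placeholders):

import boto3
import botocore
from botocore.client import Config

# Anonymous client: botocore.UNSIGNED disables request signing entirely.
s3 = boto3.client('s3', config=Config(signature_version=botocore.UNSIGNED))
# Hypothetical public bucket/prefix, used purely for illustration.
resp = s3.list_objects_v2(Bucket='some-public-bucket', Prefix='data/')
for obj in resp.get('Contents', []):
    print(obj['Key'])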
Example #16
 def create_client(self):
     # Even though the default signature_version is s3,
     # we're being explicit in case this ever changes.
     client_config = Config(signature_version='s3')
     return self.session.create_client('s3', self.region,
                                       config=client_config)
Example #17
def main(args):

    DOMAIN = 'ITD'
    VERSION = '1'

    taskName = os.path.basename(__file__)[:-3]

    logging.config.fileConfig('/Assets/sharedLibraries/logging_config.ini')
    logging.debug("Creating SWF boto client")
    botoConfig = Config(
        connect_timeout=50,
        read_timeout=70)  # recommended: keep read_timeout higher than connect_timeout
    swf = boto3.client('swf', config=botoConfig)
    logging.debug("Created SWF boto client: %s", swf)

    BUCKETNAME = "schulerfiles"
    workingStorage = "/Assets/working/"

    while True:

        task = swf.poll_for_activity_task(domain=DOMAIN,
                                          taskList={'name': taskName},
                                          identity='%s-01' % (taskName))

        if 'taskToken' not in task:
            logging.info("%s - Poll timed out, no new task. Repoll", taskName)

        # Run the operation
        else:
            taskToken = task['taskToken']
            workID = task['workflowExecution']['workflowId']
            logging.info("[%s] New request for %s", workID, taskName)

            INPUT = json.loads(task['input'])

            source = INPUT['locationSource']
            destination = INPUT['locationDestination']
            dbPrimaryKey = INPUT['dbPrimaryKey']
            fileKey = INPUT['fileKey'] + '/'

            # Bucket object is necessary in all cases
            logging.debug("[%s] Creating S3 bucket boto client", workID)
            s3 = boto3.resource('s3')
            bucket = s3.Bucket(BUCKETNAME)
            logging.debug("[%s] Created S3 bucket boto client", workID)

            # Setting the storage class to be used for later
            s3StorageClass = 'STANDARD'
            if destination == 'near_line':
                s3StorageClass = 'STANDARD_IA'

            logging.info("[%s] Moving %s from %s to %s", workID, fileKey,
                         source, destination)
            # CDN and near_line are both S3 tiers, so all we are doing is changing the Storage Class with a PUT
            if (source == 'CDN' and destination == 'near_line') or (
                    source == 'near_line' and destination == 'CDN'):

                logging.info("[%s] Moving objects between S3 and S3IA", workID)
                for obj in bucket.objects.filter(Prefix=fileKey):
                    logging.debug(
                        "[%s] Moving object %s from %s to %s object: ", workID,
                        obj.key, source, destination)

                    copy_source = {'Bucket': bucket.name, 'Key': obj.key}

                    response = s3.meta.client.copy_object(
                        CopySource=copy_source,
                        Bucket=bucket.name,
                        Key=obj.key,
                        StorageClass=s3StorageClass)
                    logging.debug("[%s] Object moved: ", workID, response)

                OUTPUT = {
                    'result': 'success',
                }

            # If we need to move to or restore from archive, we need to run the whole gamut
            elif 'archive' in [source, destination]:  #Glacier

                # Create Glacier object

                # Create directory in working storage
                subDir = parseHelper.createDir(workingStorage, fileKey)

                # Pull down from glacier
                if source == 'archive':
                    logging.info("[%s] Moving asset from Glacier", workID)
                else:
                    logging.info("[%s] Begin moving objects to Glacier",
                                 workID)
                    logging.info("[%s] Begin object download", workID)
                    # Download object to the working storage subdirectory
                    # Upload files back up to the same fileKey (this takes Accounts into consideration as well)
                    for obj in bucket.objects.filter(Prefix=fileKey):
                        logging.info(
                            "[%s] Downloading %s to temporary storage", workID,
                            obj.key)
                        fileName = os.path.join(workingStorage, obj.key)
                        if not os.path.exists(os.path.dirname(fileName)):
                            try:
                                os.makedirs(os.path.dirname(fileName))
                            except OSError as exc:  # Guard against race condition
                                if exc.errno != errno.EEXIST:
                                    raise

                        s3.Object(bucket.name, obj.key).download_file(
                            fileName)  # Create directories as needed here

                    logging.info("[%s] Begin object upload to glacier", workID)

                # Output needs the temporary storage location to clean up
                # cleanUpLandingPads expects an ASSET (e.g., /Assets/working/file.ext), and not just a path. We will provide a dummy asset
                OUTPUT = {
                    'result': 'success',
                    'asset': '%sdummy.file' % (subDir)
                }

            AUDIT = {}
            AUDIT['User'] = '******'
            AUDIT['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%S+0000",
                                               time.gmtime())
            AUDIT['Action'] = 'Asset moved from %s to %s' % (source,
                                                             destination)
            AUDIT['Notes'] = workID

            # Add the Audit Dictionary to a list so that we can append it
            aLIST = []
            aLIST.append(AUDIT)

            updateExpression = 'set File_Location = :d, Audit = list_append(Audit, :a)'

            expressionValues = {':d': destination, ':a': aLIST}
            # Call the update function
            logging.debug("[%s] Updating the asset location and history: %s",
                          workID, destination)
            response = databaseHelper.updateEntry(dbPrimaryKey,
                                                  updateExpression,
                                                  expressionValues)

            OUTPUT.update(INPUT)

            swf.respond_activity_task_completed(taskToken=taskToken,
                                                result=json.dumps(OUTPUT))

            logging.info("[%s] %s Complete", workID, taskName)
Example #18
 def create_client(self):
     client_config = Config(signature_version='s3v4')
     return self.session.create_client('s3', self.region,
                                       config=client_config)
Example #19
 def getClient(self, name, awsRegion):
     config = Config(retries=dict(max_attempts=5))
     return boto3.client(name, region_name=awsRegion, config=config)
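Newer botocore releases also accept a retry mode alongside max_attempts; a hedged variant of the helper above (the 'standard' and 'adaptive' mode names are real botocore options, the method name is illustrative):

 def getClientWithRetryMode(self, name, awsRegion):
     # 'legacy', 'standard', and 'adaptive' retry modes are available in recent botocore versions.
     config = Config(retries={'max_attempts': 5, 'mode': 'standard'})
     return boto3.client(name, region_name=awsRegion, config=config)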
Example #20
 def create_client(self):
     return self.session.create_client(
         's3', region_name=self.region,
         config=Config(s3={'addressing_style': self.addressing_style}))
Example #21
def get_signed_url(rse_id, service, operation, url, lifetime=600):
    """
    Get a signed URL for a particular service and operation.

    The signed URL is valid for 600 seconds by default; pass `lifetime` to override this.

    :param rse_id: The ID of the RSE that the URL points to.
    :param service: The service to authorise, either 'gcs', 's3' or 'swift'.
    :param operation: The operation to sign, either 'read', 'write', or 'delete'.
    :param url: The URL to sign.
    :param lifetime: Lifetime of the signed URL in seconds.
    :returns: Signed URL as a variable-length string.
    """

    global CREDS_GCS

    if service not in ['gcs', 's3', 'swift']:
        raise UnsupportedOperation('Service must be "gcs", "s3" or "swift"')

    if operation not in ['read', 'write', 'delete']:
        raise UnsupportedOperation(
            'Operation must be "read", "write", or "delete"')

    if url is None or url == '':
        raise UnsupportedOperation('URL must not be empty')

    if lifetime:
        if not isinstance(lifetime, integer_types):
            try:
                lifetime = int(lifetime)
            except (TypeError, ValueError):
                raise UnsupportedOperation(
                    'Lifetime must be convertible to numeric.')

    signed_url = None
    if service == 'gcs':
        if not CREDS_GCS:
            CREDS_GCS = ServiceAccountCredentials.from_json_keyfile_name(
                config_get(
                    'credentials',
                    'gcs',
                    raise_exception=False,
                    default='/opt/rucio/etc/google-cloud-storage-test.json'))

        # select the correct operation
        operations = {'read': 'GET', 'write': 'PUT', 'delete': 'DELETE'}
        operation = operations[operation]

        # special case to test signature, force epoch time
        if lifetime is None:
            lifetime = 0
        else:
            # GCS is timezone-sensitive, don't use UTC
            # has to be converted to Unixtime
            lifetime = datetime.datetime.now() + datetime.timedelta(
                seconds=lifetime)
            lifetime = int(time.mktime(lifetime.timetuple()))

        # sign the path only
        path = urlparse(url).path

        # assemble message to sign
        to_sign = "%s\n\n\n%s\n%s" % (operation, lifetime, path)

        # create URL-capable signature
        # first character is always a '=', remove it
        signature = urlencode(
            {'': base64.b64encode(CREDS_GCS.sign_blob(to_sign)[1])})[1:]

        # assemble final signed URL
        signed_url = 'https://storage.googleapis.com:443%s?GoogleAccessId=%s&Expires=%s&Signature=%s' % (
            path, CREDS_GCS.service_account_email, lifetime, signature)

    elif service == 's3':
        # split URL to get hostname, bucket and key
        components = urlparse(url)
        host = components.netloc
        pathcomponents = components.path.split('/')
        if len(pathcomponents) < 3:
            raise UnsupportedOperation('Not a valid S3 URL')
        bucket = pathcomponents[1]
        key = '/'.join(pathcomponents[2:])

        # remove port number from host if present
        colon = host.find(':')
        port = '443'
        if colon >= 0:
            port = host[colon + 1:]
            host = host[:colon]

        # look up in RSE account configuration by RSE ID
        cred_name = rse_id
        cred = REGION.get('s3-%s' % cred_name)
        if cred is NO_VALUE:
            rse_cred = get_rse_credentials()
            cred = rse_cred.get(cred_name)
            REGION.set('s3-%s' % cred_name, cred)
        access_key = cred['access_key']
        secret_key = cred['secret_key']
        signature_version = cred['signature_version']
        region_name = cred['region']

        if operation == 'read':
            s3op = 'get_object'
        elif operation == 'write':
            s3op = 'put_object'
        else:
            s3op = 'delete_object'

        with record_timer_block('credential.signs3'):
            s3 = boto3.client('s3',
                              endpoint_url='https://' + host + ':' + port,
                              aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key,
                              config=Config(
                                  signature_version=signature_version,
                                  region_name=region_name))

            signed_url = s3.generate_presigned_url(s3op,
                                                   Params={
                                                       'Bucket': bucket,
                                                       'Key': key
                                                   },
                                                   ExpiresIn=lifetime)

    elif service == 'swift':
        # split URL to get hostname and path
        components = urlparse(url)
        host = components.netloc

        # remove port number from host if present
        colon = host.find(':')
        if colon >= 0:
            host = host[:colon]

        # use RSE ID to look up key
        cred_name = rse_id

        # look up tempurl signing key
        cred = REGION.get('swift-%s' % cred_name)
        if cred is NO_VALUE:
            rse_cred = get_rse_credentials()
            cred = rse_cred.get(cred_name)
            REGION.set('swift-%s' % cred_name, cred)
        tempurl_key = cred['tempurl_key']

        if operation == 'read':
            swiftop = 'GET'
        elif operation == 'write':
            swiftop = 'PUT'
        else:
            swiftop = 'DELETE'

        expires = int(time.time() + lifetime)

        # create signed URL
        with record_timer_block('credential.signswift'):
            hmac_body = u'%s\n%s\n%s' % (swiftop, expires, components.path)
            # Python 3 hmac only accepts bytes or bytearray
            sig = hmac.new(bytearray(tempurl_key, 'utf-8'),
                           bytearray(hmac_body, 'utf-8'), sha1).hexdigest()
            signed_url = 'https://' + host + components.path + '?temp_url_sig=' + sig + '&temp_url_expires=' + str(
                expires)

    return signed_url
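A hedged usage sketch of the function above; the RSE id and object URL are placeholders:

# Hypothetical call: presign a read of one object on an S3-backed RSE for 5 minutes.
url = get_signed_url(
    rse_id='SOME_RSE_ID',
    service='s3',
    operation='read',
    url='https://s3.example.org:443/mybucket/path/to/file.root',
    lifetime=300,
)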
Example #22
    RECORD_TYPE_CONTENT,
    RECORD_TYPE_CREATE,
    RECORD_TYPE_SPECIAL,
    BaseAggregator,
    BaseListener,
    BaseParams,
)
from .parquet_schema import PQ_SCHEMAS

CACHE_SIZE = 500
SITE_VISITS_INDEX = "_site_visits_index"
CONTENT_DIRECTORY = "content"
CONFIG_DIR = "config"
BATCH_COMMIT_TIMEOUT = 30  # commit a batch if no new records for N seconds
S3_CONFIG_KWARGS = {"retries": {"max_attempts": 20}}
S3_CONFIG = Config(**S3_CONFIG_KWARGS)


def listener_process_runner(base_params: BaseParams, manager_params: Dict[str,
                                                                          Any],
                            instance_id: int) -> None:
    """S3Listener runner. Pass to new process"""
    listener = S3Listener(base_params, manager_params, instance_id)
    listener.startup()

    while True:
        listener.update_status_queue()
        listener.save_batch_if_past_timeout()
        if listener.should_shutdown():
            break
        try:
Example #23
def handler(context, event):
    #params - expect json
    jsstring = event.body.decode('utf-8').strip()

    if not jsstring:
        return context.Response(body='Error. Empty json',
                                headers={},
                                content_type='text/plain',
                                status_code=400)

    msg = json.loads(jsstring)
    context.logger.info(msg)

    key = msg['key']
    bucket = msg['bucket']
    parquet = msg['parquet']

    context.logger.info('download from s3  ' + parquet)

    s3 = boto3.client('s3',
                      endpoint_url=S3_ENDPOINT,
                      aws_access_key_id=S3_ACCESS_KEY,
                      aws_secret_access_key=S3_SECRET_KEY,
                      config=Config(signature_version='s3v4'),
                      region_name='us-east-1')

    obj = s3.get_object(Bucket=S3_BUCKET, Key=parquet)
    dataio = io.BytesIO(obj['Body'].read())

    context.logger.info('read parquet into pandas dataframe')

    df = pd.read_parquet(dataio, engine='pyarrow')

    count = len(df)
    context.logger.info('read count: ' + str(count))

    # use sqlalchemy because it supports multi/insert with pagination
    engine = sqlalchemy.create_engine('postgresql://' + DB_USERNAME + ':' +
                                      DB_PASSWORD + '@' + DB_HOST + ':' +
                                      DB_PORT + '/' + DB_NAME)

    context.logger.debug('write dataframe into table ' + DB_TABLE)

    df.to_sql(DB_TABLE,
              engine,
              index=False,
              if_exists='append',
              method='multi',
              chunksize=DB_CHUNKSIZE)

    # send message with filename
    client = mqtt.Client(MQTT_CLIENT + "_" + event.id)  #create new instance
    client.connect(MQTT_BROKER)  #connect to broker
    client.loop_start()

    context.logger.info('send message to MQTT ' + MQTT_TOPIC)

    msg = {
        "key": key,
    }

    js = json.dumps(msg)
    context.logger.debug(js)
    client.publish(MQTT_TOPIC, js)

    context.logger.info('done.')

    return context.Response(body='Done. File ' + key + ' done',
                            headers={},
                            content_type='text/plain',
                            status_code=200)
Example #24
def lambda_handler(event, context):
    bucket = os.environ["BUCKET_NAME"] # Using environment variables, the lambda will use your S3 bucket
    DestinationPrefix = os.environ["PREFIX"]

    ####CODE TO GET DATA CAN BE REPLACED######
    client = boto3.client('ecs')
    paginator = client.get_paginator("list_clusters") # Paginator for a large list of accounts
    response_iterator = paginator.paginate()
    with open('/tmp/data.json', 'w') as f: # Saving in the temporary folder in the lambda
        for response in response_iterator: # extracts the needed info
            for cluster in response['clusterArns']:
                listservices = client.list_services(cluster=cluster.split( '/')[1],maxResults=100)
                for i in listservices['serviceArns']:
                    #print (i)
                    services = client.describe_services(
                        cluster=cluster.split( '/')[1],
                        services=[
                            i.split( '/')[2],
                        ],
                        include=[
                            'TAGS',
                        ]
                    )
                    for service in services['services']:
                        data = {'cluster':cluster.split( '/')[1], 'services':i.split( '/')[2], 'serviceName': service.get('serviceName'), 'tags':service.get('tags') }
                        print(data)
                        ####CODE TO GET DATA######
                        jsondata = json.dumps(data) # converts the data so it can be placed in json

                        f.write(jsondata)
                        f.write('\n')
    print("response gathered")
    today = date.today()
    year = today.year
    month = today.month
    try:
        s3 = boto3.client('s3', config=Config(s3={'addressing_style': 'path'}))
        s3.upload_file(
            '/tmp/data.json', bucket, f"{DestinationPrefix}-data/year={year}/month={month}/{DestinationPrefix}.json") # uploading the file with the data to s3
        print(f"Data in s3 - {DestinationPrefix}-data/year={year}/month={month}")
    except Exception as e:
        print(e)
    start_crawler()

def start_crawler():
    glue_client = boto3.client('glue')
    os.environ['ROLE_ARN']
    try:
        glue_client.start_crawler(Name=os.environ['CRAWLER_NAME'])
    except Exception as e:
        # Send some context about this error to Lambda Logs
        logging.warning("%s" % e)     


def assume_role(account_id, service, region):
    role_name = os.environ['ROLENAME']
    role_arn = f"arn:aws:iam::{account_id}:role/{role_name}" #OrganizationAccountAccessRole
    sts_client = boto3.client('sts')
    
    try:
        #region = sts_client.meta.region_name
        assumedRoleObject = sts_client.assume_role(
            RoleArn=role_arn,
            RoleSessionName="AssumeRoleRoot"
            )
        
        credentials = assumedRoleObject['Credentials']
        client = boto3.client(
            service,
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken'],
            region_name = region
        )
        return client

    except ClientError as e:
        logging.warning(f"Unexpected error Account {account_id}: {e}")
        return None


def lits_regions():
    from boto3.session import Session

    s = Session()
    ecs_regions = s.get_available_regions('ecs')
    return ecs_regions
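A short hedged sketch of wiring the helpers above together (the account id is a placeholder; the function names are the ones defined above):

# Hypothetical cross-account use of assume_role() across every ECS region.
for region in lits_regions():
    ecs_client = assume_role('111122223333', 'ecs', region)
    if ecs_client is None:
        continue  # assume_role already logged the failure
    clusters = ecs_client.list_clusters()
    print(region, clusters.get('clusterArns', []))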
Example #25
# -*- coding: utf-8 -*-

import json
import time
import logging
import threading
from boto3.session import Session
from botocore.client import Config

logger = logging.getLogger(__name__)

logging.getLogger("botocore").setLevel(logging.CRITICAL)

session = Session()
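# Note: read_timeout is raised well above the botocore default of 60 seconds,
# presumably so long-running synchronous Lambda invocations do not time out client-side.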
config = Config(connect_timeout=10, read_timeout=310)
client = session.client("lambda", config=config)


class LambdaLoadTest(object):
    """
    An object to run and collect statistics and results from multiple parallel locust load
    tests running on AWS Lambda
    """

    def __init__(
        self,
        lambda_function_name,
        threads,
        ramp_time,
        time_limit,
        lambda_payload,
Example #26
DEFAULT_TTL = 60 * 60 * 48

# Default size of the bucket before checking for inventory
DEFAULT_INVENTORY_BUCKET_SIZE_THRESHOLD = \
    int(os.environ.get("SALACTUS_INVENTORY_THRESHOLD", 100000))

BUCKET_OBJ_DESC = {
    True: ('Versions', 'list_object_versions', ('NextKeyMarker',
                                                'NextVersionIdMarker')),
    False: ('Contents', 'list_objects_v2', ('NextContinuationToken', ))
}
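
# Illustrative helper (not part of the original snippet) showing one plausible
# way BUCKET_OBJ_DESC could drive pagination: it selects the listing method,
# the response key holding objects, and the continuation tokens depending on
# whether the bucket is versioned.
def iterate_bucket_objects(s3_client, bucket_name, versioned=False):
    contents_key, method_name, continuation_tokens = BUCKET_OBJ_DESC[versioned]
    params = {'Bucket': bucket_name}
    while True:
        response = getattr(s3_client, method_name)(**params)
        for obj in response.get(contents_key, []):
            yield obj
        if not response.get('IsTruncated'):
            break
        # e.g. 'NextContinuationToken' in the response becomes the
        # 'ContinuationToken' request parameter on the next call
        for token in continuation_tokens:
            params[token.replace('Next', '', 1)] = response[token]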

connection = redis.Redis(host=REDIS_HOST)
# Increase timeouts to assist with non local regions, also
# seeing some odd net slowness all around.
s3config = Config(read_timeout=420, connect_timeout=90)
keyconfig = {
    'report-only': not os.environ.get('SALACTUS_ENCRYPT'),
    'glacier': False,
    'large': True,
    'key-id': os.environ.get('SALACTUS_KEYID'),
    'crypto': os.environ.get('SALACTUS_CRYPTO', 'AES256')
}

log = logging.getLogger("salactus")


def get_session(account_info):
    """Get a boto3 sesssion potentially cross account sts assumed

    assumed sessions are automatically refreshed.
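
# The original get_session body is truncated in this snippet. As a rough,
# hypothetical sketch of the idea the docstring describes (an assumed-role
# session whose credentials refresh themselves), one can build on botocore's
# RefreshableCredentials; all names below are illustrative:
import boto3
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session as get_botocore_session


def refreshable_assumed_session(role_arn, session_name="salactus"):
    sts = boto3.client("sts")

    def _refresh():
        creds = sts.assume_role(
            RoleArn=role_arn, RoleSessionName=session_name)["Credentials"]
        return {
            "access_key": creds["AccessKeyId"],
            "secret_key": creds["SecretAccessKey"],
            "token": creds["SessionToken"],
            "expiry_time": creds["Expiration"].isoformat(),
        }

    credentials = RefreshableCredentials.create_from_metadata(
        metadata=_refresh(), refresh_using=_refresh, method="sts-assume-role")
    botocore_session = get_botocore_session()
    botocore_session._credentials = credentials
    return boto3.Session(botocore_session=botocore_session)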
Example #27
from botocore.exceptions import ClientError

## Global variables
# Parameters for connection to s3:
REGION = 'XXXXXX'
VERSION = 'XXXXXX'
ACCESS_KEY = 'XXXXXX'
SECRET_ACCESS_KEY = 'XXXXXX'
END_POINT_URL = 'XXXXXX'

# Objects used to perform actions: the client is the low-level Swiss-army knife, the resource offers a higher-level, object-oriented view:
s3_resource = boto3.resource('s3',
                             endpoint_url=END_POINT_URL,
                             aws_access_key_id=ACCESS_KEY,
                             aws_secret_access_key=SECRET_ACCESS_KEY,
                             config=Config(signature_version=VERSION),
                             region_name=REGION,
                             verify=False)

s3_client = boto3.client('s3',
                         endpoint_url=END_POINT_URL,
                         aws_access_key_id=ACCESS_KEY,
                         aws_secret_access_key=SECRET_ACCESS_KEY,
                         config=Config(signature_version=VERSION),
                         region_name=REGION,
                         verify=False)
""" Functions for bucket operations """


# The function prints the list of existing buckets.
def list_buckets():
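    # The original snippet is truncated here; a plausible body, using the
    # s3_client defined above, would simply print each bucket name:
    response = s3_client.list_buckets()
    for bucket in response.get('Buckets', []):
        print(bucket['Name'])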
Example #28
def archive_to_glacier():
    config = configparser.ConfigParser()
    config.read('configini.ini')
    aws_config = config['AWS']
    canceled_archive = []
    # Connect to SQS and get the message queue
    try:
        sqs = boto3.resource('sqs', region_name=aws_config['AWS_REGION_NAME'])
        huangxy_queue = sqs.get_queue_by_name(
            QueueName=aws_config['AWS_SQS_GLACIER'])
    except Exception as e:
        print(e)
        return
    # Poll the message queue in a loop
    while True:
        # Attempt to read a message from the queue
        response = huangxy_queue.receive_messages(WaitTimeSeconds=20)
        # If message read, extract job parameters from the message body as before
        if response:
            print('Get response successfully.')
            try:
                msg = json.loads(json.loads(response[0].body)['Message'])
                job_id = msg['job_id']
            except Exception as e:
                raise e
            # if the job should be canceled to put into archive, continue to the next while loop.
            if 'canceled_archive' in msg:
                canceled_archive.append(msg['canceled_archive'])
                response[0].delete()
                print('This job should not be moved to glacier.')
                continue
            # intercept the canceled archive job
            if job_id in canceled_archive:
                canceled_archive.remove(job_id)
                response[0].delete()
                print('Avoid moving to glacier.')
                continue
            try:
                dynamodb = boto3.resource(
                    'dynamodb', region_name=aws_config['AWS_REGION_NAME'])
                annotation_table = dynamodb.Table(
                    aws_config['AWS_DYNAMODB_ANNOTATIONS_TABLE'])
                job = annotation_table.query(
                    Select='ALL_ATTRIBUTES',
                    KeyConditionExpression=Key('job_id').eq(
                        job_id))['Items'][0]
                print('Get job successfully.')
            except Exception as e:
                raise e
            if 'complete_time' in job and (
                    time.time() - float(job['complete_time'])) > float(
                        aws_config['FREE_USER_DATA_RETENTION']):
                try:
                    key = msg['s3_key_input_file'].replace(
                        '.vcf', '.annot.vcf')
                    s3 = boto3.resource('s3')
                    bucket = s3.Bucket(aws_config['AWS_S3_RESULTS_BUCKET'])
                    body = bucket.Object(key).get()['Body'].read()
                    # print(body)
                    client_glacier = boto3.client(
                        'glacier', aws_config['AWS_REGION_NAME'])
                    # Response Syntax
                    # {
                    #     'location': 'string',
                    #     'checksum': 'string',
                    #     'archiveId': 'string'
                    # }
                    glacier_upload_response = client_glacier.upload_archive(
                        vaultName=aws_config['AWS_GLACIER_VAULT'], body=body)
                    print('Upload glacier successfully.')
                except Exception as e:
                    raise e
                try:
                    client_s3 = boto3.client(
                        's3',
                        region_name=aws_config['AWS_REGION_NAME'],
                        config=Config(signature_version='s3v4'))
                    # Response Syntax
                    # {
                    #     'DeleteMarker': True|False,
                    #     'VersionId': 'string',
                    #     'RequestCharged': 'requester'
                    # }
                    s3_delete_response = client_s3.delete_object(
                        Bucket=aws_config['AWS_S3_RESULTS_BUCKET'], Key=key)
                    print('Delete from s3 successfully.')
                except Exception as e:
                    raise e
                annotation_table.update_item(
                    Key={'job_id': job_id},
                    AttributeUpdates={
                        'archive_id': {
                            'Value': glacier_upload_response['archiveId'],
                            'Action': 'PUT'
                        }
                    })
                print('Update database successfully.')
                # After all done, delete SQS
                response[0].delete()
                print('Delete SQS successfully.')
        else:
            print('There is no response.')
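
# The poller above runs until interrupted or an error is raised; a typical
# entry point (an assumption, not shown in the original snippet) would simply
# start it:
if __name__ == '__main__':
    archive_to_glacier()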
Example #29
def lambda_handler(event, context):

    # Retrieve the Job ID from the Lambda action
    job_id = event["CodePipeline.job"]["id"]

    try:
        credential_value = get_secret()
        credential_json = json.loads(credential_value)

        #print('credential_value: {}'.format(credential_json))
        codedeploy_client_cn = boto3.client(
            'codedeploy',
            aws_access_key_id=credential_json['AWS_ACCESS_KEY_ID_CN'],
            aws_secret_access_key=credential_json['AWS_SECRET_ACCESS_KEY_CN'],
            region_name=china_region_name,
            config=Config(connect_timeout=15,
                          read_timeout=15,
                          retries=dict(max_attempts=10)))
    except ClientError as e:
        return_msg = json.dumps(
            'Lambda invoke codedeploy in {} failed'.format(china_region_name))
        response = codepipeline_client.put_job_failure_result(
            jobId=job_id,
            failureDetails={
                'type': 'JobFailed',
                'message': str(e),
            })
        return {'statusCode': 500, 'body': return_msg}

    try:
        response = codedeploy_client_cn.create_deployment(
            applicationName='MyDemoApplication',
            deploymentGroupName='CodePipelineProductionFleetChina',
            revision={
                'revisionType': 'S3',
                's3Location': {
                    'bucket': S3_BUCKET_NAME_CHINA,
                    'key': S3_ARTIFACT_NAME_CHINA,
                    'bundleType': 'zip'
                }
            })
        deployment_id = response['deploymentId']
        print('create_deployment %s' % (deployment_id))

        waiter = codedeploy_client_cn.get_waiter('deployment_successful')
        waiter.wait(deploymentId=deployment_id,
                    WaiterConfig={
                        'Delay': 15,
                        'MaxAttempts': 20
                    })
        response = codedeploy_client_cn.get_deployment(
            deploymentId=deployment_id)
        print('get_deployment %s' % response['deploymentInfo']['status'])
        print('get_deployment detail %s' % response)

    except (ClientError, WaiterError) as e:
        return_msg = json.dumps(
            'Lambda invoke codedeploy in {} failed'.format(china_region_name))
        response = codepipeline_client.put_job_failure_result(
            jobId=job_id,
            failureDetails={
                'type': 'JobFailed',
                'message': str(e),
            })
        return {'statusCode': 500, 'body': return_msg}

    return_msg = json.dumps(
        'Lambda invoke codedeploy in {} successfully'.format(
            china_region_name))
    codepipeline_client.put_job_success_result(
        jobId=job_id, executionDetails={'summary': return_msg})
    return {'statusCode': 200, 'body': return_msg}
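
# get_secret() is referenced above but not shown in this snippet; a plausible,
# hypothetical implementation (the secret name is an assumption) would fetch
# the China-region credentials as a JSON string from Secrets Manager:
def get_secret(secret_id='codedeploy/china-credentials'):
    secrets_client = boto3.client('secretsmanager')
    return secrets_client.get_secret_value(SecretId=secret_id)['SecretString']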
Example #30
def get_s3_dcm(bucket, file_key):
    """Read DICOM from S3"""
    s3_config = Config(connect_timeout=50, read_timeout=70)
    s3_client = boto3.client('s3', config=s3_config)  # low-level functional API
    obj = s3_client.get_object(Bucket=bucket, Key=file_key)
    return pydicom.read_file(BytesIO(obj['Body'].read()))
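
# A quick usage sketch; the bucket and key below are placeholders, and reading
# pixel_array additionally requires numpy and a suitable pixel-data handler.
if __name__ == '__main__':
    ds = get_s3_dcm('my-imaging-bucket', 'studies/example.dcm')
    print(ds.get('PatientID', 'no PatientID tag'))
    print(ds.pixel_array.shape)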