Example #1
    def __init__(self, loop=None, endpoint=None):
        loop = loop if loop else asyncio.get_event_loop()
        session = aiobotocore.get_session(loop=loop)
        self._client = session.create_client('sqs', endpoint_url=endpoint)
        self._queue_urls = {}
        self._unfinished_messages = []
        self._sigterm_received = None
Example #2
    def __init__(self, settings, loop=None):
        self._aws_access_key = settings["aws_client_id"]
        self._aws_secret_key = settings["aws_client_secret"]

        opts = dict(
            aws_secret_access_key=self._aws_secret_key,
            aws_access_key_id=self._aws_access_key,
            endpoint_url=settings.get("endpoint_url"),
            verify=settings.get("verify_ssl"),
            use_ssl=settings.get("ssl", True),
            region_name=settings.get("region_name"),
            config=aiobotocore.config.AioConfig(
                None,
                max_pool_connections=settings.get("max_pool_connections", 30)),
        )

        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop

        self._s3aiosession = aiobotocore.get_session(loop=loop)

        # This client is for downloads only
        self._s3aioclient = self._s3aiosession.create_client("s3", **opts)
        self._cached_buckets = []

        self._bucket_name = settings["bucket"]
Example #3
    async def __aenter__(self):
        session = aiobotocore.get_session()
        self.client = session.create_client(
            self.option.service_name,
            region_name=self.option.region_name,
            config=self.option.config)
        return self.client
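Only __aenter__ is shown here; a matching __aexit__ would typically release the client. A minimal sketch, assuming the client object exposes an awaitable close() as in the other examples on this page:

    async def __aexit__(self, exc_type, exc, tb):
        # hypothetical counterpart to __aenter__ above: release the client
        await self.client.close()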
Example #4
def go(loop):

    bucket = 'dataintake'
    filename = 'dummy.bin'
    folder = 'aiobotocore'
    key = '{}/{}'.format(folder, filename)

    session = aiobotocore.get_session(loop=loop)
    client = session.create_client('s3', region_name='us-west-2',
                                   aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                   aws_access_key_id=AWS_ACCESS_KEY_ID)
    # upload object to amazon s3
    data = b'\x01'*1024
    resp = yield from client.put_object(Bucket=bucket,
                                        Key=key,
                                        Body=data)
    print(resp)

    # getting s3 object properties of file we just uploaded
    resp = yield from client.get_object_acl(Bucket=bucket, Key=key)
    print(resp)

    # delete object from s3
    resp = yield from client.delete_object(Bucket=bucket, Key=key)
    print(resp)
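This go() is an old-style (yield from) coroutine; a hedged sketch of the driver code assumed to run it, following the pattern used in the other examples on this page:

loop = asyncio.get_event_loop()
loop.run_until_complete(go(loop))
loop.close()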
Example #5
def main():

    if len(sys.argv) == 1 or len(sys.argv) > 1 and (sys.argv[1] == "-h" or
                                                    sys.argv[1] == "--help"):
        printUsage()

    rootid = sys.argv[1]

    if not isValidUuid(rootid):
        print("Invalid root id!")
        sys.exit(1)

    if not isSchema2Id(rootid):
        print("This tool can only be used with Schema v2 ids")
        sys.exit(1)

    # we need to set up an asyncio loop to query s3
    loop = asyncio.get_event_loop()

    app = {}
    app["bucket_name"] = config.get("bucket_name")
    app["loop"] = loop
    session = get_session(loop=loop)
    app["session"] = session
    loop.run_until_complete(run_delete(app, rootid))

    loop.close()

    print("done!")
Example #6
    async def _run(self):
        for i in range(self._concurrency):
            proc = await asyncio.create_subprocess_exec(
                sys.executable,
                os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "s3log_worker.py"),
                self._script,
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=sys.stdout,
            )
            await self._q.put(proc)

        session = aiobotocore.get_session(loop=self._loop)
        async with session.create_client(
                "s3",
                region_name=self._aws_region,
                aws_secret_access_key=self._aws_secret_access_key,
                aws_access_key_id=self._aws_access_key_id,
        ) as client:
            if self._show_progress:
                self._loop.create_task(self._status())
            waiter = self._loop.create_task(self._wait())
            # list s3 objects using paginator
            paginator = client.get_paginator("list_objects")
            async for result in paginator.paginate(Bucket=self._bucket,
                                                   Prefix=self._prefix):
                for c in result.get("Contents", []):
                    key = c["Key"]
                    print(f"Processing key: {key}", file=sys.stderr)
                    proc = await self._q.get()
                    await self._tasks_queue.put(
                        self._loop.create_task(self.feed(client, key, proc)))
            await self._tasks_queue.put(None)
            await waiter
Example #7
    def create_client(cls: Any, name: str, context: Dict) -> None:
        logging.getLogger(
            'botocore.vendored.requests.packages.urllib3.connectionpool'
        ).setLevel(logging.WARNING)

        if not cls.clients:
            cls.clients = {}
            cls.clients_creation_time = {}
        loop = asyncio.get_event_loop()
        session = aiobotocore.get_session(loop=loop)

        try:
            if cls.clients_creation_time.get(
                    name
            ) and cls.clients_creation_time[name] + 30 > time.time():
                return
            cls.clients[name] = session.create_client(name,
                                                      region_name='eu-west-1')
            cls.clients_creation_time[name] = time.time()
        except (botocore.exceptions.PartialCredentialsError,
                botocore.exceptions.NoRegionError) as e:
            error_message = str(e)
            logging.getLogger('transport.aws_sns_sqs').warning(
                'Invalid credentials [{}] to AWS ({})'.format(
                    name, error_message))
            raise AWSSNSSQSConnectionException(
                error_message, log_level=context.get('log_level')) from e
Example #8
async def async_main(login, password, command, src, dest, bucket, loop):
    """
    Асинхронная функция, обрабатывающая команды
    :param login: Логин
    :param password: Пароль
    :param command: Команда
    :param src: /path/to/src
    :param dest: /path/to/dest
    :param bucket: Название бакета
    :param loop: Асинхронный цикл
    :return:
    """

    session = aiobotocore.get_session(loop=loop)
    async with session.create_client("s3",
                                     endpoint_url=storage_url,
                                     region_name='us-west-2',
                                     aws_secret_access_key=password,
                                     aws_access_key_id=login) as client:

        if command == C_DOWNLOAD:
            await download_bucket(bucket, client)
        elif command == C_GET:
            await download_one_file(src, dest, bucket, client)
        elif command == C_LIST:
            await print_bucket(bucket, client)
        elif command == C_UPLOAD:
            await upload(src, bucket, client)
        else:
            sys.exit("Unknown command {}".format(command))
Example #9
async def go(loop):
    # Boto should get credentials from ~/.aws/credentials or the environment
    session = aiobotocore.get_session()
    async with session.create_client('sqs') as client:
        print('Pulling messages off the queue')

        while True:
            try:
                # This loop won't spin really fast as there is
                # essentially a sleep in the receive_message call
                response = await client.receive_message(
                    QueueUrl=QUEUE_URL,
                    WaitTimeSeconds=20,
                )

                if 'Messages' in response:
                    for message in response['Messages']:
                        print("Message received:" + message["Body"])
                        asyncio.ensure_future(handle(session, client, message))

                else:
                    print('No messages in queue')
            except KeyboardInterrupt:
                break

        print('Finished')
Example #10
async def go():
    session = aiobotocore.get_session()
    client = session.create_client('dynamodb', region_name='us-west-2')
    # Create random table name
    table_name = 'aiobotocore-' + str(uuid.uuid4())

    print('Requesting table creation...')
    await client.create_table(TableName=table_name,
                              AttributeDefinitions=[
                                  {
                                      'AttributeName': 'testKey',
                                      'AttributeType': 'S'
                                  },
                              ],
                              KeySchema=[
                                  {
                                      'AttributeName': 'testKey',
                                      'KeyType': 'HASH'
                                  },
                              ],
                              ProvisionedThroughput={
                                  'ReadCapacityUnits': 10,
                                  'WriteCapacityUnits': 10
                              })

    print("Waiting for table to be created...")
    waiter = client.get_waiter('table_exists')
    await waiter.wait(TableName=table_name)
    print("Table {0} created".format(table_name))

    await client.close()
Example #11
async def go():

    bucket = 'dataintake'
    filename = 'dummy.bin'
    folder = 'aiobotocore'
    key = '{}/{}'.format(folder, filename)

    session = aiobotocore.get_session()
    async with session.create_client(
            's3',
            region_name='us-west-2',
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            aws_access_key_id=AWS_ACCESS_KEY_ID) as client:
        # upload object to amazon s3
        data = b'\x01' * 1024
        resp = await client.put_object(Bucket=bucket, Key=key, Body=data)
        print(resp)

        # getting s3 object properties of file we just uploaded
        resp = await client.get_object_acl(Bucket=bucket, Key=key)
        print(resp)

        # delete object from s3
        resp = await client.delete_object(Bucket=bucket, Key=key)
        print(resp)
Example #12
async def test_s3_parameter_capture(loop, recorder):
    segment = recorder.begin_segment('name')

    bucket_name = 'mybucket'
    key = 'mykey'
    version_id = 'myversionid'
    response = {
        'ResponseMetadata': {
            'RequestId': '1234',
            'HTTPStatusCode': 200
        }
    }

    session = aiobotocore.get_session()
    async with session.create_client('s3', region_name='eu-west-2') as client:
        with Stubber(client) as stubber:
            stubber.add_response('get_object', response, {
                'Bucket': bucket_name,
                'Key': key,
                'VersionId': version_id
            })
            await client.get_object(Bucket=bucket_name,
                                    Key=key,
                                    VersionId=version_id)

    subsegment = segment.subsegments[0]
    aws_meta = subsegment.aws

    assert aws_meta['bucket_name'] == bucket_name
    assert aws_meta['key'] == key
    assert aws_meta['version_id'] == version_id
    assert aws_meta['operation'] == 'GetObject'
Example #13
async def test_map_parameter_grouping(loop, recorder):
    """
    Test special parameters that have shape of map are recorded
    as a list of keys based on `para_whitelist.json`
    """
    segment = recorder.begin_segment('name')

    response = {
        'ResponseMetadata': {
            'RequestId': '1234',
            'HTTPStatusCode': 500,
        }
    }

    session = aiobotocore.get_session()
    async with session.create_client('dynamodb',
                                     region_name='eu-west-2') as client:
        with Stubber(client) as stubber:
            stubber.add_response('batch_write_item', response,
                                 {'RequestItems': ANY})
            await client.batch_write_item(RequestItems={
                'table1': [{}],
                'table2': [{}]
            })

    subsegment = segment.subsegments[0]
    assert subsegment.fault
    assert subsegment.http['response']['status'] == 500

    aws_meta = subsegment.aws
    assert sorted(aws_meta['table_names']) == ['table1', 'table2']
Example #14
def main():

    if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
        printUsage()

    if len(sys.argv) > 1 and sys.argv[1] == "--update":
        do_update = True
    else:
        do_update = False

    # we need to set up an asyncio loop to query s3
    loop = asyncio.get_event_loop()

    app = {}
    app["bucket_name"] = config.get("bucket_name")
    app["loop"] = loop
    session = get_session(loop=loop)
    app["session"] = session
    loop.run_until_complete(run_scan(app, update=do_update))

    loop.close()

    results = app["bucket_scan"]
    print("root_count:", results["root_count"])
    print("info_count:", results["info_count"])
    print("group_count", results["group_count"])
    print("dataset_count:", results["dataset_count"])
    print("datatype_count", results["datatype_count"])
    print("chunk_count:", results["chunk_count"])
    print('allocated_bytes:', results["allocated_bytes"])
    print("metadata_bytes:", results["metadata_bytes"])
    print("updated_count:", results["updated_count"])

    print("done!")
Example #15
def main():
    if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
        printUsage()
        sys.exit(1)

    chunk_id = sys.argv[-1]
    if not isValidChunkId(chunk_id):
        print("Invalid chunk id")
        sys.exit(1)

    # we need to set up an asyncio loop to query s3
    loop = asyncio.get_event_loop()
    session = get_session(loop=loop)

    app = {}
    app["session"] = session
    app['bucket_name'] = config.get("bucket_name")
    app['node_count'] = 1
    app['node_number'] = 0
    app['deleted_ids'] = set()
    app['pending_s3_read'] = {}
    app['meta_cache'] = LruCache(mem_target=1024*1024, chunk_cache=False)
    app['chunk_cache'] = LruCache(mem_target=64*1024*1024, chunk_cache=True)
    domain = config.get("domain")
    if not domain:
        printUsage()
        sys.exit(-1)
    print("got domain:", domain)

    loop.run_until_complete(printChunkValues(app, domain, chunk_id))

    loop.close()
Example #16
async def coro3(app):
    queue_name = 'jreuter-edge-detection'
    logging.info('C3 - About to call SQS')

    loop = asyncio.get_event_loop()
    session = aiobotocore.get_session(loop=loop)
    async with session.create_client('sqs', region_name='us-east-1') as client:
        try:
            queue = await client.get_queue_url(QueueName=queue_name)
        except ClientError as e:
            if e.response['Error']['Code'] == "AWS.SimpleQueueService.NonExistentQueue":
                logging.info(e.response['Error']['Message'])
                logging.info("Creating SQS queue %s in region %s...", queue_name, 'us-east-1')
                queue = await client.create_queue(QueueName=queue_name)
            else:
                raise
        queue_url = queue.get('QueueUrl')
        logging.info("C3 - recieving messages")
        while True:
            result = await client.receive_message(WaitTimeSeconds=20,
                                                  MaxNumberOfMessages=1,
                                                  QueueUrl=queue_url)

            if 'Messages' in result:
                logging.info('C3 - We got some messages')
                for message in result['Messages']:
                    logging.info(message['Body'])
                    await client.delete_message(ReceiptHandle=str(message.get('ReceiptHandle')),
                                                QueueUrl=queue_url)
            else:
                logging.info('C3 - We got no messages')
            await asyncio.sleep(60)
Example #17
async def test_list_parameter_counting(loop, recorder):
    """
    Test special parameters that have shape of list are recorded
    as count based on `para_whitelist.json`
    """
    segment = recorder.begin_segment('name')

    queue_urls = ['url1', 'url2']
    queue_name_prefix = 'url'
    response = {
        'QueueUrls': queue_urls,
        'ResponseMetadata': {
            'RequestId': '1234',
            'HTTPStatusCode': 200,
        }
    }

    session = aiobotocore.get_session(loop=loop)
    async with session.create_client('sqs', region_name='eu-west-2') as client:
        with Stubber(client) as stubber:
            stubber.add_response('list_queues', response,
                                 {'QueueNamePrefix': queue_name_prefix})
            await client.list_queues(QueueNamePrefix='url')

    subsegment = segment.subsegments[0]
    assert subsegment.http['response']['status'] == 200

    aws_meta = subsegment.aws
    assert aws_meta['queue_count'] == len(queue_urls)
    # all whitelisted input parameters will be converted to snake case
    # unless there is an explicit 'rename_to' attribute in json key
    assert aws_meta['queue_name_prefix'] == queue_name_prefix
Example #18
    def _get_async_client(self, loop=None):
        """Return an asynchronous s3 client for the backend"""
        return aiobotocore.get_session().create_client(
            's3',
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key,
            region_name=self.region)
Example #19
async def publish_file_for_5_minutes_and_return_url(dataset: str,
                                                    filename: str) -> str:
    """
    Little aux function to make the user happy.
    When results are shown (if they are), near each record with filename
    there is a `Download` button, so user can click it
    and file will be open/downloaded.
    The thing is files are located on a private S3 bucket, so there is a need
    to expose them/make them public for some short time. So particular function
    makes files (of datasets) public for a 5 minutes on a request.

    :param str dataset: the name of dataset / text collection (as it on S3)
    :param str filename: the name of file in the collection
    :return: an URL to publicly accessible for a 5 minutes file
    :rtype: str

    :raises: possibly some aiobotocore exception. Code that calls this function
      does exceptions handling.
    """
    session = aiobotocore.get_session()

    # create shareable url to download the file (valid for 300 seconds)
    async with session.create_client("s3") as s3_client:

        url: str = await s3_client.generate_presigned_url(
            "get_object",
            Params={
                "Bucket": S3_BUCKET,
                "Key": f"static/{dataset}/{filename}",
            },
            ExpiresIn=300,  # seconds
        )

        return url
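A hedged usage sketch for the helper above; the dataset and file names below are illustrative, not taken from the project:

# hypothetical caller: returns a link that stays valid for 300 seconds
url = await publish_file_for_5_minutes_and_return_url('some-dataset', 'doc001.txt')
print(url)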
Example #20
async def main(loop, args):
    session = get_session(loop=loop)
    config = AioConfig(max_pool_connections=args.semaphore)
    home = path.dirname(path.realpath(__file__))

    keys = listdir('./files/')
    paths = [home + '/files/' + keys[i] for i in range(0, len(keys))]

    async with session.create_client(
            's3',
            config=config,
            aws_access_key_id=args.key_id,
            aws_secret_access_key=args.key) as s3_client:

        async def put(key, f_path):
            f = await aio_open(f_path, mode='r')
            fl = await f.read()
            await f.close()

            response = await s3_client.put_object(Bucket=args.bucket,
                                                  Key=key,
                                                  Body=fl)
            if response['ResponseMetadata']['HTTPStatusCode'] != 200:
                return {key: 'Failed'}
            else:
                return {key: 'Uploaded'}

        return await gather(
            *[put(keys[i], paths[i]) for i in range(0, len(keys))])
Example #21
async def main(args):
    session = get_session()
    config = AioConfig(max_pool_connections=args.semaphore)

    with open(file_name) as fl:
        files = [file[:-1] for file in fl.readlines()]

    async with session.create_client(
            's3',
            config=config,
            aws_access_key_id=args.key_id,
            aws_secret_access_key=args.key) as s3_client:

        async def get(key):
            response = await s3_client.get_object(Bucket=args.bucket, Key=key)

            if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                async with response['Body'] as stream:
                    data = await stream.read()

                f = await aio_open(storage_path + '/' + key, mode='w')
                await f.write(data.decode('UTF-8'))
                await f.flush()

                return {key: 'Downloaded'}
            else:
                return {key: 'Failed'}

        return await gather(*[get(file) for file in files])
Example #22
    def __init__(self, replica):
        self.access_key_id = replica['meta']['access_key_id']
        self.secret_access_key = replica['meta']['secret_access_key']
        loop = asyncio.get_running_loop()
        self.session = aiobotocore.get_session(loop=loop)
        self.bucket = 'openpacs'
        self.region = replica['location']
Example #23
async def go(loop):
    session = aiobotocore.get_session(loop=loop)
    client = session.create_client('dynamodb', region_name='us-west-2')
    # Create random table name
    table_name = 'aiobotocore-' + str(uuid.uuid4())

    print('Requesting table creation...')
    await client.create_table(
        TableName=table_name,
        AttributeDefinitions=[
            {
                'AttributeName': 'testKey',
                'AttributeType': 'S'
            },
        ],
        KeySchema=[
            {
                'AttributeName': 'testKey',
                'KeyType': 'HASH'
            },
        ],
        ProvisionedThroughput={
            'ReadCapacityUnits': 10,
            'WriteCapacityUnits': 10
        }
    )

    print("Waiting for table to be created...")
    waiter = client.get_waiter('table_exists')
    await waiter.wait(TableName=table_name)
    print("Table {0} created".format(table_name))

    await client.close()
Example #24
File: s3.py Project: lablup/logger
async def s3_flusher(loop, opts, ev):
    global _records
    buffer = io.BytesIO()
    part_count = 1
    while True:
        await ev.wait()
        ev.clear()
        print("s3: flushing {} entries...".format(len(_records)))
        if opts["codec"] == "msgpack":
            packer = umsgpack.Packer()
            for rec in _records:
                buffer.write(packer.pack(rec.data))
        elif opts["codec"] == "text":
            for rec in _records:
                print(str(rec).encode("utf8"), file=buffer)
        _records.clear()  # must be cleared before any await
        session = aiobotocore.get_session(loop=loop)
        client = session.create_client(
            "s3",
            region_name=opts["region"],
            aws_secret_access_key=opts["secret_key"],
            aws_access_key_id=opts["access_key"],
        )
        now = datetime.now()
        ts_month = now.strftime("%Y-%m")
        ts_monthday = now.strftime("%Y-%m-%d")
        ts_time = now.strftime("%Y-%m-%dT%H.%M.%S")
        key = "{}/{}/{}/{}.part{}.msgpack".format(opts["key_prefix"], ts_month, ts_monthday, ts_time, part_count)
        resp = await client.put_object(Bucket=opts["bucket"], Key=key, Body=buffer.getvalue(), ACL="private")
        buffer.seek(0, io.SEEK_SET)
        buffer.truncate(0)
        part_count += 1
Example #25
async def run(loop, input_filepath, s3bucket_name, batch_size, concurrency):
    """
    Top level async task loop that coordinates with the get_versions (producer)
    and delete_versions (consumer) coroutines.


    Arguments:
        loop {asyncio.AbstractEventLoop} -- asyncio event loop
        input_filepath {str} -- path of the file holding keys to delete
        s3bucket_name {str} -- bucket to delete from
        batch_size {int} -- the desired number of object versions to batch together into a single delete_objects
            request
        concurrency {int} -- max number of concurrent asyncio actions
    """
    # Setup asyncio objects
    queue = asyncio.Queue(loop=loop)
    sem = asyncio.Semaphore(concurrency)
    session = aiobotocore.get_session(loop=loop)

    # quickly find the total keys we expect to delete to setup the progress bar
    total_keys = key_file_len(input_filepath)

    async with session.create_client('s3') as s3client:
        # wait until we have completed:
        # - the extraction of all object versions
        # - the deletions of all those object versions
        await asyncio.gather(
            get_versions(sem, queue, s3client, s3bucket_name, total_keys,
                         input_filepath),
            delete_versions(sem,
                            queue,
                            s3client,
                            s3bucket_name,
                            total_keys,
                            batch_size=batch_size))
Example #26
    async def setup(s3_cfg):
        session = aiobotocore.get_session()
        s3_ctx = session.create_client("s3",
                                       region_name=region_name,
                                       config=s3_cfg)
        s3 = await s3_ctx.__aenter__()
        return (session, s3, s3_ctx)
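setup() enters the client context manually via __aenter__, but the matching teardown is not shown. A minimal sketch, assuming the caller later unwinds the tuple returned above:

    async def teardown(s3_ctx):
        # hypothetical counterpart to setup(): leave the client context entered above
        await s3_ctx.__aexit__(None, None, None)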
Example #27
    async def session(self):
        """
        Returns a valid aiobotocore session
        """
        if getattr(self, '_session', None) is None:
            self._session = aiobotocore.get_session()
        return self._session
Example #28
    def __init__(self, settings, loop=None):
        self._aws_access_key = settings['aws_client_id']
        self._aws_secret_key = settings['aws_client_secret']

        opts = dict(aws_secret_access_key=self._aws_secret_key,
                    aws_access_key_id=self._aws_access_key,
                    endpoint_url=settings.get('endpoint_url'),
                    verify=settings.get('verify_ssl'),
                    use_ssl=settings.get('ssl', True),
                    region_name=settings.get('region_name'))

        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop

        self._s3aiosession = aiobotocore.get_session(loop=loop)

        # This client is for downloads only
        self._s3aioclient = self._s3aiosession.create_client('s3', **opts)
        self._cached_buckets = []

        self._bucket_name = settings['bucket']

        # right now, only used for upload_fileobj in executor
        self._s3client = boto3.client('s3', **opts)
Example #29
async def test_describe_table(loop, recorder):
    segment = recorder.begin_segment('name')

    req_id = '1234'
    response = {
        'ResponseMetadata': {
            'RequestId': req_id,
            'HTTPStatusCode': 403
        }
    }

    session = aiobotocore.get_session()
    async with session.create_client('dynamodb',
                                     region_name='eu-west-2') as client:
        with Stubber(client) as stubber:
            stubber.add_response('describe_table', response,
                                 {'TableName': 'mytable'})
            await client.describe_table(TableName='mytable')

    subsegment = segment.subsegments[0]
    assert subsegment.error
    assert subsegment.http['response']['status'] == 403

    aws_meta = subsegment.aws
    assert aws_meta['table_name'] == 'mytable'
    assert aws_meta['request_id'] == req_id
    assert aws_meta['region'] == 'eu-west-2'
    assert aws_meta['operation'] == 'DescribeTable'
Example #30
async def check_slack_file_quota(opsdroid, config, message):
    try:
        aws_access_key_id = config["aws_access_key_id"]
        aws_secret_access_key = config["aws_secret_access_key"]
        slack_api_token = config["slack_api_token"]
        s3_region_name = config["s3_region_name"]
        max_total_file_size = config["max_total_file_size"]
        s3_bucket = config["s3_bucket"]
        s3_prefix = config.get("s3_prefix", "")
        file_size_buffer = config.get("file_size_buffer", 0)
    except KeyError:
        _LOGGER.error("Missing config item(s) in skill %s.",
                      config.get('name', 'aws-tag-compliance'))
        return

    if message is None:
        message = Message("", None, config.get("room", connector.default_room),
                          opsdroid.default_connector)
    else:
        await message.respond("I'm on it!")

    files_removed = 0
    data_saved = 0

    files = await get_file_list(slack_api_token)
    size_threshold = max_total_file_size
    while await count_total_file_size(files) > size_threshold:
        if size_threshold == max_total_file_size:
            size_threshold = max_total_file_size - file_size_buffer
        session = aiobotocore.get_session()
        async with session.create_client(
                's3',
                region_name=s3_region_name,
                aws_secret_access_key=aws_secret_access_key,
                aws_access_key_id=aws_access_key_id) as client:
            data = await download_file(slack_api_token, files[-1])
            if await upload_file(client, files[-1], data, s3_bucket,
                                 s3_prefix):
                if await cleanup_file(slack_api_token, files[-1]):
                    _LOGGER.debug("Uploaded %s to S3", files[-1]["name"])
                    files_removed = files_removed + 1
                    data_saved = data_saved + files[-1]["size"]
                    files.remove(files[-1])
                else:
                    _LOGGER.debug(
                        "%s uploaded to S3 but failed to clean up on Slack",
                        files[-1]["name"])
            else:
                _LOGGER.debug("Upload of %s failed", files[-1]["name"])
    if files_removed > 0:
        await message.respond(
            "You were getting close to your Slack file limit so I've moved {} files to the {} bucket on S3 saving {}."
            .format(files_removed, s3_bucket, human_bytes(data_saved)))
    else:
        if message.regex:
            await message.respond(
                "Nothing to do, file size is {} and quota is {}".format(
                    human_bytes(await count_total_file_size(files)),
                    human_bytes(max_total_file_size)))
Example #31
    async def get_text(self, audio_data):
        session = aiobotocore.get_session()
        upload = session.create_client(
            "s3",
            region_name=self.REGION_NAME,
            aws_secret_access_key=self.SECRET_ACCESS_KEY,
            aws_access_key_id=self.ACCESS_KEY_ID,
        )
        transcribe = session.create_client(
            "transcribe",
            region_name=self.REGION_NAME,
            aws_secret_access_key=self.SECRET_ACCESS_KEY,
            aws_access_key_id=self.ACCESS_KEY_ID,
        )
        filename = f"{uuid4().hex}.mp3"
        # Upload audio file to bucket
        await upload.put_object(
            Bucket=self.S3_BUCKET, Key=filename, Body=audio_data
        )
        job_name = uuid4().hex
        job_uri = (
            f"https://s3.{self.REGION_NAME}.amazonaws.com/{self.S3_BUCKET}/"
            f"{filename}"
        )
        # Send audio file URI to Transcribe
        await transcribe.start_transcription_job(
            TranscriptionJobName=job_name,
            Media={"MediaFileUri": job_uri},
            MediaFormat="mp3",
            LanguageCode="en-US",
        )
        # Wait up to 90 seconds for the transcription to complete
        timeout = time.time() + 90
        while time.time() < timeout:
            status = await transcribe.get_transcription_job(
                TranscriptionJobName=job_name
            )
            if status["TranscriptionJob"]["TranscriptionJobStatus"] in [
                "COMPLETED",
                "FAILED",
            ]:
                break
            await asyncio.sleep(5)
        # Delete audio file from bucket
        await upload.delete_object(Bucket=self.S3_BUCKET, Key=filename)
        if "TranscriptFileUri" in status["TranscriptionJob"]["Transcript"]:
            transcript_uri = status["TranscriptionJob"]["Transcript"][
                "TranscriptFileUri"
            ]
            data = json.loads(await get_page(transcript_uri))
            transcript = data["results"]["transcripts"][0]["transcript"]
            return transcript

        # Delete audio file
        await upload.delete_object(Bucket=self.S3_BUCKET, Key=filename)

        # Close clients
        await upload._endpoint._aio_session.close()
        await transcribe._endpoint._aio_session.close()
Example #32
    def setUp(self):
        super().setUp()

        self.session = aiobotocore.get_session(loop=self.loop)
        self.region = 'us-east-1'
        self.client = self.session.create_client('s3', region_name=self.region)
        self.keys = []
        self.addCleanup(self.client.close)
Example #33
    def setUp(self):
        super().setUp()

        self.session = aiobotocore.get_session(loop=self.loop)
        self.region = 'us-east-1'
        self.client = self.session.create_client('s3', region_name=self.region)
        self.keys = []
        self.addCleanup(self.client.close)
Example #34
    def __init__(self,
                 endpoint_url=None,
                 use_ssl=True,
                 dry_run=False,
                 **kwargs):
        super().__init__(endpoint_url, use_ssl, dry_run, **kwargs)
        self._session = aiobotocore.get_session()
        self.aws_account_id = None
Example #35
async def go(loop):
    session = aiobotocore.get_session(loop=loop)
    client = session.create_client('dynamodb', region_name='us-west-2')
    table_name = 'test'

    print('Writing to dynamo')
    start = 0
    while True:
        # Loop adding 25 items to dynamo at a time
        request_items = create_batch_write_structure(table_name, start, 25)
        response = await client.batch_write_item(
            RequestItems=request_items
        )
        if len(response['UnprocessedItems']) == 0:
            print('Wrote 25 items to dynamo')
        else:
            # Hit the provisioned write limit
            print('Hit write limit, backing off then retrying')
            await asyncio.sleep(5)

            # Items left over that haven't been inserted
            unprocessed_items = response['UnprocessedItems']
            print('Resubmitting items')
            # Loop until unprocessed items are written
            while len(unprocessed_items) > 0:
                response = await client.batch_write_item(
                    RequestItems=unprocessed_items
                )
                # If any items are still left over, add them to the
                # list to be written
                unprocessed_items = response['UnprocessedItems']

                # If there are items left over, we could do with
                # sleeping some more
                if len(unprocessed_items) > 0:
                    print('Backing off for 5 seconds')
                    await asyncio.sleep(5)

            # Inserted all the unprocessed items, exit loop
            print('Unprocessed items successfully inserted')
            break

        start += 25

    # See if DynamoDB has the last item we inserted
    final_item = 'item' + str(start + 24)
    print('Item "{0}" should exist'.format(final_item))

    response = await client.get_item(
        TableName=table_name,
        Key={'pk': {'S': final_item}}
    )
    print('Response: ' + str(response['Item']))

    await client.close()
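create_batch_write_structure() is not shown in this example; a hedged sketch of what such a helper might look like, producing the RequestItems mapping that batch_write_item expects and matching the 'pk'/'itemN' naming used above:

def create_batch_write_structure(table_name, start, num_items):
    # hypothetical helper: build num_items PutRequest entries named item<start>..item<start+num_items-1>
    return {
        table_name: [
            {'PutRequest': {'Item': {'pk': {'S': 'item' + str(start + i)}}}}
            for i in range(num_items)
        ]
    }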
Example #36
async def get_available_regions(hass, service):
    """Get available regions for a service."""
    import aiobotocore

    session = aiobotocore.get_session()
    # get_available_regions is not a coroutine since it does not perform
    # network I/O. But it still performs heavy file I/O, so run it in an
    # executor thread to avoid blocking the event loop.
    return await hass.async_add_executor_job(
        session.get_available_regions, service
    )
Example #37
async def cache_package(project_name, url, package_path):
    key = '/'.join((PIPSY_SIMPLE_ROOT, project_name, os.path.basename(package_path))).lstrip('/')
    boto_session = aiobotocore.get_session()
    async with boto_session.create_client('s3') as s3_client:
        try:
            release = await s3_client.get_object(Bucket=PIPSY_BUCKET, Key=key)
        except s3_client.exceptions.NoSuchKey:
            async with ClientSession() as session:
                async with session.get(url) as index_response:
                    data = await index_response.content.read()
                    await s3_client.put_object(
                        Body=data,
                        Bucket=PIPSY_BUCKET,
                        Key=key,
                    )
Example #38
def init():
    global session
    session = aiobotocore.get_session()
    app = web.Application(middlewares=[heartbeat_middleware_factory])
    aiogithubauth.add_github_auth_middleware(
        app,
        github_id=gh_id,
        github_secret=gh_secret,
        github_org=gh_org,
        cookie_name=cookie_name,
        cookie_key=cookie_key
    )
    app.router.add_route('GET', '/test', handle_auth)
    app.router.add_route('GET', '/{tail:.*}', stream_file)  # Everything else
    return app
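A hedged sketch of how this application factory might be served with aiohttp; the port is an assumption:

if __name__ == '__main__':
    # hypothetical entry point; 8080 is an arbitrary choice
    web.run_app(init(), port=8080)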
Example #39
    def setUp(self):
        super().setUp()

        self.session = aiobotocore.get_session(loop=self.loop)
        self.client = self.session.create_client('s3', region_name='us-west-2')

        self.bucket_name = 'aiobotocoretest%s-%s' % (
            int(time.time()), random.randint(1, 1000))
        self.loop.run_until_complete(self.client.create_bucket(
            Bucket=self.bucket_name,
            CreateBucketConfiguration={
                'LocationConstraint': 'us-west-2',
            }
        ))
        self.addCleanup(self.loop.run_until_complete,
                        self.client.delete_bucket(Bucket=self.bucket_name))
Example #40
    async def fetch_releases_for_project_from_s3(self, project_name):
        if PIPSY_SIMPLE_ROOT:
            prefix = '/'.join([PIPSY_SIMPLE_ROOT, project_name])
        else:
            prefix = project_name

        release_files = {}
        session = aiobotocore.get_session()
        async with session.create_client('s3') as client:
            paginator = client.get_paginator('list_objects_v2')
            async for result in paginator.paginate(Bucket=PIPSY_BUCKET, Prefix=prefix):
                for k in result.get('Contents', []):
                    release_files[os.path.basename(k['Key'])] = {
                        "filename": os.path.basename(k['Key']),
                        "url": os.path.basename(k['Key']),
                        "md5_digest": k['ETag'].strip('"'),
                    }

        return release_files
Example #41
async def go(loop):
    session = aiobotocore.get_session(loop=loop)
    client = session.create_client('sqs', region_name='us-west-2')

    print('Creating test_queue1')
    response = await client.create_queue(QueueName='test_queue1')
    queue_url = response['QueueUrl']

    response = await client.list_queues()

    print('Queue URLs:')
    for queue_name in response.get('QueueUrls', []):
        print(' ' + queue_name)

    print('Deleting queue {0}'.format(queue_url))
    await client.delete_queue(QueueUrl=queue_url)

    print('Done')
    await client.close()
Example #42
def go(loop, task_id):

  print('Start task {}'.format(task_id))
  bucket = 'jharai'
  filename = '{}-{}'.format(time.strftime("%H-%M-%S"), task_id)
  folder = 'aiobotocore'
  key = '{}/{}'.format(folder, filename)

  session = aiobotocore.get_session(loop=loop)
  client = session.create_client(
      's3',
      region_name='ap-northeast-1')

  try:
    # upload object to amazon s3
    data = b'\x01' * 1024
    resp = yield from client.put_object(Bucket=bucket, Key=key, Body=data)
    print('Finish task {}'.format(task_id))
    print(resp)
  finally:
    client.close()
Example #43
    async def get(self):

        if not self.request.path.endswith('/'):
            return web.HTTPFound(self.request.path + '/')

        keys = []
        session = aiobotocore.get_session()
        async with session.create_client('s3') as client:
            paginator = client.get_paginator('list_objects_v2')
            async for result in paginator.paginate(Bucket=PIPSY_BUCKET, Prefix=PIPSY_SIMPLE_ROOT, Delimiter="/"):
                for k in result.get('CommonPrefixes', []):
                    keys.append(k['Prefix'])

        projects = sorted(list({normalize_project_name(k.rstrip('/')) for k in keys}))
        body = [f"  <a href='{project}/'>{project}</a><br/>" for project in projects]
        body = SIMPLE_TEMPLATE.format(body="\n".join(body))

        return web.Response(
                status=200,
                body=body,
                headers={
                    'Content-Type': 'text/html; charset=utf-8',
                }
            )
Example #44
    async def stream_key_from_s3(self, key):
        session = aiobotocore.get_session()
        async with session.create_client('s3') as client:
            try:
                release = await client.get_object(Bucket=PIPSY_BUCKET, Key=key)
            except client.exceptions.NoSuchKey:
                raise KeyError

            response = web.StreamResponse(
                status=200,
                headers={
                    "Content-Type": release['ContentType'],
                    "ETag": release['ETag'].strip('"'),
                }
            )
            response.content_length = release['ContentLength']
            await response.prepare(self.request)
            while True:
                data = await release['Body'].read(8192)
                if not data:
                    await response.drain()
                    break
                response.write(data)
            return response
Example #45
    def __init__(self, name, *,
                 aws_region='us-west-2',
                 connector=None,
                 scheme='http',
                 boto_creds=None,
                 logger=None,
                 num_retries=6,
                 timeout=None,
                 loop=None):
        """
        Bucket class used to access S3 buckets

        @param name: name of bucket to access
        @param aws_region: AWS region to use for communication
        @param connector:
        @param scheme: http or https
        @param boto_creds: botocore credential resolver
        @param logger:
        @param num_retries: number of retries for AWS operations
        @param timeout: aiohttp timeout in seconds
        @return: aios3 Bucket object
        """

        if logger is None: logger = logging.getLogger('aio-s3')

        self._name = name
        self._connector = connector
        self._num_retries = num_retries
        self._num_requests = 0
        self._aws_region = aws_region
        self._boto_creds = boto_creds
        self._timeout = timeout
        self._logger = logger
        self._loop = loop
        self._presign_cache = dict()  # (method, params) -> url
        self._cache_hits = 0
        self._cache_misses = 0
        self._retry_handler = functools.partial(_RetryHandler, timeout=self._timeout)
        self._scheme = scheme
        self._aio_boto_session = None

        # Virtual style host URL
        # ----------------------
        #   endpoint: bucket.s3.amazonaws.com / bucket.s3-aws-region.amazonaws.com
        #   host: bucket.s3.amazonaws.com
        #
        # Path Style
        # ----------
        #   endpoint: s3.amazonaws.com/bucket / s3-aws-region.amazonaws.com/bucket
        #   host: s3.amazonaws.com
        #

        # We use Path Style because the Amazon SSL wildcard cert will not match for virtual style with buckets
        # that have '.'s: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html

        if aws_region == 'us-east-1':
            self._host = "s3.amazonaws.com"
        else:
            self._host = "s3-" + aws_region + ".amazonaws.com"

        self._endpoint = self._host + "/" + self._name

        if self._connector is None:
            kwargs = {}
            if timeout:
                kwargs['conn_timeout'] = timeout

            self._connector = aiohttp.TCPConnector(force_close=False, keepalive_timeout=10, use_dns_cache=False, loop=self._loop, **kwargs)

        use_ssl = self._scheme == 'https'

        connector_args = {
            'use_dns_cache': self._connector._use_dns_cache,
            'force_close': self._connector._force_close,
            'keepalive_timeout': self._connector._keepalive_timeout}

        aio_config = aiobotocore.client.AioConfig(signature_version='s3v4', connector_args=connector_args)

        self._aio_boto_session = aiobotocore.get_session(loop=self._loop)
        self._aio_boto_client = self._aio_boto_session.create_client('s3', region_name=self._aws_region, config=aio_config, use_ssl=use_ssl)

        self._parsers = dict()  # OpName: (op_model, parser) map

        if self._boto_creds is None:
            self._boto_creds = botocore.credentials.create_credential_resolver(self._aio_boto_session).load_credentials()

        self._session = aiohttp.ClientSession(connector=self._connector, loop=self._loop, response_class=aiobotocore.endpoint.ClientResponseProxy)
        self._signer = botocore.auth.S3SigV4Auth(self._boto_creds, 's3', self._aws_region)

        # stats support
        self._concurrent = 0
        self._last_stat_time = time.time()
        self._request_times = []
Example #46
    def client(self):
        session = aiobotocore.get_session(loop=self._loop)
        return session.create_client(self.boto_service_name, **self._client_options)
Example #47
def session(event_loop):
    session = aiobotocore.get_session(loop=event_loop)
    return session
Example #48
def session(loop):
    session = aiobotocore.get_session(loop=loop)
    return session
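A hedged sketch of how such a session fixture might be consumed in a test, assuming pytest-asyncio and credentials available in the environment:

async def test_list_buckets(session):
    # hypothetical test using the fixture above
    async with session.create_client('s3', region_name='us-east-1') as client:
        resp = await client.list_buckets()
        assert 'Buckets' in resp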
Example #49
def configure():
    app = aiohttp.web.Application()

    # Pull configuration out of the environment
    app["settings"] = {
        "endpoint": os.environ["CONVEYOR_ENDPOINT"],
        "docs_bucket": os.environ["DOCS_BUCKET"],
    }

    # Setup a HTTP session for our clients to share connections with and
    # register a shutdown callback to close the session.
    app["http.session"] = aiohttp.ClientSession(
        loop=asyncio.get_event_loop(),
        headers={"User-Agent": "conveyor"},
    )
    app["boto.session"] = aiobotocore.get_session(
        loop=asyncio.get_event_loop(),
    )
    app.on_shutdown.append(session_close)

    app["tasks"] = []

    app["redirects"] = {}
    _fetch_redirects_task = asyncio.ensure_future(
        redirects_refresh_task(app),
        loop=asyncio.get_event_loop(),
    )

    app.on_shutdown.append(cancel_tasks)

    # Allow cross-origin GETs by default
    cors = aiohttp_cors.setup(app, defaults={
            "*": aiohttp_cors.ResourceOptions(
                allow_methods="GET",
            )
    })

    # Add routes and views to our application
    cors.add(app.router.add_route(
        "GET",
        "/packages/{python_version}/{project_l}/{project_name}/{filename}",
        redirect,
    ))
    app.router.add_route(
        "HEAD",
        "/packages/{python_version}/{project_l}/{project_name}/{filename}",
        redirect,
    )
    cors.add(app.router.add_route(
        "GET",
        "/packages/{tail:.*}",
        not_found,
    ))
    cors.add(app.router.add_route(
        "GET",
        "/packages",
        not_found,
    ))

    app.router.add_route(
        "GET",
        "/_health/",
        health,
    )
    app.router.add_route(
        "GET",
        "/_health",
        health,
    )

    # Add Documentation routes
    app.router.add_route(
        "GET",
        "/",
        index,
    )
    app.router.add_route(
        "HEAD",
        "/",
        index,
    )
    app.router.add_route(
        "GET",
        "/{project_name}/{path:.*}",
        documentation,
    )
    app.router.add_route(
        "GET",
        "/{project_name}",
        documentation_top,
    )

    return app
Example #50
    def create_sample_files(self, filenames, content):
        loop = asyncio.get_event_loop()
        session = aiobotocore.get_session(loop=loop)
        s3 = session.create_client('s3', aws_secret_access_key=settings.AWS_SECRET,
                                   aws_access_key_id=settings.AWS_KEY)
        return loop.run_until_complete(asyncio.wait(
            [asyncio.Task(self._create_sample_file(s3, fname, content))
             for fname in filenames]))
Example #51
File: s3.py Project: jephdo/s3lib
def get_aioclient(loop=None):
    session = aiobotocore.get_session(loop=loop)
    return session.create_client('s3')
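A hedged usage sketch for the helper above; the caller is assumed to close the client when done, and the bucket name is illustrative:

async def list_keys(bucket):
    # hypothetical caller of get_aioclient()
    client = get_aioclient()
    try:
        resp = await client.list_objects_v2(Bucket=bucket)
        return [obj['Key'] for obj in resp.get('Contents', [])]
    finally:
        await client.close()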