コード例 #1
0
def _get_auth_from_env(
        env: Mapping[str, str]) -> Optional[requests_aws4auth.AWS4Auth]:
    """Build an AWS4Auth for the Elasticsearch ('es') service.

    Prefers ECS task-role credentials when running in Docker, otherwise
    falls back to the default boto3 credential chain. Returns None when
    no usable access key can be found.
    """
    relative_uri = env.get('AWS_CONTAINER_CREDENTIALS_RELATIVE_URI')
    if relative_uri:
        # ECS task IAM role: credentials are served by the link-local
        # metadata endpoint.
        # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
        reply = requests.get(f'http://169.254.170.2{relative_uri}')
        reply.raise_for_status()
        payload = reply.json()
        access_key_id = payload.get('AccessKeyId')
        secret_access_key = payload.get('SecretAccessKey')
        session_token = payload.get('Token')
    else:
        creds = boto3.Session().get_credentials()
        if not creds:
            return None
        access_key_id = creds.access_key
        secret_access_key = creds.secret_key
        session_token = creds.token
    if not access_key_id:
        return None
    return requests_aws4auth.AWS4Auth(
        access_key_id,
        secret_access_key,
        env.get('AWS_REGION', 'eu-west-3'),
        'es',
        session_token=session_token)
コード例 #2
0
    def create_elasticsearch_client(self):
        """Build an elasticsearch client using the various parameters passed into this task."""
        # Options shared by both the AWS and the plain-HTTP client.
        common = dict(
            timeout=self.timeout,
            retry_on_status=(HTTP_CONNECT_TIMEOUT_STATUS_CODE,
                             HTTP_GATEWAY_TIMEOUT_STATUS_CODE),
            retry_on_timeout=True,
        )

        if self.connection_type != 'aws':
            return elasticsearch.Elasticsearch(hosts=self.host, **common)

        # SigV4-sign requests with auto-refreshing credentials from the
        # default boto3 chain.
        awsauth = requests_aws4auth.AWS4Auth(
            region='us-east-1',
            service='es',
            refreshable_credentials=boto3.Session().get_credentials(),
        )
        return elasticsearch.Elasticsearch(
            hosts=[{'host': self.host, 'port': 443}],
            http_auth=awsauth,
            use_ssl=True,
            verify_certs=True,
            connection_class=RequestsHttpConnection,
            serializer=JSONSerializerPython2(),
            **common,
        )
コード例 #3
0
ファイル: __init__.py プロジェクト: yangyuanhua/warehouse
def includeme(config):
    """Wire an Elasticsearch client and its index settings into the registry."""
    settings = config.registry.settings
    parsed = urllib.parse.urlparse(settings["elasticsearch.url"])
    query = urllib.parse.parse_qs(parsed.query)
    client_kwargs = {
        "hosts": [urllib.parse.urlunparse(parsed[:2] + ("", ) * 4)],
        "verify_certs": True,
        "ca_certs": certifi.where(),
        "timeout": 2,
        "retry_on_timeout": False,
        "serializer": serializer.serializer,
        "max_retries": 1,
    }
    if query.get("aws_auth"):
        # SigV4 signing for AWS-hosted Elasticsearch, enabled via
        # ?aws_auth=...&region=... on the elasticsearch.url setting.
        client_kwargs["connection_class"] = elasticsearch.RequestsHttpConnection
        client_kwargs["http_auth"] = requests_aws4auth.AWS4Auth(
            settings["aws.key_id"],
            settings["aws.secret_key"],
            query.get("region", ["us-east-1"])[0],
            "es",
        )
    config.registry["elasticsearch.client"] = elasticsearch.Elasticsearch(
        **client_kwargs)
    config.registry["elasticsearch.index"] = parsed.path.strip("/")
    config.registry["elasticsearch.shards"] = int(query.get("shards", ["1"])[0])
    config.registry["elasticsearch.replicas"] = int(
        query.get("replicas", ["0"])[0])
    config.add_request_method(es, name="es", reify=True)

    from warehouse.search.tasks import reindex

    config.add_periodic_task(crontab(minute=0, hour=6), reindex)
コード例 #4
0
def handling_errors():
    """Send a deliberately mis-signed S3 request and dump the error XML."""
    # Empty secret key and service name make the signature invalid on
    # purpose, so S3 replies with an error document.
    auth = requests_aws4auth.AWS4Auth('<ID>', '', 'eu-west-1', '')
    response = requests.get('http://s3.eu-west-1.amazonaws.com', auth=auth)
    document = ET.fromstring(response.text)

    for node in document:
        print('Tag: ' + node.tag)

    for node in document.findall('Message'):
        print(node.tag + ': ' + node.text)
コード例 #5
0
 def awsauth(self):
     """Lazily build and cache a SigV4 signer for self.service."""
     if not hasattr(self, '_awsauth'):
         boto_session = boto3.Session()
         frozen = boto_session.get_credentials().get_frozen_credentials()
         self._awsauth = requests_aws4auth.AWS4Auth(
             frozen.access_key,
             frozen.secret_key,
             boto_session.region_name,
             self.service,
             session_token=frozen.token,
         )
     return self._awsauth
コード例 #6
0
def _aws_auth():
    """Return a plain callable that SigV4-signs requests for the 'es' service."""
    # https://github.com/sam-washington/requests-aws4auth/pull/2
    boto_session = boto3.Session()
    print("Retrieving credentials (profile_name={}, region_name={})".format(
        boto_session.profile_name, boto_session.region_name), file=sys.stderr)
    creds = boto_session.get_credentials()
    signer = requests_aws4auth.AWS4Auth(
        creds.access_key, creds.secret_key, boto_session.region_name, "es",
        session_token=creds.token)

    # Wrap the auth object in a plain function (see the PR linked above).
    return lambda request: signer(request)
コード例 #7
0
ファイル: es_helper.py プロジェクト: harrystech/arthur-tools
    def _aws_auth() -> Any:
        """Build a request-signing callable from the enclosing `session`."""
        creds = session.get_credentials()
        signer = requests_aws4auth.AWS4Auth(
            creds.access_key,
            creds.secret_key,
            session.region_name,
            "es",
            session_token=creds.token,
        )

        # Expose the auth object through a plain function wrapper.
        def _sign(request: Any) -> Any:
            return signer(request)

        return _sign
コード例 #8
0
def aws_request(url, data, operation):
    """POST a SigV4-signed JSON request to API Gateway; return the reply as a JSON string.

    `operation` is forwarded in the X-Amz-Target header.
    """
    signer = requests_aws4auth.AWS4Auth(*read_aws_credentials(), "us-west-2",
                                        "execute-api")
    headers = {
        "Content-Type": "application/json",
        "X-Amz-Target": operation,
    }
    request = requests.Request(
        'POST', url=url, json=data, auth=signer, headers=headers)
    response = requests.Session().send(request.prepare())
    return json.dumps(response.json())
コード例 #9
0
 def send(self, query, variables):
     """Execute a GraphQL query against AppSync with SigV4-signed requests.

     Raises a generic Exception when the GraphQL response carries errors.
     """
     session = boto3.session.Session()
     frozen = session.get_credentials().get_frozen_credentials()
     signer = requests_aws4auth.AWS4Auth(
         frozen.access_key,
         frozen.secret_key,
         session.region_name,
         self.service_name,
         session_token=frozen.token,
     )
     transport = gql.transport.requests.RequestsHTTPTransport(
         url=self.appsync_graphql_url,
         use_json=True,
         headers=self.headers,
         auth=signer,
     )
     result = transport.execute(query, variables)
     if result.errors:
         raise Exception(f'Appsync resp error: `{result.errors}` from query `{query}`, variables `{variables}`')
コード例 #10
0
def es_authentication(service, region, es_host):
    """Return an Elasticsearch client for an AWS-hosted domain over HTTPS.

    Credentials come from the module-level AWS_ACCESS_KEY_ID /
    AWS_SECRET_ACCESS_KEY constants; requests are SigV4-signed.
    """
    creds = boto3.Session(
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY).get_credentials()
    signer = requests_aws4auth.AWS4Auth(
        creds.access_key,
        creds.secret_key,
        region,
        service,
        session_token=creds.token)
    return elasticsearch.Elasticsearch(
        hosts=[{'host': es_host, 'port': 443}],
        http_auth=signer,
        use_ssl=True,
        verify_certs=True,
        connection_class=elasticsearch.RequestsHttpConnection,
        timeout=30)
コード例 #11
0
def _get_auth_from_env(
        env: Mapping[str, str]) -> Optional[requests_aws4auth.AWS4Auth]:
    """Build an AWS4Auth for the 'es' service from environment variables.

    Prefers ECS task-role credentials (link-local metadata endpoint) when
    running in Docker; otherwise falls back to the standard
    AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY variables. Returns None when
    no access key can be found.
    """
    aws_in_docker = env.get('AWS_CONTAINER_CREDENTIALS_RELATIVE_URI')
    if aws_in_docker:
        # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
        response = requests.get(f'http://169.254.170.2{aws_in_docker}')
        response.raise_for_status()
        credentials = response.json()
        access_key_id = credentials.get('AccessKeyId')
        secret_access_key = credentials.get('SecretAccessKey')
        session_token = credentials.get('Token')
    else:
        access_key_id = env.get('AWS_ACCESS_KEY_ID')
        secret_access_key = env.get('AWS_SECRET_ACCESS_KEY')
        # BUG FIX: temporary credentials (STS/SSO) exported to the
        # environment also carry AWS_SESSION_TOKEN; without it SigV4
        # signatures are rejected. Permanent keys leave this unset (None),
        # preserving the old behaviour.
        session_token = env.get('AWS_SESSION_TOKEN')
    if not access_key_id:
        return None
    return requests_aws4auth.AWS4Auth(access_key_id,
                                      secret_access_key,
                                      _find_aws_region(env),
                                      'es',
                                      session_token=session_token)
コード例 #12
0
ファイル: tasks.py プロジェクト: yangyuanhua/warehouse
def reindex(self, request):
    """
    Recreate the Search Index.

    Holds a Redis-backed lock so only one reindex runs at a time. Builds a
    fresh, randomly named index, bulk-loads every project document into it,
    then repoints the alias and deletes the old index — a zero-downtime
    reindex. If the lock is already held, the task retries in 60 seconds.
    """
    r = redis.StrictRedis.from_url(request.registry.settings["celery.scheduler_url"])
    try:
        with SearchLock(r, timeout=30 * 60, blocking_timeout=30):
            p = urllib.parse.urlparse(request.registry.settings["elasticsearch.url"])
            qs = urllib.parse.parse_qs(p.query)
            kwargs = {
                "hosts": [urllib.parse.urlunparse(p[:2] + ("",) * 4)],
                "verify_certs": True,
                "ca_certs": certifi.where(),
                "timeout": 30,
                "retry_on_timeout": True,
                "serializer": serializer.serializer,
            }
            # Optional SigV4 signing for AWS-hosted Elasticsearch, enabled
            # via ?aws_auth=...&region=... on the elasticsearch.url setting.
            aws_auth = bool(qs.get("aws_auth", False))
            if aws_auth:
                aws_region = qs.get("region", ["us-east-1"])[0]
                kwargs["connection_class"] = elasticsearch.RequestsHttpConnection
                kwargs["http_auth"] = requests_aws4auth.AWS4Auth(
                    request.registry.settings["aws.key_id"],
                    request.registry.settings["aws.secret_key"],
                    aws_region,
                    "es",
                )
            client = elasticsearch.Elasticsearch(**kwargs)
            number_of_replicas = request.registry.get("elasticsearch.replicas", 0)
            refresh_interval = request.registry.get("elasticsearch.interval", "1s")

            # We use a randomly named index so that we can do a zero downtime reindex.
            # Essentially we'll use a randomly named index which we will use until all
            # of the data has been reindexed, at which point we'll point an alias at
            # our randomly named index, and then delete the old randomly named index.

            # Create the new index and associate all of our doc types with it.
            index_base = request.registry["elasticsearch.index"]
            random_token = binascii.hexlify(os.urandom(5)).decode("ascii")
            new_index_name = "{}-{}".format(index_base, random_token)
            doc_types = request.registry.get("search.doc_types", set())
            shards = request.registry.get("elasticsearch.shards", 1)

            # Create the new index with zero replicas and index refreshes disabled
            # while we are bulk indexing.
            new_index = get_index(
                new_index_name,
                doc_types,
                using=client,
                shards=shards,
                replicas=0,
                interval="-1",
            )
            new_index.create(wait_for_active_shards=shards)

            # From this point on, if any error occurs, we want to be able to delete our
            # in progress index.
            try:
                # Cap how long the document-export query may run.
                request.db.execute("SET statement_timeout = '600s'")

                for _ in parallel_bulk(
                    client, _project_docs(request.db), index=new_index_name
                ):
                    pass
            except:  # noqa
                # Any failure: drop the half-built index, then re-raise.
                new_index.delete()
                raise
            finally:
                # Always release the DB transaction and connection.
                request.db.rollback()
                request.db.close()

            # Now that we've finished indexing all of our data we can update the
            # replicas and refresh intervals.
            client.indices.put_settings(
                index=new_index_name,
                body={
                    "index": {
                        "number_of_replicas": number_of_replicas,
                        "refresh_interval": refresh_interval,
                    }
                },
            )

            # Point the alias at our new randomly named index and delete the old index.
            if client.indices.exists_alias(name=index_base):
                to_delete = set()
                actions = []
                for name in client.indices.get_alias(name=index_base):
                    to_delete.add(name)
                    actions.append({"remove": {"index": name, "alias": index_base}})
                actions.append({"add": {"index": new_index_name, "alias": index_base}})
                client.indices.update_aliases({"actions": actions})
                client.indices.delete(",".join(to_delete))
            else:
                client.indices.put_alias(name=index_base, index=new_index_name)
    except redis.exceptions.LockError as exc:
        raise self.retry(countdown=60, exc=exc)
import sys
import requests
import requests_aws4auth as aws4auth
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom

# S3 credentials and target region; fill these in before running.
access_id = ''
access_key = ''
region = 'eu-west-2'

# Region-specific S3 REST endpoint, e.g. 's3.eu-west-2.amazonaws.com',
# and a SigV4 signer for the S3 service in that region.
endpoint = 's3.{}.amazonaws.com'.format(region)
auth = aws4auth.AWS4Auth(access_id, access_key, region, 's3')

def xml_pprint(xml_string):
	"""Pretty-print an XML document string to stdout."""
	dom = minidom.parseString(xml_string)
	print(dom.toprettyxml())

def download_file(bucket, s3_name):
	"""Fetch `s3_name` from `bucket` and save it under the same name locally.

	On failure the S3 error XML is pretty-printed instead of saving a file.
	"""
	url = 'http://{}/{}/{}'.format(endpoint, bucket, s3_name)
	print('download file '+url)
	response = requests.get(url, auth=auth)
	print(response)
	if response.ok:
		# BUG FIX: the original leaked the file handle
		# (open(...).write(...) never closed it); the context manager
		# guarantees it is closed even if the write fails.
		with open(s3_name, 'wb') as out_file:
			out_file.write(response.content)
		print('Downloaded {} OK'.format(s3_name))
	else:
		xml_pprint(response.text)

def upload_file(bucket, local_path):
	# NOTE(review): this function appears truncated by extraction — it reads
	# the file and builds the URL but never performs the PUT request; confirm
	# against the original source before relying on it.
	data = open(local_path, 'rb').read()
	url = 'http://{}/{}/{}'.format(endpoint, bucket, local_path)
	print('upload file '+url)
コード例 #14
0
#!/usr/bin/env python3

import sys
import requests
import requests_aws4auth
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import mimetypes

# S3 credentials; fill these in before running.
access_id = ''
access_key = ''
# BUG FIX: the region must be an AWS region *code*, not the console display
# name 'US East (N. Virginia)' — SigV4 signing embeds it verbatim in the
# credential scope, and the endpoint template needs it too.
region = 'us-east-1'
# Dotted endpoint form is valid for every region, including us-east-1
# (the legacy 's3-<region>' dash form does not exist for us-east-1).
endpoint = 's3.{}.amazonaws.com'.format(region)
auth = requests_aws4auth.AWS4Auth(access_id, access_key, region, 's3')
# Namespace used by S3's XML request/response documents.
ns = 'http://s3.amazonaws.com/doc/2006-03-01/'


def xml_pprint(xml_string):
    """Pretty-print an XML document string to stdout."""
    parsed = minidom.parseString(xml_string)
    print(parsed.toprettyxml())


def create_bucket(bucket):
    """Create an S3 bucket, pinning it to the signer's region."""
    # Build the <CreateBucketConfiguration> request document.
    root = ET.Element('CreateBucketConfiguration')
    root.attrib['xmlns'] = ns
    ET.SubElement(root, 'LocationConstraint').text = auth.region
    payload = ET.tostring(root, encoding='utf-8')
    response = requests.put('http://{}.{}'.format(bucket, endpoint),
                            data=payload, auth=auth)
    if response.ok:
        print('Created bucket {} OK'.format(bucket))
コード例 #15
0
    def __service_request(self, method, body=None, params=None):
        '''Submits a network request and waits for a response.

        Arguments:

            method - the function to be called to execute the request.

            body - the json content submitted with the request

            params - query string parameters sent with the request

        Returns:

            The json content from the response wrapped by a Data object. The
            returned object will have a PATH attribute that is this path
            object and a RESPONSE attribute that is the response object for
            the http request. The Data object will be read only. Returns None
            when the response has no content.

        Raises:

            RuntimeError - if the response body lacks the 'result' property.

        '''

        if self.__config.session:
            if self.__config.role_arn:

                # Exchange the session's credentials for temporary ones
                # scoped to the configured role.
                sts = self.__config.session.client('sts')

                res = sts.assume_role(
                    RoleArn=self.__config.role_arn,
                    RoleSessionName=self.__config.role_session_name
                    or 'cgf_service_client')

                access_key = res['Credentials']['AccessKeyId']
                secret_key = res['Credentials']['SecretAccessKey']
                session_token = res['Credentials']['SessionToken']
                auth_description = self.__config.role_arn

            else:

                session_credentials = self.__config.session.get_credentials(
                ).get_frozen_credentials()

                access_key = session_credentials.access_key
                secret_key = session_credentials.secret_key
                session_token = session_credentials.token
                auth_description = 'session'

            # SigV4 signing for API Gateway ('execute-api').
            auth = requests_aws4auth.AWS4Auth(
                access_key,
                secret_key,
                self.__config.session.region_name,
                'execute-api',
                session_token=session_token)

        else:

            auth = None
            auth_description = None

        if self.__config.verbose:
            print('HTTP', method.__name__.upper(), self.__url, params, body,
                  auth_description)

        response = method(self.__url, params=params, json=body, auth=auth)

        if self.__config.verbose:
            print('  -->', response.status_code, response.text)

        error.HttpError.for_response(response)

        if response.content:

            result = response.json()

            # TODO: the generated api gateway lambda dispatch mapping injects this 'result' thing
            # in there, it shouldn't do this because it breaks the contract defined by the swagger.
            # To fix it, we need to:
            #
            # 1) change swagger_processor.lambda_dispatch to return the response provided by the
            # lambda directly. Problem is that breaks the additional_response_template_content feature,
            # which can probably be removed all together.
            #
            # 2) change the c++ client generator handle the missing result property
            #
            # 3) change cgp to handle the missing result property
            #
            # 4) remove the following code
            #
            result = result.get('result', None)
            if result is None:
                # BUG FIX: the original *returned* the RuntimeError instead of
                # raising it, so callers received an exception object as data.
                raise RuntimeError(
                    'Service response did not include the stupid result property.'
                )
            #
            # END TODO

            return Data(result, read_only=True, PATH=self, RESPONSE=response)

        else:

            return None
コード例 #16
0
import sys
import requests
import requests_aws4auth as AA
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import mimetypes

# Read the access key ID and secret key from local files.
# BUG FIX: the original rebound a single handle `f` without ever closing
# either file; the context managers guarantee both handles are closed.
with open('ID.txt') as id_file:
    access_id = id_file.read()
with open('Key.txt') as key_file:
    access_key = key_file.read()
# NOTE(review): read() keeps any trailing newline in the credentials —
# confirm the files contain none, or SigV4 signing will fail.
region = 'ap-northeast-2'  # target AWS region
endpoint = 's3-{}.amazonaws.com'.format(region)  # region-specific S3 endpoint
auth = AA.AWS4Auth(access_id, access_key, region, 's3')  # SigV4 signer for S3
ns = 'http://s3.amazonaws.com/doc/2006-03-01/'  # S3 XML namespace


def xml_pprint(element):
    """Pretty-print serialized XML to stdout.

    NOTE(review): despite the name, `element` is an XML *string*, not an
    ElementTree element — minidom.parseString only accepts text.
    """
    document = minidom.parseString(element)
    print(document.toprettyxml())


def upload_file(bucket, s3_name, local_path, acl='private'):
    data = open(local_path, 'rb').read()  #모르겠다.
    url = 'http://{}.{}/{}'.format(bucket, endpoint,
                                   s3_name)  #준비해둔 url 양식을 끼워넣음
    headers = {'x-amz-acl': acl}  #헤더중에 접근권한 성정 변경
    mimetype = mimetypes.guess_type(local_path)[0]  #자료형태를 추측??

    #다운로드 없이 바로 이미지 띄움
    if mimetype: