Example 1
def getCredentials(self):
    provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                 num_attempts=2))
    creds = provider.load()
    access_key = creds.access_key
    return access_key
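The snippet above assumes the botocore imports are already in scope. A self-contained sketch of the same pattern follows; note that the fetcher's timeout is in seconds, so the 1000 used throughout these examples is far larger than necessary.

# Minimal sketch, assuming botocore is installed and the code runs on an
# EC2 instance with an IAM role attached. provider.load() returns None
# when no role credentials are available.
from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher

provider = InstanceMetadataProvider(
    iam_role_fetcher=InstanceMetadataFetcher(timeout=5, num_attempts=2))
creds = provider.load()
if creds is not None:
    print(creds.access_key)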
Example 2
    def __init__(self, dest=None, content_type=None):
        self.errors = []

        if not dest:
            dest = 'default'

        try:
            destination_settings =\
                app_settings.S3DIRECT_DESTINATIONS[dest]
        except KeyError:
            self.errors.append(
                'Provided destination is not specified in config')
            return

        for key, value in destination_settings.items():
            setattr(self, key, value)

        self.access_key =\
            getattr(settings, 'AWS_S3_ACCESS_KEY_ID', None)
        self.secret_access_key =\
            getattr(settings, 'AWS_S3_SECRET_ACCESS_KEY', None)
        self.now_date = datetime.utcnow().strftime('%Y%m%dT%H%M%S000Z')
        self.raw_date = datetime.utcnow().strftime('%Y%m%d')

        if content_type:
            self.content_type = content_type

        if (content_type
                and (self.allowed and content_type not in self.allowed)
                and self.allowed != '*'):
            self.errors.append('Invalid file type.')
            return

        if not self.region or self.region == 'us-east-1':
            self.endpoint = 's3.amazonaws.com'
        else:
            self.endpoint = f's3-{self.region}.amazonaws.com'

        if self.access_key is None or self.secret_access_key is None:
            try:
                from botocore.credentials import (InstanceMetadataProvider,
                                                  InstanceMetadataFetcher)
            except ImportError:
                InstanceMetadataProvider = None
                InstanceMetadataFetcher = None

            if all([InstanceMetadataProvider, InstanceMetadataFetcher]):
                provider = InstanceMetadataProvider(
                    iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                             num_attempts=2))
                creds = provider.load()
                self.access_key = creds.access_key
                self.secret_access_key = creds.secret_key
                self.token = creds.token
            else:
                self.errors.append(
                    'Failed to access EC2 instance metadata due to '
                    'missing dependency.')
                return
Example 3
def _aws_credentials_available_in_metadata_service():
    import botocore.session
    from botocore.credentials import InstanceMetadataProvider
    from botocore.utils import InstanceMetadataFetcher

    session = botocore.session.Session()
    instance_metadata_provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(
            timeout=session.get_config_variable('metadata_service_timeout'),
            num_attempts=session.get_config_variable(
                'metadata_service_num_attempts'),
            user_agent=session.user_agent()))
    return instance_metadata_provider.load() is not None
Example 4
import boto3
from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher


def delete_object(bucket, key):
    region_name = "eu-west-1"
    provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
    creds = provider.load().get_frozen_credentials()
    s3_client = boto3.client(
        "s3",
        region_name=region_name,
        aws_access_key_id=creds.access_key,
        aws_secret_access_key=creds.secret_key,
        aws_session_token=creds.token,
    )
    s3_client.delete_object(Bucket=bucket, Key=key)
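For comparison, boto3's default credential chain already consults the instance metadata service on EC2, so in most deployments the explicit provider above can be dropped. A minimal sketch (bucket and key names are placeholders):

# Sketch: rely on boto3's default credential resolution instead of
# querying the metadata service by hand.
import boto3

s3_client = boto3.client("s3", region_name="eu-west-1")
s3_client.delete_object(Bucket="my-bucket", Key="path/to/object")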
Example 5
import datetime
import os

from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher


def credentials(file_loc):
    # Truncate any credentials already stored at the given location so
    # fresh credentials can be written.
    if os.path.isfile(file_loc):
        with open(file_loc, "r+") as f:
            f.truncate(0)
    now = datetime.datetime.utcnow()  # Get UTC time now
    provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
    creds = provider.load()
    file_contents = {'access_key': creds.access_key,
                     'secret_key': creds.secret_key,
                     'token': creds.token}
    file_write(file_loc, file_contents)  # Write tokens to file (external helper)
    # _expiry_time is a private botocore attribute; strip its timezone so it
    # can be compared with the naive utcnow() value.
    expiry = creds._expiry_time.replace(tzinfo=None)
    expiry_time_sec = (expiry - now).total_seconds()  # Seconds before token expiry
    expiry_time_min = expiry_time_sec / 60
    # Minutes until expiry, minus a 15-minute safety margin
    return int(expiry_time_min - 15)
Example 6
def get_boto_session():
    if using_IAM_role:
        provider = InstanceMetadataProvider(
            iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                     num_attempts=2))
        print(
            "Loading IAM Role credentials... (if this is taking a while you are probably not running inside EC2)"
        )
        creds = provider.load()
        print("IAM credentials loaded")
        return boto3.Session(aws_access_key_id=creds.access_key,
                             aws_secret_access_key=creds.secret_key,
                             aws_session_token=creds.token,
                             region_name='us-east-1')
    else:
        return boto3.Session(region_name='us-east-1')
Example 7
def get_boto_session(
    force_ec2: bool = False,
    region_name: str = "eu-west-1",
):

    kwargs = {"region_name": region_name}
    if force_ec2:
        provider = InstanceMetadataProvider(
            iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                     num_attempts=2))
        creds = provider.load().get_frozen_credentials()
        kwargs["aws_access_key_id"] = creds.access_key
        kwargs["aws_secret_access_key"] = creds.secret_key
        kwargs["aws_session_token"] = creds.token

    return boto3.Session(**kwargs)
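A brief usage sketch for the variant above; with force_ec2=True the instance-role credentials are resolved once and pinned into the session:

# Usage sketch: build a session from the instance role, then a client.
session = get_boto_session(force_ec2=True, region_name="eu-west-1")
s3 = session.client("s3")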
Example 8
def get_aws_credentials():
    access_key = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
    secret_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
    if access_key and secret_key:
        # AWS tokens are not created for pregenerated access keys
        return AWSCredentials(None, secret_key, access_key)

    if not InstanceMetadataProvider or not InstanceMetadataFetcher:
        # AWS credentials are not required for publicly-writable buckets
        return AWSCredentials(None, None, None)

    provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
    creds = provider.load()
    if creds:
        return AWSCredentials(creds.token, creds.secret_key, creds.access_key)
    else:
        # Creds are incorrect
        return AWSCredentials(None, None, None)
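AWSCredentials itself is not defined in the snippet; judging from the call sites, it is a three-field container in (token, secret_key, access_key) order. A hypothetical definition consistent with that usage:

from collections import namedtuple

# Hypothetical container; the field order matches the calls above.
AWSCredentials = namedtuple('AWSCredentials', ['token', 'secret_key', 'access_key'])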
Example 10
class AutoRefreshableSession:
    METHOD = "iam-role"
    DEFAULT_RETRIES = 5
    DEFAULT_METADATA_SERVICE_TIMEOUT = 10  # secs

    def __init__(self,
                 retries=DEFAULT_RETRIES,
                 metadata_service_timeout=DEFAULT_METADATA_SERVICE_TIMEOUT):
        # timeout and num_attempts are public constructor arguments; the
        # retry predicate has no public hook, so it is swapped in via a
        # private attribute (fragile across botocore versions).
        self.instance_metadata_fetcher = InstanceMetadataFetcher(
            timeout=metadata_service_timeout, num_attempts=retries)
        self.instance_metadata_fetcher._needs_retry_for_credentials = self.needs_retry_for_credentials
        self.instance_metadata_provider = InstanceMetadataProvider(
            self.instance_metadata_fetcher)

    def check_for_missing_keys(self, required_cred_fields, response):
        print(response.content)
        credentials = json.loads(response.content)
        for field in required_cred_fields:
            if field not in credentials:
                print('Retrieved credentials are missing required field: %s'
                      % field)
                return True
        return False

    def needs_retry_for_credentials(self, response):
        return (self.instance_metadata_fetcher._is_non_ok_response(response)
                or self.instance_metadata_fetcher._is_empty(response)
                or self.instance_metadata_fetcher._is_invalid_json(response)
                or self.check_for_missing_keys(
                    self.instance_metadata_fetcher._REQUIRED_CREDENTIAL_FIELDS,
                    response))

    def _get(self, region):
        self.session = get_session()
        self.session._credentials = self.instance_metadata_provider.load()
        self.session.set_config_variable("region", region)
        self.autorefresh_session = Session(botocore_session=self.session)
        return self

    def client(self, service_name):
        return self.autorefresh_session.client(service_name=service_name)
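A usage sketch, assuming the class above with its botocore and boto3 imports in scope. Since the fetcher patching relies on private botocore attributes (_needs_retry_for_credentials, _is_empty, and so on), the class is coupled to a specific botocore version:

# Hypothetical usage: credentials come from the instance role and are
# managed by the wrapped botocore session.
session = AutoRefreshableSession(retries=3)._get("us-east-1")
s3 = session.client("s3")
print(s3.list_buckets()["Buckets"])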
Example 11
    def __call__(self, host, username, password, aws_region, boto_profile):
        """ Return the authorization header. If 'boto_profile' is passed, it'll be used. Otherwise it'll sign requests
        with instance role.

        :param host: ElasticSearch host.
        :param username: Username used for authenticating the requests to ElasticSearch.
        :param password: Password used for authenticating the requests to ElasticSearch.
        :param aws_region: AWS Region to use. Only required when signing requests.
        :param boto_profile: Boto profile to use for connecting. Only required when signing requests.
        """
        if username and password:
            return username + ":" + password

        if not aws_region:
            return None

        if boto_profile:
            # Executing elastalert from machine with aws credentials
            config = configparser.ConfigParser()
            config.read(os.path.expanduser("~") + "/.aws/credentials")
            aws_access_key_id = str(config[boto_profile]["aws_access_key_id"])
            aws_secret_access_key = str(config[boto_profile]["aws_secret_access_key"])
            aws_token = None
        else:
            # Executing elastalert from machine deployed with specific role
            provider = InstanceMetadataProvider(iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
            aws_credentials = provider.load()
            aws_access_key_id = str(aws_credentials.access_key)
            aws_secret_access_key = str(aws_credentials.secret_key)
            aws_token = str(aws_credentials.token)

        return AWSRequestsAuth(
            aws_access_key=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_token=aws_token,
            aws_host=host,
            aws_region=aws_region,
            aws_service="es",
        )
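A hypothetical usage sketch, assuming the enclosing class is a callable helper named Auth (the class name is not shown in the snippet; host and region are placeholders):

# Sign requests with the instance role: no username/password, no profile.
auth = Auth()(host="search-example.eu-west-1.es.amazonaws.com",
              username=None, password=None,
              aws_region="eu-west-1", boto_profile=None)
# auth is an AWSRequestsAuth instance usable as a `requests` auth handler.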
Example 13
    def __setupSparkSession__(self, appName: str = None):
        '''
        Initialize the Spark environment with a few default configurations and start the Spark session.
        '''
        self.__conf = SparkConf()
        hmConf = {
            "spark.rps.askTimeout": "1200",
            "spark.network.timeout": "1200",
            "spark.broadcast.blockSize": "16m",
            "spark.sql.broadcastTimeout": "1200",
            "spark.broadcast.compress": "true",
            "spark.rdd.compress": "true",
            "fs.s3.enableServerSideEncryption": "true",
            "spark.kryo.unsafe": "false",
            "spark.kryoserializer.buffer": "10240",
            "spark.kryoserializer.buffer.max": "2040m",
            "spark.io.compression.codec": "org.apache.spark.io.SnappyCompressionCodec",
            "spark.serializer": "org.apache.spark.serializer.KryoSerializer",
            "mapreduce.fileoutputcommitter.algorithm.version": "2",
            "spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version": "2",
        }
        self.__conf.setAll(hmConf.items())
        SparkContext.setSystemProperty("com.amazonaws.services.s3.enableV4",
                                       "true")
        SparkContext.setSystemProperty("com.amazonaws.services.s3.enforceV4",
                                       "true")
        self.__spark = SparkSession \
                        .builder \
                        .config(conf=self.__conf) \
                        .appName(appName or "PySparkApp") \
                        .enableHiveSupport() \
                        .getOrCreate()
        self.__sc = self.__spark.sparkContext
        self.sqlC = SQLContext(self.__sc)
        self.__sc.setSystemProperty("com.amazonaws.services.s3.enableV4",
                                    "true")
        self.__sc.setSystemProperty("com.amazonaws.services.s3.enforceV4",
                                    "true")
        self.__sc.setLogLevel(self.__parms.get("--logLevel", "INFO"))

        hdpCnf = self.__sc._jsc.hadoopConfiguration()
        hdpCnf.set("io.file.buffer.size", "65536")
        hdpCnf.set("mapreduce.fileoutputcommitter.algorithm.version", "2")
        hdpCnf.set(
            "fs.s3a.endpoint",
            "s3.%s.amazonaws.com" % self.__parms.get("--awsRegion", "us-east-1"))
        if self.__parms.get("--runEnv", "AWS") == "AWS":
            from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
            provider = InstanceMetadataProvider(
                iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                         num_attempts=2))
            creds = provider.load()
            for k, v in {
                "fs.s3a.access.key": creds.access_key,
                "fs.s3a.secret.key": creds.secret_key,
                "fs.s3a.server-side-encryption-algorithm": "SSE-KMS",
                "fs.s3.enableServerSideEncryption": "true",
                "fs.s3.impl": "org.apache.hadoop.fs.s3a.S3AFileSystem",
                "fs.s3a.impl": "org.apache.hadoop.fs.s3a.S3AFileSystem",
                "fs.s3a.endpoint": "s3.%s.amazonaws.com" %
                self.__parms.get("--awsRegion", "us-east-1"),
            }.items():
                hdpCnf.set(k, v)
Example 14
def handle_ingest(args):
    """
    awspx ingest
    """

    session = None

    # Get credentials from environment variables
    if args.env:
        session = boto3.session.Session(region_name=args.region)

    # Use existing profile
    elif args.profile in Profile().credentials.sections():
        session = boto3.session.Session(profile_name=args.profile,
                                        region_name=args.region)
    # Use instance profile
    elif args.profile == "default":
        try:
            provider = InstanceMetadataProvider(
                iam_role_fetcher=InstanceMetadataFetcher())
            creds = provider.load()

            session = boto3.session.Session(
                region_name=args.region,
                aws_access_key_id=creds.access_key,
                aws_secret_access_key=creds.secret_key,
                aws_session_token=creds.token)
        except Exception:
            # Fall through; the profile-creation prompt below handles it.
            pass

    # Specified profile doesn't exist, offer to create it
    if not session:

        profile = console.item("Create profile")
        profile.notice(f"The profile '{args.profile}' doesn't exist. "
                       "Please enter your AWS credentials.\n"
                       "(this information will be saved automatically)")

        args.create_profile = args.profile
        handle_profile(args, console=profile)

        session = boto3.session.Session(profile_name=args.profile,
                                        region_name=args.region)
    # Ancillary operations
    try:

        if args.mfa_device:

            session_token = session.client('sts').get_session_token(
                SerialNumber=args.mfa_device,
                TokenCode=args.mfa_token,
                DurationSeconds=args.mfa_duration)["Credentials"]

            session = boto3.session.Session(
                aws_access_key_id=session_token["AccessKeyId"],
                aws_secret_access_key=session_token["SecretAccessKey"],
                aws_session_token=session_token["SessionToken"],
                region_name=args.region)

        if args.role_to_assume:

            assume_role_args = {
                "RoleArn": args.role_to_assume,
                "RoleSessionName": "awspx",
                "DurationSeconds": args.role_to_assume_duration,
                **({"ExternalId": args.role_to_assume_external_id}
                   if args.role_to_assume_external_id else {})
            }

            assumed_role = session.client('sts').assume_role(
                **assume_role_args)["Credentials"]

            session = boto3.session.Session(
                aws_access_key_id=assumed_role["AccessKeyId"],
                aws_secret_access_key=assumed_role["SecretAccessKey"],
                aws_session_token=assumed_role["SessionToken"],
                region_name=args.region)

    except ClientError as e:
        console.critical(e)

    ingestor = IngestionManager(session=session,
                                console=console,
                                services=args.services,
                                db=args.database,
                                quick=args.quick,
                                skip_actions=args.skip_actions_all,
                                only_types=args.only_types,
                                skip_types=args.skip_types,
                                only_arns=args.only_arns,
                                skip_arns=args.skip_arns)

    assert ingestor.zip is not None, "Ingestion failed"

    args.load_zips = [ingestor.zip]
    handle_db(args, console=console.item("Creating Database"))

    if not (args.skip_attacks_all or args.skip_actions_all):
        handle_attacks(args, console=console.item("Updating Attack paths"))
Example 15
def get_upload_params(request):
    content_type = request.POST['type']
    filename = request.POST['name']

    dest = get_s3direct_destinations().get(request.POST['dest'])

    if not dest:
        data = json.dumps({'error': 'File destination does not exist.'})
        return HttpResponse(data, content_type="application/json", status=400)

    key = dest.get('key')
    auth = dest.get('auth')
    allowed = dest.get('allowed')
    acl = dest.get('acl')
    bucket = dest.get('bucket')
    cache_control = dest.get('cache_control')
    content_disposition = dest.get('content_disposition')
    content_length_range = dest.get('content_length_range')
    server_side_encryption = dest.get('server_side_encryption')

    if not acl:
        acl = 'public-read'

    if not key:
        data = json.dumps({'error': 'Missing destination path.'})
        return HttpResponse(data, content_type="application/json", status=403)

    if auth and not auth(request.user):
        data = json.dumps({'error': 'Permission denied.'})
        return HttpResponse(data, content_type="application/json", status=403)

    if (allowed and content_type not in allowed) and allowed != '*':
        data = json.dumps({'error': 'Invalid file type (%s).' % content_type})
        return HttpResponse(data, content_type="application/json", status=400)

    if callable(key):
        key = key(filename)
    elif key == '/':
        key = '${filename}'
    else:
        # The literal string '${filename}' is an S3 field variable for key.
        # https://aws.amazon.com/articles/1434#aws-table
        key = '%s/${filename}' % key

    access_key = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
    secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
    token = None

    if access_key is None or secret_access_key is None:
        # Get credentials from instance profile if not defined in settings --
        # this avoids the need to put access credentials in the settings.py file.
        # Assumes we're running on EC2.

        try:
            from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
        except ImportError:
            InstanceMetadataProvider = None
            InstanceMetadataFetcher = None

        if all([InstanceMetadataProvider, InstanceMetadataFetcher]):
            provider = InstanceMetadataProvider(
                iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                         num_attempts=2))
            creds = provider.load()
            access_key = creds.access_key
            secret_access_key = creds.secret_key
            token = creds.token
        else:
            data = json.dumps({
                'error': 'Failed to access EC2 instance metadata '
                         'due to missing dependency.'
            })
            return HttpResponse(data,
                                content_type="application/json",
                                status=500)

    data = create_upload_data(content_type, key, acl, bucket, cache_control,
                              content_disposition, content_length_range,
                              server_side_encryption, access_key,
                              secret_access_key, token)

    return HttpResponse(json.dumps(data), content_type="application/json")
Example 16
def handle_ingest(args):
    """
    awspx ingest
    """
    resources = Elements()
    account = "000000000000"
    session = None
    graph = None

    # Check to see if environment variables are being used for credentials.
    if args.env:
        session = boto3.session.Session(region_name=args.region)
    # Use existing profile
    elif args.profile in CREDENTIALS.sections():
        session = boto3.session.Session(region_name=args.region,
                                        profile_name=args.profile)
    # Use instance profile
    elif args.profile == "default":
        try:
            provider = InstanceMetadataProvider(
                iam_role_fetcher=InstanceMetadataFetcher())
            creds = provider.load()

            session = boto3.session.Session(region_name=args.region,
                                            aws_access_key_id=creds.access_key,
                                            aws_secret_access_key=creds.secret_key,
                                            aws_session_token=creds.token)
        except Exception:
            pass

    # Create new profile
    if not session:
        if input(f"[-] Would you like to create the profile '{args.profile}'? (y/n) ").upper() == "Y":
            args.create_profile = args.profile
            handle_profile(args)
            session = boto3.session.Session(region_name=args.region,
                                            profile_name=args.profile)
        else:
            sys.exit(1)

    try:
        identity = session.client('sts').get_caller_identity()
        account = identity["Account"]

        print(f"[+] Profile:   {args.profile} (identity: {identity['Arn']})")

    except Exception:
        print("[-] Request to establish identity (sts:GetCallerIdentity) failed.")
        sys.exit(1)

    print(f"[+] Services:  {', '.join([s.__name__ for s in args.services])}")
    print(f"[+] Database:  {args.database}")
    print(f"[+] Region:    {args.region}")

    if args.role_to_assume:
        try:
            response = session.client('sts').assume_role(
                RoleArn=args.role_to_assume,
                RoleSessionName="awspx",
                DurationSeconds=args.role_to_assume_duration)

        except ClientError as e:
            print("\n" + str(e))
            if "MaxSessionDuration" in e.response["Error"]["Message"]:
                print("\nTry reducing the session duration using "
                      "'--assume-role-duration'.")

            sys.exit(1)

        if response:
            print(f"[+] Assumed role: {args.role_to_assume}")
            session = boto3.session.Session(
                aws_access_key_id=response["Credentials"]["AccessKeyId"],
                aws_secret_access_key=response["Credentials"]["SecretAccessKey"],
                aws_session_token=response["Credentials"]["SessionToken"],
                region_name=args.region)
        try:
            identity = session.client('sts').get_caller_identity()
            account = identity["Account"]
            print(f"[+] Running as {identity['Arn']}.")
            print(f"[+] Region set to {args.region}.")
        except Exception:
            print("[-] Request to establish identity (sts:GetCallerIdentity) failed.")

    print()

    if session is None:
        sys.exit(1)

    # Run IAM first to try acquire an account number
    if IAM in args.services:
        graph = IAM(session, db=args.database, verbose=args.verbose, quick=args.quick,
                    only_types=args.only_types, skip_types=args.skip_types,
                    only_arns=args.only_arns, skip_arns=args.skip_arns)
        account = graph.account_id

    for service in [s for s in args.services if s != IAM]:
        resources += service(session, account=account, verbose=args.verbose, quick=args.quick,
                             only_types=args.only_types, skip_types=args.skip_types,
                             only_arns=args.only_arns, skip_arns=args.skip_arns)

    if graph is None:
        graph = IAM(session, verbose=args.verbose, quick=args.quick,
                    db=args.database,
                    resources=resources)
    else:
        graph.update(resources)

    args.load_zip = graph.post(skip_all_actions=args.skip_all_actions)
    handle_db(args)

    if not (args.skip_all_attacks or args.skip_all_actions):
        handle_attacks(args)
Example 17
def get_upload_params(request):

    content_type = request.POST['type']
    filename = get_valid_filename(request.POST['name'])
    dest = get_s3upload_destinations().get(request.POST['dest'])

    if not dest:
        data = json.dumps({'error': 'File destination does not exist.'})
        return HttpResponse(data, content_type="application/json", status=400)

    key = dest.get('key')
    auth = dest.get('auth')
    allowed_types = dest.get('allowed_types')
    acl = dest.get('acl')
    bucket = dest.get('bucket')
    cache_control = dest.get('cache_control')
    content_disposition = dest.get('content_disposition')
    content_length_range = dest.get('content_length_range')
    allowed_extensions = dest.get('allowed_extensions')
    server_side_encryption = dest.get('server_side_encryption')

    if not acl:
        acl = 'public-read'

    if not key:
        data = json.dumps({'error': 'Missing destination path.'})
        return HttpResponse(data, content_type="application/json", status=403)

    if auth and not auth(request.user):
        data = json.dumps({'error': 'Permission denied.'})
        return HttpResponse(data, content_type="application/json", status=403)

    if (allowed_types
            and content_type not in allowed_types) and allowed_types != '*':
        data = json.dumps({'error': 'Invalid file type (%s).' % content_type})
        return HttpResponse(data, content_type="application/json", status=400)

    original_ext = splitext(filename)[1]
    lowercased_ext = original_ext.lower()
    if (allowed_extensions and lowercased_ext
            not in allowed_extensions) and allowed_extensions != '*':
        data = json.dumps(
            {'error': 'Forbidden file extension (%s).' % original_ext})
        return HttpResponse(data, content_type="application/json", status=415)

    if callable(key):
        key = key(filename)
    elif key == '/':
        key = filename
    else:
        key = '{0}/{1}'.format(key, filename)

    access_key = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
    secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
    token = None

    if access_key is None or secret_access_key is None:
        # Get credentials from instance profile if not defined in settings --
        # this avoids the need to put access credentials in the settings.py file.
        # Assumes we're running on EC2.

        try:
            from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
        except ImportError:
            InstanceMetadataProvider = None
            InstanceMetadataFetcher = None

        if all([InstanceMetadataProvider, InstanceMetadataFetcher]):
            provider = InstanceMetadataProvider(
                iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                         num_attempts=2))
            creds = provider.load()
            access_key = creds.access_key
            secret_access_key = creds.secret_key
            token = creds.token
        else:
            data = json.dumps({
                'error': 'Failed to access EC2 instance metadata '
                         'due to missing dependency.'
            })
            return HttpResponse(data,
                                content_type="application/json",
                                status=500)

    data = create_upload_data(content_type, key, acl, bucket, cache_control,
                              content_disposition, content_length_range,
                              server_side_encryption, access_key,
                              secret_access_key, token)

    url = None

    # Generate signed URL for private document access
    if acl == "private":
        url = get_signed_download_url(
            key=key.replace("${filename}", filename),
            bucket_name=bucket or settings.AWS_STORAGE_BUCKET_NAME,
            ttl=int(5 * 60),  # 5 mins
        )

    response = {
        "aws_payload": data,
        "private_access_url": url,
    }

    return HttpResponse(json.dumps(response), content_type="application/json")
Example 18
def index():
    if request.method == 'GET':
        return 'OK'
    elif request.method == 'POST':
        # Store the IP address of the requester
        request_ip = ipaddress.ip_address(u'{0}'.format(request.remote_addr))

        # If VALIDATE_SOURCEIP is set to false, do not validate source IP
        if os.environ.get('VALIDATE_SOURCEIP', None) != 'false':

            # If GHE_ADDRESS is specified, use it as the hook_blocks.
            if os.environ.get('GHE_ADDRESS', None):
                hook_blocks = [os.environ.get('GHE_ADDRESS')]
            # Otherwise get the hook address blocks from the API.
            else:
                hook_blocks = requests.get(
                    'https://api.github.com/meta').json()['hooks']

            # Check if the POST request is from github.com or GHE
            for block in hook_blocks:
                if request_ip in ipaddress.ip_network(block):
                    break  # the remote_addr is within the network range of github.
            else:
                if str(request_ip) != '127.0.0.1':
                    abort(403)

        if request.headers.get('X-GitHub-Event') == "ping":
            return json.dumps({'msg': 'Hi!'})
        if request.headers.get('X-GitHub-Event') != "push":
            return json.dumps({'msg': "wrong event type"})

        repos = json.loads(io.open(REPOS_JSON_PATH, 'r').read())

        payload = json.loads(request.data)
        repo_meta = {
            'name': payload['repository']['name'],
            'owner': payload['repository']['owner']['name'],
        }

        # Try to match on branch as configured in repos.json
        repo = None
        match = re.match(r"refs/heads/(?P<branch>.*)", payload['ref'])
        if match:
            repo_meta['branch'] = match.groupdict()['branch']
            repo = repos.get(
                '{owner}/{name}/branch:{branch}'.format(**repo_meta), None)

            # Fallback to plain owner/name lookup
            if not repo:
                repo = repos.get('{owner}/{name}'.format(**repo_meta), None)

        if repo and repo.get('path', None):
            # Check if POST request signature is valid
            key = repo.get('key', None)
            if key:
                signature = request.headers.get('X-Hub-Signature').split(
                    '=')[1]
                if isinstance(key, str):
                    key = key.encode()
                mac = hmac.new(key, msg=request.data, digestmod=sha1)
                if not compare_digest(mac.hexdigest(), signature):
                    abort(403)

        if repo and repo.get('action', None):
            for action in repo['action']:
                subp = subprocess.Popen(action, cwd=repo.get('path', '.'))
                subp.wait()

        if repo and repo.get('s3bucket', None):
            s3bucketname = repo.get('s3bucket')
        else:
            print('missing s3 bucketname')
            abort(500)
        if repo.get('s3key', None):
            s3key = repo.get('s3key')
        else:
            print('missing s3 filename')
            abort(500)

        print('s3 connection')

        if os.environ.get('USE_EC2', None) == 'true':
            provider = InstanceMetadataProvider(
                iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                         num_attempts=2))
            creds = provider.load()
            session = boto3.Session(aws_access_key_id=creds.access_key,
                                    aws_secret_access_key=creds.secret_key,
                                    aws_session_token=creds.token)
            s3 = session.resource('s3')
        else:
            s3 = boto3.resource('s3')
        bucket = s3.Bucket(s3bucketname)

        json.load_s3 = lambda f: json.load(bucket.Object(key=f).get()['Body'])
        json.dump_s3 = lambda obj, f: bucket.Object(key=f).put(
            Body=json.dumps(obj))
        # s3 fetch
        s3data = json.load_s3(s3key)
        datad = FilehashMap(s3data)
        commithash = payload['after']
        for commit in payload['commits']:
            for z in commit['added']:
                print(z)
                datad.additem(z, commithash)
            for z in commit['modified']:
                print(z)
                datad.additem(z, commithash)
            for z in commit['removed']:
                datad.delitem(z)
                print(z)

        print('s3 upload')
        json.dump_s3(datad.displayhashmap(), s3key)

        #set perms
        s3objacl = s3.ObjectAcl(s3bucketname, s3key)
        response = s3objacl.put(ACL='public-read')
        print('s3 done')
        return 'OK'
Example 20
def get_upload_params(request):
    content_type = request.POST['type']
    filename = request.POST['name']

    dest = get_s3direct_destinations().get(request.POST['dest'])

    if not dest:
        data = json.dumps({'error': 'File destination does not exist.'})
        return HttpResponse(data, content_type="application/json", status=400)

    key = dest.get('key')
    auth = dest.get('auth')
    allowed = dest.get('allowed')
    acl = dest.get('acl')
    bucket = dest.get('bucket')
    cache_control = dest.get('cache_control')
    content_disposition = dest.get('content_disposition')
    content_length_range = dest.get('content_length_range')
    server_side_encryption = dest.get('server_side_encryption')

    if not acl:
        acl = 'public-read'

    if not key:
        data = json.dumps({'error': 'Missing destination path.'})
        return HttpResponse(data, content_type="application/json", status=403)

    if auth and not auth(request.user):
        data = json.dumps({'error': 'Permission denied.'})
        return HttpResponse(data, content_type="application/json", status=403)

    if (allowed and content_type not in allowed) and allowed != '*':
        data = json.dumps({'error': 'Invalid file type (%s).' % content_type})
        return HttpResponse(data, content_type="application/json", status=400)

    if callable(key):
        try:
            key = key(filename)
        except ValueError as ve:
            data = json.dumps({'error': 'Filename Invalid. %s' % ve})
            return HttpResponse(data, content_type="application/json", status=400)
    elif key == '/':
        key = '${filename}'
    else:
        # The literal string '${filename}' is an S3 field variable for key.
        # https://aws.amazon.com/articles/1434#aws-table
        key = '%s/${filename}' % key

    access_key = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
    secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
    token = None

    if access_key is None or secret_access_key is None:
        # Get credentials from instance profile if not defined in settings --
        # this avoids the need to put access credentials in the settings.py file.
        # Assumes we're running on EC2.

        try:
            from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
        except ImportError:
            InstanceMetadataProvider = None
            InstanceMetadataFetcher = None

        if all([InstanceMetadataProvider, InstanceMetadataFetcher]):
            provider = InstanceMetadataProvider(iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
            creds = provider.load()
            access_key = creds.access_key
            secret_access_key = creds.secret_key
            token = creds.token
        else:
            data = json.dumps({'error': 'Failed to access EC2 instance metadata due to missing dependency.'})
            return HttpResponse(data, content_type="application/json", status=500)


    data = create_upload_data(
        content_type, key, acl, bucket, cache_control, content_disposition,
        content_length_range, server_side_encryption, access_key, secret_access_key, token
    )

    return HttpResponse(json.dumps(data), content_type="application/json")
Example 21
    def __setupSparkSession__(
        self,
        jobConf: dict,
    ) -> SparkSession:
        '''
        Initialize the Spark environment with a few default configurations and start the Spark session.
        '''
        conf = SparkConf()
        #
        # Setup Spark specific configurations
        #
        hmConf = {
            "spark.executor.pyspark.memory": "512m",
            "spark.debug.maxToStringFields": "5000",
            "spark.rps.askTimeout": "1200",
            "spark.network.timeout": "1200",
            "spark.maxRemoteBlockSizeFetchToMem": "512m",
            "spark.broadcast.blockSize": "16m",
            "spark.broadcast.compress": "true",
            "spark.rdd.compress": "true",
            "spark.io.compression.codec": "org.apache.spark.io.SnappyCompressionCodec",
            "spark.kryo.unsafe": "true",
            "spark.serializer": "org.apache.spark.serializer.KryoSerializer",
            "spark.kryoserializer.buffer": "10240",
            "spark.kryoserializer.buffer.max": "2040m",
            "hive.exec.dynamic.partition": "true",
            "hive.exec.dynamic.partition.mode": "nonstrict",
            "hive.warehouse.data.skiptrash": "true",
            "spark.sql.hive.metastorePartitionPruning": "true",
            "spark.sql.broadcastTimeout": "1200",
            "spark.sql.sources.partitionOverwriteMode": "dynamic",
            "spark.sql.orc.filterPushdown": "true",
            "spark.sql.orc.splits.include.file.footer": "true",
            "spark.sql.orc.cache.stripe.details.size": "1000",
            "spark.hadoop.parquet.enable.summary-metadata": "false",
            "spark.sql.parquet.mergeSchema": "false",
            "spark.sql.parquet.filterPushdown": "true",
            "spark.sql.parquet.fs.optimized.committer.optimization-enabled": "true",
            "spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version": "2",
            "spark.hadoop.mapreduce.fileoutputcommitter.cleanup-failures.ignored": "true",
        }

        for (k, v) in jobConf['sparkconfs'].items():
            hmConf[k] = v

        conf.setAll(hmConf.items())
        #
        # Setup Hadoop specific configurations; collected into a dict here
        # and applied once a live SparkContext exists, since
        # hadoopConfiguration() is only reachable through a running context.
        #
        hdpConfs = {
            'io.file.buffer.size': '65536',
            'mapreduce.fileoutputcommitter.algorithm.version': '2',
        }
        hdpConfs.update(jobConf['hadoopconfs'])

        #
        # Setup AWS specific configurations
        #
        if jobConf['appconfs']['runenv'].upper() == 'AWS':
            SparkContext.setSystemProperty(
                'com.amazonaws.services.s3.enableV4', 'true')
            SparkContext.setSystemProperty(
                'com.amazonaws.services.s3.enforceV4', 'true')
            conf.set(
                "spark.sql.parquet.output.committer.class",
                "com.amazon.emr.committer.EmrOptimizedSparkSqlParquetOutputCommitter"
            )

            try:
                from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
                provider = InstanceMetadataProvider(
                    iam_role_fetcher=InstanceMetadataFetcher(timeout=1000,
                                                             num_attempts=2))
                creds = provider.load()
                hdpConfs.update({
                    'fs.s3a.access.key': creds.access_key,
                    'fs.s3a.secret.key': creds.secret_key,
                })
            except Exception:
                # Off-EC2 or botocore missing; fall back to whatever the
                # environment provides.
                pass
            hdpConfs.update({
                'fs.s3a.server-side-encryption-algorithm': 'SSE-KMS',
                'fs.s3.enableServerSideEncryption': 'true',
                'fs.s3.impl': 'org.apache.hadoop.fs.s3a.S3AFileSystem',
                'fs.s3a.impl': 'org.apache.hadoop.fs.s3a.S3AFileSystem',
                'fs.s3a.endpoint': "s3.%s.amazonaws.com" %
                (jobConf['appconfs']['appdefaults'] or 'us-east-1'),
            })

        spark = SparkSession \
                .builder \
                .config(conf=conf) \
                .appName(jobConf['name'] or 'PySparkApp') \
                .enableHiveSupport() \
                .getOrCreate()

        sc = spark.sparkContext
        hdpCnf = sc._jsc.hadoopConfiguration()
        for (k, v) in hdpConfs.items():
            hdpCnf.set(k, v)

        logLevel = jobConf['appconfs']['logging']['sparkloglevel'] or 'INFO'
        sc.setLogLevel(logLevel)
        if logLevel == "DEBUG":
            msg = ""
            for k in sc._conf.getAll():
                msg += "\t%50s -> %s\n" % (k[0], k[1])
            log.debug(
                "Initiated SparkSession with below confs,\n{}".format(msg))

        return spark
Example 22
import logging
import paste.translogger
import requests
import os
import boto3
from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
from requests_aws4auth import AWS4Auth

secret_key = os.environ.get("SECRET_KEY")
access_key = os.environ.get("ACCESS_KEY")

if secret_key is None:
    logger = logging.getLogger("elasticsearch-service")
    logger.info("No params so attempt get config from machine")
    provider = InstanceMetadataProvider(iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
    credentials = provider.load()

    access_key = credentials.access_key
    secret_key = credentials.secret_key

region = os.environ.get('REGION')
if region is None:
    region = "eu-central-1"

def executeSignedPost(url, body):
    service = 'es'
    awsauth = AWS4Auth(access_key, secret_key, region, service)
    r = requests.post(url, auth=awsauth, json=body)
    result = r.json()
    return result
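A usage sketch for the helper above; the endpoint and request body are placeholders:

# Hypothetical call: run a match_all query against an Elasticsearch index.
result = executeSignedPost(
    "https://search-example.eu-central-1.es.amazonaws.com/my-index/_search",
    {"query": {"match_all": {}}})
print(result)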
Example 23
from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher


def get_instance_cred():
    provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
    creds = provider.load().get_frozen_credentials()
    return creds
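get_frozen_credentials() returns an immutable ReadOnlyCredentials snapshot, which is convenient to hand to a client once. A sketch of typical use, assuming boto3 is installed and an instance role is attached:

import boto3

# Sketch: feed the frozen credentials into a client and verify the identity.
creds = get_instance_cred()
sts = boto3.client(
    "sts",
    aws_access_key_id=creds.access_key,
    aws_secret_access_key=creds.secret_key,
    aws_session_token=creds.token,
)
print(sts.get_caller_identity()["Account"])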