def __init__(self, *args, **kwargs):
    # Prefer credentials from the project config; fall back to the default
    # boto3 credential chain if that fails.
    try:
        self._session = session.Session(**config.get_aws_conf())
    except Exception:
        self._session = session.Session()
    self.client = self._session.client(service_name=self.ServiceName)
    self.resource = self._session.resource(service_name=self.ServiceName)
def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
             unique_key='_id', chunk_size=DEFAULT_MAX_BULK,
             meta_index_name="mongodb_meta", meta_type="mongodb_meta",
             attachment_field="content", **kwargs):
    client_options = kwargs.get('clientOptions', {})
    if 'aws' in kwargs:
        if not _HAS_AWS:
            raise ConfigurationError(
                'aws extras must be installed to sign Elasticsearch requests')
        aws_args = kwargs.get('aws', {'region': 'us-east-1'})
        aws = aws_session.Session()
        if 'access_id' in aws_args and 'secret_key' in aws_args:
            aws = aws_session.Session(
                aws_access_key_id=aws_args['access_id'],
                aws_secret_access_key=aws_args['secret_key'])
        credentials = aws.get_credentials()
        region = aws.region_name or aws_args.get('region', 'us-east-1')
        aws_auth = AWSV4Sign(credentials, region, 'es')
        client_options['http_auth'] = aws_auth
        client_options['use_ssl'] = True
        client_options['verify_certs'] = True
        client_options['connection_class'] = es_connection.RequestsHttpConnection
    self.elastic = Elasticsearch(hosts=[url], **client_options)
    self.auto_commit_interval = auto_commit_interval
    self.meta_index_name = meta_index_name
    self.meta_type = meta_type
    self.unique_key = unique_key
    self.chunk_size = chunk_size
    if self.auto_commit_interval not in [None, 0]:
        self.run_auto_commit()
    self._formatter = DefaultDocumentFormatter()
    self.has_attachment_mapping = False
    self.attachment_field = attachment_field
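# Hedged usage sketch (not part of the original source): the constructor above reads the
# optional 'aws' kwarg for the keys access_id, secret_key and region when signing requests
# to an Amazon ES endpoint. The class name DocManager, the URL and the credential values
# here are illustrative assumptions, not taken from the source.
def example_doc_manager():
    return DocManager(
        'https://search-example.us-east-1.es.amazonaws.com',
        aws={'access_id': 'AKIA...', 'secret_key': 'example-secret', 'region': 'us-east-1'},
    )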
def test_lookup_opensearch_no_vpc():
    client_session = session.Session()
    here = path.abspath(path.dirname(__file__))
    pill = placebo.attach(
        session=client_session,
        data_path=f"{here}/placebos/lookup_x_opensearch",
    )
    # pill.record()
    pill.playback()

    # Public endpoint domain
    config = lookup_resource(
        {
            "Tags": [
                {"CreatedByComposeX": r"true"},
                {"ComposeXName": r"domain-01"},
            ]
        },
        session=client_session,
    )
    print(config)

    # Private (VPC) endpoint domain
    config = lookup_resource(
        {
            "Tags": [
                {"CreatedByComposeX": "true"},
                {"ComposeXName": "domain-02"},
            ]
        },
        session=client_session,
    )
    print(config)
def __init__(self):
    super(GroupCommands, self).__init__()

    s = session.Session()
    self._region = s.region_name
    if not self._region:
        log.error("AWS credentials and region must be setup. "
                  "Refer to the AWS docs at https://goo.gl/JDi5ie")
        exit(-1)

    log.info("AWS credentials found for region '{}'".format(self._region))

    self._gg = s.client("greengrass")
    self._iot = s.client("iot")
    self._lambda = s.client("lambda")
    self._iam = s.client("iam")
    self._iot_endpoint = self._iot.describe_endpoint()['endpointAddress']

    try:
        with open(DEFINITION_FILE, 'r') as f:
            self.group = yaml.safe_load(f)
    except IOError:
        log.error("Group definition file `greengo.yaml` not found. "
                  "Create the file and define the group definition first. "
                  "See https://github.com/greengo for details.")
        exit(-1)

    self.name = self.group['Group']['name']
    self._LAMBDA_ROLE_NAME = "{0}_Lambda_Role".format(self.name)

    _mkdir(MAGIC_DIR)
    self.state = _load_state()
def upload(file, path, dstfolder, t_time, s_date):
    # Arguments: tar file name, source path, destination folder, target time
    # used by sleep_time(), and date string used as the destination key prefix.
    print("Initiate uploading process")

    # Initiate session
    sess = session.Session()
    client = sess.client(
        's3',
        region_name='nyc3',
        endpoint_url='https://ppdms.nyc3.digitaloceanspaces.com',
        aws_access_key_id=ACCESS_ID,
        aws_secret_access_key=SECRET_KEY)

    source_path = path + "/" + file
    # space_name = imeino_macno
    try:
        start_time = int(strftime("%H%M%S", gmtime()))
        response = client.upload_file(source_path, dstfolder, s_date + "/" + file)
        print("upload done")
        end_time = int(strftime("%H%M%S", gmtime()))
        sleep_time(start_time, end_time, t_time)
        return True
    except EndpointConnectionError as error:
        print(error)
        return False
def get_secret(pipeline_name: str) -> str:
    """Get API secret from SecretsManager for a given pipeline

    :param pipeline_name: Name of the executing pipeline
    :return: The secret
    """
    region_name = os.environ["AWS_REGION"]
    secret_name = pipeline_name + "-NotifySecret"

    sess = session.Session()
    ssm_client = sess.client(service_name="secretsmanager", region_name=region_name)

    try:
        logger.info(
            "Getting secret with name %s in region %s" % (secret_name, region_name)
        )
        get_secret_value_response = ssm_client.get_secret_value(SecretId=secret_name)
    except ClientError as e:
        if e.response["Error"]["Code"] == "ResourceNotFoundException":
            logger.error("The requested secret " + secret_name + " was not found")
        elif e.response["Error"]["Code"] == "InvalidRequestException":
            logger.error("The request was invalid due to: %s" % e)
        elif e.response["Error"]["Code"] == "InvalidParameterException":
            logger.error("The request had invalid params: %s" % e)
        else:
            logger.error("Unknown error: %s" % e)
    else:
        logger.info("Returning %s" % get_secret_value_response["SecretString"])
        return get_secret_value_response["SecretString"]
def get_hosted_es():
    hosted_es = None
    if QuerybookSettings.ELASTICSEARCH_CONNECTION_TYPE == "naive":
        hosted_es = Elasticsearch(hosts=QuerybookSettings.ELASTICSEARCH_HOST)
    elif QuerybookSettings.ELASTICSEARCH_CONNECTION_TYPE == "aws":
        # TODO: generalize AWS region setup
        from boto3 import session as boto_session
        from lib.utils.assume_role_aws4auth import AssumeRoleAWS4Auth

        credentials = boto_session.Session().get_credentials()
        auth = AssumeRoleAWS4Auth(
            credentials,
            QuerybookSettings.AWS_REGION,
            "es",
        )
        hosted_es = Elasticsearch(
            hosts=QuerybookSettings.ELASTICSEARCH_HOST,
            http_auth=auth,
            connection_class=RequestsHttpConnection,
            use_ssl=True,
            verify_certs=True,
        )
    return hosted_es
def upload_image(pid):
    """ Upload images """
    sessions = session.Session()
    client = sessions.client(
        's3',
        region_name='sgp1',
        endpoint_url='https://ysis-space.sgp1.digitaloceanspaces.com',
        aws_access_key_id=ACCESS_ID,
        aws_secret_access_key=SECRET_KEY)

    file = request.files['file']
    file_key = 'loctite/' + file.filename

    item = Loctite.query.get_or_404(pid)
    if item.file is not None:
        delete_image(item.file)

    client.upload_fileobj(
        Fileobj=file,
        Bucket='ysis-space',
        ExtraArgs={'ACL': 'public-read'},
        Key=file_key)

    item.file = file_key
    db.session.commit()

    loctite_schema = LoctiteSchema()
    return loctite_schema.jsonify(Loctite.query.get(pid)), 200
def _parse_aws(self):
    """
    Parse the AWS args and attempt to obtain credentials using
    :class:`boto3.session.Session`, which follows the AWS documentation at
    http://amzn.to/2fRCGCt
    """
    self.logger.debug('self.aws = {}'.format(self.aws))
    self.logger.debug('self.client_args = {}'.format(self.client_args))
    if self.use_aws:
        if 'aws_region' not in self.aws or self.aws['aws_region'] is None:
            raise MissingArgument('Missing "aws_region".')
        from boto3 import session
        from botocore.exceptions import NoCredentialsError
        from requests_aws4auth import AWS4Auth
        try:
            aws_session = session.Session()
            credentials = aws_session.get_credentials()
            self.aws['aws_key'] = credentials.access_key
            self.aws['aws_secret_key'] = credentials.secret_key
            self.aws['aws_token'] = credentials.token
        # If an attribute doesn't exist, we were not able to retrieve
        # credentials as expected, so we can't continue.
        except AttributeError:
            self.logger.debug('Unable to locate AWS credentials')
            raise NoCredentialsError
        # Override these client args for the signed connection.
        self.client_args['use_ssl'] = True
        self.client_args['verify_certs'] = True
        self.client_args['connection_class'] = elasticsearch.RequestsHttpConnection
        self.client_args['http_auth'] = AWS4Auth(
            self.aws['aws_key'],
            self.aws['aws_secret_key'],
            self.aws['aws_region'],
            'es',
            session_token=self.aws['aws_token'])
def get_hosted_es(): hosted_es = None if ":" in QuerybookSettings.ELASTICSEARCH_HOST: host, port = QuerybookSettings.ELASTICSEARCH_HOST.split(":") else: host = QuerybookSettings.ELASTICSEARCH_HOST port = 9200 # Default port for elasticsearch if QuerybookSettings.ELASTICSEARCH_CONNECTION_TYPE == "naive": hosted_es = Elasticsearch( hosts=[host], port=port, ) elif QuerybookSettings.ELASTICSEARCH_CONNECTION_TYPE == "aws": # TODO: generialize aws region setup from boto3 import session as boto_session from lib.utils.assume_role_aws4auth import AssumeRoleAWS4Auth credentials = boto_session.Session().get_credentials() auth = AssumeRoleAWS4Auth( credentials, "us-east-1", "es", ) hosted_es = Elasticsearch( hosts=QuerybookSettings.ELASTICSEARCH_HOST, port=443, http_auth=auth, connection_class=RequestsHttpConnection, use_ssl=True, verify_certs=True, ) return hosted_es
def s3_region_url():
    region_session = session.Session()
    region = region_session.region_name
    if region == 'us-east-1':
        return 's3.amazonaws.com'
    else:
        return 's3-' + region + '.amazonaws.com'
def __init__(self, config: Config):
    self._updater = Updater(token=config.bot_token, workers=config.max_workers)
    self._dispatcher = self._updater.dispatcher
    self._statistics = Statistics()
    self._aws_session = session.Session(
        aws_access_key_id=config.aws.access_key_id,
        aws_secret_access_key=config.aws.secret_access_key,
        region_name=config.aws.region_name)
    self._synthesizer_facade = SynthesizerFacade(
        PollySynthesizer(self._aws_session, self._statistics,
                         config.language_mappings, config.voices),
        Validator(config.min_message_length, config.max_message_length),
        Sanitizer(config.max_message_length),
        S3FileUploader(self._aws_session, config.aws.s3_bucket),
        self._statistics,
        config.max_workers)
    self._commands = [
        commands.start.StartCommand(),
        commands.synthesize.SynthesizeCommand(self._synthesizer_facade),
        commands.synthesize_inline.SynthesizeInlineCommand(
            self._synthesizer_facade, self._statistics,
            config.prefetch_languages, config.inline_debounce_millis),
        commands.stats.StatsCommand(self._statistics, config.admin_id),
        commands.error_handler.ErrorCommand(config.admin_id)
    ]
def env_init_local_dotenv(project_path: str, env_vars: dict, override_env_vars: bool = False):
    """
    Init a new .env file if it does not exist and load it into os.environ

    :param project_path: path to look for the relevant .env file or to create a new one
    :param env_vars: environment variables to be written to the .env file if it does not exist.
    :param override_env_vars: whether to override the system environment variables with the
        variables in the `.env` file

    Note that the 'project_path' parameter is implicitly added as an additional environment
    variable, so there is no need to duplicate it in 'env_vars'.
    """
    dot_env_file = f'{project_path}/.env'
    region = session.Session().region_name

    # noinspection PyTypeChecker
    lines = [f'{key}={value}{os.linesep}' for key, value in env_vars.items()]
    proj_path_line = f'{PROJECT_DIR_KEY}={project_path}{os.linesep}'
    lines.append(proj_path_line)

    if not os.path.exists(dot_env_file):
        print(f'.env file not found, creating with a random password, '
              f'user {getpass.getuser()} and region {region}')
        with open(dot_env_file, 'w') as file:
            file.writelines(lines)
    else:
        with open(dot_env_file, 'a+') as file:  # append to the end of file
            file.seek(0)
            if PROJECT_DIR_KEY not in file.read():
                file.write(os.linesep + proj_path_line)
        print('using existing .env file')

    load_dotenv(dotenv_path=dot_env_file, override=override_env_vars)
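# Hedged usage sketch (not part of the original source): env_init_local_dotenv writes one
# KEY=value line per entry plus the project-path line, then loads the .env file into
# os.environ. The path and variable names below are illustrative placeholders.
def example_env_init():
    env_init_local_dotenv(
        project_path='/home/user/my-project',
        env_vars={'DB_PASSWORD': 'generated-password', 'STAGE': 'dev'},
        override_env_vars=False,
    )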
def main():
    from boto3 import session
    from pprint import PrettyPrinter

    utc = UTC()
    pp = PrettyPrinter(indent=4)
    args = get_args()
    delta_kill = datetime.now(utc) - timedelta(hours=args.terminate)
    delta_warn = datetime.now(utc) - timedelta(hours=args.warn)

    aws_session = session.Session(region_name=args.region, profile_name=args.profile)
    ec2client = aws_session.client('ec2')
    response = ec2client.describe_instances()

    warn_instances = []
    for reservation in response['Reservations']:
        for instance in reservation['Instances']:
            launchtime = instance['LaunchTime']
            if launchtime < delta_kill and args.yes:
                print("Terminating instance", instance['InstanceId'])
                # ec2client.terminate_instances(InstanceIds=[instance['InstanceId']])
            elif launchtime < delta_kill and not args.yes:
                print("Skipping instance", instance['InstanceId'])
            elif launchtime < delta_warn:
                warn_instances.append(instance)

    if warn_instances:
        print("The following instances are more than", args.warn, "hrs old.")
        pp.pprint(warn_instances)
def read_from_s3(s3_url):
    # Split "bucket/key" on the first slash only, so keys containing "/" still work.
    bucket, file = s3_url.split("/", 1)
    s3 = session.Session().client('s3')
    obj = s3.get_object(Bucket=bucket, Key=file, ResponseContentType='application/json')
    return json.loads(obj['Body'].read().decode('utf-8'))
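# Hedged usage sketch (not part of the original source): read_from_s3 above expects a plain
# "bucket/key" string rather than a full "s3://" URI. The bucket and key names below are
# illustrative placeholders.
def example_read_from_s3():
    payload = read_from_s3("my-config-bucket/configs/pipeline.json")
    return payload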
def _create_s3_client(self, loc):
    """Create a client object to use when connecting to S3.

    :param loc: `glance_store.location.Location` object, supplied
                from glance_store.location.get_location_from_uri()
    :returns: An object with credentials to connect to S3
    """
    s3_host = self._option_get('s3_store_host')
    url_format = self._option_get('s3_store_bucket_url_format')
    calling_format = {'addressing_style': url_format}

    session = boto_session.Session(aws_access_key_id=loc.accesskey,
                                   aws_secret_access_key=loc.secretkey)
    config = boto_client.Config(s3=calling_format)
    location = get_s3_location(s3_host)

    bucket_name = loc.bucket
    if (url_format == 'virtual' and
            not boto_utils.check_dns_name(bucket_name)):
        raise boto_exceptions.InvalidDNSNameError(bucket_name=bucket_name)

    region_name, endpoint_url = None, None
    if location:
        region_name = location
    else:
        endpoint_url = s3_host

    return session.client(service_name='s3',
                          endpoint_url=endpoint_url,
                          region_name=region_name,
                          use_ssl=(loc.scheme == 's3+https'),
                          config=config)
def generate_signv4_mqtt_boto(iot_host, iot_region):
    '''
    Gets the credentials from the environment using boto3 and uses them to
    create the signed url
    '''
    boto_session = session.Session()
    credentials = boto_session.get_credentials()
    return generate_signv4_mqtt(iot_host, iot_region,
                                credentials.access_key, credentials.secret_key)
def get_boto_client() -> BaseClient:
    return session.Session().client(
        's3',
        region_name=SPACES_REGION,
        endpoint_url=SPACES_ENDPOINT,
        aws_access_key_id=settings.SPACES_PUBLIC_KEY,
        aws_secret_access_key=settings.SPACES_SECRET_KEY,
    )
def __init__(self, service_name, profile_name=None, **kwargs):
    if not profile_name:
        aws_key = kwargs.get("AWS_ACCESS_KEY_ID", os.environ.get("AWS_ACCESS_KEY"))
        aws_secret = kwargs.get("AWS_SECRET_AWS_KEY", os.environ.get("AWS_ACCESS_SECRET"))
        self.client = client(service_name=service_name,
                             aws_access_key_id=aws_key,
                             aws_secret_access_key=aws_secret)
    else:
        self.session = session.Session(profile_name=profile_name)
        self.client = self.session.client(service_name=service_name)
def __init__(self):
    # create a boto3 session
    s = session.Session()
    # initialize clients for the services we'll use
    self._gg = s.client("greengrass")
    self._iot = s.client("iot")
    self._iot_data = s.client("iot-data")
    self._lambda = s.client("lambda")
def create_aws_auth(aws_args):
    try:
        aws_session = session.Session(**convert_aws_args(aws_args))
    except TypeError as exc:
        raise errors.InvalidConfiguration(
            'Elastic DocManager unknown aws config option: %s' % (exc,))
    return AWSV4Sign(aws_session.get_credentials(),
                     aws_session.region_name or DEFAULT_AWS_REGION,
                     'es')
def _list_bucket(self, accept_key=lambda k: True) -> Iterator[str]:
    """
    Wrapper for boto3's list_objects_v2 so we can handle pagination, filter by lambda func
    and operate with or without credentials

    :param accept_key: lambda function to allow filtering return keys,
        e.g. lambda k: not k.endswith('/'), defaults to lambda k: True
    :yield: key (name) of each object
    """
    provider = self._provider
    client_config = None
    if S3File.use_aws_account(provider):
        session = boto3session.Session(
            aws_access_key_id=provider["aws_access_key_id"],
            aws_secret_access_key=provider["aws_secret_access_key"])
    else:
        session = boto3session.Session()
        client_config = Config(signature_version=UNSIGNED)
    client = make_s3_client(provider, config=client_config, session=session)

    ctoken = None
    while True:
        # list_objects_v2 doesn't like a None value for ContinuationToken,
        # so we don't set it if we don't have one.
        if ctoken:
            kwargs = dict(Bucket=provider["bucket"],
                          Prefix=provider.get("path_prefix", ""),
                          ContinuationToken=ctoken)
        else:
            kwargs = dict(Bucket=provider["bucket"],
                          Prefix=provider.get("path_prefix", ""))
        response = client.list_objects_v2(**kwargs)
        try:
            content = response["Contents"]
        except KeyError:
            pass
        else:
            for c in content:
                key = c["Key"]
                if accept_key(key):
                    yield key
        ctoken = response.get("NextContinuationToken", None)
        if not ctoken:
            break
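# Hedged usage sketch (not part of the original source): accept_key lets the caller filter
# object keys as the pages stream back, e.g. keeping only CSV files or skipping "directory"
# placeholder keys. 'reader' stands in for an instance of the enclosing class; the extension
# filter is an illustrative choice.
def example_list_csv_keys(reader):
    return [key for key in reader._list_bucket(accept_key=lambda k: k.endswith(".csv"))]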
def delete_image(file_key):
    sessions = session.Session()
    client = sessions.client(
        's3',
        region_name='sgp1',
        endpoint_url='https://ysis-space.sgp1.digitaloceanspaces.com',
        aws_access_key_id=ACCESS_ID,
        aws_secret_access_key=SECRET_KEY)
    client.delete_object(Bucket='ysis-space', Key=file_key)
def create_from_file(self, file, storage_type):
    '''
    TODO: some kind of cleanup here...
    '''
    Record = apps.get_model("music", "Record")
    Track = apps.get_model("music", "Track")
    TrackListing = apps.get_model("music", "TrackListing")

    session2 = session.Session()
    client = session2.client(
        "s3",
        region_name=settings.AWS_S3_REGION_NAME,
        endpoint_url=f"https://{settings.AWS_S3_REGION_NAME}.digitaloceanspaces.com",
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
    )

    # Get the extension from the filename. If there isn't one, it's a wav file
    # uploaded via the web browser (microphone recording).
    if storage_type == "microphone":
        extension = ".wav"
    else:
        extension = os.path.splitext(file.name)[1]
        if extension not in [".mp3"]:
            raise Exception("audio format not yet supported")

    storage_id = str(uuid.uuid4())
    storage_filename = f"uploads/{storage_id}{extension}"
    client.upload_fileobj(
        file,
        "jukebox-radio-space",
        storage_filename,
        ExtraArgs={"ACL": "public-read"},
    )

    if extension == ".wav":
        # suuuuuper hacky, but I spent a lot of time on this and this is
        # the best I got
        storage_duration_ms = file.size / 8
    elif extension == ".mp3":
        from mutagen.mp3 import MP3
        audio = MP3(file)
        storage_duration_ms = audio.info.length * 1000
    else:
        raise Exception("audio format not yet supported")

    record = Record.objects.create(
        storage_id=storage_id,
        storage_filename=storage_filename,
        storage_name=file.name,
        storage_duration_ms=storage_duration_ms,
    )

    return record
def _get_spaces_session(self):
    bsession = session.Session()
    client = bsession.client(
        's3',
        region_name=config.S3_REGION,
        endpoint_url='https://%s.%s' % (config.S3_REGION, config.S3_ENDPOINT),
        aws_access_key_id=config.ACCESS_ID,
        aws_secret_access_key=config.SECRET_KEY)
    return client
def get_session(self, profile):
    '''Get session by profile name. Create or return existing one.'''
    try:
        return self.sessions[profile]
    except KeyError:
        ses = session.Session(profile_name=profile)
        lg.debug('session: {}'.format(ses))
        self.sessions[profile] = ses
        return ses
def __init__(self):
    self._access_key_id = config.get_aws_access_key_id()
    self._secret_access_key = config.get_aws_secret_access_key()
    self._region_name = config.get_aws_region_name()
    self.session = session.Session(
        aws_access_key_id=self._access_key_id,
        aws_secret_access_key=self._secret_access_key,
        region_name=self._region_name)
    self.ec2 = self.session.client('ec2')
    self.ec2_resource = self.session.resource('ec2')
def getSessions(profile_names):
    """Given a set of profiles, give us the session objects that represent them
    and which we can use to perform the maintenance"""
    logger.debug('Profiles: %s', profile_names)
    sessions = [
        session.Session(profile_name=profile_name)
        for profile_name in profile_names
    ]
    return sessions
def get_region():
    """
    Determine and return the current AWS region.

    :return: string describing AWS region
    :type: string
    """
    global region  # pylint: disable=global-statement
    if not region:
        region = botosession.Session().region_name
    return region
def get_client():
    do_session = session.Session()
    client = do_session.client(
        "s3",
        region_name=REGION,
        endpoint_url=ENDPOINT,
        aws_access_key_id=DO_ACCESS_ID,
        aws_secret_access_key=DO_SECRET_KEY,
    )
    return client