def __init__(self, config, verbose=10, blocking_auth=True, compression=None):
    """Create an HTTP-backed provider from *config*.

    :param config: dict; reads 'serverUrl', 'guest', 'serviceAccount',
        'authentication' and 'compression'.
    :param verbose: NOTE(review): currently ignored — the level always
        comes from get_storage_verbose_level(); confirm before removing.
    :param blocking_auth: forwarded to get_auth().
    :param compression: overrides config['compression'] when not None.
    """
    # TODO: implement connection
    self.url = config.get('serverUrl', None)
    self.verbose = get_storage_verbose_level()
    self.logger = logs.get_logger('HTTPProvider')
    self.logger.setLevel(self.verbose)
    self.credentials: Credentials = \
        Credentials.get_credentials(config)
    self.storage_handler = HTTPStorageHandler(
        self.url,
        self.credentials.to_dict() if self.credentials else None,
        compression=compression)
    self.auth = None
    guest = config.get('guest', None)
    # Authenticate only for non-guest configs without a service account.
    if not guest and 'serviceAccount' not in config:
        self.auth = get_auth(config.get('authentication', None),
                             blocking_auth)
    # Explicit argument wins; otherwise fall back to the config value.
    self.compression = compression if compression is not None \
        else config.get('compression', None)
def __init__(self, art_name, art_dict, logger=None):
    """Build an artifact descriptor from its configuration dictionary.

    :param art_name: logical name of the artifact.
    :param art_dict: dict describing the artifact; recognized keys are
        'unpack', 'mutable', 'key', 'local', 'qualified', 'url', 'hash',
        plus whatever Credentials.get_credentials() consumes.
    :param logger: optional logger; a class-named one is created if None.
    """
    self.name = art_name
    self.credentials = None
    self.logger = logger
    if self.logger is None:
        self.logger = logs.get_logger(self.__class__.__name__)
        self.logger.setLevel(storage_setup.get_storage_verbose_level())
    self.storage_handler: StorageHandler = None
    self.unpack: bool = art_dict.get('unpack')
    self.is_mutable: bool = art_dict.get('mutable')
    # .get() replaces the previous "if 'k' in dict.keys()" chains;
    # each attribute stays None when its key is absent.
    self.key: str = art_dict.get('key')
    self.local_path: str = art_dict.get('local')
    # 'url' takes precedence over 'qualified' when both are present
    # (same effective order as the original assignments).
    self.remote_path: str = art_dict.get('url', art_dict.get('qualified'))
    self.hash = art_dict.get('hash')
    self.credentials = credentials.Credentials.get_credentials(art_dict)
    self._setup_storage_handler(art_dict)
def __init__(self, db_config, measure_timestamp_diff=False,
             blocking_auth=True, compression=None):
    """Firebase-backed provider: set up pyrebase app, auth and logging."""
    level = get_storage_verbose_level()
    self.logger = logs.get_logger(self.__class__.__name__)
    self.logger.setLevel(level)
    guest = db_config.get('guest')
    self.app = pyrebase.initialize_app(db_config)
    if compression is None:
        compression = db_config.get('compression')
    self.auth = None
    # Guests and service-account configs skip interactive authentication.
    if not guest and 'serviceAccount' not in db_config.keys():
        self.auth = get_auth(db_config['type'], blocking_auth, verbose=level)
    super().__init__(StorageType.storageFirebase,
                     self.logger,
                     measure_timestamp_diff=measure_timestamp_diff,
                     compression=compression)
def __init__(self, queue, route, amqp_url='', config=None, logger=None):
    """Set up the RabbitMQ publisher state and start the background
    pika connection thread.

    :param queue: queue name to publish/consume on.
    :param route: routing key used with the StudioML topic exchange.
    :param amqp_url: broker URL; may be overridden by
        config['cloud']['queue']['rmq'].
    :param config: optional configuration dict.
    :param logger: optional logger; one named 'RabbitMQ' is created if None.
    """
    self._rmq_lock = threading.RLock()
    self._connection = None
    self._channel = None
    self._consumer = None
    self._consume_ready = False
    self._msg_tracking_lock = threading.RLock()
    self._deliveries = []
    self._acked = 0
    self._nacked = 0
    self._message_number = 0
    self._rmq_msg = None
    self._rmq_id = None
    self._stopping = False
    self._exchange = 'StudioML.topic'
    self._exchange_type = 'topic'
    self._routing_key = route
    self._url = amqp_url
    self._is_persistent: bool = False
    if logger is not None:
        self._logger = logger
    else:
        self._logger = logs.get_logger('RabbitMQ')
        self._logger.setLevel(get_storage_verbose_level())
    if config is not None:
        # extract from the config data structure any settings related to
        # queue messaging for rabbit MQ (flattened from nested 'in' checks)
        queue_cfg = config.get('cloud', {}).get('queue', {})
        if 'rmq' in queue_cfg:
            self._url = queue_cfg['rmq']
            self._logger.warning('use queue url %s', self._url)
            flag_persistent = queue_cfg.get('persistent', False)
            if isinstance(flag_persistent, str):
                flag_persistent = flag_persistent.lower() == 'true'
            self._is_persistent = flag_persistent
    self._queue = queue
    self._queue_deleted = True
    self._connection_failed = False
    self._connection_failure_reason = None
    # The pika library for RabbitMQ has an asynchronous run method
    # that needs to run forever and will do reconnections etc
    # automatically for us
    thr = threading.Thread(target=self._run, args=(), kwargs={})
    # Thread.setDaemon() is deprecated since Python 3.10; set the
    # attribute directly instead.
    thr.daemon = True
    thr.start()
    self._wait_queue_created(600)
def __init__(self, db_config, handler: StorageHandler, compression=None):
    """Provider wrapper around an externally constructed storage handler."""
    self.logger = logs.get_logger(self.__class__.__name__)
    self.logger.setLevel(get_storage_verbose_level())
    # Explicit compression argument takes precedence over the config value.
    self.compression = compression if compression is not None \
        else db_config.get('compression', None)
    self.auth = None
    self.storage_handler = handler
    self.max_keys = db_config.get('max_keys', 100)
def __init__(self, name: str, path: str = None, logger=None):
    """File-system backed queue rooted at <path>/<name>."""
    if logger is None:
        self._logger = logs.get_logger('LocalQueue')
        self._logger.setLevel(get_storage_verbose_level())
    else:
        self._logger = logger
    self.name = name
    base = self._get_queue_directory() if path is None else path
    self.path = os.path.join(base, name)
    # Local queue is considered active, iff its directory exists.
    os.makedirs(self.path, exist_ok=True)
    self._lock_path = os.path.join(self.path, LOCK_FILE_NAME)
    self._lock = filelock.SoftFileLock(self._lock_path)
def __init__(self, cred_dict):
    """Parse a credentials dictionary (currently AWS only).

    None, the literal string 'none' and an empty dict all mean
    "no credentials"; anything else must be a single-entry dict keyed
    by AWS_TYPE, otherwise a fatal error is reported.
    """
    self.logger = logs.get_logger(self.__class__.__name__)
    self.logger.setLevel(storage_setup.get_storage_verbose_level())
    self.type = None
    self.key = None
    self.secret_key = None
    self.session_token = None
    self.region = None
    self.profile = None
    # Early exits for the "no credentials" spellings:
    if cred_dict is None:
        return
    if isinstance(cred_dict, str) and cred_dict == 'none':
        return
    if not isinstance(cred_dict, dict):
        msg: str = \
            "NOT SUPPORTED credentials format {0}".format(repr(cred_dict))
        util.report_fatal(msg, self.logger)
    if len(cred_dict) == 0:
        # Empty credentials dictionary is like None:
        return
    if len(cred_dict) == 1 and AWS_TYPE in cred_dict.keys():
        section = cred_dict[AWS_TYPE]
        self.type = AWS_TYPE
        self.key = section.get(AWS_KEY, None)
        self.secret_key = section.get(AWS_SECRET_KEY, None)
        self.session_token = section.get(AWS_SESSION_TOKEN, None)
        self.region = self._get_named(AWS_REGION, section)
        self.profile = self._get_named(AWS_PROFILE, section)
        # Key and secret key are mandatory for AWS credentials.
        if self.key is None or self.secret_key is None:
            msg: str = \
                "INVALID aws credentials format {0}".format(repr(cred_dict))
            util.report_fatal(msg, self.logger)
    else:
        msg: str = \
            "NOT SUPPORTED credentials format {0}".format(repr(cred_dict))
        util.report_fatal(msg, self.logger)
def __init__(self, remote_path, credentials_dict,
             timestamp=None, compression=None):
    """HTTP storage handler bound to a single remote URL."""
    self.logger = logs.get_logger(self.__class__.__name__)
    self.logger.setLevel(get_storage_verbose_level())
    self.url = remote_path
    self.timestamp = timestamp
    # Split the URL into its scheme, host and path components.
    parsed = urlparse(self.url)
    self.scheme = parsed.scheme
    self.endpoint = parsed.netloc
    self.path = parsed.path
    self.credentials = Credentials(credentials_dict)
    super().__init__(StorageType.storageHTTP, self.logger, False,
                     compression=compression)
def __init__(self, name, config=None, logger=None):
    """Create (or attach to) the SQS queue with the given name."""
    if logger is None:
        self.logger = logs.get_logger('SQSQueue')
        self.logger.setLevel(get_storage_verbose_level())
    else:
        self.logger = logger
    self.name = name
    self.is_persistent = False
    self.credentials = self._setup_from_config(config)
    key_id = self.credentials.get_key()
    secret = self.credentials.get_secret_key()
    if self.credentials.get_profile() is not None:
        # If profile name is specified, for whatever reason
        # boto3 API will barf if (key, secret key) pair
        # is also defined.
        key_id = None
        secret = None
    self._session = boto3.session.Session(
        aws_access_key_id=key_id,
        aws_secret_access_key=secret,
        aws_session_token=None,
        region_name=self.credentials.get_region(),
        profile_name=self.credentials.get_profile())
    self._client = self._session.client('sqs')
    response = self._client.create_queue(QueueName=name)
    self.queue_url = response['QueueUrl']
    self.logger.info('Creating SQS queue with name %s', name)
    self.logger.info('Queue url = %s', self.queue_url)
def __init__(self, config, measure_timestamp_diff=False, compression=None):
    """Local filesystem storage handler rooted at <endpoint>/<bucket>."""
    self.logger = logs.get_logger(self.__class__.__name__)
    self.logger.setLevel(get_storage_verbose_level())
    if compression is None:
        compression = config.get('compression', None)
    # Expand '~' and resolve symlinks to get the real store root.
    root = os.path.realpath(os.path.expanduser(config.get('endpoint', '~')))
    self.endpoint = root
    if not (os.path.exists(root) and os.path.isdir(root)):
        msg: str = "Store root {0} doesn't exist or not a directory. Aborting."\
            .format(root)
        self._report_fatal(msg)
    self.bucket = config.get('bucket', 'storage')
    self.store_root = os.path.join(root, self.bucket)
    self._ensure_path_dirs_exist(self.store_root)
    super().__init__(StorageType.storageLocal, self.logger,
                     measure_timestamp_diff, compression=compression)
def __init__(self, config, measure_timestamp_diff=False, compression=None):
    """S3 storage handler: build a boto session/client and make sure the
    configured bucket exists, creating it if necessary."""
    self.logger = logs.get_logger(self.__class__.__name__)
    self.logger.setLevel(get_storage_verbose_level())
    self.credentials: Credentials = \
        Credentials.get_credentials(config)
    self.endpoint = config.get('endpoint', None)
    if self.credentials is None:
        self._report_fatal("NO CREDENTIALS provided for {0}."
                           .format(self.endpoint))
    if self.credentials.get_type() != AWS_TYPE:
        self._report_fatal("EXPECTED aws credentials for {0}: {1}"
                           .format(self.endpoint,
                                   repr(self.credentials.to_dict())))
    aws_key: str = self.credentials.get_key()
    aws_secret_key = self.credentials.get_secret_key()
    region_name = self.credentials.get_region()
    profile_name = self.credentials.get_profile()
    if profile_name is not None:
        # it seems that explicitly specified profile name
        # should not be used with explicitly specified credentials:
        aws_key = None
        aws_secret_key = None
    session = Session(aws_access_key_id=aws_key,
                      aws_secret_access_key=aws_secret_key,
                      region_name=region_name,
                      profile_name=profile_name)
    session.events.unregister('before-parameter-build.s3.ListObjects',
                              set_list_objects_encoding_type_url)
    self.client = session.client('s3',
                                 endpoint_url=self.endpoint,
                                 config=Config(signature_version='s3v4'))
    if compression is None:
        compression = config.get('compression', None)
    self.cleanup_bucket = config.get('cleanup_bucket', False)
    if isinstance(self.cleanup_bucket, str):
        self.cleanup_bucket = self.cleanup_bucket.lower() == 'true'
    self.bucket_cleaned_up: bool = False
    # NOTE(review): reads a private boto attribute to get the resolved
    # endpoint host — confirm against the boto version in use.
    self.endpoint = self.client._endpoint.host
    self.bucket = config['bucket']
    try:
        buckets = self.client.list_buckets()
    except Exception as exc:
        self._report_fatal("FAILED to list buckets for {0}: {1}"
                           .format(self.endpoint, exc))
    existing = [b['Name'] for b in buckets['Buckets']]
    if self.bucket not in existing:
        try:
            if region_name is not None:
                self.client.create_bucket(
                    Bucket=self.bucket,
                    CreateBucketConfiguration={
                        'LocationConstraint': region_name})
            else:
                self.client.create_bucket(Bucket=self.bucket)
        except Exception as exc:
            self._report_fatal("FAILED to create bucket {0} for {1}: {2}"
                               .format(self.bucket, self.endpoint, exc))
    super().__init__(StorageType.storageS3, self.logger,
                     measure_timestamp_diff, compression=compression)
def __init__(self):
    """Cache of storage handlers, optionally cleaned up at process exit."""
    self.logger = logs.get_logger(self.__class__.__name__)
    self.logger.setLevel(get_storage_verbose_level())
    # Cache of previously created handlers.
    self.handlers_cache = {}
    self.cleanup_at_exit: bool = True