def instance_hook(self, input, instance, attrs):
    common_instance_hook(self, input, instance, attrs)

    if attrs.is_create_edit:

        # Parse extra arguments to confirm their syntax is correct
        parse_extra_into_dict(input.extra)

    elif attrs.is_delete:
        if instance.is_default:
            raise BadRequest(self.cid, 'Cannot delete the default cache')
        else:
            input.cache_type = instance.cache_type
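# A minimal sketch of what parse_extra_into_dict is assumed to do throughout
# these snippets: turn newline- or semicolon-separated key=value lines into a
# dict, skipping comments and raising on malformed input. The function name
# below is hypothetical - this is an illustration, not the actual Zato code.
def _parse_extra_into_dict_sketch(data):
    out = {}
    if not data:
        return out
    for line in data.replace(';', '\n').splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        if '=' not in line:
            raise ValueError('Expected key=value, got `{}`'.format(line))
        key, value = line.split('=', 1)
        out[key.strip()] = value.strip()
    return out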
def __init__(self, name, config, config_no_sensitive):
    self.logger = getLogger(self.__class__.__name__)
    self.name = name
    self.config = config
    self.engine_name = config['engine'] # self.engine.name is 'mysql' while self.engine_name is 'mysql+pymysql'

    # Safe for printing out to logs, any sensitive data has been shadowed
    self.config_no_sensitive = config_no_sensitive

    _extra = {}

    # MySQL only
    if self.engine_name.startswith('mysql'):
        _extra['pool_recycle'] = 600

    # Postgres-only
    elif self.engine_name.startswith('postgres'):
        _extra['connect_args'] = {'application_name': get_component_name()}

    extra = self.config.get('extra') # Optional, hence .get
    _extra.update(parse_extra_into_dict(extra))

    engine_url = engine_def.format(**config)
    self.engine = create_engine(engine_url, pool_size=int(config['pool_size']), **_extra)

    event.listen(self.engine, 'checkin', self.on_checkin)
    event.listen(self.engine, 'checkout', self.on_checkout)
    event.listen(self.engine, 'connect', self.on_connect)
    event.listen(self.engine, 'first_connect', self.on_first_connect)
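# A usage sketch under assumptions: engine_def is defined elsewhere as a URL
# template along the lines of '{engine}://{username}:{password}@{host}:{port}/{db_name}',
# and every key=value line in 'extra' becomes a keyword argument to
# create_engine. All values below are illustrative only.
config = {
    'engine': 'postgresql+pg8000',
    'username': 'zato', 'password': '...', 'host': 'localhost', 'port': 5432,
    'db_name': 'zato1', 'pool_size': 5,
    'extra': 'echo=True', # Forwarded as create_engine(..., echo=True) to log SQL statements
}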
def impl():
    try:
        servers = [elem.strip() for elem in config.servers.splitlines()]
        cache = _MemcachedClient(servers, asbool(config.is_debug), **parse_extra_into_dict(config.extra))
        self._add_cache(config, cache)
    except Exception:
        logger.warning(format_exc())
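# Illustrative inputs, based on how they are consumed above - one host:port
# per line for servers, key=value lines for extra (the values are made up and
# which kwargs the memcached client accepts depends on the library used):
#
#   config.servers = '10.0.0.1:11211\n10.0.0.2:11211'
#   config.extra = 'debug=0'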
def _update_aws_config(self, msg):
    """ Parses the AWS address we store into the discrete components S3Connection objects expect.
    Also turns the metadata string into a dictionary.
    """
    url_info = urlparse(msg.address)

    msg.is_secure = url_info.scheme == 'https'
    msg.port = url_info.port if url_info.port else (443 if msg.is_secure else 80)
    msg.host = url_info.netloc
    msg.metadata = parse_extra_into_dict(msg.metadata_)
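# Standard-library behavior this method relies on (urlparse lives in
# urllib.parse in Python 3, in urlparse in Python 2):
info = urlparse('https://s3.amazonaws.com')
assert info.scheme == 'https'
assert info.port is None # No explicit port, so the code above falls back to 443
assert info.netloc == 's3.amazonaws.com'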
def set(self, key, value, bucket=ZATO_NONE, content_type=ZATO_NONE, metadata=ZATO_NONE,
        storage_class=ZATO_NONE, encrypt_at_rest=ZATO_NONE):
    _bucket = Bucket(self.impl, bucket if bucket != ZATO_NONE else self.zato_default_bucket)

    _key = Key(_bucket)
    _key.content_type = content_type if content_type != ZATO_NONE else self.zato_content_type
    _key.metadata.update(metadata if metadata != ZATO_NONE else parse_extra_into_dict(self.zato_metadata, False))
    _key.name = key
    _key.storage_class = storage_class if storage_class != ZATO_NONE else self.zato_storage_class

    _key.set_contents_from_string(
        value, encrypt_key=(encrypt_at_rest if encrypt_at_rest != ZATO_NONE else self.zato_encrypt_at_rest))
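# A hedged usage sketch - every argument left at ZATO_NONE falls back to the
# connection's configured default; the 's3' wrapper object and key names are
# hypothetical:
#
#   s3.set('invoices/2018/01.pdf', pdf_data)
#   s3.set('notes.txt', 'hello', content_type='text/plain', metadata={'source': 'docs'})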
def add_client(self):
    conn = Connection(authurl=self.config.auth_url, user=self.config.user, key=self.config.key,
        retries=self.config.retries, snet=self.config.is_snet,
        starting_backoff=float(self.config.starting_backoff), max_backoff=float(self.config.max_backoff),
        tenant_name=self.config.tenant_name, os_options=parse_extra_into_dict(self.config.custom_options),
        auth_version=self.config.auth_version, cacert=self.config.cacert,
        insecure=not self.config.should_validate_cert, ssl_compression=self.config.needs_tls_compr,
        retry_on_ratelimit=self.config.should_retr_ratelimit)

    try:
        conn.head_account()
    except Exception:
        self.logger.warning('Could not HEAD an account (%s), e:`%s`', self.config.name, format_exc())
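# Illustrative custom_options input - python-swiftclient's os_options dict
# accepts keys such as 'region_name' or 'tenant_id' (the values here are
# made up):
#
#   self.config.custom_options = 'region_name=RegionOne\ntenant_id=abc123'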
def handle(self, _msg='Cleaning up WSX pub/sub, channel:`%s`, now:`%s (%s)`, md:`%s`, ma:`%s` (%s)'):

    # We receive a multi-line list of WSX channel name -> max timeout accepted on input
    config = parse_extra_into_dict(self.request.raw_request)

    with closing(self.odb.session()) as session:

        # Delete stale connections for each subscriber
        for channel_name, max_delta in config.items():

            # Input timeout is in minutes but timestamps in ODB are in seconds
            # so we convert the minutes to seconds, as expected by the database.
            max_delta = max_delta * 60

            # We compare everything using seconds
            now = utcnow_as_ms()

            # The last interaction time for each connection must not be older than that many seconds ago
            max_allowed = now - max_delta

            now_as_iso = datetime_from_ms(now * 1000)
            max_allowed_as_iso = datetime_from_ms(max_allowed * 1000)

            self.logger.info(_msg, channel_name, now_as_iso, now, max_delta, max_allowed_as_iso, max_allowed)
            logger_pubsub.info(_msg, channel_name, now_as_iso, now, max_delta, max_allowed_as_iso, max_allowed)

            # Delete old connections for that channel
            session.execute(
                SubscriptionDelete().
                where(SubscriptionTable.c.ws_channel_id==WSXChannelTable.c.id).
                where(WSXChannelTable.c.name==channel_name).
                where(SubscriptionTable.c.last_interaction_time < max_allowed)
            )

        # Commit all deletions
        session.commit()
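# An illustrative raw_request payload, one channel per line, with the maximum
# delta given in minutes (the channel names are made up):
#
#   channel.wsx.orders=60
#   channel.wsx.alerts=1440
#
# Note this assumes parse_extra_into_dict converts numeric strings to integers;
# if values arrived as strings, an int() conversion would be needed before
# multiplying by 60.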
def __init__(self, name, config, config_no_sensitive):
    self.name = name
    self.config = config
    self.logger = getLogger(self.__class__.__name__)

    # Safe for printing out to logs, any sensitive data has been shadowed
    self.config_no_sensitive = config_no_sensitive

    _extra = {}
    _extra['connect_args'] = {'application_name': get_component_name()}

    extra = self.config.get('extra') # Optional, hence .get
    _extra.update(parse_extra_into_dict(extra))

    engine_url = engine_def.format(**config)
    self.engine = create_engine(engine_url, pool_size=int(config['pool_size']), **_extra)

    event.listen(self.engine, 'checkin', self.on_checkin)
    event.listen(self.engine, 'checkout', self.on_checkout)
    event.listen(self.engine, 'connect', self.on_connect)
    event.listen(self.engine, 'first_connect', self.on_first_connect)
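# The application_name connect arg is PostgreSQL-specific - it makes each Zato
# component identifiable in server-side monitoring, e.g. with:
#
#   SELECT application_name, state FROM pg_stat_activity;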
def __init__(self, name, config, config_no_sensitive):
    self.logger = getLogger(self.__class__.__name__)
    self.name = name
    self.config = config
    self.engine_name = config['engine'] # self.engine.name is 'mysql' while self.engine_name is 'mysql+pymysql'

    # Safe for printing out to logs, any sensitive data has been shadowed
    self.config_no_sensitive = config_no_sensitive

    _extra = {}

    # MySQL only
    if self.engine_name.startswith('mysql'):
        _extra['pool_recycle'] = 600

    # Postgres-only
    elif self.engine_name.startswith('postgres'):
        _extra['connect_args'] = {'application_name': get_component_name()}

    extra = self.config.get('extra') # Optional, hence .get
    _extra.update(parse_extra_into_dict(extra))

    # SQLite has no pools
    if self.engine_name != 'sqlite':
        _extra['pool_size'] = int(config.get('pool_size', 1))
        if _extra['pool_size'] == 0:
            # NullPool does not accept pool_size, so it must not reach create_engine
            del _extra['pool_size']
            _extra['poolclass'] = NullPool

    engine_url = get_engine_url(config)
    self.engine = create_engine(engine_url, **_extra)

    event.listen(self.engine, 'checkin', self.on_checkin)
    event.listen(self.engine, 'checkout', self.on_checkout)
    event.listen(self.engine, 'connect', self.on_connect)
    event.listen(self.engine, 'first_connect', self.on_first_connect)
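# Design note as a sketch: pool_size=0 is read as 'no pooling at all', which
# maps to SQLAlchemy's NullPool - a new connection is opened and closed for
# every checkout. NullPool lives in sqlalchemy.pool:
#
#   from sqlalchemy.pool import NullPool
#   engine = create_engine(engine_url, poolclass=NullPool)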
        config.profiler.log_filename),
        cachegrind_filename=os.path.join(profiler_dir, config.profiler.cachegrind_filename),
        discard_first_request=config.profiler.discard_first_request,
        flush_at_shutdown=config.profiler.flush_at_shutdown,
        path=config.profiler.url_path,
        unwind=config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app

if __name__ == '__main__':
    base_dir = sys.argv[1]
    if not os.path.isabs(base_dir):
        base_dir = os.path.abspath(os.path.join(os.getcwd(), base_dir))

    options = sys.argv[2].split(';')
    options = '\n'.join(options)
    options = parse_extra_into_dict(options)

    run(base_dir, options=options)
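# An illustrative invocation, derived from the __main__ block above - argv[1]
# is the base directory, argv[2] a semicolon-separated list of key=value
# options (the script name and option names are made up):
#
#   python main.py /opt/zato/server1 'deployment_lock_expires=1073741824;sync_internal=False'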