def instance_hook(self, input, instance, attrs):
    """ Validates create/edit input and guards delete requests for cache definitions.
    """
    common_instance_hook(self, input, instance, attrs)

    if attrs.is_create_edit:
        # Parse extra arguments to confirm their syntax is correct,
        # output is ignored on purpose, we just want to validate it.
        parse_extra_into_dict(input.extra)
        return

    if attrs.is_delete:

        # The default cache must never be removed
        if instance.is_default:
            raise BadRequest(self.cid, 'Cannot delete the default cache')

        # Not the default one - propagate its type to the input for later use
        input.cache_type = instance.cache_type
def _init_impl(self):
    """ Builds the underlying Dropbox client from self.config, pings it
    to confirm connectivity and marks the wrapper as connected.
    """
    with self.update_lock:

        # Create a pool of at most that many connections
        session = create_session(50)

        config = self.config

        # Scope may arrive as a comma-separated string
        scope = as_list(config.default_scope, ',')

        # Keyword arguments for the client object below
        client_config = {
            'session': session,
            'user_agent': config.user_agent,
            'oauth2_access_token': self.server.decrypt(config.secret),
            'oauth2_access_token_expiration': int(config.oauth2_access_token_expiration or 0),
            'scope': scope,
            'max_retries_on_error': int(config.max_retries_on_error or 0),
            'max_retries_on_rate_limit': int(config.max_retries_on_rate_limit or 0),
            'timeout': int(config.timeout),
            'headers': parse_extra_into_dict(config.http_headers),
        }

        # Create the actual connection object
        self._impl = DropboxClient(**client_config)

        # Confirm the connection was established
        self.ping()

        # We can assume we are connected now
        self.is_connected = True
def handle(
    self,
    _msg='Cleaning up WSX pub/sub, channel:`%s`, now:`%s (%s)`, md:`%s`, ma:`%s` (%s)'
):
    """ Deletes stale WSX pub/sub subscriptions and notifies other server
    processes about each deletion so they can update their in-RAM structures.
    """
    # We receive a multi-line list of WSX channel name -> max timeout accepted on input
    config = parse_extra_into_dict(self.request.raw_request)

    with closing(self.odb.session()) as session:

        # Delete stale connections for each subscriber
        for channel_name, max_delta in config.items():

            # Input timeout is in minutes but timestamps in ODB are in seconds
            # so we convert the minutes to seconds, as expected by the database.
            max_delta = max_delta * 60

            # We compare everything using seconds now
            now = utcnow_as_ms()

            # Latest interaction time for each connection must not be older than that many seconds ago
            max_allowed = now - max_delta

            # ISO-8601 representations are used for logging only
            now_as_iso = datetime_from_ms(now * 1000)
            max_allowed_as_iso = datetime_from_ms(max_allowed * 1000)

            # Get all sub_keys that are about to be deleted - retrieving them from the DELETE
            # statement below is not portable so we do it manually first.
            items = self._run_max_allowed_query(session, SubscriptionSelect(), channel_name, max_allowed)
            sub_key_list = [item.sub_key for item in items]

            # Log details only if there actually is anything to delete
            if sub_key_list:
                self.logger.debug(
                    _msg, channel_name, now_as_iso, now, max_delta, max_allowed_as_iso, max_allowed)
                logger_pubsub.info(
                    _msg, channel_name, now_as_iso, now, max_delta, max_allowed_as_iso, max_allowed)

            # First we need a list of topics to which sub_keys were related - required by broker messages.
            topic_sub_keys = get_topic_sub_keys_from_sub_keys(session, self.server.cluster_id, sub_key_list)

            # Now, delete old connections for that channel from SQL
            self._run_max_allowed_query(session, SubscriptionDelete(), channel_name, max_allowed)

            # Next, notify processes about deleted subscriptions to allow to update in-RAM structures
            self.broker_client.publish({
                'topic_sub_keys': topic_sub_keys,
                'action': PUBSUB.SUBSCRIPTION_DELETE.value,
            })

        # Commit all deletions
        session.commit()
def add_client(self):
    """ Creates a new OpenStack Swift connection built from self.config,
    verifies it with a best-effort HEAD call and adds it to the client pool.
    A failed HEAD is logged but the connection is still pooled - deliberate
    best-effort behaviour preserved from the original implementation.
    """
    conn = Connection(authurl=self.config.auth_url, user=self.config.user, key=self.config.key,
        retries=self.config.retries, snet=self.config.is_snet,
        starting_backoff=float(self.config.starting_backoff), max_backoff=float(self.config.max_backoff),
        tenant_name=self.config.tenant_name, os_options=parse_extra_into_dict(self.config.custom_options),
        auth_version=self.config.auth_version, cacert=self.config.cacert,
        insecure=not self.config.should_validate_cert, ssl_compression=self.config.needs_tls_compr,
        retry_on_ratelimit=self.config.should_retr_ratelimit)

    try:
        # Confirm the connection is actually usable
        conn.head_account()
    except Exception:
        # Logger.warn is a deprecated alias of Logger.warning - use the supported name
        self.logger.warning('Could not HEAD an account (%s), e:`%s`', self.config.name, format_exc())

    # Pool the connection regardless of whether the HEAD call succeeded
    self.client.put_client(conn)
def set(self, key, value, bucket=ZATO_NONE, content_type=ZATO_NONE, metadata=ZATO_NONE,
        storage_class=ZATO_NONE, encrypt_at_rest=ZATO_NONE):
    """ Stores a value under the given key, falling back to the connection's
    Zato-level defaults for any argument left at the ZATO_NONE sentinel.
    """
    # Resolve each argument - an explicit value wins over the configured default
    bucket_name = self.zato_default_bucket if bucket == ZATO_NONE else bucket
    _content_type = self.zato_content_type if content_type == ZATO_NONE else content_type
    _storage_class = self.zato_storage_class if storage_class == ZATO_NONE else storage_class
    _encrypt = self.zato_encrypt_at_rest if encrypt_at_rest == ZATO_NONE else encrypt_at_rest

    # Metadata defaults are parsed lazily, only when no explicit metadata was given
    if metadata == ZATO_NONE:
        metadata = parse_extra_into_dict(self.zato_metadata, False)

    _bucket = Bucket(self.impl, bucket_name)

    _key = Key(_bucket)
    _key.name = key
    _key.content_type = _content_type
    _key.metadata.update(metadata)
    _key.storage_class = _storage_class

    _key.set_contents_from_string(value, encrypt_key=_encrypt)