def subscribe(cls, connection, subscription):
    if connection is None:
        raise custom_exceptions.PubsubException("Subscriber not connected")

    key = subscription.get_key()
    session = ConnectionRegistry.get_session(connection)
    if session is None:
        raise custom_exceptions.PubsubException("No session found")

    subscription.connection_ref = weakref.ref(connection)
    session.setdefault('subscriptions', {})

    if key in session['subscriptions']:
        raise custom_exceptions.AlreadySubscribedException(
            "This connection is already subscribed for such event.")

    session['subscriptions'][key] = subscription

    cls.__subscriptions.setdefault(subscription.event, weakref.WeakKeyDictionary())
    cls.__subscriptions[subscription.event][subscription] = None

    if hasattr(subscription, 'after_subscribe'):
        if connection.on_finish is not None:
            # If the subscription is processed during the request, wait until the
            # request finishes and then run the callback.
            connection.on_finish.addCallback(subscription.after_subscribe)
        else:
            # If the subscription is NOT processed during the request (any real use case?),
            # run the callback instantly (better now than never).
            subscription.after_subscribe(True)

    # A list of 2-tuples is prepared for future multi-subscriptions.
    return ((subscription.event, key, subscription), )
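# Minimal standalone sketch (plain classes, no Stratum imports; all names here are
# assumptions, not part of the original module) showing why subscriptions are stored
# in a weakref.WeakKeyDictionary: the registry entry disappears automatically once
# the last strong reference (the entry in the session dict) is gone, so unsubscribe()
# only has to delete the session entry.
import weakref

class Subscription(object):
    pass

registry = weakref.WeakKeyDictionary()
session_subscriptions = {}

sub = Subscription()
session_subscriptions['key'] = sub      # strong reference held by the session
registry[sub] = None                    # weak reference held by the registry
print(len(registry))                    # -> 1

del session_subscriptions['key']        # drop the strong references
del sub
print(len(registry))                    # -> 0 (entry collected; immediate on CPython)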
def _sync_sessions(self):
    start = posix_time()
    self._dropped_from_last_sync = False

    all_conns = collections.deque()
    curr_list = []
    count = 0

    for connection_ref in ConnectionRegistry.iterate():
        try:
            curr_list.append(connection_ref())
            count += 1
        except:
            pass

        if count >= config.REPORTER_SYNC_SESSIONS_BATCH_SIZE: #@UndefinedVariable
            all_conns.append(curr_list)
            curr_list = []
            count = 0

    if len(curr_list) > 0:
        all_conns.append(curr_list)

    self._to_sync_connections = all_conns
    self._plan_sync_process()
    log.info("Sync. start took %.3fs" % (posix_time() - start, )) #@UndefinedVariable
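# Minimal standalone sketch (assumed names, no Stratum imports) of the batching
# pattern used by _sync_sessions(): connections are split into fixed-size lists
# queued in a deque, so the later sync step can work through one batch at a time
# instead of touching every connection in a single pass.
import collections

BATCH_SIZE = 3  # stands in for config.REPORTER_SYNC_SESSIONS_BATCH_SIZE

def batch(items, batch_size=BATCH_SIZE):
    batches = collections.deque()
    current = []
    for item in items:
        current.append(item)
        if len(current) >= batch_size:
            batches.append(current)
            current = []
    if current:
        batches.append(current)
    return batches

print(batch(range(8)))  # deque([[0, 1, 2], [3, 4, 5], [6, 7]])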
def find_connection(id):
    for conn in ConnectionRegistry.iterate():
        try:
            conn = conn()
            if conn.get_session()['session_id'] == id:
                return conn
        except:
            pass
    return None
def _get_sessions_iterator(connections=False):
    for connection_ref in ConnectionRegistry.iterate():
        try:
            connection = connection_ref()
            session = connection.get_session()

            if connections:
                session = session.copy()
                session['connection'] = connection

            yield session
        except:
            # Not connected
            pass
def _process_no_submits(self):
    # Plan the next execution
    reactor.callLater(self._NO_SHARE_RECALCULATION_PERIOD, #@UndefinedVariable
                      self._process_no_submits)

    # Statistics
    start = posix_time()
    total, changes = 0, 0

    for connection_ref in self._no_submit_conn_refs.iterkeyrefs():
        try:
            connection = connection_ref()
            session = connection.get_session()
            total += 1
        except:
            # Not connected
            continue

        # Skip not initialized sessions
        if not session.get('session_id'):
            continue

        # Decrease hash rate for all authorized workers
        Interfaces.share_manager.update_stats_by_no_submit(session)

        # Shortcut for the slowest miners, recalculation is not necessary
        if session['SL_difficulty'] == 1 and not session['SL_requested_difficulty']:
            continue

        # Difficulty recalculation should almost always lead to new difficulty here
        if self._recalculate(connection, session):
            changes += 1

    # Log results only when at least one connection has finished its short period
    if changes > 0:
        log.info("No submits processed in %.03fs, %d / %d (chng, total)" % \
                 (posix_time() - start, changes, total))

    # Get new shallow copy of all connections
    self._no_submit_conn_refs = ConnectionRegistry.get_shallow_copy()
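# Minimal standalone sketch (assumed interval and task body) of the self-rescheduling
# pattern used by _process_no_submits(): the task re-arms itself with
# reactor.callLater() at the start of every run, so an exception later in the pass
# does not stop the periodic processing.
from twisted.internet import reactor

INTERVAL = 10  # stands in for _NO_SHARE_RECALCULATION_PERIOD (seconds)

def periodic_task():
    # Re-arm first, then do the work.
    reactor.callLater(INTERVAL, periodic_task)
    print("processing idle connections...")

if __name__ == '__main__':
    reactor.callLater(INTERVAL, periodic_task)
    reactor.run()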
def get_subscription(cls, connection, event, key=None):
    '''Return subscription object for given connection and event'''
    session = ConnectionRegistry.get_session(connection)
    if session is None:
        raise custom_exceptions.PubsubException("No session found")

    if key is None:
        sub = [sub for sub in session.get('subscriptions', {}).values()
               if sub.event == event]
        try:
            return sub[0]
        except IndexError:
            raise custom_exceptions.PubsubException(
                "Not subscribed for event %s" % event)
    else:
        raise Exception("Searching subscriptions by key is not implemented yet")
def unsubscribe(cls, connection, subscription=None, key=None):
    if connection is None:
        raise custom_exceptions.PubsubException("Subscriber not connected")

    session = ConnectionRegistry.get_session(connection)
    if session is None:
        raise custom_exceptions.PubsubException("No session found")

    if subscription:
        key = subscription.get_key()

    try:
        # The subscription doesn't need to be removed from cls.__subscriptions,
        # because only a weak reference is kept there.
        del session['subscriptions'][key]
    except KeyError:
        print("Warning: Cannot remove subscription from connection session")  # Python3
        return False

    return True
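# Minimal standalone sketch (all names assumed, including the event string) of the
# session-side bookkeeping that subscribe(), get_subscription() and unsubscribe()
# rely on: subscriptions are keyed by get_key() inside the per-connection session
# dict, and lookups by event simply scan the stored values.
class FakeSubscription(object):
    def __init__(self, event, key):
        self.event = event
        self._key = key

    def get_key(self):
        return self._key

session = {'subscriptions': {}}
sub = FakeSubscription('some.event', ('some.event',))
session['subscriptions'][sub.get_key()] = sub

# Lookup by event, mirroring get_subscription(key=None)
matches = [s for s in session['subscriptions'].values() if s.event == 'some.event']
print(matches[0] is sub)  # True

# Removal, mirroring unsubscribe()
del session['subscriptions'][sub.get_key()]
print(session['subscriptions'])  # {}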