def vacuum_bans(self):
    """remove any bans that might have expired. This takes a while, so it
    is split up over the event loop"""

    def do_vacuum_bans():
        """do the actual clearing of bans"""

        bans_count = len(self.bans)
        log.info("starting ban vacuum with {count} bans", count=bans_count)
        start_time = time.time()

        # create a copy of the items, so we don't have issues modifying
        # while iterating
        for ban in list(self.bans.iteritems()):
            ban_expiry = ban[1][2]
            if ban_expiry is None:
                # entry never expires
                continue
            if ban_expiry < start_time:
                # expired
                del self.bans[ban[0]]
            yield

        log.debug(
            "ban vacuum took {time:.2f} seconds, removed {count} bans",
            count=bans_count - len(self.bans),
            time=time.time() - start_time)
        self.save_bans()

    # TODO: use cooperate() here instead, once you figure out why it's
    # swallowing errors. Perhaps try adding an errback?
    coiterate(do_vacuum_bans())
def season(self, value):
    if self._season != value:
        self._season = value
        if self._cache is not None:
            # Issue 388: Apply the season to the permanent cache.
            # Use a list so that we don't end up with indefinite amounts
            # of work to do, and also so that we don't try to do work
            # while the permanent cache is changing size.
            coiterate(imap(value.transform, list(self._cache.iterperm())))
def RequestAvatars(self, contacts):

    def simpleIterate(contacts):
        if len(contacts) > 0:
            for handle_id in contacts:
                handle = self.handle(telepathy.HANDLE_TYPE_CONTACT,
                                     handle_id)
                d = getPage(str(self.avatars_urls[handle.name]['avatar']),
                            timeout=20)
                d.addCallback(self.on_fetch_avatars_ok, handle)
                d.addErrback(self.on_fetch_avatars_failed, handle)
                yield d

    coiterate(simpleIterate(contacts))
def chat(self, container):
    if container.message.startswith("/"):
        coiterate(
            self.transport.write(make_packet("chat", message=line))
            for line in self.factory.run_command(container.message[1:])
        )
    else:
        message = "<%s> %s" % (self.username, container.message)
        print message
        packet = make_packet("chat", message=message)
        self.factory.broadcast(packet)
def main():
    global c
    app.PrepareBase()

    # start the network client
    c = BAClient()

    # log stdout using twisted
    log.startLogging(sys.stdout)

    # Add connection scene to director
    start_scene = Scene(MessageLayerStyled('connecting...'))  # TODO: add net_message
    director_run_no_loop(start_scene)

    if net_options.pyglet_reactor:
        @director.window.event
        def on_close():
            reactor.stop()
            # Return true to ensure that no other handlers
            # on the stack receive the on_close event
            return True
    else:
        task.coiterate(twisted_draw()).addCallback(shutdown).addErrback(bailout)

    # create a factory, connect to server and store the protocol instance
    deferred = defer.Deferred()
    fac = BAClientFactory(reactor, c, deferred)

    def store_protocol(p):
        fac.p = p
        if net_options.observer:
            fac.p.callRemote(cmd.AddObserver).addErrback(bailout)
        else:
            fac.p.callRemote(cmd.AddClient).addErrback(bailout)

    HOST = my.network['host']
    PORT = my.network['port']
    connector = reactor.connectTCP(HOST, PORT, fac)  #@UnusedVariable
    deferred.addCallback(store_protocol)
    deferred.addErrback(lambda reason: schedule_once(c.cant_connect, 2,
                                                     str(reason.value)))

    # start network_loop
    reactor.run(call_interval=1 / 100.)
def iterateInReactor(i, delay=None):
    """
    Cooperatively iterate over the given iterator.

    @see: L{twisted.internet.task.coiterate}.
    """
    return coiterate(i)
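For context, a minimal, self-contained sketch of the behaviour this wrapper relies on (the generator name count_slowly is invented for illustration): coiterate schedules the iterator on the reactor, pausing whenever the iterator yields a Deferred and resuming once it fires, and the Deferred returned by coiterate fires when the iterator is exhausted.

from twisted.internet import reactor, task

def count_slowly(n):
    # a generator that does a little work per iteration
    for i in range(n):
        print("tick %d" % i)
        # yielding a Deferred makes coiterate wait for it before resuming
        yield task.deferLater(reactor, 0.1, lambda: None)

d = task.coiterate(count_slowly(5))
d.addCallback(lambda _: reactor.stop())
reactor.run()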
def post_tube_offer(self, tube, tube_conn):
    service = tube.props[CHANNEL_TYPE_DBUS_TUBE + ".ServiceName"]
    if service == BUS_NAME:
        self.coherence.dbus.add_to_connection(tube_conn, OBJECT_PATH)
        self.coherence_tube = tube_conn
    elif service == DEVICE_IFACE:
        self.device_tube = tube_conn
    elif service == SERVICE_IFACE:
        self.service_tube = tube_conn

    if not self.announce_done and None not in (self.coherence_tube,
                                               self.device_tube,
                                               self.service_tube):
        self.announce_done = True

        def iterate(devices):
            for device in devices:
                yield self._register_device(device)

        def done(result):
            bus = self.coherence.dbus.bus
            bus.add_signal_receiver(
                self._media_server_found,
                "UPnP_ControlPoint_MediaServer_detected")
            bus.add_signal_receiver(
                self._media_server_removed,
                "UPnP_ControlPoint_MediaServer_removed")

        dfr = task.coiterate(iterate(self.coherence.dbus.devices.values()))
        dfr.addCallback(done)
def post_tube_offer(self, tube, tube_conn):
    service = tube.props[CHANNEL_TYPE_DBUS_TUBE + ".ServiceName"]
    if service == BUS_NAME:
        self.coherence.dbus.add_to_connection(tube_conn, OBJECT_PATH)
        self.coherence_tube = tube_conn
    elif service == DEVICE_IFACE:
        self.device_tube = tube_conn
    elif service == SERVICE_IFACE:
        self.service_tube = tube_conn

    if not self.announce_done and None not in (self.coherence_tube,
                                               self.device_tube,
                                               self.service_tube):
        self.announce_done = True

        def iterate(devices):
            for device in devices:
                yield self._register_device(device)

        def done(result):
            bus = self.coherence.dbus.bus
            bus.add_signal_receiver(self._media_server_found,
                                    "UPnP_ControlPoint_MediaServer_detected")
            bus.add_signal_receiver(self._media_server_removed,
                                    "UPnP_ControlPoint_MediaServer_removed")

        dfr = task.coiterate(iterate(self.coherence.dbus.devices.values()))
        dfr.addCallback(done)
def enable_cache(self, size):
    """
    Set the permanent cache size.

    Changing the size of the cache sets off a series of events which will
    empty or fill the cache to make it the proper size.

    For reference, 3 is a large-enough size to completely satisfy the
    Notchian client's login demands. 10 is enough to completely fill the
    Notchian client's chunk buffer.

    :param int size: The taxicab radius of the cache, in chunks
    """

    log.msg("Setting cache size to %d..." % size)
    self.permanent_cache = set()

    def assign(chunk):
        self.permanent_cache.add(chunk)

    x = self.spawn[0] // 16
    z = self.spawn[2] // 16

    rx = xrange(x - size, x + size)
    rz = xrange(z - size, z + size)
    d = coiterate(self.request_chunk(x, z).addCallback(assign)
                  for x, z in product(rx, rz))
    d.addCallback(lambda chaff: log.msg("Cache size is now %d" % size))
def retrain(self):
    """
    Force all L{iquotient.IHamFilter}s to forget their trained state, then
    retrain them based on L{exmess.Message}s with C{trained} set to
    C{True}, then reclassify all messages.

    This should only be called in the batch process.
    """
    filters = list(self.store.powerupsFor(iquotient.IHamFilter))
    for f in filters:
        f.forgetTraining()

    sq = MailboxSelector(self.store)
    sq.setLimit(5000)
    sq.refineByStatus(TRAINED_STATUS)
    work = iter(list(sq))

    # XXX This really should use in-database state, otherwise a restart in
    # the middle will muck things up.
    def go():
        for msg in work:
            for f in filters:
                f.train(msg._spam, msg)
            yield None
        self.reclassify()

    return coiterate(go())
def handle_conflicts(self, ids):
    self.info("Detected %d conflicts", len(ids))
    if ids:
        return itask.coiterate(
            (self.conflict_cb(doc_id) for doc_id in ids))
    else:
        self.resolve_alert(ALERT_NAME, 'ok')
def enable_cache(self, size):
    """
    Set the permanent cache size.

    Changing the size of the cache sets off a series of events which will
    empty or fill the cache to make it the proper size.

    For reference, 3 is a large-enough size to completely satisfy the
    Notchian client's login demands. 10 is enough to completely fill the
    Notchian client's chunk buffer.

    :param int size: The taxicab radius of the cache, in chunks
    """

    log.msg("Setting cache size to %d..." % size)
    self.permanent_cache = set()

    def assign(chunk):
        self.permanent_cache.add(chunk)

    x = self.spawn[0] // 16
    z = self.spawn[2] // 16

    rx = xrange(x - size, x + size)
    rz = xrange(z - size, z + size)
    d = coiterate(
        self.request_chunk(x, z).addCallback(assign)
        for x, z in product(rx, rz))
    d.addCallback(lambda chaff: log.msg("Cache size is now %d" % size))
def handleNotifies(self, clock=reactor):
    """Process all notify messages in the notifications set."""

    def gen_notifications(notifications):
        while len(notifications) != 0:
            yield notifications.pop()

    return task.coiterate(
        self.handleNotify(notification, clock=clock)
        for notification in gen_notifications(self.notifications))
def parse_data(self, xml_data):

    def iterate(root):
        for item in root.findall('./movieinfo'):
            trailer = self._parse_into_trailer(item)
            yield trailer

    root = xml_data.getroot()
    return task.coiterate(iterate(root))
def parse_data(self, root):

    def iterate(root):
        for item in root.findall('./movieinfo'):
            trailer = self._parse_into_trailer(item)
            yield trailer

    return task.coiterate(iterate(root))
def processNotifications(self, clock=reactor):
    """Process all notifications."""

    def gen_notifications(notifications):
        while len(notifications) != 0:
            yield notifications.popleft()

    return task.coiterate(
        self.processNotification(notification, clock=clock)
        for notification in gen_notifications(self.notifications))
def _list_files(self):
    """
    Get a detailed listing of the current directory
    """
    file_list = FTPFileListProtocol()
    d = self.list('.', file_list)
    d.addCallback(
        lambda ignore: task.coiterate(self._retrieve_files(file_list)))
    d.addErrback(self._list_files_errback)
    return d
def _consume_iterator(self, iterator):
    results = []
    errors = []

    def collect_result(d):
        return d.addCallbacks(lambda result: results.append(result),
                              lambda f: errors.append(f))

    work = imap(collect_result, iterator)
    ds = [task.coiterate(work) for i in range(0, 10)]
    return defer.gatherResults(ds).addCallback(
        lambda ign: (results, errors))
def send_messages():

    def message_iterator():
        for i in range(count):
            content = body + "-%d" % i
            msg = Content(content)
            msg["delivery mode"] = 2
            chan.basic_publish(exchange="chatservice", content=msg,
                               routing_key="txamqp_chatroom")
            print "Sending message: %s" % content
            yield None

    return task.coiterate(message_iterator())
def remote_handlePublicMessage(self, protocol, user, channel, message,
                               encoding, max_line_length):
    try:
        if message.startswith("!"):
            return handleCommand(protocol, user, channel, message[1:],
                                 encoding, max_line_length)
        else:
            callback = functools.partial(protocol.callRemote, "msg", channel)
            handler = MessageHandler(
                self.reactor, self.good_urls, self.bad_urls,
                message, callback, encoding, max_line_length
            )
            return task.coiterate(iter(handler))
    except Exception:
        log.err()
def run(command, arguments, max_processes=None, stdout=sys.stdout):
    if max_processes is None:
        max_processes = multiprocessing.cpu_count()

    processes = (
        VergeProcess.spawn(format_command(command, argument), stdout=stdout)
        for argument in arguments
    )
    return defer.gatherResults(
        coiterate(processes) for _ in xrange(max_processes)
    )
def validate(self, value):
    if not value:
        return super(Tuple, self).validate(None)

    def driver():
        for (f, v) in zip(self.fields, value):
            yield defer.maybeDeferred(f.validate, v).addCallback(
                result.append)

    # Map the items to their validated versions.
    result = []
    d = task.coiterate(driver())
    # Call the super class with the result.
    d.addCallback(
        lambda ignore: super(Tuple, self).validate(tuple(result)))
    return d
def get_children(self, start=0, request_count=0):
    tracks = []

    def query_db():
        rows = self.get_tracks(request_count)
        for row in rows:
            track = self.db_to_didl(row)
            tracks.append(track)
            yield track

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: tracks)
    return dfr
def send_messages():

    def message_iterator():
        for i in range(count):
            content = body + "-%d" % i
            msg = Content(content)
            msg["delivery mode"] = 2
            chan.basic_publish(exchange="chatservice", content=msg,
                               routing_key="txamqp_chatroom")
            print("Sending message: %s" % content)
            yield None

    return task.coiterate(message_iterator())
def enable_cache(self):
    """
    Start up a rudimentary permanent cache.
    """
    self.permanent_cache = set()

    def assign(chunk):
        self.permanent_cache.add(chunk)

    rx = xrange(self.spawn[0] - 3, self.spawn[0] + 3)
    rz = xrange(self.spawn[2] - 3, self.spawn[2] + 3)
    d = coiterate(assign(self.load_chunk(x, z))
                  for x, z in product(rx, rz))
    d.addCallback(lambda chaff: log.msg("Cache is warmed up!"))
def get_devices_async(self, dbus_async_cb, dbus_async_err_cb):
    infos = []

    def iterate_devices(devices):
        for device in devices:
            infos.append(device.get_info())
            yield infos

    def done(generator):
        dbus_async_cb(dbus.Array(infos, signature='v', variant_level=2))

    devices = self.devices.copy().values()
    dfr = task.coiterate(iterate_devices(devices))
    dfr.addCallbacks(done, lambda failure: dbus_async_err_cb(failure.value))
def update_chunks(self):
    print "Sending chunks..."
    x, chaff, z, chaff = split_coords(self.player.location.x,
                                      self.player.location.z)

    new = set(product(xrange(x - 10, x + 10), xrange(z - 10, z + 10)))
    old = set(self.chunks.iterkeys())
    added = new - old
    discarded = old - new

    # Perhaps some explanation is in order.
    # The generator expressions are stored in the protocol instance. If we
    # need to cancel them, we can call their close() method, which causes
    # them to become inert. This is incredibly important because we want
    # to cancel all previously pending chunk changes when a new set of
    # chunk changes is requested.
    # The coiterate() function iterates over the iterable it is fed,
    # without tying up the reactor, by yielding after each iteration. The
    # inner part of the generator expression generates all of the chunks
    # around the currently needed chunk, and it sorts them by distance to
    # the current chunk. The end result is that we load chunks one-by-one,
    # nearest to furthest, without stalling other clients.
    if self.chunk_generators:
        for generator in self.chunk_generators:
            generator.close()

    self.chunk_generators = [
        (
            self.enable_chunk(i, j) for i, j in
            sorted(added, key=lambda t: (t[0] - x)**2 + (t[1] - z)**2)
        ),
        (self.disable_chunk(i, j) for i, j in discarded)
    ]

    for generator in self.chunk_generators:
        coiterate(generator)
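The comment above describes cancelling pending work by closing the generators that were handed to coiterate(). A small sketch of that idiom in isolation (chunk_work, its coordinates, and the timings are invented for illustration): once close() is called, the next attempt to resume the generator raises StopIteration, so coiterate simply stops scheduling work and its Deferred fires.

from twisted.internet import reactor, task

def chunk_work(coords):
    for x, z in coords:
        print("sending chunk %r" % ((x, z),))
        # yield a Deferred so coiterate pauses between chunks
        yield task.deferLater(reactor, 0.05, lambda: None)

gen = chunk_work([(x, z) for x in range(10) for z in range(10)])
d = task.coiterate(gen)

# Later, when the queued work has gone stale, close the generator;
# coiterate then sees an exhausted iterator and finishes quietly.
reactor.callLater(0.2, gen.close)
d.addBoth(lambda _: reactor.stop())
reactor.run()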
def remote_handlePublicMessage(self, protocol, user, channel, message,
                               encoding, max_line_length):
    try:
        if message.startswith("!"):
            return handleCommand(protocol, user, channel, message[1:],
                                 encoding, max_line_length)
        else:
            callback = functools.partial(protocol.callRemote, "msg", channel)
            handler = MessageHandler(self.reactor, self.good_urls,
                                     self.bad_urls, message, callback,
                                     encoding, max_line_length)
            return task.coiterate(iter(handler))
    except Exception:
        log.err()
def _register_device(self, device):
    if self.allowed_devices is not None and device.uuid not in self.allowed_devices:
        self.info("device not allowed: %r", device.uuid)
        return

    device.add_to_connection(self.device_tube, device.path())
    self.info("adding device %s to connection: %s",
              device.get_markup_name(), self.device_tube)

    def iterate():
        for service in device.services:
            if getattr(service, 'NOT_FOR_THE_TUBES', False):
                continue
            yield service.add_to_connection(self.service_tube, service.path)

    dfr = task.coiterate(iterate())
    return dfr
def get_children(self, start=0, request_count=0):
    tracks = []

    def query_db():
        q = "select * from CoreTracks where AlbumID=? order by TrackNumber"
        if request_count:
            q += " limit %d" % request_count
        rows = self._db.sql_execute(q, self.itemID)
        for row in rows:
            track = Track(row, self._db, self)
            tracks.append(track)
            yield track

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: tracks)
    return dfr
def get_videos(self):
    videos = []

    def query_db():
        source_id = self.get_local_video_library_id()
        q = "select * from CoreTracks where TrackID in " \
            "(select distinct(TrackID) from CoreTracks where " \
            "PrimarySourceID=?)"
        for row in self.db.sql_execute(q, source_id):
            video = Video(row, self.db, source_id)
            videos.append(video)
            yield video

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: videos)
    return dfr
def get_artists(self):
    artists = []

    def query_db():
        source_id = self.get_local_music_library_id()
        q = "select * from CoreArtists where ArtistID in " \
            "(select distinct(ArtistID) from CoreTracks where " \
            "PrimarySourceID=?) order by Name"
        for row in self.db.sql_execute(q, source_id):
            artist = Artist(row, self.db, source_id)
            artists.append(artist)
            yield artist

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: artists)
    return dfr
def parse_data(self, root):
    '''
    Iterate over all items found inside the provided tree and parse each
    one of them.
    '''
    self.info(f'BackendBaseStore.parse_data: {root}')

    def iterate(r):
        for el in r.findall(self.root_find_items):
            data = self.parse_item(el)
            if data is None:
                continue
            item = self.add_item(data)
            yield item

    return task.coiterate(iterate(root))
def startDecoding(self):
    """
    Called to start the decoding process.

    @return: A C{Deferred} which will kill the task once the decoding is
        done or on error.
    """
    def cullTask(result):
        self.decoder_task = None

        return result

    self.decoder_task = task.coiterate(self.decoder)
    self.decoder_task.addBoth(cullTask)

    return self.decoder_task
def get_children(self, start=0, end=0):
    albums = []

    def query_db():
        q = "select * from CoreAlbums where ArtistID=? and AlbumID in " \
            "(select distinct(AlbumID) from CoreTracks where " \
            "PrimarySourceID=?) order by Title"
        rows = self._db.sql_execute(q, self.itemID,
                                    self._local_music_library_id)
        for row in rows:
            album = Album(row, self._db, self)
            albums.append(album)
            yield album

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: albums)
    return dfr
def remote_handlePublicMessage(self, protocol, user, channel, message,
                               max_line_length, timestamp):
    if self._staleness_check(timestamp):
        return
    try:
        callback = functools.partial(protocol.callRemote, "msg", channel)
        if message.startswith("!"):
            return handleCommand(protocol, user, channel, message[1:],
                                 max_line_length, callback)
        else:
            handler = MessageHandler(self.reactor, self.good_urls,
                                     self.bad_urls, message, callback,
                                     max_line_length)
            return task.coiterate(iter(handler))
    except Exception:
        log.failure("FIXME, runaway exception")
def _register_device(self, device):
    if self.allowed_devices is not None and device.uuid not in self.allowed_devices:
        self.info("device not allowed: %r", device.uuid)
        return

    device.add_to_connection(self.device_tube, device.path())
    self.info("adding device %s to connection: %s",
              device.get_markup_name(), self.device_tube)

    def iterate():
        for service in device.services:
            if getattr(service, 'NOT_FOR_THE_TUBES', False):
                continue
            yield service.add_to_connection(self.service_tube, service.path)

    dfr = task.coiterate(iterate())
    return dfr
def startEncoding(self):
    """
    Called to start asynchronously iterating the encoder.

    @return: A C{Deferred} which will kill the task once the encoder is
        done or on error will kill the connection.

    @todo: See _startDecoding todo. The same applies here.
    """
    def cullTask(result):
        self.encoder_task = None

        return result

    self.encoder_task = task.coiterate(self.encoder)
    self.encoder_task.addBoth(cullTask)

    return self.encoder_task
def get_devices_async(self, for_mirabeau, dbus_async_cb, dbus_async_err_cb):
    infos = []
    allowed_device_types = ['urn:schemas-upnp-org:device:MediaServer:2',
                            'urn:schemas-upnp-org:device:MediaServer:1']

    def iterate_devices(devices):
        for device in devices:
            if for_mirabeau and device.get_device_type() not in allowed_device_types:
                continue
            infos.append(device.get_info())
            yield infos

    def done(generator):
        dbus_async_cb(dbus.Array(infos, signature='v', variant_level=2))

    devices = self.devices.copy().values()
    dfr = task.coiterate(iterate_devices(devices))
    dfr.addCallbacks(done, lambda failure: dbus_async_err_cb(failure.value))
def get_playlists(self, source_id, PlaylistClass, SmartPlaylistClass):
    playlists = []

    def query_db():
        q = "select * from CorePlaylists where PrimarySourceID=? order by Name"
        for row in self.db.sql_execute(q, source_id):
            playlist = PlaylistClass(row, self)
            playlists.append(playlist)
            yield playlist

        q = "select * from CoreSmartPlaylists where PrimarySourceID=? order by Name"
        for row in self.db.sql_execute(q, source_id):
            playlist = SmartPlaylistClass(row, self)
            playlists.append(playlist)
            yield playlist

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: playlists)
    return dfr
def _cb_got_seen(self, result, chat_props, messages, seen_chats, msg_desc):
    msgs_by_id = dict((self.get_rdkey_for_msg(m)[1], m) for m in messages)
    chatname = chat_props['skype_chatname']
    need_chat = chatname not in seen_chats

    seen_msgs = set([r['value']['rd_key'][1] for r in result['rows']])
    remaining = set(msgs_by_id.keys()) - set(seen_msgs)
    # we could just process the empty list as normal, but the logging of
    # an info when we do have items is worthwhile...
    if not remaining and not need_chat:
        logger.debug("Chat %r has no new %s items to process", chatname,
                     msg_desc)
        return None

    # we have something to do...
    logger.info("Chat %r has %d %s items to process", chatname,
                len(remaining), msg_desc)
    logger.debug("we've already seen %d %s items from this chat",
                 len(seen_msgs), msg_desc)
    return task.coiterate(
        self.gen_items(chat_props, remaining, msgs_by_id, need_chat))
def get_albums(self):
    albums = []
    artists = {}

    def query_db():
        q = "select * from CoreAlbums where AlbumID in " \
            "(select distinct(AlbumID) from CoreTracks where " \
            "PrimarySourceID=?) order by Title"
        for row in self.db.sql_execute(q, self.get_local_music_library_id()):
            try:
                artist = artists[row.ArtistID]
            except KeyError:
                artist = self.get_artist_with_id(row.ArtistID)
                artists[row.ArtistID] = artist
            album = Album(row, self.db, artist)
            albums.append(album)
            yield album

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: albums)
    return dfr
def get_tracks(self):
    tracks = []
    albums = {}

    def query_db():
        q = "select * from CoreTracks where TrackID in " \
            "(select distinct(TrackID) from CoreTracks where " \
            "PrimarySourceID=?) order by AlbumID,TrackNumber"
        for row in self.db.sql_execute(q, self.get_local_music_library_id()):
            if row.AlbumID not in albums:
                album = self.get_album_with_id(row.AlbumID)
                albums[row.AlbumID] = album
            else:
                album = albums[row.AlbumID]
            track = Track(row, self.db, album)
            tracks.append(track)
            yield track

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: tracks)
    return dfr
def get_tracks(self):
    tracks = []
    albums = {}

    def query_db():
        q = "select * from CoreTracks where TrackID in " \
            "(select distinct(TrackID) from CoreTracks where " \
            "PrimarySourceID=?) order by AlbumID,TrackNumber"
        for row in self.db.sql_execute(q, self.get_local_music_library_id()):
            if row.AlbumID not in albums:
                album = self.get_album_with_id(row.AlbumID)
                albums[row.AlbumID] = album
            else:
                album = albums[row.AlbumID]
            track = Track(row, self.db, album)
            tracks.append(track)
            yield track

    dfr = task.coiterate(query_db())
    dfr.addCallback(lambda gen: tracks)
    return dfr
def enable_cache(self, size):
    """
    Set the permanent cache size.

    Changing the size of the cache sets off a series of events which will
    empty or fill the cache to make it the proper size.

    For reference, 3 is a large-enough size to completely satisfy the
    Notchian client's login demands. 10 is enough to completely fill the
    Notchian client's chunk buffer.

    :param int size: The taxicab radius of the cache, in chunks

    :returns: A ``Deferred`` which will fire when the cache has been
              adjusted.
    """

    log.msg("Setting cache size to %d, please hold..." % size)

    assign = self._cache.pin

    def worker(x, z):
        log.msg("Adding %d, %d to cache..." % (x, z))
        return self.request_chunk(x, z).addCallback(assign)

    x = self.level.spawn[0] // 16
    z = self.level.spawn[2] // 16

    rx = xrange(x - size, x + size)
    rz = xrange(z - size, z + size)

    work = (worker(x, z) for x, z in product(rx, rz))

    d = coiterate(work)

    @d.addCallback
    def notify(none):
        log.msg("Cache size is now %d!" % size)

    return d
def get_devices_async(self, for_mirabeau, dbus_async_cb, dbus_async_err_cb):
    infos = []
    allowed_device_types = [
        'urn:schemas-upnp-org:device:MediaServer:2',
        'urn:schemas-upnp-org:device:MediaServer:1'
    ]

    def iterate_devices(devices):
        for device in devices:
            if (for_mirabeau and
                    device.get_device_type() not in allowed_device_types):
                continue
            infos.append(device.get_info())
            yield infos

    def done(generator):
        dbus_async_cb(dbus.Array(infos, signature='v', variant_level=2))

    devices = self.devices.copy().values()
    dfr = task.coiterate(iterate_devices(devices))
    dfr.addCallbacks(done, lambda failure: dbus_async_err_cb(failure.value))
    for statistic in options['statistics'].split():
        stat, samples = select(raw, benchmark, param, statistic)
        samples = stat.squash(samples)
        yield upload(
            reactor,
            options['url'], options['project'], options['revision'],
            options['revision-date'], benchmark, param, statistic,
            options['backend'], options['environment'], samples)

        # This is somewhat hard-coded to the currently
        # collected stats.
        if statistic == 'SQL':
            stat, samples = select(raw, benchmark, param, 'execute')
            samples = stat.squash(samples, 'count')
            yield upload(
                reactor,
                options['url'], options['project'], options['revision'],
                options['revision-date'], benchmark, param,
                statistic + 'count',
                options['backend'], options['environment'], samples)

d = coiterate(go())
d.addErrback(err, "Mass upload failed")
reactor.callWhenRunning(d.addCallback, lambda ign: reactor.stop())
reactor.run()
def run(self):
    if not self.running:
        self.running = True
        task.coiterate(iter(self))