def get(self):
    """Serve the stream endpoint.

    Three cases, keyed on the request's User-Agent and host:
      * A request whose User-Agent matches config.relay_ua is another
        relay registering itself; it is added to self.relays and will
        be streamed MPEG audio directly.
      * A localhost request is a debug listener and is also streamed
        directly.
      * Any other listener is redirected to a relay, or sent a 503 if
        no relays are available.
    """
    try:
        # Prefer the proxy-supplied client address when present.
        ip = self.request.headers.get('X-Real-Ip', self.request.remote_ip)
        ua = self.request.headers.get('User-Agent', None)
        if ua == config.relay_ua:
            # Relay registration: the relay advertises its address, port
            # and load-balancing weight via X-Relay-* headers.
            url = self.request.headers['X-Relay-Addr']
            if not url.startswith('http://'):
                url = "http://" + url
            port = self.request.headers['X-Relay-Port']
            self.weight = int(self.request.headers.get('X-Relay-Weight', 1))
            log.info("Added new relay at %s:%s with weight %d.",
                     url, port, self.weight)
            self.set_header("Content-Type", "audio/mpeg")
            self.url = "%s:%s/all.mp3" % (url, port)
            self.relays.append(self)
            emit("relays", {"count": len(self.relays)})
        else:
            if self.request.host.startswith("localhost"):
                # Local debugging: stream directly, bypassing relays.
                log.info("Added new debug listener at %s.", ip)
                self.set_header("Content-Type", "audio/mpeg")
                self.relays.append(self)
            elif not self.relays:
                # Nothing to redirect to: service unavailable.
                tornado.web.RequestHandler.send_error(self, 503)
            else:
                relay = self.relay_url()
                log.info("Redirected new listener %s to %s", ip, relay)
                self.redirect(relay)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Only real errors get the 500.
        log.error("Error in stream.get:\n%s", traceback.format_exc())
        tornado.web.RequestHandler.send_error(self, 500)
def get(self):
    """Serve the stream endpoint (relay registration, debug listener,
    or redirect-to-relay).

    * User-Agent matching config.relay_ua: register the caller as a
      relay (address/port/weight taken from X-Relay-* headers) and
      stream MPEG audio to it.
    * localhost host header: treat as a debug listener, stream directly.
    * Otherwise: redirect to a relay via self.relay_url(), or send a
      503 when no relays exist.
    """
    try:
        # X-Real-Ip is set by the fronting proxy; fall back to socket peer.
        ip = self.request.headers.get('X-Real-Ip', self.request.remote_ip)
        ua = self.request.headers.get('User-Agent', None)
        if ua == config.relay_ua:
            url = self.request.headers['X-Relay-Addr']
            if not url.startswith('http://'):
                url = "http://" + url
            port = self.request.headers['X-Relay-Port']
            # Weight defaults to 1 when the relay does not advertise one.
            self.weight = int(self.request.headers.get('X-Relay-Weight', 1))
            log.info("Added new relay at %s:%s with weight %d.",
                     url, port, self.weight)
            self.set_header("Content-Type", "audio/mpeg")
            self.url = "%s:%s/all.mp3" % (url, port)
            self.relays.append(self)
            emit("relays", {"count": len(self.relays)})
        else:
            if self.request.host.startswith("localhost"):
                log.info("Added new debug listener at %s.", ip)
                self.set_header("Content-Type", "audio/mpeg")
                self.relays.append(self)
            elif not self.relays:
                tornado.web.RequestHandler.send_error(self, 503)
            else:
                relay = self.relay_url()
                log.info("Redirected new listener %s to %s", ip, relay)
                self.redirect(relay)
    except Exception:
        # Fix: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer converted into a 500.
        log.error("Error in stream.get:\n%s", traceback.format_exc())
        tornado.web.RequestHandler.send_error(self, 500)
def generate(get_relays, get_stats, **queues):
    """Yield monitor snapshots forever.

    Each snapshot (one per config.monitor_update_time seconds) contains
    the connected listeners' request headers plus remote IP, the
    buffered frame count of every named queue, and the stats dict.
    """
    while True:
        time.sleep(config.monitor_update_time)
        current = get_relays()
        emit("relays", {"count": len(current)})
        listeners = []
        for relay in current:
            # Copy the headers and tack on the peer address; a later
            # assignment overrides any header of the same name, exactly
            # as the concatenated-pairs construction did.
            info = dict(relay.request.headers)
            info["remote_ip"] = relay.request.remote_ip
            listeners.append(info)
        yield {
            "listeners": listeners,
            "queues": {name: q.buffered for name, q in queues.iteritems()},
            "info": get_stats(),
        }
def generate(get_relays, get_stats, **queues):
    """Infinite generator of monitoring snapshots.

    Sleeps config.monitor_update_time between snapshots, emits the
    relay count, then yields a dict with listener header dumps (plus
    "remote_ip"), per-queue buffered counts, and current stats.
    """
    while True:
        time.sleep(config.monitor_update_time)
        relays = get_relays()
        emit("relays", {"count": len(relays)})

        def describe(handler):
            # Header dict augmented with the peer address; assignment
            # after the copy matches the original pair-override order.
            d = dict(handler.request.headers)
            d["remote_ip"] = handler.request.remote_ip
            return d

        yield {
            "listeners": [describe(g) for g in relays],
            "queues": {n: q.buffered for n, q in queues.iteritems()},
            "info": get_stats(),
        }
def broadcast(self):
    """Send the next MP3 frame to all listeners and keep the audio
    clock in step with wall time.

    Timing math assumes MPEG-1 Layer III framing: 1152 samples per
    frame at 44100 Hz. When the audio clock lags wall time by more
    than self.__drift_limit, extra frames are pushed to catch up.

    Exits the process with RESTART_EXIT_CODE if the frame queue
    starves after playback has begun.
    """
    try:
        now = time.time()
        self.__broadcast()
        if not self.__first_send:
            log.info("Sending first frame for %s.", self.__name)
            self.__first_send = time.time()
            self.__semaphore.release()
        uptime = float(now - self.__first_send)
        # Bug fix: `samples` and `duration` were previously computed only
        # inside the `% 30` branch, but the `% 2296` branch below reads
        # them too. 2296 is not a multiple of 30, so hitting that branch
        # raised NameError (uncaught — only Queue.Empty is handled here).
        samples = self.__count * 1152
        duration = float(self.__count) * 1152.0 / 44100.0
        if self.__count > 0 and not self.__count % 30:
            buffered = self.queue.buffered
            emit('drift', {
                'ms': (duration - uptime) * 1000.0,
                'rate': (duration / uptime),
            })
            emit('buffered', {
                'queue': self.__name,
                'frames': buffered,
            })
        if self.__count > 0 and not self.__count % 2296:
            log.debug("Sent %d frames (%dsam, %fs) over %fs (%fx).",
                      self.__count, samples, duration, uptime,
                      duration / uptime)
        if duration + self.__drift_limit < uptime:
            log.warning("Queue %s drifting by %2.2f ms. Compensating...",
                        self.__name, 1000 * (uptime - duration))
            # Recompute inside the loop: __broadcast advances __count.
            while (float(self.__count) * 1152.0 / 44100.0) < uptime:
                self.__broadcast()
    except Queue.Empty:
        # No frame was ready. If we had already been sending, the
        # queue is starving; die so the supervisor restarts us.
        if self.__packet and not self.__starving:
            self.__starving = True
            log.critical("Dropping frames! Queue %s is starving!",
                         self.__name)
            log.critical("Committing suicide.")
            sys.exit(RESTART_EXIT_CODE)
def __lame_write(self):
    """Feed queued PCM data to the LAME encoder's stdin until finished.

    Items on self.__write_queue are either raw numpy sample arrays or
    renderable segment objects (anything with .render()/.samples); a
    None item is the shutdown sentinel. self.encode is released once
    per consumed item, success or failure.
    """
    while not self.finished:
        try:
            data = self.__write_queue.get()
            if data is None:
                # Sentinel: the producer is done; stop encoding.
                break
            if isinstance(data, numpy.ndarray):
                # Raw interleaved samples: bytes -> per-channel sample
                # count. (Python 2 integer division is intentional.)
                self.buffered += len(data) / self.channels \
                    * (self.input_wordlength / 8)
                try:
                    data.tofile(self.lame.stdin)
                except IOError:
                    log.error("Could not write to lame!")
                    self.finished = True
                    break
            else:
                try:
                    tmp = 0
                    for chunk in data.render(self.stream_chunk_size):
                        try:
                            samples = len(chunk)
                            self.buffered += samples
                            self.lame_input_length += samples
                            tmp += samples
                            chunk.tofile(self.lame.stdin)
                        except IOError:
                            log.error("Could not write to lame!")
                            self.finished = True
                            break
                    # Track how far rendered output diverges from the
                    # segment's nominal sample count.
                    self.delta += tmp - data.samples
                    log.debug("Current delta: %d samples.", self.delta)
                    # Note: this delta will cause drift of 1 second/month.
                    # TODO: Fix it. Eventually.
                    emit('lame_delta', {"samples": self.delta})
                except Exception:
                    # Fix: was bare `except:`; narrowed so SystemExit and
                    # KeyboardInterrupt still propagate out of the thread.
                    log.error("Couldn't render segment due to:\n%s",
                              traceback.format_exc())
        except Exception:
            # Fix: likewise narrowed from a bare `except:`.
            log.critical("Failed to write to Lame:\n%s",
                         traceback.format_exc())
        finally:
            # Wake the encoder exactly once per queue item.
            self.encode.release()
    log.critical("Encoder finishing!")
def broadcast(self):
    """Broadcast one MP3 frame and compensate for wall-clock drift.

    Uses MPEG-1 Layer III constants (1152 samples/frame, 44100 Hz) to
    convert the sent-frame count into audio seconds; if the audio
    clock falls more than self.__drift_limit behind wall time, extra
    frames are sent until it catches up. A starved queue after first
    send terminates the process with RESTART_EXIT_CODE.
    """
    try:
        now = time.time()
        self.__broadcast()
        if not self.__first_send:
            log.info("Sending first frame for %s.", self.__name)
            self.__first_send = time.time()
            self.__semaphore.release()
        uptime = float(now - self.__first_send)
        # Bug fix: the `% 2296` debug branch reads `samples`/`duration`,
        # which were only assigned inside the `% 30` branch; since
        # 2296 % 30 != 0 this raised NameError. Compute them up front.
        samples = self.__count * 1152
        duration = float(self.__count) * 1152.0 / 44100.0
        if self.__count > 0 and not self.__count % 30:
            buffered = self.queue.buffered
            emit('drift', {
                'ms': (duration - uptime) * 1000.0,
                'rate': (duration / uptime),
            })
            emit('buffered', {
                'queue': self.__name,
                'frames': buffered,
            })
        if self.__count > 0 and not self.__count % 2296:
            log.debug("Sent %d frames (%dsam, %fs) over %fs (%fx).",
                      self.__count, samples, duration, uptime,
                      duration / uptime)
        if duration + self.__drift_limit < uptime:
            log.warning("Queue %s drifting by %2.2f ms. Compensating...",
                        self.__name, 1000 * (uptime - duration))
            # Must recompute per iteration: __broadcast advances __count.
            while (float(self.__count) * 1152.0 / 44100.0) < uptime:
                self.__broadcast()
    except Queue.Empty:
        # Queue had no frame ready. Once playback has started this
        # means starvation: restart the whole process.
        if self.__packet and not self.__starving:
            self.__starving = True
            log.critical("Dropping frames! Queue %s is starving!",
                         self.__name)
            log.critical("Committing suicide.")
            sys.exit(RESTART_EXIT_CODE)
def on_finish(self):
    """Deregister this connection from the relay list when it closes.

    No-op when the connection was never registered (e.g. a listener
    that was redirected elsewhere).
    """
    if self not in self.relays:
        return
    self.relays.remove(self)
    client_ip = self.request.headers.get('X-Real-Ip',
                                         self.request.remote_ip)
    log.info("Removed relay at %s with weight %d.", client_ip, self.weight)
    emit("relays", {"count": len(self.relays)})
def generate():
    """Endlessly yield the next SoundCloud track to play.

    Repeatedly fetches the hottest tracks, culls and orders them for
    smooth transitions, then yields them one by one, letting
    immediately-requested tracks pre-empt the playlist. Any uncaught
    error is logged and ends the generator.
    """
    try:
        tracks = []
        last = []
        wait = 2  # seconds
        d = Database()
        # Debug path: while the module-level `test` flag is set, loop a
        # single fixed track forever. NOTE(review): `test` is defined
        # elsewhere in this module — confirm its semantics there.
        while test:
            yield d.merge(client.get('/tracks/73783917'))
        while True:
            log.info("Grabbing fresh tracklist from SoundCloud...")
            with Timer() as t:
                # Retry until SoundCloud responds; fetch two pages of 200.
                while not tracks:
                    try:
                        tracks = client.get('/tracks', order='hotness',
                                            limit=200, offset=0)
                        tracks += client.get('/tracks', order='hotness',
                                             limit=200, offset=200)
                    except Exception as e:
                        log.warning("Got %s from SoundCloud. Retrying in %2.2f seconds...",
                                    e, wait)
                        time.sleep(wait)
            log.info("Got %d tracks in %2.2fms.", len(tracks), t.ms)
            emit('tracks_fetch', {"count": len(tracks), "ms": t.ms})
            # Keep the track that just played in the pool so the rotation
            # below can resume from it even if it fell out of the charts.
            if last and not any([t.id == last[-1].id for t in tracks]):
                tracks.append(last[-1])
            tracks = cull(tracks)
            tracks += list(get_force_mix_tracks(d))
            try:
                tracks = [d.merge(t) for t in tracks]
            except:
                log.warning("Could not merge tracks with DB due to:\n%s",
                            traceback.format_exc())
            # Order the playlist by solving a travelling-salesman problem
            # over the pairwise `distance` metric.
            log.info("Solving TSP on %d tracks...", len(tracks))
            with Timer() as t:
                tracks = [tracks[i]
                          for i in tsp.solve(tracks, distance,
                                             len(tracks) * config.tsp_mult)]
            log.info("Solved TSP in %2.2fms.", t.ms)
            emit('tsp_solve', {"count": len(tracks), "ms": t.ms})
            for track in tracks:
                for criterion in criteria:
                    criterion.postcompute(track)
            # Rotate so playback continues right after the last track played.
            if last:
                i = getIndexOfId(tracks, last[-1].id) + 1
                tracks = tracks[i:] + tracks[:i]
            for track in tracks:
                # User-requested tracks pre-empt the computed playlist.
                for priority in get_immediate_tracks(d):
                    emit('decide_priority')
                    yield priority
                emit('decide_normal')
                yield track
            last = tracks
            tracks = []
    except:
        print traceback.format_exc()
        log.critical("%s", traceback.format_exc())