Esempio n. 1
0
def load_translations():
    """Load the master English locale plus every other ../lang/*.json locale.

    Populates the module globals:
      master            -- dict parsed from en_MASTER.json
      translations      -- locale code -> RainwaveLocale
      locale_names_json -- JSON-encoded {locale code: native language name}
    """
    global master
    global translations
    global locale_names_json

    lang_dir = os.path.join(os.path.dirname(__file__), "../lang")

    # 'with' guarantees the handle is closed even if json.load raises
    # (the original leaked the handle on a parse error).
    with open(os.path.join(lang_dir, "en_MASTER.json")) as master_file:
        master = json.load(master_file)

    locale_names = {}
    for root, subdir, files in os.walk(lang_dir):
        for filename in files:
            if filename == "en_MASTER.json":
                continue
            if not filename.endswith(".json"):
                continue
            locale_code = filename[:-5]  # strip ".json"
            try:
                with codecs.open(os.path.join(lang_dir, filename),
                                 "r",
                                 encoding="utf-8") as f:
                    translations[locale_code] = RainwaveLocale(
                        locale_code, master, json.load(f))
                locale_names[locale_code] = translations[
                    locale_code].dict['language_name']
            # Narrowed from a bare 'except', which would also swallow
            # KeyboardInterrupt/SystemExit.
            except Exception:
                log.warn("locale",
                         "%s is not a valid JSON file." % locale_code)

    locale_names_json = tornado.escape.json_encode(locale_names)
Esempio n. 2
0
def integrate_new_events(sid):
	"""Sync the in-memory next[sid] event list with the database.

	Drops events whose backing DB row has disappeared, appends any newly
	scheduled events starting within the next 24 hours, and appends
	priority elections (the sorter handles ordering).

	Returns (max_sched_id, max_elec_id, num_elections).
	"""
	max_sched_id, max_elec_id, num_elections = _get_schedule_stats(sid)

	# Will remove events that no longer have a schedule ID with them.
	# Bug fix: these two lookups interpolated e.id straight into the SQL
	# string; use parameterized queries like the rest of the module.
	new_next = []
	for e in next[sid]:
		if e.is_election and db.c.fetch_var("SELECT elec_id FROM r4_elections WHERE elec_id = %s", (e.id,)):
			new_next.append(e)
		elif db.c.fetch_var("SELECT sched_id FROM r4_schedule WHERE sched_id = %s", (e.id,)):
			new_next.append(e)
	next[sid] = new_next

	# This is the line of code that loads in any upcoming events
	unused_sched_id = db.c.fetch_list("SELECT sched_id FROM r4_schedule WHERE sid = %s AND sched_id > %s AND sched_used = FALSE AND sched_start <= %s ORDER BY sched_start", (sid, max_sched_id, int(time.time()) + 86400))
	for sched_id in unused_sched_id:
		e = event.load_by_id(sched_id)
		if not e:
			log.warn("schedule", "Unused event ID %s was None." % sched_id)
		else:
			next[sid].append(e)

	# Step 4: Insert "priority elections" ahead of anything else
	priority_elec_ids = db.c.fetch_list("SELECT elec_id FROM r4_elections WHERE sid = %s AND elec_id > %s AND elec_priority = TRUE ORDER BY elec_id DESC", (sid, max_elec_id))
	for elec_id in priority_elec_ids:
		e = playlist.Election.load_by_id(elec_id)
		# The client, through the API, sets start times, so we don't have to worry about where in the array it goes.  The sorter will take care of it.
		next[sid].append(e)
		if e.id > max_elec_id:
			max_elec_id = e.id
		num_elections += 1
		e.set_priority(False)

	return (max_sched_id, max_elec_id, num_elections)
Esempio n. 3
0
def start_icecast_sync():
    """Kick off asynchronous listener queries against every relay's OGG mount.

    Resets the module-global all_returned completion map; a warning is
    logged when a previous sync never completed.  Results arrive through
    IcecastSyncCallback.respond.
    """
    global all_returned

    stream_names = dict(
        (sid, config.get_station(sid)['stream_filename'])
        for sid in config.station_ids)

    if all_returned:
        log.warn("icecast_sync", "Previous operation did not finish!")

    all_returned = {}
    listener_ids = {}
    for relay, relay_info in config.get("relays").iteritems():
        listener_ids[relay] = []
        relay_base_url = "%s%s:%s/admin/listclients?mount=/" % (
            relay_info['protocol'], relay_info['ip_address'],
            relay_info['port'])
        for sid in relay_info['sids']:
            # MP3 mounts are intentionally not queried: the beta version
            # of the site doesn't do MP3.
            key = "%s_%s_ogg" % (relay, sid)
            all_returned[key] = False
            handler2 = IcecastSyncCallback(relay, relay_info, key, sid)
            http_client2 = tornado.httpclient.AsyncHTTPClient()
            http_client2.fetch(relay_base_url + stream_names[sid] + ".ogg",
                               handler2.respond,
                               auth_username=relay_info['admin_username'],
                               auth_password=relay_info['admin_password'])
Esempio n. 4
0
def start_icecast_sync():
	"""Query every relay's MP3 and OGG mounts for their listener lists.

	Resets the module globals all_returned / listener_ids before firing
	the requests; IcecastSyncCallback.respond records the results, and
	_process() is invoked once all fetches have been issued.
	"""
	global all_returned
	global listener_ids
	# (removed an unused "global blargh" declaration -- dead leftover)

	stream_names = {}
	for sid in config.station_ids:
		stream_names[sid] = config.get_station(sid, 'stream_filename')

	if all_returned:
		log.warn("icecast_sync", "Previous operation did not finish!")

	all_returned = {}
	listener_ids = {}
	for relay, relay_info in config.get("relays").iteritems():
		listener_ids[relay] = []
		relay_base_url = "%s%s:%s/admin/listclients?mount=/" % (relay_info['protocol'], relay_info['ip_address'], relay_info['port'])
		for sid in relay_info['sids']:
			all_returned["%s_%s_mp3" % (relay, sid)] = False
			handler = IcecastSyncCallback(relay, relay_info, "%s_%s_mp3" % (relay, sid), sid)
			# NOTE(review): tornado's *blocking* HTTPClient is used with a
			# callback= kwarg; recent tornado releases reject that kwarg --
			# confirm the pinned tornado version supports it.
			http_client = tornado.httpclient.HTTPClient()
			http_client.fetch(relay_base_url + stream_names[sid] + ".mp3",
							  auth_username=relay_info['admin_username'],
							  auth_password=relay_info['admin_password'],
							  callback=handler.respond)

			all_returned["%s_%s_ogg" % (relay, sid)] = False
			handler2 = IcecastSyncCallback(relay, relay_info, "%s_%s_ogg" % (relay, sid), sid)
			http_client2 = tornado.httpclient.HTTPClient()
			http_client2.fetch(relay_base_url + stream_names[sid] + ".ogg",
							   auth_username=relay_info['admin_username'],
							   auth_password=relay_info['admin_password'],
							   callback=handler2.respond)
	_process()
Esempio n. 5
0
 def length(self):
     """Best-effort event length in seconds; 0 when it cannot be derived.

     These go in descending order of accuracy.
     """
     if not self.used and hasattr(self, "songs"):
         return self.songs[0].data["length"]
     elif self.start_actual:
         # NOTE(review): computed as start - end, which is negative when
         # end > start.  Looks inverted but is preserved as-is -- confirm
         # against callers before flipping the operands.
         return self.start_actual - self.end
     elif self.start and self.end:
         return self.start - self.end
     elif hasattr(self, "songs"):
         return self.songs[0].data["length"]
     else:
         # Bug fix: this branch is only reachable when the event has no
         # "songs" attribute, so len(self.songs) raised AttributeError
         # instead of logging; use getattr with a default.
         log.warn(
             "event",
             "Event ID %s (type %s) failed on length calculation.  Used: %s / Songs: %s / Start Actual: %s / Start: %s / End: %s"
             % (
                 self.id,
                 self.type,
                 self.used,
                 len(getattr(self, "songs", [])),
                 self.start_actual,
                 self.start,
                 self.end,
             ),
         )
         return 0
Esempio n. 6
0
    def get(self, sid):
        """Advance station `sid` and write the next file/annotation to play.

        Retries once (350ms later, on a freshly reopened DB connection)
        when the advance hits a transaction deadlock.
        """
        self.success = False
        self.sid = None
        if int(sid) in config.station_ids:
            self.sid = int(sid)
        else:
            return

        try:
            schedule.advance_station(self.sid)
        except psycopg2.extensions.TransactionRollbackError:
            if not self.retried:
                self.retried = True
                log.warn(
                    "backend",
                    "Database transaction deadlock.  Re-opening database and setting retry timeout."
                )
                db.close()
                db.open()
                # Bug fix: self.get requires the sid argument, so the bound
                # method cannot be handed to add_timeout directly -- the
                # IOLoop would call it with no arguments.
                tornado.ioloop.IOLoop.instance().add_timeout(
                    datetime.timedelta(milliseconds=350),
                    lambda: self.get(sid))
                # Let the retry produce the output instead of writing a
                # stale pre-advance file here.
                return
            else:
                raise

        if not config.get("liquidsoap_annotations"):
            self.write(schedule.get_current_file(self.sid))
        else:
            self.write(
                self._get_annotated(schedule.get_current_event(self.sid)))
        self.success = True
Esempio n. 7
0
def get_random_song(sid):
	"""
	Fetch a random song, abiding by all election block, request block, and
	availability rules.  Falls back to get_random_ignore_requests on failure.
	"""

	sql_query = ("FROM r4_song_sid "
					"JOIN r4_songs USING (song_id) "
					"JOIN r4_album_sid ON (r4_album_sid.album_id = r4_songs.album_id AND r4_album_sid.sid = r4_song_sid.sid) "
				"WHERE r4_song_sid.sid = %s "
					"AND song_exists = TRUE "
					"AND song_cool = FALSE "
					"AND song_request_only = FALSE "
					"AND song_elec_blocked = FALSE "
					"AND album_requests_pending IS NULL")
	num_available = db.c.fetch_var("SELECT COUNT(song_id) " + sql_query, (sid,))
	log.info("song_select", "Song pool size (cooldown, blocks, requests): %s" % num_available)
	if num_available == 0:
		# Nothing survives the filters: log the literal query and fall back.
		log.warn("song_select", "No songs available despite no timing rules.")
		log.debug("song_select", "Song select query: SELECT COUNT(song_id) " + (sql_query %  (sid,)))
		return get_random_song_ignore_requests(sid)
	# Uniformly random row from the filtered pool via OFFSET.
	offset = random.randint(1, num_available) - 1
	song_id = db.c.fetch_var("SELECT song_id " + sql_query + " LIMIT 1 OFFSET %s", (sid, offset))
	return Song.load_from_id(song_id, sid)
Esempio n. 8
0
def _add_scan_error(filename, xception, full_exc=None):
    """Record a scan failure in the 'backend_scan_errors' cache list (max 100).

    filename -- the file being scanned when the error occurred
    xception -- the exception instance
    full_exc -- optional sys.exc_info() triple for a full traceback
    """
    scan_errors = []
    try:
        scan_errors = cache.get("backend_scan_errors")
    except Exception:  # best-effort read; a cold/broken cache means no history
        pass
    if not scan_errors:
        scan_errors = []

    eo = {
        "time": int(timestamp()),
        "file": filename,
        "type": xception.__class__.__name__,
        "error": str(xception),
        "traceback": ""
    }
    # Expected scan problems (PassableScanError / IOError / OSError) get a
    # one-line warning; anything else gets a full traceback.
    if not isinstance(xception, PassableScanError) and not isinstance(
            xception, IOError) and not isinstance(xception, OSError):
        # Bug fix: format_exception returns a *list* of lines; join it so
        # "traceback" is always a string, matching its default value above.
        if full_exc:
            eo['traceback'] = "".join(traceback.format_exception(*full_exc))  #pylint: disable=W0142
            log.exception("scan", "Error scanning %s" % filename, full_exc)
        else:
            eo['traceback'] = "".join(
                traceback.format_exception(*sys.exc_info()))
            log.exception("scan", "Error scanning %s" % filename,
                          sys.exc_info())
    else:
        log.warn("scan",
                 "Warning scanning %s: %s" % (filename, xception.message))
    scan_errors.insert(0, eo)
    if len(scan_errors) > 100:
        scan_errors = scan_errors[0:100]
    cache.set("backend_scan_errors", scan_errors)
Esempio n. 9
0
def get_random_song(sid):
	"""
	Fetch a random song, abiding by all election block, request block, and
	availability rules.  Falls back to get_random_ignore_requests on failure.
	"""

	sql_query = ("FROM r4_song_sid "
					"JOIN r4_songs USING (song_id) "
					"JOIN r4_album_sid ON (r4_album_sid.album_id = r4_songs.album_id AND r4_album_sid.sid = r4_song_sid.sid) "
				"WHERE r4_song_sid.sid = %s "
					"AND song_exists = TRUE "
					"AND song_cool = FALSE "
					"AND song_request_only = FALSE "
					"AND song_elec_blocked = FALSE "
					"AND album_requests_pending IS NULL")
	num_available = db.c.fetch_var("SELECT COUNT(song_id) " + sql_query, (sid,))
	log.info("song_select", "Song pool size (cooldown, blocks, requests): %s" % num_available)
	offset = 0
	if num_available == 0:
		log.warn("song_select", "No songs available despite no timing rules.")
		log.debug("song_select", "Song select query: SELECT COUNT(song_id) " + (sql_query %  (sid,)))
		return get_random_song_ignore_requests(sid)
	else:
		offset = random.randint(1, num_available) - 1
		song_id = db.c.fetch_var("SELECT song_id " + sql_query + " LIMIT 1 OFFSET %s", (sid, offset))
		return Song.load_from_id(song_id, sid)
Esempio n. 10
0
def _start(callback):
	"""Synchronously fire listener-list queries for every relay mount.

	callback -- handed to each IcecastSyncCallback and also invoked once
	at the end, after all fetches have been issued.
	"""
	global in_process
	# NOTE(review): in_process is only warned about, never reset here --
	# entries from an unfinished previous run are retained; confirm that
	# is intended.
	if in_process:
		log.warn("icecast_sync", "Previous operation did not finish!")

	stream_names = {}
	for sid in config.station_ids:
		stream_names[sid] = config.get_station(sid, 'stream_filename')

	for relay, relay_info in config.get("relays").iteritems():
		relay_base_url = "%s%s:%s/admin/listclients?mount=/" % (relay_info['protocol'], relay_info['ip_address'], relay_info['port'])
		for sid in relay_info['sids']:
			for ftype in ('.mp3', '.ogg'):
				try:
					handler = IcecastSyncCallback(relay, relay_info, ftype, sid, callback)
					in_process[handler] = False
					http_client = tornado.httpclient.HTTPClient()
					http_client.fetch(relay_base_url + stream_names[sid] + ftype,
										auth_username=relay_info['admin_username'],
										auth_password=relay_info['admin_password'],
										callback=handler.process)
				except Exception as e:
					# One failed mount must not abort the sweep of the rest.
					log.exception("icecast_sync", "Could not sync %s %s.%s" % (relay, stream_names[sid], ftype), e)

	callback()
Esempio n. 11
0
def _start(callback):
    """Fire listener-list queries for every relay/station/format pair.

    Each successfully constructed IcecastSyncCallback is registered in the
    module-global in_process map; callback runs once at the end.
    """
    global in_process
    if in_process:
        log.warn("icecast_sync", "Previous operation did not finish!")

    stream_names = dict(
        (sid, config.get_station(sid, 'stream_filename'))
        for sid in config.station_ids)

    for relay, relay_info in config.get("relays").iteritems():
        base_url = "%s%s:%s/admin/listclients?mount=/" % (
            relay_info['protocol'], relay_info['ip_address'],
            relay_info['port'])
        for sid in relay_info['sids']:
            for ftype in ('.mp3', '.ogg'):
                try:
                    sync_cb = IcecastSyncCallback(relay, relay_info, ftype,
                                                  sid, callback)
                    in_process[sync_cb] = False
                    tornado.httpclient.HTTPClient().fetch(
                        base_url + stream_names[sid] + ftype,
                        auth_username=relay_info['admin_username'],
                        auth_password=relay_info['admin_password'],
                        callback=sync_cb.process)
                except Exception as e:
                    # A single bad relay/mount should not stop the others.
                    log.exception(
                        "icecast_sync", "Could not sync %s %s.%s" %
                        (relay, stream_names[sid], ftype), e)

    callback()
Esempio n. 12
0
def search(opts):
    """Search for a track, optionally qualified as "track:artist".

    Splitting on only the *first* ':' keeps artist names containing
    colons intact (the old str.split silently dropped everything past
    the second field).
    """
    query = {}
    if ':' in opts:
        track, artist = opts.split(':', 1)
        query = {
            'track': track,
            'artist': artist,
            'limit': 1
        }
        log.success('searching track: "%s" of Artist: "%s"' % (track, artist))
    else:
        query = {
            'track': opts,
            'limit': 1
        }
        log.success('searching track: "%s"' % (opts))

    params = setParams(query)
    request = URL_BASE + urllib.urlencode(params)
    log.info('url to search:', request)

    try:
        response = urllib2.urlopen(request)
        status = response.getcode()
        if status == 200:
            log.info('response status:', status)
            handleRequest(response)
        elif status == 404:
            log.warn('response status:', status)
        else:
            log.err('response status:', status)

    # 'as e' instead of the legacy comma syntax (valid from Python 2.6).
    except urllib2.HTTPError as e:
        log.err('HTTPError:', e.code, 'the url:', e.url, e.reason)
Esempio n. 13
0
def get_random_song_timed(sid, target_seconds = None, target_delta = None):
	"""
	Fetch a random song abiding by all election block, request block, and
	availability rules, but giving priority to the target song length
	provided.  Falls back to get_random_song on failure.
	"""
	if not target_seconds:
		return get_random_song(sid)
	if not target_delta:
		target_delta = config.get_station(sid, "song_lookup_length_delta")

	sql_query = ("FROM r4_song_sid "
					"JOIN r4_songs USING (song_id) "
					"JOIN r4_album_sid ON (r4_album_sid.album_id = r4_songs.album_id AND r4_album_sid.sid = r4_song_sid.sid) "
				"WHERE r4_song_sid.sid = %s "
					"AND song_exists = TRUE "
					"AND song_cool = FALSE "
					"AND song_elec_blocked = FALSE "
					"AND album_requests_pending IS NULL "
					"AND song_request_only = FALSE "
					"AND song_length >= %s AND song_length <= %s")
	# Center a +/- delta/2 window on the requested length.
	half_delta = target_delta / 2
	lower_target_bound = target_seconds - half_delta
	upper_target_bound = target_seconds + half_delta
	bounds = (sid, lower_target_bound, upper_target_bound)
	num_available = db.c.fetch_var("SELECT COUNT(r4_song_sid.song_id) " + sql_query, bounds)
	log.info("song_select", "Song pool size (cooldown, blocks, requests, timed) [target %s delta %s]: %s" % (target_seconds, target_delta, num_available))
	if num_available == 0:
		log.warn("song_select", "No songs available with target_seconds %s and target_delta %s." % (target_seconds, target_delta))
		log.debug("song_select", "Song select query: SELECT COUNT(r4_song_sid.song_id) " + sql_query % bounds)
		return get_random_song(sid)
	# Uniformly random row from the pool via OFFSET.
	offset = random.randint(1, num_available) - 1
	song_id = db.c.fetch_var("SELECT r4_song_sid.song_id " + sql_query + " LIMIT 1 OFFSET %s", bounds + (offset,))
	return Song.load_from_id(song_id, sid)
Esempio n. 14
0
 def load_event_in_progress(self):
     """Resume the most recent in-progress election for this producer.

     Falls through to load_next_event() when there is none, or when the
     found election has no songs (in which case it is marked used first).
     """
     if self.id:
         elec_id = db.c.fetch_var(
             "SELECT elec_id FROM r4_elections WHERE elec_type = %s AND elec_in_progress = TRUE AND sid = %s AND sched_id = %s ORDER BY elec_id DESC LIMIT 1",
             (self.elec_type, self.sid, self.id))
     else:
         elec_id = db.c.fetch_var(
             "SELECT elec_id FROM r4_elections WHERE elec_type = %s AND elec_in_progress = TRUE AND sid = %s AND sched_id IS NULL ORDER BY elec_id DESC LIMIT 1",
             (self.elec_type, self.sid))
     log.debug(
         "load_election",
         "Check for in-progress elections (type %s, sid %s, sched_id %s): %s"
         % (self.elec_type, self.sid, self.id, elec_id))
     if elec_id:
         elec = self.elec_class.load_by_id(elec_id)
         if not elec.songs or not len(elec.songs):
             # Bug fix: the %s placeholder previously had no argument and
             # was logged literally.
             log.warn("load_election",
                      "Election ID %s is empty.  Marking as used." % elec.id)
             db.c.update(
                 "UPDATE r4_elections SET elec_used = TRUE WHERE elec_id = %s",
                 (elec.id, ))
             return self.load_next_event()
         elec.name = self.name
         elec.url = self.url
         elec.dj_user_id = self.dj_user_id
         return elec
     else:
         return self.load_next_event()
Esempio n. 15
0
 def load_next_event(self,
                     target_length=None,
                     min_elec_id=0,
                     skip_requests=False):
     """Load the next unused election for this producer, or create one.

     Returns None when tied to a schedule row (self.id) and not forced to
     always return an election; otherwise falls back to _create_election.
     """
     if self.id:
         elec_id = db.c.fetch_var(
             "SELECT elec_id FROM r4_elections WHERE elec_type = %s and elec_used = FALSE AND sid = %s AND elec_id > %s AND sched_id = %s ORDER BY elec_id LIMIT 1",
             (self.elec_type, self.sid, min_elec_id, self.id))
     else:
         elec_id = db.c.fetch_var(
             "SELECT elec_id FROM r4_elections WHERE elec_type = %s and elec_used = FALSE AND sid = %s AND elec_id > %s AND sched_id IS NULL ORDER BY elec_id LIMIT 1",
             (self.elec_type, self.sid, min_elec_id))
     log.debug(
         "load_election",
         "Check for next election (type %s, sid %s, min. ID %s, sched_id %s): %s"
         % (self.elec_type, self.sid, min_elec_id, self.id, elec_id))
     if elec_id:
         elec = self.elec_class.load_by_id(elec_id)
         if not elec.songs or not len(elec.songs):
             # Bug fix: the %s placeholder previously had no argument and
             # was logged literally.
             log.warn("load_election",
                      "Election ID %s is empty.  Marking as used." % elec.id)
             db.c.update(
                 "UPDATE r4_elections SET elec_used = TRUE WHERE elec_id = %s",
                 (elec.id, ))
             return self.load_next_event()
         elec.url = self.url
         elec.name = self.name
         return elec
     elif self.id and not self.always_return_elec:
         return None
     else:
         return self._create_election(target_length, skip_requests)
Esempio n. 16
0
    def process(self, response):
        """Record this mount's listener list (or failure) in in_process."""
        global in_process

        station = config.station_id_friendly[self.sid]
        if response.code != 200:
            # Failed query: mark this mount done, with no listener data.
            log.warn(
                "icecast_sync",
                "%s %s %s failed query: %s %s" % (
                    self.relay_name,
                    station,
                    self.ftype,
                    response.code,
                    response.reason,
                ),
            )
            in_process[self] = True
            return None

        source = ElementTree.fromstring(response.body).find("source")
        listeners = list(source.iter("listener"))
        in_process[self] = listeners
        log.debug(
            "icecast_sync",
            "%s %s %s count: %s" % (
                self.relay_name,
                station,
                self.ftype,
                len(listeners),
            ),
        )
        return None
Esempio n. 17
0
	def get(self, sid):
		"""Advance station `sid` and emit the next file (or annotation).

		One retry (350ms later, on a reopened DB connection) is attempted
		when the advance deadlocks.
		"""
		self.success = False
		self.sid = None
		if int(sid) in config.station_ids:
			self.sid = int(sid)
		else:
			return

		try:
			schedule.advance_station(self.sid)
		except psycopg2.extensions.TransactionRollbackError:
			if not self.retried:
				self.retried = True
				log.warn("backend", "Database transaction deadlock.  Re-opening database and setting retry timeout.")
				db.close()
				db.open()
				# Bug fix: get() needs sid; the bare bound method would be
				# invoked by the IOLoop with no arguments.
				tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(milliseconds=350), lambda: self.get(sid))
				# Let the retry produce the output instead of writing now.
				return
			else:
				raise

		if not config.get("liquidsoap_annotations"):
			self.write(schedule.get_current_file(self.sid))
		else:
			self.write(self._get_annotated(schedule.get_current_event(self.sid)))
		self.success = True
Esempio n. 18
0
def get_random_song_timed(sid, target_seconds = None, target_delta = None):
	"""
	Fetch a random song abiding by all election block, request block, and
	availability rules, but giving priority to the target song length
	provided.  Falls back to get_random_song on failure.
	"""
	if not target_seconds:
		return get_random_song(sid)
	if not target_delta:
		# Per-station default window size when the caller supplies none.
		target_delta = config.get_station(sid, "song_lookup_length_delta")

	sql_query = ("FROM r4_song_sid "
					"JOIN r4_songs USING (song_id) "
					"JOIN r4_album_sid ON (r4_album_sid.album_id = r4_songs.album_id AND r4_album_sid.sid = r4_song_sid.sid) "
				"WHERE r4_song_sid.sid = %s "
					"AND song_exists = TRUE "
					"AND song_cool = FALSE "
					"AND song_elec_blocked = FALSE "
					"AND album_requests_pending IS NULL "
					"AND song_request_only = FALSE "
					"AND song_length >= %s AND song_length <= %s")
	# Accept lengths within +/- target_delta/2 of the target.
	lower_target_bound = target_seconds - (target_delta / 2)
	upper_target_bound = target_seconds + (target_delta / 2)
	num_available = db.c.fetch_var("SELECT COUNT(r4_song_sid.song_id) " + sql_query, (sid, lower_target_bound, upper_target_bound))
	log.info("song_select", "Song pool size (cooldown, blocks, requests, timed) [target %s delta %s]: %s" % (target_seconds, target_delta, num_available))
	if num_available == 0:
		log.warn("song_select", "No songs available with target_seconds %s and target_delta %s." % (target_seconds, target_delta))
		log.debug("song_select", "Song select query: SELECT COUNT(r4_song_sid.song_id) " + sql_query % (sid, lower_target_bound, upper_target_bound))
		return get_random_song(sid)
	else:
		# Uniformly random row from the filtered pool via OFFSET.
		offset = random.randint(1, num_available) - 1
		song_id = db.c.fetch_var("SELECT r4_song_sid.song_id " + sql_query + " LIMIT 1 OFFSET %s", (sid, lower_target_bound, upper_target_bound, offset))
		return Song.load_from_id(song_id, sid)
Esempio n. 19
0
def get_random_song_ignore_requests(sid):
    """Fetch a random song abiding by election block and availability
    rules, but ignoring request blocking rules.
    """
    sql_query = ("FROM r4_song_sid "
                 "WHERE r4_song_sid.sid = %s "
                 "AND song_exists = TRUE "
                 "AND song_cool = FALSE "
                 "AND song_request_only = FALSE "
                 "AND song_elec_blocked = FALSE ")
    num_available = db.c.fetch_var("SELECT COUNT(song_id) " + sql_query,
                                   (sid, ))
    log.debug("song_select",
              "Song pool size (cooldown, blocks): %s" % num_available)
    if num_available == 0:
        log.warn("song_select",
                 "No songs available while ignoring pending requests.")
        log.debug(
            "song_select",
            "Song select query: SELECT COUNT(song_id) " + (sql_query %
                                                           (sid, )),
        )
        return get_random_song_ignore_all(sid)
    # Uniformly random row from the pool via OFFSET.
    offset = random.randint(1, num_available) - 1
    song_id = db.c.fetch_var(
        "SELECT song_id " + sql_query + " LIMIT 1 OFFSET %s",
        (sid, offset))
    return Song.load_from_id(song_id, sid)
Esempio n. 20
0
def load_translations():
	"""Load en_MASTER.json plus every other locale file under ../lang.

	Populates the module globals: master (the English reference dict),
	translations (locale code -> RainwaveLocale) and locale_names_json
	(JSON map of locale code -> native language name).
	"""
	global master
	global translations
	global locale_names_json

	lang_dir = os.path.join(os.path.dirname(__file__), "../lang")

	# 'with' closes the handle even if json.load throws (the original
	# leaked the handle on a parse error).
	with open(os.path.join(lang_dir, "en_MASTER.json")) as master_file:
		master = json.load(master_file)

	locale_names = {}
	for root, subdir, files in os.walk(lang_dir):
		for filename in files:
			if filename == "en_MASTER.json":
				continue
			if not filename.endswith(".json"):
				continue
			locale_code = filename[:-5]  # strip ".json"
			try:
				with codecs.open(os.path.join(lang_dir, filename), "r", encoding="utf-8") as f:
					translations[locale_code] = RainwaveLocale(locale_code, master, json.load(f))
				locale_names[locale_code] = translations[locale_code].dict['language_name']
			except Exception:
				# Narrowed from a bare except, which would also swallow
				# KeyboardInterrupt/SystemExit.
				log.warn("locale", "%s is not a valid JSON file." % locale_code)

	locale_names_json = tornado.escape.json_encode(locale_names)
Esempio n. 21
0
def start_icecast_sync():
	"""Asynchronously query each relay's OGG mount for its listener list.

	Resets the module-global all_returned completion map; results arrive
	via IcecastSyncCallback.respond.
	"""
	global all_returned

	stream_names = {}
	for sid in config.station_ids:
		stream_names[sid] = config.get_station(sid)['stream_filename']

	if all_returned:
		log.warn("icecast_sync", "Previous operation did not finish!")

	all_returned = {}
	listener_ids = {}
	for relay, relay_info in config.get("relays").iteritems():
		listener_ids[relay] = []
		relay_base_url = "%s%s:%s/admin/listclients?mount=/" % (relay_info['protocol'], relay_info['ip_address'], relay_info['port'])
		for sid in relay_info['sids']:
			# MP3 mounts are intentionally skipped: the beta version of
			# the site doesn't do MP3.
			key = "%s_%s_ogg" % (relay, sid)
			all_returned[key] = False
			handler2 = IcecastSyncCallback(relay, relay_info, key, sid)
			tornado.httpclient.AsyncHTTPClient().fetch(
				relay_base_url + stream_names[sid] + ".ogg",
				handler2.respond,
				auth_username=relay_info['admin_username'],
				auth_password=relay_info['admin_password'])
Esempio n. 22
0
def get_producer_at_time(sid, at_time):
    """Return the producer scheduled on sid at at_time.

    Falls back to an election.ElectionProducer when no schedule row
    matches, the producer fails to load or start, or it has no events.
    """
    local_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(at_time))
    time_ahead = int((at_time - timestamp()) / 60)
    # sched_start gets 20 seconds of slack so events beginning "right
    # about now" still match.
    sched_id = db.c.fetch_var(
        "SELECT sched_id "
        "FROM r4_schedule "
        "WHERE sid = %s AND sched_start <= %s AND sched_end > %s "
        "ORDER BY sched_id DESC "
        "LIMIT 1",
        (sid, at_time + 20, at_time),
    )
    to_ret = None
    try:
        to_ret = events.event.BaseProducer.load_producer_by_id(sched_id)
        if to_ret:
            to_ret.start_producer()
    except Exception as e:
        log.warn("get_producer", "Failed to obtain producer at time %s (%sm ahead)." % (local_time, time_ahead))
        log.exception(
            "get_producer",
            "Failed to get an appropriate producer at time %s  (%sm ahead)." % (local_time, time_ahead),
            e,
        )
    if not to_ret:
        log.debug(
            "get_producer", "No producer at time %s  (%sm ahead), defaulting to election." % (local_time, time_ahead)
        )
        return election.ElectionProducer(sid)
    if not to_ret.has_next_event():
        log.warn("get_producer", "Producer ID %s (type %s, %s) has no events." % (to_ret.id, to_ret.type, to_ret.name))
        return election.ElectionProducer(sid)
    return to_ret
Esempio n. 23
0
def get_producer_at_time(sid, at_time):
    """Find the producer scheduled for sid at at_time.

    Returns an election.ElectionProducer fallback when no schedule row
    matches, the producer fails to load/start, or it has no events.
    """
    to_ret = None
    local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(at_time))
    time_ahead = int((at_time - timestamp()) / 60)
    # 20 seconds of slack on sched_start lets events that begin "right
    # about now" match.
    sched_id = db.c.fetch_var(
        "SELECT sched_id "
        "FROM r4_schedule "
        "WHERE sid = %s AND sched_start <= %s AND sched_end > %s "
        "ORDER BY sched_id DESC "
        "LIMIT 1", (sid, at_time + 20, at_time))
    try:
        to_ret = events.event.BaseProducer.load_producer_by_id(sched_id)
        if to_ret:
            to_ret.start_producer()
    except Exception as e:
        log.warn(
            "get_producer",
            "Failed to obtain producer at time %s (%sm ahead)." %
            (local_time, time_ahead))
        log.exception(
            "get_producer",
            "Failed to get an appropriate producer at time %s  (%sm ahead)." %
            (local_time, time_ahead), e)
    if not to_ret:
        log.debug(
            "get_producer",
            "No producer at time %s  (%sm ahead), defaulting to election." %
            (local_time, time_ahead))
        return election.ElectionProducer(sid)
    if not to_ret.has_next_event():
        log.warn(
            "get_producer", "Producer ID %s (type %s, %s) has no events." %
            (to_ret.id, to_ret.type, to_ret.name))
        return election.ElectionProducer(sid)
    return to_ret
Esempio n. 24
0
def advance_station(sid):
	"""Advance station sid to its next playable event, transactionally.

	Pops already-used or song-less events off the head of upnext[sid]
	(refilling via manage_next whenever it empties), prepares the new
	head event, commits, and schedules post_process shortly afterwards
	on the IOLoop.  Any exception rolls the transaction back and
	re-raises.
	"""
	db.c.start_transaction()
	try:
		log.debug("advance", "Advancing station %s." % sid)
		start_time = time.time()
		# If we need some emergency elections here
		if len(upnext[sid]) == 0:
			manage_next(sid)

		while upnext[sid][0].used or len(upnext[sid][0].songs) == 0:
			log.warn("advance", "Event ID %s was already used or has zero songs.  Removing." % upnext[sid][0].id)
			upnext[sid].pop(0)
			if len(upnext[sid]) == 0:
				manage_next(sid)

		# Timed so slow event preparation shows up in the debug log below.
		start_time = time.time()
		upnext[sid][0].prepare_event()
		db.c.commit()

		log.debug("advance", "upnext[0] preparation time: %.6f" % (time.time() - start_time,))
		log.info("advance", "Next song: %s" % get_advancing_file(sid))

		tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(milliseconds=150), lambda: post_process(sid))
	except:
		db.c.rollback()
		raise
Esempio n. 25
0
def sync_frontend_all(sid):
	"""Tell every API process (one per port) to push a full sync for sid."""
	try:
		params = urllib.urlencode({ "sid": sid })
		# One API process listens on api_base_port + i.
		for i in range(0, config.get("api_num_processes")):
			urllib2.urlopen(urllib2.Request("http://localhost:%s/api/sync_update_all" % (config.get("api_base_port") + i,), params))
			log.debug("sync_front", "Sent update_all to API port %s" % (config.get("api_base_port") + i,))
	# Python 2 comma-style except clause; a dead port aborts the loop.
	except urllib2.URLError, e:
		log.warn("sync_front", "Could not connect to an API port: %s" % repr(e.reason))
Esempio n. 26
0
	def write_output(self):
		"""Serialize self._output as JSON, tagging it with execution time."""
		if not hasattr(self, "_output"):
			return
		# -1 marks "unknown" when no start clock was recorded.
		exectime = (timestamp() - self._startclock) if hasattr(self, "_startclock") else -1
		if exectime > 0.5:
			log.warn("long_request", "%s took %s to execute!" % (self.url, exectime))
		self.append("api_info", { "exectime": exectime, "time": round(timestamp()) })
		self.write(json.dumps(self._output, ensure_ascii=False))
Esempio n. 27
0
	def write_output(self):
		"""Emit self._output via tornado's JSON encoder, plus timing info."""
		if hasattr(self, "_output"):
			if hasattr(self, "_startclock"):
				delta = timestamp() - self._startclock
			else:
				delta = -1
			if delta > 0.5:
				# Surface slow handlers in the logs.
				log.warn("long_request", "%s took %s to execute!" % (self.url, delta))
			self.append("api_info", { "exectime": delta, "time": round(timestamp()) })
			self.write(tornado.escape.json_encode(self._output))
Esempio n. 28
0
def sync_result(response):
    """Log any error details reported by a frontend sync request."""
    if not response.error:
        return
    try:
        # Best effort: the body may not be JSON at all.
        for key, val in json.loads(response.body).iteritems():
            if u"text" in val:
                log.warn("sync_front", "%s: %s" % (key, val["text"]))
    except:
        pass
    log.warn("sync_front", "Error %s syncing to frontend at URL %s." % (response.code, response.request.url))
Esempio n. 29
0
	def write_output(self):
		"""Send the accumulated output as JSON, tagging it with execution time."""
		if not hasattr(self, "_output"):
			return
		if hasattr(self, "_startclock"):
			exectime = timestamp() - self._startclock
		else:
			exectime = -1
		if exectime > 0.5:
			# Flag anything slower than half a second for investigation.
			log.warn("long_request", "%s took %s to execute!" % (self.url, exectime))
		self.append("api_info", { "exectime": exectime, "time": round(timestamp()) })
		self.write(json.dumps(self._output, ensure_ascii=False))
Esempio n. 30
0
	def finish(self, chunk=None):
		"""Stamp timing info on the response, emit it as JSON, then finish the request."""
		self.set_header("Content-Type", "application/json")
		if hasattr(self, "_output"):
			exectime = (time.time() - self._startclock) if hasattr(self, "_startclock") else -1
			if exectime > 0.5:
				log.warn("long_request", "%s took %s to execute!" % (self.url, exectime))
			self.append("api_info", { "exectime": exectime, "time": round(time.time()) })
			self.write(tornado.escape.json_encode(self._output))
		super(APIHandler, self).finish(chunk)
Esempio n. 31
0
def sync_result(response):
	"""Log the outcome of a frontend sync request."""
	if not response.error:
		log.debug("sync_front", "Sync to front successful.")
		return
	try:
		body = json.loads(response.body)
		for key, val in body.iteritems():
			if u"text" in val:
				log.warn("sync_front", "%s: %s" % (key, val['text']))
	except Exception:
		# Body was not parseable JSON; the generic warning below still fires.
		pass
	log.warn("sync_front", "Error %s syncing to frontend at URL %s." % (response.code, response.request.url))
Esempio n. 32
0
def sync_frontend_user_id(user_id):
	"""Ask every running API process to push an update for a single user.

	Bug fixes:
	- the request previously encoded the undefined name ``sid`` (a
	  NameError at call time); it now sends the ``user_id`` argument.
	- ``socket.timeout`` is a subclass of ``socket.error``, so its handler
	  was unreachable; it is now listed first.
	"""
	try:
		headers = ({"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain text/html text/javascript application/json application/javascript" })
		params = urllib.urlencode({ "user_id": user_id })
		for i in range(0, config.get("api_num_processes")):
			conn = httplib.HTTPConnection('localhost', config.get("api_base_port") + i)
			conn.request("GET", "/api/sync_update_user", params, headers)
			conn.close()
	except socket.timeout:
		log.warn("sync_front", "Timeout connecting to an API port.")
	except socket.error:
		log.warn("sync_front", "Could not connect to an API port.")
Esempio n. 33
0
	def process(self, response):
		"""Record this relay's listener list (or a failure flag) in the shared in_process map."""
		global in_process

		if response.code != 200:
			log.warn("icecast_sync", "%s %s %s failed query: %s %s" % (self.relay_name, config.station_id_friendly[self.sid], self.ftype, response.code, response.reason))
			# True marks this request as "done but failed".
			in_process[self] = True
			return None

		listeners = list(ElementTree.fromstring(response.body).find("source").iter("listener"))
		in_process[self] = listeners
		log.debug("icecast_sync", "%s %s %s count: %s" % (self.relay_name, config.station_id_friendly[self.sid], self.ftype, len(listeners)))
Esempio n. 34
0
	def length(self):
		"""Best-effort length of this event, in seconds.

		Falls through several sources in descending order of accuracy and
		returns 0 (with a warning) when nothing usable is available.
		"""
		# These go in descending order of accuracy
		if not self.used and hasattr(self, "songs"):
			return self.songs[0].data['length']
		elif self.start_actual:
			# NOTE(review): start - end yields a negative span whenever
			# end > start; preserved as-is since callers may rely on it.
			return self.start_actual - self.end
		elif self.start and self.end:
			return self.start - self.end
		elif hasattr(self, "songs"):
			return self.songs[0].data['length']
		else:
			# Bug fix: this branch is only reachable when self.songs does NOT
			# exist, so len(self.songs) used to raise AttributeError here.
			song_count = len(self.songs) if hasattr(self, "songs") else "n/a"
			log.warn("event", "Event ID %s (type %s) failed on length calculation.  Used: %s / Songs: %s / Start Actual: %s / Start: %s / End: %s" % (self.id, self.type, self.used, song_count, self.start_actual, self.start, self.end))
			return 0
Esempio n. 35
0
	def respond(self, response):
		"""Collect listener IDs from a relay's status XML and mark this stream as returned."""
		global all_returned
		global listener_ids

		if response.code != 200:
			log.warn("icecast_sync", "%s %s failed query: %s %s" % (self.relay_name, self.stream_key, response.code, response.reason))
			return

		source = ElementTree.fromstring(response.body).find("source")
		for listener in source.iter("listener"):
			attrs = listener.attrib
			if 'id' in attrs:
				listener_ids[self.relay_name].append(long(attrs['id']))

		all_returned[self.stream_key] = True
Esempio n. 36
0
 async def request(self, client, url):
     """Fetch `url` with the given aiohttp-style client session.

     On HTTP 200 the raw body bytes are stored on self.response for later
     parsing; any other status is logged and self.response is left untouched.
     """
     # ssl=False: relay status pages are queried without certificate checks.
     async with client.get(url, ssl=False) as response:
         if response.status != 200:
             log.warn(
                 "icecast_sync",
                 "%s %s %s failed query: %s %s" % (
                     self.relay_name,
                     config.station_id_friendly[self.sid],
                     self.ftype,
                     response.status,
                     response.reason,
                 ),
             )
         else:
             self.response = await response.read()
Esempio n. 37
0
def get_producer_at_time(sid, at_time):
	"""Return the producer scheduled on `sid` at `at_time`, falling back to an ElectionProducer."""
	# Events starting up to 20 seconds after at_time still count as "current".
	sched_id = db.c.fetch_var(	"SELECT sched_id "
								"FROM r4_schedule "
								"WHERE sid = %s AND sched_start <= %s AND sched_end > %s "
								"ORDER BY sched_id DESC "
								"LIMIT 1", (sid, at_time + 20, at_time))
	producer = None
	try:
		producer = events.event.BaseProducer.load_producer_by_id(sched_id)
	except Exception as e:
		log.warn("get_producer", "Failed to obtain producer.")
		log.exception("get_producer", "Failed to get an appropriate producer.", e)
	if producer:
		return producer
	return election.ElectionProducer(sid)
Esempio n. 38
0
def dj_heartbeat_check():
	"""Reset a station to normal playback when its DJ's heartbeat has gone stale."""
	# Don't do this in testing environments
	if config.get("developer_mode"):
		return
	for sid in config.station_ids:
		if not cache.get_station(sid, "backend_paused_playing"):
			continue
		heartbeat = cache.get_station(sid, "dj_heartbeat")
		heartbeat_start = cache.get_station(sid, "dj_heartbeat_start")
		# 10-second grace period after the pause begins.
		if not heartbeat_start or ((timestamp() - heartbeat_start) <= 10):
			continue
		# No heartbeat at all, or the last one is 15+ seconds stale.
		if not heartbeat or ((timestamp() - heartbeat) >= 15):
			log.warn("dj_heartbeat", "DJ heart attack - resetting station to normal.")
			cache.set_station(sid, "backend_paused", False)
			cache.set_station(sid, "backend_paused_playing", False)
			liquidsoap.kick_dj(sid)
			liquidsoap.skip(sid)
Esempio n. 39
0
    def get(self, sid):
        """Advance station `sid` and write the next file/annotation for liquidsoap.

        Honors the DJ pause state first; on database connection failure the
        connection is recycled and the exception re-raised so the caller retries.
        """
        self.success = False
        self.sid = None
        if int(sid) in config.station_ids:
            self.sid = int(sid)
        else:
            # Unknown station ID: nothing to serve.
            return

        if cache.get_station(self.sid, "backend_paused"):
            # DJ pause: serve the pause file and start the heartbeat clock
            # if this is the first poll since pausing.
            if not cache.get_station(self.sid, "dj_heartbeat_start"):
                log.debug("dj", "Setting server start heatbeat.")
                cache.set_station(self.sid, "dj_heartbeat_start", timestamp())
            self.write(self._get_pause_file())
            schedule.set_upnext_crossfade(self.sid, False)
            cache.set_station(self.sid, "backend_paused_playing", True)
            sync_to_front.sync_frontend_dj(self.sid)
            return
        else:
            # Not paused: clear any stale pause/heartbeat state.
            cache.set_station(self.sid, "dj_heartbeat_start", False)
            cache.set_station(self.sid, "backend_paused", False)
            cache.set_station(self.sid, "backend_paused_playing", False)

        try:
            schedule.advance_station(self.sid)
        except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
            # Connection-level failure: reconnect and re-raise so the backend retries.
            log.warn("backend", e.diag.message_primary)
            db.close()
            db.connect()
            raise
        except psycopg2.extensions.TransactionRollbackError as e:
            log.warn(
                "backend",
                "Database transaction deadlock.  Re-opening database and setting retry timeout.",
            )
            db.close()
            db.connect()
            raise

        to_send = None
        if not config.get("liquidsoap_annotations"):
            to_send = schedule.get_advancing_file(self.sid)
        else:
            to_send = self._get_annotated(
                schedule.get_advancing_event(self.sid))
        self.success = True
        # Honor the socket-timeout guard: skip the write when it's active.
        if not cache.get_station(self.sid, "get_next_socket_timeout"):
            self.write(to_send)
Esempio n. 40
0
def dj_heartbeat_check():
    """Watchdog: when a paused-but-playing station's DJ stops sending
    heartbeats, kick the DJ and resume normal playback."""
    # Don't do this in testing environments
    if config.get("developer_mode"):
        return
    for sid in config.station_ids:
        if not cache.get_station(sid, "backend_paused_playing"):
            continue
        hb = cache.get_station(sid, "dj_heartbeat")
        hbs = cache.get_station(sid, "dj_heartbeat_start")
        # 10-second grace period after pausing.
        if not hbs or ((timestamp() - hbs) <= 10):
            continue
        # Missing heartbeat, or last one 15+ seconds old.
        if not hb or ((timestamp() - hb) >= 15):
            log.warn("dj_heartbeat",
                     "DJ heart attack - resetting station to normal.")
            cache.set_station(sid, "backend_paused", False)
            cache.set_station(sid, "backend_paused_playing", False)
            liquidsoap.kick_dj(sid)
            liquidsoap.skip(sid)
Esempio n. 41
0
    def respond(self, response):
        """Harvest listener IDs from a relay status page and flag this stream as done."""
        global all_returned
        global listener_ids

        if response.code != 200:
            log.warn(
                "icecast_sync", "%s %s failed query: %s %s" %
                (self.relay_name, self.stream_key, response.code,
                 response.reason))
            return

        source_node = ElementTree.fromstring(response.body).find("source")
        collected = listener_ids[self.relay_name]
        for listener in source_node.iter("listener"):
            if 'id' in listener.attrib:
                collected.append(long(listener.attrib['id']))

        all_returned[self.stream_key] = True
Esempio n. 42
0
	def load_event_in_progress(self):
		"""Resume an in-progress election for this producer, or fall through
		to load_next_event().  Empty elections are marked used and skipped.
		"""
		if self.id:
			elec_id = db.c.fetch_var("SELECT elec_id FROM r4_elections WHERE elec_type = %s AND elec_in_progress = TRUE AND sid = %s AND sched_id = %s ORDER BY elec_id DESC LIMIT 1", (self.elec_type, self.sid, self.id))
		else:
			elec_id = db.c.fetch_var("SELECT elec_id FROM r4_elections WHERE elec_type = %s AND elec_in_progress = TRUE AND sid = %s AND sched_id IS NULL ORDER BY elec_id DESC LIMIT 1", (self.elec_type, self.sid))
		log.debug("load_election", "Check for in-progress elections (type %s, sid %s, sched_id %s): %s" % (self.elec_type, self.sid, self.id, elec_id))
		if elec_id:
			elec = self.elec_class.load_by_id(elec_id)
			if not elec.songs or not len(elec.songs):
				# Bug fix: the election ID was never interpolated into this message.
				log.warn("load_election", "Election ID %s is empty.  Marking as used." % (elec.id,))
				db.c.update("UPDATE r4_elections SET elec_used = TRUE WHERE elec_id = %s", (elec.id,))
				return self.load_next_event()
			elec.name = self.name
			elec.url = self.url
			elec.dj_user_id = self.dj_user_id
			return elec
		else:
			return self.load_next_event()
Esempio n. 43
0
	def load_next_event(self, target_length = None, min_elec_id = 0, skip_requests = False):
		"""Return the next unused election for this producer, creating one when allowed.

		Empty elections are marked used and skipped.  Returns None when this
		producer has a schedule ID and is not forced to return an election.
		"""
		if self.id:
			elec_id = db.c.fetch_var("SELECT elec_id FROM r4_elections WHERE elec_type = %s and elec_used = FALSE AND sid = %s AND elec_id > %s AND sched_id = %s ORDER BY elec_id LIMIT 1", (self.elec_type, self.sid, min_elec_id, self.id))
		else:
			elec_id = db.c.fetch_var("SELECT elec_id FROM r4_elections WHERE elec_type = %s and elec_used = FALSE AND sid = %s AND elec_id > %s AND sched_id IS NULL ORDER BY elec_id LIMIT 1", (self.elec_type, self.sid, min_elec_id))
		log.debug("load_election", "Check for next election (type %s, sid %s, min. ID %s, sched_id %s): %s" % (self.elec_type, self.sid, min_elec_id, self.id, elec_id))
		if elec_id:
			elec = self.elec_class.load_by_id(elec_id)
			if not elec.songs or not len(elec.songs):
				# Bug fix: the election ID was never interpolated into this message.
				log.warn("load_election", "Election ID %s is empty.  Marking as used." % (elec.id,))
				db.c.update("UPDATE r4_elections SET elec_used = TRUE WHERE elec_id = %s", (elec.id,))
				# NOTE(review): the retry intentionally drops target_length /
				# min_elec_id / skip_requests; preserved as-is.
				return self.load_next_event()
			elec.url = self.url
			elec.name = self.name
			return elec
		elif self.id and not self.always_return_elec:
			return None
		else:
			return self._create_election(target_length, skip_requests)
Esempio n. 44
0
	def get(self, sid):	#pylint: disable=W0221
		"""Advance station `sid` and write the next file/annotation for liquidsoap.

		Honors the DJ pause state first; on database connection failure the
		connection is recycled and the exception re-raised so the caller retries.
		"""
		self.success = False
		self.sid = None
		if int(sid) in config.station_ids:
			self.sid = int(sid)
		else:
			# Unknown station ID: nothing to serve.
			return

		if cache.get_station(self.sid, "backend_paused"):
			# DJ pause: serve the pause file and start the heartbeat clock
			# if this is the first poll since pausing.
			if not cache.get_station(self.sid, "dj_heartbeat_start"):
				log.debug("dj", "Setting server start heatbeat.")
				cache.set_station(self.sid, "dj_heartbeat_start", timestamp())
			self.write(self._get_pause_file())
			schedule.set_upnext_crossfade(self.sid, False)
			cache.set_station(self.sid, "backend_paused_playing", True)
			sync_to_front.sync_frontend_dj(self.sid)
			return
		else:
			# Not paused: clear any stale pause/heartbeat state.
			cache.set_station(self.sid, "dj_heartbeat_start", False)
			cache.set_station(self.sid, "backend_paused", False)
			cache.set_station(self.sid, "backend_paused_playing", False)

		try:
			schedule.advance_station(self.sid)
		except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
			# Connection-level failure: reconnect and re-raise so the backend retries.
			log.warn("backend", e.diag.message_primary)
			db.close()
			db.connect()
			raise
		except psycopg2.extensions.TransactionRollbackError as e:
			log.warn("backend", "Database transaction deadlock.  Re-opening database and setting retry timeout.")
			db.close()
			db.connect()
			raise

		to_send = None
		if not config.get("liquidsoap_annotations"):
			to_send = schedule.get_advancing_file(self.sid)
		else:
			to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
		self.success = True
		# Honor the socket-timeout guard: skip the write when it's active.
		if not cache.get_station(self.sid, "get_next_socket_timeout"):
			self.write(to_send)
Esempio n. 45
0
def loadPLaylist(fich):
    """Load playlist file `fich` and dispatch on its extension (m3u/pls/xspf)."""
    if os.path.isfile(fich):
        try:
            # NOTE(review): [:3] keeps only the FIRST three characters of the
            # basename — possibly intended to strip an extension; confirm.
            playlistName = os.path.basename(fich)[:3] + '.msk'
            ext = os.path.splitext(fich)[1][1:].lower()
            if ext == 'm3u':
                m3u.proccess(fich)
            elif ext == 'pls':
                # pls playlists additionally get metadata extraction + search.
                playlist = pls.proccess(fich)
                result=tags.getMetaData(playlistName, playlist)
                for item in result:
                    search(item)
            elif ext == 'xspf':
                log.warn("xspf not supported yet")
            else:
                log.err("this file type is not supported")
        except Exception as e:
            log.err("An error occurred while processing the file:", fich)
            log.err("Error:", e)
    else:
        log.err("The file: %s does not exist, check the path or filename" % (fich))
Esempio n. 46
0
def get_random_song_ignore_requests(sid):
	"""
	Fetch a random song abiding by election block and availability rules,
	but ignoring request blocking rules.
	"""
	sql_query = ("FROM r4_song_sid "
			"WHERE r4_song_sid.sid = %s "
				"AND song_exists = TRUE "
				"AND song_cool = FALSE "
				"AND song_request_only = FALSE "
				"AND song_elec_blocked = FALSE ")
	num_available = db.c.fetch_var("SELECT COUNT(song_id) " + sql_query, (sid,))
	log.debug("song_select", "Song pool size (cooldown, blocks): %s" % num_available)
	if num_available == 0:
		# Nothing eligible at all: escalate to the rule-free selector.
		log.warn("song_select", "No songs available while ignoring pending requests.")
		log.debug("song_select", "Song select query: SELECT COUNT(song_id) " + (sql_query %  (sid,)))
		return get_random_song_ignore_all(sid)
	# Pick a uniformly random row from the eligible pool.
	offset = random.randint(1, num_available) - 1
	song_id = db.c.fetch_var("SELECT song_id " + sql_query + " LIMIT 1 OFFSET %s", (sid, offset))
	return Song.load_from_id(song_id, sid)
Esempio n. 47
0
def integrate_new_events(sid):
    """Fold newly-created schedule events and priority elections into next[sid].

    Returns the updated (max_sched_id, max_elec_id, num_elections) tuple.
    """
    max_sched_id, max_elec_id, num_elections = _get_schedule_stats(sid)

    # Will remove events that no longer have a schedule ID with them.
    # Bug fix: these two lookups used Python %-interpolation to splice the
    # event ID into the SQL text; they now pass it as a bound parameter,
    # consistent with every other query in this module.
    new_next = []
    for e in next[sid]:
        if e.is_election and db.c.fetch_var(
                "SELECT elec_id FROM r4_elections WHERE elec_id = %s", (e.id,)):
            new_next.append(e)
        elif db.c.fetch_var(
                "SELECT sched_id FROM r4_schedule WHERE sched_id = %s", (e.id,)):
            new_next.append(e)
    next[sid] = new_next

    # This is the line of code that loads in any upcoming events
    unused_sched_id = db.c.fetch_list(
        "SELECT sched_id FROM r4_schedule WHERE sid = %s AND sched_id > %s AND sched_used = FALSE AND sched_start <= %s ORDER BY sched_start",
        (sid, max_sched_id, int(time.time()) + 86400))
    for sched_id in unused_sched_id:
        e = event.load_by_id(sched_id)
        if not e:
            log.warn("schedule", "Unused event ID %s was None." % sched_id)
        else:
            next[sid].append(e)

    # Step 4: Insert "priority elections" ahead of anything else
    priority_elec_ids = db.c.fetch_list(
        "SELECT elec_id FROM r4_elections WHERE sid = %s AND elec_id > %s AND elec_priority = TRUE ORDER BY elec_id DESC",
        (sid, max_elec_id))
    for elec_id in priority_elec_ids:
        e = playlist.Election.load_by_id(elec_id)
        # The client, through the API, sets start times, so we don't have to worry about where in the array it goes.  The sorter will take care of it.
        next[sid].append(e)
        if e.id > max_elec_id:
            max_elec_id = e.id
        num_elections += 1
        e.set_priority(False)

    return (max_sched_id, max_elec_id, num_elections)
Esempio n. 48
0
def start_icecast_sync():
    """Kick off listener-count queries against every configured relay's
    mp3 and ogg mountpoints, then process the collected results."""
    global all_returned
    global listener_ids
    # NOTE(review): `blargh` is declared global here but never read or
    # assigned in this function — likely leftover debugging; confirm.
    global blargh

    stream_names = {}
    for sid in config.station_ids:
        stream_names[sid] = config.get_station(sid, 'stream_filename')

    # A non-empty all_returned means the previous sweep never completed.
    if all_returned:
        log.warn("icecast_sync", "Previous operation did not finish!")

    all_returned = {}
    listener_ids = {}
    for relay, relay_info in config.get("relays").iteritems():
        listener_ids[relay] = []
        relay_base_url = "%s%s:%s/admin/listclients?mount=/" % (
            relay_info['protocol'], relay_info['ip_address'],
            relay_info['port'])
        for sid in relay_info['sids']:
            # NOTE(review): the synchronous tornado HTTPClient is used with a
            # callback= argument — verify the installed tornado version
            # accepts this; modern tornado only supports callbacks on
            # AsyncHTTPClient.
            all_returned["%s_%s_mp3" % (relay, sid)] = False
            handler = IcecastSyncCallback(relay, relay_info,
                                          "%s_%s_mp3" % (relay, sid), sid)
            http_client = tornado.httpclient.HTTPClient()
            http_client.fetch(relay_base_url + stream_names[sid] + ".mp3",
                              auth_username=relay_info['admin_username'],
                              auth_password=relay_info['admin_password'],
                              callback=handler.respond)

            all_returned["%s_%s_ogg" % (relay, sid)] = False
            handler2 = IcecastSyncCallback(relay, relay_info,
                                           "%s_%s_ogg" % (relay, sid), sid)
            http_client2 = tornado.httpclient.HTTPClient()
            http_client2.fetch(relay_base_url + stream_names[sid] + ".ogg",
                               auth_username=relay_info['admin_username'],
                               auth_password=relay_info['admin_password'],
                               callback=handler2.respond)
    _process()
Esempio n. 49
0
def _add_scan_error(filename, xception, full_exc=None):
	"""Record a scan error in the rolling cache (most recent first, capped at 100)."""
	try:
		scan_errors = cache.get("backend_scan_errors")
	except:
		scan_errors = None
	if not scan_errors:
		scan_errors = []

	entry = { "time": int(timestamp()), "file": filename, "type": xception.__class__.__name__, "error": str(xception), "traceback": "" }
	if isinstance(xception, (PassableScanError, IOError, OSError)):
		# Expected/benign scan problems are only warned about.
		log.warn("scan", "Warning scanning %s: %s" % (filename, xception.message))
	else:
		# Unexpected errors get a full traceback attached and logged.
		exc_info = full_exc if full_exc else sys.exc_info()
		entry['traceback'] = traceback.format_exception(*exc_info)		#pylint: disable=W0142
		log.exception("scan", "Error scanning %s" % filename, exc_info)
	scan_errors.insert(0, entry)
	if len(scan_errors) > 100:
		scan_errors = scan_errors[0:100]
	cache.set("backend_scan_errors", scan_errors)
Esempio n. 50
0
	def vote(self, entry_id, event, lock_count):
		"""Register a vote by the current listener for `entry_id` in `event`.

		Locks the listener to this station, moves any prior vote off its old
		entry, and keeps the user's total-vote statistics current.  Returns
		True on success; on failure appends an ErrorReturn and returns False.
		"""
		if not self.user.lock_to_sid(self.sid, lock_count):
			log.warn("vote", "Could not lock user: listener ID %s voting for entry ID %s, tried to lock for %s events." % (self.user.data['listener_id'], entry_id, lock_count))
			self.append(self.return_name, api.returns.ErrorReturn(0, "Internal server error. (logged)", { "entry_id": entry_id, "try_again": True }))
			return False

		vote_id = None
		song = event.get_entry(entry_id)
		if self.user.data['listener_voted_entry']:
			# The listener voted before in this event: remove the old vote first.
			if not event.add_vote_to_entry(entry_id, -1):
				log.warn("vote", "Could not subtract vote from entry: listener ID %s voting for entry ID %s." % (self.user.data['listener_id'], entry_id))
				self.append(self.return_name, api.returns.ErrorReturn(0, "Internal server error. (logged)", { "entry_id": entry_id, "try_again": True }))
				return False
			if not self.user.is_anonymous():
				vote_id = db.c.fetch_var("SELECT vote_id FROM r4_vote_history WHERE user_id = %s AND song_id = %s ORDER BY vote_id DESC LIMIT 1", (self.user.id, song.id))

		if not db.c.update("UPDATE r4_listeners SET listener_voted_entry = %s WHERE listener_id = %s", (entry_id, self.user.data['listener_id'])):
			log.warn("vote", "Could not set voted_entry: listener ID %s voting for entry ID %s." % (self.user.data['listener_id'], entry_id))
			self.append(self.return_name, api.returns.ErrorReturn(0, "Internal server error. (logged)", { "entry_id": entry_id, "try_again": True }))
			return False
		self.user.update({ "listener_voted_entry": entry_id })

		if not event.add_vote_to_entry(entry_id):
			log.warn("vote", "Could not add vote to entry: listener ID %s voting for entry ID %s." % (self.user.data['listener_id'], entry_id))
			self.append(self.return_name, api.returns.ErrorReturn(0, "Internal server error. (logged)", { "entry_id": entry_id, "try_again": True }))
			return False

		if not self.user.is_anonymous():
			if vote_id:
				# Moving an existing vote: just repoint its song.
				db.c.update("UPDATE r4_vote_history SET song_id = %s WHERE vote_id = %s", (song.id, vote_id))
			else:
				db.c.update("UPDATE phpbb_users SET radio_totalvotes = radio_totalvotes + 1 WHERE user_id = %s", (self.user.id,))
				self.user.update({ "radio_totalvotes": self.user.data['radio_totalvotes'] + 1 })

				# Rank the user's vote count against everyone over the last 2 weeks.
				# NOTE(review): queries rw_vote_history while the rest of this code
				# uses r4_vote_history — confirm the table name is intentional.
				timewindow = time.time() - 1209600
				db.c.query("SELECT user_id, COUNT(song_id) AS c FROM rw_vote_history WHERE vote_time > %s GROUP BY user_id HAVING COUNT(song_id) > %s", (timewindow, self.user.data['radio_totalvotes']))
				rank = db.c.rowcount + 1
				db.c.update(
					"INSERT INTO r4_vote_history (elec_id, user_id, song_id, vote_at_rank, vote_at_count) "
					"VALUES (%s, %s, %s, %s, %s)",
					(event.id, self.user.id, song.id, rank, self.user.data['radio_totalvotes']))

		return True
Esempio n. 51
0
	def get(self, sid):
		"""Advance station `sid`, guarding against request flooding.

		When the socket-timeout flag is set and a previous output exists,
		the cached output is replayed instead of advancing again.
		"""
		self.success = False
		self.sid = None
		if int(sid) in config.station_ids:
			self.sid = int(sid)
		else:
			# Unknown station ID: nothing to serve.
			return

		# This program must be run on 1 station for 1 instance, which would allow this operation to be safe.
		# Also works if 1 process is serving all stations.  Pinging any instance for any station
		# would break the program here, though.
		if cache.get_station(self.sid, "get_next_socket_timeout") and sid_output[self.sid]:
			log.warn("backend", "Using previous output to prevent flooding.")
			self.write(sid_output[self.sid])
			# Replay is one-shot: clear the cached output after use.
			sid_output[self.sid] = None
			self.success = True
		else:
			try:
				schedule.advance_station(self.sid)
			except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
				# Connection-level failure: reconnect and re-raise so the backend retries.
				log.warn("backend", e.diag.message_primary)
				db.close()
				db.connect()
				raise
			except psycopg2.extensions.TransactionRollbackError as e:
				log.warn("backend", "Database transaction deadlock.  Re-opening database and setting retry timeout.")
				db.close()
				db.connect()
				raise

			to_send = None
			if not config.get("liquidsoap_annotations"):
				to_send = schedule.get_advancing_file(self.sid)
			else:
				to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
			# Keep a copy for the flood guard above.
			sid_output[self.sid] = to_send
			self.success = True
			if not cache.get_station(self.sid, "get_next_socket_timeout"):
				self.write(to_send)
Esempio n. 52
0
    def vote(self, entry_id, event, lock_count):
        """Register a vote for `entry_id` in `event` by the current listener.

        Subtracts any previous vote in the same event, enforces station
        locking, records the vote (listener row for anonymous users,
        r4_vote_history for registered users), and updates the per-user
        vote-history cache.  Raises APIException on any failure.
        """
        # Subtract a previous vote from the song's total if there was one
        already_voted = False
        if self.user.is_anonymous():
            log.debug(
                "vote", "Anon already voted: %s" %
                (self.user.id, self.user.data['listener_voted_entry']))
            if self.user.data['listener_voted_entry'] and self.user.data[
                    'listener_voted_entry'] == entry_id:
                raise APIException("already_voted_for_song")
            if self.user.data['listener_voted_entry']:
                already_voted = True
                if not event.add_vote_to_entry(entry_id, -1):
                    log.warn(
                        "vote",
                        "Could not subtract vote from entry: listener ID %s voting for entry ID %s."
                        % (self.user.data['listener_id'], entry_id))
                    raise APIException("internal_error")
        else:
            # Registered users: prior votes live in r4_vote_history.
            already_voted = db.c.fetch_row(
                "SELECT entry_id, vote_id, song_id FROM r4_vote_history WHERE user_id = %s AND elec_id = %s",
                (self.user.id, event.id))
            log.debug("vote", "Already voted: %s" % repr(already_voted))
            if already_voted and already_voted['entry_id'] == entry_id:
                raise APIException("already_voted_for_song")
            elif already_voted:
                log.debug(
                    "vote",
                    "Subtracting vote from %s" % already_voted['entry_id'])
                if not event.add_vote_to_entry(already_voted['entry_id'], -1):
                    log.warn(
                        "vote",
                        "Could not subtract vote from entry: listener ID %s voting for entry ID %s."
                        % (self.user.data['listener_id'], entry_id))
                    raise APIException("internal_error")

        # If this is a new vote, we need to check to make sure the listener is not locked.
        if not already_voted and self.user.data[
                'listener_lock'] and self.user.data[
                    'listener_lock_sid'] != self.sid:
            raise APIException(
                "user_locked", "User locked to %s for %s more songs." %
                (config.station_id_friendly[
                    self.user.data['listener_lock_sid']],
                 self.user.data['listener_lock_counter']))
        # Issue the listener lock (will extend a lock if necessary)
        if not self.user.lock_to_sid(self.sid, lock_count):
            log.warn(
                "vote",
                "Could not lock user: listener ID %s voting for entry ID %s, tried to lock for %s events."
                % (self.user.data['listener_id'], entry_id, lock_count))
            raise APIException(
                "internal_error",
                "Internal server error.  User is now locked to station ID %s."
                % self.sid)

        # Make sure the vote is tracked
        track_success = False
        if self.user.is_anonymous():
            # Anonymous votes are tracked on the listener row only.
            if not db.c.update(
                    "UPDATE r4_listeners SET listener_voted_entry = %s WHERE listener_id = %s",
                (entry_id, self.user.data['listener_id'])):
                log.warn(
                    "vote",
                    "Could not set voted_entry: listener ID %s voting for entry ID %s."
                    % (self.user.data['listener_id'], entry_id))
                raise APIException("internal_error")
            self.user.update({"listener_voted_entry": entry_id})
            track_success = True
        else:
            if already_voted:
                # Moving an existing vote: repoint it to the new entry/song.
                db.c.update(
                    "UPDATE r4_vote_history SET song_id = %s, entry_id = %s WHERE vote_id = %s",
                    (event.get_entry(entry_id).id, entry_id,
                     already_voted['vote_id']))
            else:
                # Rank the user's 2-week vote count against all other users.
                time_window = int(time.time()) - 1209600
                vote_count = db.c.fetch_var(
                    "SELECT COUNT(vote_id) FROM r4_vote_history WHERE vote_time > %s AND user_id = %s",
                    (time_window, self.user.id))
                db.c.execute(
                    "SELECT user_id, COUNT(song_id) AS c FROM r4_vote_history WHERE vote_time > %s GROUP BY user_id HAVING COUNT(song_id) > %s",
                    (time_window, vote_count))
                rank = db.c.rowcount + 1
                db.c.update(
                    "INSERT INTO r4_vote_history (elec_id, entry_id, user_id, song_id, vote_at_rank, vote_at_count) "
                    "VALUES (%s, %s, %s, %s, %s, %s)",
                    (event.id, entry_id, self.user.id,
                     event.get_entry(entry_id).id, rank, vote_count))
            track_success = True

            # Keep a short rolling history of (elec_id, entry_id) per user.
            user_vote_cache = cache.get_user(self.user, "vote_history")
            if not user_vote_cache:
                user_vote_cache = []
            while len(user_vote_cache) > 5:
                user_vote_cache.pop(0)
            user_vote_cache.append((event.id, entry_id))
            cache.set_user(self.user, "vote_history", user_vote_cache)

        # Register vote
        if not event.add_vote_to_entry(entry_id):
            log.warn(
                "vote",
                "Could not add vote to entry: listener ID %s voting for entry ID %s."
                % (self.user.data['listener_id'], entry_id))
            raise APIException("internal_error")
Esempio n. 53
0
    def vote(self, entry_id, event, lock_count):
        # Subtract a previous vote from the song's total if there was one
        already_voted = False
        if self.user.is_anonymous():
            # log.debug("vote", "Anon already voted: %s" % (self.user.data['voted_entry'],))
            if (self.user.data["voted_entry"]
                    and self.user.data["voted_entry"] == entry_id):
                # immediately return and a success will be registered
                return True
            if self.user.data["voted_entry"]:
                already_voted = self.user.data["voted_entry"]
        else:
            previous_vote = db.c.fetch_row(
                "SELECT entry_id, vote_id, song_id FROM r4_vote_history WHERE user_id = %s AND elec_id = %s",
                (self.user.id, event.id),
            )
            # log.debug("vote", "Already voted: %s" % repr(already_voted))
            if previous_vote and previous_vote["entry_id"] == entry_id:
                # immediately return and a success will be registered
                return True
            elif previous_vote:
                already_voted = previous_vote["entry_id"]

        db.c.start_transaction()
        try:
            if already_voted:
                if not event.add_vote_to_entry(already_voted, -1):
                    log.warn(
                        "vote",
                        "Could not subtract vote from entry: listener ID %s voting for entry ID %s."
                        % (self.user.data["listener_id"], already_voted),
                    )
                    raise APIException("internal_error")

            # If this is a new vote, we need to check to make sure the listener is not locked.
            if (not already_voted and self.user.data["lock"]
                    and self.user.data["lock_sid"] != self.sid):
                raise APIException(
                    "user_locked",
                    "User locked to %s for %s more song(s)." % (
                        config.station_id_friendly[self.user.data["lock_sid"]],
                        self.user.data["lock_counter"],
                    ),
                )
            # Issue the listener lock (will extend a lock if necessary)
            if not self.user.lock_to_sid(self.sid, lock_count):
                log.warn(
                    "vote",
                    "Could not lock user: listener ID %s voting for entry ID %s, tried to lock for %s events."
                    % (self.user.data["listener_id"], entry_id, lock_count),
                )
                raise APIException(
                    "internal_error",
                    "Internal server error.  User is now locked to station ID %s."
                    % self.sid,
                )

            if self.user.is_anonymous():
                if not db.c.update(
                        "UPDATE r4_listeners SET listener_voted_entry = %s WHERE listener_id = %s",
                    (entry_id, self.user.data["listener_id"]),
                ):
                    log.warn(
                        "vote",
                        "Could not set voted_entry: listener ID %s voting for entry ID %s."
                        % (self.user.data["listener_id"], entry_id),
                    )
                    raise APIException("internal_error")
                self.user.update({"voted_entry": entry_id})
            else:
                if already_voted:
                    db.c.update(
                        "UPDATE r4_vote_history SET song_id = %s, entry_id = %s WHERE user_id = %s and entry_id = %s",
                        (
                            event.get_entry(entry_id).id,
                            entry_id,
                            self.user.id,
                            already_voted,
                        ),
                    )
                else:
                    db.c.update(
                        "INSERT INTO r4_vote_history (elec_id, entry_id, user_id, song_id, sid) "
                        "VALUES (%s, %s, %s, %s, %s)",
                        (
                            event.id,
                            entry_id,
                            self.user.id,
                            event.get_entry(entry_id).id,
                            event.sid,
                        ),
                    )
                    db.c.update(
                        "UPDATE phpbb_users SET radio_inactive = FALSE, radio_last_active = %s WHERE user_id = %s",
                        (timestamp(), self.user.id),
                    )

                    autovoted_entry = event.has_request_by_user(self.user.id)
                    if autovoted_entry:
                        event.add_vote_to_entry(
                            autovoted_entry.data["entry_id"], -1)

                user_vote_cache = cache.get_user(self.user, "vote_history")
                if not user_vote_cache:
                    user_vote_cache = []
                found = False
                for voted in user_vote_cache:
                    if voted[0] == event.id:
                        found = True
                        voted[1] = entry_id
                while len(user_vote_cache) > 5:
                    user_vote_cache.pop(0)
                if not found:
                    user_vote_cache.append([event.id, entry_id])
                cache.set_user(self.user, "vote_history", user_vote_cache)

            # Register vote
            if not event.add_vote_to_entry(entry_id):
                log.warn(
                    "vote",
                    "Could not add vote to entry: listener ID %s voting for entry ID %s."
                    % (self.user.data["listener_id"], entry_id),
                )
                raise APIException("internal_error")
            db.c.commit()
        except:
            db.c.rollback()
            raise

        return True
# Esempio n. 54
# 0
# --- Flask application configuration ---
# NOTE(review): `app` is constructed earlier in the file (outside this chunk);
# everything below mutates that instance, so statement order matters.
app.template_folder = "templates/"

# Session-signing key comes from config. Refuse to start if the placeholder
# key "debug" is still set while debug mode is off (i.e. production).
app.secret_key = config.flask.session_key
if (app.secret_key == "debug" and config.root.debug is not True):
    raise RuntimeError("Secret key not set in a production environment")

# Permanent sessions expire 4 hours after last being refreshed.
app.permanent_session_lifetime = datetime.timedelta(hours=4)

# Strip the first newline after a Jinja block tag and leading whitespace
# before it, so templates render without stray blank lines/indentation.
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True

# Bind address/port from config; may be overridden below in debug mode.
app.host = config.network.host
app.port = config.network.port

### Init App ###
# Wire up the project's middleware/handlers. Each `init` registers itself
# on `app`; routes are registered last.
csrf.init(app)
error.init(app)
before.init(app)
route.init(app)

log.info("Server initialised")

### Debug ###
# In debug mode, force a localhost-only dev server and start it immediately.
# NOTE(review): `app.run()` here blocks at import time when debug is on —
# presumably intentional for local development; confirm a separate WSGI
# entry point is used in production.
if (config.root.debug):
    log.warn("Debug mode is enabled")

    app.debug = True
    app.host = "127.0.0.1"
    app.port = 5000
    app.run()