Example #1
    def _listen(self, task_id):
        # task_ids start at zero, so we gobble up ports starting at the base port and work up
        port_no = int(config.get("api_base_port")) + task_id

        # Log according to configured directory and port # we're operating on
        log_file = "%s/rw_api_%s.log" % (config.get("api_log_dir"), port_no)
        if config.test_mode and os.path.exists(log_file):
            os.remove(log_file)
        log.init(log_file, config.get("log_level"))
        log.debug("start", "Server booting, port %s." % port_no)
        db.open()
        cache.open()

        for sid in config.station_ids:
            cache.update_local_cache_for_sid(sid)

        # If we're not in developer mode, remove development-related URLs
        if not config.get("developer_mode"):
            i = 0
            while i < len(request_classes):
                if request_classes[i][0].find("/test/") != -1:
                    request_classes.pop(i)
                    i = i - 1
                i = i + 1

        # Make sure all other errors get handled in an API-friendly way
        request_classes.append((r".*", api.web.Error404Handler))

        # Initialize the help (rather than having it scan all URL handlers every time someone hits it)
        api.help.sectionize_requests()

        # Initialize playlist variables
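        # Note: `sid` here is the last station id left over from the loop above.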
        playlist.prepare_cooldown_algorithm(sid)

        # Fire ze missiles!
        app = tornado.web.Application(
            request_classes,
            debug=(config.test_mode or config.get("developer_mode")),
            template_path=os.path.join(os.path.dirname(__file__), "../templates"),
            static_path=os.path.join(os.path.dirname(__file__), "../static"),
            autoescape=None,
        )
        http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
        http_server.listen(port_no)

        if config.get("api_user") and config.get("api_group"):
            chuser.change_user(config.get("api_user"), config.get("api_group"))

        for request in request_classes:
            log.debug("start", "   Handler: %s" % str(request))
        log.info("start", "API server bootstrapped and ready to go.")
        self.ioloop = tornado.ioloop.IOLoop.instance()
        try:
            self.ioloop.start()
        finally:
            self.ioloop.stop()
            http_server.stop()
            db.close()
            log.info("stop", "Server has been shutdown.")
            log.close()
Example #2
def start():
	db.open()
	cache.open()
	if config.test_mode:
		playlist.remove_all_locks(1)

	app = tornado.web.Application([
		(r"/advance/([0-9]+)", AdvanceScheduleRequest),
		(r"/refresh/([0-9]+)", RefreshScheduleRequest)
		], debug=(config.test_mode or config.get("developer_mode")))

	server = tornado.httpserver.HTTPServer(app)
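	# Bind to 127.0.0.1 so the backend only accepts local connections.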
	server.listen(int(config.get("backend_port")), address='127.0.0.1')

	if config.get("backend_user") or config.get("backend_group"):
		chuser.change_user(config.get("backend_user"), config.get("backend_group"))

	pid = os.getpid()
	pidfile = open(config.get("backend_pid_file"), 'w')
	pidfile.write(str(pid))
	pidfile.close()

	schedule.load()

	log.debug("start", "Backend server bootstrapped, port %s, ready to go." % int(config.get("backend_port")))

	for sid in config.station_ids:
		playlist.prepare_cooldown_algorithm(sid)

	try:
		tornado.ioloop.IOLoop.instance().start()
	finally:
		db.close()
Example #3
    def _listen(self, task_id):
        # task_ids start at zero, so we gobble up ports starting at the base port and work up
        port_no = int(config.get("api_base_port")) + task_id

        # Log according to configured directory and port # we're operating on
        log_file = "%s/rw_api_%s.log" % (config.get("api_log_dir"), port_no)
        if config.test_mode and os.path.exists(log_file):
            os.remove(log_file)
        log.init(log_file, config.get("log_level"))
        log.debug("start", "Server booting, port %s." % port_no)
        db.open()
        cache.open()

        for sid in config.station_ids:
            cache.update_local_cache_for_sid(sid)

        # If we're not in developer mode, remove development-related URLs
        if not config.get("developer_mode"):
            i = 0
            while (i < len(request_classes)):
                if request_classes[i][0].find("/test/") != -1:
                    request_classes.pop(i)
                    i = i - 1
                i = i + 1

        # Make sure all other errors get handled in an API-friendly way
        request_classes.append((r".*", api.web.Error404Handler))

        # Initialize the help (rather than having it scan all URL handlers every time someone hits it)
        api.help.sectionize_requests()

        # Initialize playlist variables
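        # Note: `sid` here is the last station id left over from the loop above.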
        playlist.prepare_cooldown_algorithm(sid)

        # Fire ze missiles!
        app = tornado.web.Application(
            request_classes,
            debug=(config.test_mode or config.get("developer_mode")),
            template_path=os.path.join(os.path.dirname(__file__),
                                       "../templates"),
            static_path=os.path.join(os.path.dirname(__file__), "../static"),
            autoescape=None)
        http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
        http_server.listen(port_no)

        if config.get("api_user") and config.get("api_group"):
            chuser.change_user(config.get("api_user"), config.get("api_group"))

        for request in request_classes:
            log.debug("start", "   Handler: %s" % str(request))
        log.info("start", "API server bootstrapped and ready to go.")
        self.ioloop = tornado.ioloop.IOLoop.instance()
        try:
            self.ioloop.start()
        finally:
            self.ioloop.stop()
            http_server.stop()
            db.close()
            log.info("stop", "Server has been shutdown.")
            log.close()
Example #4
def advance_station(sid):
	playlist.prepare_cooldown_algorithm(sid)
	playlist.clear_updated_albums(sid)
	
	# TODO LATER: Make sure we can "pause" the station here to handle DJ interruptions
	# Requires controlling the streamer itself to some degree and will take more
	# work on the API than the back-end.

	current[sid].finish()
	
	last_song = current[sid].get_song()
	if last_song:
		history[sid].insert(0, last_song)
		db.c.update("INSERT INTO r4_song_history (sid, song_id) VALUES (%s, %s)", (sid, last_song.id))
		
	while len(history[sid]) > 3:
		history[sid].pop()
		
	# TODO: IMPORTANT: Block currently playing song/album from being selected so there's no "hole"
	
	integrate_new_events(sid)
	sort_next(sid)
	if len(next[sid]) == 0:
		_create_elections(sid)
	current[sid] = next[sid].pop(0)
	current[sid].start_event()
Example #5
def post_process(sid):
	try:
		db.c.start_transaction()
		start_time = time.time()
		playlist.prepare_cooldown_algorithm(sid)
		rainwave.playlist_objects.album.clear_updated_albums(sid)
		log.debug("post", "Playlist prepare time: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		current[sid].finish()
		log.debug("post", "Current finish time: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		last_song = current[sid].get_song()
		if last_song:
			db.c.update("INSERT INTO r4_song_history (sid, song_id) VALUES (%s, %s)", (sid, last_song.id))
		log.debug("post", "Last song insertion time: %s" % (time.time() - start_time,))

		start_time = time.time()
		history[sid].insert(0, current[sid])
		while len(history[sid]) > 5:
			history[sid].pop()
		log.debug("post", "History management: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		current[sid] = upnext[sid].pop(0)
		current[sid].start_event()
		log.debug("advance", "Current management: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		playlist.warm_cooled_songs(sid)
		playlist.warm_cooled_albums(sid)
		log.debug("advance", "Cooldown warming: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		_add_listener_count_record(sid)
		_trim(sid)
		user.trim_listeners(sid)
		cache.update_user_rating_acl(sid, history[sid][0].get_song().id)
		user.unlock_listeners(sid)
		log.debug("advance", "User management and trimming: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		# reduce_song_blocks has to come first, otherwise it will reduce blocks generated by _create_elections
		playlist.reduce_song_blocks(sid)
		# update_cache updates both the line and expiry times
		# this is expensive and not necessary to do more than once
		# DO THIS AFTER EVERYTHING ELSE, RIGHT BEFORE NEXT MANAGEMENT, OR PEOPLE'S REQUESTS SLIP THROUGH THE CRACKS
		request.update_cache(sid)
		# add to the event list / update start times for events
		manage_next(sid)
		log.debug("advance", "Request and upnext management: %.6f" % (time.time() - start_time,))

		update_memcache(sid)
		sync_to_front.sync_frontend_all(sid)
		db.c.commit()
	except:
		db.c.rollback()
		raise
Example #6
    def _listen(self, sid):
        pid = os.getpid()
        pid_file = open(
            "%s/backend_%s.pid" % (
                config.get_directory("pid_dir"),
                config.station_id_friendly[sid].lower(),
            ),
            "w",
        )
        pid_file.write(str(pid))
        pid_file.close()

        db.connect()
        cache.connect()
        zeromq.init_pub()
        log.init(
            "%s/rw_%s.log" % (
                config.get_directory("log_dir"),
                config.station_id_friendly[sid].lower(),
            ),
            config.get("log_level"),
        )
        memory_trace.setup(config.station_id_friendly[sid].lower())

        if config.test_mode:
            playlist.remove_all_locks(sid)

        # (r"/refresh/([0-9]+)", RefreshScheduleRequest)
        app = tornado.web.Application(
            [
                (r"/advance/([0-9]+)", AdvanceScheduleRequest),
            ],
            debug=(config.test_mode or config.get("developer_mode")),
        )

        # Each station's backend gets its own port, offset from the base port by its station id.
        port = int(config.get("backend_port")) + sid
        server = tornado.httpserver.HTTPServer(app)
        server.listen(port, address="127.0.0.1")

        for station_id in config.station_ids:
            playlist.prepare_cooldown_algorithm(station_id)
        schedule.load()
        log.debug(
            "start",
            "Backend server started, station %s port %s, ready to go." %
            (config.station_id_friendly[sid], port),
        )

        ioloop = tornado.ioloop.IOLoop.instance()
        try:
            ioloop.start()
        finally:
            ioloop.stop()
            server.stop()
            db.close()
            log.info("stop", "Backend has been shutdown.")
            log.close()
Example #7
def advance_station(sid):
    # This has been necessary during development and debugging.
    # Do we want to add an "if config.get("developer_mode")" here so it crashes in production and we hunt down the bug?
    # next[sid] = filter(None, next[sid])

    start_time = time.time()
    playlist.prepare_cooldown_algorithm(sid)
    playlist.clear_updated_albums(sid)
    log.debug("advance",
              "Playlist prepare time: %.6f" % (time.time() - start_time, ))

    start_time = time.time()
    current[sid].finish()
    log.debug("advance",
              "Current finish time: %.6f" % (time.time() - start_time, ))

    start_time = time.time()
    last_song = current[sid].get_song()
    if last_song:
        db.c.update(
            "INSERT INTO r4_song_history (sid, song_id) VALUES (%s, %s)",
            (sid, last_song.id))
    log.debug("advance",
              "Last song insertion time: %s" % (time.time() - start_time, ))

    start_time = time.time()
    history[sid].insert(0, current[sid])
    while len(history[sid]) > 5:
        history[sid].pop()
    log.debug("advance",
              "History management: %.6f" % (time.time() - start_time, ))

    start_time = time.time()
    integrate_new_events(sid)
    # Create an emergency election if nothing is queued up next
    if len(next[sid]) == 0:
        next[sid].append(_create_election(sid))
    else:
        sort_next(sid)
    log.debug("advance",
              "Next event management: %.6f" % (time.time() - start_time, ))

    start_time = time.time()
    current[sid] = next[sid].pop(0)
    current[sid].start_event()
    log.debug("advance",
              "Current management: %.6f" % (time.time() - start_time, ))

    # Schedule post_process(sid) on the IOLoop after a 100ms delay.
    tornado.ioloop.IOLoop.instance().add_timeout(
        datetime.timedelta(milliseconds=100), lambda: post_process(sid))
Example #8
def advance_station(sid):
	# This has been necessary during development and debugging.
	# Do we want to add an "if config.get("developer_mode")" here so it crashes in production and we hunt down the bug?
	# next[sid] = filter(None, next[sid])

	start_time = time.time()
	playlist.prepare_cooldown_algorithm(sid)
	playlist.clear_updated_albums(sid)
	log.debug("advance", "Playlist prepare time: %.6f" % (time.time() - start_time,))

	start_time = time.time()
	current[sid].finish()
	log.debug("advance", "Current finish time: %.6f" % (time.time() - start_time,))

	start_time = time.time()
	last_song = current[sid].get_song()
	if last_song:
		db.c.update("INSERT INTO r4_song_history (sid, song_id) VALUES (%s, %s)", (sid, last_song.id))
	log.debug("advance", "Last song insertion time: %s" % (time.time() - start_time,))

	start_time = time.time()
	history[sid].insert(0, current[sid])
	while len(history[sid]) > 5:
		history[sid].pop()
	log.debug("advance", "History management: %.6f" % (time.time() - start_time,))

	start_time = time.time()
	integrate_new_events(sid)
	# Create an emergency election if nothing is queued up next
	if len(next[sid]) == 0:
		next[sid].append(_create_election(sid))
	else:
		sort_next(sid)
	log.debug("advance", "Next event management: %.6f" % (time.time() - start_time,))

	start_time = time.time()
	current[sid] = next[sid].pop(0)
	current[sid].start_event()
	log.debug("advance", "Current management: %.6f" % (time.time() - start_time,))

	# Schedule post_process(sid) on the IOLoop after a 100ms delay.
	tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(milliseconds=100), lambda: post_process(sid))
Example #9
	def _listen(self, sid):
		pid = os.getpid()
		pid_file = open("%s/backend_%s.pid" % (config.get_directory("pid_dir"), config.station_id_friendly[sid].lower()), 'w')
		pid_file.write(str(pid))
		pid_file.close()

		db.connect()
		cache.connect()
		zeromq.init_pub()
		log.init("%s/rw_%s.log" % (config.get_directory("log_dir"), config.station_id_friendly[sid].lower()), config.get("log_level"))
		memory_trace.setup(config.station_id_friendly[sid].lower())

		if config.test_mode:
			playlist.remove_all_locks(sid)

		# (r"/refresh/([0-9]+)", RefreshScheduleRequest)
		app = tornado.web.Application([
			(r"/advance/([0-9]+)", AdvanceScheduleRequest),
			], debug=(config.test_mode or config.get("developer_mode")))

		# Each station's backend gets its own port, offset from the base port by its station id.
		port = int(config.get("backend_port")) + sid
		server = tornado.httpserver.HTTPServer(app)
		server.listen(port, address='127.0.0.1')

		for station_id in config.station_ids:
			playlist.prepare_cooldown_algorithm(station_id)
		schedule.load()
		log.debug("start", "Backend server started, station %s port %s, ready to go." % (config.station_id_friendly[sid], port))

		ioloop = tornado.ioloop.IOLoop.instance()
		try:
			ioloop.start()
		finally:
			ioloop.stop()
			server.stop()
			db.close()
			log.info("stop", "Backend has been shutdown.")
			log.close()
Example #10
def start():
    db.open()
    cache.open()
    if config.test_mode:
        playlist.remove_all_locks(1)

    app = tornado.web.Application(
        [(r"/advance/([0-9]+)", AdvanceScheduleRequest),
         (r"/refresh/([0-9]+)", RefreshScheduleRequest)],
        debug=(config.test_mode or config.get("developer_mode")))

    server = tornado.httpserver.HTTPServer(app)
    server.listen(int(config.get("backend_port")), address='127.0.0.1')

    if config.get("backend_user") or config.get("backend_group"):
        chuser.change_user(config.get("backend_user"),
                           config.get("backend_group"))

    pid = os.getpid()
    pidfile = open(config.get("backend_pid_file"), 'w')
    pidfile.write(str(pid))
    pidfile.close()

    schedule.load()

    log.debug(
        "start", "Backend server bootstrapped, port %s, ready to go." %
        int(config.get("backend_port")))

    for sid in config.station_ids:
        playlist.prepare_cooldown_algorithm(sid)

    try:
        tornado.ioloop.IOLoop.instance().start()
    finally:
        db.close()
Example #11
	def _listen(self, task_id):
		zeromq.init_pub()
		zeromq.init_sub()

		import api_requests.sync
		api_requests.sync.init()

		# task_ids start at zero, so we gobble up ports starting at the base port and work up
		port_no = int(config.get("api_base_port")) + task_id

		pid = os.getpid()
		pid_file = open("%s/api_%s.pid" % (config.get_directory("pid_dir"), port_no), 'w')
		pid_file.write(str(pid))
		pid_file.close()

		# Log according to configured directory and port # we're operating on
		log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"), port_no)
		if config.test_mode and os.path.exists(log_file):
			os.remove(log_file)
		log.init(log_file, config.get("log_level"))
		log.debug("start", "Server booting, port %s." % port_no)
		db.connect()
		cache.connect()
		memory_trace.setup(port_no)

		api.locale.load_translations()
		api.locale.compile_static_language_files()

		if config.get("web_developer_mode"):
			for station_id in config.station_ids:
				playlist.prepare_cooldown_algorithm(station_id)
			# automatically loads every station ID and fills things in if there's no data
			schedule.load()
			for station_id in config.station_ids:
				schedule.update_memcache(station_id)
				rainwave.request.update_line(station_id)
				rainwave.request.update_expire_times()
				cache.set_station(station_id, "backend_ok", True)
				cache.set_station(station_id, "backend_message", "OK")
				cache.set_station(station_id, "get_next_socket_timeout", False)

		for sid in config.station_ids:
			cache.update_local_cache_for_sid(sid)
			playlist.prepare_cooldown_algorithm(sid)
			playlist.update_num_songs()

		# If we're not in developer mode, remove development-related URLs
		if not config.get("developer_mode"):
			i = 0
			while (i < len(request_classes)):
				if request_classes[i][0].find("/test/") != -1:
					request_classes.pop(i)
					i = i - 1
				i = i + 1

		# Make sure all other errors get handled in an API-friendly way
		request_classes.append((r"/api/.*", api.web.Error404Handler))
		request_classes.append((r"/api4/.*", api.web.Error404Handler))
		request_classes.append((r".*", api.web.HTMLError404Handler))

		# Initialize the help (rather than having it scan all URL handlers every time someone hits it)
		api.help.sectionize_requests()

		# Fire ze missiles!
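		# Build the Tornado application from the assembled request_classes list; it is stored at module level (global app).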
		global app
		app = tornado.web.Application(request_classes,
			debug=(config.test_mode or config.get("developer_mode")),
			template_path=os.path.join(os.path.dirname(__file__), "../templates"),
			static_path=os.path.join(os.path.dirname(__file__), "../static"),
			autoescape=None)
		http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
		http_server.listen(port_no)

		if config.get("api_user") and config.get("api_group"):
			chuser.change_user(config.get("api_user"), config.get("api_group"))

		for request in request_classes:
			log.debug("start", "   Handler: %s" % str(request))
		log.info("start", "API server on port %s ready to go." % port_no)
		self.ioloop = tornado.ioloop.IOLoop.instance()

		try:
			self.ioloop.start()
		finally:
			self.ioloop.stop()
			http_server.stop()
			db.close()
			log.info("stop", "Server has been shutdown.")
			log.close()
Example #12
def post_process(sid):
	try:
		db.c.start_transaction()
		start_time = time.time()
		playlist.prepare_cooldown_algorithm(sid)
		rainwave.playlist_objects.album.clear_updated_albums(sid)
		log.debug("post", "Playlist prepare time: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		current[sid].finish()
		log.debug("post", "Current finish time: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		last_song = current[sid].get_song()
		if last_song:
			db.c.update("INSERT INTO r4_song_history (sid, song_id) VALUES (%s, %s)", (sid, last_song.id))
		log.debug("post", "Last song insertion time: %s" % (time.time() - start_time,))

		start_time = time.time()
		history[sid].insert(0, current[sid])
		while len(history[sid]) > 5:
			history[sid].pop()
		log.debug("post", "History management: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		current[sid] = upnext[sid].pop(0)
		current[sid].start_event()
		log.debug("advance", "Current management: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		playlist.warm_cooled_songs(sid)
		playlist.warm_cooled_albums(sid)
		log.debug("advance", "Cooldown warming: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		_add_listener_count_record(sid)
		_trim(sid)
		user.trim_listeners(sid)
		cache.update_user_rating_acl(sid, history[sid][0].get_song().id)
		user.unlock_listeners(sid)
		log.debug("advance", "User management and trimming: %.6f" % (time.time() - start_time,))

		start_time = time.time()
		# reduce_song_blocks has to come first, otherwise it will reduce blocks generated by _create_elections
		playlist.reduce_song_blocks(sid)
		# update_cache updates both the line and expiry times
		# this is expensive and not necessary to do more than once
		# DO THIS AFTER EVERYTHING ELSE, RIGHT BEFORE NEXT MANAGEMENT, OR PEOPLE'S REQUESTS SLIP THROUGH THE CRACKS
		request.update_line(sid)
		# add to the event list / update start times for events
		manage_next(sid)
		# update expire times AFTER manage_next, so people who aren't in line anymore don't see expiry times
		request.update_expire_times()
		log.debug("advance", "Request and upnext management: %.6f" % (time.time() - start_time,))

		update_memcache(sid)
		sync_to_front.sync_frontend_all(sid)
		db.c.commit()
	except:
		db.c.rollback()
		raise
Example #13
    def _listen(self, task_id):
        zeromq.init_pub()
        zeromq.init_sub()

        import api_requests.sync

        api_requests.sync.init()

        # task_ids start at zero, so we gobble up ports starting at the base port and work up
        port_no = int(config.get("api_base_port")) + task_id

        # Log according to configured directory and port # we're operating on
        log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"),
                                         port_no)
        log.init(log_file, config.get("log_level"))
        log.debug("start", "Server booting, port %s." % port_no)
        db.connect(auto_retry=False, retry_only_this_time=True)
        cache.connect()
        memory_trace.setup(port_no)

        api.locale.load_translations()
        api.locale.compile_static_language_files()

        if config.get("developer_mode"):
            for station_id in config.station_ids:
                playlist.prepare_cooldown_algorithm(station_id)
            # automatically loads every station ID and fills things in if there's no data
            schedule.load()
            for station_id in config.station_ids:
                schedule.update_memcache(station_id)
                rainwave.request.update_line(station_id)
                rainwave.request.update_expire_times()
                cache.set_station(station_id, "backend_ok", True)
                cache.set_station(station_id, "backend_message", "OK")
                cache.set_station(station_id, "get_next_socket_timeout", False)

        for sid in config.station_ids:
            cache.update_local_cache_for_sid(sid)
            playlist.prepare_cooldown_algorithm(sid)
            playlist.update_num_songs()

        # If we're not in developer mode, remove development-related URLs
        if not config.get("developer_mode"):
            i = 0
            while i < len(request_classes):
                if request_classes[i][0].find("/test/") != -1:
                    request_classes.pop(i)
                    i = i - 1
                i = i + 1

        # Make sure all other errors get handled in an API-friendly way
        request_classes.append((r"/api/.*", api.web.Error404Handler))
        request_classes.append((r"/api4/.*", api.web.Error404Handler))
        request_classes.append((r".*", api.web.HTMLError404Handler))

        # Initialize the help (rather than having it scan all URL handlers every time someone hits it)
        api.help.sectionize_requests()

        # Fire ze missiles!
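        # Build the Tornado application; debug, autoreload, and traceback serving all follow developer_mode.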
        global app
        debug = config.get("developer_mode")
        app = tornado.web.Application(
            request_classes,
            debug=debug,
            template_path=os.path.join(os.path.dirname(__file__),
                                       "../templates"),
            static_path=os.path.join(os.path.dirname(__file__), "../static"),
            autoescape=None,
            autoreload=debug,
            serve_traceback=debug,
        )
        http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
        http_server.listen(port_no)

        for request in request_classes:
            log.debug("start", "   Handler: %s" % str(request))
        log.info("start", "API server on port %s ready to go." % port_no)
        self.ioloop = tornado.ioloop.IOLoop.instance()

        # Run db.connection_keepalive every 10 seconds to keep the database connection alive.
        db_keepalive = tornado.ioloop.PeriodicCallback(db.connection_keepalive,
                                                       10000)
        db_keepalive.start()

        try:
            self.ioloop.start()
        finally:
            self.ioloop.stop()
            http_server.stop()
            db.close()
            log.info("stop", "Server has been shutdown.")
            log.close()
Example #14
def post_process(sid):
    try:
        db.c.start_transaction()
        start_time = timestamp()
        playlist.prepare_cooldown_algorithm(sid)
        rainwave.playlist_objects.album.clear_updated_albums(sid)
        log.debug("post", "Playlist prepare time: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        current[sid].finish()
        for sched_id in db.c.fetch_list(
            "SELECT sched_id FROM r4_schedule WHERE sched_end < %s AND sched_used = FALSE",
            (timestamp(),),
        ):
            t_evt = events.event.BaseProducer.load_producer_by_id(sched_id)
            t_evt.finish()
        log.debug("post", "Current finish time: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        last_song = current[sid].get_song()
        if last_song:
            db.c.update(
                "INSERT INTO r4_song_history (sid, song_id) VALUES (%s, %s)",
                (sid, last_song.id),
            )
        log.debug("post", "Last song insertion time: %s" % (timestamp() - start_time,))

        start_time = timestamp()
        history[sid].insert(0, current[sid])
        while len(history[sid]) > 5:
            history[sid].pop()
        log.debug("post", "History management: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        current[sid] = upnext[sid].pop(0)
        current[sid].start_event()
        log.debug("advance", "Current management: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        playlist.warm_cooled_songs(sid)
        playlist.warm_cooled_albums(sid)
        log.debug("advance", "Cooldown warming: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        _trim(sid)
        user.trim_listeners(sid)
        cache.update_user_rating_acl(sid, history[sid][0].get_song().id)
        user.unlock_listeners(sid)
        db.c.update(
            "UPDATE r4_listeners SET listener_voted_entry = NULL WHERE sid = %s", (sid,)
        )
        log.debug(
            "advance",
            "User management and trimming: %.6f" % (timestamp() - start_time,),
        )

        start_time = timestamp()
        # reduce_song_blocks has to come first, otherwise it will reduce blocks generated by _create_elections
        playlist.reduce_song_blocks(sid)
        # update_cache updates both the line and expiry times
        # this is expensive and must be done before and after every request is filled
        # DO THIS AFTER EVERYTHING ELSE, RIGHT BEFORE NEXT MANAGEMENT, OR PEOPLE'S REQUESTS SLIP THROUGH THE CRACKS
        request.update_line(sid)
        # add to the event list / update start times for events
        manage_next(sid)
        # update expire times AFTER manage_next, so people who aren't in line anymore don't see expiry times
        request.update_expire_times()
        log.debug(
            "advance",
            "Request and upnext management: %.6f" % (timestamp() - start_time,),
        )

        update_memcache(sid)

        sync_to_front.sync_frontend_all(sid)
        db.c.commit()
    except:
        db.c.rollback()
        raise

    # Push now-playing metadata to TuneIn when the station has partner credentials configured.
    if (
        current[sid]
        and config.has_station(sid, "tunein_partner_key")
        and config.get_station(sid, "tunein_partner_key")
    ):
        ti_song = current[sid].get_song()
        if ti_song:
            ti_title = ti_song.data["title"]
            ti_album = ti_song.albums[0].data["name"]
            ti_artist = ", ".join([a.data["name"] for a in ti_song.artists])

            params = {
                "id": config.get_station(sid, "tunein_id"),
                "title": ti_title,
                "artist": ti_artist,
                "album": ti_album,
            }

            try:
                req = requests.Request(
                    "GET", "http://air.radiotime.com/Playing.ashx", params=params
                )
                p = req.prepare()
                # Must be done here rather than in params because of odd strings TuneIn creates
                p.url += "&partnerId=%s" % config.get_station(sid, "tunein_partner_id")
                p.url += "&partnerKey=%s" % config.get_station(
                    sid, "tunein_partner_key"
                )
                s = requests.Session()
                resp = s.send(p, timeout=3)
                log.debug(
                    "advance", "TuneIn updated (%s): %s" % (resp.status_code, resp.text)
                )
            except Exception as e:
                log.exception("advance", "Could not update TuneIn.", e)
Example #15
def post_process(sid):
    try:
        db.c.start_transaction()
        start_time = timestamp()
        playlist.prepare_cooldown_algorithm(sid)
        rainwave.playlist_objects.album.clear_updated_albums(sid)
        log.debug("post", "Playlist prepare time: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        current[sid].finish()
        for sched_id in db.c.fetch_list(
            "SELECT sched_id FROM r4_schedule WHERE sched_end < %s AND sched_used = FALSE", (timestamp(),)
        ):
            t_evt = events.event.BaseProducer.load_producer_by_id(sched_id)
            t_evt.finish()
        log.debug("post", "Current finish time: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        last_song = current[sid].get_song()
        if last_song:
            db.c.update("INSERT INTO r4_song_history (sid, song_id) VALUES (%s, %s)", (sid, last_song.id))
        log.debug("post", "Last song insertion time: %s" % (timestamp() - start_time,))

        start_time = timestamp()
        history[sid].insert(0, current[sid])
        while len(history[sid]) > 5:
            history[sid].pop()
        log.debug("post", "History management: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        current[sid] = upnext[sid].pop(0)
        current[sid].start_event()
        log.debug("advance", "Current management: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        playlist.warm_cooled_songs(sid)
        playlist.warm_cooled_albums(sid)
        log.debug("advance", "Cooldown warming: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        _add_listener_count_record(sid)
        _trim(sid)
        user.trim_listeners(sid)
        cache.update_user_rating_acl(sid, history[sid][0].get_song().id)
        user.unlock_listeners(sid)
        db.c.update("UPDATE r4_listeners SET listener_voted_entry = NULL WHERE sid = %s", (sid,))
        log.debug("advance", "User management and trimming: %.6f" % (timestamp() - start_time,))

        start_time = timestamp()
        # reduce_song_blocks has to come first, otherwise it will reduce blocks generated by _create_elections
        playlist.reduce_song_blocks(sid)
        # update_cache updates both the line and expiry times
        # this is expensive and must be done before and after every request is filled
        # DO THIS AFTER EVERYTHING ELSE, RIGHT BEFORE NEXT MANAGEMENT, OR PEOPLE'S REQUESTS SLIP THROUGH THE CRACKS
        request.update_line(sid)
        # add to the event list / update start times for events
        manage_next(sid)
        # update expire times AFTER manage_next, so people who aren't in line anymore don't see expiry times
        request.update_expire_times()
        log.debug("advance", "Request and upnext management: %.6f" % (timestamp() - start_time,))

        update_memcache(sid)

        sync_to_front.sync_frontend_all(sid)
        db.c.commit()
    except:
        db.c.rollback()
        raise

    # Push now-playing metadata to TuneIn when the station has partner credentials configured.
    if current[sid] and config.has_station(sid, "tunein_partner_key") and config.get_station(sid, "tunein_partner_key"):
        ti_song = current[sid].get_song()
        if ti_song:
            ti_title = ti_song.data["title"]
            ti_album = ti_song.albums[0].data["name"]
            ti_artist = ", ".join([a.data["name"] for a in ti_song.artists])

            params = {
                "id": config.get_station(sid, "tunein_id"),
                "title": ti_title,
                "artist": ti_artist,
                "album": ti_album,
            }

            try:
                req = requests.Request("GET", "http://air.radiotime.com/Playing.ashx", params=params)
                p = req.prepare()
                # Must be done here rather than in params because of odd strings TuneIn creates
                p.url += "&partnerId=%s" % config.get_station(sid, "tunein_partner_id")
                p.url += "&partnerKey=%s" % config.get_station(sid, "tunein_partner_key")
                s = requests.Session()
                resp = s.send(p, timeout=3)
                log.debug("advance", "TuneIn updated (%s): %s" % (resp.status_code, resp.text))
            except Exception as e:
                log.exception("advance", "Could not update TuneIn.", e)
Example #16
def advance_station(sid):
	playlist.prepare_cooldown_algorithm(sid)
Example #17
    def _listen(self, task_id):
        import api_requests.sync
        api_requests.sync.init()

        # task_ids start at zero, so we gobble up ports starting at the base port and work up
        port_no = int(config.get("api_base_port")) + task_id

        pid = os.getpid()
        pid_file = open(
            "%s/api_%s.pid" % (config.get_directory("pid_dir"), port_no), 'w')
        pid_file.write(str(pid))
        pid_file.close()

        # Log according to configured directory and port # we're operating on
        log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"),
                                         port_no)
        if config.test_mode and os.path.exists(log_file):
            os.remove(log_file)
        log.init(log_file, config.get("log_level"))
        log.debug("start", "Server booting, port %s." % port_no)
        db.connect()
        cache.connect()
        memory_trace.setup(port_no)

        if config.get("web_developer_mode"):
            for station_id in config.station_ids:
                playlist.prepare_cooldown_algorithm(station_id)
            # automatically loads every station ID and fills things in if there's no data
            schedule.load()
            for station_id in config.station_ids:
                schedule.update_memcache(station_id)
                rainwave.request.update_line(station_id)
                rainwave.request.update_expire_times()
                cache.set_station(station_id, "backend_ok", True)
                cache.set_station(station_id, "backend_message", "OK")
                cache.set_station(station_id, "get_next_socket_timeout", False)

        for sid in config.station_ids:
            cache.update_local_cache_for_sid(sid)
            playlist.prepare_cooldown_algorithm(sid)
            playlist.update_num_songs()

        # If we're not in developer mode, remove development-related URLs
        if not config.get("developer_mode"):
            i = 0
            while (i < len(request_classes)):
                if request_classes[i][0].find("/test/") != -1:
                    request_classes.pop(i)
                    i = i - 1
                i = i + 1

        # Make sure all other errors get handled in an API-friendly way
        request_classes.append((r"/api/.*", api.web.Error404Handler))
        request_classes.append((r"/api4/.*", api.web.Error404Handler))
        request_classes.append((r".*", api.web.HTMLError404Handler))

        # Initialize the help (rather than having it scan all URL handlers every time someone hits it)
        api.help.sectionize_requests()

        # Fire ze missiles!
        app = tornado.web.Application(
            request_classes,
            debug=(config.test_mode or config.get("developer_mode")),
            template_path=os.path.join(os.path.dirname(__file__),
                                       "../templates"),
            static_path=os.path.join(os.path.dirname(__file__), "../static"),
            autoescape=None)
        http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
        http_server.listen(port_no)

        if config.get("api_user") and config.get("api_group"):
            chuser.change_user(config.get("api_user"), config.get("api_group"))

        # Only the first API worker (task_id 0) bakes the static CSS/JS bundles, so it happens once.
        if task_id == 0:
            buildtools.bake_css()
            buildtools.bake_js()
            buildtools.bake_beta_js()

        for request in request_classes:
            log.debug("start", "   Handler: %s" % str(request))
        log.info("start", "API server on port %s ready to go." % port_no)
        self.ioloop = tornado.ioloop.IOLoop.instance()

        try:
            self.ioloop.start()
        finally:
            self.ioloop.stop()
            http_server.stop()
            db.close()
            log.info("stop", "Server has been shutdown.")
            log.close()