Example #1
	def _handle_event(self, event):
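		# Dispatch a filesystem event (watchdog-style): route changed files and
		# directories under root_directory to _handle_file/_handle_directory,
		# skipping unwanted extensions and images that were deleted or moved.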
		try:
			if hasattr(event, "src_path") and event.src_path and check_file_is_in_directory(event.src_path, self.root_directory):
				if _is_bad_extension(event.src_path):
					pass
				elif not os.path.isdir(event.src_path):
					log.debug("scan_event", "%s src_path for file %s" % (event.event_type, event.src_path))
					if _is_image(event.src_path) and (event.event_type == 'deleted' or event.event_type == 'moved'):
						pass
					else:
						self._handle_file(event.src_path)
				else:
					log.debug("scan_event", "%s src_path for dir %s" % (event.event_type, event.src_path))
					self._handle_directory(event.src_path)

			if hasattr(event, "dest_path") and event.dest_path and check_file_is_in_directory(event.dest_path, self.root_directory):
				if _is_bad_extension(event.dest_path):
					pass
				elif not os.path.isdir(event.dest_path):
					log.debug("scan_event", "%s dest_path for file %s" % (event.event_type, event.dest_path))
					if _is_image(event.dest_path) and (event.event_type == 'deleted'):
						pass
					else:
						self._handle_file(event.dest_path)
				else:
					log.debug("scan_event", "%s dest_path for dir %s" % (event.event_type, event.dest_path))
					self._handle_directory(event.dest_path)
		except Exception as xception:
			_add_scan_error(self.root_directory, xception)
			log.critical("scan_event", "Exception occurred - reconnecting to the database just in case.")
			db.close()
			db.connect()
Example #2
	def write_error(self, status_code, **kwargs):
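		# Tornado-style write_error override: build a structured JSON error,
		# reconnecting to PostgreSQL first when the failure was a dropped
		# database connection.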
		if self._output_array:
			self._output = []
		else:
			self._output = {}
		if "exc_info" in kwargs:
			exc = kwargs['exc_info'][1]

			# Restart DB on a connection error if that's what we're handling
			if isinstance(exc, (psycopg2.OperationalError, psycopg2.InterfaceError)):
				try:
					db.close()
					db.connect()
					self.append("error", { "code": 500, "tl_key": "db_error_retry", "text": self.locale.translate("db_error_retry") })
				except Exception:
					self.append("error", { "code": 500, "tl_key": "db_error_permanent", "text": self.locale.translate("db_error_permanent") })
			elif isinstance(exc, APIException):
				exc.localize(self.locale)
				self.append(self.return_name, exc.jsonable())
			elif isinstance(exc, SongNonExistent):
				self.append("error", { "code": status_code, "tl_key": "song_does_not_exist", "text": self.locale.translate("song_does_not_exist") })
			else:
				self.append("error", { "code": status_code, "tl_key": "internal_error", "text": repr(exc) })
				self.append("traceback", { "traceback": traceback.format_exception(kwargs['exc_info'][0], kwargs['exc_info'][1], kwargs['exc_info'][2]) })
		else:
			self.append("error", { "tl_key": "internal_error", "text": self.locale.translate("internal_error") } )
		self.finish()
Example #3
def _html_write_error(self, status_code, **kwargs):
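	# HTML counterpart of write_error: render a basic error page, and include
	# the traceback for HTTP 500s or when developer_mode is enabled.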
	if "exc_info" in kwargs:
		exc = kwargs['exc_info'][1]

		# Restart DB on a connection error if that's what we're handling
		if isinstance(exc, (psycopg2.OperationalError, psycopg2.InterfaceError)):
			try:
				db.close()
				db.connect()
				self.append("error", { "code": 500, "tl_key": "db_error_retry", "text": self.locale.translate("db_error_retry") })
			except Exception:
				self.append("error", { "code": 500, "tl_key": "db_error_permanent", "text": self.locale.translate("db_error_permanent") })
		elif isinstance(exc, APIException):
			if not isinstance(self.locale, locale.RainwaveLocale):
				exc.localize(locale.RainwaveLocale.get("en_CA"))
			else:
				exc.localize(self.locale)
		if (isinstance(exc, APIException) or isinstance(exc, tornado.web.HTTPError)) and exc.reason:
			self.write(self.render_string("basic_header.html", title="%s - %s" % (status_code, exc.reason)))
		else:
			self.write(self.render_string("basic_header.html", title="HTTP %s - %s" % (status_code, tornado.httputil.responses.get(status_code, 'Unknown'))))
		if status_code == 500 or config.get("developer_mode"):
			self.write("<p>")
			self.write(self.locale.translate("unknown_error_message"))
			self.write("</p><p>")
			self.write(self.locale.translate("debug_information"))
			self.write("</p><div class='json'>")
			for line in traceback.format_exception(kwargs['exc_info'][0], kwargs['exc_info'][1], kwargs['exc_info'][2]):
				self.write(line)
			self.write("</div>")
	self.finish()
Example #4
def full_music_scan(full_reset):
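    # Full library rescan inside one DB transaction: mark every song unscanned,
    # walk all directories, then disable songs the scan no longer found.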
    _common_init()
    db.connect()
    cache.connect()
    db.c.start_transaction()

    global immediate_art
    immediate_art = False

    try:
        if full_reset:
            db.c.update("UPDATE r4_songs SET song_file_mtime = 0")
        db.c.update("UPDATE r4_songs SET song_scanned = FALSE")

        _scan_all_directories()

        # This procedure is slow but steady and easy to use.
        dead_songs = db.c.fetch_list(
            "SELECT song_id FROM r4_songs WHERE song_scanned = FALSE AND song_verified = TRUE"
        )
        for song_id in dead_songs:
            song = playlist.Song.load_from_id(song_id)
            song.disable()

        _process_album_art_queue(on_screen=True)
        db.c.commit()
    except:
        db.c.rollback()
        raise
Example #5
def full_music_scan(full_reset):
	_common_init()
	db.connect()
	cache.connect()
	db.c.start_transaction()

	global immediate_art
	immediate_art = False

	try:
		if full_reset:
			db.c.update("UPDATE r4_songs SET song_file_mtime = 0")
		db.c.update("UPDATE r4_songs SET song_scanned = FALSE")

		_scan_all_directories()

		# This procedure is slow but steady and easy to use.
		dead_songs = db.c.fetch_list("SELECT song_id FROM r4_songs WHERE song_scanned = FALSE AND song_verified = TRUE")
		for song_id in dead_songs:
			song = playlist.Song.load_from_id(song_id)
			song.disable()

		_process_album_art_queue(on_screen=True)
		db.c.commit()
	except:
		db.c.rollback()
		raise
Example #6
    def _listen(self, sid):
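        # Backend worker: write a PID file, connect to DB/cache/zeromq, and serve
        # /advance/<station id> on 127.0.0.1 until the IOLoop is stopped.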
        pid = os.getpid()
        pid_file = open(
            "%s/backend_%s.pid" % (
                config.get_directory("pid_dir"),
                config.station_id_friendly[sid].lower(),
            ),
            "w",
        )
        pid_file.write(str(pid))
        pid_file.close()

        db.connect()
        cache.connect()
        zeromq.init_pub()
        log.init(
            "%s/rw_%s.log" % (
                config.get_directory("log_dir"),
                config.station_id_friendly[sid].lower(),
            ),
            config.get("log_level"),
        )
        memory_trace.setup(config.station_id_friendly[sid].lower())

        if config.test_mode:
            playlist.remove_all_locks(sid)

        # (r"/refresh/([0-9]+)", RefreshScheduleRequest)
        app = tornado.web.Application(
            [
                (r"/advance/([0-9]+)", AdvanceScheduleRequest),
            ],
            debug=(config.test_mode or config.get("developer_mode")),
        )

        port = int(config.get("backend_port")) + sid
        server = tornado.httpserver.HTTPServer(app)
        server.listen(port, address="127.0.0.1")

        for station_id in config.station_ids:
            playlist.prepare_cooldown_algorithm(station_id)
        schedule.load()
        log.debug(
            "start",
            "Backend server started, station %s port %s, ready to go." %
            (config.station_id_friendly[sid], port),
        )

        ioloop = tornado.ioloop.IOLoop.instance()
        try:
            ioloop.start()
        finally:
            ioloop.stop()
            server.stop()
            db.close()
            log.info("stop", "Backend has been shutdown.")
            log.close()
Example #7
    def _handle_event(self, event):
        try:
            if hasattr(event, "src_path"
                       ) and event.src_path and check_file_is_in_directory(
                           event.src_path, self.root_directory):
                if _is_bad_extension(event.src_path):
                    pass
                elif not os.path.isdir(event.src_path):
                    log.debug(
                        "scan_event", "%s src_path for file %s" %
                        (event.event_type, event.src_path))
                    if _is_image(event.src_path) and (
                            event.event_type == 'deleted'
                            or event.event_type == 'moved'):
                        pass
                    else:
                        self._handle_file(event.src_path)
                else:
                    log.debug(
                        "scan_event", "%s src_path for dir %s" %
                        (event.event_type, event.src_path))
                    self._handle_directory(event.src_path)

            if hasattr(event, "dest_path"
                       ) and event.dest_path and check_file_is_in_directory(
                           event.dest_path, self.root_directory):
                if _is_bad_extension(event.dest_path):
                    pass
                elif not os.path.isdir(event.dest_path):
                    log.debug(
                        "scan_event", "%s dest_path for file %s" %
                        (event.event_type, event.dest_path))
                    if _is_image(event.dest_path) and (event.event_type
                                                       == 'deleted'):
                        pass
                    else:
                        self._handle_file(event.dest_path)
                else:
                    log.debug(
                        "scan_event", "%s dest_path for dir %s" %
                        (event.event_type, event.dest_path))
                    self._handle_directory(event.dest_path)
        except Exception as xception:
            _add_scan_error(self.root_directory, xception)
            log.critical(
                "scan_event",
                "Exception occurred - reconnecting to the database just in case."
            )
            db.close()
            db.connect()
Example #8
File: server.py  Project: Siqo53/rainwave
	def get(self, sid):	#pylint: disable=W0221
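		# Advance endpoint: move the station's schedule forward and write the next
		# file to play (liquidsoap-annotated when enabled), re-using the previous
		# output when polled again before the socket timeout clears.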
		self.success = False
		self.sid = None
		if int(sid) in config.station_ids:
			self.sid = int(sid)
		else:
			return

		if cache.get_station(self.sid, "backend_paused") and cache.get_station(self.sid, "backend_pause_extend"):
			self.write(self._get_pause_file())
			cache.set_station(self.sid, "backend_pause_extend", False)
			cache.set_station(self.sid, "backend_paused_playing", True)
			return
		else:
			cache.set_station(self.sid, "backend_pause_extend", False)
			cache.set_station(self.sid, "backend_paused", False)
			cache.set_station(self.sid, "backend_paused_playing", False)

		# This is only safe when each instance serves exactly one station (or one
		# process serves every station); pinging an arbitrary instance for an
		# arbitrary station would break it here.
		if cache.get_station(self.sid, "get_next_socket_timeout") and sid_output[self.sid]:
			log.warn("backend", "Using previous output to prevent flooding.")
			self.write(sid_output[self.sid])
			sid_output[self.sid] = None
			self.success = True
		else:
			try:
				schedule.advance_station(self.sid)
			except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
				log.warn("backend", e.diag.message_primary)
				db.close()
				db.connect()
				raise
			except psycopg2.extensions.TransactionRollbackError as e:
				log.warn("backend", "Database transaction deadlock.  Re-opening database and setting retry timeout.")
				db.close()
				db.connect()
				raise

			to_send = None
			if not config.get("liquidsoap_annotations"):
				to_send = schedule.get_advancing_file(self.sid)
			else:
				to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
			sid_output[self.sid] = to_send
			self.success = True
			if not cache.get_station(self.sid, "get_next_socket_timeout"):
				self.write(to_send)
Example #9
    def get(self, sid):
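        # Advance endpoint variant with DJ support: while the station is paused it
        # serves the pause file and records a DJ heartbeat instead of advancing
        # the schedule.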
        self.success = False
        self.sid = None
        if int(sid) in config.station_ids:
            self.sid = int(sid)
        else:
            return

        if cache.get_station(self.sid, "backend_paused"):
            if not cache.get_station(self.sid, "dj_heartbeat_start"):
                log.debug("dj", "Setting server start heatbeat.")
                cache.set_station(self.sid, "dj_heartbeat_start", timestamp())
            self.write(self._get_pause_file())
            schedule.set_upnext_crossfade(self.sid, False)
            cache.set_station(self.sid, "backend_paused_playing", True)
            sync_to_front.sync_frontend_dj(self.sid)
            return
        else:
            cache.set_station(self.sid, "dj_heartbeat_start", False)
            cache.set_station(self.sid, "backend_paused", False)
            cache.set_station(self.sid, "backend_paused_playing", False)

        try:
            schedule.advance_station(self.sid)
        except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
            log.warn("backend", e.diag.message_primary)
            db.close()
            db.connect()
            raise
        except psycopg2.extensions.TransactionRollbackError as e:
            log.warn(
                "backend",
                "Database transaction deadlock.  Re-opening database and setting retry timeout.",
            )
            db.close()
            db.connect()
            raise

        to_send = None
        if not config.get("liquidsoap_annotations"):
            to_send = schedule.get_advancing_file(self.sid)
        else:
            to_send = self._get_annotated(
                schedule.get_advancing_event(self.sid))
        self.success = True
        if not cache.get_station(self.sid, "get_next_socket_timeout"):
            self.write(to_send)
Example #10
	def get(self, sid):	#pylint: disable=W0221
		self.success = False
		self.sid = None
		if int(sid) in config.station_ids:
			self.sid = int(sid)
		else:
			return

		if cache.get_station(self.sid, "backend_paused"):
			if not cache.get_station(self.sid, "dj_heartbeat_start"):
				log.debug("dj", "Setting server start heatbeat.")
				cache.set_station(self.sid, "dj_heartbeat_start", timestamp())
			self.write(self._get_pause_file())
			schedule.set_upnext_crossfade(self.sid, False)
			cache.set_station(self.sid, "backend_paused_playing", True)
			sync_to_front.sync_frontend_dj(self.sid)
			return
		else:
			cache.set_station(self.sid, "dj_heartbeat_start", False)
			cache.set_station(self.sid, "backend_paused", False)
			cache.set_station(self.sid, "backend_paused_playing", False)

		try:
			schedule.advance_station(self.sid)
		except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
			log.warn("backend", e.diag.message_primary)
			db.close()
			db.connect()
			raise
		except psycopg2.extensions.TransactionRollbackError as e:
			log.warn("backend", "Database transaction deadlock.  Re-opening database and setting retry timeout.")
			db.close()
			db.connect()
			raise

		to_send = None
		if not config.get("liquidsoap_annotations"):
			to_send = schedule.get_advancing_file(self.sid)
		else:
			to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
		self.success = True
		if not cache.get_station(self.sid, "get_next_socket_timeout"):
			self.write(to_send)
Example #11
	def get(self, sid):
		self.success = False
		self.sid = None
		if int(sid) in config.station_ids:
			self.sid = int(sid)
		else:
			return

		# This is only safe when each instance serves exactly one station (or one
		# process serves every station); pinging an arbitrary instance for an
		# arbitrary station would break it here.
		if cache.get_station(self.sid, "get_next_socket_timeout") and sid_output[self.sid]:
			log.warn("backend", "Using previous output to prevent flooding.")
			self.write(sid_output[self.sid])
			sid_output[self.sid] = None
			self.success = True
		else:
			try:
				schedule.advance_station(self.sid)
			except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
				log.warn("backend", e.diag.message_primary)
				db.close()
				db.connect()
				raise
			except psycopg2.extensions.TransactionRollbackError as e:
				log.warn("backend", "Database transaction deadlock.  Re-opening database and setting retry timeout.")
				db.close()
				db.connect()
				raise

			to_send = None
			if not config.get("liquidsoap_annotations"):
				to_send = schedule.get_advancing_file(self.sid)
			else:
				to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
			sid_output[self.sid] = to_send
			self.success = True
			if not cache.get_station(self.sid, "get_next_socket_timeout"):
				self.write(to_send)
Example #12
	def _listen(self, sid):
		pid = os.getpid()
		pid_file = open("%s/backend_%s.pid" % (config.get_directory("pid_dir"), config.station_id_friendly[sid].lower()), 'w')
		pid_file.write(str(pid))
		pid_file.close()

		db.connect()
		cache.connect()
		zeromq.init_pub()
		log.init("%s/rw_%s.log" % (config.get_directory("log_dir"), config.station_id_friendly[sid].lower()), config.get("log_level"))
		memory_trace.setup(config.station_id_friendly[sid].lower())

		if config.test_mode:
			playlist.remove_all_locks(sid)

		# (r"/refresh/([0-9]+)", RefreshScheduleRequest)
		app = tornado.web.Application([
			(r"/advance/([0-9]+)", AdvanceScheduleRequest),
			], debug=(config.test_mode or config.get("developer_mode")))

		port = int(config.get("backend_port")) + sid
		server = tornado.httpserver.HTTPServer(app)
		server.listen(port, address='127.0.0.1')

		for station_id in config.station_ids:
			playlist.prepare_cooldown_algorithm(station_id)
		schedule.load()
		log.debug("start", "Backend server started, station %s port %s, ready to go." % (config.station_id_friendly[sid], port))

		ioloop = tornado.ioloop.IOLoop.instance()
		try:
			ioloop.start()
		finally:
			ioloop.stop()
			server.stop()
			db.close()
			log.info("stop", "Backend has been shutdown.")
			log.close()
Example #13
def find_relatives(word):
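	# Look up a word in a WordNet-style relational database: gather its direct
	# word-to-word relations and the relations of every synset it belongs to,
	# returning a dict of results or None when the word is unknown.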
	conn = connect()
	word_id = None
	syn_id = None
	word_relatives = {}
	cur = conn.cursor()
	cur.execute( "Select id from words where word_value = %s" ,[str(word)])
	result = cur.fetchone()
	if result is not None:
		word_id = result[0]
	if word_id:
		word_relatives['word'] = word
		cur.execute( "Select wid_2,sid_2,relation from word_word where wid_1 = %s" ,[word_id])
		result = cur.fetchall()
		cur.execute( "Select wid_1,sid_1,relation from word_word where wid_2 = %s" ,[word_id])
		result += cur.fetchall()
		word_relatives['direct_relations'] = []
		for record in result:
			cur.execute( "Select word_value from words where id = %s" ,[str(record[0])])
			related = cur.fetchone()
			if related:
				relation = {'word':related[0], 'sense_id':record[1], 'relation':record[2]}
				word_relatives['direct_relations'].append(relation)

		word_relatives['synsets'] = []
		cur.execute( "Select sid from syn_word where wid = %s" ,[word_id])
		result = cur.fetchall()
		for record in result:
			synset_id = record[0]
			synset = {'id':synset_id,'relations':[]}
			cur.execute( "Select id_2,relation from synset_synset where id_1 = %s" ,[synset_id])
			relatives = cur.fetchall()
			cur.execute( "Select id_1,relation from synset_synset where id_2 = %s" ,[synset_id])
			relatives += cur.fetchall()
			for relative in relatives:
				synset_id = relative[0]
				cur.execute( "Select word_value from syn_word join words on syn_word.wid = words.id where sid = %s" ,[synset_id])
				relation = {'words':[],'relation':relative[1]}
				for word in cur.fetchall():
					relation['words'].append(word[0])
				synset['relations'].append(relation)
			word_relatives['synsets'].append(synset)
	if len(word_relatives) > 0:
		return word_relatives
	else:
		print "theres nothing found"
		return None
Example #14
    def start(self, *args, **kwargs):
        super(RWObserver, self).start(*args, **kwargs)
        db.connect()
        cache.connect()
Example #15
	def _listen(self, task_id):
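		# API worker: bind one HTTP port per task_id, connect to the DB and cache,
		# register all request handlers (plus API-friendly 404 catch-alls), and run
		# the Tornado IOLoop until shutdown.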
		zeromq.init_pub()
		zeromq.init_sub()

		import api_requests.sync
		api_requests.sync.init()

		# task_ids start at zero, so we gobble up ports starting at the base port and work up
		port_no = int(config.get("api_base_port")) + task_id

		pid = os.getpid()
		pid_file = open("%s/api_%s.pid" % (config.get_directory("pid_dir"), port_no), 'w')
		pid_file.write(str(pid))
		pid_file.close()

		# Log according to configured directory and port # we're operating on
		log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"), port_no)
		if config.test_mode and os.path.exists(log_file):
			os.remove(log_file)
		log.init(log_file, config.get("log_level"))
		log.debug("start", "Server booting, port %s." % port_no)
		db.connect()
		cache.connect()
		memory_trace.setup(port_no)

		api.locale.load_translations()
		api.locale.compile_static_language_files()

		if config.get("web_developer_mode"):
			for station_id in config.station_ids:
				playlist.prepare_cooldown_algorithm(station_id)
			# automatically loads every station ID and fills things in if there's no data
			schedule.load()
			for station_id in config.station_ids:
				schedule.update_memcache(station_id)
				rainwave.request.update_line(station_id)
				rainwave.request.update_expire_times()
				cache.set_station(station_id, "backend_ok", True)
				cache.set_station(station_id, "backend_message", "OK")
				cache.set_station(station_id, "get_next_socket_timeout", False)

		for sid in config.station_ids:
			cache.update_local_cache_for_sid(sid)
			playlist.prepare_cooldown_algorithm(sid)
			playlist.update_num_songs()

		# If we're not in developer mode, remove development-related URLs
		if not config.get("developer_mode"):
			i = 0
			while (i < len(request_classes)):
				if request_classes[i][0].find("/test/") != -1:
					request_classes.pop(i)
					i = i - 1
				i = i + 1

		# Make sure all other errors get handled in an API-friendly way
		request_classes.append((r"/api/.*", api.web.Error404Handler))
		request_classes.append((r"/api4/.*", api.web.Error404Handler))
		request_classes.append((r".*", api.web.HTMLError404Handler))

		# Initialize the help (rather than it scan all URL handlers every time someone hits it)
		api.help.sectionize_requests()

		# Fire ze missiles!
		global app
		app = tornado.web.Application(request_classes,
			debug=(config.test_mode or config.get("developer_mode")),
			template_path=os.path.join(os.path.dirname(__file__), "../templates"),
			static_path=os.path.join(os.path.dirname(__file__), "../static"),
			autoescape=None)
		http_server = tornado.httpserver.HTTPServer(app, xheaders = True)
		http_server.listen(port_no)

		if config.get("api_user") and config.get("api_group"):
			chuser.change_user(config.get("api_user"), config.get("api_group"))

		for request in request_classes:
			log.debug("start", "   Handler: %s" % str(request))
		log.info("start", "API server on port %s ready to go." % port_no)
		self.ioloop = tornado.ioloop.IOLoop.instance()

		try:
			self.ioloop.start()
		finally:
			self.ioloop.stop()
			http_server.stop()
			db.close()
			log.info("stop", "Server has been shutdown.")
			log.close()
Example #16
    def write_error(self, status_code, **kwargs):
        if self._output_array:
            self._output = []
        else:
            if self._output and "message_id" in self._output:
                self._output = {
                    "message_id": self._output["message_id"],
                }
                self._output[self.return_name] = {
                    "tl_key": "internal_error",
                    "text": self.locale.translate("internal_error"),
                    "status": 500,
                    "success": False,
                }
            else:
                self._output = {}
        if "exc_info" in kwargs:
            exc = kwargs["exc_info"][1]

            # Restart DB on a connection error if that's what we're handling
            if isinstance(
                    exc, (psycopg2.OperationalError, psycopg2.InterfaceError)):
                try:
                    db.close()
                    db.connect()
                    self.append(
                        "error",
                        {
                            "code": 500,
                            "tl_key": "db_error_retry",
                            "text": self.locale.translate("db_error_retry"),
                        },
                    )
                except Exception:
                    self.append(
                        "error",
                        {
                            "code": 500,
                            "tl_key": "db_error_permanent",
                            "text":
                            self.locale.translate("db_error_permanent"),
                        },
                    )
            elif isinstance(exc, APIException):
                exc.localize(self.locale)
                self.append(self.return_name, exc.jsonable())
            elif isinstance(exc, SongNonExistent):
                self.append(
                    "error",
                    {
                        "code": status_code,
                        "tl_key": "song_does_not_exist",
                        "text": self.locale.translate("song_does_not_exist"),
                    },
                )
            else:
                self.append(
                    "error",
                    {
                        "code": status_code,
                        "tl_key": "internal_error",
                        "text": repr(exc),
                    },
                )
                self.append(
                    "traceback",
                    {
                        "traceback":
                        traceback.format_exception(
                            kwargs["exc_info"][0],
                            kwargs["exc_info"][1],
                            kwargs["exc_info"][2],
                        )
                    },
                )
        else:
            self.append(
                "error",
                {
                    "tl_key": "internal_error",
                    "text": self.locale.translate("internal_error"),
                },
            )
        if not kwargs.get("no_finish"):
            self.finish()
Example #17
    def _listen(self, task_id):
        import api_requests.sync
        api_requests.sync.init()

        # task_ids start at zero, so we gobble up ports starting at the base port and work up
        port_no = int(config.get("api_base_port")) + task_id

        pid = os.getpid()
        pid_file = open(
            "%s/api_%s.pid" % (config.get_directory("pid_dir"), port_no), 'w')
        pid_file.write(str(pid))
        pid_file.close()

        # Log according to configured directory and port # we're operating on
        log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"),
                                         port_no)
        if config.test_mode and os.path.exists(log_file):
            os.remove(log_file)
        log.init(log_file, config.get("log_level"))
        log.debug("start", "Server booting, port %s." % port_no)
        db.connect()
        cache.connect()
        memory_trace.setup(port_no)

        if config.get("web_developer_mode"):
            for station_id in config.station_ids:
                playlist.prepare_cooldown_algorithm(station_id)
            # automatically loads every station ID and fills things in if there's no data
            schedule.load()
            for station_id in config.station_ids:
                schedule.update_memcache(station_id)
                rainwave.request.update_line(station_id)
                rainwave.request.update_expire_times()
                cache.set_station(station_id, "backend_ok", True)
                cache.set_station(station_id, "backend_message", "OK")
                cache.set_station(station_id, "get_next_socket_timeout", False)

        for sid in config.station_ids:
            cache.update_local_cache_for_sid(sid)
            playlist.prepare_cooldown_algorithm(sid)
            playlist.update_num_songs()

        # If we're not in developer mode, remove development-related URLs
        if not config.get("developer_mode"):
            i = 0
            while (i < len(request_classes)):
                if request_classes[i][0].find("/test/") != -1:
                    request_classes.pop(i)
                    i = i - 1
                i = i + 1

        # Make sure all other errors get handled in an API-friendly way
        request_classes.append((r"/api/.*", api.web.Error404Handler))
        request_classes.append((r"/api4/.*", api.web.Error404Handler))
        request_classes.append((r".*", api.web.HTMLError404Handler))

        # Initialize the help (rather than it scan all URL handlers every time someone hits it)
        api.help.sectionize_requests()

        # Fire ze missiles!
        app = tornado.web.Application(
            request_classes,
            debug=(config.test_mode or config.get("developer_mode")),
            template_path=os.path.join(os.path.dirname(__file__),
                                       "../templates"),
            static_path=os.path.join(os.path.dirname(__file__), "../static"),
            autoescape=None)
        http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
        http_server.listen(port_no)

        if config.get("api_user") and config.get("api_group"):
            chuser.change_user(config.get("api_user"), config.get("api_group"))

        if task_id == 0:
            buildtools.bake_css()
            buildtools.bake_js()
            buildtools.bake_beta_js()

        for request in request_classes:
            log.debug("start", "   Handler: %s" % str(request))
        log.info("start", "API server on port %s ready to go." % port_no)
        self.ioloop = tornado.ioloop.IOLoop.instance()

        try:
            self.ioloop.start()
        finally:
            self.ioloop.stop()
            http_server.stop()
            db.close()
            log.info("stop", "Server has been shutdown.")
            log.close()
Example #18
#!/usr/bin/python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import progressbar
from libs.db import connect
if __name__ == "__main__":
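	# One-off import script: read XMLFILES/words.xml and insert each word's id,
	# value, part of speech and AVA info into the "words" table, showing a
	# progress bar while it runs.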
	conn = connect()

	wordrepo = ET.parse('XMLFILES/words.xml')
	root = wordrepo.getroot()
	bar = progressbar.ProgressBar(maxval=len(root), widgets=[
		'Words Phase 1 [', progressbar.Timer(), '] ',
		progressbar.Bar(),
		' (', progressbar.ETA(), ') ',
	])
	bar.start()
	for idx, word in enumerate(root):
		bar.update(idx)
		cur = conn.cursor()
		if word is not None:
			word_id = word.find("./WID")
			word_value = word.find("./WORDVALUE")
			pos = word.find("./POS")
			ava = word.find("./AVAINFO")
			if  word_id is not None and word_value is not None and pos is not None and ava is not None :
				cur.execute("INSERT INTO words (id, word_value, pos, ava) VALUES (%s, %s, %s, %s)",(word_id.text, word_value.text, pos.text, ava.text))
	conn.commit()
Example #19
import argparse

from libs import config
from libs import db
from libs import cache
from rainwave.playlist_objects.album import Album
from rainwave.playlist_objects.album import clear_updated_albums

if __name__ == "__main__":
	parser = argparse.ArgumentParser(description="Rainwave DB migration script for using dates from ID3 tags.")
	parser.add_argument("--config", default=None)
	args = parser.parse_args()
	config.load(args.config)

	for sid in config.station_ids:
		clear_updated_albums(sid)

	db.connect()
	cache.connect()

	print "Adding columns to database..."

	db.c.update("ALTER TABLE r4_albums ADD album_year SMALLINT")
	db.c.update("ALTER TABLE r4_songs ADD song_track_number SMALLINT")
	db.c.update("ALTER TABLE r4_songs ADD song_disc_number SMALLINT")
	db.c.update("ALTER TABLE r4_songs ADD song_year SMALLINT")

	for album_id in db.c.fetch_list("SELECT album_id FROM r4_albums ORDER BY album_id"):
		a = Album.load_from_id(album_id)
		# Will update the album year
		a.reconcile_sids()

	print "Done"
Example #20
	def start(self, *args, **kwargs):
		super(RWObserver, self).start(*args, **kwargs)
		db.connect()
		cache.connect()
Example #21
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

from libs import config
from libs import db
from libs import cache

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=
        "Rainwave DB migration script for adding a listener key for each API key."
    )
    parser.add_argument("--config", default=None)
    args = parser.parse_args()
    config.load(args.config)

    db.connect()
    cache.connect()

    print "Making changes..."

    db.c.update("ALTER TABLE r4_api_keys DROP COLUMN api_ip")
    db.c.update("ALTER TABLE r4_api_keys ADD COLUMN api_key_listen_key TEXT")
    db.c.update("ALTER TABLE r4_listeners ADD listener_key TEXT")
    db.c.create_idx("r4_api_keys", "api_key")

    for key in db.c.fetch_list("SELECT api_key FROM r4_api_keys"):
        listen_key = ''.join(
            random.choice(string.ascii_uppercase + string.digits +
                          string.ascii_lowercase) for x in range(10))
        db.c.update(
            "UPDATE r4_api_keys SET api_key_listen_key = %s WHERE api_key = %s",
Example #22
    def _listen(self, task_id):
        zeromq.init_pub()
        zeromq.init_sub()

        import api_requests.sync

        api_requests.sync.init()

        # task_ids start at zero, so we gobble up ports starting at the base port and work up
        port_no = int(config.get("api_base_port")) + task_id

        # Log according to configured directory and port # we're operating on
        log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"),
                                         port_no)
        log.init(log_file, config.get("log_level"))
        log.debug("start", "Server booting, port %s." % port_no)
        db.connect(auto_retry=False, retry_only_this_time=True)
        cache.connect()
        memory_trace.setup(port_no)

        api.locale.load_translations()
        api.locale.compile_static_language_files()

        if config.get("developer_mode"):
            for station_id in config.station_ids:
                playlist.prepare_cooldown_algorithm(station_id)
            # automatically loads every station ID and fills things in if there's no data
            schedule.load()
            for station_id in config.station_ids:
                schedule.update_memcache(station_id)
                rainwave.request.update_line(station_id)
                rainwave.request.update_expire_times()
                cache.set_station(station_id, "backend_ok", True)
                cache.set_station(station_id, "backend_message", "OK")
                cache.set_station(station_id, "get_next_socket_timeout", False)

        for sid in config.station_ids:
            cache.update_local_cache_for_sid(sid)
            playlist.prepare_cooldown_algorithm(sid)
            playlist.update_num_songs()

        # If we're not in developer mode, remove development-related URLs
        if not config.get("developer_mode"):
            i = 0
            while i < len(request_classes):
                if request_classes[i][0].find("/test/") != -1:
                    request_classes.pop(i)
                    i = i - 1
                i = i + 1

        # Make sure all other errors get handled in an API-friendly way
        request_classes.append((r"/api/.*", api.web.Error404Handler))
        request_classes.append((r"/api4/.*", api.web.Error404Handler))
        request_classes.append((r".*", api.web.HTMLError404Handler))

        # Initialize the help (rather than it scan all URL handlers every time someone hits it)
        api.help.sectionize_requests()

        # Fire ze missiles!
        global app
        debug = config.get("developer_mode")
        app = tornado.web.Application(
            request_classes,
            debug=debug,
            template_path=os.path.join(os.path.dirname(__file__),
                                       "../templates"),
            static_path=os.path.join(os.path.dirname(__file__), "../static"),
            autoescape=None,
            autoreload=debug,
            serve_traceback=debug,
        )
        http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
        http_server.listen(port_no)

        for request in request_classes:
            log.debug("start", "   Handler: %s" % str(request))
        log.info("start", "API server on port %s ready to go." % port_no)
        self.ioloop = tornado.ioloop.IOLoop.instance()

        db_keepalive = tornado.ioloop.PeriodicCallback(db.connection_keepalive,
                                                       10000)
        db_keepalive.start()

        try:
            self.ioloop.start()
        finally:
            self.ioloop.stop()
            http_server.stop()
            db.close()
            log.info("stop", "Server has been shutdown.")
            log.close()