def update_rating(self):
    """Recalculate and store this album's rating on every station it is on.

    For each station (sid) the album is assigned to, aggregates active users'
    ratings across the album's songs, maps the 0..1 score onto the site's
    1..5 scale, and persists the result to r4_album_sid.
    """
    for sid in db.c.fetch_list("SELECT sid FROM r4_album_sid WHERE album_id = %s", (self.id,)):
        ratings = db.c.fetch_all(
            "SELECT r4_song_ratings.song_rating_user AS rating, COUNT(r4_song_ratings.user_id) AS count "
            "FROM r4_songs "
            "JOIN r4_song_sid ON (r4_songs.song_id = r4_song_sid.song_id AND r4_song_sid.sid = %s AND r4_song_sid.song_exists = TRUE) "
            "JOIN r4_song_ratings ON (r4_song_sid.song_id = r4_song_ratings.song_id) "
            "JOIN phpbb_users ON (r4_song_ratings.user_id = phpbb_users.user_id AND phpbb_users.radio_inactive = FALSE) "
            "WHERE r4_songs.album_id = %s "
            "GROUP BY rating ",
            (sid, self.id),
        )
        (points, potential_points) = rating.rating_calculator(ratings)
        # Bug fix: log category was "song_rating" (copy-paste); this is an
        # album-level update and the later debug call already uses "album_rating".
        log.debug(
            "album_rating",
            "%s album ratings for %s (%s)"
            % (potential_points, self.data["name"], config.station_id_friendly[sid]),
        )
        if points > 0 and potential_points > config.get("rating_threshold_for_calc"):
            # Map the 0..1 score into the 1..5 rating range.
            self.rating_precise = ((points / potential_points) * 4) + 1
            self.data["rating"] = round(self.rating_precise, 1)
            self.data["rating_count"] = potential_points
            log.debug("album_rating", "%s new rating for %s" % (self.rating_precise, self.data["name"]))
            db.c.update(
                "UPDATE r4_album_sid SET album_rating = %s, album_rating_count = %s WHERE album_id = %s AND sid = %s",
                (self.rating_precise, potential_points, self.id, sid),
            )
def descargarManga(codigoManga=None, parametros=ParamDescarga):
    """Download all non-excluded chapters of the manga identified by codigoManga.

    Chapters listed in the manga's exclusions file are skipped.  A summary of
    each downloaded chapter is appended to a dated download log file.
    Returns the manga object (with its chapter list populated).
    """
    log.debug(codigoManga)
    manga = config.mangas[codigoManga]
    lstExclusions = exclusionFiles(manga)
    log.info(" exclusions.txt == %s" % lstExclusions)
    MangaGet.lstCapitulos(manga, parametros)
    # TODO: the exclusion handling is still not right and needs more work
    # (carried over from the original note).
    listCapitulos = [capitulo for capitulo in manga.capitulos if capitulo.code not in lstExclusions]
    fileTime = time.strftime("%Y%m%d")
    fileDownload = MangaFile.getMangaDownloadFolder(manga.uCode, "t%s_%s" % (fileTime, config.CONST_DOWNLOAD_FILE))
    for capitulo in listCapitulos:
        MangaFile.crearDirectorio(capitulo, manga)
        capitulo = MangaGet.lstImagenes(manga, capitulo)
        totalImgCarpeta = MangaFile.totalArchivosCarpeta(capitulo)
        if capitulo.length > totalImgCarpeta:
            log.debug("Descargando Imágenes del capítulo :: %s" % capitulo.code)
            # Use context managers so the log file is closed even if a write fails.
            with open(fileDownload, 'a') as file_:
                file_.write("====== Resumen C%s ====== \n" % (capitulo.code))
            descargarImagenesCapitulo(manga, capitulo, fileDownload)
            totalImgCarpeta = MangaFile.totalArchivosCarpeta(capitulo)
            with open(fileDownload, 'a') as file_:
                file_.write("C%s \t Total:%s \t Descargados:%s \n" % (capitulo.code, capitulo.length, totalImgCarpeta))
        else:
            log.error("Todos los archivos del capitulo %s ya han sido descargados" % capitulo.title)
    return manga
def monitor():
    """Run the file-change monitor: watch monitor_dir via pyinotify and
    dispatch events to FileEventHandler until shutdown.

    Writes a scanner.pid file first so external tooling can signal us.
    The watch loop is rebuilt whenever a NewDirectoryException is raised
    (a new directory appeared and must join the watch set).
    """
    _common_init()
    pid = os.getpid()
    # Record our PID for supervisor/shutdown scripts.
    pid_file = open("%s/scanner.pid" % config.get_directory("pid_dir"), 'w')
    pid_file.write(str(pid))
    pid_file.close()
    # Filesystem events of interest; IN_EXCL_UNLINK suppresses events for
    # files unlinked while still held open.
    mask = (
        pyinotify.IN_CREATE
        | pyinotify.IN_CLOSE_WRITE
        | pyinotify.IN_DELETE
        | pyinotify.IN_MOVED_TO
        | pyinotify.IN_MOVED_FROM
        | pyinotify.IN_EXCL_UNLINK
    )
    try:
        go = True
        while go:
            try:
                log.info("scan", "File monitor started.")
                wm = pyinotify.WatchManager()
                wm.add_watch(str(config.get("monitor_dir")), mask, rec=True)
                pyinotify.Notifier(wm, FileEventHandler()).loop()
                go = False  # loop() returned normally: clean shutdown
            except NewDirectoryException:
                # A directory appeared; rebuild the watch from scratch.
                log.debug("scan", "New directory added, restarting watch.")
            finally:
                # wm may be unbound if WatchManager() itself raised; the bare
                # except below also swallows that NameError.
                try:
                    wm.close()
                except:
                    pass
    finally:
        log.info("scan", "File monitor shutdown.")
def next(self):
    """Pop the next URL pack off the queue, sign its URL, and tag it with
    the current pipe priority before handing it back to the caller."""
    pack = self.queueGet()
    signed = signature.sign(pack.url, signature.aweme)
    pack.url = signed['url']
    pack.priority = self.getPipePriority()
    log.debug("DouyinVideoDetailProvider next: {}".format(pack))
    return pack
def get_random_song(sid):
    """
    Fetch a random song, abiding by all election block, request block, and
    availability rules.  Falls back to get_random_song_ignore_requests on
    failure (empty pool).
    """
    sql_query = ("FROM r4_song_sid "
                 "JOIN r4_songs USING (song_id) "
                 "JOIN r4_album_sid ON (r4_album_sid.album_id = r4_songs.album_id AND r4_album_sid.sid = r4_song_sid.sid) "
                 "WHERE r4_song_sid.sid = %s "
                 "AND song_exists = TRUE "
                 "AND song_cool = FALSE "
                 "AND song_request_only = FALSE "
                 "AND song_elec_blocked = FALSE "
                 "AND album_requests_pending IS NULL")
    pool_size = db.c.fetch_var("SELECT COUNT(song_id) " + sql_query, (sid,))
    log.info("song_select", "Song pool size (cooldown, blocks, requests): %s" % pool_size)
    if pool_size == 0:
        log.warn("song_select", "No songs available despite no timing rules.")
        log.debug("song_select", "Song select query: SELECT COUNT(song_id) " + (sql_query % (sid,)))
        return get_random_song_ignore_requests(sid)
    # Uniformly pick one row out of the eligible pool.
    chosen_offset = random.randint(1, pool_size) - 1
    song_id = db.c.fetch_var("SELECT song_id " + sql_query + " LIMIT 1 OFFSET %s", (sid, chosen_offset))
    return Song.load_from_id(song_id, sid)
def _do_user_update(self, session, updated_by_ip):
    """Refresh one session's user data after its throttle window expires.

    session: the long-poll/websocket session to refresh.
    updated_by_ip: True when triggered by an IP-level event, which may
    indicate an M3U playlist/IP mixup for a logged-in user.
    """
    # clear() might wipe out the timeouts for a bigger update (that includes user update anyway!)
    # don't bother updating again if that's already happened
    if not session in self.throttled:
        return
    del (self.throttled[session])
    try:
        # Warn only when the user was logged in and not tuned in before
        # the refresh...
        potential_mixup_warn = updated_by_ip and not session.user.is_anonymous() and not session.user.is_tunedin()
        session.refresh_user()
        # ...and is still not tuned in afterwards.
        if potential_mixup_warn and not session.user.is_tunedin():
            log.debug(
                "sync_update_ip",
                "Warning logged in user of potential M3U mixup at IP %s" % session.request.remote_ip)
            session.login_mixup_warn()
        else:
            session.update_user()
    except Exception as e:
        log.exception("sync", "Session failed to be updated during update_user.", e)
        # Best-effort close of the broken session; reuse `e` for context.
        try:
            session.rw_finish()
        except Exception:
            log.exception("sync", "Session failed finish() during update_user.", e)
def update_rating(self, skip_album_update=False):
    """Recompute this song's site-wide rating from active users' ratings
    and write it back to r4_songs; optionally cascade to album ratings."""
    rating_rows = db.c.fetch_all(
        "SELECT song_rating_user AS rating, COUNT(user_id) AS count FROM r4_song_ratings JOIN phpbb_users USING (user_id) WHERE song_id = %s AND radio_inactive = FALSE AND song_rating_user IS NOT NULL GROUP BY song_rating_user",
        (self.id,),
    )
    points, potential = rating.rating_calculator(rating_rows)
    log.debug("song_rating", "%s ratings for %s" % (potential, self.filename))
    if points > 0 and potential > config.get("rating_threshold_for_calc"):
        # Scale the 0..1 score into the site's 1..5 rating range.
        self.data["rating"] = ((points / potential) * 4) + 1
        self.data["rating_count"] = potential
        log.debug(
            "song_rating",
            "rating update: %s for %s" % (self.data["rating"], self.filename),
        )
        db.c.update(
            "UPDATE r4_songs SET song_rating = %s, song_rating_count = %s WHERE song_id = %s",
            (self.data["rating"], potential, self.id),
        )
    if not skip_album_update:
        for album in self.albums:
            album.update_rating()
def work(resp, stat):
    """Handle one spider response: decode its pipe/urlPack bookkeeping,
    parse the HTML body as JSON, and hand the result to the pipe's saver.

    On any failure the original message is re-queued for retry, or pushed
    to the trash list after 5 failed attempts.
    """
    pipe = None
    root = None
    urlPacker = None  # initialized so the error path cannot hit a NameError
    try:
        logDelayTime(resp)
        if checkResp(resp) == True:
            pipe = stat.getPipeByName(resp['trespassing_field']['pipe'])
            log.notice("got result of pipe: {}, result: {}".format(
                pipe.name, resp['result']))
            urlPacker = cPickle.loads(
                base64.b64decode(resp['trespassing_field']['urlPack']))
            root = json.loads(resp['html_body'])
            saveResult = pipe.saver.start(root, urlPacker)
            if not saveResult:
                raise RuntimeError("saver_error: pipe={}, resp={}".format(
                    pipe.name, resp))
            incrPipeSaverStatus(pipe.name, "ok")
    except Exception as e:
        traceback.print_exc()
        log.fatal("handle_spider_result_worker_err: error={}, resp={}".format(
            e, resp))
        # Retry bookkeeping needs both the pipe AND the decoded urlPacker.
        # Bug fix: the original only checked `pipe`, so a failure inside
        # cPickle.loads left urlPacker unbound and raised a NameError here.
        if pipe is not None and urlPacker is not None:
            try:
                msg = urlPacker.msg
                msg.retry = msg.retry + 1
                if msg.retry > 5:
                    log.debug("retry num > 5, push to trash")
                    pipe.pushToTrashList(base64.b64encode(cPickle.dumps(msg)))
                    incrPipeSaverStatus(pipe.name, "error")
                else:
                    log.debug("push to retry list {}".format(msg.retry))
                    pipe.pushToRetryList(base64.b64encode(cPickle.dumps(msg)))
            except Exception as e:
                log.fatal("unexcept_error_on_csrev_work", e)
def _do_ip_update(self, ip_address):
    """Refresh every session from ip_address once its throttle timer
    fires; anonymous users get a data refresh, logged-in users get warned
    about a possible anonymous/registered mixup."""
    timer = self.ip_update_timers.get(ip_address)
    if not timer:
        return
    del self.ip_update_timers[ip_address]
    for session in self.find_ip(ip_address):
        try:
            if session.user.is_anonymous():
                session.update_user()
                log.debug("sync_update_ip", "Updated IP %s" % session.request.remote_ip)
            else:
                log.debug(
                    "sync_update_ip",
                    "Warning logged in user of potential mixup at IP %s" % session.request.remote_ip)
                session.anon_registered_mixup_warn()
        except Exception as e:
            # A broken session gets closed (best effort) and dropped.
            try:
                session.finish()
            except:
                pass
            self.remove(session)
            log.exception(
                "sync", "Session failed to be updated during update_user.", e)
    self.clean()
def get_producer_at_time(sid, at_time):
    """Return the scheduled event producer active on station `sid` at
    `at_time` (epoch seconds), falling back to an election producer when
    nothing is scheduled, loading fails, or the producer has no events.
    """
    to_ret = None
    local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(at_time))
    time_ahead = int((at_time - timestamp()) / 60)  # minutes ahead, for log messages
    # The +20s fudge lets an event that is about to start be picked up early.
    sched_id = db.c.fetch_var(
        "SELECT sched_id "
        "FROM r4_schedule "
        "WHERE sid = %s AND sched_start <= %s AND sched_end > %s "
        "ORDER BY sched_id DESC "
        "LIMIT 1", (sid, at_time + 20, at_time))
    try:
        to_ret = events.event.BaseProducer.load_producer_by_id(sched_id)
        if to_ret:
            to_ret.start_producer()
    except Exception as e:
        log.warn(
            "get_producer",
            "Failed to obtain producer at time %s (%sm ahead)." % (local_time, time_ahead))
        log.exception(
            "get_producer",
            "Failed to get an appropriate producer at time %s (%sm ahead)." % (local_time, time_ahead), e)
    if not to_ret:
        # Nothing scheduled (or load failed): run a normal election instead.
        log.debug(
            "get_producer",
            "No producer at time %s (%sm ahead), defaulting to election." % (local_time, time_ahead))
        return election.ElectionProducer(sid)
    if not to_ret.has_next_event():
        log.warn(
            "get_producer",
            "Producer ID %s (type %s, %s) has no events." % (to_ret.id, to_ret.type, to_ret.name))
        return election.ElectionProducer(sid)
    return to_ret
def _scan_file(filename, sids):
    """Scan a single file: (re)load MP3s whose mtime changed, queue images
    as potential album art, and disable songs whose files have vanished.

    filename: absolute path of the file to examine.
    sids: station IDs the file's directory is mapped to.
    """
    global _album_art_queue
    global _art_only
    try:
        if _is_mp3(filename) and not _art_only:
            # Only scan the file if we don't have a previous mtime for it, or the mtime is different
            old_mtime = db.c.fetch_var(
                "SELECT song_file_mtime FROM r4_songs WHERE song_filename = %s AND song_verified = TRUE",
                (filename, ))
            if not old_mtime or old_mtime != os.stat(filename)[8]:
                playlist.Song.load_from_file(filename, sids)
            else:
                # mtime unchanged: just mark the row as seen by this scan pass.
                db.c.update(
                    "UPDATE r4_songs SET song_scanned = TRUE WHERE song_filename = %s",
                    (filename, ))
        elif _is_image(filename):
            _album_art_queue.append([filename, sids])
    # Bug fix: the tuple previously included WindowsError, which is undefined
    # off-Windows and would itself raise NameError; on Windows WindowsError is
    # an OSError subclass, so catching OSError/IOError covers both.
    except (OSError, IOError) as e:
        # errno 2 (ENOENT): file moved/deleted between directory walk and stat.
        if e.errno == 2:
            log.debug("scan", "File has been moved/deleted: %s" % filename)
            s = playlist.Song.load_from_deleted_file(filename)
            if s:
                s.disable()
        else:
            # Bug fix: the original passed the undefined name `xception` here,
            # raising NameError instead of recording the scan error.
            _add_scan_error(filename, e)
    except Exception as xception:
        _add_scan_error(filename, xception)
def get_next(sid):
    """Pop and fulfill the first valid request from station `sid`'s request
    line, returning the requested Song, or None if the line is empty or
    nobody has a playable request."""
    line = cache.get_station(sid, "request_line")
    if not line:
        return None
    song = None
    for pos in range(0, len(line)):
        if not line[pos]:
            pass  # ?!?!
        elif not line[pos]['song_id']:
            log.debug("request", "Passing on user %s since they have no valid first song." % line[pos]['username'])
        else:
            entry = line.pop(pos)
            song = playlist.Song.load_from_id(entry['song_id'], sid)
            log.debug("request", "Fulfilling %s's request for %s." % (entry['username'], song.filename))
            # Tag the song so the election system credits the requester.
            song.data['elec_request_user_id'] = entry['user_id']
            song.data['elec_request_username'] = entry['username']
            u = User(entry['user_id'])
            db.c.update("DELETE FROM r4_request_store WHERE user_id = %s AND song_id = %s", (u.id, entry['song_id']))
            u.remove_from_request_line()
            request_count = db.c.fetch_var("SELECT COUNT(*) FROM r4_request_history WHERE user_id = %s", (u.id,)) + 1
            # NOTE(review): this DELETE repeats the one above with the same
            # (song_id, user_id) pair and is a no-op — confirm intent.
            db.c.update("DELETE FROM r4_request_store WHERE song_id = %s AND user_id = %s", (song.id, u.id))
            db.c.update("INSERT INTO r4_request_history (user_id, song_id, request_wait_time, request_line_size, request_at_count, sid) "
                        "VALUES (%s, %s, %s, %s, %s, %s)",
                        (u.id, song.id, timestamp() - entry['line_wait_start'], len(line), request_count, sid))
            db.c.update("UPDATE phpbb_users SET radio_totalrequests = %s WHERE user_id = %s", (request_count, u.id))
            song.update_request_count(sid)
            break
    return song
def _real_worker(self, urlPack):
    """Submit one crawl request to cspub, retrying once on failure.

    Builds the POST payload and bypass metadata (including a pickled copy
    of the urlPack so the response handler can reconstruct it).  Returns
    True on successful submission, False after both attempts fail.
    """
    for _ in range(2):  # at most two submission attempts
        try:
            json_data = {}
            json_data['target_url'] = urlPack.url
            json_data['method'] = 'POST'
            json_data[
                'request_header'] = "Content-Type: application/x-www-form-urlencoded\r\nUser-Agent: kwai-android"
            json_data['post_data'] = mcpack.RAW(urlPack.form)
            bypass = urlPack.getExtra()
            bypass['submitTime'] = time.time()
            # Round-trip the urlPack through pickle+base64 so it survives
            # the external queue and comes back with the response.
            bypass['urlPack'] = base64.b64encode(
                cPickle.dumps(urlPack, protocol=-1))
            cspubutil.patch(json_data, bypass, urlPack=urlPack)
            failedList = cspubutil.send2cspub([json_data])
            #log.debug("send2cspub_data:{}".format(json_data))
            if len(failedList) > 0:
                log.fatal("send2cspub_error:{}".format(failedList))
            self.incrStatKey('sub2cspub')
            log.debug("submit2cspub: {}, bypass: {}".format(
                urlPack, bypass))
            return True
        except Exception as e:
            traceback.print_exc()
            log.fatal("crawlerkuaishou_cspubmodel_real_worker_error:{},{}".
                      format(e, urlPack))
    return False
def _process_line(line, sid): new_line = [] # user_positions has user_id as a key and position as the value, this is cached for quick lookups by API requests # so users know where they are in line user_positions = {} t = int(timestamp()) albums_with_requests = [] position = 1 # For each person for row in line: add_to_line = False u = User(row['user_id']) row['song_id'] = None # If their time is up, remove them and don't add them to the new line if row['line_expiry_tune_in'] and row['line_expiry_tune_in'] <= t: log.debug("request_line", "%s: Removed user ID %s from line for tune in timeout, expiry time %s current time %s" % (sid, u.id, row['line_expiry_tune_in'], t)) u.remove_from_request_line() else: tuned_in_sid = db.c.fetch_var("SELECT sid FROM r4_listeners WHERE user_id = %s AND sid = %s AND listener_purge = FALSE", (u.id, sid)) tuned_in = True if tuned_in_sid == sid else False if tuned_in: # Get their top song ID song_id = u.get_top_request_song_id(sid) # If they have no song and their line expiry has arrived, boot 'em if not song_id and row['line_expiry_election'] and (row['line_expiry_election'] <= t): log.debug("request_line", "%s: Removed user ID %s from line for election timeout, expiry time %s current time %s" % (sid, u.id, row['line_expiry_election'], t)) u.remove_from_request_line() # Give them more chances if they still have requests # They'll get added to the line of whatever station they're tuned in to (if any!) 
if u.has_requests(): u.put_in_request_line(u.get_tuned_in_sid()) # If they have no song, start the expiry countdown elif not song_id and not row['line_expiry_election']: row['line_expiry_election'] = t + 900 db.c.update("UPDATE r4_request_line SET line_expiry_election = %s WHERE user_id = %s", ((t + 900), row['user_id'])) add_to_line = True # Keep 'em in line else: if song_id: albums_with_requests.append(db.c.fetch_var("SELECT album_id FROM r4_songs WHERE song_id = %s", (song_id,))) row['song_id'] = song_id add_to_line = True elif not row['line_expiry_tune_in'] or row['line_expiry_tune_in'] == 0: db.c.update("UPDATE r4_request_line SET line_expiry_tune_in = %s WHERE user_id = %s", ((t + 600), row['user_id'])) add_to_line = True else: add_to_line = True if add_to_line: new_line.append(row) user_positions[u.id] = position position = position + 1 cache.set_station(sid, "request_line", new_line, True) cache.set_station(sid, "request_user_positions", user_positions, True) db.c.update("UPDATE r4_album_sid SET album_requests_pending = NULL WHERE album_requests_pending = TRUE AND sid = %s", (sid,)) for album_id in albums_with_requests: db.c.update("UPDATE r4_album_sid SET album_requests_pending = TRUE WHERE album_id = %s AND sid = %s", (album_id, sid)) return new_line
def _process(self, event):
    """pyinotify callback: route a filesystem event to a directory scan,
    a single-file scan, or a file disable, then flush queued album art.
    """
    # Ignore WinSCP events.
    if event.pathname.endswith('.filepart'):
        return
    try:
        # Collect every station ID whose configured music directory
        # contains the changed path.
        matched_sids = []
        for song_dirs_path, sids in config.get('song_dirs').iteritems():
            if event.pathname.startswith(song_dirs_path):
                matched_sids.extend(sids)
    except Exception as xception:
        _add_scan_error(event.pathname, xception)
    log.debug("scan", "%s %s %s" % (event.maskname, event.pathname, matched_sids))
    try:
        if event.dir:
            _scan_directory(event.pathname, matched_sids)
        elif len(matched_sids) == 0 or event.mask in DELETE_OPERATION:
            # Path isn't in any station's library, or this was a delete/move-out.
            _disable_file(event.pathname)
        else:
            _scan_file(event.pathname, matched_sids)
        _process_album_art_queue()
    except Exception as xception:
        _add_scan_error(event.pathname, xception)
def _scan_directory(directory, sids):
    """Re-scan `directory` recursively for stations `sids`: mark all known
    songs under it as unscanned, walk the tree re-scanning each file, then
    disable any song that was not seen again."""
    # Normalize and add a trailing separator to the directory name
    directory = os.path.join(os.path.normpath(directory), "")
    # Windows workaround eww, damnable directory names
    if os.name == "nt":
        directory = directory.replace("\\", "\\\\")
    songs = db.c.fetch_list("SELECT song_id FROM r4_songs WHERE song_filename LIKE %s || '%%' AND song_verified = TRUE", (directory,))
    for song_id in songs:
        # Mark for possible deletion; _scan_file flips song_scanned back to TRUE.
        db.c.update("UPDATE r4_songs SET song_scanned = FALSE WHERE song_id = %s", (song_id,))
    do_scan = False
    try:
        os.stat(directory)
        do_scan = True
    except (IOError, OSError):
        log.debug("scan", "Directory %s no longer exists." % directory)
    if do_scan and len(sids) > 0:
        for root, subdirs, files in os.walk(directory, followlinks = True):  #pylint: disable=W0612
            for filename in files:
                filename = os.path.normpath(root + os.sep + filename)
                _scan_file(filename, sids)
    # Anything still unscanned is gone from disk (or the whole directory
    # vanished): disable it.
    songs = db.c.fetch_list("SELECT song_id FROM r4_songs WHERE song_filename LIKE %s || '%%' AND song_scanned = FALSE AND song_verified = TRUE", (directory,))
    for song_id in songs:
        s = playlist.Song.load_from_id(song_id)
        log.debug("scan", "Disabling song: %s" % s.filename)
        s.disable()
def get_random_song_ignore_requests(sid):
    """
    Fetch a random song abiding by election block and availability rules,
    but ignoring request blocking rules.
    """
    sql_query = ("FROM r4_song_sid "
                 "WHERE r4_song_sid.sid = %s "
                 "AND song_exists = TRUE "
                 "AND song_cool = FALSE "
                 "AND song_request_only = FALSE "
                 "AND song_elec_blocked = FALSE ")
    pool_size = db.c.fetch_var("SELECT COUNT(song_id) " + sql_query, (sid,))
    log.debug("song_select", "Song pool size (cooldown, blocks): %s" % pool_size)
    if pool_size == 0:
        log.warn("song_select", "No songs available while ignoring pending requests.")
        log.debug(
            "song_select",
            "Song select query: SELECT COUNT(song_id) " + (sql_query % (sid, )),
        )
        return get_random_song_ignore_all(sid)
    # Uniformly pick one row out of the eligible pool.
    pick = random.randint(1, pool_size) - 1
    song_id = db.c.fetch_var("SELECT song_id " + sql_query + " LIMIT 1 OFFSET %s", (sid, pick))
    return Song.load_from_id(song_id, sid)
def advance_station(sid):
    """Advance station `sid` to its next event inside a DB transaction.

    Ensures upnext[sid] has at least one usable event (discarding used or
    empty ones), prepares it, commits, and schedules post_process shortly
    afterwards on the IO loop.  Rolls back and re-raises on any error.
    """
    db.c.start_transaction()
    try:
        log.debug("advance", "Advancing station %s." % sid)
        start_time = time.time()
        # If we need some emergency elections here
        if len(upnext[sid]) == 0:
            manage_next(sid)
        # Discard events that were already played or have nothing to play,
        # refilling the queue whenever it runs dry.
        while upnext[sid][0].used or len(upnext[sid][0].songs) == 0:
            log.warn("advance", "Event ID %s was already used or has zero songs. Removing." % upnext[sid][0].id)
            upnext[sid].pop(0)
            if len(upnext[sid]) == 0:
                manage_next(sid)
        start_time = time.time()
        upnext[sid][0].prepare_event()
        db.c.commit()
        log.debug("advance", "upnext[0] preparation time: %.6f" % (time.time() - start_time,))
        log.info("advance", "Next song: %s" % get_advancing_file(sid))
        # Give the stream a brief moment before post-advance bookkeeping runs.
        tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(milliseconds=150), lambda: post_process(sid))
    except:
        db.c.rollback()
        raise
def _listen(self, task_id):
    """Boot one API server process listening on api_base_port + task_id.

    Statement order is significant: logging is initialized first, the port
    is bound while still privileged, and only then are privileges dropped
    via chuser.  Blocks in the IO loop until shutdown.
    """
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id
    # Log according to configured directory and port # we're operating on
    log_file = "%s/rw_api_%s.log" % (config.get("api_log_dir"), port_no)
    if config.test_mode and os.path.exists(log_file):
        os.remove(log_file)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.open()
    cache.open()
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)
    # If we're not in developer, remove development-related URLs
    if not config.get("developer_mode"):
        i = 0
        while i < len(request_classes):
            if request_classes[i][0].find("/test/") != -1:
                request_classes.pop(i)
                i = i - 1
            i = i + 1
    # Make sure all other errors get handled in an API-friendly way
    request_classes.append((r".*", api.web.Error404Handler))
    # Initialize the help (rather than it scan all URL handlers every time someone hits it)
    api.help.sectionize_requests()
    # Initialize playlist variables
    # NOTE(review): `sid` is the variable left over from the cache loop above,
    # so only the LAST station gets prepared here — confirm whether this
    # should iterate all stations.
    playlist.prepare_cooldown_algorithm(sid)
    # Fire ze missiles!
    app = tornado.web.Application(
        request_classes,
        debug=(config.test_mode or config.get("developer_mode")),
        template_path=os.path.join(os.path.dirname(__file__), "../templates"),
        static_path=os.path.join(os.path.dirname(__file__), "../static"),
        autoescape=None,
    )
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(port_no)
    # Drop privileges only after the port is bound.
    if config.get("api_user") and config.get("api_group"):
        chuser.change_user(config.get("api_user"), config.get("api_group"))
    for request in request_classes:
        log.debug("start", " Handler: %s" % str(request))
    log.info("start", "API server bootstrapped and ready to go.")
    self.ioloop = tornado.ioloop.IOLoop.instance()
    try:
        self.ioloop.start()
    finally:
        self.ioloop.stop()
        http_server.stop()
        db.close()
        log.info("stop", "Server has been shutdown.")
        log.close()
def _start_cooldown_db(self, sid, cool_time):
    """Apply a cooldown of `cool_time` seconds to every song in this group
    on station `sid`, writing song_cool/song_cool_end (and, where the DB
    supports joined updates, song_request_only) to r4_song_sid."""
    # Stations can opt song groups out of cooldown entirely.
    if config.has_station(sid, "cooldown_enable_for_categories") and not config.get_station(sid, "cooldown_enable_for_categories"):
        return
    cool_end = int(cool_time + timestamp())
    log.debug("cooldown", "Group ID %s Station ID %s cool_time period: %s" % (self.id, sid, cool_time))
    # Make sure to update both the if and else SQL statements if doing any updates
    if db.c.allows_join_on_update:
        db.c.update("UPDATE r4_song_sid SET song_cool = TRUE, song_cool_end = %s "
                    "FROM r4_song_group "
                    "WHERE r4_song_sid.song_id = r4_song_group.song_id AND r4_song_group.group_id = %s "
                    "AND r4_song_sid.sid = %s AND r4_song_sid.song_exists = TRUE AND r4_song_sid.song_cool_end <= %s ",
                    (cool_end, self.id, sid, cool_end))
        # Requests stay blocked slightly (300s) longer than the cooldown itself.
        request_only_end = cool_end + 300
        db.c.update("UPDATE r4_song_sid SET song_request_only = TRUE, song_request_only_end = %s "
                    "FROM r4_song_group "
                    "WHERE r4_song_sid.song_id = r4_song_group.song_id AND r4_song_group.group_id = %s "
                    "AND r4_song_sid.sid = %s AND r4_song_sid.song_exists = TRUE AND r4_song_sid.song_cool_end <= %s "
                    "AND song_request_only_end IS NOT NULL",
                    (request_only_end, self.id, sid, cool_end))
    else:
        # Fallback for databases without UPDATE ... FROM support.
        # NOTE(review): this branch filters on song_cool_end < (now - cool_time)
        # rather than <= cool_end, and never sets song_request_only — confirm
        # the asymmetry with the joined branch above is intentional.
        song_ids = db.c.fetch_list(
            "SELECT song_id "
            "FROM r4_song_group JOIN r4_song_sid USING (song_id) "
            "WHERE r4_song_group.group_id = %s AND r4_song_sid.sid = %s AND r4_song_sid.song_exists = TRUE AND r4_song_sid.song_cool_end < %s",
            (self.id, sid, timestamp() - cool_time))
        for song_id in song_ids:
            db.c.update("UPDATE r4_song_sid SET song_cool = TRUE, song_cool_end = %s WHERE song_id = %s AND sid = %s", (cool_end, song_id, sid))
def update_rating(self, skip_album_update=False):
    """
    Calculate an updated rating from the database.
    """
    # Bucket active users' ratings into four bands; fetch_var returns None
    # for an empty bucket, hence the `or 0`.
    dislikes = db.c.fetch_var("SELECT COUNT(*) FROM r4_song_ratings JOIN phpbb_users USING (user_id) WHERE radio_inactive = FALSE AND song_id = %s AND song_rating_user < 3 GROUP BY song_id", (self.id,)) or 0
    neutrals = db.c.fetch_var("SELECT COUNT(*) FROM r4_song_ratings JOIN phpbb_users USING (user_id) WHERE radio_inactive = FALSE AND song_id = %s AND song_rating_user >= 3 AND song_rating_user < 3.5 GROUP BY song_id", (self.id,)) or 0
    neutralplus = db.c.fetch_var("SELECT COUNT(*) FROM r4_song_ratings JOIN phpbb_users USING (user_id) WHERE radio_inactive = FALSE AND song_id = %s AND song_rating_user >= 3.5 AND song_rating_user < 4 GROUP BY song_id", (self.id,)) or 0
    likes = db.c.fetch_var("SELECT COUNT(*) FROM r4_song_ratings JOIN phpbb_users USING (user_id) WHERE radio_inactive = FALSE AND song_id = %s AND song_rating_user >= 4 GROUP BY song_id", (self.id,)) or 0
    rating_count = dislikes + neutrals + neutralplus + likes
    log.debug("song_rating", "%s ratings for %s" % (rating_count, self.filename))
    if rating_count > config.get("rating_threshold_for_calc"):
        # Weighted average of the bands mapped onto the 1-5 scale,
        # rounded to one decimal place.
        weighted = likes + (neutrals * 0.5) + (neutralplus * 0.75)
        self.data['rating'] = round((weighted / rating_count * 4.0) + 1, 1)
        log.debug("song_rating", "rating update: %s for %s" % (self.data['rating'], self.filename))
        db.c.update("UPDATE r4_songs SET song_rating = %s, song_rating_count = %s WHERE song_id = %s", (self.data['rating'], rating_count, self.id))
    if not skip_album_update:
        for album in self.albums:
            album.update_rating()
def _listen(self, task_id):
    """Boot one API server process on api_base_port + task_id and run its
    IO loop until shutdown.  Statement order matters: logging comes up
    first, the port is bound while privileged, then privileges drop.
    """
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id
    # Log according to configured directory and port # we're operating on
    log_file = "%s/rw_api_%s.log" % (config.get("api_log_dir"), port_no)
    if config.test_mode and os.path.exists(log_file):
        os.remove(log_file)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.open()
    cache.open()
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)
    # If we're not in developer, remove development-related URLs
    if not config.get("developer_mode"):
        i = 0
        while (i < len(request_classes)):
            if request_classes[i][0].find("/test/") != -1:
                request_classes.pop(i)
                i = i - 1
            i = i + 1
    # Make sure all other errors get handled in an API-friendly way
    request_classes.append((r".*", api.web.Error404Handler))
    # Initialize the help (rather than it scan all URL handlers every time someone hits it)
    api.help.sectionize_requests()
    # Initialize playlist variables
    # NOTE(review): `sid` is the loop variable left over from the cache loop,
    # so only the last station is prepared — confirm this is intended.
    playlist.prepare_cooldown_algorithm(sid)
    # Fire ze missiles!
    app = tornado.web.Application(
        request_classes,
        debug=(config.test_mode or config.get("developer_mode")),
        template_path=os.path.join(os.path.dirname(__file__), "../templates"),
        static_path=os.path.join(os.path.dirname(__file__), "../static"),
        autoescape=None)
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(port_no)
    # Drop privileges only after the port is bound.
    if config.get("api_user") and config.get("api_group"):
        chuser.change_user(config.get("api_user"), config.get("api_group"))
    for request in request_classes:
        log.debug("start", " Handler: %s" % str(request))
    log.info("start", "API server bootstrapped and ready to go.")
    self.ioloop = tornado.ioloop.IOLoop.instance()
    try:
        self.ioloop.start()
    finally:
        self.ioloop.stop()
        http_server.stop()
        db.close()
        log.info("stop", "Server has been shutdown.")
        log.close()
def _count():
    """Tally the relay listener responses collected in `in_process`, log
    and store per-station guest counts, then refresh the cached relay
    status and reset the collection dict."""
    global in_process
    log.debug("icecast_sync", "All responses came back for counting.")
    try:
        stations = {}
        for sid in config.station_ids:
            stations[sid] = 0
        # Each handler's data is a list of listeners on one relay for one
        # station; non-list values are failed/incomplete responses.
        for handler, data in in_process.iteritems():
            if isinstance(data, list):
                stations[handler.sid] += len(data)
        for sid, listener_count in stations.iteritems():
            log.debug("icecast_sync", "%s has %s listeners." % (config.station_id_friendly[sid], listener_count))
            db.c.update("INSERT INTO r4_listener_counts (sid, lc_guests) VALUES (%s, %s)", (sid, listener_count))
        _cache_relay_status()
        # db.c.update("DELETE FROM r4_listener_counts WHERE lc_time <= %s", (current_time - config.get("trim_history_length"),))
        in_process = {}
    except Exception as e:
        log.exception("icecast_sync", "Could not finish counting listeners.", e)
def start():
    """Boot the backend scheduler server: bind the backend port on
    localhost, drop privileges, write the PID file, load the schedule,
    and run the Tornado IO loop until shutdown."""
    db.open()
    cache.open()
    if config.test_mode:
        playlist.remove_all_locks(1)
    app = tornado.web.Application([
        (r"/advance/([0-9]+)", AdvanceScheduleRequest),
        (r"/refresh/([0-9]+)", RefreshScheduleRequest)
    ], debug=(config.test_mode or config.get("developer_mode")))
    server = tornado.httpserver.HTTPServer(app)
    # Backend is internal-only: bind to loopback.
    server.listen(int(config.get("backend_port")), address='127.0.0.1')
    # Drop privileges only after the port is bound.
    if config.get("backend_user") or config.get("backend_group"):
        chuser.change_user(config.get("backend_user"), config.get("backend_group"))
    pid = os.getpid()
    pidfile = open(config.get("backend_pid_file"), 'w')
    pidfile.write(str(pid))
    pidfile.close()
    schedule.load()
    log.debug("start", "Backend server bootstrapped, port %s, ready to go." % int(config.get("backend_port")))
    for sid in config.station_ids:
        playlist.prepare_cooldown_algorithm(sid)
    try:
        tornado.ioloop.IOLoop.instance().start()
    finally:
        db.close()
def start_cooldown(self, sid):
    """
    Calculates cooldown based on jfinalfunk's crazy algorithms.
    Cooldown may be overriden by song_cool_* rules found in database.
    """
    cool_time = cooldown_config[sid]['max_song_cool']
    if self.data['cool_override']:
        # Explicit per-song override wins outright.
        cool_time = self.data['cool_override']
    else:
        cool_rating = self.data['rating']
        # Unrated songs are treated as a 4 so they aren't over-penalized.
        if not self.data['rating'] or self.data['rating'] == 0:
            cool_rating = 4
        # 3.5 is the rating range (2.5 to 5.0) and 2.5 is the "minimum" rating, effectively.
        # Higher-rated songs get proportionally shorter cooldowns.
        auto_cool = ((3.5 - (cool_rating - 2.5)) / 3.5) * cooldown_config[sid]['max_song_cool'] + cooldown_config[sid]['min_song_cool']
        cool_time = auto_cool * get_age_cooldown_multiplier(self.data['added_on']) * self.data['cool_multiply']
    # Convert the duration into an absolute end-of-cooldown timestamp.
    cool_time = int(cool_time + time.time())
    db.c.update("UPDATE r4_song_sid SET song_cool = TRUE, song_cool_end = %s WHERE song_id = %s AND sid = %s", (cool_time, self.id, sid))
    self.data['cool'] = True
    self.data['cool_end'] = cool_time
    # Cascade to groups first...
    for metadata in self.groups:
        log.debug("song_cooldown", "Starting group cooldown on group %s" % metadata.id)
        metadata.start_cooldown(sid)
    # Albums always have to go last since album records in the DB store cached cooldown values
    for metadata in self.albums:
        log.debug("song_cooldown", "Starting album cooldown on group %s" % metadata.id)
        metadata.start_cooldown(sid)
def _scan_directory(directory, sids):
    """Re-scan `directory` recursively for stations `sids`: mark all known
    songs under it as unscanned, re-scan every file in the tree, then
    disable any song that was not seen again.
    """
    # Bug fix: normalize and add a trailing separator (as the other scan
    # variants do) so the SQL LIKE prefix can't match sibling directories
    # that merely share a name prefix (e.g. "/music/foo" previously also
    # matched "/music/foobar/...").
    directory = os.path.join(os.path.normpath(directory), "")
    # Windows workaround eww, damnable directory names
    if os.name == "nt":
        directory = directory.replace("\\", "\\\\")
    songs = db.c.fetch_list(
        "SELECT song_id FROM r4_songs WHERE song_filename LIKE %s || '%%' AND song_verified = TRUE",
        (directory, ))
    for song_id in songs:
        # Mark for possible deletion; _scan_file sets song_scanned back to TRUE.
        db.c.update(
            "UPDATE r4_songs SET song_scanned = FALSE WHERE song_id = %s",
            (song_id, ))
    for root, subdirs, files in os.walk(directory, followlinks=True):
        for filename in files:
            filename = os.path.normpath(root + os.sep + filename)
            _scan_file(filename, sids)
    # Anything still unscanned is gone from disk: disable it.
    songs = db.c.fetch_list(
        "SELECT song_id FROM r4_songs WHERE song_filename LIKE %s || '%%' AND song_scanned = FALSE AND song_verified = TRUE",
        (directory, ))
    for song_id in songs:
        s = playlist.Song.load_from_id(song_id)
        log.debug("scan", "Disabling song: %s" % s.filename)
        s.disable()
def _scan_directory(directory, sids):
    """Recursively re-scan a directory tree for the given station IDs,
    then disable any song whose file was not seen during the walk."""
    # Normalized path with a trailing separator, used as a SQL LIKE prefix.
    directory = os.path.join(os.path.normpath(directory), "")
    # Flag every known song under the directory; scanning un-flags survivors.
    for song_id in db.c.fetch_list(
            "SELECT song_id FROM r4_songs WHERE song_filename LIKE %s || '%%' AND song_verified = TRUE",
            (directory, ),
    ):
        db.c.update(
            "UPDATE r4_songs SET song_scanned = FALSE WHERE song_id = %s",
            (song_id, ))
    # Skip the walk when the directory has vanished or no stations map here.
    directory_exists = False
    try:
        os.stat(directory)
        directory_exists = True
    except (IOError, OSError):
        log.debug("scan", "Directory %s no longer exists." % directory)
    if directory_exists and sids:
        for root, _subdirs, files in os.walk(directory, followlinks=True):
            for name in files:
                _scan_file(os.path.join(root, name), sids)
    # Whatever is still flagged was not found on disk: disable it.
    for song_id in db.c.fetch_list(
            "SELECT song_id FROM r4_songs WHERE song_filename LIKE %s || '%%' AND song_scanned = FALSE AND song_verified = TRUE",
            (directory, ),
    ):
        gone = playlist.Song.load_from_id(song_id)
        log.debug("scan", "Disabling song: %s" % gone.filename)
        gone.disable()
def set_album_fave(sid, album_id, user_id, fave):
    """Set or clear a user's fave flag on an album for one station.

    Inserts or updates the user's r4_album_ratings row, keeps the aggregate
    album_fave_count in sync, refreshes the rating cache, and commits the
    transaction.  Returns True on success, False if the insert/update
    touched no rows (transaction left open, as in the original).
    """
    db.c.start_transaction()
    exists = db.c.fetch_row("SELECT * FROM r4_album_ratings WHERE album_id = %s AND user_id = %s AND sid = %s", (album_id, user_id, sid))
    rating = None
    rating_complete = None
    if not exists:
        if db.c.update("INSERT INTO r4_album_ratings (album_id, user_id, album_fave, sid) VALUES (%s, %s, %s, %s)", (album_id, user_id, fave, sid)) == 0:
            log.debug("rating", "Failed to insert record for fave %s %s, fave is: %s." % ("album", album_id, fave))
            return False
    else:
        rating = exists["album_rating_user"]
        rating_complete = exists['album_rating_complete']
        if db.c.update("UPDATE r4_album_ratings SET album_fave = %s WHERE album_id = %s AND user_id = %s AND sid = %s", (fave, album_id, user_id, sid)) == 0:
            log.debug("rating", "Failed to update record for fave %s %s, fave is: %s." % ("album", album_id, fave))
            return False
    # Keep the aggregate fave count in step with the flag change.
    # Bug fix: the original evaluated exists["album_fave"] even when `exists`
    # was None (new row, fave False), raising a TypeError.
    if fave and (not exists or not exists["album_fave"]):
        db.c.update("UPDATE r4_album_sid SET album_fave_count = album_fave_count + 1 WHERE album_id = %s AND sid = %s", (album_id, sid))
    elif not fave and exists and exists["album_fave"]:
        db.c.update("UPDATE r4_album_sid SET album_fave_count = album_fave_count - 1 WHERE album_id = %s AND sid = %s", (album_id, sid))
    # (The original's '"album" == "album"' template comparison was always
    # true, so only the album-cache branch could ever run.)
    cache.set_album_rating(sid, album_id, user_id, {
        "rating_user": rating,
        "fave": fave,
        "rating_complete": rating_complete
    })
    # Bug fix: commit was dead code after `return True` in the original,
    # so successful fave changes were never committed.
    db.c.commit()
    return True
def get_producer_at_time(sid, at_time):
    """Return the scheduled event producer active at `at_time` for station `sid`.

    Falls back to a plain ElectionProducer when no schedule entry matches,
    when loading/starting the producer raises, or when the loaded producer
    has no events left.
    """
    to_ret = None
    local_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(at_time))
    # Minutes ahead of "now"; used only for log context.
    time_ahead = int((at_time - timestamp()) / 60)
    # The +20s fudge on sched_start lets an event whose start is imminent
    # be picked up slightly early.
    sched_id = db.c.fetch_var(
        "SELECT sched_id "
        "FROM r4_schedule "
        "WHERE sid = %s AND sched_start <= %s AND sched_end > %s "
        "ORDER BY sched_id DESC "
        "LIMIT 1",
        (sid, at_time + 20, at_time),
    )
    try:
        to_ret = events.event.BaseProducer.load_producer_by_id(sched_id)
        if to_ret:
            to_ret.start_producer()
    except Exception as e:
        log.warn("get_producer", "Failed to obtain producer at time %s (%sm ahead)." % (local_time, time_ahead))
        log.exception(
            "get_producer",
            "Failed to get an appropriate producer at time %s (%sm ahead)." % (local_time, time_ahead),
            e,
        )
    if not to_ret:
        log.debug(
            "get_producer", "No producer at time %s (%sm ahead), defaulting to election." % (local_time, time_ahead)
        )
        return election.ElectionProducer(sid)
    if not to_ret.has_next_event():
        log.warn("get_producer", "Producer ID %s (type %s, %s) has no events." % (to_ret.id, to_ret.type, to_ret.name))
        return election.ElectionProducer(sid)
    return to_ret
def handleUserDetail(self, root, data, urlPack):  # @UnusedVariable
    """Persist every video from one page of an author's feed, then publish a
    follow-up message with the pagination cursor if more pages remain.
    """
    cursor = data["max_cursor"]
    aweme_list = data["aweme_list"]
    for aweme in aweme_list:
        vid = aweme["aweme_id"]
        uid = aweme["author_user_id"]
        obj = dbtools.MongoObject()
        obj.setMeta(const_douyin.DATA_TYPE_VIDEO, const_douyin.DATA_PROVIDER, vid, version=const_douyin.DATA_VERSION)
        obj.setData(aweme)
        obj.setUserId(uid)
        obj.save(const_douyin.MONGO_TABLE_VIDEO)
        log.debug("DouyinAuthorVideoSaver Inserting obj {}".format(
            obj.getLastObjectId()))
        self.addStatObject(obj.getLastObjectId(), const_douyin.DATA_TYPE_VIDEO)
    if data["has_more"] == 1:
        # NOTE(review): `uid` is the author id from the LAST item above; if
        # aweme_list is ever empty while has_more == 1, this raises NameError.
        # Confirm the API never returns that combination.
        msg = Message(const_douyin.DATA_TYPE_AUTHOR, uid)
        msg.setExtra("cursor", cursor)
        self.publish(msg)
    else:
        log.debug("DouyinAuthorVideoSaver: no more!")
    return
def get_random_song_timed(sid, target_seconds = None, target_delta = None):
    """
    Fetch a random song abiding by all election block, request block, and
    availability rules, but giving priority to the target song length
    provided.  Falls back to get_random_song on failure.
    """
    if not target_seconds:
        return get_random_song(sid)
    if not target_delta:
        target_delta = config.get_station(sid, "song_lookup_length_delta")

    # Shared WHERE/JOIN tail; prefixed with either a COUNT or a song_id
    # SELECT below.  Placeholders: sid, lower bound, upper bound.
    sql_query = ("FROM r4_song_sid "
                 "JOIN r4_songs USING (song_id) "
                 "JOIN r4_album_sid ON (r4_album_sid.album_id = r4_songs.album_id AND r4_album_sid.sid = r4_song_sid.sid) "
                 "WHERE r4_song_sid.sid = %s "
                 "AND song_exists = TRUE "
                 "AND song_cool = FALSE "
                 "AND song_elec_blocked = FALSE "
                 "AND album_requests_pending IS NULL "
                 "AND song_request_only = FALSE "
                 "AND song_length >= %s AND song_length <= %s")
    # Accept songs within +/- half the delta of the target length.
    lower_target_bound = target_seconds - (target_delta / 2)
    upper_target_bound = target_seconds + (target_delta / 2)

    num_available = db.c.fetch_var("SELECT COUNT(r4_song_sid.song_id) " + sql_query, (sid, lower_target_bound, upper_target_bound))
    log.info("song_select", "Song pool size (cooldown, blocks, requests, timed) [target %s delta %s]: %s" % (target_seconds, target_delta, num_available))
    if num_available == 0:
        log.warn("song_select", "No songs available with target_seconds %s and target_delta %s." % (target_seconds, target_delta))
        # %-formatting binds tighter than +, so sql_query is interpolated
        # before concatenation -- intended here, for a readable debug query.
        log.debug("song_select", "Song select query: SELECT COUNT(r4_song_sid.song_id) " + sql_query % (sid, lower_target_bound, upper_target_bound))
        return get_random_song(sid)
    else:
        # Pick a uniformly random row via OFFSET into the filtered pool.
        offset = random.randint(1, num_available) - 1
        song_id = db.c.fetch_var("SELECT r4_song_sid.song_id " + sql_query + " LIMIT 1 OFFSET %s", (sid, lower_target_bound, upper_target_bound, offset))
        return Song.load_from_id(song_id, sid)
def get_next(sid):
    """Pop and fulfill the next valid request from the station's request line.

    Returns the requested Song annotated with the requester's identity, or
    None when the line is empty or contains no playable entry.
    """
    line = cache.get_station(sid, "request_line")
    if not line:
        return None
    song = None
    for pos in range(0, len(line)):
        if not line[pos]:
            pass # ?!?!
        elif not line[pos]['song_id']:
            log.debug("request", "Passing on user %s since they have no valid first song." % line[pos]['username'])
        else:
            entry = line.pop(pos)
            song = playlist.Song.load_from_id(entry['song_id'], sid)
            log.debug("request", "Fulfilling %s's request for %s." % (entry['username'], song.filename))
            # Tag the song so the election can credit the requester.
            song.data['elec_request_user_id'] = entry['user_id']
            song.data['elec_request_username'] = entry['username']
            u = User(entry['user_id'])
            db.c.update("DELETE FROM r4_request_store WHERE user_id = %s AND song_id = %s", (u.id, entry['song_id']))
            u.remove_from_request_line()
            # Requesters with more pending requests rejoin the back of the line.
            if u.has_requests():
                u.put_in_request_line(u.get_tuned_in_sid())
            request_count = db.c.fetch_var("SELECT COUNT(*) FROM r4_request_history WHERE user_id = %s", (u.id,)) + 1
            # NOTE(review): this DELETE repeats the one above with the same
            # (user, song) pair -- presumably harmless; confirm whether one
            # of the two is redundant.
            db.c.update("DELETE FROM r4_request_store WHERE song_id = %s AND user_id = %s", (song.id, u.id))
            db.c.update("INSERT INTO r4_request_history (user_id, song_id, request_wait_time, request_line_size, request_at_count, sid) "
                "VALUES (%s, %s, %s, %s, %s, %s)",
                (u.id, song.id, time.time() - entry['line_wait_start'], len(line), request_count, sid))
            db.c.update("UPDATE phpbb_users SET radio_totalrequests = %s WHERE user_id = %s", (request_count, u.id))
            song.update_request_count(sid)
            # If we fully update the line, the user may sneak in and get 2 requests in the same election.
            # This is not a good idea, so we leave it to the scheduler to issue the full cache update.
            cache.set_station(sid, "request_line", line, True)
            break
    return song
def set_song_fave(song_id, user_id, fave):
    """Set or clear a user's fave flag on a song.

    Inserts a rating row when the user has none, otherwise updates it, keeps
    song_fave_count in step, and refreshes the cached rating.  Returns True
    on success, False when the insert/update touched no rows.
    """
    db.c.start_transaction()
    exists = db.c.fetch_row("SELECT * FROM r4_song_ratings WHERE song_id = %s AND user_id = %s", (song_id, user_id))
    rating = None
    if not exists:
        if (
            db.c.update(
                "INSERT INTO r4_song_ratings (song_id, user_id, song_fave) VALUES (%s, %s, %s)",
                (song_id, user_id, fave),
            )
            == 0
        ):
            log.debug("rating", "Failed to insert record for song fave %s, fave is: %s." % (song_id, fave))
            return False
    else:
        rating = exists["song_rating_user"]
        if (
            db.c.update(
                "UPDATE r4_song_ratings SET song_fave = %s WHERE song_id = %s AND user_id = %s",
                (fave, song_id, user_id),
            )
            == 0
        ):
            log.debug("rating", "Failed to update record for fave song %s, fave is: %s." % (song_id, fave))
            return False
    # Keep the fave counter in step.  The original condition subscripted
    # `exists` even when it was None (TypeError when clearing a fave with no
    # existing row); guard on `exists` first.
    if fave and (not exists or not exists["song_fave"]):
        db.c.update("UPDATE r4_songs SET song_fave_count = song_fave_count + 1 WHERE song_id = %s", (song_id,))
    elif exists and exists["song_fave"] and not fave:
        db.c.update("UPDATE r4_songs SET song_fave_count = song_fave_count - 1 WHERE song_id = %s", (song_id,))
    cache.set_song_rating(song_id, user_id, {"rating_user": rating, "fave": fave})
    db.c.commit()
    return True
def fill(self, target_song_length=None, skip_requests=False):
    """Fill this election's remaining slots with songs.

    Requests are added first (at most once per fill), then random songs
    aligned to the first song's length fill the rest.  Raises
    ElectionEmptyException if no song could be added at all.
    """
    # ONLY RUN _ADD_REQUESTS ONCE PER FILL
    if not skip_requests:
        self._add_requests()
    for i in range(len(self.songs), self._num_songs):  #pylint: disable=W0612
        try:
            # Align subsequent songs to the first song's length so the
            # election's candidates are interchangeable in the schedule.
            if not target_song_length and len(
                    self.songs) > 0 and 'length' in self.songs[0].data:
                target_song_length = self.songs[0].data['length']
                log.debug(
                    "elec_fill", "Second song in election, aligning to length %s"
                    % target_song_length)
            song = self._fill_get_song(target_song_length)
            song.data['entry_votes'] = 0
            song.data['entry_type'] = ElecSongTypes.normal
            song.data['elec_request_user_id'] = 0
            song.data['elec_request_username'] = None
            self._check_song_for_conflict(song)
            self.add_song(song)
        except Exception as e:
            # One bad candidate must not abort the whole fill.
            # (Removed the dead `pass` that followed this log call.)
            log.exception("elec_fill", "Song failed to fill in an election.", e)
    if len(self.songs) == 0:
        raise ElectionEmptyException
def process_IN_CLOSE_WRITE(self, event):
    """Forward a close-after-write notification for a file to self._process.

    Close-write events on directories carry no playlist-relevant change, so
    they are logged and dropped.
    """
    if not event.dir:
        self._process(event)
        return
    log.debug(
        "scan",
        "Ignoring close write event for directory %s" % event.pathname)
def _listen(self, task_id):
    """Boot one API server process on base_port + task_id and block in the
    Tornado IOLoop until shutdown, then tear the server down cleanly.
    """
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id

    # One log file per port so concurrent API processes never interleave
    # writes into the same file.
    log_file = "%s/api%s.log" % (config.get("log_dir"), port_no)
    if config.test_mode and os.path.exists(log_file):
        os.remove(log_file)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.open()

    # Fire ze missiles!
    # xheaders=True: trust X-Real-Ip/X-Forwarded-For from the fronting proxy.
    app = tornado.web.Application(request_classes)
    http_server = tornado.httpserver.HTTPServer(app, xheaders = True)
    http_server.listen(port_no)

    for request in request_classes:
        log.debug("start", "    Handler: %s" % str(request))

    # Warm each station's local cache before accepting traffic.
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)

    log.info("start", "Server bootstrapped and ready to go.")
    self.ioloop = tornado.ioloop.IOLoop.instance()
    # Blocks until ioloop.stop() is called elsewhere (e.g. signal handler).
    self.ioloop.start()
    http_server.stop()
    log.info("stop", "Server has been shutdown.")
    log.close()
def add_anonymous(self, sid):
    """Register an anonymous listener (user_id 1) for this relay connection.

    If the IP has no listener record, one is inserted; otherwise duplicates
    are trimmed and the surviving record is repointed at the new icecast ID.
    Sets self.failed = False on either path.
    """
    # Here we'll erase any extra records for the same IP address (shouldn't happen but you never know, especially
    # if the system gets a reset).  There is a small flaw here; there's a chance we'll pull in 2 clients with the same client ID.
    # I (rmcauley) am classifying this as "collatoral damage" - an anon user who is actively using the website
    # can re-tune-in on the small chance that this occurs.
    records = db.c.fetch_list("SELECT listener_icecast_id FROM r4_listeners WHERE listener_ip = %s", (self.get_argument("ip"),))
    if len(records) == 0:
        # user_id 1 is the anonymous user.
        db.c.update("INSERT INTO r4_listeners "
            "(sid, listener_ip, user_id, listener_relay, listener_agent, listener_icecast_id) "
            "VALUES (%s, %s, %s, %s, %s, %s)",
            (sid, self.get_argument("ip"), 1, self.relay, self.get_argument("agent"), self.get_argument("client")))
        sync_to_front.sync_frontend_ip(self.get_argument("ip"))
        self.append("Anonymous user from IP %s is now tuned in with record." % self.get_argument("ip"))
        self.failed = False
    else:
        # Keep one valid entry on file for the listener by popping once
        records.pop()
        # Erase the rest
        # NOTE(review): after the pop above, "while > 1" still leaves one
        # undeleted row besides the kept one -- confirm whether this should
        # be "while len(records) > 0" (the UPDATE below papers over it by
        # matching on listener_ip).
        while len(records) > 1:
            db.c.update("DELETE FROM r4_listeners WHERE listener_icecast_id = %s", (records.pop(),))
            log.debug("ldetect", "Deleted extra record for icecast ID %s from IP %s." % (self.get_argument("client"), self.get_argument("ip")))
        db.c.update("UPDATE r4_listeners SET listener_icecast_id = %s, listener_purge = FALSE WHERE listener_ip = %s", (self.get_argument("client"), self.get_argument("ip")))
        self.append("Anonymous user from IP %s record updated." % self.get_argument("ip"))
        self.failed = False
        sync_to_front.sync_frontend_ip(self.get_argument("ip"))
def _add_requests(self):
    """Fill open election slots with listener requests.

    Called at most once per fill; is_request_needed() may carry side
    effects, so it is deliberately invoked exactly once, before anything
    else.
    """
    # ONLY RUN IS_REQUEST_NEEDED ONCE
    if not self.is_request_needed():
        return
    if len(self.songs) >= self._num_songs:
        return
    log.debug("requests", "Ready for requests, filling %s." % self._num_requests)
    for _ in range(self._num_requests):
        self.add_song(self.get_request())
def load_next_event(self, target_length=None, min_elec_id=0, skip_requests=False):
    """Load (or create) the next unused election for this producer.

    Looks for a pre-planned unused election matching this producer's type,
    station, and (when self.id is set) schedule id.  Empty elections are
    marked used and skipped via recursion -- safe because elec_used = TRUE
    excludes them from the next query.  Returns None for scheduled
    producers that don't always run elections; otherwise falls back to
    creating a fresh election.
    """
    if self.id:
        elec_id = db.c.fetch_var(
            "SELECT elec_id FROM r4_elections WHERE elec_type = %s and elec_used = FALSE AND sid = %s AND elec_id > %s AND sched_id = %s ORDER BY elec_id LIMIT 1",
            (self.elec_type, self.sid, min_elec_id, self.id))
    else:
        elec_id = db.c.fetch_var(
            "SELECT elec_id FROM r4_elections WHERE elec_type = %s and elec_used = FALSE AND sid = %s AND elec_id > %s AND sched_id IS NULL ORDER BY elec_id LIMIT 1",
            (self.elec_type, self.sid, min_elec_id))
    log.debug(
        "load_election",
        "Check for next election (type %s, sid %s, min. ID %s, sched_id %s): %s"
        % (self.elec_type, self.sid, min_elec_id, self.id, elec_id))
    if elec_id:
        elec = self.elec_class.load_by_id(elec_id)
        if not elec.songs or not len(elec.songs):
            # Fix: the election id was never applied to the format string,
            # so the log line printed a literal "%s".
            log.warn("load_election", "Election ID %s is empty. Marking as used." % elec.id)
            db.c.update(
                "UPDATE r4_elections SET elec_used = TRUE WHERE elec_id = %s",
                (elec.id, ))
            return self.load_next_event()
        elec.url = self.url
        elec.name = self.name
        return elec
    elif self.id and not self.always_return_elec:
        return None
    else:
        return self._create_election(target_length, skip_requests)
def load_event_in_progress(self):
    """Resume the most recent in-progress election for this producer.

    Empty in-progress elections are marked used and skipped.  When nothing
    is in progress (or the found election is empty), defers to
    load_next_event().
    """
    if self.id:
        elec_id = db.c.fetch_var(
            "SELECT elec_id FROM r4_elections WHERE elec_type = %s AND elec_in_progress = TRUE AND sid = %s AND sched_id = %s ORDER BY elec_id DESC LIMIT 1",
            (self.elec_type, self.sid, self.id))
    else:
        elec_id = db.c.fetch_var(
            "SELECT elec_id FROM r4_elections WHERE elec_type = %s AND elec_in_progress = TRUE AND sid = %s AND sched_id IS NULL ORDER BY elec_id DESC LIMIT 1",
            (self.elec_type, self.sid))
    log.debug(
        "load_election",
        "Check for in-progress elections (type %s, sid %s, sched_id %s): %s"
        % (self.elec_type, self.sid, self.id, elec_id))
    if elec_id:
        elec = self.elec_class.load_by_id(elec_id)
        if not elec.songs or not len(elec.songs):
            # Fix: the election id was never applied to the format string,
            # so the log line printed a literal "%s".
            log.warn("load_election", "Election ID %s is empty. Marking as used." % elec.id)
            db.c.update(
                "UPDATE r4_elections SET elec_used = TRUE WHERE elec_id = %s",
                (elec.id, ))
            return self.load_next_event()
        elec.name = self.name
        elec.url = self.url
        elec.dj_user_id = self.dj_user_id
        return elec
    else:
        return self.load_next_event()
def fill(self, target_song_length=None, skip_requests=False):
    """Fill this election's remaining slots with songs, then put fulfilled
    requesters back into the request line.

    Raises ElectionEmptyException if no song could be added at all.
    """
    # ONLY RUN _ADD_REQUESTS ONCE PER FILL
    if not skip_requests:
        self._add_requests()
    for i in range(len(self.songs), self._num_songs):  #pylint: disable=W0612
        try:
            # Align subsequent songs to the first song's length so the
            # election's candidates are interchangeable in the schedule.
            if not target_song_length and len(
                    self.songs) > 0 and 'length' in self.songs[0].data:
                target_song_length = self.songs[0].data['length']
                log.debug(
                    "elec_fill", "Second song in election, aligning to length %s"
                    % target_song_length)
            song = self._fill_get_song(target_song_length)
            song.data['entry_votes'] = 0
            song.data['entry_type'] = ElecSongTypes.normal
            song.data['elec_request_user_id'] = 0
            song.data['elec_request_username'] = None
            self._check_song_for_conflict(song)
            self.add_song(song)
        except Exception as e:
            # One bad candidate must not abort the whole fill.
            log.exception("elec_fill", "Song failed to fill in an election.", e)
    if len(self.songs) == 0:
        raise ElectionEmptyException
    # Requesters whose songs made it into this election rejoin the line.
    for song in self.songs:
        if 'elec_request_user_id' in song.data and song.data[
                'elec_request_user_id']:
            log.debug(
                "elec_fill",
                "Putting user %s back in line after request fulfillment."
                % song.data['elec_request_username'])
            u = User(song.data['elec_request_user_id'])
            u.put_in_request_line(u.get_tuned_in_sid())
    request.update_line(self.sid)
def monitor():
    """Run the playlist file monitor: write a pid file, then watch the
    configured music directory tree with pyinotify until shutdown.
    """
    _common_init()
    pid = os.getpid()
    pid_file = open("%s/scanner.pid" % config.get_directory("pid_dir"), 'w')
    pid_file.write(str(pid))
    pid_file.close()

    # Events of interest: metadata changes, creates, finished writes,
    # deletes, and moves in/out/of the tree.  IN_EXCL_UNLINK ignores events
    # on files already unlinked.
    mask = (pyinotify.IN_ATTRIB | pyinotify.IN_CREATE | pyinotify.IN_CLOSE_WRITE
            | pyinotify.IN_DELETE | pyinotify.IN_MOVED_TO | pyinotify.IN_MOVED_FROM
            | pyinotify.IN_MOVE_SELF | pyinotify.IN_EXCL_UNLINK)
    try:
        go = True
        while go:
            try:
                log.info("scan", "File monitor started.")
                wm = pyinotify.WatchManager()
                wm.add_watch(str(config.get("monitor_dir")), mask, rec=True)
                # Blocks processing events until an exception breaks the loop.
                pyinotify.Notifier(wm, FileEventHandler()).loop()
                go = False
            # Directory churn invalidates the recursive watch set, so the
            # handler raises and we rebuild the watch from scratch.
            except NewDirectoryException:
                log.debug("scan", "New directory added, restarting watch.")
            except DeletedDirectoryException:
                log.debug("scan", "Directory was deleted, restarting watch.")
            finally:
                # Best-effort close; `wm` may be unbound if WatchManager()
                # itself raised, hence the deliberate bare except.
                try:
                    wm.close()
                except:
                    pass
    finally:
        log.info("scan", "File monitor shutdown.")
def next(self):
    """Pop the next URL pack off the queue, sign its URL for the hotsoon
    service, stamp it with this pipe's priority, and return it.
    """
    pack = self.queueGet()
    signed = signature.sign(pack.url, signature.hotsoon)
    pack.url = signed['url']
    pack.priority = self.getPipePriority()
    log.debug("HuoshanAuthorRelationUrlProvider next: {}".format(pack))
    return pack
def fill(self, target_song_length = None, skip_requests = False):
    """Populate this election up to its song quota, then return fulfilled
    requesters to the request line.

    Requests are added first (at most once per fill); the remaining slots
    are filled with random songs aligned to the first song's length.
    Raises ElectionEmptyException when no song could be added at all.
    """
    # _add_requests must run at most once per fill.
    if not skip_requests:
        self._add_requests()
    # Fixed iteration count, computed up front: a failed slot is logged and
    # skipped rather than retried.
    slots_to_fill = self._num_songs - len(self.songs)
    for _ in range(slots_to_fill):
        try:
            if not target_song_length and self.songs and 'length' in self.songs[0].data:
                target_song_length = self.songs[0].data['length']
                log.debug("elec_fill", "Second song in election, aligning to length %s" % target_song_length)
            candidate = self._fill_get_song(target_song_length)
            candidate.data['entry_votes'] = 0
            candidate.data['entry_type'] = ElecSongTypes.normal
            candidate.data['elec_request_user_id'] = 0
            candidate.data['elec_request_username'] = None
            self._check_song_for_conflict(candidate)
            self.add_song(candidate)
        except Exception as e:
            log.exception("elec_fill", "Song failed to fill in an election.", e)
    if not self.songs:
        raise ElectionEmptyException
    # Requesters whose songs made it into the election rejoin the line.
    for entry in self.songs:
        if 'elec_request_user_id' in entry.data and entry.data['elec_request_user_id']:
            log.debug("elec_fill", "Putting user %s back in line after request fulfillment." % entry.data['elec_request_username'])
        else:
            continue
        u = User(entry.data['elec_request_user_id'])
        u.put_in_request_line(u.get_tuned_in_sid())
    request.update_line(self.sid)
def _scan_file(filename, sids):
    """Scan one file for the given station IDs.

    MP3s are (re)loaded only when their mtime differs from the stored one;
    unchanged files are just re-marked as scanned.  Images are queued for
    album-art processing.  Files that vanished mid-scan are disabled.
    """
    global _album_art_queue
    global _art_only
    try:
        if _is_mp3(filename) and not _art_only:
            # Only scan the file if we don't have a previous mtime for it, or the mtime is different
            old_mtime = db.c.fetch_var("SELECT song_file_mtime FROM r4_songs WHERE song_filename = %s AND song_verified = TRUE", (filename,))
            if not old_mtime or old_mtime != os.stat(filename)[8]:
                playlist.Song.load_from_file(filename, sids)
            else:
                db.c.update("UPDATE r4_songs SET song_scanned = TRUE WHERE song_filename = %s", (filename,))
        elif _is_image(filename):
            _album_art_queue.append([filename, sids])
    # NOTE(review): WindowsError only exists on Windows (it aliases OSError
    # on py3) -- confirm the target interpreter/platform, or this tuple
    # itself raises NameError on non-Windows Python 3.
    except (OSError, WindowsError, IOError) as e:
        # errno 2 == ENOENT: the file was moved/deleted between discovery
        # and scan.
        if e.errno == 2:
            log.debug("scan", "File has been moved/deleted: %s" % filename)
            s = playlist.Song.load_from_deleted_file(filename)
            if s:
                s.disable()
        else:
            # Fix: the original passed the undefined name `xception` here,
            # raising NameError instead of recording the scan error.
            _add_scan_error(filename, e)
    except Exception as xception:
        _add_scan_error(filename, xception)
def _start_cooldown_db(self, sid, cool_time):
    """Apply this group's cooldown to all its songs on station `sid`.

    Group songs become cool (unselectable) for `cool_time` seconds, then
    request-only for a further 300 seconds beyond that.
    """
    # Station-level kill switch for category cooldowns.
    if config.has_station(
            sid, "cooldown_enable_for_categories") and not config.get_station(
                sid, "cooldown_enable_for_categories"):
        return

    cool_end = int(cool_time + timestamp())
    log.debug(
        "cooldown",
        "Group ID %s Station ID %s cool_time period: %s"
        % (self.id, sid, cool_time),
    )
    # Make sure to update both the if and else SQL statements if doing any updates
    # Only extend cooldowns (song_cool_end <= cool_end); never shorten one
    # already in effect.
    db.c.update(
        "UPDATE r4_song_sid SET song_cool = TRUE, song_cool_end = %s "
        "FROM r4_song_group "
        "WHERE r4_song_sid.song_id = r4_song_group.song_id AND r4_song_group.group_id = %s "
        "AND r4_song_sid.sid = %s AND r4_song_sid.song_exists = TRUE AND r4_song_sid.song_cool_end <= %s ",
        (cool_end, self.id, sid, cool_end),
    )
    # Request-only window runs 5 minutes past the cooldown end.
    request_only_end = cool_end + 300
    # NOTE(review): the "song_request_only_end IS NOT NULL" guard means songs
    # with a NULL end are never put into request-only here -- confirm intent.
    db.c.update(
        "UPDATE r4_song_sid SET song_request_only = TRUE, song_request_only_end = %s "
        "FROM r4_song_group "
        "WHERE r4_song_sid.song_id = r4_song_group.song_id AND r4_song_group.group_id = %s "
        "AND r4_song_sid.sid = %s AND r4_song_sid.song_exists = TRUE AND r4_song_sid.song_cool_end <= %s "
        "AND song_request_only_end IS NOT NULL",
        (request_only_end, self.id, sid, cool_end),
    )
def handleChallengeInfo(self, root, data, urlPack):  # @UnusedVariable
    """Persist one challenge (topic) payload into the topic collection,
    keyed by its "cid"."""
    log.debug(data)
    record = dbtools.MongoObject()
    record.setMeta(self.DATA_TYPE_TOPIC, self.DATA_PROVIDER, data["cid"])
    record.setData(data)
    record.save(self.MONGO_TABLE_TOPIC)
def sync_frontend_all(sid):
    """Asynchronously POST a sync_update_all for station `sid` to every
    local API process, cancelling any pending timed sync first."""
    _sync_frontend_all_timed_stop(sid)
    http_client = AsyncHTTPClient()
    body = urllib.urlencode({ "sid": sid })
    for offset in range(config.get("api_num_processes")):
        port = config.get("api_base_port") + offset
        http_client.fetch("http://localhost:%s/api4/sync_update_all" % (port,), sync_result, method='POST', body=body)
        log.debug("sync_front", "Sent update_all to API port %s" % (port,))
def sync_frontend_all(sid):
    """Synchronously POST a sync_update_all for station `sid` to every
    local API process.

    Connection failures are logged as warnings rather than raised, so one
    dead API process does not abort the sync of the others.
    """
    try:
        params = urllib.urlencode({ "sid": sid })
        for i in range(0, config.get("api_num_processes")):
            urllib2.urlopen(urllib2.Request("http://localhost:%s/api/sync_update_all" % (config.get("api_base_port") + i,), params))
            log.debug("sync_front", "Sent update_all to API port %s" % (config.get("api_base_port") + i,))
    # Fix: "except X as e" -- the comma spelling is Python-2-only syntax
    # (a SyntaxError on Python 3); the "as" form works on 2.6+ and 3.x.
    except urllib2.URLError as e:
        log.warn("sync_front", "Could not connect to an API port: %s" % repr(e.reason))
def get(self):
    """Render the index page.  No authentication is enforced here; the
    current user (possibly None) is only logged for debugging."""
    log.debug(self.current_user)
    self.render('../apps/index/templates/index.html')
def start_election_block(self, sid, num_elections = False):
    """Start an election block for this event on station `sid`.

    A positive self.elec_block overrides the length; otherwise the caller's
    num_elections (when truthy) is used.  Does nothing when elec_block is
    None.
    """
    if self.elec_block is None:
        return
    if self.elec_block > 0:
        log.debug("elec_block", "%s SID %s blocking ID %s for override %s" % (self.__class__.__name__, sid, self.id, self.elec_block))
        self._start_election_block_db(sid, self.elec_block)
    elif num_elections:
        log.debug("elec_block", "%s SID %s blocking ID %s for normal %s" % (self.__class__.__name__, sid, self.id, num_elections))
        self._start_election_block_db(sid, num_elections)
def _disable_file(filename):
    """Take a file's song off the playlist ("delete this off the playlist").

    Non-MP3 files are ignored; any failure is recorded as a scan error
    rather than raised.
    """
    log.debug("scan", "Attempting to disable file: {}".format(filename))
    try:
        if not _is_mp3(filename):
            return
        track = playlist.Song.load_from_deleted_file(filename)
        track.disable()
    except Exception as err:
        _add_scan_error(filename, err)
def load_from_file(klass, filename, sids): """ Produces an instance of the Song class with all album, group, and artist IDs loaded from only a filename. All metadata is saved to the database and updated where necessary. """ # log.debug("playlist", u"sids {} loading song from file {}".format(sids, filename)) kept_artists = [] kept_groups = [] matched_entry = db.c.fetch_row("SELECT song_id FROM r4_songs WHERE song_filename = %s", (filename,)) if matched_entry: log.debug("playlist", "this filename matches an existing database entry, song_id {}".format(matched_entry['song_id'])) s = klass.load_from_id(matched_entry['song_id']) for metadata in s.artists: try: if metadata.is_tag: metadata.disassociate_song_id(s.id) else: kept_artists.append(metadata) except MetadataUpdateError: pass for metadata in s.groups: try: if metadata.is_tag: metadata.disassociate_song_id(s.id) else: kept_groups.append(metadata) except MetadataUpdateError: pass elif len(sids) == 0: raise SongHasNoSIDsException else: s = klass() s.load_tag_from_file(filename) s.save(sids) new_artists = Artist.load_list_from_tag(s.artist_tag) new_groups = SongGroup.load_list_from_tag(s.genre_tag) i = 0 for metadata in new_artists: metadata.associate_song_id(s.id, order=i) i += 1 for metadata in new_groups: metadata.associate_song_id(s.id) s.artists = new_artists + kept_artists s.groups = new_groups + kept_groups s.albums = [ Album.load_from_name(s.album_tag) ] s.albums[0].associate_song_id(s.id) s.update_artist_parseable() return s