def encode_lp():
    """Handle a low-priority encode request: authorize, validate, enqueue."""
    LoggingUtils.info("Received a request under the low priority encode route")

    # Reject callers that fail header-based authorization
    if not RequestAuthorizer.authorize(request.headers):
        LoggingUtils.debug("Returning 401 http status code",
                           color=LoggingUtils.YELLOW)
        return "Unauthorized request", 401

    # Build a Job from the request payload; a falsy result means bad input
    job = JobGenerator.create_from_json(request.get_json())
    if not job:
        LoggingUtils.debug("Returning 400 http status code",
                           color=LoggingUtils.YELLOW)
        return "Malformed request", 400

    # Hand the job off to the low-priority encode queue
    worker_args = (job,
                   RcloneConf.get_config(),
                   EncoderConf.create_encoder_config_store())
    encode_lp_queue.enqueue(encode_worker,
                            args=worker_args,
                            job_timeout=JOB_TIMEOUT,
                            result_ttl=RESULT_TTL,
                            failure_ttl=FAILURE_TTL,
                            job_id=_create_job_id(job.episode, "encode"))
    LoggingUtils.info("Enqueued a new encoder job to the 'encode' queue",
                      color=LoggingUtils.CYAN)
    return "Request accepted", 200
def notify(job: Job, nconf: NotifierConfigStore) -> None:
    """Notify worker.

    Fetches show information, applies the user's list filters, and - when the
    user is watching the show - fires Discord webhooks and REST POST
    notifications.

    Params:
        job - the notification job to process
        nconf - notifier configuration (trackers, whitelist, webhooks, endpoints)

    Raises:
        Exception - any error raised while notifying is logged and re-raised
        so it stays visible in RQ-dashboard.
    """
    try:
        # 1. Get the info of the show
        LoggingUtils.info("[1/X] Fetching show information via Hisha...",
                          color=LoggingUtils.CYAN)
        info = hisha.search(job.show)

        # 2. Check filters
        LoggingUtils.info("[2/X] Checking user list filters...",
                          color=LoggingUtils.CYAN)
        filters = UserListFilter.check(job, info, nconf.anilist_tracker,
                                       nconf.mal_tracker, nconf.whitelist)
        if not filters:
            LoggingUtils.info(
                "User isn't watching this show, concluding job immediately.",
                color=LoggingUtils.LYELLOW)
            # Fix: was `return False`, contradicting the -> None annotation
            # (every other path returns None)
            return

        # 3. First automata: Start sending Discord webhooks
        LoggingUtils.info("[3/X] Sending Discord Webhook Notifications...",
                          color=LoggingUtils.CYAN)
        DiscordWebhook.send(job, info, nconf.discord_webhooks)

        # 4. Send POST requests
        LoggingUtils.info("[4/X] Sending POST requests to endpoints...",
                          color=LoggingUtils.CYAN)
        RestSender.send(JobUtils.to_dict(job), nconf.endpoints)
    except Exception as e:
        # In the event of an exception, we want to simply log it
        LoggingUtils.critical(e, color=LoggingUtils.LRED)
        # Bare raise preserves the original traceback
        raise
def notify():
    """Handle a notify request: authorize, validate, enqueue."""
    LoggingUtils.info("Received a request for notify")

    # Reject callers that fail header-based authorization
    if not RequestAuthorizer.authorize(request.headers):
        LoggingUtils.debug("Returning 401 http status code",
                           color=LoggingUtils.YELLOW)
        return "Unauthorized request", 401

    # Build a Job from the request payload; a falsy result means bad input
    job = JobGenerator.create_from_json(request.get_json())
    if not job:
        LoggingUtils.debug("Returning 400 http status code",
                           color=LoggingUtils.YELLOW)
        return "Malformed request", 400

    # Hand the job off to the notify queue
    notify_queue.enqueue(notify_worker,
                         args=(job, NotifierConf.create_notifier_config_store()),
                         job_timeout=JOB_TIMEOUT,
                         result_ttl=RESULT_TTL,
                         failure_ttl=FAILURE_TTL,
                         job_id=_create_job_id(job.episode, "notify"))
    LoggingUtils.info("Enqueued a new notify job to the 'notify' queue",
                      color=LoggingUtils.CYAN)
    return "Request accepted", 200
def send(json_dict: dict, urls: List[Dict[str, str]]) -> bool:
    """
    Sends notifications. Returns bool if all successful
    Assumes the passed in object is already in JSON dict form.

    Params:
        json_dict - the job serialized as a JSON-compatible dict.
            (Fix: the old annotation said `str`, but the value is handed to
            requests' `json=` kwarg and the docstring calls it a dict.)
        urls - endpoint descriptors; each needs a 'url' key and may carry an
            optional 'auth' token.

    Returns:
        True when every endpoint was reached without error, False otherwise.
        (Fix: previously returned True unconditionally, contradicting the
        documented contract. Known callers ignore the return value.)
    """
    all_ok = True
    for url in urls:
        LoggingUtils.info("Sending request to url {}".format(url['url']))
        headers = dict()
        headers['Content-Type'] = "application/json"
        # Send the token under both header casings for case-sensitive servers
        if 'auth' in url and url['auth']:
            headers['Authorization'] = url['auth']
            headers['authorization'] = url['auth']
        try:
            res = requests.post(url['url'],
                                json=json_dict,
                                headers=headers,
                                timeout=5)
            if res.status_code == 200:
                LoggingUtils.info("Successfully sent with return of 200 OK")
            elif res.status_code == 201:
                LoggingUtils.info("Successfully sent with return of 201 Created")
            elif res.status_code == 202:
                LoggingUtils.info("Successfully sent with return of 202 Accepted")
            else:
                LoggingUtils.info("Sent request with a return of {}".format(
                    res.status_code))
        except requests.exceptions.ConnectionError:
            all_ok = False
            LoggingUtils.warning(
                "Connection error occured while sending to {}".format(
                    url['url']), color=LoggingUtils.RED)
        except requests.exceptions.MissingSchema:
            all_ok = False
            LoggingUtils.warning("Missing http/https schema for {}".format(
                url['url']), color=LoggingUtils.RED)
        except requests.exceptions.Timeout:
            all_ok = False
            LoggingUtils.warning(
                "Timeout occured while sending to {}".format(url['url']),
                color=LoggingUtils.RED)
        except Exception:
            # Fix: was a bare `except:`, which would also trap
            # SystemExit/KeyboardInterrupt
            all_ok = False
            LoggingUtils.warning(
                "Unknown error occured while sending to {}".format(
                    url['url']), color=LoggingUtils.RED)
    return all_ok
def send(cls, job: Job, hisha: HishaInfo, webhooks: List[str]):
    """Send the embed for this job to every configured Discord webhook.

    Params:
        job - the job being announced
        hisha - show information used to build the embed
        webhooks - Discord webhook URLs to POST the embed to

    A failure on one webhook is logged and skipped so a single bad URL
    doesn't stop the remaining notifications.
    """
    embed = cls._generate_embed(job, hisha, webhooks)
    LoggingUtils.info("Sending out Discord webhook notifications",
                      color=LoggingUtils.LMAGENTA)
    for webhook in webhooks:
        try:
            requests.post(webhook, json=embed, timeout=5)
        except Exception:
            # Fix: was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt
            LoggingUtils.warning(
                "There was an error when sending out a Discord webhook to: {}"
                .format(webhook), color=LoggingUtils.YELLOW)
    LoggingUtils.info("Done sending out webhook notifications",
                      color=LoggingUtils.GREEN)
    return
def _page_search(self, search, status):
    """
    Searches for a show using the page query

    Params:
        search - show to search for
        status - status to filter under

    Returns: the individual show data if it's found, or None otherwise
    """
    try:
        info = self._anilist(self._PAGE_QUERY, search, status)['Page']['media']
    except Exception:
        # Fix: was a bare `except:`; only swallow real errors, not
        # SystemExit/KeyboardInterrupt.
        # Default to bailing out if the results are bad - Hisha can cleanly exit here
        LoggingUtils.debug("No data provided for {} in {}, returning None".format(search, status))
        return None

    # Match against the titles and synonyms provided in the response.
    # Titles and synonyms got identical, duplicated treatment before; check
    # them in a single pass instead.
    for show in info:
        for title in list(show['title'].values()) + show['synonyms']:
            if self._check_equality_regex(search, title):
                LoggingUtils.info("Matched {} to {}".format(search, title))
                return show
            else:
                LoggingUtils.debug("Didn't match {} to {}".format(search, title))

    # If there are no matches, return None
    LoggingUtils.debug("Didn't find a match for {} in {}".format(search, status))
    return None
def search(self, show):
    """Searches for a show and returns its information from Anilist"""
    # Probe the three airing states in priority order; the first hit wins.
    probes = (
        ("RELEASING", self._single_search),
        ("FINISHED", self._page_search),
        ("NOT_YET_RELEASED", self._single_search),
    )
    for status, probe in probes:
        data = probe(show, status)
        if data:
            LoggingUtils.info("Creating HishaInfo for {} in {}".format(show, status))
            return self._create_hisha_info(data, show)

    # None of the three found a result, so create a dummy Hisha object and return it
    LoggingUtils.info("Creating HishaInfo for {} with default values".format(show))
    return self._create_hisha_info(None, show)
def encode(job: Job, rconf: RcloneConfigStore, econf: EncoderConfigStore) -> None:
    """Job worker: download an episode, hardsub-encode it, and re-upload it.

    Params:
        job - the encode job to process
        rconf - rclone configuration used for downloads/uploads
        econf - encoder configuration (sources, destinations, flags, endpoints)

    Raises:
        RcloneError, FFmpegError, WorkerCancelledError, or any other pipeline
        error - logged, then re-raised so it is visible in RQ-dashboard.
        Temp resources are always cleaned up.
    """
    tempfolder = TempFolderController.get_temp_folder()
    rclone_conf_tempfile = RcloneTempFileController.get_temp_file(rconf)
    try:
        # Step 1: Copy the file from rclone provided source to temp folder
        LoggingUtils.info("[1/7] Starting download of episode file...",
                          color=LoggingUtils.LCYAN)
        src_file = Rclone.download(job, econf.downloading_sources, tempfolder,
                                   rclone_conf_tempfile,
                                   econf.downloading_rclone_flags)

        # Step 2: Prepare the file (copy over streams, populate metadata, extract subs, etc)
        LoggingUtils.info(
            "[2/7] Preparing episode file for hardsub and extracting subs...",
            color=LoggingUtils.LCYAN)
        sub1_file, sub2_file = FFmpeg.prepare(job, src_file, tempfolder)

        # Step 3: Add the OpenSans-Semibold.ttf font
        LoggingUtils.info("[3/7] Adding OpenSans-Semibold.ttf font...",
                          color=LoggingUtils.LCYAN)
        FFmpeg.add_font(job, src_file, tempfolder)

        # Step 4: Encode the video using the built in attachments.
        # This also fixes the audio file
        LoggingUtils.info("[4/7] Beginning hardsub encode of episode...",
                          color=LoggingUtils.LCYAN)
        hardsub_file = FFmpeg.hardsub(job, src_file, tempfolder,
                                      sub1_file, sub2_file)

        # Step 5: Create a job for our new file
        LoggingUtils.info(
            "[5/7] Creating new Job instance for the hardsubbed file...",
            color=LoggingUtils.LCYAN)
        hardsub_job = EncodeJobGenerator.create_job_for_hardsub(
            job, hardsub_file)

        # Step 6: Upload the new file
        LoggingUtils.info(
            "[6/7] Uploading hardsubbed file to destination(s)...",
            color=LoggingUtils.LCYAN)
        Rclone.upload(hardsub_job, econf.uploading_destinations, hardsub_file,
                      rclone_conf_tempfile, econf.uploading_rclone_flags)

        # Step 7: Send POST requests
        LoggingUtils.info("[7/7] Sending POST requests to endpoints...",
                          color=LoggingUtils.LCYAN)
        RestSender.send(JobUtils.to_dict(hardsub_job), econf.endpoints)
    except RcloneError as re:
        LoggingUtils.critical(re.message, color=LoggingUtils.LRED)
        LoggingUtils.critical("S/D: {} to {}".format(re.source, re.dest),
                              color=LoggingUtils.LRED)
        LoggingUtils.critical(re.output, color=LoggingUtils.RED)
        # Reraise - this will clutter up the logs but make it visible in RQ-dashboard
        raise
    except FFmpegError as fe:
        LoggingUtils.critical(fe.message, color=LoggingUtils.LRED)
        LoggingUtils.critical(fe.output, color=LoggingUtils.RED)
        # Reraise - this will clutter up the logs but make it visible in RQ-dashboard
        raise
    except WorkerCancelledError as we:
        LoggingUtils.critical(we.message, color=LoggingUtils.LRED)
        # Reraise for dashboard
        raise
    except Exception as e:
        # In the event of an exception, we want to simply log it
        LoggingUtils.critical(e, color=LoggingUtils.LRED)
        raise
    finally:
        # Fix: cleanup used to be duplicated in the success path and every
        # handler; a single finally guarantees it runs exactly once on every
        # exit path. Bare `raise` above also preserves original tracebacks.
        TempFolderController.destroy_temp_folder()
        RcloneTempFileController.destroy_temp_file()
def distribute(job: Job, rconf: RcloneConfigStore, dconf: DistributorConfigStore) -> None:
    """Job worker: download an episode from sub-type-specific sources and
    re-upload it to the matching destinations.

    Params:
        job - the distribution job to process
        rconf - rclone configuration used for downloads/uploads
        dconf - distributor configuration (sources, destinations, flags,
            trackers, whitelist, endpoints)

    Raises:
        RcloneError, JobSubTypeError, or any other pipeline error - logged,
        then re-raised so it is visible in RQ-dashboard. Temp resources are
        always cleaned up.
    """
    tempfolder = TempFolderController.get_temp_folder()
    rclone_conf_tempfile = RcloneTempFileController.get_temp_file(rconf)
    try:
        # Step 1: Check if we should even download the show
        LoggingUtils.info("[1/X] Fetching show information via Hisha...",
                          color=LoggingUtils.CYAN)
        info = hisha.search(job.show)

        # Step 2: Check filter
        LoggingUtils.info("[2/X] Checking user list filters...",
                          color=LoggingUtils.CYAN)
        filters = UserListFilter.check(job, info, dconf.anilist_tracker,
                                       dconf.mal_tracker, dconf.whitelist)
        if not filters:
            LoggingUtils.info(
                "User isn't watching this show, concluding job immediately.",
                color=LoggingUtils.LYELLOW)
            # Fix: this early return used to skip cleanup entirely, leaking
            # the temp folder and rclone conf tempfile; the finally below
            # now covers this path too.
            return False

        # Normalize the sub type once; both branches below key off it
        sub_type = job.sub.lower()

        # Step 3: Download the file
        LoggingUtils.info("[3/5] Starting download of episode file...",
                          color=LoggingUtils.LCYAN)
        if sub_type == "softsub":
            LoggingUtils.info(
                "Softsub mode detected, loading softsub download configs",
                color=LoggingUtils.CYAN)
            sources = dconf.softsub_downloading_sources
            flags = dconf.softsub_downloading_rclone_flags
        elif sub_type == "hardsub":
            LoggingUtils.info(
                "Hardsub mode detected, loading hardsub download configs",
                color=LoggingUtils.CYAN)
            sources = dconf.hardsub_downloading_sources
            flags = dconf.hardsub_downloading_rclone_flags
        else:
            raise JobSubTypeError(job, "Unknown sub type {}".format(job.sub))
        src_file = Rclone.download(job, sources, tempfolder,
                                   rclone_conf_tempfile, flags)

        # Step 4: Upload it elsewhere. Unknown sub types already raised at
        # step 3, so the second raise branch the original carried here was
        # unreachable and is dropped.
        LoggingUtils.info(
            "[4/5] Uploading hardsubbed file to destination(s)...",
            color=LoggingUtils.LCYAN)
        if sub_type == "softsub":
            LoggingUtils.info(
                "Softsub mode detected, loading softsub upload configs",
                color=LoggingUtils.CYAN)
            destinations = dconf.softsub_uploading_destinations
            flags = dconf.softsub_uploading_rclone_flags
        else:
            LoggingUtils.info(
                "Hardsub mode detected, loading hardsub upload configs",
                color=LoggingUtils.CYAN)
            destinations = dconf.hardsub_uploading_destinations
            flags = dconf.hardsub_uploading_rclone_flags
        Rclone.upload(job, destinations, src_file, rclone_conf_tempfile, flags)

        # Step 5: Send POST requests
        LoggingUtils.info("[5/5] Sending POST requests to endpoints...",
                          color=LoggingUtils.LCYAN)
        RestSender.send(JobUtils.to_dict(job), dconf.endpoints)
    except RcloneError as re:
        LoggingUtils.critical(re.message, color=LoggingUtils.LRED)
        LoggingUtils.critical("S/D: {} to {}".format(re.source, re.dest),
                              color=LoggingUtils.LRED)
        LoggingUtils.critical(re.output, color=LoggingUtils.RED)
        # Reraise - this will clutter up the logs but make it visible in RQ-dashboard
        raise
    except JobSubTypeError as jste:
        LoggingUtils.critical(jste.message, color=LoggingUtils.LRED)
        LoggingUtils.critical("Job: {}".format(jste.job),
                              color=LoggingUtils.LRED)
        raise
    except Exception as e:
        # In the event of an exception, we want to simply log it
        LoggingUtils.critical(e, color=LoggingUtils.LRED)
        raise
    finally:
        # Single cleanup point replaces the per-handler duplication
        TempFolderController.destroy_temp_folder()
        RcloneTempFileController.destroy_temp_file()
""" # Set worker name based on user host, or if Docker, the passed in build variable WORKER_NAME = str() if 'WORKER_NAME' in os.environ: WORKER_NAME = "{}|docker".format(os.environ.get('WORKER_NAME')) else: WORKER_NAME = "{name}@{fqdn}:{ident}".format( name=getpass.getuser(), fqdn=socket.getfqdn(), ident=datetime.now().strftime("%Y%m%d.%H%M")) print("Set Worker name as {}".format(WORKER_NAME)) qs = sys.argv[1:] qs = [q for q in qs if q] # Remove empty elements LoggingUtils.info("*** Listening on {}...".format(', '.join(qs)), color=LoggingUtils.LGREEN) while True: with Connection(): try: redis_conn = Redis(host=WorkerConf.redis_host, port=WorkerConf.redis_port, password=WorkerConf.redis_password, socket_keepalive=True, socket_timeout=180, health_check_interval=60) w = Worker(qs, connection=redis_conn, name=WORKER_NAME) w.work() except RedisConnectionError as rce: LoggingUtils.critical(
def check(cls, job: Job, info: HishaInfo, anilist: str, mal: str,
          whitelist: List[str]) -> bool:
    """
    Checks whether or not user is watching show.
    If anilist and mal both aren't active, returns True
    If either are active, it will check either (or both). If is watching on either, return true
    If show name has whitelisted term in it, return true
    Otherwise, returns false
    """
    # If neither are being used, it's true by default
    if not anilist and not mal:
        LoggingUtils.info("No filters are being used, checks passed",
                          color=LoggingUtils.GREEN)
        return True

    # Check our whitelist
    for term in whitelist:
        if term in job.show.lower() or term in info.title_english.lower():
            LoggingUtils.info(
                "Term {} is whitelisted and in show name, returning True".
                format(term), color=LoggingUtils.GREEN)
            return True

    # Each probe is a (lazy check, debug message) pair. Probes run in the
    # same order as before and short-circuit on the first hit; the lambdas
    # keep the tracker calls lazy.
    if anilist:
        anilist_probes = (
            (lambda: cls._KISHI.is_user_watching_id(anilist, info.id),
             "Check passed for Anilist via ID"),
            (lambda: cls._KISHI.is_user_watching_names(anilist, info.title_english),
             "Check passed for Anilist via Hisha show name"),
            (lambda: cls._KISHI.is_user_watching_names(anilist, job.show),
             "Check passed for Anilist via origin show name"),
        )
        for probe, passed_msg in anilist_probes:
            if probe():
                LoggingUtils.debug(passed_msg)
                LoggingUtils.info(
                    "User is watching show on Anilist, returning True",
                    color=LoggingUtils.GREEN)
                return True

    if mal:
        mal_probes = (
            (lambda: cls._AKARI.is_user_watching_id(mal, info.idMal),
             "Check passed for MyAnimeList via ID"),
            (lambda: cls._AKARI.is_user_watching_names(mal, info.title_english),
             "Check passed for MyAnimeList via Hisha show name"),
            (lambda: cls._AKARI.is_user_watching_names(mal, job.show),
             "Check passed for MyAnimeList via origin show name"),
        )
        for probe, passed_msg in mal_probes:
            if probe():
                LoggingUtils.debug(passed_msg)
                LoggingUtils.info(
                    "User is watching show on MyAnimeList, returning True",
                    color=LoggingUtils.GREEN)
                return True

    LoggingUtils.info(
        "Didn't find the show in any given filter, returning False",
        color=LoggingUtils.YELLOW)
    return False
def work(self, burst=False, logging_level="INFO",
         date_format=DEFAULT_LOGGING_DATE_FORMAT,
         log_format=DEFAULT_LOGGING_FORMAT, max_jobs=None):
    """Starts the work loop.

    Pops and performs all jobs on the current list of queues.  When all
    queues are empty, block and wait for new jobs to arrive on any of the
    queues, unless `burst` mode is enabled.

    The return value indicates whether any jobs were processed.

    NOTE(review): this looks like a patched copy of rq's Worker.work() - the
    custom behavior (marked below) converts StopRequested/SystemExit into
    WorkerCancelledError and re-raises TimeoutError instead of letting the
    catch-all handler absorb it. Keep it in sync with the installed rq
    version when upgrading.
    """
    setup_loghandlers(logging_level, date_format, log_format)
    self._install_signal_handlers()
    completed_jobs = 0
    self.register_birth()
    self.log.info("Worker %s: started, version %s", self.key, VERSION)
    self.set_state(WorkerStatus.STARTED)
    qnames = self.queue_names()
    self.log.info('*** Listening on %s...', green(', '.join(qnames)))
    try:
        while True:
            try:
                self.check_for_suspension(burst)

                if self.should_run_maintenance_tasks:
                    self.clean_registries()

                if self._stop_requested:
                    self.log.info('Worker %s: stopping on request', self.key)
                    LoggingUtils.info(
                        'Worker {}: stopping on request'.format(self.key),
                        color=LoggingUtils.LCYAN)
                    break

                # Block at most until shortly before the worker TTL would lapse
                timeout = None if burst else max(
                    1, self.default_worker_ttl - 15)

                result = self.dequeue_job_and_maintain_ttl(timeout)
                if result is None:
                    if burst:
                        self.log.info("Worker %s: done, quitting", self.key)
                        LoggingUtils.info(
                            "Worker {}: done, quitting".format(self.key),
                            color=LoggingUtils.LCYAN)
                    break

                job, queue = result
                self.execute_job(job, queue)
                self.heartbeat()

                completed_jobs += 1
                if max_jobs is not None:
                    if completed_jobs >= max_jobs:
                        self.log.info(
                            "Worker %s: finished executing %d jobs, quitting",
                            self.key, completed_jobs)
                        LoggingUtils.info(
                            "Worker {}: finished executing {} jobs, quitting"
                            .format(self.key, completed_jobs))
                        break

            except StopRequested:
                # Custom change: surface a cancellation instead of breaking
                raise WorkerCancelledError
            except SystemExit:
                # Cold shutdown detected
                # Custom change: surface a cancellation instead of re-raising
                raise WorkerCancelledError
            # These are our custom changes
            except TimeoutError:
                # This is an expected error thrown by us almost always
                # Catch it in the external loop so we can re-raise to not get stuck in a loop
                raise TimeoutError
            except:  # noqa
                self.log.error(
                    'Worker %s: found an unhandled exception, quitting...',
                    self.key, exc_info=True)
                LoggingUtils.error(
                    "Worker {}: found an unhandled exception, quitting...".
                    format(self.key))
                break
    except WorkerCancelledError:
        if not self.is_horse:
            self.register_death()
        raise WorkerCancelledError()
    except TimeoutError:
        if not self.is_horse:
            self.register_death()
        raise TimeoutError()
    finally:
        # NOTE(review): on the cancel/timeout paths above register_death()
        # runs in the handler AND here - presumably idempotent; confirm
        # against the rq implementation.
        if not self.is_horse:
            self.register_death()
    return bool(completed_jobs)
def authorize(cls, headers: EnvironHeaders) -> bool:
    """Validate the request's Authorization header against configured keys.

    Authorized when no keys are configured and none was sent, or when a
    sent key matches one of the configured user keys. Denied otherwise.
    """
    key = headers.get("Authorization")

    # --- The request carried no key ---
    if not key:
        if cls._CONF:
            # A key was required but the caller sent none
            LoggingUtils.info(
                "Request denied - a key was required but no key was provided",
                color=LoggingUtils.YELLOW)
            return False
        LoggingUtils.info(
            "Request authorized - no key was required, and no key was provided",
            color=LoggingUtils.GREEN)
        return True

    # --- The request carried a key ---
    if cls._CONF:
        for user, password in cls._CONF.items():
            if key == password:
                # Found a matching key, so return True
                LoggingUtils.info(
                    "Request authorized - a key was required, and a matching key was provided",
                    color=LoggingUtils.GREEN)
                LoggingUtils.info(
                    "Matching key was sent from {}".format(user))
                return True
        # If no matching key was found, return false
        LoggingUtils.info(
            "Request denied - a key was required, but a nonmatching key was provided",
            color=LoggingUtils.YELLOW)
    else:
        # A key was provided but none was required
        LoggingUtils.info(
            "Request denied - a key was not required, but one was provided",
            color=LoggingUtils.YELLOW)
    return False
# NOTE(review): the next six lines are the tail of an enqueue(...) call whose
# opening lines are outside this chunk - tokens left untouched.
job_timeout=JOB_TIMEOUT,
                        result_ttl=RESULT_TTL,
                        failure_ttl=FAILURE_TTL,
                        job_id=_create_job_id(job.episode, "encode"))
    LoggingUtils.info("Enqueued a new encoder job to the 'encode' queue",
                      color=LoggingUtils.CYAN)
    return "Request accepted", 200


@app.errorhandler(RedisConnectionError)
def handle_redis_connection_error(error):
    """Translate a lost Redis connection into a JSON 500 response."""
    LoggingUtils.critical("It appears that Redis is down.")
    response = {
        "success": False,
        "error": {
            "type": "Redis Connection",
            "message": "Redis connection error has occured."
        }
    }
    return jsonify(response), 500


def _create_job_id(episode: str, jobtype: str) -> str:
    """Build a human-readable RQ job id, e.g. "[encode] <episode>"."""
    return "[{}] {}".format(jobtype, episode)


if __name__ == "__main__":
    # Development entry point; binds on all interfaces at :8080
    LoggingUtils.info("Initializing Izumi application server")
    app.run(host='0.0.0.0', port=8080, debug=False)