def fetch_episode_of_bangumi(self, bangumi_id, subtitle_list=None, max_page=MAX_PAGE):
    """Fetch torrent episodes of a bangumi, optionally restricted to subtitle groups.

    Returns a list of dicts with download/subtitle_group/title/episode/time keys.
    """
    response_data = []
    if subtitle_list:
        for subtitle_id in subtitle_list:
            data = {'tag_id': [bangumi_id, subtitle_id, BANGUMI_TAG]}
            response = get_response(DETAIL_URL, 'POST', json=data)
            # get_response returns None on network/JSON errors; guard like the
            # pagination branch below instead of crashing on the subscript.
            if response:
                response_data.extend(response['torrents'])
    else:
        for i in range(max_page):
            if max_page > 1:
                print_info('Fetch page {0} ...'.format(i + 1))
            data = {'tag_id': [bangumi_id, BANGUMI_TAG], 'p': i + 1}
            response = get_response(DETAIL_URL, 'POST', json=data)
            if response:
                response_data.extend(response['torrents'])
    for index, bangumi in enumerate(response_data):
        response_data[index] = {
            'download': bangumi['magnet'],
            'subtitle_group': bangumi['team_id'],
            'title': bangumi['title'],
            'episode': self.parse_episode(bangumi['title']),
            # publish_time may carry fractional seconds; strip them before parsing
            'time': int(time.mktime(datetime.datetime.strptime(
                bangumi['publish_time'].split('.')[0],
                "%Y-%m-%dT%H:%M:%S").timetuple())),
        }
    return response_data
def history(ret):
    """Print a year/month timeline of followed bangumi."""
    month_names = ('January', 'February', 'March', 'April', 'May', 'June', 'July',
                   'August', 'September', 'October', 'November', 'December')
    rows = Followed.select(Followed).order_by(Followed.updated_time.asc())
    shown_year = None
    shown_month = None
    print_info('Bangumi Timeline')
    for row in rows:
        if row.status == STATUS_DELETED:
            slogan = ' ABANDON'
            color = RED
        else:
            slogan = 'FINISHED'
            color = GREEN
        # fall back to the epoch when the record has no timestamp
        if row.updated_time:
            date = datetime.datetime.fromtimestamp(int(row.updated_time))
        else:
            date = datetime.datetime.fromtimestamp(0)
        if date.year != 1970:
            if date.year != shown_year:
                print('%s%s%s' % (GREEN, str(date.year), COLOR_END))
                shown_year = date.year
            if date.year == shown_year and date.month != shown_month:
                print(' |\n |--- %s%s%s\n | |' % (YELLOW, month_names[date.month - 1], COLOR_END))
                shown_month = date.month
        print(' | |--- [%s%s%s] %s' % (color, slogan, COLOR_END, row.bangumi_name))
def fetch_episode_of_bangumi(self, bangumi_id, subtitle_list=None, max_page=MAX_PAGE):
    """Fetch torrent episodes of a bangumi and return them as a list of dicts.

    Each dict carries download/subtitle_group/title/episode/time keys; the
    download URL points at the site's .torrent endpoint rather than the magnet.
    """
    response_data = []
    ret = []
    if subtitle_list:
        for subtitle_id in subtitle_list:
            data = {'tag_id': [bangumi_id, subtitle_id, BANGUMI_TAG]}
            response = get_response(DETAIL_URL, 'POST', json=data)
            # get_response returns None on failure; guard like the branch below
            if response:
                response_data.extend(response['torrents'])
    else:
        for i in range(max_page):
            if max_page > 1:
                print_info('Fetch page {0} ...'.format(i + 1))
            data = {'tag_id': [bangumi_id, BANGUMI_TAG], 'p': i + 1}
            response = get_response(DETAIL_URL, 'POST', json=data)
            if response:
                response_data.extend(response['torrents'])
    for index, bangumi in enumerate(response_data):
        ret.append({
            'download': TORRENT_URL + bangumi['_id'] + '/download.torrent',
            'subtitle_group': bangumi['team_id'],
            'title': bangumi['title'],
            'episode': self.parse_episode(bangumi['title']),
            # publish_time may carry fractional seconds; strip them first
            'time': int(time.mktime(datetime.datetime.strptime(
                bangumi['publish_time'].split('.')[0],
                "%Y-%m-%dT%H:%M:%S").timetuple())),
        })
        if os.environ.get('DEBUG'):
            print(ret[index]['download'])
    return ret
def bangumi_calendar(self, force_update=False, save=True, cover=None):
    """Return the weekly bangumi schedule, optionally refreshing data and covers."""
    if force_update and not test_connection():
        force_update = False
        print_warning('Network is unreachable')
    if force_update:
        print_info('Fetching bangumi info ...')
        Bangumi.delete_all()
        weekly_list = self.fetch(save=save)
    else:
        weekly_list = Bangumi.get_updating_bangumi()
    if not weekly_list:
        print_warning('Warning: no bangumi schedule, fetching ...')
        weekly_list = self.fetch(save=save)
    if cover is not None:
        # collect covers missing from the local cache, then download them
        cover_to_be_download = cover
        for daily_bangumi in weekly_list.values():
            for bangumi in daily_bangumi:
                _, file_path = convert_cover_to_path(bangumi['cover'])
                if not glob.glob(file_path):
                    cover_to_be_download.append(bangumi['cover'])
        if cover_to_be_download:
            print_info('Updating cover ...')
            download_cover(cover_to_be_download)
    return weekly_list
def __new__(cls, *args, **kwargs):
    """Singleton constructor: load every user script under SCRIPT_PATH once."""
    if cls._defined is None:
        script_files = glob.glob('{}{}*.py'.format(SCRIPT_PATH, os.path.sep))
        for i in script_files:
            try:
                s = imp.load_source('script', os.path.join(SCRIPT_PATH, i))
                script_class = getattr(s, 'Script')()
                if cls.check(script_class):
                    cls.scripts.append(script_class)
                    print_info('Load script {} successfully.'.format(i))
            # a bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
            # Exception keeps the best-effort behaviour without doing that
            except Exception:
                print_warning('Load script {} failed, ignored'.format(i))
                if os.getenv('DEBUG_SCRIPT'):  # pragma: no cover
                    traceback.print_exc()
        cls._defined = super(ScriptRunner, cls).__new__(cls, *args, **kwargs)
    return cls._defined
def __new__(cls, *args, **kwargs):  # type: ignore
    """Singleton constructor: load user scripts from SCRIPT_PATH on first use."""
    if cls._defined is None:
        for script_file in glob.glob(f"{SCRIPT_PATH}{os.path.sep}*.py"):
            try:
                loader = SourceFileLoader("script", os.path.join(SCRIPT_PATH, script_file))
                module = types.ModuleType(loader.name)
                loader.exec_module(module)
                instance = module.Script()
                if cls.check(instance):
                    cls.scripts.append(instance)
                    print_info(f"Load script {script_file} successfully.")
            except Exception:
                # scripts are user-supplied; a broken one must not break startup
                print_warning(f"Load script {script_file} failed, ignored")
                if os.getenv("DEBUG_SCRIPT"):  # pragma: no cover
                    traceback.print_exc()
        cls._defined = super().__new__(cls, *args, **kwargs)
    return cls._defined
def download_prepare(data: List[Episode]) -> None:
    """Queue episodes for download and hand each one to the configured driver."""
    queue = save_to_bangumi_download_queue(data)
    driver = get_download_driver(DOWNLOAD_DELEGATE)
    for task in queue:
        save_path = os.path.join(SAVE_PATH, normalize_path(task.name), str(task.episode))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        # mark as downloading before handing off to the driver
        task.status = STATUS_DOWNLOADING
        task.save()
        try:
            driver.add_download(url=task.download, save_path=save_path)
            print_info("Add torrent into the download queue, "
                       f"the file will be saved at {save_path}")
        except Exception as e:
            if os.getenv("DEBUG"):  # pragma: no cover
                traceback.print_exc()
                raise e
            print_error(f"Error: {e}", exit_=False)
            # roll the record back so the episode is retried later
            task.status = STATUS_NOT_DOWNLOAD
            task.save()
def download(self):
    """Submit the torrent to the deluge web API and return its response."""
    if not self.torrent.startswith("magnet:"):
        # non-magnet links must first be fetched server-side as .torrent files
        e = self._call("web.download_torrent_from_url", [self.torrent])
        self.torrent = e["result"]
    torrent_options = {
        "add_paused": False,
        "compact_allocation": False,
        "move_completed": False,
        "download_location": self.save_path,
        "max_connections": -1,
        "max_download_speed": -1,
        "max_upload_slots": -1,
        "max_upload_speed": -1,
    }
    payload = {"path": self.torrent, "options": torrent_options}
    e = self._call("web.add_torrents", [[payload]])
    print_info(
        "Add torrent into the download queue, the file will be saved at {}"
        .format(self.save_path))
    return e
def check(cls, script: "ScriptBase", fs: str) -> None:
    """Register *script* unless its model is already past its due date."""
    due_date = script.Model().due_date
    if due_date and due_date < datetime.datetime.now():
        print(f"Skip load {fs} because it has reach its due_date")
        return
    cls.scripts.append(script)
    print_info(f"Load script {fs} successfully.")
def download(self):
    """Add the torrent to transmission and report where the file will be saved."""
    tc = self.get_client()
    # removed a leftover ``print(tc.add_torrent)`` debug statement
    tc.add_torrent(self.torrent, download_dir=self.save_path)
    print_info(
        "Add torrent into the download queue, the file will be saved at {}"
        .format(self.save_path))
def install_crontab():
    """Install the periodic update job (schtasks on Windows, crontab elsewhere)."""
    print_info('Installing crontab job')
    if IS_WINDOWS:
        copy(os.path.join(os.path.dirname(__file__), 'others/cron.vbs'), BGMI_PATH)
        vbs_path = os.path.join(BGMI_PATH, 'cron.vbs')
        os.system('schtasks /Create /SC HOURLY /TN "bgmi updater" /TR "{}" /IT /F'.format(vbs_path))
    else:
        shell_script = os.path.join(os.path.dirname(__file__), 'others/crontab.sh')
        os.system("bash '%s'" % shell_script)
def download(self):
    """Queue the torrent on the aria2 RPC server."""
    uri_options = {"dir": self.save_path}
    if self.old_version:
        # pre-token aria2 builds take no secret argument
        self.server.aria2.addUri([self.torrent], uri_options)
    else:
        self.server.aria2.addUri(ARIA2_RPC_TOKEN, [self.torrent], uri_options)
    print_info(
        'Add torrent into the download queue, the file will be saved at {0}'
        .format(self.save_path))
def exec_sql(sql: str, db: str = DB_PATH) -> None:
    """Execute a single SQL statement against *db*, committing on success.

    Failures are reported via print_error instead of raising.
    """
    conn = None
    try:
        print_info(f"Execute {sql}")
        conn = sqlite3.connect(db)
        conn.execute(sql)
        conn.commit()
    except sqlite3.OperationalError:  # pragma: no cover
        print_error("Execute SQL statement failed", exit_=False)
    finally:
        # release the connection even when execution fails (was leaked before)
        if conn is not None:
            conn.close()
def exec_sql(sql, db=DB_PATH):
    """Execute a single SQL statement against *db*, committing on success.

    Failures are reported via print_error instead of raising.
    """
    conn = None
    try:
        print_info('Execute {}'.format(sql))
        conn = sqlite3.connect(db)
        conn.execute(sql)
        conn.commit()
    except sqlite3.OperationalError:  # pragma: no cover
        print_error('Execute SQL statement failed', exit_=False)
    finally:
        # release the connection even when execution fails (was leaked before)
        if conn is not None:
            conn.close()
def cal(force_update: bool = False, save: bool = False, cover: Optional[List[str]] = None) -> Dict[str, List[Dict[str, Any]]]:
    """Return the weekly schedule, refreshing bangumi data and covers as needed."""
    logger.debug("cal force_update: %r save: %r", force_update, save)
    weekly_list = Bangumi.get_updating_bangumi()
    if not weekly_list:
        print_warning("Warning: no bangumi schedule, fetching ...")
        force_update = True
    if force_update:
        print_info("Fetching bangumi info ...")
        website.fetch(save=save)
        weekly_list = Bangumi.get_updating_bangumi()
    if cover is not None:
        # download any covers that are missing or unreadable locally
        cover_to_be_download = cover
        for daily_bangumi in weekly_list.values():
            for bangumi in daily_bangumi:
                _, file_path = convert_cover_url_to_path(bangumi["cover"])
                if not (os.path.exists(file_path) and bool(imghdr.what(file_path))):
                    cover_to_be_download.append(bangumi["cover"])
        if cover_to_be_download:
            print_info("Updating cover ...")
            download_cover(cover_to_be_download)
    # merge script-provided bangumi into the weekly schedule
    for patch in ScriptRunner().get_models_dict():
        weekly_list[patch["update_time"].lower()].append(patch)
    logger.debug(weekly_list)
    # for web api, return all subtitle group info
    r = weekly_list  # type: Dict[str, List[Dict[str, Any]]]
    for day, value in weekly_list.items():
        for index, bangumi in enumerate(value):
            bangumi["cover"] = normalize_path(bangumi["cover"])
            subtitle_group = [
                {"name": x["name"], "id": x["id"]}
                for x in Subtitle.get_subtitle_by_id(bangumi["subtitle_group"].split(", "))
            ]
            r[day][index]["subtitle_group"] = subtitle_group
    logger.debug(r)
    return r
def download(self):
    """Download via the deprecated xunlei-lixian command-line client."""
    print_warning('XunleiLixian is deprecated, please choose aria2-rpc or transmission-rpc.')
    vcode_path = os.path.join(TMP_PATH, 'vcode.jpg')
    command = [
        XUNLEI_LX_PATH, 'download', '--torrent',
        '--overwrite' if self.overwrite else '',
        '--output-dir={0}'.format(self.save_path),
        self.torrent,
        '--verification-code-path={0}'.format(vcode_path),
    ]
    print_info('Run command {0}'.format(' '.join(command)))
    print_warning('Verification code path: {0}'.format(vcode_path))
    self.call(command)
def download(self):
    """Add the torrent via transmission-rpc; a missing library is silently ignored."""
    try:
        import transmissionrpc  # noqa: F401 -- availability check only
        client = PatchClient(TRANSMISSION_RPC_URL, port=TRANSMISSION_RPC_PORT)
        # transmission expects bytes for non-ascii save paths here
        client.add_torrent(self.torrent, download_dir=self.save_path.encode('utf-8'))
        print_info(
            'Add torrent into the download queue, the file will be saved at {0}'
            .format(self.save_path))
    except ImportError:
        pass
def install_crontab():
    """Install the hourly update task (powershell schtasks on Windows, crontab otherwise)."""
    print_info('Installing crontab job')
    if IS_WINDOWS:
        copy(os.path.join(os.path.dirname(__file__), 'others/cron.vbs'), BGMI_PATH)
        vbs_path = os.path.join(BGMI_PATH, 'cron.vbs')
        os.system(
            'powershell.exe schtasks /Create /SC HOURLY /TN "bgmi updater" /TR "{}" /IT /F'
            .format(vbs_path))
    else:
        shell_script = os.path.join(os.path.dirname(__file__), 'others/crontab.sh')
        os.system("bash '%s'" % shell_script)
def install_crontab():
    """Install the hourly update task (schtasks on Windows, crontab otherwise)."""
    print_info("Installing crontab job")
    if IS_WINDOWS:
        copy(os.path.join(os.path.dirname(__file__), "others/cron.vbs"), BGMI_PATH)
        vbs_path = os.path.join(BGMI_PATH, "cron.vbs")
        os.system(
            'schtasks /Create /SC HOURLY /TN "bgmi updater" /TR "{}" /IT /F'.format(vbs_path))
    else:
        shell_script = os.path.join(os.path.dirname(__file__), "others/crontab.sh")
        os.system("bash '%s'" % shell_script)
def install_crontab():
    """Install the auto-update hook; on Windows only copy cron.vbs and print instructions."""
    if IS_WINDOWS:
        copy(os.path.join(os.path.dirname(__file__), 'cron.vbs'), BGMI_PATH)
        print_info('cron.vbs is located as {}'.format(os.path.join(BGMI_PATH, 'cron.vbs')))
        # scheduling on Windows is a manual step documented in the README
        print_warning(
            'if you want to enable bgmi autoupdate, see https://github.com/BGmi/BGmi/blob/master/README.windows.md for next step'
        )
    else:
        print_info('Installing crontab job')
        shell_script = os.path.join(os.path.dirname(__file__), 'crontab.sh')
        os.system('sh \'%s\'' % shell_script)
def history(ret):
    """Print a year/month timeline of followed bangumi with their current status."""
    month_names = (
        "January", "February", "March", "April", "May", "June",
        "July", "August", "September", "October", "November", "December",
    )
    rows = Followed.select(Followed).order_by(Followed.updated_time.asc())
    updating_bangumi = [
        s["name"] for s in itertools.chain(*Bangumi.get_updating_bangumi().values())
    ]
    shown_year = None
    shown_month = None
    print_info("Bangumi Timeline")
    for row in rows:
        if row.status == STATUS_DELETED:
            slogan, color = "ABANDON", RED
        elif row.bangumi_name in updating_bangumi:
            slogan, color = "FOLLOWING", YELLOW
        else:
            slogan, color = "FINISHED", GREEN
        # fall back to the epoch when the record has no timestamp
        date = datetime.datetime.fromtimestamp(int(row.updated_time) if row.updated_time else 0)
        if date.year != 1970:
            if date.year != shown_year:
                print("{}{}{}".format(GREEN, str(date.year), COLOR_END))
                shown_year = date.year
            if date.year == shown_year and date.month != shown_month:
                print(" |\n |--- {}{}{}\n | |".format(YELLOW, month_names[date.month - 1], COLOR_END))
                shown_month = date.month
        print(" | |--- [{}{:<9}{}] ({:<2}) {}".format(
            color, slogan, COLOR_END, row.episode, row.bangumi_name))
def download(self):
    """Add the torrent via authenticated transmission-rpc; skip if lib missing."""
    try:
        import transmissionrpc  # noqa: F401 -- availability check only
        client = PatchClient(TRANSMISSION_RPC_URL, port=TRANSMISSION_RPC_PORT,
                             user=TRANSMISSION_RPC_USERNAME,
                             password=TRANSMISSION_RPC_PASSWORD)
        try:
            client.add_torrent(self.torrent, download_dir=self.save_path)
        except UnicodeEncodeError:
            # some transmission builds need the path pre-encoded
            client.add_torrent(self.torrent, download_dir=self.save_path.encode('utf-8'))
        print_info('Add torrent into the download queue, the file will be saved at {0}'.format(self.save_path))
    except ImportError:
        pass
def download(self):
    """Mirror the episode directory from bgmi.ricterz.me using wget."""
    if not os.path.exists(self.save_path):
        os.makedirs(self.save_path)
    url = 'https://bgmi.ricterz.me/bangumi/{0}/{1}/'.format(self.name, self.episode)
    command = [
        WGET_PATH, '--no-parent', '-r', '--no-host-directories',
        '--cut-dirs', '100', '--reject', 'index.html',
        '-P', '{0}/'.format(self.save_path), url,
    ]
    print_info('Start download ...')
    self.call(command)
def filter_wrapper(ret):
    """Apply subscription filters for a bangumi and print the resulting settings."""
    result = filter_(name=ret.name, subtitle=ret.subtitle, include=ret.include,
                     exclude=ret.exclude, regex=ret.regex)
    if 'data' not in result:
        # dispatch to print_info / print_warning / print_error by status
        globals()["print_{}".format(result['status'])](result['message'])
    else:
        print_info('Usable subtitle group: {0}'.format(', '.join(result['data']['subtitle_group'])))
        print_filter(Filter.get(bangumi_name=ret.name))
    return result['data']
def get_response(url, method="GET", **kwargs):
    """Issue an HTTP request and return the decoded JSON body (None on failure)."""
    debug = bool(os.environ.get("DEBUG"))
    if debug:  # pragma: no cover
        print_info(f"Request URL: {url}")
    try:
        resp = requests.request(method.lower(), url, **kwargs)
        if debug:  # pragma: no cover
            print(resp.text)
        return resp.json()
    except requests.ConnectionError:
        print_error("error: failed to establish a new connection")
    except ValueError:
        # body was not valid JSON
        print_error(
            "error: server returned data maybe not be json,"
            " please create a issue at https://github.com/BGmi/BGmi/issues")
def filter_wrapper(ret):
    """Apply subscription filters for a bangumi and print the resulting settings."""
    result = filter_(name=ret.name, subtitle=ret.subtitle, include=ret.include,
                     exclude=ret.exclude, regex=ret.regex)
    if 'data' not in result:
        # dispatch to print_info / print_warning / print_error by status
        globals()["print_{}".format(result['status'])](result['message'])
    else:
        usable = ', '.join(result['data']['subtitle_group'])
        print_info('Usable subtitle group: {0}'.format(usable))
        print_filter(Filter.get(bangumi_name=ret.name))
    return result['data']
def download(self):
    """Download the torrent through the xunlei-lixian command-line client."""
    vcode_path = os.path.join(TMP_PATH, 'vcode.jpg')
    command = [
        XUNLEI_LX_PATH, 'download', '--torrent',
        '--overwrite' if self.overwrite else '',
        '--output-dir={0}'.format(self.save_path),
        self.torrent,
        '--verification-code-path={0}'.format(vcode_path),
    ]
    print_info('Run command {0}'.format(' '.join(command)))
    print_warning('Verification code path: {0}'.format(vcode_path))
    self.call(command)
def get_response(url, method='GET', **kwargs): # kwargs['proxies'] = {'http': "http://*****:*****@gmail.com')
def filter_wrapper(ret: Any) -> None:
    """Apply subscription filters for a bangumi and print the resulting settings."""
    result = filter_(
        name=ret.name,
        subtitle=ret.subtitle,
        include=ret.include,
        exclude=ret.exclude,
        regex=ret.regex,
    )
    if "data" not in result:
        # dispatch to the print helper matching the returned status
        globals()["print_{}".format(result["status"])](result["message"])
    else:
        usable = ", ".join(result["data"]["subtitle_group"])
        print_info("Usable subtitle group: {}".format(usable))
        print_filter(Filter.get(bangumi_name=result["data"]["name"]))
def print_filter(followed_filter_obj):
    """Pretty-print a follow filter: subtitle groups, keywords and regex."""
    if followed_filter_obj.subtitle:
        groups = ', '.join(
            s['name']
            for s in Subtitle.get_subtitle(followed_filter_obj.subtitle.split(', ')))
    else:
        groups = 'None'
    print_info('Followed subtitle group: {0}'.format(groups))
    print_info('Include keywords: {0}'.format(followed_filter_obj.include))
    print_info('Exclude keywords: {0}'.format(followed_filter_obj.exclude))
    print_info('Regular expression: {0}'.format(followed_filter_obj.regex))
def print_filter(followed_filter_obj: Filter) -> None:
    """Pretty-print a follow filter: subtitle groups, keywords and regex."""
    if followed_filter_obj.subtitle:
        groups = ", ".join(
            x["name"]
            for x in Subtitle.get_subtitle_by_id(followed_filter_obj.subtitle.split(", ")))
    else:
        groups = "None"
    print_info("Followed subtitle group: {}".format(groups))
    print_info(f"Include keywords: {followed_filter_obj.include}")
    print_info(f"Exclude keywords: {followed_filter_obj.exclude}")
    print_info(f"Regular expression: {followed_filter_obj.regex}")
def get_response(url, method='GET', **kwargs): # kwargs['proxies'] = {'http': "http://*****:*****@gmail.com')
def download(self):
    """Download via the deprecated xunlei-lixian command-line client."""
    print_warning(
        'XunleiLixian is deprecated, please choose aria2-rpc or transmission-rpc.'
    )
    vcode_path = os.path.join(TMP_PATH, 'vcode.jpg')
    command = [
        XUNLEI_LX_PATH, 'download', '--torrent',
        '--overwrite' if self.overwrite else '',
        '--output-dir={0}'.format(self.save_path),
        self.torrent,
        '--verification-code-path={0}'.format(vcode_path),
    ]
    print_info('Run command {0}'.format(' '.join(command)))
    print_warning('Verification code path: {0}'.format(vcode_path))
    self.call(command)
def get_response(url, method="GET", **kwargs): # kwargs['proxies'] = {'http': "http://*****:*****@gmail.com" )
def print_filter(followed_filter_obj):
    """Pretty-print a follow filter: subtitle groups, keywords and regex."""
    if followed_filter_obj.subtitle:
        groups = ", ".join(
            s["name"]
            for s in Subtitle.get_subtitle_by_id(followed_filter_obj.subtitle.split(", ")))
    else:
        groups = "None"
    print_info("Followed subtitle group: {}".format(groups))
    print_info("Include keywords: {}".format(followed_filter_obj.include))
    print_info("Exclude keywords: {}".format(followed_filter_obj.exclude))
    print_info("Regular expression: {}".format(followed_filter_obj.regex))
def fetch_episode_of_bangumi(
    self,
    bangumi_id: str,
    max_page: int,
    subtitle_list: Optional[List[str]] = None,
) -> List[Episode]:
    """Fetch torrent episodes of a bangumi, optionally restricted to subtitle groups.

    Returns Episode objects whose download URL points at the site's .torrent
    endpoint.
    """
    response_data = []
    ret = []
    if subtitle_list:
        for subtitle_id in subtitle_list:
            data = {
                "tag_id": [bangumi_id, subtitle_id, BANGUMI_TAG]
            }  # type: Dict[str, Any]
            response = get_response(DETAIL_URL, "POST", json=data)
            # get_response returns None on network/JSON errors; guard like the
            # pagination branch below instead of crashing on the subscript.
            if response:
                response_data.extend(response["torrents"])
    else:
        for i in range(max_page):
            if max_page > 1:
                print_info(f"Fetch page {i + 1} ...")
            data = {
                "tag_id": [bangumi_id, BANGUMI_TAG],
                "p": i + 1,
            }
            response = get_response(DETAIL_URL, "POST", json=data)
            if response:
                response_data.extend(response["torrents"])
    for index, bangumi in enumerate(response_data):
        ret.append(
            Episode(
                download=TORRENT_URL + bangumi["_id"] + "/download.torrent",
                subtitle_group=bangumi["team_id"],
                title=bangumi["title"],
                episode=self.parse_episode(bangumi["title"]),
                # publish_time may carry fractional seconds; strip them first
                time=int(
                    datetime.datetime.strptime(
                        bangumi["publish_time"].split(".")[0],
                        "%Y-%m-%dT%H:%M:%S").timestamp()),
            ))
        if os.environ.get("DEBUG"):
            print(ret[index].download)
    return ret
def download(self):
    """Submit the torrent to the deluge web API and return its response."""
    if not self.torrent.startswith('magnet:'):
        # resolve plain URLs into a server-side .torrent first
        e = self._call('web.download_torrent_from_url', [self.torrent])
        self.torrent = e['result']
    torrent_options = {
        "add_paused": False,
        "compact_allocation": False,
        "move_completed": False,
        "download_location": self.save_path,
        "max_connections": -1,
        "max_download_speed": -1,
        "max_upload_slots": -1,
        "max_upload_speed": -1,
    }
    payload = {"path": self.torrent, "options": torrent_options}
    e = self._call('web.add_torrents', [[payload]])
    print_info('Add torrent into the download queue, the file will be saved at {0}'.format(self.save_path))
    return e
def download_status(status=None):
    """Print all download records grouped under a header per status."""
    headers = {
        STATUS_DOWNLOADING: 'Downloading items:',
        STATUS_NOT_DOWNLOAD: 'Not downloaded items:',
        STATUS_DOWNLOADED: 'Downloaded items:',
    }
    printers = {
        STATUS_NOT_DOWNLOAD: print_info,
        STATUS_DOWNLOADING: print_warning,
        STATUS_DOWNLOADED: print_success,
    }
    last_status = -1
    for download_data in Download.get_all_downloads(status=status):
        latest_status = download_data['status']
        name = ' {0}. <{1}: {2}>'.format(
            download_data['id'], download_data['name'], download_data['episode'])
        # emit a header whenever the status of consecutive rows changes
        if latest_status != last_status and latest_status in headers:
            print(headers[latest_status])
        if latest_status in printers:
            printers[latest_status](name, indicator=False)
        last_status = latest_status
def install():
    """Download the xunlei-lixian tarball and link its CLI into XUNLEI_LX_PATH."""
    import tarfile
    import requests
    print_info('Downloading xunlei-lixian from https://github.com/iambus/xunlei-lixian/')
    resp = requests.get('https://github.com/iambus/xunlei-lixian/tarball/master',
                        stream=True, headers={'Accept-Encoding': ''})
    tmp = NamedTemporaryFile(delete=False)
    with tmp:
        for chunk in resp.iter_content(chunk_size=1024):
            if chunk:
                tmp.write(chunk)
    tmp.close()
    print_success('Download successfully, save at %s, extracting ...' % tmp.name)
    archive = tarfile.open(tmp.name, 'r:gz')
    archive.extractall(os.path.join(BGMI_PATH, 'tools/xunlei-lixian'))
    dir_name = archive.getnames()[0]
    print_info('Create link file ...')
    if not os.path.exists(XUNLEI_LX_PATH):
        os.symlink(
            os.path.join(BGMI_PATH, 'tools/xunlei-lixian/{0}/lixian_cli.py'.format(dir_name)),
            XUNLEI_LX_PATH)
    else:
        print_warning('{0} already exists'.format(XUNLEI_LX_PATH))
    print_success('All done')
    print_info('Please run command \'{0} config\' to configure your lixian-xunlei '
               '(Notice: only for Thunder VIP)'.format(XUNLEI_LX_PATH))
def run(self, return_=True, download=False):
    """Run every loaded script, update its model and collect its downloads."""
    for script in self.scripts:
        print_info('fetching {} ...'.format(script.bangumi_name))
        download_item = self.make_dict(script)
        script_obj = script.Model().obj
        if not download_item:
            print_info('Got nothing, quit script {}.'.format(script))
            continue
        episode = max(item['episode'] for item in download_item)
        episode_range = range(script_obj.episode + 1, episode + 1)
        if episode <= script_obj.episode:
            continue
        print_success('{} updated, episode: {}'.format(script.bangumi_name, episode))
        script_obj.episode = episode
        script_obj.status = STATUS_UPDATED
        script_obj.updated_time = int(time.time())
        script_obj.save()
        # keep every item for the episodes that were newly published
        download_queue = [
            e for i in episode_range for e in download_item if i == e['episode']
        ]
        if return_:
            self.download_queue.extend(download_queue)
            continue
        if download:
            print_success('Start downloading of {}'.format(script))
            download_prepare(download_queue)
    return self.download_queue
def history(ret):
    """Print a year/month timeline of followed bangumi with their current status."""
    month_names = ('January', 'February', 'March', 'April', 'May', 'June', 'July',
                   'August', 'September', 'October', 'November', 'December')
    rows = Followed.select(Followed).order_by(Followed.updated_time.asc())
    updating_bangumi = [
        s['name'] for s in itertools.chain(*Bangumi.get_updating_bangumi().values())
    ]
    shown_year = None
    shown_month = None
    print_info('Bangumi Timeline')
    for row in rows:
        if row.status == STATUS_DELETED:
            slogan, color = 'ABANDON', RED
        elif row.bangumi_name in updating_bangumi:
            slogan, color = 'FOLLOWING', YELLOW
        else:
            slogan, color = 'FINISHED', GREEN
        # fall back to the epoch when the record has no timestamp
        date = datetime.datetime.fromtimestamp(int(row.updated_time) if row.updated_time else 0)
        if date.year != 1970:
            if date.year != shown_year:
                print('%s%s%s' % (GREEN, str(date.year), COLOR_END))
                shown_year = date.year
            if date.year == shown_year and date.month != shown_month:
                print(' |\n |--- %s%s%s\n | |' % (YELLOW, month_names[date.month - 1], COLOR_END))
                shown_month = date.month
        print(' | |--- [%s%-9s%s] (%-2s) %s' % (color, slogan, COLOR_END, row.episode, row.bangumi_name))
def fetch_(ret):
    """Fetch and print the available episode titles for a followed bangumi."""
    try:
        bangumi_obj = Bangumi.get(name=ret.name)
    except Bangumi.DoesNotExist:
        print_error('Bangumi {0} not exist'.format(ret.name))
        return
    try:
        Followed.get(bangumi_name=bangumi_obj.name)
    except Followed.DoesNotExist:
        print_error('Bangumi {0} is not followed'.format(ret.name))
        return
    print_filter(Filter.get(bangumi_name=ret.name))
    print_info('Fetch bangumi {0} ...'.format(bangumi_obj.name))
    # --not-ignore keeps old rows that would otherwise be filtered out
    _, data = website.get_maximum_episode(
        bangumi_obj, ignore_old_row=not ret.not_ignore)
    if not data:
        print_warning('Nothing.')
    for item in data:
        print_success(item['title'])
def __new__(cls, *args, **kwargs):
    """Singleton constructor: load every user script under SCRIPT_PATH once."""
    if cls._defined is None:
        script_files = glob.glob('{}{}*.py'.format(SCRIPT_PATH, os.path.sep))
        for i in script_files:
            try:
                s = imp.load_source('script', os.path.join(SCRIPT_PATH, i))
                script_class = getattr(s, 'Script')()
                if cls.check(script_class):
                    cls.scripts.append(script_class)
                    print_info('Load script {} successfully.'.format(i))
            # a bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
            # Exception keeps the best-effort behaviour without doing that
            except Exception:
                print_warning('Load script {} failed, ignored'.format(i))
                if os.getenv('DEBUG_SCRIPT'):  # pragma: no cover
                    traceback.print_exc()
        cls._defined = super(ScriptRunner, cls).__new__(cls, *args, **kwargs)
    return cls._defined
def download_status(status=None):
    """Print download status from the database and from transmission-rpc."""
    print_info('Print download status in database')
    BaseDownloadService.download_status(status=status)
    print('')
    print_info('Print download status in transmission-rpc')
    try:
        import transmissionrpc
        client = transmissionrpc.Client(TRANSMISSION_RPC_URL,
                                        port=TRANSMISSION_RPC_PORT,
                                        user=TRANSMISSION_RPC_USERNAME,
                                        password=TRANSMISSION_RPC_PASSWORD)
        for torrent in client.get_torrents():
            print_info(' * {0}: {1}'.format(torrent.status, torrent), indicator=False)
    except ImportError:
        # transmissionrpc is optional; just skip the live view
        pass
def download_status(status=None):
    """Print download status from the database and from the aria2c RPC server."""
    Aria2DownloadRPC.check_aria2c_version()
    print_info('Print download status in database')
    BaseDownloadService.download_status(status=status)
    print()
    print_info('Print download status in aria2c-rpc')
    try:
        server = PatchedServerProxy(ARIA2_RPC_URL)
        # map our download status to the aria2 RPC methods that report it
        status_dict = {
            STATUS_DOWNLOADING: ['tellActive'],
            STATUS_NOT_DOWNLOAD: ['tellWaiting'],
            STATUS_DOWNLOADED: ['tellStopped'],
            None: ['tellStopped', 'tellWaiting', 'tellActive'],
        }
        for method in status_dict.get(status):
            # tellWaiting/tellStopped are paginated; tellActive takes no args
            params = () if method in ('tellActive',) else (0, 1000)
            if Aria2DownloadRPC.old_version:
                data = server.aria2[method](*params)
            else:
                data = server.aria2[method](ARIA2_RPC_TOKEN, *params)
            if data:
                print_warning('RPC {0}:'.format(method), indicator=False)
            for row in data:
                print_success('- {0}'.format(row['dir']), indicator=False)
                for file_ in row['files']:
                    print_info(' * {0}'.format(file_['path']), indicator=False)
    except Exception:
        print_error('Cannot connect to aria2-rpc server')
def print_filter(followed_filter_obj):
    """Pretty-print a follow filter: subtitle groups, keywords and regex."""
    if followed_filter_obj.subtitle:
        groups = ', '.join(
            s['name']
            for s in Subtitle.get_subtitle_by_id(followed_filter_obj.subtitle.split(', ')))
    else:
        groups = 'None'
    print_info('Followed subtitle group: {0}'.format(groups))
    print_info('Include keywords: {0}'.format(followed_filter_obj.include))
    print_info('Exclude keywords: {0}'.format(followed_filter_obj.exclude))
    print_info('Regular expression: {0}'.format(followed_filter_obj.regex))
def update(name, download=None, not_ignore=False):
    """Refresh followed bangumi, record new episodes and optionally download them.

    Returns a result dict with 'updated' and 'downloaded' lists under 'data'.
    """
    logger.debug('updating bangumi info with args: download: {}'.format(download))
    result = {'status': 'info', 'message': '',
              'data': {'updated': [], 'downloaded': []}}
    ignore = not bool(not_ignore)
    print_info('marking bangumi status ...')
    now = int(time.time())
    # episodes untouched for a day fall back from UPDATED to FOLLOWED
    for item in Followed.get_all_followed():
        if item['updated_time'] and int(item['updated_time'] + 60 * 60 * 24) < now:
            followed_obj = Followed.get(bangumi_name=item['bangumi_name'])
            followed_obj.status = STATUS_FOLLOWED
            followed_obj.save()
    for script in ScriptRunner().scripts:
        obj = script.Model().obj
        if obj.updated_time and int(obj.updated_time + 60 * 60 * 24) < now:
            obj.status = STATUS_FOLLOWED
            obj.save()
    print_info('updating subscriptions ...')
    download_queue = []
    if download:
        # --download only makes sense for exactly one named bangumi
        if not name:
            print_warning('No specified bangumi, ignore `--download` option')
        if len(name) > 1:
            print_warning('Multiple specified bangumi, ignore `--download` option')
    if not name:
        updated_bangumi_obj = Followed.get_all_followed()
    else:
        updated_bangumi_obj = []
        for bangumi_name in name:
            try:
                updated_bangumi_obj.append(
                    model_to_dict(Followed.get(bangumi_name=bangumi_name)))
            except DoesNotExist:
                pass
    runner = ScriptRunner()
    script_download_queue = runner.run()
    for subscribe in updated_bangumi_obj:
        print_info('fetching %s ...' % subscribe['bangumi_name'])
        try:
            bangumi_obj = Bangumi.get(name=subscribe['bangumi_name'])
        except Bangumi.DoesNotExist:
            print_error('Bangumi<{0}> does not exists.'.format(subscribe['bangumi_name']),
                        exit_=False)
            continue
        try:
            followed_obj = Followed.get(bangumi_name=subscribe['bangumi_name'])
        except Followed.DoesNotExist:
            print_error('Bangumi<{0}> is not followed.'.format(subscribe['bangumi_name']),
                        exit_=False)
            continue
        episode, all_episode_data = website.get_maximum_episode(
            bangumi=bangumi_obj, ignore_old_row=ignore, max_page=1)
        if (episode.get('episode') > subscribe['episode']) or (len(name) == 1 and download):
            if len(name) == 1 and download:
                # an explicit episode list was passed on the command line
                episode_range = download
            else:
                episode_range = range(subscribe['episode'] + 1,
                                      episode.get('episode', 0) + 1)
            print_success('%s updated, episode: %d' %
                          (subscribe['bangumi_name'], episode['episode']))
            followed_obj.episode = episode['episode']
            followed_obj.status = STATUS_UPDATED
            followed_obj.updated_time = int(time.time())
            followed_obj.save()
            result['data']['updated'].append({'bangumi': subscribe['bangumi_name'],
                                              'episode': episode['episode']})
            for episode_no in episode_range:
                for epi in all_episode_data:
                    if epi['episode'] == episode_no:
                        download_queue.append(epi)
                        break
    if download is not None:
        result['data']['downloaded'] = download_queue
        download_prepare(download_queue)
        download_prepare(script_download_queue)
        print_info('Re-downloading ...')
        download_prepare(Download.get_all_downloads(status=STATUS_NOT_DOWNLOAD))
    return result
def download(self):
    """Queue the torrent on the aria2 RPC server."""
    uri_options = {"dir": self.save_path}
    if self.old_version:
        # pre-token aria2 builds take no secret argument
        self.server.aria2.addUri([self.torrent], uri_options)
    else:
        self.server.aria2.addUri(ARIA2_RPC_TOKEN, [self.torrent], uri_options)
    print_info('Add torrent into the download queue, the file will be saved at {0}'.format(self.save_path))