def get_player(bangumi_name: str) -> Dict[int, Dict[str, str]]:
    """Scan a bangumi's save directory and map episode numbers to video paths.

    Walks ``SAVE_PATH/<bangumi_name>``. A first-level sub-directory whose name
    is all digits is treated as that episode's directory; video files directly
    under the bangumi directory are recorded under episode ``-1``.

    :param bangumi_name: bangumi name; swapped for its normalized form when
        that directory exists on disk
    :return: mapping of episode number to ``{"path": <url-style path>}``
    """
    episode_list: Dict[int, Dict[str, str]] = {}
    # new path: prefer the normalized directory name when it exists
    if os.path.exists(os.path.join(SAVE_PATH, normalize_path(bangumi_name))):
        bangumi_name = normalize_path(bangumi_name)
    bangumi_path = os.path.join(SAVE_PATH, bangumi_name)
    path_walk = os.walk(bangumi_path)
    logger.debug("os.walk(bangumi_path) => %s", pformat(path_walk))
    for root, _, files in path_walk:
        # depth relative to the bangumi directory decides episode handling
        rel_parts = root.replace(bangumi_path, "").split(os.path.sep)
        base_path = root.replace(SAVE_PATH, "")
        if len(rel_parts) >= 2:
            episode_path = root.replace(os.path.join(SAVE_PATH, bangumi_name), "")
            segment = episode_path.split(os.path.sep)[1]
            if not segment.isdigit():
                # skip sub-directories that are not named by episode number
                continue
            episode = int(segment)
        else:
            episode = -1
        for bangumi in files:
            # str.endswith accepts a tuple; no need for any() + comprehension
            if bangumi.lower().endswith((".mp4", ".mkv", ".webm")):
                # NOTE: the original re-joined dirname+basename here, a no-op;
                # removed. Expose a URL-style path regardless of host OS.
                video_file_path = os.path.join(base_path, bangumi)
                video_file_path = video_file_path.replace(os.path.sep, "/")
                episode_list[episode] = {"path": video_file_path}
                break
    return episode_list
def get_player(bangumi_name):
    """Collect playable video files for a bangumi, keyed by episode number."""
    episode_list = {}
    # new path
    if os.path.exists(os.path.join(SAVE_PATH, normalize_path(bangumi_name))):
        bangumi_name = normalize_path(bangumi_name)
    bangumi_path = os.path.join(SAVE_PATH, bangumi_name)
    path_walk = os.walk(bangumi_path)
    logger.debug('os.walk(bangumi_path) => {}'.format(pformat(path_walk)))
    for root, _, files in path_walk:
        depth_parts = root.replace(bangumi_path, '').split(os.path.sep)
        base_path = root.replace(SAVE_PATH, '')
        if len(depth_parts) < 2:
            episode = -1
        else:
            episode_path = root.replace(os.path.join(SAVE_PATH, bangumi_name), '')
            candidate = episode_path.split(os.path.sep)[1]
            if not candidate.isdigit():
                continue
            episode = int(candidate)
        for filename in files:
            lowered = filename.lower()
            if lowered.endswith('.mp4') or lowered.endswith('.mkv') or lowered.endswith('.webm'):
                video_file_path = os.path.join(base_path, filename)
                video_file_path = os.path.join(
                    os.path.dirname(video_file_path),
                    os.path.basename(video_file_path))
                video_file_path = video_file_path.replace(os.path.sep, '/')
                episode_list[episode] = {'path': video_file_path}
                break
    return episode_list
def cal(force_update=False, save=False):
    """Return the weekly bangumi calendar with expanded subtitle-group info."""
    weekly_list = website.bangumi_calendar(force_update=force_update, save=save)
    # for web api
    r = weekly_list
    for day, value in weekly_list.items():
        for index, bangumi in enumerate(value):
            bangumi['cover'] = normalize_path(bangumi['cover'])
            group_ids = bangumi['subtitle_group']
            if not isinstance(group_ids, list):
                group_ids = group_ids.split(', ')
            subtitle_group = [
                {'name': row['name'], 'id': row['id']}
                for row in Subtitle.get_subtitle_by_id(group_ids)
            ]
            r[day][index]['subtitle_group'] = subtitle_group
    return r
def cal(force_update=False, save=False):
    """Build the weekly calendar, merged with script-based bangumi patches."""
    logger.debug('cal force_update: {} save: {}'.format(force_update, save))
    weekly_list = website.bangumi_calendar(force_update=force_update, save=save)
    runner = ScriptRunner()
    for patch in runner.get_models_dict():
        weekly_list[patch['update_time'].lower()].append(patch)
    logger.debug(weekly_list)
    # for web api, return all subtitle group info
    r = weekly_list
    for day, value in weekly_list.items():
        for index, bangumi in enumerate(value):
            bangumi['cover'] = normalize_path(bangumi['cover'])
            groups = bangumi['subtitle_group']
            if not isinstance(groups, list):
                groups = groups.split(', ')
            r[day][index]['subtitle_group'] = [
                {'name': s['name'], 'id': s['id']}
                for s in Subtitle.get_subtitle_by_id(groups)
            ]
    logger.debug(r)
    return r
def download_prepare(data: List[Episode]) -> None:
    """Queue episodes and hand each to the configured download driver.

    Each queued item gets a ``SAVE_PATH/<name>/<episode>`` directory and is
    submitted to the driver; on failure the item's status is reset so it can
    be retried later.

    :param data: episodes to enqueue
    """
    queue = save_to_bangumi_download_queue(data)
    driver = get_download_driver(DOWNLOAD_DELEGATE)
    for download in queue:
        # a single join handles all three components; the original nested join
        # was redundant
        save_path = os.path.join(
            SAVE_PATH, normalize_path(download.name), str(download.episode)
        )
        # exist_ok avoids the race between an exists() check and makedirs()
        os.makedirs(save_path, exist_ok=True)
        # mark as downloading
        download.status = STATUS_DOWNLOADING
        download.save()
        try:
            driver.add_download(url=download.download, save_path=save_path)
            print_info("Add torrent into the download queue, "
                       f"the file will be saved at {save_path}")
        except Exception as e:
            if os.getenv("DEBUG"):  # pragma: no cover
                traceback.print_exc()
                raise e
            print_error(f"Error: {e}", exit_=False)
            # roll back status so the episode is picked up again next run
            download.status = STATUS_NOT_DOWNLOAD
            download.save()
def cal(force_update=False, save=False):
    """Weekly bangumi calendar including script patches, for the web API."""
    logger.debug('cal force_update: {} save: {}'.format(force_update, save))
    weekly_list = website.bangumi_calendar(force_update=force_update, save=save)
    script_patches = ScriptRunner().get_models_dict()
    for item in script_patches:
        weekly_list[item['update_time'].lower()].append(item)
    logger.debug(weekly_list)

    def to_pair(row):
        # keep only the fields the web front-end consumes
        return {'name': row['name'], 'id': row['id']}

    # for web api, return all subtitle group info
    r = weekly_list
    for day, value in weekly_list.items():
        for index, bangumi in enumerate(value):
            bangumi['cover'] = normalize_path(bangumi['cover'])
            ids = bangumi['subtitle_group']
            if not isinstance(ids, list):
                ids = ids.split(', ')
            r[day][index]['subtitle_group'] = [
                to_pair(x) for x in Subtitle.get_subtitle_by_id(ids)
            ]
    logger.debug(r)
    return r
def cal(force_update: bool = False,
        save: bool = False,
        cover: Optional[List[str]] = None) -> Dict[str, List[Dict[str, Any]]]:
    """Return the weekly updating-bangumi calendar for the web API.

    :param force_update: re-fetch the calendar from the data source
    :param save: passed through to ``website.fetch``
    :param cover: cover urls to (re-)download; ``None`` disables cover handling
    :return: mapping of weekday name to that day's bangumi dicts, with
        ``subtitle_group`` expanded to ``[{"name": ..., "id": ...}, ...]``
    """
    logger.debug("cal force_update: %r save: %r", force_update, save)
    weekly_list = Bangumi.get_updating_bangumi()
    if not weekly_list:
        print_warning("Warning: no bangumi schedule, fetching ...")
        force_update = True
    if force_update:
        print_info("Fetching bangumi info ...")
        website.fetch(save=save)
        weekly_list = Bangumi.get_updating_bangumi()
    if cover is not None:
        # download cover to local.
        # BUG FIX: copy the list so we do not mutate the caller's argument
        # while collecting missing covers.
        cover_to_be_download = list(cover)
        for daily_bangumi in weekly_list.values():
            for bangumi in daily_bangumi:
                _, file_path = convert_cover_url_to_path(bangumi["cover"])
                # NOTE(review): imghdr is deprecated since Python 3.11 and
                # removed in 3.13 — consider another image validity check.
                if not (os.path.exists(file_path) and bool(imghdr.what(file_path))):
                    cover_to_be_download.append(bangumi["cover"])
        if cover_to_be_download:
            print_info("Updating cover ...")
            download_cover(cover_to_be_download)
    runner = ScriptRunner()
    patch_list = runner.get_models_dict()
    for i in patch_list:
        weekly_list[i["update_time"].lower()].append(i)
    logger.debug(weekly_list)
    # for web api, return all subtitle group info
    r = weekly_list  # type: Dict[str, List[Dict[str, Any]]]
    for day, value in weekly_list.items():
        for index, bangumi in enumerate(value):
            bangumi["cover"] = normalize_path(bangumi["cover"])
            subtitle_group = list(
                map(
                    lambda x: {
                        "name": x["name"],
                        "id": x["id"]
                    },
                    Subtitle.get_subtitle_by_id(
                        bangumi["subtitle_group"].split(", ")),
                ))
            r[day][index]["subtitle_group"] = subtitle_group
    logger.debug(r)
    return r
def __init__(self, *args: Any, **kwargs: Any) -> None:
    """Load the cached latest version and script patch list, then delegate up."""
    latest_file = os.path.join(BGMI_PATH, "latest")
    # only read the cache file when latest_version is unset — presumably a
    # class-level cache shared across instances (confirm against class body)
    if self.latest_version is None and os.path.exists(latest_file):
        with open(latest_file) as f:
            self.latest_version = f.read().strip()
    self.patch_list = ScriptRunner().get_models_dict()
    for patch in self.patch_list:
        patch["cover"] = normalize_path(patch["cover"])
    super().__init__(*args, **kwargs)
def getBangumiList():
    """Return followed bangumi merged with bgmi-script-defined ones."""
    # subscribe list
    data = Followed.get_all_followed(STATUS_DELETED, STATUS_UPDATING)
    # bgmi-scripts list
    script_items = ScriptRunner().get_models_dict()
    for item in script_items:
        item['cover'] = normalize_path(item['cover'])
    data.extend(script_items)
    return data
def get_player(bangumi_name):
    """Map each downloaded episode of *bangumi_name* to its video file path."""
    episode_list = {}
    # new path
    if os.path.exists(os.path.join(SAVE_PATH, normalize_path(bangumi_name))):
        bangumi_name = normalize_path(bangumi_name)
    bangumi_path = os.path.join(SAVE_PATH, bangumi_name)
    path_walk = os.walk(bangumi_path)
    logger.debug('os.walk(bangumi_path) => {}'.format(pformat(path_walk)))
    video_suffixes = ('.mp4', '.mkv', '.webm')
    for root, _, files in path_walk:
        base_path = root.replace(SAVE_PATH, '')
        if len(root.replace(bangumi_path, '').split(os.path.sep)) >= 2:
            relative = root.replace(os.path.join(SAVE_PATH, bangumi_name), '')
            segment = relative.split(os.path.sep)[1]
            if not segment.isdigit():
                continue
            episode = int(segment)
        else:
            episode = -1
        for name in files:
            if name.lower().endswith(video_suffixes):
                joined = os.path.join(base_path, name)
                joined = os.path.join(
                    os.path.dirname(joined), os.path.basename(joined))
                episode_list[episode] = {
                    'path': joined.replace(os.path.sep, '/')
                }
                break
    return episode_list
def get(self, type_=''):
    """Write the followed-bangumi list (optionally with script patches) as JSON."""
    status = STATUS_END if type_ == 'old' else STATUS_UPDATING
    data = Followed.get_all_followed(STATUS_DELETED, status)
    if type_ == 'index':
        data.extend(self.patch_list)
        # entries without an updated_time sort with key 1
        data.sort(key=lambda row: row['updated_time'] or 1)
    for bangumi in data:
        bangumi['cover'] = '{}/{}'.format(COVER_URL,
                                          normalize_path(bangumi['cover']))
    data.reverse()
    for item in data:
        item['player'] = get_player(item['bangumi_name'])
    self.write(self.jsonify(data))
    self.finish()
def download_prepare(data):
    """Save *data* to the download queue and run each download immediately.

    :param data: list of dicts with keys ``name`` (search keyword), ``title``
        (episode title), ``episode`` (episode number), ``download`` (link)
    :return: None
    """
    queue = save_to_bangumi_download_queue(data)
    for download in queue:
        save_path = os.path.join(
            os.path.join(SAVE_PATH, normalize_path(download.name)),
            str(download.episode),
        )
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        # mark as downloading
        download.status = STATUS_DOWNLOADING
        download.save()
        try:
            # start download
            downloader = get_download_class(
                download_obj=download, save_path=save_path
            )
            downloader.download()
            downloader.check_download(download.name)
            # mark as downloaded
            download.downloaded()
        except Exception as e:
            if os.getenv("DEBUG"):  # pragma: no cover
                import traceback

                traceback.print_exc()
                raise e
            print_error("Error: {}".format(e), exit_=False)
            download.status = STATUS_NOT_DOWNLOAD
            download.save()
def convert_cover_to_path(self, cover_url):
    """Convert a bangumi cover url to its local storage location.

    :param cover_url: bangumi cover path, or an absolute http(s) url
    :type cover_url: str
    :rtype: (str, str, str)
    :return: dir_path, file_path, url
    """
    # BUG FIX (docs): the old docstring claimed "file_path, dir_path, url"
    # but the function returns dir_path first.
    # startswith accepts a tuple of prefixes — one call instead of an or-chain
    if cover_url.startswith(('https://', 'http://')):
        url = cover_url
    else:
        url = '{}/{}'.format(self.cover_url, cover_url)
    cover_url = normalize_path(cover_url)
    file_path = os.path.join(SAVE_PATH, 'cover', cover_url)
    dir_path = os.path.dirname(file_path)
    return dir_path, file_path, url
def get(self, type_=""): data = Followed.get_all_followed( STATUS_DELETED, STATUS_UPDATING if not type_ == "old" else STATUS_END) if type_ == "index": data.extend(self.patch_list) data.sort( key=lambda _: _["updated_time"] if _["updated_time"] else 1) for bangumi in data: bangumi["cover"] = "{}/{}".format(COVER_URL, normalize_path(bangumi["cover"])) data.reverse() for item in data: item["player"] = get_player(item["bangumi_name"]) self.write(self.jsonify(data)) self.finish()
def get(self, type_: str = "") -> None: data: List[dict] = Followed.get_all_followed( STATUS_DELETED, STATUS_END if type_ == "old" else STATUS_UPDATING) def sorter(_: Dict[str, int]) -> int: return _["updated_time"] if _["updated_time"] else 1 if type_ == "index": data.extend(self.patch_list) data.sort(key=sorter) for bangumi in data: bangumi["cover"] = "{}/{}".format(COVER_URL, normalize_path(bangumi["cover"])) data.reverse() for item in data: item["player"] = get_player(item["bangumi_name"]) self.write(self.jsonify(data)) self.finish()
def cal(force_update=False, save=False):
    """Return the weekly calendar with script patches and expanded subtitle groups."""
    logger.debug("cal force_update: {} save: {}".format(force_update, save))
    weekly_list = website.bangumi_calendar(force_update=force_update, save=save)
    for patch in ScriptRunner().get_models_dict():
        weekly_list[patch["update_time"].lower()].append(patch)
    logger.debug(weekly_list)
    # for web api, return all subtitle group info
    r = weekly_list
    for day, value in weekly_list.items():
        for index, bangumi in enumerate(value):
            bangumi["cover"] = normalize_path(bangumi["cover"])
            raw = bangumi["subtitle_group"]
            id_list = raw if isinstance(raw, list) else raw.split(", ")
            r[day][index]["subtitle_group"] = [
                {"name": g["name"], "id": g["id"]}
                for g in Subtitle.get_subtitle_by_id(id_list)
            ]
    logger.debug(r)
    return r
def download_prepare(data):
    """Queue the given episodes and download each one, tracking status.

    :param data: list of dicts carrying name / title / episode / download link
    :return: None
    """
    queue = save_to_bangumi_download_queue(data)
    for download in queue:
        bangumi_dir = os.path.join(SAVE_PATH, normalize_path(download.name))
        save_path = os.path.join(bangumi_dir, str(download.episode))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        # mark as downloading
        download.status = STATUS_DOWNLOADING
        download.save()
        try:
            # start download
            delegate = get_download_class(download_obj=download,
                                          save_path=save_path)
            delegate.download()
            delegate.check_download(download.name)
            # mark as downloaded
            download.downloaded()
        except Exception as e:
            if os.getenv('DEBUG'):  # pragma: no cover
                import traceback
                traceback.print_exc()
                raise e
            print_error('Error: {0}'.format(e), exit_=False)
            download.status = STATUS_NOT_DOWNLOAD
            download.save()