def __init__(self):
    """Set up the Deluge JSON-RPC endpoint, auth cookies and translations."""
    # the Deluge web UI exposes its RPC interface at <address>/json
    base_address = return_default_config_string('deluge_address')
    self.deluge_address = f'{base_address}json'
    secret = return_default_config_string('deluge_secret')
    # authenticate once and keep the session cookies for subsequent calls
    self.cookies = self.create_deluge_cookies(self.deluge_address, secret)
    self.translate_map = BackendTranslation()
def verify_aria2_configs_exist():
    """Return True when address, port and token for aria2 are all configured.

    The port is coerced to int; an empty or missing value (or 0) counts as
    unconfigured, exactly as before.
    """
    # idiom fix: `if cond: return False else: return True` -> single all()
    return all((
        return_default_config_string('aria_address'),
        int(return_default_config_string('aria_port') or 0),
        return_default_config_string('aria_token'),
    ))
def single_scrape():
    """Scrape one jav file, build its emby folder and move the file in place.

    Expects posted json with an ``update_dict`` describing the file.
    Returns ``{'success': jav_obj, 'errors': [...]}``.
    """
    req_data = json.loads(request.get_data() or '{}')
    each_jav = req_data['update_dict']
    sources = return_default_config_string('jav_obj_priority').split(',')
    errors = []

    emby_folder = EmbyFileStructure(return_default_config_string('file_path'))

    # strip subtitle / multi-CD postfixes from the file name to derive the car
    postfix, car_str = emby_folder.extract_subtitle_postfix_filename(
        os.path.splitext(each_jav['file_name'])[0])
    _, car_str = emby_folder.extract_CDs_postfix_filename(car_str)
    each_jav.update({'car': car_str})

    # scrape
    jav_obj = parse_single_jav(each_jav, sources)

    # add chinese subtitle tag if any
    if postfix:
        jav_obj.setdefault('genres', []).append('中字')

    # handle error when all sources fail
    if jav_obj.get('errors') and isinstance(jav_obj['errors'], list) and len(
            jav_obj['errors']) == len(sources):
        errors.append(
            json.dumps({
                'log':
                '{} process failed, cannot find any info in all sources {}'.
                format(each_jav['car'], sources)
            }))
        # bug fix: nothing was scraped - do not attempt folder/image/nfo
        # operations on an empty object (parse_unprocessed_folder skips too)
        return jsonify({'success': jav_obj, 'errors': errors})
    elif jav_obj.get('error') and isinstance(jav_obj['error'], str):
        # handle one of the source is not valid
        errors.append(
            json.dumps({
                'log':
                '{} process failed, one of the source within {} is not valid on {}'
                .format(each_jav['car'], sources, jav_obj['error'])
            }))
        return jsonify({'success': jav_obj, 'errors': errors})

    # file structure operations
    try:
        jav_obj = emby_folder.create_new_folder(jav_obj)
    except KeyError as e:
        _car = each_jav.get('car', 'Unknown')
        errors.append(json.dumps({'log': f'error: {e}, skipping {_car}'}))
        # bug fix: folder creation failed - skip the follow-up file operations
        # instead of writing into a folder that was never created
        return jsonify({'success': jav_obj, 'errors': errors})

    # write images
    emby_folder.write_images(jav_obj)
    # write nfo
    emby_folder.write_nfo(jav_obj)
    # move video file
    jav_obj = emby_folder.put_processed_file(jav_obj)

    return jsonify({'success': jav_obj, 'errors': errors})
def return_actor_role():
    """Return the localized label used for the 'actor role' field."""
    labels = {
        'cn': '女优',
        'en': 'P**n Star',
    }
    # KeyError on an unconfigured/unknown language, same as before
    return labels[return_default_config_string('display_language')]
class BackendTranslation:
    """Look up UI strings from translation.json in the configured language."""

    # Load the translation table once, at class-definition time. Use a
    # context manager so the file handle is closed: the previous code
    # leaked the handle returned by open().
    with open(resource_path('translation.json'), 'r', encoding='utf8') as _f:
        translate_json = json.load(_f)
    del _f  # keep the temporary handle off the class namespace
    language = return_default_config_string('display_language')

    def __getitem__(self, key):
        # unknown keys or missing translations resolve to None
        return self.translate_json.get(key, {}).get(self.language, None)
def find_images():
    """Scrape info for one car and upsert the db record when it changed."""
    car = request.args.get('car')
    sources = request.args.get('sources')

    if not car:
        return jsonify({'error': 'cannot find car from request'}), 400

    db_conn = JavManagerDB()
    try:
        jav_obj = dict(db_conn.get_by_pk(car))
    except (DoesNotExist, TypeError):
        # TypeError covers dict(None) when the record is absent
        jav_obj = {'car': car}

    # fall back to the configured source priority when none was supplied
    if sources:
        sources = str(sources).split(',')
    else:
        sources = return_default_config_string('jav_obj_priority').split(',')

    res = parse_single_jav({'car': car}, sources)
    if res != jav_obj:
        jav_obj.update(res)
        db_conn.upcreate_jav(jav_obj)

    return jsonify({'success': jav_obj})
def update_jav_dict():
    """Update the stored jav dict, then rewrite nfo/images and move the file.

    Expects posted json with an ``update_dict``; returns the processed jav
    object so the UI can refresh.
    """
    req_data = json.loads(request.get_data() or '{}')
    update_dict = req_data['update_dict']

    # update db
    db_conn = JavManagerDB()
    db_conn.upcreate_jav(update_dict)

    file_writer = EmbyFileStructure(return_default_config_string('file_path'))

    # file structure operations
    try:
        jav_obj = file_writer.create_folder_for_existing_jav(update_dict)
    except KeyError as e:
        # bug fix: the old code called .append() on a dict (AttributeError)
        # and then continued with an undefined jav_obj; report and stop
        _car = update_dict.get('car', 'Unknown')
        return jsonify({'error': f'error: {e}, skipping {_car}'}), 400

    # write images
    file_writer.write_images(jav_obj)
    # write nfo
    file_writer.write_nfo(jav_obj)
    # move video file
    jav_obj = file_writer.move_existing_file(jav_obj)

    # post updated jav_obj back to UI
    return jsonify({'success': jav_obj})
def new_pick_index_rescrape():
    """Re-scrape one car from one source at a specific search-result index.

    The jav object must already exist in the db (incremental update).
    """
    car = request.args.get('car')
    source = request.args.get('source')
    pick_index = request.args.get('pick_index')

    if not car:
        return jsonify({'error': 'cannot find car from request'}), 400
    # bug fix: also guard a missing parameter - the old code called
    # .isdigit() on None, which raised AttributeError
    if not pick_index or not pick_index.isdigit():
        return jsonify({'error': f'{pick_index} is not a valid index'}), 400

    # incremental pick index jav obj must exist currently
    db_conn = JavManagerDB()
    db_jav_obj = dict(db_conn.get_by_pk(car))
    db_jav_obj_old = deepcopy(db_jav_obj)

    # verify sources
    sources = return_default_config_string('jav_obj_priority').split(',')
    if source not in sources:
        raise Exception(f'{source} is not a valid source for pick index update')

    try:
        scraped_info = SOURCES_MAP[source](
            {'car': car}, pick_index=int(pick_index)).scrape_jav()
    except JAVNotFoundException:
        errors = (db_jav_obj.get('errors') or [])
        errors.append('{} cannot be found in {}'.format(db_jav_obj['car'], source))
        scraped_info = {'errors': errors}

    print(scraped_info)
    db_jav_obj.update(scraped_info)
    # also save the scrape result under the source's own key
    db_jav_obj[source] = scraped_info

    # only touch the db when something actually changed
    if db_jav_obj_old != db_jav_obj:
        db_conn.upcreate_jav(db_jav_obj)

    return jsonify({'success': db_jav_obj})
def __init__(self, root_path):
    """Validate the scan root and read emby-related settings from the ini."""
    if not os.path.isdir(root_path):
        # distinguish "missing entirely" from "exists but not a directory"
        if not os.path.exists(root_path):
            raise Exception(f'{root_path} does not exist')
        raise Exception(f'{root_path} is not a valid directory for scan')

    _cfg = return_default_config_string
    self.root_path = root_path
    self.file_list = []
    self.folder_structure = _cfg('folder_structure')

    # settings from the ini file; '是' (yes) enables the feature
    self.handle_multi_cds = _cfg('handle_multi_cds') == '是'
    self.preserve_subtitle_filename = _cfg('preserve_subtitle_filename') == '是'
    self.subtitle_filename_postfix = _cfg('subtitle_filename_postfix').split(',')

    self.jav_manage = JavManagerDB()
def parse_emby_folder():
    """Re-scrape every jav already in the emby folder and rewrite its assets."""
    path = request.args.get('path')
    sources = request.args.get('sources')

    # fall back to the configured priority when the caller gave no sources
    if sources:
        sources = str(sources).split(',')
    else:
        sources = return_default_config_string('jav_obj_priority').split(',')

    emby_folder = EmbyFileStructure(path)
    # scan folder
    emby_folder.scan_emby_root_path()

    processed = []
    for each_jav in emby_folder.file_list:
        # scrape, then rewrite assets in place (folder already exists)
        jav_obj = parse_single_jav(each_jav, sources)
        emby_folder.write_images(jav_obj)
        emby_folder.write_nfo(jav_obj)
        processed.append(each_jav['car'])

    return jsonify({'success': processed})
def remove_existing_tag():
    """Remove tags from javs that already exist locally."""
    folder = EmbyFileStructure(return_default_config_string('file_path'))
    folder.remove_tags()
    return 'ok'
def rescan_emby_folder():
    """Scan javs that already exist locally and report their directories."""
    folder = EmbyFileStructure(return_default_config_string('file_path'))
    folder.scan_emby_root_path()
    directories = [jav_obj['directory'] for jav_obj in folder.file_list]
    return jsonify({'success': directories})
def rewrite_images():
    """Rewrite the image files for the posted jav dict."""
    update_dict = json.loads(request.get_data() or '{}')['update_dict']
    writer = EmbyFileStructure(return_default_config_string('file_path'))
    # safe to call directly since it only writes top level key fields
    writer.write_images(update_dict)
    return jsonify({'success': 'good'})
def verify_local_nfo():
    """Check whether the nfo for a directory/filename pair exists locally."""
    directory = request.args.get('directory')
    filename = request.args.get('filename')

    # bug fix: guard missing query params - .replace()/os.path.join on None
    # raised AttributeError/TypeError before
    if not directory or not filename:
        return jsonify({'error': 'directory and filename are required'}), 400

    root = return_default_config_string('file_path')
    # special processing: normalize linux-style db paths to the local os
    # separator (and vice versa on windows)
    directory = directory.replace('/', os.sep).replace('\\', os.sep)

    # removed leftover debug print of the joined path
    whether_exists = os.path.isfile(os.path.join(root, directory, filename))
    return jsonify({'success': whether_exists})
def rewrite_nfo():
    """Persist the posted jav dict and rewrite its nfo file."""
    update_dict = json.loads(request.get_data() or '{}')['update_dict']
    JavManagerDB().upcreate_jav(update_dict)
    writer = EmbyFileStructure(return_default_config_string('file_path'))
    # safe to call directly since it only writes top level key fields
    writer.write_nfo(update_dict, verify=True)
    return jsonify({'success': 'good'})
def parse_single():
    """Scrape one car from the requested (or default priority) sources."""
    car = request.args.get('car')
    sources = request.args.get('sources')

    if not car:
        return jsonify({'error': 'cannot find car from request'}), 400

    # verify sources, defaulting to the configured priority list
    if sources:
        sources = str(sources).split(',')
    else:
        sources = return_default_config_string('jav_obj_priority').split(',')

    res = parse_single_jav({'car': car}, sources)
    return jsonify({'car': car, 'sources': sources, 'parsed_output': res})
def find_images(car: str):
    """Scrape ``car`` with the configured sources and upsert the db record."""
    db_conn = JavManagerDB()
    try:
        jav_obj = dict(db_conn.get_by_pk(car))
    except (DoesNotExist, TypeError):
        # TypeError covers dict(None) when the record is absent
        jav_obj = {'car': car}

    sources = return_default_config_string('jav_obj_priority').split(',')
    res = parse_single_jav({'car': car}, sources)

    # only touch the db when the scrape produced something new
    if res != jav_obj:
        jav_obj.update(res)
        db_conn.upcreate_jav(jav_obj)
    return jav_obj
def restructure_jav():
    """Rebuild the folder structure for an existing jav and move its file."""
    req_data = json.loads(request.get_data() or '{}')
    update_dict = req_data['update_dict']

    file_writer = EmbyFileStructure(return_default_config_string('file_path'))

    # file structure operations
    try:
        jav_obj = file_writer.create_folder_for_existing_jav(update_dict)
    except KeyError as e:
        # bug fix: previous code called .append() on a dict (AttributeError)
        # and then continued with an undefined jav_obj; fail fast instead
        _car = update_dict.get('car', 'Unknown')
        return jsonify({'error': f'error: {e}, skipping {_car}'}), 400

    # write images
    file_writer.write_images(jav_obj)
    # write nfo
    file_writer.write_nfo(jav_obj)
    # move video file
    jav_obj = file_writer.move_existing_file(jav_obj)

    return jsonify({'success': 'good'})
def custom_magnet_sorting(magnet_list: list):
    """Sort magnets by size and move subtitled entries to the front.

    NOTE: drains ``magnet_list`` in place and returns a new list
    (same behavior as before).
    """
    # sort ascending by size; fall back to 'size' if 'size_sort' is absent
    try:
        magnet_list.sort(key=lambda m: m['size_sort'])
    except KeyError:
        magnet_list.sort(key=lambda m: m['size'])

    markers = return_default_config_string('subtitle_filename_postfix').split(',')

    ordered = []
    # pop from the tail: non-subtitled entries are appended in reverse
    # sorted order, subtitled ones are pushed to the head of the result
    while magnet_list:
        candidate = magnet_list.pop()
        title = candidate.get('title', '')
        if any(marker in title for marker in markers):
            ordered.insert(0, candidate)
        else:
            ordered.append(candidate)
    return ordered
def download_magnet():
    """Start a magnet download for a car via the configured downloader."""
    req_data = json.loads(request.get_data() or '{}')
    car = req_data.get('car')
    magnet = req_data.get('magnet')

    if not (car and magnet):
        return jsonify(
            {'error': 'required fields are not found in posted json'}), 400

    # pick the downloader backend from configuration
    if return_default_config_string('magnet_downloader') == 'aria2':
        downloader = OOFDownloader()
    else:
        # use deluge client
        downloader = DelugeDownloader()

    jav_obj = downloader.handle_jav_download(car, magnet)
    if jav_obj.get('error'):
        return jsonify({'error': jav_obj.get('error')}), 400
    return jsonify({'success': jav_obj})
def search_ikoa_dmmc(car: str):
    """Query the ikoa/dmmc server for downloadable sources of ``car``."""
    # prototype
    server_addr = return_default_config_string('ikoa_dmmc_server')
    res = requests.get(server_addr + 'lookup?id={}'.format(car), timeout=10)

    sources = res.json()['success']['sources']

    def _entry(source_name):
        # one magnet-table row pointing at the server's download url
        return {
            'title': f'{source_name} - {car}',
            'car': car,
            'idmm': f'{server_addr}download?id={car}&source={source_name}',
            'size': '-',
            'size_sort': '-'
        }

    rt = []
    if 'ikoa' in sources and not need_ikoa_credit(car):
        rt.append(_entry('ikoa'))
    if 'dmmc' in sources:
        rt.append(_entry('dmmc'))
    return rt
import aria2p
from JavHelper.core.ini_file import return_default_config_string

# Module-level aria2 JSON-RPC client built from the ini configuration.
# NOTE(review): this binds whatever config exists at import time; a
# get_aria2() helper elsewhere in the project builds the same client
# on demand with fresh settings.
aria2 = aria2p.API(
    aria2p.Client(
        host=return_default_config_string('aria_address'),
        # port may be unset in the ini - fall back to 0
        port=int(return_default_config_string('aria_port') or 0),
        secret=return_default_config_string('aria_token')
    )
)
# -*- coding:utf-8 -*- import os from urllib.parse import urlparse from PIL import Image import requests import re import traceback import codecs from copy import deepcopy from JavHelper.core.backend_translation import BackendTranslation from JavHelper.core.nfo_parser import EmbyNfo from JavHelper.core.ini_file import return_default_config_string if return_default_config_string('db_type') == 'sqlite': from JavHelper.model.jav_manager import SqliteJavManagerDB as JavManagerDB else: from JavHelper.model.jav_manager import BlitzJavManagerDB as JavManagerDB POSTER_NAME = 'poster' FANART_NAME = 'fanart' DEFAULT_FILENAME_PATTERN = r'^.*?(?P<pre>[a-zA-Z]{2,6})\W*(?P<digit>\d{1,6}).*?$' class EmbyFileStructure: def __init__(self, root_path=return_default_config_string('file_path')): if not os.path.exists(root_path): raise Exception(f'{root_path} does not exist') if not os.path.isdir(root_path):
def get_necessary_sources():
    """Return the configured scraping-source priority list."""
    priority = return_default_config_string('jav_obj_priority')
    return jsonify({'success': priority.split(',')})
def pre_scan_files():
    """List files (name / car / size in MB) under a path before processing.

    Retries the directory scan up to 3 times: a rename issued just before
    the scan can leave os.listdir with stale names, making the subsequent
    getsize fail until the rename completes.
    """
    path = request.args.get('path') or return_default_config_string(
        'file_path')

    # handle usual errors up front
    if not os.path.exists(path):
        return jsonify({'response': [{
            'file_name': f'{path} does not exist'
        }]}), 400
    if not os.path.isdir(path):
        return jsonify({
            'response': [{
                'file_name': f'{path} is not a valid directory for scan'
            }]
        }), 400

    file_list = []
    retry_num = 0
    while retry_num < 3:
        # bug fix: reset the accumulator on every attempt - previously a
        # failed attempt left partial duplicates in the final listing
        file_list = []
        try:
            for file_name in os.listdir(path):
                # filter out dot files
                if file_name.startswith('.'):
                    continue
                # no longer care about directories, just skip them
                if os.path.isdir(os.path.join(path, file_name)):
                    continue
                # size in whole MB via bit shift
                file_size = os.path.getsize(os.path.join(path, file_name)) >> 20
                _car = os.path.splitext(file_name)[0]
                file_list.append({
                    'file_name': file_name,
                    'car': _car,
                    'size': f'{file_size}MB'
                })
            break
        except Exception as e:
            print(f'{e} happens, retry')
            retry_num += 1
            sleep(3)

    return jsonify({
        'response': sorted(file_list, key=lambda k: k.get('file_name')),
        'header': [{
            'name': 'File Name',
            'selector': 'file_name',
            'sortable': True
        }, {
            'name': 'Size',
            'selector': 'size',
            'sortable': True
        }]
    })
def directory_path():
    """Expose the configured root file path to the UI."""
    configured_root = return_default_config_string('file_path')
    return jsonify({'success': configured_root})
def get_aria2():
    """Build a fresh aria2 RPC client from the current ini settings."""
    client = aria2p.Client(
        host=return_default_config_string('aria_address'),
        # an unset port falls back to 0
        port=int(return_default_config_string('aria_port') or 0),
        secret=return_default_config_string('aria_token'))
    return aria2p.API(client)
def parse_unprocessed_folder():
    """Bulk-scrape a folder of unprocessed files, streaming progress logs.

    Returns a streamed ``text/event-stream`` Response where each event is a
    json line ``{'log': ...}``. Per-file failures are reported in the stream
    and the file is skipped; processing continues with the next file.
    """
    path = request.args.get('path')
    sources = request.args.get('sources')
    # verify sources, defaulting to the configured priority list
    if not sources:
        sources = return_default_config_string('jav_obj_priority').split(',')
    else:
        sources = str(sources).split(',')
    emby_folder = EmbyFileStructure(path)
    # scan folder
    emby_folder.scan_new_root_path()
    processed = []
    total = len(emby_folder.file_list)

    def long_process():
        # generator body: everything below runs lazily as the response streams
        yield json.dumps({
            'log':
            'start bulk jav parses for {} items'.format(
                len(emby_folder.file_list))
        }) + '\n'
        for each_jav in emby_folder.file_list:
            # scrape
            jav_obj = parse_single_jav(each_jav, sources)
            # handle error when all sources fail: skip this file entirely
            if jav_obj.get('errors') and isinstance(
                    jav_obj['errors'], list) and len(
                        jav_obj['errors']) == len(sources):
                processed.append(each_jav['car'])
                yield json.dumps({
                    'log':
                    '{} process failed, cannot find any info in all sources {}, {} to go'
                    .format(each_jav['car'], sources, total - len(processed))
                }) + '\n'
                continue
            elif jav_obj.get('error') and isinstance(jav_obj['error'], str):
                # handle one of the source is not valid: skip this file
                processed.append(each_jav['car'])
                yield json.dumps({
                    'log':
                    '{} process failed, one of the source within {} is not valid on {}'
                    .format(each_jav['car'], sources, jav_obj['error'])
                }) + '\n'
                continue
            # file structure operations; a KeyError means required fields
            # are missing from the scrape result, so skip this file
            try:
                jav_obj = emby_folder.create_new_folder(jav_obj)
            except KeyError as e:
                _car = each_jav.get('car', 'Unknown')
                yield json.dumps({'log': f'error: {e}, skipping {_car}'
                                  }) + '\n'
                continue
            # write images
            emby_folder.write_images(jav_obj)
            # write nfo
            emby_folder.write_nfo(jav_obj)
            # move video file
            jav_obj = emby_folder.put_processed_file(jav_obj)
            processed.append(each_jav['car'])
            yield json.dumps({
                'log':
                '{} processed, {} to go'.format(each_jav['car'],
                                                total - len(processed))
            }) + '\n'
        yield json.dumps({'log': 'jav parse finishes'}) + '\n'

    return Response(long_process(), mimetype='text/event-stream')