def has_link(strmFilename, link):
    """Return True when *link* already appears in a 'plugin://' line of the
    .strm file's '.alternative' companion file; False otherwise (including
    when no companion file exists).

    Lines are URL-unquoted before the containment check, so an encoded
    stored link still matches a decoded query link.
    """
    alt_path = strmFilename + '.alternative'
    if not filesystem.isfile(alt_path):
        return False
    with filesystem.fopen(alt_path, "r") as alt_file:
        for raw in alt_file:
            if raw.startswith('plugin://') and link in urllib.unquote(raw):
                return True
    return False
def recheck_torrent_if_need(from_time, settings):
    """Re-hash recently modified torrents for the torrent2http player.

    Scans every subdirectory of settings.torrents_path() for '*.hashes'
    files whose mtime is within one hour of *from_time*; when such a file
    lists more than one hash, the corresponding torrent (same path minus
    the '.hashes' suffix) is re-added to a Torrent2HTTP engine and the
    function waits until the engine reports a steady state.

    No-op unless settings.torrent_player == 'torrent2http'.
    """
    if settings.torrent_player != 'torrent2http':
        return

    def check_modify_time(fn):
        # True when fn was modified within one hour of from_time.
        import filesystem
        mt = filesystem.getmtime(fn)
        if abs(from_time - mt) < 3600:
            return True
        return False

    def get_hashes(fn):
        # Read the .hashes file and return its lines stripped of CR/LF.
        # (The original had an unreachable 'return []' after the 'with'
        # block — the 'with' body always returns — so it was removed.)
        with filesystem.fopen(fn, 'r') as hf:
            hashes = hf.readlines()
            return [h.strip('\r\n') for h in hashes]

    def rehash_torrent(hashes, torrent_path):
        # Feed torrent_path to a fresh torrent2http engine and block until
        # the engine reaches a steady state. 'hashes' is not used here; the
        # caller only uses its length to decide whether to re-hash.
        import time
        try:
            from torrent2httpplayer import Torrent2HTTPPlayer
            from torrent2http import State
        except ImportError:
            # torrent2http support not installed — silently skip.
            return
        player = Torrent2HTTPPlayer(settings)
        player.AddTorrent(torrent_path)
        player.GetLastTorrentData()
        #player.StartBufferFile(0)
        # NOTE(review): the torrent is added twice (AddTorrent above and
        # _AddTorrent here) — preserved from the original; confirm intent.
        player._AddTorrent(torrent_path)
        player.engine.start()
        # Result was never used; the call is kept for possible side effects.
        player.engine.file_status(0)
        while True:
            time.sleep(1.0)
            status = player.engine.status()
            if status.state in [State.FINISHED, State.SEEDING, State.DOWNLOADING]:
                break
        player.engine.wait_on_close()
        player.close()

    def process_dir(_d):
        # Re-hash every fresh, multi-hash '.hashes' file found in _d.
        for fn in filesystem.listdir(_d):
            full_name = filesystem.join(_d, fn)
            if fn.endswith('.hashes') and check_modify_time(full_name):
                hashes = get_hashes(full_name)
                if len(hashes) > 1:
                    rehash_torrent(hashes, full_name.replace('.hashes', ''))

    for d in filesystem.listdir(settings.torrents_path()):
        dd = filesystem.join(settings.torrents_path(), d)
        if not filesystem.isfile(dd):
            process_dir(dd)
def get_links_with_ranks(strmFilename, settings, use_scrape_info=False):
    """Parse the .strm file's '.alternative' companion and return a list of
    dicts, each holding a 'plugin://script.media.aggregator' link, its
    accumulated '#key=value' metadata, and a numeric 'rank'; the list is
    sorted ascending by 'rank'.

    With use_scrape_info=True the metadata is augmented via seeds_peers()
    before ranking. Any failure while ranking a single entry is logged and
    that entry falls back to rank 1.
    """
    alt_path = strmFilename + '.alternative'
    results = []
    if not filesystem.isfile(alt_path):
        return results
    meta = {}
    with filesystem.fopen(alt_path, "r") as alt_file:
        rank = 1
        while True:
            raw = alt_file.readline()
            if not raw:
                break
            raw = raw.decode('utf-8')
            if raw.startswith('#'):
                # Metadata line: '#key=value' accumulates into meta until
                # the next link line consumes it.
                key_value = raw.lstrip('#').split('=')
                if len(key_value) > 1:
                    meta[key_value[0]] = key_value[1].strip(' \n\t\r')
            elif raw.startswith('plugin://script.media.aggregator'):
                link = raw.strip(u'\r\n\t ')
                try:
                    meta['link'] = link
                    if use_scrape_info:
                        meta = dict(meta, **seeds_peers(meta))
                    if 'rank' in meta:
                        rank = float(meta['rank'])
                    else:
                        rank = get_rank(meta.get('full_title', ''), meta, settings)
                except BaseException as e:
                    import log
                    log.print_tb(e)
                    rank = 1
                entry = {'rank': rank, 'link': link}
                entry.update(meta)
                results.append(entry)
                meta.clear()
    results.sort(key=operator.itemgetter('rank'))
    return results
def make_alternative(self, strmFilename, link, parser):
    """Append *link* to the .strm file's '.alternative' companion file,
    preceded by the parser's metadata as '#key=value' lines, unless the
    link is already present in the file.

    Writing is best-effort: any failure while appending is swallowed.
    """
    strmFilename_alt = strmFilename + '.alternative'
    s_alt = u''
    if filesystem.isfile(strmFilename_alt):
        with filesystem.fopen(strmFilename_alt, "r") as alternative:
            s_alt = alternative.read().decode('utf-8')
    if not (link in s_alt):
        try:
            with filesystem.fopen(strmFilename_alt, "a+") as alternative:
                for key, value in parser.Dict().iteritems():
                    # Bulky descriptive fields are not persisted here.
                    if key in ['director', 'studio', 'country', 'plot', 'actor', 'genre', 'country_studio']:
                        continue
                    alternative.write('#%s=%s\n' % (make_utf8(key), make_utf8(value)))
                alternative.write(link.encode('utf-8') + '\n')
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the append remains best-effort.
            pass
def make_alternative(self, strmFilename, link, parser):
    """Append *link* to the .strm file's '.alternative' companion file,
    preceded by the parser's metadata as '#key=value' lines, unless the
    link is already present in the file.

    NOTE(review): this module contains another, near-identical definition
    of make_alternative; if both live in the same class, the later one
    shadows the earlier — confirm which is intended.
    """
    strmFilename_alt = strmFilename + '.alternative'
    s_alt = u''
    if filesystem.isfile(strmFilename_alt):
        with filesystem.fopen(strmFilename_alt, "r") as alternative:
            s_alt = alternative.read().decode('utf-8')
    if not (link in s_alt):
        try:
            with filesystem.fopen(strmFilename_alt, "a+") as alternative:
                for key, value in parser.Dict().iteritems():
                    # Bulky descriptive fields are skipped.
                    if key in [ 'director', 'studio', 'country', 'plot', 'actor', 'genre', 'country_studio' ]:
                        continue
                    alternative.write('#%s=%s\n' % (make_utf8(key), make_utf8(value)))
                alternative.write(link.encode('utf-8') + '\n')
        except:
            # HACK: bare except makes the append best-effort, but it also
            # swallows KeyboardInterrupt/SystemExit.
            pass
def get_links_with_ranks(strmFilename, settings, use_scrape_info = False):
    """Parse the .strm file's '.alternative' companion and return a list of
    dicts ({'rank', 'link', plus accumulated '#key=value' metadata}),
    sorted ascending by 'rank'.

    NOTE(review): this module contains another, more defensive definition
    of get_links_with_ranks (it logs exceptions and guards the
    'full_title' lookup); the later definition shadows the earlier one —
    confirm which version is intended to be live.
    """
    strmFilename_alt = strmFilename + '.alternative'
    items = []
    saved_dict = {}
    if filesystem.isfile(strmFilename_alt):
        with filesystem.fopen(strmFilename_alt, "r") as alternative:
            curr_rank = 1
            while True:
                line = alternative.readline()
                if not line:
                    break
                line = line.decode('utf-8')
                if line.startswith('#'):
                    # Metadata line '#key=value': accumulate until the next
                    # link line consumes it.
                    line = line.lstrip('#')
                    parts = line.split('=')
                    if len(parts) > 1:
                        saved_dict[parts[0]] = parts[1].strip(' \n\t\r')
                elif line.startswith('plugin://script.media.aggregator'):
                    try:
                        if use_scrape_info:
                            saved_dict['link'] = line.strip(u'\r\n\t ')
                            sp = seeds_peers(saved_dict)
                            # Rebinds saved_dict; .clear() below clears the
                            # merged copy, not the original.
                            saved_dict = dict(saved_dict, **sp)
                        if 'rank' in saved_dict:
                            curr_rank = float(saved_dict['rank'])
                        else:
                            # NOTE(review): unguarded ['full_title'] can raise
                            # KeyError, silently absorbed by the bare except
                            # below (entry then falls back to rank 1).
                            curr_rank = get_rank(saved_dict['full_title'], saved_dict, settings)
                    except:
                        curr_rank = 1
                    item = {'rank': curr_rank, 'link': line.strip(u'\r\n\t ')}
                    # saved_dict values override item's on key collision.
                    items.append(dict(item, **saved_dict))
                    saved_dict.clear()
    items.sort(key=operator.itemgetter('rank'))
    #debug('Sorded items')
    #debug(items)
    return items