def getsearchurl(self):
    """Resolve and cache this engine's base search URL in the shared
    'magneturls' storage.

    The engine is pinned to the hard-coded btmovi URL: the original
    jubt.gq mirror-discovery logic (fetch https://jubt.gitlab.io/home/,
    probe the linked mirrors with url_is_alive, scrape the redirect)
    had been disabled by an early ``return`` and was unreachable dead
    code, so it has been removed.
    """
    try:
        magneturls = get_storage('magneturls')
        magneturls[self.name] = 'http://www.btmovi.org'
        magneturls.sync()
    except Exception:
        # Best-effort: failures only leave the previous cached URL in place.
        xbmc.log(msg=format_exc(), level=xbmc.LOGERROR)
def search(self, what, cat='all',sorttype='relevance',page='1'):
    """Query the engine's JSON detail API and return matching torrents.

    :param what: search phrase (URL-quoted before use)
    :param cat: unused category placeholder kept for interface parity
    :param sorttype: 'addtime' | 'size' | anything else (treated as relevance)
    :param page: 1-based page number as a string
    :returns: dict with 'state' (bool) and 'list' (list of result dicts)
    """
    result={}
    result['state']=False
    result['list']=[]
    try:
        # pageresult = _http(self.url)
        # match = re.search(r'<strong><a\x20href="(.*?)"', pageresult, re.DOTALL | re.MULTILINE)
        # baseurl=''
        # if match:
        #     baseurl = match.group(1).rstrip('/')
        # if baseurl:
        # Base URL was cached earlier by getsearchurl() in shared storage.
        magneturls=get_storage('magneturls')
        baseurl=magneturls[self.name]
        # Map the generic sort key onto the site's query-string token.
        if sorttype=='addtime':
            sorttype='create_time'
        elif sorttype=='size':
            sorttype='length'
        else :
            # NOTE(review): 'relavance' looks misspelled, but it may be the
            # exact token the remote API expects — confirm before changing.
            sorttype='relavance'
        searchurl='%s/search/%s/?c=&s=%s&p=%s'%(baseurl,parse.quote(what),sorttype,str(int(page)))
        detailurl=[]
        # The result page only lists info-hashes; build one JSON detail URL per hash.
        pageresult = _http(searchurl)
        for match in re.finditer(r"title[\x22\x27]\s+href\x3D[\x22\x27]\x2Fwiki\x2F(?P<hashes>.*?)\x2Ehtml[\x22\x27]", pageresult, re.IGNORECASE | re.DOTALL):
            detailurl.append('%s/api/json_info?hashes=%s'%(baseurl,match.group('hashes')))
        # Fetch all detail URLs concurrently; each worker pushes its raw
        # response onto the queue (get_url is the project's fetch helper).
        q = Queue.Queue()
        for u in detailurl:
            t = threading.Thread(target=get_url, args = (q,u))
            t.daemon = True
            t.start()
        # Drain exactly as many responses as workers were started.
        # Responses arrive in completion order, not request order.
        for u in detailurl:
            try:
                # 4-second cap per response; a timeout raises and aborts the drain.
                rsp = q.get(block=True, timeout=4)
                # Skip any junk before the JSON payload by seeking the first '{'.
                jsonrsp = json.loads(rsp[rsp.index('{'):])
                #xbmcgui.Dialog().notification(heading='bthaha', message='aaa')
                for res in jsonrsp['result']:
                    res_dict = dict()
                    res_dict['name'] = res['name']
                    res_dict['size'] = strOfSize(int(res['length']))
                    res_dict['filecount'] = ''
                    res_dict['seeds'] = ''
                    res_dict['leech'] = ''
                    res_dict['link'] = r'magnet:?xt=urn:btih:'+res['info_hash']
                    res_dict['date'] =res['create_time']
                    res_dict['desc_link'] = ''
                    res_dict['engine_url'] = self.url
                    result['list'].append(res_dict)
            except:
                # One bad/slow response aborts the remaining drains but keeps
                # whatever was already collected.
                xbmc.log(msg=format_exc(),level=xbmc.LOGERROR)
                break
    except:
        xbmc.log(msg=format_exc(),level=xbmc.LOGERROR)
        return result
    result['state']=True
    return result
def search(self, what, cat='all', sorttype='relevance', page='1'):
    """Scrape the engine's HTML search results.

    :param what: search phrase (URL-quoted before use)
    :param cat: unused category placeholder kept for interface parity
    :param sorttype: 'addtime' | 'size' | 'relevance' | anything else
        (anything else sorts by request count)
    :param page: 1-based page number as a string
    :returns: dict with 'state' (bool), 'list' (result dicts), the echoed
        'sorttype', and 'nextpage'=True when any result was found
    """
    result = {}
    result['state'] = False
    result['list'] = []
    # Echo the caller's sort key before it is remapped below.
    result['sorttype'] = sorttype
    # Map the generic sort key onto the site's URL path token.
    if sorttype == 'addtime':
        sorttype = '-time-'
    elif sorttype == 'size':
        sorttype = '-length-'
    elif sorttype == 'relevance':
        sorttype = '-'
    else:
        sorttype = '-requests-'
    try:
        #searchurl='https://btsow.pw'
        # pageresult = _http(self.url)
        # match = re.search(r'<strong><a\x20href="(.*?)"', pageresult, re.DOTALL | re.MULTILINE)
        # if match:
        #     searchurl = match.group(1)
        # Base URL was cached earlier by getsearchurl() in shared storage.
        magneturls = get_storage('magneturls')
        searchurl = magneturls[self.name]
        searchurl = searchurl + '/search/kw-%s%s%s.html' % (
            parse.quote(what), str(sorttype), str(int(page)))
        pageresult = _http(searchurl)
        #xbmc.log(msg=pageresult)
        # Hex-escaped pattern capturing title, 40-char info hash, size,
        # creation time and popularity from each result row.
        rmain = r'\x3Ca\s+title\x3D[\x22\x27](?P<title>.*?)[\x22\x27]\s+href\x3D[\x22\x27]\x2Fhash\x2F(?P<magnet>[a-z0-9]{40})\x2Ehtml[\x22\x27].*?文件大小.*?\x3Cb.*?\x3E(?P<filesize>.*?)\x3C\x2Fb\x3E.*?创建时间.*?\x3Cb.*?\x3E(?P<createtime>.*?)\x3C\x2Fb\x3E.*?下载热度.*?\x3Cb.*?\x3E(?P<pop>.*?)\x3C\x2Fb\x3E'
        reobj = re.compile(rmain, re.DOTALL)
        for match in reobj.finditer(pageresult):
            title = match.group('title')
            filesize = match.group('filesize')
            createtime = match.group('createtime')
            magnet = r'magnet:?xt=urn:btih:' + match.group('magnet')
            res_dict = dict()
            res_dict['name'] = title
            res_dict['size'] = filesize
            # Added for consistency with the other engines' result schema.
            res_dict['filecount'] = ''
            res_dict['seeds'] = ''
            res_dict['leech'] = ''
            res_dict['link'] = magnet
            res_dict['date'] = createtime
            res_dict['desc_link'] = ''
            res_dict['engine_url'] = self.url
            result['list'].append(res_dict)
        # The page exposes no explicit pager marker here; assume a next
        # page whenever the current one yielded results.
        if len(result['list']) > 0:
            result['nextpage'] = True
    except Exception:
        xbmc.log(msg=format_exc(), level=xbmc.LOGERROR)
        return result
    result['state'] = True
    return result
def getsearchurl(self):
    """Scrape the engine's current base URL from its landing page and
    cache it in the shared 'magneturls' storage.

    Falls back to a known mirror when the landing page no longer
    contains the expected ``<strong><a href="...">`` link.
    """
    try:
        magneturls = get_storage('magneturls')
        pageresult = _http(self.url)
        match = re.search(r'<strong><a\x20href="(.*?)"', pageresult,
                          re.DOTALL | re.MULTILINE)
        # (removed: unused local `baseurl` that was assigned but never read)
        if match:
            magneturls[self.name] = match.group(1).rstrip('/')
        else:
            # Page layout changed or fetch returned junk — use the fallback mirror.
            magneturls[self.name] = 'https://glz.bthaha.monster'
        magneturls.sync()
    except Exception:
        # Best-effort: failures only leave the previous cached URL in place.
        xbmc.log(msg=format_exc(), level=xbmc.LOGERROR)
def getsearchurl(self):
    """Read the JavaScript redirect target off the engine's landing page
    and cache it in the shared 'magneturls' storage, falling back to a
    known mirror when no redirect is found.
    """
    try:
        storage = get_storage('magneturls')
        landing = _http(self.url)
        redirect = re.search(
            r'window\x2elocation\x2ehref\x3D[\x22\x27](.*?)[\x22\x27]',
            landing, re.DOTALL | re.MULTILINE)
        storage[self.name] = (redirect.group(1).rstrip('/')
                              if redirect else 'https://xccl.vip')
        storage.sync()
    except:
        xbmc.log(msg=format_exc(), level=xbmc.LOGERROR)
def search(self, what, cat='all', sorttype='relevance', page='1'):
    """Scrape the engine's HTML result rows for torrents.

    :param what: search phrase (URL-quoted before use)
    :param cat: unused category placeholder kept for interface parity
    :param sorttype: accepted but not used by this engine
    :param page: 1-based page number as a string
    :returns: dict with 'state' (bool), 'list' (result dicts), and
        'nextpage'=True when any result was found
    """
    result = {'state': False, 'list': []}
    try:
        #searchurl='https://btsow.pw'
        # pageresult = _http(self.url)
        # match = re.search(r'<strong><a\x20href="(.*?)"', pageresult, re.DOTALL | re.MULTILINE)
        # if match:
        #     searchurl = match.group(1)
        # Base URL was cached earlier by getsearchurl() in shared storage.
        storage = get_storage('magneturls')
        target = storage[self.name] + '/search/%s/page/%s' % (
            parse.quote(what), str(int(page)))
        html = _http(target)
        #xbmc.log(msg=pageresult)
        # One match per result row: link href, title, size, creation date.
        row_re = re.compile(
            r'<div\x20class="row">.*?<a\x20href="(?P<href>.*?)"\x20title="(?P<title>.*?)">.*?Size:(?P<filesize>.*?)\x20/\x20Convert\x20Date:(?P<createtime>.*?)</div>',
            re.DOTALL)
        for hit in row_re.finditer(html):
            # The info hash is the final path segment of the detail link.
            info_hash = hit.group('href').rpartition('/')[2]
            entry = dict()
            entry['name'] = hit.group('title')
            entry['size'] = hit.group('filesize')
            entry['seeds'] = ''
            entry['leech'] = ''
            entry['link'] = 'magnet:?xt=urn:btih:' + info_hash
            entry['date'] = hit.group('createtime')
            entry['desc_link'] = ''
            entry['engine_url'] = self.url
            result['list'].append(entry)
        # Assume another page exists whenever this one yielded results.
        if len(result['list']) > 0:
            result['nextpage'] = True
    except:
        xbmc.log(msg=format_exc(), level=xbmc.LOGERROR)
        return result
    result['state'] = True
    return result
def search(self, what, cat='all', sorttype='relevance', page='1'):
    """Scrape the engine's HTML search results.

    :param what: search phrase (URL-quoted before use)
    :param cat: unused category placeholder kept for interface parity
    :param sorttype: 'addtime' | 'size' | 'relevance' | anything else
        (anything else sorts by hit count)
    :param page: 1-based page number as a string
    :returns: dict with 'state' (bool), 'list' (result dicts), the echoed
        'sorttype', and 'nextpage'=True when the pager shows a next arrow
    """
    result = {}
    result['state'] = False
    result['list'] = []
    # Echo the caller's sort key before it is remapped below.
    result['sorttype'] = sorttype
    # Map the generic sort key onto the site's URL token.
    if sorttype == 'addtime':
        sorttype = 'time'
    elif sorttype == 'size':
        sorttype = 'size'
    elif sorttype == 'relevance':
        sorttype = 'rel'
    else:
        sorttype = 'hits'
    try:
        # Storage lookup moved inside the try block so a missing engine
        # entry is logged and reported as a failed search (state=False),
        # matching the sibling search() implementations, instead of
        # raising KeyError to the caller.
        magneturls = get_storage('magneturls')
        searchurl = magneturls[self.name]
        searchurl = '%s/so/%s_%s_%s.html' % (searchurl, parse.quote(what),
                                             str(sorttype), str(int(page)))
        pageresult = _http(searchurl)
        # Hex-escaped pattern capturing the 40-char info hash, title,
        # creation time and file size from each result row.
        rmain = r'\x2Fbt\x2F(?P<magnet>[a-z0-9]{40})\x2Ehtml.*?[\x22\x27]\x3E(?P<title>.*?)\x3C\x2Fa\x3E.*?创建时间.*?\x3Cb\x3E(?P<createtime>.*?)\x3C\x2Fb\x3E.*?文件大小.*?\x3E(?P<filesize>.*?)\x3C\x2Fb\x3E'
        reobj = re.compile(rmain, re.IGNORECASE | re.DOTALL)
        for match in reobj.finditer(pageresult):
            # The site wraps matched keywords in <em> tags; strip them.
            title = match.group('title').replace('<em>', '').replace('</em>', '')
            res_dict = dict()
            res_dict['name'] = title
            res_dict['size'] = match.group('filesize')
            res_dict['filecount'] = ''
            res_dict['seeds'] = ''
            res_dict['leech'] = ''
            res_dict['link'] = r'magnet:?xt=urn:btih:' + match.group('magnet')
            res_dict['date'] = match.group('createtime')
            res_dict['desc_link'] = ''
            res_dict['engine_url'] = self.url
            result['list'].append(res_dict)
        # '»' is the pager's "next page" arrow.
        if pageresult.find('»') >= 0:
            result['nextpage'] = True
    except Exception:
        # (removed: unused `as ex` binding on the handler)
        xbmc.log(msg=format_exc(), level=xbmc.LOGERROR)
        return result
    result['state'] = True
    return result
__temppath__ = xbmc.translatePath(os.path.join(__cwd__, 'temp')) if not os.path.exists(__temppath__): os.makedirs(__temppath__) import six from six.moves.urllib import parse from six.moves.urllib import request from six.moves import http_cookiejar as cookielib from commfunc import get_storage, _http, url_is_alive from xbmcswift2 import Plugin plugin = Plugin() setthumbnail = False moviepoint = {} subcache = get_storage('subcache') searchvalues = get_storage('searchvalues') colors = { 'dir': 'FF9966', 'video': 'FF0033', 'bt': '33FF00', 'audio': '66CCCC', 'subtitle': '505050', 'image': '99CC33', 'back': '0099CC', 'next': 'CCCCFF', 'menu': 'CCFF66', 'star1': 'FFFF00', 'star0': '777777', 'sort': '666699',