class UnCaptchaReCaptcha:
    """Resolve reCAPTCHA v2 challenges via the My JDownloader solver widget."""

    def __init__(self, lang='en'):
        # lang is accepted for interface compatibility with the other
        # UnCaptchaReCaptcha variants; the widget handles localisation itself.
        self.sessionEx = MainSessionWrapper()

    def processCaptcha(self, sitekey, referer=''):
        """Open the solver dialog and return the user's token ('' on cancel/failure)."""
        result = self.sessionEx.waitForFinishOpen(
            UnCaptchaReCaptchaMyJDWidget,
            title=_("My JDownloader reCAPTCHA v2 solution"),
            sitekey=sitekey,
            referer=referer)
        # The dialog returns a sequence whose first element is the answer;
        # treat None / empty sequence / empty answer all as "no solution".
        if result and result[0]:
            return result[0]
        return ''
class UnCaptchaReCaptcha:
    """Interactive solver for Google's reCAPTCHA image-selection fallback API.

    Repeatedly fetches the challenge page from the /recaptcha/api/fallback
    endpoint, shows the challenge image to the user and posts the answer
    back, until Google returns a verification token (max 20 rounds).
    """

    def __init__(self, lang='en'):
        # Browser-like headers; Referer may be overwritten per call.
        self.HTTP_HEADER = {
            'Accept-Language': lang,
            'Referer': 'https://www.google.com/recaptcha/api2/demo',
            'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.18) Gecko/20110621 Mandriva Linux/1.9.2.18-0.1mdv2010.2 (2010.2) Firefox/3.6.18'
        }
        self.cm = common()
        self.sessionEx = MainSessionWrapper()
        self.COOKIE_FILE = GetCookieDir('google.cookie')

    def processCaptcha(self, key, referer=None):
        """Solve the captcha for the given site.

        key     -- the site's public reCAPTCHA key
        referer -- optional Referer header to present to Google
        Returns the verification token, or '' on failure.
        """
        post_data = None
        token = ''
        iteration = 0
        if referer is not None:
            self.HTTP_HEADER['Referer'] = referer
        reCaptchaUrl = 'http://www.google.com/recaptcha/api/fallback?k=%s' % (key)
        while iteration < 20:
            sts, data = self.cm.getPage(reCaptchaUrl, {'header': self.HTTP_HEADER, 'raw_post_data': True}, post_data=post_data)
            if not sts:
                SetIPTVPlayerLastHostError(_('Fail to get "%s".') % reCaptchaUrl)
                return ''
            printDBG("+++++++++++++++++++++++++++++++++++++++++")
            printDBG(data)
            printDBG("+++++++++++++++++++++++++++++++++++++++++")
            imgUrl = ph.search(data, '"(/recaptcha/api2/payload[^"]+?)"')[0]
            iteration += 1
            # The challenge prompt lives in one of several containers,
            # depending on which HTML variant Google serves.
            message = ph.clean_html(ph.find(data, ('<div', '>', 'imageselect-desc'), '</div>', flags=0)[1])
            if not message:
                message = ph.clean_html(ph.find(data, ('<label', '>', 'fbc-imageselect-message-text'), '</label>', flags=0)[1])
            if not message:
                message = ph.clean_html(ph.find(data, ('<div', '>', 'imageselect-message'), '</div>', flags=0)[1])
            if '' == message:
                # No prompt means the challenge is done: scrape the token
                # from the verification <textarea>.
                token = ph.find(data, ('<textarea', '>'), '</textarea>', flags=0)[1].strip()
                if token == '':
                    token = ph.search(data, '"this\.select\(\)">(.*?)</textarea>')[0]
                if '' != token:
                    printDBG('>>>>>>>> Captcha token[%s]' % (token))
                else:
                    printDBG('>>>>>>>> Captcha Failed\n\n%s\n\n' % data)
                break
            cval = ph.search(data, 'name="c"\s+value="([^"]+)')[0]
            # The payload URL is scraped from HTML, so unescape '&amp;'
            # before using it (the previous replace('&', '&') was a no-op).
            imgUrl = 'https://www.google.com%s' % (imgUrl.replace('&amp;', '&'))
            message = ph.clean_html(message)
            accepLabel = ph.clean_html(ph.search(data, 'type="submit"\s+value="([^"]+)')[0])
            filePath = GetTmpDir('.iptvplayer_captcha.jpg')
            printDBG(">>>>>>>> Captcha message[%s]" % (message))
            printDBG(">>>>>>>> Captcha accep label[%s]" % (accepLabel))
            printDBG(">>>>>>>> Captcha imgUrl[%s] filePath[%s]" % (imgUrl, filePath))
            # Only accept real JPEG payloads (SOI/EOI markers).
            params = {'maintype': 'image', 'subtypes': ['jpeg'], 'check_first_bytes': ['\xFF\xD8', '\xFF\xD9']}
            ret = self.cm.saveWebFile(filePath, imgUrl, params)
            if not ret.get('sts'):
                SetIPTVPlayerLastHostError(_('Fail to get "%s".') % imgUrl)
                break
            retArg = self.sessionEx.waitForFinishOpen(UnCaptchaReCaptchaWidget, imgFilePath=filePath, message=message, title="reCAPTCHA v2", additionalParams={'accep_label': accepLabel})
            printDBG('>>>>>>>> Captcha response[%s]' % (retArg))
            if retArg is not None and len(retArg) and retArg[0]:
                answer = retArg[0]
                printDBG('>>>>>>>> Captcha answer[%s]' % (answer))
                # Next loop round POSTs the user's answer back to Google.
                post_data = urllib.urlencode({'c': cval, 'response': answer}, doseq=True)
            else:
                break
        return token
class UnCaptchaReCaptcha:
    """Interactive solver for Google's legacy reCAPTCHA noscript API.

    Fetches the /recaptcha/api/noscript page, shows the challenge image in
    an input dialog and posts the typed answer back, until a token is
    returned (max 20 rounds).
    """

    def __init__(self, lang='en'):
        self.HTTP_HEADER = {
            'Accept-Language': lang,
            'Referer': 'https://www.google.com/recaptcha/demo/',
            'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.18) Gecko/20110621 Mandriva Linux/1.9.2.18-0.1mdv2010.2 (2010.2) Firefox/3.6.18'
        }
        self.cm = common()
        self.sessionEx = MainSessionWrapper()
        self.challenge = ''
        self.response = ''

    def processCaptcha(self, key):
        """Solve the captcha for the given public key; return token or ''."""
        post_data = None
        token = ''
        iteration = 0
        reCaptchaUrl = 'https://www.google.com/recaptcha/api/noscript?k=%s' % (key)
        while iteration < 20:
            sts, data = self.cm.getPage(reCaptchaUrl, {'header': self.HTTP_HEADER, 'raw_post_data': True}, post_data=post_data)
            if not sts:
                SetIPTVPlayerLastHostError(_('Fail to get "%s".') % reCaptchaUrl)
                return ''
            imgUrl = self.cm.ph.getSearchGroups(data, 'src="(image[^"]+?)"')[0]
            iteration += 1
            message = self.cm.ph.getSearchGroups(data, '<p[^>]*>([^<]+?)</p>')[0]
            token = self.cm.ph.getSearchGroups(data, '<textarea[^>]*>([^<]+?)</textarea>')[0]
            if '' != token:
                # Challenge solved — the page carries the verification token.
                printDBG('>>>>>>>> Captcha token[%s]' % (token))
                break
            elif message == '':
                # Neither token nor prompt: page layout not understood.
                printDBG('>>>>>>>> Captcha Failed')
                break
            recaptcha_challenge_field = self.cm.ph.getSearchGroups(data, 'name="recaptcha_challenge_field"[^>]+?value="([^"]+)"')[0]
            # The image URL is scraped from HTML, so unescape '&amp;' before
            # use (the previous replace('&', '&') was a no-op).
            imgUrl = 'https://www.google.com/recaptcha/api/%s' % (imgUrl.replace('&amp;', '&'))
            message = clean_html(message)
            accepLabel = clean_html(self.cm.ph.getSearchGroups(data, 'type="submit"[^>]+?value="([^"]+)"')[0])
            filePath = GetTmpDir('.iptvplayer_captcha.jpg')
            printDBG(">>>>>>>> Captcha message[%s]" % (message))
            printDBG(">>>>>>>> Captcha accep label[%s]" % (accepLabel))
            printDBG(">>>>>>>> Captcha imgUrl[%s] filePath[%s]" % (imgUrl, filePath))
            # Only accept real JPEG payloads (SOI/EOI markers).
            params = {'maintype': 'image', 'subtypes': ['jpeg'], 'check_first_bytes': ['\xFF\xD8', '\xFF\xD9']}
            ret = self.cm.saveWebFile(filePath, imgUrl, params)
            if not ret.get('sts'):
                SetIPTVPlayerLastHostError(_('Fail to get "%s".') % imgUrl)
                break
            # Build the input dialog showing the downloaded challenge image.
            params = deepcopy(IPTVMultipleInputBox.DEF_PARAMS)
            params['accep_label'] = _('Send')
            params['title'] = accepLabel
            params['list'] = []
            item = deepcopy(IPTVMultipleInputBox.DEF_INPUT_PARAMS)
            item['label_size'] = (300, 57)
            item['input_size'] = (300, 25)
            item['icon_path'] = filePath
            item['input']['text'] = ''
            params['list'].append(item)
            retArg = self.sessionEx.waitForFinishOpen(IPTVMultipleInputBox, params)
            printDBG('>>>>>>>> Captcha response[%s]' % (retArg))
            if retArg is not None and len(retArg) and retArg[0]:
                recaptcha_response_field = retArg[0]
                printDBG('>>>>>>>> Captcha recaptcha_response_field[%s]' % (recaptcha_response_field))
                # Next loop round POSTs the user's answer back to Google.
                post_data = urllib.urlencode({
                    'recaptcha_challenge_field': recaptcha_challenge_field,
                    'recaptcha_response_field': recaptcha_response_field,
                    'submit': accepLabel
                }, doseq=True)
            else:
                break
        return token
class UnCaptchaReCaptcha:
    """Solve reCAPTCHA challenges through the 2captcha.com paid API."""

    def __init__(self, lang='en'):
        self.cm = common()
        self.sessionEx = MainSessionWrapper()
        self.MAIN_URL = 'https://2captcha.com/'

    def getMainUrl(self):
        """Return the 2captcha service base URL."""
        return self.MAIN_URL

    def getFullUrl(self, url, mainUrl=None):
        """Resolve a relative API path against the service base URL."""
        if mainUrl is None:
            mainUrl = self.getMainUrl()
        return self.cm.getFullUrl(url, mainUrl)

    def processCaptcha(self, sitekey, referer='', action='verify'):
        """Submit the captcha to 2captcha and poll until a token arrives.

        sitekey -- the site's public reCAPTCHA key
        referer -- URL of the page the captcha is embedded on
        action  -- reCAPTCHA v3 action name
        Returns the solution token, or '' on failure (an error message box
        is shown to the user in that case).
        """
        sleepObj = None
        token = ''
        errorMsgTab = []
        apiKey = config.plugins.iptvplayer.api_key_2captcha.value
        apiUrl = self.getFullUrl('/in.php?key=') + apiKey + '&method=userrecaptcha&version=v3&action=' + action + '&min_score=0.3&googlekey=' + sitekey + '&json=1&pageurl=' + urllib.quote(referer)
        try:
            sts, data = self.cm.getPage(apiUrl)
            if sts:
                printDBG('API DATA:\n%s\n' % data)
                data = json_loads(data, '', True)
                if data['status'] == '1':
                    captchaid = data['request']
                    sleepObj = GetIPTVSleep()
                    sleepObj.Sleep(300, False)  # overall solving budget: 300 s
                    tries = 0
                    while True:
                        tries += 1
                        # First poll after 10 s (2captcha recommendation),
                        # then every 5 s while budget remains.
                        timeout = sleepObj.getTimeout()
                        if tries == 1:
                            timeout = 10
                        elif timeout > 10:
                            timeout = 5
                        time.sleep(timeout)
                        apiUrl = self.getFullUrl('/res.php?key=') + apiKey + '&action=get&json=1&id=' + captchaid
                        sts, data = self.cm.getPage(apiUrl)
                        if not sts:
                            # Transient network error: retry while budget remains.
                            # (Previously the error/break lines after `continue`
                            # were unreachable, so a persistent failure looped
                            # forever.)
                            if sleepObj.getTimeout() == 0:
                                errorMsgTab.append(_('Network failed %s.') % '2')
                                break
                            continue
                        printDBG('API DATA:\n%s\n' % data)
                        data = json_loads(data, '', True)
                        if data['status'] == '1' and data['request'] != '':
                            token = data['request']
                            break
                        if sleepObj.getTimeout() == 0:
                            errorMsgTab.append(_('%s timeout.') % self.getMainUrl())
                            break
                        else:
                            # Usually "CAPCHA_NOT_READY"; kept for the error box.
                            errorMsgTab.append(data['request'])
            else:
                errorMsgTab.append(_('Network failed %s.') % '1')
        except Exception as e:
            errorMsgTab.append(str(e))
            printExc()
        if sleepObj is not None:
            sleepObj.Reset()
        if token == '':
            self.sessionEx.waitForFinishOpen(MessageBox, (_('Resolving reCaptcha with %s failed!\n\n') % self.getMainUrl()) + '\n'.join(errorMsgTab), type=MessageBox.TYPE_ERROR, timeout=10)
        return token
class ZalukajCOM(CBaseHostClass):
    """IPTVPlayer host for zalukaj.com: browsing, search, login and link extraction.

    All listings are produced by scraping HTML with the project's `ph` parser
    helpers; results are accumulated via addDir/addVideo into self.currList.
    """
    USER_AGENT = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.18) Gecko/20110621 Mandriva Linux/1.9.2.18-0.1mdv2010.2 (2010.2) Firefox/3.6.18'
    HEADER = {'User-Agent': USER_AGENT, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
    AJAX_HEADER = dict(HEADER)
    AJAX_HEADER.update({'X-Requested-With': 'XMLHttpRequest', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache'})
    DOMAIN = 'zalukaj.com'
    MAIN_URL = 'https://' + DOMAIN + '/'
    # %d genre id, %s sort order, %s language filter, %d page number
    FILMS_URL = MAIN_URL + '/gatunek,%d/%s,%s,strona-%d'
    SEARCH_URL = MAIN_URL + '/szukaj'
    LOGIN_URL = MAIN_URL + '/account.php'
    DEFAULT_ICON_URL = 'http://www.userlogos.org/files/logos/8596_famecky/zalukaj.png'
    # Static menu trees; 'category' keys are dispatched in handleService.
    MAIN_CAT_TAB = [{'category': 'films_sub_menu', 'title': "Filmy", 'url': ''},
                    {'category': 'series_sub_menu', 'title': "Seriale", 'url': MAIN_URL},
                    {'category': 'search', 'title': "Szukaj filmu", 'search_item': True},
                    {'category': 'search_history', 'title': _('Search history')}]
    FILMS_SUB_MENU = [{'category': 'films_category', 'title': 'Kategorie', 'url': MAIN_URL},
                      {'category': 'films_list', 'title': 'Ostatnio oglądane', 'url': MAIN_URL + '/cache/lastseen.html'},
                      {'category': 'films_list', 'title': 'Ostatnio dodane', 'url': MAIN_URL + '/cache/lastadded.html'},
                      {'category': 'films_popular', 'title': 'Najpopularniejsze', 'url': ''}]
    FILMS_POPULAR = [{'category': 'films_list', 'title': 'Wczoraj', 'url': MAIN_URL + '/cache/wyswietlenia-wczoraj.html'},
                     {'category': 'films_list', 'title': 'Ostatnie 7 dni', 'url': MAIN_URL + '/cache/wyswietlenia-tydzien.html'},
                     {'category': 'films_list', 'title': 'W tym miesiącu', 'url': MAIN_URL + '/cache/wyswietlenia-miesiac.html'}]
    SERIES_SUB_MENU = [{'category': 'series_list', 'title': 'Lista', 'url': MAIN_URL},
                       {'category': 'series_updated', 'title': 'Ostatnio zaktualizowane', 'url': MAIN_URL + '/seriale'}]
    LANGS_TAB = [{'title': 'Wszystkie', 'lang': 'wszystkie'},
                 {'title': 'Z lektorem', 'lang': 'tlumaczone'},
                 {'title': 'Napisy pl', 'lang': 'napisy-pl'},
                 {'title': 'Nietłumaczone', 'lang': 'nie-tlumaczone'}]

    def __init__(self):
        printDBG("ZalukajCOM.__init__")
        CBaseHostClass.__init__(self, {'history': 'ZalukajCOM', 'cookie': 'zalukajtv.cookie'})
        # loggedIn stays None until tryTologin runs at least once.
        self.loggedIn = None
        self.login = ''
        self.password = ''
        self.msg = ''

    def _getPage(self, url, http_params_base={}, params=None, loggedIn=None):
        """Fetch a page, attaching the session cookie jar when logged in.

        loggedIn=None means "use the current login state".
        """
        if None == loggedIn:
            loggedIn = self.loggedIn
        HEADER = ZalukajCOM.HEADER
        if loggedIn:
            http_params = {'header': HEADER, 'use_cookie': True, 'save_cookie': True, 'load_cookie': True, 'cookiefile': self.COOKIE_FILE}
        else:
            http_params = {'header': HEADER}
        http_params.update(http_params_base)
        return self.getPage(url, http_params, params)

    def getPage(self, url, addParams={}, post_data=None):
        """Low-level fetch: applies the configured proxy and detects the
        site's "Duze obciazenie!" (high load) error page."""
        HTTP_HEADER = dict(self.HEADER)
        addParams = dict(addParams)
        addParams.update({'header': HTTP_HEADER})
        proxy = config.plugins.iptvplayer.zalukajtv_proxy.value
        if proxy != 'None':
            if proxy == 'proxy_1':
                proxy = config.plugins.iptvplayer.alternative_proxy1.value
            else:
                proxy = config.plugins.iptvplayer.alternative_proxy2.value
            addParams = dict(addParams)
            addParams.update({'http_proxy': proxy})
        sts, data = self.cm.getPage(url, addParams, post_data)
        try:
            if 'Duze obciazenie!' in data:
                # Show the server's overload message (scripts stripped) to the user.
                message = self.cleanHtmlStr(re.compile('<script.+?</script>', re.DOTALL).sub("", data))
                GetIPTVNotify().push(message, 'info', 5)
                SetIPTVPlayerLastHostError(message)
        except Exception:
            # data may be None/non-text on failed fetches — best-effort check only.
            pass
        return sts, data

    def getFullIconUrl(self, url):
        """Absolutize an icon URL and tag it with the proxy used for fetching."""
        url = self.getFullUrl(url)
        proxy = config.plugins.iptvplayer.zalukajtv_proxy.value
        if proxy != 'None':
            if proxy == 'proxy_1':
                proxy = config.plugins.iptvplayer.alternative_proxy1.value
            else:
                proxy = config.plugins.iptvplayer.alternative_proxy2.value
            url = strwithmeta(url, {'iptv_http_proxy': proxy})
        return url

    def _listLeftTable(self, cItem, category, m1, m2, sp):
        """List directory entries from a sidebar table.

        m1/m2 delimit the table in the HTML, sp splits it into cells.
        """
        printDBG("ZalukajCOM.listLeftGrid")
        sts, data = self._getPage(cItem['url'])
        if not sts:
            return
        printDBG(data)
        data = self.cm.ph.getDataBeetwenMarkers(data, m1, m2, False)[1]
        data = data.split(sp)
        if len(data):
            # Last fragment is trailing markup after the final separator.
            del data[-1]
        for item in data:
            params = dict(cItem)
            url = self.getFullUrl(self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"', 1)[0])
            # Skip off-site links.
            if self.DOMAIN not in url:
                continue
            params.update({'category': category, 'title': self.cleanHtmlStr(item), 'url': url})
            self.addDir(params)

    def listFilmsCategories(self, cItem, category):
        """List film genre categories from the sidebar table."""
        printDBG("ZalukajCOM.listFilmsCategories")
        self._listLeftTable(cItem, category, '<table id="one" cellpadding="0" cellspacing="3">', '</table>', '</td>')

    def listSeries(self, cItem, category):
        """List series from the main menu table."""
        # NOTE(review): debug tag below says listFilmsCategories — copy/paste leftover.
        printDBG("ZalukajCOM.listFilmsCategories")
        self._listLeftTable(cItem, category, '<table id="main_menu" cellpadding="0" cellspacing="3">', '</table>', '</td>')

    def listFilms(self, cItem):
        """List films, either from a fixed cache page or a paginated genre URL.

        If the item URL ends in a numeric genre id, the FILMS_URL template is
        used with the configured sort order and the item's language filter,
        and pagination is detected ('extract' mode).
        """
        printDBG("ZalukajCOM.listFilms")
        url = cItem['url']
        page = cItem.get('page', 1)
        nextPage = False
        extract = False
        try:
            # Genre pages end with a numeric id; non-numeric URLs raise and
            # fall through to be fetched as-is.
            cat = int(url.split('/')[-1])
            sort = config.plugins.iptvplayer.zalukajtv_filmssort.value
            url = ZalukajCOM.FILMS_URL % (cat, sort, cItem['lang'], page)
            extract = True
        except Exception:
            pass
        sts, data = self._getPage(url, {}, cItem.get('post_data', None))
        #self.cm.ph.writeToFile("/home/sulge/zalukaj.html", data)
        if not sts:
            return
        printDBG("ZalukajCOM data " + data)
        # Two known item-markup variants.
        sp = '<div class="tivief4">'
        if not sp in data:
            sp = '<div class="details">'
        if extract:
            # A link to page+1 present in the HTML means there is a next page.
            if self.cm.ph.getSearchGroups(data, 'strona\-(%d)[^0-9]' % (page + 1))[0] != '':
                nextPage = True
        m2 = '<div class="categories_page">'
        if m2 not in data:
            m2 = '<div class="doln">'
        data = self.cm.ph.getDataBeetwenMarkers(data, sp, m2, True)[1]
        data = data.split(sp)
        if len(data):
            # First fragment precedes the first item marker.
            del data[0]
        for item in data:
            year = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<p>', '</p>', False)[1])
            desc = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '</h3>', '</div>', False)[1])
            more = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<p class="few_more">', '</p>', False)[1])
            desc = '%s | %s | %s |' % (year, more, desc)
            # Icon comes from CSS background or, failing that, an <img> tag.
            icon = self.getFullUrl(self.cm.ph.getDataBeetwenMarkers(item, 'background-image:url(', ')', False)[1])
            if '' == icon:
                icon = self.getFullUrl(self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"', 1)[0])
            url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '<a href="([^"]+?)"', 1)[0])
            title = self.cleanHtmlStr(self.cm.ph.getSearchGroups(item, 'title="([^"]+?)"', 1)[0])
            title2 = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<h3>', '</h3>', False)[1])
            # Prefer the longer of the two candidate titles.
            if len(title) < len(title2):
                title = title2
            if '' != url:
                self.addVideo({'title': title, 'url': url, 'desc': desc, 'icon': icon})
        if nextPage:
            params = dict(cItem)
            params.update({'title': _('Next page'), 'page': page + 1})
            self.addDir(params)

    def listUpdatedSeries(self, cItem, category):
        """List recently updated series from the 'latest' tooltip blocks."""
        printDBG("ZalukajCOM.listUpdatedSeries")
        sts, data = self._getPage(cItem['url'])
        if not sts:
            return
        sp = '<div class="latest tooltip">'
        m2 = '<div class="doln">'
        data = self.cm.ph.getDataBeetwenMarkers(data, sp, m2, True)[1]
        data = data.split(sp)
        if len(data):
            del data[0]
        for item in data:
            icon = self.getFullUrl(self.cm.ph.getSearchGroups(item, 'src="([^"]+?)"', 1)[0])
            url = self.getFullUrl(self.cm.ph.getSearchGroups(item, '<a href="([^"]+?)"', 1)[0])
            title = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<div class="latest_title">', '</div>', False)[1])
            desc = self.cleanHtmlStr(self.cm.ph.getDataBeetwenMarkers(item, '<div class="latest_info">', '</div>', False)[1])
            if '' == url:
                continue
            params = dict(cItem)
            params.update({'category': category, 'title': title, 'url': url, 'desc': desc, 'icon': icon})
            self.addDir(params)

    def _listSeriesBase(self, cItem, category, m1, m2, sp):
        """Shared lister for series seasons/episodes.

        m1/m2 are node markers for getDataBeetwenNodes, sp splits items.
        When category == 'video' items are added as playable videos.
        """
        printDBG("ZalukajCOM._listSeriesBase")
        sts, data = self._getPage(cItem['url'])
        if not sts:
            return
        # Promotional series artwork used as icon for every listed item.
        icon = self.getFullIconUrl(self.cm.ph.getSearchGroups(data, '''<img[^>]+?src=['"]([^'^"]*?/promote_serial/[^'^"]+?)['"]''')[0])
        data = self.cm.ph.getDataBeetwenNodes(data, m1, m2, False)[1]
        data = data.split(sp)
        if len(data):
            del data[-1]
        for item in data:
            url = self.getFullUrl(self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"', 1)[0])
            title = self.cleanHtmlStr(item)
            if '' == url:
                continue
            params = dict(cItem)
            params.update({'category': category, 'title': title, 'url': url, 'icon': icon})
            if 'video' == category:
                self.addVideo(params)
            else:
                self.addDir(params)

    def listSeriesSeasons(self, cItem, category):
        """List a series' seasons; with a single season, go straight to episodes."""
        printDBG("ZalukajCOM.listSeriesSeasons")
        self._listSeriesBase(cItem, category, ('<div', '>', '"sezony"'), ('<div', '>', 'class="doln2"'), '</div>')
        if 1 == len(self.currList):
            newItem = self.currList[0]
            self.currList = []
            self.listSeriesEpisodes(newItem)

    def listSeriesEpisodes(self, cItem):
        """List a season's episodes as playable items."""
        printDBG("ZalukajCOM.listSeriesEpisodes")
        self._listSeriesBase(cItem, 'video', ('<div', '>', '"odcinkicat"'), ('<div', '>', 'class="doln2"'), '</div>')

    def listSearchResult(self, cItem, searchPattern, searchType):
        """Search via the ajax endpoint and list hits through listFilms."""
        printDBG("ZalukajCOM.listSearchResult cItem[%s], searchPattern[%s] searchType[%s]" % (cItem, searchPattern, searchType))
        #searchPattern = urllib.quote_plus(searchPattern)
        # NOTE(review): post_data is built but not passed on — GET query is used instead.
        post_data = {'searchinput': searchPattern}
        szukaj = 'https://zalukaj.com/v2/ajax/load.search?html=1&q=%s' % searchPattern
        params = {'name': 'category', 'category': 'films_list', 'url': szukaj, 'post_data': None}
        self.listFilms(params)

    def getLinksForVideo(self, cItem):
        """Resolve playable links for an item.

        Tries a logged-in fetch first (premium links), then anonymous.
        Returns a list of {'name', 'url'} dicts.
        """
        printDBG("ZalukajCOM.getLinksForVideo url[%s]" % cItem['url'])
        self.tryTologin()
        if self.loggedIn:
            tries = [True, False]
        else:
            tries = [False]
        urlTab = []
        for loggedIn in tries:
            url = cItem['url']
            sts, data = self._getPage(url, loggedIn=loggedIn)
            if not sts:
                continue
            url = self.getFullUrl(self.cm.ph.getSearchGroups(data, '"([^"]+?player.php[^"]+?)"', 1)[0])
            if '' == url:
                # No embedded player page — fall back to the external
                # "watch online" link resolved by the generic url parser.
                printDBG('No player.php in data')
                data = self.cm.ph.getDataBeetwenMarkers(data, 'Oglądaj Film Online', '<div class="doln">', False)[1]
                url = self.getFullUrl(self.cm.ph.getSearchGroups(data, 'href="([^"]+?)"[^>]*?target', 1)[0])
                urlTab.extend(self.up.getVideoLinkExt(url))
                continue
            sts, data = self._getPage(url, loggedIn=loggedIn)
            if not sts:
                continue
            url = self.getFullUrl(self.cm.ph.getSearchGroups(data, '<a href="([^"]+?)"', 1)[0])
            if '' == url:
                printDBG('No href in data[%s]' % '')
                continue
            sts, data = self._getPage(url, loggedIn=loggedIn)
            if not sts:
                continue
            # First check for premium link
            premium = False
            premiumLinks = self.cm.ph.getSearchGroups(data, '"bitrates"\t?\:\t?(\[[^]]+?\])', 1)[0]
            if premiumLinks != '':
                # New premium layout: JSON array of bitrate variants.
                printDBG("New premium premiumLinks: [%s]" % premiumLinks)
                try:
                    premiumLinks = byteify(json.loads(premiumLinks))
                    for pItem in premiumLinks:
                        urlTab.append({'name': 'zalukaj.tv premium ' + pItem.get('label', ''), 'url': pItem['url']})
                    premium = True
                except Exception:
                    printExc()
            if not premium:
                # Old premium layout: single url:'...' in the player config.
                url = self.cm.ph.getSearchGroups(data, "url:'([^']+?)'", 1)[0]
                printDBG("Old premium url: [%s]" % url)
                if url.startswith('http'):
                    urlTab.append({'name': 'zalukaj.tv premium ', 'url': url})
                    premium = True
            if not premium:
                # HTML5 <source> tags with mp4 variants.
                printDBG('No premium link data[%s]' % data)
                tmp = self.cm.ph.getAllItemsBeetwenMarkers(data, '<source', '>', False, False)
                for item in tmp:
                    if 'video/mp4' in item or '.mp4' in item:
                        label = self.cm.ph.getSearchGroups(item, '''label=['"]([^"^']+?)['"]''')[0]
                        res = self.cm.ph.getSearchGroups(item, '''res=['"]([^"^']+?)['"]''')[0]
                        if label == '':
                            label = res
                        url = self.cm.ph.getSearchGroups(item, '''src=['"]([^"^']+?)['"]''')[0]
                        if url.startswith('//'):
                            url = 'http:' + url
                        if not self.cm.isValidUrl(url):
                            continue
                        urlTab.append({'name': 'zalukaj.tv premium ' + label, 'url': strwithmeta(url, {'Referer': cItem['url']})})
                        premium = True
            if not premium:
                # Last resort: an embedded iframe handled by the url parser.
                url = self.getFullUrl(self.cm.ph.getSearchGroups(data, 'iframe src="([^"]+?)" width=', 1)[0])
                if self.cm.isValidUrl(url):
                    urlTab.extend(self.up.getVideoLinkExt(url))
            # premium link should be checked at first, so if we have free link here break
            if len(urlTab):
                break
        return urlTab

    def captcha(self):
        """Download the login captcha image and ask the user to type it.

        Returns the lower-cased answer string, or [] on failure/cancel.
        """
        self.USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
        self.HTTP_HEADER = {'User-Agent': self.USER_AGENT, 'DNT': '1', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding': 'gzip, deflate', 'Referer': 'https://zalukaj.com/', 'Origin': 'https://zalukaj.com/'}
        self.defaultParams = {'header': self.HTTP_HEADER, 'with_metadata': True, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE}
        httpParams = dict(self.defaultParams)
        imgUrl = 'https://zalukaj.com/captcha-image'
        from copy import deepcopy
        from Plugins.Extensions.IPTVPlayer.components.iptvmultipleinputbox import IPTVMultipleInputBox
        from Plugins.Extensions.IPTVPlayer.components.asynccall import MainSessionWrapper
        self.sessionEx = MainSessionWrapper()
        captchaTitle = ''
        captchaLabel = _('Captcha')
        captchaTitle = captchaLabel
        sendLabel = _('Send')
        header = dict(self.HTTP_HEADER)
        header['Accept'] = 'image/png,image/*;q=0.8,*/*;q=0.5'
        params = dict(self.defaultParams)
        # Accept JPEG or PNG payloads (magic-byte check).
        params.update({'maintype': 'image', 'subtypes': ['jpeg', 'png'], 'check_first_bytes': ['\xFF\xD8', '\xFF\xD9', '\x89\x50\x4E\x47'], 'header': header})
        filePath = GetTmpDir('.iptvplayer_captcha.jpg')
        rm(filePath)
        ret = self.cm.saveWebFile(filePath, imgUrl.replace('&', '&'), params)
        if not ret.get('sts'):
            SetIPTVPlayerLastHostError(_('Fail to get "%s".') % imgUrl)
            return []
        params = deepcopy(IPTVMultipleInputBox.DEF_PARAMS)
        params['accep_label'] = sendLabel
        params['title'] = captchaLabel
        params['status_text'] = captchaTitle
        params['status_text_hight'] = 200
        params['with_accept_button'] = True
        params['list'] = []
        item = deepcopy(IPTVMultipleInputBox.DEF_INPUT_PARAMS)
        item['label_size'] = (660, 110)
        item['input_size'] = (680, 25)
        item['icon_path'] = filePath
        item['title'] = _('Answer')
        item['input']['text'] = ''
        params['list'].append(item)
        # On-screen keyboard: captcha answers are case-sensitive on entry.
        params['vk_params'] = {'invert_letters_case': True}
        ret = 0
        retArg = self.sessionEx.waitForFinishOpen(IPTVMultipleInputBox, params)
        printDBG(retArg)
        if retArg and len(retArg) and retArg[0]:
            return retArg[0][0].lower()
        else:
            return []

    def tryTologin(self):
        """Log in when credentials changed since the last attempt.

        Solves the site captcha, scrapes the login form tokens, posts the
        credentials and verifies the account page. Returns self.loggedIn.
        """
        printDBG('tryTologin start')
        config.plugins.iptvplayer.zalukajtv_login
        config.plugins.iptvplayer.zalukajtv_password
        # Re-login only when state is unknown or credentials changed.
        if None == self.loggedIn or self.login != config.plugins.iptvplayer.zalukajtv_login.value or\
           self.password != config.plugins.iptvplayer.zalukajtv_password.value:
            self.login = config.plugins.iptvplayer.zalukajtv_login.value
            self.password = config.plugins.iptvplayer.zalukajtv_password.value
            rm(self.COOKIE_FILE)
            self.loggedIn = False
            self.msg = ''
            if '' == self.login.strip() or '' == self.password.strip():
                # No credentials configured — stay anonymous.
                return False
            rm(self.COOKIE_FILE)
            captcha = self.captcha()
            sts, msg = False, 'Problem z zalogowaniem użytkownika \n"%s".' % self.login
            post_data = None
            sts, data = self._getPage(ZalukajCOM.LOGIN_URL, params=post_data, loggedIn=True)
            if sts:
                printDBG(data)
                # Hidden anti-CSRF form fields required by the login endpoint.
                hash = self.cm.ph.getSearchGroups(data, '''name="hash" value=['"]([^'^"]+?)['"]''')[0].strip()
                expires = self.cm.ph.getSearchGroups(data, '''"expires" value=['"]([^'^"]+?)['"]''')[0].strip()
                post_data = {'expires': expires, 'hash': hash, 'username': self.login, 'password': self.password, 'captcha': captcha}
                #%(expires,hash,self.login,self.password)
                self.USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
                self.HTTP_HEADER = {'User-Agent': self.USER_AGENT, 'DNT': '1', 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding': 'gzip, deflate', 'Referer': 'https://zalukaj.com/', 'Origin': 'https://zalukaj.com/'}
                self.defaultParams = {'header': self.HTTP_HEADER, 'with_metadata': True, 'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE}
                httpParams = dict(self.defaultParams)
                httpParams['header'] = dict(httpParams['header'])
                httpParams['header']['Referer'] = 'https://zalukaj.com/'
                sts, data = self.cm.getPage('https://zalukaj.com/ajax/login', httpParams, post_data)
                printDBG(data)
                #printDBG( 'Host getInitList: chyba zalogowano do premium...' )
                #if 'error' in data: captcha()
                # Verify login by fetching the account-status endpoint.
                sts, data = self._getPage(url=self.getFullUrl('/libs/ajax/login.php?login=1'), loggedIn=True)
                if sts:
                    sts, data2 = self._getPage(self.getFullUrl('/libs/ajax/login.php?login=1'), loggedIn=True)
                    if sts:
                        printDBG(data)
                        sts, tmp = self.cm.ph.getDataBeetwenMarkers(data, '<p>Typ Konta:', '</p>', False)
                        if sts:
                            # Build the "logged in" message with account type and points.
                            tmp = tmp.replace('(kliknij by oglądać bez limitów)', '')
                            msg = 'Zostałeś poprawnie zalogowany.' + '\nTyp Konta: ' + self.cleanHtmlStr(tmp)
                            tmp = self.cm.ph.getDataBeetwenMarkers(data, '<p>Zebrane Punkty:', '</p>', False)[1].replace('» Wymień na VIP «', '')
                            if '' != tmp:
                                msg += '\nZebrane Punkty: ' + self.cleanHtmlStr(tmp)
                            self.loggedIn = True
            if not self.loggedIn:
                self.sessionEx.open(MessageBox, msg, type=MessageBox.TYPE_INFO, timeout=10)
            else:
                self.msg = msg.replace('\n', '[/br]')
        return self.loggedIn

    def handleService(self, index, refresh=0, searchPattern='', searchType=''):
        """Main dispatcher: route the current item's category to a lister."""
        printDBG('ZalukajCOM.handleService start')
        self.tryTologin()
        CBaseHostClass.handleService(self, index, refresh, searchPattern, searchType)
        name = self.currItem.get("name", None)
        category = self.currItem.get("category", '')
        printDBG("ZalukajCOM.handleService: ---------> name[%s], category[%s] " % (name, category))
        searchPattern = self.currItem.get("search_pattern", searchPattern)
        self.currList = []
        if None == name:
            self.listsTab(ZalukajCOM.MAIN_CAT_TAB, {'name': 'category', 'desc': self.msg})
        # FILMS
        elif 'films_sub_menu' == category:
            self.listsTab(ZalukajCOM.FILMS_SUB_MENU, self.currItem)
        elif 'films_popular' == category:
            self.listsTab(ZalukajCOM.FILMS_POPULAR, self.currItem)
        elif 'films_category' == category:
            self.listFilmsCategories(self.currItem, 'add_lang')
        # LANGUAGE FILTERS
        elif 'add_lang' == category:
            newItem = dict(self.currItem)
            newItem.update({'category': 'films_list'})
            self.listsTab(ZalukajCOM.LANGS_TAB, newItem)
        # LIST FILMS
        elif 'films_list' == category:
            self.listFilms(self.currItem)
        # SERIES
        elif 'series_sub_menu' == category:
            self.listsTab(ZalukajCOM.SERIES_SUB_MENU, self.currItem)
        elif 'series_list' == category:
            self.listSeries(self.currItem, 'series_seasons')
        elif 'series_updated' == category:
            self.listUpdatedSeries(self.currItem, 'series_episodes')
        elif 'series_seasons' == category:
            self.listSeriesSeasons(self.currItem, 'series_episodes')
        elif 'series_episodes' == category:
            self.listSeriesEpisodes(self.currItem)
        # SEARCH
        elif category in ["search", "search_next_page"]:
            cItem = dict(self.currItem)
            cItem.update({'search_item': False, 'name': 'category'})
            self.listSearchResult(cItem, searchPattern, searchType)
        # SEARCH HISTORY
        elif category == "search_history":
            self.listsHistory({'name': 'history', 'category': 'search'}, 'desc', _("Type: "))
        else:
            printExc()
        CBaseHostClass.endHandleService(self, index, refresh)
class diffanime:
    """Host scraper for http://diff-anime.pl (anime streaming site).

    Builds menu/item lists (self.currList) from scraped HTML and resolves
    video links (direct mp4 or embedded YouTube).  All network access goes
    through self.cm.getURLRequestData(); login state is kept in a cookie file.
    NOTE(review): code is Python-2 era (urllib/py2 idioms elsewhere in file).
    """

    # User-Agent string; also (mis)named HOST because it is passed as the
    # 'host' field of query_data dicts below.
    HOST = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.18) Gecko/20110621 Mandriva Linux/1.9.2.18-0.1mdv2010.2 (2010.2) Firefox/3.6.18'
    HEADER = {'User-Agent': HOST, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
    MAINURL = 'http://diff-anime.pl'
    # Top-level menu entries (Polish UI labels are runtime strings - keep as-is).
    SERVICE_MENU_TABLE = {
        1: "Lista anime (alfabetycznie)",
        # 2: "Lista anime (wg. gatunku)",
        2: "Ranking",
        3: "Ulubione",
        4: "Aktualności"
    }

    def __init__(self):
        self.up = urlparser.urlparser()
        self.cm = pCommon.common()
        self.sessionEx = MainSessionWrapper()
        self.ytp = YouTubeParser()
        self.ytformats = config.plugins.iptvplayer.ytformat.value
        # Temporary data
        self.currList = []
        self.currItem = {}
        # Login data
        self.COOKIEFILE = GetCookieDir('Diff-anime.cookie')
        self.usePremiumAccount = config.plugins.iptvplayer.diffanime_premium.value
        self.username = config.plugins.iptvplayer.diffanime_login.value
        self.password = config.plugins.iptvplayer.diffanime_password.value

    def getCurrList(self):
        return self.currList

    def setCurrList(self, list):
        self.currList = list

    def getCurrItem(self):
        return self.currItem

    def setCurrItem(self, item):
        self.currItem = item

    # Log in to the site.  On success 'Wyloguj' (logout link) appears in the
    # returned page and self.usePremiumAccount is set back to True; on failure
    # a MessageBox with the credentials is shown.
    def requestLoginData(self):
        if False == self.usePremiumAccount:
            printDBG("diffanime niezalogowany")
        else:
            # Reset the flag first; it is re-enabled only after login succeeds.
            self.usePremiumAccount = False
            url = self.MAINURL
            query_data = {'url': url, 'header': self.HEADER, 'use_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIEFILE, 'return_data': True}
            postdata = {'user_name': self.username, 'user_pass': self.password, 'remember_me': 'y', "login": "******"}
            try:
                data = self.cm.getURLRequestData(query_data, postdata)
            except:
                printDBG("diffanime requestLoginData exception")
                return
            if 'Wyloguj' in data:
                printDBG("diffanime Notification(" + self.username + ", Zostales poprawnie zalogowany)")
                self.usePremiumAccount = True
            else:
                self.sessionEx.waitForFinishOpen(MessageBox, 'Błąd logowania. Sprawdź dane.\nlogin - ' + self.username + ' \nhasło - ' + self.password, type=MessageBox.TYPE_INFO, timeout=10)
                printDBG("diffanime Notification(Blad logowania)")
    # end login

    def addDir(self, params):
        # Append a navigable category item to the current list.
        params['type'] = 'category'
        self.currList.append(params)
        return

    def addVideo(self, params):
        # Append a playable video item to the current list.
        params['type'] = 'video'
        self.currList.append(params)
        return

    def setTable(self):
        return self.SERVICE_MENU_TABLE

    # Get YT link
    def getYTVideoUrl(self, url):
        """Resolve a YouTube page URL to a list of {'name', 'url'} stream dicts."""
        printDBG("getYTVideoUrl url[%s]" % url)
        tmpTab = self.ytp.getDirectLinks(url, self.ytformats)
        movieUrls = []
        for item in tmpTab:
            movieUrls.append({'name': item['format'] + '\t' + item['ext'], 'url': item['url']})
        return movieUrls

    def getVideoUrlforYTube(self, url):
        """Scrape an episode page for an embedded YouTube iframe and resolve it."""
        printDBG("getVideoUrl url[%s]" % url)
        query_data = {'url': url, 'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIEFILE, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('getVideoUrl exception')
            return ''
        match = re.search('src="//www.youtube.com/(.+?)"', data)
        if match:
            printDBG('www.youtube.com/' + match.group(1))
            return self.getYTVideoUrl('www.youtube.com/' + match.group(1))
        else:
            printDBG('nie znaleziono YT link')
            return ''
    # end Get YT link

    # Get mp4 link
    def getVideoUrl(self, url):
        """Scrape an episode page for a direct mp4 player link ('file': '...')."""
        printDBG("getVideoUrl url[%s]" % url)
        query_data = {'url': url, 'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': False, 'load_cookie': True, 'cookiefile': self.COOKIEFILE, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('getVideoUrl exception')
            return ''
        match = re.search("'file': '(.+?)',", data)
        if match:
            return match.group(1)
        else:
            printDBG('nie znaleziono mp4 link')
            return ''
    # end Get mp4 link

    def listsMainMenu(self, table):
        """Build the top-level menu, decorating entries with icon/plot scraped
        from the news page."""
        query_data = {'url': self.MAINURL + '/newsy', 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listAbcItem exception')
            return
        match = re.compile("div class='sRight'><div class='panel news'>(.+?)<div class='left'><div>Czytań:", re.DOTALL).findall(data)
        if len(match) > 0:
            match2 = re.search(".png' alt=([^<]+?)class='news-category'", match[0])
            if match2:
                plot = match2.group(1)
            else:
                plot = ''
            match3 = re.search("class='news-category' />([^<]+?)</div>", match[0])
            if match3:
                plot2 = match3.group(1)
            else:
                plot2 = ''
            icon = re.compile("<div class='content'><img src='(.+?)' alt='").findall(match[0])
        # NOTE(review): if the regex above found nothing, plot/plot2/icon are
        # unbound here and this loop raises NameError - verify against the
        # live page layout before relying on this path.
        for num, val in table.items():
            params = {'name': 'main-menu', 'category': val, 'title': val, 'icon': self.MAINURL + icon[0], 'plot': self.cm.html_entity_decode(plot + plot2)}
            self.addDir(params)

    def listsABCMenu(self, table):
        """Build one category entry per letter for the alphabetical listing."""
        for i in range(len(table)):
            params = {'name': 'abc-menu', 'category': table[i], 'title': table[i], 'icon': ''}
            self.addDir(params)

    # "NEWS" ("AKTUALNOŚCI")
    def getlistsNews(self, url):
        """List news entries; each entry page is fetched again for icon/plot."""
        query_data = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r2 = re.compile("<div class='head'><h2><a href='/news/(.+?)'>(.+?)</a>").findall(data)
        if len(r2) > 0:
            for i in range(len(r2)):
                value = r2[i]
                title = self.cm.html_entity_decode(value[1])
                data = self.MAINURL + '/news/' + value[0]
                data2 = self.cm.getURLRequestData({'url': data, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True})
                value = re.search("<div class='content'><img src='(.+?)' alt='(.+?)' class='news-category' />(.+?).<br />", data2)
                if value:
                    icon = self.MAINURL + value.group(1)
                    plot = self.cm.html_entity_decode(value.group(2) + value.group(3))
                else:
                    icon = ''
                    plot = ''
                params = {'name': 'news', 'title': title, 'icon': icon, 'plot': plot, 'page': data}
                self.addVideo(params)

    # "FAVOURITES" ("ULUBIONE")
    def getlistsUlubione(self, url):
        """List the user's watch list, split into the site's five status
        sections (watching / completed / on-hold / dropped / planned)."""
        query_data = {'url': url + '/odcinki', 'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': False,
                      'load_cookie': True, 'cookiefile': self.COOKIEFILE, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r = re.compile("<div id='pUAL' class='panel pDef'>(.+?)<div id='footer'>", re.DOTALL).findall(data)
        if len(r) > 0:
            # "W trakcie" = currently watching
            x1 = re.compile("W trakcie<(.+?)>Ukończone<", re.DOTALL).findall(data)
            if len(x1) > 0:
                rx1 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x1[0])
                if len(rx1) > 0:
                    for i in range(len(rx1)):
                        value = rx1[i]
                        title = self.cm.html_entity_decode("W trakcie - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {'name': 'ulubione', 'title': title, 'page': page, 'icon': ''}
                        self.addDir(params)
            # "Ukończone" = completed
            x2 = re.compile("Ukończone<(.+?)>Wstrzymane<", re.DOTALL).findall(data)
            if len(x2) > 0:
                rx2 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x2[0])
                if len(rx2) > 0:
                    for i in range(len(rx2)):
                        value = rx2[i]
                        title = self.cm.html_entity_decode("Ukończone - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {'name': 'ulubione', 'title': title, 'page': page, 'icon': ''}
                        self.addDir(params)
            # "Wstrzymane" = on hold
            x3 = re.compile("Wstrzymane<(.+?)>Porzucone<", re.DOTALL).findall(data)
            if len(x3) > 0:
                rx3 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x3[0])
                if len(rx3) > 0:
                    for i in range(len(rx3)):
                        value = rx3[i]
                        title = self.cm.html_entity_decode("Wstrzymane - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {'name': 'ulubione', 'title': title, 'page': page, 'icon': ''}
                        self.addDir(params)
            # "Porzucone" = dropped
            x4 = re.compile("Porzucone<(.+?)>W planach<", re.DOTALL).findall(data)
            if len(x4) > 0:
                rx4 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x4[0])
                if len(rx4) > 0:
                    for i in range(len(rx4)):
                        value = rx4[i]
                        title = self.cm.html_entity_decode("Porzucone - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {'name': 'ulubione', 'title': title, 'page': page, 'icon': ''}
                        self.addDir(params)
            # "W planach" = plan to watch
            x5 = re.compile("W planach<(.+?)='footer'>", re.DOTALL).findall(data)
            if len(x5) > 0:
                rx5 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x5[0])
                if len(rx5) > 0:
                    for i in range(len(rx5)):
                        value = rx5[i]
                        title = self.cm.html_entity_decode("W planach - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {'name': 'ulubione', 'title': title, 'page': page, 'icon': ''}
                        self.addDir(params)

    # "RANKING"
    def getlistsRanks(self, url):
        """List the anime ranking table with title/page/icon per entry."""
        query_data = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r = re.compile("<h2>Ranking anime</h2>(.+?)</table>", re.DOTALL).findall(data)
        if len(r) > 0:
            r2 = re.compile("<td class='td2'><a href='/(.+?)'><img src='(.+?)' class='img' /></a><div class='con'><a href='(.+?)'>(.+?)</a><p>").findall(r[0])
            if len(r2) > 0:
                for i in range(len(r2)):
                    value = r2[i]
                    title = self.cm.html_entity_decode(value[3])
                    page = self.MAINURL + value[2]
                    icon = self.MAINURL + value[1]
                    params = {'name': 'ranks', 'title': title, 'page': page, 'icon': icon}
                    self.addDir(params)

    # "CATEGORIES" ("KATEGORIE") - currently disabled in handleService
    def getlistsGenre(self, url):
        """List genre categories from the settings panel of the anime list page."""
        query_data = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r = re.compile("</div><div id='pSettings' class='panel'>(.+?)</div></div>", re.DOTALL).findall(data)
        if len(r) > 0:
            r2 = re.compile("<a href='(.+?)'>(.+?)</a>").findall(r[0])
            if len(r2) > 0:
                for i in range(len(r2)):
                    value = r2[i]
                    title = self.cm.html_entity_decode(value[1])
                    page = value[0] + '&rowstart=00'
                    params = {'name': 'genset', 'title': title, 'page': page, 'plot': title, 'icon': ''}
                    self.addDir(params)

    # ANIME TITLES
    def getAnimeList(self, url):
        """List anime titles for a (relative) listing url; each title's own
        page is fetched for cover and description.  Adds a 'Next page' item
        when pagination markers are present."""
        query_data = {'url': self.MAINURL + url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('getAnimeList EXCEPTION')
        nextPage = False
        if -1 != data.find("div class='pagenav") and -1 != data.find("class='img"):
            nextPage = True
        else:
            nextPage = False
        r = re.compile("</div><div id='pSeries' class='panel'>(.+?)<div id='footer'>", re.DOTALL).findall(data)
        if len(r) > 0:
            r2 = re.compile("</a><div class='con'><a href='/(.+?)'>(.+?)</a><p>").findall(r[0])
            if len(r2) > 0:
                for i in range(len(r2)):
                    value = r2[i]
                    title = self.cm.html_entity_decode(value[1])
                    page = value[0]
                    data = self.MAINURL + "/" + value[0]
                    data2 = self.cm.getURLRequestData({'url': data, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True})
                    # Cover
                    grafika = re.search("</div><div class='content'><div class='con'><a href='(.+?)' class='fbox'>", data2)
                    if grafika:
                        icon = self.MAINURL + grafika.group(1)
                    else:
                        icon = ''
                    # Description - three progressively looser patterns
                    match = re.search("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)</div>", data2)
                    if match:
                        plot = match.group(1)
                    else:
                        match = re.search("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<", data2)
                        if match:
                            plot = match.group(1)
                        else:
                            match = re.search("<h2>Opis anime</h2></div><.+?>(.+?)<", data2)
                            if match:
                                plot = match.group(1)
                            else:
                                plot = ''
                    params = {'name': 'episodelist', 'title': title, 'page': page, 'icon': icon, 'plot': self.cm.html_entity_decode(plot)}
                    self.addDir(params)
        if nextPage is True:
            # Pagination: url ends with a 2-digit rowstart offset, step 10.
            nextpage = url[:-2] + str(int(url[-2:]) + 10)
            params = {'name': 'nextpage', 'title': 'Next page', 'page': nextpage}
            self.addDir(params)

    # EPISODES LIST
    def getEpisodeList(self, url):
        """List episodes for one anime: '<url>/odcinki' provides the episode
        table (logged-in request), the plain url provides cover/description."""
        query_data = {'url': url + '/odcinki', 'use_host': True, 'host': self.HOST, 'use_cookie': True, 'save_cookie': False,
                      'load_cookie': True, 'cookiefile': self.COOKIEFILE, 'use_post': False, 'return_data': True}
        query_data1 = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)  # episodes data
            data2 = self.cm.getURLRequestData(query_data1)  # cover, desc. data
        except:
            printExc()
        # Description - three progressively looser patterns
        match = re.search("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)</div>", data2)
        if match:
            plot = match.group(1)
        else:
            match = re.search("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<", data2)
            if match:
                plot = match.group(1)
            else:
                match = re.search("<h2>Opis anime</h2></div><.+?>(.+?)<", data2)
                if match:
                    plot = match.group(1)
                else:
                    plot = ''
        # Cover
        grafika = re.search("</div><div class='content'><div class='con'><a href='(.+?)' class='fbox'>", data2)
        if grafika:
            icon = self.MAINURL + grafika.group(1)
        else:
            icon = ''
        # Episodes
        match = re.compile("<span class='head2'>Statystyki:</span>(.+?)<div class='mainCon'>", re.DOTALL).findall(data)
        if len(match) > 0:
            match2 = re.compile("#(.+?)</div><div class=.+?</div><div class='con3'><a href='(.+?)' class='i'>").findall(match[0])
            if len(match2) > 0:
                for i in range(len(match2)):
                    value = match2[i]
                    page = self.MAINURL + value[1]
                    title = 'Odcinek ' + value[0]
                    params = {'title': title, 'page': page, 'plot': self.cm.html_entity_decode(plot), 'icon': icon}
                    self.addVideo(params)

    def handleService(self, index, refresh=0, searchPattern='', searchType=''):
        """Dispatch on the selected item (by index into currList) and rebuild
        currList for the next navigation level."""
        printDBG('handleService start')
        if 0 == refresh:
            if len(self.currList) <= index:
                printDBG("handleService wrong index: %s, len(self.currList): %d" % (index, len(self.currList)))
                return
            if -1 == index:
                # use default value
                self.currItem = {"name": None}
                printDBG("handleService for first self.category")
            else:
                self.currItem = self.currList[index]
        name = self.currItem.get("name", '')
        title = self.currItem.get("title", '')
        category = self.currItem.get("category", '')
        page = self.currItem.get("page", '')
        icon = self.currItem.get("icon", '')
        printDBG("handleService: |||||||||||||||||||||||||||||||| [%s] " % name)
        self.currList = []
        if str(page) == 'None' or page == '':
            page = '0'
        # Fill the Menu
        # MAIN MENU
        if name is None:
            # login first, if premium account configured
            if self.usePremiumAccount:
                self.requestLoginData()
            self.listsMainMenu(self.SERVICE_MENU_TABLE)
        # ANIME LIST (alphabetical)
        elif category == self.setTable()[1]:
            self.listsABCMenu(self.cm.makeABCList())
        elif name == 'abc-menu':
            url = '/lista-anime?letter=' + category + '&rowstart=00'
            self.getAnimeList(url)
        elif name == 'nextpage':
            self.getAnimeList(page)
        # ANIME LIST (by genre) - disabled
        # elif category == self.setTable()[2]:
        #     url = self.MAINURL + '/lista-anime'
        #     self.getlistsGenre(url)
        # elif name == 'genset':
        #     self.getAnimeList(page)
        # ANIME LIST (by ranking)
        elif category == self.setTable()[2]:
            url = self.MAINURL + '/ranking-anime'
            self.getlistsRanks(url)
        elif name == 'ranks':
            self.getEpisodeList(page)
        # FAVOURITES
        elif category == self.setTable()[3]:
            url = self.MAINURL + '/moja-lista/' + self.username
            self.getlistsUlubione(url)
        elif name == 'ulubione':
            self.getEpisodeList(page)
        # NEWS
        elif category == self.setTable()[4]:
            url = self.MAINURL + '/newsy'
            self.getlistsNews(url)
        elif name == 'news':
            self.getlistsNews(page)
        # Episodes will not display without this:
        # Episodes list
        elif name == 'episodelist':
            url = self.MAINURL + '/' + page
            self.getEpisodeList(url)
class UnCaptchaReCaptcha:
    """Solve reCAPTCHA v2 via the paid 9kw.eu captcha-solving service.

    Uploads the sitekey/page URL to the 9kw API, then polls for the answer
    token for up to 5 minutes.  On failure a MessageBox with the collected
    error messages is shown to the user.
    """

    def __init__(self, lang='en'):
        self.cm = common()
        self.sessionEx = MainSessionWrapper()
        self.MAIN_URL = 'https://www.9kw.eu/'

    def getMainUrl(self):
        """Return the 9kw.eu service base URL."""
        return self.MAIN_URL

    def getFullUrl(self, url, mainUrl=None):
        """Resolve a relative url against mainUrl (defaults to the 9kw base)."""
        if mainUrl is None:
            mainUrl = self.getMainUrl()
        return self.cm.getFullUrl(url, mainUrl)

    def processCaptcha(self, sitekey, referer=''):
        """Submit the captcha to 9kw and poll until a token is available.

        sitekey -- the reCAPTCHA site key of the target page
        referer -- URL of the page showing the captcha (sent as pageurl)
        Returns the solved token, or '' on any failure (after informing the
        user via a MessageBox).
        """
        sleepObj = None
        token = ''
        errorMsgTab = []
        apiKey = config.plugins.iptvplayer.api_key_9kweu.value
        # action=usercaptchaupload registers the captcha and returns captchaid
        apiUrl = self.getFullUrl('/index.cgi?apikey=') + apiKey + '&action=usercaptchaupload&interactive=1&json=1&file-upload-01=' + sitekey + '&oldsource=recaptchav2&pageurl=' + urllib.quote(referer)
        try:
            token = ''
            sts, data = self.cm.getPage(apiUrl)
            if sts:
                printDBG('API DATA:\n%s\n' % data)
                data = json_loads(data)
                if 'captchaid' in data:
                    captchaid = data['captchaid']
                    # Overall polling budget: 300 s (handled by GetIPTVSleep)
                    sleepObj = GetIPTVSleep()
                    sleepObj.Sleep(300, False)
                    tries = 0
                    while True:
                        tries += 1
                        timeout = sleepObj.getTimeout()
                        # First poll after 10 s, subsequent polls every 5 s
                        # (or whatever remains of the budget if < 10 s).
                        if tries == 1:
                            timeout = 10
                        elif timeout > 10:
                            timeout = 5
                        time.sleep(timeout)
                        # action=usercaptchacorrectdata fetches the answer
                        apiUrl = self.getFullUrl('/index.cgi?apikey=') + apiKey + '&action=usercaptchacorrectdata&json=1&id=' + captchaid
                        sts, data = self.cm.getPage(apiUrl)
                        if not sts:
                            # Transient network error: retry until the 300 s
                            # budget runs out.  (Dead code that followed this
                            # continue in the original has been removed.)
                            continue
                        printDBG('API DATA:\n%s\n' % data)
                        data = json_loads(data)
                        # A missing 'answer' key raises and is reported via
                        # the outer except handler.
                        token = data['answer']
                        if token != '':
                            break
                        if sleepObj.getTimeout() == 0:
                            errorMsgTab.append(_('%s timeout.') % self.getMainUrl())
                            break
                else:
                    errorMsgTab.append(data['error'])
            else:
                errorMsgTab.append(_('Network failed %s.') % '1')
        except Exception as e:
            errorMsgTab.append(str(e))
            printExc()
        if sleepObj != None:
            sleepObj.Reset()
        if token == '':
            self.sessionEx.waitForFinishOpen(
                MessageBox,
                (_('Resolving reCaptcha with %s failed!\n\n') % self.getMainUrl()) + '\n'.join(errorMsgTab),
                type=MessageBox.TYPE_ERROR, timeout=10)
        return token
class CBaseSubProviderClass:
    """Base class for subtitle providers.

    Supplies list management (currList/currItem), URL resolution, IMDb lookup
    helpers, archive download/unpack and encoding-conversion utilities shared
    by all concrete subtitle hosts.
    """

    def __init__(self, params=None):
        # Avoid the shared-mutable-default pitfall; behavior is unchanged for
        # callers that pass nothing or pass a dict.
        params = {} if params is None else params
        self.TMP_FILE_NAME = '.iptv_subtitles.file'
        self.TMP_DIR_NAME = '/.iptv_subtitles.dir/'
        self.sessionEx = MainSessionWrapper(mainThreadIdx=1)
        proxyURL = params.get('proxyURL', '')
        useProxy = params.get('useProxy', False)
        if 'MozillaCookieJar' == params.get('cookie_type', ''):
            self.cm = common(proxyURL, useProxy, True)
        else:
            self.cm = common(proxyURL, useProxy)
        self.currList = []
        self.currItem = {}
        if '' != params.get('cookie', ''):
            self.COOKIE_FILE = GetCookieDir(params['cookie'])
        self.moreMode = False
        self.params = params
        self.minPyVer = params.get('min_py_ver', 0)

    def checkPythonVersion(self, pyVer):
        """Warn the user (once) when the running Python is older than pyVer."""
        try:
            from Screens.MessageBox import MessageBox
            import sys
            if sys.version_info < pyVer:
                message = _('This service requires a new Enigma2 image with a Python version %s or later.') % ('.'.join(str(x) for x in pyVer))
                self.sessionEx.waitForFinishOpen(MessageBox, message, type=MessageBox.TYPE_INFO, timeout=10)
        except Exception:
            printExc()

    def getSupportedFormats(self, all=False):
        """Return supported subtitle formats; all=True includes convertible ones."""
        if all:
            ret = list(IPTVSubtitlesHandler.getSupportedFormats())
        else:
            ret = list(IPTVSubtitlesHandler.SUPPORTED_FORMATS)
        return ret

    def getMaxFileSize(self):
        return 1024 * 1024 * 5  # 5MB, max size of sub file to be download

    def getMaxItemsInDir(self):
        return 500

    def listsTab(self, tab, cItem):
        """Append one category dir per tab entry, merged over cItem."""
        for item in tab:
            params = dict(cItem)
            params.update(item)
            params['name'] = 'category'
            self.addDir(params)

    def iptv_execute(self, cmd):
        """Run a shell command through the player's executor; returns its dict."""
        printDBG("iptv_execute cmd_exec [%s]" % cmd)
        ret = iptv_execute(1)(cmd)
        printDBG("iptv_execute cmd_ret sts[%s] code[%s] data[%s]" % (ret.get('sts', ''), ret.get('code', ''), ret.get('data', '')))
        return ret

    @staticmethod
    def cleanHtmlStr(str):
        """Strip tags/entities and collapse whitespace in an HTML fragment."""
        str = str.replace('<', ' <')
        str = str.replace('&nbsp;', ' ')
        str = clean_html(str)
        str = str.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
        return CParsingHelper.removeDoubles(str, ' ').strip()

    @staticmethod
    def getStr(v, default=''):
        # NOTE(review): Python-2 semantics - encodes unicode to utf-8 bytes,
        # passes byte strings through; anything else yields default.
        if type(v) == type(u''):
            return v.encode('utf-8')
        elif type(v) == type(''):
            return v
        return default

    def getCurrList(self):
        return self.currList

    def setCurrList(self, list):
        self.currList = list

    def getCurrItem(self):
        return self.currItem

    def setCurrItem(self, item):
        self.currItem = item

    def addDir(self, params, atTheEnd=True):
        params['type'] = 'category'
        if atTheEnd:
            self.currList.append(params)
        else:
            self.currList.insert(0, params)
        return

    def addMore(self, params, atTheEnd=True):
        params['type'] = 'more'
        if atTheEnd:
            self.currList.append(params)
        else:
            self.currList.insert(0, params)
        return

    def addSubtitle(self, params, atTheEnd=True):
        params['type'] = 'subtitle'
        if atTheEnd:
            self.currList.append(params)
        else:
            self.currList.insert(0, params)
        return

    def getMainUrl(self):
        return self.MAIN_URL

    def getFullUrl(self, url, currUrl=None):
        """Resolve url to an absolute URL, using currUrl's base when given and
        valid, otherwise the provider's MAIN_URL."""
        if url.startswith('./'):
            url = url[1:]
        if currUrl == None or not self.cm.isValidUrl(currUrl):
            try:
                mainUrl = self.getMainUrl()
            except Exception:
                mainUrl = 'http://fake'
        else:
            mainUrl = self.cm.getBaseUrl(currUrl)
        if url.startswith('//'):
            proto = mainUrl.split('://', 1)[0]
            url = proto + ':' + url
        elif url.startswith('://'):
            proto = mainUrl.split('://', 1)[0]
            url = proto + url
        elif url.startswith('/'):
            url = mainUrl + url[1:]
        elif 0 < len(url) and '://' not in url:
            if currUrl == None or not self.cm.isValidUrl(currUrl):
                url = mainUrl + url
            else:
                url = urljoin(currUrl, url)
        return url

    def handleService(self, index, refresh=0):
        """Select currItem for navigation; refresh==2 activates 'more' mode."""
        if self.minPyVer > 0:
            self.checkPythonVersion(self.minPyVer)
            self.minPyVer = 0  # inform only once
        self.moreMode = False
        if 0 == refresh:
            if len(self.currList) <= index:
                return
            if -1 == index:
                self.currItem = {"name": None}
            else:
                self.currItem = self.currList[index]
        if 2 == refresh:  # refresh for more items
            printDBG("CBaseSubProviderClass endHandleService index[%s]" % index)
            # remove item more and store items before and after item more
            self.beforeMoreItemList = self.currList[0:index]
            self.afterMoreItemList = self.currList[index + 1:]
            self.moreMode = True
            if -1 == index:
                self.currItem = {"name": None}
            else:
                self.currItem = self.currList[index]

    def endHandleService(self, index, refresh):
        """After a 'more' refresh, splice new items between the saved head/tail."""
        if 2 == refresh:  # refresh for more items
            currList = self.currList
            self.currList = self.beforeMoreItemList
            for item in currList:
                if 'more' == item['type'] or (item not in self.beforeMoreItemList and item not in self.afterMoreItemList):
                    self.currList.append(item)
            self.currList.extend(self.afterMoreItemList)
            self.beforeMoreItemList = []
            self.afterMoreItemList = []
        self.moreMode = False

    def imdbGetSeasons(self, imdbid, promSeason=None):
        """Return (sts, [season numbers]) scraped from the IMDb episodes page;
        promSeason (if matched) is moved to the front of the list."""
        printDBG('CBaseSubProviderClass.imdbGetSeasons imdbid[%s]' % imdbid)
        promotItem = None
        list = []
        # get all seasons
        sts, data = self.cm.getPage("http://www.imdb.com/title/tt%s/episodes" % imdbid)
        if not sts:
            return False, []
        data = self.cm.ph.getDataBeetwenMarkers(data, '<select id="bySeason"', '</select>', False)[1]
        seasons = re.compile('value="([0-9]+?)"').findall(data)
        for season in seasons:
            if None != promSeason and season == str(promSeason):
                promotItem = season
            else:
                list.append(season)
        if promotItem != None:
            list.insert(0, promotItem)
        return True, list

    def imdbGetEpisodesForSeason(self, imdbid, season, promEpisode=None):
        """Return (sts, [episode dicts]) for one season; promEpisode (if
        matched) is moved to the front of the list."""
        printDBG('CBaseSubProviderClass.imdbGetEpisodesForSeason imdbid[%s] season[%s]' % (imdbid, season))
        promotItem = None
        list = []
        # get episodes for season
        sts, data = self.cm.getPage("http://www.imdb.com/title/tt%s/episodes/_ajax?season=%s" % (imdbid, season))
        if not sts:
            return False, []
        data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="list detail eplist">', '<hr>', False)[1]
        data = data.split('<div class="clear">')
        if len(data):
            del data[-1]
        for item in data:
            episodeTitle = self.cm.ph.getSearchGroups(item, 'title="([^"]+?)"')[0]
            eimdbid = self.cm.ph.getSearchGroups(item, 'data-const="tt([0-9]+?)"')[0]
            episode = self.cm.ph.getSearchGroups(item, 'content="([0-9]+?)"')[0]
            params = {"episode_title": episodeTitle, "episode": episode, "eimdbid": eimdbid}
            if None != promEpisode and episode == str(promEpisode):
                promotItem = params
            else:
                list.append(params)
        if promotItem != None:
            list.insert(0, promotItem)
        return True, list

    def imdbGetMoviesByTitle(self, title):
        """Search IMDb by title; returns (sts, [{'title','base_title','year','imdbid'}])."""
        printDBG('CBaseSubProviderClass.imdbGetMoviesByTitle title[%s]' % (title))
        sts, data = self.cm.getPage("http://www.imdb.com/find?ref_=nv_sr_fn&q=%s&s=tt" % urllib.quote_plus(title))
        if not sts:
            return False, []
        list = []
        data = self.cm.ph.getDataBeetwenMarkers(data, '<table class="findList">', '</table>', False)[1]
        data = data.split('</tr>')
        if len(data):
            del data[-1]
        for item in data:
            item = item.split('<a ')
            item = '<a ' + item[2]
            if '(Video Game)' in item:
                continue
            imdbid = self.cm.ph.getSearchGroups(item, '/tt([0-9]+?)/')[0]
            baseTtitle = ' '.join(self.cm.ph.getAllItemsBeetwenMarkers(item, '<a ', '</a>'))
            title = self.cleanHtmlStr(item)
            year = self.cm.ph.getSearchGroups(item, '\((20[0-9]{2})\)')[0]
            if '' == year:
                # FIX: the fallback previously repeated the identical 20xx
                # pattern (a no-op); pre-2000 releases need a 19xx match.
                year = self.cm.ph.getSearchGroups(item, '\((19[0-9]{2})\)')[0]
            if title.endswith('-'):
                title = title[:-1].strip()
            list.append({'title': title, 'base_title': self.cleanHtmlStr(baseTtitle), 'year': year, 'imdbid': imdbid})
        return True, list

    def imdbGetOrginalByTitle(self, imdbid):
        """Return (sts, {'title'}) with the original title of an IMDb id."""
        printDBG('CBaseSubProviderClass.imdbGetOrginalByTitle imdbid[%s]' % (imdbid))
        if not imdbid.startswith('tt'):
            imdbid = 'tt' + imdbid
        sts, data = self.cm.getPage('http://www.imdb.com/title/' + imdbid)
        if not sts:
            return False, {}
        title = self.cm.ph.getSearchGroups(data, '''<meta property='og:title' content="([^\(^"]+?)["\(]''')[0].strip()
        return True, {'title': title}

    def getTypeFromThemoviedb(self, imdbid, title):
        """Classify an imdb id as 'movie' or 'series' via themoviedb find API."""
        if '(TV Series)' in title:
            return 'series'
        itemType = 'movie'
        try:
            # lazy import
            import base64
            try:
                import json
            except Exception:
                import simplejson as json
            from Plugins.Extensions.IPTVPlayer.tools.iptvtools import byteify
            url = "https://api.themoviedb.org/3/find/tt{0}?api_key={1}&external_source=imdb_id".format(imdbid, base64.b64decode('NjMxMWY4MmQ1MjAxNDI2NWQ3NjVkMzk4MDJhYWZhYTc='))
            sts, data = self.cm.getPage(url)
            if not sts:
                return itemType
            data = byteify(json.loads(data))
            if len(data["tv_results"]):
                itemType = 'series'
        except Exception:
            printExc()
        return itemType

    def downloadAndUnpack(self, url, params={}, post_data=None, unpackToSubDir=False):
        """Download a zip/rar archive and unpack it; returns the target dir
        or None on failure."""
        data, fileName = self.downloadFileData(url, params, post_data)
        if data == None:
            return None
        ext = fileName.rsplit('.', 1)[-1].lower()
        printDBG("fileName[%s] ext[%s]" % (fileName, ext))
        if ext not in ['zip', 'rar']:
            SetIPTVPlayerLastHostError(_('Unknown file extension "%s".') % ext)
            return None
        tmpFile = GetTmpDir(self.TMP_FILE_NAME)
        tmpArchFile = tmpFile + '.' + ext
        tmpDIR = ''
        if unpackToSubDir:
            dirName = fileName.rsplit('.', 1)[0].split('filename=', 1)[-1]
            if dirName != '':
                tmpDIR = GetSubtitlesDir(dirName)
        if tmpDIR == '':
            tmpDIR = GetTmpDir(self.TMP_DIR_NAME)
        printDBG(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        printDBG(fileName)
        printDBG(tmpFile)
        printDBG(tmpArchFile)
        printDBG(tmpDIR)
        printDBG(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        if not self.writeFile(tmpArchFile, data):
            return None
        if not self.unpackArchive(tmpArchFile, tmpDIR):
            rm(tmpArchFile)
            return None
        return tmpDIR

    def downloadFileData(self, url, params={}, post_data=None):
        """Download at most getMaxFileSize() bytes; returns (data, fileName)
        or (None, '') on failure.  fileName comes from Content-Disposition,
        falling back to the last URL path segment."""
        printDBG('CBaseSubProviderClass.downloadFileData url[%s]' % url)
        urlParams = dict(params)
        urlParams['return_data'] = False
        try:
            fileSize = self.getMaxFileSize()
            sts, response = self.cm.getPage(url, urlParams, post_data)
            fileName = response.info().get('Content-Disposition', '')
            if fileName != '':
                tmpFileName = self.cm.ph.getSearchGroups(fileName.lower(), '''filename=['"]([^'^"]+?)['"]''')[0]
                if tmpFileName != '':
                    printDBG("downloadFileData: replace fileName[%s] with [%s]" % (fileName, tmpFileName))
                    fileName = tmpFileName
            else:
                fileName = urllib.unquote(response.geturl().split('/')[-1])
            data = response.read(fileSize)
            response.close()
            return data, fileName
        except Exception:
            printExc()
        return None, ''

    def writeFile(self, filePath, data):
        """Write data to filePath; returns True on success, reports the error
        via SetIPTVPlayerLastHostError otherwise."""
        printDBG('CBaseSubProviderClass.writeFile path[%s]' % filePath)
        try:
            with open(filePath, 'w') as f:
                f.write(data)
            return True
        except Exception:
            printExc()
            SetIPTVPlayerLastHostError(_('Failed to write file "%s".') % filePath)
        return False

    def unpackZipArchive(self, tmpFile, tmpDIR):
        """Validate (no '..' path traversal) then extract a zip via the
        external unzip tool; returns True on success."""
        errorCode = 0
        # check if archive is not evil
        cmd = "unzip -l '{0}' 2>&1 ".format(tmpFile)
        ret = self.iptv_execute(cmd)
        if not ret['sts'] or 0 != ret['code']:
            errorCode = ret['code']
            if errorCode == 0:
                errorCode = 9
        elif '..' in ret['data']:
            errorCode = 9
        # if archive is valid then upack it
        if errorCode == 0:
            cmd = "unzip -o '{0}' -d '{1}' 2>/dev/null".format(tmpFile, tmpDIR)
            ret = self.iptv_execute(cmd)
            if not ret['sts'] or 0 != ret['code']:
                errorCode = ret['code']
                if errorCode == 0:
                    errorCode = 9
        if errorCode != 0:
            message = _('Unzip error code[%s].') % errorCode
            if str(errorCode) == str(127):
                message += '\n' + _('It seems that unzip utility is not installed.')
            elif str(errorCode) == str(9):
                message += '\n' + _('Wrong format of zip archive.')
            SetIPTVPlayerLastHostError(message)
            return False
        return True

    def unpackArchive(self, tmpFile, tmpDIR):
        """Recreate tmpDIR and extract a .zip or .rar archive into it."""
        printDBG('CBaseSubProviderClass.unpackArchive tmpFile[%s], tmpDIR[%s]' % (tmpFile, tmpDIR))
        rmtree(tmpDIR, ignore_errors=True)
        if not mkdirs(tmpDIR):
            SetIPTVPlayerLastHostError(_('Failed to create directory "%s".') % tmpDIR)
            return False
        if tmpFile.endswith('.zip'):
            return self.unpackZipArchive(tmpFile, tmpDIR)
        elif tmpFile.endswith('.rar'):
            cmd = "unrar e -o+ -y '{0}' '{1}' 2>/dev/null".format(tmpFile, tmpDIR)
            printDBG("cmd[%s]" % cmd)
            ret = self.iptv_execute(cmd)
            if not ret['sts'] or 0 != ret['code']:
                message = _('Unrar error code[%s].') % ret['code']
                if str(ret['code']) == str(127):
                    message += '\n' + _('It seems that unrar utility is not installed.')
                elif str(ret['code']) == str(9):
                    message += '\n' + _('Wrong format of rar archive.')
                SetIPTVPlayerLastHostError(message)
                return False
            return True
        return False

    def listSupportedFilesFromPath(self, cItem, subExt=['srt'], archExt=['rar', 'zip'], dirCategory=None):
        """List subtitle files, archives and (optionally) sub-directories of
        cItem['path'] into currList, capped at getMaxItemsInDir() entries."""
        printDBG('CBaseSubProviderClass.listSupportedFilesFromPath')
        maxItems = self.getMaxItemsInDir()
        numItems = 0
        # list files
        for file in os_listdir(cItem['path']):
            numItems += 1
            filePath = os_path.join(cItem['path'], file)
            params = dict(cItem)
            if os_path.isfile(filePath):
                ext = file.rsplit('.', 1)[-1].lower()
                params.update({'file_path': filePath, 'title': os_path.splitext(file)[0]})
                if ext in subExt:
                    params['ext'] = ext
                    self.addSubtitle(params)
                elif ext in archExt:
                    self.addDir(params)
            elif dirCategory != None and os_path.isdir(filePath):
                params.update({'category': dirCategory, 'path': filePath, 'title': file})
                self.addDir(params)
            if numItems >= maxItems:
                break
        self.currList.sort(key=lambda k: k['title'])

    def converFileToUtf8(self, inFile, outFile, lang=''):
        """Detect inFile's encoding (uchardet) and write a UTF-8 copy to
        outFile; returns True on success."""
        printDBG('CBaseSubProviderClass.converFileToUtf8 inFile[%s] outFile[%s]' % (inFile, outFile))
        # detect encoding
        encoding = ''
        cmd = '%s "%s"' % (GetUchardetPath(), inFile)
        ret = self.iptv_execute(cmd)
        if ret['sts'] and 0 == ret['code']:
            encoding = MapUcharEncoding(ret['data'])
            if 0 != ret['code'] or 'unknown' in encoding:
                encoding = ''
            else:
                encoding = encoding.strip()
        if lang == '':
            lang = GetDefaultLang()
        if lang == 'pl' and encoding == 'iso-8859-2':
            # FIX: original passed undefined name 'tmpFile' here (NameError);
            # the file under inspection is inFile.
            encoding = GetPolishSubEncoding(inFile)
        elif '' == encoding:
            encoding = 'utf-8'
        # convert file to UTF-8
        try:
            with open(inFile) as f:
                data = f.read()
            try:
                data = data.decode(encoding).encode('UTF-8')
                if self.writeFile(outFile, data):
                    return True
            except Exception:
                printExc()
                SetIPTVPlayerLastHostError(_('Failed to convert the file "%s" to UTF-8.') % inFile)
        except Exception:
            printExc()
            SetIPTVPlayerLastHostError(_('Failed to open the file "%s".') % inFile)
        return False
class UnCaptchaReCaptcha:
    """Solver for Google's reCAPTCHA v2 "fallback" (no-JavaScript) challenge.

    Fetches the fallback challenge page, downloads the challenge image,
    shows it to the user via UnCaptchaReCaptchaWidget and POSTs the answer
    back, looping until Google returns a verification token (max 20 rounds).
    """

    def __init__(self, lang='en'):
        # lang: value for the Accept-Language header.
        self.COOKIE_FILE = GetCookieDir('google.cookie')
        self.HTTP_HEADER = {
            'Accept': 'text/html',
            'Accept-Charset': 'UTF-8',
            'Accept-Encoding': 'gzip',
            'Accept-Language': lang,
            'Referer': 'https://www.google.com/recaptcha/api2/demo',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
        # Shared request parameters: persist Google cookies between the
        # fallback page, the payload image and the answer POST.
        self.HttpParams = {
            'header': self.HTTP_HEADER,
            'cookiefile': self.COOKIE_FILE,
            'use_cookie': True,
            'load_cookie': True,
            'save_cookie': True
        }
        self.cm = common()
        self.sessionEx = MainSessionWrapper()

    def processCaptcha(self, key, referer=None, lang='en'):
        """Interactively solve the captcha identified by site *key*.

        key     -- the site's public reCAPTCHA key ("k" URL parameter)
        referer -- optional Referer header to present to Google
        lang    -- UI language, passed as "hl" URL parameter
        Returns the verification token string, or '' on failure.
        """
        post_data = None
        token = ''
        iteration = 0
        if referer != None:
            self.HttpParams['header']['Referer'] = referer
        #reCaptchaUrl = 'http://www.google.com/recaptcha/api/fallback?k=%s' % (key)
        # New method as in plugin kodiondemand: read the currently deployed
        # api2 release version out of api.js, because the fallback endpoint
        # requires it as the "v" parameter.
        reCaptchaApiUrl = "https://www.google.com/recaptcha/api.js?hl=%s" % lang
        sts, apiCode = self.cm.getPage(reCaptchaApiUrl, self.HttpParams)
        if not sts:
            SetIPTVPlayerLastHostError(_('Fail to get "%s".') % reCaptchaApiUrl)
            return ''
        apiVersionUrl = re.findall("po.src\s*=\s*'(.*?)';", apiCode)
        if not apiVersionUrl:
            SetIPTVPlayerLastHostError(_('Fail to get "%s".') % reCaptchaApiUrl)
            return ''
        # The version is the 6th path component of the releases URL.
        version = apiVersionUrl[0].split("/")[5]
        printDBG("reCaptcha version: %s" % version)
        reCaptchaUrl = "https://www.google.com/recaptcha/api/fallback?k=" + key + "&hl=" + lang + "&v=" + version + "&t=2&ff=true"
        printDBG("reCaptchaUrl: %s " % reCaptchaUrl)
        while iteration < 20:
            if post_data:
                # Second and later rounds: POST the previous answer verbatim.
                self.HttpParams['raw_post_data'] = True
                sts, data = self.cm.getPage(reCaptchaUrl, self.HttpParams, post_data)
            else:
                sts, data = self.cm.getPage(reCaptchaUrl, self.HttpParams)
            if not sts:
                SetIPTVPlayerLastHostError(_('Fail to get "%s".') % reCaptchaUrl)
                return ''
            printDBG("+++++++++++++++++++++++++++++++++++++++++")
            printDBG(data)
            printDBG("+++++++++++++++++++++++++++++++++++++++++")
            imgUrl = ph.search(data, '"(/recaptcha/api2/payload[^"]+?)"')[0]
            iteration += 1
            # The returned page carries either a new image challenge
            # (one of these message containers is present)...
            message = ph.clean_html(ph.find(data, ('<div', '>', 'imageselect-desc'), '</div>', flags=0)[1])
            if not message:
                message = ph.clean_html(ph.find(data, ('<label', '>', 'fbc-imageselect-message-text'), '</label>', flags=0)[1])
            if not message:
                message = ph.clean_html(ph.find(data, ('<div', '>', 'imageselect-message'), '</div>', flags=0)[1])
            if '' == message:
                # ...or the verification token: try several markups, then stop.
                token = ph.find(data, ('<div', '>', 'verification-token'), '</div>', flags=0)[1]
                token = ph.find(data, ('<textarea', '>'), '</textarea>', flags=0)[1].strip()
                if token == '':
                    token = ph.search(data, '"this\.select\(\)">(.*?)</textarea>')[0]
                if token == '':
                    token = ph.find(data, ('<textarea', '>'), '</textarea>', flags=0)[1].strip()
                if '' != token:
                    printDBG('>>>>>>>> Captcha token[%s]' % (token))
                else:
                    printDBG('>>>>>>>> Captcha Failed\n\n%s\n\n' % data)
                break
            cval = ph.search(data, 'name="c"\s+value="([^"]+)')[0]
            # FIX: unescape the HTML-entity-encoded query separators in the
            # payload URL. Previously this was a no-op replace('&', '&'),
            # which left broken "&amp;" separators in the image URL.
            imgUrl = 'https://www.google.com%s' % (imgUrl.replace('&amp;', '&'))
            message = ph.clean_html(message)
            accepLabel = ph.clean_html(ph.search(data, 'type="submit"\s+value="([^"]+)')[0])
            filePath = GetTmpDir('.iptvplayer_captcha.jpg')
            printDBG(">>>>>>>> Captcha message[%s]" % (message))
            printDBG(">>>>>>>> Captcha accep label[%s]" % (accepLabel))
            printDBG(">>>>>>>> Captcha imgUrl[%s] filePath[%s]" % (imgUrl, filePath))
            # Download the challenge image; sanity-check JPEG magic bytes.
            params = {
                'maintype': 'image',
                'subtypes': ['jpeg'],
                'check_first_bytes': ['\xFF\xD8', '\xFF\xD9']
            }
            ret = self.cm.saveWebFile(filePath, imgUrl, params)
            if not ret.get('sts'):
                SetIPTVPlayerLastHostError(_('Fail to get "%s".') % imgUrl)
                break
            # Show the image to the user and wait for their selection.
            retArg = self.sessionEx.waitForFinishOpen(
                UnCaptchaReCaptchaWidget,
                imgFilePath=filePath,
                message=message,
                title="reCAPTCHA v2",
                additionalParams={'accep_label': accepLabel})
            printDBG('>>>>>>>> Captcha response[%s]' % (retArg))
            if retArg is not None and len(retArg) and retArg[0]:
                answer = retArg[0]
                printDBG('>>>>>>>> Captcha answer[%s]' % (answer))
                # Next loop round POSTs this form data back to the fallback URL.
                post_data = urllib.urlencode({
                    'c': cval,
                    'response': answer
                }, doseq=True)
                printDBG(str(post_data))
            else:
                break
        return token
class CBaseHostClass:
    # Common base class for IPTVPlayer host (service) scrapers.
    # Wraps HTTP access (common), URL helpers, search history, favourites
    # and the current list/item navigation state used by handleService().

    def __init__(self, params={}):
        # params: optional configuration dict (read-only here):
        #   proxyURL, useProxy, cookie_type, history, history_store_type,
        #   cookie, min_py_ver.
        self.sessionEx = MainSessionWrapper()
        self.up = urlparser()
        proxyURL = params.get('proxyURL', '')
        useProxy = params.get('useProxy', False)
        # MozillaCookieJar cookie backend is selected via the third argument.
        if 'MozillaCookieJar' == params.get('cookie_type', ''):
            self.cm = common(proxyURL, useProxy, True)
        else:
            self.cm = common(proxyURL, useProxy)
        self.currList = []
        self.currItem = {}
        # History helper and cookie file are only created when configured.
        if '' != params.get('history', ''):
            self.history = CSearchHistoryHelper(params['history'], params.get('history_store_type', False))
        if '' != params.get('cookie', ''):
            self.COOKIE_FILE = GetCookieDir(params['cookie'])
        self.moreMode = False
        self.minPyVer = params.get('min_py_ver', 0)

    def checkPythonVersion(self, pyVer):
        # Warn the user (message box) if the image's Python is older than
        # pyVer and the ssl module lacks SNI support.
        try:
            from Screens.MessageBox import MessageBox
            import sys
            if sys.version_info < pyVer:
                hasSNI = False
                try:
                    from ssl import wrap_socket
                    from inspect import getargspec
                    # SNI is available when wrap_socket accepts server_hostname.
                    if 'server_hostname' in '%s' % [getargspec(wrap_socket)]:
                        hasSNI = True
                except Exception:
                    pass
                if not hasSNI:
                    message = _(
                        'This service requires a new Enigma2 image with a Python version %s or later.'
                    ) % ('.'.join(str(x) for x in pyVer))
                    message += '\n' + _(
                        'You can also install SNI patch for you python if available.'
                    )
                    self.sessionEx.waitForFinishOpen(MessageBox, message, type=MessageBox.TYPE_INFO, timeout=10)
        except Exception:
            printExc()

    def informAboutGeoBlockingIfNeeded(self, country, onlyOnce=True):
        # Notify the user once when their geolocated country differs from
        # the one this service is restricted to.
        try:
            # AttributeError on first call -> flag not yet initialised.
            if onlyOnce and self.isGeoBlockingChecked:
                return
        except Exception:
            self.isGeoBlockingChecked = False
        sts, data = self.cm.getPage('https://dcinfos.abtasty.com/geolocAndWeather.php')
        if not sts:
            return
        try:
            # Response is wrapped in one extra character on each side (JSONP-like).
            data = byteify(json.loads(data.strip()[1:-1]), '', True)
            if data['country'] != country:
                message = _(
                    '%s uses "geo-blocking" measures to prevent you from accessing the services from outside the %s Territory.'
                )
                GetIPTVNotify().push(message % (self.getMainUrl(), country), 'info', 5)
            self.isGeoBlockingChecked = True
        except Exception:
            printExc()

    def listsTab(self, tab, cItem, type='dir'):
        # Append every entry of *tab* (list of param dicts) to currList,
        # merged over cItem; per-item 'type' overrides the default.
        defaultType = type
        for item in tab:
            params = dict(cItem)
            params.update(item)
            params['name'] = 'category'
            type = item.get('type', defaultType)
            if type == 'dir':
                self.addDir(params)
            elif type == 'marker':
                self.addMarker(params)
            else:
                self.addVideo(params)

    def getMainUrl(self):
        return self.MAIN_URL

    def setMainUrl(self, url):
        # Store the base of *url* as MAIN_URL; returns False for invalid URLs.
        if self.cm.isValidUrl(url):
            self.MAIN_URL = self.cm.getBaseUrl(url)
            return True
        return False

    def getFullUrl(self, url, currUrl=None):
        # Resolve a possibly-relative *url* against currUrl (if valid) or
        # MAIN_URL; handles './', '//', '://' and '/' prefixed forms.
        if url.startswith('./'):
            url = url[1:]
        if currUrl == None or not self.cm.isValidUrl(currUrl):
            try:
                mainUrl = self.getMainUrl()
            except Exception:
                # MAIN_URL not set yet - placeholder keeps split() below safe.
                mainUrl = 'http://fake'
        else:
            mainUrl = self.cm.getBaseUrl(currUrl)
        if url.startswith('//'):
            # Protocol-relative URL: borrow the scheme from mainUrl.
            proto = mainUrl.split('://', 1)[0]
            url = proto + ':' + url
        elif url.startswith('://'):
            proto = mainUrl.split('://', 1)[0]
            url = proto + url
        elif url.startswith('/'):
            url = mainUrl + url[1:]
        elif 0 < len(url) and '://' not in url:
            if currUrl == None or not self.cm.isValidUrl(currUrl):
                url = mainUrl + url
            else:
                url = urljoin(currUrl, url)
        return url

    def getFullIconUrl(self, url, currUrl=None):
        # Same resolution as getFullUrl; kept separate so hosts can override.
        if currUrl != None:
            return self.getFullUrl(url, currUrl)
        else:
            return self.getFullUrl(url)

    def getDefaulIcon(self, cItem=None):
        # Returns the host's DEFAULT_ICON_URL if defined, else ''.
        try:
            return self.DEFAULT_ICON_URL
        except Exception:
            pass
        return ''

    @staticmethod
    def cleanHtmlStr(str):
        # Strip tags/whitespace from an HTML fragment and collapse spaces.
        # NOTE: parameter shadows the builtin 'str' (legacy naming, kept as-is).
        str = str.replace('<', ' <')
        # NOTE(review): these two replaces look like they originally targeted
        # '&nbsp;' entities / non-breaking spaces - confirm against upstream.
        str = str.replace(' ', ' ')
        str = str.replace(' ', ' ')
        str = clean_html(str)
        str = str.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
        return CParsingHelper.removeDoubles(str, ' ').strip()

    @staticmethod
    def getStr(v, default=''):
        # Coerce *v* to a byte string (Python 2): unicode is UTF-8 encoded,
        # str passes through, anything else yields *default*.
        if type(v) == type(u''):
            return v.encode('utf-8')
        elif type(v) == type(''):
            return v
        return default

    def getCurrList(self):
        return self.currList

    def setCurrList(self, list):
        self.currList = list

    def getCurrItem(self):
        return self.currItem

    def setCurrItem(self, item):
        self.currItem = item

    # The add* helpers tag *params* with an item type and append to currList.
    def addDir(self, params):
        params['type'] = 'category'
        self.currList.append(params)
        return

    def addMore(self, params):
        params['type'] = 'more'
        self.currList.append(params)
        return

    def addVideo(self, params):
        params['type'] = 'video'
        self.currList.append(params)
        return

    def addAudio(self, params):
        params['type'] = 'audio'
        self.currList.append(params)
        return

    def addPicture(self, params):
        params['type'] = 'picture'
        self.currList.append(params)
        return

    def addData(self, params):
        params['type'] = 'data'
        self.currList.append(params)
        return

    def addArticle(self, params):
        params['type'] = 'article'
        self.currList.append(params)
        return

    def addMarker(self, params):
        params['type'] = 'marker'
        self.currList.append(params)
        return

    def listsHistory(self, baseItem={
            'name': 'history',
            'category': 'Wyszukaj'
    }, desc_key='plot', desc_base=(_("Type: "))):
        # Populate currList with the stored search-history entries.
        # Entries are either dicts ({'pattern', 'type'}) or plain strings.
        list = self.history.getHistoryList()
        for histItem in list:
            plot = ''
            try:
                if type(histItem) == type({}):
                    pattern = histItem.get('pattern', '')
                    search_type = histItem.get('type', '')
                    if '' != search_type:
                        plot = desc_base + _(search_type)
                else:
                    pattern = histItem
                    search_type = None
                params = dict(baseItem)
                params.update({
                    'title': pattern,
                    'search_type': search_type,
                    desc_key: plot
                })
                self.addDir(params)
            except Exception:
                printExc()

    def getFavouriteData(self, cItem):
        # Serialise an item so it can be stored as a favourite.
        try:
            return json.dumps(cItem)
        except Exception:
            printExc()
        return ''

    def getLinksForFavourite(self, fav_data):
        # Resolve playable links for a favourite serialised by getFavouriteData.
        try:
            if self.MAIN_URL == None:
                self.selectDomain()
        except Exception:
            printExc()
        links = []
        try:
            cItem = byteify(json.loads(fav_data))
            links = self.getLinksForItem(cItem)
        except Exception:
            printExc()
        return links

    def setInitListFromFavouriteItem(self, fav_data):
        # Seed currList with a deserialised favourite item; returns success.
        try:
            if self.MAIN_URL == None:
                self.selectDomain()
        except Exception:
            printExc()
        try:
            params = byteify(json.loads(fav_data))
        except Exception:
            params = {}
            printExc()
            return False
        self.currList.append(params)
        return True

    def getLinksForItem(self, cItem):
        return self.getLinksForVideo(cItem)

    def handleService(self, index, refresh=0, searchPattern='', searchType=''):
        # Select the current item before a host dispatches on it.
        # refresh == 0: normal navigation; refresh == 2: "more" expansion,
        # which snapshots the items before/after the 'more' entry.
        if self.minPyVer > 0:
            self.checkPythonVersion(self.minPyVer)
            self.minPyVer = 0  # inform only once
        self.moreMode = False
        if 0 == refresh:
            if len(self.currList) <= index:
                return
            if -1 == index:
                self.currItem = {"name": None}
            else:
                self.currItem = self.currList[index]
        if 2 == refresh:  # refresh for more items
            printDBG(
                ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> endHandleService index[%s]"
                % index)
            # remove item more and store items before and after item more
            self.beforeMoreItemList = self.currList[0:index]
            self.afterMoreItemList = self.currList[index + 1:]
            self.moreMode = True
            if -1 == index:
                self.currItem = {"name": None}
            else:
                self.currItem = self.currList[index]

    def endHandleService(self, index, refresh):
        # Counterpart of handleService for refresh == 2: merge the freshly
        # scraped items back between the saved before/after snapshots,
        # keeping any new 'more' entry.
        if 2 == refresh:  # refresh for more items
            currList = self.currList
            self.currList = self.beforeMoreItemList
            for item in currList:
                if 'more' == item['type'] or (
                        item not in self.beforeMoreItemList
                        and item not in self.afterMoreItemList):
                    self.currList.append(item)
            self.currList.extend(self.afterMoreItemList)
            self.beforeMoreItemList = []
            self.afterMoreItemList = []
        self.moreMode = False
class diffanime:
    # Host scraper for diff-anime.pl (anime listing/streaming site).
    # NOTE: HOST is (mis)named - it actually holds the User-Agent string
    # and is passed as the 'host' request parameter throughout.
    HOST = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.18) Gecko/20110621 Mandriva Linux/1.9.2.18-0.1mdv2010.2 (2010.2) Firefox/3.6.18'
    HEADER = {
        'User-Agent': HOST,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
    }
    MAINURL = 'http://diff-anime.pl'
    # Top-level menu categories (Polish UI strings, matched by handleService).
    SERVICE_MENU_TABLE = {
        1: "Lista anime (alfabetycznie)",
        # 2: "Lista anime (wg. gatunku)",
        2: "Ranking",
        3: "Ulubione",
        4: "Aktualności"
    }

    def __init__(self):
        self.up = urlparser.urlparser()
        self.cm = pCommon.common()
        self.sessionEx = MainSessionWrapper()
        self.ytp = YouTubeParser()
        self.ytformats = config.plugins.iptvplayer.ytformat.value
        # Temporary data
        self.currList = []
        self.currItem = {}
        # Login data
        self.COOKIEFILE = GetCookieDir('Diff-anime.cookie')
        self.usePremiumAccount = config.plugins.iptvplayer.diffanime_premium.value
        self.username = config.plugins.iptvplayer.diffanime_login.value
        self.password = config.plugins.iptvplayer.diffanime_password.value

    def getCurrList(self):
        return self.currList

    def setCurrList(self, list):
        self.currList = list

    def getCurrItem(self):
        return self.currItem

    def setCurrItem(self, item):
        self.currItem = item

    # Login in to the site
    def requestLoginData(self):
        # POSTs the configured credentials; success is detected by the
        # 'Wyloguj' ("Log out") link appearing in the response.
        if False == self.usePremiumAccount:
            printDBG("diffanime niezalogowany")
        else:
            self.usePremiumAccount = False
            url = self.MAINURL
            query_data = {
                'url': url,
                'header': self.HEADER,
                'use_cookie': True,
                'save_cookie': True,
                'cookiefile': self.COOKIEFILE,
                'return_data': True
            }
            postdata = {
                'user_name': self.username,
                'user_pass': self.password,
                'remember_me': 'y',
                "login": "******"
            }
            try:
                data = self.cm.getURLRequestData(query_data, postdata)
            except:
                printDBG("diffanime requestLoginData exception")
                return
            if 'Wyloguj' in data:
                printDBG("diffanime Notification(" + self.username + ", Zostales poprawnie zalogowany)")
                self.usePremiumAccount = True
            else:
                # Login failed: show credentials back to the user (Polish message).
                self.sessionEx.waitForFinishOpen(
                    MessageBox,
                    'Błąd logowania. Sprawdź dane.\nlogin - ' + self.username + ' \nhasło - ' + self.password,
                    type=MessageBox.TYPE_INFO,
                    timeout=10)
                printDBG("diffanime Notification(Blad logowania)")
    # end login

    def addDir(self, params):
        params['type'] = 'category'
        self.currList.append(params)
        return

    def addVideo(self, params):
        params['type'] = 'video'
        self.currList.append(params)
        return

    def setTable(self):
        return self.SERVICE_MENU_TABLE

    # Get YT link
    def getYTVideoUrl(self, url):
        # Resolve direct stream URLs for a YouTube page via YouTubeParser.
        printDBG("getYTVideoUrl url[%s]" % url)
        tmpTab = self.ytp.getDirectLinks(url, self.ytformats)
        movieUrls = []
        for item in tmpTab:
            movieUrls.append({
                'name': item['format'] + '\t' + item['ext'],
                'url': item['url']
            })
        return movieUrls

    def getVideoUrlforYTube(self, url):
        # Find an embedded YouTube iframe on an episode page and resolve it.
        printDBG("getVideoUrl url[%s]" % url)
        query_data = {
            'url': url,
            'use_host': True,
            'host': self.HOST,
            'use_cookie': True,
            'save_cookie': False,
            'load_cookie': True,
            'cookiefile': self.COOKIEFILE,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('getVideoUrl exception')
            return ''
        match = re.search('src="//www.youtube.com/(.+?)"', data)
        if match:
            printDBG('www.youtube.com/' + match.group(1))
            return self.getYTVideoUrl('www.youtube.com/' + match.group(1))
        else:
            printDBG('nie znaleziono YT link')
            return ''
    # end Get YT link

    # Get mp4 link
    def getVideoUrl(self, url):
        # Extract a direct mp4 file URL from the episode page's player config.
        printDBG("getVideoUrl url[%s]" % url)
        query_data = {
            'url': url,
            'use_host': True,
            'host': self.HOST,
            'use_cookie': True,
            'save_cookie': False,
            'load_cookie': True,
            'cookiefile': self.COOKIEFILE,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('getVideoUrl exception')
            return ''
        match = re.search("'file': '(.+?)',", data)
        if match:
            return match.group(1)
        else:
            printDBG('nie znaleziono mp4 link')
            return ''
    # end Get mp4 link

    def listsMainMenu(self, table):
        # Build the top-level menu; icon/plot are scraped from the news page.
        query_data = {'url': self.MAINURL + '/newsy', 'return_data': True}
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listAbcItem exception')
            return
        match = re.compile(
            "div class='sRight'><div class='panel news'>(.+?)<div class='left'><div>Czytań:",
            re.DOTALL).findall(data)
        if len(match) > 0:
            match2 = re.search(".png' alt=([^<]+?)class='news-category'", match[0])
            if match2:
                plot = match2.group(1)
            else:
                plot = ''
            match3 = re.search("class='news-category' />([^<]+?)</div>", match[0])
            if match3:
                plot2 = match3.group(1)
            else:
                plot2 = ''
            icon = re.compile(
                "<div class='content'><img src='(.+?)' alt='").findall(match[0])
            # One directory entry per menu category, all sharing the same
            # scraped icon and description.
            for num, val in table.items():
                params = {
                    'name': 'main-menu',
                    'category': val,
                    'title': val,
                    'icon': self.MAINURL + icon[0],
                    'plot': self.cm.html_entity_decode(plot + plot2)
                }
                self.addDir(params)

    def listsABCMenu(self, table):
        # One directory per letter for alphabetical browsing.
        for i in range(len(table)):
            params = {
                'name': 'abc-menu',
                'category': table[i],
                'title': table[i],
                'icon': ''
            }
            self.addDir(params)

    # "AKTUALNOŚCI" (news)
    def getlistsNews(self, url):
        # List news entries; fetches every news page to scrape icon/plot.
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r2 = re.compile("<div class='head'><h2><a href='/news/(.+?)'>(.+?)</a>").findall(data)
        if len(r2) > 0:
            for i in range(len(r2)):
                value = r2[i]
                title = self.cm.html_entity_decode(value[1])
                data = self.MAINURL + '/news/' + value[0]
                data2 = self.cm.getURLRequestData({
                    'url': data,
                    'use_host': False,
                    'use_cookie': False,
                    'use_post': False,
                    'return_data': True
                })
                value = re.search(
                    "<div class='content'><img src='(.+?)' alt='(.+?)' class='news-category' />(.+?).<br />",
                    data2)
                if value:
                    icon = self.MAINURL + value.group(1)
                    plot = self.cm.html_entity_decode(value.group(2) + value.group(3))
                else:
                    icon = ''
                    plot = ''
                params = {
                    'name': 'news',
                    'title': title,
                    'icon': icon,
                    'plot': plot,
                    'page': data
                }
                self.addVideo(params)

    # "ULUBIONE" (favourites)
    def getlistsUlubione(self, url):
        # List the logged-in user's anime list, section by section
        # ("W trakcie"/"Ukończone"/"Wstrzymane"/"Porzucone"/"W planach").
        query_data = {
            'url': url + '/odcinki',
            'use_host': True,
            'host': self.HOST,
            'use_cookie': True,
            'save_cookie': False,
            'load_cookie': True,
            'cookiefile': self.COOKIEFILE,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r = re.compile(
            "<div id='pUAL' class='panel pDef'>(.+?)<div id='footer'>",
            re.DOTALL).findall(data)
        if len(r) > 0:
            # "W trakcie" (in progress) section.
            x1 = re.compile("W trakcie<(.+?)>Ukończone<", re.DOTALL).findall(data)
            if len(x1) > 0:
                rx1 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x1[0])
                if len(rx1) > 0:
                    for i in range(len(rx1)):
                        value = rx1[i]
                        title = self.cm.html_entity_decode("W trakcie - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {
                            'name': 'ulubione',
                            'title': title,
                            'page': page,
                            'icon': ''
                        }
                        self.addDir(params)
            # "Ukończone" (completed) section.
            x2 = re.compile("Ukończone<(.+?)>Wstrzymane<", re.DOTALL).findall(data)
            if len(x2) > 0:
                rx2 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x2[0])
                if len(rx2) > 0:
                    for i in range(len(rx2)):
                        value = rx2[i]
                        title = self.cm.html_entity_decode("Ukończone - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {
                            'name': 'ulubione',
                            'title': title,
                            'page': page,
                            'icon': ''
                        }
                        self.addDir(params)
            # "Wstrzymane" (on hold) section.
            x3 = re.compile("Wstrzymane<(.+?)>Porzucone<", re.DOTALL).findall(data)
            if len(x3) > 0:
                rx3 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x3[0])
                if len(rx3) > 0:
                    for i in range(len(rx3)):
                        value = rx3[i]
                        title = self.cm.html_entity_decode("Wstrzymane - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {
                            'name': 'ulubione',
                            'title': title,
                            'page': page,
                            'icon': ''
                        }
                        self.addDir(params)
            # "Porzucone" (dropped) section.
            x4 = re.compile("Porzucone<(.+?)>W planach<", re.DOTALL).findall(data)
            if len(x4) > 0:
                rx4 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x4[0])
                if len(rx4) > 0:
                    for i in range(len(rx4)):
                        value = rx4[i]
                        title = self.cm.html_entity_decode("Porzucone - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {
                            'name': 'ulubione',
                            'title': title,
                            'page': page,
                            'icon': ''
                        }
                        self.addDir(params)
            # "W planach" (planned) section.
            x5 = re.compile("W planach<(.+?)='footer'>", re.DOTALL).findall(data)
            if len(x5) > 0:
                rx5 = re.compile("'sTitle'><a href='(.+?)'>(.+?)</a>").findall(x5[0])
                if len(rx5) > 0:
                    for i in range(len(rx5)):
                        value = rx5[i]
                        title = self.cm.html_entity_decode("W planach - " + value[1])
                        page = self.MAINURL + value[0]
                        params = {
                            'name': 'ulubione',
                            'title': title,
                            'page': page,
                            'icon': ''
                        }
                        self.addDir(params)

    # "RANKING"
    def getlistsRanks(self, url):
        # List anime from the ranking table.
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r = re.compile("<h2>Ranking anime</h2>(.+?)</table>", re.DOTALL).findall(data)
        if len(r) > 0:
            r2 = re.compile(
                "<td class='td2'><a href='/(.+?)'><img src='(.+?)' class='img' /></a><div class='con'><a href='(.+?)'>(.+?)</a><p>"
            ).findall(r[0])
            if len(r2) > 0:
                for i in range(len(r2)):
                    value = r2[i]
                    title = self.cm.html_entity_decode(value[3])
                    page = self.MAINURL + value[2]
                    icon = self.MAINURL + value[1]
                    params = {
                        'name': 'ranks',
                        'title': title,
                        'page': page,
                        'icon': icon
                    }
                    self.addDir(params)

    # "KATEGORIE" (genres; currently unused - see commented dispatch below)
    def getlistsGenre(self, url):
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('listsGenre EXCEPTION')
        r = re.compile(
            "</div><div id='pSettings' class='panel'>(.+?)</div></div>",
            re.DOTALL).findall(data)
        if len(r) > 0:
            r2 = re.compile("<a href='(.+?)'>(.+?)</a>").findall(r[0])
            if len(r2) > 0:
                for i in range(len(r2)):
                    value = r2[i]
                    title = self.cm.html_entity_decode(value[1])
                    page = value[0] + '&rowstart=00'
                    params = {
                        'name': 'genset',
                        'title': title,
                        'page': page,
                        'plot': title,
                        'icon': ''
                    }
                    self.addDir(params)

    # ANIME TITLES
    def getAnimeList(self, url):
        # List anime titles for a letter/page; fetches each detail page
        # to scrape the cover and description. *url* must end in the
        # two-digit 'rowstart' offset used for paging.
        query_data = {
            'url': self.MAINURL + url,
            'use_host': False,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)
        except:
            printDBG('getAnimeList EXCEPTION')
        nextPage = False
        if -1 != data.find("div class='pagenav") and -1 != data.find("class='img"):
            nextPage = True
        else:
            nextPage = False
        r = re.compile(
            "</div><div id='pSeries' class='panel'>(.+?)<div id='footer'>",
            re.DOTALL).findall(data)
        if len(r) > 0:
            r2 = re.compile(
                "</a><div class='con'><a href='/(.+?)'>(.+?)</a><p>").findall(r[0])
            if len(r2) > 0:
                for i in range(len(r2)):
                    value = r2[i]
                    title = self.cm.html_entity_decode(value[1])
                    page = value[0]
                    data = self.MAINURL + "/" + value[0]
                    data2 = self.cm.getURLRequestData({
                        'url': data,
                        'use_host': False,
                        'use_cookie': False,
                        'use_post': False,
                        'return_data': True
                    })
                    # Cover
                    grafika = re.search(
                        "</div><div class='content'><div class='con'><a href='(.+?)' class='fbox'>",
                        data2)
                    if grafika:
                        icon = self.MAINURL + grafika.group(1)
                    else:
                        icon = ''
                    # Description - try progressively looser patterns.
                    match = re.search(
                        "<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)</div>",
                        data2)
                    if match:
                        plot = match.group(1)
                    else:
                        match = re.search(
                            "<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<",
                            data2)
                        if match:
                            plot = match.group(1)
                        else:
                            match = re.search(
                                "<h2>Opis anime</h2></div><.+?>(.+?)<", data2)
                            if match:
                                plot = match.group(1)
                            else:
                                plot = ''
                    params = {
                        'name': 'episodelist',
                        'title': title,
                        'page': page,
                        'icon': icon,
                        'plot': self.cm.html_entity_decode(plot)
                    }
                    self.addDir(params)
        if nextPage is True:
            # Paging: bump the trailing two-digit rowstart offset by 10.
            nextpage = url[:-2] + str(int(url[-2:]) + 10)
            params = {
                'name': 'nextpage',
                'title': 'Next page',
                'page': nextpage
            }
            self.addDir(params)

    # EPISODES LIST
    def getEpisodeList(self, url):
        # List episodes of one anime: '/odcinki' sub-page (authenticated)
        # for the episode links, base page for cover and description.
        query_data = {
            'url': url + '/odcinki',
            'use_host': True,
            'host': self.HOST,
            'use_cookie': True,
            'save_cookie': False,
            'load_cookie': True,
            'cookiefile': self.COOKIEFILE,
            'use_post': False,
            'return_data': True
        }
        query_data1 = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        try:
            data = self.cm.getURLRequestData(query_data)  # episodes data
            data2 = self.cm.getURLRequestData(query_data1)  # cover, desc. data
        except:
            printExc()
        # Description - same loosening fallback chain as getAnimeList.
        match = re.search(
            "<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)</div>",
            data2)
        if match:
            plot = match.group(1)
        else:
            match = re.search(
                "<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<",
                data2)
            if match:
                plot = match.group(1)
            else:
                match = re.search("<h2>Opis anime</h2></div><.+?>(.+?)<", data2)
                if match:
                    plot = match.group(1)
                else:
                    plot = ''
        # Cover
        grafika = re.search(
            "</div><div class='content'><div class='con'><a href='(.+?)' class='fbox'>",
            data2)
        if grafika:
            icon = self.MAINURL + grafika.group(1)
        else:
            icon = ''
        # Episodes
        match = re.compile(
            "<span class='head2'>Statystyki:</span>(.+?)<div class='mainCon'>",
            re.DOTALL).findall(data)
        if len(match) > 0:
            match2 = re.compile(
                "#(.+?)</div><div class=.+?</div><div class='con3'><a href='(.+?)' class='i'>"
            ).findall(match[0])
            if len(match2) > 0:
                for i in range(len(match2)):
                    value = match2[i]
                    page = self.MAINURL + value[1]
                    title = 'Odcinek ' + value[0]
                    params = {
                        'title': title,
                        'page': page,
                        'plot': self.cm.html_entity_decode(plot),
                        'icon': icon
                    }
                    self.addVideo(params)

    def handleService(self, index, refresh=0, searchPattern='', searchType=''):
        # Dispatch: select the current item, then fill currList according
        # to the item's 'name'/'category' (see SERVICE_MENU_TABLE).
        printDBG('handleService start')
        if 0 == refresh:
            if len(self.currList) <= index:
                printDBG(
                    "handleService wrong index: %s, len(self.currList): %d"
                    % (index, len(self.currList)))
                return
            if -1 == index:
                # use default value
                self.currItem = {"name": None}
                printDBG("handleService for first self.category")
            else:
                self.currItem = self.currList[index]
        name = self.currItem.get("name", '')
        title = self.currItem.get("title", '')
        category = self.currItem.get("category", '')
        page = self.currItem.get("page", '')
        icon = self.currItem.get("icon", '')
        printDBG("handleService: |||||||||||||||||||||||||||||||| [%s] " % name)
        self.currList = []
        if str(page) == 'None' or page == '':
            page = '0'
        # Fill the Menu
        # MAIN MENU
        if name is None:
            # log in first when a premium account is configured
            if self.usePremiumAccount:
                self.requestLoginData()
            self.listsMainMenu(self.SERVICE_MENU_TABLE)
        # ANIME LIST (alphabetical)
        elif category == self.setTable()[1]:
            self.listsABCMenu(self.cm.makeABCList())
        elif name == 'abc-menu':
            url = '/lista-anime?letter=' + category + '&rowstart=00'
            self.getAnimeList(url)
        elif name == 'nextpage':
            self.getAnimeList(page)
        # ANIME LIST (by genre) - disabled
        # elif category == self.setTable()[2]:
        #     url = self.MAINURL + '/lista-anime'
        #     self.getlistsGenre(url)
        # elif name == 'genset':
        #     self.getAnimeList(page)
        # ANIME LIST (by ranking)
        elif category == self.setTable()[2]:
            url = self.MAINURL + '/ranking-anime'
            self.getlistsRanks(url)
        elif name == 'ranks':
            self.getEpisodeList(page)
        # FAVOURITES ("Ulubione")
        elif category == self.setTable()[3]:
            url = self.MAINURL + '/moja-lista/' + self.username
            self.getlistsUlubione(url)
        elif name == 'ulubione':
            self.getEpisodeList(page)
        # NEWS ("Aktualności")
        elif category == self.setTable()[4]:
            url = self.MAINURL + '/newsy'
            self.getlistsNews(url)
        elif name == 'news':
            self.getlistsNews(page)
        # Episodes will not display without this:
        # Episodes list
        elif name == 'episodelist':
            url = self.MAINURL + '/' + page
            self.getEpisodeList(url)