def _get_episode_url(self, show_url, video):
        episode_pattern = 'href="([^"]+-s0*%se0*%s(?!\d)[^"]*)' % (video.season, video.episode)
        result = self._default_get_episode_url(show_url, video, episode_pattern)
        if result:
            return result

        url = urlparse.urljoin(self.base_url, show_url)
        html = self._http_get(url, cache_limit=2)
        fragment = dom_parser.parse_dom(html, "ul", {"class": "episode_list"})
        if fragment:
            ep_urls = dom_parser.parse_dom(fragment[0], "a", ret="href")
            ep_dates = dom_parser.parse_dom(fragment[0], "span", {"class": "episode_air_d"})
            ep_titles = dom_parser.parse_dom(fragment[0], "span", {"class": "episode_name"})
            force_title = scraper_utils.force_title(video)
            if not force_title and kodi.get_setting("airdate-fallback") == "true" and video.ep_airdate:
                for ep_url, ep_date in zip(ep_urls, ep_dates):
                    log_utils.log(
                        "Quikr Ep Airdate Matching: %s - %s - %s" % (ep_url, ep_date, video.ep_airdate),
                        log_utils.LOGDEBUG,
                    )
                    if video.ep_airdate == scraper_utils.to_datetime(ep_date, "%Y-%m-%d").date():
                        return scraper_utils.pathify_url(ep_url)

            if force_title or kodi.get_setting("title-fallback") == "true":
                norm_title = scraper_utils.normalize_title(video.ep_title)
                for ep_url, ep_title in zip(ep_urls, ep_titles):
                    ep_title = re.sub("<span>.*?</span>\s*", "", ep_title)
                    log_utils.log(
                        "Quikr Ep Title Matching: %s - %s - %s" % (ep_url, norm_title, video.ep_title),
                        log_utils.LOGDEBUG,
                    )
                    if norm_title == scraper_utils.normalize_title(ep_title):
                        return scraper_utils.pathify_url(ep_url)
Example #2
def getChannelGuideUrl(tvchannel):
    url = 'https://www.cinemagia.ro/program-tv/'
    
    try:
        req = urllib2.Request(url)
        req.add_header('User-Agent', common.HEADERS['User-Agent'])
        conn = urllib2.urlopen(req, timeout=5)
        html = conn.read()
        conn.close()
        
        urls = parseDOM(html, 'a', attrs={'class': 'station-link'}, ret='href')
        names = parseDOM(html, 'a', attrs={'class': 'station-link'})
        
        seq2 = tvchannel.lower().replace(' ', '').strip()
        
        seqm = SequenceMatcher()
        seqm.set_seq2(seq2)
        
        ratio_list = []
        
        for name in names:
            seq1 = name.lower().replace(' ', '').strip()
            seqm.set_seq1(seq1)
            ratio = seqm.ratio()
            ratio_list.append(ratio)
        
        ratio_max_index = max(xrange(len(ratio_list)), key=ratio_list.__getitem__)
        
        return urls[ratio_max_index]
    
    except:
        log_utils.log(traceback.format_exc())
    
    return None
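Note: the lookup above picks the station whose name is closest to the requested channel using difflib.SequenceMatcher. A self-contained sketch of just that matching step, with made-up channel names, could look like this:

from difflib import SequenceMatcher

# Made-up channel names; the real list comes from the scraped station-link elements.
names = ['Pro TV', 'Antena 1', 'TVR 1']
wanted = 'Pro TV'.lower().replace(' ', '').strip()

seqm = SequenceMatcher()
seqm.set_seq2(wanted)
ratios = []
for name in names:
    seqm.set_seq1(name.lower().replace(' ', '').strip())
    ratios.append(seqm.ratio())

best_index = max(range(len(ratios)), key=ratios.__getitem__)
print(names[best_index])  # -> Pro TV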
    def download_subtitle(self, url):
        url = BASE_URL + url
        (response, srt) = self.__get_url(url)
        if "Content-Disposition" not in response.info():
            return

        cd = response.info()["Content-Disposition"]
        r = re.search('filename="(.*)"', cd)
        if r:
            filename = r.group(1)
        else:
            filename = "addic7ed_subtitle.srt"

        final_path = os.path.join(BASE_PATH, filename)
        final_path = xbmc.translatePath(final_path)
        if not xbmcvfs.exists(os.path.dirname(final_path)):
            try:
                try:
                    xbmcvfs.mkdirs(os.path.dirname(final_path))
                except:
                    os.mkdir(os.path.dirname(final_path))
            except:
                log_utils.log("Failed to create directory %s" % os.path.dirname(final_path), log_utils.LOGERROR)
                raise

        with open(final_path, "w") as f:
            f.write(srt)
        return final_path
    def __get_cached_url(self, url, cache=8):
        log_utils.log("Fetching Cached URL: %s" % url, log_utils.LOGDEBUG)
        before = time.time()

        _, html = db_connection.get_cached_url(url, cache)
        if html:
            log_utils.log("Returning cached result for: %s" % (url), log_utils.LOGDEBUG)
            return html

        log_utils.log("No cached url found for: %s" % url, log_utils.LOGDEBUG)
        req = urllib2.Request(url)

        host = BASE_URL.replace("http://", "")
        req.add_header("User-Agent", USER_AGENT)
        req.add_header("Host", host)
        req.add_header("Referer", BASE_URL)
        try:
            body = self.__http_get_with_retry(url, req)
            body = body.decode("utf-8")
            parser = HTMLParser.HTMLParser()
            body = parser.unescape(body)
        except Exception as e:
            kodi.notify(msg="Failed to connect to URL: %s" % (url), duration=5000)
            log_utils.log("Failed to connect to URL %s: (%s)" % (url, e), log_utils.LOGERROR)
            return ""

        db_connection.cache_url(url, body)
        after = time.time()
        log_utils.log("Cached Url Fetch took: %.2f secs" % (after - before), log_utils.LOGDEBUG)
        return body
 def __get_links_from_playlist(self, grab_url, headers):
     sources = {}
     grab_url = grab_url.replace('\\', '')
     grab_html = self._http_get(grab_url, headers=headers, cache_limit=.5)
     js_data = scraper_utils.parse_json(grab_html, grab_url)
     try: playlist = js_data['playlist'][0]['sources']
     except: playlist = []
     for item in playlist:
         stream_url = item.get('file')
         if stream_url:
             if stream_url.startswith('/'):
                 stream_url = urlparse.urljoin(self.base_url, stream_url)
                 redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD')
                 if redir_url.startswith('http'):
                     stream_url = redir_url
             
             if self._get_direct_hostname(stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in item:
                 quality = scraper_utils.height_get_quality(item['label'])
             else:
                 quality = QUALITIES.HIGH
             
             log_utils.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
             sources[stream_url] = {'quality': quality, 'direct': True}
             if not kodi.get_setting('scraper_url'): break
     return sources
Example #6
    def __get_cached_url(self, url, cache=8):
        log_utils.log('Fetching Cached URL: %s' % url, xbmc.LOGDEBUG)
        before = time.time()

        _, html = db_connection.get_cached_url(url, cache)
        if html:
            log_utils.log('Returning cached result for: %s' % (url), xbmc.LOGDEBUG)
            return html

        log_utils.log('No cached url found for: %s' % url, xbmc.LOGDEBUG)
        req = urllib2.Request(url)

        host = BASE_URL.replace('http://', '')
        req.add_header('User-Agent', USER_AGENT)
        req.add_header('Host', host)
        req.add_header('Referer', BASE_URL)
        try:
            body = self.__http_get_with_retry(url, req)
            body = body.decode('utf-8')
            parser = HTMLParser.HTMLParser()
            body = parser.unescape(body)
        except Exception as e:
            builtin = 'XBMC.Notification(PrimeWire, Failed to connect to URL: %s, 5000, %s)'
            xbmc.executebuiltin(builtin % (url, ICON_PATH))
            log_utils.log('Failed to connect to URL %s: (%s)' % (url, e), xbmc.LOGERROR)
            return ''

        db_connection.cache_url(url, body)
        after = time.time()
        log_utils.log('Cached Url Fetch took: %.2f secs' % (after - before), xbmc.LOGDEBUG)
        return body
 def _get_episode_url(self, show_url, video):
     query = urlparse.parse_qs(urlparse.urlparse(show_url).query)
     if 'id' in query:
         url = urlparse.urljoin(self.base_url, '/api/v2/shows/%s' % (query['id'][0]))
         js_data = self._http_get(url, cache_limit=.5)
         if 'episodes' in js_data:
             force_title = scraper_utils.force_title(video)
             if not force_title:
                 for episode in js_data['episodes']:
                     if int(video.season) == int(episode['season']) and int(video.episode) == int(episode['number']):
                         return scraper_utils.pathify_url('?id=%s' % (episode['id']))
                 
                 if kodi.get_setting('airdate-fallback') == 'true' and video.ep_airdate:
                     for episode in js_data['episodes']:
                         if 'airdate' in episode:
                             ep_airdate = scraper_utils.to_datetime(episode['airdate'], "%Y-%m-%d").date()
                             if video.ep_airdate == (ep_airdate - datetime.timedelta(days=1)):
                                 return scraper_utils.pathify_url('?id=%s' % (episode['id']))
             else:
                 log_utils.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)
             
             if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title:
                 norm_title = scraper_utils.normalize_title(video.ep_title)
                 for episode in js_data['episodes']:
                     if 'name' in episode and norm_title in scraper_utils.normalize_title(episode['name']):
                         return scraper_utils.pathify_url('?id=%s' % (episode['id']))
def parse_link(link, item, patterns):
    delim = '[._ -]'
    link = urllib.unquote(link)
    file_name = link.split('/')[-1]
    for pattern in patterns:
        pattern = pattern.format(delim=delim)
        match = re.search(pattern, file_name, re.I)
        if match:
            match = dict((k, v) for k, v in match.groupdict().iteritems() if v is not None)
            item.update(match)
            break
    else:
        log_utils.log('No Regex Match: |%s|%s|' % (item, link), log_utils.LOGDEBUG)

    extra = item['extra'].upper()
    if 'X265' in extra or 'HEVC' in extra:
        item['format'] = 'x265'
    
    item['dubbed'] = True if 'DUBBED' in extra else False
    
    if 'airdate' in item and item['airdate']:
        pattern = '{delim}+'.format(delim=delim)
        item['airdate'] = re.sub(pattern, '-', item['airdate'])
        item['airdate'] = utils2.to_datetime(item['airdate'], "%Y-%m-%d").date()
        
    return item
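Note: parse_link depends on caller-supplied patterns that use named groups plus a {delim} placeholder; whatever groups match are merged into item. A minimal standalone sketch of that idea, with a hypothetical pattern and file name:

import re

delim = '[._ -]'
# Hypothetical pattern in the style parse_link expects; the group names feed item.update().
pattern = r'S(?P<season>\d+)E(?P<episode>\d+){delim}+(?P<extra>.*)'.format(delim=delim)
file_name = 'Some.Show.S01E02.720p.HDTV.x265.mkv'
match = re.search(pattern, file_name, re.I)
if match:
    item = dict((k, v) for k, v in match.groupdict().items() if v is not None)
    print(item)  # e.g. {'season': '01', 'episode': '02', 'extra': '720p.HDTV.x265.mkv'}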
 def resolve_link(self, link):
     try:
         xbmcvfs.delete(M3U8_PATH)
         query = urlparse.parse_qs(link)
         query = dict([(key, query[key][0]) if query[key] else (key, '') for key in query])
         if 'vid_id' in query and 'stream_id' in query and 'height' in query:
             auth_url = PL_URL % (query['vid_id'], query['stream_id'])
             result = self.__get_playlist_with_token(auth_url)
             if not result:
                 if int(query['height']) > 720:
                     if self.auth_torba():
                         result = self.__get_playlist_with_token(auth_url)
                 else:
                     result = self.__authorize_ip(auth_url)
             
             if result:
                 key = '%sp' % (query['height'])
                 if key in result:
                     if 'audio' in result:
                         streams = {'audio_stream': result['audio'], 'stream_name': key, 'video_stream': result[key]}
                         f = xbmcvfs.File(M3U8_PATH, 'w')
                         for line in M3U8_TEMPLATE:
                             line = line.format(**streams)
                             f.write(line + '\n')
                         return M3U8_PATH
                     else:
                         return result[key]
     except Exception as e:
         log_utils.log('Failure during torba resolver: %s' % (e), log_utils.LOGWARNING)
Example #10
 def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
     self.timeout = timeout
     self.__scraper = None
     try:
         self.__scraper = self.real_scraper(timeout)
     except Exception as e:
         log_utils.log('Failure during %s scraper creation: %s' % (self.get_name(), e), log_utils.LOGDEBUG)
Example #11
File: utils.py  Project: SNAPflix/salts
def do_startup_task(task):
    run_on_startup = ADDON.get_setting('auto-%s' % task) == 'true' and ADDON.get_setting('%s-during-startup' % task) == 'true'
    if run_on_startup and not xbmc.abortRequested:
        log_utils.log('Service: Running startup task [%s]' % (task))
        now = datetime.datetime.now()
        xbmc.executebuiltin('RunPlugin(plugin://%s/?mode=%s)' % (ADDON.get_id(), task))
        db_connection.set_setting('%s-last_run' % (task), now.strftime("%Y-%m-%d %H:%M:%S.%f"))
Example #12
def _getHtml():
    global LOGIN_COOKIE_FILE
    global __email
    global __password
    
    html = ''
    
    tries = 2
    while (tries > 0):
        try:
            cj = mechanize.LWPCookieJar()
            cj.load(LOGIN_COOKIE_FILE)
            opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
            req = mechanize.Request(common.URLS['login'])
            for k, v in common.HEADERS.items():
                req.add_header(k, v)
            conn = opener.open(req)
            cj.save(LOGIN_COOKIE_FILE)
            html = conn.read()
            conn.close()
        except:
            log_utils.log(traceback.format_exc())
        
        if html and __email in html:
            break
        else:
            doLogin(__email, __password)
        
        tries -= 1
        
        time.sleep(1)
    
    return html
Example #13
def finder1(html,url):
    global limit
    ref=url
    try:
        urls = re.findall('<i?frame.+?src=(?:\'|\")(.+?)(?:\'|\")',html)
        try:
            urls.append(re.findall("playStream\('iframe', '(.+?)'\)",html)[0])
        except: pass

        urls += re.findall('<a.+?href=[\'\"](/live-.+?stream.+?)[\'\"]',html)
        from random import shuffle
        shuffle(urls)
        for url in urls:
            if 'c4.zedo' in url:
                continue
            if "micast" in url or 'turbocast' in url:
                return finder47(html,ref)
            rr = resolve_it(url)
            if rr:
                return rr
            uri = manual_fix(url,ref)
            if limit>=25:
                log("Exiting - iframe visit limit reached")
                return
            resolved = find_link(uri) 
            if resolved:
                break
        headers = {'User-Agent': client.agent(), 'Referer': ref}
        if '.m3u8' in resolved and '|' not in resolved:
            headers.update({'X-Requested-With':'ShockwaveFlash/20.0.0.286', 'Host':urlparse.urlparse(resolved).netloc, 'Connection':'keep-alive'})
            resolved += '|%s' % urllib.urlencode(headers)
        return resolved
    except:
        return
Example #14
    def __execute(self, sql, params=None):
        if params is None:
            params = []

        rows = None
        sql = self.__format(sql)
        tries = 1
        while True:
            try:
                cur = self.db.cursor()
                # log_utils.log('Running: %s with %s' % (sql, params), log_utils.LOGDEBUG)
                cur.execute(sql, params)
                if sql[:6].upper() == 'SELECT' or sql[:4].upper() == 'SHOW':
                    rows = cur.fetchall()
                cur.close()
                self.db.commit()
                return rows
            except OperationalError as e:
                if tries < MAX_TRIES:
                    tries += 1
                    log_utils.log('Retrying (%s/%s) SQL: %s Error: %s' % (tries, MAX_TRIES, sql, e), log_utils.LOGWARNING)
                    self.db = None
                    self.__connect_to_db()
                elif any(s for s in ['no such table', 'no such column'] if s in str(e)):
                    self.db.rollback()
                    raise DatabaseRecoveryError(e)
                else:
                    raise
            except DatabaseError as e:
                self.db.rollback()
                raise DatabaseRecoveryError(e)
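Note: the retry loop above re-runs a statement after reconnecting when an OperationalError is raised, and only gives up after MAX_TRIES attempts. A rough standalone sketch of the same pattern using plain sqlite3 (the in-memory reconnect is only a stand-in for __connect_to_db):

import sqlite3

MAX_TRIES = 2

def execute(db, sql, params=()):
    tries = 1
    while True:
        try:
            cur = db.cursor()
            cur.execute(sql, params)
            rows = cur.fetchall() if sql.lstrip()[:6].upper() == 'SELECT' else None
            cur.close()
            db.commit()
            return rows
        except sqlite3.OperationalError:
            if tries < MAX_TRIES:
                tries += 1
                db = sqlite3.connect(':memory:')  # stand-in for a real reconnect
            else:
                raise

db = sqlite3.connect(':memory:')
execute(db, 'CREATE TABLE setting (name TEXT, value TEXT)')
execute(db, 'INSERT INTO setting VALUES (?, ?)', ('last_run', '2016-01-01'))
print(execute(db, 'SELECT * FROM setting'))  # one row: ('last_run', '2016-01-01')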
Example #15
def parallel_get_progress(q, trakt_id, cached):
    worker = threading.current_thread()
    log_utils.log("Worker: %s (%s) for %s progress" % (worker.name, worker, trakt_id), log_utils.LOGDEBUG)
    progress = trakt_api.get_show_progress(trakt_id, full=True, cached=cached)
    progress["trakt"] = trakt_id  # add in a hacked show_id to be used to match progress up to the show its for
    log_utils.log("Got progress for %s from %s" % (trakt_id, worker), log_utils.LOGDEBUG)
    q.put(progress)
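Note: parallel_get_progress is meant to run in worker threads that all push their results onto a shared queue, which the caller then drains. A toy sketch of that producer/collector pattern in the Python 2 style of these snippets, with a fake progress payload:

import threading
import Queue  # 'queue' on Python 3

def fake_progress_worker(q, trakt_id):
    # Stand-in for trakt_api.get_show_progress(); just posts dummy data tagged with the id.
    q.put({'trakt': trakt_id, 'aired': 10, 'completed': 7})

q = Queue.Queue()
threads = [threading.Thread(target=fake_progress_worker, args=(q, trakt_id)) for trakt_id in (1, 2, 3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
while not q.empty():
    print(q.get())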
Example #16
    def __get_cached_url(self, url, cache=8):
        log_utils.log('Fetching Cached URL: %s' % url, log_utils.LOGDEBUG)
        before = time.time()

        _created, _res_header, html = self.db_connection.get_cached_url(url, cache_limit=cache)
        if html:
            log_utils.log('Returning cached result for: %s' % (url), log_utils.LOGDEBUG)
            return html

        log_utils.log('No cached url found for: %s' % url, log_utils.LOGDEBUG)
        req = urllib2.Request(url)

        host = BASE_URL.replace('http://', '')
        req.add_header('User-Agent', USER_AGENT)
        req.add_header('Host', host)
        req.add_header('Referer', BASE_URL)
        try:
            body = self.__http_get_with_retry(url, req)
            body = body.decode('utf-8')
            parser = HTMLParser.HTMLParser()
            body = parser.unescape(body)
        except Exception as e:
            kodi.notify(msg='Failed to connect to URL: %s' % (url), duration=5000)
            log_utils.log('Failed to connect to URL %s: (%s)' % (url, e), log_utils.LOGERROR)
            return ''

        self.db_connection.cache_url(url, body)
        after = time.time()
        log_utils.log('Cached Url Fetch took: %.2f secs' % (after - before), log_utils.LOGDEBUG)
        return body
Example #17
    def export_from_db(self, full_path):
        temp_path = os.path.join(kodi.translate_path("special://profile"), 'temp_export_%s.csv' % (int(time.time())))
        with open(temp_path, 'w') as f:
            writer = csv.writer(f)
            f.write('***VERSION: %s***\n' % self.get_db_version())
            if self.__table_exists('rel_url'):
                f.write(CSV_MARKERS.REL_URL + '\n')
                for fav in self.get_all_rel_urls():
                    writer.writerow(self.__utf8_encode(fav))
            if self.__table_exists('other_lists'):
                f.write(CSV_MARKERS.OTHER_LISTS + '\n')
                for sub in self.get_all_other_lists():
                    writer.writerow(self.__utf8_encode(sub))
            if self.__table_exists('saved_searches'):
                f.write(CSV_MARKERS.SAVED_SEARCHES + '\n')
                for sub in self.get_all_searches():
                    writer.writerow(self.__utf8_encode(sub))
            if self.__table_exists('bookmark'):
                f.write(CSV_MARKERS.BOOKMARKS + '\n')
                for sub in self.get_bookmarks():
                    writer.writerow(self.__utf8_encode(sub))

        log_utils.log('Copying export file from: |%s| to |%s|' % (temp_path, full_path), log_utils.LOGDEBUG)
        if not xbmcvfs.copy(temp_path, full_path):
            raise Exception('Export: Copy from |%s| to |%s| failed' % (temp_path, full_path))

        if not xbmcvfs.delete(temp_path):
            raise Exception('Export: Delete of %s failed.' % (temp_path))
Example #18
    def search(self, video_type, title, year, season=''):
        search_url = urlparse.urljoin(self.base_url, '/index.php?search_keywords=')
        search_url += urllib.quote_plus(title)
        search_url += '&year=' + urllib.quote_plus(str(year))
        if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
            search_url += '&search_section=2'
        else:
            search_url += '&search_section=1'

        results = []
        html = self._http_get(self.base_url, cache_limit=0)
        match = re.search('input type="hidden" name="key" value="([0-9a-f]*)"', html)
        if match:
            key = match.group(1)
            search_url += '&key=' + key

            html = self._http_get(search_url, cache_limit=.25)
            pattern = r'class="index_item.+?href="(.+?)" title="Watch (.+?)"?\(?([0-9]{4})?\)?"?>'
            for match in re.finditer(pattern, html):
                url, title, year = match.groups('')
                result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(title), 'year': year}
                results.append(result)
        else:
            log_utils.log('Unable to locate PW search key', log_utils.LOGWARNING)
        return results
Example #19
    def download_subtitle(self, url):
        url = BASE_URL + url
        (response, srt) = self.__get_url(url)
        if not hasattr(response, 'info') or 'Content-Disposition' not in response.info():
            return

        cd = response.info()['Content-Disposition']
        r = re.search('filename="(.*)"', cd)
        if r:
            filename = r.group(1)
        else:
            filename = 'addic7ed_subtitle.srt'
        filename = re.sub('[^\x00-\x7F]', '', filename)

        final_path = os.path.join(kodi.get_setting('subtitle-folder'), filename)
        final_path = kodi.translate_path(final_path)
        if not xbmcvfs.exists(os.path.dirname(final_path)):
            try:
                try: xbmcvfs.mkdirs(os.path.dirname(final_path))
                except: os.mkdir(os.path.dirname(final_path))
            except:
                log_utils.log('Failed to create directory %s' % os.path.dirname(final_path), log_utils.LOGERROR)
                raise

        with open(final_path, 'w') as f:
            f.write(srt)
        return final_path
Example #20
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url, params = self.__translate_search(url, search_type)
            if search_url:
                html = self._http_get(search_url, params=params, cache_limit=.5)
                js_result = scraper_utils.parse_json(html, search_url)
                if js_result.get('status') == 'success':
                    for result in js_result['result']:
                        if len(result['hosterurls']) > 1: continue
                        if result['extension'] == 'rar': continue
                        
                        stream_url = result['hosterurls'][0]['url']
                        if stream_url not in seen_urls:
                            if scraper_utils.release_check(video, result['title']):
                                host = urlparse.urlsplit(stream_url).hostname
                                quality = scraper_utils.get_quality(video, host, self._get_title_quality(result['title']))
                                hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': False}
                                hoster['extra'] = result['title']
                                hosters.append(hoster)
                                seen_urls.add(stream_url)
                else:
                    log_utils.log('Alluc API Error: |%s|%s|: %s' % (search_url, params, js_result.get('message', 'Unknown Error')), log_utils.LOGWARNING)

        return hosters
Example #21
    def __execute(self, sql, params=None):
        if params is None:
            params = []

        rows = None
        sql = self.__format(sql)
        tries = 1
        while True:
            try:
                cur = self.db.cursor()
                # log_utils.log('Running: %s with %s' % (sql, params), log_utils.LOGDEBUG)
                cur.execute(sql, params)
                if sql[:6].upper() == 'SELECT' or sql[:4].upper() == 'SHOW':
                    rows = cur.fetchall()
                cur.close()
                self.db.commit()
                return rows
            except OperationalError:
                if tries < MAX_TRIES:
                    tries += 1
                    log_utils.log('Retrying (%s/%s) SQL: %s' % (tries, MAX_TRIES, sql), log_utils.LOGWARNING)
                    self.db = None
                    self.__connect_to_db()
                else:
                    raise
    def get_url(self, video):
        url = None
        self.create_db_connection()
        result = self.db_connection.get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
        if result:
            url = result[0][0]
            log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url), log_utils.LOGDEBUG)
        else:
            date_match = False
            search_title = '%s S%02dE%02d' % (video.title, int(video.season), int(video.episode))
            results = self.search(video.video_type, search_title, '')
            if not results and video.ep_airdate is not None:
                search_title = '%s %s' % (video.title, video.ep_airdate.strftime('%Y.%m.%d'))
                results = self.search(video.video_type, search_title, '')
                date_match = True

            best_q_index = -1
            for result in results:
                if date_match and video.ep_airdate.strftime('%Y.%m.%d') not in result['title']:
                    continue
                
                if Q_DICT[result['quality']] > best_q_index:
                    best_q_index = Q_DICT[result['quality']]
                    url = result['url']
            self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url, video.season, video.episode)
        return url
Example #23
    def processCaptcha(self, key, lang):
        headers = {'Referer': 'https://www.google.com/recaptcha/api2/demo', 'Accept-Language': lang}
        html = get_url('http://www.google.com/recaptcha/api/fallback?k=%s' % (key), headers=headers)
        token = ''
        iteration = 0
        while True:
            payload = re.findall('"(/recaptcha/api2/payload[^"]+)', html)
            iteration += 1
            message = re.findall('<label[^>]+class="fbc-imageselect-message-text"[^>]*>(.*?)</label>', html)
            if not message:
                message = re.findall('<div[^>]+class="fbc-imageselect-message-error">(.*?)</div>', html)
            if not message:
                token = re.findall('"this\.select\(\)">(.*?)</textarea>', html)[0]
                if token:
                    log_utils.log('Captcha Success: %s' % (token), log_utils.LOGDEBUG)
                else:
                    log_utils.log('Captcha Failed', log_utils.LOGDEBUG)
                break
            else:
                message = message[0]
                payload = payload[0]

            cval = re.findall('name="c"\s+value="([^"]+)', html)[0]
            captcha_imgurl = 'https://www.google.com%s' % (payload.replace('&amp;', '&'))
            message = re.sub('</?strong>', '', message)
            oSolver = cInputWindow(captcha=captcha_imgurl, msg=message, iteration=iteration)
            captcha_response = oSolver.get()
            if not captcha_response:
                break

            data = {'c': cval, 'response': captcha_response}
            html = get_url("http://www.google.com/recaptcha/api/fallback?k=%s" % (key), data=data, headers=headers)
        return token
Example #24
def update_all_scrapers():
    try: last_check = int(kodi.get_setting('last_list_check'))
    except: last_check = 0
    now = int(time.time())
    list_url = kodi.get_setting('scraper_url')
    scraper_password = kodi.get_setting('scraper_password')
    list_path = os.path.join(kodi.translate_path(kodi.get_profile()), 'scraper_list.txt')
    exists = os.path.exists(list_path)
    if list_url and scraper_password and (not exists or last_check < (now - (24 * 60 * 60))):
        scraper_list = utils2.get_and_decrypt(list_url, scraper_password)
        if scraper_list:
            try:
                with open(list_path, 'w') as f:
                    f.write(scraper_list)

                kodi.set_setting('last_list_check', str(now))
                kodi.set_setting('scraper_last_update', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)))
                for line in scraper_list.split('\n'):
                    line = line.replace(' ', '')
                    if line:
                        scraper_url, filename = line.split(',')
                        if scraper_url.startswith('http'):
                            update_scraper(filename, scraper_url)
            except Exception as e:
                log_utils.log('Exception during scraper update: %s' % (e), log_utils.LOGWARNING)
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, url)
            if 'error' in js_result:
                log_utils.log('DD.tv API error: "%s" @ %s' % (js_result['error'], url), log_utils.LOGWARNING)
                return hosters

            for result in js_result:
                if not scraper_utils.release_check(video, result['release'], require_title=False): continue
                if result['quality'] in self.q_order:
                    for key in result['links']:
                        url = result['links'][key][0]
                        if re.search('\.rar(\.|$)', url):
                            continue
                        
                        hostname = urlparse.urlparse(url).hostname
                        hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'host': hostname, 'quality': QUALITY_MAP[result['quality']], 'direct': False}
                        hoster['format'] = result['quality']
                        if 'x265' in result['release'] and result['quality'] != '1080P-X265': hoster['format'] += '-x265'
                        hosters.append(hoster)

        return hosters
Example #26
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            params = urlparse.parse_qs(source_url)
            if video.video_type == VIDEO_TYPES.MOVIE:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
                result_key = 'moviedetails'
            else:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
                result_key = 'episodedetails'

            run = cmd % (params['id'][0])
            meta = xbmc.executeJSONRPC(run)
            meta = scraper_utils.parse_json(meta)
            log_utils.log('Source Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and result_key in meta['result']:
                details = meta['result'][result_key]
                def_quality = [item[0] for item in sorted(SORT_KEYS['quality'].items(), key=lambda x:x[1])][self.def_quality]
                host = {'multi-part': False, 'class': self, 'url': details['file'], 'host': 'XBMC Library', 'quality': def_quality, 'views': details['playcount'], 'rating': None, 'direct': True}
                stream_details = details['streamdetails']
                if len(stream_details['video']) > 0 and 'width' in stream_details['video'][0]:
                    host['quality'] = scraper_utils.width_get_quality(stream_details['video'][0]['width'])
                hosters.append(host)
        return hosters
Example #27
def update_settings():
    full_path = os.path.join(kodi.get_path(), 'resources', 'settings.xml')
    
    try:
        # open for append; skip update if it fails
        with open(full_path, 'a') as f:
            pass
    except Exception as e:
        log_utils.log('Dynamic settings update skipped: %s' % (e), log_utils.LOGWARNING)
    else:
        with open(full_path, 'r') as f:
            xml = f.read()

        new_settings = []
        cat_count = 1
        old_xml = xml
        classes = scraper.Scraper.__class__.__subclasses__(scraper.Scraper)  # @UndefinedVariable
        classes += proxy.Proxy.__class__.__subclasses__(proxy.Proxy)  # @UndefinedVariable
        for cls in sorted(classes, key=lambda x: x.get_name().upper()):
            if not cls.get_name() or cls.has_proxy(): continue
            new_settings += cls.get_settings()
            if len(new_settings) > 90:
                xml = update_xml(xml, new_settings, cat_count)
                new_settings = []
                cat_count += 1
    
        if new_settings:
            xml = update_xml(xml, new_settings, cat_count)
    
        if xml != old_xml:
            with open(full_path, 'w') as f:
                f.write(xml)
        else:
            log_utils.log('No Settings Update Needed', log_utils.LOGDEBUG)
Example #28
def reset_cache():
    try:
        shutil.rmtree(cache_path)
        return True
    except Exception as e:
        log_utils.log('Failed to Reset Cache: %s' % (e), log_utils.LOGWARNING)
        return False
Example #29
def doLogin(email, password):
    global LOGIN_COOKIE_FILE
    
    try:
        if os.path.isfile(LOGIN_COOKIE_FILE):
            os.remove(LOGIN_COOKIE_FILE)
        
        data = {
            'email': email,
            'password': password,
            'remember': '1',
            'Submit': 'Login',
            'action': 'process'
        }
        
        conn = http_req(common.URLS['login'], data, common.HEADERS, LOGIN_COOKIE_FILE)
        ret_code = conn.code
        conn.close()

        if ret_code == 302:
            return True

    except:
        log_utils.log(traceback.format_exc())

    return False
Example #30
def find_link(url, html=''):
    global limit
    limit+=1
    log('Finding in : %s'%url)
    try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
    except: referer = 'http://' + urlparse.urlparse(url).netloc
    host  = urlparse.urlparse(url).netloc
    headers = {'Referer':referer, 'Host':host, 'User-Agent' : client.agent(), 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.5'}
    
    if html=='':
        url = manual_url_fix(url)
        html = client.request(url, headers=headers)
        html = manual_html_fix(url,html,headers)

    ref=url
    fs=list(globals().copy())
    for f in fs:
        if 'finder' in f:
            resolved = eval(f + "(html,ref)")
            if resolved:
                log('Resolved with %s: %s' % (f, resolved))
                return resolved

    return
Example #31
    def fit_transform(self, table):
        todo_cols = [table.label]
        if not todo_cols:
            return

        df = table.train_X
        # log(f'before fit_trans df')
        # print(df.head())
        col2type = {}

        opt = n_lag
        opt = partial(opt, todo_cols=todo_cols, windows=self.windows)

        if table.is_multivar:
            #if not CONSTANT.USE_TIME_STAMP_MODE:
            if table.key_num < CONSTANT.KEY_NUM:
                log('use order')
                groups = df[[table.key_time_col] + [table.key_col] +
                            todo_cols].groupby(table.key_col)
                groups = [group[1] for group in groups]
                #res = Parallel(n_jobs=CONSTANT.JOBS, require='sharedmem')(delayed(opt)(group) for group in groups)
                res = []
                for group in groups:
                    tmp = opt(group)
                    res.append(tmp)
                if res:
                    tmp = pd.concat(res)
                    tmp = tmp.drop(todo_cols, axis=1)
                    for col in tmp.columns:
                        if col != table.key_col and col != table.key_time_col:
                            col2type[col] = CONSTANT.NUMERICAL_TYPE
                    print(f'before merge shape: {df.shape}')
                    df = pd.merge(df,
                                  tmp,
                                  how='left',
                                  on=[table.key_time_col, table.key_col])
                    print(f'after merge shape: {df.shape}')
                    del tmp
                    gc.collect()
            else:
                time_col = table.key_time_col
                cat_col = table.key_col

                new_cols = []
                for window in self.windows:
                    for col in todo_cols:
                        series_matrix = pd.pivot_table(
                            df,
                            index=[time_col],
                            values=[col],
                            columns=cat_col,
                        )  # aggfunc=[np.mean])
                        series_matrix = series_matrix.fillna(method="ffill")
                        series_matrix_shift1 = series_matrix.shift(1)

                        lag_mean = series_matrix_shift1.rolling(window).mean()
                        lag_std = series_matrix_shift1.rolling(window).std()
                        lag_max = series_matrix_shift1.rolling(window).max()
                        lag_min = series_matrix_shift1.rolling(window).min()

                        new_col = f'{col}_lag_{window}_mean'
                        col2type[new_col] = CONSTANT.NUMERICAL_TYPE
                        new_cols.append(new_col)
                        df = revert_pivot_feat_join(df, lag_mean, new_col)

                        new_col = f'{col}_lag_{window}_std'
                        col2type[new_col] = CONSTANT.NUMERICAL_TYPE
                        new_cols.append(new_col)
                        df = revert_pivot_feat_join(df, lag_std, new_col)

                        new_col = f'{col}_lag_{window}_max'
                        col2type[new_col] = CONSTANT.NUMERICAL_TYPE
                        new_cols.append(new_col)
                        df = revert_pivot_feat_join(df, lag_max, new_col)

                        new_col = f'{col}_lag_{window}_min'
                        col2type[new_col] = CONSTANT.NUMERICAL_TYPE
                        new_cols.append(new_col)
                        df = revert_pivot_feat_join(df, lag_min, new_col)

        else:
            tmp = opt(df[[table.key_time_col] + todo_cols])
            tmp.drop([table.key_time_col] + todo_cols, axis=1, inplace=True)
            for col in tmp.columns:
                col2type[col] = CONSTANT.NUMERICAL_TYPE
                df[col] = tmp[col]
            del tmp
            gc.collect()
        table.update_data(df, col2type, mode='train')
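Note: the multi-key branch above builds its lag features by pivoting the value column per key, shifting one step back, and applying rolling aggregates. A tiny pandas sketch of that idea on toy data:

import pandas as pd

# Toy frame: one value column 'y' per (time, key) pair.
df = pd.DataFrame({'t':   [1, 1, 2, 2, 3, 3],
                   'key': ['a', 'b', 'a', 'b', 'a', 'b'],
                   'y':   [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})

mat = pd.pivot_table(df, index=['t'], values=['y'], columns='key')
mat = mat.fillna(method='ffill')
lag_mean = mat.shift(1).rolling(2).mean()  # mean of the two previous steps, per key
print(lag_mean)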
Example #32
def content(url, searched=False):

    try:
        c = client.request(url)
        r = dom_parser2.parse_dom(c, 'div',
                                  {'class': ['video-box', 'four-column']})
        r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
              dom_parser2.parse_dom(i, 'div', {'class': 'video-box-title'}), \
              dom_parser2.parse_dom(i, 'div', {'class': 'video-duration'}), \
              dom_parser2.parse_dom(i, 'img', req='data-original')) for i in r]
        r = [(urlparse.urljoin(base_domain, i[0][0].attrs['href']),
              i[1][0].content, i[2][0].content, i[3][0].attrs['data-original'])
             for i in r if i]
        if (not r) and (not searched):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            pass

    dirlst = []

    for i in r:
        try:
            name = '%s - [ %s ]' % (kodi.sortX(i[1].encode('utf-8')).title(),
                                    kodi.sortX(i[2].encode('utf-8')))
            if searched:
                description = 'Result provided by %s' % base_name.title()
            else:
                description = name
            content_url = i[0] + '|SPLIT|%s' % base_name
            fanarts = xbmc.translatePath(
                os.path.join('special://home/addons/script.wankbank.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': content_url,
                'mode': player_mode,
                'icon': i[3],
                'fanart': fanarts,
                'description': description,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item %s in %s:: Error: %s' %
                (i[1].title(), base_name.title(), str(e)), log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(r))

    if not searched:

        try:
            search_pattern = '''\<link\s*rel\=['"]next['"]\s*href\=['"]([^'"]+)'''
            parse = base_domain
            helper.scraper().get_next_page(content_mode, url, search_pattern,
                                           filename)
        except Exception as e:
            log_utils.log(
                'Error getting next page for %s :: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)
Example #33
def record_timeouts(fails):
    for key in fails:
        if fails[key] == True:
            log_utils.log('Recording Timeout of %s' % (key),
                          log_utils.LOGWARNING)
            increment_setting('%s_fail' % key)
Example #34
def download_media(url, path, file_name):
    try:
        progress = int(kodi.get_setting('down_progress'))
        import urllib2
        request = urllib2.Request(url)
        request.add_header('User-Agent', USER_AGENT)
        request.add_unredirected_header('Host', request.get_host())
        response = urllib2.urlopen(request)

        content_length = 0
        if 'Content-Length' in response.info():
            content_length = int(response.info()['Content-Length'])

        file_name = file_name.replace('.strm', get_extension(url, response))
        full_path = os.path.join(path, file_name)
        log_utils.log('Downloading: %s -> %s' % (url, full_path),
                      log_utils.LOGDEBUG)

        path = xbmc.makeLegalFilename(path)
        if not xbmcvfs.exists(path):
            try:
                try:
                    xbmcvfs.mkdirs(path)
                except:
                    os.mkdir(path)
            except Exception as e:
                raise Exception(i18n('failed_create_dir'))

        file_desc = xbmcvfs.File(full_path, 'w')
        total_len = 0
        if progress:
            if progress == PROGRESS.WINDOW:
                dialog = xbmcgui.DialogProgress()
            else:
                dialog = xbmcgui.DialogProgressBG()

            dialog.create('Stream All The Sources',
                          i18n('downloading') % (file_name))
            dialog.update(0)
        # track whether the user cancelled so the completion notice only fires on a full download
        cancelled = False
        while True:
            data = response.read(CHUNK_SIZE)
            if not data:
                break

            if progress == PROGRESS.WINDOW and dialog.iscanceled():
                cancelled = True
                break

            total_len += len(data)
            if not file_desc.write(data):
                raise Exception('failed_write_file')

            percent_progress = (total_len) * 100 / content_length if content_length > 0 else 0
            log_utils.log('Position : %s / %s = %s%%' % (total_len, content_length, percent_progress), log_utils.LOGDEBUG)
            if progress == PROGRESS.WINDOW:
                dialog.update(percent_progress)
            elif progress == PROGRESS.BACKGROUND:
                dialog.update(percent_progress, 'Stream All The Sources')

        if not cancelled:
            kodi.notify(msg=i18n('download_complete') % (file_name), duration=5000)
            log_utils.log('Download Complete: %s -> %s' % (url, full_path), log_utils.LOGDEBUG)

        file_desc.close()
        if progress:
            dialog.close()

    except Exception as e:
        log_utils.log(
            'Error (%s) during download: %s -> %s' % (str(e), url, file_name),
            log_utils.LOGERROR)
        kodi.notify(msg=i18n('download_error') % (str(e), file_name),
                    duration=5000)
def content(url, searched=False):
    import xbmcgui
    dialog = xbmcgui.Dialog()
    try:
        c = client.request(url)
        soup = BeautifulSoup(c, 'html.parser')
        r = soup.find_all('content')
        if (not r) and (not searched):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            pass

    dirlst = []

    for i in r:
        try:
            title = i.find('title').text
            url2 = i.find('media').text
            icon = i.find('icon').text
            fanarts = i.find('fanart').text
            description = i.find('desc').text
            dirlst.append({
                'name': title,
                'url': url2,
                'mode': player_mode,
                'icon': icon,
                'fanart': fanarts,
                'description': description,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item %s Error: %s' % (title, str(e)),
                log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(r))

    if not searched:

        try:
            search_pattern = '''\<link\s*rel\=['"]next['"]\s*href\=['"]([^'"]+)'''
            parse = base_domain
            helper.scraper().get_next_page(content_mode, url, search_pattern,
                                           filename)
        except Exception as e:
            log_utils.log(
                'Error getting next page for %s :: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)
Example #36
def content(url, searched=False):

    try:
        c = client.request(url)
        r = dom_parser2.parse_dom(c, 'div', {'class': 'plugcontainer'})
        r = [(dom_parser2.parse_dom(i, 'a', req=['href','title']), \
              dom_parser2.parse_dom(i, 'img', req='src'), \
              dom_parser2.parse_dom(i, 'p', {'class': 'txt'})) \
              for i in r if i]
        r = [(i[0][0].attrs['href'], i[0][0].attrs['title'], \
              i[1][0].attrs['src'], i[2][1].content if i[2][1] else 'Unknown') \
              for i in r if 'link.php' not in i[0][0].attrs['href']]
        if (not r) and (not searched):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            pass

    dirlst = []

    for i in r:
        try:
            name = kodi.sortX(i[1].encode('utf-8')).title()
            if searched:
                description = 'Result provided by %s' % base_name.title()
            else:
                description = i[3]
            content_url = i[0] + '|SPLIT|%s' % base_name
            fanarts = xbmc.translatePath(
                os.path.join('special://home/addons/script.wankbank.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': content_url,
                'mode': player_mode,
                'icon': i[2],
                'fanart': fanarts,
                'description': description,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item %s in %s:: Error: %s' %
                (i[1].title(), base_name.title(), str(e)), log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(r))

    if not searched:

        try:
            search_pattern = '''<a\s*href=['"]([^'"]+)['"]\s*class=['"]plugurl['"]\s*title=['"]next\s*page['"]>'''
            parse = base_domain
            helper.scraper().get_next_page(content_mode, url, search_pattern,
                                           filename, parse)
        except Exception as e:
            log_utils.log(
                'Error getting next page for %s :: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)
Example #37
    def __call_trakt(self,
                     url,
                     data=None,
                     params=None,
                     auth=True,
                     cache_limit=.25,
                     cached=True):
        if not cached: cache_limit = 0
        db_cache_limit = cache_limit if cache_limit > 8 else 8
        json_data = json.dumps(data) if data else None
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': V2_API_KEY,
            'trakt-api-version': 2
        }
        url = '%s%s%s' % (self.protocol, BASE_URL, url)
        if params: url = url + '?' + urllib.urlencode(params)

        db_connection = DB_Connection()
        created, cached_result = db_connection.get_cached_url(
            url, db_cache_limit)
        if cached_result and (time.time() - created) < (60 * 60 * cache_limit):
            result = cached_result
            log_utils.log('Returning cached result for: %s' % (url),
                          log_utils.LOGDEBUG)
        else:
            auth_retry = False
            while True:
                try:
                    if auth:
                        headers.update(
                            {'Authorization': 'Bearer %s' % (self.token)})
                    log_utils.log(
                        'Trakt Call: %s, header: %s, data: %s' %
                        (url, headers, data), log_utils.LOGDEBUG)
                    request = urllib2.Request(url,
                                              data=json_data,
                                              headers=headers)
                    f = urllib2.urlopen(request, timeout=self.timeout)
                    result = ''
                    while True:
                        data = f.read()
                        if not data: break
                        result += data

                    db_connection.cache_url(url, result)
                    break
                except (ssl.SSLError, socket.timeout) as e:
                    if cached_result:
                        result = cached_result
                        log_utils.log(
                            'Temporary Trakt Error (%s). Using Cached Page Instead.'
                            % (str(e)), log_utils.LOGWARNING)
                    else:
                        raise TransientTraktError('Temporary Trakt Error: ' +
                                                  str(e))
                except urllib2.URLError as e:
                    if isinstance(e, urllib2.HTTPError):
                        if e.code in TEMP_ERRORS:
                            if cached_result:
                                result = cached_result
                                log_utils.log(
                                    'Temporary Trakt Error (%s). Using Cached Page Instead.'
                                    % (str(e)), log_utils.LOGWARNING)
                                break
                            else:
                                raise TransientTraktError(
                                    'Temporary Trakt Error: ' + str(e))
                        elif e.code == 401 or e.code == 405:
                            if auth_retry or url.endswith('/token'):
                                self.token = None
                                kodi.set_setting('trakt_oauth_token', '')
                                kodi.set_setting('trakt_refresh_token', '')
                                raise TraktError(
                                    'Trakt Call Authentication Failed (%s)' %
                                    (e.code))
                            else:
                                result = self.get_token()
                                self.token = result['access_token']
                                kodi.set_setting('trakt_oauth_token',
                                                 result['access_token'])
                                kodi.set_setting('trakt_refresh_token',
                                                 result['refresh_token'])
                                auth_retry = True
                        elif e.code == 404:
                            raise TraktNotFoundError()
                        else:
                            raise
                    elif isinstance(e.reason, socket.timeout) or isinstance(
                            e.reason, ssl.SSLError):
                        if cached_result:
                            result = cached_result
                            log_utils.log(
                                'Temporary Trakt Error (%s). Using Cached Page Instead'
                                % (str(e)), log_utils.LOGWARNING)
                            break
                        else:
                            raise TransientTraktError(
                                'Temporary Trakt Error: ' + str(e))
                    else:
                        raise TraktError('Trakt Error: ' + str(e))
                except:
                    raise

        response = json.loads(result)

        if 'status' in response and response['status'] == 'failure':
            if 'message' in response: raise TraktError(response['message'])
            if 'error' in response: raise TraktError(response['error'])
            else: raise TraktError()
        else:
            # log_utils.log('Trakt Response: %s' % (response), xbmc.LOGDEBUG)
            return response
Example #38
def content(url, searched=False):
    if not base_domain in url: url = base_domain + url
    try:
        c = client.request(url)
        r = re.findall(
            '<div class="preloadLine">(.*?)<span class="video_count">',
            c,
            flags=re.DOTALL)
        if (not r) and (not searched):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            pass

    dirlst = []
    for i in r:
        try:
            name = re.findall('alt="(.*?)"', i, flags=re.DOTALL)[0]
            url2 = re.findall('<a.+?href="(.*?)"', i, flags=re.DOTALL)[0]
            if not base_domain in url2: url2 = base_domain + url2
            icon = re.findall('data-thumb_url="(.*?)"', i, flags=re.DOTALL)[0]
            #desc = re.findall('<span class="duration">(.*?)</span>',i, flags=re.DOTALL)[0].strip()
            fanarts = xbmc.translatePath(
                os.path.join('special://home/addons/script.xxxodus.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': url2,
                'mode': player_mode,
                'icon': icon,
                'fanart': fanarts,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item in %s:: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(r))

    if not searched:

        try:
            search_pattern = '''<link rel="next"\s+href=['"]([^'"]+)['"]'''
            parse = base_domain
            helper.scraper().get_next_page(content_mode, url, search_pattern,
                                           filename)
        except Exception as e:
            log_utils.log(
                'Error getting next page for %s :: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)
Example #39
    def fit_transform(self, table):
        cat_cols = table.id_cols
        todo_cols = table.init_num_cols + [table.label]
        key_time_col = table.key_time_col

        df = table.train_X

        col2type = {}

        # def window_encode(df, cat, time, todo_cols, windows):
        #     groups = df[[time] + [cat] + todo_cols].groupby(cat)
        #     groups = [group[1] for group in groups]
        #     res = []
        #     for group_df in groups:
        #         pre_lag = 1
        #         for window in windows:
        #             rolled = group_df[todo_cols].shift(pre_lag).rolling(window=window)
        #             group_df = group_df.join(rolled.mean().add_suffix(f'_{cat}_lag_{window}_mean'))
        #             group_df = group_df.join(rolled.max().add_suffix(f'_{cat}_lag_{window}_max'))
        #             group_df = group_df.join(rolled.min().add_suffix(f'_{cat}_lag_{window}_min'))
        #         res.append(group_df)
        #     tmp = pd.concat(res)
        #     #tmp.fillna(method='bfill', inplace=True)
        #     tmp.drop(todo_cols+[time, cat], axis=1, inplace=True)
        #     return tmp

        def window_encode(df, cat, time, todo_cols, windows):
            group_df = df[[time] + [cat] + todo_cols].groupby([cat,
                                                               time]).mean()
            res = []
            pre_lag = 1
            for window in windows:
                rolled = group_df[todo_cols].shift(pre_lag).rolling(
                    window=window)
                group_df = group_df.join(
                    rolled.mean().add_suffix(f'_{cat}_lag_{window}_mean'))
                group_df = group_df.join(
                    rolled.max().add_suffix(f'_{cat}_lag_{window}_max'))
                group_df = group_df.join(
                    rolled.min().add_suffix(f'_{cat}_lag_{window}_min'))
            res.append(group_df)
            tmp = pd.concat(res, axis=1)
            tmp.reset_index(drop=False, inplace=True)
            tmp.drop(todo_cols, axis=1, inplace=True)
            print(f'shape before merge {df.shape}')
            tmp = pd.merge(df[[time, cat]], tmp, how='left', on=[time, cat])
            print(f'shape after merge {tmp.shape}')
            #tmp.fillna(method='bfill', inplace=True)
            tmp.drop([time, cat], axis=1, inplace=True)
            return tmp

        opt = window_encode
        opt = partial(opt,
                      time=key_time_col,
                      todo_cols=todo_cols,
                      windows=self.windows)

        # for cat in cat_cols:
        #     tmp = opt(df, cat)

        res = Parallel(n_jobs=CONSTANT.JOBS,
                       require='sharedmem')(delayed(opt)(df, cat)
                                            for cat in cat_cols)
        if res:
            tmp = pd.concat(res, axis=1)
            for col in tmp.columns:
                col2type[col] = CONSTANT.NUMERICAL_TYPE
                df[col] = tmp[col]
            del tmp
            gc.collect()
            table.update_data(df, col2type, 'train')
            log(f'{self.__class__.__name__} produce {len(col2type)} features')
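For reference, here is a minimal standalone sketch of the lag-then-roll idea that window_encode applies per category column. It uses toy data, a single window, and a plain groupby transform instead of the grouped-by-time pipeline above, so it is an illustration of the pattern rather than the exact implementation.

import pandas as pd

df = pd.DataFrame({
    'cat':   ['a', 'a', 'a', 'b', 'b', 'b'],
    'value': [1.0, 2.0, 3.0, 10.0, 20.0, 30.0],
})

# shift by one row inside each group so a row only sees strictly earlier values,
# then take a rolling mean over a window of 2;
# e.g. the third row of group 'a' gets (1.0 + 2.0) / 2 = 1.5
df['value_lag_2_mean'] = (
    df.groupby('cat')['value']
      .transform(lambda s: s.shift(1).rolling(window=2).mean())
)
print(df)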
Example #40
def find_link(url, name, iconimage, downloadableLink=False):

    xbmc.executebuiltin("ActivateWindow(busydialog)")

    if '|SPLIT|' in url: url = url.split('|SPLIT|')[0]
    if 'site=' in url: url = url.split('site=')[0]
    if '|' in url: url = url.split('|User-Agent')[0]

    c = client.request(url, output='headers')

    checks = ['video', 'mpegurl']
    exts = ['.mp4', '.flv', '.m3u8']

    try:
        if any(f for f in checks if f in c['Content-Type']):
            downloadableLink = True
    except:
        if any(f for f in exts if f in url):
            downloadableLink = True
        else:
            xbmc.executebuiltin("Dialog.Close(busydialog)")
            kodi.notify(msg='Error downloading video.')
            quit()

    name = kodi.stripColor(name)
    if '] -' in name: name = name.split('] -')[1]
    if downloadableLink:
        dest = getDest()
        dest = os.path.join(dest, '%s.mp4' % urllib.quote_plus(name))
        download(url, name, iconimage, dest)
    else:
        u = None
        log_utils.log('Sending %s to XXX Resolver' % (url),
                      log_utils.LOGNOTICE)
        if urlresolver.HostedMediaFile(url, include_xxx=True).valid_url():
            log_utils.log(
                '%s is a valid SMU resolvable URL. Attempting to resolve.' %
                (url), log_utils.LOGNOTICE)
            try:
                u = urlresolver.HostedMediaFile(url,
                                                include_xxx=True).resolve()
            except Exception as e:
                log_utils.log(
                    'Error getting valid link from SMU :: %s :: %s' %
                    (url, str(e)), log_utils.LOGERROR)
                kodi.idle()
                kodi.notify(msg='Something went wrong!  | %s' % str(e),
                            duration=8000,
                            sound=True)
                quit()
            log_utils.log('Link returned by XXX Resolver :: %s' % (u),
                          log_utils.LOGNOTICE)
        else:
            log_utils.log(
                '%s is not a valid SMU resolvable link. Attempting to resolve by XXXODUS backup resolver.'
                % (url), log_utils.LOGNOTICE)
            try:
                u = adultresolver.resolve(url)
            except Exception as e:
                log_utils.log(
                    'Error getting valid link from XXXODUS backup resolver :: %s :: %s' %
                    (url, str(e)), log_utils.LOGERROR)
                kodi.idle()
                kodi.notify(msg='Something went wrong!  | %s' % str(e),
                            duration=8000,
                            sound=True)
                quit()
            log_utils.log('%s returned by XXX-O-DUS backup resolver.' % (u),
                          log_utils.LOGNOTICE)
        if (not isinstance(u, str)):
            try:
                u = multilinkselector(u)
            except:
                pass
        if u == 'quit':
            xbmc.executebuiltin("Dialog.Close(busydialog)")
            quit()
        if u:
            dest = getDest()
            dest = os.path.join(dest, '%s.tmp_mp4' % urllib.quote_plus(name))
            download(u, name, iconimage, dest)
        else:
            xbmc.executebuiltin("Dialog.Close(busydialog)")
            kodi.notify('No Downloadable Link Found.')
            quit()
Example #41
def content(url, searched=False):

    try:
        c = client.request(url)
        match = re.findall('<div class="6u">(.*?)</section>',
                           c,
                           flags=re.DOTALL)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            pass
    dirlst = []
    for items in match:
        try:
            name = re.findall('alt="(.*?)"', items, flags=re.DOTALL)[0]
            name = name.title()
            url2 = re.findall('<a href="(.*?)"', items, flags=re.DOTALL)[0]
            icon = re.findall('''<div.*?onmouseleave=.*?\(['"](.*?)['"]''',
                              items,
                              flags=re.DOTALL)[0]
            length = re.findall(
                '<span class="icon fa-clock-o meta-data">(.*?)</span>',
                items,
                flags=re.DOTALL)[0]
            if not 'https:' in url2: url2 = 'https://hqporner.com' + url2
            if not 'https:' in icon: icon = 'https:' + icon
            #icon = ''
            desc = '[COLOR yellow]Video Length :: [/COLOR]' + length
            fanarts = xbmc.translatePath(
                os.path.join('special://home/addons/script.xxxodus.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': url2,
                'mode': player_mode,
                'icon': icon,
                'fanart': fanarts,
                'description': desc,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item. %s:: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)
    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(match))

    if not searched:
        search_pattern = '''<a\s*href=['"]([^'"]+)['"]\s*class=['"]button\s*mobile-pagi pagi-btn['"]>Next<\/a>'''
        parse = base_domain
        helper.scraper().get_next_page(content_mode, url, search_pattern,
                                       filename, parse)
Example #42
def mainSearch(url):

    if '|SPLIT|' in url: url, site = url.split('|SPLIT|')
    term = url
    if term == "null": term = kodi.get_keyboard('Search %s' % kodi.get_name())

    if term:
        search_on_off = kodi.get_setting("search_setting")
        if search_on_off == "true":
            delTerm(term)
            addTerm(term)

        display_term = term
        term = urllib.quote_plus(term)
        term = term.lower()

        if site == 'all':
            sources = __all__
            search_sources = []
            for i in sources:
                try:
                    if eval(i + ".search_tag") == 1: search_sources.append(i)
                except:
                    pass

            if search_sources:
                i = 0
                source_num = 0
                failed_list = ''
                line1 = kodi.giveColor('Searching: ',
                                       'white') + kodi.giveColor(
                                           '%s', 'orangered')
                line2 = kodi.giveColor('Found: %s videos', 'white')
                line3 = kodi.giveColor(
                    'Source: %s of ' + str(len(search_sources)), 'white')

                kodi.dp.create(kodi.get_name(), '', line2, '')
                xbmc.executebuiltin('Dialog.Close(busydialog)')
                for u in sorted(search_sources):
                    if kodi.dp.iscanceled(): break
                    try:
                        i += 1
                        progress = 100 * int(i) / len(search_sources)
                        kodi.dp.update(progress, line1 % u.title(),
                                       line2 % str(source_num), line3 % str(i))
                        search_url = eval(u + ".search_base") % term
                        source_n = eval(u + ".content('%s',True)" % search_url)
                        try:
                            source_n = int(source_n)
                        except:
                            source_n = 0
                        if (not source_n):
                            if failed_list == '': failed_list += str(u).title()
                            else: failed_list += ', %s' % str(u).title()
                        else: source_num += int(source_n)
                    except Exception as e:
                        log_utils.log(
                            'Error searching %s :: Error: %s' %
                            (u.title(), str(e)), log_utils.LOGERROR)
                        pass
                kodi.dp.close()
                if failed_list != '':
                    kodi.notify(msg='%s failed to return results.' %
                                failed_list,
                                duration=4000,
                                sound=True)
                    log_utils.log(
                        'Scrapers failing to return search results are :: %s'
                        % failed_list, log_utils.LOGERROR)
                else:
                    kodi.notify(msg='%s results found.' % str(source_num),
                                duration=4000,
                                sound=True)
                xbmcplugin.setContent(kodi.syshandle, 'movies')
                xbmcplugin.endOfDirectory(kodi.syshandle, cacheToDisc=True)
                utils.setView('search')
        else:
            search_url = eval(site + ".search_base") % term
            eval(site + ".content('%s')" % search_url)
    else:
        kodi.notify(msg='Blank searches are not allowed.')
        quit()
Example #43
def download_media(url, path, file_name, translations, progress=None):
    try:
        if progress is None:
            progress = int(kodi.get_setting('down_progress'))
            
        i18n = translations.i18n
        active = not progress == PROGRESS.OFF
        background = progress == PROGRESS.BACKGROUND
            
        with kodi.ProgressDialog(kodi.get_name(), i18n('downloading') % (file_name), background=background, active=active) as pd:
            try:
                headers = dict([item.split('=') for item in (url.split('|')[1]).split('&')])
                for key in headers: headers[key] = urllib.unquote(headers[key])
            except:
                headers = {}
            if 'User-Agent' not in headers: headers['User-Agent'] = BROWSER_UA
            request = urllib2.Request(url.split('|')[0], headers=headers)
            response = urllib2.urlopen(request)
            if 'Content-Length' in response.info():
                content_length = int(response.info()['Content-Length'])
            else:
                content_length = 0
    
            file_name += '.' + get_extension(url, response)
            full_path = os.path.join(path, file_name)
            log_utils.log('Downloading: %s -> %s' % (url, full_path), log_utils.LOGDEBUG)
    
            path = kodi.translate_path(xbmc.makeLegalFilename(path))
            try:
                try: xbmcvfs.mkdirs(path)
                except: os.makedirs(path)
            except Exception as e:
                log_utils.log('Path Create Failed: %s (%s)' % (e, path), log_utils.LOGDEBUG)
    
            if not path.endswith(os.sep): path += os.sep
            if not xbmcvfs.exists(path):
                raise Exception(i18n('failed_create_dir'))
            
            file_desc = xbmcvfs.File(full_path, 'w')
            total_len = 0
            cancel = False
            while True:
                data = response.read(CHUNK_SIZE)
                if not data:
                    break
    
                if pd.is_canceled():
                    cancel = True
                    break
    
                total_len += len(data)
                if not file_desc.write(data):
                    raise Exception(i18n('failed_write_file'))
    
                percent_progress = (total_len) * 100 / content_length if content_length > 0 else 0
                log_utils.log('Position : %s / %s = %s%%' % (total_len, content_length, percent_progress), log_utils.LOGDEBUG)
                pd.update(percent_progress)
            
            file_desc.close()

        if not cancel:
            kodi.notify(msg=i18n('download_complete') % (file_name), duration=5000)
            log_utils.log('Download Complete: %s -> %s' % (url, full_path), log_utils.LOGDEBUG)

    except Exception as e:
        log_utils.log('Error (%s) during download: %s -> %s' % (str(e), url, file_name), log_utils.LOGERROR)
        kodi.notify(msg=i18n('download_error') % (str(e), file_name), duration=5000)
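# Hedged usage sketch (illustrative names and values, not part of the original
# module): download_media() above accepts optional request headers appended to
# the URL after a '|' as URL-encoded key=value pairs.  Such a URL could be
# built like this before calling it; urllib is already used by the function above.
def _example_build_download_url(base_url, headers):
    # produces the 'url|k1=v1&k2=v2' form parsed at the top of download_media()
    return base_url + '|' + '&'.join(
        '%s=%s' % (k, urllib.quote(v)) for k, v in headers.items())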
def content(url, searched=False):

    try:
        c = client.request(url)
        r = dom_parser2.parse_dom(c, 'a')
        r = [i for i in r if '<em class="time_thumb">' in i.content]
        r = [(i.attrs['href'], \
              dom_parser2.parse_dom(i.content, 'img', req=['src','alt']), \
              dom_parser2.parse_dom(i.content, 'em', {'class': 'time_thumb'})) \
            for i in r]
        r = [(urljoin(base_domain, i[0]), i[1][0].attrs['alt'],
              re.sub('<.+?>', '', i[2][0].content), i[1][0].attrs['src'])
             for i in r]
        if (not r) and (not searched):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            pass

    dirlst = []

    for i in r:
        try:
            if PY2:
                name = '%s - [ %s ]' % (kodi.sortX(
                    i[1].encode('utf-8')).title(),
                                        kodi.sortX(i[2].encode('utf-8')))
            else:
                name = '%s - [ %s ]' % (kodi.sortX(
                    i[1]).title(), kodi.sortX(i[2]))
            if searched:
                description = 'Result provided by %s' % base_name.title()
            else:
                description = name
            content_url = i[0] + '|SPLIT|%s' % base_name
            fanarts = translatePath(
                os.path.join('special://home/addons/script.xxxodus.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': content_url,
                'mode': player_mode,
                'icon': i[3],
                'fanart': fanarts,
                'description': description,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item %s in %s:: Error: %s' %
                (i[1].title(), base_name.title(), str(e)), log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(r))

    if not searched:

        try:
            search_pattern = '''<li\s*class=['"]next['"]><a\s*href=['"]([^'"]+)'''
            parse = base_domain
            helper.scraper().get_next_page(content_mode, url, search_pattern,
                                           filename, parse)
        except Exception as e:
            log_utils.log(
                'Error getting next page for %s :: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)
Example #45
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30'):
    try:
        if not url:
            return

        handlers = []

        if not proxy == None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if (2, 7, 8) < sys.version_info < (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith('//'): url = 'http:' + url

        _headers = {}
        try:
            _headers.update(headers)
        except:
            pass
        if 'User-Agent' in _headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            _headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            _headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if not 'Accept-Language' in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR == True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif not cookie == None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'

        if redirect == False:

            #old implementation
            #class NoRedirection(urllib2.HTTPErrorProcessor):
            #    def http_response(self, request, response): return response

            #opener = urllib2.build_opener(NoRedirection)
            #opener = urllib2.install_opener(opener)

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers,
                                                req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del _headers['Referer']
            except:
                pass

        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)

        url = utils.byteify(url)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                cf_result = response.read(5242880)
                try:
                    encoding = response.info().getheader('Content-Encoding')
                except:
                    encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(
                        fileobj=StringIO.StringIO(cf_result)).read()

                if 'cf-browser-verification' in cf_result:

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                          urlparse.urlparse(url).netloc)

                    if not netloc.endswith('/'): netloc += '/'

                    ua = _headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    _headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post)
                    _add_request_header(request, _headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))
                else:
                    log_utils.log(
                        'Request-Error (%s): %s' % (str(response.code), url),
                        log_utils.LOGDEBUG)
                    if error == False: return
            else:
                log_utils.log(
                    'Request-Error (%s): %s' % (str(response.code), url),
                    log_utils.LOGDEBUG)
                if error == False: return

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = 0
            response.close()
            return content

        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            _headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(
                    fileobj=StringIO.StringIO(result)).read()

        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                  urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                           timeout)

            result = _basic_request(url,
                                    headers=_headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)

        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1])
                                         for item in response.info().items()])
            except:
                response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close == True: response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close == True: response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url),
                      log_utils.LOGDEBUG)
        return
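A brief, hedged usage sketch of the extended output mode, unpacking the five-element tuple returned above (the URL and timeout are illustrative):

html, status, resp_headers, sent_headers, cookie = request(
    'http://example.com/page', output='extended', timeout='15')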
Example #46
import email_utils as mail
import time
import requests
import json
import sys
import log_utils as log
import config as cfg  # cfg is referenced below; config.py is assumed to hold the mail/log settings and SEND_EMAIL_ON_STARTUP

REQ_URL = "https://act.cmbchina.com/ActShipMobile/api/actshipprd/ACT20191009111228Y6pWLw23"

log.init(cfg)

if not mail.init(cfg):
    log.error("[-] Mail system init failed, please check config.py")
    sys.exit(0)

log.log("[+] OK!")

if cfg.SEND_EMAIL_ON_STARTUP:
    mail.send_email("Congratulations! Your server started successfully!")

query_times = 0

while True:
    error_msg = None
    try:
        resp = requests.get(REQ_URL)
        if resp.status_code != 200:
            raise Exception("Got a wrong code: %d" % resp.status_code)
        result_obj = json.loads(resp.text)
        stock = result_obj[0]["prdstock"]
        if stock != "0":
Example #47
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources

            html = client.request(url)
            try:
                iframe = client.parseDOM(
                    html,
                    'iframe',
                    attrs={'class': 'embed-responsive-item'},
                    ret='src')[0]
                host = iframe.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': iframe,
                    'direct': False,
                    'debridonly': False
                })
            except:
                flashvar = client.parseDOM(html,
                                           'param',
                                           attrs={'name': 'flashvars'},
                                           ret='value')[0]
                link = flashvar.split('file=')[1]
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            containers = client.parseDOM(html,
                                         'div',
                                         attrs={'class': 'dwn-box'})

            for container in containers:
                link = client.parseDOM(container,
                                       'a',
                                       attrs={'rel': 'nofollow'},
                                       ret='href')[0]
                redirect = client.request(link, output='geturl')
                quality, info = source_utils.get_release_quality(redirect)
                sources.append({
                    'source': 'DirectLink',
                    'quality': quality,
                    'language': 'en',
                    'url': redirect,
                    'info': info,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolTV - Exception: \n' + str(failure))
            return
def resolve_url(url, name=None, iconimage=None, pattern=None):
    dialog = xbmcgui.Dialog()

    kodi.busy()

    try: url,site = url.split('|SPLIT|')
    except: 
        site = 'Unknown'
        log_utils.log('Error getting site information from :: %s' % (url), log_utils.LOGERROR)

    if not name: name = 'Unknown'
    if not iconimage: iconimage = kodi.addonicon
    name = re.sub(r'(\[.+?\])','',name); name = name.lstrip()
    if '] - ' in name: name = name.split('] - ')[-1] 
    if 'site=' in url: url,site = url.split('site=')

    if '|CHAT|' in url: 
        url,site,name = url.split('|CHAT|')
    if '- [' in name: 
        name = name.split('- [')[0]

    u = None
    url2 = url
    log_utils.log('Sending %s to XXX Resolver' % (url), log_utils.LOGNOTICE)
    blacklist.Blacklistcheck(url)
    if resolveurl.HostedMediaFile(url).valid_url():
        log_utils.log('%s is a valid SMU resolvable URL. Attempting to resolve.' % (url), log_utils.LOGNOTICE)
        try:
            u = resolveurl.HostedMediaFile(url).resolve()
            if u == None: u = adultresolver.resolve(url2)
        except Exception as e:
            log_utils.log('Error getting valid link from SMU :: %s :: %s' % (url, str(e)), log_utils.LOGERROR)
            try:
                u = adultresolver.resolve(url2)
            except:
                kodi.idle()
                kodi.notify(msg='Something went wrong!  | %s' % str(e), duration=8000, sound=True)
                quit()
        log_utils.log('Link returned by XXX Resolver :: %s' % (u), log_utils.LOGNOTICE)
    else:
        log_utils.log('%s is not a valid SMU resolvable link. Attempting to resolve by XXXODUS backup resolver.' % (url), log_utils.LOGNOTICE)
        try:
            u = adultresolver.resolve(url)
        except Exception as e:
            log_utils.log('Error getting valid link from XXXODUS backup resolver. :: %s :: %s' % (url, str(e)), log_utils.LOGERROR)
            kodi.idle()
            kodi.notify(msg='Something went wrong!  | %s' % str(e), duration=8000, sound=True)
            quit()
        log_utils.log('%s returned by XXX-O-DUS backup resolver.' % (u), log_utils.LOGNOTICE)
    if u == 'offline':
        kodi.idle()
        kodi.notify(msg='This performer is offline.', duration = 5000, sound = True)
        quit()
    if u:
        kodi.idle()
        play(u,name,iconimage,url,site)
    else:
        try:
            liz=xbmcgui.ListItem(name)
            liz.setArt({"thumb": iconimage})
            liz.setProperty('IsPlayable', 'true')
            xbmc.executebuiltin("Dialog.Close(busydialog)")
            xbmc.Player().play(url, liz, False)
        except:

            kodi.idle()
            log_utils.log('Failed to get any playable link for :: %s' % (url), log_utils.LOGERROR)
            kodi.notify(msg='Failed to get any playable link.', duration=7500, sound=True)
            quit()
def find(url, name=None, iconimage=None, pattern=None):

    kodi.busy()

    try:
        url, site = url.split('|SPLIT|')
    except:
        site = 'Unknown'
        log_utils.log('Error getting site information from :: %s' % (url),
                      log_utils.LOGERROR)

    try:
        if 'streamingporn.xyz' in url:
            c = client.request(url)
            r = dom_parser2.parse_dom(c,
                                      'a',
                                      req=['href', 'class', 'rel', 'target'])
            r = [i for i in r if i.attrs['class'] == 'external']
            r = [client.request(i.attrs['href'], output='geturl') for i in r]
            r = [i for i in r if resolveurl.HostedMediaFile(i).valid_url()]
            url = multi(r)
        elif 'spreadporn.org' in url:
            c = client.request(url)
            r = dom_parser2.parse_dom(c, 'li', req=['data-show', 'data-link'])
            r = [(i.attrs['data-link']) for i in r]
            url = multi(r)
        elif 'pandamovie.co' in url:
            c = client.request(url)
            r = dom_parser2.parse_dom(c, 'a', req='id')
            r = [(i.attrs['href']) for i in r]
            url = multi(r)
        elif 'xxxmoviestream.com' in url:
            c = client.request(url)
            pattern = '''<iframe src="(.+?)" scrolling="no" frameborder="0" width="700"'''
            r = re.findall(pattern, c)
            url = multi(r)
        elif 'sexkino.to' in url:
            c = client.request(url)
            u = dom_parser2.parse_dom(c, 'iframe',
                                      {'class': ['metaframe', 'rptss']})
            r = dom_parser2.parse_dom(c, 'tr')
            r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r]
            r = [client.request(i[0].attrs['href']) for i in r if i]
            r = [i.attrs['src'] for i in u] + [
                re.findall("window.location.href='([^']+)", i)[0] for i in r
            ]
            url = multi(r)
        elif 'watchxxxfree.tv' in url:
            r = client.request(url)
            pattern = r"""<iframe.+?src=['"]([^'"]+)"""
            r = re.findall(pattern, r)
            url = multi(r)

    except:
        kodi.idle()
        kodi.notify(msg='Error getting link for (Link Finder) %s' % name)
        kodi.idle()
        quit()

    url += '|SPLIT|%s' % site
    kodi.idle()
    player.resolve_url(url, name, iconimage)
Example #50
    def init_database(self):
        cur_version = kodi.get_version()
        db_version = self.__get_db_version()
        if not TRIG_DB_UPG:
            db_version = cur_version

        if db_version is not None and cur_version != db_version:
            log_utils.log('DB Upgrade from %s to %s detected.' %
                          (db_version, cur_version))
            self.progress = xbmcgui.DialogProgress()
            self.progress.create('SALTS',
                                 line1='Migrating from %s to %s' %
                                 (db_version, cur_version),
                                 line2='Saving current data.')
            self.progress.update(0)
            self.__prep_for_reinit()

        log_utils.log('Building SALTS Database', log_utils.LOGDEBUG)
        if self.db_type == DB_TYPES.MYSQL:
            self.__execute(
                'CREATE TABLE IF NOT EXISTS url_cache (url VARCHAR(255) NOT NULL, response MEDIUMBLOB, timestamp TEXT, PRIMARY KEY(url))'
            )
            self.__execute(
                'CREATE TABLE IF NOT EXISTS db_info (setting VARCHAR(255) NOT NULL, value TEXT, PRIMARY KEY(setting))'
            )
            self.__execute('CREATE TABLE IF NOT EXISTS rel_url \
            (video_type VARCHAR(15) NOT NULL, title VARCHAR(255) NOT NULL, year VARCHAR(4) NOT NULL, season VARCHAR(5) NOT NULL, episode VARCHAR(5) NOT NULL, source VARCHAR(50) NOT NULL, rel_url VARCHAR(255), \
            PRIMARY KEY(video_type, title, year, season, episode, source))')
            self.__execute(
                'CREATE TABLE IF NOT EXISTS other_lists (section VARCHAR(10) NOT NULL, username VARCHAR(255) NOT NULL, slug VARCHAR(255) NOT NULL, name VARCHAR(255), \
            PRIMARY KEY(section, username, slug))')
            self.__execute(
                'CREATE TABLE IF NOT EXISTS saved_searches (id INTEGER NOT NULL AUTO_INCREMENT, section VARCHAR(10) NOT NULL, added DOUBLE NOT NULL,query VARCHAR(255) NOT NULL, \
            PRIMARY KEY(id))')
            self.__execute(
                'CREATE TABLE IF NOT EXISTS bookmark (slug VARCHAR(255) NOT NULL, season VARCHAR(5) NOT NULL, episode VARCHAR(5) NOT NULL, resumepoint DOUBLE NOT NULL, \
            PRIMARY KEY(slug, season, episode))')
        else:
            self.__create_sqlite_db()
            self.__execute(
                'CREATE TABLE IF NOT EXISTS url_cache (url VARCHAR(255) NOT NULL, response, timestamp, PRIMARY KEY(url))'
            )
            self.__execute(
                'CREATE TABLE IF NOT EXISTS db_info (setting VARCHAR(255), value TEXT, PRIMARY KEY(setting))'
            )
            self.__execute('CREATE TABLE IF NOT EXISTS rel_url \
            (video_type TEXT NOT NULL, title TEXT NOT NULL, year TEXT NOT NULL, season TEXT NOT NULL, episode TEXT NOT NULL, source TEXT NOT NULL, rel_url TEXT, \
            PRIMARY KEY(video_type, title, year, season, episode, source))')
            self.__execute(
                'CREATE TABLE IF NOT EXISTS other_lists (section TEXT NOT NULL, username TEXT NOT NULL, slug TEXT NOT NULL, name TEXT, PRIMARY KEY(section, username, slug))'
            )
            self.__execute(
                'CREATE TABLE IF NOT EXISTS saved_searches (id INTEGER PRIMARY KEY, section TEXT NOT NULL, added DOUBLE NOT NULL,query TEXT NOT NULL)'
            )
            self.__execute(
                'CREATE TABLE IF NOT EXISTS bookmark (slug TEXT NOT NULL, season TEXT NOT NULL, episode TEXT NOT NULL, resumepoint DOUBLE NOT NULL, \
            PRIMARY KEY(slug, season, episode))')

        # reload the previously saved backup export
        if db_version is not None and cur_version != db_version:
            log_utils.log('Restoring DB from backup at %s' % (self.mig_path),
                          log_utils.LOGDEBUG)
            self.import_into_db(self.mig_path)
            log_utils.log('DB restored from %s' % (self.mig_path))

        sql = 'REPLACE INTO db_info (setting, value) VALUES(?,?)'
        self.__execute(sql, ('version', kodi.get_version()))
Example #51
def content(url, searched=False):

    try:
        c = client.request(url)
        soup = BeautifulSoup(c, 'html5lib')
        r = soup.find_all('div', class_={'video-box-card item'})
        if (not r) and (not searched):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            pass

    dirlst = []

    for i in r:
        try:
            name = i.img['alt']
            url2 = i.a['href']
            icon = i.img['data-src']
            time = i.find('div', class_={'time'}).text
            name = ('%s | [COLOR yellow]%s[/COLOR]' % (name, time))
            fanarts = xbmc.translatePath(
                os.path.join('special://home/addons/script.xxxodus.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': url2,
                'mode': player_mode,
                'icon': icon,
                'fanart': fanarts,
                'description': name,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item in %s:: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(r))

    if not searched:
        search_pattern = '''<link\s*href=['"]([^'"]+)['"]\s*rel=['"]next['"]'''
        parse = base_domain

        helper.scraper().get_next_page(content_mode, url, search_pattern,
                                       filename)
Example #52
    def execute_rpc(self, command):
        if not self.has_connection_details:
            log_utils.log(
                'JSON-RPC Unable to complete request. %s' %
                self.connection_details_error, log_utils.LOGINFO)
            return {'error': self.connection_details_error}
        log_utils.log('JSON-RPC request |%s|' % command, log_utils.LOGDEBUG)
        null_response = None
        data = json.dumps(command)
        request = urllib2.Request(self.url, headers=self.headers, data=data)
        method = 'POST'
        request.get_method = lambda: method
        try:
            response = urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            error = 'JSON-RPC received HTTPError |[Code %s] %s|' % (e.code,
                                                                    e.msg)
            log_utils.log(error, log_utils.LOGINFO)
            return {'error': 'HTTPError |[Code %s] %s|' % (e.code, e.msg)}
        except urllib2.URLError as e:
            error = 'JSON-RPC received URLError |%s|' % e.args
            log_utils.log(error, log_utils.LOGINFO)
            return {'error': 'URLError |%s|' % e.args}
        except socket.timeout as e:
            response = None
            null_response = {
                'result': 'No response/Timed out'
            }  # some requests do not respond timely. (ie. Player.Open + picture)

        if not null_response and response:
            contents = response.read()
            log_utils.log('JSON-RPC response |%s|' % contents,
                          log_utils.LOGDEBUG)
            json_response = json.loads(contents)
            response.close()
        else:
            json_response = null_response
            log_utils.log('JSON-RPC response |%s|' % null_response,
                          log_utils.LOGDEBUG)
        return self._eval_response(json_response)
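A hedged usage sketch: the command argument is a plain Kodi JSON-RPC payload (JSONRPC.Ping is a built-in method), and rpc stands in for an instance of the surrounding class whose connection details are already configured.

command = {'jsonrpc': '2.0', 'method': 'JSONRPC.Ping', 'id': 1}
result = rpc.execute_rpc(command)  # passed through self._eval_response() before being returned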
Example #53
def solve(url, cj, user_agent=None, wait=True):
    if user_agent is None: user_agent = USER_AGENT
    headers = {'User-Agent': user_agent, 'Referer': url}
    if cj is not None:
        try:
            cj.load(ignore_discard=True)
        except:
            pass
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)

    request = urllib2.Request(url)
    for key in headers:
        request.add_header(key, headers[key])
    try:
        response = urllib2.urlopen(request)
        html = response.read()
    except urllib2.HTTPError as e:
        html = e.read()

    tries = 0
    while tries < MAX_TRIES:
        solver_pattern = 'var t,r,a,f,\s*([^=]+)={"([^"]+)":([^}]+)};.+challenge-form\'\);.*?\n.*?;(.*?);a\.value'
        vc_pattern = 'input type="hidden" name="jschl_vc" value="([^"]+)'
        pass_pattern = 'input type="hidden" name="pass" value="([^"]+)'
        init_match = re.search(solver_pattern, html, re.DOTALL)
        vc_match = re.search(vc_pattern, html)
        pass_match = re.search(pass_pattern, html)

        if not init_match or not vc_match or not pass_match:
            log_utils.log(
                "Couldn't find attribute: init: |%s| vc: |%s| pass: |%s| No cloudflare check?"
                % (init_match, vc_match, pass_match), log_utils.LOGWARNING)
            return False

        init_dict, init_var, init_equation, equations = init_match.groups()
        vc = vc_match.group(1)
        password = pass_match.group(1)

        # log_utils.log("VC is: %s" % (vc), xbmc.LOGDEBUG)
        varname = (init_dict, init_var)
        result = int(solve_equation(init_equation.rstrip()))
        log_utils.log(
            'Initial value: |%s| Result: |%s|' % (init_equation, result),
            log_utils.LOGDEBUG)

        for equation in equations.split(';'):
            equation = equation.rstrip()
            if equation[:len('.'.join(varname))] != '.'.join(varname):
                log_utils.log(
                    'Equation does not start with varname |%s|' % (equation),
                    log_utils.LOGDEBUG)
            else:
                equation = equation[len('.'.join(varname)):]

            expression = equation[2:]
            operator = equation[0]
            if operator not in ['+', '-', '*', '/']:
                log_utils.log('Unknown operator: |%s|' % (equation),
                              log_utils.LOGWARNING)
                continue

            result = int(
                str(
                    eval(
                        str(result) + operator +
                        str(solve_equation(expression)))))
            log_utils.log('intermediate: %s = %s' % (equation, result),
                          log_utils.LOGDEBUG)

        scheme = urlparse.urlparse(url).scheme
        domain = urlparse.urlparse(url).hostname
        result += len(domain)
        log_utils.log('Final Result: |%s|' % (result), log_utils.LOGDEBUG)

        if wait:
            log_utils.log('Sleeping for 5 Seconds', log_utils.LOGDEBUG)
            xbmc.sleep(5000)

        url = '%s://%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s&pass=%s' % (
            scheme, domain, vc, result, urllib.quote(password))
        log_utils.log('url: %s' % (url), log_utils.LOGDEBUG)
        request = urllib2.Request(url)
        for key in headers:
            request.add_header(key, headers[key])
        try:
            opener = urllib2.build_opener(NoRedirection)
            urllib2.install_opener(opener)
            response = urllib2.urlopen(request)
            while response.getcode() in [301, 302, 303, 307]:
                if cj is not None:
                    cj.extract_cookies(response, request)
                request = urllib2.Request(
                    response.info().getheader('location'))
                for key in headers:
                    request.add_header(key, headers[key])
                if cj is not None:
                    cj.add_cookie_header(request)

                response = urllib2.urlopen(request)
            final = response.read()
            if 'cf-browser-verification' in final:
                log_utils.log('CF Failure: html: %s url: %s' % (html, url),
                              log_utils.LOGWARNING)
                tries += 1
                html = final
            else:
                break
        except urllib2.HTTPError as e:
            log_utils.log('CloudFlare Error: %s on url: %s' % (e.code, url),
                          log_utils.LOGWARNING)
            return False

    if cj is not None:
        cj.save()

    return final
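A hedged usage sketch for the solver above: a file-backed LWPCookieJar lets the Cloudflare clearance cookie persist between runs (the URL and cookie path are illustrative).

import cookielib

cj = cookielib.LWPCookieJar('cf_cookies.lwp')
page = solve('http://protected.example.com/', cj)
if page is False:
    log_utils.log('Cloudflare challenge was not solved', log_utils.LOGWARNING)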
Example #54
def comics(url=None):

    reload = False
    try:
        if (not url): url = urlparse.urljoin(base_domain, 'comix/')
        c = client.request(url)
        try:
            r = dom_parser2.parse_dom(c, 'div', {'class': 'gallery'})
            r = dom_parser2.parse_dom(r, 'a', req='href')
            r = [i for i in r if 'login' not in i.attrs['href']]
            r = [(i.attrs['href'], \
                  dom_parser2.parse_dom(i, 'img', req='data-src'), \
                  dom_parser2.parse_dom(i, 'span', {'class': 'title-text'})) \
                for i in r if i]
            r = [(urlparse.urljoin(base_domain, i[0]), i[2][0].content,
                  i[1][0].attrs['data-src']) for i in r if i]
            reload = True
        except:
            r = dom_parser2.parse_dom(c, 'div', {'class': 'gallery'})
            r = dom_parser2.parse_dom(r, 'a', req='href')
            r = [i for i in r if 'login' not in i.attrs['href']]
            r = [(i.attrs['href'], \
                  dom_parser2.parse_dom(i, 'img', req='data-src')) \
                for i in r if i]
            r = [(urlparse.urljoin(base_domain,
                                   i[0]), i[1][0].attrs['data-src']) for i in r
                 if i]
            reload = False
        if (not r):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
            quit()
    except Exception as e:
        log_utils.log(
            'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
            log_utils.LOGERROR)
        kodi.notify(msg='Fatal Error', duration=4000, sound=True)
        quit()

    dirlst = []

    if reload:
        for i in r:
            try:
                name = kodi.sortX(i[1].encode('utf-8')).title()
                fanarts = xbmc.translatePath(
                    os.path.join(
                        'special://home/addons/script.xxxodus.artwork',
                        'resources/art/%s/fanart.jpg' % filename))
                dirlst.append({
                    'name': name,
                    'url': i[0],
                    'mode': comics_mode,
                    'icon': i[2],
                    'fanart': fanarts,
                    'folder': True
                })
            except Exception as e:
                log_utils.log(
                    'Error adding menu item %s in %s:: Error: %s' %
                    (i[1].title(), base_name.title(), str(e)),
                    log_utils.LOGERROR)
    else:
        num = 1
        for i in r:
            try:
                name = 'Page %s' % str(num)
                fanarts = xbmc.translatePath(
                    os.path.join(
                        'special://home/addons/script.xxxodus.artwork',
                        'resources/art/%s/fanart.jpg' % filename))
                dirlst.append({
                    'name': name,
                    'url': i[0],
                    'mode': pic_v_mode,
                    'icon': i[1],
                    'fanart': fanarts,
                    'folder': False
                })
            except Exception as e:
                log_utils.log(
                    'Error adding menu item %s in %s:: Error: %s' %
                    (i[1].title(), base_name.title(), str(e)),
                    log_utils.LOGERROR)
            num += 1

    if dirlst:
        buildDirectory(dirlst, stopend=True, pictures=True)
        search_pattern = '''<span\s*class=['"]next['"]>\s*<a\s*href=['"]([^'"]+)['"]>'''
        parse = base_domain
        helper.scraper().get_next_page(pic_men_mode,
                                       url,
                                       search_pattern,
                                       filename,
                                       parse,
                                       pictures=True)
    else:
        kodi.notify(msg='No Menu Items Found')
        quit()
Example #55
def menu():

    lover.checkupdates()

    url = base_domain
    r = requests.get(url, headers=headers).text
    r = dom_parser2.parse_dom(r, 'dd')
    r = dom_parser2.parse_dom(r, 'a', req='href')
    r = [i for i in r if 'private-cams' not in i.attrs['href']]
    r = [(urljoin(base_domain, i.attrs['href']), i.content) for i in r if i]
    dirlst = []
    icon = translatePath(
        os.path.join('special://home/addons/script.xxxodus.artwork',
                     'resources/art/main/%s.png' % filename))
    fanarts = translatePath(
        os.path.join('special://home/addons/script.xxxodus.artwork',
                     'resources/art/%s/fanart.jpg' % filename))
    dirlst.append({
        'name': 'Monitored Performers',
        'url': 'none',
        'mode': 30,
        'icon': icon,
        'fanart': fanarts,
        'folder': True
    })
    dirlst.append({
        'name': 'Search By Username',
        'url': 'none',
        'mode': 32,
        'icon': icon,
        'fanart': fanarts,
        'folder': False
    })
    dirlst.append({
        'name': 'Rooms By Tag',
        'url': 'tags',
        'mode': 302,
        'icon': icon,
        'fanart': fanarts,
        'folder': True
    })
    for i in r:
        try:
            if PY2: name = kodi.sortX(i[1].encode('utf-8')).title()
            else: name = kodi.sortX(i[1]).title()
            dirlst.append({
                'name': name,
                'url': i[0],
                'mode': content_mode,
                'icon': icon,
                'fanart': fanarts,
                'folder': True
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item %s in %s:: Error: %s' %
                (i[1].title(), base_name.title(), str(e)), log_utils.LOGERROR)
    #dialog.ok("DIRLIST",str(dirlst))
    if dirlst: buildDirectory(dirlst)
    else:
        kodi.notify(msg='No Menu Items Found')
        quit()
Example #56
    def generic(self, url, pattern=None):

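        # Generic resolver: if no explicit pattern is given, try a battery of URL-extraction
        # regexes against the page, then probe the candidates and return the best playable stream.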
        # youporn has its own resolver; hand the URL straight to it
        if 'youporn.com' in url: return self.youporn(url)
        try:
            r = client.request(url)
            if pattern: s = re.findall(pattern, r)
            else:
                patterns = [
                            r'''\s*=\s*[\'\"](http.+?)[\'\"]''',
                            r'''\s*=\s*['"](http.+?)['"]''',
                            r'''['"][0-9_'"]+:\s[\'\"]([^'"]+)''',
                            r'''\(\w+\([\'\"]([^\'\"]*)''',
                            r'''[\'\"]\w+[\'\"]:['"]([^'"]*)''',
                            r'''\s*:\s*[\'\"](//.+?)[\'\"]''',
                            r'''\:[\'\"](\.+?)[\'\"]''',
                            r'''\s*\(\s*[\'\"](http.+?)[\'\"]''',
                            r'''\s*=\s*[\'\"](//.+?)[\'\"]''',
                            r'''\w*:\s*[\'\"](http.+?)[\'\"]''',
                            r'''\w*=[\'\"]([^\'\"]*)''',
                            r'''\w*\s*=\s*[\'\"]([^\'\"]*)''',
                            r'''(?s)<file>([^<]*)''',
                            ]
                
                s = []
                for pattern in patterns: 
                    l = re.findall(pattern, r)
                    s += [i for i in l if (urlparse.urlparse(i).path).strip('/').split('/')[-1].split('.')[-1] in ['mp4', 'flv', 'm3u8']]

                if s: s = [i for i in s if (urlparse.urlparse(i).path).strip('/').split('/')[-1].split('.')[-1] in ['mp4', 'flv', 'm3u8']]
                else: s = client.parseDOM(r, 'source', ret='src', attrs = {'type': 'video.+?'})
                
                if not s:
                    log_utils.log('Error resolving %s :: No playable sources found' % url, log_utils.LOGERROR)
                    return
                    
                s = ['http:' + i if i.startswith('//') else i for i in s]
                s = [urlparse.urljoin(url, i) if not i.startswith('http') else i for i in s]
                s = [x for y,x in enumerate(s) if x not in s[:y]]

            self.u = []
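            # Probe each candidate URL in a worker thread; keep those whose Content-Type
            # looks like video, HLS or HTML and record Content-Length so they can be ranked by size.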
            def request(i):
                try:
                    i = i.replace(' ','%20')
                    c = client.request(i, output='headers', referer=url)
                    checks = ['video','mpegurl','html']
                    if any(f for f in checks if f in c['Content-Type']): self.u.append((i, int(c['Content-Length'])))
                except:
                    pass
            threads = []
            for i in s: threads.append(workers.Thread(request, i))
            [i.start() for i in threads] ; [i.join() for i in threads]

            u = sorted(self.u, key=lambda x: x[1])[::-1]
            
            mobile_mode = kodi.get_setting('mobile_mode')
            if mobile_mode == 'true': u = client.request(u[-1][0], output='geturl', referer=url)
            else: u = client.request(u[0][0], output='geturl', referer=url)
            log_utils.log('Returning %s from XXX-O-DUS Resolver' % str(u), log_utils.LOGNOTICE)
            return u
        except Exception as e:
            log_utils.log('Error resolving %s :: Error: %s' % (url,str(e)), log_utils.LOGERROR)
Example #57
def train(
        train_image0_path,
        train_image1_path,
        train_camera_path,
        # Batch settings
        n_batch=settings.N_BATCH,
        n_height=settings.N_HEIGHT,
        n_width=settings.N_WIDTH,
        encoder_type=settings.ENCODER_TYPE,
        decoder_type=settings.DECODER_TYPE,
        activation_func=settings.ACTIVATION_FUNC,
        n_pyramid=settings.N_PYRAMID,
        # Training settings
        n_epoch=settings.N_EPOCH,
        learning_rates=settings.LEARNING_RATES,
        learning_schedule=settings.LEARNING_SCHEDULE,
        use_augment=settings.USE_AUGMENT,
        w_color=settings.W_COLOR,
        w_ssim=settings.W_SSIM,
        w_smoothness=settings.W_SMOOTHNESS,
        w_left_right=settings.W_LEFT_RIGHT,
        # Depth range settings
        scale_factor=settings.SCALE_FACTOR,
        # Checkpoint settings
        n_summary=settings.N_SUMMARY,
        n_checkpoint=settings.N_CHECKPOINT,
        checkpoint_path=settings.CHECKPOINT_PATH,
        # Hardware settings
        device=settings.DEVICE,
        n_thread=settings.N_THREAD):

    if device == settings.CUDA or device == settings.GPU:
        device = torch.device(settings.CUDA)
    else:
        device = torch.device(settings.CPU)

    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)

    # Set up checkpoint and event paths
    encoder_checkpoint_path = os.path.join(checkpoint_path, 'encoder-{}.pth')
    decoder_checkpoint_path = os.path.join(checkpoint_path, 'decoder-{}.pth')
    log_path = os.path.join(checkpoint_path, 'results.txt')
    event_path = os.path.join(checkpoint_path, 'events')

    # Read paths for training
    train_image0_paths = data_utils.read_paths(train_image0_path)
    train_image1_paths = data_utils.read_paths(train_image1_path)
    train_camera_paths = data_utils.read_paths(train_camera_path)

    assert len(train_image0_paths) == len(train_image1_paths)
    assert len(train_image0_paths) == len(train_camera_paths)

    n_train_sample = len(train_image0_paths)
    n_train_step = n_epoch * np.ceil(n_train_sample / n_batch).astype(np.int32)
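    # Total optimizer steps: epochs x ceil(samples / batch size); the final partial
    # batch is kept because the dataloader uses drop_last=False.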

    train_dataloader = torch.utils.data.DataLoader(
        datasets.ImagePairCameraDataset(train_image0_paths,
                                        train_image1_paths,
                                        train_camera_paths,
                                        shape=(n_height, n_width),
                                        augment=use_augment),
        batch_size=n_batch,
        shuffle=True,
        num_workers=n_thread,
        drop_last=False)

    # Build network
    model = MonodepthModel(encoder_type=encoder_type,
                           decoder_type=decoder_type,
                           activation_func=activation_func,
                           n_pyramid=n_pyramid,
                           scale_factor=scale_factor,
                           device=device)
    train_summary = SummaryWriter(event_path)
    # Materialize the parameters so counting them does not exhaust the iterator
    # before it is handed to the optimizer.
    parameters = list(model.parameters())
    n_param = sum(p.numel() for p in parameters)

    # Start training
    model.train()

    log('Network settings:', log_path)
    log(
        'n_batch=%d  n_height=%d  n_width=%d  n_param=%d' %
        (n_batch, n_height, n_width, n_param), log_path)
    log(
        'encoder_type=%s  decoder_type=%s  activation_func=%s  n_pyramid=%d' %
        (encoder_type, decoder_type, activation_func, n_pyramid), log_path)
    log('Training settings:', log_path)
    log(
        'n_sample=%d  n_epoch=%d  n_step=%d' %
        (n_train_sample, n_epoch, n_train_step), log_path)
    log(
        'learning_schedule=[%s]' %
        ', '.join('{}:{}'.format(l * (n_train_sample // n_batch), v)
                  for l, v in zip([0] + learning_schedule, learning_rates)),
        log_path)
    log('use_augment=%s' % use_augment, log_path)
    log(
        'w_color=%.2f  w_ssim=%.2f  w_smoothness=%.2f  w_left_right=%.2f' %
        (w_color, w_ssim, w_smoothness, w_left_right), log_path)
    log('Depth range settings:', log_path)
    log('scale_factor=%.2f' % (scale_factor), log_path)
    log('Checkpoint settings:', log_path)
    log('depth_model_checkpoint_path=%s' % checkpoint_path, log_path)

    learning_schedule.append(n_epoch)
    schedule_pos = 0
    train_step = 0
    time_start = time.time()
    log('Begin training...', log_path)
    for epoch in range(1, n_epoch + 1):
        # Set learning rate schedule
        if epoch > learning_schedule[schedule_pos]:
            schedule_pos = schedule_pos + 1
        learning_rate = learning_rates[schedule_pos]
        optimizer = torch.optim.Adam(parameters, lr=learning_rate)
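        # Note: the optimizer is re-created at the start of every epoch with the
        # scheduled learning rate, which also resets Adam's moment estimates.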

        for train_image0, train_image1, train_camera in train_dataloader:
            train_step = train_step + 1
            # Fetch data
            if device.type == settings.CUDA:
                train_image0 = train_image0.cuda()
                train_image1 = train_image1.cuda()
                train_camera = train_camera.cuda()

            # Forward through the network
            model.forward(train_image0, train_camera)

            # Compute loss function
            loss = model.compute_loss(train_image0,
                                      train_image1,
                                      w_color=w_color,
                                      w_ssim=w_ssim,
                                      w_smoothness=w_smoothness,
                                      w_left_right=w_left_right)

            # Compute gradient and backpropagate
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (train_step % n_summary) == 0:
                model.log_summary(summary_writer=train_summary,
                                  step=train_step)

            # Log results and save checkpoints
            if (train_step % n_checkpoint) == 0:
                time_elapse = (time.time() - time_start) / 3600
                time_remain = (n_train_step -
                               train_step) * time_elapse / train_step
                log(
                    'Step={:6}/{}  Loss={:.5f}  Time Elapsed={:.2f}h  Time Remaining={:.2f}h'
                    .format(train_step, n_train_step, loss.item(), time_elapse,
                            time_remain), log_path)

                # Save checkpoints
                torch.save(
                    {
                        'train_step': train_step,
                        'model_state_dict': model.encoder.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict()
                    }, encoder_checkpoint_path.format(train_step))
                torch.save(
                    {
                        'train_step': train_step,
                        'model_state_dict': model.decoder.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict()
                    }, decoder_checkpoint_path.format(train_step))

    # Save checkpoints and close summary
    train_summary.close()
    torch.save(
        {
            'train_step': train_step,
            'model_state_dict': model.encoder.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()
        }, encoder_checkpoint_path.format(train_step))
    torch.save(
        {
            'train_step': train_step,
            'model_state_dict': model.decoder.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()
        }, decoder_checkpoint_path.format(train_step))
Example #58
    def http_response(self, request, response):
        # Handler hook: return the response as-is ('Stopping Redirect') instead of
        # letting the redirect be processed further.
        log_utils.log('Stopping Redirect', log_utils.LOGDEBUG)
        return response
Example #59
def content(url, searched=False):

    try:
        c = requests.get(url, headers=headers).content
        soup = BeautifulSoup(c, 'html5lib')
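        # Each item in the grid is an <li class="masonry-item"> holding a link, title and thumbnail.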
        r = soup.find_all('li', class_={'masonry-item'})
        if (not r) and (not searched):
            log_utils.log(
                'Scraping Error in %s:: Content of request: %s' %
                (base_name.title(), str(c)), log_utils.LOGERROR)
            kodi.notify(msg='Scraping Error: Info Added To Log File',
                        duration=6000,
                        sound=True)
    except Exception as e:
        if (not searched):
            log_utils.log(
                'Fatal Error in %s:: Error: %s' % (base_name.title(), str(e)),
                log_utils.LOGERROR)
            kodi.notify(msg='Fatal Error', duration=4000, sound=True)
            quit()
        else:
            # searched callers expect a result count, so fall back to an empty set
            r = []

    dirlst = []

    for i in r:
        try:
            name = i.a['title']
            url2 = i.a['href']
            icon = i.img['src']
            fanarts = xbmc.translatePath(
                os.path.join('special://home/addons/script.xxxodus.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': url2,
                'mode': player_mode,
                'icon': icon,
                'fanart': fanarts,
                'description': name,
                'folder': False
            })
        except Exception as e:
            log_utils.log('Error: %s' % str(e), log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst, stopend=True, isVideo=True, isDownloadable=True)
    else:
        if (not searched):
            kodi.notify(msg='No Content Found')
            quit()

    if searched: return str(len(r))

    if not searched:

        try:
            search_pattern = r'''href=['"]([^'"]+)['"]\s+rel=['"]next'''
            parse = base_domain
            helper.scraper().get_next_page(content_mode, url, search_pattern,
                                           filename)
        except Exception as e:
            log_utils.log(
                'Error getting next page for %s :: Error: %s' %
                (base_name.title(), str(e)), log_utils.LOGERROR)
Example #60
def content(url, searched=False):

    r = requests.get(url, headers=headers).text
    r = dom_parser2.parse_dom(r, 'li')
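    # Break each room <li> into the pieces used below: the title link, thumbnail,
    # status label, optional info line (falls back to 'Unknown'), location and cam count.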
    r = [(dom_parser2.parse_dom(i, 'div', {'class': 'title'}), \
        dom_parser2.parse_dom(i, 'img', req='src'), \
        dom_parser2.parse_dom(i, 'div', {'class': re.compile('thumbnail_label.+?')}), \
        dom_parser2.parse_dom(i, 'li', {'title': re.compile('.+?')}), \
        dom_parser2.parse_dom(i, 'li', {'class': 'location'}), \
        dom_parser2.parse_dom(i, 'li', {'class': 'cams'}) \
        ) for i in r if '<div class="title">' in i.content]

    r = [(dom_parser2.parse_dom(i[0], 'a'), \
        dom_parser2.parse_dom(i[0], 'span'), \
        i[2][0].content, \
        i[1][0].attrs['src'], \
        i[3][0].content if i[3] else 'Unknown', \
        i[4][0].content, \
        i[5][0].content, \
        ) for i in r]
    r = [(urljoin(base_domain, i[0][0].attrs['href']), i[0][0].content,
          i[1][0].content, i[2], i[3], i[6], i[5], i[4]) for i in r]
    dirlst = []

    for i in r:
        try:
            if PY2:
                name = '%s - [ %s ]' % (kodi.sortX(i[1].encode('utf-8')).title(),
                                        kodi.sortX(i[3].encode('utf-8')))
                description = 'Name: %s \nAge: %s \nLocation: %s \nStats: %s \n\nDescription: %s' % \
                    (kodi.sortX(i[1].encode('utf-8')), i[2],
                     kodi.sortX(i[6].encode('utf-8')),
                     kodi.sortX(i[5].encode('utf-8')),
                     kodi.sortX(i[7].encode('utf-8')))
            else:
                name = '%s - [ %s ]' % (kodi.sortX(i[1]).title(), kodi.sortX(i[3]))
                description = 'Name: %s \nAge: %s \nLocation: %s \nStats: %s \n\nDescription: %s' % \
                    (kodi.sortX(i[1]), i[2], kodi.sortX(i[6]),
                     kodi.sortX(i[5]), kodi.sortX(i[7]))
            content_url = i[0] + '|SPLIT|%s' % base_name
            fanarts = translatePath(
                os.path.join('special://home/addons/script.xxxodus.artwork',
                             'resources/art/%s/fanart.jpg' % filename))
            dirlst.append({
                'name': name,
                'url': content_url,
                'mode': player_mode,
                'icon': i[4],
                'fanart': fanarts,
                'description': description,
                'folder': False
            })
        except Exception as e:
            log_utils.log(
                'Error adding menu item %s in %s:: Error: %s' %
                (i[1].title(), base_name.title(), str(e)), log_utils.LOGERROR)

    if dirlst:
        buildDirectory(dirlst,
                       stopend=True,
                       isVideo=False,
                       isDownloadable=False,
                       chaturbate=True)
    else:
        kodi.notify(msg='No Content Found')
        quit()

    search_pattern = r'''<li><a\s*href=['"]([^'"]+)['"]\s*class=['"]next endless_page_link'''
    parse = base_domain
    helper.scraper().get_next_page(content_mode, url, search_pattern, filename,
                                   parse)