Code example #1
File: szukajkatv.py Project: po50on/cherry-dev
    def resolve(self, url):
        try:
            #import pydevd
            #pydevd.settrace(stdoutToServer=True, stderrToServer=True)
            #cookies = client.request(url, output='cookie')
            #verifyGet = client.request(self.verify, cookie = cookies)
            #cookies = cookies + ";tmvh=" + self.crazy_cookie_hash(verifyGet)
            cookies = cache.cache_get('szukajka_cookie')
            replace = re.findall("""tmvh=(.*)""", str(cookies['value']))[0]
            cookies = str(cookies['value'])

            verifyGet = client.request(self.verify, cookie=cookies)
            tmvh = self.crazy_cookie_hash(verifyGet)
            cookies = cookies.replace(replace, tmvh)

            test4 = client.request(url, cookie=cookies)
            test5 = client.parseDOM(test4,
                                    'a',
                                    attrs={'class': 'submit'},
                                    ret='href')[0]

            replace = re.findall("""tmvh=(.*)""", cookies)[0]
            verifyGet = client.request(self.verify, cookie=cookies)
            tmvh = self.crazy_cookie_hash(verifyGet)
            cookies = cookies.replace(replace, tmvh)

            test6 = client.request(test5, cookie=cookies)
            test7 = client.parseDOM(test6, 'iframe', ret='src')
            video_url = test7[0].replace(
                "javascript:window.location.replace('", "").replace("')", "")
            return video_url
        except Exception:
            return
Code example #2
File: mzmovies.py Project: po50on/cherry-dev
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            url = urlparse.urljoin(self.base_link, url)
            scraper = cfscrape.create_scraper()
            data = scraper.get(url).content
            data = client.parseDOM(data, 'ul', attrs={'class': 'episodios'})
            links = client.parseDOM(data,
                                    'div',
                                    attrs={'class': 'episodiotitle'})
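            # pair each 'numerando' label (e.g. '2x5') with the matching episode href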
            sp = zip(
                client.parseDOM(data, 'div', attrs={'class': 'numerando'}),
                client.parseDOM(links, 'a', ret='href'))

            Sea_Epi = '%dx%d' % (int(season), int(episode))
            for i in sp:
                sep = i[0]
                if sep == Sea_Epi:
                    url = source_utils.strip_domain(i[1])

            return url
        except:
            return
Code example #3
File: main.py Project: po50on/cherry-dev
def get_epsiode_link(sess, data):
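    # NOTE: 'url' below is read from module scope; only 'sess' and 'data' are parameters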
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'www.animezone.pl',
        'Referer': str(url).replace("http://", "http://www."),
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
    }

    verify = sess.get('http://animezone.pl/images/statistics.gif',
                      headers=headers)
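    # flatten resolveurl's per-resolver domain lists, lower-case them, and de-duplicate in order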
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if '*' not in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]

    headers = {
        'Host': 'www.animezone.pl',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Accept': '*/*',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Referer': str(url).replace("http://", "http://www."),
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    data = {'data': data}
    response = sess.post(str(url).replace("http://", "https://www."),
                         headers=headers,
                         data=data).content
    try:
        link = client.parseDOM(response, 'a', ret='href')[0]
    except:
        link = client.parseDOM(response, 'iframe', ret='src')[0]

    if not link:
        raise InvalidLink('No link')
    if str(link).startswith('//'):
        link = 'http:' + str(link)
    try:
        valid, host = source_utils.is_host_valid(str(link), hostDict)
    except Exception as e:
        log_exception()
        raise InvalidLink('Exception {!r}'.format(e))
    if not valid:
        raise InvalidLink('Invalid host')
    return host, link
Code example #4
File: main.py Project: eliteironlix/kodi
def Listowanie_Odcinkow():
    url = urllib.unquote_plus(params['url'])
    r = s.get(url).content
    result = client.parseDOM(r, 'td', attrs={'class': 'border-c2'})
    linki = client.parseDOM(result, 'a', ret='href')
    nazwy = client.parseDOM(result, 'a', ret='title')
    nazwy2 = []
    counter2 = 1
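    # the episode table flattens into groups of four cells; join number and title,
    # skipping groups where the cell after the title is just '-'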
    while counter2 < len(result):
        if result[counter2 + 1] == '-':
            counter2 += 4
            continue
        nazwy2.append(str(result[counter2 - 1]) + ". " + str(result[counter2]))
        counter2 += 4
    # save the session cookies to a temp file so later calls can restore them
    import json
    basePath = "special://temp/cookie.txt"
    path = xbmc.translatePath(basePath)
    with open(path, 'w') as f:
        json.dump(requests.utils.dict_from_cookiejar(s.cookies), f)
    counter = 0
    for link in linki:
        linki[counter] = 'http://www.kreskowkazone.pl/' + str(link)
        addDir(str(nazwy2[counter]), linki[counter], 5, "banner.png",
               "thumb.png", "fanart.png", "opis", "gatunek", "")
        counter += 1
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_TITLE)
Code example #5
File: primewire.py Project: po50on/cherry-dev
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)

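            # per row: (hrefs, episode-name spans, air dates); the next steps flatten each to scalars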
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = client.replaceHTMLCodes(url[0][0])
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #6
File: main.py Project: Koscielny83/cherry-dev
def ListujLinki():
    url = urllib.unquote_plus(params['url'])
    result = requests.get(url).content
    result = client.parseDOM(result, 'table', attrs={'class': 'lista'})
    result = client.parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
    odtwarzacz = "%sodtwarzacz-%s.html"
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if '*' not in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]
    url = url.split("pl/")[0] + "pl/"
    for item in result:
        id = client.parseDOM(item, 'span', ret='rel')[0]
        content = odtwarzacz % (url, id)
        xbmc.log('Wbijam.pl | Listuje z url: %s' % content, xbmc.LOGNOTICE)
        temp = requests.get(content).content
        try:
            link = client.parseDOM(temp, 'iframe', ret='src')
        except:
            continue
        for item2 in link:
            try:
                if str(item2).startswith("//"):
                    item2 = str(item2).replace("//", "http://")
                valid, host = source_utils.is_host_valid(str(item2), hostDict)
                if not valid:
                    continue
                xbmc.log('Wbijam.pl | Video Link: %s' % str(item2), xbmc.LOGNOTICE)
                addDir("[B]" + host + "[/B]", str(item2), 6, '', '', '', False)
            except:
                continue
Code example #7
File: alltube.py Project: Koscielny83/cherry-dev
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3435.0 Safari/537.36',
                'Origin': 'http://alltube.pl',
                'Referer': 'http://alltube.pl/szukaj'
            }
            result = requests.get(url, headers=headers).content

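            # per result row: watch-page href, host name from the poster img's alt, and the version cell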
            links = client.parseDOM(result, 'tr')
            links = [(client.parseDOM(i, 'a', attrs={'class': 'watch'}, ret='href')[0],
                    client.parseDOM(i, 'img', ret='alt')[0],
                    client.parseDOM(i, 'td', attrs={'class':'text-center'})[0]) for i in links]

            for i in links:
                try:
                    url1 = '%s?%s' % (url, i[0])
                    url1 = url1.encode('utf-8')
                    language, info = self.get_language_by_type(i[2])
                    sources.append({'source': i[1].encode('utf-8'), 'quality': 'SD', 'language': language, 'url': url1, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #8
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:
            #import pydevd
            #pydevd.settrace(stdoutToServer=True, stderrToServer=True)
            if url is None: return sources
            r = client.request(urlparse.urljoin(self.base_link, url),
                               redirect=False)
            info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
            r = client.parseDOM(r, 'div', attrs={'class':
                                                 'tab-pane active'})[0]
            r = client.parseDOM(r, 'script')[0]
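            # the packed player script keeps its payload in the first quoted string; shwp() decodes it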
            script = r.split('"')[1]
            decoded = self.shwp(script)

            link = client.parseDOM(decoded, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
Code example #9
    def search(self, localtitle, year, search_type):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url,
                               redirect=False,
                               post={
                                   'q': cleantitle.query(localtitle),
                                   'sb': ''
                               })
            r = client.parseDOM(r, 'div', attrs={'class': 'small-item'})

            local_simple = cleantitle.get(localtitle)
            for row in r:
                name_found = client.parseDOM(row, 'a')[1]
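                # the year sits between parentheses in the link text, e.g. 'Title (2016)'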
                year_found = name_found[name_found.find("(") +
                                        1:name_found.find(")")]
                url = client.parseDOM(row, 'a', ret='href')[1]
                if search_type not in url:
                    continue

                if cleantitle.get(
                        name_found) == local_simple and year_found == year:
                    return url
        except:
            return
Code example #10
File: cdahd.py Project: Koscielny83/cherry-dev
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            result = client.request(url)
            # can't use the DOM parser here because the HTML is buggy: the div is never closed
            result = re.findall('<ul class="episodios">(.*?)</ul>', result,
                                re.MULTILINE | re.DOTALL)
            for item in result:
                season_episodes = re.findall('<li>(.*?)</li>', item,
                                             re.MULTILINE | re.DOTALL)
                for row in season_episodes:
                    s = client.parseDOM(row,
                                        'div',
                                        attrs={'class':
                                               'numerando'})[0].split('x')
                    season_found = s[0].strip()
                    episode_found = s[1].strip()
                    if season_found != season:
                        break
                    if episode_found == episode:
                        return client.parseDOM(row, 'a', ret='href')[0]

        except:
            return
Code example #11
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            tv_maze = tvmaze.tvMaze()
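            # resolve the TVDb id to the show's canonical name via TVMaze's lookup endpoint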
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = urlparse.urljoin(self.base_link, self.search_link)
            q = q % urllib.quote_plus(tvshowtitle)

            r = client.request(q)

            r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i))
                 for i in r]
            r = [(i[0][0], i[1][0], i[2][-1]) for i in r
                 if i[0] and i[1] and i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Code example #12
File: cdahd.py Project: Koscielny83/cherry-dev
    def get_from_main_player(self, result, sources):

        q = 'SD'
        if not sources and client.parseDOM(result, 'span', attrs={'class': 'calidad2'}):
            q = 'HD'
        player2 = client.parseDOM(result, 'div', attrs={'id': 'player2'})
        links = client.parseDOM(player2, 'iframe', ret='src')

        player_nav = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'player_nav'})
        transl_type = client.parseDOM(player_nav, 'a')
        result_sources = []
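        # iframes and player-nav labels are parallel lists: links[i] pairs with transl_type[i]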
        for i in range(0, len(links)):
            url = links[i]
            if self.url_not_on_list(url, sources):
                lang, info = self.get_lang_by_type(transl_type[i])
                host = url.split("//")[-1].split("/")[0]
                result_sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })

        return result_sources
Code example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            r = client.request(url)
            data = client.parseDOM(r,
                                   'div',
                                   attrs={'class': 'anime_muti_link'})
            data = [
                client.parseDOM(i, 'a', ret='data-video') for i in data if i
            ]
            try:
                for link in data[0]:
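                    # data-video URLs are scheme-less ('//host/...'), so prepend 'http:'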
                    url = 'http:' + link
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    quality, info = source_utils.get_release_quality(url, None)
                    if not valid: continue
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass
            return sources
        except:
            return sources
Code example #14
File: cdax.py Project: po50on/cherry-dev
    def resolve(self, url):
        result = client.request(url)
        result = client.parseDOM(result,
                                 'div',
                                 attrs={'class': 'boton reloading'})
        link = client.parseDOM(result, 'a', ret='href')[0]
        return link
Code example #15
def work(items, start, end):
    for item in items[start:end]:
        try:
            h = HTMLParser()
            item = h.unescape(item)
            link = str(client.parseDOM(item, 'a', ret='href')[0])
            title = str(client.parseDOM(item, 'a')[0])
            if title == "Gry": continue
            try:
                plot = str(client.parseDOM(item, 'a', ret='title')[0])
            except:
                log_exception()
                plot = ""
            fanart, banner, thumb = Tvdb_Scraper(title)
            addon.addDir(title,
                         link,
                         mode=11,
                         banner=banner,
                         thumb=thumb,
                         fanart=fanart,
                         plot=plot,
                         genre="Bajka")
        except Exception:
            log_exception()
            print('error with item')
Code example #16
File: boxfilm.py Project: po50on/cherry-dev
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:

            if url is None: return sources
            result = client.request(urlparse.urljoin(self.base_link, url),
                                    redirect=False)

            section = client.parseDOM(result,
                                      'section',
                                      attrs={'id': 'video_player'})[0]
            link = client.parseDOM(section, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            spans = client.parseDOM(section, 'span')
            info = None
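            # 'Z lektorem' ('with a voice-over') marks the Polish lektor audio track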
            for span in spans:
                if span == 'Z lektorem':
                    info = 'Lektor'

            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
Code example #17
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}

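            # read the movie's primary language from its IMDb page to pick the right site section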
            lang = 'http://www.imdb.com/title/%s/' % imdb
            lang = client.request(lang)
            lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
            lang = [i for i in lang if 'primary_language' in i]
            lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
            lang = [i['primary_language'] for i in lang if 'primary_language' in i]
            lang = langMap[lang[0][0]]

            q = self.search_link % (lang, urllib.quote_plus(title))
            q = urlparse.urljoin(self.base_link, q)

            t = cleantitle.get(title)

            r = client.request(q)

            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs = {'class': 'info'})) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
            r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = str(r)
            return url
        except:
            return
Code example #18
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
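            # pull the '(YYYY' year token out of each result title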
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None

            if url is None:
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]
            return url
        except:
            return
Code example #19
def ListowanieMeczy():
    try:
        url = params['url']
        result = client.request(url)
        result_images = client.parseDOM(result,
                                        'article',
                                        attrs={'class': 'post hentry'})
        result = client.parseDOM(result,
                                 'h2',
                                 attrs={'class': 'post-title entry-title'})

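        # thumbnails come from the article anchors' 'content' attribute; links and titles from the h2 anchors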
        images = client.parseDOM(result_images, 'a', ret='content')
        links = client.parseDOM(result, 'a', ret='href')
        titles = client.parseDOM(result, 'a', ret='title')
        for item in zip(links, titles, images):
            link = str(item[0])
            title = str(item[1])
            image = str(item[2])
            addon.addDir(title,
                         link,
                         mode='ListowanieLinkow',
                         icon=image,
                         fanart=_default_background)
    except:
        log_exception()
Code example #20
def more_cdapl(link, hostDict, lang, info):
    sources = []
    if "cda.pl" in link:
        try:
            response = requests.get(link).content
            test = client.parseDOM(response,
                                   'div',
                                   attrs={'class': 'wrapqualitybtn'})
            urls = client.parseDOM(test, 'a', ret='href')
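            # each quality button links to a cda.pl player page whose inline JSON carries the direct file URL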
            for url in urls:
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                direct = re.findall("""file":"(.*)","file_cast""",
                                    requests.get(url).content)[0].replace(
                                        "\\/", "/")
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': direct,
                    'info': info,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except Exception as e:
            print(e)
            return []
    return sources
Code example #21
File: alltube.py Project: Koscielny83/cherry-dev
    def search(self, title, localtitle, year, search_type):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3435.0 Safari/537.36',
                    'Origin': 'http://alltube.pl',
                    'Referer': 'http://alltube.pl/szukaj'
                }
                r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'search': cleantitle.query(title)}, headers=headers)
                r = self.get_rows(r, search_type)
                
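                # a row may list several alternative titles separated by '/'; skip 'Zwiastun' (trailer) rows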
                for row in r:
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    names_found = client.parseDOM(row, 'h3')[0]
                    if names_found.startswith('Zwiastun') and not title.startswith('Zwiastun'):
                        continue
                    names_found = names_found.split('/')
                    names_found = [cleantitle.normalize(cleantitle.getsearch(i)) for i in names_found]
                    for name in names_found:
                        name = name.replace("  "," ")
                        title = title.replace("  "," ")
                        words = title.split(" ")
                        found_year = self.try_read_year(url)
                        if self.contains_all_wors(name, words) and (not found_year or found_year == year):
                            return url
                        else:
                            continue
                    continue
        except Exception as e:
            print(e)
            return
Code example #22
    def search(self, title, localtitle, year, search_type):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'search': cleantitle.query(title)})
                r = self.get_rows(r, search_type)

                for row in r:
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    names_found = client.parseDOM(row, 'h3')[0]
                    if names_found.startswith('Zwiastun') and not title.startswith('Zwiastun'):
                        continue
                    names_found = names_found.split('/')
                    names_found = [cleantitle.normalize(cleantitle.getsearch(i)) for i in names_found]
                    for name in names_found:
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        found_year = self.try_read_year(url)
                        if self.contains_all_wors(name, words) and (not found_year or found_year == year):
                            return url
                        continue
                    continue
        except Exception as e:
            print(e)
            return
Code example #23
File: main.py Project: eliteironlix/kodi
def mySearch():
    keyb = xbmc.Keyboard('', "Wyszukiwarka kreskówek")
    keyb.doModal()
    if keyb.isConfirmed() and len(keyb.getText().strip()) > 0:
        search = keyb.getText()
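        # URL-encode the query before building the search URL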
        myParam = str(urllib.quote(search)).strip()
        url = "http://www.kreskowkazone.pl/szukaj?szukana=" + myParam
        r = client.request(url)
        result = client.parseDOM(r, 'div', attrs={'class': 'box-img'})
        linki = client.parseDOM(result, 'a', ret='href')
        if len(linki) == 0:
            addDir("Zbyt dużo lub brak wyników wyszukiwania :(", '', None,
                   'ikona.png', 'thumb.png', "", "", "", "")
            addDir("Spróbuj doprecyzować zapytanie!", '', None, 'ikona.png',
                   'thumb.png', "", "", "", "")
        nazwy = client.parseDOM(result, 'a')
        nazwy = client.parseDOM(nazwy, 'img', ret='alt')

        counter = 0
        for link in linki:
            linki[counter] = 'http://www.kreskowkazone.pl/' + str(link)
            addDir(str(nazwy[counter]), linki[counter], 4, 'ikona.png',
                   'thumb.png', "", "", "", "")
            counter += 1
    else:
        CATEGORIES()
Code example #24
File: main.py Project: po50on/cherry-dev
def list_episodes(nazwaSerii, wersja, fanart):
    lista = []
    lista_tytulow = []
    ocena = []

    cookie = cache.cache_get('strefadb_cookie')['value']
    if type(cookie) is dict:
        cookie = cookie['value']
    HEADERS['Cookie'] = cookie

    url = 'https://strefadb.pl/odcinki/' + nazwaSerii + '.html'
    result = s.get(url, headers=HEADERS).content
    test = client.parseDOM(result, 'ul', attrs={'class': 'lista-odcinkow'})
    test = client.parseDOM(test, 'li')
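    # keep only <li> entries that are real episode links: drop '?typ' filter links and nested lists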
    for item in test:
        if 'a href' in item and '?typ' not in item and 'ul class' not in item:
            lista.append(item)
    for item in lista:
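        # each title ends with its rating in parentheses, e.g. 'Episode title (8.55)'; split the two apart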
        try:
            item = client.parseDOM(item, 'a')
            lista_tytulow.append(str(item[0]))
            ocena.append(float(lista_tytulow[-1][-5:-1]))
            lista_tytulow[-1] = lista_tytulow[-1][:-7]
        except:
            pass
    i = 1
    while i < len(lista_tytulow) + 1:
        url = 'https://strefadb.pl/odcinki/' + nazwaSerii + '-' + str(
            i) + '.html'
        addon.addDir(str(i) + ' ' + str(lista_tytulow[i - 1]),
                     url,
                     mode='ListowanieLinkow',
                     fanart=fanart,
                     rating=ocena[i - 1])
        i += 1
Code example #25
File: primewire.py Project: po50on/cherry-dev
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')

            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = proxy.parse(url)
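                    # the proxy link hides the real target base64-encoded in its 'url' query parameter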
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality,info = source_utils.get_release_quality(quality, url)

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #26
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = urlparse.urljoin(self.base_link, self.search_link)
                url = url % urllib.quote(str(title).replace(" ", "_"))
                result = client.request(url)
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'video-clip-wrapper'})

                for item in result:
                    link = str(client.parseDOM(item, 'a', ret='href')[0])
                    nazwa = str(
                        client.parseDOM(item,
                                        'a',
                                        attrs={'class':
                                               'link-title-visit'})[0])
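                    # match when every word of the searched title appears in the found name and the year is present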
                    name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                    name = name.replace("  ", " ")
                    title = title.replace("  ", " ")
                    words = title.split(" ")
                    if self.contains_all_wors(name,
                                              words) and str(year) in name:
                        return link
        except Exception as e:
            log_exception()
            return
Code example #27
File: main.py Project: po50on/cherry-dev
def WysiwetlanieLinkow():

    url = params['url']
    name = params['name']
    with requests.session() as sess:
        r = sess.get(url).text
        result = client.parseDOM(
            r,
            'table',
            attrs={
                'class':
                'table table-bordered table-striped table-hover episode'
            })
        result = client.parseDOM(result, 'tbody')
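        # each table row holds the episode name, a language sprite class, and an encoded data token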
        for tr in client.parseDOM(result, 'tr'):
            r = re.search(
                r'(?is)[^<]*(?:<tr[^]>*>[^<]*)?<td[^>]*>(?P<name>[^<]*)<.*?'
                r'<span class="(?:sprites +)?(?P<lang>[^"]*?)(?: +lang)?">.*?'
                r'\bdata-\w+="(?P<data>[^"]*)"', tr)
            if r:
                name, lang, data = r.group('name', 'lang', 'data')
                try:
                    host, link = get_epsiode_link(sess, data)
                except InvalidLink:
                    pass
                else:
                    name = "[B][COLOR green]{host}:[/COLOR] {name}[/B] ({lang})".format(
                        **locals())
                    addon.addLink(name, link, mode=6)
Code example #28
File: iitvx.py Project: po50on/cherry-dev
    def search(self, titles, season, episode):
        try:
            for title in titles:
                log("FanFilm.IITVX Wyszukiwanie serialu po tytule: %s" % title)
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                    'Referer': 'http://iitvx.pl/'
                }
                data = {'text': title}
                result = client.request(self.search_link, post=data, headers=headers)
                if result is None:
                    continue
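                # build the zero-padded 'SxxExx' tag (e.g. S01E05) matched against the episode list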
                query = 'S00E00'
                if int(season) < 10:
                    query = query.replace('S00', 'S0' + season)
                else:
                    query = query.replace('S00', 'S' + season)
                if int(episode) < 10:
                    query = query.replace('E00', 'E0' + episode)
                else:
                    query = query.replace('E00', 'E' + episode)
                result = client.parseDOM(result, 'div', attrs={'class': 'episodes-list'})
                results = client.parseDOM(result, 'li')
                for result in results:
                    test = client.parseDOM(result, 'span')[1]
                    if query == str(test):
                        log("FanFilm.IITVX Znalazlem odcinek: %s" % query)
                        link = client.parseDOM(result, 'a', ret='href')[0]
                        log("FanFilm.IITVX Znalazlem serial pod linkiem: %s" % link)
                        return link
        except Exception:
            log_exception()
            return
Code example #29
File: main.py Project: po50on/cherry-dev
def Rankingi(counter):
    url = params['url']
    r = client.request(url)

    result = client.parseDOM(
        r,
        'table',
        attrs={
            'class': 'table table-bordered table-striped table-hover ranking'
        })
    linki = client.parseDOM(result, 'a', ret='href')
    nazwy = client.parseDOM(result, 'a')
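    # 'counter' is the starting offset into the scraped lists; 'n' is the rank number shown to the user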
    n = 1
    try:
        for link in linki:
            linki[counter] = 'http://animezone.pl' + linki[counter]
            addon.addDir(str(n) + ". " + str(nazwy[counter]).replace(
                "<mark>", "").replace("</mark>", ""),
                         linki[counter],
                         mode=4)
            counter += 1
            n += 1
    except:
        log_exception()
Code example #30
File: vodly.py Project: po50on/cherry-dev
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            items = []
            clean_title = cleantitle.geturl(title) + '-' + year
            search_url = urlparse.urljoin(
                self.base_link,
                self.search_link % clean_title.replace('-', '+'))
            r = cache.get(client.request, 1, search_url)
            r = client.parseDOM(r, 'div', {'class': 'col-sm-12'})
            r = client.parseDOM(r, 'div', {'class': 'col-sm-2.+?'})
            r1 = client.parseDOM(r, 'h3')
            r1 = [(client.parseDOM(i, 'a',
                                   ret='href')[0], client.parseDOM(i, 'a')[0])
                  for i in r1]
            y = [re.findall('</i>\s*(\d{4})</span>', i) for i in r]

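            # pair each (href, title) with its year, then keep the entry whose title and year both match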
            items += [(r1[i], y[i]) for i in range(len(y))]

            r = [(i[0][0], i[1][0], i[0][1]) for i in items
                 if (cleantitle.get(i[0][1]) == cleantitle.get(title)
                     and i[1][0] == year)]
            url = r[0][0]

            return url
        except Exception:
            return