Code example #1
 def search_ep(self, titles, season, episode, tvdb):
     try:
         odcinek = source_utils.absoluteNumber(tvdb, episode, season)  # absolute episode number ('odcinek' is Polish for 'episode')
         for title in titles:
             title = cleantitle.normalize(
                 cleantitle.getsearch(title)).replace(" ", "+").replace(
                     "shippuden", "shippuuden")
             r = self.session.get(self.search_link % title).content
             result = client.parseDOM(r,
                                      'div',
                                      attrs={
                                          'class': 'description pull-right'
                                      })  # links and their descriptions
             linki = client.parseDOM(result, 'a', ret='href')
             nazwy = client.parseDOM(result, 'a')
             for row in zip(linki, nazwy):
                 try:
                     tytul = re.findall("""<mark>(.*)</mark>""", row[1])[0]
                 except:
                     continue
                 tytul = cleantitle.normalize(
                     cleantitle.getsearch(tytul)).replace("  ", " ")
                 words = tytul.split(" ")
                 if self.contains_all_words(title, words):
                     link = self.base_link + row[0].replace(
                         'odcinki', 'odcinek') + '/' + odcinek
                     return link
     except:
         return
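
Both this example and example #4 call a contains_all_words helper that is not shown here. A minimal sketch of what such a check could look like, assuming it only verifies that every word of the scraped title appears in the normalized search title (the real helper may normalize differently):

 def contains_all_words(self, title, words):
     # Hypothetical reconstruction of the missing helper: undo the '+'
     # separators inserted above and require every non-empty word to
     # occur in the lower-cased search title.
     title = title.replace('+', ' ').lower()
     return all(word.lower() in title for word in words if word)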
Code example #2
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     anime = source_utils.is_anime('show', 'tvdb', tvdb)
     self.anime = anime
     if anime:
         epNo = " " + source_utils.absoluteNumber(tvdb, episode, season)
     else:
         epNo = ' s' + season.zfill(2) + 'e' + episode.zfill(2)
     return [url[0] + epNo, url[1] + epNo, '', anime]
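
For reference, a hedged sketch of what this method returns for a non-anime show; the inputs are made up, and url is assumed to be the two-element title list built elsewhere in the scraper:

 # Made-up inputs, purely for illustration; season and episode arrive as strings.
 url = ['Some Show', 'Some Show']
 season, episode = '2', '5'
 epNo = ' s' + season.zfill(2) + 'e' + episode.zfill(2)
 print([url[0] + epNo, url[1] + epNo, '', False])
 # -> ['Some Show s02e05', 'Some Show s02e05', '', False]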
Code example #3
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            cookies = cache.cache_get('alltube_cookie')['value']  # session cookie previously stored in the add-on cache
            self.anime = source_utils.is_anime('show', 'tvdb', tvdb)
            if self.anime:
                txts = 's01e%02d' % int(
                    source_utils.absoluteNumber(tvdb, episode, season))
            else:
                txts = 's%02de%02d' % (int(season), int(episode))
            result = client.request(url, headers={'Cookie': cookies})
            # result = requests.get(url).content
            result = client.parseDOM(result, 'li', attrs={'class': 'episode'})
            result = [i for i in result if txts in i][0]  # first episode entry whose label contains the tag built above
            url = client.parseDOM(result, 'a', ret='href')[0]
            url = url.encode('utf-8')
            return url
        except:
            return
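
A quick sketch of the two label formats matched against the page's episode list above: regular shows use the usual sNNeNN tag, while anime episodes are looked up under season 1 by their absolute episode number (the values below are made up):

    # Made-up values, for illustration only.
    print('s%02de%02d' % (2, 5))   # 's02e05'  -> regular show, season 2 episode 5
    print('s01e%02d' % 30)         # 's01e30'  -> anime, absolute episode 30 listed under season 1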
Code example #4
    def search_ep(self, titles, season, episode, tvdb):
        try:
            cookies = client.request("https://shinden.pl/", output='cookie')
            headers = {
                'authority': 'shinden.pl',
                'cache-control': 'max-age=0',
                'origin': 'https://shinden.pl',
                'upgrade-insecure-requests': '1',
                'dnt': '1',
                'content-type': 'application/x-www-form-urlencoded',
                'user-agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.27 Safari/537.36',
                'accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
                'referer': 'https://shinden.pl/',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
            }
            headers.update({'Cookie': cookies})
            data = {
                'username': self.user_name,
                'password': self.user_pass,
                'login': ''
            }

            cookie = requests.post('https://shinden.pl/main/0/login',
                                   headers=headers,
                                   data=data)
            kuki = cookie.cookies.items()
            self.cookies = "; ".join([str(x) + "=" + str(y) for x, y in kuki])
            # Fall back to the anonymous session cookies if the login request failed
            if not cookie:
                self.cookies = cookies

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.75 Safari/537.36',
                'Accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
                'Connection': 'keep-alive',
                'Upgrade-Insecure-Requests': '1',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'TE': 'Trailers',
                'Cookie': self.cookies
            }
            odcinek = source_utils.absoluteNumber(tvdb, episode, season)
            for title in titles:
                title = cleantitle.normalize(
                    cleantitle.getsearch(title)).replace(" ", "+").replace(
                        "shippuden", "shippuuden")
                filtr = "&series_type%5B0%5D=TV&series_status%5B0%5D=Currently+Airing&series_status%5B1%5D=Finished+Airing&one_online=true"
                r = self.session.get(self.search_link % title + filtr,
                                     headers=headers).content
                result = [
                    item for item in client.parseDOM(
                        r, 'li', attrs={'class': 'desc-col'})
                    if str(item).startswith("<h3>")
                ]

                linki = [
                    item for item in client.parseDOM(result, 'a', ret='href')
                    if str(item).startswith("/titles")
                    or str(item).startswith("/series")
                ]
                nazwy = [
                    item for item in result
                    if re.search(r"<a href.*?>(.*?)</a>", item) is not None
                ]
                for row in zip(linki, nazwy):
                    try:
                        tytul = re.findall(r"<a href.*?>(.*?)</a>", row[1])[0]
                        tytul = tytul.replace("<em>", "").replace("</em>", "")
                    except:
                        continue
                    tytul = cleantitle.normalize(
                        cleantitle.getsearch(tytul)).replace("  ", " ")
                    words = tytul.split(" ")
                    if self.contains_all_words(title, words):
                        link = self.base_link + row[0] + "/all-episodes"
                        result = self.session.get(link,
                                                  headers=headers).content
                        result = client.parseDOM(
                            result,
                            'tbody',
                            attrs={'class': 'list-episode-checkboxes'})
                        result = client.parseDOM(result, 'tr')
                        for item in result:
                            item2 = client.parseDOM(item, 'td')[0]
                            if odcinek == str(item2):
                                return self.base_link + client.parseDOM(
                                    item, 'a', ret='href')[0]
        except Exception as e:
            print(e)
            return
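
The login block above flattens the cookies returned by requests.post into a single Cookie header string. A standalone sketch of that idiom, using a placeholder URL rather than the real login endpoint:

    import requests

    # Placeholder endpoint; only the cookie-flattening idiom is of interest here.
    resp = requests.post('https://example.org/login', data={'username': 'x', 'password': 'y'})
    cookie_header = "; ".join(str(k) + "=" + str(v) for k, v in resp.cookies.items())
    headers = {'Cookie': cookie_header}  # reused on subsequent requests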