Example #1
    def get_source(self, url, title, year, season, episode, start_time):
        try:
            self.items = []
            count = 0
            if url is None:
                return self.sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            if 'tvshowtitle' in data:
                self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
                query = '%s %s' % (data['tvshowtitle'], self.hdlr)
            else:
                self.hdlr = data['year']
                query = '%s %s' % (data['title'], data['year'])
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            # Query the first three result pages of the appropriate search.
            search = self.tvsearch if 'tvshowtitle' in data else self.moviesearch
            urls = [search.format(urllib.quote(query), page)
                    for page in ('1', '2', '3')]
            threads = [workers.Thread(self._get_items, url) for url in urls]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            threads2 = [workers.Thread(self._get_sources, i)
                        for i in self.items]
            count = len(self.items)
            for t in threads2:
                t.start()
            for t in threads2:
                t.join()

            if dev_log == 'true':
                end_time = time.time() - float(start_time)
                send_log(self.name,
                         end_time,
                         count,
                         title,
                         year,
                         season=season,
                         episode=episode)
            # xbmc.log('@#@SOURCES:%s' % self._sources, xbmc.LOGNOTICE)
            return self.sources
        except Exception as argument:
            if dev_log == 'true':
                error_log(self.name, argument)
            return self.sources
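
The method above fans work out in two waves: one thread per search page to collect items, then one thread per item to resolve sources, joining each wave before moving on. A minimal self-contained sketch of the same fan-out/join pattern, assuming workers.Thread is a thin wrapper over the standard library's threading.Thread:

import threading

def fan_out(func, args_list):
    # One thread per work item; join the whole wave before returning.
    threads = [threading.Thread(target=func, args=args) for args in args_list]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

results = []

def fetch(url):
    # Stand-in for self._get_items(url): append parsed items to a shared list.
    results.append(url)

fan_out(fetch, [('page1',), ('page2',), ('page3',)])
print(results)  # all three pages processed before execution reaches here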
Example #2
    def scrape_episode(self,
                       title,
                       show_year,
                       year,
                       season,
                       episode,
                       imdb,
                       tvdb,
                       debrid=False):
        try:
            start_time = time.time()
            hdlr = 'S%02dE%02d' % (int(season), int(episode))
            query = clean_search(title)
            self.query = urllib.quote_plus(query + ' ' + hdlr).replace(
                '+', '%20')
            threads = [
                workers.Thread(self._get_sources, link, title, year, 'show',
                               season, episode, str(start_time))
                for link in self.search_links
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            return self.sources
        except Exception as argument:
            if dev_log == 'true':
                error_log(self.name, argument)
            return self.sources
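
The search term is percent-encoded with quote_plus and the '+' placeholders are then swapped for '%20', since some indexers only accept '%20' as the space separator. A quick illustration with Python 2's urllib, as used throughout these examples (the title is illustrative):

import urllib

query = 'breaking bad'   # illustrative clean_search(title) output
hdlr = 'S02E05'
encoded = urllib.quote_plus(query + ' ' + hdlr).replace('+', '%20')
print(encoded)  # breaking%20bad%20S02E05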
Example #3
    def scrape_movie(self, title, year, imdb, debrid=False):
        try:
            start_time = time.time()
            search_id = '%s %s' % (title, year)
            movie_url = self.base_link + self.search_link % urllib.quote_plus(
                search_id)

            r = client.request(movie_url)
            items = client.parseDOM(r, 'article', attrs={'id': 'post-\d+'})
            #xbmc.log('@#@ITEMS:%s' % items, xbmc.LOGNOTICE)
            links = []
            for item in items:
                name = client.parseDOM(item, 'a')[0]
                name = client.replaceHTMLCodes(name)
                t = re.sub(
                    r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                    '',
                    name,
                    flags=re.I)

                if clean_title(title) != clean_title(t):
                    continue
                if year not in name:
                    continue
                link = client.parseDOM(item, 'a', ret='href')[0]
                link += '/2/'
                links.append(link)
            #xbmc.log('@#@LINKS:%s' % links, xbmc.LOGNOTICE)
            threads = [
                workers.Thread(self.get_source, i, title, year, '', '',
                               str(start_time))
                for i in links
            ]
            for t in threads:
                t.start()

            # Poll until every worker finishes (equivalent to joining them all).
            while any(t.is_alive() for t in threads):
                time.sleep(0.1)

            return self.sources
        except Exception as argument:
            if dev_log == 'true':
                error_log(self.name, argument)
            return self.sources
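
Title matching in these examples funnels through the project's clean_title/clean_search helpers, whose implementation is not shown here. A hypothetical minimal stand-in, assuming they simply lower-case the name and strip non-alphanumerics, would be:

import re

def clean_title(title):
    # Hypothetical helper: lower-case and drop anything that is not a letter
    # or digit, so dots, dashes and spacing differences vanish.
    return re.sub(r'[^a-z0-9]', '', title.lower())

print(clean_title('Blade Runner 2049') == clean_title('blade.runner.2049'))  # True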
Example #4
    def get(self, netloc, ua, timeout):
        try:
            self.netloc = netloc
            self.ua = ua
            self.timeout = timeout
            self.cookie = None
            # for i in range(0, 5): threads.append(workers.Thread(self._get_cookie))
            threads = [workers.Thread(self._get_cookie)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            # join() guarantees no worker is still alive, so no polling loop
            # is needed after this point.
            if self.cookie is None:
                xbmc.log('%s returned an error. Could not collect tokens.' % netloc,
                         xbmc.LOGDEBUG)
            return self.cookie
        except Exception as e:
            xbmc.log('%s returned an error. Could not collect tokens - Error: %s.'
                     % (netloc, str(e)), xbmc.LOGDEBUG)
            return self.cookie
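
The _get_cookie worker is not included in the snippet. Since Example #6 already relies on cfscrape, one plausible sketch, purely an assumption about how the token might be collected, is:

import cfscrape

class CookieGetter(object):
    # Hypothetical container mirroring the fields set up in get() above.
    def __init__(self, netloc, ua):
        self.netloc = netloc
        self.ua = ua
        self.cookie = None

    def _get_cookie(self):
        # Assumed body: solve the Cloudflare challenge for self.netloc and
        # store the resulting cookie header string on the instance.
        try:
            tokens, _ua = cfscrape.get_tokens(
                'http://%s/' % self.netloc, user_agent=self.ua)
            self.cookie = '; '.join('%s=%s' % (k, v) for k, v in tokens.items())
        except Exception:
            self.cookie = None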
Example #5
    def scrape_movie(self, title, year, imdb, debrid=False):
        try:
            start_time = time.time()
            query = clean_search(title)
            self.query = urllib.quote_plus(query + ' ' + year).replace(
                '+', '%20')

            threads = [
                workers.Thread(self._get_sources, link, title, year, 'movie',
                               '', '', str(start_time))
                for link in self.search_links
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            return self.sources
        except Exception as argument:
            if dev_log == 'true':
                error_log(self.name, argument)
            return self.sources
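
All of these scrapers expose the same scrape_movie/scrape_episode surface, so a dispatcher can drive them interchangeably. Hypothetical usage (the class name and argument values are illustrative, not from the snippet):

scraper = ExampleScraper()  # any class built from the methods above
for src in scraper.scrape_movie('Inception', '2010', 'tt1375666'):
    print(src)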
Example #6
    def scrape_movie(self, title, year, imdb, debrid=False):
        try:
            start_time = time.time()
            if not debrid: return self.sources

            query = '%s %s' % (title, year)
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = urllib.quote_plus(query).replace('+', '%2B')
            url = urlparse.urljoin(self.search_base_link,
                                   self.search_link % query)
            headers = {'User-Agent': client.agent(), 'Referer': self.base_link}
            scraper = cfscrape.create_scraper()
            r = scraper.get(url, headers=headers).content
            posts = json.loads(r)['results']
            posts = [(i['post_title'], i['post_name']) for i in posts]
            posts = [i for i in posts if clean_title(
                i[0].lower().split(year)[0]) == clean_title(title)]

            quality_tags = [
                'uhd', '4K', '2160', '1080', '720', 'hevc', 'bluray', 'web'
            ]
            posts = [(urlparse.urljoin(self.base_link, i[1]), year)
                     for i in posts if any(x in i[1] for x in quality_tags)]

            threads = [
                workers.Thread(self.get_sources, i, title, year, '', '',
                               str(start_time))
                for i in posts
            ]
            for t in threads:
                t.start()

            # Poll until every worker finishes (equivalent to joining them all).
            while any(t.is_alive() for t in threads):
                time.sleep(0.1)

            return self.sources
        except Exception as argument:
            if dev_log == 'true':
                error_log(self.name, argument)
            return self.sources
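
Examples #3 and #6 wait by polling is_alive() rather than joining, which makes it easy to cap the total wait across all workers. A sketch of that idea with an overall deadline (the 20-second limit is an assumption, not taken from the snippet):

import time

def wait_with_deadline(threads, limit=20.0):
    # Poll instead of join() so one deadline covers the whole batch of
    # workers rather than timing out each thread individually.
    deadline = time.time() + limit
    while any(t.is_alive() for t in threads) and time.time() < deadline:
        time.sleep(0.1)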