Code example #1
0
    def __search(self, titles, year):
        """Search the site for any of *titles* and return the first result URL
        whose cleaned title matches and whose year is within 1 of *year*.

        :param titles: iterable of candidate title strings to try in order
        :param year: release year (string or int) used to disambiguate results
        :return: domain-stripped URL of the first matching result, or None
        """
        try:
            # Normalized forms of the candidate titles, used to compare
            # against the (cleaned) titles scraped from each result item.
            t = [cleantitle.get(i) for i in set(titles) if i]

            for title in titles:
                # Fetch the (cached, 8h) search-results page for this title.
                # NOTE: a previously dead local `url` holding this same
                # expression was removed — it was computed but never used.
                r = cache.get(self.scraper.get, 8, self.search_link %
                              cleantitle.getsearch(title)).content
                link = dom_parser.parse_dom(r,
                                            'div',
                                            attrs={'class': 'result-item'})
                # For each result item: (title div, year span text).
                link = [(dom_parser.parse_dom(l,
                                              'div',
                                              attrs={'class': 'title'}),
                         dom_parser.parse_dom(l,
                                              'span',
                                              attrs={'class':
                                                     'year'})[0].content)
                        for l in link]
                # Drill into the first anchor of each title div.
                link = [(dom_parser.parse_dom(i[0], 'a')[0], i[1])
                        for i in link]
                link = [(i[0].attrs['href'], i[0].content, i[1]) for i in link]
                # Keep only results whose cleaned title matches a candidate
                # and whose year differs from the requested year by at most 1.
                link = [
                    i[0] for i in link if cleantitle.get(i[1]) in t
                    and abs(int(i[2]) - int(year)) < 2
                ]

                if len(link) > 0:
                    return source_utils.strip_domain(link[0])
            return
        except:
            # Deliberate best-effort: any scraping/parsing failure means
            # "no match" rather than an error propagated to the caller.
            return
Code example #2
0
    def __resolve(self, post, nume):
        """Resolve a hoster link via the site's AJAX player endpoint.

        Posts the player-AJAX form for the given *post* id / *nume* player
        number, extracts the iframe source, follows one 'streamit' redirect
        layer if present, then pulls the final URL from the og:url meta tag.
        """
        ajax_url = urlparse.urljoin(self.base_link, self.get_hoster)

        payload = {
            'action': 'doo_player_ajax',
            'post': post,
            'nume': nume,
            'type': 'movie'
        }

        html = self.scraper.post(ajax_url, data=payload).content

        # The player response embeds the hoster page in an iframe.
        hoster = dom_parser.parse_dom(html, 'iframe')[0].attrs['src']

        if 'streamit' in hoster:
            # Extra indirection layer: the real URL lives in an og:url meta.
            page = self.scraper.get(hoster, verify=False).content
            hoster = dom_parser.parse_dom(page,
                                          'meta',
                                          attrs={'name':
                                                 'og:url'})[0].attrs['content']

        # Final hop: the hoster page itself advertises the stream URL
        # in its og:url meta tag.
        final_page = self.scraper.get(hoster).content
        return re.findall('name="og:url" content="(.*?)"', final_page)[0]
Code example #3
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Return the (domain-stripped) page URL for one episode of a show.

        Scrapes the show page's season list, picks the block whose season
        label matches *season*, then finds the episode entry whose
        'numerando' text equals '<season> - <episode>'.
        """
        show_url = urlparse.urljoin(self.base_link, url)

        # Cached (8h) fetch of the show page.
        html = cache.get(self.scraper.get, 8, show_url).content

        season_blocks = dom_parser.parse_dom(html, 'div', attrs={'id': 'seasons'})
        season_blocks = dom_parser.parse_dom(season_blocks, 'div', attrs={'class': 'se-c'})

        # Keep only the block whose season label matches.
        matching_blocks = []
        for block in season_blocks:
            label = dom_parser.parse_dom(block, 'span', attrs={'class': 'se-t'})[0].content
            if label == season:
                matching_blocks.append(block)

        episode_items = dom_parser.parse_dom(matching_blocks, 'li')

        # Episode entries are labelled '<season> - <episode>'.
        wanted_label = season + ' - ' + episode
        hrefs = []
        for item in episode_items:
            numerando = dom_parser.parse_dom(item, 'div', attrs={'class': 'numerando'})[0].content
            if numerando == wanted_label:
                hrefs.append(dom_parser.parse_dom(item, 'a')[0].attrs['href'])

        return source_utils.strip_domain(hrefs[0])