Example No. 1
 def scrape_movies(self, max=1):
     content = dict(new=list(), top=list(), popular=list(), old=list())
     for categorie in content.keys():
         for id in range(1, max + 1):
             soup = Utils.page_downloader(
                 f'{self.link}{categorie}?page={id}')
             con = soup.find(class_='movies movies_small')
             content[categorie].extend([
                 Utils.pickup_class(movie['href'])
                 for movie in con.find_all(class_='movie')
             ])
     return content
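A minimal consumption sketch for the dictionary returned above; the instance name site and the Site class are placeholders, not from the source:

    # hypothetical instance of whatever class defines scrape_movies above
    site = Site()
    movies = site.scrape_movies(max=2)   # scrape pages 1-2 of every category
    # the category keys come from the method itself: new, top, popular, old
    for category, items in movies.items():
        print(category, len(items))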
Example No. 2
    def filter(cls, material: str, options: list, max: int = 5) -> list:
        """ filter movies based on options
        options supported values:
        filter : latest, top, popular
        year : year if under 2000 add 's' example -> 2012, 1950s
                bad usage ->  2012s, 1951
        language: language example -> arabic, english
        country : iso code of the country example -> us, ma
        type: type -> example -> action, documentary
        content rating : example -> [PG-13] +13 , unrated,
        quality: example -> bluray, hdrip
        precision : example -> 1080p, 720p
        example of filtering - > Site.filter('tv', ['top', 'us', '1080p'])
        1) order doesn't matter
        2) pass options as a list
        3) options are optinal you don't have to pass all of them
        """
        material = material.lower()
        if material not in cls.supported:
            print(f'Supported filters are {", ".join(cls.supported)}')
            return None
        text = ''
        for page in range(1, max + 1):
            query = cls.filter_api.format(material=material,
                                          options='-'.join(options),
                                          page=page)
            r = requests.get(query).json()
            text += r['html']

        soup = BeautifulSoup(text, 'lxml')
        return [
            Utils.pickup_class(link['href'],
                               title=link.find(class_='title').text)
            for link in soup.find_all(class_='movie')
        ]
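A short usage sketch built on the docstring's own example call; Site stands in for whichever class defines filter, and the title attribute on the returned objects is an assumption:

    # order of the options does not matter and every option is optional
    results = Site.filter('tv', ['top', 'us', '1080p'], max=2)
    for movie in results or []:      # filter returns None for an unsupported material
        print(movie.title)           # assumes the objects built by Utils.pickup_class expose a title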
Example No. 3
 def test_path(self):
     page = '{}{}'.format(self.elem['domain'], self.elem['test_path'])
     if 'vidstream' in self.elem['domain']:
         return page
     link = Utils.page_downloader(page).find(
         class_='nop btn g dl _open_window')['data-url']
     return f'{self.elem["domain"]}{link}'
Example No. 4
 def get_mp4_link(self, item=None):
     link = f'{self.api.rstrip("/")}{self.items[item]["download"]}'
     vid_link = reeeeeee.get(link,
                             headers=parameters.headers,
                             cookies=self.egy_token).url
     page = Utils.page_downloader(vid_link, cookies=self.vid_stream)
     return page.find(class_='bigbutton').attrs['href']
Example No. 5
 def shows_container_scrape(self, item):
     container = item.find(class_='contents movies_small')
     return [
         Utils.pickup_class(link=link['href'],
                            title=link.find(class_='title').text,
                            thumbnail=link.img['src'])
         for link in container.find_all('a')
     ]
Example No. 6
    def __init__(self, link, **kwargs):
        self.link = link
        self.page_type = Utils.page_type(link)
        self.access = Settings().auto_init

        if not self.access:
            if 'title' in kwargs:
                self.title, self.year = Utils.title_parser(kwargs['title'])
                if self.year is None:
                    year_url = Utils.url_to_name(link)[1]
                    self.year = year_url if year_url.isnumeric() else None
            else:
                self.title, self.year = Utils.url_to_name(link)

            for name, value in kwargs.items():
                if name != 'title':
                    setattr(self, name, value)
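A hedged instantiation sketch for this constructor; the class name Movie, the URL, and the extra keyword arguments are placeholders. With Settings().auto_init disabled, the title keyword is parsed into title and year, and every other keyword becomes an attribute:

    # hypothetical class whose __init__ is shown above; URL and kwargs are placeholders
    movie = Movie('https://example.com/movie/some-title-2006',
                  title='Some Title 2006',
                  quality='1080p')    # with auto_init off, extra kwargs become attributes
    print(movie.title, movie.year, movie.quality)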
Example No. 7
 def get_seasons(self):
     """ return all seasons of a serie """
     container = self.soup.find(class_='contents movies_small')
     return [Utils.pickup_class(
         link=link['href'],
         title=link.find(class_='title').text,
         thumbnail=link.img['src']
     )
         for link in container.find_all('a')]
Example No. 8
    def get_similar(self):
        """get similar movies."""
        container = self.soup.find(class_='contents movies_small')

        return [Utils.pickup_class(
            link['href'],
            title=link.find(class_='title').text
        )
            for link in container.find_all('a')]
Example No. 9
 def get_episodes(self):
     """ get all episodes of a Season """
     container = self.soup.find(class_='movies_small')
     return [
         Utils.pickup_class(
             link['href'],
             title=link.find(class_='title').text,
             thumbnail=link.img['src'],
         ) for link in container.find_all('a')
     ]
Example No. 10
 def get_actors(self):
     """ get actors and thire info from a material page """
     container = self.soup.find(class_='rs_scroll pdt pdr')
     content = list()
     for actor in container.find_all(class_='cast_item'):
         # the second 'td vam' cell holds the actor's link, name and role; the first holds the image
         cells = actor.find_all(class_='td vam')
         name = cells[1].a.text
         content.append(
             Actor(cells[1].a['href'],
                   role=Utils.fix_actor_role(name, cells[1].span.text),
                   _name=name,
                   _image=cells[0].img['src']))
     return content
Example No. 11
 def search(cls, query: str, access=True, **kwargs) -> list or None:
     """ a class to search inside the site : movies, series ...
     return instance of class's that blong the the type of result
     """
     tamplate = '%s/{}' % cls.my_site
     pram = (('q', query), )
     r = requests.get(cls.search_api, params=pram, **kwargs).json()
     if r:
         return [
             Utils.pickup_class(
                 link=tamplate.format(movie['u']),
                 title=movie['t'],
             ) for elem in r for movie in r[elem]
         ]
     return None
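An illustrative call to the search classmethod; the owner class name and the title/link attributes on the results are assumptions based on the values handed to Utils.pickup_class:

    results = Site.search('interstellar')    # returns None when the API gives nothing back
    for item in results or []:
        print(item.title, item.link)         # attribute names mirror the kwargs given to Utils.pickup_class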
Example No. 12
 def soup(self):
     return Utils.page_downloader(self.link)
Example No. 13
 def name(self):
     if Settings().auto_init:
         return self.scrape_image()['name']
     return ' '.join(Utils.url_to_name(self.link)).title()
Example No. 14
 def soup(self):
     if not hasattr(self, '_soup'):
         self._soup = Utils.page_downloader(self.link).find(id='mainLoad')
     return self._soup
Example No. 15
 def __init__(self):
     self.link = Utils.make_link()
     for name, value in self.scrape_home_page().items():
         setattr(self, name, value)