Beispiel #1
0
    def get_stream_urls(self, movie_name, url):
        """
        Get stream urls from a movie page url.

        Scrapes ``loadEP(ep_id, server_id)`` calls from the page, fetches
        the matching ``actions.php`` endpoint for each and resolves any
        embedded iframe players found there.

        :param movie_name: display name used for every returned item
        :param url: movie page url to scrape
        :return: list of dicts with name/quality/quality_icon/url keys
        """
        # Example endpoint:
        # http://www.tamildbox.world/actions.php?case=loadEP&ep_id=1st&server_id=1577
        items = []
        soup = utils.get_soup_from_url(url)
        base_url = self.get_main_url()  # invariant: hoisted out of the loop

        # re.findall() is evaluated once up front, so the loop body may
        # safely fetch and parse other pages.
        for load_ep in re.findall(r"loadEP\((.*?)\)", soup.text):
            args = load_ep.split(',')
            ep_id = args[0].replace("'", "").rstrip()
            server_id = args[1].replace("'", "").rstrip()
            _action_url = '{}/actions.php?case=loadEP&ep_id={}&server_id={}'.format(
                base_url, ep_id, server_id)
            print('####### Action URL: {}'.format(_action_url))
            ep_soup = utils.get_soup_from_url(_action_url)

            # Grab embedded links. get('src') returns None for src-less
            # iframes, which would crash the substring tests below, so
            # filter those out here.
            embeded_urls = [
                iframe.get('src') for iframe in ep_soup.find_all('iframe')
                if iframe.get('src')
            ]

            print('##### embaded {}'.format(embeded_urls))

            for emb_url in embeded_urls:
                if 'ssfiles' in emb_url:
                    resolved = stream_resolver.resolve_ssfiles(emb_url)
                    items += [{
                        'name': movie_name,
                        'quality': item['quality'],
                        'quality_icon': item['quality_icon'],
                        'url': item['url']
                    } for item in resolved]

                elif 'dailymotion' in emb_url:
                    resolved = stream_resolver.load_dailymotion_video(emb_url)
                    items += [{
                        'name': movie_name,
                        'quality': 'Dailymotion',
                        'quality_icon': '',
                        'url': resolved
                    }]

                else:
                    print('Host {} not found in resolver'.format(emb_url))

        return items
Beispiel #2
0
    def get_stream_urls(self, name, url):
        """
        Get stream urls from a movie page url.

        Tries the tamiltvtube, youtube and dailymotion iframes found in the
        page's ``video_wrap`` div; each resolver failure is logged and
        skipped so the remaining hosts are still attempted.

        :param name: display name used for every returned item
        :param url: movie page url
        :return: list of dicts with name/quality/quality_icon/url keys
        """
        soup = utils.get_soup_from_url(url)
        stream_urls = []

        # May be None when the page has no video_wrap div; each resolver
        # attempt below tolerates that via its except clause.
        video_wrap = soup.find('div', {'class': 'video_wrap'})

        try:
            tamiltvtube = video_wrap.find(
                'iframe', {'src': re.compile(r'http://tamiltvtube.*?')})['src']
            stream_urls = stream_resolver.load_tamiltvtube_videos(tamiltvtube)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows SystemExit/KeyboardInterrupt.
            print('###### Except tamiltvtube')

        try:
            youtube = video_wrap.find(
                'iframe',
                {'src': re.compile(r'https?://.*?youtube.*?')})['src']
            resolved_url = stream_resolver.load_youtube_video(youtube)
            stream_urls.append({
                'url': resolved_url,
                'quality': 'YouTube',
                'quality_icon': ''
            })
        except Exception:
            print('###### Except youtube')

        try:
            dailymotion = video_wrap.find(
                'iframe',
                {'src': re.compile(r'https?://.*?dailymotion.*?')})['src']
            resolved_url = stream_resolver.load_dailymotion_video(dailymotion)
            print('##### Dailymotion resolved %s' % resolved_url)
            stream_urls.append({
                'url': resolved_url,
                'quality': 'Dailymotion',
                'quality_icon': ''
            })
        except Exception:
            print('###### Except dailymotion')

        print('#### Stream URL  %s' % stream_urls)

        return [{
            'name': name,
            'quality': stream_url['quality'],
            'quality_icon': stream_url['quality_icon'],
            'url': stream_url['url']
        } for stream_url in stream_urls if stream_url['url']]
Beispiel #3
0
    def get_programmes(self, url):
        """
        Get all programmes (one per channel) from the given section url.

        :param url: section page url
        :return: list of programme dicts sorted by name
        """
        items = []
        base_url = self.get_main_url()  # invariant: hoisted out of the loop

        soup = utils.get_soup_from_url(url)
        for video_colmn in soup.find_all('div', class_='video_colmn'):
            # find() returns None when the tag is absent; the original
            # crashed with TypeError/AttributeError on such columns.
            anchor = video_colmn.find('a')
            if anchor is None or anchor.get('href') is None:
                continue
            href = anchor['href']

            img_tag = video_colmn.find('img')
            if img_tag is None:
                continue
            name = img_tag['alt']
            img = img_tag['src']

            items.append(dict(name=name,
                              image=img,
                              url=base_url + href,
                              infos={'title': name}))

        return sorted(items, key=lambda k: k['name'])
Beispiel #4
0
    def get_stream_urls(self, movie_name, url):
        """
        Get stream urls from a movie page url.

        Only the ``videohost2`` host is supported; iframes on other hosts
        are ignored.

        :param movie_name: display name used for every returned item
        :param url: movie page url
        :return: list of dicts with name/quality/quality_icon/url keys
        """
        stream_urls = []
        soup = utils.get_soup_from_url(url)

        for iframe in soup.find_all('iframe'):
            src = iframe.get('src')
            if not src:
                # Iframe without a src attribute cannot be resolved.
                continue

            host = urlsplit(src).hostname
            if host is None:
                # Relative or malformed src: no hostname to dispatch on.
                continue
            host = host.replace('www.', '').replace('.com', '')

            print('hostname is ---> ' + host)

            if host.lower() == 'videohost2':
                stream_url = stream_resolver.load_videohost2_video(src)
                print('Got stream url for videohost2 : {0}'.format(stream_url))
                stream_urls.append(stream_url)
            else:
                print('Host ingored!!')

        return [{
            'name': movie_name,
            'quality': '',
            'quality_icon': '',
            'url': stream_url
        } for stream_url in stream_urls]
Beispiel #5
0
def load_fastplay_video(url_page):
    """
    Resolve playable stream urls from a fastplay embed page.

    Handles both the packed-JWPlayer variant ("p,a,c,k,e,d" marker) and
    the plain ``file:"..."`` / ``label:"..."`` source listing.

    :param url_page: fastplay embed page url
    :return: list of dicts with url/quality/quality_icon keys
    """
    soup = utils.get_soup_from_url(url_page)
    print("####### FASTURL")
    print(url_page)

    def _icon(quality):
        # Look up the matching module-level ICON_* constant by name;
        # replaces the original eval() with an explicit globals() lookup.
        return globals()["ICON_" + quality.replace("p", "")]

    if "p,a,c,k,e,d" in soup.text:
        jwp = utils.JWplayer(url_page)
        sources = jwp.sources()

        items = [{
            "url": source[0] + "|Referer=" + url_page,
            "quality": source[1],
            "quality_icon": _icon(source[1]),
        } for source in sources]

    else:
        # BUGFIX: the original pattern (http|https:.*?mp4) captured only
        # the literal "http" for plain-http links because the alternation
        # bound the whole group; https? matches both schemes correctly.
        links = re.findall(r'file:"(https?:.*?mp4)"', soup.text)
        qualities = re.findall(r'label:"(\d*p)"', soup.text)
        items = [{
            "url": link + "|Referer=" + url_page,
            "quality": quality,
            "quality_icon": _icon(quality),
        } for link, quality in zip(links, qualities)]

    return items
Beispiel #6
0
    def get_programmes(self, url):
        """
        Get all programmes from the given section url.

        :param url: section page url
        :return: list of programme dicts sorted by name
        """
        print('######## LOAD programmes')
        print('URL: {}'.format(url))

        items = []

        soup = utils.get_soup_from_url(url)
        for gallery_item in soup.find_all('div', {'class': 'gallery-item'}):
            img = ''
            # Tag.text never raises KeyError (the original's except clause
            # was dead code), so read it directly.
            title = gallery_item.text.strip()

            # Image: prefer src, fall back to the first srcset candidate.
            # (find_all()[0] raises IndexError, not KeyError as the
            # original assumed.)
            imgs = gallery_item.find_all('img')
            if imgs:
                src = imgs[0].get('src')
                if src is not None:
                    img = src
                else:
                    srcset = imgs[0].get('srcset')
                    if srcset:
                        img = srcset.split(',')[0].split(' ')[0]

            anchors = gallery_item.find_all('a')
            link = anchors[0].get('href') if anchors else None

            # BUGFIX: the original appended unconditionally, so items with
            # a missing link pushed the raw soup Tag into the result list,
            # breaking the sort below. Only append complete entries.
            if link is not None:
                items.append(dict(name=title,
                                  image=img,
                                  url=link,
                                  infos={'title': title}))

        return sorted(items, key=lambda k: k['name'])
Beispiel #7
0
def load_videohost2_video(url):
    """
    Resolve the direct stream url from a videohost2 player page.

    Two page variants are handled: a base64-encoded ("atob") embed, and a
    hex/byte-array obfuscated script from which a path fragment and file
    extension are recovered.

    :param url: videohost2 player page url (e.g. ``.../playd.php?id=...``)
    :return: the resolved stream url, or "" when nothing could be extracted
    """
    # url = 'http://videohost2.com/playd.php?id=cj8xavpa64mvw0vsdrr5m2s6c'
    # print (url)
    print("Url of video page {0}".format(url))
    stream_url_path = None
    ext = None
    stream_url = ""

    soup = utils.get_soup_from_url(url)
    scripts = soup.find_all("script", type="text/javascript")

    for script in scripts:
        # base64 ("atob") variant: decode the atob() argument and pull
        # the src=... url out of the decoded markup.
        # print (script)
        if "atob" in str(script):
            print("Traitement BASE64")
            regex = re.compile("atob\((.*?)\)")
            base64code = re.findall(regex, str(script))
            try:
                decoded_str = base64.b64decode(base64code[0].replace('"', ""))
                regex = re.compile("src='(.*?)\?")
                try:
                    stream_url = re.findall(regex, str(decoded_str))[0]
                except:
                    print("Src not found in base64 string")

            except:
                print("Error decode base64 string")

        else:
            # hex/byte-array variant: scan every `=[...]` array literal in
            # ALL scripts for an https path fragment and a file extension.
            print("Traitement HEX")
            regex = re.compile("=\[(.*?)];")
            scripts_bytecode = re.findall(regex, str(scripts))

            for script_bytecode in scripts_bytecode:
                for s in script_bytecode.split(","):
                    # NOTE(review): "string_escape" is a Python 2-only codec
                    # and str.decode does not exist on Python 3 — confirm
                    # the target interpreter before touching this branch.
                    t = s.decode("string_escape").decode("string_escape")
                    print(t)
                    if "https" in t:
                        stream_url_path = str(t).replace('"', "")

                    if "." in t:
                        ext = str(t).replace('"', "")

            # Rebuild the final url from the path fragment, the page's id
            # query value and the extension — unless the fragment already
            # looks like a complete .mp* url.
            if stream_url_path != None and ext != None:
                if ".mp" not in stream_url_path:
                    stream_url = stream_url_path + str(
                        url.split("=")[-1]) + ext
                else:
                    stream_url = stream_url_path
                # d = {'url': stream_url, 'quality': '720p', 'quality_icon': ICON_720}
                # items.append(d)
            else:
                print("Imposible to make stream_url for videohost2")

    return stream_url
Beispiel #8
0
    def get_stream_urls(self, movie_name, url):
        """
        Get stream urls from a movie page url.

        Dispatches each iframe to the resolver matching its hostname
        (vidmad, vidmx, fastplay, vidorg); unknown hosts are ignored.

        :param movie_name: display name used for every returned item
        :param url: movie page url
        :return: list of dicts with name/quality/quality_icon/url keys
        """
        # BUGFIX: the original initialised this to None, which crashed the
        # final comprehension with TypeError when no host ever matched.
        stream_urls = []
        soup = utils.get_soup_from_url(url)

        for iframe in soup.find_all("iframe"):
            src = iframe.get("src")
            if src is None:
                continue

            host = urlsplit(src).hostname
            if host is None:
                # Relative or malformed src: no hostname to dispatch on.
                continue
            # Strip the common prefix/TLD tokens so dispatch works on the
            # bare site name.
            for token in ("www.", ".com", ".tv", ".net", ".cc", ".sx", ".to"):
                host = host.replace(token, "")

            print("hostname is ---> " + host)

            host = host.lower()
            # NOTE: as in the original, a later matching iframe replaces
            # the result of an earlier one.
            if host == "vidmad":
                stream_urls = stream_resolver.load_vidmad_video(src)
            elif host == "vidmx.xyz":
                stream_urls = stream_resolver.load_vidmx_video(src)
            elif host == "fastplay":
                stream_urls = stream_resolver.load_fastplay_video(src)
            elif host == "vidorg":
                stream_urls = stream_resolver.load_vidorg_videos(src)
            else:
                print("Host ingored!!")

        return [{
            "name": movie_name,
            "quality": stream_url["quality"],
            "quality_icon": stream_url["quality_icon"],
            "url": stream_url["url"],
        } for stream_url in stream_urls if stream_url["url"]]
Beispiel #9
0
    def get_movies(self, url):
        """
        Get all movies from the given section url.

        :param url: section page url
        :return: list of movie dicts (deduplicated by title), optionally
                 followed by a 'Next Page' entry
        """
        movies = []
        added_items = []
        next_page = {}

        soup = utils.get_soup_from_url(url)

        # The pagination link is page-level, so resolve it once instead of
        # once per article as the original did.
        try:
            next_page_url = soup.find('a', class_='next')['href']
            next_page = {
                'name': 'Next Page',
                'image': ICON_NEXT,
                'infos': {},
                'url': next_page_url
            }
        except (TypeError, KeyError):
            # No "next" anchor on this page.
            pass

        for article in soup.find_all('article'):
            try:
                title = article.find('h3').find('a')['title']
                img = article.find('img')['src'].strip()
                href = article.find('h3').find('a')['href']
            except (AttributeError, TypeError, KeyError):
                # Article is missing its title, image or link: skip it.
                continue

            try:
                if title not in added_items:
                    movies.append(dict(
                        name=utils.movie_name_resolver(title),
                        image=img,
                        url=href,
                        infos={'title': utils.movie_name_resolver(title)}))
                    added_items.append(title)
            except Exception:
                # Best-effort: an unresolvable title skips just this entry.
                pass

        if next_page:  # If next page
            movies.append(next_page)

        return [movie for movie in movies if movie['name'] and movie['url']]
Beispiel #10
0
def resolve_ssfiles(url):
    """
    Resolve an ssfiles embed page into playable stream urls.

    :param url: ssfiles embed page url
    :return: list of dicts with url/quality/quality_icon keys
    """
    soup = utils.get_soup_from_url(url)
    links = re.findall(r'file:"(http:.*?)"', soup.text)
    qualities = re.findall(r'label:"(\d*p)"', soup.text)
    items = []
    for link, quality in zip(links, qualities):
        try:
            # Matching module-level ICON_* constant for the quality label.
            icon = eval("ICON_" + quality.replace("p", ""))
        except Exception:
            # Unknown label (e.g. a bare "p"): fall back to no icon instead
            # of crashing — consistent with load_vidorg_videos.
            icon = ""

        items.append({
            "url": link + "|Referer=" + url,
            "quality": quality,
            "quality_icon": icon,
        })

    return items
Beispiel #11
0
def load_vidorg_videos(url):
    """
    Resolve a vidorg embed page into playable stream urls.

    :param url: vidorg embed page url
    :return: list of dicts with url/quality/quality_icon keys
    """
    soup = utils.get_soup_from_url(url)
    # Generalized from http-only to https? so secure links are also found.
    links = re.findall(r'file:"(https?:.*?)"', str(soup))
    qualities = re.findall(r'label:"(\d*p)"', str(soup))
    items = []
    for link, quality in zip(links, qualities):
        try:
            # Matching module-level ICON_* constant for the quality label.
            icon = eval("ICON_" + quality.replace("p", ""))
        except Exception:
            # Unknown label: no icon rather than a crash.
            icon = ""

        items.append({"url": link, "quality": quality, "quality_icon": icon})

    return items
Beispiel #12
0
    def get_stream_urls(self, movie_name, url):
        """
        Get stream urls from a movie page url.

        Collects direct HLS playlists from ``/hls*`` iframes and resolves
        any ssfiles embeds.

        :param movie_name: display name used for every returned item
        :param url: movie page url
        :return: list of dicts with name/quality/quality_icon/url keys
        """
        items = []
        stream_urls = []
        soup = utils.get_soup_from_url(url)

        # Grab .hls links
        hls_streams = [
            item['src'] for item in soup.find_all(
                'iframe', {'src': re.compile(r'.*?/hls.*?')})
        ]
        for src in hls_streams:
            # Normalise e.g. /hls_hd/ to /hls/ and point at the playlist.
            # Use a dedicated local instead of clobbering the `url`
            # parameter as the original did.
            hls_url = re.sub(r'/(hls_\w*)/', '/hls/', src) + '/playlist.m3u8'
            stream_urls.append(urlparse(hls_url, 'http').geturl())

        items += [{
            'name': movie_name,
            'quality': '720',
            'quality_icon': ICON_720,
            'url': stream_url
        } for stream_url in stream_urls]

        # Grab embedded links
        embeded_urls = [
            item['src'] for item in soup.find_all(
                'iframe', {'src': re.compile(r'(http.*?.html)')})
        ]

        for emb_url in embeded_urls:
            if 'ssfiles' in emb_url:
                resolved = stream_resolver.resolve_ssfiles(emb_url)
                items += [{
                    'name': movie_name,
                    'quality': item['quality'],
                    'quality_icon': item['quality_icon'],
                    'url': item['url']
                } for item in resolved]

        return items
Beispiel #13
0
    def get_movies(self, url):
        """
        Get all movies from the given section url; when pointed at the bare
        search url, prompts the user for a query first.

        :param url: section (or search) page url
        :return: list of movie dicts
        """
        movies = []

        if url == 'http://tamilrasigan.net/?s=':
            s = self.plugin.keyboard("", "Search for movie name")
            url += str(s)

        soup = utils.get_soup_from_url(url)

        for movie_list in soup.find_all('ul', class_='lcp_catlist'):
            # Re-parse the list fragment so only its own anchors are seen;
            # use a separate name instead of shadowing `soup`.
            list_soup = utils.get_soup_from_text(str(movie_list))
            for anchor in list_soup.find_all('a'):
                title = anchor.get('title')
                href = anchor.get('href')

                if title is None:
                    continue
                try:
                    # Fresh infos dict per movie (the original shared one
                    # mutable dict across every entry).
                    movies.append(dict(name=utils.movie_name_resolver(title),
                                       image='',
                                       url=href,
                                       infos={}))
                except Exception:
                    # Unresolvable title: skip just this entry.
                    pass

        if len(movies) == 0:
            self.plugin.notify(msg="404 No movies found", title='Not found')
        return [movie for movie in movies if movie['name'] and movie['url']]
Beispiel #14
0
    def get_stream_urls(self, movie_name, url):
        """
        Get stream urls from a movie page url.

        Resolves tamilarasanmovie and fastplay embeds found in the page's
        iframes; other hosts are ignored.

        :param movie_name: display name used for every returned item
        :param url: movie page url
        :return: list of dicts with name/quality/quality_icon/url keys
        """
        items = []
        soup = utils.get_soup_from_url(url)

        # Substring -> resolver dispatch table; first match wins, exactly
        # like the original if/elif chain, but without the duplicated
        # item-building code in every branch.
        resolvers = (
            ('tamilarasanmovie.com', stream_resolver.resolve_tamilarasanmovie),
            ('fastplay', stream_resolver.load_fastplay_video),
        )

        # Grab embedded links
        embeded_urls = [
            item['src'] for item in soup.find_all(
                'iframe', {'src': re.compile(r'(http.*?)')})
        ]

        for emb_url in embeded_urls:
            for marker, resolver in resolvers:
                if marker in emb_url:
                    items += [{
                        'name': movie_name,
                        'quality': item['quality'],
                        'quality_icon': item['quality_icon'],
                        'url': item['url']
                    } for item in resolver(emb_url)]
                    break

        return items
Beispiel #15
0
    def get_episodes(self, url):
        """
        Get all episodes from the given url.

        :param url: programme page url
        :return: list of episode dicts with name/url/prog_name/infos keys
        """
        print('########## episodes')
        print('URL : {}'.format(url))

        episodes = []

        soup = utils.get_soup_from_url(url)
        for article in soup.find_all('article'):
            try:
                href = article.find('a')['href']
            except TypeError:
                # Article without an anchor: nothing to link to.
                continue

            if href is None:
                continue

            name = article.text.strip()

            episodes.append(dict(name=name,
                                 url=href,
                                 prog_name=name,
                                 infos={'title': name}))

        return episodes
Beispiel #16
0
    def get_episodes(self, url):
        """
        Get all episodes from the given url.

        The display name is prefixed with the date parts scraped from the
        listing: "<day> <month>  <day-letter> | <title>".

        :param url: programme page url
        :return: list of episode dicts with name/url/prog_name/infos keys
        """
        episodes = []

        soup = utils.get_soup_from_url(url)
        for video_colmn_list in soup.find_all('div',
                                              class_='video_colmn_list'):
            try:
                href = video_colmn_list.find('a')['href']
            except TypeError:
                continue

            if href is None:
                continue

            try:
                name = video_colmn_list.find('a').get_text()
                day = video_colmn_list.find('span', class_='d-ate').get_text()
                month = video_colmn_list.find('span',
                                              class_='m-oth').get_text()
                day_letter = video_colmn_list.find(
                    'span', class_='d-ayss').get_text()
            except AttributeError:
                # One of the expected date spans is missing: skip the entry
                # instead of crashing on None.get_text().
                continue

            _name = day + ' ' + month + '  ' + day_letter + ' | ' + name

            episodes.append(dict(name=_name,
                                 url=self.get_main_url() + href,
                                 prog_name=name,
                                 infos={'title': name}))

        return episodes
Beispiel #17
0
    def get_movies(self, url):
        """
        Get all movies from the given section url; the bare search url
        prompts the user for a query first.

        :param url: section (or search) page url
        :return: list of movie dicts (deduplicated by title), optionally
                 followed by a 'Next Page' entry
        """
        movies = []
        added_items = []
        img = ""
        next_page = {}

        if url == "http://tamilyogi.vip/search":
            s = xbmcgui.Dialog().input("Search for movie name",
                                       type=xbmcgui.INPUT_ALPHANUM)
            if s == "":
                return []

            url = "http://tamilyogi.vip/?s={}".format(s)

        soup = utils.get_soup_from_url(url)

        for a in soup.find_all("a"):
            title = a.get("title")

            # Pagination anchor: remember its target once found. The
            # explicit None check replaces the original bare except around
            # `"next" in None` (TypeError).
            nextpagetag = a.get("class")
            if nextpagetag and "next" in nextpagetag:
                next_page = {
                    "name": "Next Page",
                    "image": ICON_NEXT,
                    "infos": {},
                    "url": a.get("href"),
                }

            try:
                img = a.find("img")["src"]
            except (TypeError, KeyError):
                # Anchor without a complete image tag: keep the previously
                # seen image, matching the original behaviour.
                pass

            if (title is not None and title != "Tamil Movie Online"
                    and img != ""):
                print(utils.movie_name_resolver(title))
                try:
                    if title not in added_items:
                        movies.append(dict(
                            name=utils.movie_name_resolver(title),
                            image=img,
                            url=a.get("href"),
                            infos={"title": utils.movie_name_resolver(title)},
                        ))
                        added_items.append(title)
                except Exception as e:
                    print(e)

        if next_page:  # If next page
            movies.append(next_page)

        return [movie for movie in movies if movie["name"] and movie["url"]]
Beispiel #18
0
    def get_movies(self, url):
        """
        Get all movies from the given section url; the bare filter url
        prompts the user for a query first.

        :param url: section (or filter) page url
        :return: list of movie dicts (deduplicated by title), optionally
                 followed by a 'Next Page' entry
        """
        movies = []
        added_items = []
        next_page = {}

        if url == 'http://www.tamildbox.world/filter':
            s = xbmcgui.Dialog().input("Search for movie name")
            if s == '':
                return []

            url = 'http://www.tamildbox.world/filter?name={}&submit='.format(s)

        soup = utils.get_soup_from_url(url)

        # Pagination is page-level, so resolve it once rather than once
        # per listbox as the original did.
        try:
            next_page_url = soup.find(
                'div', class_='pagination').find_all('a')[-1]['href']
            next_page = {
                'name': 'Next Page',
                'image': ICON_NEXT,
                'infos': {},
                'url': next_page_url
            }
        except (AttributeError, IndexError, KeyError):
            pass

        for listbox in soup.find_all('div', class_='listbox'):
            try:
                title = listbox.find('div', class_='name').text
            except AttributeError:
                continue

            try:
                quality_name = listbox.find(
                    'span', class_='overlay').text.strip()
            except AttributeError:
                quality_name = ''

            try:
                img = listbox.find('img')['src'].strip()
            except (TypeError, KeyError):
                img = ''

            try:
                onclick = listbox.find('div', class_='play')['onclick']
                # Use a dedicated local instead of clobbering the `url`
                # parameter as the original did.
                movie_url = re.findall(r"'(http.*?)'", onclick)[0]
            except (TypeError, KeyError, IndexError):
                continue

            try:
                if title not in added_items:
                    movies.append(dict(
                        name=utils.movie_name_resolver(title) + ' ' +
                        quality_name,
                        image=img,
                        url=movie_url,
                        infos={'title': utils.movie_name_resolver(title)}))
                    added_items.append(title)
            except Exception:
                pass

        if next_page:  # If next page
            movies.append(next_page)

        if len(movies) == 0:
            xbmcgui.Dialog().notification(heading='Error 404',
                                          message='No movies found')

        return [movie for movie in movies if movie['name'] and movie['url']]
Beispiel #19
0
    def get_movies(self, url):
        """
        Get all movies from the given section url.

        :param url: section page url
        :return: list of movie dicts (deduplicated by title)
        """
        movies = []
        added_items = []
        next_page = {}

        soup = utils.get_soup_from_url(url)

        for gallery_item in soup.find_all('div', {'class': 'gallery-item'}):
            img = ''
            # Tag.text never raises KeyError; the original except was dead.
            title = gallery_item.text.strip()

            # Image: prefer src, fall back to the first srcset candidate.
            # (find_all()[0] raises IndexError and a missing srcset gives
            # AttributeError — not KeyError as the original assumed.)
            try:
                img_ = gallery_item.find_all('img')[0]
            except IndexError:
                img_ = None

            if img_ is not None:
                src = img_.get('src')
                if src is not None:
                    img = src
                else:
                    try:
                        img = img_.get('srcset').split(',')[0].split(' ')[0]
                    except AttributeError:
                        pass

            try:
                link = gallery_item.find_all('a')[0].get('href')
            except IndexError:
                link = None

            if title is not None and link is not None:
                try:
                    if title not in added_items:
                        movies.append(dict(
                            name=utils.movie_name_resolver(title),
                            image=img,
                            url=link,
                            infos={'title': utils.movie_name_resolver(title)}))
                        added_items.append(title)
                except Exception:
                    pass

        if next_page:  # If next page
            movies.append(next_page)

        return [movie for movie in movies if movie['name'] and movie['url']]
Beispiel #20
0
    def get_stream_urls(self, movie_name, url):
        """
        Get stream urls from a movie page url.

        Dispatches each iframe to the resolver matching its hostname
        (falling back to the tamildhool resolver), then adds the page-level
        youtube/dailymotion embeds once.

        :param movie_name: display name used for every returned item
        :param url: movie page url
        :return: list of dicts with name/quality/quality_icon/url keys
        """
        print('########## get stream url')
        print('URL: {}'.format(url))

        stream_urls = []
        soup = utils.get_soup_from_url(url)

        for iframe in soup.find_all('iframe'):
            src = iframe.get('src')
            if src is None:
                # Iframe without a src attribute cannot be resolved.
                continue

            host = urlsplit(src).hostname
            if host is None:
                # Relative or malformed src: no hostname to dispatch on.
                continue
            # Strip common prefix/TLD tokens so dispatch works on the bare
            # site name.
            for token in ('www.', '.com', '.tv', '.net', '.cc', '.sx', '.to'):
                host = host.replace(token, '')

            print('hostname is ---> ' + host)

            host = host.lower()
            # NOTE: as in the original, a later matching iframe replaces
            # the result of an earlier one.
            if host == 'vidmad':
                stream_urls = stream_resolver.load_vidmad_video(src)
            elif host == 'fastplay':
                stream_urls = stream_resolver.load_fastplay_video(src)
            elif host == 'vidorg':
                stream_urls = stream_resolver.load_vidorg_videos(src)
            else:
                try:
                    stream_urls = stream_resolver.load_tamildhool_videos(src)
                except Exception:
                    print('Host ingored!!')

        # BUGFIX: the youtube/dailymotion lookups search the whole page, so
        # run them once after the loop; the original ran them once per
        # iframe and appended duplicate entries.
        try:
            youtube = soup.find(
                'iframe',
                {'src': re.compile(r'https?://.*?youtube.*?')})['src']
            stream_urls.append({
                'url': stream_resolver.load_youtube_video(youtube),
                'quality': 'YouTube',
                'quality_icon': ''
            })
        except Exception as e:
            print('###### Except youtube')
            print(e)

        try:
            dailymotion = soup.find(
                'iframe',
                {'src': re.compile(r'https?://.*?dailymotion.*?')})['src']
            resolved_url = stream_resolver.load_dailymotion_video(dailymotion)
            print('##### Dailymotion resolved %s' % resolved_url)
            stream_urls.append({
                'url': resolved_url,
                'quality': 'Dailymotion',
                'quality_icon': ''
            })
        except Exception:
            print('###### Except dailymotion')

        print('######### Stream URLS')
        print(stream_urls)

        return [{
            'name': movie_name,
            'quality': stream_url['quality'],
            'quality_icon': stream_url['quality_icon'],
            'url': stream_url['url']
        } for stream_url in stream_urls if stream_url['url']]
Beispiel #21
0
    def get_movies(self, url):
        """
        Get all movies from the given section url; the bare search url
        prompts the user for a query first.

        :param url: section (or search) page url
        :return: list of movie dicts (deduplicated by title), optionally
                 followed by a 'Next Page' entry
        """
        movies = []
        added_items = []
        next_page = {}

        if url == 'https://tamilarasan.net/search':
            s = xbmcgui.Dialog().input("Search for movie name")
            if s == '':
                return []

            url = 'https://tamilarasan.net/?s={}'.format(s)

        soup = utils.get_soup_from_url(url)

        # Pagination is page-level, so resolve it once rather than once
        # per item as the original did.
        try:
            next_page_url = soup.find(
                'a', class_='next page-numbers').get('href')
            next_page = {
                'name': 'Next Page',
                'image': ICON_NEXT,
                'infos': {},
                'url': next_page_url
            }
        except AttributeError:
            pass

        for item in soup.find_all('div', class_='layer-wrapper'):
            try:
                title = item.find('div', class_='layer-content').text
            except AttributeError:
                continue

            try:
                img = item.find('img')['src'].strip()
            except (TypeError, KeyError):
                img = ''

            try:
                # Use a dedicated local instead of clobbering the `url`
                # parameter as the original did.
                movie_url = item.find(
                    'div', class_='layer-content').find('a').get('href')
            except AttributeError:
                continue

            try:
                if title not in added_items:
                    movies.append(dict(
                        name=utils.movie_name_resolver(title),
                        image=img,
                        url=movie_url,
                        infos={'title': utils.movie_name_resolver(title)}))
                    added_items.append(title)
            except Exception:
                pass

        if next_page:  # If next page
            movies.append(next_page)

        if len(movies) == 0:
            xbmcgui.Dialog().notification(heading='Error 404',
                                          message='No movies found')

        return [movie for movie in movies if movie['name'] and movie['url']]