Example #1
 def get_sources(self, video, video_type):
     source_url = self.get_url(video)
     hosters = []
     if source_url and source_url != FORCE_NO_MATCH:
         url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(url, require_debrid=True, cache_limit=.5)
         if video_type == 'movies':
             pattern = '<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
         else:
             pattern = '<hr\s*/>\s*<strong>(.*?)</strong>.*?<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
         for match in re.finditer(pattern, html, re.DOTALL):
             if video_type == 'movies':
                 links = match.group(1)
                 match = re.search('<h2>\s*<a[^>]+>(.*?)</a>', html)
                 if match:
                     title = match.group(1)
                 else:
                     title = ''
             else:
                 title, links = match.groups()
                 
             for match in re.finditer('href="([^"]+)', links):
                 stream_url = match.group(1).lower()
                 if any(link in stream_url for link in EXCLUDE_LINKS): continue
                 host = urlparse.urlparse(stream_url).hostname
                 quality = scraper_utils.blog_get_quality(video, title, host)
                 #source = {'hostname': 'IceFilms', 'multi-part': False, 'quality': quality, 'class': '','version': label,'rating': None, 'views': None, 'direct': False}
                 hoster = {'hostname': '2DDL','multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
                 hosters.append(hoster)
     main_scrape.apply_urlresolver(hosters)
     return hosters
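Note: every snippet on this page hands its result list to main_scrape.apply_urlresolver, whose implementation is not shown here. As a rough sketch only (an assumption, not the actual helper), such a filter typically keeps direct streams and drops hosted links that the urlresolver add-on cannot handle:

import urlresolver

def apply_urlresolver(hosters):
    # Hypothetical sketch: keep direct streams untouched and keep hosted links
    # only when a resolver plugin recognises their URL.
    filtered = []
    for hoster in hosters:
        if hoster.get('direct'):
            filtered.append(hoster)
        elif urlresolver.HostedMediaFile(url=hoster['url']):
            filtered.append(hoster)
    return filtered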
Example #2
    def get_sources(self, video, video_type):
        source_url = self.get_url(video)
        sources = []
        if source_url and source_url != FORCE_NO_MATCH:
            query = urlparse.parse_qs(source_url)
            if 'link' in query:
                stream_url = query['link'][0]
                host = urlparse.urlparse(stream_url).hostname
                if 'xml_file' in query:
                    xml_meta = XML_META.get(query['xml_file'][0], {})
                else:
                    xml_meta = {}

                quality = xml_meta.get('quality', QUALITIES.HD1080)
                #source = {'hostname': 'IceFilms', 'multi-part': False, 'quality': quality, 'class': '','version': label,'rating': None, 'views': None, 'direct': False}
                source = {
                    'hostname': 'RealMovies',
                    'url': stream_url,
                    'host': host,
                    'class': '',
                    'quality': quality,
                    'views': None,
                    'rating': None,
                    'direct': False
                }
                if 'quality' in xml_meta: del xml_meta['quality']
                source.update(xml_meta)
                sources.append(source)
        main_scrape.apply_urlresolver(sources)
        return sources
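Example #2 stores its match data as a query string and unpacks it with urlparse.parse_qs; a standalone illustration with a made-up source_url:

import urlparse

source_url = 'link=http%3A%2F%2Fhost.example%2Fvid.mp4&xml_file=feed1.xml'   # hypothetical value
query = urlparse.parse_qs(source_url)
print query['link'][0]       # -> http://host.example/vid.mp4
print query['xml_file'][0]   # -> feed1.xml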
Example #3
    def get_sources(self, video, video_type):
        source_url = self.get_url(video)
        sources = []
        if source_url and source_url != FORCE_NO_MATCH:
            try:
                url = urlparse.urljoin(self.base_url, source_url)
                html = self._http_get(url, cache_limit=2)

                pattern = '<iframe id="videoframe" src="([^"]+)'
                match = re.search(pattern, html)
                url = urlparse.urljoin(self.base_url, match.group(1))
                html = self._http_get(url, cache_limit=.5)

                match = re.search('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?', html)
                secret = ''.join(match.groups(''))

                match = re.search('"&t=([^"]+)', html)
                t = match.group(1)

                match = re.search('(?:\s+|,)s\s*=(\d+)', html)
                s_start = int(match.group(1))

                match = re.search('(?:\s+|,)m\s*=(\d+)', html)
                m_start = int(match.group(1))

                for fragment in dom_parser.parse_dom(html, 'div', {'class': 'ripdiv'}):
                    match = re.match('<b>(.*?)</b>', fragment)
                    if match:
                        q_str = match.group(1).replace(' ', '').upper()
                        quality = QUALITY_MAP.get(q_str, QUALITIES.HIGH)
                    else:
                        quality = QUALITIES.HIGH

                    pattern = '''onclick='go\((\d+)\)'>([^<]+)(<span.*?)</a>'''
                    for match in re.finditer(pattern, fragment):
                        link_id, label, host_fragment = match.groups()
                        source = {'hostname':'IceFilms','multi-part': False, 'quality': quality, 'class': '', 'version': label,
                                  'rating': None, 'views': None, 'direct': False}
                        source['host'] = re.sub('(</?[^>]*>)', '', host_fragment)
                        s = s_start + random.randint(3, 1000)
                        m = m_start + random.randint(21, 1000)
                        url = AJAX_URL % (link_id, s, m, secret, t)
                        urls = self.resolve_link(url)
                        source['url'] = urls
                        sources.append(source)
            except Exception as e:
                log_utils.log('Failure (%s) during icefilms get sources: |%s|' % (str(e), video), log_utils.LOGWARNING)

        main_scrape.apply_urlresolver(sources)
        return sources
Example #4
    def get_sources(self, video, video_type):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)

            fragment = dom_parser.parse_dom(html, 'table',
                                            {'id': 'streamlinks'})
            if fragment:
                max_age = 0
                now = min_age = int(time.time())
                for row in dom_parser.parse_dom(fragment[0], 'tr',
                                                {'id': 'pt\d+'}):
                    if video_type == 'movies':
                        pattern = 'href="([^"]+).*?/>([^<]+).*?(?:<td>.*?</td>\s*){1}<td>(.*?)</td>\s*<td>(.*?)</td>'
                    else:
                        pattern = 'href="([^"]+).*?/>([^<]+).*?(<span class="linkdate">.*?)</td>\s*<td>(.*?)</td>'
                    match = re.search(pattern, row, re.DOTALL)
                    if match:
                        url, host, age, quality = match.groups()
                        age = self.__get_age(now, age)
                        quality = quality.upper()
                        if age > max_age: max_age = age
                        if age < min_age: min_age = age
                        host = host.strip()
                        hoster = {
                            'hostname': 'iWatchOnline',
                            'multi-part': False,
                            'class': '',
                            'url': self.resolve_link(url),
                            'host': host,
                            'age': age,
                            'views': None,
                            'rating': None,
                            'direct': False
                        }
                        hoster['quality'] = scraper_utils.get_quality(
                            video, host,
                            QUALITY_MAP.get(quality, QUALITIES.HIGH))
                        hosters.append(hoster)

                unit = (max_age - min_age) / 100
                if unit > 0:
                    for hoster in hosters:
                        hoster['rating'] = (hoster['age'] - min_age) / unit

        main_scrape.apply_urlresolver(hosters)
        return hosters
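Example #4 scales link age into a 0-100 rating; a quick worked illustration of that arithmetic with made-up numbers (Python 2 integer division assumed):

min_age, max_age = 1000, 101000        # hypothetical newest and oldest ages, in seconds
unit = (max_age - min_age) / 100       # 1000 seconds per rating point
age = 51000
rating = (age - min_age) / unit        # 50, i.e. a mid-scale rating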
Example #5
 def get_sources(self, video, video_type):
     hosters = []
     source_url = self.get_url(video)
     if source_url and source_url != FORCE_NO_MATCH:
         if video_type == 'movies':
             #if video.video_type == VIDEO_TYPES.MOVIE:
             meta = scraper_utils.parse_movie_link(source_url)
             stream_url = source_url + scraper_utils.append_headers(
                 {'User-Agent': scraper_utils.get_ua()})
             quality = scraper_utils.height_get_quality(meta['height'])
             #source = {'hostname': 'RealMovies', 'url': stream_url, 'host': host, 'class': '', 'quality': quality,'views': None, 'rating': None, 'direct': False}
             hoster = {
                 'hostname': 'SeriesWatch',
                 'host': self._get_direct_hostname(stream_url),
                 'class': '',
                 'quality': quality,
                 'views': None,
                 'rating': None,
                 'url': BASE_URL + stream_url,
                 'direct': True
             }
             if 'format' in meta: hoster['format'] = meta['format']
             hosters.append(hoster)
         else:
             for episode in self.__match_episode(source_url, video):
                 meta = scraper_utils.parse_episode_link(episode['title'])
                 stream_url = episode['url'] + scraper_utils.append_headers(
                     {'User-Agent': scraper_utils.get_ua()})
                 stream_url = stream_url.replace(self.base_url, '')
                 quality = scraper_utils.height_get_quality(meta['height'])
                 #hoster = {'hostname': 'SeriesWatch', 'host': self._get_direct_hostname(stream_url), 'class': '','quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                 hoster = {
                     'hostname': 'SeriesWatch',
                     'host': self._get_direct_hostname(stream_url),
                     'class': '',
                     'quality': quality,
                     'views': None,
                     'rating': None,
                     'url': stream_url,
                     'direct': True
                 }
                 if 'format' in meta: hoster['format'] = meta['format']
                 if 'size' in episode:
                     hoster['size'] = scraper_utils.format_size(
                         int(episode['size']))
                 hosters.append(hoster)
     main_scrape.apply_urlresolver(hosters)
     return hosters
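Example #5 appends request headers to the stream URL because Kodi players accept them as a '|key=value' suffix. The append_headers helper itself is not shown on this page; a minimal sketch under that assumption:

import urllib

def append_headers(headers):
    # e.g. append_headers({'User-Agent': 'Mozilla/5.0'}) -> '|User-Agent=Mozilla%2F5.0'
    return '|%s' % '&'.join(['%s=%s' % (key, urllib.quote_plus(value))
                             for key, value in headers.items()])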
Example #6
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            #kodi.log("Source HTML  is : " + html)
            container_pattern = r'<table[^>]+class="movie_version[ "][^>]*>(.*?)</table>'
            item_pattern = (
                r'quality_(?!sponsored|unknown)([^>]*)></span>.*?'
                r'url=([^&]+)&(?:amp;)?domain=([^&]+)&(?:amp;)?(.*?)'
                r'"version_veiws"> ([\d]+) views</')
            max_index = 0
            max_views = -1
            for container in re.finditer(container_pattern, html, re.DOTALL | re.IGNORECASE):
                for i, source in enumerate(re.finditer(item_pattern, container.group(1), re.DOTALL)):
                    qual, url, host, parts, views = source.groups()

                    if host == 'ZnJhbWVndGZv': continue  # filter out promo hosts
                    source = {'hostname': 'PrimeWire', 'url': url.decode('base-64'),'class': self, 'host': host.decode('base-64'),'views': views, 'quality': qual, 'direct': False}
                    hosters.append(source)

            # if max_views > 0:
            #     for i in xrange(0, max_index):
            #         hosters[i]['rating'] = hosters[i]['views'] * 100 / max_views
        fullsource = main_scrape.apply_urlresolver(hosters)
        return fullsource
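The PrimeWire markup base64-encodes both the link URL and the host domain, which is why Example #6 and the other PrimeWire snippets call .decode('base-64'). A standalone illustration with a made-up value:

encoded = 'aHR0cDovL2V4YW1wbGUuY29t'   # hypothetical encoded link
print encoded.decode('base-64')        # -> http://example.com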
Example #7
def get_sources(suf_url,pre_url):
        source_url = suf_url
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(base_url, source_url)
            page_html = _http_get(url, cache_limit=.5)
            movie_id = dom_parser.parse_dom(page_html, 'div', {'id': 'media-player'}, 'movie-id')
            sources = {}
            if movie_id:
                server_url = SL_URL % (movie_id[0])
                url = urlparse.urljoin(base_url, server_url)
                html = _http_get(url, cache_limit=.5)
                for match in re.finditer('loadEpisode\(\s*(\d+)\s*,\s*(\d+)\s*\).*?class="btn-eps[^>]*>([^<]+)', html, re.DOTALL):
                    link_type, link_id, q_str = match.groups()
                    if link_type in ['12', '13', '14']:
                        url = urlparse.urljoin(base_url, PLAYLIST_URL1 % (link_id))
                        sources.update(__get_link_from_json(url, q_str))
                    else:
                        media_url = __get_ep_pl_url(link_type, page_html)
                        if media_url:
                            url = urlparse.urljoin(base_url, media_url)
                            xml = _http_get(url, cache_limit=.5)
                            sources.update(__get_links_from_xml(xml, pre_url))

            for source in sources:
                if sources[source]['direct']:
                    host = _get_direct_hostname(source)
                else:
                    host = urlparse.urlparse(source).hostname
                hoster = {'hostname':'123Movies','multi-part': False, 'host': host, 'quality': sources[source]['quality'], 'views': None, 'rating': None, 'url': source, 'direct': sources[source]['direct']}
                hosters.append(hoster)
        hosters = main_scrape.apply_urlresolver(hosters)
        return hosters
Example #8
def ice_films_tv(name,movie_title):
    try:
        title = movie_title[:-7]
        movie_year = movie_title[-6:]
        year = movie_year.replace('(','').replace(')','')
        video_type = 'shows'
        # print title
        # print year

        show_url = search(video_type,title,year)
        for e in show_url:
                url = e['url']
                #TV MAIN URL RETURNED HERE
                newseas=re.compile('S(.+?)E(.+?)  (?P<name>[A-Za-z\t .]+)').findall(name)
                for sea,epi,epi_title in newseas:
                    # print sea,epi
                    # print url
                    video = make_vid_params('Episode',title,year,sea,epi,epi_title,'')
                    #print video
                    ep_url = _get_episode_url(url, video,sea,epi)
                    #print "HERE IS END" +ep_url
                    hosters=get_sources(ep_url)

                    hosters = main_scrape.apply_urlresolver(hosters)
                    return hosters

    except Exception as e:
        hosters =[]
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='Ice Films',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return hosters
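Several of the driver functions, starting with Example #8, split a 'Title (Year)' string by fixed offsets and pull season/episode numbers out of the listing name with a small regex. A quick illustration with made-up inputs:

import re

movie_title = 'Breaking Bad (2008)'                         # hypothetical inputs
name = 'S03E07  Some Episode Title'                         # note the two spaces before the title
title = movie_title[:-7]                                    # 'Breaking Bad'
year = movie_title[-6:].replace('(', '').replace(')', '')   # '2008'
print re.compile('S(.+?)E(.+?)  (?P<name>[A-Za-z\t .]+)').findall(name)
# -> [('03', '07', 'Some Episode Title')]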
Example #9
def putlocker_tv(name, movie_title):
    try:
        title = movie_title[:-7]
        movie_year = movie_title[-6:]
        year = movie_year.replace('(', '').replace(')', '')
        video_type = 'shows'
        show_url = search(video_type, title, year)
        for e in show_url:
            url = e['url']
            newseas = re.compile(
                'S(.+?)E(.+?)  (?P<name>[A-Za-z\t .]+)').findall(name)
            print newseas
            for sea, epi, epi_title in newseas:
                video = make_vid_params('Episode', title, year, sea, epi,
                                        epi_title, '')
                ep_url = _get_episode_url(url, video, sea, epi)
                hosters = get_sources(ep_url)
                hosters = main_scrape.apply_urlresolver(hosters)
                return hosters
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='Putlocker TV',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
Example #10
def ninemovies(name):
    try:
        title = name[:-7]
        movie_year = name[-6:]
        year = movie_year.replace('(', '').replace(')', '')
        video_type = 'movies'
        source = search(video_type, title, year)
        #print source
        for e in source:
            # print e
            url = e['url']
            year = e['year']
            name = e['title']
            # print "SUF URL IS = "+url
            srcurl = base_url + url
            hosters = get_sources(srcurl)
            hosters = main_scrape.apply_urlresolver(hosters)
            return hosters
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='Nine Movies',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
Example #11
def tmovies_tv(name,movie_title):

    try:
        sources = []
        searchUrl = base_url+'watch_episode/'
        # if 'House' in movie_title:
        #     movie_title = movie_title.replace('House','DR House')
        movie_name = movie_title[:-6]
        movie_name_short = movie_title[:-7]
        movie_year = movie_title[-6:]
        movie_year = movie_year.replace('(','').replace(')','')
        movie_match =movie_name.replace(" ","_").replace(":","").replace("-","")
        year_movie_match = movie_match+movie_year
        direct_movie_match = movie_match[:-1]
        seasons=re.compile('S(.+?)E(.+?) ').findall(name)
        for sea,epi in seasons:
            tmurl = searchUrl+direct_movie_match+'/'+sea+'/'+epi+'/'
            link = OPEN_URLTM(tmurl)
            names = dom_parser.parse_dom(link, 'a',{'class':"norm vlink"})
            urls = dom_parser.parse_dom(link, 'a',{'class':"norm vlink"}, ret='href')
            for host, url in zip(names, urls):
                host = host.replace('www.','')
                #host = tools.get_hostname(host)
                source = {'url': url, 'host':host,'direct':False}
                sources.append(source)
            sources = main_scrape.apply_urlresolver(sources)
            return sources
    except Exception as e:
        hosters =[]
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return hosters
Example #12
    def get_sources(self, video, video_type):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            page_url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(page_url, cache_limit=.25)
            for link in dom_parser.parse_dom(html, 'div',
                                             {'class': '[^"]*ldr-item[^"]*'}):
                stream_url = dom_parser.parse_dom(link,
                                                  'a',
                                                  ret='data-actuallink')

                views = None
                watched = dom_parser.parse_dom(link, 'div',
                                               {'class': 'click-count'})
                if watched:
                    match = re.search(' (\d+) ', watched[0])
                    if match:
                        views = match.group(1)

                rating = None
                score = dom_parser.parse_dom(link, 'div',
                                             {'class': '\s*point\s*'})
                if score:
                    score = int(score[0])
                    rating = score * 10 if score else None

                if stream_url:
                    stream_url = stream_url[0].strip()
                    host = urlparse.urlparse(stream_url).hostname
                    quality = scraper_utils.get_quality(
                        video, host, QUALITIES.HIGH)
                    #source = {'hostname': 'IceFilms', 'multi-part': False, 'quality': quality, 'class': '','version': label,'rating': None, 'views': None, 'direct': False}
                    hoster = {
                        'hostname': 'WatchEpisodes',
                        'multi-part': False,
                        'host': host,
                        'class': self,
                        'quality': quality,
                        'views': views,
                        'rating': rating,
                        'url': stream_url,
                        'direct': False
                    }
                    hosters.append(hoster)
        main_scrape.apply_urlresolver(hosters)
        return hosters
Example #13
    def get_sources(self, video):
        #kodi.log(video.url)
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)

            match = re.search('This movie is of poor quality', html, re.I)
            if match:
                quality = QUALITIES.LOW
            else:
                quality = QUALITIES.HIGH

            for match in re.finditer('href="([^"]+/embed\d*/[^"]+)', html):
                url = match.group(1)
                embed_html = self._http_get(url, cache_limit=.5)
                r = re.search('{\s*write\("([^"]+)', embed_html)
                if r:
                    plaintext = self._caesar(r.group(1), 13).decode('base-64')
                    if 'http' not in plaintext:
                        plaintext = self._caesar(
                            r.group(1).decode('base-64'), 13).decode('base-64')
                else:
                    plaintext = embed_html
                hosters += self._get_links(plaintext)

            pattern = 'href="([^"]+)"[^>]*><[^>]+play_video.gif'
            for match in re.finditer(pattern, html, re.I):
                url = match.group(1)
                host = urlparse.urlparse(url).hostname
                hoster = {
                    'hostname': 'Afdah',
                    'multi-part': False,
                    'url': url,
                    'host': host,
                    'class': '',
                    'quality': scraper_utils.get_quality(video, host, quality),
                    'rating': None,
                    'views': None,
                    'direct': False
                }
                hosters.append(hoster)
        main_scrape.apply_urlresolver(hosters)
        return hosters
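The Afdah snippets (Examples #13, #18 and #30) unscramble embed pages with a Caesar shift before base64-decoding them. The _caesar helper is not shown on this page; a minimal sketch, assuming it simply rotates ASCII letters by the given shift:

import string

def _caesar(text, shift):
    # Hypothetical sketch: rotate letters by `shift`, leave other characters alone.
    lower, upper = string.ascii_lowercase, string.ascii_uppercase
    table = string.maketrans(lower + upper,
                             lower[shift:] + lower[:shift] +
                             upper[shift:] + upper[:shift])
    return text.translate(table)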
Example #14
def primewire_tv(name, movie_title):
    #print "SEARCHING TITLE IS =" +movie_title
    #print "EPISODE REAL NAME IS = "+name
    tvso = []
    seasons = re.compile('S(.+?)E(.+?) ').findall(name)
    for sea, epi in seasons:

        movie_name = movie_title[:-7]
        tv_title = movie_name.replace(' ', '+')
        #print "TV REAL TITLE IS = "+tv_title
        searchUrl = 'http://www.primewire.ag/index.php?search_keywords='
        surl = searchUrl + tv_title  ###########CHANGE THIS
        #print "SEARCH URL PRIME IS + " +surl
        link = OPEN_URL(surl + '&search_section=2')
        match = re.compile('<a href="/(.+?)" title="Watch (.+?)">').findall(
            link)
        for url, name in match:
            if movie_title == name:
                url = url.replace('watch', 'tv').replace('-online-free', '')
                link = OPEN_URL(base_url + url + '/season-' + sea +
                                '-episode-' + epi)
                container_pattern = r'<table[^>]+class="movie_version[ "][^>]*>(.*?)</table>'
                item_pattern = (
                    r'quality_(?!sponsored|unknown)([^>]*)></span>.*?'
                    r'url=([^&]+)&(?:amp;)?domain=([^&]+)&(?:amp;)?(.*?)'
                    r'"version_veiws"> ([\d]+) views</')
                max_index = 0
                max_views = -1
                for container in re.finditer(container_pattern, link,
                                             re.DOTALL | re.IGNORECASE):
                    for i, source in enumerate(
                            re.finditer(item_pattern, container.group(1),
                                        re.DOTALL)):
                        qual, url, host, parts, views = source.groups()
                        if kodi.get_setting('debug') == "true":
                            print "PrimeWire Debug:"
                            print "Quality is " + qual
                            print "URL IS " + url.decode('base-64')
                            print "HOST IS  " + host.decode('base-64')
                            print "VIEWS ARE " + views
                        if host == 'ZnJhbWVndGZv':
                            continue  # filter out promo hosts
                        #host = tools.get_hostname(host.decode('base-64'))
                        source = {
                            'url': url.decode('base-64'),
                            'host': host.decode('base-64'),
                            'view': views,
                            'quality': qual,
                            'direct': False
                        }
                        tvso.append(source)
        tvso = main_scrape.apply_urlresolver(tvso)
        return tvso
Example #15
def merdb(name):
    try:
        sources = []
        searchUrl = base_url + '?search='
        movie_name = name[:-6]
        movie_name_short = name[:-7]
        movie_year = name[-6:]
        movie_year = movie_year.replace('(', '').replace(')', '')
        sname = movie_name.replace(" ", "+")
        mername = sname[:-1]
        movie_match = movie_name.replace(" ", "_") + movie_year
        surl = searchUrl + mername
        link = OPEN_URL(surl)
        #dp.update(80)
        match = re.compile(
            '<div class="main_list_box"><a href="(.+?)" title="(.+?)"><img'
        ).findall(link)
        for url, name in match:
            if movie_match in url or movie_name_short == name:
                link = OPEN_URL(base_url + url)
                vidlinks = dom_parser.parse_dom(
                    link, 'span', {'class': "movie_version_link"})
                linknames = dom_parser.parse_dom(link, 'span',
                                                 {'class': "version_host"})
                for name, vidlink in zip(linknames, vidlinks):
                    #dp.update(80)
                    match = re.compile('<a href="(.+?)"').findall(vidlink)
                    for linkurl in match:
                        if "ads.php" not in linkurl and "Sponsor" not in name and "Host" not in name:
                            url = base_url + linkurl
                            #print "URLS IS = " +url
                            host = name.replace("'", "")
                            #linkname = tools.get_hostname(name)
                            source = {
                                'hostname': 'MerDB',
                                'views': None,
                                'url': url,
                                'host': host,
                                'direct': False
                            }
                            sources.append(source)
        #dp.close()
        sources = main_scrape.apply_urlresolver(sources)
        return sources
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='MerDb',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
Example #16
def zmovies(name):
    try:
        sources = []
        movie_name = name[:-6]
        movie_name_short = name[:-7]
        movie_year = name[-6:]
        movie_year = movie_year.replace('(', '').replace(')', '')
        sname = movie_name.replace(" ", "+")
        movie_match = movie_name.replace(" ", "-").replace(":", "")
        year_movie_match = movie_match + movie_year
        direct_movie_match = movie_match[:-1]
        tmurl = base_url + 'movies/view/' + direct_movie_match
        ytmurl = base_url + 'movies/view/' + year_movie_match
        #dp.update(25)
        #For links that are direct
        link = OPEN_URL(tmurl)
        match = re.compile(
            'target="_blank"   href="(.+?)"> <b> Watch Full </b></a> </td>'
        ).findall(link)
        for url in match:
            hmf = urlresolver.HostedMediaFile(url)
            if hmf:

                #linkname= hmf.get_host()
                linkname = tools.get_hostname(url)
                host = linkname
                source = {'url': url, 'host': host, 'direct': False}
                sources.append(source)
        #For links that need the year added
        link = OPEN_URL(ytmurl)
        #dp.update(80)
        match = re.compile(
            'target="_blank"   href="(.+?)"> <b> Watch Full </b></a> </td>'
        ).findall(link)
        for url in match:
            linkname = tools.get_hostname(url)
            host = linkname
            source = {'url': url, 'host': host, 'direct': False}
            sources.append(source)
        #dp.close()
        sources = main_scrape.apply_urlresolver(sources)
        return sources
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='Zee Moviess',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
Example #17
def get_sources(suf_url):
    source_url = suf_url
    hosters = []
    sources = {}
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(base_url, source_url)
        #print "URL IS = "+url
        html = get_url(url)
        for server_list in dom_parser.parse_dom(html, 'ul', {'class': 'episodes'}):
            for hash_id in dom_parser.parse_dom(server_list, 'a', ret='data-id'):
                now = time.localtime()
                url = urlparse.urljoin(base_url, hash_url)  #/ajax/film/episode?hash_id=%s&f=&p=%s
                url = url % (hash_id, now.tm_hour + now.tm_min)
                #print "CRAZY URL IS = "+url
                html =_http_get(url, headers=XHR, cache_limit=.5)
                #print "HTML IS = "+html
                if html:
                    try:
                        #print "I DID JSON"
                        js_result = json.loads(html)
                        #print js_result
                    except ValueError:
                        print 'Invalid JSON returned: %s' % (html)
                        log_utils.log('Invalid JSON returned: %s' % (html), log_utils.LOGWARNING)
                    else:
                        if 'videoUrlHash' in js_result and 'grabber' in js_result:
                           # print "ITS IN THERE"
                            query = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': js_result['videoUrlHash'], '_': int(time.time())}
                            query['link'] = query['link'].replace('\/', '/')
                            grab_url = js_result['grabber'].replace('\/', '/')
                            grab_url += '?' + urllib.urlencode(query)
                            html =get_url(grab_url)
                            #print "NEW HTML IS = "+html
                            if html:
                                try:
                                    js_result = json.loads(html)
                                except ValueError:
                                    print 'Invalid JSON returned: %s' % (html)
                                else:
                                    for result in js_result:
                                        if 'label' in result:
                                            quality = _height_get_quality(result['label'])
                                        else:
                                            quality = _gv_get_quality(result['file'])
                                        sources[result['file']] = quality

        for source in sources:
            hoster = {'hostname':'9Movies','multi-part': False, 'host': _get_direct_hostname(source),  'quality': sources[source], 'view': None, 'rating': None, 'url': source, 'direct': True}
            hosters.append(hoster)
    hosters = main_scrape.apply_urlresolver(hosters)
    return hosters
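Example #17 expects the grabber endpoint to return a JSON list of file entries; a hypothetical response it would accept (field names taken from the code above, values invented):

js_result = [
    {'file': 'http://cdn.example.com/video-720.mp4', 'label': '720p'},
    {'file': 'http://cdn.example.com/video.mp4'},   # no label, so quality is guessed from the URL
]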
Example #18
def get_sources(source_url):

    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(base_url, source_url)
        #html = _http_get(url, cache_limit=.5)
        html = OPEN_URL_REG(url)
        #print "HTML IS NOW = "+html

        match = re.search('This movie is of poor quality', html, re.I)
        if match:
            quality = QUALITIES.LOW
        else:
            quality = QUALITIES.HIGH
        #print "QUALITY IS = "+quality

        for match in re.finditer('href="([^"]+/embed\d*/[^"]+)', html):
            url = match.group(1)
            embed_html = OPEN_URL_REG(url)
            r = re.search('{\s*write\("([^"]+)', embed_html)
            if r:
                plaintext = _caesar(r.group(1), 13).decode('base-64')
                if 'http' not in plaintext:
                    plaintext = _caesar(r.group(1).decode('base-64'),
                                        13).decode('base-64')
            else:
                plaintext = embed_html
            #print "PLAINTEXT IS = "+plaintext
            hosters += _get_links(plaintext)

        pattern = 'href="([^"]+)".*play_video.gif'
        for match in re.finditer(pattern, html, re.I):
            url = match.group(1)
            host = urlparse.urlparse(url).hostname
            host = host.replace('www.', '')
            host = host.replace('http://', '')
            hoster = {
                'hostname': 'AFDAH',
                'multi-part': False,
                'url': url,
                'host': host,
                'quality': quality,
                'rating': None,
                'views': None,
                'direct': False
            }
            #hoster = {'url': url, 'host': host,'view':None,'quality':quality,'direct':False}
            hosters.append(hoster)
    hosters = main_scrape.apply_urlresolver(hosters)
    return hosters
Example #19
 def get_sources(self, video, video_type):
     source_url = self.get_url(video)
     hosters = []
     if source_url and source_url != FORCE_NO_MATCH:
         page_url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(page_url, cache_limit=.5)
         fragment = dom_parser.parse_dom(html, 'div',
                                         {'class': 'alternativesc'})
         if fragment:
             for item in dom_parser.parse_dom(fragment[0], 'div',
                                              {'class': 'altercolumn'}):
                 link = dom_parser.parse_dom(item,
                                             'a',
                                             {'class': 'altercolumnlink'},
                                             ret='href')
                 host = dom_parser.parse_dom(item, 'span')
                 if link and host:
                     link = link[0]
                     if not link.startswith('http'):
                         link = source_url + link
                     host = host[0]
                     quality = scraper_utils.get_quality(
                         video, host, QUALITIES.HIGH)
                     hoster = {
                         'hostname': 'PutLocker',
                         'multi-part': False,
                         'host': host,
                         'class': '',
                         'quality': quality,
                         'views': None,
                         'rating': None,
                         'url': link,
                         'direct': False
                     }
                     hosters.append(hoster)
     main_scrape.apply_urlresolver(hosters)
     return hosters
Example #20
def tmovies(name):

    try:
        sources = []
        searchUrl = base_url + 'watch_movie/'
        movie_name = name[:-6]
        movie_name_short = name[:-7]
        movie_year = name[-6:]
        movie_year = movie_year.replace('(', '').replace(')', '')
        sname = movie_name.replace(" ", "+")
        movie_match = movie_name.replace(" ",
                                         "_").replace(":",
                                                      "").replace("-", "")
        year_movie_match = movie_match + movie_year
        direct_movie_match = movie_match[:-1]
        tmurl = base_url + 'watch_movie/' + direct_movie_match
        ytmurl = base_url + 'watch_movie/' + year_movie_match
        link = OPEN_URLTM(tmurl)
        names = dom_parser.parse_dom(link, 'a', {'class': "norm vlink"})
        urls = dom_parser.parse_dom(link,
                                    'a', {'class': "norm vlink"},
                                    ret='href')
        for host, url in zip(names, urls):
            host = host.replace('www.', '')
            #host = tools.get_hostname(host)
            source = {'url': url, 'host': host, 'direct': False}
            sources.append(source)
        link = OPEN_URLTM(ytmurl)
        names = dom_parser.parse_dom(link, 'a', {'class': "norm vlink"})
        urls = dom_parser.parse_dom(link,
                                    'a', {'class': "norm vlink"},
                                    ret='href')
        for host, url in zip(names, urls):
            host = host.replace('www.', '')
            #host = tools.get_hostname(host)
            source = {'url': url, 'host': host, 'direct': False}
            sources.append(source)
        sources = main_scrape.apply_urlresolver(sources)
        return sources
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
Example #21
def primewire(name):
    try:
            sources = []
            searchUrl = base_url+'index.php?search_keywords='
            movie_name = name[:-6]
            movie_name_short = name[:-7]
            movie_year_full = name[-6:]
            movie_year = movie_year_full.replace('(','').replace(')','')
            sname = movie_name.replace(" ","+")
            primename = sname[:-1]
            movie_match =movie_name.replace(" ","_")+movie_year
            surl = searchUrl + primename
            link = OPEN_URL(surl)
            full_match  = movie_name+movie_year_full
            match=re.compile('<a href="/(.+?)" title="Watch (.+?)">').findall(link)
            for url, name in match:
                if full_match == name:
                    link = OPEN_URL(base_url+url)
                    container_pattern = r'<table[^>]+class="movie_version[ "][^>]*>(.*?)</table>'
                    item_pattern = (
                        r'quality_(?!sponsored|unknown)([^>]*)></span>.*?'
                        r'url=([^&]+)&(?:amp;)?domain=([^&]+)&(?:amp;)?(.*?)'
                        r'"version_veiws"> ([\d]+) views</')
                    max_index = 0
                    max_views = -1
                    for container in re.finditer(container_pattern, link, re.DOTALL | re.IGNORECASE):
                        for i, source in enumerate(re.finditer(item_pattern, container.group(1), re.DOTALL)):
                            qual, url, host, parts, views = source.groups()
                            if kodi.get_setting('debug') == "true":
                                print "PrimeWire Debug:"
                                print "Quality is " + qual
                                print "URL IS " + url.decode('base-64')
                                print "HOST IS " + host.decode('base-64')
                                print "VIEWS ARE " + views
                            if host == 'ZnJhbWVndGZv': continue  # filter out promo hosts
                            #host = tools.get_hostname(host.decode('base-64'))
                            source = {'hostname':'PrimeWire','url': url.decode('base-64'), 'host': host.decode('base-64'),'views':views,'quality':qual,'direct':False}
                            sources.append(source)
            #print "MOVIE SOURCES ARE = "+str(sources)
            sources = main_scrape.apply_urlresolver(sources)
            return sources
    except Exception as e:
        sources =[]
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='PrimeWire',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return sources
Example #22
def zmovies(name):
    try:
        sources = []
        movie_name = name[:-6]
        movie_name_short = name[:-7]
        movie_year = name[-6:]
        movie_year = movie_year.replace('(','').replace(')','')
        sname = movie_name.replace(" ","+")
        movie_match =movie_name.replace(" ","-").replace(":","")
        year_movie_match = movie_match+movie_year
        direct_movie_match = movie_match[:-1]
        tmurl = base_url+'movies/view/'+direct_movie_match
        ytmurl = base_url+'movies/view/'+year_movie_match
        #dp.update(25)
        #For links that are direct
        link = OPEN_URL(tmurl)
        match=re.compile('target="_blank"   href="(.+?)"> <b> Watch Full </b></a> </td>').findall(link)
        for url in match:
            hmf = urlresolver.HostedMediaFile(url)
            if hmf:
                #linkname= hmf.get_host()
                linkname = tools.get_hostname(url)
                host = linkname
                #source = {'hostname':'IceFilms','multi-part': False, 'quality': quality, 'label': label, 'rating': None, 'views': None, 'direct': False}
                source = {'hostname':'ZMovies','views':None, 'quality': None, 'rating': None,'url': url, 'host': host, 'direct':False}
                sources.append(source)
        #For links that need the year added
        link = OPEN_URL(ytmurl)
        #dp.update(80)
        match=re.compile('target="_blank"   href="(.+?)"> <b> Watch Full </b></a> </td>').findall(link)
        for url in match:
            linkname = tools.get_hostname(url)
            host = linkname
            source = {'hostname':'ZMovies','views':None, 'quality': None, 'rating': None,'url': url, 'host': host, 'direct':False}
            sources.append(source)
        #dp.close()
        sources = main_scrape.apply_urlresolver(sources)
        print sources
        return sources
    except Exception as e:
        hosters =[]
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='Zee Moviess',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return hosters
Example #23
def get_sources(suf_url, pre_url):
    source_url = suf_url
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(base_url, source_url)
        page_html = _http_get(url, cache_limit=.5)
        movie_id = dom_parser.parse_dom(page_html, 'div',
                                        {'id': 'media-player'}, 'movie-id')
        sources = {}
        if movie_id:
            server_url = SL_URL % (movie_id[0])
            url = urlparse.urljoin(base_url, server_url)
            html = _http_get(url, cache_limit=.5)
            for match in re.finditer(
                    'loadEpisode\(\s*(\d+)\s*,\s*(\d+)\s*\).*?class="btn-eps[^>]*>([^<]+)',
                    html, re.DOTALL):
                link_type, link_id, q_str = match.groups()
                if link_type in ['12', '13', '14']:
                    url = urlparse.urljoin(base_url, PLAYLIST_URL1 % (link_id))
                    sources.update(__get_link_from_json(url, q_str))
                else:
                    media_url = __get_ep_pl_url(link_type, page_html)
                    if media_url:
                        url = urlparse.urljoin(base_url, media_url)
                        xml = _http_get(url, cache_limit=.5)
                        sources.update(__get_links_from_xml(xml, pre_url))

        for source in sources:
            if sources[source]['direct']:
                host = _get_direct_hostname(source)
            else:
                host = urlparse.urlparse(source).hostname
            hoster = {
                'hostname': '123Movies',
                'multi-part': False,
                'host': host,
                'quality': sources[source]['quality'],
                'views': None,
                'rating': None,
                'url': source,
                'direct': sources[source]['direct']
            }
            hosters.append(hoster)
    hosters = main_scrape.apply_urlresolver(hosters)
    return hosters
Example #24
def putlocker_movies(movie_title):
    try:
        title = movie_title[:-7]
        movie_year = movie_title[-6:]
        year = movie_year.replace('(','').replace(')','')
        video_type = 'movies'
        show_url = search(video_type,title,year)
        for e in show_url:
                url = e['url']
                hosters=get_sources(url)
                print "HOSTERS ARE " + str(hosters)
                hosters = main_scrape.apply_urlresolver(hosters)
                return hosters
    except Exception as e:
        hosters =[]
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='Putlocker Movies',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return hosters
Example #25
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            page_url = urlparse.urljoin(self.base_url, source_url)
            headers = {'Referer': ''}
            html = self._http_get(page_url, headers=headers, cache_limit=.5)
            page_links = []
            for iframe_url in dom_parser.parse_dom(html, 'iframe', ret='src'):
                if 'youtube' not in iframe_url:
                    host = urlparse.urlparse(iframe_url).hostname
                    page_links.append((iframe_url, 'embedded', host))

            page_links += re.findall(
                '<a[^>]+href="([^"]+)[^>]+>(Version \d+)</a>([^<]+)', html)

            for stream_url, version, host in page_links:
                if not stream_url.startswith('http'):
                    url = source_url + stream_url
                    host = host.replace('&nbsp;', '')
                else:
                    url = stream_url
                    host = urlparse.urlparse(stream_url).hostname

                base_quality = QUALITIES.HD720 if version == 'embedded' else QUALITIES.HIGH
                hoster = {
                    'hostname': 'Putlocker',
                    'multi-part': False,
                    'host': host,
                    'class': self,
                    'quality':
                    scraper_utils.get_quality(video, host, base_quality),
                    'views': None,
                    'rating': None,
                    'url': url,
                    'direct': False
                }
                hoster['version'] = '(%s)' % (version)
                hosters.append(hoster)

        fullsource = main_scrape.apply_urlresolver(hosters)
        return fullsource
Example #26
def primewire_tv(name,movie_title):
    #print "SEARCHING TITLE IS =" +movie_title
    #print "EPISODE REAL NAME IS = "+name
    tvso = []
    seasons=re.compile('S(.+?)E(.+?) ').findall(name)
    for sea,epi in seasons:


        movie_name = movie_title[:-7]
        tv_title=movie_name.replace(' ','+')
        #print "TV REAL TITLE IS = "+tv_title
        searchUrl = 'http://www.primewire.ag/index.php?search_keywords='
        surl = searchUrl + tv_title            ###########CHANGE THIS
        #print "SEARCH URL PRIME IS + " +surl
        link = OPEN_URL(surl+'&search_section=2')
        match=re.compile('<a href="/(.+?)" title="Watch (.+?)">').findall(link)
        for url, name in match:
            if movie_title == name:
                url = url.replace('watch','tv').replace('-online-free','')
                link = OPEN_URL(base_url+url+'/season-'+sea+'-episode-'+epi)
                container_pattern = r'<table[^>]+class="movie_version[ "][^>]*>(.*?)</table>'
                item_pattern = (
                    r'quality_(?!sponsored|unknown)([^>]*)></span>.*?'
                    r'url=([^&]+)&(?:amp;)?domain=([^&]+)&(?:amp;)?(.*?)'
                    r'"version_veiws"> ([\d]+) views</')
                max_index = 0
                max_views = -1
                for container in re.finditer(container_pattern, link, re.DOTALL | re.IGNORECASE):
                    for i, source in enumerate(re.finditer(item_pattern, container.group(1), re.DOTALL)):
                        qual, url, host, parts, views = source.groups()
                        if kodi.get_setting('debug') == "true":
                            print "PrimeWire Debug:"
                            print "Quality is " + qual
                            print "URL IS " + url.decode('base-64')
                            print "HOST IS " + host.decode('base-64')
                            print "VIEWS ARE " + views
                        if host == 'ZnJhbWVndGZv': continue  # filter out promo hosts
                        #host = tools.get_hostname(host.decode('base-64'))
                        source = {'url': url.decode('base-64'), 'host':host.decode('base-64'),'view':views,'quality':qual,'direct':False}
                        tvso.append(source)
        tvso = main_scrape.apply_urlresolver(tvso)
        return tvso
Example #27
def ot3_movies(name):
    try:
        title = name[:-7]
        movie_year = name[-6:]
        year = movie_year.replace('(','').replace(')','')
        video_type = 'movies'
        source = search(video_type,title,year)
        for e in source:
                url = e['url']
                year = e['year']
                name = e['title']
                srcurl =base_url+url
                hosters=get_sources(srcurl,url)
                hosters = main_scrape.apply_urlresolver(hosters)
                return hosters
    except Exception as e:
        hosters =[]
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='123Movies',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return hosters
Example #28
def tmovies_tv(name, movie_title):

    try:
        sources = []
        searchUrl = base_url + 'watch_episode/'
        # if 'House' in movie_title:
        #     movie_title = movie_title.replace('House','DR House')
        movie_name = movie_title[:-6]
        movie_name_short = movie_title[:-7]
        movie_year = movie_title[-6:]
        movie_year = movie_year.replace('(', '').replace(')', '')
        movie_match = movie_name.replace(" ",
                                         "_").replace(":",
                                                      "").replace("-", "")
        year_movie_match = movie_match + movie_year
        direct_movie_match = movie_match[:-1]
        seasons = re.compile('S(.+?)E(.+?) ').findall(name)
        for sea, epi in seasons:
            tmurl = searchUrl + direct_movie_match + '/' + sea + '/' + epi + '/'
            link = OPEN_URLTM(tmurl)
            names = dom_parser.parse_dom(link, 'a', {'class': "norm vlink"})
            urls = dom_parser.parse_dom(link,
                                        'a', {'class': "norm vlink"},
                                        ret='href')
            for host, url in zip(names, urls):
                host = host.replace('www.', '')
                #host = tools.get_hostname(host)
                source = {'url': url, 'host': host, 'direct': False}
                sources.append(source)
            sources = main_scrape.apply_urlresolver(sources)
            return sources
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
Example #29
def merdb(name):
    try:
        sources = []
        searchUrl = base_url+'?search='
        movie_name = name[:-6]
        movie_name_short = name[:-7]
        movie_year = name[-6:]
        movie_year = movie_year.replace('(','').replace(')','')
        sname = movie_name.replace(" ","+")
        mername = sname[:-1]
        movie_match =movie_name.replace(" ","_")+movie_year
        surl = searchUrl + mername
        link = OPEN_URL(surl)
        #dp.update(80)
        match=re.compile('<div class="main_list_box"><a href="(.+?)" title="(.+?)"><img').findall(link)
        for url, name in match:
            if movie_match in url or movie_name_short == name:
                link = OPEN_URL(base_url+url)
                vidlinks=dom_parser.parse_dom(link, 'span',{'class':"movie_version_link"})
                linknames=dom_parser.parse_dom(link, 'span',{'class':"version_host"})
                for name, vidlink in zip(linknames, vidlinks):
                    #dp.update(80)
                    match=re.compile('<a href="(.+?)"').findall(vidlink)
                    for linkurl in match:
                        if "ads.php" not in linkurl and "Sponsor" not in name and "Host" not in name:
                            url = base_url+linkurl
                            #print "URLS IS = " +url
                            host = name.replace("'","")
                            #linkname = tools.get_hostname(name)
                            source = {'hostname':'MerDB','views':None,'url': url, 'host': host, 'direct':False}
                            sources.append(source)
        #dp.close()
        sources = main_scrape.apply_urlresolver(sources)
        return sources
    except Exception as e:
        hosters =[]
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='MerDb',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return hosters
Example #30
def get_sources(source_url):

        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(base_url, source_url)
            #html = _http_get(url, cache_limit=.5)
            html = OPEN_URL_REG(url)
            #print "HTML IS NOW = "+html

            match = re.search('This movie is of poor quality', html, re.I)
            if match:
                quality = QUALITIES.LOW
            else:
                quality = QUALITIES.HIGH
            #print "QUALITY IS = "+quality

            for match in re.finditer('href="([^"]+/embed\d*/[^"]+)', html):
                url = match.group(1)
                embed_html = OPEN_URL_REG(url)
                r = re.search('{\s*write\("([^"]+)', embed_html)
                if r:
                    plaintext = _caesar(r.group(1), 13).decode('base-64')
                    if 'http' not in plaintext:
                        plaintext = _caesar(r.group(1).decode('base-64'), 13).decode('base-64')
                else:
                    plaintext = embed_html
                #print "PLAINTEXT IS = "+plaintext
                hosters += _get_links(plaintext)

            pattern = 'href="([^"]+)".*play_video.gif'
            for match in re.finditer(pattern, html, re.I):
                url = match.group(1)
                host = urlparse.urlparse(url).hostname
                host = host.replace('www.','')
                host = host.replace('http://','')
                hoster = {'hostname':'AFDAH','multi-part': False, 'url': url, 'host': host, 'quality': quality, 'rating': None, 'views': None, 'direct': False}
                #hoster = {'url': url, 'host': host,'view':None,'quality':quality,'direct':False}
                hosters.append(hoster)
        hosters = main_scrape.apply_urlresolver(hosters)
        return hosters
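
The _caesar helper used above isn't included in this excerpt. Assuming it is a plain alphabetic rotation (the shift of 13 suggests ROT13), an equivalent standalone sketch of the shift-plus-base64 deobfuscation could look like the following; the helper names and the sample payload are made up:

import base64
import string

def caesar_shift(text, shift=13):
    # rotate letters only, leaving digits and symbols alone (classic ROT-style shift)
    lower = string.ascii_lowercase
    upper = string.ascii_uppercase
    table = string.maketrans(
        lower + upper,
        lower[shift:] + lower[:shift] + upper[shift:] + upper[:shift])
    return text.translate(table)

def deobfuscate(payload):
    # mirror the fallback above: shift then b64-decode; if that doesn't look
    # like a URL, b64-decode first, shift, then b64-decode again
    plaintext = base64.b64decode(caesar_shift(payload, 13))
    if 'http' not in plaintext:
        plaintext = base64.b64decode(caesar_shift(base64.b64decode(payload), 13))
    return plaintext

# round-trip with a made-up URL: obfuscate, then recover it
payload = caesar_shift(base64.b64encode('http://host.example/embed/1'), 13)
print deobfuscate(payload)   # -> http://host.example/embed/1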
Example #31
0
def tmovies(name):

    try:
        sources = []
        searchUrl = base_url + 'watch_movie/'
        movie_name = name[:-6]
        movie_name_short = name[:-7]
        movie_year = name[-6:]
        movie_year = movie_year.replace('(', '').replace(')', '')
        sname = movie_name.replace(" ", "+")
        movie_match = movie_name.replace(" ", "_").replace(":", "").replace("-", "")
        year_movie_match = movie_match + movie_year
        direct_movie_match = movie_match[:-1]
        tmurl = base_url + 'watch_movie/' + direct_movie_match
        ytmurl = base_url + 'watch_movie/' + year_movie_match
        # scrape both the plain slug and the slug+year variant of the watch page
        for page_url in (tmurl, ytmurl):
            link = OPEN_URLTM(page_url)
            names = dom_parser.parse_dom(link, 'a', {'class': "norm vlink"})
            urls = dom_parser.parse_dom(link, 'a', {'class': "norm vlink"}, ret='href')
            for host, url in zip(names, urls):
                host = host.replace('www.', '')
                #host = tools.get_hostname(host)
                source = {'url': url, 'host': host, 'direct': False}
                sources.append(source)
        sources = main_scrape.apply_urlresolver(sources)
        return sources
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',msg='(error) %s  %s' % (str(e), ''),duration=5000,sound=None)
        return hosters
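
For reference, the zip() pairing above simply lines up each anchor's visible text with its href. A self-contained illustration of the same pairing using a plain regex; the sample markup and host names below are invented:

import re

SAMPLE_HTML = '''
<a class="norm vlink" href="http://hosta.example/v/1">hosta.example</a>
<a class="norm vlink" href="http://hostb.example/v/2">www.hostb.example</a>
'''

# capture (href, link text) pairs, then build source dicts as tmovies() does
pairs = re.findall(r'<a class="norm vlink" href="([^"]+)">([^<]+)</a>', SAMPLE_HTML)
sources = [{'url': url, 'host': host.replace('www.', ''), 'direct': False}
           for url, host in pairs]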
Example #32
0
def putlocker_movies(movie_title):
    try:
        title = movie_title[:-7]
        movie_year = movie_title[-6:]
        year = movie_year.replace('(', '').replace(')', '')
        video_type = 'movies'
        show_url = search(video_type, title, year)
        # only the first search result is used: the function returns on the
        # first pass through this loop
        for e in show_url:
            url = e['url']
            hosters = get_sources(url)
            print "HOSTERS ARE " + str(hosters)
            hosters = main_scrape.apply_urlresolver(hosters)
            return hosters
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='Putlocker Movies',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
Example #33
0
def get_sources(suf_url):
    source_url = suf_url
    hosters = []
    sources = {}  # maps stream URL -> quality
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(base_url, source_url)
        #print "URL IS = "+url
        html = get_url(url)
        for server_list in dom_parser.parse_dom(html, 'ul',
                                                {'class': 'episodes'}):
            for hash_id in dom_parser.parse_dom(server_list,
                                                'a',
                                                ret='data-id'):
                now = time.localtime()
                url = urlparse.urljoin(
                    base_url, hash_url)  #/ajax/film/episode?hash_id=%s&f=&p=%s
                url = url % (hash_id, now.tm_hour + now.tm_min)
                #print "CRAZY URL IS = "+url
                html = _http_get(url, headers=XHR, cache_limit=.5)
                #print "HTML IS = "+html
                if html:
                    try:
                        #print "I DID JSON"
                        js_result = json.loads(html)
                        #print js_result
                    except ValueError:
                        print 'Invalid JSON returned: %s' % (html)
                        log_utils.log('Invalid JSON returned: %s' % (html),
                                      log_utils.LOGWARNING)
                    else:
                        if 'videoUrlHash' in js_result and 'grabber' in js_result:
                            # print "ITS IN THERE"
                            query = {
                                'flash': 1,
                                'json': 1,
                                's': now.tm_min,
                                'link': js_result['videoUrlHash'],
                                '_': int(time.time())
                            }
                            query['link'] = query['link'].replace('\/', '/')
                            grab_url = js_result['grabber'].replace('\/', '/')
                            grab_url += '?' + urllib.urlencode(query)
                            html = get_url(grab_url)
                            #print "NEW HTML IS = "+html
                            if html:
                                try:
                                    js_result = json.loads(html)
                                except ValueError:
                                    print 'Invalid JSON returned: %s' % (html)
                                else:
                                    for result in js_result:
                                        if 'label' in result:
                                            quality = _height_get_quality(
                                                result['label'])
                                        else:
                                            quality = _gv_get_quality(
                                                result['file'])
                                        sources[result['file']] = quality

        for source in sources:
            hoster = {
                'multi-part': False,
                'host': _get_direct_hostname(source),
                'quality': sources[source],
                'view': None,
                'rating': None,
                'url': source,
                'direct': True
            }
            hosters.append(hoster)
    hosters = main_scrape.apply_urlresolver(hosters)
    return hosters
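
The second ("grabber") request above is just the hash from the first AJAX response plus a few cache-busting parameters. A standalone sketch of that query assembly; the hash and grabber URL below are placeholder values, not real endpoints:

import time
import urllib

video_url_hash = 'abc123def'                    # placeholder for js_result['videoUrlHash']
grabber = 'http://grabber.example/get'          # placeholder for js_result['grabber']

now = time.localtime()
query = {
    'flash': 1,
    'json': 1,
    's': now.tm_min,
    'link': video_url_hash,
    '_': int(time.time()),                      # cache-busting timestamp
}
grab_url = grabber + '?' + urllib.urlencode(query)
print grab_url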
Example #34
0
def primewire(name):
    try:
        sources = []
        searchUrl = base_url + 'index.php?search_keywords='
        movie_name = name[:-6]
        movie_name_short = name[:-7]
        movie_year_full = name[-6:]
        movie_year = movie_year_full.replace('(', '').replace(')', '')
        sname = movie_name.replace(" ", "+")
        primename = sname[:-1]
        movie_match = movie_name.replace(" ", "_") + movie_year
        surl = searchUrl + primename
        link = OPEN_URL(surl)
        full_match = movie_name + movie_year_full
        match = re.compile('<a href="/(.+?)" title="Watch (.+?)">').findall(
            link)
        for url, name in match:
            if full_match == name:
                link = OPEN_URL(base_url + url)
                container_pattern = r'<table[^>]+class="movie_version[ "][^>]*>(.*?)</table>'
                item_pattern = (
                    r'quality_(?!sponsored|unknown)([^>]*)></span>.*?'
                    r'url=([^&]+)&(?:amp;)?domain=([^&]+)&(?:amp;)?(.*?)'
                    r'"version_veiws"> ([\d]+) views</')  # "veiws" mirrors the site's own markup
                max_index = 0   # initialised but not used further in this excerpt
                max_views = -1
                for container in re.finditer(container_pattern, link,
                                             re.DOTALL | re.IGNORECASE):
                    for i, source in enumerate(
                            re.finditer(item_pattern, container.group(1),
                                        re.DOTALL)):
                        qual, url, host, parts, views = source.groups()
                        if kodi.get_setting('debug') == "true":
                            print "PrimeWire Debug:"
                            print "Quality is " + qual
                            print "URL IS " + url.decode('base-64')
                            print "HOST IS  " + host.decode('base-64')
                            print "VIEWS ARE " + views
                        if host == 'ZnJhbWVndGZv':
                            continue  # filter out promo hosts
                        #host = tools.get_hostname(host.decode('base-64'))
                        source = {
                            'url': url.decode('base-64'),
                            'host': host.decode('base-64'),
                            'view': views,
                            'quality': qual,
                            'direct': False
                        }
                        sources.append(source)
        #print "MOVIE SOURCES ARE = "+str(sources)
        sources = main_scrape.apply_urlresolver(sources)
        return sources
    except Exception as e:
        hosters = []
        log_utils.log('Error [%s]  %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='PrimeWire',
                        msg='(error) %s  %s' % (str(e), ''),
                        duration=5000,
                        sound=None)
        return hosters
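
The url and host fields captured by item_pattern come back base64 encoded, and one hard-coded host id ('ZnJhbWVndGZv') is skipped as a promo link. A standalone round-trip of that decode/filter step with made-up link data:

candidates = [
    ('http://promo.example/x'.encode('base-64'), 'ZnJhbWVndGZv', '10'),
    ('http://host.example/video/1'.encode('base-64'),
     'host.example'.encode('base-64'), '42'),
]
sources = []
for enc_url, enc_host, views in candidates:
    if enc_host == 'ZnJhbWVndGZv':
        continue                        # same promo-host filter as in primewire()
    sources.append({'url': enc_url.decode('base-64'),
                    'host': enc_host.decode('base-64'),
                    'view': views,
                    'direct': False})
print sources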