def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape episode torrents from the eztv.re JSON API.

    Walks up to 3 result pages for the show's IMDb id, keeps torrents whose
    filename contains the 'sXXeYY.' tag for the requested episode and whose
    size is under the user's limit, tags each with a resolution label, and
    accumulates hits into the module-level ``global_var`` list.

    Returns ``global_var`` (an empty list immediately for movies — this
    scraper only serves TV episodes).
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        return []
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')

    # Loop-invariant values hoisted out of the page/result loops:
    # user size cap (GiB) and the bytes->GiB divisor.
    max_size = int(Addon.getSetting("size_limit"))
    dev_num = 1024 * 1024 * 1024
    episode_tag = 's%se%s.' % (season_n, episode_n)

    for page in range(0, 3):
        x = get_html(
            'https://eztv.re/api/get-torrents?imdb_id=%s&limit=100&page=%s' %
            (imdb_id.replace('tt', ''), str(page)),
            headers=base_header,
            timeout=10).json()

        for item in x['torrents']:
            title = item['filename']

            if episode_tag not in title.lower():
                continue
            lk = item['magnet_url']
            size = float(item['size_bytes']) / dev_num

            if int(size) < max_size:
                # BUGFIX: the original used a bare `if '1080'` after the
                # '2160' test, restarting the chain — a title containing
                # only '2160' then fell through every elif to 'HD'.
                if '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'

                all_links.append((title, lk, str(size), res))

                global_var = all_links
    return global_var
Example #2
0
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            limit=None,
            referer=None,
            cookie=None,
            output='',
            timeout='30'):
    """Fetch *url* via urllib with optional proxy, cookie capture and a
    Cloudflare-challenge retry.

    ``output`` selects the return shape:
      ''         -> response body (default, capped at 5 MiB)
      'cookie'   -> cookie header string captured from the response
      'response' -> (status code as str, body)
      'chunk'    -> first 16 KiB, but only when Content-Length > 2 MiB
      'extended' -> (body, request headers, response headers, cookie)
      'geturl'   -> final URL after redirects
      'headers'  -> response headers object
    ``limit`` caps the body read in KiB ('0' means 224 KiB).
    Returns None for HTTP errors when ``error`` is False.
    """
    handlers = []

    # Route through an HTTP proxy when one is supplied.
    if proxy is not None:
        handlers += [
            ProxyHandler({'http': '{0}'.format(proxy)}),
            urllib_request.HTTPHandler
        ]
        opener = urllib_request.build_opener(*handlers)
        urllib_request.install_opener(opener)

    # Capture cookies whenever the caller wants them back ('cookie' /
    # 'extended') or asked to keep the response open.
    if output == 'cookie' or output == 'extended' or close is not True:
        cookies = cookielib.LWPCookieJar()
        handlers += [
            urllib_request.HTTPHandler(),
            urllib_request.HTTPSHandler(),
            urllib_request.HTTPCookieProcessor(cookies)
        ]
        opener = urllib_request.build_opener(*handlers)
        urllib_request.install_opener(opener)

    try:

        # create_default_context does not exist before 2.7.9; bail out of
        # the SSL tweak entirely on older interpreters.
        if sys.version_info < (2, 7, 9):
            raise Exception()

        # Deliberately disable certificate verification — scraper targets
        # frequently present broken certs.  NOTE(review): insecure by design.
        import ssl
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        handlers += [urllib_request.HTTPSHandler(context=ssl_context)]
        opener = urllib_request.build_opener(*handlers)
        urllib_request.install_opener(opener)

    except:
        pass

    # `headers.update(headers)` is a no-op on a dict; the try/except only
    # serves to replace a None `headers` argument with an empty dict
    # (None.update raises AttributeError).
    try:
        headers.update(headers)
    except:
        headers = {}

    # Fill in a User-Agent unless the caller supplied one: a cached random
    # desktop agent normally, a fixed iPhone agent when mobile=True.
    if 'User-Agent' in headers:
        pass
    elif not mobile is True:
        # headers['User-Agent'] = agent()
        headers['User-Agent'] = cache.get(randomagent, 1)
    else:
        headers['User-Agent'] = 'Apple-iPhone/701.341'

    # Default Referer is the target's own origin.
    if 'Referer' in headers:
        pass
    elif referer is None:
        headers['Referer'] = '%s://%s/' % (urlparse(url).scheme,
                                           urlparse(url).netloc)
    else:
        headers['Referer'] = referer

    if not 'Accept-Language' in headers:
        headers['Accept-Language'] = 'en-US'

    # Explicit Cookie header wins over the `cookie` argument.
    if 'Cookie' in headers:
        pass
    elif cookie is not None:
        headers['Cookie'] = cookie

    if redirect is False:

        # NOTE(review): NoRedirection derives from HTTPError, not from a
        # urllib handler base class — verify build_opener actually accepts
        # this and that redirects are really suppressed.
        class NoRedirection(urllib_error.HTTPError):
            def http_response(self, request, response):
                return response

        opener = urllib_request.build_opener(NoRedirection)
        urllib_request.install_opener(opener)

        try:
            del headers['Referer']
        except:
            pass

    req = urllib_request.Request(url, data=post, headers=headers)

    try:
        response = urllib_request.urlopen(req, timeout=int(timeout))

    except urllib_error.HTTPError as response:

        if response.code == 503:

            # Cloudflare challenge page detection.  NOTE(review): on
            # Python 3 read() returns bytes while the needle is str, so
            # this test may always be False — confirm; the read also
            # consumes the error body.
            if 'cf-browser-verification' in response.read(5242880):

                netloc = '%s://%s' % (urlparse(url).scheme,
                                      urlparse(url).netloc)

                # Solve/cache the Cloudflare cookie for 168h, then retry
                # the original request with it attached.
                cf = cache.get(cfcookie, 168, netloc, headers['User-Agent'],
                               timeout)

                headers['Cookie'] = cf

                request = urllib_request.Request(url,
                                                 data=post,
                                                 headers=headers)

                response = urllib_request.urlopen(request,
                                                  timeout=int(timeout))

            elif error is False:
                return

        elif error is False:
            return

    if output == 'cookie':

        # Join the captured jar into a 'k=v; k=v' string; a Cloudflare
        # cookie (cf), when present, replaces it.
        try:
            result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
        except:
            pass
        try:
            result = cf
        except:
            pass

    elif output == 'response':

        if limit == '0':
            result = (str(response.code), response.read(224 * 1024))
        elif limit is not None:
            result = (str(response.code), response.read(int(limit) * 1024))
        else:
            result = (str(response.code), response.read(5242880))

    elif output == 'chunk':

        # Only sample bodies larger than 2 MiB; unknown length is treated
        # as "large enough".
        try:
            content = int(response.headers['Content-Length'])
        except:
            content = (2049 * 1024)

        if content < (2048 * 1024):
            return
        result = response.read(16 * 1024)

    elif output == 'extended':

        try:
            cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
        except:
            pass
        try:
            cookie = cf
        except:
            pass
        content = response.headers
        result = response.read(5242880)
        return result, headers, content, cookie

    elif output == 'geturl':
        result = response.geturl()

    elif output == 'headers':
        content = response.headers
        return content

    else:
        if limit == '0':
            result = response.read(224 * 1024)
        elif limit is not None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

    if close is True:
        response.close()
    return result
Example #3
0
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape torrents from the torrentapi.org (RARBG) pubapi_v2 JSON API.

    Requests a short-lived API token, builds one or more search strings
    (movie title+year, or several season/episode variants for TV), queries
    each, filters results by the user's size limit, tags a resolution, and
    accumulates hits into the module-level ``global_var`` list, which is
    returned.
    """
    global global_var, stop_all
    all_links = []
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')

    # torrentapi requires a token obtained before searching.
    x = get_html(
        "https://torrentapi.org/pubapi_v2.php?app_id=me&get_token=get_token",
        headers=base_header,
        timeout=10).json()
    token = x['token']

    # ROBUSTNESS: previously unbound (NameError at the loop) when tv_movie
    # was neither 'movie' nor 'tv'.
    search_url = []
    if tv_movie == 'movie':
        search_url = [((clean_name(original_title, 1).replace(' ', '%20') +
                        '%20' + show_original_year)).lower()]
    elif tv_movie == 'tv':
        base = clean_name(original_title, 1).replace(' ', '%20')
        if Addon.getSetting('debrid_select') == '0':
            # Broad sweep: whole season, exact episode, and 'season N'.
            search_url = [
                base + '%20' + 'S' + season_n,
                base + '%20' + 's' + season_n + 'e' + episode_n,
                base + '%20' + 'season ' + season
            ]
        else:
            search_url = [base + '%20' + 's' + season_n + 'e' + episode_n]

    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }

    # Loop-invariant user size cap (GiB), hoisted out of the result loop.
    max_size = int(Addon.getSetting("size_limit"))

    for term in search_url:
        # Crude rate limit — torrentapi throttles rapid requests.
        time.sleep(0.4)
        ur = 'https://torrentapi.org/pubapi_v2.php?app_id=Torapi&mode=search&search_imdb=%s&token=%s&sort=seeders&ranked=0&limit=100&format=json_extended&search_string=%s' % (
            imdb_id, token, term)

        y = get_html(ur, headers=headers, timeout=10).json()
        if 'torrent_results' not in y:
            continue
        for results in y['torrent_results']:
            if stop_all == 1:
                break
            nam = results['title']
            size = float(results['size']) / (1024 * 1024 * 1024)
            links = results['download']
            if '4k' in nam:
                res = '2160'
            elif '2160' in nam:
                res = '2160'
            elif '1080' in nam:
                res = '1080'
            elif '720' in nam:
                res = '720'
            elif '480' in nam:
                res = '480'
            elif '360' in nam:
                res = '360'
            else:
                res = 'HD'

            if size < max_size:
                all_links.append((nam, links, str(size), res))
                global_var = all_links
    return global_var
Example #4
0
    def get(self, query):
        """Search subz.xyz for subtitles matching *query* and append hits
        to ``self.list``.

        *query* is either 'Title (YYYY)/imdb=' (movie) or
        'Title SxxExx/imdb=' (episode).  The first three candidate pages
        are checked against the cleaned title (and year, for movies);
        the matching page's subtitle rows are parsed into dicts of
        name/url/source/rating.  Returns ``self.list`` (None on any
        failure before row parsing, via the broad except).
        """
        try:
            # Movie queries end in '(YYYY)/imdb='; anything else is
            # treated as an episode query below.
            match = re.findall('(.+?) \((\d{4})\)/imdb=$', query)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]

                # Strip percent-escaped (non-ASCII) characters from the
                # title and collapse whitespace before searching.
                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                # Collect up to 3 unique /movies/<id> links, order kept.
                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'movies'})[0]
                url = re.findall('(/movies/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:3]

                # Pick the first candidate whose cached (title, year)
                # matches; reuse a previously-fetched page if available.
                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if c is not None:
                        if cleantitle.get(c[0]) == cleantitle.get(
                                title) and c[1] == year:
                            try:
                                item = self.r
                            except:
                                item = client.request(i)
                            break

            else:

                title, season, episode = re.findall(
                    '(.+?) S(\d+)E(\d+)/imdb=$', query)[0]

                # Normalize to unpadded numbers for the episode URL.
                season, episode = '%01d' % int(season), '%01d' % int(episode)

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'tvshows'})[0]
                url = re.findall('(/series/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if c is not None:
                        if cleantitle.get(c[0]) == cleantitle.get(title):
                            item = i
                            break

                # NOTE(review): `item` is unbound here when no candidate
                # matched — the resulting NameError is swallowed by the
                # enclosing except and the method returns None.
                item = '%s/seasons/%s/episodes/%s' % (item, season, episode)
                item = client.request(item)

            item = re.sub(r'[^\x00-\x7F]+', ' ', item)
            items = client.parseDOM(item, 'tr', attrs={'data-id': '.+?'})
        except:
            return

        # Each row's last cell holds the download anchor; derive a display
        # name from the URL's final path segment.
        for item in items:
            try:

                r = client.parseDOM(item, 'td', attrs={'class': '.+?'})[-1]

                url = client.parseDOM(r, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.replace("'", "").encode('utf-8')

                name = url.split('/')[-1].strip()
                name = re.sub('\s\s+', ' ', name)
                name = name.replace('_', '').replace('%20', '.')
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'subzxyz',
                    'rating': 5
                })
            except:
                pass

        return self.list
Example #5
0
        html_g_tv = {}
        url_g = 'https://api.themoviedb.org/3/genre/tv/list?api_key=34142515d9d23817496eeb4ff1d223d0&language=' + lang
        logging.warning(url_g)
        html_g_tv = get_html(url_g, headers=headers).json()

        html_g_movie = {}
        url_g = 'https://api.themoviedb.org/3/genre/movie/list?api_key=34142515d9d23817496eeb4ff1d223d0&language=' + lang
        html_g_movie = get_html(url_g, headers=headers).json()
    except Exception as e:
        logging.warning('Err in HTML_G:' + str(e))
    return html_g_tv, html_g_movie


# How long (hours) to keep cached genre maps, from addon settings.
time_to_save = int(Addon.getSetting("save_time"))
# BUGFIX: the original also called get_html_g() directly on the line before
# and immediately discarded the result — a redundant duplicate network fetch.
html_g_tv, html_g_movie = cache.get(get_html_g, time_to_save, table='posters')


def addNolink(name,
              url,
              mode,
              isFolder,
              fanart='DefaultFolder.png',
              iconimage="DefaultFolder.png",
              plot=' ',
              all_w_trk='',
              all_w={},
              heb_name=' ',
              data=' ',
              year=' ',
              generes=' ',
Example #6
0
def _resolution_tag(quality):
    """Map a quality key ('4k', '1080p', ...) to the addon's resolution label."""
    if '4k' in quality:
        return '2160'
    for marker in ('2160', '1080', '720', '480', '360'):
        if marker in quality:
            return marker
    return 'HD'


def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape torrents from the Popcorn Time API (movies-v2.api-fetch.sh).

    TV payloads carry an 'episodes' list (torrents keyed by quality per
    episode); movie payloads carry torrents under ['torrents']['en'].
    Hits under the user's size limit are appended to the module-level
    ``global_var`` list, which is returned.
    """
    global global_var, stop_all
    all_links = []
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')

    x = get_html('http://movies-v2.api-fetch.sh/%s/%s' %
                 (tv_movie.replace('tv', 'show'), imdb_id),
                 headers=base_header).json()

    # Loop-invariant values hoisted out of the result loops.
    max_size = int(Addon.getSetting("size_limit"))
    dev_num = 1024 * 1024 * 1024

    if 'episodes' in x:
        for items in x['episodes']:

            if int(season) != int(items['season']) or int(episode) != int(
                    items['episode']):
                continue

            for quality in items['torrents']:
                if stop_all == 1:
                    break
                link = items['torrents'][quality]['url']
                if link is None:  # idiom fix: was `== None`
                    continue
                name = original_title

                # The episode payload carries no size; keep the original
                # behavior of reporting 0.
                size = 0
                try:
                    size = float(size) / dev_num
                except:
                    size = 0

                if size < max_size:
                    all_links.append(
                        (name, link, str(size), _resolution_tag(quality)))
                    global_var = all_links
    else:
        for quality in x['torrents']['en']:
            if stop_all == 1:
                break
            link = x['torrents']['en'][quality]['url']
            if link is None:  # idiom fix: was `== None`
                continue
            name = original_title
            size = x['torrents']['en'][quality]['size']

            # Bytes -> GiB; malformed sizes fall back to 0.
            try:
                size = float(size) / dev_num
            except:
                size = 0

            if size < max_size:
                all_links.append(
                    (name, link, str(size), _resolution_tag(quality)))
                global_var = all_links
    return global_var
Example #7
0
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape Google-Drive-hosted streams from qazwsxedcrfvtgb.info.

    Matches the requested episode for TV (labelled 720) or takes every
    entry for movies (labelled 1080), builds a drive.google.com view link
    from the last id in each entry's 'mb_stream' map, and accumulates hits
    into the module-level ``global_var`` list, which is returned.
    """
    global global_var, stop_all

    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    all_links = []

    x = get_html('https://qazwsxedcrfvtgb.info/show/' + (imdb_id),
                 headers=base_header,
                 timeout=10,
                 verify=False).json()
    logging.warning(x)
    for items in x['episodes']:
        title = clean_name(original_title, 1)
        if tv_movie == 'tv':
            # The source only offers 720p for episodes; original code ran
            # res_c through a full resolution chain, but res_c is only
            # ever '720' or '1080' so the chain reduced to res = res_c.
            res = '720'
            title = title + '.S%sE%s' % (season_n, episode_n)
            if not (episode == str(items['episode'])
                    and season == str(items['season'])):
                continue
        else:
            res = '1080'

        if 'mb_stream' not in items:
            continue
        # Keep the last stream id listed (matches the original loop).
        id_lk = None
        for key in items['mb_stream']:
            id_lk = items['mb_stream'][key]
        # ROBUSTNESS: an empty mb_stream map previously reused the id from
        # a prior iteration (or raised NameError on the first).
        if id_lk is None:
            continue

        link = 'https://drive.google.com/file/d/' + id_lk + '/view'

        all_links.append((title, link, str(0), res))
        global_var = all_links

    return global_var
Example #8
0
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape magnet links from the torrentio Stremio addon API.

    Parses seeders and size out of each stream's emoji-decorated title,
    applies the user's minimum-seeders (torrent mode only) and size-limit
    settings, builds a magnet URI from the infoHash, and accumulates hits
    into the module-level ``global_var`` list, which is returned.
    """
    global global_var, stop_all
    all_links = []

    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    # Py2 (urllib.quote_plus) vs Py3 (urllib.parse.quote_plus).
    try:
        que = urllib.quote_plus
    except AttributeError:
        que = urllib.parse.quote_plus
    seed = ''
    f_seeds = False
    use_debrid = Addon.getSetting('debrid_use') == 'true'

    # Seeder filtering/labelling only applies to plain torrent playback.
    if Addon.getSetting('torrents') == 'true' and use_debrid == False:
        f_seeds = True
        seed = 'S: >>'

    if tv_movie == 'movie':
        ur = 'https://torrentio.strem.fun/stream/movie/%s.json' % imdb_id
    elif tv_movie == 'tv':
        # BUGFIX: episode streams are served under /stream/series/, not
        # /stream/movie/ (torrentio addresses episodes as id:season:episode).
        ur = 'https://torrentio.strem.fun/stream/series/{0}%3A{1}%3A{2}.json'.format(
            imdb_id, season, episode)

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }

    # Loop-invariant user size cap (GiB), hoisted out of the result loop.
    max_size = int(Addon.getSetting("size_limit"))

    logging.warning(ur)
    y = get_html(ur, headers=headers, timeout=10).json()

    for results in y['streams']:

        nam = results['title']
        if f_seeds:
            # Seeders appear between the person and disk emoji markers.
            regex = '👤 (.+?) 💾'
            seeds = re.compile(regex).findall(nam)
            if len(seeds) > 0:
                seed = seeds[0]
            else:
                continue
            if int(Addon.getSetting('min_seed')) > int(seed):
                continue
            seed = 'S:%s>>,' % str(seed)
        if stop_all == 1:
            break

        # Size appears between the disk and gear emoji markers.
        regex = '💾(.+?)⚙️'
        s = re.compile(regex).findall(nam)
        size = 0
        if len(s) > 0:
            size = float(
                s[0].replace('GB', '').replace('MB', '').replace(",", '').strip())
            # BUGFIX: the original tested `'MB' in s` against the *list*,
            # which is exact-element membership and never true, so MB
            # sizes were never rescaled to GB.
            if 'MB' in s[0]:
                size = size / 1000

        links = results['infoHash']
        try:
            lk = 'magnet:?xt=urn:btih:%s&dn=%s' % (links, que(nam))
        except:
            lk = 'magnet:?xt=urn:btih:%s&dn=%s' % (links, que(nam.encode('utf-8')))
        if '4k' in nam:
            res = '2160'
        elif '2160' in nam:
            res = '2160'
        elif '1080' in nam:
            res = '1080'
        elif '720' in nam:
            res = '720'
        elif '480' in nam:
            res = '480'
        elif '360' in nam:
            res = '360'
        else:
            res = 'HD'

        if size < max_size:
            all_links.append((seed + nam, lk, str(size), res))
            global_var = all_links
    return global_var
     os.makedirs(user_dataDir)
def get_html_g():
    """Fetch TMDB TV and movie genre lists for the configured language.

    Returns a (tv_genres, movie_genres) pair of parsed JSON dicts; either
    may be empty ({}) if the fetch fails (the error is logged, not raised).
    """
    # BUGFIX: the original fetched the TV list once *before* the try block
    # (a redundant duplicate network call that also bypassed the error
    # handler), and left html_g_movie unbound at return when the guarded
    # TV fetch failed.
    html_g_tv = {}
    html_g_movie = {}
    try:
        url_g = 'https://api.themoviedb.org/3/genre/tv/list?api_key=fb126fd0d6df5cb02b1cb676eadd2d1a&language=' + lang
        html_g_tv = get_html(url_g).json()

        url_g = 'https://api.themoviedb.org/3/genre/movie/list?api_key=fb126fd0d6df5cb02b1cb676eadd2d1a&language=' + lang
        html_g_movie = get_html(url_g).json()
    except Exception as e:
        logging.warning('Err in HTML_G:' + str(e))
    return html_g_tv, html_g_movie

# Genre maps are fetched via get_html_g and cached for 72 hours in the
# 'posters' cache table.
html_g_tv,html_g_movie=cache.get(get_html_g,72, table='posters')
def addNolink( name, url,mode,isFolder,fanart='DefaultFolder.png', iconimage="DefaultFolder.png",plot=' ',all_w_trk='',all_w={},heb_name=' ',data=' ',year=' ',generes=' ',rating=' ',trailer=' ',watched='no',original_title=' ',id=' ',season=' ',episode=' ' ,eng_name=' ',show_original_year=' ',dates=' ',dd=' ',dont_place=False):
 
            added_pre=''
            if (episode!=' ' and episode!='%20' and episode!=None) :
             
              tv_show='tv'
            else:
                tv_show='movie'
            if '%' in str(episode):
                episode=' '
            if tv_show=='tv':
                ee=str(episode)
            else:
                ee=str(id)
            time_to_save_trk=int(Addon.getSetting("time_to_save"))