def processCaptcha(self, key,lang):
        """Solve a Google reCAPTCHA via the no-JS /fallback endpoint.

        key:  the site's reCAPTCHA sitekey.
        lang: Accept-Language value sent with each request.

        Repeatedly fetches the fallback page, shows the challenge image to the
        user through cInputWindow, and posts the selected tiles back until the
        page yields a token textarea. Returns the token string ("" on failure).
        """
        headers=[("User-Agent", client.agent()),
                 ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                ("Referer", "https://www.google.com/recaptcha/api2/demo"),
                 ("Accept-Language", lang)];

        html=getUrl("http://www.google.com/recaptcha/api/fallback?k=" + key,headers=headers);
        token=""
        round=0
        while True:
            # Relative URL of the current challenge image payload.
            payload = re.findall("\"(/recaptcha/api2/payload[^\"]+)",html);
            round+=1
            # The instruction text; its absence means the challenge is finished.
            message =re.findall("<label .*?class=\"fbc-imageselect-message-text\">(.*?)</label>",html);
            if len(message)==0:
                message =re.findall("<div .*?class=\"fbc-imageselect-message-error\">(.*?)</div>",html)
            if len(message)==0:
                # No message left: the page contains the response token textarea.
                token = re.findall("\"this\\.select\\(\\)\">(.*?)</textarea>",html)[0];
                if not token=="":
                    line1 = "Captcha Sucessfull"
                    xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%('Liveresolver',line1, 3000, None))
                else:
                    line1 = "Captcha failed"
                    xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%('Liveresolver',line1, 3000, None))
                break
            else:
                message=message[0]
                payload=payload[0]


            imgurl=re.findall("name=\"c\"\\s+value=\\s*\"([^\"]+)",html)[0]

            headers=[("User-Agent", client.agent()),
                 ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                 ("Referer", "http://www.google.com/recaptcha/api/fallback?k=" + key),
                 ("Accept-Language", lang)];
               
            # Challenge id posted back together with the user's tile choices.
            cval=re.findall('name="c" value="(.*?)"',html)[0]
            captcha_imgurl = "https://www.google.com"+payload.replace('&amp;','&')
            
            #print message
            message=message.replace('<strong>','')
            message=message.replace('</strong>','')
            #captcha_response=raw_input('-->')
            
            # Ask the user which tiles match; comma-separated indices expected.
            oSolver = cInputWindow(captcha = captcha_imgurl,msg = message,round=round)
            captcha_response = oSolver.get()
            #print 'captcha_response',captcha_response
            if captcha_response=="":
                break
            responses=""
            for rr in captcha_response.split(','):
                responses += "&response=" + rr;
           
           
            html = getUrl("http://www.google.com/recaptcha/api/fallback?k="+key                               
                                    ,post=urllib.urlencode({'c' : cval,})+responses,headers=headers)#.decode('unicode-escape')
            #print html
        return token
def finder72(html,ref):
    """Extract a src: '<stream>' link from *html*; returns None when absent."""
    try:
        stream = re.findall('src\s*:\s*\'(.+?(?:.m3u8)?)\'',html)[0]
        tail = urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
        return '%s|%s' % (stream, tail)
    except:
        pass
def finder1(html,url):
    # Collect iframe / playStream / live-stream candidate links from *html*
    # and try to resolve each (in random order) into a playable URL.
    # Returns the resolved URL string, or None on any failure.
    global limit
    ref=url
    try:
        urls = re.findall('<i?frame.+?src=(?:\'|\")(.+?)(?:\'|\")',html)
        try:
            urls.append(re.findall("playStream\('iframe', '(.+?)'\)",html)[0])
        except: pass

        urls += re.findall('<a.+?href=[\'\"](/live-.+?stream.+?)[\'\"]',html)
        from random import shuffle
        shuffle(urls)
        for url in urls:
            if 'c4.zedo' in url:
                # known ad host - skip
                continue
            if "micast" in url or 'turbocast' in url:
                return finder47(html,ref)
            rr = resolve_it(url)
            if rr:
                return rr
            uri = manual_fix(url,ref)
            # Global safety valve against endless iframe recursion.
            if limit>=25:
                log("Exiting - iframe visit limit reached")
                return
            resolved = find_link(uri) 
            if resolved:
                break
        # NOTE(review): 'resolved' is unbound when urls is empty or nothing
        # resolved; the resulting NameError is swallowed by the outer except.
        headers = {'User-Agent': client.agent(), 'Referer': ref}
        if '.m3u8' in resolved and '|' not in resolved:
            headers.update({'X-Requested-With':'ShockwaveFlash/20.0.0.286', 'Host':urlparse.urlparse(resolved).netloc, 'Connection':'keep-alive'})
            resolved += '|%s' % urllib.urlencode(headers)
        return resolved
    except:
        return
def find_link(url, html=''):
    """Fetch *url* (unless *html* is supplied) and run every finder* function
    in this module over the page until one returns a resolved stream URL.

    url:  page to scan; may carry a ?referer= query parameter.
    html: pre-fetched page body; when '' the page is downloaded here.
    Returns the resolved URL string, or None when no finder matched.
    """
    global limit
    limit += 1
    log('Finding in : %s' % url)
    # Referer: honour an explicit ?referer= query param, otherwise fall back
    # to the root of the target host.
    try:
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
    except:
        referer = 'http://' + urlparse.urlparse(url).netloc
    host = urlparse.urlparse(url).netloc
    headers = {'Referer': referer, 'Host': host, 'User-Agent': client.agent(),
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Accept-Language': 'en-US,en;q=0.5'}

    if html == '':
        url = manual_url_fix(url)
        html = client.request(url, headers=headers)
        html = manual_html_fix(url, html, headers)

    ref = url
    # Direct lookup instead of eval(): same dispatch over module-level
    # finder* functions, without string execution, and skipping any
    # non-callable global whose name happens to contain 'finder'.
    for name, func in list(globals().items()):
        if 'finder' in name and callable(func):
            resolved = func(html, ref)
            if resolved:
                log('Resolved with %s: %s' % (name, resolved))
                return resolved

    return
# Exemple #5
def find_link(url, html=''):
    """Fetch *url* (unless *html* is supplied) and run every finder* function
    in this module over the page until one returns a resolved stream URL.

    url:  page to scan; may carry a ?referer= query parameter.
    html: pre-fetched page body; when '' the page is downloaded here.
    Returns the resolved URL string, or None when no finder matched.
    """
    global limit
    limit += 1
    log('Finding in : %s' % url)
    # Referer: honour an explicit ?referer= query param, otherwise fall back
    # to the root of the target host.
    try:
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
    except:
        referer = 'http://' + urlparse.urlparse(url).netloc
    host = urlparse.urlparse(url).netloc
    headers = {
        'Referer': referer,
        'Host': host,
        'User-Agent': client.agent(),
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5'
    }

    if html == '':
        url = manual_url_fix(url)
        html = client.request(url, headers=headers)
        html = manual_html_fix(url, html, headers)

    ref = url
    # Direct lookup instead of eval(): same dispatch over module-level
    # finder* functions, without string execution, and skipping any
    # non-callable global whose name happens to contain 'finder'.
    for name, func in list(globals().items()):
        if 'finder' in name and callable(func):
            resolved = func(html, ref)
            if resolved:
                log('Resolved with %s: %s' % (name, resolved))
                return resolved

    return
# Exemple #6
def getUrl(url, cookieJar=None, post=None, timeout=20, headers=None, noredir=False):
    """Fetch *url* through urllib2 with cookie handling.

    cookieJar: optional cookielib jar shared across calls.
    post:      optional body; a POST is issued when not None.
    headers:   iterable of (name, value) pairs added to the request.
    noredir:   when True, redirects are blocked via NoRedirection.
    Returns the raw response body.
    """
    handlers = [urllib2.HTTPCookieProcessor(cookieJar),
                urllib2.HTTPBasicAuthHandler(),
                urllib2.HTTPHandler()]
    if noredir:
        handlers.insert(0, NoRedirection)
    opener = urllib2.build_opener(*handlers)
    #opener = urllib2.install_opener(opener)

    request = urllib2.Request(url)
    request.add_header('User-Agent', client.agent())
    for name, value in (headers or []):
        request.add_header(name, value)

    response = opener.open(request, post, timeout=timeout)
    body = response.read()
    response.close()
    return body
# Exemple #7
def performCaptcha(sitename,
                   cj,
                   returnpage=True,
                   captcharegex='data-sitekey="(.*?)"',
                   lang="en",
                   headers=None):
    """Load *sitename*, solve its reCAPTCHA when a sitekey is present, and
    return either the page re-fetched with the solved token (returnpage=True)
    or the raw token string."""
    sitepage = getUrl(sitename, cookieJar=cj, headers=headers)
    keys = re.findall(captcharegex, sitepage)
    token = ""
    if keys:
        solver = UnCaptchaReCaptcha()
        token = solver.processCaptcha(keys[0], lang)
        if returnpage:
            if headers == None:
                headers = [("User-Agent", client.agent()),
                           ("Referer", sitename)]
            else:
                # Extends the caller's header list in place, as before.
                headers += [("Referer", sitename)]
            sitepage = getUrl(sitename,
                              cookieJar=cj,
                              post=urllib.urlencode(
                                  {"g-recaptcha-response": token}),
                              headers=headers)

    return sitepage if returnpage else token
def finder79(html,url):
    """Extract a playStream('hls', ...) URL and append playback headers."""
    try:
        ref = url
        stream = re.findall("playStream\('hls', '(.+?)'",html)[0]
        params = urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/19.0.0.245', 'Host':urlparse.urlparse(stream).netloc, 'Connection':'keep-alive','Accept':'*/*'})
        return '%s|%s' % (stream, params)
    except:
        return
def finder73(html,url):
    """Extract the source: '<url>' entry of a Player({...}) block with
    UA/Referer headers appended."""
    try:
        ref = url
        source = re.findall('Player\(\{\n\s*source\:\s*\'(.+?)\'\,',html)[0]
        return source + '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
    except:
        return
def finder75(html,url):
    """Decode a base64 window.atob('...') stream and append playback headers."""
    try:
        ref = url
        encoded = re.findall('file: window.atob\(\'(.+?)\'\),', html)[0]
        stream = base64.b64decode(encoded)
        # Host is derived from the decoded URL before the header suffix is added.
        stream += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/19.0.0.245', 'Host':urlparse.urlparse(stream).netloc, 'Connection':'keep-alive','Accept':'*/*'})
        return stream
    except:
        return
# Exemple #11
def finder72(html, ref):
    """Find a src: '<stream>' link in *html*; returns None when absent."""
    try:
        stream = re.findall('src\s*:\s*\'(.+?(?:.m3u8)?)\'', html)[0]
        suffix = urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
        return '%s|%s' % (stream, suffix)
    except:
        pass
def finder65(html,url):
    """Grab an mp4 source link; matches shorter than 10 chars are rejected."""
    try:
        referer = url
        link = re.findall('src=(?:\'|\")(.+?)(?:\'|\").+?type="video/mp4"',html)[0]
        if len(link) < 10:
            raise
        return link + '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': referer})
    except:
        return
# Exemple #13
def finder73(html, url):
    """Pull the source: '<url>' entry from a Player({...}) block and tag it
    with UA/Referer headers."""
    try:
        ref = url
        source = re.findall('Player\(\{\n\s*source\:\s*\'(.+?)\'\,', html)[0]
        suffix = urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
        return '%s|%s' % (source, suffix)
    except:
        return
def finder92(html,ref):
    """Extract an x-mpegURL <source> link; rtmp links get SWF player params,
    m3u8 links get HTTP playback headers."""
    try:
        stream = re.findall('source\s*src=\s*"\s*(.+?)\s*"\s*type=\s*"\s*application/x-mpegURL\s*"\s*/>',html)[0]
        if 'rtmp' in stream:
            stream += ' swfUrl=http://www.shadow-net.biz/javascript/videojs/flashls/video-js.swf flashver=WIN\\2020,0,0,286 live=true timeout=18 swfVfy=1 pageUrl=http://www.shadow-net.biz/'
        elif 'm3u8' in stream:
            stream += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/20.0.0.286', 'Host':urlparse.urlparse(stream).netloc, 'Connection':'keep-alive','Accept':'*/*'})
        return stream.replace('https://crossorigin.me/','')
    except:
        return
 def __init__(self, *args, **kwargs):
     """Initialise scraper state for fmovies.taxi (sitekey, cookies, UA)."""
     self.max_iter = 1
     # Hard-coded reCAPTCHA sitekey for the target site.
     self.sitekey = "6Lc50JwUAAAAAAVVXOTavycUhRtMGphLfi3sj0v6"
     self.lang = "en"
     self.baseUrl = "https://fmovies.taxi/"
     self.ajax = "user/ajax/menu-bar?ts=1556460000&_=740"
     self.captchaActive = False
     # Challenge id ('c' value) captured during captcha solving, if any.
     self.cval = None
     self.waf = "waf-verify"
     self.cookie = ''
     self.ua = client.agent(
     )  #"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0"
     self.headers = {'User-Agent': self.ua}
# Exemple #16
def get_url(url, data=None, timeout=20, headers=None):
    """urllib2 helper: form-encodes *data* and always sends a User-Agent.

    Note: an empty *data* dict still produces a (body-less) POST, matching
    the original behaviour.
    """
    headers = {} if headers is None else headers
    data = {} if data is None else data
    post_data = urllib.urlencode(data, doseq=True)
    headers.setdefault('User-Agent', client.agent())

    req = urllib2.Request(url)
    for name, value in headers.items():
        req.add_header(name, value)

    response = urllib2.urlopen(req, data=post_data, timeout=timeout)
    result = response.read()
    response.close()
    return result
# Exemple #17
def finder79(html, url):
    """Locate an HLS playStream link and attach Flash-style request headers."""
    try:
        ref = url
        hls = re.findall("playStream\('hls', '(.+?)'", html)[0]
        params = urllib.urlencode({
            'User-Agent': client.agent(),
            'Referer': ref,
            'X-Requested-With': 'ShockwaveFlash/19.0.0.245',
            'Host': urlparse.urlparse(hls).netloc,
            'Connection': 'keep-alive',
            'Accept': '*/*'
        })
        return '%s|%s' % (hls, params)
    except:
        return
# Exemple #18
def finder65(html, url):
    """Find an mp4 <source> link; implausibly short matches are rejected."""
    try:
        referer = url
        candidates = re.findall('src=(?:\'|\")(.+?)(?:\'|\").+?type="video/mp4"',
                                html)
        link = candidates[0]
        if len(link) < 10:
            raise
        suffix = urllib.urlencode({
            'User-Agent': client.agent(),
            'Referer': referer
        })
        return '%s|%s' % (link, suffix)
    except:
        return
# Exemple #19
def get_url(url, data=None, timeout=20, headers=None):
    """Fetch *url* via urllib2; *data* is form-encoded, UA is guaranteed."""
    headers = headers if headers is not None else {}
    data = data if data is not None else {}
    body = urllib.urlencode(data, doseq=True)
    if 'User-Agent' not in headers:
        headers['User-Agent'] = client.agent()

    request = urllib2.Request(url)
    for name in headers:
        request.add_header(name, headers[name])

    resp = urllib2.urlopen(request, data=body, timeout=timeout)
    content = resp.read()
    resp.close()
    return content
# Exemple #20
def finder75(html, url):
    """Decode a base64 window.atob('...') stream and append playback headers."""
    try:
        ref = url
        blob = re.findall('file: window.atob\(\'(.+?)\'\),', html)[0]
        file = base64.b64decode(blob)
        # Host derives from the decoded URL before the suffix is appended.
        params = urllib.urlencode({
            'User-Agent': client.agent(),
            'Referer': ref,
            'X-Requested-With': 'ShockwaveFlash/19.0.0.245',
            'Host': urlparse.urlparse(file).netloc,
            'Connection': 'keep-alive',
            'Accept': '*/*'
        })
        return '%s|%s' % (file, params)
    except:
        return
def finder4(html,url):
    """Extract a file: "..."/'...' link. Rejects png/flv junk, tags f4m with
    a referer query and m3u8 with playback headers."""
    ref = url
    try:
        hits = re.compile('file\s*:\s*"(.+?)"').findall(html)
        if hits:
            link = hits[0]
        else:
            link = re.compile("file\s*:\s*'(.+?)'").findall(html)[0]
        if '.png' in link or link == '.flv':
            return
        if '.f4m' in link:
            link = '%s?referer=%s' % (link, url)
        if '.m3u8' in link and '|' not in link:
            link += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/20.0.0.228', 'Host':urlparse.urlparse(link).netloc, 'Connection':'keep-alive','Accept':'*/*'})
        return link
    except:
        return
# Exemple #22
def request(url,
            post=None,
            headers=None,
            mobile=False,
            safe=False,
            timeout='30'):
    """Cloudflare-aware fetch: reuses a cached clearance cookie (168h TTL)
    and refreshes it once on a 503 before retrying the request.
    Returns the response body, or None on any failure."""
    try:
        # headers.update(headers) is a no-op that doubles as a dict probe;
        # any non-dict (including None) falls back to an empty dict.
        try:
            headers.update(headers)
        except:
            headers = {}

        if 'User-Agent' not in headers:
            headers['User-Agent'] = client.agent()

        parsed = urlparse.urlparse(url)
        base = '%s://%s' % (parsed.scheme, parsed.netloc)

        cookie = cache.get(cloudflare, 168, base, post, headers, mobile, safe,
                           timeout)
        result = client.request(url,
                                cookie=cookie,
                                post=post,
                                headers=headers,
                                mobile=mobile,
                                safe=safe,
                                timeout=timeout,
                                output='response',
                                error=True)

        if result[0] != '503':
            return result[1]

        # Challenge hit: force a fresh clearance cookie and retry once.
        cookie = cache.get(cloudflare, 0, base, post, headers, mobile, safe,
                           timeout)
        return client.request(url,
                              cookie=cookie,
                              post=post,
                              headers=headers,
                              mobile=mobile,
                              safe=safe,
                              timeout=timeout)
    except:
        return
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, noredir=False):
    """Cookie-aware urllib2 fetch; optional POST body and redirect blocking."""
    chain = [urllib2.HTTPCookieProcessor(cookieJar),
             urllib2.HTTPBasicAuthHandler(),
             urllib2.HTTPHandler()]
    if noredir:
        chain = [NoRedirection] + chain
    opener = urllib2.build_opener(*chain)
    #opener = urllib2.install_opener(opener)

    request = urllib2.Request(url)
    request.add_header('User-Agent', client.agent())
    if headers:
        for name, value in headers:
            request.add_header(name, value)

    response = opener.open(request, post, timeout=timeout)
    data = response.read()
    response.close()
    return data
# Exemple #24
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    """Fetch through a cached Cloudflare clearance cookie; on a 503 the cookie
    is regenerated and the request retried once. Returns None on failure."""
    try:
        # Dict probe: non-dict headers (incl. None) become an empty dict.
        try:
            headers.update(headers)
        except:
            headers = {}
        if 'User-Agent' not in headers:
            headers['User-Agent'] = client.agent()

        parts = urlparse.urlparse(url)
        base = '%s://%s' % (parts.scheme, parts.netloc)

        cookie = cache.get(cloudflare, 168, base, post, headers, mobile, safe, timeout)
        result = client.request(url, cookie=cookie, post=post, headers=headers,
                                mobile=mobile, safe=safe, timeout=timeout,
                                output='response', error=True)

        if result[0] == '503':
            cookie = cache.get(cloudflare, 0, base, post, headers, mobile, safe, timeout)
            return client.request(url, cookie=cookie, post=post, headers=headers,
                                  mobile=mobile, safe=safe, timeout=timeout)
        return result[1]
    except:
        return
# Exemple #25
def finder92(html, ref):
    """Extract an application/x-mpegURL <source> link; rtmp gets SWF player
    parameters, m3u8 gets HTTP playback headers."""
    try:
        link = re.findall(
            'source\s*src=\s*"\s*(.+?)\s*"\s*type=\s*"\s*application/x-mpegURL\s*"\s*/>',
            html)[0]
        if 'rtmp' in link:
            link += ' swfUrl=http://www.shadow-net.biz/javascript/videojs/flashls/video-js.swf flashver=WIN\\2020,0,0,286 live=true timeout=18 swfVfy=1 pageUrl=http://www.shadow-net.biz/'
        elif 'm3u8' in link:
            params = urllib.urlencode({
                'User-Agent': client.agent(),
                'Referer': ref,
                'X-Requested-With': 'ShockwaveFlash/20.0.0.286',
                'Host': urlparse.urlparse(link).netloc,
                'Connection': 'keep-alive',
                'Accept': '*/*'
            })
            link = '%s|%s' % (link, params)
        return link.replace('https://crossorigin.me/', '')
    except:
        return
def performCaptcha(sitename,cj,returnpage=True,captcharegex='data-sitekey="(.*?)"',lang="en",headers=None):
    """Fetch *sitename*, solve its reCAPTCHA if a sitekey is found, then
    return either the re-fetched page (returnpage=True) or the token."""
    sitepage = getUrl(sitename, cookieJar=cj, headers=headers)
    found = re.findall(captcharegex, sitepage)
    token = ""
    if found:
        solver = UnCaptchaReCaptcha()
        token = solver.processCaptcha(found[0], lang)
        if returnpage:
            if headers == None:
                headers = [("User-Agent", client.agent()),
                           ("Referer", sitename)]
            else:
                # Extends the caller-supplied list in place, as before.
                headers += [("Referer", sitename)]
            sitepage = getUrl(sitename, cookieJar=cj,
                              post=urllib.urlencode({"g-recaptcha-response": token}),
                              headers=headers)

    if returnpage:
        return sitepage
    return token
# Exemple #27
def finder1(html, url):
    # Collect iframe / playStream / live-stream candidate links from *html*
    # and try to resolve each (in random order) into a playable URL.
    # Returns the resolved URL string, or None on any failure.
    global limit
    ref = url
    try:
        urls = re.findall('<i?frame.+?src=(?:\'|\")(.+?)(?:\'|\")', html)
        try:
            urls.append(re.findall("playStream\('iframe', '(.+?)'\)", html)[0])
        except:
            pass

        urls += re.findall('<a.+?href=[\'\"](/live-.+?stream.+?)[\'\"]', html)
        from random import shuffle
        shuffle(urls)
        for url in urls:
            if 'c4.zedo' in url:
                # known ad host - skip
                continue
            if "micast" in url or 'turbocast' in url:
                return finder47(html, ref)
            rr = resolve_it(url)
            if rr:
                return rr
            uri = manual_fix(url, ref)
            # Global safety valve against endless iframe recursion.
            if limit >= 25:
                log("Exiting - iframe visit limit reached")
                return
            resolved = find_link(uri)
            if resolved:
                break
        # NOTE(review): 'resolved' is unbound when urls is empty or nothing
        # resolved; the resulting NameError is swallowed by the outer except.
        headers = {'User-Agent': client.agent(), 'Referer': ref}
        if '.m3u8' in resolved and '|' not in resolved:
            headers.update({
                'X-Requested-With': 'ShockwaveFlash/20.0.0.286',
                'Host': urlparse.urlparse(resolved).netloc,
                'Connection': 'keep-alive'
            })
            resolved += '|%s' % urllib.urlencode(headers)
        return resolved
    except:
        return
# Exemple #28
def finder4(html, url):
    """Extract a file: link (double- or single-quoted). Rejects png/flv junk,
    adds a referer to f4m links and playback headers to bare m3u8 links."""
    ref = url
    try:
        double_quoted = re.compile('file\s*:\s*"(.+?)"').findall(html)
        if double_quoted:
            link = double_quoted[0]
        else:
            link = re.compile("file\s*:\s*'(.+?)'").findall(html)[0]

        if '.png' in link or link == '.flv':
            return
        if '.f4m' in link:
            link = '%s?referer=%s' % (link, url)
        if '.m3u8' in link and '|' not in link:
            params = urllib.urlencode({
                'User-Agent': client.agent(),
                'Referer': ref,
                'X-Requested-With': 'ShockwaveFlash/20.0.0.228',
                'Host': urlparse.urlparse(link).netloc,
                'Connection': 'keep-alive',
                'Accept': '*/*'
            })
            link = '%s|%s' % (link, params)

        return link
    except:
        return
def resolve(link,ref):
    """Resolve a my.mail.ru embed link into a list of {quality: url|headers}
    dicts, using the video_key cookie issued by the JSON endpoint."""
    link = link.replace('https://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
    link = link.replace('html','json')
    jar = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar), urllib2.HTTPHandler())
    connection = opener.open(urllib2.Request(link))
    raw = connection.read()
    connection.close()
    meta = json.loads(raw)
    # The last cookie in the jar carries the playback authorisation key.
    # (NOTE: 'token' stays unbound if no cookie was set, as in the original.)
    for cookie in jar:
        token = cookie.value
    entries = []
    for video in meta['videos']:
        stream = video['url'] + '|%s' % (urllib.urlencode({'Cookie':'video_key=%s'%token, 'User-Agent':client.agent(), 'Referer':ref}))
        entries += [{video['key']: stream}]
    return entries
# Exemple #30
    def processCaptcha(self, key, lang):
        """Solve a Google reCAPTCHA via the no-JS /fallback endpoint.

        key:  the site's reCAPTCHA sitekey.
        lang: Accept-Language value sent with each request.

        Loops fetching the fallback page, presenting the challenge image to
        the user through cInputWindow, and posting the selected tiles back
        until a token textarea appears. Returns the token ("" on failure).
        """
        headers = [
            ("User-Agent", client.agent()),
            ("Accept",
             "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
             ), ("Referer", "https://www.google.com/recaptcha/api2/demo"),
            ("Accept-Language", lang)
        ]

        html = getUrl("http://www.google.com/recaptcha/api/fallback?k=" + key,
                      headers=headers)
        token = ""
        round = 0
        while True:
            # Relative URL of the current challenge image payload.
            payload = re.findall("\"(/recaptcha/api2/payload[^\"]+)", html)
            round += 1
            # Instruction text; its absence means the challenge is finished.
            message = re.findall(
                "<label .*?class=\"fbc-imageselect-message-text\">(.*?)</label>",
                html)
            if len(message) == 0:
                message = re.findall(
                    "<div .*?class=\"fbc-imageselect-message-error\">(.*?)</div>",
                    html)
            if len(message) == 0:
                # No message left: the page contains the response token.
                token = re.findall("\"this\\.select\\(\\)\">(.*?)</textarea>",
                                   html)[0]
                if not token == "":
                    line1 = "Captcha Sucessfull"
                    xbmc.executebuiltin('Notification(%s, %s, %d, %s)' %
                                        ('Liveresolver', line1, 3000, None))
                else:
                    line1 = "Captcha failed"
                    xbmc.executebuiltin('Notification(%s, %s, %d, %s)' %
                                        ('Liveresolver', line1, 3000, None))
                break
            else:
                message = message[0]
                payload = payload[0]

            imgurl = re.findall("name=\"c\"\\s+value=\\s*\"([^\"]+)", html)[0]

            headers = [
                ("User-Agent", client.agent()),
                ("Accept",
                 "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
                 ),
                ("Referer",
                 "http://www.google.com/recaptcha/api/fallback?k=" + key),
                ("Accept-Language", lang)
            ]

            # Challenge id posted back with the user's tile choices.
            cval = re.findall('name="c" value="(.*?)"', html)[0]
            captcha_imgurl = "https://www.google.com" + payload.replace(
                '&amp;', '&')

            #print message
            message = message.replace('<strong>', '')
            message = message.replace('</strong>', '')
            #captcha_response=raw_input('-->')

            # Ask the user which tiles match; comma-separated indices expected.
            oSolver = cInputWindow(captcha=captcha_imgurl,
                                   msg=message,
                                   round=round)
            captcha_response = oSolver.get()
            #print 'captcha_response',captcha_response
            if captcha_response == "":
                break
            responses = ""
            for rr in captcha_response.split(','):
                responses += "&response=" + rr

            html = getUrl("http://www.google.com/recaptcha/api/fallback?k=" +
                          key,
                          post=urllib.urlencode({
                              'c': cval,
                          }) + responses,
                          headers=headers)  #.decode('unicode-escape')
            #print html
        return token
def google(url):
    """Resolve a Google-hosted video URL (docs/drive, photos, picasaweb,
    plus) into a list of quality-tagged stream dicts via googletag(),
    ordered 1080p -> HD -> SD. Returns None on failure or no streams."""
    try:
        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            # Drive/Docs embed their streams in the "fmt_stream_map" blob.
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

            result = client.request(url,
                                    headers={'User-Agent': client.agent()})

            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]

            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i) for i in result], [])

        elif netloc == 'photos':
            # Photos: scrape the escaped url= parameters out of the page.
            result = client.request(url,
                                    headers={'User-Agent': client.agent()})

            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]

        elif netloc == 'picasaweb':
            # Picasa: pick the feed entry matching the #<id> fragment.
            id = re.compile('#(\d*)').findall(url)[0]

            result = client.request(url,
                                    headers={'User-Agent': client.agent()})

            result = re.search('feedPreload:\s*(.*}]}})},', result,
                               re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1:
                result = [
                    i for i in result if str(id) in i['link'][0]['href']
                ][0]
            elif len(result) == 1:
                result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i) for i in result], [])

        elif netloc == 'plus':
            # Google+: slice the page section belonging to this post id.
            result = client.request(url,
                                    headers={'User-Agent': client.agent()})

            id = (urlparse.urlparse(url).path).split('/')[-1]
            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = result.split('"%s"' % id)[-1].split(']]')[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]

        # Re-use of 'url' as the output list of streams, best quality first.
        url = []
        try:
            url += [[i for i in result if i['quality'] == '1080p'][0]]
        except:
            pass
        try:
            url += [[i for i in result if i['quality'] == 'HD'][0]]
        except:
            pass
        try:
            url += [[i for i in result if i['quality'] == 'SD'][0]]
        except:
            pass

        if url == []: return
        return url
    except:
        return
def google(url):
    """Resolve a Google-hosted video URL (docs/drive, photos, picasaweb,
    plus) into quality-tagged stream dicts via googletag(), best first.
    Returns None on failure or when no stream is found."""
    try:
        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]



        if netloc == 'docs' or netloc == 'drive':
            # Drive/Docs embed their streams in the "fmt_stream_map" blob.
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

            result = client.request(url, headers={'User-Agent': client.agent()})

            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]

            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i) for i in result], [])



        elif netloc == 'photos':
            # Photos: scrape the escaped url= parameters out of the page.
            result = client.request(url, headers={'User-Agent': client.agent()})

            result = result.replace('\r','').replace('\n','').replace('\t','')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d','=').replace('\\u0026','&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]



        elif netloc == 'picasaweb':
            # Picasa: pick the feed entry matching the #<id> fragment.
            id = re.compile('#(\d*)').findall(url)[0]

            result = client.request(url, headers={'User-Agent': client.agent()})

            result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1: result = [i for i in result if str(id) in i['link'][0]['href']][0]
            elif len(result) == 1: result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i) for i in result], [])



        elif netloc == 'plus':
            # Google+: slice the page section belonging to this post id.
            result = client.request(url, headers={'User-Agent': client.agent()})

            id = (urlparse.urlparse(url).path).split('/')[-1]
            result = result.replace('\r','').replace('\n','').replace('\t','')
            result = result.split('"%s"' % id)[-1].split(']]')[0]

            result = result.replace('\\u003d','=').replace('\\u0026','&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]



        # Re-use of 'url' as the output list of streams, best quality first.
        url = []
        try: url += [[i for i in result if i['quality'] == '1080p'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'HD'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'SD'][0]]
        except: pass

        if url == []: return
        return url
    except:
        return
# Exemple #33
def links(url,base,img,icon):
    """Scrape a page for embedded video links from known hosts.

    Parameters:
        url  -- page URL; joined with ``base`` when it is relative
        base -- site root used to absolutise ``url``
        img  -- unused, kept for call-site compatibility
        icon -- icon name passed to ``control.icon_path`` for each entry

    Returns a list of ``(title, url, icon_path)`` tuples, one per unique
    link found, suitable for building a directory listing.

    Bug fixed: the GVIDEO 360p title used ``'GVIDEO video 360p' % i`` —
    a format with no placeholder — which raised TypeError whenever a
    360p Google video was present and aborted the whole scrape.
    """
    if base not in url:
        url = base + url
    ref = url  # original page URL, reused as Referer for some hosts
    out = []
    html = client.request(url)

    html = convert.unescape(html.decode('utf-8'))
    # One findall per supported host; patterns are deliberately loose.
    dailys = re.findall('src=[\"\'](//(?:www.)?dailymotion.com/embed/video/[^\"\']+)[\"\']',html)
    vks = re.findall('src=[\"\'](//(?:www.)?vk.com/video_ext.php[^\"\']+)[\"\']',html)
    gvid720 = re.findall('src=[\"\'](https?://.+?google.+?/[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']720p[\"\']',html)
    gvid360 = re.findall('src=[\"\'](https?://.+?google.+?[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']360p[\"\']',html)
    mailru = re.findall('(https?://(?:www.)?videoapi.my.mail.ru/videos/[^\"\']+)[\"\']',html)
    opnld = re.findall('(https?://(?:www.)?openload.co/[^\"\']+)[\"\']',html)
    uptstrm = re.findall('(https?://(?:www(?:[\d+])?.)?uptostream.com[^\"\']+)[\"\']',html)
    veevr = re.findall('(https?://(?:www.)?veevr.com[^\"\']+)[\"\']',html)
    plywr = re.findall('(//config.playwire.com/[^\"\']+)[\"\']',html)
    speedvideo = re.findall('(https?://(?:www.)?speedvideo.net/[^\"\']+)[\"\']',html)
    videowood = re.findall('(https?://(?:www.)?videowood.tv/video/[^\"\']+)[\"\']',html)
    vshare = re.findall('(https?://(?:www.)?vshare.io/[^\"\']+)[\"\']',html)
    youtube = re.findall('(https?://(?:www.)?youtu(?:be)?.(?:be|com)/embed/[^\"\']+)[\"\']',html)
    filehoot = re.findall('(https?://(?:www.)?filehoot.com[^\"\']+)[\"\']',html)
    torrent = re.findall('(https?://(?:www.)?userscloud.com[^\"\']+)[\"\']',html)
    urls = []  # dedup list: every url appended to ``out`` is recorded here

    i = 0
    for v in plywr:
        i+=1
        title = 'Playwire video %s'%i
        url = v
        if url not in urls:
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in veevr:
        i+=1
        url = v
        # Veevr embeds are resolved immediately; each resolves to
        # several (url, quality-label) pairs.
        from resources.lib.resolvers import veevr
        urlx = veevr.resolve(url)

        for url in urlx:
            if url[0] not in urls:
                title = 'Veevr video %s'%url[1].replace('<sup>HD</sup>','')
                out.append((title,url[0],control.icon_path(icon)))
                urls.append(url[0])

    i = 0
    for v in uptstrm:
        # Uptostream also resolves to several (url, quality) pairs.
        from resources.lib.resolvers import uptostream
        urlx =  uptostream.resolve(v)
        i+=1
        for u in urlx:
            q = u[1]
            title = 'Uptostream video n.%s %s'%(i,q)
            url = u[0]
            if url not in urls:
                out.append((title,url,control.icon_path(icon)))
                urls.append(url)

    # NOTE: this loop numbers only unique entries (counter starts at 1
    # and increments inside the dedup check), unlike most loops below.
    i = 1
    for v in dailys:

        title = 'Dailymotion video %s'%i
        url = v
        if url not in urls:
            i+=1
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in vks:
        i+=1
        title = 'VK.com video %s'%i
        url = v
        if url not in urls:
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in gvid720:
        i+=1
        title = 'GVIDEO video %s 720p'%i
        url = v
        if url not in urls:
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in gvid360:
        i+=1
        # FIX: was 'GVIDEO video 360p'%i (no placeholder -> TypeError)
        title = 'GVIDEO video %s 360p'%i
        url = v
        if url not in urls:
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 1
    for v in opnld:
        # Openload: turn the /embed/ URL into the direct /f/ page.
        title = 'Openload video %s'%i
        url = v.replace('/embed/','/f/').rstrip('/')
        if url not in urls:
            i+=1
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in speedvideo:
        i+=1
        title = 'Speedvideo video %s'%i
        url = v
        if url not in urls:
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)
    i = 0
    for v in videowood:
        i+=1
        title = 'Videowood video %s'%i
        url = v
        if url not in urls:
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)


    i = 0
    for v in vshare:
        i+=1
        url = v
        title = 'vshare.io video %s'%i
        if url not in urls:

            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in youtube:
        i+=1
        url = v
        title = 'YouTube video %s'%i
        if url not in urls:

            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in filehoot:
        i+=1
        url = v
        title = 'Filehoot video %s'%i
        if url not in urls:

            out.append((title,url,control.icon_path(icon)))
            urls.append(url)

    i = 0
    for v in torrent:
        i+=1
        url = v
        title = 'Torrent video %s'%i
        if url not in urls:
            # Downstream resolver needs the source page as referer.
            url = url + '?referer=%s'%ref
            out.append((title,url,control.icon_path(icon)))
            urls.append(url)


    i = 0
    for v in mailru:
        link = v
        i+=1
        title = 'Mail.ru video %s'%i
        # Rewrite the embed URL to the JSON metadata endpoint.
        link = link.replace('https://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
        link = link.replace('html','json')
        cookieJar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
        conn = urllib2.Request(link)
        connection = opener.open(conn)
        f = connection.read()
        connection.close()
        js = json.loads(f)
        # The last cookie set by the API carries the video_key token
        # required to play the stream.
        for cookie in cookieJar:
            token = cookie.value
        js = js['videos']
        for x in js:
            url = x['url'] + '|%s'%(urllib.urlencode({'Cookie':'video_key=%s'%token, 'User-Agent':client.agent(), 'Referer':ref} ))
            title = 'Mail.ru video ' + x['key']
            if url not in urls:
                out.append((title,url,control.icon_path(icon)))
                urls.append(url)
    log(out)
    return out
Exemple #34
0
def google(url):
    """Resolve a Google-hosted video URL (docs/drive, photos, picasaweb,
    plus) into a quality-sorted list of playable stream dicts.

    Each returned dict comes from ``googletag`` and has its 'url' value
    suffixed with urlencoded request headers; returns None on any
    failure or when nothing playable is found.
    """
    try:
        # Normalise youtube-style / docid= links to a drive file URL.
        if any(marker in url for marker in ['youtube.', 'docid=']):
            doc_id = re.compile('docid=([\w-]+)').findall(url)[0]
            url = 'https://drive.google.com/file/d/%s/view' % doc_id

        # Host prefix before ".google" selects the parsing strategy.
        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc in ('docs', 'drive'):
            # Drop any /preview suffix and force the docs host.
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

        headers = {'User-Agent': client.agent()}

        response = client.request(url, output='extended', headers=headers)

        try:
            # Carry the session cookie into the playback headers.
            headers['Cookie'] = response[2]['Set-Cookie']
        except:
            pass

        page = response[0]

        if netloc in ('docs', 'drive'):
            # fmt_stream_map is a quoted, comma-separated itag|url list.
            fmt = re.compile('"fmt_stream_map",(".+?")').findall(page)[0]
            fmt = json.loads(fmt)
            stream_urls = [entry.split('|')[-1] for entry in fmt.split(',')]
            streams = sum(
                [googletag(u, append_height=True) for u in stream_urls], [])

        elif netloc == 'photos':
            flat = page.replace('\r', '').replace('\n', '').replace('\t', '')
            blob = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(flat)[0]

            blob = blob.replace('\\u003d', '=').replace('\\u0026', '&')
            stream_urls = [urllib.unquote(u)
                           for u in re.compile('url=(.+?)&').findall(blob)]

            streams = sum(
                [googletag(u, append_height=True) for u in stream_urls], [])

        elif netloc == 'picasaweb':
            album_id = re.compile('#(\d*)').findall(url)[0]

            feed = re.search('feedPreload:\s*(.*}]}})},', page,
                             re.DOTALL).group(1)
            entries = json.loads(feed)['feed']['entry']

            # Pick the entry whose link matches the fragment id, when
            # more than one entry is present.
            if len(entries) > 1:
                entry = [e for e in entries
                         if str(album_id) in e['link'][0]['href']][0]
            elif len(entries) == 1:
                entry = entries[0]

            media = entry['media']['content']
            stream_urls = [m['url'] for m in media if 'video' in m['type']]
            streams = sum(
                [googletag(u, append_height=True) for u in stream_urls], [])

        elif netloc == 'plus':
            post_id = (urlparse.urlparse(url).path).split('/')[-1]

            flat = page.replace('\r', '').replace('\n', '').replace('\t', '')
            blob = flat.split('"%s"' % post_id)[-1].split(']]')[0]

            blob = blob.replace('\\u003d', '=').replace('\\u0026', '&')
            stream_urls = [urllib.unquote(u)
                           for u in re.compile('url=(.+?)&').findall(blob)]

            streams = sum(
                [googletag(u, append_height=True) for u in stream_urls], [])

        streams = sorted(streams,
                         key=lambda s: s.get('height', 0), reverse=True)

        # Keep at most one stream per quality tier, best tier first.
        picks = []
        for quality in ['4K', '1440p', '1080p', 'HD', 'SD']:
            try:
                picks += [[s for s in streams
                           if s.get('quality') == quality][0]]
            except:
                pass

        for item in picks:
            item.pop('height', None)
            item.update(
                {'url': item['url'] + '|%s' % urllib.urlencode(headers)})

        if not picks: return
        return picks
    except:
        return