Example #1
0
def login():
    """Log in to pordede.com, solving the site's reCAPTCHA if needed.

    Returns:
        bool: True if a session is already active (the configured username
        appears in the landing page) or the login POST succeeds; False when
        the captcha dialog is dismissed or the login response does not
        contain the success marker.
    """
    url_origen = "http://www.pordede.com"
    data = httptools.downloadpage(url_origen).data
    # Already logged in: the stored username shows up in the page HTML.
    if config.get_setting("pordedeuser", "pordede") in data:
        return True

    # Scrape the reCAPTCHA site key and the anti-CSRF session token.
    # NOTE: raw string for the regex — '\s' in a plain literal is an invalid
    # escape and raises a DeprecationWarning on modern Python.
    key = scrapertools.find_single_match(data, 'data-sitekey="([^"]+)"')
    sess_check = scrapertools.find_single_match(data, r' SESS\s*=\s*"([^"]+)"')

    # Empty/falsy result means the user cancelled the captcha dialog.
    result = platformtools.show_recaptcha(key, url_origen)
    if result:
        # Credentials are concatenated unescaped — assumes they contain no
        # characters needing URL-encoding (TODO: confirm / urlencode them).
        post = "LoginForm[username]=" + config.get_setting(
            "pordedeuser",
            "pordede") + "&LoginForm[password]=" + config.get_setting(
                "pordedepassword", "pordede")
        post += "&LoginForm[verifyCode]=&g-recaptcha-response=%s&popup=1&sesscheck=%s" % (
            result, sess_check)

        headers = {"Referer": url_origen, "X-Requested-With": "XMLHttpRequest"}
        data = httptools.downloadpage("http://www.pordede.com/site/login",
                                      post,
                                      headers=headers,
                                      replace_headers=True).data
        # The site answers with a Spanish "login correct, entering" marker.
        if "Login correcto, entrando" in data:
            return True

    return False
Example #2
0
File: wstream.py  Project: myarchives/tes1
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve the direct video URLs for a Wstream page.

    Relies on module-level globals (`data`, `real_url`, and `headers`)
    that must already be populated by an earlier call in this module.

    Returns a list of [label, url] pairs, each url suffixed with
    '|' + urlencoded request headers; empty list if the page cannot be
    processed (e.g. missing captcha parameters).
    """
    def int_bckup_method():
        # Fallback flow: follow the 'bkg' backup link found in the page
        # and re-download it, posting the solved captcha token.
        # Note: `page_url` here is a new local, shadowing the outer parameter.
        global data, headers
        page_url = scrapertools.find_single_match(
            data,
            r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""")
        if page_url:
            data = httptools.downloadpage(page_url,
                                          headers=headers,
                                          follow_redirects=True,
                                          post={
                                              'g-recaptcha-response': captcha
                                          }).data

    def getSources(data):
        # Parse every JS `sources: [...]` array out of the page and append
        # [label, url|headers] entries to the enclosing `video_urls` list.
        possibileSources = scrapertools.find_multiple_matches(
            data, r'sources:\s*(\[[^\]]+\])')
        for data in possibileSources:
            try:
                # Quote bare JS object keys (e.g. src:) so json.loads accepts
                # them; the (?!/) lookahead keeps 'http://' colons untouched.
                data = re.sub('([A-z]+):(?!/)', '"\\1":', data)
                keys = json.loads(data)

                for key in keys:
                    video_urls.append([
                        '%s [%sp]' %
                        (key['type'].replace('video/', ''), key['label']),
                        key['src'].replace('https', 'http') + '|' + _headers
                    ])
            except:
                # Best-effort: malformed source blocks are silently skipped.
                pass

    logger.info("[Wstream] url=" + page_url)
    video_urls = []
    global data, real_url
    # logger.info(data)
    # Last data-sitekey on the page wins; empty string when absent.
    sitekey = scrapertools.find_multiple_matches(
        data, """data-sitekey=['"] *([^"']+)""")
    if sitekey: sitekey = sitekey[-1]
    captcha = platformtools.show_recaptcha(sitekey,
                                           page_url) if sitekey else ''

    # Collect hidden-form name/value pairs to replay as a POST.
    # NOTE(review): each match is a 2-tuple, so `possibleParam[0]` below is
    # truthy even when both groups are empty — verify this is intended
    # (compare the possibleParam[0][0] check used elsewhere in this module).
    possibleParam = scrapertools.find_multiple_matches(
        data,
        r"""<input.*?(?:name=["']([^'"]+).*?value=["']([^'"]*)['"]>|>)""")
    if possibleParam and possibleParam[0]:
        post = {param[0]: param[1] for param in possibleParam if param[0]}
        if captcha: post['g-recaptcha-response'] = captcha
        if post:
            data = httptools.downloadpage(real_url,
                                          headers=headers,
                                          post=post,
                                          follow_redirects=True).data
        elif captcha:
            int_bckup_method()
    elif captcha:
        int_bckup_method()
    else:
        # No form params and no captcha: tell the user and give up.
        platformtools.dialog_ok(config.get_localized_string(20000),
                                config.get_localized_string(707434))
        return []

    # Headers are appended to each media URL so the player replays them.
    headers.append(['Referer', real_url])
    _headers = urllib.urlencode(dict(headers))

    # Packed p,a,c,k,e,d JavaScript hides the sources; unpack if present.
    post_data = scrapertools.find_single_match(
        data,
        r"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>"
    )
    if post_data != "":
        from lib import jsunpack
        data = jsunpack.unpack(post_data)
        getSources(data)
    else:
        getSources(data)

    # Last resort: scrape any bare .mp4 links out of the page.
    if not video_urls:
        media_urls = scrapertools.find_multiple_matches(
            data, r'(http[^\s]*?\.mp4)')

        for media_url in media_urls:
            video_urls.append(
                ['video' + " mp4 [wstream] ", media_url + '|' + _headers])
    video_urls.sort(key=lambda x: x[0])
    return video_urls
Example #3
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the direct video URLs for a Wstream page (variant with
    m3u8/mpd handling and host-IP rewriting).

    Relies on module-level globals (`data`, `real_url`, and `headers`)
    populated by an earlier call in this module; `headers[1][1]` is used
    as the replacement hostname for the hard-coded IP/domain below.

    Returns a list of [label, url] pairs, each url suffixed with
    '|' + urlencoded request headers; empty list on failure.
    """

    def int_bckup_method():
        # Fallback flow: follow the 'bkg' backup link (or the form action)
        # and re-download it, posting the solved captcha token.
        # Note: `page_url` here is a new local, shadowing the outer parameter.
        global data,headers
        page_url = scrapertools.find_single_match(data, r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""")
        if not page_url:
            page_url = scrapertools.find_single_match(data, r"""<form action=['"]([^'"]+)['"]""")
        if page_url.startswith('/'):
            # Relative form action: anchor it to the canonical host.
            page_url = 'https://wstream.video' + page_url
        if page_url:
            data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, post={'g-recaptcha-response': captcha}, verify=False).data

    def getSources(data):
        # Parse every JS `sources: [...]` array and append entries to the
        # enclosing `video_urls`; handles dict entries (label/src/file),
        # plain-string entries, and dicts without a label.
        possibileSources = scrapertools.find_multiple_matches(data, r'sources:\s*(\[[^\]]+\])')
        for data in possibileSources:
            try:
                # Quote bare JS object keys so json.loads accepts them; the
                # (?!/) lookahead keeps 'http://' colons untouched.
                data = re.sub('([A-z]+):(?!/)', '"\\1":', data)
                keys = json.loads(data)
                for key in keys:
                    if 'label' in key:
                        # Dict entry with a quality label; normalize missing
                        # 'type'/'src' fields before building the label.
                        if not 'type' in key:
                            key['type'] = 'mp4'
                        if not 'src' in key and 'file' in key:
                            key['src'] = key['file']
                        video_urls.append(['%s [%s]' % (key['type'].replace('video/', ''), key['label']), key['src'].replace('https', 'http') + '|' + _headers])
                    elif type(key) != dict:
                        # Plain string URL: derive the label from the extension.
                        filetype = key.split('.')[-1]
                        if '?' in filetype: filetype = filetype.split('?')[0]
                        video_urls.append([filetype, key.replace('https', 'http') + '|' + _headers])
                    else:
                        # Dict without a label: fall back to the extension.
                        if not 'src' in key and 'file' in key: key['src'] = key['file']
                        if key['src'].split('.')[-1] == 'mpd': pass
                        video_urls.append([key['src'].split('.')[-1], key['src'].replace('https', 'http') + '|' + _headers])
            except:
                # Best-effort: malformed source blocks are silently skipped.
                pass

    logger.info("[Wstream] url=" + page_url)
    video_urls = []
    global data, real_url, headers

    # Last data-sitekey on the page wins; show the captcha against the
    # public hostname (headers[1][1]) rather than the raw IP/alt domain.
    sitekey = scrapertools.find_multiple_matches(data, """data-sitekey=['"] *([^"']+)""")
    if sitekey: sitekey = sitekey[-1]
    captcha = platformtools.show_recaptcha(sitekey, page_url.replace('116.202.226.34', headers[1][1]).replace('nored.icu', headers[1][1])) if sitekey else ''

    # Collect hidden-form name/value pairs to replay as a POST.
    # NOTE(review): raises IndexError when no <input>/'>' matches at all —
    # presumably any HTML page matches; confirm.
    possibleParam = scrapertools.find_multiple_matches(data,r"""<input.*?(?:name=["']([^'"]+).*?value=["']([^'"]*)['"]>|>)""")
    if possibleParam[0][0]:
        post = {param[0]: param[1] for param in possibleParam if param[0]}
        if captcha: post['g-recaptcha-response'] = captcha
        if post:
            data = httptools.downloadpage(real_url, headers=headers, post=post, follow_redirects=True, verify=False).data
        elif captcha:
            int_bckup_method()
    elif captcha or not sitekey:
        int_bckup_method()
    else:
        # No form params and no captcha token: tell the user and give up.
        platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(707434))
        return []

    # Headers are appended to each media URL so the player replays them.
    headers.append(['Referer', real_url.replace('116.202.226.34', headers[1][1]).replace('nored.icu', headers[1][1])])
    _headers = urllib.urlencode(dict(headers))

    # Packed p,a,c,k,e,d JavaScript hides the sources; unpack if present.
    post_data = scrapertools.find_single_match(data, r"<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
    if post_data != "":
        from lib import jsunpack
        data = jsunpack.unpack(post_data)
        getSources(data)
    else:
        getSources(data)

    # Last resort: scrape any bare .mp4/.m3u8 links out of the page.
    if not video_urls:
        media_urls = scrapertools.find_multiple_matches(data, r'(http[^\s]*?\.(?:mp4|m3u8))')

        for media_url in media_urls:
            video_urls.append([media_url.split('.')[-1] + " [Wstream] ", media_url + '|' + _headers])
    video_urls.sort(key=lambda x: x[0])
    return video_urls