def __extract_swf_player(url, content, referer=None):
    domain = __extract_js_var(content, r"flashvars\.domain")
    assert domain != "undefined"

    key = __extract_js_var(content, r"flashvars\.filekey")
    filename = __extract_js_var(content, r"flashvars\.file")
    cid, cid2, cid3 = ("undefined", "undefined", "undefined")
    user, password = ("undefined", "undefined")

    data = {
        'key': key,
        'file': filename,
        'cid': cid,
        'cid2': cid2,
        'cid3': cid3,
        'pass': password,
        'user': user,
        'numOfErrors': "0"
    }
    token_url = "%s/api/player.api.php?%s" % (domain, urllib.urlencode(data))

    video_info_res = http.send_request(http.add_referer_url(token_url, url),
                                       set_request=__set_flash(url))
    video_info = dict(urlparse.parse_qsl(video_info_res.text))

    if "error_msg" in video_info:
        print "[*] Error Message: %s" % video_info["error_msg"]
    if "url" not in video_info:
        return None
    return video_info['url']
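
# Hedged sketch (not from the source): the player.api.php endpoint above
# answers with a querystring-style body, which is why the extractor feeds
# it to urlparse.parse_qsl. The sample body below is hypothetical.
def _demo_parse_player_api_response():
    import urlparse  # Python 2 module, the same one the extractor uses
    sample = "url=http://cdn.example.com/video.flv&title=demo"
    video_info = dict(urlparse.parse_qsl(sample))
    return video_info.get("url")  # -> "http://cdn.example.com/video.flv"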
def __extract_9anime(url, page_content):
    episode_id = url.rsplit('/', 1)[1]
    url_info = urlparse.urlparse(url)
    domain = url_info.netloc
    scheme = url_info.scheme

    url_base = "%s://%s" % (scheme, domain)
    url = "%s/ajax/episode/info?id=%s&update=0" % (url_base, episode_id)
    set_request = NineAnimeTokenDecoder.set_request("%s/token?v1" % url_base,
                                                    http.send_request)

    time.sleep(1)
    urlRequest = http.send_request(url, set_request=set_request)

    grabInfo = json.loads(urlRequest.text)
    if 'error' in grabInfo:
        raise Exception('error while trying to fetch info: %s' %
                        grabInfo['error'])
    if grabInfo['type'] == 'iframe':
        target = grabInfo['target']
        if target.startswith('//'):
            target = "%s:%s" % (url_info.scheme, target)

        return load_video_from_url(target)
    elif grabInfo['type'] == 'direct':
        return __9anime_extract_direct(url, grabInfo)

    raise Exception('Unknown case, please report')
Example #3
def __extract_9anime(url, page_content, referer=None):
    episode_id = url.rsplit('/', 1)[1]
    url_info = urlparse.urlparse(url)
    domain = url_info.netloc
    scheme = url_info.scheme

    url_base = "%s://%s" % (scheme, domain)

    ts_value = NineAnimeUrlExtender.get_ts_value(page_content)
    server_id = NineAnimeUrlExtender.get_server_value(page_content)
    extra_param = NineAnimeUrlExtender.get_extra_url_parameter(
        episode_id, server_id, ts_value)
    ep_info_url = "%s/ajax/episode/info?ts=%s&_=%d&id=%s&server=%d" % \
        (url_base, ts_value, extra_param, episode_id, server_id)

    time.sleep(0.3)
    urlRequest = http.send_request(ep_info_url)
    grabInfo = json.loads(urlRequest.text)
    grabInfo = NineAnimeUrlExtender.decode_info(grabInfo)
    if 'error' in grabInfo:
        raise Exception('error while trying to fetch info: %s' %
                        grabInfo['error'])
    if grabInfo['type'] == 'iframe':
        target = grabInfo['target']
        if target.startswith('//'):
            target = "%s:%s" % (url_info.scheme, target)

        return load_video_from_url(http.add_referer_url(target, url))
    elif grabInfo['type'] == 'direct':
        return __9anime_extract_direct(url, grabInfo)

    raise Exception('Unknown case, please report')
def load_video_from_url(in_url):
    found_extractor = None

    for extractor in _EMBED_EXTRACTORS:
        if in_url.startswith(extractor):
            found_extractor = _EMBED_EXTRACTORS[extractor]
            break

    if found_extractor is None:
        print "[*E*] No extractor found for %s" % in_url
        return None

    try:
        if found_extractor['preloader'] is not None:
            print "Modifying Url: %s" % in_url
            in_url = found_extractor['preloader'](in_url)

        print "Probing source: %s" % in_url
        reqObj = http.send_request(in_url)
        return found_extractor['parser'](http.raw_url(reqObj.url),
                                         reqObj.text,
                                         http.get_referer(in_url))
    except http.URLError:
        return None  # Dead link, skip result
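
# Hedged sketch (an assumption, not from the source): the dispatch above
# implies _EMBED_EXTRACTORS maps a URL prefix to a dict holding an optional
# 'preloader' and a 'parser' callable, roughly:
# _EMBED_EXTRACTORS = {
#     "https://mcloud.to": {"preloader": None, "parser": __extract_mycloud},
#     "http://www.mp4upload.com": {"preloader": None,
#                                  "parser": __extract_swf_player},
# }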
def __extract_mycloud(url, content, referer=None):
    fixHttp = lambda x: ("https://" + x) if not x.startswith("http") else x
    strip_res = lambda x: x.split("/")[-1].split(".")[0]
    formatUrls = lambda x: (strip_res(x), http.add_referer_url(x, url))

    # Get m3u of all res
    m3u_list = re.findall(r"\"file\"\s*:\s*\"\/*(.+)\"", content)
    m3u_list = map(fixHttp, m3u_list)
    m3u_list = m3u_list[0]

    # Read and parse all res
    playlist_req = http.send_request(http.add_referer_url(m3u_list, url))
    if playlist_req.status_code != 200:
        raise Exception("Error from server %d" % playlist_req.status_code)

    playlist_content = playlist_req.text
    playlist_content = map(lambda x: x.strip(), playlist_content.split("\n"))
    playlist_content = filter(lambda x: not x.startswith("#") and len(x),
                              playlist_content)

    # Build source urls
    sources = map(lambda x: __relative_url(m3u_list, x), playlist_content)
    sources = map(formatUrls, sources)

    return __check_video_list(url, sources, True, True)
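
# Hedged sketch (hypothetical embed markup): the "file" regex above targets
# a JWPlayer-style source declaration in the embed page, e.g.
#   "file": "//mcloud.example/hls/abc123/playlist.m3u8"
# The leading slashes are consumed by \/* before the capture group, and
# fixHttp then restores the https scheme.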
Example #8
def __wrapper_add_token(url, data):
    token, cb = data

    def inject_token(req):
        req.add_header("Authorization", "Bearer {}".format(token))
        return req

    response = http.send_request(url, set_request=inject_token)
    return cb(url, response.text)
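
# Hedged usage sketch (names and URL are hypothetical, not from the source):
# __wrapper_add_token expects a (token, callback) pair and hands the
# authenticated response body to the callback.
# def _handle_profile(url, body):
#     return json.loads(body)
#
# profile = __wrapper_add_token("https://api.example.com/v1/me",
#                               ("my-api-token", _handle_profile))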
Example #9
    def export(self):
        """Export the stack in Tutum.

        :returns: bool -- whether or not the operation succeeded
        :raises: TutumApiError
        """
        if not self._detail_uri:
            raise TutumApiError("You must save the object before performing this operation")
        url = "/".join([self._detail_uri, "export"])
        return send_request("GET", url, inject_header=False)
Example #10
def _HLS_HOOK(item):
    r = http.send_request(item.getPath())
    if r.ok:
        lines = r.text.split('\n')
        for index, line in enumerate(lines):
            # On the line below you can change the number to one of: 240, 360, 480, 720, 1080.
            # Keep the 'x' and the comma as they are.
            if 'x480,' in line:
                item.setPath(lines[index + 1])
                return item
    return item
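
# Hedged sketch of the playlist shape the hook above assumes (hypothetical
# contents): in an HLS master playlist the variant URI sits on the line
# after its #EXT-X-STREAM-INF tag, which is why lines[index+1] is the path.
# #EXT-X-STREAM-INF:BANDWIDTH=1400000,RESOLUTION=852x480,...  <- matched via 'x480,'
# https://cdn.example.com/video/480p.m3u8                     <- returned as the new path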
def __extract_mycloud(url, content):
    playlist_url = re.findall(r"\"file\"\s*:\s*\"\/*(.+)\"", content)[0]
    if not playlist_url.startswith("http"):
        playlist_url = "https://" + playlist_url

    joinUrls = lambda x: (x[0], urlparse.urljoin(playlist_url, x[1]).rstrip())
    playlist_content = http.send_request(playlist_url,
                                         set_request=__set_referer(url)).text
    playlist_entries = re.findall(r"=\d*x(\d*)\n*([^#]*)\n*#?",
                                  playlist_content)
    playlist_entries_full = map(joinUrls, playlist_entries)
    return playlist_entries_full
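
# Hedged sketch (hypothetical playlist text): the regex above pairs the
# vertical resolution from RESOLUTION=WxH with the URI on the next line.
# Against a master playlist containing
#   #EXT-X-STREAM-INF:RESOLUTION=1280x720
#   720/index.m3u8
# it yields roughly [('720', '720/index.m3u8\n')]; joinUrls then resolves
# the URI against playlist_url and strips the trailing whitespace.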
Example #12
    def playSource():
        reqObj = http.send_request(http.add_referer_url(raw_url, ""))
        if reqObj.status_code != 200:
            raise Exception("Error from server %d" % reqObj.status_code)

        results = VIDEO_RE.findall(reqObj.text)
        if not results:
            results = VIDEO_RE_NEW.findall(reqObj.text)
        if not results:
            raise Exception("Unable to find source")

        return results[0]
    def playSource():
        reqObj = http.send_request(http.add_referer_url(raw_url, referer))
        if reqObj.status_code != 200:
            raise Exception("Error from server %d" % reqObj.status_code)

        results = VIDEO_RE.findall(reqObj.text)
        if not results:
            results = VIDEO_RE_NEW.findall(reqObj.text)
        if not results:
            raise Exception("Unable to find source")

        return results[0]
def __9anime_extract_direct(refer_url, grabInfo):
    url = "%s?%s" % (grabInfo['grabber'], urllib.urlencode(grabInfo['params']))
    url = __relative_url(refer_url, url)
    resp = json.loads(http.send_request(url,
                                        set_request=__set_referer(refer_url)).text)
    if 'data' not in resp:
        raise Exception('Failed to parse 9anime direct with error: %s' %
                        (resp['error'] if 'error' in resp else 'No-Error-Set'))

    if resp.get('error'):
        print '[*E*] Failed to parse 9anime direct but got data with error: %s' % resp['error']

    return __check_video_list(refer_url, map(lambda x: (x['label'], x['file']), resp['data']))
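
# Hedged sketch (an assumption, not from the source): the 'direct' grabInfo
# consumed above is expected to look roughly like
# {'type': 'direct',
#  'grabber': 'https://9anime.example/grabber-api/',
#  'params': {'id': '...', 'token': '...', 'options': '...'}}
# with the grabber endpoint answering
# {'data': [{'label': '720p', 'file': 'https://...'}], 'error': None}.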
def __extract_9anime(url, page_content):
    episode_id = url.rsplit('/', 1)[1]
    domain = urlparse.urlparse(url).netloc
    url = "http://%s/ajax/episode/info?id=%s&update=0" % (domain, episode_id)
    grabInfo = json.loads(http.send_request(url).text)
    if 'error' in grabInfo:
        raise Exception('error while trying to fetch info: %s' %
                        grabInfo['error'])
    if grabInfo['type'] == 'iframe':
        return load_video_from_url(grabInfo['target'])
    elif grabInfo['type'] == 'direct':
        return __9anime_extract_direct(url, grabInfo)

    raise Exception('Unknown case, please report')
def __extract_9anime(url, page_content, referer=None):
    url_info = urlparse.urlparse(url)
    episode_id = url_info.path.rsplit('/', 1)[-1]
    qs = dict(urlparse.parse_qsl(url_info.query))
    domain = url_info.netloc
    scheme = url_info.scheme

    url_base = "%s://%s" % (scheme, domain)

    ts_value = NineAnimeUrlExtender.get_ts_value(page_content)
    if not qs.has_key("server_id"):
        raise Exception('missing server id')

    server_id = int(qs["server_id"])

    #extra_param = NineAnimeUrlExtender.get_extra_url_parameter(episode_id, server_id, ts_value)
    extra_param = _9ANIME_EXTRA_PARAM

    tryNo = 0
    ep_info_url = "%s/ajax/episode/info?ts=%s&_=%s&id=%s&server=%d" % \
        (url_base, ts_value, extra_param, episode_id, server_id)

    while True:
        time.sleep(0.3)
        urlRequest = http.send_request(ep_info_url)
        grabInfo = json.loads(urlRequest.text)
        grabInfo = NineAnimeUrlExtender.decode_info(grabInfo)
        if 'error' in grabInfo:
            if tryNo < 2:
                tryNo += 1
                retry = "true" if tryNo == 2 else "false"
                ep_info_url = __9anime_retry(episode_id, server_id, retry)
                continue

            raise Exception('error while trying to fetch info: %s' %
                            grabInfo['error'])

        break

    if grabInfo['type'] == 'iframe':
        target = grabInfo['target']
        if target.startswith('//'):
            target = "%s:%s" % (url_info.scheme, target)

        return load_video_from_url(http.add_referer_url(target, url))
    elif grabInfo['type'] == 'direct':
        return __9anime_extract_direct(url, grabInfo)

    raise Exception('Unknown case, please report')
def __9anime_retry(ep_id, server_id, retry):
    reqUrl = "https://projectman.ga/api/url?id=%s&server=%s&retry=%s" % \
    (ep_id, server_id, retry)
    response = json.loads(http.send_request(reqUrl).text)
    result_url = response["results"]

    # Store for next
    try:
        url_info = urlparse.urlparse(result_url)
        arguments = dict(urlparse.parse_qsl(url_info.query))
        set_9anime_extra(arguments["_"])
    except Exception:
        print "[*E*] retry store failed"

    return result_url
def load_video_from_url(in_url):
    try:
        print "Probing source: %s" % in_url
        reqObj = http.send_request(in_url)
        page_content = reqObj.text
        url = reqObj.url
    except http.URLError:
        return None  # Dead link, skip result

    for extractor in _EMBED_EXTRACTORS:
        if in_url.startswith(extractor):
            return _EMBED_EXTRACTORS[extractor](url, page_content)
    print "[*E*] No extractor found for %s" % url
    return None
def __9anime_extract_direct(refer_url, grabInfo):
    # grabInfo['grabber'] sometimes ends with /?server=23 so we have to concat with & instead of ?
    url_parameter_concat = "&" if "?" in grabInfo['grabber'] else "?"
    url = "%s%s%s" % (grabInfo['grabber'], url_parameter_concat, urllib.urlencode(grabInfo['params']))
    url = __relative_url(refer_url, url)
    resp = json.loads(http.send_request(http.add_referer_url(url, refer_url)).text)

    possible_error = resp['error'] if 'error' in resp else 'No-Error-Set'
    if 'data' not in resp:
        if possible_error == 'deleted':
            return None
        raise Exception('Failed to parse 9anime direct with error: %s' %
                        possible_error)

    if resp.get('error'):
        print '[*E*] Failed to parse 9anime direct but got data with error: %s' % resp['error']

    return __check_video_list(refer_url, map(lambda x: (x['label'], x['file']), resp['data']))
Example #23
def get_auth(username, password):
    """Returns the user's Username and ApiKey, or raises an exception if username/password incorrect

    :param username: The username/email of the user to authenticate
    :type username: str
    :param password: The password of the user to authenticate
    :type password: str
    :raises: TutumAuthError
    :returns: str, str -- the Username, ApiKey to use for the given username/email
    """
    auth = HTTPBasicAuth(username, password)
    json = send_request("GET", "/auth", auth=auth)
    user = username
    apikey = None
    if json:
        objects = json.get('objects', None)
        if objects and len(objects) > 0:
            user = objects[0].get('username', username)
            apikey = objects[0].get('key')
    return user, apikey
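
# Hedged usage sketch (placeholder credentials, not from the source):
# user, apikey = get_auth("user@example.com", "s3cret")
# if apikey is None:
#     print "[*E*] authenticated but no ApiKey returned"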
def load_video_from_url(in_url):

    settings = xbmcaddon.Addon(id='plugin.video.9anime')
    sortres = settings.getSetting("sortres")
    serverchoice = settings.getSetting("serverchoice")

    try:
        print "Probing source: %s" % in_url
        reqObj = http.send_request(in_url)
        page_content = reqObj.text
        url = reqObj.url
    except http.URLError:
        return None  # Dead link, skip result

    # Thanks to githubus11 for sorting function - unfortunately I can't get it working with OpenLoad involved
    try:
        for extractor in _EMBED_EXTRACTORS:
            if in_url.startswith(extractor):
                extract_result = _EMBED_EXTRACTORS[extractor](url,
                                                              page_content)
                if serverchoice == "OpenLoad" or serverchoice == "All":  # Skips Sorting function if OpenLoad needs to be included
                    sorted_result = extract_result
                elif "Best" in sortres:
                    sorted_result = sorted(extract_result,
                                           key=__sort_extract_result,
                                           reverse=True)
                else:
                    sorted_result = sorted(extract_result,
                                           key=__sort_extract_result,
                                           reverse=False)
                return sorted_result
    except Exception:
        pass

    print "[*E*] No extractor found for %s" % url
    return None
Example #26
    def _send_request(self, url, data=None, set_request=None):
        return http.send_request(url, data, set_request).text
Example #27
    def playSource():
        playUrl = http.add_referer_url("%s&q=%s" % (url, label), referer)
        reqObj = http.send_request(playUrl)
        return VIDEO_RE.findall(reqObj.text)[0]
Example #28
    def _send_request(self, url, data=None):
        return http.send_request(url, data).text