def challenge(self, html):
    """Extract the AdYouLike input data embedded in *html*, fetch the
    matching challenge from the AdYouLike API and return both payloads.

    :param html: page HTML containing the AdYouLike input data
    :return: tuple (ayl_data, challenge_data) of decoded JSON objects
    """
    data_string = None
    input_match = re.search(self.ADYOULIKE_INPUT_PATTERN, html)
    if input_match:
        data_string = input_match.group(1)
    else:
        # fail() raises and aborts the download
        self.plugin.fail("Can't read AdYouLike input data")

    #: Example payload:
    #: {"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"},
    #:  "all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}}
    ayl_data = json_loads(data_string)

    res = self.plugin.load(
        r'http://api-ayl.appspot.com/challenge?key=%(ayl_key)s&env=%(ayl_env)s&callback=%(callback)s' % {
            "ayl_key": ayl_data[self.engine]["key"],
            "ayl_env": ayl_data["all"]["env"],
            "callback": self.ADYOULIKE_CALLBACK})

    challenge_string = None
    challenge_match = re.search(self.ADYOULIKE_CHALLENGE_PATTERN, res)
    if challenge_match:
        challenge_string = challenge_match.group(1)
    else:
        self.plugin.fail("Invalid AdYouLike challenge")

    challenge_data = json_loads(challenge_string)

    return ayl_data, challenge_data
def handleFree(self):
    """Free-download workflow for ifile.it: request a ticket, solve the
    ReCaptcha if demanded (up to 5 attempts), then download the ticket URL.
    """
    ukey = re.search(self.__pattern__, self.pyfile.url).group(1)
    json_url = "http://ifile.it/new_download-request.json"
    post_data = {"ukey": ukey, "ab": "0"}

    json_response = json_loads(self.load(json_url, post=post_data))
    self.logDebug(json_response)

    # status 3 means the file was removed
    if json_response["status"] == 3:
        self.offline()

    if json_response["captcha"]:
        captcha_key = re.search(self.RECAPTCHA_KEY_PATTERN, self.html).group(1)
        recaptcha = ReCaptcha(self)
        post_data["ctype"] = "recaptcha"

        for _attempt in range(5):
            challenge, answer = recaptcha.challenge(captcha_key)
            post_data["recaptcha_challenge"] = challenge
            post_data["recaptcha_response"] = answer
            json_response = json_loads(self.load(json_url, post=post_data))
            self.logDebug(json_response)

            if json_response["retry"]:
                self.invalidCaptcha()
            else:
                self.correctCaptcha()
                break
        else:
            # all 5 attempts rejected
            self.fail("Incorrect captcha")

    if "ticket_url" not in json_response:
        self.parseError("Download URL")

    self.download(json_response["ticket_url"])
def handleFree(self):
    """Free-download workflow for cloudzer.net: honor the wait timer,
    request a download slot, solve the ReCaptcha and start the download.
    """
    # NOTE(review): no None-check — if WAIT_PATTERN is absent this raises
    # AttributeError on .group(1); presumably the page always shows a timer.
    found = re.search(self.WAIT_PATTERN, self.html)
    seconds = int(found.group(1))
    self.logDebug("Found wait", seconds)
    self.setWait(seconds + 1)
    self.wait()
    # Reserve a download slot (empty POST body keeps it a POST request)
    response = self.load('http://cloudzer.net/io/ticket/slot/%s' % self.file_info['ID'], post=' ', cookies=True)
    self.logDebug("Download slot request response", response)
    response = json_loads(response)
    if response["succ"] is not True:
        self.fail("Unable to get a download slot")
    # Solve the captcha and submit it for this file id
    recaptcha = ReCaptcha(self)
    challenge, response = recaptcha.challenge(self.CAPTCHA_KEY)
    post_data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": response}
    response = json_loads(self.load('http://cloudzer.net/io/ticket/captcha/%s' % self.file_info['ID'],
                                    post=post_data, cookies=True))
    self.logDebug("Captcha check response", response)
    self.logDebug("First check")
    if "err" in response:
        if response["err"] == "captcha":
            self.logDebug("Wrong captcha")
            self.invalidCaptcha()
            self.retry()
        # German or English "download limit reached" message
        elif "Sie haben die max" in response["err"] or "You have reached the max" in response["err"]:
            self.logDebug("Download limit reached, waiting an hour")
            self.setWait(3600, True)
            self.wait()
    if "type" in response:
        if response["type"] == "download":
            url = response["url"]
            self.logDebug("Download link", url)
            self.download(url, disposition=True)
def handleFree(self):
    """Free-download workflow for ifile.it: request a download ticket,
    solve the ReCaptcha when required (up to 5 attempts), then download.
    """
    ukey = re.search(self.__pattern__, self.pyfile.url).group(1)
    json_url = 'http://ifile.it/new_download-request.json'
    post_data = {"ukey": ukey, "ab": "0"}

    json_response = json_loads(self.load(json_url, post=post_data))
    self.logDebug(json_response)

    # status 3 means the file was removed
    if json_response['status'] == 3:
        self.offline()

    if json_response["captcha"]:
        captcha_key = re.search(self.RECAPTCHA_KEY_PATTERN, self.html).group(1)
        recaptcha = ReCaptcha(self)
        post_data["ctype"] = "recaptcha"

        for i in range(5):
            post_data["recaptcha_challenge"], post_data[
                "recaptcha_response"] = recaptcha.challenge(captcha_key)
            json_response = json_loads(self.load(json_url, post=post_data))
            self.logDebug(json_response)
            if json_response["retry"]:
                self.invalidCaptcha()
            else:
                self.correctCaptcha()
                break
        else:
            # all 5 attempts were rejected
            self.fail("Incorrect captcha")

    if not "ticket_url" in json_response:
        self.parseError("Download URL")

    self.download(json_response["ticket_url"])
def handle_free(self, pyfile):
    """Free-download workflow for uploading.com: back off on download
    limits, perform the two AJAX steps (wait timer, link), then extract
    the final form URL into ``self.link``.
    """
    limit = re.search('<h2>((Daily )?Download Limit)</h2>', self.html)
    if limit:
        pyfile.error = encode(limit.group(1))
        self.log_warning(pyfile.error)
        # Daily limit -> retry in 6 hours, plain limit -> 15 minutes
        self.retry(6, (6 * 60 if limit.group(2) else 15) * 60, pyfile.error)

    ajax_url = "http://uploading.com/files/get/?ajax"
    self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
    self.req.http.lastURL = pyfile.url

    res = json_loads(self.load(ajax_url,
                               post={'action': 'second_page',
                                     'code'  : self.info['pattern']['ID']}))
    if 'answer' not in res or 'wait_time' not in res['answer']:
        self.error(_("No AJAX/WAIT"))  # error() raises
    wait_time = int(res['answer']['wait_time'])
    self.log_info(_("Waiting %d seconds") % wait_time)
    self.wait(wait_time)

    res = json_loads(self.load(ajax_url,
                               post={'action': 'get_link',
                                     'code'  : self.info['pattern']['ID'],
                                     'pass'  : '******'}))
    if 'answer' not in res or 'link' not in res['answer']:
        self.error(_("No AJAX/URL"))
    url = res['answer']['link']

    self.html = self.load(url)
    form = re.search(r'<form id="file_form" action="(.*?)"', self.html)
    if form is None:
        self.error(_("No URL"))
    url = form.group(1)

    self.link = url
def challenge(self, key=None, html=None):
    """Request an AdYouLike captcha challenge and compute its answer.

    :param key: optional (ayl_json, callback_name) pair; detected from
                *html* via detect_key() when missing
    :param html: page HTML used for key detection when *key* is None
    :return: tuple (result_fields, challenge_data)
    """
    if not key:
        if not self.detect_key(html):
            errmsg = _("AdYouLike key not found")
            self.plugin.fail(errmsg)
            raise TypeError(errmsg)  # safety net in case fail() returns
        key = self.key

    ayl_json, callback = key
    #: Example ayl_json:
    #: {"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"},
    #:  "all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}}
    ayl = json_loads(ayl_json)

    html = self.plugin.req.load("http://api-ayl.appspot.com/challenge",
                                get={'key'     : ayl['adyoulike']['key'],
                                     'env'     : ayl['all']['env'],
                                     'callback': callback})

    # The API answers as JSONP: callback(<json>)
    found = re.search(callback + r'\s*\((.+?)\)', html)
    if found is None:
        errmsg = _("AdYouLike challenge pattern not found")
        self.plugin.fail(errmsg)
        raise AttributeError(errmsg)
    challenge = json_loads(found.group(1))

    self.logDebug("Challenge: %s" % challenge)

    return self.result(ayl, challenge), challenge
def result(self, server, challenge):
    """Extract the captcha answer from an AdYouLike challenge and build
    the POST fields expected by the protected site.

    :param server: AdYouLike input data (dict or JSON string)
    :param challenge: challenge payload (dict or JSON string)
    :return: dict of ``_ayl_*`` form fields
    :raises AttributeError: when the expected text cannot be located

    Example challenge payload (JSONP-wrapped by the API):
    Adyoulike.g._jsonp_5579316662423138
    ({"translations":{"fr":{"instructions_visual":"Recopiez « Soonnight » ci-dessous :"}},
      "site_under":true,"clickable":true,...,"id":256,
      "token":"e6QuI4aRSnbIZJg02IsV6cp4JQ9~MjA1",...,
      "tid":"SqwuAdxT1EZoi4B5q0T63LN2AkiCJBg5"})
    """
    if isinstance(server, basestring):
        server = json_loads(server)
    if isinstance(challenge, basestring):
        challenge = json_loads(challenge)

    try:
        instructions_visual = challenge['translations'][server['all']['lang']]['instructions_visual']
        # The text the user has to retype is quoted with guillemets
        response = re.search(u'«(.+?)»', instructions_visual).group(1).strip()
    except AttributeError:
        errmsg = _("AdYouLike result not found")
        self.plugin.fail(errmsg)
        raise AttributeError(errmsg)

    # Fix: the original assigned the extracted text to ``result`` and then
    # referenced an undefined name ``response`` here, raising NameError.
    result = {'_ayl_captcha_engine' : "adyoulike",
              '_ayl_env'            : server['all']['env'],
              '_ayl_tid'            : challenge['tid'],
              '_ayl_token_challenge': challenge['token'],
              '_ayl_response'       : response}

    self.logDebug("Result: %s" % result)

    return result
def loadAccountInfo(self, user, req):
    """Fetch the filecloud.io API key for *user* and return account info.

    :return: dict with validity/premium/traffic fields for pyLoad
    """
    # It looks like the first API request always fails, so we retry 5 times,
    # it should work on the second try.
    # NOTE(review): every branch of the body either breaks or returns, so the
    # loop can never actually run a second iteration — the unconditional
    # `else: return` defeats the documented retry. TODO confirm intent.
    for _i in xrange(5):
        rep = req.load("https://secure.filecloud.io/api-fetch_apikey.api",
                       post={"username": user,
                             "password": self.getAccountData(user)['password']})
        rep = json_loads(rep)
        if rep['status'] == 'ok':
            break
        elif rep['status'] == 'error' and rep[
                'message'] == 'no such user or wrong password':
            self.logError(_("Wrong username or password"))
            return {"valid": False, "premium": False}
        else:
            return {"premium": False}

    akey = rep['akey']
    self.accounts[user]['akey'] = akey  # Saved for hoster plugin

    rep = req.load("http://api.filecloud.io/api-fetch_account_details.api",
                   post={"akey": akey})
    rep = json_loads(rep)

    if rep['is_premium'] == 1:
        return {"validuntil": float(rep['premium_until']), "trafficleft": -1}
    else:
        return {"premium": False}
def result(self, server, challenge):
    """Extract the captcha answer from an AdYouLike challenge and build
    the POST fields expected by the protected site.

    :param server: AdYouLike input data (dict or JSON string)
    :param challenge: challenge payload (dict or JSON string)
    :return: dict of ``_ayl_*`` form fields

    Example challenge payload (JSONP-wrapped by the API):
    Adyoulike.g._jsonp_5579316662423138
    ({'translations':{'fr':{'instructions_visual':"Recopiez « Soonnight » ci-dessous :"}},
      'site_under':true,'clickable':true,...,'id':256,
      'token':"e6QuI4aRSnbIZJg02IsV6cp4JQ9~MjA1",...,
      'tid':"SqwuAdxT1EZoi4B5q0T63LN2AkiCJBg5"})
    """
    if isinstance(server, basestring):
        server = json_loads(server)
    if isinstance(challenge, basestring):
        challenge = json_loads(challenge)

    try:
        instructions_visual = challenge['translations'][server['all']['lang']]['instructions_visual']
        # The text the user has to retype is quoted with guillemets
        response = re.search(u'«(.+?)»', instructions_visual).group(1).strip()
    except AttributeError:
        self.fail(_("AdYouLike result not found"))

    # Fix: the original assigned the extracted text to ``result`` and then
    # referenced an undefined name ``response`` here, raising NameError.
    result = {'_ayl_captcha_engine' : "adyoulike",
              '_ayl_env'            : server['all']['env'],
              '_ayl_tid'            : challenge['tid'],
              '_ayl_token_challenge': challenge['token'],
              '_ayl_response'       : response}

    self.log_debug("Result: %s" % result)

    return result
def getLinks(self):
    """Collect every file id in a turbobit.net folder (paged, 200 rows per
    request) and return the formatted download links.
    """
    folder_id = re.search(self.__pattern__, self.pyfile.url).group('id')

    def fetch_grid(page=None):
        # One page of the folder grid as decoded JSON
        params = {'id_folder': folder_id, 'rows': 200}
        if page is not None:
            params['page'] = page
        raw = self.load('http://turbobit.net/downloadfolder/gridFile',
                        get=params, decode=True)
        return json_loads(raw)

    grid = fetch_grid()
    links_count = grid["records"]
    pages = int(math.ceil(links_count / 200.0))

    ids = [row['id'] for row in grid['rows']]
    for page in range(2, pages + 1):
        ids.extend(row['id'] for row in fetch_grid(page)['rows'])

    return map(format_links, ids)
def challenge(self, html):
    """Extract the AdYouLike input data embedded in *html*, fetch the
    matching challenge from the AdYouLike API and return both payloads.

    :param html: page HTML containing the AdYouLike input data
    :return: tuple (ayl_data, challenge_data) of decoded JSON objects
    """
    adyoulike_data_string = None
    found = re.search(self.ADYOULIKE_INPUT_PATTERN, html)
    if found:
        adyoulike_data_string = found.group(1)
    else:
        # fail() raises and aborts the download
        self.plugin.fail("Can't read AdYouLike input data")

    # Example payload:
    # {"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"},
    # "all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}}
    ayl_data = json_loads(adyoulike_data_string)

    res = self.plugin.load(
        r'http://api-ayl.appspot.com/challenge?key=%(ayl_key)s&env=%(ayl_env)s&callback=%(callback)s' % {
            "ayl_key": ayl_data[self.engine]["key"],
            "ayl_env": ayl_data["all"]["env"],
            "callback": self.ADYOULIKE_CALLBACK})

    challenge_string = None
    found = re.search(self.ADYOULIKE_CHALLENGE_PATTERN, res)
    if found:
        challenge_string = found.group(1)
    else:
        self.plugin.fail("Invalid AdYouLike challenge")

    challenge_data = json_loads(challenge_string)

    return ayl_data, challenge_data
def process(self, pyfile):
    """Resolve *pyfile* through the rpnet.biz premium API and download it.

    Direct rpnet links are downloaded as-is; other links are rewritten via
    the API, waiting (up to ~15 minutes) for rpnet's server-side fetch to
    reach 100% when only an HDD transfer id is returned.
    """
    if re.match(self.__pattern__, pyfile.url):
        # Already an rpnet link — nothing to rewrite
        link_status = {'generated': pyfile.url}
    elif not self.account:  # Check account
        self.logError(_("Please enter your %s account or deactivate this plugin") % "rpnet")
        self.fail("No rpnet account provided")
    else:
        (user, data) = self.account.selectAccount()
        self.logDebug("Original URL: %s" % pyfile.url)
        # Get the download link
        response = self.load("https://premium.rpnet.biz/client_api.php",
                             get={"username": user,
                                  "password": data['password'],
                                  "action": "generate",
                                  "links": self.pyfile.url})
        self.logDebug("JSON data: %s" % response)
        # get the first link... since we only queried one
        link_status = json_loads(response)['links'][0]

    # Check if we only have an id as a HDD link
    if 'id' in link_status:
        self.logDebug("Need to wait at least 30 seconds before requery")
        self.setWait(30)  # wait for 30 seconds
        self.wait()
        # Lets query the server again asking for the status on the link,
        # we need to keep doing this until we reach 100
        max_tries = 30
        my_try = 0
        while (my_try <= max_tries):
            self.logDebug("Try: %d ; Max Tries: %d" % (my_try, max_tries))
            response = self.load("https://premium.rpnet.biz/client_api.php",
                                 get={"username": user,
                                      "password": data['password'],
                                      "action": "downloadInformation",
                                      "id": link_status['id']})
            self.logDebug("JSON data hdd query: %s" % response)
            download_status = json_loads(response)['download']
            if download_status['status'] == '100':
                link_status['generated'] = download_status['rpnet_link']
                self.logDebug("Successfully downloaded to rpnet HDD: %s" % link_status['generated'])
                break
            else:
                self.logDebug("At %s%% for the file download" % download_status['status'])
            self.setWait(30)
            self.wait()
            my_try += 1

        if my_try > max_tries:  # We went over the limit!
            self.fail("Waited for about 15 minutes for download to finish but failed")

    if 'generated' in link_status:
        self.download(link_status['generated'], disposition=True)
    elif 'error' in link_status:
        self.fail(link_status['error'])
    else:
        self.fail("Something went wrong, not supposed to enter here")
def handleFree(self):
    """Free-download workflow for uploading.com: back off on download
    limits, run the two AJAX steps (wait timer, link), extract the final
    form URL, download it and retry if an HTML page came back instead.
    """
    found = re.search('<h2>((Daily )?Download Limit)</h2>', self.html)
    if found:
        self.pyfile.error = found.group(1)
        self.logWarning(self.pyfile.error)
        # Daily limit -> retry in 6 hours, plain limit -> 15 minutes
        self.retry(max_tries=6,
                   wait_time=21600 if found.group(2) else 900,
                   reason=self.pyfile.error)

    ajax_url = "http://uploading.com/files/get/?ajax"
    # The site only answers these endpoints for XHR requests
    self.req.http.c.setopt(HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
    self.req.http.lastURL = self.pyfile.url

    response = json_loads(self.load(ajax_url,
                                    post={'action': 'second_page',
                                          'code': self.file_info['ID']}))
    if 'answer' in response and 'wait_time' in response['answer']:
        wait_time = int(response['answer']['wait_time'])
        self.log.info("%s: Waiting %d seconds." % (self.__name__, wait_time))
        self.setWait(wait_time)
        self.wait()
    else:
        self.pluginParseError("AJAX/WAIT")

    response = json_loads(self.load(ajax_url,
                                    post={'action': 'get_link',
                                          'code': self.file_info['ID'],
                                          'pass': '******'}))
    if 'answer' in response and 'link' in response['answer']:
        url = response['answer']['link']
    else:
        self.pluginParseError("AJAX/URL")

    self.html = self.load(url)
    found = re.search(r'<form id="file_form" action="(.*?)"', self.html)
    if found:
        url = found.group(1)
    else:
        self.pluginParseError("URL")

    self.download(url)

    # An HTML answer instead of the file means we were redirected
    check = self.checkDownload({"html": re.compile("\A<!DOCTYPE html PUBLIC")})
    if check == "html":
        self.logWarning("Redirected to a HTML page, wait 10 minutes and retry")
        self.setWait(600, True)
        self.wait()
def handle_premium(self, pyfile): user, info = self.account.select() #: Get the download link res = self.load("https://premium.rpnet.biz/client_api.php", get={'username': user, 'password': info['login']['password'], 'action' : "generate", 'links' : pyfile.url}) self.log_debug("JSON data: %s" % res) link_status = json_loads(res)['links'][0] #: Get the first link... since we only queried one #: Check if we only have an id as a HDD link if 'id' in link_status: self.log_debug("Need to wait at least 30 seconds before requery") self.wait(30) #: Wait for 30 seconds #: Lets query the server again asking for the status on the link, #: We need to keep doing this until we reach 100 max_tries = 30 my_try = 0 while (my_try <= max_tries): self.log_debug("Try: %d ; Max Tries: %d" % (my_try, max_tries)) res = self.load("https://premium.rpnet.biz/client_api.php", get={'username': user, 'password': info['login']['password'], 'action' : "downloadInformation", 'id' : link_status['id']}) self.log_debug("JSON data hdd query: %s" % res) download_status = json_loads(res)['download'] if download_status['status'] == "100": link_status['generated'] = download_status['rpnet_link'] self.log_debug("Successfully downloaded to rpnet HDD: %s" % link_status['generated']) break else: self.log_debug("At %s%% for the file download" % download_status['status']) self.wait(30) my_try += 1 if my_try > max_tries: #: We went over the limit! self.fail(_("Waited for about 15 minutes for download to finish but failed")) if 'generated' in link_status: self.link = link_status['generated'] return elif 'error' in link_status: self.fail(link_status['error']) else: self.fail(_("Something went wrong, not supposed to enter here"))
def do_recaptcha(self):
    """Solve the site's ReCaptcha (up to 5 attempts) and return the
    download URL scraped from the page.
    """
    self.logDebug('Trying to solve captcha')
    captcha_key = re.search(self.CAPTCHA_KEY_PATTERN, self.html).group(1)
    shortencode = re.search(self.CAPTCHA_SHORTENCODE_PATTERN, self.html).group(1)
    url = re.search(self.CAPTCHA_DOWNLOAD_PATTERN, self.html).group(1)

    recaptcha = ReCaptcha(self)

    for _attempt in range(5):
        challenge, code = recaptcha.challenge(captcha_key)
        verdict = json_loads(self.load(self.file_info['HOST'] + '/rest/captcha/test',
                                       post={'challenge': challenge,
                                             'response': code,
                                             'shortencode': shortencode}))
        self.logDebug("reCaptcha response : %s" % verdict)
        if verdict == True:
            self.correctCaptcha()
            break
        self.invalidCaptcha()
    else:
        # five rejected attempts in a row
        self.fail("Invalid captcha")

    return url
def getDomain(self, req):
    """Ask the Filesonic API which regional domain serves the current IP
    and return it.
    """
    raw = req.load(
        self.API_URL + "/utility?method=getFilesonicDomainForCurrentIp&format=json",
        decode=True)
    answer = json_loads(raw)
    return answer["FSApi_Utility"]["getFilesonicDomainForCurrentIp"]["response"]
def login(self, user, data, req):
    """Fetch a Fastix API key for this account and store it in *data*.

    Calls ``wrongPassword()`` (which aborts the login) when the API
    reports an error.
    """
    page = req.load("http://fastix.ru/api_v2/?sub=get_apikey&email=%s&password=%s" % (user, data["password"]))
    # Fix: check for an API error *before* reading 'apikey'. An error
    # response carries no 'apikey' field, so the original order raised
    # KeyError instead of reporting the wrong password.
    if "error_code" in page:
        self.wrongPassword()
    api = json_loads(page)
    api = api['apikey']
    data["api"] = api
def handlePremium(self, pyfile):
    """Unrestrict *pyfile* through the Real-Debrid AJAX API and store the
    generated link (scheme adjusted per the 'ssl' config) in ``self.link``.
    """
    raw = self.load("https://real-debrid.com/ajax/unrestrict.php",
                    get={'lang'    : "en",
                         'link'    : pyfile.url,
                         'password': self.getPassword(),
                         'time'    : int(time.time() * 1000)})
    data = json_loads(raw)

    self.logDebug("Returned Data: %s" % data)

    if data['error'] != 0:
        message = data['message']
        if message == "Your file is unavailable on the hoster.":
            self.offline()
        else:
            self.logWarning(message)
            self.tempOffline()
    else:
        # Adopt the real file name/size when pyLoad only has a placeholder
        if pyfile.name and pyfile.name.endswith('.tmp') and data['file_name']:
            pyfile.name = data['file_name']
            pyfile.size = parseFileSize(data['file_size'])

        link = data['generated_links'][0][-1]
        if self.getConfig('ssl'):
            link = link.replace("http://", "https://")
        else:
            link = link.replace("https://", "http://")
        self.link = link
def handleFree(self, pyfile):
    """Free-download workflow for 115.com: extract the ticket URL, fetch
    the mirror list and store the first usable mirror in ``self.link``.
    """
    m = re.search(self.LINK_FREE_PATTERN, self.html)
    if m is None:
        self.error(_("LINK_FREE_PATTERN not found"))
    url = m.group(1)
    self.logDebug(('FREEUSER' if m.group(2) == 'download' else 'GUEST') + ' URL', url)

    res = json_loads(self.load(urlparse.urljoin("http://115.com", url), decode=False))
    if "urls" in res:
        mirrors = res['urls']
    elif "data" in res:
        mirrors = res['data']
    else:
        # Fix: fall back to an empty list — the original used None, which
        # made the for-loop below raise TypeError instead of reaching the
        # graceful fail() in the else clause.
        mirrors = []

    for mr in mirrors:
        try:
            self.link = mr['url'].replace("\\", "")
            self.logDebug("Trying URL: " + self.link)
            break
        except Exception:
            # malformed mirror entry — try the next one
            continue
    else:
        self.fail(_("No working link found"))
def process(self, pyfile):
    """Rewrite *pyfile* through the premiumize.me API v1 and download the
    resulting direct link, mapping API status codes to pyLoad outcomes.
    """
    # Check account
    if not self.account or not self.account.canUse():
        self.logError(_("Please enter a valid premiumize.me account or deactivate this plugin"))
        self.fail("No valid premiumize.me account provided")

    # In some cases hosters do not supply us with a filename at download, so
    # we set a fall-back filename (e.g. for freakshare or xfileshare)
    self.pyfile.name = self.pyfile.name.split('/').pop()  # Remove everything before last slash

    # Correction for automatically assigned filename: remove html suffix if needed
    suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"]
    temp = self.pyfile.name.split('.')
    if temp.pop() in suffix_to_remove:
        self.pyfile.name = ".".join(temp)

    # Get account data
    (user, data) = self.account.selectAccount()

    # Get rewritten link using the premiumize.me api v1
    # (see https://secure.premiumize.me/?show=api)
    # Fix: the "&params[...]" query separators had been mangled into the
    # pilcrow character ("¶ms[...]") by an HTML-entity round-trip, which
    # broke the API request.
    answer = self.load(
        "https://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s"
        % (user, data['password'], self.pyfile.url))
    data = json_loads(answer)

    # Check status and decide what to do
    status = data['status']
    if status == 200:
        self.download(data['result']['location'], disposition=True)
    elif status == 400:
        self.fail("Invalid link")
    elif status == 404:
        self.offline()
    elif status >= 500:
        self.tempOffline()
    else:
        self.fail(data['statusmessage'])
def getInfo(urls):
    """Resolve name and availability for a list of Dailymotion video links.

    :param urls: iterable of video page URLs
    :return: list of (name, size, status, url) tuples for pyLoad
    """
    results = []  #: [ .. (name, size, status, url) .. ]
    id_pattern = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/"
    request = {"fields": "access_error,status,title"}

    for url in urls:
        video_id = id_pattern.search(url).group("ID")
        info = json_loads(getURL(apiurl + video_id, get=request))

        # Fall back to the URL itself when no title is available
        name = info["title"] + ".mp4" if "title" in info else url

        if "error" in info or info["access_error"]:
            status = "offline"
        else:
            status = info["status"]

        # Map the API state onto pyLoad's status vocabulary
        if status in ("ready", "published"):
            status = "online"
        elif status in ("waiting", "processing"):
            status = "temp. offline"
        else:
            status = "offline"

        results.append((name, 0, statusMap[status], url))

    return results
def getHoster(self):
    """Return the hoster names supported by the reload.cc account, or an
    empty list when no usable account or a bad API status is seen.
    """
    # If no accounts are available there will be no hosters available
    if not self.account or not self.account.canUse():
        print "ReloadCc: No accounts available"
        return []

    # Get account data
    (user, data) = self.account.selectAccount()

    # Get supported hosters list from reload.cc using the json API v1
    query_params = dict(via='pyload', v=1, get_supported='true', get_traffic='true', user=user)

    # Prefer the cached password hash; fall back to the plain password
    try:
        query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
    except Exception:
        query_params.update(dict(pwd=data['password']))

    answer = getURL("http://api.reload.cc/login", get=query_params)
    data = json_loads(answer)

    # If the account is not valid there are no hosters available
    if data['status'] != "ok":
        print "ReloadCc: Status is not ok: %s" % data['status']
        return []

    # Extract hosters from the json answer
    return data['msg']['supportedHosters']
def handlePremium(self):
    """Resolve a direct premium link through the letitbit.net JSON API and
    download it.
    """
    api_key = self.user
    premium_key = self.account.getAccountData(self.user)["password"]

    # API request payload: [api_key, [method, {params}]]
    request_payload = [api_key,
                       ["download/direct_links",
                        {"pass": premium_key, "link": self.pyfile.url}]]

    api_rep = self.load('http://api.letitbit.net/json',
                        post={'r': json_dumps(request_payload)})
    self.logDebug('API Data: ' + api_rep)
    api_rep = json_loads(api_rep)

    if api_rep['status'] == 'FAIL':
        self.fail(api_rep['data'])

    direct_link = api_rep['data'][0][0]
    self.logDebug('Direct Link: ' + direct_link)

    self.download(direct_link, disposition=True)
def getAccountStatus(self, user, req):
    """Retrieve account info for *user* via the premiumize.me API v1
    (see https://secure.premiumize.me/?show=api) and return the parsed
    JSON answer.

    Fix: the "&params[...]" query separators had been mangled into the
    pilcrow character ("¶ms[...]") by an HTML-entity round-trip, which
    broke the API request.
    """
    answer = req.load(
        "https://api.premiumize.me/pm-api/v1.php?method=accountstatus&params[login]=%s&params[pass]=%s"
        % (user, self.accounts[user]['password']))
    return json_loads(answer)
def submit(self, captcha, captchaType="file", match=None):
    """Upload a captcha to CaptchaTrader for solving.

    :param captcha: file handle (or URL, depending on *captchaType*)
    :param captchaType: one of "file", "url-jpg", "url-jpeg", "url-png", "url-bmp"
    :param match: unused here; kept for interface compatibility
    :return: tuple (ticket, result) from the service
    :raises CaptchaTraderException: when no API key is set or the service
        reports an error (first response element < 0)
    """
    if not PYLOAD_KEY:
        raise CaptchaTraderException("No API Key Specified!")

    assert captchaType in ("file", "url-jpg", "url-jpeg", "url-png", "url-bmp")

    req = getRequest()
    # raise timeout threshold — solving can take a while
    req.c.setopt(LOW_SPEED_TIME, 80)

    try:
        json = req.load(CaptchaTrader.SUBMIT_URL,
                        post={"api_key": PYLOAD_KEY,
                              "username": self.getConfig("username"),
                              "password": self.getConfig("passkey"),
                              "value": (FORM_FILE, captcha),
                              "type": captchaType},
                        multipart=True)
    finally:
        req.close()

    response = json_loads(json)
    # A negative first element signals an error; the second is the message
    if response[0] < 0:
        raise CaptchaTraderException(response[1])

    ticket = response[0]
    result = response[1]
    self.logDebug("result %s : %s" % (ticket, result))

    return ticket, result
def checkFile(self, url):
    """Validate *url* against the Filesonic API, update the pyfile name,
    and return the URL rewritten to the correct regional domain.

    :raises: via offline()/fail() when the file is unavailable,
        password protected, or the URL has no id
    """
    id = getId(url)
    self.logDebug("file id is %s" % id)
    if id:
        # Use the api to check the current status of the file and fix up data
        check_url = self.API_ADDRESS + "/link?method=getInfo&format=json&ids=%s" % id
        result = json_loads(self.load(check_url, decode=True))
        item = result["FSApi_Link"]["getInfo"]["response"]["links"][0]
        self.logDebug("api check returns %s" % item)

        if item["status"] != "AVAILABLE":
            self.offline()
        if item["is_password_protected"] != 0:
            self.fail("This file is password protected")

        # ignored this check due to false api information
        #if item["is_premium_only"] != 0 and not self.premium:
        #    self.fail("need premium account for file")

        self.pyfile.name = unquote(item["filename"])

        # Fix the url and resolve the domain to the correct regional variation
        url = item["url"]
        urlparts = re.search(self.URL_DOMAIN_PATTERN, url)
        if urlparts:
            url = urlparts.group("prefix") + self.getDomain() + urlparts.group("suffix")
            self.logDebug("localised url is %s" % url)
        return url
    else:
        self.fail("Invalid URL")
def handleFree(self):
    """Free-download workflow for ulozto.net: fill the free-download form,
    solving whichever of the two known captcha form variants is present,
    then submit it as the download request.
    """
    action, inputs = self.parseHtmlForm('id="frm-downloadDialog-freeDownloadForm"')
    if not action or not inputs:
        self.parseError("free download form")

    self.logDebug('inputs.keys() = ' + str(inputs.keys()))
    # get and decrypt captcha
    if all(key in inputs for key in ('captcha_value', 'captcha_id', 'captcha_key')):
        # Old version - last seen 9.12.2013
        self.logDebug('Using "old" version')

        captcha_value = self.decryptCaptcha("http://img.uloz.to/captcha/%s.png" % inputs['captcha_id'])
        self.logDebug('CAPTCHA ID: ' + inputs['captcha_id'] + ', CAPTCHA VALUE: ' + captcha_value)

        inputs.update({'captcha_id': inputs['captcha_id'],
                       'captcha_key': inputs['captcha_key'],
                       'captcha_value': captcha_value})
    elif all(key in inputs for key in ('captcha_value', 'timestamp', 'salt', 'hash')):
        # New version - better to get new parameters (like captcha reload)
        # because of image url - since 6.12.2013
        self.logDebug('Using "new" version')

        # rnd cache-buster keeps the server from returning a stale captcha
        xapca = self.load("http://www.ulozto.net/reloadXapca.php",
                          get={"rnd": str(int(time.time()))})
        self.logDebug('xapca = ' + str(xapca))

        data = json_loads(xapca)
        captcha_value = self.decryptCaptcha(str(data['image']))
        self.logDebug('CAPTCHA HASH: ' + data['hash'] +
                      ', CAPTCHA SALT: ' + str(data['salt']) +
                      ', CAPTCHA VALUE: ' + captcha_value)

        inputs.update({'timestamp': data['timestamp'],
                       'salt': data['salt'],
                       'hash': data['hash'],
                       'captcha_value': captcha_value})
    else:
        self.parseError("CAPTCHA form changed")

    self.multiDL = True
    self.download("http://www.ulozto.net" + action, post=inputs, cookies=True, disposition=True)
def process(self, pyfile):
    """Rewrite *pyfile* through the Fastix API (unless it is already a
    direct link), then download it, retrying on error/empty downloads.
    """
    if re.match(self.__pattern__, pyfile.url):
        new_url = pyfile.url
    elif not self.account:
        self.logError(_("Please enter your %s account or deactivate this plugin") % "Fastix")
        self.fail("No Fastix account provided")
    else:
        self.logDebug("Old URL: %s" % pyfile.url)
        api_key = self.account.getAccountData(self.user)
        api_key = api_key["api"]
        url = "http://fastix.ru/api_v2/?apikey=%s&sub=getdirectlink&link=%s" % (api_key, pyfile.url)
        page = self.load(url)
        data = json_loads(page)
        self.logDebug("Json data: %s" % str(data))
        # NOTE(review): substring match on the raw page instead of checking
        # the decoded JSON ('error' key) — fragile if the API reformats its
        # output; confirm against the Fastix API before changing.
        if "error\":true" in page:
            self.offline()
        else:
            new_url = data["downloadlink"]

    if new_url != pyfile.url:
        self.logDebug("New URL: %s" % new_url)

    if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
        # only use when name wasn't already set
        pyfile.name = self.getFilename(new_url)

    self.download(new_url, disposition=True)

    check = self.checkDownload(
        {"error": "<title>An error occurred while processing your request</title>",
         "empty": re.compile(r"^$")})
    if check == "error":
        self.retry(reason="An error occurred while generating link.", wait_time=60)
    elif check == "empty":
        self.retry(reason="Downloaded File was empty.", wait_time=60)
def api_response(self, api="captcha", post=False, multipart=False):
    """POST to a DeathByCaptcha API endpoint and decode the JSON answer.

    :param api: endpoint path appended to ``self.API_URL``
    :param post: dict of POST fields (credentials are merged in), or a
        falsy value for a GET-style request
    :param multipart: pass-through flag for multipart uploads
    :raises DeathByCaptchaException: on API-level errors or the mapped
        HTTP error codes (403/413/503/400/405)

    NOTE(review): ``res`` is computed but not returned within this span —
    presumably a ``return res`` follows outside the visible chunk; confirm.
    """
    req = getRequest()
    req.c.setopt(HTTPHEADER, ["Accept: application/json",
                              "User-Agent: pyLoad %s" % self.core.version])

    if post:
        if not isinstance(post, dict):
            post = {}
        # credentials accompany every authenticated request
        post.update({"username": self.getConfig("username"),
                     "password": self.getConfig("passkey")})

    res = None
    try:
        json = req.load("%s%s" % (self.API_URL, api),
                        post=post,
                        multipart=multipart)
        self.logDebug(json)
        res = json_loads(json)

        if "error" in res:
            raise DeathByCaptchaException(res['error'])
        elif "status" not in res:
            raise DeathByCaptchaException(str(res))
    except BadHeader, e:
        # Map the documented HTTP error codes onto service exceptions
        if 403 == e.code:
            raise DeathByCaptchaException('not-logged-in')
        elif 413 == e.code:
            raise DeathByCaptchaException('invalid-captcha')
        elif 503 == e.code:
            raise DeathByCaptchaException('service-overload')
        elif e.code in (400, 405):
            raise DeathByCaptchaException('invalid-request')
        else:
            raise
def handlePremium(self):
    """Premium download for fileserve.com: try the API first, fall back to
    the plain page URL, and re-login + retry when the session expired.
    """
    premium_url = None
    if self.__name__ == "FileserveCom":
        # try api download
        response = self.load("http://app.fileserve.com/api/download/premium/",
                             post={"username": self.user,
                                   "password": self.account.getAccountData(self.user)["password"],
                                   "shorten": self.file_id},
                             decode=True)
        if response:
            response = json_loads(response)
            # error_code semantics per the fileserve API:
            # 302 -> redirect to the real link; 305/500 -> temporary outage;
            # 403/605 -> account problem; 606/607/608 -> file gone
            if response['error_code'] == "302":
                premium_url = response['next']
            elif response['error_code'] in ["305", "500"]:
                self.tempOffline()
            elif response['error_code'] in ["403", "605"]:
                self.resetAccount()
            elif response['error_code'] in ["606", "607", "608"]:
                self.offline()
            else:
                self.logError(response['error_code'], response['error_message'])

    self.download(premium_url or self.pyfile.url)

    if not premium_url:
        # Without an API link we may have hit the login page instead
        check = self.checkDownload({"login": re.compile(self.NOT_LOGGED_IN_PATTERN)})
        if check == "login":
            self.account.relogin(self.user)
            self.retry(reason=_("Not logged in."))
def get_info(cls, url="", html=""):
    """Augment the base info dict with Yandex.Disk specifics scraped from
    the page's embedded "models-client" JSON (client id, version, sk
    token, file id/size/name).

    NOTE(review): no ``return info`` is visible in this span — presumably
    it follows outside the visible chunk; confirm.
    """
    info = super(YadiSk, cls).get_info(url, html)

    if html:
        if 'idclient' not in info:
            # Random 32-char client id from the hex-like alphabet the
            # site accepts
            info['idclient'] = ""
            for _i in xrange(32):
                info['idclient'] += random.choice('0123456abcdef')

        m = re.search(r'<script id="models-client" type="application/json">(.+?)</script>', html)
        if m:
            api_data = json_loads(m.group(1))
            try:
                for sect in api_data:
                    if 'model' in sect:
                        if sect['model'] == "config":
                            info['version'] = sect['data']['version']
                            info['sk'] = sect['data']['sk']
                        elif sect['model'] == "resource":
                            info['id'] = sect['data']['id']
                            info['size'] = sect['data']['meta']['size']
                            info['name'] = sect['data']['name']
            except Exception, e:
                # status 8 marks "unknown/error" in pyLoad's status map
                info['status'] = 8
                info['error'] = _("Unexpected server response: %s") % e.message
        else:
            info['status'] = 8
            info['error'] = _("could not find required json data")
def loadAccountInfo(self, user, req):
    """Query the hoster API for account validity, expiry and traffic using
    the cached session id.

    NOTE(review): the collected validuntil/trafficleft/premium values are
    not returned within this span — presumably the return dict follows
    outside the visible chunk; confirm.
    """
    validuntil = None
    trafficleft = None
    premium = False
    sid = None

    try:
        # Session id cached by the login step; bail out early if missing
        sid = self.getAccountData(user).get('sid', None)
        assert sid

        html = req.load("%s/info" % self.API_URL, get={'sid': sid})

        self.logDebug("API:USERINFO", html)

        json = json_loads(html)

        if json['response_status'] == 200:
            if "reset_in" in json['response']:
                # Re-check the account when the traffic counter resets
                self.scheduleRefresh(user, json['response']['reset_in'])

            validuntil = json['response']['expire_date']
            trafficleft = float(json['response']['traffic_left']) / 1024  #@TODO: Remove `/ 1024` in 0.4.10
            premium = True
        else:
            self.logError(json['response_details'])
    except Exception, e:
        self.logError(e)
def get_info(urls):
    """Resolve name and availability for a list of Dailymotion video links.

    :param urls: iterable of video page URLs
    :return: list of (name, size, status, url) tuples for pyLoad
    """
    results = []
    id_pattern = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/%s"
    request = {'fields': "access_error,status,title"}

    for url in urls:
        video_id = id_pattern.match(url).group('ID')
        info = json_loads(get_url(apiurl % video_id, get=request))

        # Fall back to the URL itself when no title is available
        if "title" in info:
            name = info['title'] + ".mp4"
        else:
            name = url

        if "error" in info or info['access_error']:
            status = "offline"
        else:
            status = info['status']

        # Map the API state onto pyLoad's status vocabulary
        if status in ("ready", "published"):
            status = "online"
        elif status in ("waiting", "processing"):
            status = "temp. offline"
        else:
            status = "offline"

        results.append((name, 0, statusMap[status], url))

    return results
def handleFree(self):
    """Free-download workflow for bayfiles.com: honor the wait limit,
    request a timer token, wait out the delay, fetch the final link and
    start the download.
    """
    found = re.search(self.WAIT_PATTERN, self.html)
    if found:
        # Page announces a wait in minutes; retry after sitting it out
        self.setWait(int(found.group(1)) * 60)
        self.wait()
        self.retry()

    # Get download token
    found = re.search(self.VARS_PATTERN, self.html)
    if not found:
        self.parseError('VARS')
    vfid, delay = found.groups()

    # "_" is a millisecond cache-buster timestamp
    response = json_loads(self.load('http://bayfiles.com/ajax_download',
                                    get={"_": time() * 1000,
                                         "action": "startTimer",
                                         "vfid": vfid},
                                    decode=True))

    if not "token" in response or not response['token']:
        self.fail('No token')

    self.setWait(int(delay))
    self.wait()

    self.html = self.load('http://bayfiles.com/ajax_download',
                          get={"token": response['token'],
                               "action": "getLink",
                               "vfid": vfid})

    # Get final link and download
    found = re.search(self.LINK_PATTERN, self.html)
    if not found:
        self.parseError("Free link")
    self.startDownload(found.group(1))
def getJsonResponse(self, get_dict, post_dict, field):
    # Fetch a JSON answer from the filepost API and return one field of it.
    #
    # get_dict/post_dict: request parameters forwarded to the API call.
    # field: key expected inside json['js']['answer'].
    # Returns the field value, or None on captcha/password errors so the
    # caller can retry; hard-fails on any other API error.
    #
    # Fix: removed dead commented-out code that referenced the undefined
    # name `js_answer`, and merged the three identical `return None`
    # error branches into one condition.
    json_response = json_loads(self.load('https://filepost.com/files/get/',
                                         get=get_dict, post=post_dict))
    self.logDebug(json_response)

    if not 'js' in json_response:
        self.parseError('JSON %s 1' % field)

    if 'error' in json_response['js']:
        error = json_response['js']['error']
        if error == 'download_delay':
            # API asks us to wait before the next download.
            self.retry(json_response['js']['params']['next_download'])
        elif ('Wrong file password' in error
              or 'You entered a wrong CAPTCHA code' in error
              or 'CAPTCHA Code nicht korrekt' in error):
            # Recoverable input errors: let the caller retry.
            return None
        elif 'CAPTCHA' in error:
            self.logDebug('error response is unknown, but mentions CAPTCHA -> return None')
            return None
        else:
            self.fail(error)

    if not 'answer' in json_response['js'] or not field in json_response['js']['answer']:
        self.parseError('JSON %s 2' % field)

    return json_response['js']['answer'][field]
def process(self, pyfile):
    # Resolve a hoster link through the Real-debrid unrestrict API and
    # download the generated premium link.
    #
    # NOTE(review): `new_url` is only bound in the first and last branches;
    # the offline()/tempOffline()/fail() calls presumably raise so the
    # code after the if/elif/else is never reached unbound — confirm.
    if re.match(self.__pattern__, pyfile.url):
        # URL already is a Real-debrid link; use it as-is.
        new_url = pyfile.url
    elif not self.account:
        self.logError(
            _("Please enter your %s account or deactivate this plugin") %
            "Real-debrid")
        self.fail("No Real-debrid account provided")
    else:
        self.logDebug("Old URL: %s" % pyfile.url)
        # Use only the first line of the stored password, if any.
        password = self.getPassword().splitlines()
        if not password:
            password = ""
        else:
            password = password[0]
        url = "https://real-debrid.com/ajax/unrestrict.php?lang=en&link=%s&password=%s&time=%s" % (
            quote(pyfile.url, ""), password, int(time() * 1000))
        page = self.load(url)
        data = json_loads(page)
        self.logDebug("Returned Data: %s" % data)
        if data["error"] != 0:
            if data["message"] == "Your file is unavailable on the hoster.":
                self.offline()
            else:
                # Any other API error is treated as temporarily offline.
                self.logWarning(data["message"])
                self.tempOffline()
        else:
            # Adopt the API-provided name only for placeholder .tmp names.
            if self.pyfile.name is not None and self.pyfile.name.endswith(
                    '.tmp') and data["file_name"]:
                self.pyfile.name = data["file_name"]
            self.pyfile.size = parseFileSize(data["file_size"])
            new_url = data['generated_links'][0][-1]
    # Force the scheme to match the user's https preference.
    if self.getConfig("https"):
        new_url = new_url.replace("http://", "https://")
    else:
        new_url = new_url.replace("https://", "http://")
    if new_url != pyfile.url:
        self.logDebug("New URL: %s" % new_url)
    if pyfile.name.startswith("http") or pyfile.name.startswith(
            "Unknown") or pyfile.name.endswith('..'):
        # Only use when the name wasn't already set to something meaningful.
        pyfile.name = self.getFilename(new_url)
    self.download(new_url, disposition=True)
    # Detect the Real-debrid error page in the downloaded payload.
    check = self.checkDownload({
        "error":
        "<title>An error occured while processing your request</title>"
    })
    if check == "error":
        # Usually this download can safely be retried.
        self.retry(reason="An error occured while generating link.",
                   wait_time=60)
def loadAccountInfo(self, user, req):
    """Fetch simply-premium.com account status via its JSON user API."""
    raw = req.load('http://www.simply-premium.com/api/user.php?format=json')
    self.logDebug("JSON data: %s" % raw)
    result = json_loads(raw)['result']

    # VIP flag decides premium status; missing/falsy means free account.
    premium = True if result.get('vip') else False

    # Expiry timestamp; -1 signals "unknown / not premium".
    validuntil = float(result['timeend']) if result.get('timeend') else -1

    # Remaining traffic, if reported.
    if result.get('remain_traffic'):
        trafficleft = float(result['remain_traffic']) / 1024  #@TODO: Remove `/ 1024` in 0.4.10
    else:
        trafficleft = None

    return {
        "premium": premium,
        "validuntil": validuntil,
        "trafficleft": trafficleft
    }
def getInfo(urls):
    """Yield (name, size, status, url) batches for DDLStorage links.

    DDLStorage API Documentation:
    http://www.ddlstorage.com/cgi-bin/api_req.cgi?req_type=doc
    """
    # Map each file code to its original URL for the result tuples.
    url_by_code = {}
    for link in urls:
        url_by_code[re.search(DdlstorageCom.__pattern__, link).group('ID')] = link

    # The API accepts up to 5 file codes per request.
    for chunk in chunks(url_by_code.keys(), 5):
        codes = ','.join(chunk)
        signature = md5('file_info_free%d%s%s'
                        % (53472, codes,
                           '25JcpU2dPOKg8E2OEoRqMSRu068r0Cv3')).hexdigest()
        raw = getURL('http://www.ddlstorage.com/cgi-bin/api_req.cgi',
                     post={'req_type': 'file_info_free',
                           'client_id': 53472,
                           'file_code': codes,
                           'sign': signature})
        # The response is wrapped in <pre> tags; strip them before parsing.
        entries = json_loads(raw.replace('<pre>', '').replace('</pre>', ''))

        batch = []
        for entry in entries:
            if entry['status'] == 'online':
                batch.append((entry['file_name'], int(entry['file_size']),
                              2, url_by_code[entry['file_code']]))
            else:
                batch.append((url_by_code[entry['file_code']], 0, 1,
                              url_by_code[entry['file_code']]))
        yield batch
def loadAccountInfo(self, req):
    """Retrieve premium status and expiry from the FSApi user endpoint."""
    raw = req.load(self.API_URL + "/user?method=getInfo&format=json",
                   post={"u": self.loginname,
                         "p": self.password},
                   decode=True)
    self.logDebug("account status retrieved from api %s" % raw)

    getinfo = json_loads(raw)["FSApi_User"]["getInfo"]

    # Failed login: report a non-premium, unknown-expiry account.
    if getinfo["status"] != "success":
        self.logError(_("Invalid login retrieving user details"))
        return {"validuntil": -1, "trafficleft": -1, "premium": False}

    user_info = getinfo["response"]["users"]["user"]
    premium = user_info["is_premium"]
    if premium:
        # Convert the "YYYY-mm-dd HH:MM:SS" expiry string to a unix timestamp.
        expires = user_info["premium_expiration"]
        validuntil = int(mktime(strptime(expires, "%Y-%m-%d %H:%M:%S")))
    else:
        validuntil = -1

    return {
        "validuntil": validuntil,
        "trafficleft": -1,
        "premium": premium
    }
def handle_free(self, pyfile):
    """Solve the uloz.to free-download captcha form and start the download."""
    action, inputs = self.parse_html_form('id="frm-downloadDialog-freeDownloadForm"')
    if not action or not inputs:
        self.error(_("Free download form not found"))

    self.log_debug("inputs.keys = " + str(inputs.keys()))

    legacy_fields = ("captcha_value", "captcha_id", "captcha_key")
    xapca_fields = ("captcha_value", "timestamp", "salt", "hash")

    #: Get and decrypt captcha
    if all(k in inputs for k in legacy_fields):
        #: Old version - last seen 9.12.2013
        self.log_debug('Using "old" version')

        code = self.captcha.decrypt("http://img.uloz.to/captcha/%s.png" % inputs['captcha_id'])
        self.log_debug("CAPTCHA ID: " + inputs['captcha_id'] + ", CAPTCHA VALUE: " + code)

        inputs.update({'captcha_id': inputs['captcha_id'],
                       'captcha_key': inputs['captcha_key'],
                       'captcha_value': code})

    elif all(k in inputs for k in xapca_fields):
        #: New version - better to get new parameters (like captcha reload) because of image url - since 6.12.2013
        self.log_debug('Using "new" version')

        xapca = self.load("http://www.ulozto.net/reloadXapca.php",
                          get={'rnd': str(int(time.time()))})
        self.log_debug("xapca = " + str(xapca))

        fresh = json_loads(xapca)
        code = self.captcha.decrypt(str(fresh['image']))
        self.log_debug("CAPTCHA HASH: " + fresh['hash'],
                       "CAPTCHA SALT: " + str(fresh['salt']),
                       "CAPTCHA VALUE: " + code)

        inputs.update({'timestamp': fresh['timestamp'],
                       'salt': fresh['salt'],
                       'hash': fresh['hash'],
                       'captcha_value': code})
    else:
        self.error(_("CAPTCHA form changed"))

    self.download("http://www.ulozto.net" + action, post=inputs)
def checkFile(self, url):
    """Validate a file via the API and return its region-localised URL."""
    file_id = getId(url)
    self.logDebug("file id is %s" % file_id)

    if not file_id:
        self.fail("Invalid URL")
        return None

    # Ask the API for the current status of the file and fix up our data.
    info_url = self.API_ADDRESS + "/link?method=getInfo&format=json&ids=%s" % file_id
    api_result = json_loads(self.load(info_url, decode=True))
    item = api_result["FSApi_Link"]["getInfo"]["response"]["links"][0]
    self.logDebug("api check returns %s" % item)

    if item["status"] != "AVAILABLE":
        self.offline()
    if item["is_password_protected"] != 0:
        self.fail("This file is password protected")
    # The is_premium_only flag is deliberately not checked here: the API
    # reports it unreliably (false information).

    self.pyfile.name = unquote(item["filename"])

    # Rewrite the URL's domain to the correct regional variation.
    url = item["url"]
    parts = re.search(self.URL_DOMAIN_PATTERN, url)
    if parts:
        url = parts.group("prefix") + self.getDomain() + parts.group("suffix")
        self.logDebug("localised url is %s" % url)

    return url
def getAccountStatus(self, user, req):
    """Log in to smoozed.com and return the parsed JSON account status.

    The password is key-stretched with PBKDF2, salted with its own
    SHA-256 hex digest, before being sent.
    """
    plain = self.getAccountData(user)['password']
    salt = hashlib.sha256(plain).hexdigest()
    derived = PBKDF2(plain, salt, iterations=1000).hexread(32)

    reply = req.load("http://www2.smoozed.com/api/login",
                     get={'auth': user, 'password': derived})
    return json_loads(reply)
def get_json_response(self, get_dict, post_dict, field):
    """Call the filepost API and return one field from its 'js' answer.

    Returns None for recoverable captcha/password errors so the caller
    can retry; hard-fails on any other reported error.
    """
    res = json_loads(self.load('https://filepost.com/files/get/',
                               get=get_dict,
                               post=post_dict))
    self.log_debug(res)

    if 'js' not in res:
        self.error(_("JSON %s 1") % field)

    js = res['js']
    if 'error' in js:
        message = js['error']
        if message == "download_delay":
            # API-imposed cooldown before the next download attempt.
            self.retry(wait_time=js['params']['next_download'])
        elif ('Wrong file password' in message
              or 'You entered a wrong CAPTCHA code' in message
              or 'CAPTCHA Code nicht korrekt' in message):
            return None
        elif 'CAPTCHA' in message:
            self.log_debug("Error response is unknown, but mentions CAPTCHA")
            return None
        else:
            self.fail(message)

    if 'answer' not in js or field not in js['answer']:
        self.error(_("JSON %s 2") % field)

    return js['answer'][field]
def loadAccountInfo(self, user, req):
    """Query linksnappy USERDETAILS and map it to pyLoad account info."""
    account = self.getAccountData(user)
    raw = req.load('http://gen.linksnappy.com/lseAPI.php',
                   get={'act': 'USERDETAILS',
                        'username': user,
                        'password': hashlib.md5(account['password']).hexdigest()})
    self.logDebug("JSON data: " + raw)
    info = json_loads(raw)

    if info['error']:
        return {"premium": False}

    details = info['return']

    expire = details['expire']
    if expire == 'lifetime':
        validuntil = -1
    elif expire == 'expired':
        return {"premium": False}
    else:
        validuntil = float(expire)

    # A string (or missing) trafficleft means "unlimited" -> report -1.
    if 'trafficleft' not in details or isinstance(details['trafficleft'], str):
        trafficleft = -1
    else:
        trafficleft = self.parseTraffic("%d MB" % details['trafficleft'])

    return {"premium": True,
            "validuntil": validuntil,
            "trafficleft": trafficleft}
def handleFree(self, pyfile):
    # Extract the free/guest download URL, fetch the mirror list from the
    # 115.com API and try each mirror until one download succeeds.
    #
    # Fix: previously `mirrors` stayed None when the API response carried
    # neither 'urls' nor 'data', so the for-loop below crashed with a
    # TypeError instead of failing cleanly; an empty list now routes into
    # the for-else fail. The bare `except:` is narrowed to Exception.
    m = re.search(self.LINK_FREE_PATTERN, self.html)
    if m is None:
        self.error(_("LINK_FREE_PATTERN not found"))

    url = m.group(1)
    self.logDebug(('FREEUSER' if m.group(2) == 'download' else 'GUEST') + ' URL', url)

    res = json_loads(self.load("http://115.com" + url, decode=False))

    if "urls" in res:
        mirrors = res['urls']
    elif "data" in res:
        mirrors = res['data']
    else:
        mirrors = []

    for mr in mirrors:
        try:
            url = mr['url'].replace("\\", "")
            self.logDebug("Trying URL: " + url)
            self.download(url)
            break
        except Exception:
            # Best-effort: a broken mirror just moves on to the next one.
            continue
    else:
        self.fail(_("No working link found"))
def getHosters(self):
    """Return domains of rapideo.pl hostings with direct download disabled."""
    rows = json_loads(self.getURL("https://www.rapideo.pl/clipboard.php?json=3").strip())

    # Collect domains only from rows where direct download is off ("0").
    domains = []
    for row in rows:
        if row["sdownload"] == "0":
            domains.extend(row["domains"])

    self.logDebug(domains)
    return domains
def do_recaptcha(self):
    """Solve the page's reCaptcha (up to 5 attempts) and return the
    download URL extracted from the page."""
    self.logDebug('Trying to solve captcha')
    captcha_key = re.search(self.CAPTCHA_KEY_PATTERN, self.html).group(1)
    shortencode = re.search(self.CAPTCHA_SHORTENCODE_PATTERN, self.html).group(1)
    url = re.search(self.CAPTCHA_DOWNLOAD_PATTERN, self.html).group(1)

    recaptcha = ReCaptcha(self)

    for _attempt in range(5):
        challenge, code = recaptcha.challenge(captcha_key)
        result = json_loads(self.load(self.file_info['HOST'] + '/rest/captcha/test',
                                      post={'challenge': challenge,
                                            'response': code,
                                            'shortencode': shortencode}))
        self.logDebug("reCaptcha response : %s" % result)
        # Keep the `== True` comparison: the endpoint returns a JSON
        # boolean and this preserves the original matching behaviour.
        if result == True:
            self.correctCaptcha()
            break
        self.invalidCaptcha()
    else:
        self.fail("Invalid captcha")

    return url
def getHoster(self):
    """Return the hoster list Fastix reports as allowed sources."""
    reply = getURL(
        "http://fastix.ru/api_v2/?apikey=5182964c3f8f9a7f0b00000a_kelmFB4n1IrnCDYuIFn2y&sub=allowed_sources"
    )
    return json_loads(reply)['allow']
def getHoster(self):
    # Return the hoster list supported by premiumize.me for this account,
    # filtered by the user's hosterListMode configuration.
    #
    # Fix: the API query string was mojibake-corrupted — "¶ms[login]" is
    # "&params[login]" with "&para" rendered as the pilcrow character.

    # If no accounts are available there will be no hosters available.
    if not self.account or not self.account.canUse():
        return []

    # Get account data.
    (user, data) = self.account.selectAccount()

    # Get supported hosters list from premiumize.me using the json API v1
    # (see https://secure.premiumize.me/?show=api).
    answer = getURL(
        "https://api.premiumize.me/pm-api/v1.php"
        "?method=hosterlist&params[login]=%s&params[pass]=%s"
        % (user, data['password']))
    data = json_loads(answer)

    # If the account is not valid there are no hosters available.
    if data['status'] != 200:
        return []

    # Extract hosters from the json reply.
    hosters = set(data['result']['hosterlist'])

    # Read config to check if certain hosters should not be handled.
    configMode = self.getConfig('hosterListMode')
    if configMode in ("listed", "unlisted"):
        configList = set(
            self.getConfig('hosterList').strip().lower().replace(
                '|', ',').replace(';', ',').split(','))
        configList.discard(u'')
        if configMode == "listed":
            hosters &= configList
        else:
            hosters -= configList

    return list(hosters)
def decrypt(self, pyfile):
    # Collect mirror links for a multiupload.com package, honouring the
    # user's preferred/ignored hoster configuration, and add them to the
    # package. Fails if no link can be extracted.
    self.html = self.load(pyfile.url)
    found = re.search(self.ML_LINK_PATTERN, self.html)
    # Direct multiupload link from the page, if present.
    ml_url = found.group(1) if found else None

    # Per-mirror status list from the progress endpoint; "r" is a
    # millisecond timestamp cache-buster.
    json_list = json_loads(self.load("http://multiupload.com/progress/", get = {
            "d": re.search(self.__pattern__, pyfile.url).group(1),
            "r": str(int(time()*1000))
            }))
    new_links = []

    # Normalise configured hoster names to their bare lowercase base name
    # (e.g. "Rapidshare.com" -> "rapidshare").
    prefered_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("preferedHoster").split('|')))

    if ml_url and 'multiupload' in prefered_set:
        new_links.append(ml_url)

    # First pass: only mirrors from preferred hosters that are active
    # and not deleted.
    for link in json_list:
        if link['service'].lower() in prefered_set and int(link['status']) and not int(link['deleted']):
            url = self.getLocation(link['url'])
            if url:
                new_links.append(url)

    # Fallback: nothing preferred matched, so accept anything not ignored.
    if not new_links:
        ignored_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("ignoredHoster").split('|')))

        # NOTE(review): ml_url may be None here and would be appended
        # as-is — presumably harmless downstream, but confirm.
        if 'multiupload' not in ignored_set:
            new_links.append(ml_url)

        for link in json_list:
            if link['service'].lower() not in ignored_set and int(link['status']) and not int(link['deleted']):
                url = self.getLocation(link['url'])
                if url:
                    new_links.append(url)

    if new_links:
        self.core.files.addLinks(new_links, self.pyfile.package().id)
    else:
        self.fail('Could not extract any links')
def getHoster(self):
    # Return the hoster list supported by premiumize.me for this account,
    # filtered by the user's hosterListMode configuration.
    #
    # Fix: the API query string was mojibake-corrupted — "¶ms[login]" is
    # "&params[login]" with "&para" rendered as the pilcrow character.

    # If no accounts are available there will be no hosters available.
    if not self.account or not self.account.canUse():
        return []

    # Get account data.
    (user, data) = self.account.selectAccount()

    # Get supported hosters list from premiumize.me using the json API v1
    # (see https://secure.premiumize.me/?show=api).
    answer = getURL("https://api.premiumize.me/pm-api/v1.php"
                    "?method=hosterlist&params[login]=%s&params[pass]=%s"
                    % (user, data['password']))
    data = json_loads(answer)

    # If the account is not valid there are no hosters available.
    if data['status'] != 200:
        return []

    # Extract hosters from the json reply.
    hosters = set(data['result']['hosterlist'])

    # Read config to check if certain hosters should not be handled.
    configMode = self.getConfig('hosterListMode')
    if configMode in ("listed", "unlisted"):
        configList = set(self.getConfig('hosterList').strip().lower().replace('|', ',').replace(';', ',').split(','))
        configList.discard(u'')
        if configMode == "listed":
            hosters &= configList
        else:
            hosters -= configList

    return list(hosters)