def check_errors(self):
    """Inspect the hoster response in ``self.data`` and react to known errors.

    Depending on the marker found in the page, this either re-logs the
    account, marks the file offline, or schedules a retry with an
    appropriate wait time. Falls through silently when no known error
    marker is present.

    Fix: corrected the "connctions" typo in the user-visible log/retry
    messages ("Reached maximum connections").
    """
    if "<valid>0</valid>" in self.data or (
        "You are not allowed to download from this host" in self.data
        and self.premium
    ):
        # Session appears invalid (or premium not honored): log in again.
        self.account.relogin()
        self.retry()

    elif "NOTFOUND" in self.data:
        self.offline()

    elif "downloadlimit" in self.data:
        # Too many simultaneous connections: short retry cycle.
        self.log_warning(self._("Reached maximum connections"))
        self.retry(5, 60, self._("Reached maximum connections"))

    elif "trafficlimit" in self.data:
        # Per-host daily traffic exhausted: wait until midnight.
        self.log_warning(self._("Reached daily limit for this host"))
        self.retry(wait=seconds.to_midnight(), msg="Daily limit for this host reached")

    elif "hostererror" in self.data:
        self.log_warning(
            self._("Hoster temporarily unavailable, waiting 1 minute and retry")
        )
        self.retry(5, 60, self._("Hoster is temporarily unavailable"))
def handle_multi(self, pyfile):
    """Leech a remote URL through the hoster's "upload by URL" feature.

    Requires a logged-in account. Submits ``pyfile.url`` to the site's
    upload form, waits for the remote transfer, then extracts the
    resulting hoster link into ``self.link``.
    """
    if not self.account:
        self.fail(
            self._("Only registered or premium users can use url leech feature")
        )

    #: Only tested with easybytez.com
    self.data = self.load("http://www.{}/".format(self.PLUGIN_DOMAIN))

    action, inputs = self.parse_html_form()

    # Random 12-digit upload session id appended to the form action.
    upload_id = "{:012}".format(int(random.random() * 10**12))
    action += upload_id + "&js_on=1&utype=prem&upload_type=url"

    inputs["tos"] = "1"
    inputs["url_mass"] = pyfile.url
    # NOTE: "up1oad_type" (digit one) is the literal field name the site expects.
    inputs["up1oad_type"] = "url"

    self.log_debug(action, inputs)

    #: Wait for file to upload to easybytez.com
    # Remote leeching can take a long time; raise the request timeout.
    self.req.set_option("timeout", 600)

    self.data = self.load(action, post=inputs)
    self.check_errors()

    # The upload result is reported inside a second form named "F1".
    action, inputs = self.parse_html_form("F1")
    if not inputs:
        self.retry(msg=self.info.get("error") or self._("TEXTAREA F1 not found"))

    self.log_debug(inputs)

    # "st" carries the upload status message.
    stmsg = inputs["st"]

    if stmsg == "OK":
        self.data = self.load(action, post=inputs)
    elif "Can not leech file" in stmsg:
        self.retry(20, timedelta(minutes=3).seconds, self._("Can not leech file"))
    elif "today" in stmsg:
        # Daily leech traffic exhausted: wait until midnight.
        self.retry(
            wait=seconds.to_midnight(),
            msg=self._("You've used all Leech traffic today"),
        )
    else:
        self.fail(stmsg)

    #: Get easybytez.com link for uploaded file
    m = re.search(self.LINK_LEECH_PATTERN, self.data)
    if m is None:
        self.error(self._("LINK_LEECH_PATTERN not found"))

    # Resolve the redirect target without downloading the body.
    self.link = self.load(m.group(1), just_header=True).get("location")
def handle_free(self, pyfile):
    """Free (non-premium) download flow for extabit.com.

    Honors premium-only and wait-time notices, solves the ReCaptcha via
    the site's JSON endpoint, then extracts the direct link into
    ``self.link``.
    """
    if r">Only premium users can download this file" in self.data:
        self.fail(self._("Only premium users can download this file"))

    m = re.search(
        r"Next free download from your ip will be available in <b>(\d+)\s*minutes",
        self.data,
    )
    if m is not None:
        # Per-IP cooldown reported in minutes; wait it out (reconnect allowed).
        self.wait(int(m.group(1)) * 60, True)
    elif "The daily downloads limit from your IP is exceeded" in self.data:
        self.log_warning(
            self._("You have reached your daily downloads limit for today")
        )
        self.wait(seconds.to_midnight(), True)

    self.log_debug("URL: " + self.req.http.last_effective_url)

    # Prefer the file id from the final (post-redirect) URL, falling back
    # to the id parsed from the original download link.
    m = re.match(self.__pattern__, self.req.http.last_effective_url)
    fileID = m.group("ID") if m else self.info["pattern"]["ID"]

    m = re.search(r"recaptcha/api/challenge\?k=(\w+)", self.data)
    if m is not None:
        self.captcha = ReCaptcha(pyfile)
        captcha_key = m.group(1)

        get_data = {"type": "recaptcha"}
        get_data["capture"], get_data["challenge"] = self.captcha.challenge(captcha_key)
        html = self.load("http://extabit.com/file/{}/".format(fileID), get=get_data)

        res = json.loads(html)
        if "ok" in res:
            self.captcha.correct()
        else:
            self.retry_captcha()
    else:
        # NOTE(review): ``res`` below is only bound on the captcha branch;
        # this relies on self.error()/retry_captcha() raising — confirm.
        self.error(self._("Captcha"))

    if "href" not in res:
        self.error(self._("Bad JSON response"))

    # "href" is a path fragment appended to the file URL.
    self.data = self.load("http://extabit.com/file/{}{}".format(fileID, res["href"]))

    m = re.search(self.LINK_FREE_PATTERN, self.data)
    if m is None:
        self.error(self._("LINK_FREE_PATTERN not found"))

    self.link = m.group(1)
def get_waiting_time(self):
    """Return the number of seconds to wait before the next download.

    Fetches the page first when no HTML has been loaded yet. A
    traffic-exhausted notice maps to the seconds remaining until
    midnight (and flags a reconnect); otherwise the countdown embedded
    in the page's JavaScript is used, falling back to 60 seconds.
    """
    # Lazily load the page when it has not been fetched yet.
    if not self.data:
        self.download_html()

    # Daily traffic exhausted: wait until midnight and request a reconnect.
    if "Your Traffic is used up for today" in self.data:
        self.want_reconnect = True
        return seconds.to_midnight()

    # Pull the wait counter out of the inline JavaScript,
    # e.g. "var time = 30;" or "var downloadWait = 30;".
    match = re.search(r"\s*var\s(?:downloadWait|time)\s=\s(\d*)[\d.]*;", self.data)
    return int(match.group(1)) if match else 60
def check_errors(self):
    """Extend the base error checks with site-specific limit handling.

    Scans the page for the download-limit and IP-blocked error messages
    and schedules a retry with an appropriate wait time when one of them
    is found.
    """
    super().check_errors()

    limit_hit = re.search(self.DOWNLOAD_LIMIT_ERROR_PATTERN, self.data)
    if limit_hit is not None:
        self.log_warning(limit_hit.group(0))
        # A "daily" limit resets at midnight; other limits after one hour.
        wait_time = (
            seconds.to_midnight()
            if limit_hit.group(1) == "daily"
            else timedelta(hours=1).seconds
        )
        self.retry(wait=wait_time, msg=limit_hit.group(0))

    blocked = re.search(self.IP_BLOCKED_ERROR_PATTERN, self.data)
    if blocked is not None:
        msg = self._(
            "You can't download more than one file within a certain time period in free mode"
        )
        self.log_warning(msg)
        self.retry(wait=timedelta(hours=24).seconds, msg=msg)
def handle_premium(self, pyfile):
    """Download *pyfile* through the premium multi-hoster API.

    Queries the "stream" endpoint for a direct download URL, maps
    API-reported errors to retry/fail, then applies the connection
    settings returned by the API before starting the transfer.
    """
    api_reply = self.api_response(
        "stream",
        self.account.user,
        self.account.info["login"]["password"],
        url=pyfile.url,
    )

    if api_reply["hasErrors"]:
        reason = api_reply["ErrorMSG"] or "Unknown error"
        # Limit-related errors are temporary: retry once limits reset at midnight.
        retryable = (
            "Customer reached daily limit for current hoster",
            "Accounts are maxed out for current hoster",
        )
        if reason in retryable:
            self.retry(wait=seconds.to_midnight())
        self.fail(reason)

    self.resume_download = api_reply["con_resume"]
    # con_max of 0/None/absent falls back to a single chunk.
    self.chunk_limit = api_reply.get("con_max", 1) or 1
    self.download(api_reply["url"], fixurl=False)
def handle_free(self, pyfile):
    """Free-mode download: honour the server-imposed wait time, solve the
    ReCaptcha, then fetch the direct link via the site's AJAX API."""
    self.req.http.last_url = pyfile.url
    # The AJAX endpoints only answer XMLHttpRequest-style requests.
    self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])

    load_info = self.get_json_response(
        "https://rapidu.net/ajax.php",
        get={"a": "getLoadTimeToDownload"},
        post={"_go": ""},
    )

    # "stop" means the daily transfer allowance is used up.
    if str(load_info["timeToDownload"]) == "stop":
        self.log_warning(self._("You've reach your daily download transfer"))
        self.retry(
            10,
            wait=seconds.to_midnight(),
            msg=self._("You've reach your daily download transfer"),
        )

    # timeToDownload appears to be an absolute epoch timestamp (current
    # time is subtracted) — wait out the difference.
    self.set_wait(int(load_info["timeToDownload"]) - int(time.time()))

    # Solve the captcha while the wait timer runs, then sleep out the wait.
    self.captcha = ReCaptcha(pyfile)
    response, challenge = self.captcha.challenge(self.RECAPTCHA_KEY)
    self.wait()

    captcha_check = self.get_json_response(
        "https://rapidu.net/ajax.php",
        get={"a": "getCheckCaptcha"},
        post={
            "_go": "",
            "captcha1": challenge,
            "captcha2": response,
            "fileId": self.info["pattern"]["ID"],
        },
    )
    if captcha_check["message"] == "success":
        self.link = captcha_check["url"]
def check_errors(self):
    """Inspect the API response in ``self.data`` and react to error codes.

    ``<code>5</code>`` triggers a re-login, ``<code>9</code>`` marks the
    file offline, and the limit/availability markers schedule retries
    with suitable wait times. Falls through silently when no known
    marker is present.

    Fix: corrected the "connctions" typo in the user-visible log/retry
    messages ("Reached maximum connections").
    """
    if "<code>5</code>" in self.data:
        # Session appears invalid: log in again and retry.
        self.account.relogin()
        self.retry()

    elif "<code>9</code>" in self.data:
        self.offline()

    elif "downloadlimit" in self.data:
        # Too many simultaneous connections: short retry cycle.
        self.log_warning(self._("Reached maximum connections"))
        self.retry(5, 60, self._("Reached maximum connections"))

    elif "trafficlimit" in self.data:
        # Daily traffic exhausted: wait until midnight.
        self.log_warning(self._("Reached daily limit"))
        self.retry(wait=seconds.to_midnight(), msg="Daily limit for this host reached")

    elif "<code>8</code>" in self.data:
        self.log_warning(
            self._("Hoster temporarily unavailable, waiting 1 minute and retry")
        )
        self.retry(5, 60, self._("Hoster is temporarily unavailable"))
def handle_free(self):
    """Free-mode download driven by the site's JSON "checkDownload" API.

    Runs the pre-download check (handling time limits, parallel-download
    errors and captcha/timer prompts), requests the download link, starts
    the transfer, then scans the downloaded file for error pages that the
    server may have returned instead of the real file.
    """
    self.data = self.load(self.url)

    # Pre-download status check; the response is a JSON object with either
    # a "fail" or a "success" key.
    action = self.load(self.url, post={"checkDownload": "check"})
    action = json.loads(action)
    self.log_debug(action)

    if "fail" in action:
        if action["fail"] == "timeLimit":
            # Fetch the error page so the long-wait duration can be parsed.
            self.data = self.load(
                self.url,
                post={"checkDownload": "showError", "errorType": "timeLimit"},
            )
            self.do_long_wait(re.search(self.LONG_WAIT_PATTERN, self.data))

        elif action["fail"] == "parallelDownload":
            self.log_warning(self._("Parallel download error, now waiting 60s"))
            self.retry(wait=60, msg=self._("parallelDownload"))

        else:
            self.fail(self._("Download check returned: {}").format(action["fail"]))

    elif "success" in action:
        if action["success"] == "showCaptcha":
            self.do_captcha()
            self.do_timmer()
        elif action["success"] == "showTimmer":
            self.do_timmer()

    else:
        self.error(self._("Unknown server response"))

    #: Show download link
    res = self.load(self.url, post={"downloadLink": "show"})
    self.log_debug(f"Show downloadLink response: {res}")
    if "fail" in res:
        self.error(self._("Couldn't retrieve download url"))

    #: This may either download our file or forward us to an error page
    self.download(self.url, post={"download": "normal"})
    self.log_debug(self.req.http.last_effective_url)

    # The "downloaded" file may actually be an error page; detect that.
    check = self.scan_download({
        "expired": self.LINK_EXPIRED_PATTERN,
        "wait": re.compile(self.LONG_WAIT_PATTERN),
        "limit": self.DL_LIMIT_PATTERN,
    })

    if check == "expired":
        self.log_debug("Download link was expired")
        self.retry()
    elif check == "wait":
        self.do_long_wait(self.last_check)
    elif check == "limit":
        # Daily download limit reached: wait until midnight, then retry.
        self.log_warning(self._("Download limited reached for today"))
        self.wait(seconds.to_midnight(), True)
        self.retry()

    #: Ease issue with later downloads appearing to be in parallel
    self.thread.m.reconnecting.wait(3)
def handle_free(self, pyfile):
    """Free-mode download for datafile.com via its AJAX endpoint.

    Parses the on-page countdown, validates the ReCaptcha, waits, then
    requests the download link. Error pages reached via redirect are
    mapped to offline/retry/temp-offline by their numeric error code.

    NOTE(review): the whole post-captcha flow sits under ``if captcha_key:``
    — when no captcha key is detected, nothing happens. Confirm that is
    the intended behavior.
    """
    # Countdown shown as "[hh:]mm:ss"; convert to seconds.
    m = re.search(r'<span class="time">([\d:]+)<', self.data)
    if m is not None:
        wait_time = sum(
            int(d) * 60**i
            for i, d in enumerate(reversed(m.group(1).split(":")))
        )
    else:
        wait_time = 0

    self.captcha = ReCaptcha(pyfile)
    captcha_key = self.captcha.detect_key()
    if captcha_key:
        response, challenge = self.captcha.challenge(captcha_key)
        post_data = {
            "doaction": "validateCaptcha",
            "recaptcha_challenge_field": challenge,
            "recaptcha_response_field": response,
            "fileid": self.info["pattern"]["ID"],
        }
        catcha_result = json.loads(
            self.load("http://www.datafile.com/files/ajax.html", post=post_data)
        )
        if not catcha_result["success"]:
            self.retry_captcha()

        self.captcha.correct()
        self.wait(wait_time)

        # Reuse the captcha POST payload, switching the action and adding
        # the token issued by the captcha validation.
        post_data["doaction"] = "getFileDownloadLink"
        post_data["token"] = catcha_result["token"]
        file_info = json.loads(
            self.load("http://www.datafile.com/files/ajax.html", post=post_data)
        )

        if file_info["success"]:
            self.link = file_info["link"]
            self.log_debug(f"URL:{self.link}")
        else:
            # On failure we may have been redirected to error.html?code=N.
            m = re.search(r"error\.html\?code=(\d+)", self.req.last_effective_url)
            if m is not None:
                error_code = int(m.group(1))
                if error_code in (2, 3):
                    self.offline()
                elif error_code == 7:
                    # Download limit: wait until midnight before retrying.
                    wait_time = seconds.to_midnight()
                    self.retry(wait=wait_time, msg=self._("Download limit exceeded"))
                elif error_code == 9:
                    self.temp_offline()
                else:
                    self.log_debug(f"Unknown error code {error_code}")