def handleMulti(self, pyfile):
    """Leech pyfile.url through the hoster's remote-upload ("url leech") form.

    Requires an account; uploads the remote url via the site's upload form,
    polls the result form ('F1') for the leech status, then resolves the
    leeched file's direct link into self.link. Failure paths delegate to
    the plugin helpers (fail/retry/error).
    """
    if not self.account:
        self.fail(_("Only registered or premium users can use url leech feature"))

    # only tested with easybytez.com
    self.html = self.load("http://www.%s/" % self.HOSTER_DOMAIN)

    action, inputs = self.parseHtmlForm()

    # The upload endpoint expects a random 12-digit upload id appended to the
    # form action, plus premium/url-upload mode flags.
    upload_id = "%012d" % int(random() * 10**12)
    action += upload_id + "&js_on=1&utype=prem&upload_type=url"

    inputs['tos'] = '1'
    inputs['url_mass'] = pyfile.url
    inputs['up1oad_type'] = 'url'  # NOTE: "up1oad" (digit one) is the site's actual field name

    self.logDebug(action, inputs)

    self.req.setOption("timeout", 600)  #: wait for file to upload to easybytez.com

    self.html = self.load(action, post=inputs)

    self.checkErrors()

    # The result page carries a second form ('F1') whose inputs report the
    # leech outcome in the 'st' field.
    action, inputs = self.parseHtmlForm('F1')
    if not inputs:
        if self.errmsg:
            self.retry(reason=self.errmsg)
        else:
            self.error(_("TEXTAREA F1 not found"))

    self.logDebug(inputs)

    stmsg = inputs['st']

    if stmsg == 'OK':
        self.html = self.load(action, post=inputs)
    elif 'Can not leech file' in stmsg:
        self.retry(20, 3 * 60, _("Can not leech file"))
    elif 'today' in stmsg:
        # leech traffic quota resets at midnight (site time, GMT+2)
        self.retry(wait_time=secondsToMidnight(gmt=2), reason=_("You've used all Leech traffic today"))
    else:
        self.fail(stmsg)

    # get easybytez.com link for uploaded file
    m = re.search(self.LINK_LEECH_PATTERN, self.html)
    if m is None:
        self.error(_("LINK_LEECH_PATTERN not found"))

    header = self.load(m.group(1), just_header=True, decode=True)

    if 'location' in header:  #: Direct download link
        self.link = header['location']
def handleFree(self, pyfile):
    """Perform the free (non-premium) download sequence.

    Steps: pre-download JSON check, captcha/timer handling, download-link
    reveal, the actual download, then classification of any error page the
    server returned instead of the file.
    """
    self.html = self.load(self.url)

    action = self.load(self.url, post={"checkDownload": "check"}, decode=True)
    action = json_loads(action)  # server answers with a small JSON status object
    self.logDebug(action)

    if "fail" in action:
        if action['fail'] == "timeLimit":
            # fetch the error page to learn how long we must wait
            self.html = self.load(self.url,
                                  post={"checkDownload": "showError", "errorType": "timeLimit"},
                                  decode=True)
            self.doLongWait(re.search(self.LONG_WAIT_PATTERN, self.html))
        elif action['fail'] == "parallelDownload":
            self.logWarning(_("Parallel download error, now waiting 60s"))
            self.retry(wait_time=60, reason=_("parallelDownload"))
        else:
            self.fail(_("Download check returned: %s") % action['fail'])
    elif "success" in action:
        if action['success'] == "showCaptcha":
            self.doCaptcha()
            self.doTimmer()
        elif action['success'] == "showTimmer":
            self.doTimmer()
    else:
        self.error(_("Unknown server response"))

    # show download link
    res = self.load(self.url, post={"downloadLink": "show"}, decode=True)
    self.logDebug("Show downloadLink response: %s" % res)
    if "fail" in res:
        self.error(_("Couldn't retrieve download url"))

    # this may either download our file or forward us to an error page
    self.download(self.url, post={"download": "normal"})
    self.logDebug(self.req.http.lastEffectiveURL)

    check = self.checkDownload({"expired": self.LINK_EXPIRED_PATTERN,
                                "wait"   : re.compile(self.LONG_WAIT_PATTERN),
                                "limit"  : self.DAILY_LIMIT_PATTERN})
    if check == "expired":
        self.logDebug("Download link was expired")
        self.retry()
    elif check == "wait":
        self.doLongWait(self.lastCheck)
    elif check == "limit":
        # daily cap reached: wait until midnight (site time, GMT+2), then retry
        self.logWarning(_("Download limited reached for today"))
        self.setWait(secondsToMidnight(gmt=2), True)
        self.wait()
        self.retry()

    self.thread.m.reconnecting.wait(3)  # Ease issue with later downloads appearing to be in parallel
def checkErrors(self):
    """Scan self.html for the hoster's error banner and react to it.

    Stores the stripped message in self.errmsg (None when absent), mirrors
    it into self.info['error'], and dispatches the matching wait / retry /
    fail action based on keywords in the message text.
    """
    found = re.search(self.ERROR_PATTERN, self.html)

    if not found:
        self.errmsg = None
    else:
        self.errmsg = found.group(1).strip()
        self.logWarning(re.sub(r"<.*?>", " ", self.errmsg))

        if 'wait' in self.errmsg:
            # accumulate every "<number> <unit>" fragment into seconds
            # (a bare number counts as seconds)
            unit_seconds = {"hr": 3600, "hour": 3600, "min": 60, "sec": 1, "": 1}
            wait_time = 0
            for amount, unit in re.findall(r'(\d+)\s*(hr|hour|min|sec|)', self.errmsg, re.I):
                wait_time += int(amount) * unit_seconds[unit.lower()]
            self.wait(wait_time, wait_time > 300)

        elif 'country' in self.errmsg:
            self.fail(_("Downloads are disabled for your country"))

        elif 'captcha' in self.errmsg:
            self.invalidCaptcha()

        elif 'premium' in self.errmsg and 'require' in self.errmsg:
            self.fail(_("File can be downloaded by premium users only"))

        elif 'limit' in self.errmsg:
            # daily limits reset at site midnight (GMT+2); other limits
            # get hourly retries
            if 'day' in self.errmsg:
                delay, retries = secondsToMidnight(gmt=2), 3
            else:
                delay, retries = 1 * 60 * 60, 24

            self.wantReconnect = True
            self.retry(retries, delay, _("Download limit exceeded"))

        elif 'countdown' in self.errmsg or 'Expired' in self.errmsg:
            self.retry(reason=_("Link expired"))

        elif 'maintenance' in self.errmsg or 'maintainance' in self.errmsg:
            self.tempOffline()

        elif 'up to' in self.errmsg:
            self.fail(_("File too large for free download"))

        else:
            self.wantReconnect = True
            self.retry(wait_time=60, reason=self.errmsg)

    if self.errmsg:
        self.info['error'] = self.errmsg
    else:
        self.info.pop('error', None)
def handle_free(self, pyfile):
    """Run the free-download flow: pre-check, captcha/timer, link reveal,
    download, then post-download error classification.
    """
    self.html = self.load(self.url)

    reply = json_loads(self.load(self.url,
                                 post={"checkDownload": "check"},
                                 decode=True))
    self.logDebug(reply)

    if "fail" in reply:
        failure = reply['fail']
        if failure == "timeLimit":
            # load the error page so the long-wait pattern can be parsed
            self.html = self.load(self.url,
                                  post={"checkDownload": "showError", "errorType": "timeLimit"},
                                  decode=True)
            self.doLongWait(re.search(self.LONG_WAIT_PATTERN, self.html))
        elif failure == "parallelDownload":
            self.logWarning(_("Parallel download error, now waiting 60s"))
            self.retry(wait_time=60, reason=_("parallelDownload"))
        else:
            self.fail(_("Download check returned: %s") % failure)
    elif "success" in reply:
        outcome = reply['success']
        if outcome == "showCaptcha":
            self.doCaptcha()
            self.doTimmer()
        elif outcome == "showTimmer":
            self.doTimmer()
    else:
        self.error(_("Unknown server response"))

    # show download link
    shown = self.load(self.url, post={"downloadLink": "show"}, decode=True)
    self.logDebug("Show downloadLink response: %s" % shown)
    if "fail" in shown:
        self.error(_("Couldn't retrieve download url"))

    # this may either download our file or forward us to an error page
    self.download(self.url, post={"download": "normal"})
    self.logDebug(self.req.http.lastEffectiveURL)

    verdict = self.checkDownload({"expired": self.LINK_EXPIRED_PATTERN,
                                  "wait": re.compile(self.LONG_WAIT_PATTERN),
                                  "limit": self.DAILY_LIMIT_PATTERN})
    if verdict == "expired":
        self.logDebug("Download link was expired")
        self.retry()
    elif verdict == "wait":
        self.doLongWait(self.lastCheck)
    elif verdict == "limit":
        # daily cap hit: sleep until site midnight (GMT+2) and retry
        self.logWarning(_("Download limited reached for today"))
        self.setWait(secondsToMidnight(gmt=2), True)
        self.wait()
        self.retry()

    self.thread.m.reconnecting.wait(3)  #: Ease issue with later downloads appearing to be in parallel
def handleMulti(self, pyfile):
    """Leech pyfile.url through the hoster's remote-upload form and store
    the resulting direct download link in self.link.

    Only available to registered/premium users.
    """
    if not self.account:
        self.fail(_("Only registered or premium users can use url leech feature"))

    # only tested with easybytez.com
    self.html = self.load("http://www.%s/" % self.HOSTER_DOMAIN)

    form_action, form_inputs = self.parseHtmlForm()

    # the endpoint wants a random 12-digit upload id plus url-upload flags
    uid = "%012d" % int(random() * 10 ** 12)
    form_action += uid + "&js_on=1&utype=prem&upload_type=url"

    form_inputs['tos'] = '1'
    form_inputs['url_mass'] = pyfile.url
    form_inputs['up1oad_type'] = 'url'

    self.logDebug(form_action, form_inputs)

    self.req.setOption("timeout", 600)  #: wait for file to upload to easybytez.com

    self.html = self.load(form_action, post=form_inputs)
    self.checkErrors()

    # result form 'F1' reports the leech status in its 'st' input
    form_action, form_inputs = self.parseHtmlForm('F1')
    if not form_inputs:
        if not self.errmsg:
            self.error(_("TEXTAREA F1 not found"))
        else:
            self.retry(reason=self.errmsg)

    self.logDebug(form_inputs)

    status = form_inputs['st']
    if status == 'OK':
        self.html = self.load(form_action, post=form_inputs)
    elif 'Can not leech file' in status:
        self.retry(20, 3 * 60, _("Can not leech file"))
    elif 'today' in status:
        self.retry(wait_time=secondsToMidnight(gmt=2),
                   reason=_("You've used all Leech traffic today"))
    else:
        self.fail(status)

    # get easybytez.com link for uploaded file
    leech_m = re.search(self.LINK_LEECH_PATTERN, self.html)
    if leech_m is None:
        self.error(_("LINK_LEECH_PATTERN not found"))

    header = self.load(leech_m.group(1), just_header=True, decode=True)

    if 'location' in header:  #: Direct download link
        self.link = header['location']
def get_waiting_time(self):
    """Return the number of seconds to wait before the download may start.

    When the page reports the daily traffic is used up, flags a reconnect
    and waits until midnight (site time, GMT+2). Otherwise parses the
    page's JS wait counter; falls back to 60 seconds when none is found.
    """
    if not self.html:
        self.download_html()

    if "Your Traffic is used up for today" in self.html:
        self.wantReconnect = True
        return secondsToMidnight(gmt=2)

    # raw string so \s and \d are regex escapes, not (invalid/deprecated)
    # string escapes
    timestring = re.search(r'\s*var\s(?:downloadWait|time)\s=\s(\d*)[\d.]*;', self.html)
    if timestring:
        return int(timestring.group(1))
    else:
        return 60
def checkErrors(self):
    """Parse the hoster error message (ERROR_PATTERN) out of self.html.

    Sets self.errmsg (None when no error is present), mirrors it into
    self.info['error'], and triggers the matching wait/retry/fail action
    based on keywords in the message.
    """
    m = re.search(self.ERROR_PATTERN, self.html)
    if m is None:
        self.errmsg = None
    else:
        self.errmsg = m.group(1).strip()
        self.logWarning(re.sub(r"<.*?>", " ", self.errmsg))

        if 'wait' in self.errmsg:
            # sum all "<n> <unit>" fragments; a bare number counts as seconds
            wait_time = sum(int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1, "": 1}[u.lower()]
                            for v, u in re.findall(r'(\d+)\s*(hr|hour|min|sec|)', self.errmsg, re.I))
            self.wait(wait_time, wait_time > 300)

        elif 'country' in self.errmsg:
            self.fail(_("Downloads are disabled for your country"))

        elif 'captcha' in self.errmsg:
            self.invalidCaptcha()

        elif 'premium' in self.errmsg and 'require' in self.errmsg:
            self.fail(_("File can be downloaded by premium users only"))

        elif 'limit' in self.errmsg:
            # 'day' (not 'days') so "per day"/"today" messages also trigger
            # the until-midnight delay, matching the sibling checkErrors
            if 'day' in self.errmsg:
                delay = secondsToMidnight(gmt=2)
                retries = 3
            else:
                delay = 1 * 60 * 60
                retries = 24

            self.wantReconnect = True
            self.retry(retries, delay, _("Download limit exceeded"))

        elif 'countdown' in self.errmsg or 'Expired' in self.errmsg:
            self.retry(reason=_("Link expired"))

        elif 'maintenance' in self.errmsg or 'maintainance' in self.errmsg:
            self.tempOffline()

        elif 'up to' in self.errmsg:
            self.fail(_("File too large for free download"))

        else:
            self.wantReconnect = True
            self.retry(wait_time=60, reason=self.errmsg)

    if self.errmsg:
        self.info['error'] = self.errmsg
    else:
        self.info.pop('error', None)
def handleFree(self, pyfile):
    """Free-user download flow for extabit.com.

    Honors wait/daily-limit notices, solves the ReCaptcha (up to 5
    attempts), then fetches the file page and extracts the final link
    into self.link.
    """
    if r">Only premium users can download this file" in self.html:
        self.fail(_("Only premium users can download this file"))

    m = re.search(r"Next free download from your ip will be available in <b>(\d+)\s*minutes", self.html)
    if m:
        self.wait(int(m.group(1)) * 60, True)
    elif "The daily downloads limit from your IP is exceeded" in self.html:
        self.logWarning(_("You have reached your daily downloads limit for today"))
        self.wait(secondsToMidnight(gmt=2), True)

    self.logDebug("URL: " + self.req.http.lastEffectiveURL)
    m = re.match(self.__pattern, self.req.http.lastEffectiveURL)
    fileID = m.group('ID') if m else self.info['pattern']['ID']

    m = re.search(r'recaptcha/api/challenge\?k=(\w+)', self.html)
    if m:
        recaptcha = ReCaptcha(self)
        captcha_key = m.group(1)

        for _i in xrange(5):
            get_data = {"type": "recaptcha"}
            get_data['capture'], get_data['challenge'] = recaptcha.challenge(captcha_key)
            res = json_loads(self.load("http://extabit.com/file/%s/" % fileID, get=get_data))
            if "ok" in res:
                self.correctCaptcha()
                break
            else:
                self.invalidCaptcha()
        else:
            # all 5 attempts failed
            self.fail(_("Invalid captcha"))
    else:
        self.error(_("Captcha"))  #: aborts here, so `res` below is always bound

    if "href" not in res:  # idiomatic negative membership test (was `not "href" in res`)
        self.error(_("Bad JSON response"))

    self.html = self.load("http://extabit.com/file/%s%s" % (fileID, res['href']))

    m = re.search(self.LINK_FREE_PATTERN, self.html)
    if m is None:
        self.error(_("LINK_FREE_PATTERN not found"))

    self.link = m.group(1)
def get_waiting_time(self):
    """Compute the pre-download wait in seconds.

    Daily-traffic-exhausted pages trigger a reconnect flag and a wait
    until site midnight (GMT+2); otherwise the JS counter on the page is
    used, with a 60-second default when absent.
    """
    if not self.html:
        self.download_html()

    if "Your Traffic is used up for today" in self.html:
        self.wantReconnect = True
        return secondsToMidnight(gmt=2)

    # raw string: keep \s/\d as regex escapes instead of deprecated
    # string escapes
    timestring = re.search(r'\s*var\s(?:downloadWait|time)\s=\s(\d*)[\d.]*;', self.html)
    if timestring:
        return int(timestring.group(1))
    else:
        return 60
def handle_free(self, pyfile):
    """Download as a free user: respect wait/limit notices, solve the
    ReCaptcha, then follow the revealed file link.
    """
    if r">Only premium users can download this file" in self.html:
        self.fail(_("Only premium users can download this file"))

    wait_m = re.search(r"Next free download from your ip will be available in <b>(\d+)\s*minutes", self.html)
    if wait_m:
        self.wait(60 * int(wait_m.group(1)), True)
    elif "The daily downloads limit from your IP is exceeded" in self.html:
        self.logWarning(_("You have reached your daily downloads limit for today"))
        self.wait(secondsToMidnight(gmt=2), True)

    self.logDebug("URL: " + self.req.http.lastEffectiveURL)

    id_m = re.match(self.__pattern, self.req.http.lastEffectiveURL)
    file_id = id_m.group('ID') if id_m else self.info['pattern']['ID']

    captcha_m = re.search(r'recaptcha/api/challenge\?k=(\w+)', self.html)
    if captcha_m:
        recaptcha = ReCaptcha(self)
        key = captcha_m.group(1)

        solved = False
        for _attempt in xrange(5):
            get_data = {"type": "recaptcha"}
            get_data['capture'], get_data['challenge'] = recaptcha.challenge(key)
            res = json_loads(self.load("http://extabit.com/file/%s/" % file_id, get=get_data))
            if "ok" in res:
                self.correctCaptcha()
                solved = True
                break
            self.invalidCaptcha()

        if not solved:
            self.fail(_("Invalid captcha"))
    else:
        self.error(_("Captcha"))

    if "href" not in res:
        self.error(_("Bad JSON response"))

    self.html = self.load("http://extabit.com/file/%s%s" % (file_id, res['href']))

    link_m = re.search(self.LINK_FREE_PATTERN, self.html)
    if link_m is None:
        self.error(_("LINK_FREE_PATTERN not found"))

    self.link = link_m.group(1)
def checkErrors(self):
    """Inspect the API response in self.html for known error markers.

    Relogins on an invalidated session, marks missing files offline, and
    retries with an appropriate delay on connection-, traffic- and
    hoster-side errors.
    """
    if '<valid>0</valid>' in self.html \
       or ("You are not allowed to download from this host" in self.html and self.premium):
        # session invalid / host blocked for premium: refresh the login
        self.account.relogin(self.user)
        self.retry()
    elif "NOTFOUND" in self.html:
        self.offline()
    elif "downloadlimit" in self.html:
        self.logWarning(_("Reached maximum connections"))  # typo fix: was "connctions"
        self.retry(5, 60, _("Reached maximum connections"))
    elif "trafficlimit" in self.html:
        # traffic quota resets at site midnight (GMT+2)
        self.logWarning(_("Reached daily limit for this host"))
        self.retry(wait_time=secondsToMidnight(gmt=2), reason="Daily limit for this host reached")
    elif "hostererror" in self.html:
        self.logWarning(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
        self.retry(5, 60, _("Hoster is temporarily unavailable"))
def handleFree(self, pyfile):
    """Resolve pyfile.url through the unrestrict.li API (free flow).

    Polls the unrestrict endpoint up to 5 times, handles session/limit/
    offline responses, then parses the JSON reply into self.link and
    self.api_data.
    """
    for _i in xrange(5):
        self.html = self.load('https://unrestrict.li/unrestrict.php',
                              post={'link': pyfile.url, 'domain': 'long'})
        self.logDebug("JSON data: " + self.html)
        if self.html:
            break
    else:
        # all 5 attempts returned an empty body
        self.logInfo(_("Unable to get API data, waiting 1 minute and retry"))
        self.retry(5, 60, "Unable to get API data")

    if 'Expired session' in self.html \
       or ("You are not allowed to download from this host" in self.html and self.premium):
        self.account.relogin(self.user)
        self.retry()
    elif "File offline" in self.html:
        self.offline()
    elif "You are not allowed to download from this host" in self.html:
        self.fail(_("You are not allowed to download from this host"))
    elif "You have reached your daily limit for this host" in self.html:
        # host quota resets at site midnight (GMT+2)
        self.logWarning(_("Reached daily limit for this host"))
        self.retry(5, secondsToMidnight(gmt=2), "Daily limit for this host reached")
    elif "ERROR_HOSTER_TEMPORARILY_UNAVAILABLE" in self.html:
        self.logInfo(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
        self.retry(5, 60, "Hoster is temporarily unavailable")

    # reply maps the unrestricted url to its metadata
    self.html = json_loads(self.html)
    self.link = self.html.keys()[0]  # NOTE(review): Python 2 only — keys() is a view in Py3; confirm target runtime
    self.api_data = self.html[self.link]

    if hasattr(self, 'api_data'):  # NOTE(review): always true here (attribute assigned just above)
        self.setNameSize()
def checkErrors(self):
    """Check the multi-hoster API response for error markers and react.

    Handles: invalid session (relogin + retry), file not found (offline),
    connection limit, daily traffic limit (wait until site midnight,
    GMT+2) and temporary hoster outages (short retry).
    """
    if '<valid>0</valid>' in self.html \
       or ("You are not allowed to download from this host" in self.html and self.premium):
        self.account.relogin(self.user)
        self.retry()
    elif "NOTFOUND" in self.html:
        self.offline()
    elif "downloadlimit" in self.html:
        # message typo "connctions" corrected
        self.logWarning(_("Reached maximum connections"))
        self.retry(5, 60, _("Reached maximum connections"))
    elif "trafficlimit" in self.html:
        self.logWarning(_("Reached daily limit for this host"))
        self.retry(wait_time=secondsToMidnight(gmt=2), reason="Daily limit for this host reached")
    elif "hostererror" in self.html:
        self.logWarning(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
        self.retry(5, 60, _("Hoster is temporarily unavailable"))
def handle_free(self, pyfile):
    """Free download flow for letitbit.net.

    Submits the free-download form, waits the advertised countdown,
    solves the ReCaptcha with the page's control field, and stores the
    resulting download url in self.link.
    """
    action, inputs = self.parseHtmlForm('id="ifree_form"')
    if not action:
        self.error(_("ifree_form"))

    pyfile.size = float(inputs['sssize'])
    self.logDebug(action, inputs)
    inputs['desc'] = ""

    self.html = self.load(urlparse.urljoin("http://letitbit.net/", action), post=inputs)

    m = re.search(self.SECONDS_PATTERN, self.html)
    seconds = int(m.group(1)) if m else 60
    self.logDebug("Seconds found", seconds)

    m = re.search(self.CAPTCHA_CONTROL_FIELD, self.html)
    if m is None:  #: fail cleanly instead of AttributeError on m.group(1)
        self.error(_("CAPTCHA_CONTROL_FIELD not found"))
    recaptcha_control_field = m.group(1)
    self.logDebug("ReCaptcha control field found", recaptcha_control_field)

    self.wait(seconds)

    res = self.load("http://letitbit.net/ajax/download3.php", post=" ")
    if res != '1':
        self.error(_("Unknown response - ajax_check_url"))
    self.logDebug(res)

    recaptcha = ReCaptcha(self)
    response, challenge = recaptcha.challenge()
    post_data = {"recaptcha_challenge_field": challenge,
                 "recaptcha_response_field": response,
                 "recaptcha_control_field": recaptcha_control_field}
    self.logDebug("Post data to send", post_data)

    res = self.load("http://letitbit.net/ajax/check_recaptcha.php", post=post_data)
    self.logDebug(res)

    if not res:
        self.invalidCaptcha()

    if res == "error_free_download_blocked":
        # daily quota reached: wait until site midnight (GMT+2)
        self.logWarning(_("Daily limit reached"))
        self.wait(secondsToMidnight(gmt=2), True)

    if res == "error_wrong_captcha":
        self.invalidCaptcha()
        self.retry()
    elif res.startswith('['):
        urls = json_loads(res)  # JSON list of candidate urls
    elif res.startswith('http://'):
        urls = [res]
    else:
        self.error(_("Unknown response - captcha check"))

    self.link = urls[0]
def handleFree(self, pyfile):
    """Free download flow for letitbit.net.

    Submits the free-download form, waits the advertised countdown,
    solves the ReCaptcha with the page's control field, and stores the
    resulting download url in self.link.
    """
    action, inputs = self.parseHtmlForm('id="ifree_form"')
    if not action:
        self.error(_("ifree_form"))

    pyfile.size = float(inputs['sssize'])
    self.logDebug(action, inputs)
    inputs['desc'] = ""

    self.html = self.load(urljoin("http://letitbit.net/", action), post=inputs)

    m = re.search(self.SECONDS_PATTERN, self.html)
    seconds = int(m.group(1)) if m else 60
    self.logDebug("Seconds found", seconds)

    m = re.search(self.CAPTCHA_CONTROL_FIELD, self.html)
    if m is None:  #: guard added — avoid AttributeError on m.group(1)
        self.error(_("CAPTCHA_CONTROL_FIELD not found"))
    recaptcha_control_field = m.group(1)
    self.logDebug("ReCaptcha control field found", recaptcha_control_field)

    self.wait(seconds)

    res = self.load("http://letitbit.net/ajax/download3.php", post=" ")
    if res != '1':
        self.error(_("Unknown response - ajax_check_url"))
    self.logDebug(res)

    recaptcha = ReCaptcha(self)
    response, challenge = recaptcha.challenge()
    post_data = {
        "recaptcha_challenge_field": challenge,
        "recaptcha_response_field": response,
        "recaptcha_control_field": recaptcha_control_field
    }
    self.logDebug("Post data to send", post_data)

    res = self.load("http://letitbit.net/ajax/check_recaptcha.php", post=post_data)
    self.logDebug(res)

    if not res:
        self.invalidCaptcha()

    if res == "error_free_download_blocked":
        # daily quota reached: wait until site midnight (GMT+2)
        self.logWarning(_("Daily limit reached"))
        self.wait(secondsToMidnight(gmt=2), True)

    if res == "error_wrong_captcha":
        self.invalidCaptcha()
        self.retry()
    elif res.startswith('['):
        urls = json_loads(res)  # JSON list of candidate urls
    elif res.startswith('http://'):
        urls = [res]
    else:
        self.error(_("Unknown response - captcha check"))

    self.link = urls[0]