    def handle_free(self):
        self.data = self.load(self.url)
        action = self.load(self.url, post={'checkDownload': "check"})
        action = json.loads(action)
        self.log_debug(action)

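        #: The check reply contains either a 'fail' key (wait / parallel download) or a 'success' key (captcha / timer)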
        if "fail" in action:
            if action['fail'] == "timeLimit":
                self.data = self.load(self.url, post={'checkDownload': "showError", 'errorType': "timeLimit"})

                self.do_long_wait(re.search(self.LONG_WAIT_PATTERN, self.data))

            elif action['fail'] == "parallelDownload":
                self.log_warning(_("Parallel download error, now waiting 60s"))
                self.retry(wait=60, msg=_("parallelDownload"))

            else:
                self.fail(_("Download check returned: %s") % action['fail'])

        elif "success" in action:
            if action['success'] == "showCaptcha":
                self.do_captcha()
                self.do_timmer()
            elif action['success'] == "showTimmer":
                self.do_timmer()

        else:
            self.error(_("Unknown server response"))

        #: Show download link
        res = self.load(self.url, post={'downloadLink': "show"})
        self.log_debug("Show downloadLink response: %s" % res)
        if "fail" in res:
            self.error(_("Couldn't retrieve download url"))

        #: This may either download our file or forward us to an error page
        self.download(self.url, post={'download': "normal"})
        self.log_debug(self.req.http.lastEffectiveURL)

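        #: Inspect the downloaded file for known error pages (expired link, long wait, daily limit)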
        check = self.check_file({'expired': self.LINK_EXPIRED_PATTERN,
                                 'wait'   : re.compile(self.LONG_WAIT_PATTERN),
                                 'limit'  : self.DL_LIMIT_PATTERN})

        if check == "expired":
            self.log_debug("Download link was expired")
            self.retry()

        elif check == "wait":
            self.do_long_wait(self.last_check)

        elif check == "limit":
            self.log_warning(_("Download limited reached for today"))
            self.wait(seconds_to_midnight(), True)
            self.retry()

        self.thread.m.reconnecting.wait(3)  #: Ease issue with later downloads appearing to be in parallel
Example #2
    def handle_multi(self, pyfile):
        if not self.account:
            self.fail(_("Only registered or premium users can use url leech feature"))

        #: Only tested with easybytez.com
        self.html = self.load("http://www.%s/" % self.HOSTER_DOMAIN)

        action, inputs = self.parse_html_form()

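        #: Append a random 12-digit upload id plus the url-leech parameters to the form action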
        upload_id = "%012d" % int(random.random() * 10 ** 12)
        action += upload_id + "&js_on=1&utype=prem&upload_type=url"

        inputs['tos'] = '1'
        inputs['url_mass'] = pyfile.url
        inputs['up1oad_type'] = 'url'

        self.log_debug(action, inputs)

        self.req.setOption("timeout", 600)  #: Wait for file to upload to easybytez.com

        self.html = self.load(action, post=inputs)

        self.check_errors()

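        #: The 'F1' form carries the leech result; its 'st' field holds the status message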
        action, inputs = self.parse_html_form('F1')
        if not inputs:
            self.retry(reason=self.info['error'] if 'error' in self.info else _("TEXTAREA F1 not found"))

        self.log_debug(inputs)

        stmsg = inputs['st']

        if stmsg == 'OK':
            self.html = self.load(action, post=inputs)

        elif 'Can not leech file' in stmsg:
            self.retry(20, 3 * 60, _("Can not leech file"))

        elif 'today' in stmsg:
            self.retry(wait_time=seconds_to_midnight(gmt=2), reason=_("You've used all Leech traffic today"))

        else:
            self.fail(stmsg)

        #: Get easybytez.com link for uploaded file
        m = re.search(self.LINK_LEECH_PATTERN, self.html)
        if m is None:
            self.error(_("LINK_LEECH_PATTERN not found"))

        header = self.load(m.group(1), just_header=True)

        if 'location' in header:  #: Direct download link
            self.link = header['location']

    def get_waiting_time(self):
        if not self.data:
            self.download_html()

        if "Your Traffic is used up for today" in self.data:
            self.wantReconnect = True
            return seconds_to_midnight()

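        #: Otherwise read the wait time (in seconds) from the page's javascript countdown, falling back to 60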
        timestring = re.search(r'\s*var\s(?:downloadWait|time)\s=\s(\d*)[\d.]*;', self.data)
        if timestring:
            return int(timestring.group(1))
        else:
            return 60
Example #4
    def handle_free(self, pyfile):
        if r">Only premium users can download this file" in self.html:
            self.fail(_("Only premium users can download this file"))

        m = re.search(r"Next free download from your ip will be available in <b>(\d+)\s*minutes", self.html)
        if m:
            self.wait(int(m.group(1)) * 60, True)
        elif "The daily downloads limit from your IP is exceeded" in self.html:
            self.log_warning(_("You have reached your daily downloads limit for today"))
            self.wait(seconds_to_midnight(gmt=2), True)

        self.log_debug("URL: " + self.req.http.lastEffectiveURL)
        m = re.match(self.__pattern__, self.req.http.lastEffectiveURL)
        fileID = m.group('ID') if m else self.info['pattern']['ID']

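        #: Extract the ReCaptcha public key from the page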
        m = re.search(r'recaptcha/api/challenge\?k=(\w+)', self.html)
        if m:
            recaptcha = ReCaptcha(self)
            captcha_key = m.group(1)

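            #: Up to 5 captcha attempts; the for-else clause fails the download only if no attempt was accepted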
            for _i in xrange(5):
                get_data = {'type': "recaptcha"}
                get_data['capture'], get_data['challenge'] = recaptcha.challenge(captcha_key)
                res = json_loads(self.load("http://extabit.com/file/%s/" % fileID, get=get_data))
                if "ok" in res:
                    self.captcha.correct()
                    break
                else:
                    self.captcha.invalid()
            else:
                self.fail(_("Invalid captcha"))
        else:
            self.error(_("Captcha"))

        if not "href" in res:
            self.error(_("Bad JSON response"))

        self.html = self.load("http://extabit.com/file/%s%s" % (fileID, res['href']))

        m = re.search(self.LINK_FREE_PATTERN, self.html)
        if m is None:
            self.error(_("LINK_FREE_PATTERN not found"))

        self.link = m.group(1)
Example #5
    def check_errors(self):
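        #: Map known error markers in the reply to the proper reaction (relogin, offline, retry or wait)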
        if self.req.code == 302:  #@NOTE: 302 Moved Temporarily -> the session most likely expired, so relogin and retry
            self.account.relogin(self.user)
            self.retry()

        elif "<code>9</code>" in self.html:
            self.offline()

        elif "downloadlimit" in self.html:
            self.log_warning(_("Reached maximum connctions"))
            self.retry(5, 60, _("Reached maximum connctions"))

        elif "trafficlimit" in self.html:
            self.log_warning(_("Reached daily limit"))
            self.retry(wait_time=seconds_to_midnight(gmt=2), reason="Daily limit for this host reached")

        elif "<code>8</code>" in self.html:
            self.log_warning(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
            self.retry(5, 60, _("Hoster is temporarily unavailable"))
Example #6
    def check_errors(self):
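        #: Same idea as above: check the reply for error markers and relogin, mark offline, or retry/wait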
        if '<valid>0</valid>' in self.html or (
                "You are not allowed to download from this host" in self.html and self.premium):
            self.account.relogin(self.user)
            self.retry()

        elif "NOTFOUND" in self.html:
            self.offline()

        elif "downloadlimit" in self.html:
            self.log_warning(_("Reached maximum connctions"))
            self.retry(5, 60, _("Reached maximum connctions"))

        elif "trafficlimit" in self.html:
            self.log_warning(_("Reached daily limit for this host"))
            self.retry(wait_time=seconds_to_midnight(gmt=2), reason="Daily limit for this host reached")

        elif "hostererror" in self.html:
            self.log_warning(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
            self.retry(5, 60, _("Hoster is temporarily unavailable"))

    def handle_free(self, pyfile):
        action, inputs = self.parse_html_form('id="ifree_form"')
        if not action:
            self.error(_("Form not found"))

        pyfile.size = float(inputs["sssize"])
        self.log_debug(action, inputs)
        inputs["desc"] = ""

        self.data = self.load(urlparse.urljoin("http://letitbit.net/", action), post=inputs)

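        #: The reply carries the countdown length and the ReCaptcha control field needed for the captcha check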
        m = re.search(self.SECONDS_PATTERN, self.data)
        seconds = int(m.group(1)) if m else 60

        self.log_debug("Seconds found", seconds)

        m = re.search(self.CAPTCHA_CONTROL_FIELD, self.data)
        if m is None:
            self.error(_("CAPTCHA_CONTROL_FIELD not found"))
        recaptcha_control_field = m.group(1)

        self.log_debug("ReCaptcha control field found", recaptcha_control_field)

        self.wait(seconds)

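        #: After the countdown the ajax endpoint has to answer "1" before the captcha may be solved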
        res = self.load("http://letitbit.net/ajax/download3.php", post=" ")
        if res != "1":
            self.error(_("Unknown response - ajax_check_url"))

        self.log_debug(res)

        recaptcha = ReCaptcha(self)
        response, challenge = recaptcha.challenge()

        post_data = {
            "recaptcha_challenge_field": challenge,
            "recaptcha_response_field": response,
            "recaptcha_control_field": recaptcha_control_field,
        }

        self.log_debug("Post data to send", post_data)

        res = self.load("http://letitbit.net/ajax/check_recaptcha.php", post=post_data)

        self.log_debug(res)

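        #: The captcha check answers with an error string, a JSON list of download urls, or a direct link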
        if not res or res == "error_wrong_captcha":
            self.retry_captcha()

        elif res == "error_free_download_blocked":
            self.log_warning(_("Daily limit reached"))
            self.wait(seconds_to_midnight(), True)

        elif res.startswith("["):
            urls = json.loads(res)

        elif res.startswith("http://"):
            urls = [res]

        else:
            self.error(_("Unknown response - captcha check"))

        self.link = urls[0]