Example #1
class NCryptIn(Crypter):
    __name__    = "NCryptIn"
    __type__    = "crypter"
    __version__ = "1.41"
    __status__  = "testing"

    __pattern__ = r'http://(?:www\.)?ncrypt\.in/(?P<TYPE>folder|link|frame)-([^/\?]+)'
    __config__  = [("activated"         , "bool"          , "Activated"                       , True     ),
                   ("use_premium"       , "bool"          , "Use premium account if available", True     ),
                   ("folder_per_package", "Default;Yes;No", "Create folder for each package"  , "Default")]

    __description__ = """NCrypt.in decrypter plugin"""
    __license__     = "GPLv3"
    __authors__     = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
                       ("stickell", "*****@*****.**")]


    JK_KEY = "jk"
    CRYPTED_KEY = "crypted"

    NAME_PATTERN = r'<meta name="description" content="(?P<N>.+?)"'


    def setup(self):
        self.package = None
        self.cleaned_html = None
        self.links_source_order = ["cnl2", "rsdf", "ccf", "dlc", "web"]
        self.protection_type = None


    def decrypt(self, pyfile):
        #: Init
        self.package = pyfile.package()
        pack_links = []
        pack_name = self.package.name
        folder_name = self.package.folder

        #: Deal with single links
        if self.is_single_link():
            pack_links.extend(self.handle_single_link())

        #: Deal with folders
        else:

            #: Request folder home
            self.data = self.request_folder_home()
            self.cleaned_html = self.remove_html_crap(self.data)
            if not self.is_online():
                self.offline()

            #: Check for folder protection
            if self.is_protected():
                self.data = self.unlock_protection()
                self.cleaned_html = self.remove_html_crap(self.data)
                self.handle_errors()

            #: Prepare package name and folder
            (pack_name, folder_name) = self.get_package_info()

            #: Extract package links
            for link_source_type in self.links_source_order:
                pack_links.extend(self.handle_link_source(link_source_type))
                if pack_links:  #: Use only first source which provides links
                    break
            pack_links = set(pack_links)

        #: Pack and return links
        if pack_links:
            self.packages = [(pack_name, pack_links, folder_name)]


    def is_single_link(self):
        link_type = re.match(self.__pattern__, self.pyfile.url).group('TYPE')
        return link_type in ("link", "frame")


    def request_folder_home(self):
        return self.load(self.pyfile.url)


    def remove_html_crap(self, content):
        patterns = (r'(type="hidden".*?(name=".*?")?.*?value=".*?")',
                    r'display:none;">(.*?)</(div|span)>',
                    r'<div\s+class="jdownloader"(.*?)</div>',
                    r'<table class="global">(.*?)</table>',
                    r'<iframe\s+style="display:none(.*?)</iframe>')
        for pattern in patterns:
            rexpr = re.compile(pattern, re.S)
            content = re.sub(rexpr, "", content)
        return content


    def is_online(self):
        if "Your folder does not exist" in self.cleaned_html:
            self.log_debug("File not m")
            return False
        return True


    def is_protected(self):
        form = re.search(r'<form.*?name.*?protected.*?>(.*?)</form>', self.cleaned_html, re.S)
        if form:
            content = form.group(1)
            for keyword in ("password", "captcha"):
                if keyword in content:
                    self.protection_type = keyword
                    self.log_debug("Links are %s protected" % self.protection_type)
                    return True
        return False


    def get_package_info(self):
        m = re.search(self.NAME_PATTERN, self.data)
        if m is not None:
            name = folder = m.group('N').strip()
            self.log_debug("Found name [%s] and folder [%s] in package info" % (name, folder))
        else:
            name = self.package.name
            folder = self.package.folder
            self.log_debug("Package info not m, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
        return name, folder


    def unlock_protection(self):
        postData = {}

        form = re.search(r'<form name="protected"(.*?)</form>', self.cleaned_html, re.S).group(1)

        #: Submit package password
        if "password" in form:
            password = self.get_password()
            self.log_debug("Submitting password [%s] for protected links" % password)
            postData['password'] = password

        #: Resolve anicaptcha
        if "anicaptcha" in form:
            self.log_debug("Captcha protected")
            captchaUri = re.search(r'src="(/temp/anicaptcha/.+?)"', form).group(1)
            captcha = self.captcha.decrypt("http://ncrypt.in" + captchaUri)
            self.log_debug("Captcha resolved [%s]" % captcha)
            postData['captcha'] = captcha

        #: Resolve recaptcha
        if "recaptcha" in form:
            self.log_debug("ReCaptcha protected")
            captcha_key = re.search(r'\?k=(.*?)"', form).group(1)
            self.log_debug("Resolving ReCaptcha with key [%s]" % captcha_key)
            self.captcha = ReCaptcha(self.pyfile)
            response, challenge = self.captcha.challenge(captcha_key)
            postData['recaptcha_challenge_field'] = challenge
            postData['recaptcha_response_field']  = response

        #: Resolve circlecaptcha
        if "circlecaptcha" in form:
            self.log_debug("CircleCaptcha protected")
            captcha_img_url = "http://ncrypt.in/classes/captcha/circlecaptcha.php"
            coords = self.captcha.decrypt(captcha_img_url, input_type="png", output_type='positional', ocr="CircleCaptcha")
            self.log_debug("Captcha resolved, coords %s" % coords)
            postData['circle.x'] = coords[0]
            postData['circle.y'] = coords[1]

        #: Unlock protection
        postData['submit_protected'] = 'Continue to folder'
        return self.load(self.pyfile.url, post=postData)


    def handle_errors(self):
        if self.protection_type == "password":
            if "This password is invalid!" in self.cleaned_html:
                self.fail(_("Wrong password"))

        if self.protection_type == "captcha":
            if "The securitycheck was wrong" in self.cleaned_html:
                self.retry_captcha()
            else:
                self.captcha.correct()


    def handle_link_source(self, link_source_type):
        #: Check for JS engine
        require_js_engine = link_source_type in ("cnl2", "rsdf", "ccf", "dlc")
        if require_js_engine and not self.js:
            self.log_debug("No JS engine available, skip %s links" % link_source_type)
            return []

        #: Select suitable handler
        if link_source_type == "single":
            return self.handle_single_link()
        if link_source_type == "cnl2":
            return self.handle_CNL2()
        elif link_source_type in ("rsdf", "ccf", "dlc"):
            return self.handle_container(link_source_type)
        elif link_source_type == "web":
            return self.handle_web_links()
        else:
            self.error(_('Unknown source type "%s"') % link_source_type)


    def handle_single_link(self):
        self.log_debug("Handling Single link")
        pack_links = []

        #: Decrypt single link
        decrypted_link = self.decrypt_link(self.pyfile.url)
        if decrypted_link:
            pack_links.append(decrypted_link)

        return pack_links


    def handle_CNL2(self):
        self.log_debug("Handling CNL2 links")
        pack_links = []

        if 'cnl2_output' in self.cleaned_html:
            try:
                (vcrypted, vjk) = self._get_cipher_params()
                for (crypted, jk) in zip(vcrypted, vjk):
                    pack_links.extend(self._get_links(crypted, jk))

            except Exception:
                self.fail(_("Unable to decrypt CNL2 links"))

        return pack_links


    def handle_containers(self):
        self.log_debug("Handling Container links")
        pack_links = []

        pattern = r'/container/(rsdf|dlc|ccf)/(\w+)'
        containersLinks = re.findall(pattern, self.data)
        self.log_debug("Decrypting %d Container links" % len(containersLinks))
        for containerLink in containersLinks:
            link = "http://ncrypt.in/container/%s/%s.%s" % (containerLink[0], containerLink[1], containerLink[0])
            pack_links.append(link)

        return pack_links


    def handle_web_links(self):
        self.log_debug("Handling Web links")
        pattern = r'(http://ncrypt\.in/link-.*?=)'
        links = re.findall(pattern, self.data)

        pack_links = []
        self.log_debug("Decrypting %d Web links" % len(links))
        for i, link in enumerate(links):
            self.log_debug("Decrypting Web link %d, %s" % (i + 1, link))
            decrypted_link = self.decrypt_link(link)
            if decrypted_link:
                pack_links.append(decrypted_link)

        return pack_links


    def decrypt_link(self, link):
        try:
            url = link.replace("link-", "frame-")
            link = self.load(url, just_header=True)['location']
            return link

        except Exception as detail:
            self.log_debug("Error decrypting link %s, %s" % (link, detail))
Example #2
class FileserveCom(Hoster):
    __name__    = "FileserveCom"
    __type__    = "hoster"
    __version__ = "0.67"
    __status__  = "testing"

    __pattern__ = r'http://(?:www\.)?fileserve\.com/file/(?P<ID>[^/]+)'
    __config__  = [("activated", "bool", "Activated", True)]

    __description__ = """Fileserve.com hoster plugin"""
    __license__     = "GPLv3"
    __authors__     = [("jeix"     , "*****@*****.**"  ),
                       ("mkaay"    , "*****@*****.**"     ),
                       ("Paul King", None                 ),
                       ("zoidberg" , "*****@*****.**")]


    URLS = ["http://www.fileserve.com/file/",
            "http://www.fileserve.com/link-checker.php",
            "http://www.fileserve.com/checkReCaptcha.php"]

    CAPTCHA_KEY_PATTERN   = r'var reCAPTCHA_publickey=\'(.+?)\''
    LONG_WAIT_PATTERN     = r'<li class="title">You need to wait (\d+) (\w+) to start another download\.</li>'
    LINK_EXPIRED_PATTERN  = r'Your download link has expired'
    DL_LIMIT_PATTERN      = r'Your daily download limit has been reached'
    NOT_LOGGED_IN_PATTERN = r'<form (name="loginDialogBoxForm"|id="login_form")|<li><a href="/login\.php">Login</a></li>'


    def setup(self):
        self.resume_download = self.multiDL = self.premium
        self.file_id = re.match(self.__pattern__, self.pyfile.url).group('ID')
        self.url     = "%s%s" % (self.URLS[0], self.file_id)

        self.log_debug("File ID: %s URL: %s" % (self.file_id, self.url))


    def process(self, pyfile):
        pyfile.name, pyfile.size, status, self.url = check_file(self, [self.url])[0]
        if status != 2:
            self.offline()
        self.log_debug("File Name: %s Size: %d" % (pyfile.name, pyfile.size))

        if self.premium:
            self.handle_premium()
        else:
            self.handle_free()


    def handle_free(self):
        self.data = self.load(self.url)
        action = self.load(self.url, post={'checkDownload': "check"})
        action = json.loads(action)
        self.log_debug(action)

        if "fail" in action:
            if action['fail'] == "timeLimit":
                self.data = self.load(self.url, post={'checkDownload': "showError", 'errorType': "timeLimit"})

                self.do_long_wait(re.search(self.LONG_WAIT_PATTERN, self.data))

            elif action['fail'] == "parallelDownload":
                self.log_warning(_("Parallel download error, now waiting 60s"))
                self.retry(wait=60, msg=_("parallelDownload"))

            else:
                self.fail(_("Download check returned: %s") % action['fail'])

        elif "success" in action:
            if action['success'] == "showCaptcha":
                self.do_captcha()
                self.do_timmer()
            elif action['success'] == "showTimmer":
                self.do_timmer()

        else:
            self.error(_("Unknown server response"))

        #: Show download link
        res = self.load(self.url, post={'downloadLink': "show"})
        self.log_debug("Show downloadLink response: %s" % res)
        if "fail" in res:
            self.error(_("Couldn't retrieve download url"))

        #: This may either download our file or forward us to an error page
        self.download(self.url, post={'download': "normal"})
        self.log_debug(self.req.http.lastEffectiveURL)

        check = self.scan_download({'expired': self.LINK_EXPIRED_PATTERN,
                                    'wait'   : re.compile(self.LONG_WAIT_PATTERN),
                                    'limit'  : self.DL_LIMIT_PATTERN})

        if check == "expired":
            self.log_debug("Download link was expired")
            self.retry()

        elif check == "wait":
            self.do_long_wait(self.last_check)

        elif check == "limit":
            self.log_warning(_("Download limited reached for today"))
            self.wait(seconds_to_midnight(), True)
            self.retry()

        self.thread.m.reconnecting.wait(3)  #: Ease issue with later downloads appearing to be in parallel


    def do_timmer(self):
        res = self.load(self.url, post={'downloadLink': "wait"})
        self.log_debug("Wait response: %s" % res[:80])

        if "fail" in res:
            self.fail(_("Failed getting wait time"))

        if self.__name__ == "FilejungleCom":
            m = re.search(r'"waitTime":(\d+)', res)
            if m is None:
                self.fail(_("Cannot get wait time"))
            wait_time = int(m.group(1))
        else:
            wait_time = int(res) + 3

        self.wait(wait_time)


    def do_captcha(self):
        captcha_key = re.search(self.CAPTCHA_KEY_PATTERN, self.data).group(1)
        self.captcha = ReCaptcha(self.pyfile)

        response, challenge = self.captcha.challenge(captcha_key)
        html = self.load(self.URLS[2],
                         post={'recaptcha_challenge_field'  : challenge,
                               'recaptcha_response_field'   : response,
                               'recaptcha_shortencode_field': self.file_id})
        res = json.loads(html)
        if res['success']:
            self.captcha.correct()
        else:
            self.retry_captcha()


    def do_long_wait(self, m):
        wait_time = (int(m.group(1)) * {'seconds': 1, 'minutes': 60, 'hours': 3600}[m.group(2)]) if m else 12 * 60
        self.wait(wait_time, True)
        self.retry()


    def handle_premium(self):
        premium_url = None
        if self.__name__ == "FileserveCom":
            #: Try api download
            res = self.load("http://app.fileserve.com/api/download/premium/",
                            post={'username': self.account.user,
                                  'password': self.account.get_login('password'),
                                  'shorten': self.file_id})
            if res:
                res = json.loads(res)
                if res['error_code'] == "302":
                    premium_url = res['next']

                elif res['error_code'] in ["305", "500"]:
                    self.temp_offline()

                elif res['error_code'] in ["403", "605"]:
                    self.restart(premium=False)

                elif res['error_code'] in ["606", "607", "608"]:
                    self.offline()

                else:
                    self.log_error(res['error_code'], res['error_message'])

        self.download(premium_url or self.pyfile.url)

        if not premium_url and \
           self.scan_download({'login': re.compile(self.NOT_LOGGED_IN_PATTERN)}):
            self.account.relogin()
            self.retry(msg=_("Not logged in"))
Example #3
class ExtabitCom(SimpleHoster):
    __name__    = "ExtabitCom"
    __type__    = "hoster"
    __version__ = "0.72"
    __status__  = "testing"

    __pattern__ = r'http://(?:www\.)?extabit\.com/(file|go|fid)/(?P<ID>\w+)'
    __config__  = [("activated"   , "bool", "Activated"                                        , True),
                   ("use_premium" , "bool", "Use premium account if available"                 , True),
                   ("fallback"    , "bool", "Fallback to free download if premium fails"       , True),
                   ("chk_filesize", "bool", "Check file size"                                  , True),
                   ("max_wait"    , "int" , "Reconnect if waiting time is greater than minutes", 10  )]

    __description__ = """Extabit.com hoster plugin"""
    __license__     = "GPLv3"
    __authors__     = [("zoidberg", "*****@*****.**")]


    NAME_PATTERN = r'<th>File:</th>\s*<td class="col-fileinfo">\s*<div title="(?P<N>.+?)">'
    SIZE_PATTERN = r'<th>Size:</th>\s*<td class="col-fileinfo">(?P<S>.+?)</td>'
    OFFLINE_PATTERN = r'>File not found<'
    TEMP_OFFLINE_PATTERN = r'>(File is temporary unavailable|No download mirror)<'

    LINK_FREE_PATTERN = r'[\'"](http://guest\d+\.extabit\.com/\w+/.*?)[\'"]'


    def handle_free(self, pyfile):
        if r'>Only premium users can download this file' in self.data:
            self.fail(_("Only premium users can download this file"))

        m = re.search(r'Next free download from your ip will be available in <b>(\d+)\s*minutes', self.data)
        if m is not None:
            self.wait(int(m.group(1)) * 60, True)
        elif "The daily downloads limit from your IP is exceeded" in self.data:
            self.log_warning(_("You have reached your daily downloads limit for today"))
            self.wait(seconds_to_midnight(), True)

        self.log_debug("URL: " + self.req.http.lastEffectiveURL)
        m = re.match(self.__pattern__, self.req.http.lastEffectiveURL)
        fileID = m.group('ID') if m else self.info['pattern']['ID']

        m = re.search(r'recaptcha/api/challenge\?k=(\w+)', self.data)
        if m is not None:
            self.captcha = ReCaptcha(pyfile)
            captcha_key = m.group(1)

            get_data = {'type': "recaptcha"}
            get_data['capture'], get_data['challenge'] = self.captcha.challenge(captcha_key)

            html = self.load("http://extabit.com/file/%s/" % fileID, get=get_data)
            res = json.loads(html)

            if "ok" in res:
                self.captcha.correct()
            else:
                self.retry_captcha()
        else:
            self.error(_("Captcha"))

        if not "href" in res:
            self.error(_("Bad JSON response"))

        self.data = self.load("http://extabit.com/file/%s%s" % (fileID, res['href']))

        m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is None:
            self.error(_("LINK_FREE_PATTERN not found"))

        self.link = m.group(1)
Example #4
class OboomCom(Hoster):
    __name__    = "OboomCom"
    __type__    = "hoster"
    __version__ = "0.44"
    __status__  = "testing"

    __pattern__ = r'https?://(?:www\.)?oboom\.com/(?:#(?:id=|/)?)?(?P<ID>\w{8})'
    __config__  = [("activated", "bool", "Activated", True)]

    __description__ = """Oboom.com hoster plugin"""
    __license__     = "GPLv3"
    __authors__     = [("stanley", "*****@*****.**")]


    RECAPTCHA_KEY = "6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX"


    def setup(self):
        self.chunk_limit = 1
        self.multiDL = self.resume_download = self.premium


    def process(self, pyfile):
        self.pyfile.url.replace(".com/#id=", ".com/#")
        self.pyfile.url.replace(".com/#/", ".com/#")
        self.data = self.load(pyfile.url)
        self.get_file_id(self.pyfile.url)
        self.get_session_token()
        self.get_fileInfo(self.session_token, self.file_id)
        self.pyfile.name = self.file_name
        self.pyfile.size = self.file_size
        if not self.premium:
            self.solve_captcha()
        self.get_download_ticket()
        self.download("http://%s/1.0/dlh" % self.download_domain, get={'ticket': self.download_ticket, 'http_errors': 0})


    def load_url(self, url, get={}):
        return json.loads(self.load(url, get))


    def get_file_id(self, url):
        self.file_id = re.match(OboomCom.__pattern__, url).group('ID')


    def get_session_token(self):
        if self.premium:
            accountInfo = self.account.get_data()
            if "session" in accountInfo:
                self.session_token = accountInfo['session']
            else:
                self.fail(_("Could not retrieve premium session"))
        else:
            apiUrl = "http://www.oboom.com/1.0/guestsession"
            result = self.load_url(apiUrl)
            if result[0] == 200:
                self.session_token = result[1]
            else:
                self.fail(_("Could not retrieve token for guest session. Error code: %s") % result[0])


    def solve_captcha(self):
        self.captcha = ReCaptcha(self.pyfile)
        response, challenge = self.captcha.challenge(self.RECAPTCHA_KEY)

        apiUrl = "http://www.oboom.com/1.0/download/ticket"
        params = {'recaptcha_challenge_field': challenge,
                  'recaptcha_response_field': response,
                  'download_id': self.file_id,
                  'token': self.session_token}

        result = self.load_url(apiUrl, params)

        if result[0] == 200:
            self.download_token = result[1]
            self.download_auth  = result[2]
            self.captcha.correct()
            self.wait(30)

        elif result[0] == 403:
            if result[1] == -1:  #: Another download is running
                self.set_wait(15 * 60)
            else:
                self.set_wait(result[1])
                self.set_reconnect(True)

            self.wait()
            self.retry(5)

        elif result[0] == 400 and result[1] == "forbidden":
            self.retry(5, 15 * 60, _("Service unavailable"))

        else:
            self.retry_captcha()


    def get_fileInfo(self, token, fileId):
        apiUrl = "http://api.oboom.com/1.0/info"
        params = {'token': token, 'items': fileId, 'http_errors': 0}

        result = self.load_url(apiUrl, params)
        if result[0] == 200:
            item = result[1][0]
            if item['state'] == "online":
                self.file_size = item['size']
                self.file_name = item['name']
            else:
                self.offline()
        else:
            self.fail(_("Could not retrieve file info. Error code %s: %s") % (result[0], result[1]))


    def get_download_ticket(self):
        apiUrl = "http://api.oboom.com/1/dl"
        params = {'item': self.file_id, 'http_errors': 0}
        if self.premium:
            params['token'] = self.session_token
        else:
            params['token'] = self.download_token
            params['auth'] = self.download_auth

        result = self.load_url(apiUrl, params)
        if result[0] == 200:
            self.download_domain = result[1]
            self.download_ticket = result[2]
        elif result[0] == 421:
            self.retry(wait=result[2] + 60, msg=_("Connection limit exceeded"))
        else:
            self.fail(_("Could not retrieve download ticket. Error code: %s") % result[0])
Example #5
class LuckyShareNet(SimpleHoster):
    __name__    = "LuckyShareNet"
    __type__    = "hoster"
    __version__ = "0.13"
    __status__  = "testing"

    __pattern__ = r'https?://(?:www\.)?luckyshare\.net/(?P<ID>\d{10,})'
    __config__  = [("activated"   , "bool", "Activated"                                        , True),
                   ("use_premium" , "bool", "Use premium account if available"                 , True),
                   ("fallback"    , "bool", "Fallback to free download if premium fails"       , True),
                   ("chk_filesize", "bool", "Check file size"                                  , True),
                   ("max_wait"    , "int" , "Reconnect if waiting time is greater than minutes", 10  )]

    __description__ = """LuckyShare.net hoster plugin"""
    __license__     = "GPLv3"
    __authors__     = [("stickell", "*****@*****.**")]


    INFO_PATTERN = r'<h1 class=\'file_name\'>(?P<N>\S+)</h1>\s*<span class=\'file_size\'>Filesize: (?P<S>[\d.,]+)(?P<U>[\w^_]+)</span>'
    OFFLINE_PATTERN = r'There is no such file available'


    def parse_json(self, rep):
        if 'AJAX Error' in rep:
            html = self.load(self.pyfile.url)
            m = re.search(r'waitingtime = (\d+);', html)
            if m is not None:
                seconds = int(m.group(1))
                self.log_debug("You have to wait %d seconds between free downloads" % seconds)
                self.retry(wait=seconds)
            else:
                self.error(_("Unable to detect wait time between free downloads"))
        elif 'Hash expired' in rep:
            self.retry(msg=_("Hash expired"))
        return json.loads(rep)


    #@TODO: There should be a filesize limit for free downloads
    #:       Some files could not be downloaded in free mode
    def handle_free(self, pyfile):
        rep = self.load(r'http://luckyshare.net/download/request/type/time/file/' + self.info['pattern']['ID'])

        self.log_debug("JSON: " + rep)

        json_data = self.parse_json(rep)
        self.wait(json_data['time'])

        self.captcha = ReCaptcha(pyfile)

        response, challenge = self.captcha.challenge()
        rep = self.load(r'http://luckyshare.net/download/verify/challenge/%s/response/%s/hash/%s' %
                        (challenge, response, json_data['hash']))

        self.log_debug("JSON: " + rep)

        if 'Verification failed' in rep:
            self.retry_captcha()

        elif 'link' in rep:
            self.captcha.correct()
            json_data.update(self.parse_json(rep))
            if json_data['link']:
                self.link = json_data['link']