def handle_free(self, pyfile):
        m = re.search('<h2>((Daily )?Download Limit)</h2>', self.data)
        if m is not None:
            pyfile.error = encode(m.group(1))
            self.log_warning(pyfile.error)
            self.retry(6, (6 * 60 if m.group(2) else 15) * 60, pyfile.error)

        ajax_url = "http://uploading.com/files/get/?ajax"
        self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
        self.req.http.lastURL = pyfile.url

        res = json.loads(self.load(ajax_url, post={'action': 'second_page', 'code': self.info['pattern']['ID']}))

        if 'answer' in res and 'wait_time' in res['answer']:
            wait_time = int(res['answer']['wait_time'])
            self.log_info(_("Waiting %d seconds") % wait_time)
            self.wait(wait_time)
        else:
            self.error(_("No AJAX/WAIT"))

        res = json.loads(self.load(ajax_url, post={'action': 'get_link', 'code': self.info['pattern']['ID'], 'pass': '******'}))

        if 'answer' in res and 'link' in res['answer']:
            url = res['answer']['link']
        else:
            self.error(_("No AJAX/URL"))

        self.data = self.load(url)
        m = re.search(r'<form id="file_form" action="(.*?)"', self.data)
        if m is not None:
            url = m.group(1)
        else:
            self.error(_("No URL"))

        self.link = url
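The snippet above relies on pyLoad's request wrapper to send the `X-Requested-With` header before each AJAX call. As a rough standalone sketch of the same pattern with plain pycurl (the helper name and form fields are invented for illustration, not part of any plugin API):

import json
import pycurl
from io import BytesIO

try:
    from urllib import urlencode        #: Python 2
except ImportError:
    from urllib.parse import urlencode  #: Python 3

def ajax_json(url, fields):
    buf = BytesIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    #: The header AJAX endpoints typically check before answering with JSON
    c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
    c.setopt(pycurl.POSTFIELDS, urlencode(fields))
    c.setopt(pycurl.WRITEFUNCTION, buf.write)
    c.perform()
    c.close()
    return json.loads(buf.getvalue())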
Example #2
    def handle_free(self, pyfile):
        data = {'ukey': self.info['pattern']['ID']}

        m = re.search(self.AB1_PATTERN, self.data)
        if m is None:
            self.error(_("__AB1"))
        data['__ab1'] = m.group(1)

        recaptcha = ReCaptcha(self)

        m = re.search(self.RECAPTCHA_PATTERN, self.data)
        captcha_key = m.group(1) if m else recaptcha.detect_key()

        if captcha_key is None:
            self.error(_("ReCaptcha key not found"))

        response, challenge = recaptcha.challenge(captcha_key)
        self.account.form_data = {'recaptcha_challenge_field': challenge,
                                  'recaptcha_response_field' : response}
        self.account.relogin()
        self.retry(2)

        json_url = "http://filecloud.io/download-request.json"
        res = self.load(json_url, post=data)
        self.log_debug(res)
        res = json.loads(res)

        if "error" in res and res['error']:
            self.fail(res)

        self.log_debug(res)
        if res['captcha']:
            data['ctype'] = "recaptcha"
            data['recaptcha_response'], data['recaptcha_challenge'] = recaptcha.challenge(captcha_key)

            json_url = "http://filecloud.io/download-request.json"
            res = self.load(json_url, post=data)
            self.log_debug(res)
            res = json.loads(res)

            if "retry" in res and res['retry']:
                self.retry_captcha()
            else:
                self.captcha.correct()


        if res['dl']:
            self.data = self.load('http://filecloud.io/download.html')

            m = re.search(self.LINK_FREE_PATTERN % self.info['pattern']['ID'], self.data)
            if m is None:
                self.error(_("LINK_FREE_PATTERN not found"))

            if "size" in self.info and self.info['size']:
                self.check_data = {'size': int(self.info['size'])}

            self.link = m.group(1)
        else:
            self.fail(_("Unexpected server response"))
Example #3
    def handle_premium(self, pyfile):
        user, info = self.account.select()

        res = self.load("https://premium.rpnet.biz/client_api.php",
                        get={'username': user,
                             'password': info['login']['password'],
                             'action'  : "generate",
                             'links'   : pyfile.url})

        self.log_debug("JSON data: %s" % res)
        link_status = json.loads(res)['links'][0]  #: Get the first link... since we only queried one

        #: Check if we only have an id as a HDD link
        if 'id' in link_status:
            self.log_debug("Need to wait at least 30 seconds before requery")
            self.wait(30)  #: Wait for 30 seconds
            #: Let's query the server again, asking for the status of the link;
            #: we need to keep doing this until it reaches 100
            attempts = 30
            my_try = 0
            while my_try <= attempts:
                self.log_debug("Try: %d ; Max Tries: %d" % (my_try, attempts))
                res = self.load("https://premium.rpnet.biz/client_api.php",
                                get={'username': user,
                                     'password': info['login']['password'],
                                     'action'  : "downloadInformation",
                                     'id'      : link_status['id']})
                self.log_debug("JSON data hdd query: %s" % res)
                download_status = json.loads(res)['download']

                if download_status['status'] == "100":
                    link_status['generated'] = download_status['rpnet_link']
                    self.log_debug("Successfully downloaded to rpnet HDD: %s" % link_status['generated'])
                    break
                else:
                    self.log_debug("At %s%% for the file download" % download_status['status'])

                self.wait(30)
                my_try += 1

            if my_try > attempts:  #: We went over the limit!
                self.fail(_("Waited for about 15 minutes for download to finish but failed"))

        if 'generated' in link_status:
            self.link = link_status['generated']
            return
        elif 'error' in link_status:
            self.fail(link_status['error'])
        else:
            self.fail(_("Something went wrong, not supposed to enter here"))
Example #4
    def grab_hosters(self, user, password, data):
        html = self.load("http://fastix.ru/api_v2",
                         get={'apikey': "5182964c3f8f9a7f0b00000a_kelmFB4n1IrnCDYuIFn2y",
                              'sub'   : "allowed_sources"})
        host_list = json.loads(html)
        host_list = host_list['allow']
        return host_list

    def handle_premium(self):
        premium_url = None
        if self.__name__ == "FileserveCom":
            #: Try api download
            res = self.load("http://app.fileserve.com/api/download/premium/",
                            post={'username': self.account.user,
                                  'password': self.account.get_login('password'),
                                  'shorten': self.file_id})
            if res:
                res = json.loads(res)
                if res['error_code'] == "302":
                    premium_url = res['next']

                elif res['error_code'] in ["305", "500"]:
                    self.temp_offline()

                elif res['error_code'] in ["403", "605"]:
                    self.restart(premium=False)

                elif res['error_code'] in ["606", "607", "608"]:
                    self.offline()

                else:
                    self.log_error(res['error_code'], res['error_message'])

        self.download(premium_url or self.pyfile.url)

        if not premium_url and \
           self.check_file({'login': re.compile(self.NOT_LOGGED_IN_PATTERN)}):
            self.account.relogin()
            self.retry(msg=_("Not logged in"))
def get_info(urls):
    result  = []
    regex   = re.compile(DailymotionCom.__pattern__)
    apiurl  = "https://api.dailymotion.com/video/%s"
    request = {'fields': "access_error,status,title"}

    for url in urls:
        id   = regex.match(url).group('ID')
        html = get_url(apiurl % id, get=request)
        info = json.loads(html)

        name = (info['title'] + ".mp4") if "title" in info else url

        if "error" in info or info['access_error']:
            status = "offline"

        else:
            status = info['status']

            if status in ("ready", "published"):
                status = "online"

            elif status in ("waiting", "processing"):
                status = "temp. offline"

            else:
                status = "offline"

        result.append((name, 0, statusMap[status], url))

    return result
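A hypothetical call of get_info() above (the video id is invented; statusMap and get_url come from the surrounding pyLoad module):

for name, size, status, url in get_info(["https://www.dailymotion.com/video/x2abc12"]):
    print(name, status)  #: The tuple is (name, size, mapped status code, url)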
    def grab_hosters(self, user, password, data):
        json_data = self.load("http://www.simply-premium.com/api/hosts.php", get={'format': "json", 'online': 1})
        json_data = json.loads(json_data)

        host_list = [element['regex'] for element in json_data['result']]

        return host_list
    def send_to_transmission(self, url):
        transmission_rpc_url = self.get_config('rpc_url')
        client_request_id = self.classname + "".join(random.choice('0123456789ABCDEF') for _i in xrange(4))
        req = get_request()

        try:
            response = self.load(transmission_rpc_url,
                                 post=json.dumps({'arguments': {'filename': url},
                                                  'method'   : 'torrent-add',
                                                  'tag'      : client_request_id}),
                                 req=req)

        except Exception as e:
            if isinstance(e, BadHeader) and e.code == 409:
                headers = dict(re.findall(r"(?P<name>.+?): (?P<value>.+?)\r?\n", req.header))
                session_id = headers['X-Transmission-Session-Id']
                req.c.setopt(pycurl.HTTPHEADER, ["X-Transmission-Session-Id: %s" % session_id])
                try:
                    response = self.load(transmission_rpc_url,
                                         post=json.dumps({'arguments': {'filename': url},
                                                          'method'   : 'torrent-add',
                                                          'tag'      : client_request_id}),
                                         req=req)

                    res = json.loads(response)
                    if "result" in res:
                        self.log_debug("Result: %s" % res['result'])

                except Exception as e:
                    self.log_error(e)
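The except branch above implements Transmission's CSRF handshake: the first call fails with HTTP 409 and returns an X-Transmission-Session-Id header that must be echoed back on the retry. A hedged sketch of the same handshake with the requests library (the RPC URL and torrent link are placeholders):

import json
import requests

def transmission_add(rpc_url, torrent_url):
    payload = json.dumps({'method'   : "torrent-add",
                          'arguments': {'filename': torrent_url}})
    r = requests.post(rpc_url, data=payload)
    if r.status_code == 409:
        #: Retry with the session id Transmission handed back
        sid = r.headers['X-Transmission-Session-Id']
        r = requests.post(rpc_url, data=payload,
                          headers={'X-Transmission-Session-Id': sid})
    return r.json()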
Example #9
    def grab_hosters(self, user, password, data):
        hostings         = json.loads(self.load("https://www.nopremium.pl/clipboard.php?json=3").strip())
        hostings_domains = [domain for row in hostings for domain in row['domains'] if row['sdownload'] == "0"]

        self.log_debug(hostings_domains)

        return hostings_domains
Example #10
    def get_info(cls, url="", html=""):
        info = super(YadiSk, cls).get_info(url, html)

        if html:
            if 'idclient' not in info:
                info['idclient'] = ""
                for _i in xrange(32):
                    info['idclient'] += random.choice('0123456abcdef')

            m = re.search(r'<script id="models-client" type="application/json">(.+?)</script>', html)
            if m is not None:
                api_data = json.loads(m.group(1))
                try:
                    for sect in api_data:
                        if 'model' in sect:
                            if sect['model'] == "config":
                                info['version'] = sect['data']['version']
                                info['sk']  = sect['data']['sk']

                            elif sect['model'] == "resource":
                                info['id']   = sect['data']['id']
                                info['size'] = sect['data']['meta']['size']
                                info['name'] = sect['data']['name']

                except Exception as e:
                    info['status'] = 8
                    info['error'] = _("Unexpected server response: %s") % e

            else:
                info['status'] = 8
                info['error'] = _("could not find required json data")
    def grab_info(self, user, password, data):
        r = self.load('http://gen.linksnappy.com/lseAPI.php',
                      get={'act'     : 'USERDETAILS',
                           'username': user,
                           'password': hashlib.md5(password).hexdigest()})

        self.log_debug("JSON data: " + r)

        j = json.loads(r)

        if j['error']:
            return {'premium': False}

        validuntil = j['return']['expire']

        if validuntil == "lifetime":
            validuntil = -1

        elif validuntil == "expired":
            return {'premium': False}

        else:
            validuntil = float(validuntil)

        if 'trafficleft' not in j['return'] or isinstance(j['return']['trafficleft'], str):
            trafficleft = -1
        else:
            trafficleft = self.parse_traffic(j['return']['trafficleft'], "MB")

        return {'premium'    : True       ,
                'validuntil' : validuntil ,
                'trafficleft': trafficleft}
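Note that hashlib.md5() only accepts bytes on Python 3, so hashing a unicode password as above needs an explicit encode. A small version-safe sketch:

import hashlib

def md5_hex(password):
    if not isinstance(password, bytes):
        password = password.encode('utf-8')  #: Required on Python 3
    return hashlib.md5(password).hexdigest()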
    def handle_free(self, pyfile):
        m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is None:
            self.error(_("LINK_FREE_PATTERN not found"))

        url = m.group(1)

        self.log_debug(('FREEUSER' if m.group(2) == "download" else 'GUEST') + ' URL', url)

        res = json.loads(self.load(urlparse.urljoin("http://115.com/", url), decode=False))
        if "urls" in res:
            mirrors = res['urls']

        elif "data" in res:
            mirrors = res['data']

        else:
            mirrors = []  #: Unexpected response; the loop below falls through to fail()

        for mr in mirrors:
            try:
                self.link = mr['url'].replace("\\", "")
                self.log_debug("Trying URL: " + self.link)
                break

            except Exception:
                pass
        else:
            self.fail(_("No working link found"))
    def handle_free(self, pyfile):
        try:
            song_id = re.search(r'sounds:(\d+)"', self.data).group(1)

        except Exception:
            self.error(_("Could not find song id"))

        try:
            client_id = re.search(r'"clientID":"(.+?)"', self.data).group(1)

        except Exception:
            client_id = "b45b1aa10f1ac2941910a7f0d10f8e28"

        #: Url to retrieve the actual song url
        streams = json.loads(self.load("https://api.soundcloud.com/tracks/%s/streams" % song_id,
                             get={'client_id': client_id}))

        regex = re.compile(r'[^\d]')
        http_streams = sorted([(key, value) for key, value in streams.items() if key.startswith('http_')],
                              key=lambda t: int(regex.sub('', t[0]) or 0),  #: Keep only the bitrate digits and compare numerically
                              reverse=True)

        self.log_debug("Streams found: %s" % (http_streams or "None"))

        if http_streams:
            stream_name, self.link = http_streams[0 if self.get_config('quality') == "Higher" else -1]
            pyfile.name += '.' + stream_name.split('_')[1].lower()
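The sort key strips everything but the digits from stream names like http_mp3_128_url, and the int() conversion keeps 128 ahead of 96 (a plain string compare would not). A quick sketch with made-up stream names:

import re

streams = {'http_mp3_128_url': "u1", 'http_mp3_64_url': "u2", 'hls_mp3_128_url': "u3"}
http_streams = sorted([kv for kv in streams.items() if kv[0].startswith('http_')],
                      key=lambda t: int(re.sub(r'[^\d]', '', t[0]) or 0),
                      reverse=True)
print(http_streams[0][0])  #: http_mp3_128_url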
    def grab_info(self, user, password, data):
        validuntil  = None
        trafficleft = None
        premium     = False
        sid         = None

        try:
            sid = data.get('sid', None)

            html = self.load(urlparse.urljoin(self.API_URL, "info"),
                             get={'sid': sid})

            self.log_debug("API:USERINFO", html)

            jso = json.loads(html)

            if jso['response_status'] == 200:
                if "reset_in" in jso['response']:
                    self._schedule_refresh(user, jso['response']['reset_in'])

                validuntil  = jso['response']['expire_date']
                trafficleft = float(jso['response']['traffic_left']) / 1024  #@TODO: Remove `/ 1024` in 0.4.10
                premium     = True
            else:
                self.log_error(jso['response_details'])

        except Exception as e:
            self.log_error(e, trace=True)
Example #15
    def get_account_status(self, user, password):
        #: Using the rpnet API, check if valid premium account
        res = self.load("https://premium.rpnet.biz/client_api.php",
                        get={'username': user, 'password': password,
                             'action': "showAccountInformation"})
        self.log_debug("JSON data: %s" % res)

        return json.loads(res)
    def api_response(self, **kwargs):
        """
        Dispatch a call to the API, see megacrypter.com/api_doc
        """
        self.log_debug("JSON request: " + json.dumps(kwargs))
        res = self.load(self.API_URL, post=json.dumps(kwargs))
        self.log_debug("API Response: " + res)
        return json.loads(res)
Example #17
    def get_json_response(self, *args, **kwargs):
        res = self.load(*args, **kwargs)
        if not res.startswith('{'):
            self.retry()

        self.log_debug(res)

        return json.loads(res)
Example #18
    def signin(self, user, password, data):
        jsonResponse = self.load(self.API_URL,
                                 get={'action'  : 'connectUser',
                                      'login'   : user,
                                      'password': password})
        res = json.loads(jsonResponse)
        if res['response_code'] != "ok":
            self.fail_login()
Example #19
    def signin(self, user, password, data):
        jsondata = self.load("https://api.over-load.me/account.php",
                             get={'user': user,
                                  'auth': password}).strip()

        data = json.loads(jsondata)

        if data['err'] == 1:
            self.fail_login()
Example #20
    def handle_free(self):
        self.data = self.load(self.url)
        action = self.load(self.url, post={'checkDownload': "check"})
        action = json.loads(action)
        self.log_debug(action)

        if "fail" in action:
            if action['fail'] == "timeLimit":
                self.data = self.load(self.url, post={'checkDownload': "showError", 'errorType': "timeLimit"})

                self.do_long_wait(re.search(self.LONG_WAIT_PATTERN, self.data))

            elif action['fail'] == "parallelDownload":
                self.log_warning(_("Parallel download error, now waiting 60s"))
                self.retry(wait=60, msg=_("parallelDownload"))

            else:
                self.fail(_("Download check returned: %s") % action['fail'])

        elif "success" in action:
            if action['success'] == "showCaptcha":
                self.do_captcha()
                self.do_timmer()
            elif action['success'] == "showTimmer":
                self.do_timmer()

        else:
            self.error(_("Unknown server response"))

        #: Show download link
        res = self.load(self.url, post={'downloadLink': "show"})
        self.log_debug("Show downloadLink response: %s" % res)
        if "fail" in res:
            self.error(_("Couldn't retrieve download url"))

        #: This may either download our file or forward us to an error page
        self.download(self.url, post={'download': "normal"})
        self.log_debug(self.req.http.lastEffectiveURL)

        check = self.check_file({'expired': self.LINK_EXPIRED_PATTERN,
                                 'wait'   : re.compile(self.LONG_WAIT_PATTERN),
                                 'limit'  : self.DL_LIMIT_PATTERN})

        if check == "expired":
            self.log_debug("Download link was expired")
            self.retry()

        elif check == "wait":
            self.do_long_wait(self.last_check)

        elif check == "limit":
            self.log_warning(_("Download limited reached for today"))
            self.wait(seconds_to_midnight(), True)
            self.retry()

        self.thread.m.reconnecting.wait(3)  #: Ease issue with later downloads appearing to be in parallel
Example #21
    def api_response(self, **kwargs):
        """
        Dispatch a call to the api, see https://mega.co.nz/#developers
        """
        #: Generate a session id, no idea where to obtain elsewhere
        uid = random.randint(10 << 9, 10 ** 10)

        res = self.load(self.API_URL, get={'id': uid}, post=json.dumps([kwargs]))
        self.log_debug("Api Response: " + res)
        return json.loads(res)
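A rough equivalent of the dispatcher above using the requests library; per the linked API docs, requests are a JSON array POSTed with an id parameter (the endpoint constant below is an assumption for illustration):

import json
import random
import requests

API_URL = "https://g.api.mega.co.nz/cs"  #: Assumed endpoint, not taken from the plugin

def api_request(**kwargs):
    uid = random.randint(10 << 9, 10 ** 10)  #: Session id, generated as in the plugin above
    r = requests.post(API_URL, params={'id': uid}, data=json.dumps([kwargs]))
    return r.json()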
Example #22
    def get_account_status(self, user, password):
        answer = self.load("http://www.free-way.bz/ajax/jd.php",  #@TODO: Revert to `https` in 0.4.10
                           get={'id': 4, 'user': user, 'pass': password})

        self.log_debug("Login: %s" % answer)

        if answer == "Invalid login":
            self.fail_login()

        return json.loads(answer)
Example #23
    def decrypt_attr(self, data, key):
        k, iv, meta_mac = self.get_cipher_key(key)
        cbc             = AES.new(k, AES.MODE_CBC, "\0" * 16)
        attr            = decode(cbc.decrypt(self.b64_decode(data)))

        self.log_debug("Decrypted Attr: %s" % attr)
        if not attr.startswith("MEGA"):
            self.fail(_("Decryption failed"))

        #: Data is padded, 0-bytes must be stripped
        return json.loads(re.search(r'{.+?}', attr).group(0))
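The same decryption as a standalone sketch, assuming pycryptodome and Mega's padless URL-safe base64; key16 must already be the 16-byte AES key that get_cipher_key() derives in the plugin:

import json
import re
from base64 import urlsafe_b64decode
from Crypto.Cipher import AES  #: pycryptodome

def decrypt_attr(data, key16):
    raw = urlsafe_b64decode(data + "=" * (-len(data) % 4))  #: Restore stripped padding
    attr = AES.new(key16, AES.MODE_CBC, b"\0" * 16).decrypt(raw).decode("utf-8", "ignore")
    if not attr.startswith("MEGA"):
        raise ValueError("Decryption failed")
    #: The plaintext is zero-padded, so pull out just the JSON object
    return json.loads(re.search(r'{.+?}', attr).group(0))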
Example #24
    def grab_hosters(self, user, password, data):
        response  = self.load("http://www.mega-debrid.eu/api.php", get={'action': "getHosters"})
        json_data = json.loads(response)

        if json_data['response_code'] == "ok":
            host_list = [element[0] for element in json_data['hosters']]
        else:
            self.log_error(_("Unable to retrieve hoster list"))
            host_list = []

        return host_list
Example #25
    def signin(self, user, password, data):
        api = json.loads(self.load("https://fastix.ru/api_v2/",
                                   get={'sub'     : "get_apikey",
                                        'email'   : user,
                                        'password': password}))

        if 'error' in api:
            self.fail_login(api['error_txt'])

        else:
            data['apikey'] = api['apikey']
Example #26
    def grab_info(self, user, password, data):
        html = self.load("http://app.fileserve.com/api/login/",
                         post={'username': user,
                               'password': password,
                               'submit': "Submit+Query"})
        res = json.loads(html)

        if res['type'] == "premium":
            validuntil = time.mktime(time.strptime(res['expireTime'], "%Y-%m-%d %H:%M:%S"))
            return {'trafficleft': res['traffic'], 'validuntil': validuntil}
        else:
            return {'premium': False, 'trafficleft': None, 'validuntil': None}
Example #27
    def grab_info(self, user, password, data):
        jsonResponse = self.load(self.API_URL,
                                 get={'action'  : 'connectUser',
                                      'login'   : user,
                                      'password': password})
        res = json.loads(jsonResponse)

        if res['response_code'] == "ok":
            return {'premium': True, 'validuntil': float(res['vip_end']), 'status': True}
        else:
            self.log_error(res)
            return {'status': False, 'premium': False}
Example #28
    def signin(self, user, password, data):
        data['usr'] = user
        data['pwd'] = hashlib.sha1(hashlib.md5(password).hexdigest()).hexdigest()

        try:
            response = json.loads(self.run_auth_query())

        except Exception:
            self.fail_login()

        if "errno" in response.keys():
            self.fail_login()
    def _get_links(self, id, page=1):
        gridFile = self.load("http://turbobit.net/downloadfolder/gridFile",
                             get={'rootId': id, 'rows': 200, 'page': page})
        grid = json.loads(gridFile)

        if grid['rows']:
            for i in grid['rows']:
                yield i['id']
            for id in self._get_links(id, page + 1):
                yield id
        else:
            return
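The generator above recurses to fetch the next 200-row page until the grid comes back empty. A hedged iterative sketch of the same pagination idea (fetch_rows is a hypothetical callable returning one page of rows):

def iter_ids(fetch_rows):
    page = 1
    while True:
        rows = fetch_rows(page)
        if not rows:
            break  #: An empty page means we are past the last one
        for row in rows:
            yield row['id']
        page += 1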
Example #30
    def handle_premium(self, pyfile):
        premium_key = self.account.get_login("password")

        json_data = [self.account.user, ["download/direct_links", {"pass": premium_key, "link": pyfile.url}]]
        api_rep = self.load("http://api.letitbit.net/json", post={"r": json.dumps(json_data)})
        self.log_debug("API Data: " + api_rep)
        api_rep = json.loads(api_rep)

        if api_rep["status"] == "FAIL":
            self.fail(api_rep["data"])

        self.link = api_rep["data"][0][0]
Example #31
    def search(self, trakttitles):
        for title in trakttitles:
            tmdb_link = ("https://api.themoviedb.org/3/search/movie"
                         "?api_key=4e33dc1073b5ad87851d8a4f506dc096"
                         "&query=" + urllib2.quote(title.encode('utf-8')) + "&language=de")
            json_data = json.loads(self.load(tmdb_link))

            if len(json_data["results"]) > 0:
                orig_tmdb_title = replaceUmlauts(json_data["results"][0]["original_title"])
                german_tmdb_title = replaceUmlauts(json_data["results"][0]["title"])
            else:
                continue

            searchLink_orig = "http://www.hd-area.org/?s=search&q=" + urllib2.quote(orig_tmdb_title.encode('utf-8'))
            searchLink_german = "http://www.hd-area.org/?s=search&q=" + urllib2.quote(german_tmdb_title.encode('utf-8'))
            page_orig = getURL(searchLink_orig)
            page_german = getURL(searchLink_german)
            soup_orig = BeautifulSoup(page_orig)
            soup_german = BeautifulSoup(page_german)

            self.log_debug('Searching for "%s" on HDArea' % german_tmdb_title)
            season = re.compile(r'.*(S\d{1,2}|[Ss]eason ?\d{1,2}).*')

            for content_german in soup_german.findAll("div", {"class": "whitecontent contentheight"}):
                searchLinks_german = content_german.findAll("a")
                if len(searchLinks_german) > 0:
                    for link in searchLinks_german:
                        href = link["href"]
                        releaseName = link.get_text()
                        if (self.get_config("quality") in releaseName
                                and not any(word.lower() in releaseName.lower()
                                            for word in self.get_config("rejectList").split(";"))
                                and not season.match(releaseName)):
                            req_page = getURL(href)
                            soup_ = BeautifulSoup(req_page)
                            links = soup_.findAll("span", {"style": "display:inline;"})
                            for link in links:
                                url = link.a["href"]
                                for hoster in self.get_config("hoster").split(";"):
                                    if hoster.lower() in link.text.lower():
                                        self.log_info('ADDED: "' + title + '" Releasename: ' + releaseName)
                                        self.pyload.api.addPackage(title, url.split('"'), 0)
                                        self.items_to_pyload.append(title + "  ||  Link: " + href)
                                        self.store(title, 'downloaded')
                            break
                else:
                    self.log_debug('No search results found for the German title: "%s"' % german_tmdb_title)
                    self.log_debug("Searching with the English title: %s" % orig_tmdb_title)
                    for content_orig in soup_orig.findAll("div", {"class": "whitecontent contentheight"}):
                        searchLinks_orig = content_orig.findAll("a")
                        for link in searchLinks_orig:
                            href = link["href"]
                            releaseName = link.get_text()
                            if (self.get_config("quality") in releaseName
                                    and not any(word.lower() in releaseName.lower()
                                                for word in self.get_config("rejectList").split(";"))
                                    and not season.match(releaseName)):
                                req_page = getURL(href)
                                soup_ = BeautifulSoup(req_page)
                                links = soup_.findAll("span", {"style": "display:inline;"})
                                for link in links:
                                    url = link.a["href"]
                                    for hoster in self.get_config("hoster").split(";"):
                                        if hoster.lower() in link.text.lower():
                                            self.log_info('ADDED: "' + title + '" Releasename: ' + releaseName)
                                            self.pyload.api.addPackage(title, url.split('"'), 0)
                                            self.items_to_pyload.append(title + "  ||  Link: " + href)
                                            self.store(title, 'downloaded')
                                break
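The season filter above is meant to skip TV season releases; a quick check of the cleaned-up pattern against invented release names:

import re

season = re.compile(r'.*(S\d{1,2}|[Ss]eason ?\d{1,2}).*')
print(bool(season.match("Some.Movie.2014.German.1080p")))  #: False -> passes the filter
print(bool(season.match("Some.Show.S01.German.1080p")))    #: True  -> rejected as a season pack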