Example #1
    def notify(self,
               event,
               msg="",
               key=None):

        # Default arguments are evaluated once at definition time, where
        # `self` is not bound, so the key must be resolved inside the method.
        key = key or self.getConfig('apikey')
        if not key:
            return

        if self.core.isClientConnected() and not self.getConfig('ignoreclient'):
            return

        elapsed_time = time.time() - self.last_notify

        if elapsed_time < self.getConfig("sendtimewait"):
            return

        if elapsed_time > 60:
            self.notifications = 0

        elif self.notifications >= self.getConfig("sendpermin"):
            return


        getURL("http://www.notifymyandroid.com/publicapi/notify",
               get={'apikey'     : key,
                    'application': "pyLoad",
                    'event'      : event,
                    'description': msg})

        self.last_notify    = time.time()
        self.notifications += 1
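
A note on the `key` parameter: a default like `key=self.getConfig('apikey')` cannot work, because Python evaluates default values once, at `def` time, where `self` is not bound; the `None` sentinel resolved inside the method (also used in Example #3 below) is the standard idiom. A minimal standalone sketch, with an illustrative `Plugin` class:

class Plugin(object):
    def getConfig(self, option):
        # Hypothetical stand-in for pyLoad's config lookup.
        return {"apikey": "secret"}[option]

    # def notify(self, key=self.getConfig("apikey")):  # NameError at class-definition time
    def notify(self, key=None):
        key = key if key is not None else self.getConfig("apikey")  # resolved per call
        return key

print(Plugin().notify())  # -> secret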
Example #2
 def periodical(self):
     self.items_to_queue = []
     self.items_to_collector = []
     for site in ("top-rls", "movies", "Old_Stuff"):
         address = "http://hd-area.org/index.php?s=" + site
         req_page = getURL(address)
         soup = BeautifulSoup(req_page)
         self.get_title(soup)
     if self.get_config("cinedubs"):
         address = "http://hd-area.org/index.php?s=Cinedubs"
         req_page = getURL(address)
         soup = BeautifulSoup(req_page)
         self.get_title(soup)
     if len(self.get_config("pushoverapi")) > 2:
         if self.items_to_queue:
             notifyPushover(self.get_config("pushoverapi"), self.items_to_queue, "QUEUE")
         if self.items_to_collector:
             notifyPushover(self.get_config("pushoverapi"), self.items_to_collector, "COLLECTOR")
     if len(self.get_config("pushbulletapi")) > 2:
         if self.items_to_queue:
             notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_queue, "QUEUE")
         if self.items_to_collector:
             notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_collector, "COLLECTOR")
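
Guarding each notification with `if self.items_to_queue:` beats the tempting one-liner `notifyPushover(...) if self.items_to_queue else True`: the latter is a conditional expression run as a statement, its value (including the dummy `else True` arm) simply discarded. A minimal sketch contrasting the two forms with a throwaway `notify` helper:

def notify(items):
    print("notifying %d item(s)" % len(items))

items = ["a", "b"]

notify(items) if items else True  # legal, but the expression's value is thrown away

if items:                         # same effect, reads as control flow
    notify(items)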
Example #3
    def notify(self, event, msg="", key=None):

        key = key or self.key
        if not key:
            return

        if self.core.isClientConnected() and not self.getConfig('ignoreclient'):
            return

        elapsed_time = time.time() - self.last_notify

        if elapsed_time < self.getConfig("sendtimewait"):
            return

        if elapsed_time > 60:
            self.notifications = 0

        elif self.notifications >= self.getConfig("sendpermin"):
            return

        getURL("http://www.notifymyandroid.com/publicapi/notify",
               get={
                   'apikey': key,
                   'application': "pyLoad",
                   'event': event,
                   'description': msg
               })

        self.last_notify = time.time()
        self.notifications += 1

        return True
Example #4
 def periodical(self):
     self.items_to_queue = []
     self.items_to_collector = []
     for site in ('top-rls', 'movies', 'Old_Stuff'):
         address = 'http://hd-area.org/index.php?s=' + site
         req_page = getURL(address)
         soup = BeautifulSoup(req_page)
         self.get_title(soup)
     if self.get_config("cinedubs"):
         address = 'http://hd-area.org/index.php?s=Cinedubs'
         req_page = getURL(address)
         soup = BeautifulSoup(req_page)
         self.get_title(soup)
     if len(self.get_config('pushoverapi')) > 2:
         if self.items_to_queue:
             notifyPushover(self.get_config("pushoverapi"), self.items_to_queue, "QUEUE")
         if self.items_to_collector:
             notifyPushover(self.get_config("pushoverapi"), self.items_to_collector, "COLLECTOR")
     if len(self.get_config('pushbulletapi')) > 2:
         if self.items_to_queue:
             notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_queue, "QUEUE")
         if self.items_to_collector:
             notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_collector, "COLLECTOR")
Example #5
def getInfo(urls):
    for url in urls:
        h = getURL(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)
        if m and not re.match(FilefactoryCom.__pattern__, m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 3, url)
        else:  #: It's a standard html page
            yield parseFileInfo(FilefactoryCom, url, getURL(url))
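
`re.match(pattern, string)` expects the regular expression first and the text second, so the plugin pattern must be the first argument when testing the `Location` header. A quick standalone check with a simplified stand-in for `FilefactoryCom.__pattern__`:

import re

pattern = r"https?://(?:www\.)?filefactory\.com/file/\w+"  # illustrative, not the real __pattern__
location = "http://www.filefactory.com/file/abc123"

print(bool(re.match(pattern, location)))  # True: pattern first, string second
print(bool(re.match(location, pattern)))  # False: swapped arguments fail here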
Example #6
def getInfo(urls):
    for url in urls:
        header = getURL(url, just_header=True)
        if 'Location: http://cloudzer.net/404' in header:
            file_info = (url, 0, 1, url)
        else:
            file_info = parseFileInfo(CloudzerNet, url, getURL(url, decode=True))
        yield file_info
Example #7
def getInfo(urls):
    for url in urls:
        header = getURL(url, just_header=True)
        if 'Location: http://cloudzer.net/404' in header:
            file_info = (url, 0, 1, url)
        else:
            file_info = parseFileInfo(CloudzerNet, url, getURL(url,
                                                               decode=True))
        yield file_info
Example #8
    def processCaptcha(self, task):
        result = None

        with open(task.captchaFile, 'rb') as f:
            data = f.read()
        data = b64encode(data)
        self.logDebug("%s : %s" % (task.captchaFile, data))
        if task.isPositional():
            mouse = 1
        else:
            mouse = 0

        response = getURL(self.API_URL,
                          post={
                              "apikey": self.getConfig("passkey"),
                              "prio": self.getConfig("prio"),
                              "confirm": self.getConfig("confirm"),
                              "captchaperhour":
                              self.getConfig("captchaperhour"),
                              "maxtimeout": self.getConfig("timeout"),
                              "selfsolve": self.getConfig("selfsolve"),
                              "pyload": "1",
                              "source": "pyload",
                              "base64": "1",
                              "mouse": mouse,
                              "file-upload-01": data,
                              "action": "usercaptchaupload"
                          })

        if response.isdigit():
            self.logInfo(
                _("New CaptchaID from upload: %s : %s") %
                (response, task.captchaFile))

            for _i in xrange(1, 100):
                response2 = getURL(self.API_URL,
                                   get={
                                       "apikey": self.getConfig("passkey"),
                                       "id": response,
                                       "pyload": "1",
                                       "source": "pyload",
                                       "action": "usercaptchacorrectdata"
                                   })

                if response2 != "":
                    break

                time.sleep(3)

            result = response2
            task.data["ticket"] = response
            self.logInfo("result %s : %s" % (response, result))
            task.setResult(result)
        else:
            self.logError("Bad upload: %s" % response)
            return False
Example #9
    def processCaptcha(self, task):
        result = None

        with open(task.captchaFile, "rb") as f:
            data = f.read()
        data = b64encode(data)
        self.logDebug("%s : %s" % (task.captchaFile, data))
        if task.isPositional():
            mouse = 1
        else:
            mouse = 0

        response = getURL(
            self.API_URL,
            post={
                "apikey": self.getConfig("passkey"),
                "prio": self.getConfig("prio"),
                "confirm": self.getConfig("confirm"),
                "captchaperhour": self.getConfig("captchaperhour"),
                "maxtimeout": self.getConfig("timeout"),
                "pyload": "1",
                "source": "pyload",
                "base64": "1",
                "mouse": mouse,
                "file-upload-01": data,
                "action": "usercaptchaupload",
            },
        )

        if response.isdigit():
            self.logInfo(_("NewCaptchaID from upload: %s : %s" % (response, task.captchaFile)))

            for i in range(1, 100, 1):
                response2 = getURL(
                    self.API_URL,
                    get={
                        "apikey": self.getConfig("passkey"),
                        "id": response,
                        "pyload": "1",
                        "source": "pyload",
                        "action": "usercaptchacorrectdata",
                    },
                )

                if response2 != "":
                    break

                time.sleep(3)

            result = response2
            task.data["ticket"] = response
            self.logInfo("result %s : %s" % (response, result))
            task.setResult(result)
        else:
            self.logError("Bad upload: %s" % response)
            return False
Example #10
def getInfo(urls):
    for url in urls:
        h = getURL(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)
        if m and not re.match(FilefactoryCom.__pattern__,
                              m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 3, url)
        else:  #: It's a standard html page
            yield parseFileInfo(FilefactoryCom, url, getURL(url))
Example #11
def respond(ticket, value):
    conf = join(expanduser("~"), "ct.conf")
    try:
        with open(conf, "rb") as f:
            getURL("http://captchatrader.com/api/respond",
                   post={"is_correct": value,
                         "username": f.readline().strip(),
                         "password": f.readline().strip(),
                         "ticket": ticket})
    except Exception, e:
        print "CT Exception:", e
        log(DEBUG, str(e))
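
Reading `ct.conf` inside a `with` block, as above, releases the file handle even when `getURL` raises, which a bare `open` without a matching `close` does not. A self-contained sketch of the pattern:

# Write a two-line credentials file, then read it back safely.
with open("ct.conf", "wb") as f:
    f.write(b"user\npass\n")

with open("ct.conf", "rb") as f:
    username = f.readline().strip()
    password = f.readline().strip()
# The handle is closed on leaving the block, even if an exception was raised inside it.
print(username)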
Example #12
def getInfo(urls):
    for url in urls:
        header = getURL(url, just_header=True)
        if 'Location: http://cloudzer.net/404' in header:
            file_info = (url, 0, 1, url)
        else:
            if url.endswith('/'):
                api_data = getURL(url + 'status')
            else:
                api_data = getURL(url + '/status')
            name, size = api_data.splitlines()
            size = parseFileSize(size)
            file_info = (name, size, 2, url)
        yield file_info
Example #14
0
def respond(ticket, value):
    conf = join(expanduser("~"), "ct.conf")
    try:
        with open(conf, "rb") as f:
            getURL("http://captchatrader.com/api/respond",
                   post={
                       "is_correct": value,
                       "username": f.readline().strip(),
                       "password": f.readline().strip(),
                       "ticket": ticket
                   })
    except Exception, e:
        print "CT Exception:", e
        log(DEBUG, str(e))
Example #15
    def getInfo(cls, url="", html=""):
        info   = cls.apiInfo(url)
        online = info['status'] == 2  #: compare ints with ==, never is

        try:
            info['pattern'] = re.match(cls.__pattern__, url).groupdict()  #: pattern groups will be saved here

        except Exception:
            info['pattern'] = {}

        if not html and not online:
            if not url:
                info['error']  = "missing url"
                info['status'] = 1

            elif info['status'] == 3:
                try:
                    html = getURL(url, cookies=cls.COOKIES, decode=not cls.TEXT_ENCODING)

                    if isinstance(cls.TEXT_ENCODING, basestring):
                        html = unicode(html, cls.TEXT_ENCODING)

                except BadHeader, e:
                    info['error'] = "%d: %s" % (e.code, e.content)

                    if e.code == 404:
                        info['status'] = 1

                    elif e.code == 503:
                        info['status'] = 6

                except Exception:
                    pass
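
The status comparisons here use `==` rather than `is`: `is` tests object identity, and while CPython caches small integers (-5..256) so `x is 2` appears to work, codes like 404 and 503 fall outside the cache, so identity tests on them can fail even for equal values. Quick demonstration:

a = int("404")  # built at runtime, not a cached constant
b = 404

print(a == b)   # True: the values are equal
print(a is b)   # False in CPython: two distinct objects

c = int("2")
print(c is 2)   # True in CPython (small-int cache), which is what hides the bug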
Example #16
    def getHoster(self):
        json_data = getURL('http://multi-debrid.com/api.php?hosts',
                           decode=True)
        self.logDebug('JSON data: ' + json_data)
        json_data = json_loads(json_data)

        return json_data['hosts']
Example #17
 def getHoster(self):
     page = getURL(
         "http://fastix.ru/api_v2/?apikey=5182964c3f8f9a7f0b00000a_kelmFB4n1IrnCDYuIFn2y&sub=allowed_sources"
     )
     host_list = json_loads(page)
     host_list = host_list['allow']
     return host_list
Example #19
    def getRtUpdate(self):
        rtUpdate = self.getStorage("rtUpdate")
        if not rtUpdate:
            if (
                self.getStorage("version") != self.__version__
                or int(self.getStorage("timestamp", 0)) + 86400000 < timestamp()
            ):
                # that's right, we are even using jdownloader updates
                rtUpdate = getURL("http://update0.jdownloader.org/pluginstuff/tbupdate.js")
                rtUpdate = self.decrypt(rtUpdate.splitlines()[1])
                # but we still need to fix the syntax to work with other engines than rhino
                rtUpdate = re.sub(
                    r"for each\(var (\w+) in(\[[^\]]+\])\)\{",
                    r"zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];",
                    rtUpdate,
                )
                rtUpdate = re.sub(r"for\((\w+)=", r"for(var \1=", rtUpdate)

                self.logDebug("rtUpdate")
                self.setStorage("rtUpdate", rtUpdate)
                self.setStorage("timestamp", timestamp())
                self.setStorage("version", self.__version__)
            else:
                self.logError("Unable to download, wait for update...")
                self.tempOffline()

        return rtUpdate
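
The first `re.sub` rewrites Rhino's `for each(var x in [...])` construct into a portable indexed loop; the second adds the missing `var` in bare `for(i=...` headers. A quick check of the first rewrite on a sample line:

import re

js = "for each(var q in[1,2,3]){ use(q); }"
fixed = re.sub(r"for each\(var (\w+) in(\[[^\]]+\])\)\{",
               r"zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];",
               js)
print(fixed)  # -> zza=[1,2,3];for(var zzi=0;zzi<zza.length;zzi++){q=zza[zzi]; use(q); }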
Example #20
    def coreReady(self):
        page = getURL("http://linkdecrypter.com/")
        m = re.search(r'<b>Supported\(\d+\)</b>: <i>([^+<]*)', page)
        if not m:
            self.logError(_("Crypter list not found"))
            return

        builtin = [
            name.lower()
            for name in self.core.pluginManager.crypterPlugins.keys()
        ]
        builtin.extend(["downloadserienjunkiesorg"])

        crypter_pattern = re.compile(r"(\w[\w.-]+)")
        online = []
        for crypter in m.group(1).split(', '):
            m = re.match(crypter_pattern, crypter)
            if m and remove_chars(m.group(1), "-.") not in builtin:
                online.append(m.group(1).replace(".", "\\."))

        if not online:
            self.logError(_("Crypter list is empty"))
            return

        regexp = r"https?://([^.]+\.)*?(%s)/.*" % "|".join(online)

        crypter_plugin = self.core.pluginManager.crypterPlugins[self.__name__]
        crypter_plugin["pattern"] = regexp
        crypter_plugin["re"] = re.compile(regexp)

        self.logDebug("REGEXP: " + regexp)
Example #21
def getInfo(urls):
    result = []
    
    for url in urls:
        
        html = getURL(url)
        if re.search(PutlockerCom.PATTERN_OFFLINE, html):
            result.append((url, 0, 1, url))
        else:
            name = re.search(PutlockerCom.PATTERN_FILENAME_1, html)
            if name is None:
                name = re.search(PutlockerCom.PATTERN_FILENAME_2, html)
            if name is None:
                result.append((url, 0, 1, url))
                continue
                
            name = name.group(1)
            
            # size = re.search(PutlockerCom.PATTERN_FILESIZE, html)
            # if size is None:
                # result.append((url, 0, 1, url))
                # continue
            
            # size = size.group(1)
            
            result.append((name, 0, 2, url))        
    yield result
Example #22
    def _captchaResponse(self, task, correct):
        type = "correct" if correct else "refund"

        if 'ticket' not in task.data:
            self.logDebug("No CaptchaID for %s request (task: %s)" % (type, task))
            return

        passkey = self.getConfig("passkey")

        for _i in xrange(3):
            res = getURL(self.API_URL,
                         get={'action' : "usercaptchacorrectback",
                              'apikey' : passkey,
                              'api_key': passkey,
                              'correct': "1" if correct else "2",
                              'pyload' : "1",
                              'source' : "pyload",
                              'id'     : task.data["ticket"]})

            self.logDebug("Request %s: %s" % (type, res))

            if res == "OK":
                break

            sleep(5)
        else:
            self.logDebug("Could not send %s request: %s" % (type, res))
Example #23
 def getCredits(self):
     response = getURL(self.GETCREDITS_URL,
                       post={"key": self.getConfig("passkey")})

     data = dict([x.split(' ', 1) for x in response.splitlines()])
     return int(data['Left'])
Example #24
    def check_for_new_or_removed_hosters(self, hosters):
        #get the old hosters
        old_hosters = hosters.keys()

        #load the current hosters from vipleech4u.com
        page = getURL('http://vipleech4u.com/hosts.php')
        current_hosters = self.HOSTER_PATTERN.findall(page)
        current_hosters = [x.lower() for x in current_hosters]

        #let's look for new hosters
        new_hosters = []

        for hoster in current_hosters:
            if hoster not in old_hosters:
                new_hosters.append(hoster)

        #let's look for removed hosters
        removed_hosters = []

        for hoster in old_hosters:
            if hoster not in current_hosters:
                removed_hosters.append(hoster)

        if new_hosters:
            self.logDebug(
                'The following new hosters were found on vipleech4u.com: %s' %
                str(new_hosters))

        if removed_hosters:
            self.logDebug(
                'The following hosters were removed from vipleech4u.com: %s' %
                str(removed_hosters))

        if not (new_hosters or removed_hosters):
            self.logDebug('The hoster list is still valid.')
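
The final check, `not (new_hosters or removed_hosters)`, is the De Morgan form of "nothing added and nothing removed"; writing `not (new_hosters and removed_hosters)` instead would report the list as still valid whenever either side happened to be empty. A two-line check:

new_hosters, removed_hosters = [], ["depositfiles"]

print(not (new_hosters and removed_hosters))  # True: wrongly reports the list as still valid
print(not (new_hosters or removed_hosters))   # False: correctly reports a change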
Example #25
    def loadPatterns(self):
        page = getURL("http://linkdecrypter.com/")
        m = re.search(r'<b>Supported\(\d+\)</b>: <i>([^+<]*)', page)
        if not m:
            self.logError(_("Crypter list not found"))
            return

        builtin = [name.lower() for name in self.core.pluginManager.crypterPlugins.keys()]
        builtin.extend(["downloadserienjunkiesorg"])

        crypter_pattern = re.compile(r"(\w[\w.-]+)")
        online = []
        for crypter in m.group(1).split(', '):
            m = re.match(crypter_pattern, crypter)
            if m and remove_chars(m.group(1), "-.") not in builtin:
                online.append(m.group(1).replace(".", "\\."))

        if not online:
            self.logError(_("Crypter list is empty"))
            return

        regexp = r"https?://([^.]+\.)*?(%s)/.*" % "|".join(online)

        crypter_plugin = self.core.pluginManager.crypterPlugins[self.__name__]
        crypter_plugin["pattern"] = regexp
        crypter_plugin["re"] = re.compile(regexp)

        self.logDebug("REGEXP: " + regexp)
Example #26
    def getHoster(self):
        json_data = getURL('http://www.simply-premium.com/api/hosts.php?format=json&online=1')
        json_data = json_loads(json_data)

        host_list = [element['host'] for element in json_data['result']]

        return host_list
Example #27
def getInfo(urls):
    # DDLStorage API Documentation:
    # http://www.ddlstorage.com/cgi-bin/api_req.cgi?req_type=doc
    ids = dict()
    for url in urls:
        m = re.search(DdlstorageCom.__pattern__, url)
        ids[m.group('ID')] = url

    for chunk in chunks(ids.keys(), 5):
        api = getURL('http://www.ddlstorage.com/cgi-bin/api_req.cgi',
                     post={'req_type': 'file_info_free',
                           'client_id': 53472,
                           'file_code': ','.join(chunk),
                           'sign': md5('file_info_free%d%s%s' % (53472, ','.join(chunk),
                                                                 '25JcpU2dPOKg8E2OEoRqMSRu068r0Cv3')).hexdigest()})
        api = api.replace('<pre>', '').replace('</pre>', '')
        api = json_loads(api)

        result = list()
        for el in api:
            if el['status'] == 'online':
                result.append((el['file_name'], int(el['file_size']), 2,
                               ids[el['file_code']]))
            else:
                result.append(
                    (ids[el['file_code']], 0, 1, ids[el['file_code']]))
        yield result
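
The `sign` parameter is an MD5 hex digest over the request type, client id, joined file codes, and a shared secret, in that concatenation order. A standalone sketch of building such a signature with placeholder values (encoded so it also runs on Python 3):

from hashlib import md5

req_type = 'file_info_free'
client_id = 53472
file_codes = 'abc,def'
secret = 'placeholder-secret'  # illustrative, not the key from the example

sign = md5(('%s%d%s%s' % (req_type, client_id, file_codes, secret)).encode('utf-8')).hexdigest()
print(sign)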
Example #28
    def getHoster(self):
        json_data = getURL("http://unrestrict.li/api/jdownloader/hosts.php?format=json")
        json_data = json_loads(json_data)

        host_list = [element["host"] for element in json_data["result"]]

        return host_list
Example #29
    def getHoster(self):
        json_data = getURL('http://unrestrict.li/api/jdownloader/hosts.php?format=json')
        json_data = json_loads(json_data)

        host_list = [element['host'] for element in json_data['result']]

        return host_list
Example #30
    def periodical(self):
        html_parser = HTMLParser.HTMLParser()
        self.items_to_pyload = []
        address = "https://trakt.tv/users/%s/watchlist" % self.get_config("traktuser")
        page = getURL(address)
        soup = BeautifulSoup(page)
        trakttitles = []
        # Get Trakt Watchlist Titles
        for div in soup.findAll("div", {"class": "titles"}):
            for title in div.findAll("h3"):
                title = title.get_text()
                title = replaceUmlauts(html_parser.unescape(title))
                storage = self.retrieve(title)
                if storage == "downloaded":
                    self.log_debug(title + ": already found and downloaded")
                else:
                    trakttitles.append(title)
        self.search(trakttitles)

        # Push notification
        if len(self.get_config("pushoverapi")) > 2 and self.items_to_pyload:
            notifyPushover(self.get_config("pushoverapi"), self.items_to_pyload)
        if len(self.get_config("pushbulletapi")) > 2 and self.items_to_pyload:
            notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_pyload)
Example #31
def getInfo(urls):
    file_info = list()
    list_ids = dict()

    # Create a dict id:url. Will be used to retrieve original url
    for url in urls:
        m = re.search(FilefactoryCom.__pattern__, url)
        list_ids[m.group('id')] = url

    # WARN: There could be a limit of urls for request
    post_data = {'func': 'links', 'links': '\n'.join(urls)}
    rep = getURL('http://www.filefactory.com/tool/links.php', post=post_data, decode=True)

    # Online links
    for m in re.finditer(
            r'innerText">\s*<h1 class="name">(?P<N>.+) \((?P<S>[\w.]+) (?P<U>\w+)\)</h1>\s*<p>http://www.filefactory.com/file/(?P<ID>\w+).*</p>\s*<p class="hidden size">',
            rep):
        file_info.append((m.group('N'), parseFileSize(m.group('S'), m.group('U')), 2, list_ids[m.group('ID')]))

    # Offline links
    for m in re.finditer(
            r'innerText">\s*<h1>(http://www.filefactory.com/file/(?P<ID>\w+)/)</h1>\s*<p>\1</p>\s*<p class="errorResponse">Error: file not found</p>',
            rep):
        file_info.append((list_ids[m.group('ID')], 0, 1, list_ids[m.group('ID')]))

    return file_info
Example #32
def getInfo(urls):
    # DDLStorage API Documentation:
    # http://www.ddlstorage.com/cgi-bin/api_req.cgi?req_type=doc
    ids = dict()
    for url in urls:
        m = re.search(DdlstorageCom.__pattern__, url)
        ids[m.group('ID')] = url

    for chunk in chunks(ids.keys(), 5):
        api = getURL('http://www.ddlstorage.com/cgi-bin/api_req.cgi',
                     post={'req_type': 'file_info_free',
                           'client_id': 53472,
                           'file_code': ','.join(chunk),
                           'sign': md5('file_info_free%d%s%s' % (53472, ','.join(chunk),
                                                                 '25JcpU2dPOKg8E2OEoRqMSRu068r0Cv3')).hexdigest()})
        api = api.replace('<pre>', '').replace('</pre>', '')
        api = json_loads(api)

        result = list()
        for el in api:
            if el['status'] == 'online':
                result.append((el['file_name'], int(el['file_size']), 2, ids[el['file_code']]))
            else:
                result.append((ids[el['file_code']], 0, 1, ids[el['file_code']]))
        yield result
Example #33
    def apiInfo(cls, url):
        info = super(UploadedTo, cls).apiInfo(url)

        for _i in xrange(5):
            html = getURL("http://uploaded.net/api/filemultiple",
                          get={
                              "apikey": cls.API_KEY,
                              'id_0': re.match(cls.__pattern__, url).group('ID')
                          },
                          decode=True)

            if html != "can't find request":
                api = html.split(",", 4)
                if api[0] == "online":
                    info.update({
                        'name': api[4].strip(),
                        'size': api[2],
                        'status': 2
                    })
                else:
                    info['status'] = 1
                break
            else:
                time.sleep(3)

        return info
Example #34
    def download_api(self):

        # MU API request
        fileId = self.pyfile.url.split("=")[-1]  # Get file id from url
        apiFileId = "id0"
        post = {apiFileId: fileId}
        response = getURL(self.API_URL, post=post, decode=True)
        self.log.debug("%s: API response [%s]" % (self.__name__, response))
        
        # Translate API response
        parts = [re.split(r"&(?!amp;|#\d+;)", x) for x in re.split(r"&?(?=id[\d]+=)", response)]
        apiHosterMap = dict([elem.split('=') for elem in parts[0]])
        apiFileDataMap = dict([elem.split('=') for elem in parts[1]])        
        self.api = _translateAPIFileInfo(apiFileId, apiFileDataMap, apiHosterMap)

        # File info
        try:
            self.pyfile.status = self.api['status']
            self.pyfile.name = self.api['name'] 
            self.pyfile.size = self.api['size']
        except KeyError:
            self.log.warn("%s: Cannot recover all file [%s] info from API response." % (self.__name__, fileId))
        
        # Fail if offline
        if self.pyfile.status == statusMap['offline']:
            self.offline()
Example #35
def getInfo(urls):
    ids = ""
    names = ""

    p = re.compile(RapidshareCom.__pattern__)

    for url in urls:
        r = p.search(url)
        if r.group("name"):
            ids += "," + r.group("id")
            names += "," + r.group("name")
        elif r.group("name_new"):
            ids += "," + r.group("id_new")
            names += "," + r.group("name_new")

    url = "http://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=checkfiles&files=%s&filenames=%s" % (
        ids[1:], names[1:])

    api = getURL(url)
    result = []
    for i, res in enumerate(api.split()):
        tmp = res.split(",")
        if tmp[4] in ("0", "4", "5"):
            status = 1
        elif tmp[4] == "1":
            status = 2
        else:
            status = 3

        result.append((tmp[1], tmp[2], status, urls[i]))

    yield result
Example #36
def getInfo(urls):
    file_info = list()
    list_ids = dict()

    # Create a dict id:url. Will be used to retrieve original url
    for url in urls:
        m = re.search(FilefactoryCom.__pattern__, url)
        list_ids[m.group('id')] = url

    # WARN: There could be a limit of urls for request
    post_data = {'func': 'links', 'links': '\n'.join(urls)}
    rep = getURL('http://www.filefactory.com/tool/links.php',
                 post=post_data,
                 decode=True)

    # Online links
    for m in re.finditer(
            r'innerText">\s*<h1 class="name">(?P<N>.+) \((?P<S>[\w.]+) (?P<U>\w+)\)</h1>\s*<p>http://www.filefactory.com/file/(?P<ID>\w+).*</p>\s*<p class="hidden size">',
            rep):
        file_info.append(
            (m.group('N'), parseFileSize(m.group('S'), m.group('U')), 2,
             list_ids[m.group('ID')]))

    # Offline links
    for m in re.finditer(
            r'innerText">\s*<h1>(http://www.filefactory.com/file/(?P<ID>\w+)/)</h1>\s*<p>\1</p>\s*<p class="errorResponse">Error: file not found</p>',
            rep):
        file_info.append(
            (list_ids[m.group('ID')], 0, 1, list_ids[m.group('ID')]))

    return file_info
Example #37
def getInfo(urls):
    result = []  #: [ .. (name, size, status, url) .. ]
    regex = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/"
    request = {"fields": "access_error,status,title"}
    for url in urls:
        id = regex.search(url).group("ID")
        page = getURL(apiurl + id, get=request)
        info = json_loads(page)

        if "title" in info:
            name = info["title"] + ".mp4"
        else:
            name = url

        if "error" in info or info["access_error"]:
            status = "offline"
        else:
            status = info["status"]
            if status in ("ready", "published"):
                status = "online"
            elif status in ("waiting", "processing"):
                status = "temp. offline"
            else:
                status = "offline"

        result.append((name, 0, statusMap[status], url))
    return result
Example #38
    def getHoster(self):
        # If no accounts are available there will be no hosters available
        if not self.account or not self.account.canUse():
            print "ReloadCc: No accounts available"
            return []

        # Get account data
        (user, data) = self.account.selectAccount()

        # Get supported hosters list from reload.cc using the json API v1
        query_params = dict(
            via='pyload',
            v=1,
            get_supported='true',
            get_traffic='true',
            user=user
        )

        try:
            query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
        except Exception:
            query_params.update(dict(pwd=data['password']))

        answer = getURL("http://api.reload.cc/login", get=query_params)
        data = json_loads(answer)


        # If the account is not valid there are no hosters available
        if data['status'] != "ok":
            print "ReloadCc: Status is not ok: %s" % data['status']
            return []

        # Extract hosters from json file
        return data['msg']['supportedHosters']
Example #39
def getInfo(urls):
    api_url_base = "http://api.share-online.biz/linkcheck.php"

    for chunk in chunks(urls, 90):
        api_param_file = {
            "links":
            "\n".join(
                x.replace("http://www.share-online.biz/dl/", "").rstrip("/")
                for x in chunk)
        }  # API only supports old-style links
        src = getURL(api_url_base, post=api_param_file)
        result = []
        for i, res in enumerate(src.split("\n")):
            if not res:
                continue
            fields = res.split(";")

            if fields[1] == "OK":
                status = 2
            elif fields[1] in ("DELETED", "NOT FOUND"):
                status = 1
            else:
                status = 3

            result.append((fields[2], int(fields[3]), status, chunk[i]))
        yield result
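
`chunks` batches the URL list so each linkcheck request stays within the API limit (90 links per call here). pyLoad ships its own helper; a minimal sketch of what such a batching helper typically does:

def chunks(seq, size):
    # Yield successive slices of `seq`, each at most `size` long.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

print(list(chunks(list(range(7)), 3)))  # -> [[0, 1, 2], [3, 4, 5], [6]]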
Example #40
def getInfo(urls):
    result = []
    regex = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/%s"
    request = {"fields": "access_error,status,title"}

    for url in urls:
        id = regex.match(url).group('ID')
        html = getURL(apiurl % id, get=request)
        info = json_loads(html)

        name = info['title'] + ".mp4" if "title" in info else url

        if "error" in info or info['access_error']:
            status = "offline"
        else:
            status = info['status']
            if status in ("ready", "published"):
                status = "online"
            elif status in ("waiting", "processing"):
                status = "temp. offline"
            else:
                status = "offline"

        result.append((name, 0, statusMap[status], url))

    return result
Example #41
    def getHoster(self):
        # If no accounts are available there will be no hosters available
        if not self.account or not self.account.canUse():
            return []

        # Get account data
        (user, data) = self.account.selectAccount()

        # Get supported hosters list from premiumize.me using the json API v1 (see https://secure.premiumize.me/?show=api)
        answer = getURL(
            "https://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s"
            % (user, data['password']))
        data = json_loads(answer)

        # If the account is not valid there are no hosters available
        if data['status'] != 200:
            return []

        # Extract hosters from json file
        hosters = set(data['result']['hosterlist'])

        # Read config to check if certain hosters should not be handled
        configMode = self.getConfig('hosterListMode')
        if configMode in ("listed", "unlisted"):
            configList = set(
                self.getConfig('hosterList').strip().lower().replace(
                    '|', ',').replace(';', ',').split(','))
            configList.discard(u'')
            if configMode == "listed":
                hosters &= configList
            else:
                hosters -= configList

        return list(hosters)
Example #42
 def getInfo(urls):
     for url in urls:
         cj = CookieJar(plugin.__name__)
         if isinstance(plugin.SH_COOKIES, list):
             set_cookies(cj, plugin.SH_COOKIES)
         file_info = parseFileInfo(plugin, url,
                                   getURL(replace_patterns(url, plugin.FILE_URL_REPLACEMENTS),
                                          decode=not plugin.SH_BROKEN_ENCODING, cookies=cj))
         yield file_info
Example #43
def getInfo(urls):
    ids = ""
    names = ""

    p = re.compile(RapidshareCom.__pattern__)

    for url in urls:
        r = p.search(url)
        if r.group("name"):
            ids += "," + r.group("id")
            names += "," + r.group("name")
        elif r.group("name_new"):
            ids += "," + r.group("id_new")
            names += "," + r.group("name_new")

    url = "http://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=checkfiles&files=%s&filenames=%s" % (ids[1:], names[1:])

    api = getURL(url)
    result = []
    for i, res in enumerate(api.split()):
        tmp = res.split(",")
        if tmp[4] in ("0", "4", "5"):
            status = 1
        elif tmp[4] == "1":
            status = 2
        else:
            status = 3

        result.append((tmp[1], tmp[2], status, urls[i]))

    yield result
Example #44
    def _captchaResponse(self, task, correct):
        type = "correct" if correct else "refund"

        if 'ticket' not in task.data:
            self.logDebug("No CaptchaID for %s request (task: %s)" %
                          (type, task))
            return

        passkey = self.getConfig('passkey')

        for _i in xrange(3):
            res = getURL(self.API_URL,
                         get={
                             'action': "usercaptchacorrectback",
                             'apikey': passkey,
                             'api_key': passkey,
                             'correct': "1" if correct else "2",
                             'pyload': "1",
                             'source': "pyload",
                             'id': task.data["ticket"]
                         })

            self.logDebug("Request %s: %s" % (type, res))

            if res == "OK":
                break

            time.sleep(5)
        else:
            self.logDebug("Could not send %s request: %s" % (type, res))
Example #45
def getAPIData(urls):
    post = {"apikey": key}

    idMap = {}

    for i, url in enumerate(urls):
        id = getID(url)
        post["id_%s" % i] = id
        idMap[id] = url

    for _ in xrange(5):
        api = unicode(getURL("http://uploaded.net/api/filemultiple", post=post, decode=False), 'iso-8859-1')
        if api != "can't find request":
            break
        else:
            sleep(3)

    result = {}

    if api:
        for line in api.splitlines():
            data = line.split(",", 4)
            if data[1] in idMap:
                result[data[1]] = (data[0], data[2], data[4], data[3], idMap[data[1]])

    return result
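
Both variants of `getAPIData` poll uploaded.net up to five times, sleeping three seconds between attempts and keeping the last response. The same retry shape, extracted into a generic sketch (all names here are illustrative, not pyLoad API):

import time

def fetch_with_retry(fetch, attempts=5, delay=3, failure="can't find request"):
    # Call `fetch()` until it returns something other than the failure marker.
    response = None
    for _ in range(attempts):
        response = fetch()
        if response != failure:
            break
        time.sleep(delay)
    return response

calls = {"n": 0}
def fake_fetch():  # stand-in that succeeds on the third call
    calls["n"] += 1
    return "ok" if calls["n"] >= 3 else "can't find request"

print(fetch_with_retry(fake_fetch, delay=0))  # -> ok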
Example #46
    def check_for_new_or_removed_hosters(self, hosters):
        #get the old hosters
        old_hosters = hosters.keys()

        #load the current hosters from vipleech4u.com
        page = getURL('http://vipleech4u.com/hosts.php')
        current_hosters = self.HOSTER_PATTERN.findall(page)
        current_hosters = [x.lower() for x in current_hosters]

        #let's look for new hosters
        new_hosters = []

        for hoster in current_hosters:
            if hoster not in old_hosters:
                new_hosters.append(hoster)

        #let's look for removed hosters
        removed_hosters = []

        for hoster in old_hosters:
            if hoster not in current_hosters:
                removed_hosters.append(hoster)

        if new_hosters:
            self.logDebug('The following new hosters were found on vipleech4u.com: %s' % str(new_hosters))

        if removed_hosters:
            self.logDebug('The following hosters were removed from vipleech4u.com: %s' % str(removed_hosters))

        if not (new_hosters or removed_hosters):
            self.logDebug('The hoster list is still valid.')
Example #47
def checkFile(plugin, urls):
    file_info = []
    url_dict = {}

    for url in urls:
        url_dict[re.search(plugin.__pattern__,
                           url).group('id')] = (url, 0, 0, url)
    url_ids = url_dict.keys()
    urls = ['http://www.filefactory.com/file/' + url_id for url_id in url_ids]

    html = getURL("http://www.filefactory.com/tool/links.php",
                  post={
                      "func": "links",
                      "links": "\n".join(urls)
                  },
                  decode=True)

    for m in re.finditer(plugin.LC_INFO_PATTERN, html):
        if m.group('id') in url_ids:
            url_dict[m.group('id')] = (m.group('name'),
                                       parseFileSize(m.group('size')), 2,
                                       url_dict[m.group('id')][3])

    for m in re.finditer(plugin.LC_OFFLINE_PATTERN, html):
        if m.group('id') in url_ids:
            url_dict[m.group('id')] = (url_dict[m.group('id')][0], 0, 1,
                                       url_dict[m.group('id')][3])

    file_info = url_dict.values()

    return file_info
Example #48
    def getHoster(self):
        https = "https" if self.getConfig("https") else "http"
        page = getURL(https + "://www.alldebrid.com/api.php?action=get_host")
        page = page.replace("\"", "").strip()

        return [x.strip() for x in page.split(",") if x.strip()]
Example #49
    def getRtUpdate(self):
        rtUpdate = self.getStorage("rtUpdate")
        if not rtUpdate:
            if self.getStorage("version") != self.__version__ or int(
                    self.getStorage("timestamp", 0)) + 86400000 < timestamp():
                # that's right, we are even using jdownloader updates
                rtUpdate = getURL(
                    "http://update0.jdownloader.org/pluginstuff/tbupdate.js")
                rtUpdate = self.decrypt(rtUpdate.splitlines()[1])
                # but we still need to fix the syntax to work with other engines than rhino
                rtUpdate = re.sub(
                    r'for each\(var (\w+) in(\[[^\]]+\])\)\{',
                    r'zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];',
                    rtUpdate)
                rtUpdate = re.sub(r"for\((\w+)=", r"for(var \1=", rtUpdate)

                self.logDebug("rtUpdate")
                self.setStorage("rtUpdate", rtUpdate)
                self.setStorage("timestamp", timestamp())
                self.setStorage("version", self.__version__)
            else:
                self.logError("Unable to download, wait for update...")
                self.tempOffline()

        return rtUpdate
Example #50
    def getHoster(self):
        # If no accounts are available there will be no hosters available
        if not self.account or not self.account.canUse():
            print "ReloadCc: No accounts available"
            return []

        # Get account data
        (user, data) = self.account.selectAccount()

        # Get supported hosters list from reload.cc using the json API v1
        query_params = dict(via='pyload',
                            v=1,
                            get_supported='true',
                            get_traffic='true',
                            user=user)

        try:
            query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
        except Exception:
            query_params.update(dict(pwd=data['password']))

        answer = getURL("http://api.reload.cc/login", get=query_params)
        data = json_loads(answer)

        # If the account is not valid there are no hosters available
        if data['status'] != "ok":
            print "ReloadCc: Status is not ok: %s" % data['status']
            return []

        # Extract hosters from json file
        return data['msg']['supportedHosters']
Example #51
def getInfo(urls):
    for url in urls:
        html = getURL("http://www.fshare.vn/check_link.php",
                      post={'action': "check_link", 'arrlinks': url},
                      decode=True)

        yield parseFileInfo(FshareVn, url, html)
Example #53
def getAPIData(urls):
    post = {"apikey": key}

    idMap = {}

    for i, url in enumerate(urls):
        id = getID(url)
        post["id_%s" % i] = id
        idMap[id] = url

    for _ in xrange(5):
        api = unicode(
            getURL("http://uploaded.net/api/filemultiple",
                   post=post,
                   decode=False), 'iso-8859-1')
        if api != "can't find request":
            break
        else:
            sleep(3)

    result = {}

    if api:
        for line in api.splitlines():
            data = line.split(",", 4)
            if data[1] in idMap:
                result[data[1]] = (data[0], data[2], data[4], data[3],
                                   idMap[data[1]])

    return result
Example #54
    def process(self, pyfile):
        self.prepare()

        if not re.match(self.__pattern__, self.pyfile.url):
            if self.premium:
                self.handleOverriden()
            else:
                self.fail("Only premium users can download from other hosters with %s" % self.HOSTER_NAME)
        else:
            try:
                # Due to a 0.4.9 core bug self.load would use cookies even if
                # cookies=False. Workaround using getURL to avoid cookies.
                # Can be reverted in 0.5 as the cookies bug has been fixed.
                self.html = getURL(pyfile.url, decode=True)
                self.file_info = self.getFileInfo()
            except PluginParseError:
                self.file_info = None

            self.location = self.getDirectDownloadLink()

            if not self.file_info:
                pyfile.name = html_unescape(
                    unquote(urlparse(self.location if self.location else pyfile.url).path.split("/")[-1])
                )

            if self.location:
                self.startDownload(self.location)
            elif self.premium:
                self.handlePremium()
            else:
                self.handleFree()
Example #55
 def getHoster(self):
     # If no accounts are available there will be no hosters available
     if not self.account or not self.account.canUse():
         return []

     # Get account data
     (user, data) = self.account.selectAccount()

     # Get supported hosters list from premiumize.me using the json API v1 (see https://secure.premiumize.me/?show=api)
     answer = getURL("https://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s" % (user, data['password']))
     data = json_loads(answer)

     # If the account is not valid there are no hosters available
     if data['status'] != 200:
         return []

     # Extract hosters from json file
     hosters = set(data['result']['hosterlist'])

     # Read config to check if certain hosters should not be handled
     configMode = self.getConfig('hosterListMode')
     if configMode in ("listed", "unlisted"):
         configList = set(self.getConfig('hosterList').strip().lower().replace('|', ',').replace(';', ',').split(','))
         configList.discard(u'')
         if configMode == "listed":
             hosters &= configList
         else:
             hosters -= configList

     return list(hosters)
Example #56
    def process(self, pyfile):
        self.prepare()

        if not re.match(self.__pattern__, self.pyfile.url):
            if self.premium:
                self.handleOverriden()
            else:
                self.fail("Only premium users can download from other hosters with %s" % self.HOSTER_NAME)
        else:
            try:
                # Due to a 0.4.9 core bug self.load would use cookies even if
                # cookies=False. Workaround using getURL to avoid cookies.
                # Can be reverted in 0.5 as the cookies bug has been fixed.
                self.html = getURL(pyfile.url, decode=True)
                self.file_info = self.getFileInfo()
            except PluginParseError:
                self.file_info = None

            self.location = self.getDirectDownloadLink()

            if not self.file_info:
                pyfile.name = html_unescape(unquote(urlparse(
                    self.location if self.location else pyfile.url).path.split("/")[-1]))

            if self.location:
                self.startDownload(self.location)
            elif self.premium:
                self.handlePremium()
            else:
                self.handleFree()
Example #57
    def checkForUpdate(self):
        """checks if an update is available, return result"""
        try:
            if self.version == "None":  # No updated known
                version_check = getURL(self.URL,
                                       get={
                                           'v':
                                           self.core.api.getServerVersion()
                                       }).splitlines()
                self.version = version_check[0]

                # Still no updates, plugins will be checked
                if self.version == "None":
                    self.logInfo(_("No Updates for pyLoad"))
                    return version_check[1:]

            self.info["pyload"] = True
            self.logInfo(
                _("***  New pyLoad Version %s available  ***") % self.version)
            self.logInfo(
                _("***  Get it here: http://pyload.org/download  ***"))
        except Exception:
            self.logWarning(_("Not able to connect server for updates"))

        return None  # Nothing will be done
Example #59
def getInfo(urls):
    result = []

    for url in urls:
        file_info = parseFileInfo(StahnuTo, url,
                                  getURL("http://stahnu.to/?file=" + re.search(StahnuTo.__pattern__, url).group(3),
                                         decode=True))
        result.append(file_info)

    yield result