def parseFileInfo(self, url='', html=''):
    """Extract file name, size and online status from the hoster page.

    Fills an info dict keyed like pyLoad's convention: status 1 = offline,
    2 = online, 3 = unknown. Returns (name, size, status, url).
    """
    info = {"name": url, "size": 0, "status": 3}

    # Prefer the live pyfile URL over the passed-in one when available.
    if hasattr(self, "pyfile"):
        url = self.pyfile.url

    # NOTE(review): compares the HTTP code against the *string* '404' —
    # presumably self.req.http.code is stored as a string; confirm upstream.
    if hasattr(self, "req") and self.req.http.code == '404':
        info['status'] = 1
    else:
        if not html and hasattr(self, "html"):
            html = self.html
        # Re-decode pages from hosters that declare the wrong charset;
        # SH_BROKEN_ENCODING holds the real encoding name when set.
        if isinstance(self.SH_BROKEN_ENCODING, (str, unicode)):
            html = unicode(html, self.SH_BROKEN_ENCODING)
            if hasattr(self, "html"):
                self.html = html

        if hasattr(self, "FILE_OFFLINE_PATTERN") and re.search(self.FILE_OFFLINE_PATTERN, html):
            # File offline
            info['status'] = 1
        else:
            online = False
            # Seed info with named groups from the plugin's URL pattern;
            # a non-matching URL is tolerated (bare except keeps legacy behavior).
            try:
                info.update(re.match(self.__pattern__, url).groupdict())
            except:
                pass
            # Each pattern is optional per plugin; AttributeError covers both a
            # missing class attribute and a failed re.search (None.groupdict()).
            for pattern in ("FILE_INFO_PATTERN", "FILE_NAME_PATTERN", "FILE_SIZE_PATTERN"):
                try:
                    info.update(re.search(getattr(self, pattern), html).groupdict())
                    online = True
                except AttributeError:
                    continue

            if online:
                # File online, return name and size
                info['status'] = 2
                if 'N' in info:
                    info['name'] = replace_patterns(info['N'], self.FILE_NAME_REPLACEMENTS)
                if 'S' in info:
                    # Precedence: (S + U) if U present, else S alone.
                    size = replace_patterns(info['S'] + info['U'] if 'U' in info else info['S'],
                                            self.FILE_SIZE_REPLACEMENTS)
                    info['size'] = parseFileSize(size)
                elif isinstance(info['size'], (str, unicode)):
                    if 'units' in info:
                        info['size'] += info['units']
                    info['size'] = parseFileSize(info['size'])

    # Cache the result on the instance when the plugin exposes file_info.
    if hasattr(self, "file_info"):
        self.file_info = info

    return info['name'], info['size'], info['status'], url
def parseFileInfo(self, url='', html=''):
    """Parse name/size/status of a hosted file from its page HTML.

    Status codes follow pyLoad: 1 = offline, 2 = online, 3 = unknown.
    Returns the tuple (name, size, status, url).
    """
    info = {"name": url, "size": 0, "status": 3}

    if hasattr(self, "pyfile"):
        url = self.pyfile.url  # live download URL wins over the argument

    # NOTE(review): string comparison against '404' assumes the request
    # object stores the HTTP status as text — verify against the req class.
    if hasattr(self, "req") and self.req.http.code == '404':
        info['status'] = 1
    else:
        if not html and hasattr(self, "html"):
            html = self.html
        # Hosters with a broken charset header: decode with the known-good one.
        if isinstance(self.SH_BROKEN_ENCODING, (str, unicode)):
            html = unicode(html, self.SH_BROKEN_ENCODING)
            if hasattr(self, "html"):
                self.html = html

        if hasattr(self, "FILE_OFFLINE_PATTERN") and re.search(self.FILE_OFFLINE_PATTERN, html):
            # File offline
            info['status'] = 1
        else:
            online = False
            # URL pattern groups are merged best-effort; the bare except
            # preserves the legacy "ignore any failure here" behavior.
            try:
                info.update(re.match(self.__pattern__, url).groupdict())
            except:
                pass
            # AttributeError doubles as "pattern not defined on this plugin"
            # and "pattern did not match" (None has no groupdict).
            for pattern in ("FILE_INFO_PATTERN", "FILE_NAME_PATTERN", "FILE_SIZE_PATTERN"):
                try:
                    info.update(re.search(getattr(self, pattern), html).groupdict())
                    online = True
                except AttributeError:
                    continue

            if online:
                # File online, return name and size
                info['status'] = 2
                if 'N' in info:
                    info['name'] = replace_patterns(info['N'], self.FILE_NAME_REPLACEMENTS)
                if 'S' in info:
                    # Parses as ((S + U) if 'U' in info else S).
                    size = replace_patterns(info['S'] + info['U'] if 'U' in info else info['S'],
                                            self.FILE_SIZE_REPLACEMENTS)
                    info['size'] = parseFileSize(size)
                elif isinstance(info['size'], (str, unicode)):
                    if 'units' in info:
                        info['size'] += info['units']
                    info['size'] = parseFileSize(info['size'])

    if hasattr(self, "file_info"):
        self.file_info = info  # expose parse result to the plugin instance

    return info['name'], info['size'], info['status'], url
def loadAccountInfo(self, user, req):
    """Report stahnu.to account status by scraping the VIP credit off the homepage."""
    page = req.load("http://www.stahnu.to/")
    match = re.search(r'>VIP: (\d+.*)<', page)
    if match:
        trafficleft = parseFileSize(match.group(1)) * 1024
    else:
        trafficleft = 0
    # Accounts above the 512 * 1024 credit threshold are treated as premium;
    # there is no expiry date on this hoster, hence validuntil = -1.
    return {"premium": trafficleft > (512 * 1024),
            "trafficleft": trafficleft,
            "validuntil": -1}
def getInfo(urls):
    """Batch-check filefactory URLs via the links.php tool.

    Returns a list of (name, size, status, url) tuples; status 2 = online,
    1 = offline.
    """
    file_info = list()
    list_ids = dict()
    # Create a dict id:url. Will be used to retrieve original url
    for url in urls:
        m = re.search(FilefactoryCom.__pattern__, url)
        list_ids[m.group('id')] = url

    # WARN: There could be a limit of urls for request
    post_data = {'func': 'links', 'links': '\n'.join(urls)}
    rep = getURL('http://www.filefactory.com/tool/links.php', post=post_data, decode=True)

    # Online links: scrape name, size and unit, then map the id back to the
    # URL that was originally submitted.
    for m in re.finditer(
            r'innerText">\s*<h1 class="name">(?P<N>.+) \((?P<S>[\w.]+) (?P<U>\w+)\)</h1>\s*<p>http://www.filefactory.com/file/(?P<ID>\w+).*</p>\s*<p class="hidden size">',
            rep):
        file_info.append((m.group('N'), parseFileSize(m.group('S'), m.group('U')), 2, list_ids[m.group('ID')]))

    # Offline links: the tool echoes the URL with an error paragraph.
    for m in re.finditer(
            r'innerText">\s*<h1>(http://www.filefactory.com/file/(?P<ID>\w+)/)</h1>\s*<p>\1</p>\s*<p class="errorResponse">Error: file not found</p>',
            rep):
        file_info.append((list_ids[m.group('ID')], 0, 1, list_ids[m.group('ID')]))

    return file_info
def loadAccountInfo(self, user, req):
    """Scrape the stahnu.to homepage for the account's VIP credit.

    Returns the pyLoad account-info dict; validuntil -1 means no expiry.
    """
    html = req.load("http://www.stahnu.to/")
    found = re.search(r'>VIP: (\d+.*)<', html)
    # Credit is scaled by 1024; zero when the VIP marker is absent.
    trafficleft = parseFileSize(found.group(1)) * 1024 if found else 0
    # Premium threshold is a fixed credit amount (512 * 1024).
    return {"premium": trafficleft > (512 * 1024), "trafficleft": trafficleft, "validuntil": -1}
def process(self, pyfile):
    """Unrestrict pyfile.url through the Real-debrid API and download it."""
    # A URL already matching this plugin's pattern is a direct link.
    if re.match(self.__pattern__, pyfile.url):
        new_url = pyfile.url
    elif not self.account:
        self.logError(_("Please enter your %s account or deactivate this plugin") % "Real-debrid")
        self.fail("No Real-debrid account provided")
    else:
        self.logDebug("Old URL: %s" % pyfile.url)
        # Only the first line of the password field is used.
        password = self.getPassword().splitlines()
        if not password:
            password = ""
        else:
            password = password[0]

        url = "https://real-debrid.com/ajax/unrestrict.php?lang=en&link=%s&password=%s&time=%s" % (
            quote(pyfile.url, ""), password, int(time() * 1000))
        page = self.load(url)
        data = json_loads(page)

        self.logDebug("Returned Data: %s" % data)

        if data["error"] != 0:
            if data["message"] == "Your file is unavailable on the hoster.":
                self.offline()
            else:
                self.logWarning(data["message"])
                self.tempOffline()
        else:
            # Replace pyLoad's temporary name with the real one from the API.
            if self.pyfile.name is not None and self.pyfile.name.endswith('.tmp') and data["file_name"]:
                self.pyfile.name = data["file_name"]
                self.pyfile.size = parseFileSize(data["file_size"])
            new_url = data['generated_links'][0][-1]

        # Honour the https config option on the generated link.
        if self.getConfig("https"):
            new_url = new_url.replace("http://", "https://")
        else:
            new_url = new_url.replace("https://", "http://")

    if new_url != pyfile.url:
        self.logDebug("New URL: %s" % new_url)

    if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
        #only use when name wasnt already set
        pyfile.name = self.getFilename(new_url)

    self.download(new_url, disposition=True)

    check = self.checkDownload(
        {"error": "<title>An error occured while processing your request</title>"})
    if check == "error":
        #usual this download can safely be retried
        self.retry(reason="An error occured while generating link.", wait_time=60)
def loadAccountInfo(self, user, req):
    """Query the ddlstorage API for premium status, expiry and traffic left."""
    password = self.accounts[user]['password']
    # The request must be signed: md5 over the request type, client id,
    # login, password hash and the shared API secret.
    api_data = req.load('http://www.ddlstorage.com/cgi-bin/api_req.cgi',
                        post={'req_type': 'user_info',
                              'client_id': 53472,
                              'user_login': user,
                              'user_password': md5(password).hexdigest(),
                              'sign': md5('user_info%d%s%s%s' % (53472, user, md5(password).hexdigest(),
                                                                 '25JcpU2dPOKg8E2OEoRqMSRu068r0Cv3')).hexdigest()})
    # The API wraps its JSON payload in <pre> tags; strip them first.
    api_data = api_data.replace('<pre>', '').replace('</pre>', '')
    self.logDebug('Account Info API data: ' + api_data)
    api_data = json_loads(api_data)

    if api_data['status'] != 'OK':  # 'status' must be always OK for a working account
        return {"premium": False, "valid": False}

    if api_data['account_type'] == 'REGISTERED':
        # Free (registered) account: no expiry tracked.
        premium = False
        validuntil = None
    else:
        premium = True
        validuntil = int(mktime(strptime(api_data['premium_expire'], "%Y-%m-%d %H:%M:%S")))

    if api_data['usr_bandwidth_available'] == 'UNLIMITED':
        trafficleft = -1  # pyLoad convention for unlimited traffic
    else:
        trafficleft = parseFileSize(api_data['usr_bandwidth_available']) / 1024

    return {"premium": premium, "validuntil": validuntil, "trafficleft": trafficleft}
def checkTrafficLeft(self): # check if user logged in found = re.search(self.USER_CREDIT_PATTERN, self.html) if not found: self.account.relogin(self.user) self.html = self.load(self.pyfile.url, cookies=True, decode=True) found = re.search(self.USER_CREDIT_PATTERN, self.html) if not found: return False # check user credit try: credit = parseFileSize( found.group(1).replace(' ', ''), found.group(2)) self.logInfo("Premium download for %i KiB of Credit" % (self.pyfile.size / 1024)) self.logInfo("User %s has %i KiB left" % (self.user, credit / 1024)) if credit < self.pyfile.size: self.logInfo("Not enough credit to download file %s" % self.pyfile.name) return False except Exception, e: # let's continue and see what happens... self.logError('Parse error (CREDIT): %s' % e)
def handlePremium(self, pyfile):
    """Ask the Real-debrid unrestrict API for a direct link and store it in self.link."""
    params = {'lang': "en",
              'link': pyfile.url,
              'password': self.getPassword(),
              'time': int(time.time() * 1000)}
    data = json_loads(self.load("https://real-debrid.com/ajax/unrestrict.php", get=params))

    self.logDebug("Returned Data: %s" % data)

    if data['error'] != 0:
        # the API could not unrestrict the link
        if data['message'] == "Your file is unavailable on the hoster.":
            self.offline()
        else:
            self.logWarning(data['message'])
            self.tempOffline()
    else:
        # adopt the real file name/size when pyLoad only has a temporary one
        if pyfile.name and pyfile.name.endswith('.tmp') and data['file_name']:
            pyfile.name = data['file_name']
            pyfile.size = parseFileSize(data['file_size'])
        self.link = data['generated_links'][0][-1]

        # force the scheme requested by the ssl option
        if self.getConfig('ssl'):
            self.link = self.link.replace("http://", "https://")
        else:
            self.link = self.link.replace("https://", "http://")
def checkFile(plugin, urls):
    """Batch-check filefactory links via links.php; return (name, size, status, url) tuples."""
    # Seed every id with an "unknown" placeholder tuple keyed by file id.
    url_dict = {}
    for url in urls:
        url_dict[re.search(plugin.__pattern__, url).group('id')] = (url, 0, 0, url)
    url_ids = url_dict.keys()

    # Rebuild canonical file URLs from the ids for the checker request.
    urls = map(lambda url_id: 'http://www.filefactory.com/file/' + url_id, url_ids)
    html = getURL("http://www.filefactory.com/tool/links.php",
                  post={"func": "links", "links": "\n".join(urls)},
                  decode=True)

    # Online entries: record name and parsed size (status 2).
    for m in re.finditer(plugin.LC_INFO_PATTERN, html):
        if m.group('id') in url_ids:
            entry = url_dict[m.group('id')]
            url_dict[m.group('id')] = (m.group('name'), parseFileSize(m.group('size')), 2, entry[3])

    # Offline entries: keep the name slot, mark status 1.
    for m in re.finditer(plugin.LC_OFFLINE_PATTERN, html):
        if m.group('id') in url_ids:
            entry = url_dict[m.group('id')]
            url_dict[m.group('id')] = (entry[0], 0, 1, entry[3])

    file_info = url_dict.values()
    return file_info
def getInfo(urls):
    """Check a batch of filefactory URLs and return (name, size, status, url) tuples."""
    # Map each file id back to the URL the caller submitted.
    list_ids = {}
    for url in urls:
        m = re.search(FilefactoryCom.__pattern__, url)
        list_ids[m.group('id')] = url

    # WARN: There could be a limit of urls for request
    rep = getURL('http://www.filefactory.com/tool/links.php',
                 post={'func': 'links', 'links': '\n'.join(urls)},
                 decode=True)

    file_info = []
    # Online links
    for m in re.finditer(
            r'innerText">\s*<h1 class="name">(?P<N>.+) \((?P<S>[\w.]+) (?P<U>\w+)\)</h1>\s*<p>http://www.filefactory.com/file/(?P<ID>\w+).*</p>\s*<p class="hidden size">',
            rep):
        file_info.append(
            (m.group('N'), parseFileSize(m.group('S'), m.group('U')), 2, list_ids[m.group('ID')]))

    # Offline links
    for m in re.finditer(
            r'innerText">\s*<h1>(http://www.filefactory.com/file/(?P<ID>\w+)/)</h1>\s*<p>\1</p>\s*<p class="errorResponse">Error: file not found</p>',
            rep):
        file_info.append(
            (list_ids[m.group('ID')], 0, 1, list_ids[m.group('ID')]))

    return file_info
def process(self, pyfile):
    """Resolve pyfile.url through the AllDebrid service and download the result."""
    if not self.account:
        self.logError(_("Please enter your AllDebrid account or deactivate this plugin"))
        self.fail("No AllDebrid account provided")

    self.log.debug("AllDebrid: Old URL: %s" % pyfile.url)
    # URLs already matching this plugin's pattern are direct links.
    if re.match(self.__pattern__, pyfile.url):
        new_url = pyfile.url
    else:
        # Only the first line of the password field is sent to the service.
        password = self.getPassword().splitlines()
        if not password:
            password = ""
        else:
            password = password[0]

        url = "http://www.alldebrid.com/service.php?link=%s&json=true&pw=%s" % (pyfile.url, password)
        page = self.load(url)
        data = json_loads(page)

        self.log.debug("Json data: %s" % str(data))

        if data["error"]:
            if data["error"] == "This link isn't available on the hoster website.":
                self.offline()
            else:
                self.logWarning(data["error"])
                self.tempOffline()
        else:
            # NOTE(review): unlike the Real-debrid plugin, this overwrites the
            # name only when it does NOT end in '.tmp' — confirm intended.
            if self.pyfile.name and not self.pyfile.name.endswith('.tmp'):
                self.pyfile.name = data["filename"]
                self.pyfile.size = parseFileSize(data["filesize"])
            new_url = data["link"]

    # Honour the https config option on the resolved link.
    if self.getConfig("https"):
        new_url = new_url.replace("http://", "https://")
    else:
        new_url = new_url.replace("https://", "http://")

    self.log.debug("AllDebrid: New URL: %s" % new_url)

    if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
        #only use when name wasnt already set
        pyfile.name = self.getFilename(new_url)

    self.download(new_url, disposition=True)

    check = self.checkDownload(
        {"error": "<title>An error occured while processing your request</title>",
         "empty": re.compile(r"^$")})

    if check == "error":
        self.retry(reason="An error occured while generating link.", wait_time=60)
    else:
        if check == "empty":
            self.retry(reason="Downloaded File was empty.", wait_time=60)
def process(self, pyfile):
    """Scrape the file page, submit the download form and fetch the file."""
    self.pyfile = pyfile
    # The media-preview URLs share the page layout of the download page.
    self.pyfile.url = re.sub("(video|image|audio|flash)", "download", self.pyfile.url)
    self.html = self.load(pyfile.url)

    if "File Not Found" in self.html:
        self.offline()

    # Name and size are rendered in grey <font> cells on the page.
    filenameMatch = re.search("File Name:.*?<font color=\"#666666\".*?>(.*?)</font>", self.html, re.DOTALL)
    filesizeMatch = re.search("File Size:.*?<font color=\"#666666\".*?>([^<]+)</font>", self.html, re.DOTALL)
    if not filenameMatch or not filesizeMatch:
        self.offline()

    filename = filenameMatch.group(1)
    filesize = filesizeMatch.group(1)
    if filename.strip() == "":
        self.offline()

    pyfile.name = filename
    pyfile.size = parseFileSize(filesize)

    if '<input name="download"' not in self.html:
        self.fail("No download form")

    # Simulate a click on the form's image button with random coordinates.
    self.html = self.load(pyfile.url, post={"download": 1,
                                            "imageField.x": random.randrange(160),
                                            "imageField.y": random.randrange(60)})

    # The direct link is split into JS array chunks joined by ','.
    dllinkMatch = re.search("var link_enc\\=new Array\\(\\'(.*?)\\'\\)", self.html)
    if dllinkMatch:
        dllink = re.sub("\\'\\,\\'", "", dllinkMatch.group(1))
    else:
        self.fail("Plugin defect")

    # Fixed wait imposed by the hoster before the link becomes valid.
    self.setWait(51)
    self.wait()
    self.download(dllink)

    check = self.checkDownload({"unav": "/images/download.gif",
                                "404": "404 - Not Found"})
    if check == "unav":
        self.fail("Plugin defect")
    elif check == "404":
        self.offline()
def process(self, pyfile):
    """Resolve pyfile.url through the Over-Load API and download the file."""
    if re.match(self.__pattern__, pyfile.url):
        new_url = pyfile.url
    elif not self.account:
        self.logError(_("Please enter your %s account or deactivate this plugin") % "Over-Load")
        self.fail("No Over-Load account provided")
    else:
        self.logDebug("Old URL: %s" % pyfile.url)
        # The account password doubles as the API auth token.
        data = self.account.getAccountData(self.user)
        page = self.load("https://api.over-load.me/getdownload.php",
                         get={"auth": data["password"], "link": pyfile.url})
        data = json_loads(page)

        self.logDebug("Returned Data: %s" % data)

        if data["err"] == 1:
            self.logWarning(data["msg"])
            self.tempOffline()
        else:
            # Replace pyLoad's temporary name with the real one from the API.
            if pyfile.name is not None and pyfile.name.endswith('.tmp') and data["filename"]:
                pyfile.name = data["filename"]
                pyfile.size = parseFileSize(data["filesize"])
            new_url = data["downloadlink"]

        # Honour the https config option on the resolved link.
        if self.getConfig("https"):
            new_url = new_url.replace("http://", "https://")
        else:
            new_url = new_url.replace("https://", "http://")

    if new_url != pyfile.url:
        self.logDebug("New URL: %s" % new_url)

    if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
        # only use when name wasn't already set
        pyfile.name = self.getFilename(new_url)

    self.download(new_url, disposition=True)

    check = self.checkDownload(
        {"error": "<title>An error occured while processing your request</title>"})
    if check == "error":
        # usual this download can safely be retried
        self.retry(reason="An error occured while generating link.", wait_time=60)
def loadAccountInfo(self, user, req):
    """Parse the fastshare.cz user page for the remaining credit."""
    html = req.load("http://www.fastshare.cz/user", decode=True)
    found = re.search(self.CREDIT_PATTERN, html)
    if found is None:
        # No credit marker on the page — treat as a free account.
        return {"validuntil": -1, "trafficleft": None, "premium": False}
    trafficleft = parseFileSize(found.group(1)) / 1024
    # Any non-zero credit counts as premium; the hoster tracks no expiry date.
    return {"validuntil": -1, "trafficleft": trafficleft, "premium": bool(trafficleft)}
def process(self, pyfile):
    """Unrestrict pyfile.url via the Real-debrid API, then download it."""
    if re.match(self.__pattern__, pyfile.url):
        new_url = pyfile.url  # already a direct Real-debrid link
    elif not self.account:
        self.logError(_("Please enter your %s account or deactivate this plugin") % "Real-debrid")
        self.fail("No Real-debrid account provided")
    else:
        self.logDebug("Old URL: %s" % pyfile.url)
        # Only the first line of the password field is used.
        password = self.getPassword().splitlines()
        if not password:
            password = ""
        else:
            password = password[0]

        url = "https://real-debrid.com/ajax/unrestrict.php?lang=en&link=%s&password=%s&time=%s" % (
            quote(pyfile.url, ""), password, int(time() * 1000))
        page = self.load(url)
        data = json_loads(page)

        self.logDebug("Returned Data: %s" % data)

        if data["error"] != 0:
            if data["message"] == "Your file is unavailable on the hoster.":
                self.offline()
            else:
                self.logWarning(data["message"])
                self.tempOffline()
        else:
            # Adopt the real name/size when pyLoad only has a temp name.
            if pyfile.name is not None and pyfile.name.endswith('.tmp') and data["file_name"]:
                pyfile.name = data["file_name"]
                pyfile.size = parseFileSize(data["file_size"])
            new_url = data['generated_links'][0][-1]

        # Honour the https config option on the generated link.
        if self.getConfig("https"):
            new_url = new_url.replace("http://", "https://")
        else:
            new_url = new_url.replace("https://", "http://")

    if new_url != pyfile.url:
        self.logDebug("New URL: %s" % new_url)

    if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
        #only use when name wasnt already set
        pyfile.name = self.getFilename(new_url)

    self.download(new_url, disposition=True)

    check = self.checkDownload(
        {"error": "<title>An error occured while processing your request</title>"})
    if check == "error":
        #usual this download can safely be retried
        self.retry(wait_time=60, reason="An error occured while generating link.")
def loadAccountInfo(self, user, req):
    """Read multishare.cz credit from the profile page and extra info from the homepage."""
    #self.relogin(user)
    profile = req.load("http://www.multishare.cz/profil/", decode=True)
    m = re.search(self.TRAFFIC_LEFT_PATTERN, profile)
    if m:
        trafficleft = parseFileSize(m.group('S'), m.group('U')) / 1024
    else:
        trafficleft = 0
    self.premium = bool(trafficleft)

    homepage = req.load("http://www.multishare.cz/", decode=True)
    # Merge extra account key/value pairs scraped from the homepage.
    mms_info = dict(re.findall(self.ACCOUNT_INFO_PATTERN, homepage))
    mms_info.update({"validuntil": -1, "trafficleft": trafficleft})
    return mms_info
def loadAccountInfo(self, user, req):
    """Parse the quickshare.cz premium page for the credit balance."""
    html = req.load("http://www.quickshare.cz/premium", decode=True)
    credit = re.search(r'Stav kreditu: <strong>(.+?)</strong>', html)
    if not credit:
        # No credit line found — report a free account with unknown traffic.
        return {"validuntil": -1, "trafficleft": None, "premium": False}
    trafficleft = parseFileSize(credit.group(1)) / 1024
    return {"validuntil": -1, "trafficleft": trafficleft, "premium": bool(trafficleft)}
def loadAccountInfo(self, user, req):
    """Read the remaining credit off the fastshare.cz user page."""
    html = req.load("http://www.fastshare.cz/user", decode=True)
    credit = re.search(r"(?:Kredit|Credit)\s*: </td><td>(.+?) ", html)
    if credit is None:
        # credit cell missing — free account, traffic unknown
        return {"validuntil": -1, "trafficleft": None, "premium": False}
    trafficleft = parseFileSize(credit.group(1)) / 1024
    return {"validuntil": -1,
            "trafficleft": trafficleft,
            "premium": bool(trafficleft)}
def loadAccountInfo(self, user, req):
    """Detect egofiles premium status and parse expiry and traffic left."""
    html = req.load("http://egofiles.com")
    if "You are logged as a Free User" in html:
        return {"premium": False, "validuntil": None, "trafficleft": None}

    m = re.search(self.PREMIUM_ACCOUNT_PATTERN, html)
    if not m:
        # Page layout changed or not logged in — nothing to report.
        self.logError("Unable to retrieve account information - Plugin may be out of date")
        return

    expiry = time.strptime(m.group("P"), "%Y-%m-%d %H:%M:%S")
    validuntil = int(time.mktime(expiry))
    trafficleft = parseFileSize(m.group("T"), m.group("U")) / 1024
    return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
def process(self, pyfile):
    """Resolve pyfile.url through AllDebrid and download the resulting link."""
    if not self.account:
        self.logError(_("Please enter your AllDebrid account or deactivate this plugin"))
        self.fail("No AllDebrid account provided")

    self.log.debug("AllDebrid: Old URL: %s" % pyfile.url)
    if re.match(self.__pattern__, pyfile.url):
        new_url = pyfile.url  # already a direct link
    else:
        # Only the first line of the password field is sent along.
        password = self.getPassword().splitlines()
        if not password:
            password = ""
        else:
            password = password[0]

        url = "http://www.alldebrid.com/service.php?link=%s&json=true&pw=%s" % (pyfile.url, password)
        page = self.load(url)
        data = json_loads(page)

        self.log.debug("Json data: %s" % str(data))

        if data["error"]:
            if data["error"] == "This link isn't available on the hoster website.":
                self.offline()
            else:
                self.logWarning(data["error"])
                self.tempOffline()
        else:
            # NOTE(review): name is replaced only when it does NOT end in
            # '.tmp' — the inverse of the Real-debrid plugin; confirm intended.
            if self.pyfile.name and not self.pyfile.name.endswith('.tmp'):
                self.pyfile.name = data["filename"]
                self.pyfile.size = parseFileSize(data["filesize"])
            new_url = data["link"]

    # Honour the https config option.
    if self.getConfig("https"):
        new_url = new_url.replace("http://", "https://")
    else:
        new_url = new_url.replace("https://", "http://")

    self.log.debug("AllDebrid: New URL: %s" % new_url)

    if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown"):
        #only use when name wasnt already set
        pyfile.name = self.getFilename(new_url)

    self.download(new_url, disposition=True)

    check = self.checkDownload(
        {"error": "<title>An error occured while processing your request</title>",
         "empty": re.compile(r"^$")})

    if check == "error":
        self.retry(reason="An error occured while generating link.", wait_time=60)
    else:
        if check == "empty":
            self.retry(reason="Downloaded File was empty.", wait_time=60)
def loadAccountInfo(self, user, req):
    """Scrape multishare.cz profile credit and homepage account details."""
    #self.relogin(user)
    html = req.load("http://www.multishare.cz/profil/", decode=True)
    found = re.search(self.TRAFFIC_LEFT_PATTERN, html)
    # Credit (size S + unit U groups) scaled by 1024; zero when not shown.
    trafficleft = parseFileSize(found.group('S'), found.group('U')) / 1024 if found else 0
    self.premium = True if trafficleft else False

    html = req.load("http://www.multishare.cz/", decode=True)
    # Extra account key/value pairs scraped from the homepage.
    mms_info = dict(re.findall(self.ACCOUNT_INFO_PATTERN, html))
    return dict(mms_info, **{"validuntil": -1, "trafficleft": trafficleft})
def getInfo(urls):
    """Yield (name, size, status, url) for each cloudzer URL via its /status endpoint."""
    for url in urls:
        header = getURL(url, just_header=True)
        if 'Location: http://cloudzer.net/404' in header:
            # Redirect to the 404 page means the file is gone.
            yield (url, 0, 1, url)
            continue
        # The status endpoint returns "name\nsize".
        status_url = url + 'status' if url.endswith('/') else url + '/status'
        api_data = getURL(status_url)
        name, size = api_data.splitlines()
        yield (name, parseFileSize(size), 2, url)
def getInfo(urls):
    """Yield pyLoad file-info tuples (name, size, status, url) per cloudzer URL."""
    for url in urls:
        header = getURL(url, just_header=True)
        # A redirect to /404 marks the file as offline (status 1).
        if 'Location: http://cloudzer.net/404' in header:
            file_info = (url, 0, 1, url)
        else:
            # The status endpoint returns two lines: name and size.
            if url.endswith('/'):
                api_data = getURL(url + 'status')
            else:
                api_data = getURL(url + '/status')
            name, size = api_data.splitlines()
            size = parseFileSize(size)
            file_info = (name, size, 2, url)
        yield file_info
def checkFile(plugin, urls):
    """Batch-check URLs via the hoster's link checker page.

    Returns a list of (name, size, status, url) tuples; status 2 = available,
    1 = not available.
    """
    html = getURL(plugin.URLS[1], post={"urls": "\n".join(urls)}, decode=True)

    file_info = []
    for li in re.finditer(plugin.LINKCHECK_TR, html, re.DOTALL):
        try:
            cols = re.findall(plugin.LINKCHECK_TD, li.group(1))
            if cols:
                file_info.append(
                    (cols[1] if cols[1] != '--' else cols[0],     # display name, falling back to the URL cell
                     parseFileSize(cols[2]) if cols[2] != '--' else 0,
                     2 if cols[3].startswith('Available') else 1,
                     cols[0]))
        except Exception:
            # one malformed row must not abort the whole batch
            continue

    # FIX: the original built file_info but never returned it, so callers
    # always received None instead of the collected results.
    return file_info
def process(self, pyfile):
    """Parse the file page, post the download form and fetch the file."""
    self.pyfile = pyfile
    # Media-preview URLs are rewritten to the download page variant.
    self.pyfile.url = re.sub("(video|image|audio|flash)", "download", self.pyfile.url)
    self.html = self.load(pyfile.url)

    if "File Not Found" in self.html:
        self.offline()

    # Name and size live in grey <font> cells on the page.
    filenameMatch = re.search("File Name:.*?<font color=\"#666666\".*?>(.*?)</font>", self.html, re.DOTALL)
    filesizeMatch = re.search("File Size:.*?<font color=\"#666666\".*?>([^<]+)</font>", self.html, re.DOTALL)
    if not filenameMatch or not filesizeMatch:
        self.offline()

    filename = filenameMatch.group(1)
    filesize = filesizeMatch.group(1)
    if filename.strip() == "":
        self.offline()

    pyfile.name = filename
    pyfile.size = parseFileSize(filesize)

    if '<input name="download"' not in self.html:
        self.fail("No download form")

    # Fake a click on the form's image button at random coordinates.
    self.html = self.load(pyfile.url, post={"download": 1,
                                            "imageField.x": random.randrange(160),
                                            "imageField.y": random.randrange(60)})

    # Direct link is encoded as a JS array of chunks joined by ','.
    dllinkMatch = re.search("var link_enc\\=new Array\\(\\'(.*?)\\'\\)", self.html)
    if dllinkMatch:
        dllink = re.sub("\\'\\,\\'", "", dllinkMatch.group(1))
    else:
        self.fail("Plugin defect")

    # Mandatory hoster wait before the link becomes valid.
    self.setWait(51)
    self.wait()
    self.download(dllink)

    check = self.checkDownload({"unav": "/images/download.gif",
                                "404": "404 - Not Found"})
    if check == "unav":
        self.fail("Plugin defect")
    elif check == "404":
        self.offline()
def parseFileInfo(self, url='', html=''):
    """Parse name, size and status from the hoster page.

    Returns (name, size, status, fileid) with status 1 = offline,
    2 = online, 3 = unknown.
    """
    if not html and hasattr(self, "html"):
        html = self.html

    name, size, status, fileid = url, 0, 3, None

    if re.search(self.FILE_OFFLINE_PATTERN, html):
        status = 1  # file reported offline
    else:
        m = re.search(self.FILE_INFO_PATTERN, html)
        if m:
            name = html_unescape(m.group('N'))
            fileid = m.group('ID')
            size = parseFileSize(m.group('S'))
            status = 2  # file online

    return name, size, status, fileid
def checkFile(plugin, urls):
    """Check many URLs at once through the hoster's link checker.

    Returns (name, size, status, url) tuples: status 2 = available,
    1 = not available.
    """
    html = getURL(plugin.URLS[1], post={"urls": "\n".join(urls)}, decode=True)

    file_info = []
    for li in re.finditer(plugin.LINKCHECK_TR, html, re.DOTALL):
        try:
            cols = re.findall(plugin.LINKCHECK_TD, li.group(1))
            if cols:
                file_info.append((
                    cols[1] if cols[1] != '--' else cols[0],      # name cell, '--' falls back to URL
                    parseFileSize(cols[2]) if cols[2] != '--' else 0,
                    2 if cols[3].startswith('Available') else 1,
                    cols[0]))
        except Exception:
            # skip malformed rows instead of aborting the batch
            continue

    # FIX: the original never returned file_info, so every caller saw None
    # in place of the collected results.
    return file_info
def parseFileInfo(self, url = '', html = ''):
    """Extract (name, size, status, fileid) from the hoster page HTML.

    Status 1 = offline, 2 = online, 3 = unknown.
    """
    if not html and hasattr(self, "html"):
        html = self.html

    name, size, status, found, fileid = url, 0, 3, None, None

    if re.search(self.FILE_OFFLINE_PATTERN, html):
        # File offline
        status = 1
    else:
        found = re.search(self.FILE_INFO_PATTERN, html)
        if found:
            # Named groups: N = name (HTML-escaped), ID = file id, S = size.
            name, fileid = html_unescape(found.group('N')), found.group('ID')
            size = parseFileSize(found.group('S'))
            status = 2

    return name, size, status, fileid
def loadAccountInfo(self, user, req):
    """Read the remaining fastshare.cz credit from the user page."""
    html = req.load("http://www.fastshare.cz/user", decode=True)
    found = re.search(r'(?:Kredit|Credit)\s*: </td><td>(.+?) ', html)
    if found:
        # Parsed credit scaled by 1024; any non-zero credit counts as premium.
        trafficleft = parseFileSize(found.group(1)) / 1024
        premium = True if trafficleft else False
    else:
        trafficleft = None
        premium = False
    # validuntil -1: this hoster has no expiry date.
    return {
        "validuntil": -1,
        "trafficleft": trafficleft,
        "premium": premium
    }
def loadAccountInfo(self, user, req):
    """Parse fastshare.cz credit via the plugin's CREDIT_PATTERN."""
    html = req.load("http://www.fastshare.cz/user", decode=True)
    found = re.search(self.CREDIT_PATTERN, html)
    if found:
        # Credit scaled by 1024; non-zero credit implies premium.
        trafficleft = parseFileSize(found.group(1)) / 1024
        premium = True if trafficleft else False
    else:
        trafficleft = None
        premium = False
    # validuntil -1: accounts on this hoster do not expire.
    return {
        "validuntil": -1,
        "trafficleft": trafficleft,
        "premium": premium
    }
def loadAccountInfo(self, user, req):
    """Parse the quickshare.cz premium page for the credit balance."""
    html = req.load("http://www.quickshare.cz/premium", decode=True)
    found = re.search(r'Stav kreditu: <strong>(.+?)</strong>', html)
    if found:
        # Credit scaled by 1024; non-zero credit implies premium.
        trafficleft = parseFileSize(found.group(1)) / 1024
        premium = True if trafficleft else False
    else:
        trafficleft = None
        premium = False
    # validuntil -1: no account expiry on this hoster.
    return {
        "validuntil": -1,
        "trafficleft": trafficleft,
        "premium": premium
    }
def loadAccountInfo(self, user, req):
    """Read premium expiry and traffic left from the filer.net profile page."""
    self.html = req.load("https://filer.net/profile")

    # Free user
    if re.search(self.FREE_PATTERN, self.html):
        return {"premium": False, "validuntil": None, "trafficleft": None}

    until = re.search(self.WALID_UNTIL_PATTERN, self.html)
    traffic = re.search(self.TRAFFIC_PATTERN, self.html)
    if not (until and traffic):
        # Page layout changed — bail out with a free-account result.
        self.logError('Unable to retrieve account information - Plugin may be out of date')
        return {"premium": False, "validuntil": None, "trafficleft": None}

    validuntil = int(time.mktime(time.strptime(until.group(1), "%d.%m.%Y %H:%M:%S")))
    trafficleft = parseFileSize(traffic.group(1)) / 1024
    return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
def loadAccountInfo(self, user, req):
    """Query the ddlstorage user_info API for premium state, expiry and traffic."""
    password = self.accounts[user]['password']
    # Request is authenticated by a signature: md5 over request type,
    # client id, login, password hash and the shared API secret.
    api_data = req.load(
        'http://www.ddlstorage.com/cgi-bin/api_req.cgi',
        post={
            'req_type': 'user_info',
            'client_id': 53472,
            'user_login': user,
            'user_password': md5(password).hexdigest(),
            'sign': md5('user_info%d%s%s%s' %
                        (53472, user, md5(password).hexdigest(),
                         '25JcpU2dPOKg8E2OEoRqMSRu068r0Cv3')).hexdigest()
        })
    # The API wraps its JSON in <pre> tags; strip them before parsing.
    api_data = api_data.replace('<pre>', '').replace('</pre>', '')
    self.logDebug('Account Info API data: ' + api_data)
    api_data = json_loads(api_data)

    if api_data['status'] != 'OK':  # 'status' must be always OK for a working account
        return {"premium": False, "valid": False}

    if api_data['account_type'] == 'REGISTERED':
        # Free (registered) accounts have no tracked expiry.
        premium = False
        validuntil = None
    else:
        premium = True
        validuntil = int(mktime(strptime(api_data['premium_expire'], "%Y-%m-%d %H:%M:%S")))

    if api_data['usr_bandwidth_available'] == 'UNLIMITED':
        trafficleft = -1  # pyLoad convention for unlimited traffic
    else:
        trafficleft = parseFileSize(api_data['usr_bandwidth_available']) / 1024

    return {
        "premium": premium,
        "validuntil": validuntil,
        "trafficleft": trafficleft
    }
def loadAccountInfo(self, user, req):
    """Scrape the oron.com account page (forced to German) for premium status.

    Returns the pyLoad account-info dict with validuntil, trafficleft
    and premium.
    """
    # FIX: every other loadAccountInfo in this codebase is called as
    # loadAccountInfo(user, req) by the account framework; the original
    # signature here was missing `user` and raised TypeError at runtime.
    req.load("http://oron.com/?op=change_lang&lang=german")
    src = req.load("http://oron.com/?op=my_account").replace("\n", "")

    validuntil = re.search(r"<td>Premiumaccount läuft bis:</td>\s*<td>(.*?)</td>", src)
    if validuntil:
        # Premium account: parse expiry date and remaining download traffic.
        validuntil = validuntil.group(1)
        validuntil = int(mktime(strptime(validuntil, "%d %B %Y")))
        trafficleft = re.search(r'<td>Download Traffic verfügbar:</td>\s*<td>(.*?)</td>', src).group(1)
        self.logDebug("Oron left: " + formatSize(parseFileSize(trafficleft)))
        trafficleft = int(self.parseTraffic(trafficleft))
        premium = True
    else:
        validuntil = -1
        trafficleft = None
        premium = False

    tmp = {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium}
    return tmp
def process(self, pyfile):
    """Resolve pyfile.url via the Over-Load API and download the result."""
    if re.match(self.__pattern__, pyfile.url):
        new_url = pyfile.url  # already a direct link
    elif not self.account:
        self.logError(_("Please enter your %s account or deactivate this plugin") % "Over-Load")
        self.fail("No Over-Load account provided")
    else:
        self.logDebug("Old URL: %s" % pyfile.url)
        # The stored account password is the API auth token.
        data = self.account.getAccountData(self.user)
        page = self.load("https://api.over-load.me/getdownload.php",
                         get={"auth": data["password"], "link": pyfile.url})
        data = json_loads(page)

        self.logDebug("Returned Data: %s" % data)

        if data["err"] == 1:
            self.logWarning(data["msg"])
            self.tempOffline()
        else:
            # Swap pyLoad's temporary name for the real one from the API.
            if pyfile.name is not None and pyfile.name.endswith('.tmp') and data["filename"]:
                pyfile.name = data["filename"]
                pyfile.size = parseFileSize(data["filesize"])
            new_url = data["downloadlink"]

        # Honour the https config option.
        if self.getConfig("https"):
            new_url = new_url.replace("http://", "https://")
        else:
            new_url = new_url.replace("https://", "http://")

    if new_url != pyfile.url:
        self.logDebug("New URL: %s" % new_url)

    if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
        # only use when name wasn't already set
        pyfile.name = self.getFilename(new_url)

    self.download(new_url, disposition=True)

    check = self.checkDownload(
        {"error": "<title>An error occured while processing your request</title>"})
    if check == "error":
        # usual this download can safely be retried
        self.retry(reason="An error occured while generating link.", wait_time=60)
def loadAccountInfo(self, user, req):
    """Read egofiles premium status, expiry and traffic from the homepage."""
    html = req.load("http://egofiles.com")
    if 'You are logged as a Free User' in html:
        return {"premium": False, "validuntil": None, "trafficleft": None}

    m = re.search(self.PREMIUM_ACCOUNT_PATTERN, html)
    if m:
        # Groups: P = expiry timestamp, T = traffic amount, U = unit.
        validuntil = int(
            time.mktime(time.strptime(m.group('P'), "%Y-%m-%d %H:%M:%S")))
        trafficleft = parseFileSize(m.group('T'), m.group('U')) / 1024
        return {
            "premium": True,
            "validuntil": validuntil,
            "trafficleft": trafficleft
        }
    else:
        # Falls through returning None when the page layout is unrecognized.
        self.logError(
            'Unable to retrieve account information - Plugin may be out of date'
        )
def process(self, pyfile):
    """Load the oron.com file page, fill in name/size, then dispatch to the
    premium or free download handler."""
    #self.load("http://oron.com/?op=change_lang&lang=german")
    # already logged in, so the above line shouldn't be necessary
    self.html = self.load(self.pyfile.url, ref=False, decode=True).encode("utf-8").replace("\n", "")

    if "File could not be found" in self.html or "Datei nicht gefunden" in self.html or \
       "This file has been blocked for TOS violation." in self.html:
        self.offline()

    # Flatten whitespace so the info pattern can match across layout.
    self.html = self.html.replace("\t", "")
    m = re.search(self.FILE_INFO_PATTERN, self.html)
    if m:
        # Groups: 1 = name, 2 = size value, 3 = size unit.
        pyfile.name = m.group(1)
        pyfile.size = parseFileSize(m.group(2), m.group(3))
        self.logDebug("File Size: %s" % pyfile.formatSize())
    else:
        self.logDebug("Name and/or size not found.")

    if self.account:
        self.handlePremium()
    else:
        self.handleFree()
def getInfo(urls):
    """Generator yielding one list of (name, size, status, url) tuples
    covering all given oron.com URLs (status 1 = offline, 2 = online)."""
    result = []
    for url in urls:
        # Flatten the page so FILE_INFO_PATTERN can match across line breaks.
        html = getURL(url).replace("\n", "")
        html = html.replace("\t", "")

        if "File could not be found" in html:
            result.append((url, 0, 1, url))
            continue

        m = re.search(OronCom.FILE_INFO_PATTERN, html, re.MULTILINE)
        if m:
            # Groups: 1 = name, 2 = size value, 3 = size unit.
            name = m.group(1)
            size = parseFileSize(m.group(2), m.group(3))
        else:
            name = url
            size = 0
        result.append((name, size, 2, url))
    # Single yield of the aggregated list (the `continue` above only makes
    # sense with the yield outside the loop).
    yield result
def checkTrafficLeft(self): # check if user logged in found = re.search(self.USER_CREDIT_PATTERN, self.html) if not found: self.account.relogin(self.user) self.html = self.load(self.pyfile.url, cookies=True, decode=True) found = re.search(self.USER_CREDIT_PATTERN, self.html) if not found: return False # check user credit try: credit = parseFileSize(found.group(1).replace(' ',''), found.group(2)) self.logInfo("Premium download for %i KiB of Credit" % (self.pyfile.size / 1024)) self.logInfo("User %s has %i KiB left" % (self.user, credit / 1024)) if credit < self.pyfile.size: self.logInfo("Not enough credit to download file %s" % self.pyfile.name) return False except Exception, e: # let's continue and see what happens... self.logError('Parse error (CREDIT): %s' % e)
class WarserverCz(Account): __name__ = "WarserverCz" __version__ = "0.02" __type__ = "account" __description__ = """Warserver.cz account plugin""" __author_name__ = ("zoidberg") __author_mail__ = ("*****@*****.**") VALID_UNTIL_PATTERN = ur'<li>Neomezené stahování do: <strong>(.+?)<' TRAFFIC_LEFT_PATTERN = ur'<li>Kredit: <strong>(.+?)<' DOMAIN = "http://www.warserver.cz" def loadAccountInfo(self, user, req): html = req.load("%s/uzivatele/prehled" % self.DOMAIN, decode=True) validuntil = trafficleft = None premium = False found = re.search(self.VALID_UNTIL_PATTERN, html) if found: self.logDebug("VALID_UNTIL", found.group(1)) try: #validuntil = mktime(strptime(found.group(1), "%d %B %Y")) premium = True trafficleft = -1 except Exception, e: self.logError(e) found = re.search(self.TRAFFIC_LEFT_PATTERN, html) if found: self.logDebug("TRAFFIC_LEFT", found.group(1)) trafficleft = parseFileSize( (found.group(1).replace(" ", ""))) // 1024 premium = True if trafficleft > 1 << 18 else False return ({ "validuntil": validuntil, "trafficleft": trafficleft, "premium": premium })
def checkFile(plugin, urls):
    """Bulk link-check via filefactory's links.php tool.

    Returns a list of (name, size, status, url) tuples, keyed
    internally by the file id extracted with plugin.__pattern__.
    """
    # Seed every id with an "unknown" record (status 0) for its url.
    results = {}
    for url in urls:
        file_id = re.search(plugin.__pattern__, url).group('id')
        results[file_id] = (url, 0, 0, url)

    url_ids = results.keys()
    check_urls = ['http://www.filefactory.com/file/' + file_id for file_id in url_ids]
    html = getURL("http://www.filefactory.com/tool/links.php",
                  post={"func": "links", "links": "\n".join(check_urls)},
                  decode=True)

    # Online entries: take name/size from the page, keep the stored url.
    for match in re.finditer(plugin.LC_INFO_PATTERN, html):
        file_id = match.group('id')
        if file_id in url_ids:
            results[file_id] = (match.group('name'), parseFileSize(match.group('size')), 2, results[file_id][3])

    # Offline entries: keep the recorded name, mark status 1.
    for match in re.finditer(plugin.LC_OFFLINE_PATTERN, html):
        file_id = match.group('id')
        if file_id in url_ids:
            results[file_id] = (results[file_id][0], 0, 1, results[file_id][3])

    return results.values()
def handlePremium(self, pyfile):
    """Resolve a premium download link through the over-load.me API.

    Sets pyfile.name/size from the API reply and stores the download
    url in self.link, normalized to the configured http/https scheme.
    """
    https = "https" if self.getConfig('ssl') else "http"
    data = self.account.getAccountData(self.user)
    page = self.load(https + "://api.over-load.me/getdownload.php",
                     get={'auth': data['password'], 'link': pyfile.url})
    data = json_loads(page)
    self.logDebug(data)

    if data['error'] == 1:
        self.logWarning(data['msg'])
        self.tempOffline()
    else:
        # Replace placeholder ".tmp" names with the real filename.
        if pyfile.name and pyfile.name.endswith('.tmp') and data['filename']:
            pyfile.name = data['filename']
        pyfile.size = parseFileSize(data['filesize'])
        http_repl = ["http://", "https://"]
        # BUGFIX: `*seq if cond else *other` is a SyntaxError — the star
        # unpacking must apply to the whole conditional expression.
        self.link = data['downloadlink'].replace(*(http_repl if self.getConfig('ssl') else http_repl[::-1]))
def handlePremium(self, pyfile):
    """Resolve a premium download link through the over-load.me API.

    Sets pyfile.name/size from the API reply and stores the download
    url in self.link, normalized to the configured http/https scheme.
    """
    https = "https" if self.getConfig('ssl') else "http"
    data = self.account.getAccountData(self.user)
    page = self.load(https + "://api.over-load.me/getdownload.php",
                     get={'auth': data['password'], 'link': pyfile.url})
    data = json_loads(page)
    self.logDebug(data)

    if data['error'] == 1:
        self.logWarning(data['msg'])
        self.tempOffline()
    else:
        # Replace placeholder ".tmp" names with the real filename.
        if pyfile.name is not None and pyfile.name.endswith('.tmp') and data['filename']:
            pyfile.name = data['filename']
        pyfile.size = parseFileSize(data['filesize'])
        http_repl = ["http://", "https://"]
        # BUGFIX: `*seq if cond else *other` is a SyntaxError — the star
        # unpacking must apply to the whole conditional expression.
        self.link = data['downloadlink'].replace(*(http_repl if self.getConfig('ssl') else http_repl[::-1]))
def getInfo(urls):
    """Check fileshare.in.ua links; yields one list of
    (name, size, status, url) tuples (status 1 = offline)."""
    result = []
    for url in urls:
        html = getURL(url)
        if re.search(FileshareInUa.PATTERN_OFFLINE, html):
            result.append((url, 0, 1, url))
        else:
            name = re.search(FileshareInUa.PATTERN_FILENAME, html)
            if name is None:
                # Page loaded but no filename found: treat as offline.
                result.append((url, 0, 1, url))
                continue
            name = name.group(1)
            size = re.search(FileshareInUa.PATTERN_FILESIZE, html)
            # BUGFIX: guard against a missing size match instead of
            # crashing with AttributeError on `size.group(1)`.
            size = parseFileSize(size.group(1)) if size is not None else 0
            result.append((name, size, 3, url))
    yield result
def getInfo(urls):
    """Check fileshare.in.ua links; yields one list of
    (name, size, status, url) tuples (status 1 = offline)."""
    result = []
    for url in urls:
        html = getURL(url)
        if re.search(FileshareInUa.PATTERN_OFFLINE, html):
            result.append((url, 0, 1, url))
        else:
            name = re.search(FileshareInUa.PATTERN_FILENAME, html)
            if name is None:
                # Page loaded but no filename found: treat as offline.
                result.append((url, 0, 1, url))
                continue
            name = name.group(1)
            size = re.search(FileshareInUa.PATTERN_FILESIZE, html)
            # BUGFIX: guard against a missing size match instead of
            # crashing with AttributeError on `size.group(1)`.
            size = parseFileSize(size.group(1)) if size is not None else 0
            result.append((name, size, 3, url))
    yield result
def handlePremium(self, pyfile):
    """Resolve a premium link through the alldebrid.com service and
    store the (scheme-normalized) download url in self.link."""
    password = self.getPassword()
    response = self.load("http://www.alldebrid.com/service.php",
                         get={'link': pyfile.url, 'json': "true", 'pw': password})
    data = json_loads(response)
    self.logDebug("Json data", data)

    if data['error']:
        if data['error'] == "This link isn't available on the hoster website.":
            self.offline()
        else:
            self.logWarning(data['error'])
            self.tempOffline()
    else:
        # Adopt the service's filename unless a real (non-.tmp) name is set.
        name = pyfile.name
        if name and not name.endswith('.tmp'):
            pyfile.name = data['filename']
        pyfile.size = parseFileSize(data['filesize'])

        link = data['link']
        if self.getConfig('ssl'):
            link = link.replace("http://", "https://")
        else:
            link = link.replace("https://", "http://")
        self.link = link
def loadAccountInfo(self, user, req):
    """Read premium status, expiry and remaining traffic from the
    filer.net profile page."""
    self.html = req.load("https://filer.net/profile")

    # Free accounts are recognized directly.
    if re.search(self.FREE_PATTERN, self.html):
        return {"premium": False, "validuntil": None, "trafficleft": None}

    # NOTE(review): attribute name "WALID_..." looks like a typo for
    # "VALID_..." — the constant is declared outside this view, so it
    # is kept as-is; confirm against the class definition.
    until = re.search(self.WALID_UNTIL_PATTERN, self.html)
    traffic = re.search(self.TRAFFIC_PATTERN, self.html)
    if not (until and traffic):
        self.logError('Unable to retrieve account information - Plugin may be out of date')
        return {"premium": False, "validuntil": None, "trafficleft": None}

    # Expiry format: "dd.mm.YYYY HH:MM:SS" -> epoch seconds.
    validuntil = int(time.mktime(time.strptime(until.group(1), "%d.%m.%Y %H:%M:%S")))
    trafficleft = parseFileSize(traffic.group(1)) / 1024  # bytes -> KiB
    return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
def handlePremium(self, pyfile):
    """Query alldebrid.com for a premium mirror of pyfile.url and
    record the download link with the preferred http/https scheme."""
    pw = self.getPassword()
    raw = self.load("http://www.alldebrid.com/service.php",
                    get={'link': pyfile.url, 'json': "true", 'pw': pw})
    data = json_loads(raw)
    self.logDebug("Json data", data)

    error = data['error']
    if error:
        if error == "This link isn't available on the hoster website.":
            self.offline()
        else:
            self.logWarning(error)
            self.tempOffline()
        return

    # Only overwrite names that are set and not temporary placeholders.
    if pyfile.name and not pyfile.name.endswith('.tmp'):
        pyfile.name = data['filename']
    pyfile.size = parseFileSize(data['filesize'])

    self.link = data['link']
    if self.getConfig('ssl'):
        self.link = self.link.replace("http://", "https://")
    else:
        self.link = self.link.replace("https://", "http://")
def parseTraffic(self, string):  #returns kbyte
    """Parse a human-readable size string and convert bytes to KiB."""
    total_bytes = parseFileSize(string)
    return total_bytes / 1024
def parseTraffic(self, value, unit=None):  #: return bytes
    """Parse `value` (optionally with an explicit `unit`) into bytes.

    A numeric value with no unit supplied is treated as kilobytes.
    """
    numeric_without_unit = not unit and not isinstance(value, basestring)
    if numeric_without_unit:
        unit = "KB"
    return parseFileSize(value, unit)
continue else: online = True if online: info['status'] = 2 if 'N' in info['pattern']: info['name'] = replace_patterns(urllib.unquote(info['pattern']['N'].strip()), cls.NAME_REPLACEMENTS) if 'S' in info['pattern']: size = replace_patterns(info['pattern']['S'] + info['pattern']['U'] if 'U' in info['pattern'] else info['pattern']['S'], cls.SIZE_REPLACEMENTS) info['size'] = parseFileSize(size) elif isinstance(info['size'], basestring): unit = info['units'] if 'units' in info else None info['size'] = parseFileSize(info['size'], unit) if 'H' in info['pattern']: hashtype = info['pattern']['T'] if 'T' in info['pattern'] else "hash" info[hashtype] = info['pattern']['H'] if not info['pattern']: info.pop('pattern', None) return info
if found: premium = True trafficleft = -1 try: self.logDebug(found.group(1)) validuntil = mktime(strptime(found.group(1), "%d %B %Y")) except Exception, e: self.logError(e) else: found = re.search(self.TRAFFIC_LEFT_PATTERN, html) if found: trafficleft = found.group(1) if "Unlimited" in trafficleft: premium = True else: trafficleft = parseFileSize(trafficleft) / 1024 return ({ "validuntil": validuntil, "trafficleft": trafficleft, "premium": premium }) def login(self, user, data, req): html = req.load('%slogin.html' % self.MAIN_PAGE, decode=True) action, inputs = parseHtmlForm('name="FL"', html) if not inputs: inputs = {"op": "login", "redirect": self.MAIN_PAGE} inputs.update({"login": user, "password": data['password']})
if found: premium = True trafficleft = -1 try: self.logDebug(found.group(1)) validuntil = mktime(strptime(found.group(1), "%d %B %Y")) except Exception, e: self.logError(e) else: found = re.search(self.TRAFFIC_LEFT_PATTERN, html) if found: trafficleft = found.group(1) if "Unlimited" in trafficleft: premium = True else: trafficleft = parseFileSize(trafficleft) / 1024 return {"validuntil": validuntil, "trafficleft": trafficleft, "premium": premium} def login(self, user, data, req): html = req.load('http://www.easybytez.com/login.html', decode=True) action, inputs = parseHtmlForm('name="FL"', html) inputs.update({"login": user, "password": data['password'], "redirect": "http://www.easybytez.com/"}) html = req.load(action, post=inputs, decode=True) if 'Incorrect Login or Password' in html or '>Error<' in html: self.wrongPassword()
def parseTraffic(self, string):  #returns kbyte
    """Convert a size string to KiB via parseFileSize (which yields bytes)."""
    nbytes = parseFileSize(string)
    return nbytes / 1024
continue else: online = True if online: info['status'] = 2 if 'N' in info['pattern']: info['name'] = replace_patterns(urllib.unquote(info['pattern']['N'].strip()), cls.NAME_REPLACEMENTS) if 'S' in info['pattern']: size = replace_patterns(info['pattern']['S'] + info['pattern']['U'] if 'U' in info['pattern'] else info['pattern']['S'], cls.SIZE_REPLACEMENTS) info['size'] = parseFileSize(size) elif isinstance(info['size'], basestring): unit = info['units'] if 'units' in info else None info['size'] = parseFileSize(info['size'], unit) if 'H' in info['pattern']: hashtype = info['pattern']['T'] if 'T' in info['pattern'] else "hash" info[hashtype] = info['pattern']['H'] if not info['pattern']: info.pop('pattern', None) return info