def _episode_worker(self, link):
    # First extract the anime and episode ids
    results = re.search(r'/watch/([0-9]+)/.+?/([0-9]+)', link)
    id = results.group(1)
    episode_number = int(results.group(2))

    if not self._should_process(episode_number):
        return

    self.logger.info('Processing episode {}'.format(episode_number))

    # Then get the download link
    src = self.session.post('http://moetube.net/rui.php', data={
        'id': id,
        'ep': episode_number,
        'chk': 1
    }).text

    try:
        quality = get_quality(src)
    except Exception:
        return

    self._add_source(episode_number, Source(src, quality))
    self.logger.info('Done processing episode {}'.format(episode_number))
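# get_quality() is used by every unshortener below but is defined elsewhere
# in the project. A minimal sketch of what such a helper could look like,
# purely an assumption for illustration (the real implementation may differ):
import re

def get_quality(url):
    # Guess the vertical resolution from a marker like "720p" in the url;
    # raise when no marker is found so callers can fall back or skip.
    match = re.search(r'([0-9]{3,4})p', url)
    if match is None:
        raise ValueError('Could not deduce quality from {}'.format(url))
    return int(match.group(1))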
def unshorten(url, quality=None):
    '''Inspired by https://github.com/Zanzibar82/plugin.video.streamondemand/
    blob/bf655c445e77be57ef4ece84f11b5899a41e0939/servers/openload.py.'''
    import js2py
    html = requests.get(url).text

    # Extract the obfuscated code from which we can deduce the url
    matches = re.findall(r'(゚ω゚ノ= /｀ｍ´）ノ ~┻━┻.+?)</script>', html)
    matches = [AADecoder(m).decode() for m in matches]
    logger.debug(matches)

    # Search the index (the page has two urls, we need to select the correct
    # one).
    js = re.search(r'window.+[\n\s]?.+= (.+?);', matches[0]).group(1)
    index = js2py.eval_js(js)
    logger.debug(index)

    # Now we get the valid url
    section = matches[index]
    function = re.search(r'window.+\[\d\]=(\(.+\));window', section).group(1)
    url = conv(function)
    logger.debug(url)

    if quality is None:
        quality = get_quality(url)

    return [Source(url, quality)]
def unshorten(url, quality=None):
    html = requests.get(url).text
    src = re.search(r'downloadlink: "(.+?)",', html).group(1)

    if quality is None:
        logger.warning('[videonest] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
def unshorten(url, quality=None):
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    src = ''

    if quality is None:
        logger.warning('[{{name}}] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
def unshorten(url, quality=None):
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    link = soup.find('a', {'id': 'download-btn'}).get('href')
    logger.debug('[solidfiles] Found {}'.format(link))

    if quality is None:
        quality = get_quality(link)

    return [Source(link, quality)]
def unshorten(url, quality=None):
    html = requests.get(url).text

    if 'File was deleted' in html:
        logger.warning('[mp4upload] File at {} was deleted'.format(url))
        return []

    src = re.search(r'"file": "(.+)"', html).group(1)

    if quality is None:
        logger.warning('[mp4upload] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
def unshorten(url, quality=None):
    html = requests.get(url).text
    logger.debug(html)

    # Decode the base64-encoded iframe markup (not its bytes repr)
    frame_html = base64.b64decode(
        re.search(r'atob\(\'(.+)\'\)', html).group(1)
    ).decode('utf-8')

    src = re.search(r'<source src="(.+?)" type="', frame_html).group(1)
    logger.debug('[stream.moe] found source {}'.format(src))

    if quality is None:
        logger.warning('[stream.moe] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
def unshorten(url, quality=None):
    source = requests.get(url).text

    # Quoted url to a page that redirects to the source
    quoted_url = re.search(r'_url = "(.+?)";', source).group(1)
    query_string = urllib.parse.urlparse(
        urllib.parse.unquote(quoted_url)).query

    # The script just redirects by decoding the passed base64 url
    encoded_url = urllib.parse.parse_qs(query_string)['url'][0]
    src = base64.b64decode(encoded_url).decode('utf-8')

    if quality is None:
        logger.warning('[playbb] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
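# Worked example of the decode chain above, round-tripped with made-up
# values (the host and filename here are assumptions for illustration only):
import base64
import urllib.parse

encoded = base64.b64encode(b'http://cdn.example.com/ep01.mp4').decode('ascii')
quoted_url = urllib.parse.quote('http://example.com/redirect?url=' + encoded)

# Decoding the way unshorten() above does recovers the original url
query_string = urllib.parse.urlparse(
    urllib.parse.unquote(quoted_url)).query
src = base64.b64decode(
    urllib.parse.parse_qs(query_string)['url'][0]).decode('utf-8')
assert src == 'http://cdn.example.com/ep01.mp4'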
def unshorten(url, quality=None):
    import js2py
    html = requests.get(url).text

    # fmt_stream_map pairs itags with urls ("itag|url"), so splitting on
    # '|' and taking index 1 yields the direct url
    javascript = '{}.split(\'|\')[1]'.format(
        re.search(r'\["fmt_stream_map"\,(".+?")\]', html).group(1))
    logger.debug('Executing: {}'.format(javascript))

    src = js2py.eval_js(javascript)
    logger.debug('[google drive] found source {}'.format(src))

    if quality is None:
        logger.warning('[google drive] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
def unshorten(url, quality=None):
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    form = soup.find('form', {'name': 'F1'})

    payload = {}
    fields = ['op', 'id', 'rand', 'referer', 'method_free', 'method_premium']
    for input_tag in form.select('input'):
        if input_tag.get('name') not in fields:
            continue
        payload[input_tag.get('name')] = input_tag.get('value')

    logger.debug('[tusfiles] {}'.format(payload))

    # Submit the download form; the final (redirected) url is the source
    src = requests.post(url, data=payload, stream=True).url

    if quality is None:
        logger.warning('[tusfiles] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
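# Worked example of the payload extraction above on a minimal form
# (hypothetical HTML, for illustration only):
from bs4 import BeautifulSoup

html = ('<form name="F1">'
        '<input name="op" value="download2">'
        '<input name="id" value="abc123">'
        '<input name="session" value="dropped">'
        '</form>')
form = BeautifulSoup(html, 'html.parser').find('form', {'name': 'F1'})
fields = ['op', 'id', 'rand', 'referer', 'method_free', 'method_premium']
payload = {i.get('name'): i.get('value')
           for i in form.select('input')
           if i.get('name') in fields}
assert payload == {'op': 'download2', 'id': 'abc123'}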
def unshorten(url, quality=None):
    if not url.startswith('https'):
        url = 'https' + url[4:]

    def get_payload(source, selector, fields):
        soup = BeautifulSoup(source, 'html.parser')
        form = soup.select_one(selector)
        payload = {}
        for input_tag in form.find_all('input'):
            if input_tag.get('name') not in fields:
                continue
            payload[input_tag.get('name')] = input_tag.get('value')
        return payload

    download_1_payload = get_payload(
        requests.get(url).text,
        'form',
        ['op', 'usr_login', 'id', 'fname', 'referer', 'method_free'])

    download_2_payload = get_payload(
        requests.post(url, data=download_1_payload).text,
        'form[name="F1"]',
        ['op', 'usr_login', 'id', 'fname', 'referer', 'method_free'])

    soup = BeautifulSoup(
        requests.post(url, data=download_2_payload).text, 'html.parser')
    src = soup.select('.text-center a')[0].get('href')

    if quality is None:
        logger.warning('[upload.af] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
def unshorten(url, quality=None):
    id = re.search(r'https?://bakavideo.tv/embed/(.+)', url).group(1)
    data = requests.get(
        'https://bakavideo.tv/get/files.embed?f={}'.format(id)).json()

    html = base64.b64decode(data['content']).decode('utf-8').replace(
        '\n', '').replace('\t', '')
    soup = BeautifulSoup(html, 'html.parser')

    source_tag = soup.find('source')
    if not source_tag:
        # Stay consistent with the other unshorteners and return an empty
        # source list when nothing playable is found
        return []

    src = source_tag.get('src')
    logger.debug('[bakavideo] found source {}'.format(src))

    if quality is None:
        logger.warning('[bakavideo] quality was not passed')
        quality = get_quality(src)

    return [Source(src, quality)]
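# Usage sketch: every unshortener above exposes the same
# unshorten(url, quality=None) -> [Source, ...] interface, so a caller can
# keep a host -> module registry and dispatch on the url. The registry and
# function name below are assumptions for illustration, not part of the
# project:
from urllib.parse import urlparse

def unshorten_any(url, quality=None, registry=None):
    # registry maps a netloc (e.g. 'mp4upload.com') to a module exposing
    # unshorten(url, quality=None)
    handler = (registry or {}).get(urlparse(url).netloc)
    if handler is None:
        logger.warning('No unshortener registered for {}'.format(url))
        return []
    return handler.unshorten(url, quality)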