Example #1
 def __call__(self, environ, start_response):
     method = environ.get("REQUEST_METHOD")
     path = environ.get("PATH_INFO", "").lstrip("/")
     if not path:
         return self.wsgi_redirect(start_response, "/tale/")
     if path.startswith("tale/"):
         if method in ("GET", "POST"):
             if method == "POST":
                 clength = int(environ["CONTENT_LENGTH"])
                 if clength > 1e6:
                     raise ValueError("Maximum content length exceeded")
                 inputstream = environ["wsgi.input"]
                 qs = inputstream.read(clength)
                 if sys.version_info >= (3, 0):
                     qs = qs.decode("utf-8")
             elif method == "GET":
                 qs = environ.get("QUERY_STRING", "")
             if sys.version_info < (3, 0):
                 parameters = singlyfy_parameters(parse_qs(qs))
                 for key, value in parameters.items():
                     parameters[key] = value.decode("UTF-8")
             else:
                 parameters = singlyfy_parameters(parse_qs(qs, encoding="UTF-8"))
             return self.wsgi_route(environ, path[5:], parameters, start_response)
         else:
             return self.wsgi_invalid_request(start_response)
     return self.wsgi_not_found(start_response)
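Both branches above hand the parse_qs output to singlyfy_parameters before routing. parse_qs always maps each key to a list of values, so a helper like this has to collapse the lists; the sketch below is a hypothetical reconstruction (the real singlyfy_parameters is not shown), assuming first-value-wins semantics:

def singlyfy_parameters(parameters):
    # parse_qs yields {'key': ['value', ...]}; keep a single value per key.
    # Hypothetical helper -- the actual implementation is not part of this example.
    return dict((key, values[0]) for key, values in parameters.items())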
Example #2
def vk(url):
    try:
        try: oid, id = urlparse.parse_qs(urlparse.urlparse(url).query)['oid'][0] , urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        except: oid, id = re.compile('\/video(.*)_(.*)').findall(url)[0]
        try: hash = urlparse.parse_qs(urlparse.urlparse(url).query)['hash'][0]
        except: hash = vk_hash(oid, id)

        u = 'http://api.vk.com/method/video.getEmbed?oid=%s&video_id=%s&embed_hash=%s' % (oid, id, hash)
 
        result = client.request(u)
        result = re.sub(r'[^\x00-\x7F]+',' ', result)

        try: result = json.loads(result)['response']
        except: result = vk_private(oid, id)

        url = []
        try: url += [{'quality': 'HD', 'url': result['url720']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url540']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url480']}]
        except: pass
        if url: return url
        try: url += [{'quality': 'SD', 'url': result['url360']}]
        except: pass
        if url: return url
        try: url += [{'quality': 'SD', 'url': result['url240']}]
        except: pass

        if url: return url

    except:
        return
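Resolvers like vk() repeat the idiom urlparse.parse_qs(urlparse.urlparse(url).query)['key'][0] for every parameter. A small helper makes the pattern explicit; this sketch uses the Python 3 spelling (on Python 2 the same functions live in the urlparse module):

from urllib.parse import urlparse, parse_qs

def query_param(url, name, default=None):
    # Return the first value of a query-string parameter, or `default` if absent.
    values = parse_qs(urlparse(url).query).get(name)
    return values[0] if values else default

# query_param('http://vk.com/video_ext.php?oid=1&id=2', 'oid')  -> '1'
# query_param('http://vk.com/video_ext.php?oid=1&id=2', 'hash') -> None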
Example #3
def get_access_keys():
    
    #Request Token
    app_oauth = OAuth1(keys.CONSUMER_KEY, client_secret = keys.CONSUMER_SECRET)
    authorize_data = requests.post(url = REQUEST_TOKEN_URL, auth = app_oauth)
    authorize_credentials = parse_qs(authorize_data.content)
    authorize_key = authorize_credentials.get('oauth_token')[0]
    authorize_secret = authorize_credentials.get('oauth_token_secret')[0]

    #Authorize Token
    authorization_url = AUTHORIZE_BASE_URL + authorize_key
    print 'Authorize this application at: ' + authorization_url
    verify_pin = raw_input('Enter your verification pin: ')

    #Obtain Access Token
    access_token = OAuth1(keys.CONSUMER_KEY,
                          client_secret = keys.CONSUMER_SECRET,
                          resource_owner_key = authorize_key,
                          resource_owner_secret = authorize_secret,
                          verifier = verify_pin)
    access_data = requests.post(url = ACCESS_TOKEN_URL, auth = access_token)
    print access_data
    #Need to add error code handling
    if access_data.status_code == 200:
        access_credentials = parse_qs(access_data.content)
        access_token = access_credentials.get('oauth_token')[0]
        access_secret = access_credentials.get('oauth_token_secret')[0]
        print 'Add the following to plat/apikeys/twitter_keys.py:'
        print "ACCESS_KEY = '" + access_token + "'"
        print "ACCESS_SECRET = '" + access_secret + "'"
        exit()
    else:
        print 'Access token request failed: HTTP %d' % access_data.status_code
        exit()
Example #4
    def test_url_sort_options(self):
        feed = 'http://www.my.jobs/jobs/feed/rss?date_sort=False'

        # Test to make sure sort by "Relevance" has '&date_sort=False' added
        # a single time
        feed_url = url_sort_options(feed, "Relevance")
        parsed = urlparse(feed_url)
        query = parse_qs(parsed.query)
        self.assertEquals(parsed.path, "/jobs/feed/rss")
        self.assertEquals(query['date_sort'], [u'False'])
        # If a frequency isn't specified, days_ago should be missing from
        # the url.
        self.assertNotIn('days_ago', query)

        # Test to make sure sort by "Date" doesn't have anything added
        feed_url = url_sort_options(feed, "Date")
        self.assertEquals(feed_url, "http://www.my.jobs/jobs/feed/rss")

        # Test to make sure that passing in a frequency does in fact
        # add the frequency to the feed url.
        feed_url = url_sort_options(feed, "Relevance", frequency='D')
        query = parse_qs(urlparse(feed_url).query)
        self.assertEquals(query['days_ago'][0], '1')
        feed_url = url_sort_options(feed, "Relevance", frequency='W')
        query = parse_qs(urlparse(feed_url).query)
        self.assertEquals(query['days_ago'][0], '7')
        feed_url = url_sort_options(feed, "Relevance", frequency='M')
        query = parse_qs(urlparse(feed_url).query)
        self.assertEqual(query['days_ago'][0], '30')
Example #5
 def setup_oauth(self):
     # Request token
     oauth = OAuth1(CONSUMER_KEY, client_secret=CONSUMER_SECRET)
     r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)
     credentials = parse_qs(r.content)
     
     resource_owner_key = credentials.get('oauth_token')[0]
     resource_owner_secret = credentials.get('oauth_token_secret')[0]
     
     # Authorize
     authorize_url = AUTHORIZE_URL + resource_owner_key
     print 'Please go here and authorize: ' + authorize_url
     
     verifier = raw_input('Please input the verifier: ')
     oauth = OAuth1(CONSUMER_KEY,
                    client_secret=CONSUMER_SECRET,
                    resource_owner_key=resource_owner_key,
                    resource_owner_secret=resource_owner_secret,
                    verifier=verifier)
     
     # Finally, Obtain the Access Token
     r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)
     credentials = parse_qs(r.content)
     token = credentials.get('oauth_token')[0]
     secret = credentials.get('oauth_token_secret')[0]
     
     return token, secret
Example #6
    def __init__(self, method, url, content=""):
        """
			method:		"POST" or "GET"
			url:		complete url
			content:	request contents(optional)

			* parameters both in content and url are given precent encoded.
			* but here I'd better decode them and store them in a dict
		"""
        self.method = method
        self.content = content
        result = urlparse.urlsplit(url)
        # parse base_url, e.g. "https://xxxx/xxx/x",
        # and url (query included), e.g. "/xxx/x?a=1"
        self.base_url = "%s://%s%s" % (result.scheme, result.netloc, result.path)
        self.url = result.path
        if result.query != "":
            self.url += "?" + result.query
            # break params into dict
        self.params = urlparse.parse_qs(result.query)
        # parse content as well ...
        # note: result of urlparse.parse_qs seems decoded...
        self.params.update(urlparse.parse_qs(content))
        # flatten: assume each key has exactly one value in its list
        self.params = dict((k, v[0]) for k, v in self.params.iteritems())
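As the comments in __init__ note, parse_qs percent-decodes its input, so the stored params hold plain strings. A quick check with the Python 3 spelling (Python 2 uses urlparse.parse_qs):

from urllib.parse import parse_qs

params = parse_qs('name=J%C3%BCrgen&tag=a%20b')
# {'name': ['Jürgen'], 'tag': ['a b']} -- values arrive decoded
single = dict((k, v[0]) for k, v in params.items())
# {'name': 'Jürgen', 'tag': 'a b'} -- same flattening as above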
Example #7
    def send_head(self):
        """Common code for GET and HEAD commands.

        Here be dragons

        """

        # src http://stackoverflow.com/a/5075477
        url = self.path
        parsed = urlparse.urlparse(url)

        query = parsed.query
        if query:
            # remove last character '/' in query
            if query[-1] == "/":
                query = query[:-1]
            try:
                data = "".join(urlparse.parse_qs(query)["data"])
                secret_try = "".join(urlparse.parse_qs(query)["secret"])
            except:
                self.send_error(400, "Bad Request")
                return None
            if secret_try != secret:
                self.send_error(401, "Unauthorized")
                return None

            # src http://stackoverflow.com/questions/13745648/running-bash-script-from-within-python
            # NOTE: shell=True with user-controlled `data` allows command injection
            subprocess.call(script_name + " " + data, shell=True)

        self.send_response(200)
Example #8
def gather_teams(years):
    """Gathers team names and ID numbers in the specified league"""
    for year in years:
        url = "http://games.espn.go.com/ffl/standings?leagueId=%s&seasonId=%s" % (args.league, year)
        ourUrl = opener.open(url).read()
        soup = BeautifulSoup(ourUrl)
        for num, division in enumerate(soup.findAll(bgcolor='#ffffff', id=re.compile(r'\d'))):
            for i in division.findAll('tr', {'class': 'evenRow bodyCopy sortableRow'}):
                title = i.find('td').text
                owner = string.capwords(title[title.find("(")+1:title.find(")")])
                pf = i.find('td', {'class': 'sortablePF'}).text
                pa = i.find('td', {'class': 'sortablePA'}).text
                parsed = urlparse.urlparse(i.a['href'])  # parse url parameters
                id = urlparse.parse_qs(parsed.query)['teamId'][0]
                name = i.a.text
                teams.append(TeamID(name, owner, int(id), pf, pa, year, num+1))
            for i in division.findAll('tr', {'class': 'oddRow bodyCopy sortableRow'}):
                title = i.find('td').text
                owner = string.capwords(title[title.find("(")+1:title.find(")")])
                pf = i.find('td', {'class': 'sortablePF'}).text
                pa = i.find('td', {'class': 'sortablePA'}).text
                parsed = urlparse.urlparse(i.a['href'])  # parse url parameters
                id = urlparse.parse_qs(parsed.query)['teamId'][0]
                name = i.a.text
                teams.append(TeamID(name, owner, int(id), pf, pa, year, num+1))
Example #9
def resolve(url):
    try:
        id  = urlparse.parse_qs(urlparse.urlparse(url).query)['c'][0] 
        try:
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except:
            referer= url 
        url = 'http://castamp.com/embed.php?c=%s&vwidth=640&vheight=380'%id
        pageUrl=url

        
        result = client.request(url, referer=referer,headers = {'Host':'www.castamp.com'})
        result = urllib.unquote(result).replace('unescape(','').replace("'+'",'')
        rplcs = re.findall('=(.+?).replace\([\"\'](.+?)[\"\']\s*,\s*[\"\']([^\"\']*)[\"\']',result)
        result = re.sub('\/\*[^*]+\*\/','',result)
        var = re.compile('var\s(.+?)\s*=\s*[\'\"](.+?)[\'\"]').findall(result)
        var_dict = dict(var)
        file = re.compile('\'file\'\s*:\s*(.+?),').findall(result)[-1]
        file = var_dict[file]
        rtmp = re.compile('(rtmp://[^\"\']+)').findall(result)[0]
        for r in rplcs:
            file = file.replace(r[1],r[2])
        url = rtmp + ' playpath=' + file + ' swfUrl=http://p.castamp.com/cplayer.swf' + ' flashver=' + constants.flash_ver() + ' live=true timeout=15 swfVfy=1 pageUrl=' + pageUrl
        
        return url
    
    except:
        return
Example #10
    def assertMetaEqual(self, meta1, meta2):
        meta1next = meta1.pop('next')
        meta2next = meta2.pop('next')
        meta1previous = meta1.pop('previous')
        meta2previous = meta2.pop('previous')

        meta1next_query = None
        meta2next_query = None
        meta1previous_query = None
        meta2previous_query = None

        self.assertEqual(meta1, meta2)

        if meta1next is not None:
            meta1next = urlparse.urlparse(meta1next)
            meta1next_query = urlparse.parse_qs(meta1next.query, strict_parsing=True)
        if meta2next is not None:
            meta2next = urlparse.urlparse(meta2next)
            meta2next_query = urlparse.parse_qs(meta2next.query, strict_parsing=True)
        if meta1previous is not None:
            meta1previous = urlparse.urlparse(meta1previous)
            meta1previous_query = urlparse.parse_qs(meta1previous.query, strict_parsing=True)
        if meta2previous is not None:
            meta2previous = urlparse.urlparse(meta2previous)
            meta2previous_query = urlparse.parse_qs(meta2previous.query, strict_parsing=True)

        self.assertEqual(getattr(meta1next, 'path', None), getattr(meta2next, 'path', None))
        self.assertEqual(getattr(meta1previous, 'path', None), getattr(meta2previous, 'path', None))

        self.assertEqual(meta1next_query, meta2next_query)
        self.assertEqual(meta1previous_query, meta2previous_query)
Example #11
    def movie(self, imdb, title, year):
        try:
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = self.request(query, 'movie_table')
            result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})

            title = cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[0] for i in result if title == cleantitle.get(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #12
def resolve(url):
    try:
        addonid = 'script.module.liveresolver'

        user, password = control.setting('ustvnow_email'), control.setting('ustvnow_pass')
        if (user == '' or password == ''):
            user, password = control.addon(addonid).getSetting('ustvnow_email'), control.addon(addonid).getSetting('ustvnow_pass')
        if (user == '' or password == ''): return ''

        token = urllib.urlencode({'username': user, 'password': password})
        token = 'http://m-api.ustvnow.com/gtv/1/live/login?%s&device=gtv&redir=0' % token
        token = json.loads(client.request(token))['token']

        result = 'http://m-api.ustvnow.com/gtv/1/live/playingnow?token=%s' % token
        result = json.loads(client.request(result))

        try:
            scode = urlparse.parse_qs(urlparse.urlparse(url).query)['scode'][0]
        except:
            stream_code = urlparse.parse_qs(urlparse.urlparse(url).query)['stream_code'][0]
            scode = [i['scode'] for i in result['results'] if i['stream_code'] == stream_code][0]

        key = (result['globalparams']['passkey']).replace('key=', '')

        url = 'http://m-api.ustvnow.com/stream/1/live/view?token=%s&key=%s&scode=%s' % (token, key, scode)
        url = json.loads(client.request(url))['stream']

        return url
    except:
        return ''
Example #13
    def test_expired_creating_keystone_token(self):
        CONF.oauth1.access_token_duration = -1
        consumer = self._create_single_consumer()
        consumer_id = consumer.get('id')
        consumer_secret = consumer.get('secret')
        self.consumer = oauth1.Consumer(consumer_id, consumer_secret)
        self.assertIsNotNone(self.consumer.key)

        url, headers = self._create_request_token(self.consumer,
                                                  self.project_id)
        content = self.post(url, headers=headers)
        credentials = urlparse.parse_qs(content.result)
        request_key = credentials.get('oauth_token')[0]
        request_secret = credentials.get('oauth_token_secret')[0]
        self.request_token = oauth1.Token(request_key, request_secret)
        self.assertIsNotNone(self.request_token.key)

        url = self._authorize_request_token(request_key)
        body = {'roles': [{'id': self.role_id}]}
        resp = self.put(url, body=body, expected_status=200)
        self.verifier = resp.result['token']['oauth_verifier']

        self.request_token.set_verifier(self.verifier)
        url, headers = self._create_access_token(self.consumer,
                                                 self.request_token)
        content = self.post(url, headers=headers)
        credentials = urlparse.parse_qs(content.result)
        access_key = credentials.get('oauth_token')[0]
        access_secret = credentials.get('oauth_token_secret')[0]
        self.access_token = oauth1.Token(access_key, access_secret)
        self.assertIsNotNone(self.access_token.key)

        url, headers, body = self._get_oauth_token(self.consumer,
                                                   self.access_token)
        self.post(url, headers=headers, body=body, expected_status=401)
Example #14
 def connect(self, mongo_uri):
     db_name, options = None, {}
     u = urlparse.urlparse(mongo_uri)
     if u.scheme == "file":
         path = u.path
         if "?" in u.path:
             path, self._options = u.path.split("?", 1)
             self._options = urlparse.parse_qs(self._options) if self._options else {}
         path = u.netloc + path
         self.db = _MongoImportFileSet(path)
         if "uniq" in self._options and "md5" in self._options["uniq"]:
             self._quick_uniq = _IdHashPairs(path)
     elif u.scheme == "mongodb":
         if "?" in u.path and u.query == "":
             # urlparse didn't split it properly: u.path is '/dbname?options'
             db_name, self._options = u.path.split("?", 1)
             self._options = urlparse.parse_qs(self._options) if self._options else {}
         else:
             db_name = u.path
         if db_name.startswith("/"):
             db_name = db_name[1:]
             # print 'Connecting to db %s on %s with options.' % (db_name, mongo_uri, options)
         mongo = pymongo.Connection(mongo_uri)
         self.db = mongo[db_name]
         if "uniq" in self._options and "md5" in self._options["uniq"]:
             self._quick_uniq = False
     else:
         raise ValueError("Invalid URI scheme: '%s'. Can only accept 'file' or 'mongodb'" % u.scheme)
Example #15
def resolve(url):
    try:
        try:
            cid  = urlparse.parse_qs(urlparse.urlparse(url).query)['cid'][0] 
        except:
            cid = re.compile('channel/(.+?)(?:/|$)').findall(url)[0]

        
        try:
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except:
            referer='http://castalba.tv'
        
        url = 'http://castalba.tv/embed.php?cid=%s&wh=600&ht=380&r=%s'%(cid,urlparse.urlparse(referer).netloc)
        pageUrl=url

        result = client.request(url, referer=referer,mobile=True)
        result = decryptionUtils.doDemystify(result)
        result=urllib.unquote(result)
        var = re.compile('var\s(.+?)\s*=\s*[\'\"](.+?)[\'\"]').findall(result)
        var_dict = dict(var)

        if 'm3u8' in result:
            url = re.compile('(?:var\s*)?file.+?\s*=\s*(?:unescape\()[\'\"](.+?)[\'\"]').findall(result)[-1]
            url = 'http://' + url + '.m3u8'
            url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url,'X-Requested-With':constants.get_shockwave()})
            log("Castalba: Found m3u8 url: " + url)
            
        else:
            try:
                filePath = re.compile("'file'\s*:\s*(?:unescape\()?'(.+?)'").findall(result)[0]
                
            except:
                file = re.findall('(?:var\s*)?file\s*=\s*(?:unescape\()?(?:\'|\")(.+?)(?:\'|\")',result)[-1]
                try:
                    file2 = re.findall("'file':\s*unescape\(file\)\s*\+\s*unescape\('(.+?)'\)",result)[0]
                    filePath = file+file2
                except:
                    filePath = file
            swf = re.compile("'flashplayer'\s*:\s*\"(.+?)\"").findall(result)[0]
            
            sm = re.findall("'streamer':(.+?),",result)[0]
            strm_funcs = re.findall('function\s*(.+?)\s*\{([^\}]+)',result,flags=re.DOTALL)
            for f in strm_funcs:
                if f[0] in ['(p,a,c,k,e,r)','()']:
                    continue
                if '%s'%f[0] in sm:
                    strm_func = f[1]
                    break
            strm_func = re.sub('\s//[^;]+','',strm_func)
            streamer = 'rtmp://' +  re.findall('.*["\'/](\d{1,3}\.\d{1,3}\.\d{1,3}\.[^"\'/]+)["\'/]',strm_func)[0] + '/live'
            streamer = streamer.replace('///','//')
            url = streamer  + ' playpath=' + filePath +' swfUrl=' + swf + ' flashver=' + constants.flash_ver() +' live=true timeout=15 swfVfy=true pageUrl=' + pageUrl
            log("Castalba: Found rtmp link: " + url)

        return url
    
    except:
        log("Castalba: Resolver failed. Returning...")
        return
Example #16
    def server_response(self_httplib, path, method, body, header):
      parsed_url = urlparse.urlparse(path.lstrip('/'))

      if method == 'GET':
        parsed_qs = urlparse.parse_qs(parsed_url.query)
      else:
        parsed_qs = urlparse.parse_qs(body)

      if parsed_url.path == 'getComputerInformation' and \
         'computer_id' in parsed_qs:
        slap_computer = slapos.slap.Computer(parsed_qs['computer_id'][0])
        slap_computer._software_release_list = []
        partition = slapos.slap.ComputerPartition(parsed_qs['computer_id'][0],
            '0')
        partition._need_modification = True
        sr = slapos.slap.SoftwareRelease()
        sr._software_release = 'http://sr/'
        partition._software_release_document = sr
        partition._requested_state = 'stopped'
        slap_computer._computer_partition_list = [partition]
        return (200, {}, xml_marshaller.xml_marshaller.dumps(slap_computer))
      if parsed_url.path == 'softwareInstanceError' and \
         method == 'POST' and 'computer_partition_id' in parsed_qs:
        self.error += 1
        self.assertEqual(parsed_qs['computer_partition_id'][0], '0')
        return (200, {}, '')
      else:
        return (404, {}, '')
Example #17
    def do_GET(self):
        self.send_response(200)
        self.send_header("Content-type", 'text/plain')
        self.end_headers()

        parsed_path = urlparse.urlparse(self.path)
        print self.path
        if '/requestAds.html' in parsed_path.path:
            res = urlparse.parse_qs(parsed_path.query)
            self.deal_req_ads(base64.decodestring(unquote(res['req'][0])))

        elif '/winnoticeAds.html' in parsed_path.path:
            res = urlparse.parse_qs(parsed_path.query)
            self.deal_win_notice(res)

        elif '/showmonUrl.html' in parsed_path.path:
            res = urlparse.parse_qs(parsed_path.query)
            self.deal_showmon_url(res)
        elif '/click_url' in parsed_path.path:
            res = urlparse.parse_qs(parsed_path.query)
            self.deal_click_url(res)
        elif '/requestAdViewAds.html' in parsed_path.path:
            res = urlparse.parse_qs(parsed_path.query)
            self.deal_req_adview(res)
        elif '/adview_ec.jsp' in parsed_path.path or '/adview_es.jsp' in parsed_path.path:
            self.wfile.write(self.path)
            logger.info('adview %s', self.path)
Example #18
 def test_alias_custom_path(self):
     query_string = KISSmetrics.request.alias(key='foo', person='bar', identity='baz', path='get')
     assert urlparse(query_string).path == '/get'
     query_string = urlparse(query_string).query
     assert parse_qs(query_string)['_k'] == ['foo']
     assert parse_qs(query_string)['_p'] == ['bar']
     assert parse_qs(query_string)['_n'] == ['baz']
Example #19
    def server_response(self_httplib, path, method, body, header):
      parsed_url = urlparse.urlparse(path.lstrip('/'))

      if method == 'GET':
        parsed_qs = urlparse.parse_qs(parsed_url.query)
      else:
        parsed_qs = urlparse.parse_qs(body)

      if parsed_url.path == 'getComputerInformation' and \
         'computer_id' in parsed_qs:
        slap_computer = slapos.slap.Computer(parsed_qs['computer_id'][0])
        slap_computer._software_release_list = []
        partition = slapos.slap.ComputerPartition(parsed_qs['computer_id'][0],
            '0')
        partition._need_modification = True
        sr = slapos.slap.SoftwareRelease()
        sr._software_release = 'http://sr/'
        partition._software_release_document = sr
        partition._requested_state = 'stopped'
        slap_computer._computer_partition_list = [partition]
        return (200, {}, xml_marshaller.xml_marshaller.dumps(slap_computer))
      if parsed_url.path == 'softwareInstanceError' and \
         method == 'POST' and 'computer_partition_id' in parsed_qs:
        self.error = True
        self.assertEqual(parsed_qs['computer_partition_id'][0], '0')
        # XXX: Hardcoded dropPrivileges line ignore
        error_log = '\n'.join([line for line in parsed_qs['error_log'][0].splitlines()
                               if 'dropPrivileges' not in line])
        # end XXX
        self.assertEqual(error_log, 'The promise %r timed out' % 'timed_out_promise')
        return (200, {}, '')
      else:
        return (404, {}, '')
Example #20
def resolve(url):
    try:

        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        headers = { 'Referer': referer,
                                 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                                 'Content-Type' :'application/x-www-form-urlencoded',
                                 'Connection' : 'keep-alive',
                                 'Host' : 'www.zoomtv.me',
                                 'Origin' : urlparse.urlparse(referer).netloc,
                                 'User-Agent' : client.agent()
                                 }
        fid = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]
        pid = urlparse.parse_qs(urlparse.urlparse(url).query)['pid'][0]
        url = 'http://www.zoomtv.me/embed.php?v=%s&vw=650&vh=450'%fid
        pageUrl = url
        
        
        #get desktop stream
        #headers.update({ 'User-Agent' : 'Apple-iPhone/701.341' })
        post_data = urllib.urlencode({'uagent':'Apple-iPhone/701.341', 'pid':pid})
        result = req(url,post_data,headers)
        log(result)
        
        rtmp = re.findall('.*[^\w](\w+)\s*=.{0,20}(rtmp[^\']*).*(?:streamer.{0,20}\1).*',result)[0]
        
    
        # for HQ links (no rtmp); `streamer`, `file`, `ts`, `sg` and `auth`
        # are expected from page parsing not shown in this excerpt
        if rtmp is None:
            return streamer + '|%s' % urllib.urlencode({'user-agent':client.agent(),'Referer':referer})

        url = rtmp + ' playpath=' + file + ' swfUrl=http://static.zoomtv.me/player/jwplayer.6.7.4.swf flashver=' +constants.flash_ver() + ' conn=S:' + file + ' conn=S:'+ts+' conn=S:'+sg+' conn=S:'+auth+' live=1 timeout=15 token=H69d331eccdf347b swfVfy=1 pageUrl=' + pageUrl

        return url
    except:
        return
Example #21
 def test_record_custom_path(self):
     query_string = KISSmetrics.request.record(key='foo', person='bar', event='fizzed', path='get')
     assert urlparse(query_string).path == '/get'
     query_string = urlparse(query_string).query
     assert parse_qs(query_string)['_k'] == ['foo']
     assert parse_qs(query_string)['_p'] == ['bar']
     assert parse_qs(query_string)['_n'] == ['fizzed']
Example #22
    def get_movie(self, imdb, title, year):
        try:
            query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

            result = cloudflare.source(query)
            if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))

            r = client.parseDOM(result, 'li', attrs = {'class': 'first element.+?'})
            r += client.parseDOM(result, 'li', attrs = {'class': 'element.+?'})
            r += client.parseDOM(result, 'header', attrs = {'class': 'entry-header'})

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+? [(]\d{4}[)])').findall(i[1])) for i in result]
            result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #23
def get_msg(hinfo, binding, response=False):
    if binding == BINDING_SOAP:
        msg = hinfo["data"]
    elif binding == BINDING_HTTP_POST:
        _inp = hinfo["data"][3]
        i = _inp.find(TAG1)
        i += len(TAG1) + 1
        j = _inp.find('"', i)
        msg = _inp[i:j]
    elif binding == BINDING_HTTP_ARTIFACT:
        # either by POST or by redirect
        if hinfo["data"]:
            _inp = hinfo["data"][3]
            i = _inp.find(TAG1)
            i += len(TAG1) + 1
            j = _inp.find('"', i)
            msg = _inp[i:j]
        else:
            parts = urlparse(hinfo["url"])
            msg = parse_qs(parts.query)["SAMLart"][0]
    else: # BINDING_HTTP_REDIRECT
        parts = urlparse(hinfo["headers"][0][1])
        msg = parse_qs(parts.query)["SAMLRequest"][0]

    return msg
Example #24
def resolve(url):
    try:
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]

        page = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        page = 'http://p2pcast.tv/stream.php?id=%s&live=0&p2p=0&stretching=uniform' % page

        result = client.request(page, referer=referer)


        try:
            swf = re.compile('src\s*=[\'|\"](.+?player.+?\.js)[\'|\"]').findall(result)[0]
            swf = client.request(swf)
            swf = re.compile('flashplayer\s*:\s*[\'|\"](.+?)[\'|\"]').findall(swf)[0]
        except:
            swf = 'http://cdn.p2pcast.tv/jwplayer.flash.swf'


        url = re.compile('url\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = base64.b64decode(url)
        url = '%s|User-Agent=%s&Referer=%s' % (url, urllib.quote_plus(client.agent()), urllib.quote_plus(swf))

        return url
    except:
        return
Example #25
def do_facebook(url, environ, headers, options, cache):
    log('fb stuff')

    query = urlparse.urlparse(url).query

    if 'code' in query:
        # get real token from code
        code = urlparse.parse_qs(query)['code'][0]
        eurl = "https://graph.facebook.com/oauth/access_token?client_id={app_id}&redirect_uri={redirect_uri}&client_secret={app_secret}&code={code_parameter}".format(
            app_id=FBAPPID, app_secret=FBSECRET, code_parameter=code, redirect_uri="http://morss.it/:facebook/")
        token = urlparse.parse_qs(urllib2.urlopen(eurl).read().strip())['access_token'][0]

        # get long-lived access token
        eurl = "https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id={app_id}&client_secret={app_secret}&fb_exchange_token={short_lived_token}".format(
            app_id=FBAPPID, app_secret=FBSECRET, short_lived_token=token)
        values = urlparse.parse_qs(urllib2.urlopen(eurl).read().strip())

        ltoken = values['access_token'][0]
        expires = int(time.time() + int(values['expires'][0]))

        headers['set-cookie'] = 'token={token}; Path=/'.format(token=ltoken)

    # headers
    headers['status'] = '303 See Other'
    headers['location'] = 'http://{domain}/'.format(domain=environ['SERVER_NAME'])

    log('fb done')
    return
Example #26
def setup_oauth():
    """Authorize your app via identifier."""
    # Request token
    oauth = OAuth1(APP_KEY, client_secret=APP_SECRET)
    r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)
    credentials = parse_qs(r.content)

    resource_owner_key = credentials.get("oauth_token")[0]
    resource_owner_secret = credentials.get("oauth_token_secret")[0]

    # Authorize
    authorize_url = AUTHORIZE_URL + resource_owner_key
    print "Please go here and authorize: " + authorize_url

    verifier = raw_input("Please input the verifier: ")
    oauth = OAuth1(
        APP_KEY,
        client_secret=APP_SECRET,
        resource_owner_key=resource_owner_key,
        resource_owner_secret=resource_owner_secret,
        verifier=verifier,
    )

    # Finally, Obtain the Access Token
    r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)
    credentials = parse_qs(r.content)
    token = credentials.get("oauth_token")[0]
    secret = credentials.get("oauth_token_secret")[0]

    return token, secret
Example #27
 def HandleSetInducedError(self, path):
    query = urlparse.urlparse(path)[4]
    self.account_lock.acquire()
    code = 200
    response = 'Success'
    error = sync_pb2.ClientToServerResponse.Error()
    try:
      error_type = urlparse.parse_qs(query)['error']
      action = urlparse.parse_qs(query)['action']
      error.error_type = int(error_type[0])
      error.action = int(action[0])
      try:
        error.url = (urlparse.parse_qs(query)['url'])[0]
      except KeyError:
        error.url = ''
      try:
        error.error_description = (
            urlparse.parse_qs(query)['error_description'][0])
      except KeyError:
        error.error_description = ''
      self.account.SetInducedError(error)
      response = ('Error = %d, action = %d, url = %s, description = %s' %
                  (error.error_type, error.action,
                   error.url,
                   error.error_description))
    except (KeyError, ValueError):
      response = 'Could not parse url'
      code = 400
    finally:
      self.account_lock.release()
    return (code, '<html><title>SetError: %d</title><H1>%d %s</H1></html>' %
               (code, code, response))
Example #28
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            query = '%s "Season %s" "Episode %s"' % (url, season, episode)
            query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

            result = cloudflare.source(query)
            if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))

            r = client.parseDOM(result, 'li', attrs = {'class': 'first element.+?'})
            r += client.parseDOM(result, 'li', attrs = {'class': 'element.+?'})
            r += client.parseDOM(result, 'header', attrs = {'class': 'entry-header'})

            tvshowtitle = cleantitle.tv(url)

            result = [(client.parseDOM(i, 'a', ret='href'), re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i)) for i in r]
            result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], i[1][0].split('>')[-1], i[1][1], i[1][2]) for i in result]
            result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
            result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #29
    def _get_unit_lesson_from(self, data):
        """Extract unit and lesson id from exercise data submission."""

        # we need to figure out unit and lesson id for the exercise;
        # we currently have no direct way of doing it, so we have to do it
        # indirectly == ugly...; an exercise captures a page URL where it was
        # embedded; we can parse that URL out and find all the interesting
        # parts from the query string

        unit_id = 0
        lesson_id = 0
        json = transforms.loads(data)
        if json:
            location = json.get('location')
            if location:
                location = urllib2.unquote(location)
                params_map = urlparse.parse_qs(location)
                ity_ef_origin = params_map.get('ity_ef_origin')
                if ity_ef_origin:
                    ity_ef_origin = ity_ef_origin[0]
                    origin_path = urlparse.urlparse(ity_ef_origin)
                    if origin_path.query:
                        query = urlparse.parse_qs(origin_path.query)
                        unit_id = self._int_list_to_int(query.get('unit'))
                        lesson_id = self._int_list_to_int(query.get('lesson'))

                        # when we are on the first lesson of a unit, lesson_id
                        # is not present; look it up
                        if not lesson_id:
                            lessons = self.get_course().get_lessons(unit_id)
                            if lessons:
                                lesson_id = lessons[0].lesson_id

        return unit_id, lesson_id
Example #30
def resolve(url):
    try:
        try:
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except:
            referer=url


        id = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        url = 'http://www.finecast.tv/embed4.php?u=%s&vw=640&vh=450'%id

        headers=[("User-Agent", client.agent()), ("Referer", referer)]
        cj = get_cj()

        result = unCaptcha.performCaptcha(url, cj, headers = headers)
        result = decryptionUtils.doDemystify(result)
        cj.save(cookieFile, ignore_discard=True)
        


        file = re.findall('[\'\"](.+?.stream)[\'\"]',result)[0]
        auth = re.findall('[\'\"](\?wmsAuthSign.+?)[\'\"]',result)[0]
        rtmp = 'http://play.finecast.tv:1935/live/%s/playlist.m3u8%s'%(file,auth)

        #url = rtmp +  ' playpath=' + file + ' swfUrl=http://www.finecast.tv/player6/jwplayer.flash.swf flashver=' + constants.flash_ver() + ' live=1 timeout=14 pageUrl=' + url
        return rtmp

        
    except:
        return
Example #31
def parse_URI(uri, on_pr=None):
    import twist
    from twist import COIN

    if ':' not in uri:
        if not twist.is_address(uri):
            raise BaseException("Not a twist address")
        return {'address': uri}

    u = urlparse.urlparse(uri)
    if u.scheme != 'twist':
        raise BaseException("Not a twist URI")
    address = u.path

    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urlparse.parse_qs(query)
    else:
        pq = urlparse.parse_qs(u.query)

    for k, v in pq.items():
        if len(v) != 1:
            raise Exception('Duplicate Key', k)

    out = {k: v[0] for k, v in pq.items()}
    if address:
        if not twist.is_address(address):
            raise BaseException("Invalid twist address:" + address)
        out['address'] = address
    if 'amount' in out:
        am = out['amount']
        m = re.match('([0-9\.]+)X([0-9])', am)
        if m:
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow(Decimal(10), k)
        else:
            amount = Decimal(am) * COIN
        out['amount'] = int(amount)
    if 'message' in out:
        out['message'] = out['message'].decode('utf8')
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        out['sig'] = twist.base_decode(out['sig'], None, base=58).encode('hex')

    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):

        def get_payment_request_thread():
            import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            on_pr(request)

        t = threading.Thread(target=get_payment_request_thread)
        t.setDaemon(True)
        t.start()

    return out
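parse_URI() rejects duplicate keys by insisting that every parse_qs value list has exactly one element; parse_qs itself silently aggregates repeats into longer lists. A short demonstration (Python 3 names):

from urllib.parse import parse_qs

pq = parse_qs('amount=1&amount=2&message=hi')
# {'amount': ['1', '2'], 'message': ['hi']} -- repeats become longer lists
duplicates = [k for k, v in pq.items() if len(v) != 1]
# ['amount'] -- exactly what the loop above raises on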
Example #32
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                raise Exception()

            if not self.api:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = int(
                data['year']
            ) if 'year' in data and not data['year'] == None else None
            season = int(
                data['season']
            ) if 'season' in data and not data['season'] == None else None
            episode = int(
                data['episode']
            ) if 'episode' in data and not data['episode'] == None else None
            query = '%s S%02dE%02d' % (
                title, season,
                episode) if 'tvshowtitle' in data else '%s %d' % (title, year)

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query += ' lang:%s' % self.language[0]
            query = urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, self.search_link)

            hostDict = hostprDict + hostDict

            iterations = self.streamLimit / self.streamIncrease
            last = self.streamLimit - (iterations * self.streamIncrease)
            if not last:
                iterations = iterations - 1
                last = self.streamIncrease
            iterations = iterations + 1

            seen_urls = set()
            for type in self.types:
                searchFrom = 0
                searchCount = self.streamIncrease
                for offset in range(iterations):
                    if iterations == offset + 1: searchCount = last
                    urlNew = url % (type, self.api, query, searchCount,
                                    searchFrom)
                    searchFrom = searchFrom + self.streamIncrease

                    results = client.request(urlNew)
                    results = json.loads(results)

                    apistatus = results['status']
                    if apistatus != 'success': break

                    results = results['result']

                    added = False
                    for result in results:
                        jsonName = result['title']
                        jsonSize = result['sizeinternal']
                        jsonExtension = result['extension']
                        jsonLanguage = result['lang']
                        jsonHoster = result['hostername'].lower()
                        jsonLink = result['hosterurls'][0]['url']

                        if jsonLink in seen_urls: continue
                        seen_urls.add(jsonLink)

                        if not hdlr in jsonName.upper(): continue

                        if not self.releaseValid(title, jsonName):
                            continue  # filter non en releases

                        if not jsonHoster in hostDict: continue

                        if jsonExtension == 'rar': continue

                        quality, info = source_utils.get_release_quality(
                            jsonName)
                        info.append(self.formatSize(jsonSize))
                        info.append(jsonName)
                        info = '|'.join(info)

                        sources.append({
                            'source': jsonHoster,
                            'quality': quality,
                            'language': jsonLanguage,
                            'url': jsonLink,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                        added = True

                    if not added:
                        break

            return sources
        except:
            return sources
Example #33
def get_params(url):
    return dict(urlparse.parse_qs(urlparse.urlparse(url).query))
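Note that parse_qs already returns a plain dict, so the dict() wrapper is a no-op, and every value is still a list:

# get_params('http://host/path?a=1&b=2&b=3') -> {'a': ['1'], 'b': ['2', '3']}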
Example #34
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            html = client.request(url)
            posts = client.parseDOM(html, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'a', ret='href')
                    s = re.search('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', post)
                    s = s.groups()[0] if s else '0'
                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('2DDL - Exception: \n' + str(failure))
            return sources
Example #35
 def _get_url_param(self, url):
     result = url_parse.urlparse(url)
     return url_parse.parse_qs(result.query, True)
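The second positional argument here is keep_blank_values, which preserves parameters whose value is empty; by default parse_qs drops them. The difference, in the Python 3 spelling:

from urllib.parse import parse_qs

parse_qs('a=1&b=')                          # {'a': ['1']} -- blank b is dropped
parse_qs('a=1&b=', keep_blank_values=True)  # {'a': ['1'], 'b': ['']}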
Example #36
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url == None:
             return sources
         if debrid.status() is False:
             raise Exception()
         if debrid.tor_enabled() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         html = html.replace('&nbsp;', ' ')
         try:
             results = client.parseDOM(html,
                                       'table',
                                       attrs={'id': 'searchResult'})[0]
         except:
             return sources
         rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
         if rows is None:
             return sources
         for entry in rows:
             try:
                 try:
                     name = re.findall(
                         'class="detLink" title=".+?">(.+?)</a>', entry,
                         re.DOTALL)[0]
                     name = client.replaceHTMLCodes(name)
                     #t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                     if not cleantitle.get(title) in cleantitle.get(name):
                         continue
                 except:
                     continue
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr:
                     continue
                 try:
                     seeders = int(
                         re.findall('<td align="right">(.+?)</td>', entry,
                                    re.DOTALL)[0])
                 except:
                     continue
                 if self.min_seeders > seeders:
                     continue
                 try:
                     link = 'magnet:%s' % (re.findall(
                         'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                     link = str(
                         client.replaceHTMLCodes(link).split('&tr')[0])
                     if link in str(sources):
                         continue
                 except:
                     continue
                 quality, info = source_utils.get_release_quality(
                     name, name)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         entry)[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': 'Torrent',
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except:
                 continue
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check:
             sources = check
         return sources
     except:
         return sources
Example #37
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            if 'tvshowtitle' in data:
                urls = self.__get_episode_urls(data)
            else:
                urls = self.__get_movie_urls(data)

            for url in urls:
                response = client.request(url)

                encrypted = re.findall('embedVal="(.+?)"', response)[0]
                decrypted = self.__decrypt(encrypted)

                storage = json.loads(decrypted)

                for location in storage['videos']:
                    if 'sources' in location:
                        for source in location['sources']:
                            try:
                                link = source['file']

                                if 'google' in link or 'blogspot' in link:
                                    quality = directstream.googletag(
                                        link)[0]['quality']

                                    if 'lh3.googleusercontent' in link:
                                        link = directstream.googleproxy(link)

                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': quality,
                                        'language': 'en',
                                        'url': link,
                                        'direct': True,
                                        'debridonly': False
                                    })

                                else:
                                    continue

                            except Exception:
                                continue

                    elif 'url' in location:
                        if 'http' in location['url']:
                            continue

                        url = urlparse.urljoin(self.cdn_link, location['url'])

                        response = client.request(url)
                        manifest = json.loads(response)

                        for video in manifest:
                            try:
                                quality = video['label']
                                link = video['file']

                                sources.append({
                                    'source': 'CDN',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': True,
                                    'debridonly': False
                                })

                            except Exception:
                                continue

            return sources

        except Exception:
            return
Example #38
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            if 'exact' in data and data['exact']:
                query = title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                year = None
                season = None
                episode = None
                pack = False
                packCount = None
            else:
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                year = int(data['year']) if 'year' in data and data['year'] is not None else None
                season = int(data['season']) if 'season' in data and data['season'] is not None else None
                episode = int(data['episode']) if 'episode' in data and data['episode'] is not None else None
                pack = data['pack'] if 'pack' in data else False
                packCount = data['packcount'] if 'packcount' in data else None

                if 'tvshowtitle' in data:
                    if pack: query = '%s %d' % (title, season)
                    else: query = '%s S%02dE%02d' % (title, season, episode)
                else:
                    query = '%s %d' % (title, year)
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.search_link)
            category = self.category_tvshows if ('tvshowtitle' in data and data['tvshowtitle'] not in (None, '')) else self.category_movies

            torrents = json.loads(
                client.request(url,
                               post={
                                   'query': urllib.quote_plus(query),
                                   'filters[category]': str(category),
                                   'filters[adult]': 'false',
                                   'filters[field]': 'seeds',
                                   'filters[sort]': 'desc',
                                   'filters[time]': '4',
                                   'limit': '1000',
                                   'offset': '0',
                               }))['content']

            for torrent in torrents:
                jsonName = torrent['name']
                jsonLink = torrent['magnet']
                try:
                    jsonSize = int(torrent['size']) * 1048576
                except:
                    jsonSize = None
                try:
                    jsonSeeds = int(torrent['seeds'])
                except:
                    jsonSeeds = None

                # Metadata
                meta = metadata.Metadata(name=jsonName,
                                         title=title,
                                         year=year,
                                         season=season,
                                         episode=episode,
                                         pack=pack,
                                         packCount=packCount,
                                         link=jsonLink,
                                         size=jsonSize,
                                         seeds=jsonSeeds)

                # Ignore
                if meta.ignore(False):
                    continue

                # Add
                sources.append({
                    'url': jsonLink,
                    'debridonly': False,
                    'direct': False,
                    'source': 'torrent',
                    'language': self.language[0],
                    'quality': meta.videoQuality(),
                    'metadata': meta,
                    'file': jsonName
                })

            return sources
        except:
            return sources
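The query construction in Example #38 is the part most worth lifting out; a standalone sketch of the same formatting and sanitising logic, under the same assumptions about its inputs:

import re

def build_query(title, year=None, season=None, episode=None, pack=False):
    # Season packs search by season number only; single episodes use SxxEyy.
    if season is not None:
        query = '%s %d' % (title, season) if pack else '%s S%02dE%02d' % (title, season, episode)
    else:
        query = '%s %d' % (title, year)
    # Strip characters that upset the search endpoint.
    return re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

assert build_query('Some Show', season=1, episode=2) == 'Some Show S01E02'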
Example #39
    def sources(self, url, hostDict, hostprDict):
        self._sources = []    
        try:
            if url == None: return self._sources
            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.show = 'tvshowtitle' in data
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = data['tvshowtitle'] if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            
            url = self.search_link % urllib.quote_plus(query)
            ref = urlparse.urljoin(self.base_link, self.search_link1 % urllib.quote_plus(query))
            url = urlparse.urljoin(self.base_link, url)
            self.scraper = cfscrape.create_scraper()
            self.scraper.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
            self.scraper.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
            self.scraper.headers['Referer'] = 'http://www.ddlvalley.me/'
            self.scraper.headers['Host'] = 'www.ddlvalley.me'
            self.scraper.headers['Upgrade-Insecure-Requests'] = '1'
            sess = self.scraper.get(self.base_link)
            self.scraper.headers['Cookie'] = ''
            for key, value in self.scraper.cookies.iteritems(): 
                self.scraper.headers['Cookie'] += '%s=%s;'%(key, value)
            dts = datetime.datetime.utcnow() + datetime.timedelta(days=1)
            self.scraper.headers['Cookie'] += 'noprpkedvhozafiwrcnt=1; noprpkedvhozafiwrexp=%s'%dts.strftime("%a, %d %b %Y %H:%M:%S GMT")
            self.scraper.headers['Referer'] = ref
            r = self.scraper.get(url).content
            u = r
            
            next_page = True
            num = 1
            while next_page:
                try:
                    np = re.findall('<link rel="next" href="([^"]+)', u)[0]
                    u = self.scraper.get(np).content
                    r += u
                except: next_page = False

            items = dom_parser2.parse_dom(r, 'h2')
            items = [dom_parser2.parse_dom(i.content, 'a', req=['href','rel','title','data-wpel-link']) for i in items]
            items = [(i[0].content, i[0].attrs['href']) for i in items]
            items = [(i[0], i[1]) for i in items if cleantitle.get_simple(i[0].split(self.hdlr)[0].lower()) == cleantitle.get_simple(title.lower())]
            threads = []
            for i in items: threads.append(workers.Thread(self._get_sources, i[0], i[1], hostDict, hostprDict))
            for i in threads:
                i.start(); time.sleep(0.5)
            
            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.5)
            return self._sources
        except:
            return self._sources
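Example #39's hand-rolled Cookie header can be reproduced with any requests-compatible session (cfscrape exposes the same cookies attribute); a hedged sketch, with the ad-gate cookie names copied from the example:

import datetime
import requests

def cookie_header(session, extra=None):
    # Flatten the session's cookie jar into a single Cookie header value.
    pairs = ['%s=%s' % (k, v) for k, v in session.cookies.items()]
    return '; '.join(pairs + (extra or []))

session = requests.Session()
expires = datetime.datetime.utcnow() + datetime.timedelta(days=1)
stamp = expires.strftime('%a, %d %b %Y %H:%M:%S GMT')
header = cookie_header(session, extra=['noprpkedvhozafiwrcnt=1',
                                       'noprpkedvhozafiwrexp=%s' % stamp])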
Example #40
    def parse(self, response):

        try:
            # print response.body
            hxs = Selector(response)
            # goods_container_nodes = hxs.xpath('//div[@class="am-hide itemsdata"]')
            goods_container_nodes = hxs.xpath(
                '//div[@class="public-commodity-size default-style library-list"]'
            )
            # print goods_container_nodes

            for goods_node in goods_container_nodes:
                item = Item()

                item_url = goods_node.xpath(
                    './div[@class="public-option"]/a/@href').extract()[0]
                print item_url
                # https://item.taobao.com/item.htm?id=537180158515
                # https://detail.tmall.com/item.htm?id=559026008988
                query = urlparse.parse_qs(
                    urlparse.urlparse(item_url).query, True)
                item['item_id'] = query['id'][0]

                # item['item_id'] = goods_node.xpath('./@data-itemid').extract()[0]

                # item['title'] = goods_node.xpath('./@data-itemtitle').extract()[0]
                item['title'] = goods_node.xpath(
                    './div[@class="commodity_name am-text-sm am-padding-bottom-sm"]/a[1]/span/text()'
                ).extract()[0]
                # item['desc'] = goods_node.xpath('./@data-itemdesc').extract()[0]

                # coupon_url = goods_node.xpath('./@data-couponurl').extract()[0]
                # http://shop.m.taobao.com/shop/coupon.htm?sellerId=2086961343&activityId=3a499ad0fcb14a478b08430075950543
                coupon_url = goods_node.xpath(
                    './div[@class="commodity_name am-text-sm am-padding-bottom-sm"]/a[2]/@href'
                ).extract()[0]
                print coupon_url, item['item_id']

                try:
                    if coupon_url is not None:
                        query = urlparse.parse_qs(
                            urlparse.urlparse(coupon_url).query, True)

                        if 'activity_id' in query:
                            activity_id = query['activity_id'][0]
                        elif 'activityId' in query:
                            activity_id = query['activityId'][0]

                        if 'seller_id' in query:
                            seller_id = query['seller_id'][0]
                        elif 'sellerId' in query:
                            seller_id = query['sellerId'][0]

                        item['coupon_activity_id'] = activity_id  # coupon id
                        item['coupon_seller_id'] = seller_id  # seller id
                except Exception as e:
                    print e

                yield item

        except Exception, what:
            print "-----------", what
Example #41
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            titleYear = '%s %s' % (title, str(data['year']))
            year = int(data['year']) if 'year' in data and data['year'] is not None else None
            season = int(data['season']) if 'season' in data and data['season'] is not None else None
            episode = int(data['episode']) if 'episode' in data and data['episode'] is not None else None

            query = data['imdb'] if 'imdb' in data and data['imdb'] is not None else title
            url = urlparse.urljoin(self.base_link, self.search_link) % query
            result = json.loads(client.request(url))

            movie = result['data']['movies'][0]
            name = movie['title_long'] + ' '
            torrents = movie['torrents']

            for torrent in torrents:
                quality = torrent['quality']
                if quality.lower() == '3d':
                    quality += ' HD1080'
                jsonName = name + quality
                jsonSize = torrent['size_bytes']
                jsonSeeds = torrent['seeds']
                jsonHash = torrent['hash']
                jsonLink = network.Container(jsonHash).torrentMagnet(
                    title=titleYear)

                # Metadata
                meta = metadata.Metadata(name=jsonName,
                                         title=title,
                                         year=year,
                                         season=season,
                                         episode=episode,
                                         link=jsonLink,
                                         size=jsonSize,
                                         seeds=jsonSeeds)
                jsonLink = network.Container(jsonHash).torrentMagnet(
                    title=meta.title(extended=True))

                # Ignore
                if meta.ignore(False):
                    continue

                # Add
                sources.append({
                    'url': jsonLink,
                    'debridonly': False,
                    'direct': False,
                    'source': 'torrent',
                    'language': self.language[0],
                    'quality': meta.videoQuality(),
                    'info': meta.information(),
                    'file': jsonName
                })

            return sources
        except:
            return sources
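network.Container in Example #41 is project-specific, but the link it builds follows the standard magnet URI scheme; a minimal stand-in sketch:

import urllib  # urllib.parse.quote_plus in Python 3

def torrent_magnet(info_hash, title, trackers=()):
    # magnet:?xt=urn:btih:<hash>&dn=<display name>[&tr=<tracker>...]
    magnet = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.quote_plus(title))
    for tracker in trackers:
        magnet += '&tr=%s' % urllib.quote_plus(tracker)
    return magnet

assert torrent_magnet('abc123', 'Some Movie 2016') == 'magnet:?xt=urn:btih:abc123&dn=Some+Movie+2016'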
Example #42
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    u = post.split('Download%252BLinks.png', 1)[-1]
                    u = client.parseDOM(u, 'div', attrs={'style': '.+?'})
                    u = [re.findall('<a href="(.+?)"', i) for i in u]
                    u = [i[0] for i in u if i]

                    items += [(t, i) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()
                    if 'hindi' in fmt and not 'dual' in fmt: raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:Gb|mb))', name)[-1]
                        div = 1 if size.endswith('Gb') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
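The size normalisation buried in Example #42's try block recurs across these scrapers; extracted into a sketch (same regexes as the example):

import re

def human_size_to_gb(text):
    # Find the last size token, e.g. '1.4Gb' or '700mb', and normalise to GB.
    sizes = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:Gb|mb))', text)
    if not sizes:
        return None
    size = sizes[-1]
    div = 1 if size.endswith('Gb') else 1024
    return '%.2f GB' % (float(re.sub('[^0-9|/.|/,]', '', size)) / div)

assert human_size_to_gb('Movie.2016.720p.1.4Gb.mkv') == '1.40 GB'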
Example #43
    def get_tabular_part(self, path):
        """

        Parameters
        ----------
        path: str
            OS-translated path to an hdf or h5r file on the dataserver computer. 
            Append the part of the file to read after the file extension, e.g. 
            .h5r/Events. Return format (for arrays) can additionally be 
            specified, as can slices
            using the following syntax: test.h5r/FitResults.json?from=0&to=100. 
            Supported array formats include json and npy.

        Returns
        -------
        f: BytesIO
            Requested part of the file encoded as bytes

        """
        from PYME.IO import h5rFile, clusterResults

        # parse path
        ext = '.h5r' if '.h5r' in path else '.hdf'
        # TODO - should we just use the untranslated path?
        filename, details = path.split(ext + os.sep)
        filename = filename + ext  # path to file on dataserver disk
        query = urlparse.urlparse(details).query
        details = details.split('?')[0]  # drop the query; str.strip would treat the argument as a character set
        if '.' in details:
            part, return_type = details.split('.')
        else:
            part, return_type = details, ''
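        # The ?from=&to= slicing described in the docstring reduces to a small
        # parse_qs pattern; a sketch (parse_slice is a hypothetical helper,
        # not part of this class):
        #
        #     def parse_slice(query_string):
        #         q = urlparse.parse_qs(query_string)
        #         start = int(q.get('from', [0])[0])
        #         end = int(q['to'][0]) if 'to' in q else None
        #         return slice(start, end)  # 'from=10&to=20' -> slice(10, 20)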

        try:
            with h5rFile.openH5R(filename) as h5f:
                if part == 'Metadata':
                    wire_data, output_format = clusterResults.format_results(
                        h5f.mdh, return_type)
                else:
                    # figure out if we have any slicing to do
                    query = urlparse.parse_qs(query)
                    start = int(query.get('from', [0])[0])
                    end = None if 'to' not in query.keys() else int(
                        query['to'][0])
                    wire_data, output_format = clusterResults.format_results(
                        h5f.getTableData(part, slice(start, end)),
                        '.' + return_type)

            f, length = self._string_to_file(wire_data)
            self.send_response(200)
            self.send_header(
                "Content-Type",
                output_format if output_format else 'application/octet-stream')
            self.send_header("Content-Length", length)
            #self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
            self.end_headers()
            return f

        except IOError:
            self.send_error(404,
                            "File not found - %s, [%s]" % (self.path, path))
Example #44
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                ep = data['episode']
                url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                    self.base_link, cleantitle.geturl(
                        data['tvshowtitle']), int(data['season']), ep)
                r = client.request(url,
                                   headers=headers,
                                   timeout='10',
                                   output='geturl')

                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'],
                                          aliases, headers)

            else:
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            if url == None: raise Exception()

            r = client.request(url, headers=headers, timeout='10')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r,
                                        'a',
                                        attrs={'episode-data': ep},
                                        ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = client.request(link, headers=headers, timeout='10')
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                else:
                    try:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if not host in hostDict: raise Exception()
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Series9 - Exception: \n' + str(failure))
            return sources
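The host check near the end of Example #44 keeps only the registrable domain of each link; the same regex, isolated:

import re
import urlparse  # urllib.parse in Python 3

def link_host(link):
    # Reduce 'https://www.example.com/path' to 'example.com'.
    netloc = urlparse.urlparse(link.strip().lower()).netloc
    hosts = re.findall('([\w]+[.][\w]+)$', netloc)
    return hosts[0] if hosts else None

assert link_host('https://www.example.com/watch?v=1') == 'example.com'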
Example #45
    def test_get_image_details_with_limit(self):
        request = fakes.HTTPRequest.blank('/v2/fake/images/detail?limit=2')
        response = self.controller.detail(request)
        response_list = response["images"]
        response_links = response["images_links"]

        server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
        server_href = "http://localhost/v2/fake/servers/" + server_uuid
        server_bookmark = "http://localhost/fake/servers/" + server_uuid
        alternate = "%s/fake/images/%s"

        expected = [{
            'id': '123',
            'name': 'public image',
            'metadata': {'key1': 'value1'},
            'updated': NOW_API_FORMAT,
            'created': NOW_API_FORMAT,
            'status': 'ACTIVE',
            'minDisk': 10,
            'progress': 100,
            'minRam': 128,
            "links": [{
                "rel": "self",
                "href": "http://localhost/v2/fake/images/123",
            },
            {
                "rel": "bookmark",
                "href": "http://localhost/fake/images/123",
            },
            {
                "rel": "alternate",
                "type": "application/vnd.openstack.image",
                "href": alternate % (glance.generate_glance_url(), 123),
            }],
        },
        {
            'id': '124',
            'name': 'queued snapshot',
            'metadata': {
                u'instance_uuid': server_uuid,
                u'user_id': u'fake',
            },
            'updated': NOW_API_FORMAT,
            'created': NOW_API_FORMAT,
            'status': 'SAVING',
            'minDisk': 0,
            'progress': 25,
            'minRam': 0,
            'server': {
                'id': server_uuid,
                "links": [{
                    "rel": "self",
                    "href": server_href,
                },
                {
                    "rel": "bookmark",
                    "href": server_bookmark,
                }],
            },
            "links": [{
                "rel": "self",
                "href": "http://localhost/v2/fake/images/124",
            },
            {
                "rel": "bookmark",
                "href": "http://localhost/fake/images/124",
            },
            {
                "rel": "alternate",
                "type": "application/vnd.openstack.image",
                "href": alternate % (glance.generate_glance_url(), 124),
            }],
        }]

        self.assertThat(expected, matchers.DictListMatches(response_list))

        href_parts = urlparse.urlparse(response_links[0]['href'])
        self.assertEqual('/v2/fake/images', href_parts.path)
        params = urlparse.parse_qs(href_parts.query)

        self.assertThat({'limit': ['2'], 'marker': ['124']},
                        matchers.DictMatches(params))
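The tail of Example #45 shows the standard way to assert on pagination links without depending on parameter order: parse the href and compare dicts. Condensed into a sketch:

import urlparse  # urllib.parse in Python 3

def page_params(href):
    # Split a pagination link into (path, decoded query dict).
    parts = urlparse.urlparse(href)
    return parts.path, urlparse.parse_qs(parts.query)

path, params = page_params('http://localhost/v2/fake/images?marker=124&limit=2')
assert path == '/v2/fake/images'
assert params == {'limit': ['2'], 'marker': ['124']}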
Example #46
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})

            rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(
                rels,
                'a',
                attrs={'href': re.compile('#stream_\d*')},
                req='href')
            rels = [(re.findall('stream_(\d+)', i.attrs['href']),
                     re.findall('flag-(\w{2})', i.content)) for i in rels if i]
            rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else [])
                    for i in rels if i[0] and 'de' in i[1]]

            for id, info in rels:
                rel = dom_parser.parse_dom(r,
                                           'div',
                                           attrs={'id': 'stream_%s' % id})
                rel = [(dom_parser.parse_dom(
                    i, 'div', attrs={'id': 'streams_episodes_%s' % id}),
                        dom_parser.parse_dom(i, 'tr')) for i in rel]
                rel = [(i[0][0].content,
                        [x for x in i[1] if 'fa-desktop' in x.content])
                       for i in rel if i[0] and i[1]]
                rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td'))
                       for i in rel if i[1]]
                rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$',
                                         i[1][0].content)) for i in rel
                       if i[1]]
                rel = [(i[0], source_utils.label_to_quality(i[1][0]))
                       for i in rel if len(i[1]) > 0]

                for html, quality in rel:
                    try:
                        s = dom_parser.parse_dom(
                            html,
                            'a',
                            attrs={
                                'href':
                                re.compile('#streams_episodes_%s_\d+' % id)
                            })
                        s = [(dom_parser.parse_dom(
                            i,
                            'div',
                            attrs={'data-loop': re.compile('\d+')},
                            req='data-loop'), dom_parser.parse_dom(i, 'span'))
                             for i in s]
                        s = [(i[0][0].attrs['data-loop'], [
                            x.content for x in i[1] if '<strong' in x.content
                        ]) for i in s if i[0]]
                        s = [(i[0],
                              re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0]))
                             for i in s if len(i[1]) > 0]
                        s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                        s = [(i[0], int(i[1][0]),
                              re.findall('Episode (\d+):', i[1][1],
                                         re.IGNORECASE)) for i in s
                             if len(i[1]) > 1]
                        s = [(i[0], i[1],
                              int(i[2][0]) if len(i[2]) > 0 else -1)
                             for i in s]
                        s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                        s = [i[0] for i in s if i[1] == episode][0]

                        enc = dom_parser.parse_dom(
                            html,
                            'div',
                            attrs={
                                'id':
                                re.compile('streams_episodes_%s_%s' % (id, s))
                            },
                            req='data-enc')[0].attrs['data-enc']

                        hosters = dom_parser.parse_dom(
                            html,
                            'a',
                            attrs={
                                'href':
                                re.compile('#streams_episodes_%s_%s' % (id, s))
                            })
                        hosters = [
                            dom_parser.parse_dom(i, 'i', req='class')
                            for i in hosters
                        ]
                        hosters = [
                            re.findall('hoster-(\w+)',
                                       ' '.join([x.attrs['class'] for x in i]))
                            for i in hosters if i
                        ][0]
                        hosters = [(source_utils.is_host_valid(
                            re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i),
                            hostDict), i) for i in hosters]
                        hosters = [(i[0][1], i[1]) for i in hosters
                                   if i[0] and i[0][0]]

                        info = ' | '.join(info)

                        for source, hoster in hosters:
                            sources.append({
                                'source': source,
                                'quality': quality,
                                'language': 'de',
                                'url': [enc, hoster],
                                'info': info,
                                'direct': False,
                                'debridonly': False,
                                'checkquality': True
                            })
                    except:
                        pass

            return sources
        except:
            return sources
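The resolution handling in Example #46 pulls the pixel height out of a WIDTHxHEIGHT label before mapping it to a quality name; source_utils.label_to_quality is project-specific, so this is a hypothetical stand-in for that step:

import re

def label_to_quality(height):
    # Map a pixel height to the quality buckets these scrapers use (sketch).
    height = int(height)
    if height >= 1080:
        return '1080p'
    if height >= 720:
        return 'HD'
    return 'SD'

heights = re.findall('\d{3,4}x(\d{3,4})$', '1280x720')
assert label_to_quality(heights[0]) == 'HD'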
Example #47
def ddg_href(url):
    if url.startswith('/'):
        q = url.partition('?')[2]
        url = parse_qs(q.encode('utf-8'))['uddg'][0].decode('utf-8')
    return url
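DuckDuckGo wraps result links in a redirect whose uddg parameter carries the real target; a usage sketch for ddg_href (the sample link is illustrative):

wrapped = '/l/?uddg=https%3A%2F%2Fexample.com%2Fpage'
assert ddg_href(wrapped) == 'https://example.com/page'

# Absolute links pass through untouched.
assert ddg_href('https://example.com/page') == 'https://example.com/page'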
Example #48
    def do_PUT(self):
        if self.timeoutTesting:
            #exp = time.time() + float(self.timeoutTesting)
            #while time.time() < exp:
            #    y = pow(5, 7)
            time.sleep(float(self.timeoutTesting))  # sleep long enough to force a timeout on the clients
            #print('waited ... ')

        if self.bandwidthTesting:
            #just read file and dump contents
            r = self.rfile.read(int(self.headers['Content-Length']))
            self.send_response(200)
            self.send_header("Content-Length", "0")
            self.end_headers()
            return

        if self.path.lstrip('/').startswith('__aggregate'):
            #paths starting with __aggregate are special, and trigger appends to an existing file rather than creation
            #of a new file.
            self._doAggregate()
            return

        if self.path.lstrip('/').startswith('__pyramid'):
            #paths starting with __pyramid are special, and trigger operations on PartialPyramids rather than creation
            #of a new file.
            self._do_part_pyramid()
            return

        path = self.translate_path(self.path)
        # move up here to make sure we actually get the data (and clear it from our queue, even if we are going to, e.g.
        # 405 it later)
        data = self._get_data()

        if os.path.exists(path):
            #Do not overwrite - we use write-once semantics
            self.send_error(405, "File already exists %s" % path)

            #self.end_headers()
            return None
        else:
            dir, file = os.path.split(path)
            #if not os.path.exists(dir):
            #    os.makedirs(dir)
            makedirs_safe(dir)

            query = urlparse.parse_qs(urlparse.urlparse(self.path).query)

            if file == '':
                # we're just making a directory
                pass
            elif 'MirrorSource' in query.keys():
                #File content is not in message content. This computer should
                #fetch the results from another computer in the cluster instead
                #used for online duplication

                r = requests.get(query['MirrorSource'][0], timeout=.1)

                with open(path, 'wb') as f:
                    f.write(r.content)

                #set the file to read-only (reflecting our write-once semantics)
                os.chmod(path, 0o440)

                if USE_DIR_CACHE:
                    cl.dir_cache.update_cache(path, len(r.content))

            else:
                #the standard case - use the contents of the put request
                with open(path, 'wb') as f:
                    #shutil.copyfileobj(self.rfile, f, int(self.headers['Content-Length']))
                    f.write(data)

                    #set the file to read-only (reflecting our write-once semantics)
                    os.chmod(path, 0o440)

                    if USE_DIR_CACHE:
                        cl.dir_cache.update_cache(path, len(data))

            self.send_response(200)
            self.send_header("Content-Length", "0")
            self.end_headers()
            return
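The MirrorSource branch of Example #48 implements write-once replication: instead of carrying file content, the PUT's query string names another node to fetch from. That branch, condensed into a sketch:

import os
import requests
import urlparse  # urllib.parse in Python 3

def handle_mirror_put(path, raw_path):
    # If the query names a MirrorSource, pull the content from that peer
    # instead of the request body, then mark the file read-only (write-once).
    query = urlparse.parse_qs(urlparse.urlparse(raw_path).query)
    source = query.get('MirrorSource', [None])[0]
    if source is None:
        return False
    r = requests.get(source, timeout=.1)
    with open(path, 'wb') as f:
        f.write(r.content)
    os.chmod(path, 0o440)
    return True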
Example #49
def sso_callback_get():
    sso_mode = settings.app.sso

    if sso_mode not in (GOOGLE_AUTH, GOOGLE_DUO_AUTH, GOOGLE_YUBICO_AUTH,
                        SLACK_AUTH, SLACK_DUO_AUTH, SLACK_YUBICO_AUTH,
                        SAML_AUTH, SAML_DUO_AUTH, SAML_YUBICO_AUTH,
                        SAML_OKTA_AUTH, SAML_OKTA_DUO_AUTH,
                        SAML_OKTA_YUBICO_AUTH, SAML_ONELOGIN_AUTH,
                        SAML_ONELOGIN_DUO_AUTH, SAML_ONELOGIN_YUBICO_AUTH):
        return flask.abort(405)

    state = flask.request.args.get('state')
    sig = flask.request.args.get('sig')

    tokens_collection = mongo.get_collection('sso_tokens')
    doc = tokens_collection.find_and_modify(query={'_id': state}, remove=True)

    if not doc:
        return flask.abort(404)

    query = flask.request.query_string.split('&sig=')[0]
    test_sig = base64.urlsafe_b64encode(
        hmac.new(str(doc['secret']), query, hashlib.sha512).digest())

    if sig != test_sig:
        return flask.abort(401)

    params = urlparse.parse_qs(query)

    if doc.get('type') == SAML_AUTH:
        username = params.get('username')[0]
        email = params.get('email', [None])[0]
        org_name = params.get('org', [None])[0]

        if not username:
            return flask.abort(406)

        org_id = settings.app.sso_org
        if org_name:
            org = organization.get_by_name(org_name, fields=('_id',))
            if org:
                org_id = org.id

        valid, org_id_new, groups = sso.plugin_sso_authenticate(
            sso_type='saml',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
            sso_org_names=[org_name],
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.error(
                'Saml plugin authentication not valid',
                'sso',
                username=username,
            )
            return flask.abort(401)
    elif doc.get('type') == SLACK_AUTH:
        username = params.get('username')[0]
        email = None
        user_team = params.get('team')[0]
        org_names = params.get('orgs', [''])[0]
        org_names = org_names.split(',')

        valid, org_name = sso.verify_slack(username, user_team, org_names)
        if not valid:
            return flask.abort(401)

        if org_name:
            org_names = [org_name]

        org_id = settings.app.sso_org
        for org_name in org_names:
            org = organization.get_by_name(org_name, fields=('_id',))
            if org:
                org_id = org.id
                break

        valid, org_id_new, groups = sso.plugin_sso_authenticate(
            sso_type='slack',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
            sso_org_names=org_names,
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.error(
                'Slack plugin authentication not valid',
                'sso',
                username=username,
            )
            return flask.abort(401)
    else:
        username = params.get('username')[0]
        email = username

        valid = sso.verify_google(username)
        if not valid:
            return flask.abort(401)

        org_id = settings.app.sso_org

        valid, org_id_new, groups = sso.plugin_sso_authenticate(
            sso_type='google',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.error(
                'Google plugin authentication not valid',
                'sso',
                username=username,
            )
            return flask.abort(401)

    if DUO_AUTH in sso_mode:
        if settings.app.sso_duo_mode == 'passcode':
            token = utils.generate_secret()

            tokens_collection = mongo.get_collection('sso_tokens')
            tokens_collection.insert({
                '_id': token,
                'type': DUO_AUTH,
                'username': username,
                'email': email,
                'org_id': org_id,
                'groups': groups,
                'timestamp': utils.now(),
            })

            duo_page = static.StaticFile(settings.conf.www_path,
                                         'duo.html',
                                         cache=False,
                                         gzip=False)

            if settings.app.theme == 'dark':
                duo_page.data = duo_page.data.replace('<body>',
                                                      '<body class="dark">')
            duo_page.data = duo_page.data.replace('<%= token %>', token)

            return duo_page.get_response()
        else:
            duo_auth = sso.Duo(
                username=username,
                factor=settings.app.sso_duo_mode,
                remote_ip=utils.get_remote_addr(),
                auth_type='Key',
            )
            valid = duo_auth.authenticate()
            if valid:
                valid, org_id_new, groups2 = sso.plugin_sso_authenticate(
                    sso_type='duo',
                    user_name=username,
                    user_email=email,
                    remote_ip=utils.get_remote_addr(),
                )
                if valid:
                    org_id = org_id_new or org_id
                else:
                    logger.error(
                        'Duo plugin authentication not valid',
                        'sso',
                        username=username,
                    )
                    return flask.abort(401)

                groups = ((groups or set()) | (groups2 or set())) or None
            else:
                logger.error(
                    'Duo authentication not valid',
                    'sso',
                    username=username,
                )
                return flask.abort(401)

    if YUBICO_AUTH in sso_mode:
        token = utils.generate_secret()

        tokens_collection = mongo.get_collection('sso_tokens')
        tokens_collection.insert({
            '_id': token,
            'type': YUBICO_AUTH,
            'username': username,
            'email': email,
            'org_id': org_id,
            'groups': groups,
            'timestamp': utils.now(),
        })

        yubico_page = static.StaticFile(settings.conf.www_path,
                                        'yubico.html',
                                        cache=False,
                                        gzip=False)

        if settings.app.theme == 'dark':
            yubico_page.data = yubico_page.data.replace(
                '<body>', '<body class="dark">')
        yubico_page.data = yubico_page.data.replace('<%= token %>', token)

        return yubico_page.get_response()

    org = organization.get_by_id(org_id)
    if not org:
        return flask.abort(405)

    usr = org.find_user(name=username)
    if not usr:
        usr = org.new_user(name=username,
                           email=email,
                           type=CERT_CLIENT,
                           auth_type=sso_mode,
                           groups=list(groups) if groups else None)
        usr.audit_event('user_created',
                        'User created with single sign-on',
                        remote_addr=utils.get_remote_addr())

        event.Event(type=ORGS_UPDATED)
        event.Event(type=USERS_UPDATED, resource_id=org.id)
        event.Event(type=SERVERS_UPDATED)
    else:
        if usr.disabled:
            return flask.abort(403)

        if groups and groups - set(usr.groups or []):
            usr.groups = list(set(usr.groups or []) | groups)
            usr.commit('groups')

        if usr.auth_type != sso_mode:
            usr.auth_type = sso_mode
            usr.commit('auth_type')

    key_link = org.create_user_key_link(usr.id, one_time=True)

    usr.audit_event(
        'user_profile',
        'User profile viewed from single sign-on',
        remote_addr=utils.get_remote_addr(),
    )

    return utils.redirect(utils.get_url_root() + key_link['view_url'])
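The signature check at the top of Example #49 is the reusable part; a sketch of the same HMAC-SHA512 verification, using hmac.compare_digest (constant-time) where the original compares with !=:

import base64
import hashlib
import hmac

def verify_signed_query(secret, query, sig):
    # Recompute the urlsafe-base64 HMAC-SHA512 of the query and compare.
    digest = hmac.new(secret, query, hashlib.sha512).digest()
    return hmac.compare_digest(base64.urlsafe_b64encode(digest), sig)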
Example #50
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url == None:
             return sources
         if debrid.status() is False:
             raise Exception()
         if debrid.tor_enabled() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = urlparse.urljoin(
             self.base_link,
             self.search_link.format(query[0].lower(),
                                     cleantitle.geturl(query)))
         r = client.request(url)
         r = client.parseDOM(r, 'tbody')[0]
         posts = client.parseDOM(r, 'tr')
         posts = [i for i in posts if 'magnet:' in i]
         for post in posts:
             post = post.replace('&nbsp;', ' ')
             name = client.parseDOM(post, 'a', ret='title')[1]
             t = name.split(hdlr)[0]
              if not cleantitle.get(re.sub('(\(|\))', '', t)) == cleantitle.get(title):
                 continue
             try:
                 y = re.findall(
                     '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except:
                 y = re.findall(
                     '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if not y == hdlr:
                 continue
             links = client.parseDOM(post, 'a', ret='href')
             magnet = [
                 i.replace('&amp;', '&') for i in links if 'magnet:' in i
             ][0]
             url = magnet.split('&tr')[0]
             if url in str(sources):
                 continue
             quality, info = source_utils.get_release_quality(name, name)
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                 size = float(
                     re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                             '.'))) / div
                 size = '%.2f GB' % size
             except:
                 size = '0'
             info.append(size)
             info = ' | '.join(info)
             sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': True
             })
         return sources
     except:
         return sources
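The magnet handling in Example #50 (unescape &amp;, drop the tracker list, dedupe) is another recurring fragment; as a sketch:

def extract_magnet(links, seen):
    # Keep the first magnet link, unescape &amp;, and strip the tracker list.
    magnets = [i.replace('&amp;', '&') for i in links if 'magnet:' in i]
    if not magnets:
        return None
    url = magnets[0].split('&tr')[0]
    if url in seen:
        return None
    seen.add(url)
    return url

seen = set()
links = ['/torrent/1', 'magnet:?xt=urn:btih:abc&amp;tr=udp://tracker']
assert extract_magnet(links, seen) == 'magnet:?xt=urn:btih:abc'
assert extract_magnet(links, seen) is None  # duplicate is skipped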
Example #51
 def testRequestBuilderLimitNegativeOne(self):
     """ Should skip limit = 100 param if limit is set to -1
     """
     zot = z.Zotero("myuserID", "user", "myuserkey")
     zot.add_parameters(limit=-1, start=7)
     self.assertEqual(parse_qs("start=7&format=json"), parse_qs(zot.url_params))
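parse_qs is used in Example #51 purely to compare query strings without caring about parameter order; the trick in isolation:

from urlparse import parse_qs  # urllib.parse in Python 3

# Same parameters, different order: equal once parsed into dicts of lists.
assert parse_qs('start=7&format=json') == parse_qs('format=json&start=7')
assert parse_qs('start=7') != parse_qs('start=8')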
Example #52
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            posts = []

            if 'tvshowtitle' in data:
                query = '%s S%02d' % (data['tvshowtitle'], int(data['season']))
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

                referer = self.search_link2 % urllib.quote_plus(query)
                referer = urlparse.urljoin(self.search_base_link, referer)

                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.search_base_link, url)

                result = client.request(url,
                                        headers=self.search_header_link,
                                        referer=referer)
                try:
                    posts += json.loads(re.findall('({.+?})$',
                                                   result)[0])['results']
                except:
                    pass

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            referer = self.search_link2 % urllib.quote_plus(query)
            referer = urlparse.urljoin(self.search_base_link, referer)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.search_base_link, url)

            result = client.request(url,
                                    headers=self.search_header_link,
                                    referer=referer)
            try:
                posts += json.loads(re.findall('({.+?})$',
                                               result)[0])['results']
            except:
                pass

            links = []
            dupes = []

            for post in posts:
                try:
                    name = post['post_title']
                    url = post['post_name']

                    if url in dupes: raise Exception()
                    dupes.append(url)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if y.isdigit(): cat = 'movie'
                    elif 'S' in y and 'E' in y: cat = 'episode'
                    elif 'S' in y: cat = 'tvshow'

                    if cat == 'movie': hdlr = data['year']
                    elif cat == 'episode':
                        hdlr = 'S%02dE%02d' % (int(
                            data['season']), int(data['episode']))
                    elif cat == 'tvshow':
                        hdlr = 'S%02d' % int(data['season'])

                    if not y == hdlr: raise Exception()

                    items = []

                    content = post['post_content']

                    try:
                        items += zip([
                            i for i in client.parseDOM(content, 'p')
                            if 'Release Name:' in i
                        ], [
                            i for i in client.parseDOM(content, 'p')
                            if '<strong>Download' in i
                        ])
                    except:
                        pass

                    try:
                        items += client.parseDOM(content,
                                                 'p',
                                                 attrs={'style': '.+?'})
                    except:
                        pass

                    for item in items:
                        try:
                            if type(item) == tuple:
                                item = '######URL######'.join(item)

                            fmt = re.sub(
                                '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                                '', name.upper())
                            fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                            fmt = [i.lower() for i in fmt]

                            if any(
                                    i.endswith(('subs', 'sub', 'dubbed',
                                                'dub')) for i in fmt):
                                raise Exception()
                            if any(i in ['extras'] for i in fmt):
                                raise Exception()

                            if '1080p' in fmt: quality = '1080p'
                            elif '720p' in fmt: quality = 'HD'
                            else: quality = 'SD'
                            if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                                quality = 'SCR'
                            elif any(i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ] for i in fmt):
                                quality = 'CAM'

                            info = []

                            if '3d' in fmt: info.append('3D')

                            try:
                                if cat == 'tvshow': raise Exception()
                                size = re.findall(
                                    '(\d+(?:\.|/,|)\d+(?:\s+|)(?:GB|GiB|MB|MiB))',
                                    item)[0].strip()
                                div = 1 if size.endswith(
                                    ('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = item.rsplit('######URL######')[-1]
                            url = zip(client.parseDOM(url, 'a'),
                                      client.parseDOM(url, 'a', ret='href'))

                            for i in url:
                                links.append({
                                    'url': i[1],
                                    'quality': quality,
                                    'info': info,
                                    'host': i[0],
                                    'cat': cat
                                })
                        except:
                            pass

                except:
                    pass

            check = [i for i in links if not i['quality'] == 'CAM']
            if len(check) > 0: links = check

            for i in links:
                try:
                    url = i['url']
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if i['cat'] == 'tvshow':
                        if not i['quality'] in ['1080p', 'HD']:
                            raise Exception()
                        if not any(i['host'].lower() in x for x in hostprDict):
                            raise Exception()
                        url = client.request(url)
                        url = client.parseDOM(url, 'ol')[0]
                        url = client.parseDOM(
                            url, 'div',
                            attrs={'style': '.+?'})[int(data['episode']) - 1]

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': i['quality'],
                        'provider': 'releaseBB',
                        'url': url,
                        'info': i['info'],
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
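The quality detection repeated in Examples #42 and #52 boils down to a token scan over the release name; as a standalone sketch:

import re

def detect_quality(name):
    # Tokenise the release name and scan for resolution/rip markers.
    fmt = [i.lower() for i in re.split('\.|\(|\)|\[|\]|\s|\-', name)]
    if '1080p' in fmt:
        quality = '1080p'
    elif '720p' in fmt:
        quality = 'HD'
    else:
        quality = 'SD'
    if any(i in ('dvdscr', 'r5', 'r6') for i in fmt):
        quality = 'SCR'
    elif any(i in ('camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts',
                   'cam', 'telesync', 'ts') for i in fmt):
        quality = 'CAM'
    return quality

assert detect_quality('Some.Movie.2016.720p.BluRay') == 'HD'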
Example #53
 def test_default_override(self):
     email = '*****@*****.**'
     url = urlparse(gravatar.url(email=email, rating='x'))
     query = parse_qs(url.query)
     assert_equal(query, {'rating': ['x']})
##########################################################################
##########################################################################
###############################Lets boogie################################
##########################################################################
##########################################################################

# Initiate the session handler.
# A session is mandatory: Auth0 sets a CSRF cookie and we pick it up with this.
session = requests.Session()


# Like a good OAuth client, Auth0 sets a STATE parameter, even though we're using SAML and there's already a CSRF token.
# We need to get the state from the IdP entry URL and parse it out to use again later.
r = session.get(idpentryurl, allow_redirects=False)
t = r.headers['location']
state = parse_qs(t)['/login?state']
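# Hedged note on the odd-looking key: parse_qs treats its entire input as a
# query string, so the path part of the Location header ('/login') fuses into
# the first key. With a hypothetical redirect '/login?state=abc123':
#   parse_qs('/login?state=abc123')  ->  {'/login?state': ['abc123']}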

# Programmatically get the SAML assertion:
# open the initial IdP URL, follow all of the HTTP 302 redirects, and land on
# the resulting login page.
formresponse = session.get(idpentryurl)



# Parse the response and extract all the necessary values
# in order to build a dictionary of all of the form values the IdP expects
formsoup = BeautifulSoup(formresponse.text, "html.parser")  # .text is already unicode; no .decode() needed
payload = {}

for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):
    name = inputtag.get('name', '')
    value = inputtag.get('value', '')
    # Assumption: the snippet is truncated here; the usual pattern copies each
    # form field into the payload before the credentials are filled in.
    payload[name] = value
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            content_type = 'episode' if 'tvshowtitle' in data else 'movie'

            years = (data['year'], str(int(data['year'])+1), str(int(data['year'])-1))

            if content_type == 'movie':
                title = cleantitle.get(data['title'])
                localtitle = cleantitle.get(data['localtitle'])
                ids = [data['imdb']]

                r = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle", "file"]}, "id": 1}' % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['movies']

                r = [i for i in r if str(i['imdbnumber']) in ids or title in [cleantitle.get(i['title'].encode('utf-8')), cleantitle.get(i['originaltitle'].encode('utf-8'))]]
                r = [i for i in r if not i['file'].encode('utf-8').endswith('.strm')][0]

                r = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails", "file"], "movieid": %s }, "id": 1}' % str(r['movieid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['moviedetails']
            elif content_type == 'episode':
                title = cleantitle.get(data['tvshowtitle'])
                localtitle = cleantitle.get(data['localtvshowtitle'])
                season, episode = data['season'], data['episode']
                ids = [data['imdb'], data['tvdb']]

                r = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title"]}, "id": 1}' % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['tvshows']

                r = [i for i in r if str(i['imdbnumber']) in ids or title in [cleantitle.get(i['title'].encode('utf-8')), cleantitle.get(i['originaltitle'].encode('utf-8'))]][0]

                r = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file"], "tvshowid": %s }, "id": 1}' % (str(season), str(episode), str(r['tvshowid'])))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodes']

                r = [i for i in r if not i['file'].encode('utf-8').endswith('.strm')][0]

                r = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails", "file"], "episodeid": %s }, "id": 1}' % str(r['episodeid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodedetails']

            url = r['file'].encode('utf-8')

            try: quality = int(r['streamdetails']['video'][0]['width'])
            except: quality = -1

            # map the stream's pixel width to a quality label
            if quality >= 2160: quality = '4K'
            elif quality >= 1440: quality = '1440p'
            elif quality >= 1080: quality = '1080p'
            elif quality >= 720: quality = 'HD'
            else: quality = 'SD'

            info = []
            try:
                f = control.openFile(url) ; s = f.size() ; f.close()
                s = '%.2f GB' % (float(s)/1024/1024/1024)
                info.append(s)
            except:
                pass
            try:
                e = urlparse.urlparse(url).path.split('.')[-1].upper()
                info.append(e)
            except:
                pass
            info = ' | '.join(info)
            info = info.encode('utf-8')

            sources.append({'source': '0', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'local': True, 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #56
0
 def ytid(self, url):
     """Get YouTube's video ID for the specified video"""
     return urlparse.parse_qs(urlparse.urlparse(url).query)["v"][0]
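     # Hedged usage sketch (hypothetical URL): the "v" query parameter of a
     # watch URL is the video ID, e.g.
     #   ytid('https://www.youtube.com/watch?v=dQw4w9WgXcQ')  ->  'dQw4w9WgXcQ'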
Example #57
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            try:
                r = client.request(url)
                links = zip(
                    client.parseDOM(
                        r,
                        'a',
                        attrs={
                            'class':
                            'btn btn-default magnet-button stats-action banner-button'
                        },
                        ret='href'),
                    client.parseDOM(r, 'td', attrs={'class': 'size'}))

                for link in links:
                    url = link[0].replace('&amp;', '&')
                    url = re.sub(r'(&tr=.+)&dn=', '&dn=',
                                 url)  # some bitlord links put &tr= before &dn=
                    url = url.split('&tr=')[0]
                    if 'magnet' not in url:
                        continue

                    size = int(link[1])

                    if any(x in url.lower() for x in [
                            'french', 'italian', 'spanish', 'truefrench',
                            'dublado', 'dubbed'
                    ]):
                        continue

                    name = url.split('&dn=')[1]
                    t = name.split(hdlr)[0].replace(data['year'], '').replace(
                        '(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        if size < 5.12: raise Exception()
                        size = float(size) / 1024
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

                return sources

            except:
                source_utils.scraper_error('BITLORD')
                return sources

        except:
            source_utils.scraper_error('BITLORD')
            return sources
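# Hedged illustration of the magnet clean-up above (the link is hypothetical):
import re
magnet = 'magnet:?xt=urn:btih:abc123&tr=udp://tracker.example/ann&dn=Some.Show.S01E01.1080p'
magnet = re.sub(r'(&tr=.+)&dn=', '&dn=', magnet)  # move &dn= in front of any &tr= run
magnet = magnet.split('&tr=')[0]                  # drop the trailing tracker list
print magnet.split('&dn=')[1]                     # -> 'Some.Show.S01E01.1080p'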
Example #58
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if self.user != '' and self.password != '':

                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({
                    'username': self.user,
                    'password': self.password,
                    'submit': 'Login'
                })

                cookie = client.request(login,
                                        post=post,
                                        output='cookie',
                                        close=False)

                r = client.request(login,
                                   post=post,
                                   cookie=cookie,
                                   output='extended')

                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
                headers = {}

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                query = urlparse.urljoin(
                    self.base_link, self.search_link %
                    urllib.quote_plus(cleantitle.getsearch(title)))
                query2 = urlparse.urljoin(
                    self.base_link,
                    self.search_link % re.sub('\s', '+', title))
                r = client.request(query)
                r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
                if len(r) == 0:
                    r = client.request(query2)
                    r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a', ret='title'),
                        client.parseDOM(r, 'a', ret='data-url'))

                if 'tvshowtitle' in data:
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title +
                                              'season%02d' % int(season))
                else:
                    cltitle = cleantitle.get(title)
                    cltitle2 = cltitle  # movies have no season variant

                r = [
                    i for i in r if cltitle == cleantitle.get(i[1])
                    or cltitle2 == cleantitle.get(i[1])
                ]
                id = [re.findall('/(\d+)$', i[2])[0] for i in r][0]

                ajx = urlparse.urljoin(self.base_link,
                                       '/ajax/movie_episodes/' + id)

                r = client.request(ajx)
                if 'episode' in data:
                    eids = re.findall(
                        r'title=\\"Episode\s+%02d.*?data-id=\\"(\d+)' %
                        int(episode), r)
                else:
                    eids = re.findall(r'title=.*?data-id=\\"(\d+)', r)

                for eid in eids:
                    try:
                        ajx = 'ajax/movie_token?eid=%s&mid=%s&_=%d' % (
                            eid, id, int(time.time() * 1000))
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        [x, y] = re.findall(r"_x='([^']+)',\s*_y='([^']+)'",
                                            r)[0]
                        ajx = 'ajax/movie_sources/%s?x=%s&y=%s' % (eid, x, y)
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        r = json.loads(r)
                        r = r['playlist'][0]['sources']
                        for i in r:
                            try:
                                label = source_utils.label_to_quality(
                                    i['label'])
                            except:
                                label = 'SD'
                            sources.append({
                                'source': 'cdn',
                                'quality': label,
                                'language': 'en',
                                'url': i['file'],
                                'direct': True,
                                'debridonly': False
                            })
                    except:
                        pass

            return sources
        except:
            return sources
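# Hedged recap of the handshake in Example #58 above: /ajax/movie_token returns
# obfuscated _x/_y values embedded in JavaScript, and those values must be
# echoed back to /ajax/movie_sources to receive the playlist JSON holding the
# stream URLs. Endpoint paths are taken from the code above, not a public API.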
Example #59
0
                                    isFolder=True)

    xbmcplugin.endOfDirectory(addon_handle)


if len(sys.argv[2]) == 0:
    # create the data folder if it doesn't exist
    data_path = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile'))
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    if not os.path.exists(getAuthorizationFile()):
        authorize()

    mainMenu()
else:
    values = urlparse.parse_qs(sys.argv[2][1:])
    if 'video' in values and values['video'][0] == 'True':
        playShow(values)
    elif 'menu' in values:
        menu = values['menu'][0]
        if menu == LIVE_CHANNELS:
            liveChannelsMenu()
        elif menu == LIVE_PROGRAMS:
            liveProgramsMenu()
        elif menu == SHOWS:
            showsMenu(values)
    elif 'smil' in values:
        smil = values['smil'][0]
        labels = dict(urlparse.parse_qsl(values['labels'][0]))
        image = values['image'][0]
        playSmil(smil, labels, image)
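# Hedged note on the routing above: in a Kodi plugin, sys.argv[2] carries the
# plugin URL's query string including the leading '?', hence the [1:] slice.
#   urlparse.parse_qs('?menu=shows&video=True'[1:])
#       -> {'menu': ['shows'], 'video': ['True']}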
Example #60
0
    def test_reauth(self):
        """Test re-authentication redirection sequence."""
        response = self.app.get(
            self.url('controls.auth'),
            extra_environ=self.default_environ,
            status=200,
        )
        assert 'selected value="disabled"' in response, 'Expected existing cert_auth value of "disabled" not found.'

        # Pretend to try to change our cert_auth options while our auth is too old
        environ = copy.deepcopy(self.default_environ)
        environ['tests.auth_trust'] = ['openid']
        response = self.app.post(
            self.url('controls.auth'),
            params=[('cert_auth', u'allowed')],
            extra_environ=environ,
            status=303,
        )
        # We should be redirected to the login/re-auth page
        try:
            return_key = parse_qs(urlparse(
                response.headers['location'])[4])['return_key'][0]
        except KeyError:
            raise AssertionError('Return key not in login GET request.')
        response = self.app.get(response.headers['location'],
                                extra_environ=environ)
        assert 'need to re-authenticate' in response, 'Did not appear to give a message about the need to re-authenticate.'
        assert 'name="openid_identifier"' in response, 'Did not appear to prompt for OpenID URL.'
        assert 'value="{0}"'.format(
            return_key
        ) in response, 'Could not find the return key hidden input in the login page.'

        # Spoof a successful OpenID login
        spoofer = self.spoofer
        spoofer.update(user=u'user', accept=True)
        user = model.session.query(model.User).filter_by(id=self.user.id).one()
        idurl = model.IdentityURL()
        idurl.url = spoofer.url
        user.identity_urls.append(idurl)
        model.session.flush()
        response = self.app.post(
            self.url('account.login_begin'),
            params=[
                ('return_key', return_key),
                ('openid_identifier', idurl.url),
            ],
            extra_environ=environ,
            status=303,
        )
        location = response.headers['location']
        try:
            return_url = parse_qs(urlparse(location)[4])['openid.return_to'][0]
        except KeyError:
            raise AssertionError('Return URL not in OpenID OP login request.')
        path, params = spoofer.spoof(location)
        assert path == self.url('account.login_finish'
                                ), 'Unexpected redirect path: {0}'.format(path)
        assert 'return_key={0}'.format(
            return_key
        ) in params, 'Return key did not appear in the OpenID redirect URL.'
        #TODO actually make the re-auth influence the trust status
        response = self.app.get(
            path,
            params=params,
            extra_environ=environ,
            status=303,
        )

        # We should now be redirected to the Authentication Options page,
        # with the contents of our original POST set as the
        # default/selected parameters (but the actual change should not yet
        # have taken place).

        # XXX prefer to avoid setting this explicitly
        environ['tests.auth_trust'].append('openid_recent')
        pu = urlparse(response.headers['location'])
        path, params = pu[2], pu[4]
        assert path == self.url(
            'controls.auth'), 'Unexpected redirect path: {0}'.format(path)
        assert 'return_key={0}'.format(
            return_key
        ) in params, 'Return key did not appear in the post-re-auth redirect URL.'

        # Submission should have been turned into a POST, so we should get
        # another redirect
        response = self.app.get(
            path,
            params=params,
            extra_environ=environ,
            status=303,
        )
        pu = urlparse(response.headers['location'])
        path, params = pu[2], pu[4]
        assert path == self.url(
            'controls.auth'), 'Unexpected redirect path: {0}'.format(path)

        response = self.app.get(
            path,
            params=params,
            extra_environ=environ,
            status=200,
        )
        cert_auth = model.session.query(
            model.User).filter_by(id=self.user.id).one().cert_auth
        assert cert_auth != u'disabled', 'Failed to automatically submit a form after an OpenID re-auth detour.'
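        # Hedged illustration of the extraction pattern used throughout this
        # test (URL is hypothetical): index 4 of the urlparse() 6-tuple is the
        # query string.
        #   parse_qs(urlparse('http://host/login?return_key=abc')[4])
        #       -> {'return_key': ['abc']}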