Example #1
def parse(url, body, **kwargs):
    base, qstr = url.split('?')
    qstr = dict(urlparse.parse_qsl(qstr))
    obj = json.loads(body)['query']['results']['json']
    dom = html.fromstring(obj['items_html'])
    items = []

    for e in dom.xpath('//li[@data-item-type="tweet"]'):
        url = 'https://twitter.com' + e.xpath(
            './/a[contains(@class, "detail")]/@href')[0]
        date = datetime.utcfromtimestamp(
            int(e.xpath('.//small[@class="time"]//span/@data-time')[0]))
        content = e.xpath(
            './/p[contains(@class, "tweet-text")]')[0].text_content()
        author = e.xpath('.//span[contains(@class, "username")]/b/text()')[0]
        items.append({
            'url': url,
            'date': date,
            'author': author,
            'content': content
        })

    if obj['has_more_items']:
        cursor = obj['scroll_cursor']
        qstr['q'] = parse_yql(qstr['q'], cursor)
        next_page = base + '?' + urlencode(qstr)
    else:
        next_page = None

    return tpl.render(items=items, next_page=next_page)
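
For reference, parse_qsl returns the query as a list of (key, value) pairs, which is why examples like this one wrap it in dict(...) before updating and re-encoding it. A minimal round-trip sketch using the same Python 2 modules these examples import:

import urlparse
from urllib import urlencode

qstr = dict(urlparse.parse_qsl('q=python&page=1'))
print(qstr)                           # {'q': 'python', 'page': '1'} (order may vary)
qstr['page'] = int(qstr['page']) + 1  # parse_qsl always yields string values
print(urlencode(qstr))                # e.g. 'q=python&page=2'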
Example #2
    def _parse_argv(self):
        try:
            path = sys.argv[2]
            self.params = dict(urlparse.parse_qsl(path[1:]))
            self.widget_handle = int(sys.argv[1])
        except Exception:
            self.params = {}
Example #3
def set_language_ex(request):
    next = request.POST.get('next', request.GET.get('next'))
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next, host=request.get_host()):
            next = '/'

    # remove lang from query
    scheme, netloc, path, query, fragment = urlparse.urlsplit(next)
    parsed_query = urlparse.parse_qsl(query)
    altered = False
    for k, v in parsed_query[:]:
        if LANG_GET_KEY == k:
            parsed_query.remove((k, v))
            altered = True
    if altered:
        query = urllib.urlencode(parsed_query)
        next = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

    response = http.HttpResponseRedirect(next)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            if hasattr(request, 'session'):
                request.session[LANGUAGE_SESSION_KEY] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
                                    max_age=settings.LANGUAGE_COOKIE_AGE,
                                    path=settings.LANGUAGE_COOKIE_PATH,
                                    domain=settings.LANGUAGE_COOKIE_DOMAIN)
    return response
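
The parameter-stripping dance above (split the URL, filter the pair list, re-encode, unsplit) generalizes to any key. A compact helper along the same lines, sketched here rather than taken from Django:

import urlparse
from urllib import urlencode

def strip_param(url, key):
    # drop every occurrence of `key` from the URL's query string
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    pairs = [(k, v) for k, v in urlparse.parse_qsl(query) if k != key]
    return urlparse.urlunsplit((scheme, netloc, path, urlencode(pairs), fragment))

print(strip_param('/home?lang=de&x=1', 'lang'))  # /home?x=1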
Example #4
    def tonzbget(self, args, hname):

        if ('data' not in args):
            return 0

        if ('nzbget_url' in self.cgen):
            if (len(self.cgen['nzbget_url'])):
                rq_url = 'http://' + self.cgen['nzbget_user'] + ':' + self.cgen['nzbget_pwd'] + '@' + self.cgen[
                    'nzbget_url'] + '/xmlrpc'
                print rq_url
                try:
                    server = ServerProxy(rq_url)
                except Exception as e:
                    print 'Error contacting NZBGET ' + str(e)
                    return 0

                try:
                    myrq = args['data'].replace("warp?", "")
                    pulrlparse = dict(urlparse.parse_qsl(myrq))
                    if ('m' in args):
                        pulrlparse['m'] = args['m']

                    # ~ print pulrlparse
                    res = self.wrp.beam(pulrlparse)
                    # ~ print res.headers

                    if ('Location' in res.headers):
                        # ~ for redirect
                        log.info('tonzbget: Warp is treated as 302 redirector')
                        geturl_rq = res.headers['Location']
                        r = requests.get(geturl_rq, verify=False)
                        nzbname = 'nzbfromNZBmegasearcH'
                        if ('content-disposition' in r.headers):
                            rheaders = r.headers['content-disposition']
                            idxsfind = rheaders.find('=')
                            if (idxsfind != -1):
                                nzbname = rheaders[idxsfind + 1:len(rheaders)].replace('"', '')
                        nzbcontent64 = standard_b64encode(r.content)
                        server.append(nzbname, '', False, nzbcontent64)
                    else:
                        # ~ for downloaded
                        log.info('tonzbget: Warp gets full content')
                        nzbname = 'nzbfromNZBmegasearcH'
                        if ('content-disposition' in res.headers):
                            rheaders = res.headers['content-disposition']
                            idxsfind = rheaders.find('=')
                            if (idxsfind != -1):
                                nzbname = rheaders[idxsfind + 1:len(rheaders)].replace('"', '')
                        # ~ print res.data
                        nzbcontent64 = standard_b64encode(res.data)
                        server.append(nzbname, '', False, nzbcontent64)


                except Exception as e:
                    # ~ print 'Error connecting server or downloading nzb '+str(e)
                    log.info('Error connecting server or downloading nzb: ' + str(e))
                    return 0

                return 1
Example #5
    def make_requests_from_url(self, url):
        kw = self.macro.query(url)
        us = urlparse.urlsplit(url)
        qstr = dict(urlparse.parse_qsl(us.query))
        base = urlparse.urlunsplit(us._replace(query=''))
        meta = {'keyword': kw}
        return FormRequest(base, formdata=qstr, method=self.start_method,
                           headers=self.headers, cookies=self.cookies,
                           dont_filter=True, meta=meta)
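
urlsplit returns a namedtuple, so us._replace(query='') yields the same URL minus its query string, while parse_qsl captures the parameters separately for Scrapy's FormRequest to resubmit as form data. For instance:

import urlparse

us = urlparse.urlsplit('http://example.com/search?q=foo&page=2')
print(urlparse.urlunsplit(us._replace(query='')))  # http://example.com/search
print(dict(urlparse.parse_qsl(us.query)))          # {'q': 'foo', 'page': '2'}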
Example #6
def generate_urls(obj, macro):
    try:
        if type(obj)==list:
            for url in obj:
                yield macro.expand(url)

        elif type(obj)==dict:
            base = macro.expand(obj['base'].encode('utf-8'))
            us = urlparse.urlsplit(base)
            qstr = dict(urlparse.parse_qsl(us.query))
            qstr.update(obj.get('qstr', {}))
            base = urlparse.urlunsplit(us._replace(query=''))

            for k,v in qstr.iteritems():
                if type(v)==dict and type(v['val'])==unicode:
                    v = v['val'].encode(v.get('enc', 'utf-8'), errors='ignore')
                qstr[k] = macro.expand(v)

            if 'keywords' in obj:
                kw_obj = obj['keywords']

                sub = kw_obj.get('sub')
                if sub:
                    frm = sub.get('from')
                    to = sub.get('to')
                    sub = functools.partial(re.sub, frm, to)
                else:
                    sub = lambda x:x

                for kw in load_keywords(kw_obj):

                    if kw==MAGIC:
                        yield 'http://0.0.0.0'
                        continue

                    key = kw_obj['name'].encode('utf-8')
                    val = kw
                    col = kw_obj.get('col', 0)
                    sep = kw_obj.get('sep')
                    if col>0:
                        val = val.split(sep)[col-1]
                    val = sub(val)
                    if kw_obj.get('query', True):
                        qstr.update({key:val})
                        url = base+'?'+urlencode(qstr)
                    else:
                        val = val.encode(kw_obj.get('enc', 'utf-8'), errors='ignore') if type(val)==unicode else str(val)
                        url = base.replace(key, val)+'?'+urlencode(qstr)
                    macro.update({'sep':sep})
                    macro.bind(url, kw)
                    yield url
            else:
                url = base+'?'+urlencode(qstr)
                yield url

    except Exception as ex:
        log.msg(u'cannot generate urls: {}'.format(ex), level=log.ERROR)
        raise CloseSpider()
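
The sub helper above is just re.sub with its pattern and replacement pre-bound via functools.partial, so each keyword only supplies the string to rewrite. A tiny sketch with a made-up pattern:

import functools
import re

sub = functools.partial(re.sub, r'\s+', '-')
print(sub('new york city'))  # new-york-city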
Example #7
    def serve(self, f, environ, start_response, download=False, **kwargs):
        pkg = f[1]
        table = f[2]
        field = f[3]
        pkey = f[4]
        url = self.site.db.table('%s.%s' % (pkg, table)).readColumns(
            columns='$%s' % field, pkey=pkey)
        p = urlparse.urlparse(url)
        urlkwargs = dict(urlparse.parse_qsl(p.query))
        kwargs.update(urlkwargs)
        print 'URL:', url, '\nRESULT:', p.path.split('/')[1:], '\nkwargs', kwargs
        return super(VolumesStaticHandler, self).serve(
            p.path.split('/')[1:], environ, start_response,
            download=download, **kwargs)
Example #8
def parse_yql(yql, cursor):
    url = re.search(r"""https://[^'"]*""", yql).group(0)
    base, qstr = url.split('?')
    qstr = dict(urlparse.parse_qsl(qstr))
    qstr.update({
        'scroll_cursor': cursor,
        'src': 'typd',
        'mode': 'relevance',
        'include_available_features': 1,
        'include_entities': 1,
        'page': int(qstr.get('page', 1)) + 1
    })
    url = base + '?' + urlencode(qstr)
    yql = re.sub(r"""https://[^'"]*""", url, yql)
    return yql
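
Here parse_qsl operates on a URL embedded inside a larger YQL string: a regex pulls the URL out, the query dict is updated with the scroll cursor and a bumped page number, and re.sub splices the rewritten URL back in. A toy illustration with an invented YQL string (safe here because the urlencoded replacement contains no backslashes):

import re

yql = 'select * from json where url="https://example.com/search?page=1"'
url = re.search(r"""https://[^'"]*""", yql).group(0)
print(url)  # https://example.com/search?page=1
print(re.sub(r"""https://[^'"]*""", url + '&mode=relevance', yql))
# ... url="https://example.com/search?page=1&mode=relevance"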
Example #9
    def testLinkGen(self):
        tmpl = Template("{% load amazon_fps from amazon_fps_tags %}{% amazon_fps obj %}")
        html = tmpl.render(Context({"obj": self.fps}))
        # get the integration link url
        dom = minidom.parseString(html)
        url = dom.getElementsByTagName("a")[0].attributes["href"].value
        parsed = urlparse.urlparse(url)
        query_dict = dict(urlparse.parse_qsl(parsed.query))

        self.assertEquals(parsed.scheme, "https")
        self.assertEquals(parsed.netloc, "authorize.payments-sandbox.amazon.com")
        self.assertEquals(parsed.path, "/cobranded-ui/actions/start")

        self.assertDictContainsSubset(self.fields, query_dict)
        self.assertEquals(query_dict["callerKey"], settings.MERCHANT_SETTINGS["amazon_fps"]["AWS_ACCESS_KEY"])
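
Converting the query into a dict makes the assertions independent of parameter order, which urlencode does not guarantee. The same pattern outside a test case, with hypothetical values:

import urlparse

url = 'https://example.com/pay?amount=10&currency=USD'
parsed = urlparse.urlparse(url)
query_dict = dict(urlparse.parse_qsl(parsed.query))
assert query_dict['amount'] == '10'
assert query_dict['currency'] == 'USD'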
Example #10
def makeRequest(context, request):
    method = request.params.get("httpMethod")
    url = request.params.get("url")
    headers_qs = request.params.get("headers", "")
    headers = dict(urlparse.parse_qsl(headers_qs))
    data = request.params.get("postData")

    requester = getattr(requests, method.lower())
    resp = requester(url, headers=headers, data=data)

    response = Response(resp.text)
    response.status_int = resp.status_code
    response.headers.update(resp.headers)
    print "finished"
    return response
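
In this proxy view the caller ships the outgoing HTTP headers inside a single request parameter, encoded as an ordinary query string, so the usual parse_qsl round trip recovers them. A sketch of that encoding (header names and values are made up):

import urlparse
from urllib import urlencode

headers_qs = urlencode({'Accept': 'text/html', 'X-Token': 'abc123'})
print(headers_qs)                            # e.g. Accept=text%2Fhtml&X-Token=abc123
print(dict(urlparse.parse_qsl(headers_qs)))  # {'Accept': 'text/html', 'X-Token': 'abc123'}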
Example #11
    def testLinkGen(self):
        tmpl = Template("{% load render_integration from billing_tags %}{% render_integration obj %}")
        html = tmpl.render(Context({"obj": self.fps}))
        # get the integration link url
        dom = minidom.parseString(html)
        url = dom.getElementsByTagName('a')[0].attributes['href'].value
        parsed = urlparse.urlparse(url)
        query_dict = dict(urlparse.parse_qsl(parsed.query))

        self.assertEquals(parsed.scheme, 'https')
        self.assertEquals(parsed.netloc, 'authorize.payments-sandbox.amazon.com')
        self.assertEquals(parsed.path, '/cobranded-ui/actions/start')

        self.assertDictContainsSubset(self.fields, query_dict)
        self.assertEquals(query_dict['callerKey'], settings.MERCHANT_SETTINGS['amazon_fps']['AWS_ACCESS_KEY'])
Example #12
    def _parse_argv(self):
        base_url = sys.argv[0]
        path = sys.argv[2]

        try:
            args = path[1:]
            self.params = dict(urlparse.parse_qsl(args))
            # workaround to get the correct values for titles with special
            # characters, i.e. those wrapped as title='"..."'
            if 'title=\'\"' in args and '\"\'' in args:
                start_pos = args.find('title=\'\"')
                end_pos = args.find('\"\'')
                clean_title = args[start_pos + 8:end_pos]
                self.params['title'] = clean_title

        except Exception:
            self.params = {}
Example #13
def querystring_parse(querystring="", url="", parse_url=False):
    """解析查询字符串成tuple列表

    :arg querystring: a querystring
    """
    from urllib2 import urlparse
    if parse_url:
        querystring = urlparse.urlparse(url).query

    queryparams = urlparse.parse_qsl(querystring)
    for i, (k, v) in enumerate(queryparams):
        if k == 'msp':
            queryparams.extend(querystring_parse(v))
        elif k == 'mscn':
            try:
                v1 = v.decode('utf-8')
                queryparams.append((k, v1))
            except Exception as e:
                queryparams.append((k, u'unknown'))
                print(v, str(e))
            finally:
                pass  # the original snippet breaks off here
    return queryparams  # assumed return value, implied by the docstring
Example #14
def generate_urls(obj, macro):
    try:
        if type(obj) == list:
            for url in obj:
                yield macro.expand(url)

        elif type(obj) == dict:
            base = macro.expand(obj['base'].encode('utf-8'))
            us = urlparse.urlsplit(base)
            qstr = dict(urlparse.parse_qsl(us.query))
            qstr.update(obj.get('qstr', {}))
            base = urlparse.urlunsplit(us._replace(query=''))

            for k, v in qstr.iteritems():
                if type(v) == dict and type(v['val']) == unicode:
                    v = v['val'].encode(v.get('enc', 'utf-8'), errors='ignore')
                qstr[k] = macro.expand(v)

            if 'keywords' in obj:
                kw_obj = obj['keywords']
                for kw in load_keywords(kw_obj):
                    key = kw_obj['name'].encode('utf-8')
                    val = kw.encode(
                        kw_obj.get('enc', 'utf-8'),
                        errors='ignore') if type(kw) == unicode else str(kw)
                    if kw_obj.get('query', True):
                        qstr.update({key: val})
                        url = base + '?' + urlencode(qstr)
                    else:
                        url = base.replace(key, val) + '?' + urlencode(qstr)
                    yield url
            else:
                url = base + '?' + urlencode(qstr)
                yield url

    except Exception as ex:
        log.msg(u'cannot generate urls: {}'.format(ex), level=log.ERROR)
        raise CloseSpider()
Example #15
    def _parse_argv(self):
        try:
            args = sys.argv[1]
            self.params = dict(urlparse.parse_qsl(args))
        except Exception:
            self.params = {}
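
The _parse_argv helpers in this collection come from Kodi/XBMC add-ons, where the host passes the plugin URL, an integer handle, and a query string through sys.argv; the exact positions vary with how the add-on is invoked, hence sys.argv[1] here versus sys.argv[2][1:] elsewhere. A standalone sketch with hypothetical argv values:

import urlparse

argv = ['plugin://plugin.video.demo/', '5', '?action=play&id=42']
params = dict(urlparse.parse_qsl(argv[2][1:]))  # strip the leading '?'
print(params)  # {'action': 'play', 'id': '42'}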
Example #16
def qs_to_dict(qs):
    return dict(urlparse.parse_qsl(qs))
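
One caveat that applies to every dict(urlparse.parse_qsl(...)) in this collection: when a key repeats, only the last value survives the dict conversion, so keep the pair list if duplicates matter:

import urlparse

pairs = urlparse.parse_qsl('tag=a&tag=b')
print(pairs)        # [('tag', 'a'), ('tag', 'b')]
print(dict(pairs))  # {'tag': 'b'} - the earlier value is silently lost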
Example #17
                    set_label(dns_server["ip"] + " [" +
                              dns_server["country_name"] + ", " +
                              dns_server["asn"] + "]")

                else:
                    set_label(dns_server["ip"] + " [" +
                              dns_server["country_name"] + "]")

            else:
                set_label(str(dns_server["ip"]))


if __name__ == "__main__":

    paramstring = sys.argv[2][1:]
    params = dict(urlparse.parse_qsl(paramstring))

    #################################
    #           1st Start           #
    #################################
    if params == {}:
        xbmcplugin.setPluginCategory(_handle, "category")
        xbmcplugin.setContent(_handle, "videos")

        leak_id = randint(1000000, 9999999)
        for x in range(0, 10):
            ping(".".join([str(x), str(leak_id), "bash.ws"]))

        url = "https://bash.ws/dnsleak/test/" + str(leak_id) + "?json"
        response = requests.get(url)
        parsed_data = json.loads(response.content)
Example #18
    def download_hook(self, downloadurl, urlidx):
        resinfo = {}
        resinfo['headers'] = []
        resinfo['content'] = []
        resinfo['url'] = ''
        resinfo['id'] = urlidx

        #~ try cache hit
        ret = self.checkcache(downloadurl)
        if (ret != -1):
            resinfo = {}
            with open(self.collect_info[ret]['fname'], 'rt') as fp:
                resinfo['content'] = fp.read()
            resinfo['url'] = self.collect_info[ret]['url']
            resinfo['headers'] = self.collect_info[ret]['headers']
            #~ overwrites the old id with the new one
            resinfo['id'] = urlidx
            print 'Nzb analyzer: cache hit'
            self.nzbdata.append(resinfo)
            return resinfo
        #~ download it
        try:
            if (len(downloadurl)):
                resinfo['url'] = downloadurl
                myrq = downloadurl.replace("warp?", "")
                pulrlparse = dict(urlparse.parse_qsl(myrq))

                #~ print pulrlparse
                #~ create context for threaded download

                with self.app.test_request_context():
                    res = self.wrp_localcpy[urlidx].beam(pulrlparse)

                if ((res is None) or (hasattr(res, 'headers') == False)):
                    return resinfo

                if ('Location' in res.headers):
                    #~ for redirect
                    geturl_rq = res.headers['Location']
                    r = requests.get(geturl_rq, verify=False)
                    nzbname = 'nzbfromNZBmegasearcH'
                    if ('content-disposition' in r.headers):
                        rheaders = r.headers['content-disposition']
                        resinfo['headers'] = r.headers['content-disposition']
                        idxsfind = rheaders.find('=')
                        if (idxsfind != -1):
                            nzbname = rheaders[idxsfind +
                                               1:len(rheaders)].replace(
                                                   '"', '')

                    resinfo['content'] = r.content.encode('utf-8')

                    self.nzbdata.append(resinfo)

                    #~ saves it for caching
                    #~ saves it for caching in case that filesize < 20M
                    if (len(resinfo['content']) < self.MAX_ALLOWED_CACHE):
                        self.checkcache_makespace()
                        f = tempfile.NamedTemporaryFile(delete=False)
                        cached_info = {}
                        cached_info['url'] = resinfo['url']
                        cached_info['headers'] = resinfo['headers']
                        cached_info['fname'] = f.name
                        f.write(resinfo['content'])
                        f.close()
                        self.collect_info.append(cached_info)
                    return resinfo

                else:
                    nzbname = 'nzbfromNZBmegasearcH'
                    if ('content-disposition' in res.headers):
                        rheaders = res.headers['content-disposition']
                        resinfo['headers'] = rheaders
                        idxsfind = rheaders.find('=')
                        if (idxsfind != -1):
                            nzbname = rheaders[idxsfind +
                                               1:len(rheaders)].replace(
                                                   '"', '')

                    resinfo['content'] = res.data.encode('utf-8')
                    self.nzbdata.append(resinfo)

                    #~ saves it for caching in case that filesize < 20M
                    if (len(resinfo['content']) < self.MAX_ALLOWED_CACHE):
                        self.checkcache_makespace()
                        f = tempfile.NamedTemporaryFile(delete=False)
                        cached_info = {}
                        cached_info['url'] = resinfo['url']
                        cached_info['headers'] = resinfo['headers']
                        cached_info['fname'] = f.name
                        f.write(resinfo['content'])
                        f.close()
                        self.collect_info.append(cached_info)

                    return resinfo

        except Exception as e:
            log.info('Error downloading nzb: ' + str(e))
            return resinfo

        return resinfo
Example #19
from urllib2 import urlparse
import webbrowser

consumer = oauth.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
client = oauth.Client(consumer)
client.ca_certs = certifi.where()

# Step 1: Get a request token. This is a temporary token that is used for 
# having the user authorize an access token and to sign the request to obtain 
# said access token.

resp, content = client.request(REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
    raise Exception("Invalid response %s." % resp['status'])

request_token = dict(urlparse.parse_qsl(content))

# these are intermediate tokens and not needed later
#print "Request Token:"
#print "    - oauth_token        = %s" % request_token['oauth_token']
#print "    - oauth_token_secret = %s" % request_token['oauth_token_secret']
#print 

# Step 2: Redirect to the provider. Since this is a CLI script we do not 
# redirect. In a web application you would redirect the user to the URL
# below, specifying the additional parameter oauth_callback=<your callback URL>.

webbrowser.open("%s?oauth_token=%s" % (
        AUTHORIZE_URL, request_token['oauth_token']))

# Since we didn't specify a callback, the user must now enter the PIN displayed in