def get_lat_lon_from_short_uri(suri): headers = {} #{'Content-Type' : 'application/x-www-form-urlencoded'} url = "https://www.googleapis.com/urlshortener/v1/url?" geourl = "https://maps.googleapis.com/maps/api/geocode/json?" params = { 'shortUrl' : suri, 'key' : 'AIzaSyCPzFQxhmB_eauqMHIzmU7BkLTOPb89bqU' } json_response = requests.get(url, params=params, headers=headers) lurl = json_response.json().get('longUrl', "Error") if lurl.startswith('https://maps.app.goo.gl/?link='): lurl = lurl[len('https://maps.app.goo.gl/?link='):] uqurl = urllib.unquote(lurl) # .decode('utf8') if uqurl.startswith('https://www.google.com/maps/place/'): urlquery = urllib.splitquery(uqurl)[0] if urlquery.find('/data'): data = urlquery.split('/data')[1].split('!') lat, lon = data[-2][2:], data[-1][2:].split('?')[0] if lat != '1': return lat, lon elif uqurl.startswith('https://maps.google.com/?q='): urlquery = urllib.splitquery(uqurl) urlparams = urlparse.parse_qs(urlquery[1])['q'][0] params['address'] = urlparams del params['shortUrl'] json_result = requests.get(geourl, params=params, headers=headers).json() location = json_result["results"][0]["geometry"]["location"] return location["lat"], location["lng"] print "lurl needs more analysis", lurl return 'FIX_LAT', 'FIX_LON'
def __connect(self, verb, url, payload={}):
    # Perform a signed HTTP request against the Cerb API and return the
    # decoded JSON response body.
    # NOTE(review): the mutable default `payload={}` is rebound (not
    # mutated) below, so sharing across calls is harmless here.
    headers = {
        'Date': self.__getRFC822Date(),
        'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
    }
    splitUrl = urllib.splitquery(url)
    path = splitUrl[0]
    if splitUrl[1] is not None:
        query = splitUrl[1]
        fullUrl = '%s/%s.%s?%s' % (self.base, path, self.ext, query)
    else:
        query = ''
        fullUrl = '%s/%s.%s' % (self.base, path, self.ext)
    verb = verb.upper()
    http = httplib2.Http()
    # Building the Authentication
    md5 = hashlib.md5()
    md5.update(self.secretKey)
    secret = md5.hexdigest()
    # Path component of the full URL (scheme and host stripped, query removed).
    fpath = urllib.splitquery('/' + '/'.join(fullUrl.split('/')[3:]))[0]
    payload = urllib.urlencode(payload)
    # The signature covers verb, date, path, query, encoded body and hashed key.
    signMe = '%s\n%s\n%s\n%s\n%s\n%s\n' % (verb, headers['Date'], fpath, query, payload, secret)
    md5 = hashlib.md5()
    md5.update(signMe)
    headers['Cerb-Auth'] = '%s:%s' % (self.accessKey, md5.hexdigest())
    # Now we perform the request
    if verb == 'PUT' or verb == 'POST':
        headers['Content-Length'] = str(len(str(payload)))
        response, data = http.request(fullUrl, verb, headers=headers, body=payload)
    else:
        response, data = http.request(fullUrl, verb, headers=headers)
    if DEBUG:
        print '--- REQUEST ---'
        print verb, fullUrl
        print 'HEADERS:'
        for header in headers:
            print '%20s : %s' % (header, headers[header])
        print 'PAYLOAD:\n%s' % payload
        print '\n--- RESPONSE ---'
        print 'HEADERS:'
        for header in response:
            print '%20s : %s' % (header, response[header])
        print 'PAYLOAD:'
        print data
    return json.loads(data)
def testQueryStringCreation(self):
    # Verify that the map page's sharing URL encodes the current UI state:
    # layer toggles, layer indices, marker/center coordinates and zoom.
    url = TEST_URL
    page = MapApplicationPage(self.browser, url)
    page.checkTitle(PAGE_TITLE)
    time.sleep(1)
    sharingUrl = page.getSharingUrl()
    qs = urlparse.parse_qs(urllib.splitquery(sharingUrl)[1])
    # Boolean display flags default to 't' when present.
    for field in ['showNoImporta', 'showWhatever', 'showArbitrary', 'cartogram', 'showCities']:
        if field in qs:
            self.assertTrue(qs[field][0] == 't')
    # Layer indices default to 0 when present.
    for field in ['noImportaIndex', 'whateverIndex', 'arbitraryIndex']:
        if field in qs:
            self.assertTrue(qs[field][0] == '0')
    self.assertTrue(qs['markerLng'][0] == '-100', 'has marker lng')
    self.assertTrue(qs['markerLat'][0] == '40', 'has marker lat')
    self.assertTrue(qs['lat'][0] == '38', 'has lat')
    self.assertTrue(qs['lng'][0] == '-95', 'has lng')
    self.assertTrue(qs['zoom'][0] == '4', 'has zoom')
    # Toggling each control should be reflected in a fresh sharing URL.
    page.showAsCartogram(False)
    time.sleep(1)
    self.queryStringCreationHelper(page, 'cartogram', 'f')
    page.showLayerset('noImporta', False)
    self.queryStringCreationHelper(page, 'showNoImporta', 'f');
    page.showLayerset('whatever', False)
    sharingUrl = page.getSharingUrl()
    qs = urlparse.parse_qs(urllib.splitquery(sharingUrl)[1])
    self.assertTrue(qs['showWhatever'][0] == 'f', 'showWhatever=f not shown')
    page.showCities(False)
    time.sleep(1)
    self.queryStringCreationHelper(page, 'showCities', 'f')
    page.changeLayerToIndex('whatever', 1)
    self.queryStringCreationHelper(page, 'whateverIndex', '1')
    page.changeLayerToIndex('arbitrary', 1)
    self.queryStringCreationHelper(page, 'arbitraryIndex', '1')
    page.changeLayerToIndex('noImporta', 1)
    self.queryStringCreationHelper(page, 'noImportaIndex', '1')
    page.zoomIn()
    time.sleep(1)  # zoom takes a minute to settle
    self.queryStringCreationHelper(page, 'zoom', '5')
    # TODO not quite sure how to change center or marker location
    page.tearDown()
def init(self, title, appkey):
    # Initialise template values and txtweb request fields for this handler.
    self.template_values = {
        'title': "Top Performing Fund",
        'appkey': "8a53f225-ab16-44b1-a239-aefaab198247",
        'body': "Invalid Session",
    }
    self.template_values['title'] = title
    self.template_values['appkey'] = appkey
    # Base of the current request URL (query string stripped).
    self.template_values['host'] = urllib.splitquery(self.request.url)[0]
    self.first_time = self.request.get("txtweb-message")
    self.txtweb_mobile = self.request.get("txtweb-mobile")
    self.txtweb_message = self.request.get("txtweb-message")
    self.txtweb_password = self.request.get("txtweb-password")
    if self.first_time:
        # Persist the raw query string for later processing.
        self.save(("query", urllib.splitquery(self.request.url)[-1]))
def __init__(self, soup, parent=False):
    """
    Parse the form attributes and fields from the soup. Make sure
    to get the action right. When parent is set, then the parent element
    is used as anchor for the search for form elements.
    """
    self._extra_args = {}
    self.soup = soup
    # Make sure to use base strings, not unicode
    for attr, value in soup.attrMap.iteritems():
        setattr(self, str(attr), str(value))
    # Set right anchor point for harvest
    if parent:
        self.soup = soup.parent
    # Harvest input elements.
    self._args = {}
    for item in self.soup.findAll("input"):
        # Make sure to initialize to '' to avoid None strings to appear
        # during submit
        self._args[str(item.get("name"))] = item.get("value") or ""
    # Harvest url: split the action into scheme/host/path, then peel any
    # query arguments off the path into _extra_args.
    self.scheme, self.host, self.action = urlsplit(self.action)
    self.action, args = urllib.splitquery(self.action)
    if args:
        args = args.split("&")
        for arg in args:
            attr, value = urllib.splitvalue(arg)
            self._extra_args[str(attr)] = value or ""
def refine_url(url, arg_drop_list = []):
    """Returns a refined url with all the arguments mentioned in
    arg_drop_list dropped."""
    if len(arg_drop_list) == 0:
        return url
    # Rebuild the query string, keeping only pairs whose key survives.
    query = urlparse.urlsplit(url)[3]
    kept_pairs = []
    for raw_pair in query.split('&'):
        pieces = raw_pair.split('=')
        if pieces[0] not in arg_drop_list:
            kept_pairs.append('='.join(pieces))
    new_query = '&'.join(kept_pairs)
    # Strip a trailing '&' and, when nothing remains, the '?' itself.
    base = urllib.splitquery(url)[0]
    return (base + '?' + new_query.rstrip('&')).rstrip('?')
def do_GET(self): """Serve a GET request.""" # print "....................", threading.currentThread().getName() mpath, margs = urllib.splitquery(self.path) print(mpath, margs) print type(mpath) if mpath.find('correct') and margs != None: dst = '1' file_name = margs[:margs.find('=')] if margs[-1] == '1': dst = '0' shutil.move("./" + file_name, "./" + dst + '/' + file_name) f = StringIO() f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">') f.write("<html>\n<title>Corrected</title>\n") f.write("<body>\n<h2>Thanks for your correction</h2>\n") f.write("<br><a href=\"%s\">back</a>" % self.headers['referer']) f.write("<br><small>Powered By: [email protected].</small>") f.write("</body></html>") length = f.tell() f.seek(0) self.send_response(200) self.send_header("Content-type", "text/html") self.send_header("Content-Length", str(length)) self.end_headers() if f: self.copyfile(f, self.wfile) f.close() else: f = self.send_head() if f: self.copyfile(f, self.wfile) f.close()
def process(self, req_type):
    # Stub handler for /uid_idx lookups: parses uid/url out of the query
    # string and replies with a fixed JSON payload.
    print self.requestline
    if req_type == 2 and self.path.startswith("/uid_idx"):
        query = urllib.splitquery(self.path)[1].split("&")
        print "---", query
        # NOTE(review): assumes the query is exactly "url=...&uid=..." in
        # that order -- verify against the caller.
        uid = urllib.splitvalue(query[1])[1]
        f_url = urllib.splitvalue(query[0])[1]
        print "---url=", f_url
        print "---uid=", uid
        resp_uid = "9bd2936001794d0381d76a37b74a6dff"
        uid_idx = 1
        content = {
            # "uid": int(2),
            "uid": str(resp_uid),
            "uid_idx": int(uid_idx)
        }
        print "***resp", content
        # url, uid = query.split("&")
        # print url, uid
        # time.sleep(1)
        content = json.dumps(content)
        f = io.BytesIO()
        f.write(content)
        f.seek(0)
        # NOTE(review): replies 400 even on this (apparently successful)
        # path and leaves Content-Type / end_headers commented out --
        # confirm this is an intentional test fixture.
        self.send_response(400)
        # self.send_header("Content-type", "application/json")
        self.send_header("Content-Length", str(len(content)))
        # self.end_headers()
        shutil.copyfileobj(f, self.wfile)
def _make_qs_uri_for_list(self, uri, info):
    """
    Attach the list-access query string modifiers to a URI.

    The _id modifier is always added; the _related modifier is added
    only when 'info' is set, signalling that the caller wants related
    information about referenced resources.

    @param uri:  URI to decorate with the query string.
    @param info: Whether related-resource information is requested.

    @return:     The URI with the required query string attached.
    """
    path, qs = urllib.splitquery(uri)
    modifiers = [self.id_qs]
    if info:
        modifiers.append(self.related_qs)
    extra = "&".join(modifiers)
    # Append to an existing query string, or start a fresh one.
    if qs:
        qs = qs + "&" + extra
    else:
        qs = extra
    return "%s?%s" % (path, qs)
def process(self):
    # Register a worker address supplied via ?addr=...&port=...; reply
    # with usage help when the request is malformed.
    if '?' in self.path:
        query = urllib.splitquery(self.path)
        path = query[0]
        queryParams = {}
        for qp in query[1].split('&'):
            kv = qp.split('=')
            # Skip fragments without a '=value' part.
            if (len(kv) < 2):
                continue
            queryParams[kv[0]] = urllib.unquote(kv[1]).decode(
                "utf-8", 'ignore')
        try:
            address = (queryParams.get("addr"), int(queryParams.get("port")))
            addresses.append(address)
            self.response("")
        except Exception as e:
            logging.error(
                "get address unsuccessfully, exception: {0}".format(e))
            self.responseHelp()
    else:
        self.responseHelp()
def _splitsuffix(self, url):
    'Split the suffix off of a url.'
    # Strip scheme, query and attributes, then return the file extension
    # (e.g. '.png') of what remains.
    garbage, pathpart = urllib.splittype(url)
    garbage, path = urllib.splithost(pathpart or '')
    # NOTE(review): `path` from splithost is never used; the splits below
    # operate on `pathpart` (host still included), which is harmless for
    # splitext but looks unintended -- confirm.
    pathpart, garbage = urllib.splitquery(pathpart or '')
    pathpart, garbage = urllib.splitattr(pathpart or '')
    return os.path.splitext(pathpart)[1]
def _make_qs_uri_for_list(self, uri, info):
    """
    Add the query string for an access to a list.

    If the 'info' flag is set then the _related query string modifier
    is specified. In all cases, the _id modifier is set.

    @param uri:  The URI to which the necessary query string should
                 be added.

    @param info: A flag indicating whether the caller wishes to receive
                 related information about referenced resources.

    @return:     A ready-formatted URI, with the required query string
                 attached.
    """
    path, qs = urllib.splitquery(uri)
    # Always request _id; add _related only when info was asked for.
    qs_list = [self.id_qs]
    if info:
        qs_list.append(self.related_qs)
    more_qs = "&".join(qs_list)
    # Append to any existing query string, otherwise start a fresh one.
    if not qs:
        qs = ""
    else:
        qs += "&"
    qs += more_qs
    uri = "%s?%s" % (path, qs)
    return uri
def do_GET(self):
    # Serve tile images directly; any other path renders the game page
    # for the situation passed in the 'sit' query parameter.
    if re.match("/images/[bcdWDF][123456789eswnrgw]\.png", self.path):
        self.send_response(200)
        self.send_header('Content-Type', 'image/png')
        self.send_header('Cache-Control', 'max-age=86400, must-revalidate')
        self.end_headers()
        from os import curdir, sep
        filename = curdir + sep + self.path
        print filename
        f = open(curdir + sep + self.path, 'rb')
        self.wfile.write(f.read())
        f.close()
    else:
        self.printCustomTextHTTPResponse(200)
        query_string = urllib.unquote_plus(self.path)
        path, query = urllib.splitquery(query_string)
        print query_string, path, query
        # NOTE(review): splitvalue returns a 1-tuple for args without '=',
        # which would break dict() -- assumes well-formed key=value pairs.
        parameters = dict(urllib.splitvalue(v) for v in query.split("&")) if query else {}
        print parameters
        if 'sit' in parameters:
            situation = parameters['sit']
        else:
            situation = None
        #else:
        #situation = query_string[1:]
        print "situation:", situation
        page = get_page(situation)
        self.wfile.write('<html>')
        self.wfile.write(page)
        self.wfile.write('</html>')
def runAddon(self, url):
    # Dispatch a plugin:// URL to the embedded Kodi addon runtime, or
    # open any other media URL in the system browser.
    urlScheme = urlparse.urlparse(url)
    if urlScheme.scheme == 'plugin':
        # Different plugin: rebuild sys.argv the way Kodi would.
        pluginId, urlArgs = urllib.splitquery(url)
        self.theGlobals['sys'].argv[0] = pluginId
        self.theGlobals['sys'].argv[2] = '?' + (urlArgs or '')
        self.theGlobals['sys'].argv[1] += 1
        self.kodiDirectory = []
        actualID = urlScheme.netloc
        addonDir = xbmc.translatePath('special://home/addons/' + actualID)
        # Recompile the addon source only when switching to another addon.
        if self.addonID != actualID:
            self.addonID = actualID
            self.theGlobals['sys'].modules = newModules(
                self.theGlobals['sys'].modules)
            self.sourceCode = self.getCompiledAddonSource(actualID)
        try:
            msg = self.theGlobals['sys'].argv[0] + urllib.unquote(
                self.theGlobals['sys'].argv[2])
            self.log(msg, xbmc.LOGNONE)
            KodiAddonImporter.install_meta(addonDir, addonRoot,
                                           hookId='kodi_imports',
                                           atype='kodi')
            KodiAddonImporter.get_initial_state()
            # SECURITY NOTE: runs addon code inside this process.
            exec(self.sourceCode, self.theGlobals)
            KodiAddonImporter.restore_initial_state()
            KodiAddonImporter.remove_meta('kodi_imports')
        except:
            self.logger.exception('')
    else:
        # Playback of media (file, etc.)
        urlLink, sep, userAgent = url.partition('|')
        self.log('Opening: ' + urlLink, xbmc.LOGNOTICE)
        webbrowser.open(urlLink)
def do_POST(self):
    # JSON-RPC style dispatcher: decode {"rpcname", "rpcparams"} from the
    # request body, invoke the named RPC and return its JSON response.
    mpath, margs = urllib.splitquery(self.path)
    datas = self.rfile.read(int(self.headers['content-length']))
    requestIPAddress = self.client_address
    decodeJson = json.loads(datas)
    rpcname = decodeJson["rpcname"]
    rpcparams = decodeJson["rpcparams"]
    if rpcname == "login":
        # Login additionally receives the caller's address.
        rpcparams["request_IP"] = requestIPAddress
        print rpcparams
        response = call(rpcname, rpcparams)
        #sessionId = response["response"]["sessionId"]
        #print sessionId
    else:
        print rpcparams
        response = call(rpcname, rpcparams)
    print mpath, margs, datas
    print response
    print type(response)
    response = json.dumps(response)
    print response
    self.protocal_version = "HTTP/1.1"
    self.send_response(200)
    self.send_header("Welcome", "Contect")
    self.end_headers()
    self.wfile.write(response)
def do_POST(self):
    # Route a JSON POST body to the handler method named by the URL path
    # (leading '/' stripped); reply with a small JSON status document.
    path = self.path
    query = urllib.splitquery(path)
    action = query[0][1:]
    datas = self.rfile.read(int(self.headers['content-length']))
    datas = urllib.unquote(datas).decode("utf-8", 'ignore')
    try:
        args = json.loads(datas)
        buf = ""
        if action in self.actions:
            # Only whitelisted actions are dispatched by name.
            func = getattr(self, action)
            func(args)
            self.send_response(200)
            buf = '''{ "status":"ok" } '''
        else:
            self.send_response(404)
            buf = '''{ "status":"action not found" } '''
    except Exception as e:
        self.send_response(500)
        buf = '''{{ "status":"{}" }} '''
        buf = buf.format(e)
    self.send_header("Content-type", "application/json")
    self.end_headers()
    self.wfile.write(buf)
def do_GET(self):
    # Decode the GET query parameters, then re-POST the raw query string
    # to the local backend at 127.0.0.1:4545 and relay its status.
    print "GET"
    if '?' in self.path:
        query = urllib.splitquery(self.path)
        print query
        queryParams = {}
        if query[1]:
            # collect GET parameters
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode(
                    "utf-8", 'ignore')
            print queryParams
            # forward the existing parameters via POST
            params = urllib.unquote(query[1])
            flag = False
            resp = ""
            (flag, resp) = webPostCommand("http://127.0.0.1:4545/", params, 30)
            print flag, resp
            if not flag:
                self.send_response(501, 'server error')
                return
            else:
                self.send_response(200, resp)
                return
    else:
        self.send_response(501, "can't find get params")
        return
def do_GET(self):
    """Echo the request's GET parameters back as "key:value" lines."""
    self._set_headers()
    self.wfile.write("<html><body><h1>hi!</h1></body></html>")
    # BUG FIX: `content` was used with += before ever being assigned,
    # raising NameError on any request carrying a query string.
    content = ""
    if '?' in self.path:
        query = urllib.splitquery(self.path)
        action = query[0]
        if query[1]:  # collect GET parameters
            queryParams = {}
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode("utf-8", 'ignore')
                content += kv[0] + ':' + queryParams[kv[0]] + "\r\n"
    # response encoding
    enc = "UTF-8"
    content = content.encode(enc)
    f = io.BytesIO()
    f.write(content)
    f.seek(0)
    self.send_response(200)
    self.send_header("Content-type", "text/html; charset=%s" % enc)
    self.send_header("Content-Length", str(len(content)))
    self.end_headers()
    shutil.copyfileobj(f, self.wfile)
def createContext(metadata, incident_settings, results, sessionKey, payload):
    # Assemble the rendering context for alert notifications from alert
    # metadata, incident settings, search results and server info.
    server_info = getServerInfo(sessionKey)
    context = {}
    context.update({"job_id": metadata["job_id"]})
    context.update({"alert_time": metadata["alert_time"]})
    context.update({"owner": metadata["owner"]})
    context.update({"name": metadata["alert"]})
    context.update({"title": metadata["title"]})
    context.update({"alert": {"impact": metadata["impact"], "urgency": metadata["urgency"], "priority": metadata["priority"], "expires": metadata["ttl"]}})
    context.update({"app": metadata["app"]})
    context.update({"category": incident_settings['category']})
    context.update({"subcategory": incident_settings['subcategory']})
    context.update({"tags": incident_settings['tags']})
    context.update({"results_link": payload['results_link']})
    # Derive the view link by replacing the last path component of the
    # results link with 'alert' plus the saved-search reference.
    split_results_path = urllib.splitquery(payload['results_link'])[0].split('/')
    view_path = '/'.join(split_results_path[:-1]) + '/'
    view_link = view_path + 'alert?' + urllib.urlencode({'s': metadata['entry'][0]['links'].get('alternate')})
    context.update({"view_link": view_link})
    context.update({"server": {"version": server_info["version"], "build": server_info["build"], "serverName": server_info["serverName"]}})
    # Expose the first result row as 'result' and all rows as 'results'.
    if "fields" in results:
        result_context = {"result": results["fields"][0]}
        context.update(result_context)
        results_context = {"results": results["fields"]}
        context.update(results_context)
    return context
def __extract_m3u8__(self, m3u8):
    # Extract the unique video-part URLs from an iqiyi m3u8 playlist and
    # normalise each query string to start=0. Returns [] on empty input.
    if m3u8:
        m3u8_parts = re.compile(
            '#EXTINF:\d+,\s+(http://data.video.iqiyi.com/videos/\S+)'
        ).findall(m3u8)
        rex_filename = re.compile('/([a-z|A-Z|0-9]+)\.([A-Z|a-z|0-9]+)\?')
        filenames = []
        # Walk the parts from the end so the last URL for each file name
        # wins; original order is restored below.
        reverse_parts = m3u8_parts[::-1]
        reverse_part_tails = []
        for i in reverse_parts:
            res = rex_filename.search(i)
            # NOTE(review): res is None when the pattern does not match;
            # res.group(1) would then raise -- assumes every part matches.
            if res.group(1) not in filenames:
                filenames.append(res.group(1))
                reverse_part_tails.append(i)
        # complete total file
        part_tails = reverse_part_tails[::-1]
        ret = []
        for i in part_tails:
            path, query_str = urllib.splitquery(i)
            query_dict = extract_query(query_str)
            query_dict['start'] = '0'
            query_str = urllib.urlencode(query_dict)
            ret.append(path + '?' + query_str)
        return ret
    return []
def loadSplitPointsFromMeta(metaUri):
    # Scan a KDI meta table for the row keys of the named table and
    # return evenly spaced (fraction, row) split points.
    if not metaUri.startswith('meta+'):
        raise ValueError('not a meta table: %r' % metaUri)
    metaUri = metaUri[5:]
    import urllib, cgi
    p, q = urllib.splitquery(metaUri)
    q = cgi.parse_qs(q or '')
    tableName = q.get('name', [''])[0]
    if not tableName:
        raise ValueError('meta uri needs table name')
    from util.zero import zeroEncode, zeroDecode
    # Rows for this table lie between the \x01 and \x02 sentinel encodings.
    pred = '%r <= row < %r and column = "location"' % \
        (zeroEncode(tableName, '\x01', ''), zeroEncode(tableName, '\x02', ''))
    rows = []
    import pykdi
    for r, c, t, v in pykdi.Table(metaUri).scan(pred):
        n, x, r = zeroDecode(r)
        rows.append(r)
    # Assign each row an equal fraction of the (0, 1) interval.
    f = 1.0 / (len(rows) + 1)
    return [((i + 1) * f, r) for i, r in enumerate(rows)]
def loadSplitPointsFromMeta(metaUri):
    # Read the named table's row keys from a KDI meta table and map them
    # to evenly spaced (fraction, row) split points.
    if not metaUri.startswith('meta+'):
        raise ValueError('not a meta table: %r' % metaUri)
    metaUri = metaUri[5:]
    import urllib,cgi
    p,q = urllib.splitquery(metaUri)
    q = cgi.parse_qs(q or '')
    tableName = q.get('name',[''])[0]
    if not tableName:
        raise ValueError('meta uri needs table name')
    from util.zero import zeroEncode,zeroDecode
    # Table rows sit between the \x01 and \x02 sentinel encodings.
    pred = '%r <= row < %r and column = "location"' % \
        (zeroEncode(tableName, '\x01', ''), zeroEncode(tableName, '\x02', ''))
    rows = []
    import pykdi
    for r,c,t,v in pykdi.Table(metaUri).scan(pred):
        n,x,r = zeroDecode(r)
        rows.append(r)
    # Each row receives an equal fraction of the (0, 1) interval.
    f = 1.0 / (len(rows) + 1)
    return [((i+1)*f, r) for i,r in enumerate(rows)]
def runAddon(self, url):
    # Run a plugin:// URL inside the embedded Kodi addon runtime, or hand
    # any other URL off to the system browser.
    urlScheme = urlparse.urlparse(url)
    if urlScheme.scheme == 'plugin':
        # Different plugin: rebuild sys.argv the way Kodi would.
        pluginId, urlArgs = urllib.splitquery(url)
        self.theGlobals['sys'].argv[0] = pluginId
        self.theGlobals['sys'].argv[2] = '?' + (urlArgs or '')
        self.theGlobals['sys'].argv[1] += 1
        self.kodiDirectory = []
        actualID = urlScheme.netloc
        addonDir = xbmc.translatePath('special://home/addons/' + actualID)
        # Recompile the addon source only when switching addons.
        if self.addonID != actualID:
            self.addonID = actualID
            self.theGlobals['sys'].modules = newModules(self.theGlobals['sys'].modules)
            self.sourceCode = self.getCompiledAddonSource(actualID)
        try:
            msg = self.theGlobals['sys'].argv[0] + urllib.unquote(self.theGlobals['sys'].argv[2])
            self.log(msg, xbmc.LOGNONE)
            KodiAddonImporter.install_meta(addonDir, addonRoot, hookId = 'kodi_imports', atype = 'kodi')
            KodiAddonImporter.get_initial_state()
            # SECURITY NOTE: runs addon code inside this process.
            exec(self.sourceCode, self.theGlobals)
            KodiAddonImporter.restore_initial_state()
            KodiAddonImporter.remove_meta('kodi_imports')
        except:
            self.logger.exception('')
    else:
        # Playback of media (file, etc.)
        urlLink, sep, userAgent = url.partition('|')
        self.log('Opening: ' + urlLink, xbmc.LOGNOTICE)
        webbrowser.open(urlLink)
def _get_host_from_uri (self, uri):
    # Extract the printer host name (or DNS-SD host) from a CUPS device
    # URI. Returns (host, dnssdhost); either element may be None.
    hostport = None
    host = None
    dnssdhost = None
    (scheme, rest) = urllib.splittype (uri)
    if scheme == 'hp' or scheme == 'hpfax':
        # HPLIP network URIs look like hp:/net/<name>?ip=... or ?zc=...
        if rest.startswith ("/net/"):
            (rest, ipparam) = urllib.splitquery (rest[5:])
            if ipparam != None and ipparam.startswith("ip="):
                hostport = ipparam[3:]
            else:
                if ipparam != None and ipparam.startswith("zc="):
                    dnssdhost = ipparam[3:]
                else:
                    return None, None
        else:
            return None, None
    elif scheme == 'dnssd' or scheme == 'mdns':
        # The URIs of the CUPS "dnssd" backend do not contain the host
        # name of the printer
        return None, None
    else:
        (hostport, rest) = urllib.splithost (rest)
        if hostport == None:
            return None, None
    if hostport:
        (host, port) = urllib.splitport (hostport)
    return host, dnssdhost
def __init__(self, soup, parent=False):
    '''
    Parse the form attributes and fields from the soup. Make sure to
    get the action right. When parent is set, then the parent element
    is used as anchor for the search for form elements.
    '''
    self._extra_args = {}
    self.soup = soup
    # Make sure to use base strings, not unicode
    for attr, value in soup.attrMap.iteritems():
        setattr(self, str(attr), str(value))
    # Set right anchor point for harvest
    if parent:
        self.soup = soup.parent
    # Harvest input elements
    self._args = {}
    for item in self.soup.findAll('input'):
        # CONSISTENCY FIX: default missing values to '' (as the sibling
        # implementation of this class does) so None never appears in a
        # later submit.
        self._args[str(item.get('name'))] = item.get('value') or ''
    # Harvest url: split the action into scheme/host/path, then peel any
    # query arguments off the path into _extra_args.
    self.scheme, self.host, self.action = urlsplit(self.action)
    self.action, args = urllib.splitquery(self.action)
    if args:
        args = args.split('&')
        for arg in args:
            attr, value = urllib.splitvalue(arg)
            self._extra_args[str(attr)] = value or ''
def find_page(self, req):
    """Resolve the request URI to a registered page-handler module.

    Returns (module_or_None, page_info), where page_info records the
    page name and the lookup outcome.
    """
    _page = None
    page_info = {}
    path, query = urllib.splitquery(req['uri'])
    pn = os.path.basename(path)
    name = None
    if pages_handles.has_key(pn):
        name = pages_handles[pn]
    page_info['pagename'] = pn
    page_info['handler'] = None
    page_info['result'] = None
    page_info['message'] = "find page handler"
    if name:
        print "--" * 8
        print "pagename = ", pn
        # Import the handler module by name at request time.
        # SECURITY NOTE: exec of a constructed import statement -- safe
        # only while pages_handles values are trusted module names.
        cmd = 'import ' + name + ' as dut_page'
        print "==", "exec", cmd
        exec (cmd)
        #print "==done","exec",cmd
        _page = dut_page
        page_info['handler'] = _page
    return _page, page_info
def get(self):
    # Ad-hoc smoke-test endpoint: the 'type' query parameter selects a
    # datastore or URL-parsing exercise; results are written as text.
    self.response.headers['Content-Type'] = 'text/plain'
    testing_type = cgi.escape(self.request.get('type'))
    self.response.out.write('#testing'+testing_type+'\n')
    ##db
    if testing_type == "db":
        t1 = testdb(name="dbname",value="dbvalue")
        t1.put()
        t2 = testdb(name="dbname2",value="dbvalue2")
        t2.put()
        alltestdb = testdb.all().filter("name =", "dbname2").fetch(10)
        if alltestdb[0].value == "dbvalue2":
            self.response.out.write('+result: db test ok!\n')
            # Mutate the entity, then round-trip it through its key.
            alltestdb[0].value = "dbvalue3"
            alltestdb[0].put()
            key_name = str(alltestdb[0].key())
            self.response.out.write(str(alltestdb[0].key()))
            newinc = db.get(db.Key(key_name))
            newinc.value = "123434"
            newinc.put()
    ##get token //pls remove
    if testing_type == "parse_url":
        # Pull the oauth_token parameter out of a sample authorize URL.
        query=urllib.splitquery("http://api.t.sina.com.cn/oauth/authorize?oauth_token=b4609686ef4fccbbc447d797b2e0ff36&oauth_callback=http%3A%2F%2Ftwsinabot.appspot.com%2Fcallback%3FUser%3Did1")[1]
        tmp_token = urlparse.parse_qs(query)['oauth_token']
        self.response.out.write("+result:\t"+tmp_token[0])
    if testing_type == "Get User Name":
        pass
def document_quote (document):
    """Quote given document."""
    # Only the path component is quoted; any query string is re-attached
    # untouched.
    doc, query = urllib.splitquery(document)
    doc = url_quote_part(doc, '/=,')
    if not query:
        return doc
    return "%s?%s" % (doc, query)
def get_sector_id(self, redirect_uri, client_info):
    """
    Pick the sector id given a number of factors
    :param redirect_uri: The redirect_uri used
    :param client_info: Information provided by the client in the
        client registration
    :return: A sector_id or None
    """
    _redirect_uri = urlparse.unquote(redirect_uri)
    part = urlparse.urlparse(_redirect_uri)
    # Redirect URIs must not carry a fragment.
    if part.fragment:
        raise ValueError
    (_base, _query) = urllib.splitquery(_redirect_uri)
    sid = ""
    try:
        # A registered sector_id wins when the base URI is listed.
        if _base in client_info["si_redirects"]:
            sid = client_info["sector_id"]
    except KeyError:
        try:
            # Otherwise pairwise subject types fall back to the base URI.
            uit = client_info["subject_type"]
            if uit == "pairwise":
                sid = _base
        except KeyError:
            pass
    return sid
def process(self, type):
    # Handle DescribeLVBChannel-style queries: decode GET parameters,
    # dispatch on 'action' and return the JSON description as UTF-8.
    content = ""
    if '?' in self.path:
        query = urllib.splitquery(self.path)
        action = query[0]
        if query[1]:  # collect GET parameters
            queryParams = {}
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode("utf-8", 'ignore')
            logging.info("接受参数:{}".format(queryParams))
            action = queryParams['action']
            channel_id = ''
            if action == "DescribeLVBChannel":
                channel_id = queryParams['channel_id']
            content = self.getDescribeLVBInfo(action, channel_id)
            logging.info("结果:{}".format(content))
    # response encoding
    enc="UTF-8"
    content = content.encode(enc)
    f = io.BytesIO()
    f.write(content)
    f.seek(0)
    self.send_response(200)
    self.send_header("Content-type", "text/html; charset=%s" % enc)
    self.send_header("Content-Length", str(len(content)))
    self.end_headers()
    shutil.copyfileobj(f,self.wfile)
def do_GET(self):
    # Password-change endpoint: reads 'sessionid' from the query string,
    # calls change_password() and reports progress to /tmp/log.txt.
    # NOTE(review): this chunk appears truncated -- the outer try has no
    # visible except/finally here, and the log file is never closed.
    log = open("/tmp/log.txt","w")
    log.write("Calling Handler %s\n" % self.path)
    try:
        _, query_args = urllib.splitquery(self.path)
        arguments = dict([ urllib.splitvalue(query_arg) for query_arg in query_args.split('&') ])
        session_id = arguments.get('sessionid')
        log.write("Session id: %s\n" % session_id)
        if session_id is None:
            self.send_error(400)
            self.end_headers()
            self.wfile.write("fail: sessionid argument is required")
        else:
            try:
                log.write("Changing password...\n")
                change_password(session_id)
                log.write("Password changed\n")
            except Exception, e:
                # NOTE(review): file.write() takes one string; this call
                # passes two arguments and raises TypeError -- should be
                # log.write("There was an error %s\n" % e).
                log.write("There was an error %s\n", e)
                traceback.print_exc(file=log)
                traceback.print_exc()
                self.send_error(500)
                self.end_headers()
                self.wfile.write("Internal error: %s" % str(e))
            else:
                log.write("Sending 'ok'...\n")
def __init__(self, soup, parent=False):
    '''
    Parse the form attributes and fields from the soup. Make sure to
    get the action right. When parent is set, then the parent element
    is used as anchor for the search for form elements.
    '''
    self._extra_args = {}
    self.soup = soup
    # Make sure to use base strings, not unicode
    for attr, value in soup.attrMap.iteritems():
        setattr(self, str(attr), str(value))
    # Set right anchor point for harvest
    if parent:
        self.soup = soup.parent
    # Harvest input elements.
    self._args = {}
    for item in self.soup.findAll('input'):
        # Make sure to initialize to '' to avoid None strings to appear
        # during submit
        self._args[str(item.get('name'))] = item.get('value') or ''
    # Harvest url: split the action into scheme/host/path, then peel any
    # query arguments off the path into _extra_args.
    self.scheme, self.host, self.action = urlsplit(self.action)
    self.action, args = urllib.splitquery(self.action)
    if args:
        args = args.split('&')
        for arg in args:
            attr, value = urllib.splitvalue(arg)
            self._extra_args[str(attr)] = value or ''
def process(self,type):
    # Common GET/POST handler: type==1 means POST (read the url-encoded
    # body); GET parameters are then merged and rendered via page().
    content =""
    if type==1:  # POST: receive POST parameters
        datas = self.rfile.read(int(self.headers['content-length']))
        datas = urllib.unquote(datas).decode("utf-8", 'ignore')  # decode with the given encoding
        datas = transDicts(datas)  # convert the parameters into a dict
        if datas.has_key('data'):
            content = "data:"+datas['data']+"\r\n"
    query = urllib.splitquery(self.path)
    action = query[0]
    queryParams = {}
    if '?' in self.path:
        if query[1]:  # receive GET parameters
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode("utf-8", 'ignore')
                #print "queryParams:" + kv[0]+"==="+ queryParams[kv[0]]
                #content+= kv[0]+':'+queryParams[kv[0]]+"\r\n"
    content_type,f = page(action,queryParams)
    self.send_response(200)
    self.send_header("Content-type", content_type)
    #self.send_header("Content-Length", str(len(content)))
    self.end_headers()
    shutil.copyfileobj(f,self.wfile)
def do_GET(self):
    """Echo the request's GET parameters back as "key:value" lines."""
    self._set_headers()
    self.wfile.write("<html><body><h1>hi!</h1></body></html>")
    # BUG FIX: `content` was used with += before any assignment, raising
    # NameError on every request that carried a query string.
    content = ""
    if '?' in self.path:
        query = urllib.splitquery(self.path)
        action = query[0]
        if query[1]:  # collect GET parameters
            queryParams = {}
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode(
                    "utf-8", 'ignore')
                content += kv[0] + ':' + queryParams[kv[0]] + "\r\n"
    # response encoding
    enc = "UTF-8"
    content = content.encode(enc)
    f = io.BytesIO()
    f.write(content)
    f.seek(0)
    self.send_response(200)
    self.send_header("Content-type", "text/html; charset=%s" % enc)
    self.send_header("Content-Length", str(len(content)))
    self.end_headers()
    shutil.copyfileobj(f, self.wfile)
def process(self, type):
    # Shared GET/POST handler: for POST (type==1) read the url-encoded
    # body; GET parameters are then collected and rendered via page().
    content = ""
    if type == 1:  # POST: receive POST parameters
        datas = self.rfile.read(int(self.headers['content-length']))
        datas = urllib.unquote(datas).decode("utf-8", 'ignore')  # decode with the given encoding
        datas = transDicts(datas)  # convert the parameters into a dict
        if datas.has_key('data'):
            content = "data:" + datas['data'] + "\r\n"
    query = urllib.splitquery(self.path)
    action = query[0]
    queryParams = {}
    if '?' in self.path:
        if query[1]:  # receive GET parameters
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode(
                    "utf-8", 'ignore')
                #print "queryParams:" + kv[0]+"==="+ queryParams[kv[0]]
                #content+= kv[0]+':'+queryParams[kv[0]]+"\r\n"
    content_type, f = page(action, queryParams)
    self.send_response(200)
    self.send_header("Content-type", content_type)
    #self.send_header("Content-Length", str(len(content)))
    self.end_headers()
    shutil.copyfileobj(f, self.wfile)
def do_GET(self):
    # Classify the text passed in the query string with the fastText
    # model and write the prediction back as HTML.
    #print 'self.path:', self.path
    if '?' in self.path:
        mpath,margs = urllib.splitquery(self.path)
        #print('mpath:', mpath)
        #print('margs', margs)
        content = margs.split('=')
        #print 'content', content
        #mid, weibo = content[1].split('\\t')
        # The text is the value after the last '='.
        weibo = content[-1]
        weibo = urllib.unquote(weibo)
        result = predict(fasttext_model, processtext, weibo)  #.encode('utf8'))
        self.protocal_version = 'HTTP/1.1'
        self.send_response(200)
        encoding = sys.getfilesystemencoding()
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.end_headers()
        content = result
        #self.wfile.write('weibo_fenci:%s' % weibo)
        self.wfile.write('Predict Result:%s' % content)
def process(self, type):
    # Echo handler: for POST (type==1) decode the body; GET query
    # parameters are echoed back as "key:value" lines.
    content = ""
    if type == 1:  # POST: receive POST parameters
        datas = self.rfile.read(int(self.headers['content-length']))
        print datas
        datas = urllib.unquote(datas).decode("utf-8", 'ignore')  # decode with the given encoding
        #datas = transDicts(datas)  # convert the parameters into a dict
        #if datas.has_key('data'):
        #    content = "data:" + datas['data'] + "\r\n"
    if '?' in self.path:
        query = urllib.splitquery(self.path)
        action = query[0]
        if query[1]:  # receive GET parameters
            queryParams = {}
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode("utf-8", 'ignore')
                content += kv[0] + ':' + queryParams[kv[0]] + "\r\n"
    # response encoding
    enc = "UTF-8"
    content = content.encode(enc)
    f = io.BytesIO()
    f.write(content)
    f.seek(0)
    self.send_response(200)
    self.send_header("Content-type", "text/html; charset=%s" % enc)
    self.send_header("Content-Length", str(len(content)))
    self.end_headers()
    shutil.copyfileobj(f, self.wfile)
def do_GET(self):
    # Password-change endpoint: extracts 'sessionid' from the query
    # string, calls change_password() and logs progress to /tmp/log.txt.
    # NOTE(review): this chunk appears truncated -- the outer try has no
    # visible except/finally here, and the log file is never closed.
    log = open("/tmp/log.txt", "w")
    log.write("Calling Handler %s\n" % self.path)
    try:
        _, query_args = urllib.splitquery(self.path)
        arguments = dict([
            urllib.splitvalue(query_arg)
            for query_arg in query_args.split('&')
        ])
        session_id = arguments.get('sessionid')
        log.write("Session id: %s\n" % session_id)
        if session_id is None:
            self.send_error(400)
            self.end_headers()
            self.wfile.write("fail: sessionid argument is required")
        else:
            try:
                log.write("Changing password...\n")
                change_password(session_id)
                log.write("Password changed\n")
            except Exception, e:
                # NOTE(review): file.write() takes one string; this call
                # passes two arguments and raises TypeError -- should be
                # log.write("There was an error %s\n" % e).
                log.write("There was an error %s\n", e)
                traceback.print_exc(file=log)
                traceback.print_exc()
                self.send_error(500)
                self.end_headers()
                self.wfile.write("Internal error: %s" % str(e))
            else:
                log.write("Sending 'ok'...\n")
def start(self, destfile=None, destfd=None):
    # Begin an asynchronous download of self._url, writing either to the
    # caller-supplied destination or to a temp file under self._destdir.
    self._info = urllib.urlopen(self._url)
    self._outf = None
    self._fname = None
    if destfd and not destfile:
        raise ValueError('Must provide destination file too when'
                         ' specifying file descriptor')
    if destfile:
        self._suggested_fname = os.path.basename(destfile)
        self._fname = os.path.abspath(os.path.expanduser(destfile))
        if destfd:
            # Use the user-supplied destination file descriptor
            self._outf = destfd
        else:
            self._outf = os.open(self._fname, os.O_RDWR | os.O_TRUNC | os.O_CREAT, 0644)
    else:
        fname = self._get_filename_from_headers(self._info.headers)
        self._suggested_fname = fname
        # Derive a suffix from the URL path (scheme, host, query and
        # attributes stripped) so the temp file keeps the extension.
        garbage_, path = urllib.splittype(self._url)
        garbage_, path = urllib.splithost(path or "")
        path, garbage_ = urllib.splitquery(path or "")
        path, garbage_ = urllib.splitattr(path or "")
        suffix = os.path.splitext(path)[1]
        (self._outf, self._fname) = tempfile.mkstemp(suffix=suffix, dir=self._destdir)
    # Watch the response socket and stream chunks as they arrive.
    fcntl.fcntl(self._info.fp.fileno(), fcntl.F_SETFD, os.O_NDELAY)
    self._srcid = GLib.io_add_watch(self._info.fp.fileno(), GLib.IO_IN | GLib.IO_ERR, self._read_next_chunk)
def document_quote(document):
    """Quote the path part of *document*, leaving any query string as-is."""
    path_part, query_part = urllib.splitquery(document)
    quoted = url_quote_part(path_part, '/=,')
    if not query_part:
        return quoted
    return "%s?%s" % (quoted, query_part)
def do_GET(self): content = "" # self._writeheaders() if '?' in self.path: #如果带有参数 # http://ip:8765/?test=data query = urllib.splitquery(self.path) action = query[0] print "action====", action if query[1]: queryParams = {} for qp in query[1].split('&'): kv = qp.split('=') queryParams[kv[0]] = urllib.unquote(kv[1]).decode( "utf-8", 'ignore') content += kv[0] + ':' + queryParams[kv[0]] + "\r\n" enc = "UTF-8" content = content.encode(enc) f = io.BytesIO() f.write(content) print "content=====", content f.seek(0) self.send_response(200) self.send_header("Content-type", "text/html;charset=%s" % enc) self.send_header("Content-Length", str(len(content))) self.end_headers() shutil.copyfileobj(f, self.wfile) self.wfile.write(200)
def makeReqHeaders(self):
    """Build the request path and headers that encode the byte range.

    When the URL's range_format starts with '&' the range is injected into
    the query string; otherwise it is sent as an HTTP header (plus
    'Accept-Ranges: bytes').  Returns (req_path, req_headers).
    """
    range_format = self.url.range_format
    byte_range = (self.progress.begin + self.progress.go_inc,
                  self.progress.end)
    req_path = self.target.path
    req_headers = dict(self.url.headers.items())
    if range_format[0] == '&':
        # Range travels in the query string.
        base_path, raw_query = splitquery(self.target.path)
        params = extract_query(raw_query)
        for pair in (range_format % byte_range)[1:].split('&'):
            name, val = splitvalue(pair)
            params[name] = val
        req_path = '%s?%s' % (base_path, urlencode(params))
    else:
        # Range travels as an HTTP header, e.g. "Range: bytes=0-99".
        pieces = [seg.strip() for seg in
                  (range_format % byte_range).split(':')]
        req_headers.update({pieces[0]: pieces[1],
                            'Accept-Ranges': 'bytes'})
    return req_path, req_headers
def find_page(self, req):
    """Look up the handler module for the request URI.

    req -- mapping with at least 'uri' and 'method' keys.
    Returns (module_or_None, page_info) where page_info records the page
    name, the handler (or None) and a default "No page handler" message.
    """
    _page = None
    page_info = {}
    # TODO : find page handler
    path, query = urllib.splitquery(req['uri'])
    pn = os.path.basename(path)
    name = None
    if pages_handles.has_key(pn):
        name = pages_handles[pn]
    page_info['pagename'] = (req['method'] + ' ' + pn)
    page_info['handler'] = None
    page_info['result'] = None
    page_info['message'] = "No page handler"
    if name:
        #print "--"*8
        print ''
        print '==', "pagename = ", pn
        print ''
        # NOTE(review): dynamic import via exec -- safe only while the
        # pages_handles table is trusted, module-controlled data.
        cmd = 'import ' + name + ' as dut_page'
        #print "==","exec",cmd
        exec (cmd)
        #print "==done","exec",cmd
        _page = dut_page
        page_info['handler'] = _page
    else:
        #print '++++++',pn
        pass
    return _page, page_info
def run(self, url):
    """Execute the Kodi addon addressed by a plugin:// URL.

    Rebuilds sys.argv for the addon ([plugin_id, handle, '?query']),
    loads its source (virtual disk or compiled), execs it in
    self.theGlobals, and returns the addon's answer (self.answ), or None
    when the URL is not a plugin or execution fails.
    """
    xbmc = self.theGlobals['xbmc']
    urlScheme = urlparse.urlparse(url)
    if urlScheme.scheme != 'plugin':
        return
    # Different plugin
    pluginId, urlArgs = urllib.splitquery(url)
    self.theGlobals['sys'].argv = [pluginId,
                                   self.theGlobals['sys'].argv[1] + 1,
                                   '?' + (urlArgs or '')]
    self.addonID = actualID = urlScheme.netloc
    addonDir = xbmc.translatePath('special://home/addons/' + actualID)
    if addonDir.startswith('vrt:%s' % os.path.sep):
        # Addon lives on the virtual disk: hook imports, extend sys.path
        # and load the raw source from there.
        self.vrtDisk.installPathHook()
        sys.path.insert(0, addonDir)
        sourceCode = self.getVrtDiskAddonSource()
    else:
        sourceCode = self.getCompiledAddonSource(actualID)
    self.importer.setAddonDir(addonDir)
    try:
        exec(sourceCode, self.theGlobals)
    except Exception as e:
        # Log the failure through Kodi and fall through with answ = None.
        xbmc.log(str(e), xbmc.LOGERROR)
        msg = traceback.format_exc()
        xbmc.log(msg, xbmc.LOGERROR)
        self.answ = None
    return self.answ
def _get_host_from_uri(self, uri):
    """Extract the network host from a CUPS device URI.

    Returns a (host, dnssdhost) tuple; either element may be None.
      hp:/net/...?ip=H  -> host taken from the ip parameter
      hp:/net/...?zc=H  -> dnssdhost taken from the zc parameter
      dnssd:// / mdns:// -> (None, None); those URIs carry no host name
      anything else     -> host parsed from the URI authority part
    """
    hostport = None
    host = None
    dnssdhost = None
    (scheme, rest) = urllib.splittype(uri)
    if scheme == 'hp' or scheme == 'hpfax':
        if rest.startswith("/net/"):
            (rest, ipparam) = urllib.splitquery(rest[5:])
            if ipparam != None and ipparam.startswith("ip="):
                hostport = ipparam[3:]
            else:
                if ipparam != None and ipparam.startswith("zc="):
                    dnssdhost = ipparam[3:]
                else:
                    return None, None
        else:
            return None, None
    elif scheme == 'dnssd' or scheme == 'mdns':
        # The URIs of the CUPS "dnssd" backend do not contain the host
        # name of the printer
        return None, None
    else:
        (hostport, rest) = urllib.splithost(rest)
        if hostport == None:
            return None, None
    if hostport:
        (host, port) = urllib.splitport(hostport)
    # Normalize unicode results to UTF-8 byte strings (Python 2).
    if type(host) == unicode:
        host = host.encode('utf-8')
    if type(dnssdhost) == unicode:
        dnssdhost = dnssdhost.encode('utf-8')
    return host, dnssdhost
def process(self): content = "" if '?' in self.path: query = urllib.splitquery(self.path) action = query[0] # in fact, it's useless. if query[1]: # load get params query_params = {} for qp in query[1].split('&'): kv = qp.split('=') query_params[kv[0]] = urllib.unquote(kv[1]) print query_params content = self.actions(query_params) # encode content enc = "UTF-8" content = content.encode(enc) f = io.BytesIO() f.write(content) f.seek(0) self.send_response(200) self.send_header("Content-type", "text/html; charset=%s" % enc) self.send_header("Content-Length", str(len(content))) self.end_headers() shutil.copyfileobj(f, self.wfile)
def do_GET(self):
    """Handle GET request.

    self.path contains path, which should be displayed starting from the
    root of the backup.  Responds 404 when the node is not found, 503 with
    a traceback (then re-raises) when rendering fails, and otherwise
    forwards the renderer's code, headers and payload.
    """
    path, query = urllib.splitquery(self.path.decode('utf-8'))
    node = self._find(path, self._root)
    if node is None:
        self.send_response(404)
        self.end_headers()
    else:
        options = (self._renderer.options_from_query(query)
                   if query else {})
        try:
            http_code, http_headers, http_payload = \
                self._renderer.render(node, options)
        except:
            # Report the traceback to the client, then re-raise so the
            # server still observes the failure.
            self.send_response(503)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write(''.join(
                traceback.format_exception(*sys.exc_info())))
            raise
        self.send_response(http_code)
        for header_name, header_value in http_headers:
            self.send_header(header_name, header_value)
        self.end_headers()
        self.wfile.write(http_payload)
def runAddon(self, url):
    """Run a plugin:// URL as a Kodi addon, or open any other URL as media.

    For plugin URLs: patch sys.argv in the addon sandbox, (re)load the
    addon's compiled source when the addon id changed, and exec it.
    For anything else: open the part before '|' in a web browser.
    """
    urlScheme = urlparse.urlparse(url)
    if urlScheme.scheme == 'plugin':
        # Different plugin
        pluginId, urlArgs = urllib.splitquery(url)
        self.theGlobals['sys'].argv[0] = pluginId
        self.theGlobals['sys'].argv[2] = '?' + (urlArgs or '')
        self.theGlobals['sys'].argv[1] += 1
        self.kodiDirectory = []
        actualID = urlScheme.netloc
        if self.addonID != actualID:
            # Addon changed: rebuild its python path, module table and
            # cached source before executing.
            self.addonID = actualID
            self.theGlobals['sys'].path = self.setPythonPath(
                actualID) + self.ORIGINAL_PYTHONPATH
            self.theGlobals['sys'].modules = newModules(
                self.theGlobals['sys'].modules)
            self.sourceCode = self.getCompiledAddonSource(actualID)
        try:
            msg = self.theGlobals['sys'].argv[0] + urllib.unquote(
                self.theGlobals['sys'].argv[2])
            self.log(msg, xbmc.LOGNONE)
            exec(self.sourceCode, self.theGlobals)
        except:
            self.logger.exception('')
    else:
        # Playback of media (file, etc.)
        urlLink, sep, userAgent = url.partition('|')
        self.log('Opening: ' + urlLink, xbmc.LOGNOTICE)
        webbrowser.open(urlLink)
def do_GET(self): path = self.path #拆分url(也可根据拆分的url获取Get提交才数据),可以将不同的path和参数加载不同的html页面,或调用不同的方法返回不同的数据,来实现简单的网站或接口 query = urllib.splitquery(path) self.send_response(200) self.send_header("Content-type","text/html") self.send_header("test","This is test!") self.end_headers() buf = '''''<!DOCTYPE HTML> <html> <head><title>Get page</title></head> <body> <form action="post_page" method="post"> action: <input type="text" name="action" /><br /> stock: <input type="text" name="stock" /><br /> price: <input type="text" name="price" /><br /> nun: <input type="text" name="nun" /><br /> <input type="submit" value="POST" /> </form> </body> </html>''' self.wfile.write(buf)
def do_GET(self):
    """Serve tile images under /images/, otherwise render the game page
    for the situation given in the `sit` query parameter."""
    if re.match("/images/[bcdWDF][123456789eswnrgw]\.png", self.path):
        # Static tile image: stream the file with a one-day cache header.
        self.send_response(200)
        self.send_header('Content-Type', 'image/png')
        self.send_header('Cache-Control', 'max-age=86400, must-revalidate')
        self.end_headers()
        from os import curdir, sep
        filename = curdir + sep + self.path
        print filename
        f = open(curdir + sep + self.path, 'rb')
        self.wfile.write(f.read())
        f.close()
    else:
        self.printCustomTextHTTPResponse(200)
        query_string = urllib.unquote_plus(self.path)
        path, query = urllib.splitquery(query_string)
        print query_string, path, query
        parameters = dict(
            urllib.splitvalue(v) for v in query.split("&")) if query else {}
        print parameters
        if 'sit' in parameters:
            situation = parameters['sit']
        else:
            situation = None
            #else:
            #situation = query_string[1:]
        print "situation:", situation
        page = get_page(situation)
        self.wfile.write('<html>')
        self.wfile.write(page)
        self.wfile.write('</html>')
def process(self, type):
    """Handle a request: for POST (type == 1) decode the body, convert it
    to a dict and pick out the `data` field; always echo GET query
    parameters; reply as UTF-8 text/html.

    NOTE(review): `type` shadows the builtin; kept for interface
    compatibility.
    """
    content = ""
    if type == 1:  # POST method: receive the POST parameters
        datas = self.rfile.read(int(self.headers['content-length']))
        datas = urllib.unquote(datas).decode("utf-8", 'ignore')  # force the expected encoding
        datas = transDicts(datas)  # convert the parameters into a dict
        if datas.has_key('data'):
            content = "data:" + datas['data'] + "\r\n"
    print self.path
    if '?' in self.path:
        query = urllib.splitquery(self.path)
        print query
        action = query[0]
        if query[1]:  # receive the GET parameters
            queryParams = {}
            for qp in query[1].split('&'):
                kv = qp.split('=')
                queryParams[kv[0]] = urllib.unquote(kv[1]).decode("utf-8", 'ignore')
                content += kv[0] + ':' + queryParams[kv[0]] + "\r\n"
    # encoding used for the response
    enc = "UTF-8"
    content = content.encode(enc)
    f = io.BytesIO()
    f.write(content)
    f.seek(0)
    self.send_response(200)
    self.send_header("Content-type", "text/html; charset=%s" % enc)
    self.send_header("Content-Length", str(len(content)))
    self.end_headers()
    shutil.copyfileobj(f, self.wfile)
def start(self, destfile=None, destfd=None):
    """Begin the asynchronous download of self._url via OLPCURLopener.

    destfile -- optional destination path; its basename becomes the
                suggested file name.
    destfd   -- optional already-open file descriptor to write into;
                only valid together with destfile.

    Raises ValueError when destfd is given without destfile.  When neither
    is given, a temporary file is created in self._destdir, preserving the
    extension of the URL path.  Reading is driven by a GObject IO watch
    that calls self._read_next_chunk.
    """
    urllib._urlopener = OLPCURLopener()
    self._info = urllib.urlopen(self._url)
    self._outf = None
    self._fname = None
    if destfd and not destfile:
        raise ValueError('Must provide destination file too when'
                         ' specifying file descriptor')
    if destfile:
        self._suggested_fname = os.path.basename(destfile)
        self._fname = os.path.abspath(os.path.expanduser(destfile))
        if destfd:
            # Use the user-supplied destination file descriptor
            self._outf = destfd
        else:
            self._outf = os.open(self._fname,
                                 os.O_RDWR | os.O_TRUNC | os.O_CREAT,
                                 0644)
    else:
        fname = self._get_filename_from_headers(self._info.headers)
        self._suggested_fname = fname
        # Strip scheme, host, query and attributes in turn to isolate the
        # bare URL path, so the temp file keeps the original extension.
        garbage_, path = urllib.splittype(self._url)
        garbage_, path = urllib.splithost(path or "")
        path, garbage_ = urllib.splitquery(path or "")
        path, garbage_ = urllib.splitattr(path or "")
        suffix = os.path.splitext(path)[1]
        (self._outf, self._fname) = tempfile.mkstemp(suffix=suffix,
                                                     dir=self._destdir)
    # NOTE(review): O_NDELAY is a status flag, but F_SETFD sets descriptor
    # flags (FD_CLOEXEC-style); F_SETFL was probably intended -- confirm.
    fcntl.fcntl(self._info.fp.fileno(), fcntl.F_SETFD, os.O_NDELAY)
    self._srcid = GObject.io_add_watch(self._info.fp.fileno(),
                                       GObject.IO_IN | GObject.IO_ERR,
                                       self._read_next_chunk)
def _renameFlow(self, state, t): """state is a honeysnap.flow.flow_state object, t = response or request""" #print "_renameFlow:state", state.fname rflow = freverse(state.flow) #print '_renameFlow:rflow ', rflow rs = self.statemgr.find_flow_state(rflow) if rs is not None: if rs.decoded is not None and state.decoded is not None: #print "Both halves decoded" user_agent = "UNKNOWN" url = 'UNKNOWN' r1 = rs.decoded if t == 'request': try: url = urllib.splitquery(state.decoded.uri)[0] realname = url.rsplit("/", 1)[-1] except AttributeError: realname = 'index.html' try: url = state.decoded.headers['host'] + url user_agent = state.decoded.headers['user-agent'] except KeyError: pass # reverse flows to get right sense for file renaming temp = rs rs = state state = temp if t == 'response': url = urllib.splitquery(r1.uri)[0] realname = url.rsplit("/", 1)[-1] try: user_agent = r1.headers['user-agent'] url = r1.headers['host'] + url except KeyError: # probably something like a CONNECT pass if realname == '' or realname == '/' or not realname: realname = 'index.html' fn = renameFile(state, realname) id, m5 = self.id.identify(state) outstring = "%s -> %s, %s (%s) at %s\n" % ( state.flow.src, state.flow.dst, url, user_agent, self.tf(state.ts)) outstring = outstring + "\tfile: %s, filetype: %s, md5 sum: %s\n" % ( fn, id, m5) self.add_flow(state.ts, state.flow.src, state.flow.dst, outstring)
def set_page(url, number):
    """Build a '?…page=N' query string for *url*, replacing any existing
    page parameter with *number*."""
    _, query = urllib.splitquery(url)
    remainder = re_page.sub(u'', query or u'').lstrip(u'&')
    if remainder:
        remainder += u'&'
    return u'?' + remainder + 'page=%d' % number
def _split_path(self, path):
    """Return the path and the query string as a dictionary."""
    base, query_string = urllib.splitquery(path)
    params = {}
    if query_string:
        for pair in query_string.split('&'):
            key, value = pair.split('=')
            params[key] = value
    return base, params
def do_PATCH(self):
    """Handle PATCH: read the request body and echo method, path, body and
    the split path/query back as a JSON response."""
    patch_path, patch_args = urllib.splitquery(self.path)
    body = self.rfile.read(int(self.headers['content-length']))
    self.send_json_response({
        'method': 'PATCH',
        'path': self.path,
        'data': body,
        'mpath': patch_path,
        'margs': patch_args,
    })
def request(app, path="/", method="GET", data=None,
            host="0.0.0.0:8080", headers=None, https=False):
    """Makes request to this application for the specified path and method.
    Response will be a storage object with data, status and headers.

        >>> urls = ("/hello", "hello")
        >>> app = application(urls, globals())
        >>> class hello:
        ...     def GET(self):
        ...         web.header('Content-Type', 'text/plain')
        ...         return "hello"
        ...
        >>> response = app.request("/hello")
        >>> response.data
        'hello'
        >>> response.status
        '200 OK'
        >>> response.headers['Content-Type']
        'text/plain'

    To use https, use https=True.

        >>> urls = ("/redirect", "redirect")
        >>> app = application(urls, globals())
        >>> class redirect:
        ...     def GET(self): raise web.seeother("/foo")
        ...
        >>> response = app.request("/redirect")
        >>> response.headers['Location']
        'http://0.0.0.0:8080/foo'
        >>> response = app.request("/redirect", https=True)
        >>> response.headers['Location']
        'https://0.0.0.0:8080/foo'

    The headers argument specifies HTTP headers as a mapping object
    such as a dict.
    """
    path, maybe_query = urllib.splitquery(path)
    query = maybe_query or ""
    # Build a minimal WSGI environ describing the fake request.
    env = dict(HTTP_HOST=host, REQUEST_METHOD=method, PATH_INFO=path,
               QUERY_STRING=query, HTTPS=https)
    headers = headers or {}
    # HTTP header names become HTTP_* environ keys: upper-case, '-' -> '_'.
    translation_table = string.maketrans(string.ascii_lowercase + "-",
                                         string.ascii_uppercase + "_")
    for k, v in headers.items():
        env["HTTP_" + string.translate(k, translation_table)] = v
    if data:
        # Encode the form data as the WSGI input stream.
        import StringIO
        q = urllib.urlencode(data)
        env["wsgi.input"] = StringIO.StringIO(q)
    response = web.storage()
    def start_response(status, headers):
        response.status = status
        response.headers = dict(headers)
    response.data = "".join(app.wsgifunc()(env, start_response))
    return response
def fixurl(url):
    """Strip Google Analytics tracking parameters (utm_source, utm_medium,
    utm_campaign) from *url* and return the cleaned URL.

    Fixes over the original: separators left hanging after the removal are
    trimmed (e.g. "a=1&utm_source=x" no longer yields "a=1&"), and a URL
    whose query consisted only of tracking parameters comes back without a
    dangling '?'.
    """
    domain, query = urllib.splitquery(url)
    if not query:
        return domain
    query = re.sub(r"utm_(source|medium|campaign)\=([^&]+)&?", "", query)
    # Drop any '&' left at either edge after removing parameters.
    query = query.strip("&")
    if not query:
        # Everything was tracking noise: return the bare URL, no '?'.
        return domain
    return urlparse.urljoin(domain, "?" + query)
def _uris_to_tuples(uris):
    """Return a list of (base, query) pairs for *uris*; the query element
    is '' when the URI has no (or an empty) query string."""
    pairs = []
    for uri in uris:
        base, query = urllib.splitquery(uri)
        if query:
            pairs.append((base, query))
        else:
            pairs.append((base, ""))
    return pairs