def __init__(self, environ, start_response):
    self.environ = environ
    self.start_response = start_response
    self.is_post = False
    self.query_string = parse_qs(environ["QUERY_STRING"])
    self.page_name = environ["PATH_INFO"]
    if environ["REQUEST_METHOD"] == "POST":
        if self.page_name == "/profile_picture":
            self.form = FieldStorage(fp=environ['wsgi.input'],
                                     environ=environ,
                                     keep_blank_values=True)
            # A nested FieldStorage instance holds the file
            self.fileitem = self.form["file"]
        else:
            request_body_size = int(environ.get('CONTENT_LENGTH', 0))
            request_body = environ['wsgi.input'].read(request_body_size)
            self.options = parse_qs(request_body)
        self.is_post = True
    else:
        self.options = []
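The snippets in this section all predate the removal of `cgi.parse_qs`, which was a long-deprecated alias: the supported function is `urllib.parse.parse_qs` on Python 3 (`urlparse.parse_qs` on Python 2), and the `cgi` module itself was removed from the standard library in Python 3.13 (PEP 594). A minimal modern sketch of the POST-body parsing above, assuming a plain `application/x-www-form-urlencoded` body:

from urllib.parse import parse_qs

def read_post_options(environ):
    # CONTENT_LENGTH may be missing or empty; default to no body.
    try:
        size = int(environ.get('CONTENT_LENGTH') or 0)
    except ValueError:
        size = 0
    body = environ['wsgi.input'].read(size)
    # wsgi.input yields bytes on Python 3; decode before parsing so the
    # result maps str keys to lists of str values.
    return parse_qs(body.decode('utf-8'))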
def main(self, environ, start_response):
    from cgi import parse_qs
    query = parse_qs(environ.get('QUERY_STRING'))
    if '_escaped_fragment_' in query.keys():
        unescaped_query = parse_qs(query['_escaped_fragment_'][0][1:])
        output = BuildResult().getStaticResultsHtml(unescaped_query)
    elif environ.get('REQUEST_METHOD') == 'POST':
        TYPES = ['nor', 'fir', 'wat', 'ele', 'gra', 'ice', 'fig', 'poi',
                 'gro', 'fly', 'psy', 'bug', 'roc', 'gho', 'dra', 'dar', 'ste']
        wsgi_input = environ['wsgi.input']
        query = parse_qs(wsgi_input.read())
        corrected_query = {}
        if 'evo' in query.keys():
            corrected_query['evo'] = query['evo']
        if 'trt' in query.keys():
            corrected_query['trt'] = query['trt']
        for t in TYPES:
            if t in query.keys():
                corrected_query[t] = ['%s_%s' % (query[t + 'c'][0], query[t][0])]
        output = BuildResult().getStaticResultsHtml(corrected_query)
    else:
        output = BuildResult().getHtml()
    response_headers = [('Content-Type', 'text/html'),
                        ('Content-Length', str(len(output)))]
    start_response(self.status, response_headers)
    return [output]
def get_user_dict(self, oauth_token, oauth_token_secret, verifier):
    # Given oauth_token, oauth_token_secret and verifier, returns
    # access_token, access_token_secret and screen_name
    # Prepare the parameters
    params = PARAMS.copy()
    params['oauth_token'] = oauth_token
    params['oauth_verifier'] = verifier
    # Generate the signature from the parameters
    key = CONSUMER_SECRET + '&' + oauth_token_secret
    message = self.gen_message('GET', ACCESS_TOKEN_URL, params)
    signature = self.gen_signature(key, message)
    # Add the signature to the parameters
    params['oauth_signature'] = signature
    # Build the URL. This is a GET request, so the parameters are
    # appended to the URL
    params = urllib.parse.urlencode(params)
    url = ACCESS_TOKEN_URL + '?' + params
    # Fetch access_token, access_token_secret and screen_name
    result = urllib.request.urlopen(url).read()
    access_token = cgi.parse_qs(result)[b'oauth_token'][0].decode()
    access_token_secret = cgi.parse_qs(result)[b'oauth_token_secret'][0].decode()
    screen_name = cgi.parse_qs(result)[b'screen_name'][0].decode()
    # Build user_dict
    self.user_dict = {
        'access_token': access_token,
        'access_token_secret': access_token_secret,
        'screen_name': screen_name
        # 'icon': might add this eventually, but it is a hassle, so it
        # is postponed
    }
    return self.user_dict
def test_signed_Indico_request_creation(self):
    """urlutils - test creation of signed Indico requests"""
    signed_Indico_request_url = create_Indico_request_url(
        "https://indico.cern.ch",
        "categ",
        "",
        [1, 7],
        "xml",
        {'onlypublic': 'yes',
         'order': 'title',
         'from': 'today',
         'to': 'tomorrow'},
        '00000000-0000-0000-0000-000000000000',
        '00000000-0000-0000-0000-000000000000',
        _timestamp=1234)
    # Are we at least accessing the correct base url?
    self.assert_(signed_Indico_request_url.startswith(
        "https://indico.cern.ch/export/categ/1-7.xml?"))
    # Check parameters
    self.assertEqual(parse_qs(signed_Indico_request_url)["order"], ['title'])
    self.assertEqual(parse_qs(signed_Indico_request_url)["timestamp"], ['1234'])
    # Check signature exists and is correct
    self.assertEqual(parse_qs(signed_Indico_request_url)["signature"],
                     ['e984e0c683e36ce3544372f23a397fd2400f4954'])
def test_license_to_choose_redirect():
    # Make sure we redirect from /license/* to /choose/ and keep the
    # GET parameters
    response = TESTAPP.get(
        '/license/zero/results?'
        'license-class=zero&name=ZeroMan&work_title=SubZero')
    redirected_response = response.follow()
    assert_equal(urlparse.urlsplit(response.location)[2],
                 '/choose/zero/results')
    qs = cgi.parse_qs(urlparse.urlsplit(response.location)[3])
    assert_equal(
        qs,
        {'license-class': ['zero'], 'name': ['ZeroMan'],
         'work_title': ['SubZero']})

    # Also make sure that POST redirects work
    response = TESTAPP.post(
        '/license/zero/results',
        {'license-class': 'zero', 'name': 'ZeroMan',
         'work_title': 'SubZero'})
    redirected_response = response.follow()
    assert_equal(urlparse.urlsplit(response.location)[2],
                 '/choose/zero/results')
    qs = cgi.parse_qs(urlparse.urlsplit(response.location)[3])
    assert_equal(
        qs,
        {'license-class': ['zero'], 'name': ['ZeroMan'],
         'work_title': ['SubZero']})
def execute( self ):
    " Should return True if the execution completed successfully "
    self.fromScratch()
    jobID = self.ns.submitJob( 'TEST', 'bla', '', '', '', '' )
    ns_client = self.getNetScheduleService( 'TEST', 'scenario301' )
    output = execAny( ns_client, 'SST2 ' + jobID )
    values = parse_qs( output, True, True )
    if values[ 'job_status' ] != ['Pending'] or \
       values.has_key( 'job_exptime' ) == False:
        raise Exception( "Unexpected SST2 output" )
    time1 = int( values[ 'job_exptime' ][ 0 ] )
    time.sleep( 5 )
    output = execAny( ns_client, 'SST2 ' + jobID )
    values = parse_qs( output, True, True )
    time2 = int( values[ 'job_exptime' ][ 0 ] )
    if time2 - time1 < 3:
        raise Exception( "SST2 does not change the job "
                         "expiration while it must" )
    return True
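The two bare booleans passed to `parse_qs` above map onto its first two optional parameters, `keep_blank_values` and `strict_parsing`. A small sketch of what each flag changes:

from urllib.parse import parse_qs

qs = 'job_status=Pending&job_exptime='
print(parse_qs(qs))                          # blank value dropped: {'job_status': ['Pending']}
print(parse_qs(qs, keep_blank_values=True))  # blank kept: adds 'job_exptime': ['']
# strict_parsing=True raises ValueError on malformed field pairs
# instead of silently ignoring them.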
def __call__(self, environ, start_response):
    """Implement WSGI interface"""
    if environ['QUERY_STRING'] == '':
        return self.usage_documentation.__call__(environ, start_response)
    try:
        # parse params
        query = self.parse(parse_qs(environ['QUERY_STRING']))
        query._verify_parameters()
        self.output_format = query.output_format
    except Exception as e:
        message = str(e)
        ftype = parse_qs(environ['QUERY_STRING']).get('format', [''])[0]
        if ftype == 'json':
            self.output_format = 'json'
        else:
            self.output_format = 'iaga2002'
        error_body = self.error(400, message, environ, start_response)
        return [error_body]
    try:
        # fetch timeseries
        timeseries = self.fetch(query)
        # format timeseries
        timeseries_string = self.format_data(
            query, timeseries, start_response, environ)
        if isinstance(timeseries_string, str):
            timeseries_string = timeseries_string.encode('utf8')
    except Exception as e:
        if self.error_stream:
            print("Error processing request: %s" % str(e),
                  file=self.error_stream)
        message = "Server error."
        error_body = self.error(500, message, environ, start_response)
        return [error_body]
    return [timeseries_string]
def __call__(self, environ, start_response):
    path = environ['PATH_INFO']
    method = environ['REQUEST_METHOD']
    if method == 'POST':
        try:
            size = int(environ['CONTENT_LENGTH'])
            raw_string = environ['wsgi.input'].read(size)
        except (TypeError, ValueError):
            raw_string = ""
        try:
            query_string = str(raw_string)
        except:
            query_string = ""
        param = cgi.parse_qs(query_string)
        parameters = {}
        for k, v in param.items():
            parameters[k] = v.pop()
        return self.process(environ, start_response, parameters)
    if method == 'GET':
        query_string = environ['QUERY_STRING']
        param = cgi.parse_qs(query_string)
        parameters = {}
        for k, v in param.items():
            parameters[k] = v.pop()
        return self.process(environ, start_response, parameters)
def callback(environ, start_response, oauth_config, oauth_provider):
    parameters = cgi.parse_qs(environ.get('QUERY_STRING', ''))
    if 'oauth_token' in parameters and \
       'oauth_verifier' in parameters:
        oauth_token = parameters['oauth_token'][0]
        oauth_verifier = parameters['oauth_verifier'][0]
    else:
        return error(start_response, message='oauth')
    cookie = Cookie.SimpleCookie()
    cookie.load(environ.get('HTTP_COOKIE', ''))
    if config.TOKEN_SECRET in cookie:
        oauth_token_secret = cookie[config.TOKEN_SECRET].value
    else:
        return error(start_response, message='secret')
    parameters = cgi.parse_qs(request_access_token(oauth_token,
                                                   oauth_token_secret,
                                                   oauth_verifier,
                                                   oauth_config))
    if oauth_provider == 'snucse' and 'account' in parameters:
        account = parameters['account'][0]
    elif oauth_provider == 'twitter' and 'screen_name' in parameters:
        account = parameters['screen_name'][0]
    else:
        return error(start_response, message='account')
    session_id = create_session_id()
    shown_account = '%s%s' % (config.OAUTH[oauth_provider]['PREFIX'], account)
    data = {
        'session_id': session_id,
        'account': shown_account,
        'datetime': datetime.datetime.now()
    }
    db.session.insert(data)
    start_response('200 OK', [
        ('Refresh', '0; url=/'),
        ('Content-Type', 'text/html; charset=utf-8'),
        ('Set-Cookie', '%s=%s; path=/' % (config.SESSION_ID, session_id))
    ])
    return ['<a href="/">%s Go</a>' % account]
def get_parameters(self):
    if self._parameters == None:
        self._get_first_line()
        import cgi
        parameters = cgi.parse_qs(self._queryString)
        length = self.get_headers()['content-length']
        ctype, pdict = cgi.parse_header(self.get_headers()['content-type'])
        if ctype == 'multipart/form-data':
            params = cgi.parse_multipart(self._rfile, pdict)
            for param in params.keys():
                parameters[param] = params[param]
        elif length != '':
            len = int(length)
            body = self._rfile.read(len)
            self._rfile.close()
            params = cgi.parse_qs(body)
            for param in params.keys():
                parameters[param] = params[param]
        self._parameters = Parameters(parameters)
    return self._parameters
def do_POST(self):
    if not self.do_Auth():
        self.showAuthResult("Not authenticated")
        return
    # get
    get_data = cgi.parse_qs(self.path[2:])
    cl, cl2 = cgi.parse_header(self.headers.get('content-length'))
    qs = self.rfile.read(int(cl))
    post_data = cgi.parse_qs(qs.decode())
    # resolution
    if post_data.get('resolution') != None:
        if (len(post_data.get('resolution')) > 0):
            mode = post_data.get('resolution')[0]
            if ((mode >= "0") and (mode <= "2")):
                self.server.SetResolution(int(mode))
            else:
                None
        return
    # command
    if post_data.get('action') != None:
        cmd = post_data.get('action')[0]
        result = self.server.robot.sendCommand(cmd)
        self.wfile.write(result)
        self.log_error("[COMMAND] %s" % result)
        return
    # showPage
    self.showPage('GET', get_data, post_data)
def myapp(environ, start_response):
    output = ["<p> WSGI script by George Ivanov </p>",
              "<form method='POST'>",
              "<p><input type='text' name='str'></p>",
              "<p><input type='submit' value='Send!'></p>",
              "</form>"]
    response_headers = [('Content-type', 'text/html')]
    if environ['REQUEST_METHOD'] == "GET":
        dict_value = parse_qs(environ['QUERY_STRING'])
        output.append("<h3> <p> Get data: </p> </h3>")
        if not dict_value:
            output.append("No data received")
        for key, value in dict_value.iteritems():
            if len(value) == 1:
                output.append("%s = %s <br>" % (key, value[0]))
            else:
                output.append("%s = %s <br>" % (key, value))
    else:
        try:
            size = int(environ.get('CONTENT_LENGTH', 0))
        except:
            size = 0
        request = environ['wsgi.input'].read(size)
        dict_value = parse_qs(request)
        output.append("<h3> <p> Post data: </p> </h3>")
        output.append("Value = %s <br>" % (dict_value['str'][0]))
    response_headers.append(('Content-length',
                             str(sum([len(x) for x in output]))))
    start_response('200 OK', response_headers)
    return output
def test_parse():
    """WebService_test.test_parse()

    Create WebService instance and call parse to confirm that query
    string values are applied to the correct class attribute. Also
    confirm that default values are applied correctly.
    """
    service = WebService(TestFactory())
    query = service.parse(parse_qs('id=BOU&starttime=2016-06-06'
            '&endtime=2016-06-07&elements=H,E,Z,F&sampling_period=60'
            '&format=iaga2002&type=variation'))
    assert_equals(query.observatory_id, 'BOU')
    assert_equals(query.starttime, UTCDateTime(2016, 6, 6, 0))
    assert_equals(query.endtime, UTCDateTime(2016, 6, 7, 0))
    assert_equals(query.elements, ['H', 'E', 'Z', 'F'])
    assert_equals(query.sampling_period, '60')
    assert_equals(query.output_format, 'iaga2002')
    assert_equals(query.data_type, 'variation')
    # Test that defaults are set for unspecified values
    now = datetime.now()
    today = UTCDateTime(year=now.year, month=now.month, day=now.day, hour=0)
    tomorrow = today + (24 * 60 * 60 - 1)
    query = service.parse(parse_qs('id=BOU'))
    assert_equals(query.observatory_id, 'BOU')
    assert_equals(query.starttime, today)
    assert_equals(query.endtime, tomorrow)
    assert_equals(query.elements, ('X', 'Y', 'Z', 'F'))
    assert_equals(query.sampling_period, '60')
    assert_equals(query.output_format, 'iaga2002')
    assert_equals(query.data_type, 'variation')
    assert_raises(Exception, service.parse, parse_qs('/?id=bad'))
def do_POST(self):
    self.wfile.write('Do you yahoo?')
    post_body = self.rfile.read(int(self.headers.getheader('content-length')))
    logger("Server: POST from client: " + self.client_address[0])
    logger("Server: POST content: " + post_body, False)
    if (post_body.find('canon_url') > 0):
        payload = '{' + urllib.unquote(post_body).replace('+', ' ')[9:-1] + '}'
        msg = json.loads(payload)
        skypeSendBitbucket(msg, payload)
        logger("BitBucket: %s commits to %s/%s" % (
            str(msg['commits'][0]['author']),
            str(msg['repository']['name']),
            str(msg['commits'][0]['branch'])))
    elif (urllib.unquote(post_body).find('//github.com') > 0):
        payload = '{' + urllib.unquote(post_body)[9:-1] + '}'
        msg = json.loads(payload)
        skypeSendGithub(msg, payload)
        logger("Github: %s commits to %s/%s %s" % (
            str(msg['head_commit']['author']['name']),
            str(msg['repository']['name']),
            str(msg['ref'].replace('refs/heads/', '')),
            ("true" if str(msg['forced']) == "true" else "")))
    elif (urllib.unquote(post_body).find('signature=') > 0):
        param = cgi.parse_qs(post_body)
        msg = json.loads(param['data'][0])
        skypeSendUservoice(str(param['event'][0]), msg)
        logger("Uservoice: %s %s" % (str(param['event'][0]), str(msg)))
    else:
        param = cgi.parse_qs(post_body)
        msg = json.loads(param['problem'][0])
        skypeSendErrbit(msg, param['errbit_url'][0])
        logger("Errbit: %s error on %s" % (
            str(msg['app_name']), str(msg['where'])))
def application(environ, start_response):
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except (ValueError):
        request_body_size = 0
    request_body = environ['wsgi.input'].read(request_body_size)
    d = parse_qs(request_body)
    e = parse_qs(environ['QUERY_STRING'])
    age = d.get('age', [''])[0]
    hobbies = d.get('hobbies', [])
    info = e.get('info', [''])[0]
    # age = e.get('age', [''])[0]
    # hobbies = e.get('hobbies', [])
    age = escape(age)
    hobbies = [escape(hobby) for hobby in hobbies]
    info = escape(info)
    response_body = html % (age or 'Empty',
                            ', '.join(hobbies or ['No Hobbies']),
                            info or 'Empty')
    status = '200 OK'
    response_headers = [('Content-Type', 'text/html'),
                        ('Content-Length', str(len(response_body)))]
    start_response(status, response_headers)
    return [response_body]
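A detail worth knowing when porting handlers like this one to Python 3: `wsgi.input.read()` returns bytes there, and `parse_qs` preserves the input type, so `d.get('age')` with a str key would quietly return nothing. A quick demonstration:

from urllib.parse import parse_qs

body = b'age=30&hobbies=chess&hobbies=go'
print(parse_qs(body)['hobbies'])            # bytes in, bytes out: [b'chess', b'go']
print(parse_qs(body.decode())['hobbies'])   # decode first to get str: ['chess', 'go']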
def application(env, start_response):
    pprint.pprint(env)
    start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])
    if env['REQUEST_METHOD'] == 'GET':
        query = parse_qs(env['QUERY_STRING'])
    else:
        try:
            request_body_size = int(env.get('CONTENT_LENGTH', 0))
        except (ValueError):
            request_body_size = 0
        request_body = env['wsgi.input'].read(request_body_size)
        query = parse_qs(request_body)
    testq = re.sub('(' + '|'.join(chars.keys()) + ')', replace_chars,
                   query.get('test', [''])[0])
    print type(testq), " ", testq.decode('utf-8')
    Custom.hw('INIT')
    Custom.cut('TOTAL')
    Custom.paper('OUT')
    Custom.feed(testq.decode('utf-8'))
    Custom.cut('TOTAL')
    Custom.paper('OUT')
    Custom.control('FF')
    # Custom._read_status()
    return [htmlhead + testq + htmlend]  # .encode('utf8')]
def _check_user(request_data):
    """Checks whether access to the site is allowed.

    Returns:
        access - True if access to the site is allowed, False if it is denied
        sessid - a new token for cookie-based access to the site
    """
    # Parse the POST request parameters
    try:
        request_body_size = int(request_data.get('CONTENT_LENGTH', 0))
    except (ValueError):
        request_body_size = 0
    access = models.get_admin_access()
    sessid = None
    # Check the submitted login and password
    if request_body_size:
        request_body = request_data['wsgi.input'].read(request_body_size)
        login_info = parse_qs(request_body)
        if login_info.get("name") and login_info.get("password"):
            if models.confirm_password(login_info["name"][0],
                                       login_info["password"][0]):
                if login_info.get("remember"):
                    sessid = models.generate_sessid()
                models.set_admin_access(True)
                access = True
    # Check whether the request came with cookies
    if request_data.get('HTTP_COOKIE'):
        cookie = parse_qs(request_data['HTTP_COOKIE'])
        if cookie.get("sessid"):
            if models.confirm_sessid(cookie.get("sessid")):
                access = True
    return access, sessid
def do_POST(self):
    if not self.do_Auth():
        self.showAuthResult(u'Not authenticated')
        return
    # GET
    get_data = cgi.parse_qs(self.path[2:])
    cl, cl2 = cgi.parse_header(self.headers.get('content-length'))
    qs = self.rfile.read(int(cl))
    post_data = cgi.parse_qs(qs.decode())
    # Camera frame resolution
    if post_data.get('resolution') != None:
        if (len(post_data.get('resolution')) > 0):
            mode = post_data.get('resolution')[0]
            if ((mode >= "0") and (mode <= "2")):
                self.server.SetResolution(int(mode))
            else:
                None
        return
    # Commands
    if post_data.get('action') != None:
        cmd = post_data.get('action')[0]
        result = self.server.robot.sendCommand(cmd)
        self.wfile.write(result)
        self.log_error("[COMMAND] %s" % result)
        return
    # Handle the request
    self.showPage('GET', get_data, post_data)
def _on_access_token(self, redirect_uri, client_id, client_secret,
                     callback, fields, response):
    if response.error:
        logging.warning('Facebook auth error: %s' % str(response))
        callback(None)
        return
    #print "self.request.arguments"
    #print self.request.arguments
    #print "RESPONSE.BODY:"
    #print response.body
    session = {
        "access_token": cgi.parse_qs(response.body)["access_token"][-1],
        "expires": cgi.parse_qs(response.body).get("expires")
    }
    #print "SESSION"
    #print session
    #print "\n"
    self.github_request(
        path="/user/show",
        callback=self.async_callback(
            self._on_get_user_info, callback, session, fields),
        access_token=session["access_token"],
        fields=",".join(fields)
    )
def __init__(self, environ):
    match = _CHARSET_RE.search(environ.get('CONTENT_TYPE', ''))
    if match:
        charset = match.group(1).lower()
    else:
        charset = 'utf-8'
    webob.Request.__init__(self, environ, charset=charset,
                           unicode_errors='ignore',
                           decode_param_names=True)
    self.arguments = {}
    self.uri = self.path
    self.query = environ.get("QUERY_STRING", "")
    self.protocol = environ["wsgi.url_scheme"]
    if self.query:
        self.uri += "?" + self.query
    arguments = cgi.parse_qs(self.query)
    for name, values in arguments.iteritems():
        values = [v.encode('utf-8') for v in values if v]
        if values:
            self.arguments[name] = values
    # Parse request body
    self.files = {}
    content_type = self.headers.get("Content-Type", "")
    if content_type.startswith("application/x-www-form-urlencoded"):
        for name, values in cgi.parse_qs(self.body).iteritems():
            self.arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        boundary = content_type[30:]
        if boundary:
            self._parse_mime_body(boundary)
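The `_CHARSET_RE` used above is presumably a hand-rolled regex over the Content-Type header; the standard library can parse the same `type; key=value` syntax without one. A sketch using `email.message.Message`, which the Python docs point to as the replacement for `cgi.parse_header`:

from email.message import Message

def charset_of(content_type, default='utf-8'):
    # Message parses the "type; key=value" header syntax for us.
    msg = Message()
    msg['Content-Type'] = content_type
    return msg.get_content_charset(default)

print(charset_of('text/html; charset=ISO-8859-1'))  # -> iso-8859-1
print(charset_of('application/json'))               # -> utf-8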
def fetch_html(url, get=None, post=None, headers=None, cookie_jar=None):
    if get:
        if type(get) == str:
            get = cgi.parse_qs(get)
        l = list(urlparse.urlparse(url))
        g = cgi.parse_qs(l[4])
        g.update(get)
        l[4] = urllib.urlencode(g)
        url = urlparse.urlunparse(l)
    if post and type(post) != str:
        post = urllib.urlencode(post)
    if cookie_jar == None:
        cookie_jar = cookielib.CookieJar()
    if not headers:
        headers = {'User-Agent': user_agent}
    else:
        if 'User-Agent' not in headers:
            headers['User-Agent'] = user_agent
    if verbose:
        print 'fetching', url, '...'
    request = urllib2.Request(url, post, headers)
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
    res = opener.open(request).read()
    if verbose:
        print 'DONE fetching.'
    return res
def fetch_html(url, get=None, post=None, headers=None, cookie_jar=None):
    """
    Fetches and returns the html at the given *url*, optionally using
    *get*, *post*, *headers*, and *cookie_jar*. No scraping occurs.

    This function is used internally by :func:`scrapemark.scrape`.
    For the behavior of ``headers['User-Agent']`` and *cookie_jar*,
    read the :func:`scrapemark.scrape` documentation.
    """
    if get:
        if type(get) == str:
            get = cgi.parse_qs(get)
        l = list(urlparse.urlparse(url))
        g = cgi.parse_qs(l[4])
        g.update(get)
        l[4] = urllib.urlencode(g)
        url = urlparse.urlunparse(l)
    if post and type(post) != str:
        post = urllib.urlencode(post)
    if cookie_jar == None:
        cookie_jar = cookielib.CookieJar()
    if not headers:
        headers = {"User-Agent": user_agent}
    else:
        if "User-Agent" not in headers:
            headers["User-Agent"] = user_agent
    if verbose:
        print "fetching", url, "..."
    request = urllib2.Request(url, post, headers)
    request.add_header("Accept", "text/html")
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))
    res = opener.open(request).read()
    if verbose:
        print "DONE fetching."
    return res
def _parse_parameters(request):
    parameters = {}
    if request.url.query:
        parameters['query'] = parse_qs(request.url.query,
                                       keep_blank_values=True)
    _type, _options = parse_header(request.headers.get('content-type', ''))
    if _type == 'application/x-www-form-urlencoded':
        parameters['body'] = parse_qs(request.body, keep_blank_values=True)
    elif _type in ('application/json', ):
        try:
            parameters['body'] = json.loads(request.body)
        except (NameError, TypeError):
            pass
    elif _type == 'application/x-amf':
        pass
    elif _type == 'text/x-gwt-rpc':
        pass
    elif _type == 'application/xml':
        pass
    else:
        pass
    return parameters
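The dispatch-on-media-type pattern above compresses to a small standalone helper. This sketch (the `parse_body` name is hypothetical) keeps the same two supported branches and leaves the rest unparsed, as the original does:

import json
from urllib.parse import parse_qs

def parse_body(content_type, body):
    # Strip any ";charset=..." options before comparing the media type.
    media_type = content_type.split(';', 1)[0].strip().lower()
    if media_type == 'application/x-www-form-urlencoded':
        return parse_qs(body, keep_blank_values=True)
    if media_type == 'application/json':
        return json.loads(body)
    return None  # AMF, GWT-RPC, XML, ... left unparsed, as above

print(parse_body('application/x-www-form-urlencoded; charset=utf-8', 'a=1&b='))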
def __init__(self, sender, conn_id, path, headers, body, *args, **kwargs):
    self.sender = sender
    self.path = path
    self.conn_id = conn_id
    self.headers = headers
    self.body = body

    if self.headers["METHOD"] == "JSON":
        self.data = json.loads(body)
    else:
        self.data = {}

    # populate arguments with QUERY string
    self.arguments = {}
    if "QUERY" in self.headers:
        query = self.headers["QUERY"]
        arguments = cgi.parse_qs(query)
        for name, values in arguments.iteritems():
            values = [v for v in values if v]
            if values:
                self.arguments[name] = values

    # handle data, multipart or not
    if self.method in ("POST", "PUT") and self.content_type:
        form_encoding = "application/x-www-form-urlencoded"
        if self.content_type.startswith(form_encoding):
            arguments = cgi.parse_qs(self.body)
            for name, values in arguments.iteritems():
                values = [v for v in values if v]
                if values:
                    self.arguments.setdefault(name, []).extend(values)
def containsSameInfo(self, line, line2, extra={}):
    """
    Check if two HTTP request lines contain the same info.

    We define lines as containing same info, when they have the same
    scheme, protocol, path and the same query parameters and values.

    The extra argument should be a dictionary, that will be used to
    extend the parsed query parameters of the first line.
    """
    scheme, url, protocol = map(string.strip, line.split(' ', 2))
    scheme2, url2, protocol2 = map(string.strip, line2.split(' ', 2))
    if '?' in url:
        path, qs = url.split('?', 1)
    else:
        path, qs = url, ''
    if '?' in url2:
        path2, qs2 = url2.split('?', 1)
    else:
        path2, qs2 = url2, ''
    args = cgi.parse_qs(qs, True)
    args.update(extra)
    args2 = cgi.parse_qs(qs2, True)
    self.assertEquals(scheme, scheme2)
    self.assertEquals(protocol, protocol2)
    self.assertEquals(path, path2)
    self.assertEquals(args, args2)
def parse(self, of='hb', req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
    """Parse buffer to extract records. Format the records using the
    selected output format."""
    (recids, records) = self.parse_and_extract_records(of)
    if req and cgi.parse_qs(req.args).has_key('jrec'):
        counter = int(cgi.parse_qs(req.args)['jrec'][0]) - 1
    else:
        counter = 0
    for recid in recids:
        counter += 1
        if of in ['hb', None]:
            html = """
            <tr><td valign="top" align="right" style="white-space: nowrap;">
            <input name="recid" type="checkbox" value="%(recid)s" />
            %(counter)s.
            </td><td valign="top">%(record)s</td></tr>
            """ % {'recid': recid,
                   'counter': counter,
                   'record': records[recid]}
        elif of == 'hd':
            # HTML detailed (hd) is not supported yet
            # TODO: either disable the hd output format or print it out correctly
            html = """"""
        elif of == 'xm':
            html = records[recid]
        else:
            html = None
        if html:
            self.add_html_result(html, limit)
def get_parameters(self):
    if self._parameters == None:
        self._get_first_line()
        parameters = parse_qs(self._queryString)
        length = self.get_header('content-length')
        ctype, pdict = parse_header(self.get_header('content-type'))
        if ctype == 'multipart/form-data':
            params = parse_multipart(self._rfile, pdict)
            for param in params.keys():
                parameters[param] = params[param]
        elif length != '':
            len = int(length)
            body = self._rfile.read(len)
            self._rfile.close()
            params = parse_qs(body)
            for param in params.keys():
                parameters[param] = params[param]
        for parameter in parameters:
            list = parameters[parameter]
            new_list = []
            for param in list:
                new_list.append(join(split(param, "\r\n"), "\n"))
            parameters[parameter] = new_list
        #self._parameters = Parameters(parameters)
        self._parameters = parameters
    return self._parameters
def __init__(self, core, environ, start_response):
    request.Request.__init__(self)
    self._headers = None
    self._environ = environ
    self._start_response = start_response
    self.method = environ['REQUEST_METHOD']
    self.path = environ["PATH_INFO"]
    request.Request.init(self, core)
    if self.method != 'POST':
        self.arguments = cgi.parse_qs(self._environ.get('QUERY_STRING'))
    else:
        if self._environ.get("CONTENT_TYPE", "").startswith("multipart/form-data"):
            self.arguments = self._handle_multipart(fp=environ['wsgi.input'],
                                                    environ=environ)
        else:
            self.arguments = cgi.parse_qs(environ['wsgi.input'].read())
    for name, value in self.arguments.items():
        self.arguments[name] = (len(value) == 1) and value[0] or value
    if self._environ.get("HTTP_X_REQUESTED_WITH", "") == "XMLHttpRequest":
        self.is_xhr = True
    # Force request type when the client explicitly waits for "text/event-stream"
    if self._environ.get("HTTP_ACCEPT", "text/html") == "text/event-stream":
        self.is_stream = True
def get_info_hash(request, multiple=False):
    if not multiple:
        return b2a_hex(cgi.parse_qs(request.query_string)['info_hash'][0])
    else:
        hashes = set()
        for hash in cgi.parse_qs(request.query_string)['info_hash']:
            hashes.add(b2a_hex(hash))
        return hashes
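BitTorrent `info_hash` values are raw 20-byte SHA-1 digests, which is why the code above hex-encodes them with `b2a_hex` rather than treating them as text. A round-trip sketch on Python 3 (the hash here is a throwaway SHA-1, made up for illustration):

import hashlib
from binascii import b2a_hex
from urllib.parse import parse_qs, quote_from_bytes

raw_hash = hashlib.sha1(b'example info dict').digest()   # 20 raw bytes
query_string = ('info_hash=' + quote_from_bytes(raw_hash)).encode('ascii')
# Parse as bytes so percent-decoding yields the original raw bytes.
decoded = parse_qs(query_string)[b'info_hash'][0]
assert decoded == raw_hash
print(b2a_hex(decoded).decode())   # 40 hex characters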
def assert_urls_match(url_a, url_b):
    url_a = urlparse(url_a)
    url_b = urlparse(url_b)
    assert url_a.scheme == url_b.scheme
    assert url_a.netloc == url_b.netloc
    assert url_a.path == url_b.path
    assert cgi.parse_qs(url_a.query) == cgi.parse_qs(url_b.query)
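Since `parse_qs` returns a dict, the final comparison is insensitive to parameter order, which is the point of the helper. A quick usage sketch:

# Same parameters, different order: passes.
assert_urls_match('https://example.com/a?x=1&y=2',
                  'https://example.com/a?y=2&x=1')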
def get_page(environ, start_response):
    #logger.debug('get_page: ' + repr((environ['SCRIPT_NAME'], environ['PATH_INFO'])))
    req_headers = copy_headers_to_dict(environ, exclude=['HTTP_ACCEPT_ENCODING'])
    wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
    page = environ['PATH_INFO'].lstrip('/')
    check_auth(environ, start_response, base, opener, req_headers)
    upstream_handler = None
    status = httplib.OK
    params = cgi.parse_qs(environ['QUERY_STRING'])
    #Note: probably a better solution here: http://code.google.com/p/mimeparse/
    accepted_imts = environ.get('HTTP_ACCEPT', '').split(',')
    #logger.debug('accepted_imts: ' + repr(accepted_imts))
    imt = first_item(dropwhile(lambda x: '*' in x, accepted_imts))
    #logger.debug('imt: ' + repr(imt))
    params_for_moin = {}
    cache_max_age = CACHE_MAX_AGE  # max-age of this response. If set to None, it will not be used
    if NO_CACHE_PATHS and first_item(
            dropwhile(lambda x: x not in page, NO_CACHE_PATHS)):
        cache_max_age = None
    if 'rev' in params:
        #XXX: Not compatible with search
        #params_for_moin = {'rev' : params['rev'][0], 'action': 'recall'}
        params_for_moin = {'rev': params['rev'][0]}
    if 'search' in params:
        searchq = params['search'][0]
        query = urllib.urlencode({'value': searchq,
                                  'action': 'fullsearch',
                                  'context': '180',
                                  'fullsearch': 'Text'})
        #?action=fullsearch&context=180&value=foo&=Text
        url = absolutize('?' + query, base)
        request = urllib2.Request(url, None, req_headers)
        ctype = moin.RDF_IMT
        cache_max_age = None
    #elif 'action' in params and params['action'][0] == 'recall':
    elif moin.HTML_IMT in environ.get('HTTP_ACCEPT', ''):
        params = urllib.urlencode(params_for_moin)
        url = absolutize(page + '?' + params, base)
        request = urllib2.Request(url, None, req_headers)
        ctype = moin.HTML_IMT
    elif moin.RDF_IMT in environ.get('HTTP_ACCEPT', ''):
        #FIXME: Make unique flag optional
        #url = base + '/RecentChanges?action=rss_rc&unique=1&ddiffs=1'
        url = absolutize('RecentChanges?action=rss_rc&unique=1&ddiffs=1', base)
        #print >> sys.stderr, (url, base, '/RecentChanges?action=rss_rc&unique=1&ddiffs=1', )
        request = urllib2.Request(url, None, req_headers)
        ctype = moin.RDF_IMT
    elif moin.ATTACHMENTS_IMT in environ.get('HTTP_ACCEPT', ''):
        url = absolutize(page + '?action=AttachFile', base)
        request = urllib2.Request(url, None, req_headers)
        ctype = moin.ATTACHMENTS_IMT

        def upstream_handler():
            #Sigh.  Sometimes you have to break some Tag soup eggs to make a RESTful omelette
            with closing(opener.open(request)) as resp:
                rbody = resp.read()
            doc = htmlparse(rbody)
            raise_embedded_error(doc)
            attachment_nodes = doc.xml_select(
                u'//*[contains(@href, "action=AttachFile") and contains(@href, "do=view")]')
            targets = []
            for node in attachment_nodes:
                target = [param.split('=', 1)[1]
                          for param in node.href.split(u'&')
                          if param.startswith('target=')][0]
                targets.append(target)
            output = structencoder(indent=u"yes")
            output.feed(ROOT(
                E((u'attachments'),
                  (E(u'attachment', {u'href': unicode(t)}) for t in targets))))
            return output.read(), ctype
    #Notes on use of URI parameters - http://markmail.org/message/gw6xbbvx4st6bksw
    elif ';attachment=' in page:
        page, attachment = page.split(';attachment=', 1)
        url = absolutize(page + '?action=AttachFile&do=get&target=' + attachment,
                         base)
        request = urllib2.Request(url, None, req_headers)

        def upstream_handler():
            with closing(opener.open(request)) as resp:
                rbody = resp.read()
            return rbody, dict(resp.info())['content-type']
    #
    elif ';history' in page:
        cache_max_age = None
        page, discard = page.split(';history', 1)
        ctype = moin.XML_IMT

        def upstream_handler():
            revs = scrape_page_history(page, base, opener, req_headers)
            output = structencoder(indent=u"yes")
            output.feed(ROOT(
                E((u'history'),
                  (E(u'rev', {u'id': unicode(r['rev']),
                              u'editor': unicode(r['editor']),
                              u'date': unicode(r['date']).replace(' ', 'T')})
                   for r in revs))))
            return output.read(), ctype
    elif imt:
        params_for_moin.update({'mimetype': imt})
        params = urllib.urlencode(params_for_moin)
        url = absolutize(page, base) + '?' + params
        request = urllib2.Request(url, None, req_headers)
        ctype = moin.DOCBOOK_IMT
    else:
        params_for_moin.update({'action': 'raw'})
        params = urllib.urlencode(params_for_moin)
        url = absolutize(page, base) + '?' + params
        request = urllib2.Request(url, None, req_headers)
        ctype = moin.WIKITEXT_IMT
    try:
        if upstream_handler:
            rbody, ctype = upstream_handler()
        else:
            with closing(opener.open(request)) as resp:
                rbody = resp.read()
        #headers = {moin.ORIG_BASE_HEADER: base}
        #moin_base = absolutize(wiki_id, base)
        moin_base_info = base + ' ' + wrapped_wiki_base + ' ' + original_page
        response_headers = [("Content-Type", ctype),
                            ("Vary", "Accept"),
                            (moin.ORIG_BASE_HEADER, moin_base_info)]
        if cache_max_age:
            response_headers.append(("Cache-Control",
                                     "max-age=" + str(cache_max_age)))
        start_response(status_response(status), response_headers)
        return rbody
    except urllib2.URLError, e:
        if e.code == 401:
            raise HTTPAuthorizationError(url=request.get_full_url())
        if e.code == 403:
            raise MoinMustAuthenticateError(url=request.get_full_url(),
                                            target=wiki_id)
        if e.code == 404:
            raise MoinNotFoundError(fronturl=request_uri(environ), backurl=url)
        else:
            raise UnexpectedResponseError(url=url, code=e.code, error=str(e))
def _split_url_string(param_str):
    """Turn URL string into parameters."""
    parameters = cgi.parse_qs(param_str, keep_blank_values=False)
    for k, v in parameters.iteritems():
        parameters[k] = urllib.unquote(v[0])
    return parameters
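Note the double decoding here: `parse_qs` already percent-decodes each value once, and the extra `urllib.unquote` decodes a second time, presumably to undo a second layer of encoding in the OAuth 1.0 parameter strings this helper was written for. A minimal illustration in Python 3 terms:

from urllib.parse import parse_qs, unquote

qs = 'redirect=a%2520b'                # 'a%20b', percent-encoded once more
once = parse_qs(qs)['redirect'][0]     # parse_qs unquotes once -> 'a%20b'
print(once, '->', unquote(once))       # second unquote -> 'a b'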
def getResultsByDayMonthYear(self, day, month, year):
    search_date = datetime.date(year, month, day)
    post_data = urllib.urlencode((
        ("REGFROMDATE.MAINBODY.WPACIS.1.", search_date.strftime(date_format)),
        ("REGTODATE.MAINBODY.WPACIS.1.", search_date.strftime(date_format)),
        ("SEARCHBUTTON.MAINBODY.WPACIS.1.", "Search"),
    ))
    response = urllib2.urlopen(self.search_url, post_data)
    contents = response.read()
    # Let's give scrapers the chance to tidy up any rubbish - I'm looking
    # at you Cannock Chase
    contents = self._fixHTML(contents)
    # Check for the no results warning
    if not contents.count("No Matching Applications Found"):
        soup = BeautifulSoup.BeautifulSoup(contents)
        # Get the links to later pages of results.
        later_pages = soup.findAll(
            "a",
            {"href": re.compile(
                "WPHAPPSEARCHRES\.displayResultsURL.*StartIndex=\d*.*")})
        for a in ["initial_search"] + later_pages:
            if a != "initial_search":
                url = a['href']
                # urllib2 doesn't like this url; to make it happy, we'll
                # get rid of the BackURL parameter, which we don't need.
                split_url = urlparse.urlsplit(url)
                qs = split_url[3]
                # This gets us a list of (key, value) pairs
                qsl = cgi.parse_qsl(qs)
                # Get rid of BackURL
                qsl.pop(-1)
                # I think this is safe, as there are no repeats of parameters
                new_qs = urllib.urlencode(qsl)
                url = urlparse.urlunsplit(split_url[:3] + (new_qs,) +
                                          split_url[4:])
                this_page_url = urlparse.urljoin(self.base_url, url)
                response = urllib2.urlopen(this_page_url)
                contents = response.read()
                soup = BeautifulSoup.BeautifulSoup(contents)
            results_table = self._findResultsTable(soup)
            #.body.find("table", {"class": "apas_tbl"})
            trs = self._findTRs(results_table)
            for tr in trs:
                self._current_application = PlanningApplication()
                tds = tr.findAll("td")
                first_link = tds[0].a['href']
                app_id = cgi.parse_qs(
                    urlparse.urlsplit(first_link)[3])['theApnID'][0]
                self._current_application.date_received = search_date
                self._current_application.council_reference = app_id
                self._current_application.info_url = self.info_url % (app_id)
                self._current_application.comment_url = self.comment_url % (app_id)
                self._current_application.description = tds[1].string.strip()
                address = ' '.join([x for x in tds[2].contents
                                    if isinstance(x, BeautifulSoup.NavigableString)]).strip()
                self._current_application.address = address
                self._current_application.postcode = getPostcodeFromText(address)
                self._results.addApplication(self._current_application)
    return self._results
def __init__(self, application, environ):

    # The web transaction can be enabled/disabled by
    # the value of the variable "newrelic.enabled"
    # in the WSGI environ dictionary. We need to check
    # this before initialising the transaction as needs
    # to be passed in base class constructor. The
    # default is None, which would then result in the
    # base class making the decision based on whether
    # application or agent as a whole are enabled.

    enabled = _lookup_environ_setting(environ, 'newrelic.enabled', None)

    # Initialise the common transaction base class.

    super(WebTransaction, self).__init__(application, enabled)

    # Disable transactions for websocket connections.
    # Also disable autorum if this is a websocket. This is a good idea for
    # two reasons. First, RUM is unnecessary for websocket transactions
    # anyway. Secondly, due to a bug in the gevent-websocket (0.9.5)
    # package, if our _WSGIApplicationMiddleware is applied a websocket
    # connection cannot be made.

    if _is_websocket(environ):
        self.autorum_disabled = True
        self.enabled = False

    # Bail out if the transaction is running in a
    # disabled state.

    if not self.enabled:
        return

    # Will need to check the settings a number of times.

    settings = self._settings

    # Check for override settings from WSGI environ.

    self.background_task = _lookup_environ_setting(environ,
            'newrelic.set_background_task', False)
    self.ignore_transaction = _lookup_environ_setting(environ,
            'newrelic.ignore_transaction', False)
    self.suppress_apdex = _lookup_environ_setting(environ,
            'newrelic.suppress_apdex_metric', False)
    self.suppress_transaction_trace = _lookup_environ_setting(environ,
            'newrelic.suppress_transaction_trace', False)
    self.capture_params = _lookup_environ_setting(environ,
            'newrelic.capture_request_params', settings.capture_params)
    self.autorum_disabled = _lookup_environ_setting(environ,
            'newrelic.disable_browser_autorum',
            not settings.browser_monitoring.auto_instrument)

    # Make sure that if high security mode is enabled that
    # capture of request params is still being disabled.
    # No warning is issued for this in the logs because it
    # is a per request configuration and would create a lot
    # of noise.

    if settings.high_security:
        self.capture_params = False

    # WSGI spec says SERVER_PORT "can never be empty string",
    # but I'm going to set a default value anyway...

    port = environ.get('SERVER_PORT', None)
    if port:
        try:
            self._port = int(port)
        except Exception:
            pass

    # Extract from the WSGI environ dictionary
    # details of the URL path. This will be set as
    # default path for the web transaction. This can
    # be overridden by framework to be more specific
    # to avoid metrics explosion problem resulting
    # from too many distinct URLs for same resource
    # due to use of REST style URL concepts or
    # otherwise.

    request_uri = environ.get('REQUEST_URI', None)

    if request_uri is None:
        # The gunicorn WSGI server uses RAW_URI instead
        # of the more typical REQUEST_URI used by Apache
        # and other web servers.

        request_uri = environ.get('RAW_URI', None)

    script_name = environ.get('SCRIPT_NAME', None)
    path_info = environ.get('PATH_INFO', None)

    self._request_uri = request_uri

    if self._request_uri is not None:
        # Need to make sure we drop off any query string
        # arguments on the path if we have to fallback
        # to using the original REQUEST_URI. Can't use
        # attribute access on result as only support for
        # Python 2.5+.

        self._request_uri = urlparse.urlparse(self._request_uri)[2]

    if script_name is not None or path_info is not None:
        if path_info is None:
            path = script_name
        elif script_name is None:
            path = path_info
        else:
            path = script_name + path_info

        self.set_transaction_name(path, 'Uri', priority=1)

        if self._request_uri is None:
            self._request_uri = path
    else:
        if self._request_uri is not None:
            self.set_transaction_name(self._request_uri, 'Uri', priority=1)

    # See if the WSGI environ dictionary includes the
    # special 'X-Request-Start' or 'X-Queue-Start' HTTP
    # headers. These header are optional headers that can be
    # set within the underlying web server or WSGI server to
    # indicate when the current request was first received
    # and ready to be processed. The difference between this
    # time and when application starts processing the
    # request is the queue time and represents how long
    # spent in any explicit request queuing system, or how
    # long waiting in connecting state against listener
    # sockets where request needs to be proxied between any
    # processes within the application server.
    #
    # Note that mod_wsgi sets its own distinct variables
    # automatically. Initially it set mod_wsgi.queue_start,
    # which equated to when Apache first accepted the
    # request. This got changed to mod_wsgi.request_start
    # however, and mod_wsgi.queue_start was instead used
    # just for when requests are to be queued up for the
    # daemon process and corresponded to the point at which
    # they are being proxied, after Apache does any
    # authentication etc. We check for both so older
    # versions of mod_wsgi will still work, although we
    # don't try and use the fact that it is possible to
    # distinguish the two points and just pick up the
    # earlier of the two.
    #
    # Checking for the mod_wsgi values means it is not
    # necessary to enable and use mod_headers to add X
    # -Request-Start or X-Queue-Start. But we still check
    # for the headers and give priority to the explicitly
    # added header in case that header was added in front
    # end server to Apache instead.
    #
    # Which ever header is used, we accommodate the value
    # being in seconds, milliseconds or microseconds. Also
    # handle it being prefixed with 't='.

    now = time.time()

    def _parse_time_stamp(time_stamp):
        """
        Converts time_stamp to seconds. Input can be microseconds,
        milliseconds or seconds

        Divide the timestamp by the highest resolution divisor. If
        the result is older than Jan 1 2000, then pick a lower
        resolution divisor and repeat. It is safe to assume no
        requests were queued for more than 10 years.

        """
        for divisor in (1000000.0, 1000.0, 1.0):
            converted_time = time_stamp / divisor

            # If queue_start is in the future, return 0.0.

            if converted_time > now:
                return 0.0

            if converted_time > JAN_1_2000:
                return converted_time

        return 0.0

    queue_time_headers = ('HTTP_X_REQUEST_START', 'HTTP_X_QUEUE_START',
            'mod_wsgi.request_start', 'mod_wsgi.queue_start')

    for queue_time_header in queue_time_headers:
        value = environ.get(queue_time_header, None)

        try:
            if value.startswith('t='):
                try:
                    self.queue_start = _parse_time_stamp(float(value[2:]))
                except Exception:
                    pass
            else:
                try:
                    self.queue_start = _parse_time_stamp(float(value))
                except Exception:
                    pass
        except Exception:
            pass

        if self.queue_start > 0.0:
            break

    # Capture query request string parameters, unless we're in
    # High Security Mode.

    if not settings.high_security:
        value = environ.get('QUERY_STRING', None)

        if value:
            try:
                params = urlparse.parse_qs(value, keep_blank_values=True)
            except Exception:
                params = cgi.parse_qs(value, keep_blank_values=True)

            self._request_params.update(params)

    # Check for Synthetics header

    if settings.synthetics.enabled and \
            settings.trusted_account_ids and settings.encoding_key:
        try:
            header_name = 'HTTP_X_NEWRELIC_SYNTHETICS'
            header = self.decode_newrelic_header(environ, header_name)
            synthetics = _parse_synthetics_header(header)

            if synthetics['account_id'] in settings.trusted_account_ids:
                # Save obfuscated header, because we will pass it along
                # unchanged in all external requests.

                self.synthetics_header = environ.get(header_name)

                if synthetics['version'] == 1:
                    self.synthetics_resource_id = synthetics['resource_id']
                    self.synthetics_job_id = synthetics['job_id']
                    self.synthetics_monitor_id = synthetics['monitor_id']
        except Exception:
            pass

    # Process the New Relic cross process ID header and extract
    # the relevant details.

    client_cross_process_id = environ.get('HTTP_X_NEWRELIC_ID')
    txn_header = environ.get('HTTP_X_NEWRELIC_TRANSACTION')
    self._process_incoming_cat_headers(client_cross_process_id, txn_header)

    # Capture WSGI request environ dictionary values. We capture
    # content length explicitly as will need it for cross process
    # metrics.

    self._read_length = int(environ.get('CONTENT_LENGTH') or -1)

    if settings.capture_environ:
        for name in settings.include_environ:
            if name in environ:
                self._request_environment[name] = environ[name]

    # Strip query params from referer URL.

    if 'HTTP_REFERER' in self._request_environment:
        self._request_environment['HTTP_REFERER'] = _remove_query_string(
                self._request_environment['HTTP_REFERER'])

    try:
        if 'CONTENT_LENGTH' in self._request_environment:
            self._request_environment['CONTENT_LENGTH'] = int(
                    self._request_environment['CONTENT_LENGTH'])
    except Exception:
        del self._request_environment['CONTENT_LENGTH']

    # Flags for tracking whether RUM header and footer have been
    # generated.

    self.rum_header_generated = False
    self.rum_footer_generated = False
def _main(self, environ):
    """
    Body of the WSGI callable. Steps:
    1. Determine protocol. If HTTPS, identify the user.
    2. If js or css is requested, respond.
    3. Find the module class and instantiate it.
    4. Parse the query string into a dictionary.
    5. Call the run() function of the module class.
    6. Respond.
    """

    authorizer = None

    ## Step 1
    if environ['REQUEST_SCHEME'] == 'http':
        # No auth
        user, dn, user_id = None, None, 0
        authlist = []
    elif environ['REQUEST_SCHEME'] == 'https':
        authorizer = self.dynamo_server.manager.master.create_authorizer()

        # Client DN must match a known user
        try:
            dn = WebServer.format_dn(environ['SSL_CLIENT_S_DN'])
            userinfo = authorizer.identify_user(dn=dn, check_trunc=True)
            if userinfo is None:
                raise exceptions.AuthorizationError()

            user, user_id, dn = userinfo
        except exceptions.AuthorizationError:
            self.code = 403
            self.message = 'Unknown user. Client name: %s' % environ['SSL_CLIENT_S_DN']
            return
        except:
            return self._internal_server_error()

        authlist = authorizer.list_user_auth(user)
    else:
        self.code = 400
        self.message = 'Only HTTP or HTTPS requests are allowed.'
        return

    ## Step 2
    mode = environ['SCRIPT_NAME'].strip('/')
    if mode == 'js' or mode == 'css':
        try:
            source = open(HTMLMixin.contents_path + '/' + mode + environ['PATH_INFO'])
        except IOError:
            self.code = 404
            self.content_type = 'text/plain'
            return 'Invalid request %s%s.\n' % (mode, environ['PATH_INFO'])
        else:
            if mode == 'js':
                self.content_type = 'text/javascript'
            else:
                self.content_type = 'text/css'

            content = source.read() + '\n'
            source.close()
            return content

    ## Step 3
    if mode != 'data' and mode != 'web' and mode != 'registry' and mode != 'phedexdata':
        # registry and phedexdata for backward compatibility
        self.code = 404
        self.message = 'Invalid request %s.' % mode
        return

    if mode == 'phedexdata':
        mode = 'data'
        self.phedex_request = environ['PATH_INFO'][1:]

    module, _, command = environ['PATH_INFO'][1:].partition('/')

    try:
        cls = modules[mode][module][command]
    except KeyError:
        # Was a new module added perhaps?
        load_modules()
        try:  # again
            cls = modules[mode][module][command]
        except KeyError:
            self.code = 404
            self.message = 'Invalid request %s/%s.' % (module, command)
            return

    try:
        provider = cls(self.modules_config)
    except:
        return self._internal_server_error()

    if provider.must_authenticate and user is None:
        self.code = 400
        self.message = 'Resource only available with HTTPS.'
        return

    if provider.write_enabled:
        self.dynamo_server.manager.master.lock()

        try:
            if self.dynamo_server.manager.master.inhibit_write():
                # We need to give up here instead of waiting, because the web
                # server processes will be flushed out as soon as inventory is
                # updated after the current writing process is done
                self.code = 503
                self.message = ('Server cannot execute %s/%s at the moment '
                                'because the inventory is being updated.'
                                % (module, command))
                return
            else:
                self.dynamo_server.manager.master.start_write_web(
                    socket.gethostname(), os.getpid())
                # stop is called from the DynamoServer upon successful
                # inventory update
        except:
            self.dynamo_server.manager.master.stop_write_web()
            raise
        finally:
            self.dynamo_server.manager.master.unlock()

    if provider.require_authorizer:
        if authorizer is None:
            authorizer = self.dynamo_server.manager.master.create_authorizer()
        provider.authorizer = authorizer

    if provider.require_appmanager:
        provider.appmanager = self.dynamo_server.manager.master.create_appmanager()

    try:
        ## Step 4
        post_request = None

        if environ['REQUEST_METHOD'] == 'POST':
            try:
                content_type = environ['CONTENT_TYPE']
            except KeyError:
                content_type = 'application/x-www-form-urlencoded'

            # In principle we should grab CONTENT_LENGTH from environ and only
            # read as many bytes as given, but wsgi.input seems to know where
            # the EOF is
            try:
                content_length = environ['CONTENT_LENGTH']
            except KeyError:
                # length -1: rely on wsgi.input having an EOF at the end
                content_length = -1

            post_data = environ['wsgi.input'].read(content_length)

            # Even though our default content type is URL form, we check if
            # this is a JSON
            try:
                json_data = json.loads(post_data)
            except:
                if content_type == 'application/json':
                    self.code = 400
                    self.message = 'Could not parse input.'
                    return
            else:
                content_type = 'application/json'
                provider.input_data = json_data
                unicode2str(provider.input_data)

            if content_type == 'application/x-www-form-urlencoded':
                try:
                    post_request = parse_qs(post_data)
                except:
                    self.code = 400
                    self.message = 'Could not parse input.'
            elif content_type != 'application/json':
                self.code = 400
                self.message = 'Unknown Content-Type %s.' % content_type

        get_request = parse_qs(environ['QUERY_STRING'])

        if post_request is not None:
            for key, value in post_request.iteritems():
                if key in get_request:
                    # return dict of parse_qs is {key: list}
                    get_request[key].extend(post_request[key])
                else:
                    get_request[key] = post_request[key]

        unicode2str(get_request)

        request = {}
        for key, value in get_request.iteritems():
            if key.endswith('[]'):
                key = key[:-2]
                request[key] = map(escape, value)
            else:
                if len(value) == 1:
                    request[key] = escape(value[0])
                else:
                    request[key] = map(escape, value)

        ## Step 5
        caller = WebServer.User(user, dn, user_id, authlist)

        if self.dynamo_server.inventory.loaded:
            inventory = self.dynamo_server.inventory.create_proxy()
            if provider.write_enabled:
                inventory._update_commands = []
        else:
            inventory = DummyInventory()

        content = provider.run(caller, request, inventory)

        if provider.write_enabled:
            self.dynamo_server._send_updates(inventory)

    except (exceptions.AuthorizationError, exceptions.ResponseDenied,
            exceptions.MissingParameter, exceptions.ExtraParameter,
            exceptions.IllFormedRequest, exceptions.InvalidRequest) as ex:
        self.code = 400
        self.message = str(ex)
        return
    except exceptions.TryAgain as ex:
        self.code = 503
        self.message = str(ex)
        return
    except:
        return self._internal_server_error()

    ## Step 6
    self.message = provider.message
    self.content_type = provider.content_type
    self.headers = provider.additional_headers

    if 'callback' in request:
        self.callback = request['callback']

    return content
def Control(self):
    """Handle POSTS."""
    if not master.MASTER_WATCHER.IsMaster():
        # We shouldn't be getting requests from the client unless we
        # are the active instance.
        stats_collector_instance.Get().IncrementCounter(
            "frontend_inactive_request_count", fields=["http"])
        logging.info("Request sent to inactive frontend from %s",
                     self.client_address[0])

    # Get the api version
    try:
        api_version = int(cgi.parse_qs(self.path.split("?")[1])["api"][0])
    except (ValueError, KeyError, IndexError):
        # The oldest api version we support if not specified.
        api_version = 3

    try:
        content_length = self.headers.getheader("content-length")
        if not content_length:
            raise IOError("No content-length header provided.")

        length = int(content_length)

        request_comms = rdf_flows.ClientCommunication.FromSerializedString(
            self._GetPOSTData(length))

        # If the client did not supply the version in the protobuf we use
        # the get parameter.
        if not request_comms.api_version:
            request_comms.api_version = api_version

        # Reply using the same version we were requested with.
        responses_comms = rdf_flows.ClientCommunication(
            api_version=request_comms.api_version)

        # TODO: Python's documentation is just plain terrible and
        # does not explain what `client_address` exactly is or what type does it
        # have (because its Python, why would they bother) so just to be on the
        # safe side, we anticipate byte-string addresses in Python 2 and convert
        # that if needed. On Python 3 these should be always unicode strings, so
        # once support for Python 2 is dropped this branch can be removed.
        address = self.client_address[0]
        if compatibility.PY2 and isinstance(self.client_address[0], bytes):
            address = address.decode("ascii")
        source_ip = ipaddress.ip_address(address)

        if source_ip.version == 6:
            source_ip = source_ip.ipv4_mapped or source_ip

        request_comms.orig_request = rdf_flows.HttpRequest(
            timestamp=rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch(),
            raw_headers=utils.SmartStr(self.headers),
            source_ip=utils.SmartStr(source_ip))

        source, nr_messages = self.server.frontend.HandleMessageBundles(
            request_comms, responses_comms)

        server_logging.LOGGER.LogHttpFrontendAccess(
            request_comms.orig_request, source=source,
            message_count=nr_messages)

        self.Send(responses_comms.SerializeToString())
    except communicator.UnknownClientCertError:
        # "406 Not Acceptable: The server can only generate a response that is
        # not accepted by the client". This is because we can not encrypt for
        # the client appropriately.
        self.Send("Enrollment required", status=406)
def call(self, **kwargs):
    """ Create the form fields.

    .. note::

        At upload, all field inputs are checked to match the 'check_value'
        regular expressions defined in the 'upload_structure_json' instance
        parameter.
    """
    # Get some parameters
    path = self._cw.relative_path()
    if "?" in path:
        path, param = path.split("?", 1)
        kwargs.update(parse_qs(param))
    form_name = kwargs["form_name"][0]

    # Get the form fields from configuration file
    config = load_forms(self._cw.vreg.config)

    # Create a structure to store values that must be checked before the
    # insertion in the data base
    check_struct = {}
    required_file_fields = {}

    # Update shortcut to access the uploaded files
    if 0:
        with self._cw.cnx._cnx.repo.internal_cnx() as cnx:
            rset = cnx.execute("Any X Where X is UploadFile")
            storage = cnx.repo.system_source._storages["UploadFile"]["data"]
            for index in range(rset.rowcount):
                entity = rset.get_entity(index, 0)
                eid = entity.eid
                if eid not in self._cw.vreg.uploaded_file_names:
                    fpath = storage.current_fs_path(entity, "data")
                    self._cw.vreg.uploaded_file_names[eid] = fpath

    # If json file missing, generate error page
    if config == -1:
        self.w(u'<div class="panel panel-danger">')
        self.w(u'<div class="panel-heading">')
        self.w(u'<h2 class="panel-title">ERROR</h2>')
        self.w(u'</div>')
        self.w(u'<div class="panel-body">')
        self.w(u"<h3>Configuration file not found</h3>")
        self.w(u"Check that the path 'upload_structure_json' "
               "declared in all-in-one.conf file is set.<br>")
        self.w(u"Then check that the path declared "
               "(current path:'{0}') corresponds to a "
               "json file and restart the instance.".format(
                   self._cw.vreg.config["upload_structure_json"]))
        self.w(u'</div>')
        self.w(u'</div>')
        return -1

    # If json can't be read, generate error page
    if config == -2:
        self.w(u'<div class="panel panel-danger">')
        self.w(u'<div class="panel-heading">')
        self.w(u'<h2 class="panel-title">ERROR</h2>')
        self.w(u'</div>')
        self.w(u'<div class="panel-body">')
        self.w(u"<h3>Configuration unknown</h3>")
        self.w(u"The json file configuring the form can't be "
               "read: {0}".format(
                   self._cw.vreg.config["upload_structure_json"]))
        self.w(u'</div>')
        self.w(u'</div>')
        return -1

    # Create the form
    form = self._cw.vreg["forms"].select(
        "upload-form", self._cw, action="", form_name=form_name)
    fields_types = {}
    fields_labels = {}
    error_to_display = None
    try:
        # Go through each field description
        for field in config[form_name]["Fields"]:

            # Remove reserved field keys
            # > rql: a RQL that will be used to initialize another field.
            #   The current field must contain a list.
            #   Must be of the form <RQL>:<field_name>.
            #   Format the RQL string with the user login: use '{}' format
            #   syntax in your RQL to inherit from this functionality.
            if "rql" in field:
                rql, dest_name = field.pop("rql").split(":")
                rql = rql.format(self._cw.user.login)
                if dest_name not in field:
                    raise ValueError(
                        "'{0}' not in field attributes.".format(dest_name))
                if not isinstance(field[dest_name], list):
                    raise ValueError(
                        "'{0}' field attribute is not a list.".format(
                            dest_name))
                rset = self._cw.execute(rql)
                for row in rset.rows:
                    field[dest_name].extend(row)

            # > type: the field type that must be declared in the registry
            field_type = field.pop("type")
            fields_types[field["name"]] = field_type
            fields_labels[field["name"]] = field["label"]

            # > style: the css style that will be applied to the field div
            style = None
            if "style" in field:
                style = field.pop("style")

            # Store the fields that must be checked using a Regex
            if "check_value" in field:
                check_struct[field["name"]] = field.pop("check_value")

            # Check that the upload directory is created
            # If not display a danger message
            # Store also required file fields
            if field_type in ("FileField", "MultipleFileField"):
                if not os.path.isdir(
                        self._cw.vreg.config["upload_directory"]):
                    self.w(u"<p class='label label-danger'>{0}: File "
                           "field can't be used because the "
                           "'upload_directory' has not been set in "
                           "all-in-one.conf file or its path cannot be "
                           "created ({1})</p>".format(
                               field.pop("label"),
                               self._cw.vreg.config["upload_directory"]))
                    continue
                if "required" in field and field["required"]:
                    required_file_fields[field["name"]] = field["label"]

            # If the field is in the registry add the field to the form
            # If requested add some custom styles to the field
            if field_type in DECLARED_FIELDS:
                form.append_field(DECLARED_FIELDS[field_type](**field))
                if style is not None:
                    widget = form.field_by_name(
                        field["name"]).get_widget(form)
                    widget.attrs["style"] = unicode(style)
            # Otherwise display a danger message
            else:
                self.w(
                    u"<p class='label label-danger'>'{0}': Unknown field "
                    "type.</p>".format(field_type))

    # If something goes wrong during the form creation, display a danger
    # message and print the trace in the terminal
    except ValueError as error:
        print traceback.format_exc()
        error_to_display = error.message
    except:
        print traceback.format_exc()
        error_to_display = "The configuration file can't be read."

    # Display the error message
    if error_to_display is not None:
        self.w(u'<div class="panel panel-danger">')
        self.w(u'<div class="panel-heading">')
        self.w(u'<h2 class="panel-title">ERROR</h2>')
        self.w(u'</div>')
        self.w(u'<div class="panel-body">')
        self.w(u'<h3>Configuration file syntax error</h3>')
        self.w(u'{0}<br>'.format(error_to_display))
        self.w(u'Please refer to the documentation and make corrections')
        self.w(u'</div>')
        self.w(u'</div>')
        return -1

    # Form processings
    error_to_display = None
    try:
        # Retrieve the posted form field values
        posted = form.process_posted()

        # Check posted fields
        errors = self.check_posted(posted, required_file_fields,
                                   check_struct)
        if errors != {}:
            raise ValidationError(None, {})

        # Create the CWUpload entity
        upload = self._cw.create_entity(
            "CWUpload",
            form_name=unicode(form_name),
            status=u"Quarantine")

        # Go through the posted form parameters. Deported fields are
        # stored in UploadFile entities, other fields in UploadField
        # entities
        file_eids = []
        field_eids = []
        file_entities = []
        field_entities = []
        for field_name, field_value in posted.items():

            # > files are deported
            if isinstance(field_value, Binary):
                # Create an UploadFile entity
                extension = ".".join(field_value.filename.split(".")[1:])
                entity = self._cw.create_entity(
                    "UploadFile",
                    name=field_name,
                    data=field_value,
                    data_extension=unicode(extension),
                    data_name=field_value.filename)
                file_eids.append(entity.eid)
                file_entities.append(entity)

                # Add relation with the CWUpload entity
                self._cw.execute("SET U upload_files F WHERE "
                                 "U eid %(u)s, F eid %(f)s",
                                 {"u": upload.eid, "f": file_eids[-1]})

            # > other fields are stored in the database
            else:
                # Create an UploadField entity
                entity = self._cw.create_entity(
                    "UploadField",
                    name=unicode(field_name),
                    value=unicode(field_value),
                    type=unicode(fields_types[field_name]),
                    label=unicode(fields_labels[field_name]))
                field_eids.append(entity.eid)
                field_entities.append(entity)

                # Add relation with the CWUpload entity
                self._cw.execute("SET U upload_fields F WHERE "
                                 "U eid %(u)s, F eid %(f)s",
                                 {"u": upload.eid, "f": field_eids[-1]})

        # Call synchronous check function
        check_func_desc = config[form_name].get("SynchroneCheck")
        if check_func_desc is not None:
            module_name = check_func_desc[:check_func_desc.rfind(".")]
            func_name = check_func_desc[check_func_desc.rfind(".") + 1:]
            module = import_module(module_name)
            check_func = getattr(module, func_name)
            try:
                error_to_display = check_func(
                    self._cw.cnx, posted, upload, file_entities,
                    field_entities)
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                raise Exception(traceback.format_exc())
            finally:
                if error_to_display is not None:
                    raise ValidationError(
                        None, {None: "<br><br>" + error_to_display})

        # Redirection to the created CWUpload entity
        raise Redirect(self._cw.build_url(eid=upload.eid))

    # Handle exceptions
    except RequestError:
        pass
    except ValueError as error:
        error_to_display = error.message
    except ValidationError as error:
        # Check posted fields to concatenate the CW and application errors
        posted = {}
        for field in form.iter_modified_fields():
            posted[field.name] = form._cw.form[field.name]
        errors = self.check_posted(posted, required_file_fields,
                                   check_struct)
        concatenated_errors = {}
        for dict_struct in (errors, error.errors):
            for key, value in dict_struct.items():
                concatenated_errors.setdefault(key, []).append(value)
        concatenated_errors = dict(
            (key, " - ".join(value))
            for key, value in concatenated_errors.items())
        raise ValidationError(None, concatenated_errors)
    except Redirect:
        raise
    except Unauthorized:
        error_to_display = "You are not allowed to upload data."
    except:
        print traceback.format_exc()
        error_to_display = ("Unexpected error, please contact the service "
                            "administrator.")
        raise ValidationError(
            None, {None: "<br><br>" + error_to_display})

    # Form rendering
    self.w(u"<legend>'{0}' upload form</legend>".format(form_name))
    form.render(w=self.w, formvalues=self._cw.form)

    # Display the error message in the page
    if error_to_display is not None:
        self._cw.cnx.rollback()
        self.w(u'<div class="panel panel-danger">')
        self.w(u'<div class="panel-heading">')
        self.w(u'<h2 class="panel-title">UPLOAD ERROR</h2>')
        self.w(u'</div>')
        self.w(u'<div class="panel-body">')
        self.w(u"{0}".format(error_to_display))
        self.w(u'</div>')
        self.w(u'</div>')
def simple_app(environ, start_response):
    # Rename the console window; currently only works on Windows
    try:
        os.system("title {}_bdd_server".format(self_name))
    except:
        pass
    setup_testing_defaults(environ)
    status = '200 OK'
    headers = [('Content-type', 'text/plain')]
    start_response(status, headers)

    # the environment variable CONTENT_LENGTH may be empty or missing
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        request_body_size = 0

    # When the method is POST the query string will be sent
    # in the HTTP request body which is passed by the WSGI server
    # in the file-like wsgi.input environment variable.
    # Parse the request from the HTTP request body.
    request_body = environ['wsgi.input'].read(request_body_size)
    post = parse_qs(request_body)
    step_data = json.loads(post['data'][0])
    step = step_data['step'].strip()

    # 1. Parse the step
    # 2. Execute the step
    # 3. Classify the step's result
    # 4. Return the HTTP response
    # 0: success, 1: business failure, 2: exception
    result = 0
    if step == '__reset__':
        print('*********************** run step **********************')
        print(u'Reset bdd environment...')
        environment.after_scenario(context, context.scenario)
        environment.before_scenario(context, context.scenario)
        resp = {'result': result, 'bdd_server_name': self_name}
        return base64.b64encode(json.dumps(resp))
    else:
        # Restore the context attributes carried by the request
        _set_context_attrs(context, json.loads(step_data['context_attrs']))
        if step_data['context_text']:
            step_content = step_data['context_text']
        else:
            step_content = step_data['context_table']
        step = u'%s\n"""\n%s\n"""' % (step_data['step'], step_content)
        print('*********************** run step **********************')
        print(step)
        context_attrs = {}
        traceback = ''
        try:
            context.execute_steps(step)
        except AssertionError:
            result = 1
            from core.exceptionutil import full_stack
            print('*********************** failure **********************')
            traceback = full_stack()
            print(traceback.decode('utf-8'))
        except:
            result = 2
            from core.exceptionutil import full_stack
            print('*********************** exception **********************')
            traceback = full_stack()
            print(traceback.decode('utf-8'))
        else:
            result = 0
            context_attrs = context._stack[0]
        resp = {
            'result': result,
            'traceback': traceback,
            'context_attrs': context_attrs,
            'bdd_server_name': self_name
        }
        # When passing the context back, skip objects that are not
        # basic types (handled by the _default serializer)
        return base64.b64encode(json.dumps(resp, default=_default))
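# For context, a client of the step server above would POST a single 'data'
# parameter holding the JSON payload, then base64-decode the JSON reply.
# A minimal sketch in Python 3; the host and port are hypothetical, and the
# payload keys simply mirror what the handler above reads from 'data'.
import base64
import json
import urllib.parse
import urllib.request

payload = {
    'step': 'When I do something',
    'context_attrs': json.dumps({}),
    'context_text': '',
    'context_table': '',
}
body = urllib.parse.urlencode({'data': json.dumps(payload)}).encode('ascii')
raw = urllib.request.urlopen('http://localhost:8000/', body).read()
resp = json.loads(base64.b64decode(raw))
print(resp['result'], resp['bdd_server_name'])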
<div class="col-xs-12"><input type="submit" value="Update" class="btn btn-primary btn-block btn-lg"> </div> </div> </form> </div> </div> </div>"""%(name,id)) printHeader() try: query = os.environ['QUERY_STRING'] if len(query) == 0: print ("no suggestion") else: pairs = cgi.parse_qs(query) if "q" in pairs.keys(): q = pairs["q"][0] conn = mysql.connector.connect( user='******', password='', host='127.0.0.1', database='uaa') c = conn.cursor() if "HTTP_COOKIE" in os.environ: cookie = Cookie.SimpleCookie(os.environ["HTTP_COOKIE"]) #print("<p>cookie: %s</p>"% cookie["session"].value) c.execute('SELECT * FROM members WHERE sessionID = %s ',(cookie["session"].value,)) row = c.fetchone() if row: navBar(row[5])
def __init__(self, master, address, data):
    # query semantics may be used to identify header field values
    scheme, netloc, path, params, query, fragment = urlparse(address)
    address = urlunparse((scheme, netloc, path, '', '', ''))
    headers = cgi.parse_qs(query)

    # create widgets
    self.master = master
    self.root = tktools.make_toplevel(self.master, title="Mail Dialog")
    self.root.protocol("WM_DELETE_WINDOW", self.cancel_command)
    self.root.bind("<Alt-w>", self.cancel_command)
    self.root.bind("<Alt-W>", self.cancel_command)
    fr, top, botframe = tktools.make_double_frame(self.root)
    self.text, fr = tktools.make_text_box(top, 80, 24)
    self.text.tag_config('SUSPICIOUS_HEADER', foreground='red')
    self.send_button = Button(botframe, text="Send",
                              command=self.send_command)
    self.send_button.pack(side=LEFT)
    self.cancel_button = Button(botframe, text="Cancel",
                                command=self.cancel_command)
    self.cancel_button.pack(side=RIGHT)
    tktools.unify_button_widths(self.send_button, self.cancel_button)

    hinfo = _make_sequence_dict(COMMON_HEADERS)
    variables = {
        'to': address,
        'subject': data and 'Form posted from Grail' or '',
        'mime-version': '1.0',
        'x-mailer': GRAILVERSION,
        'x-url': LAST_CONTEXT and LAST_CONTEXT.get_baseurl() or ''
    }
    if data:
        variables["content-type"] = "application/x-www-form-urlencoded"
    else:
        variables["content-type"] = "text/plain; charset=us-ascii"
        variables["content-transfer-encoding"] = "7bit"

    # move the default set of query'd headers into variables
    for header, vlist in headers.items():
        header = string.lower(header)
        if header != 'body':
            if header not in DISALLOWED_HEADERS:
                variables[header] = vlist[0]  # toss duplicates
                if not hinfo.has_key(header):
                    hinfo[header] = 15
            del headers[header]

    # insert user-specified extra headers
    variables = self.add_user_headers(variables)
    for header in variables.keys():
        if not hinfo.has_key(header):
            hinfo[header] = 19

    # write the headers into the buffer
    variables['date'] = time.ctime(time.time())
    hseq = _make_dict_sequence(hinfo)
    for x, header in hseq:
        if variables.has_key(header):
            s = "%s: %s\n" \
                % (string.capwords(header, '-'), variables[header])
            self.text.insert(END, s)

    # insert newline
    self.text.insert(END, '\n', ())

    # insert data
    if data:
        self.text.insert(END, data)
    elif headers.has_key('body'):
        self.text.insert(END, headers['body'][0] + '\n')
    else:
        self.add_user_signature()
    self.text.focus_set()
def parse_request(env):
    return parse_qs(env['QUERY_STRING'])
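# Worth remembering when reading thin wrappers like the one above: parse_qs
# always returns a dict of lists, because a key may repeat in a query
# string. In current Python the function lives in urllib.parse
# (cgi.parse_qs was a deprecated alias for it). A quick behavior check:
from urllib.parse import parse_qs

qs = "a=1&a=2&b=3&empty="
print(parse_qs(qs))                          # {'a': ['1', '2'], 'b': ['3']}
print(parse_qs(qs, keep_blank_values=True))  # also keeps 'empty': ['']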
def application(environ, start_response):
    path = environ['PATH_INFO']
    method = environ['REQUEST_METHOD']
    if method == 'POST':
        try:
            request_body_size = int(environ['CONTENT_LENGTH'])
            request_body = environ['wsgi.input'].read(request_body_size)
            # http://stackoverflow.com/questions/17417620/python-requests-encoding-post-data
            decoded_request = urllib2.unquote(request_body).decode('utf-8')
            logger.info("Received message from worker: %s"
                        % decoded_request[5:])
            # Skip the first 5 characters ("body=...")
            parsed = json.loads(decoded_request[5:])
            info = ("FeedID: " + str(parsed['environment']['id']) +
                    ", Channel: " +
                    str(parsed['triggering_datastream']['id']) +
                    ", Value: " +
                    str(parsed['triggering_datastream']['value']['value']) +
                    ", Time: " + str(parsed['triggering_datastream']['at']))
            logger.info("Received message from worker: %s" % info)
            # logger.info("Received message from worker: %s"
            #             % str(scipy.version.version))

            # Begin processing for Xively
            forecast = get_forecast(url)

            # Forecast for 0.1"
            pqpf, pop, qpf, epoch = get_pqpf(forecast)
            post_forecast(pqpf * 100)

            # Forecast for 0.01"
            pqpf_01 = get_pqpf(forecast, amt=0.01)[0]
            post_forecast(pqpf_01 * 100, channel='p_rain001')
            logger.info(
                "Received message from worker: at: %f, pop: %2d, "
                "qpf: %4s, pqpf(0.1): %2.2f, pqpf(0.01): %2.2f"
                % (epoch, pop, qpf, pqpf * 100, pqpf_01 * 100))

            # Get the change in depth over time
            dxdt = dchannel_dt(channel='depth_sonic')
            # Invert values for the sonic sensor; convert to mm/5 mins
            dxdt = [-d / 12 for d in dxdt]
            update_channel(value=dxdt[-1], channel='delta_depth_sonic')
            logger.info("Received message from worker: dQ/dt: %f"
                        % dxdt[-1])

            # tf_bottle = sampled_recently(duration=60)
            # tf_sonic = sampled_recently(duration=60, channel='depth_sonic')
            # logger.info("Received message from worker: Recently sampled.. "
            #             "bottle: %s; sonic: %s"
            #             % (str(tf_bottle), str(tf_sonic)))

            algorithm_enabled = get_current_value(channel='enable_sampler')
            if algorithm_enabled > 0:
                # # Take a sample if conditions are met
                # if dxdt[-1] >= min_dflow:
                #     # Water level is rising faster than min_dflow
                #     flag = not (sampled_recently(duration=30))
                #     if (flag):
                #         post_trigger()
                #         logger.info("Received message from worker: "
                #                     "dQ/dt: %f and 30+ minutes since a "
                #                     "sample" % dxdt[-1])
                # elif pqpf >= 0.50:
                #     # Probability of exceeding rainfall is greater than 50%
                #     flag = not (sampled_recently(duration=60))
                #     if (flag):
                #         post_trigger()
                #         logger.info("Received message from worker: "
                #                     "pqpf: %2.2f and 60+ minutes since a "
                #                     "sample" % pqpf * 100)

                current_state = get_current_value(channel='hydrograph_state')
                depth_peak = get_current_value(channel='depth_peak')
                depth_base = get_current_value(channel='depth_base')
                dxdt_max = get_current_value(channel='delta_depth_max')
                pqpf_list = get_history(channel='p_rain')[0]
                depth_list = get_history(channel='depth_sonic')[0]
                # 3-pt Welch window
                pqpf_avg = (.3 * pqpf_list[-3] + .4 * pqpf_list[-2] +
                            .3 * pqpf_list[-1])
                depth = depth_list[-1]  # CHECK THIS

                if dxdt[-1] >= min_dflow:
                    # Water level is rising faster than min_dflow:
                    # the hydrograph is now rising
                    if not (current_state == RISING
                            or current_state == PEAKING):
                        current_state = RISING
                        # take a sample if one hasn't been taken in the
                        # past 10 mins
                        flag = not (sampled_recently(duration=10))
                    # Inflection in the hydrograph: discharge started
                    # increasing again before a peak was reached
                    elif current_state == PEAKING and dxdt[-1] > dxdt[-2]:
                        current_state = RISING
                        # take a sample if one hasn't been taken in the
                        # past 10 mins
                        flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()
                        logger.info(
                            "Received message from worker: dQ/dt: %f "
                            "triggered and 10+ minutes since a sample"
                            % dxdt[-1])
                    if dxdt[-1] > dxdt_max:
                        # update dxdt_max
                        # dxdt_max = dxdt[-1]
                        update_channel(channel='delta_depth_max',
                                       value=dxdt[-1])

                if current_state == BASEFLOW and pqpf_avg / 100 >= 0.10:
                    # update depth_base to the baseflow value
                    update_channel(channel='depth_base', value=depth)
                    flag = not (sampled_recently(duration=120))
                    if (flag):
                        post_trigger()
                        logger.info(
                            "Received message from worker: pqpf: %2.2f "
                            "and 60+ minutes since a sample"
                            % (pqpf * 100))

                if (current_state == RISING and dxdt[-1] < dxdt_max
                        and dxdt[-1] > 0):
                    # The hydrograph was rising, but the rate is now
                    # slowing down
                    current_state = PEAKING
                    # take a sample if one hasn't been taken in the
                    # past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()
                        logger.info(
                            "Received message from worker: dQ/dt: %f "
                            "falling" % dxdt[-1])
                elif ((current_state == RISING or current_state == PEAKING)
                        and dxdt[-1] <= 0):
                    # The hydrograph has stopped rising and is now
                    # transitioning to the falling limb
                    current_state = PEAK
                    # depth_peak = depth
                    # update depth_peak to the value closest to the peak
                    update_channel(channel='depth_peak', value=depth)
                    # take a sample if one hasn't been taken in the
                    # past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()
                        logger.info(
                            "Received message from worker: dQ/dt max: %f "
                            "now increasing less" % dxdt_max)
                elif current_state == PEAK and depth >= (
                        depth_base - .75 * (depth_base - depth_peak)):
                    # For the ultrasonic sensor, depth_base is always
                    # greater than depth_peak. The hydrograph is now on
                    # the falling limb, currently below 75% of max.
                    current_state = FALLING
                    # take a sample if one hasn't been taken in the
                    # past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()
                        logger.info(
                            "Received message from worker: Falling past "
                            ".75 max depth")
                # Sample if the hydrograph rises back above 75% of max
                # elif current_state == FALLING and depth < (
                #         depth_base - .75 * (depth_base - depth_peak)):
                #     # for the ultrasonic sensor, depth_base is always
                #     # greater than depth_peak
                #     current_state = RISING
                #     if dxdt[-1] > dxdt_max:  # update dxdt_max
                #         # dxdt_max = dxdt[-1]
                #         update_channel(channel='delta_depth_max',
                #                        value=dxdt[-1])
                #
                #     # take a sample if one hasn't been taken in the
                #     # past 60 mins
                #     flag = not (sampled_recently(duration=60))
                #     if (flag):
                #         post_trigger()
                #         logger.info("Received message from worker: "
                #                     "Rising past .75 max depth")

                # The hydrograph is approaching baseflow; reset the
                # depth variables
                elif current_state == FALLING and depth >= (
                        depth_base - .10 * (depth_base - depth_peak)):
                    # for the ultrasonic sensor, depth_base is always
                    # greater than depth_peak
                    current_state = BASEFLOW
                    update_channel(channel='depth_peak', value=depth_base)
                    # depth_peak = depth_base
                    update_channel(channel='delta_depth_max', value=0)
                    # dxdt_max = 0
                    # take a sample if one hasn't been taken in the
                    # past 10 mins
                    flag = not (sampled_recently(duration=10))
                    if (flag):
                        post_trigger()
                        logger.info(
                            "Received message from worker: Returning "
                            "to baseflow")

                update_channel(channel='hydrograph_state',
                               value=current_state)
        except (TypeError, ValueError):
            logger.warning('Error retrieving request body for async work.')
    elif method == 'GET':
        # https://gist.github.com/davidbgk/1311056
        d = parse_qs(environ['QUERY_STRING'])  # turns the qs into a dict
        query = 'From GET: %s' % ''.join(
            '%s: %s' % (k, v) for k, v in d.iteritems())
        logger.info("Received message from worker: %s" % query)
        if 'pqpf' in d:
            parsed_json = get_forecast(url)
            hour = []
            pop = []
            qpf = []
            for x in range(0, len(parsed_json['hourly_forecast'])):
                hour.append(time.strftime(
                    '%Y-%m-%d %H:%M',
                    time.gmtime(float(
                        parsed_json['hourly_forecast'][x]['FCTTIME']['epoch']))))
                pop.append(
                    float(parsed_json['hourly_forecast'][x]['pop']) / 100)
                qpf.append(float(
                    parsed_json['hourly_forecast'][x]['qpf']['english']))  # 'metric'
            poe = calc_pqpf(float(d['pqpf'][0]), pop, qpf)
            out = '<br>'
            for x in range(0, 12):
                out += ("Forecast for %16s GMT.....POP %2d...QPF %4s..."
                        "PoE(%.2f) %2.2f <br>"
                        % (hour[x], pop[x] * 100, qpf[x],
                           float(d['pqpf'][0]), poe[x] * 100))
            url_info['amt'] = d['pqpf'][0]
            url_info['pqpf_str'] = out
        else:
            url_info['amt'] = amt
            url_info['pqpf_str'] = ('<a href="?pqpf=0.2">Click</a> '
                                    'to Show Forecast')

    status = '200 OK'
    headers = [('Content-type', 'text/html')]
    start_response(status, headers)
    return [response.format(**url_info)]
linkType = None
try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode = urllib.unquote_plus(params["mode"])
except:
    pass

args = cgi.parse_qs(sys.argv[2][1:])
linkType = ''
try:
    linkType = args.get('linkType', '')[0]
except:
    pass
PageNumber = ''
try:
    PageNumber = args.get('limitstart', '')[0]
except:
    PageNumber = ''
if PageNumber == None:
    PageNumber = ""
print mode, url
def formdecode(thestring):
    """Decode a single string back into a form-like dictionary."""
    from cgi import parse_qs
    from urllib import unquote_plus
    return parse_qs(unquote_plus(thestring), True)
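# For comparison, a Python 3 equivalent of formdecode with the same
# keep-blank-values behavior; the _py3 suffix is just illustrative. Note
# the original unquotes the whole string before splitting it, which is
# unusual but preserved here.
from urllib.parse import parse_qs, unquote_plus

def formdecode_py3(thestring):
    """Decode a single string back into a form-like dictionary."""
    return parse_qs(unquote_plus(thestring), keep_blank_values=True)

print(formdecode_py3("name=John+Doe&tags=a&tags=b&note="))
# {'name': ['John Doe'], 'tags': ['a', 'b'], 'note': ['']}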
def process_refresh_token_response(cls, response):
    return dict(
        (key, val[0]) for key, val in cgi.parse_qs(response).iteritems())
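# The method above flattens an OAuth-style form-encoded token response to
# its first values. The same idea under Python 3's urllib.parse; the sample
# response string here is made up:
from urllib.parse import parse_qs

response = "oauth_token=abc123&oauth_token_secret=s3cr3t"
flat = {key: val[0] for key, val in parse_qs(response).items()}
print(flat)  # {'oauth_token': 'abc123', 'oauth_token_secret': 's3cr3t'}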
def _profiler(req):
    """ This handler wraps the default handler with a profiler.
    Profiling data is written into
    CFG_TMPDIR/invenio-profile-stats-datetime.raw, and is displayed at the
    bottom of the webpage. To use it, add profile=1 to your url. To change
    the sorting algorithm you can provide profile=algorithm_name. You can
    request more than one profile at a time, like
    ?profile=time&profile=cumulative. The list of available algorithms is
    displayed at the end of the profile.
    """
    args = {}
    if req.args:
        args = cgi.parse_qs(req.args)
    if 'profile' in args:
        if not isUserSuperAdmin(collect_user_info(req)):
            return _handler(req)
        if 'memory' in args['profile']:
            gc.set_debug(gc.DEBUG_LEAK)
            ret = _handler(req)
            req.write("\n<pre>%s</pre>" % gc.garbage)
            gc.collect()
            req.write("\n<pre>%s</pre>" % gc.garbage)
            gc.set_debug(0)
            return ret
        from cStringIO import StringIO
        try:
            import pstats
        except ImportError:
            ret = _handler(req)
            req.write("<pre>%s</pre>"
                      % "The Python Profiler is not installed!")
            return ret
        import datetime
        date = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        filename = '%s/invenio-profile-stats-%s.raw' % (CFG_TMPDIR, date)
        existing_sorts = pstats.Stats.sort_arg_dict_default.keys()
        required_sorts = []
        profile_dump = []
        for sort in args['profile']:
            if sort not in existing_sorts:
                sort = 'cumulative'
            if sort not in required_sorts:
                required_sorts.append(sort)
        if sys.hexversion < 0x02050000:
            import hotshot
            import hotshot.stats
            pr = hotshot.Profile(filename)
            ret = pr.runcall(_handler, req)
            for sort_type in required_sorts:
                tmp_out = sys.stdout
                sys.stdout = StringIO()
                hotshot.stats.load(filename).strip_dirs().sort_stats(
                    sort_type).print_stats()
                # pylint: disable=E1103
                # This is a hack. sys.stdout was replaced by a StringIO.
                profile_dump.append(sys.stdout.getvalue())
                # pylint: enable=E1103
                sys.stdout = tmp_out
        else:
            import cProfile
            pr = cProfile.Profile()
            ret = pr.runcall(_handler, req)
            pr.dump_stats(filename)
            for sort_type in required_sorts:
                strstream = StringIO()
                pstats.Stats(filename, stream=strstream).strip_dirs(
                    ).sort_stats(sort_type).print_stats()
                profile_dump.append(strstream.getvalue())
        profile_dump = '\n'.join(profile_dump)
        profile_dump += ('\nYou can use profile=%s or profile=memory'
                         % existing_sorts)
        req.write("\n<pre>%s</pre>" % profile_dump)
        return ret
    elif 'debug' in args and args['debug']:
        # remote_debugger.start(["3"])  # example: starting the debugger
        # on demand
        if remote_debugger:
            debug_starter = remote_debugger.get_debugger(args['debug'])
            if debug_starter:
                try:
                    debug_starter()
                except Exception, msg:
                    # TODO - should register_exception?
                    raise Exception(
                        'Cannot start the debugger %s, please read '
                        'instructions inside remote_debugger module. %s'
                        % (debug_starter.__name__, msg))
            else:
                raise Exception(
                    'Debugging requested, but no debugger registered: "%s"'
                    % args['debug'])
    return _handler(req)
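# The cProfile branch above can be exercised outside mod_python as well.
# A minimal standalone sketch of the same dump-then-sort flow in Python 3;
# the file name and the profiled function are made up for illustration:
import cProfile
import pstats
from io import StringIO

def handler():
    return sum(i * i for i in range(100000))

pr = cProfile.Profile()
ret = pr.runcall(handler)
pr.dump_stats('/tmp/profile-stats.raw')

stream = StringIO()
stats = pstats.Stats('/tmp/profile-stats.raw', stream=stream)
stats.strip_dirs().sort_stats('cumulative').print_stats(5)
print(stream.getvalue())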
def simple_app(environ, start_response):
    # parse_qs returns a dictionary in which the values are lists;
    # it is the easy way
    query_string = parse_qs(environ['QUERY_STRING'])
    # given URL: http://localhost:8000/?addr=0&cmd=id&q=true&arg1=1&arg2=2&arg3=3&arg4=4&arg5=5
    # the QUERY_STRING is: addr=0&cmd=id&q=true&arg1=1&arg2=2&arg3=3&arg4=4&arg5=5
    addr = query_string.get('addr', [''])[0]  # the first addr value
    cmd_base = query_string.get('cmd', [''])[0]  # the first cmd value
    q = query_string.get('q', [''])[0]  # the first q value
    arg1 = query_string.get('arg1', [''])[0]  # the first argument value
    arg2 = query_string.get('arg2', [''])[0]  # the first argument value
    arg3 = query_string.get('arg3', [''])[0]  # the first argument value
    arg4 = query_string.get('arg4', [''])[0]  # the first argument value
    arg5 = query_string.get('arg5', [''])[0]  # the first argument value

    # check the command for correctness
    if (addr == ''):
        addr = '0'  # for the default of /0/id?
    if (not (addr.isalnum() and (len(addr) == 1))):
        status = '400 Bad Request'
        headers = [('Content-type', 'text/plain; charset=utf-8')]
        start_response(status, headers)
        ret = [("ERR: addr must be a string of len == 1 that isalnum\n"
                ).encode('utf-8') +
               (b"ERR: bad query_string \"addr=" + addr.encode('utf-8') +
                b"\"\n").decode().encode('utf-8')]
        return ret
    if (cmd_base == ''):
        cmd_base = "id"  # default
    if (not (cmd_base.isalpha())):
        status = '400 Bad Request'
        headers = [('Content-type', 'text/plain; charset=utf-8')]
        start_response(status, headers)
        ret = [("ERR: cmd must be a string that isalpha\n").encode('utf-8') +
               (b"ERR: bad query_string \"cmd=" + cmd_base.encode('utf-8') +
                b"\"\n").decode().encode('utf-8')]
        return ret

    # start putting the command together
    command = "/" + addr + "/" + cmd_base
    if (q == ''):
        q = 'true'  # the default needs a ? after the cmd_base
    if (not ((q == 'true') or (q == 'false'))):
        status = '400 Bad Request'
        headers = [('Content-type', 'text/plain; charset=utf-8')]
        start_response(status, headers)
        ret = [("ERR: q must be true or false\n").encode('utf-8') +
               (b"ERR: bad query_string \"q=" + q.encode('utf-8') +
                b"\"\n").decode().encode('utf-8')]
        return ret
    # baggage from looking at GPIB commands for too many years
    if (q == "true"):
        command = command + "?"

    if (len(arg1) >= 1):
        if (not (arg1.isalnum())):
            status = '400 Bad Request'
            headers = [('Content-type', 'text/plain; charset=utf-8')]
            start_response(status, headers)
            ret = [("ERR: arg1 must be a string that isalnum\n"
                    ).encode('utf-8') +
                   (b"ERR: bad query_string \"arg1=" + arg1.encode('utf-8') +
                    b"\"\n").decode().encode('utf-8')]
            return ret
        command = command + " " + arg1
        # ignore arg2 if arg1 not given
        if (len(arg2) >= 1):
            if (not (arg2.isalnum())):
                status = '400 Bad Request'
                headers = [('Content-type', 'text/plain; charset=utf-8')]
                start_response(status, headers)
                ret = [("ERR: arg2 must be a string that isalnum\n"
                        ).encode('utf-8') +
                       (b"ERR: bad query_string \"arg2=" +
                        arg2.encode('utf-8') +
                        b"\"\n").decode().encode('utf-8')]
                return ret
            command = command + "," + arg2
            # ignore arg3 if arg2 not given
            if (len(arg3) >= 1):
                if (not (arg3.isalnum())):
                    status = '400 Bad Request'
                    headers = [('Content-type',
                                'text/plain; charset=utf-8')]
                    start_response(status, headers)
                    ret = [("ERR: arg3 must be a string that isalnum\n"
                            ).encode('utf-8') +
                           (b"ERR: bad query_string \"arg3=" +
                            arg3.encode('utf-8') +
                            b"\"\n").decode().encode('utf-8')]
                    return ret
                command = command + "," + arg3
                # ignore arg4 if arg3 not given
                if (len(arg4) >= 1):
                    if (not (arg4.isalnum())):
                        status = '400 Bad Request'
                        headers = [('Content-type',
                                    'text/plain; charset=utf-8')]
                        start_response(status, headers)
                        ret = [("ERR: arg4 must be a string that isalnum\n"
                                ).encode('utf-8') +
                               (b"ERR: bad query_string \"arg4=" +
                                arg4.encode('utf-8') +
                                b"\"\n").decode().encode('utf-8')]
                        return ret
                    command = command + "," + arg4
                    # ignore arg5 if arg4 not given
                    if (len(arg5) >= 1):
                        if (not (arg5.isalnum())):
                            status = '400 Bad Request'
                            headers = [('Content-type',
                                        'text/plain; charset=utf-8')]
                            start_response(status, headers)
                            ret = [("ERR: arg5 must be a string that "
                                    "isalnum\n").encode('utf-8') +
                                   (b"ERR: bad query_string \"arg5=" +
                                    arg5.encode('utf-8') +
                                    b"\"\n").decode().encode('utf-8')]
                            return ret
                        command = command + "," + arg5

    # "/0/id?" is like the command I want to send on the serial link
    sio.write((command + "\n").encode('utf-8'))
    sio_echo_cmd = b""
    # my serial device echoes the command
    sio_echo_cmd = sio.readline().strip()
    if (not (len(sio_echo_cmd) >= 1)):
        status = '503 Service Unavailable'
        headers = [('Content-type', 'text/plain; charset=utf-8')]
        start_response(status, headers)
        ret = [("ERR: device did not echo command\n").encode('utf-8') +
               (b"ERR: \"command=" + command.encode('utf-8') +
                b"\"\n").decode().encode('utf-8')]
        return ret
    if (not (sio_echo_cmd == command.encode('utf-8'))):
        status = '503 Service Unavailable'
        headers = [('Content-type', 'text/plain; charset=utf-8')]
        start_response(status, headers)
        ret = [("ERR: device command echo was bad\n").encode('utf-8') +
               (b"ERR: \"command=" + command.encode('utf-8') +
                b"\"\n").decode().encode('utf-8') +
               (b"ERR: \"echo_cmd=" + sio_echo_cmd +
                b"\"\n").decode().encode('utf-8')]
        return ret
    sio_echo = sio.readline().strip()  # and then it outputs the JSON
    # some commands (e.g. /0/adc 1) will keep outputting at timed
    # intervals; this should stop that
    sio.write("\n".encode('utf-8'))
    if (not (len(sio_echo) >= 1)):
        status = '503 Service Unavailable'
        headers = [('Content-type', 'text/plain; charset=utf-8')]
        start_response(status, headers)
        ret = [("ERR: serial device found but output not returned\n"
                ).encode('utf-8') +
               (b"ERR: \"command=" + command.encode('utf-8') +
                b"\"\n").decode().encode('utf-8')]
        return ret
    try:
        json_object = json.loads(sio_echo)
    except ValueError as e:
        status = '503 Service Unavailable'
        headers = [('Content-type', 'text/plain; charset=utf-8')]
        start_response(status, headers)
        ret = [("ERR: serial device returned bad JSON\n").encode('utf-8') +
               (b"ERR: \"command=" + command.encode('utf-8') +
                b"\"\n").decode().encode('utf-8') +
               (b"ERR: \"sio_echo=" + sio_echo +
                b"\"\n").decode().encode('utf-8')]
        return ret
    status = '200 OK'
    headers = [('Content-type', 'text/plain; charset=utf-8')]
    start_response(status, headers)
    # the JSON is formatted as a bytestring suitable for transmission
    # with the HTTP response headers
    ret = [(sio_echo + b"\n").decode().encode('utf-8')]
    return ret
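# The query-to-command translation implemented above can be summarized on
# its own. A compact Python 3 sketch of just that mapping, without the
# serial I/O or the per-field validation; the defaults mirror the handler
# above (addr '0', cmd 'id', q 'true'):
from urllib.parse import parse_qs

def build_command(query_string):
    qs = parse_qs(query_string)
    addr = qs.get('addr', ['0'])[0] or '0'
    cmd = qs.get('cmd', ['id'])[0] or 'id'
    q = qs.get('q', ['true'])[0] or 'true'
    command = '/' + addr + '/' + cmd
    if q == 'true':
        command += '?'
    args = [qs.get('arg%d' % i, [''])[0] for i in range(1, 6)]
    for i, arg in enumerate(args):
        if not arg:
            break  # later args are ignored once one is missing
        command += (' ' if i == 0 else ',') + arg
    return command

print(build_command('addr=0&cmd=id&q=true'))          # /0/id?
print(build_command('addr=0&cmd=adc&q=true&arg1=1'))  # /0/adc? 1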
def _handler(req):
    """ This handler is invoked by mod_python with the apache request."""
    try:
        allowed_methods = ("GET", "POST", "HEAD", "OPTIONS")
        req.allow_methods(allowed_methods, 1)
        if req.method not in allowed_methods:
            raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED

        if req.method == 'OPTIONS':
            ## OPTIONS is used to know which methods are allowed
            req.headers_out['Allow'] = ', '.join(allowed_methods)
            raise apache.SERVER_RETURN, apache.OK

        # Set the user agent for fckeditor.py, which needs it here
        os.environ["HTTP_USER_AGENT"] = req.headers_in.get(
            'User-Agent', '')

        guest_p = isGuestUser(getUid(req))

        uri = req.uri
        if uri == '/':
            path = ['']
        else:
            ## Let's collapse multiple slashes into a single /
            uri = RE_SLASHES.sub('/', uri)
            path = uri[1:].split('/')

        if uri.startswith('/yours') or not guest_p:
            ## Private/personalized requests should not be cached
            req.headers_out['Cache-Control'] = (
                'private, no-cache, no-store, max-age=0, must-revalidate')
            req.headers_out['Pragma'] = 'no-cache'
            req.headers_out['Vary'] = '*'
        else:
            req.headers_out['Cache-Control'] = 'public, max-age=3600'
            req.headers_out['Vary'] = 'Cookie, ETag, Cache-Control'

        try:
            if req.header_only and not RE_SPECIAL_URI.match(req.uri):
                return root._traverse(req, path, True, guest_p)
            else:
                ## bibdocfile has a special treatment for HEAD
                return root._traverse(req, path, False, guest_p)
        except TraversalError:
            raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
        except apache.SERVER_RETURN:
            ## This is one of mod_python's ways of communicating
            raise
        except IOError, exc:
            if 'Write failed, client closed connection' not in "%s" % exc:
                ## Workaround for considering as false positives the
                ## exceptions raised by mod_python when the user closes
                ## the connection, or in some other rare and not well
                ## identified cases.
                register_exception(req=req, alert_admin=True)
            raise
        except Exception:
            # send the error message, much more convenient than log hunting
            if remote_debugger:
                args = {}
                if req.args:
                    args = cgi.parse_qs(req.args)
                    if 'debug' in args:
                        remote_debugger.error_msg(args['debug'])
            register_exception(req=req, alert_admin=True)
            raise

        # Serve an error by default.
        raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
def _get_vars(self, media_id):
    return dict(
        map(lambda x: (x[0], x[1][0]), cgi.parse_qs(media_id).items()))
def queryValue(arg):
    query = parse_qs(urlparse(request.uri).query, True)
    return query.get(arg, [""])[0]
def _handler(req):
    """ This handler is invoked by mod_python with the apache request."""
    allowed_methods = ("GET", "POST", "HEAD", "OPTIONS", "PUT")
    # req.allow_methods(allowed_methods, 1)
    # if req.method not in allowed_methods:
    #     raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED

    if req.method == 'OPTIONS':
        ## OPTIONS is used to know which methods are allowed
        req.headers_out['Allow'] = ', '.join(allowed_methods)
        raise apache.SERVER_RETURN, apache.OK

    # Set the user agent for fckeditor.py, which needs it here
    os.environ["HTTP_USER_AGENT"] = req.headers_in.get('User-Agent', '')

    # Check if REST authentication can be performed
    if req.args:
        args = cgi.parse_qs(req.args)
        if 'apikey' in args and req.is_https():
            uid = web_api_key.acc_get_uid_from_request()
            if uid < 0:
                raise apache.SERVER_RETURN, apache.HTTP_UNAUTHORIZED
            else:
                login_user(uid)

    guest_p = int(current_user.is_guest)

    uri = req.uri
    if uri == '/':
        path = ['']
    else:
        ## Let's collapse multiple slashes into a single /
        uri = RE_SLASHES.sub('/', uri)
        path = uri[1:].split('/')

    if CFG_ACCESS_CONTROL_LEVEL_SITE > 1:
        ## If the site is in maintenance mode, let's return 503 to
        ## casual crawlers to avoid having the site indexed
        req.status = 503

    g = _RE_BAD_MSIE.search(req.headers_in.get('User-Agent', "MSIE 6.0"))
    bad_msie = g and float(g.group(1)) < 9.0
    if uri.startswith('/yours') or not guest_p:
        ## Private/personalized requests should not be cached
        if bad_msie and req.is_https():
            req.headers_out['Cache-Control'] = (
                'private, max-age=0, must-revalidate')
        else:
            req.headers_out['Cache-Control'] = (
                'private, no-cache, no-store, max-age=0, must-revalidate')
            req.headers_out['Pragma'] = 'no-cache'
            req.headers_out['Vary'] = '*'
    elif not (bad_msie and req.is_https()):
        req.headers_out['Cache-Control'] = 'public, max-age=3600'
        req.headers_out['Vary'] = 'Cookie, ETag, Cache-Control'

    try:
        if req.header_only and not RE_SPECIAL_URI.match(req.uri):
            return root._traverse(req, path, True, guest_p)
        else:
            ## bibdocfile has a special treatment for HEAD
            return root._traverse(req, path, False, guest_p)
    except TraversalError:
        raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
    except apache.SERVER_RETURN:
        ## This is one of mod_python's ways of communicating
        raise
    except IOError as exc:
        if 'Write failed, client closed connection' not in "%s" % exc:
            ## Workaround for considering as false positives the
            ## exceptions raised by mod_python when the user closes
            ## the connection, or in some other rare and not well
            ## identified cases.
            register_exception(req=req, alert_admin=True)
        raise
    except Exception:
        # send the error message, much more convenient than log hunting
        if remote_debugger:
            args = {}
            if req.args:
                args = cgi.parse_qs(req.args)
                if 'debug' in args:
                    remote_debugger.error_msg(args['debug'])
        register_exception(req=req, alert_admin=True)
        raise

    # Serve an error by default.
    raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
def extract_post_data(self, environment):
    if environment.get('CONTENT_TYPE') == 'application/x-www-form-urlencoded':
        input = environment.get('wsgi.input')
        size = int(environment.get('CONTENT_LENGTH'))
        self.body = parse_qs(input.read(size))
        input.close()
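# A handler like the one above is easy to exercise with a faked WSGI
# environ. A minimal Python 3 sketch; the Handler class just repeats the
# method above so the example is self-contained, and the body is made up.
# Note that with bytes input, parse_qs returns bytes keys and values.
from io import BytesIO
from urllib.parse import parse_qs

class Handler:
    def extract_post_data(self, environment):
        if environment.get('CONTENT_TYPE') == \
                'application/x-www-form-urlencoded':
            input = environment.get('wsgi.input')
            size = int(environment.get('CONTENT_LENGTH'))
            self.body = parse_qs(input.read(size))
            input.close()

body = b'a=1&b=2'
environ = {
    'CONTENT_TYPE': 'application/x-www-form-urlencoded',
    'CONTENT_LENGTH': str(len(body)),
    'wsgi.input': BytesIO(body),
}
h = Handler()
h.extract_post_data(environ)
print(h.body)  # {b'a': [b'1'], b'b': [b'2']}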
def run_example():
    # setup
    print '** OAuth Python Library Example **'
    client = SimpleOAuthClient(REQUEST_TOKEN_URL, ACCESS_TOKEN_URL,
                               AUTHORIZATION_URL)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    signature_method_plaintext = oauth.OAuthSignatureMethod_PLAINTEXT()
    signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
    pause()

    # get request token
    print '* Obtain a request token ...'
    pause()
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, callback=CALLBACK_URL, http_url=client.request_token_url)
    oauth_request.sign_request(signature_method_plaintext, consumer, None)
    print 'REQUEST (via headers)'
    pause()
    token = client.fetch_request_token(oauth_request)
    print 'GOT'
    print 'key: %s' % str(token.key)
    print 'secret: %s' % str(token.secret)
    print 'callback confirmed? %s' % str(token.callback_confirmed)
    pause()

    print '* Authorize the request token ...'
    pause()
    oauth_request = oauth.OAuthRequest.from_token_and_callback(
        token=token, http_url=client.authorization_url)
    print 'REQUEST (via url query string)'
    pause()
    response = client.authorize_token(oauth_request)
    print 'GOT'
    print response

    # get the verifier
    query = urlparse.urlparse(response)[4]
    params = cgi.parse_qs(query, keep_blank_values=False)
    verifier = params['oauth_verifier'][0]
    print 'verifier: %s' % verifier
    pause()

    # get access token
    print '* Obtain an access token ...'
    pause()
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, token=token, verifier=verifier,
        http_url=client.access_token_url)
    oauth_request.sign_request(signature_method_plaintext, consumer, token)
    print 'REQUEST (via headers)'
    print 'parameters: %s' % str(oauth_request.parameters)
    pause()
    token = client.fetch_access_token(oauth_request)
    print 'GOT'
    print 'key: %s' % str(token.key)
    print 'secret: %s' % str(token.secret)
    pause()

    # access some protected resources
    print '* Access protected resources ...'
    pause()
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, token=token, http_method='GET', http_url=RESOURCE_URL)
    oauth_request.sign_request(signature_method_hmac_sha1, consumer, token)
    print 'REQUEST (via get)'
    print 'parameters: %s' % str(oauth_request.parameters)
    pause()
    resource = client.access_resource(oauth_request)
    print 'GOT'
    print 'resource:\n %s' % resource
    pause()
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Extract business names from PayPal logs.
"""

__revision__ = "$Rev: 2959 $"
__date__ = "$Date: 2008-08-14 05:05:48 -0300 (qui, 14 ago 2008) $"
__author__ = "$Author: johann $"

import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'shotserver04.settings'

import cgi
from shotserver04.paypal.models import PayPalLog

for log in PayPalLog.objects.all():
    post = cgi.parse_qs(log.raw_post_data)
    business = post.get('payer_business_name', [''])[0]
    if business:
        print business
        log.update_fields(payer_business_name=business)
def application(env, start_response):
    body = env['wsgi.input'].read(None)
    a = cgi.parse_qs(body).get(b'a', [b'1'])[0]
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'a is ' + a + b', body is ' + body]
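# Tracing the handler above with a concrete body, under Python 3 semantics
# where wsgi.input yields bytes: parse_qs on bytes returns bytes keys and
# values, which is why the b'a' key and the b'1' default are needed.
from urllib.parse import parse_qs

body = b'a=2&x=9'
a = parse_qs(body).get(b'a', [b'1'])[0]
print(b'a is ' + a + b', body is ' + body)
# b'a is 2, body is a=2&x=9'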
def post(self, slug=None, postid=None):
    '''handle trackback'''
    error = '''<?xml version="1.0" encoding="utf-8"?>
<response>
<error>1</error>
<message>%s</message>
</response>
'''
    success = '''<?xml version="1.0" encoding="utf-8"?>
<response>
<error>0</error>
</response>
'''
    if not g_blog.allow_trackback:
        self.response.out.write(error % "Trackback denied.")
        return

    self.response.headers['Content-Type'] = "text/xml"
    if postid:
        entries = Entry.all().filter("published =", True).filter(
            'post_id =', postid).fetch(1)
    else:
        slug = urldecode(slug)
        entries = Entry.all().filter("published =", True).filter(
            'link =', slug).fetch(1)

    if not entries or len(entries) == 0:
        # or (postid and not entries[0].link.endswith(
        #     g_blog.default_link_format % {'post_id': postid})):
        self.response.out.write(error % "empty slug/postid")
        return

    # check the code to reject spam
    entry = entries[0]
    logging.info(self.request.remote_addr + self.request.path + " " +
                 entry.trackbackurl)
    # key = self.param("code")
    # if (self.request.uri != entry.trackbackurl) or entry.is_external_page
    #         or not entry.allow_trackback:
    # import cgi
    from urlparse import urlparse
    param = urlparse(self.request.uri)
    code = param[4]
    param = cgi.parse_qs(code)
    if param.has_key('code'):
        code = param['code'][0]

    if (not str(entry.key()) == code) or entry.is_external_page \
            or not entry.allow_trackback:
        self.response.out.write(error % "Invalid trackback url.")
        return

    coming_url = self.param('url')
    blog_name = myfilter.do_filter(self.param('blog_name'))
    excerpt = myfilter.do_filter(self.param('excerpt'))
    title = myfilter.do_filter(self.param('title'))
    if not coming_url or not blog_name or not excerpt or not title:
        self.response.out.write(error % "not enough post info")
        return

    import time
    # wait for half a second in case the other side hasn't been
    # published yet
    time.sleep(0.5)

    ## # also check that the coming url is valid and contains our link
    ## # (this is not standard trackback behavior)
    ## try:
    ##     result = urlfetch.fetch(coming_url)
    ##     if result.status_code != 200:
    ##         # or ((g_blog.baseurl + '/' + slug) not in
    ##         #     result.content.decode('ascii', 'ignore')):
    ##         self.response.out.write(error % "probably spam")
    ##         return
    ## except Exception, e:
    ##     logging.info("urlfetch error")
    ##     self.response.out.write(error % "urlfetch error")
    ##     return

    comment = Comment.all().filter("entry =", entry).filter(
        "weburl =", coming_url).get()
    if comment:
        self.response.out.write(error % "has pinged before")
        return

    comment = Comment(author=blog_name,
                      content="...<strong>" + title[:250] + "</strong> " +
                              excerpt[:250] + '...',
                      weburl=coming_url,
                      entry=entry)
    comment.ip = self.request.remote_addr
    comment.ctype = COMMENT_TRACKBACK
    try:
        comment.save()
        memcache.delete("/" + entry.link)
        self.write(success)
        g_blog.tigger_action("pingback_post", comment)
    except:
        self.response.out.write(error % "unknown error")
import sys
from wmflabs import db

def jsonify(response):
    return json.dumps(response)

# Print the header
print('Content-type: application/json')
print()

# Fetch params
if 'QUERY_STRING' in os.environ:
    QS = os.environ['QUERY_STRING']
    qs = cgi.parse_qs(QS)
    try:
        username = qs['user'][0].replace('_', ' ')
    except:
        print('{"error": "nouser"}')
        sys.exit(0)
else:
    print('{"error": "nouser"}')
    sys.exit(0)

##### PROGRAM ####
conn = db.connect('wikidatawiki')
cur = conn.cursor()
with cur:
    # Bind the username as a query parameter; interpolating it directly
    # into the SQL string (as the original did) allows SQL injection.
    sql = ('select count(*) as edit_count from change_tag '
           'join revision on rev_id=ct_rev_id '
           'where ct_tag_id=155 and rev_user_text=%s')
    cur.execute(sql, (username,))
def parse_qs(qs):
    data = cgi.parse_qs(qs, keep_blank_values=True)
    for k in data.keys():
        if len(data[k]) == 1:
            data[k] = data[k][0]
    return data
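# The wrapper above flattens single-valued keys but leaves genuinely
# repeated keys as lists, so callers must handle both shapes. A behavior
# check using the modern urllib.parse equivalent:
import urllib.parse

def parse_qs_flat(qs):
    data = urllib.parse.parse_qs(qs, keep_blank_values=True)
    for k in list(data.keys()):
        if len(data[k]) == 1:
            data[k] = data[k][0]
    return data

print(parse_qs_flat("a=1&b=2&b=3&c="))
# {'a': '1', 'b': ['2', '3'], 'c': ''}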
def dict_from_uri(cls, uri):
    """
    Convert a uri string into a descriptor dictionary.

    Example:

    - uri:           sgtk:descriptor:app_store?name=hello&version=v123
    - expected_type: app_store
    - returns:   {'type': 'app_store', 'name': 'hello', 'version': 'v123'}

    :param uri: uri string
    :return: dictionary with a 'type' key plus one key per parameter in
             the uri's query string.
    """
    parsed_uri = urlparse.urlparse(uri)

    # example:
    #
    # >>> urlparse.urlparse("sgtk:descriptor:app_store?foo=bar&baz=buz")
    #
    # ParseResult(scheme='sgtk', netloc='', path='descriptor:app_store',
    # params='', query='foo=bar&baz=buz', fragment='')
    #
    #
    # NOTE - it seems on some versions of python the result is different.
    # this includes python2.5 but seems to affect other SKUs as well.
    #
    # uri: sgtk:descriptor:app_store?version=v0.1.2&name=tk-bundle
    #
    # python 2.6+ expected: ParseResult(
    #     scheme='sgtk',
    #     netloc='',
    #     path='descriptor:app_store',
    #     params='',
    #     query='version=v0.1.2&name=tk-bundle',
    #     fragment='')
    #
    # python 2.5 and others: (
    #     'sgtk',
    #     '',
    #     'descriptor:app_store?version=v0.1.2&name=tk-bundle',
    #     '',
    #     '',
    #     '')

    if parsed_uri.scheme != constants.DESCRIPTOR_URI_PATH_SCHEME:
        raise TankDescriptorError(
            "Invalid uri '%s' - must begin with 'sgtk'" % uri)

    if parsed_uri.query == "":
        # in python 2.5 and others, the query string is part of the path
        # (see above)
        (path, query) = parsed_uri.path.split("?")
    else:
        path = parsed_uri.path
        query = parsed_uri.query

    split_path = path.split(constants.DESCRIPTOR_URI_SEPARATOR)
    # e.g. 'descriptor:app_store' -> ('descriptor', 'app_store')
    if (len(split_path) != 2
            or split_path[0] != constants.DESCRIPTOR_URI_PATH_PREFIX):
        raise TankDescriptorError(
            "Invalid uri '%s' - must begin with sgtk:descriptor" % uri)

    descriptor_dict = {}
    descriptor_dict["type"] = split_path[1]

    # now pop the remaining keys into a dict
    # note: using the deprecated cgi method for 2.5 compatibility
    # example:
    # >>> cgi.parse_qs("path=foo&version=v1.2.3")
    # {'path': ['foo'], 'version': ['v1.2.3']}
    for (param, value) in cgi.parse_qs(query).iteritems():
        if len(value) > 1:
            raise TankDescriptorError(
                "Invalid uri '%s' - duplicate parameters" % uri)
        descriptor_dict[param] = value[0]

    return descriptor_dict
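# The same round-trip under Python 3, where urlparse and parse_qs both live
# in urllib.parse and the 2.5 query-in-path quirk no longer applies. A
# sketch only, reusing the docstring's example uri and skipping the error
# handling:
from urllib.parse import parse_qs, urlparse

uri = "sgtk:descriptor:app_store?name=hello&version=v123"
parsed = urlparse(uri)
descriptor = {"type": parsed.path.split(":")[1]}
for param, value in parse_qs(parsed.query).items():
    descriptor[param] = value[0]
print(descriptor)
# {'type': 'app_store', 'name': 'hello', 'version': 'v123'}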
def _decoded_input(self):
    content_type = self._request.META.get('CONTENT_TYPE',
                                          _JSON_CONTENT_TYPE)
    raw_data = self._request.raw_post_data
    if content_type == _JSON_CONTENT_TYPE:
        try:
            raw_dict = simplejson.loads(raw_data)
        except ValueError, exc:
            raise exceptions.BadRequest('Error decoding request body: '
                                        '%s\n%r' % (exc, raw_data))
        if not isinstance(raw_dict, dict):
            raise exceptions.BadRequest('Expected dict input, got %s: %r'
                                        % (type(raw_dict), raw_dict))
    elif content_type == 'application/x-www-form-urlencoded':
        cgi_dict = cgi.parse_qs(raw_data)  # django won't do this for PUT
        raw_dict = {}
        for key, values in cgi_dict.items():
            value = values[-1]  # take last value if multiple were given
            try:
                # attempt to parse numbers, booleans and nulls
                raw_dict[key] = simplejson.loads(value)
            except ValueError:
                # otherwise, leave it as a string
                raw_dict[key] = value
    else:
        raise exceptions.RequestError(415, 'Unsupported media type: %s'
                                      % content_type)
    return _InputDict(raw_dict)
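# The form-urlencoded branch above leans on json.loads to coerce scalar
# strings into numbers, booleans and nulls. A standalone demonstration of
# that coercion rule (sample query string made up; json.JSONDecodeError is
# a subclass of ValueError, so the except clause matches):
import json
from urllib.parse import parse_qs

raw = "count=3&active=true&note=hello&missing=null"
out = {}
for key, values in parse_qs(raw).items():
    value = values[-1]  # take last value if multiple were given
    try:
        out[key] = json.loads(value)  # numbers, booleans and nulls
    except ValueError:
        out[key] = value  # otherwise, leave it as a string
print(out)
# {'count': 3, 'active': True, 'note': 'hello', 'missing': None}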