def to_dict(self):
    """Serialize this project into a plain dict for JSON responses."""
    msg_path = url_unquote(url_for("project_msg", uid=self.uid))
    download_path = url_unquote(url_for("project_download", uid=self.uid))
    return {
        "id": self.id,
        "uid": self.uid,
        "name": self.name,
        "platform": self.platform,
        "platform_display": self.PLATFORM.get(self.platform),
        "logo": self.logo,
        "auto_publish": self.is_auto_publish,
        "auto_publish_display": self.IS_AUTO_PUBLISH.get(self.is_auto_publish),
        "msg_url": settings.DOMAIN + msg_path,
        "download_url": settings.DOMAIN + download_path,
    }
def simple(request, graph, startNode, endNode):
    """Respond with all simple paths between two (URL-encoded) nodes."""
    source = url_unquote(startNode)
    target = url_unquote(endNode)
    if not graph.has_node(source) or not graph.has_node(target):
        return request.respondJson({'message': 'node not in graph'}, NOT_FOUND)
    paths = nx.shortest_simple_paths(graph, source, target)
    request.respondJson({'paths': tuple(paths)})
def deleteEdge(request, graph, startNode, endNode):
    """Delete the edge between two (URL-encoded) nodes, 404 when absent."""
    source = url_unquote(startNode)
    target = url_unquote(endNode)
    try:
        # Probe the adjacency structure; KeyError means no such edge.
        graph.edge[source][target]
    except KeyError:
        return request.respondJson({'message': 'edge not in graph'}, NOT_FOUND)
    graph.remove_edge(source, target)
    request.respondJson({'message': 'edge deleted'})
def get_author(self):
    """Try to guess the author name.  Use IP address as last resort."""
    def _decoded_or_none(raw):
        # A badly-encoded value is treated the same as an absent one.
        try:
            return url_unquote(raw)
        except UnicodeError:
            return None

    cookie = _decoded_or_none(self.cookies.get("author", ""))
    auth = _decoded_or_none(self.environ.get('REMOTE_USER', ""))
    return self.form.get("author") or cookie or auth or self.remote_addr
def showEdge(request, graph, startNode, endNode):
    """Respond with one edge's attributes plus its endpoints and graph name."""
    source = url_unquote(startNode)
    target = url_unquote(endNode)
    try:
        attributes = graph.edge[source][target]
    except KeyError:
        return request.respondJson(
            {'message': 'nodes not in graph or not linked'}, NOT_FOUND)
    # Copy before annotating so the graph's own edge dict stays untouched.
    payload = dict(attributes)
    payload['start'] = source
    payload['end'] = target
    payload['graph'] = graph.name
    request.respondJson(payload)
def unquote_url_values(endpoint, values):
    """Preprocessor that URL-decodes the values given in the URL."""
    for key in values:
        value = values[key]
        # Only string values are decoded; ints etc. pass through untouched.
        if isinstance(value, basestring):
            values[key] = url_unquote(value)
def index():
    """Serve a .torrent built from a ``?magnet=`` link, cached by its xt hash.

    Without a ``magnet`` argument the landing page is rendered instead.
    """
    # Fix: ``request.args.has_key(...)`` replaced with the ``in`` operator
    # (has_key is Python-2-only and non-idiomatic).
    if "magnet" not in request.args:
        return render_template("index.html")
    magnet = url_unquote(request.args["magnet"]).encode(request.charset)
    # The "xt" parameter of the magnet query string identifies the torrent.
    magnet_xt = url_decode(magnet[magnet.index("?") + 1 :])["xt"]
    torrent = cache.get(magnet_xt)
    if not torrent:
        try:
            handle = lt.add_magnet_uri(
                ses,
                magnet,
                {"save_path": "./invalid", "paused": False,
                 "auto_managed": False, "duplicate_is_error": False},
            )
            # Busy-wait until libtorrent has resolved the magnet's metadata.
            while not handle.has_metadata():
                time.sleep(0.01)
            handle.pause()
            info = handle.get_torrent_info()
            torrent = create_torrent(info)
            cache.set(magnet_xt, torrent)
            ses.remove_torrent(handle, lt.options_t.delete_files)
        # Fix: narrowed the bare ``except:`` (which also swallowed
        # SystemExit/KeyboardInterrupt) to ``except Exception``.
        except Exception:
            # Best effort, as before: a concurrent request may have
            # populated the cache while we were fetching.
            torrent = cache.get(magnet_xt)
    response = Response(response=torrent[1], mimetype="application/x-bittorrent")
    response.headers.add("Content-Disposition", "attachment", filename=torrent[0])
    return response
def createEdge(request, graph, startNode, endNode):
    """Create an edge between two existing nodes with the posted attributes."""
    source = url_unquote(startNode)
    target = url_unquote(endNode)
    if not (graph.has_node(source) and graph.has_node(target)):
        return request.respondJson({'message': 'node not in graph'}, NOT_FOUND)
    attributes = request.json()
    # Stamp the creation time so clients can audit edge provenance.
    attributes['created'] = datetime.utcnow().strftime(DATETIME_FORMAT)
    graph.add_edge(source, target, **attributes)
    payload = graph.edge[source][target]
    payload.update({'start': source, 'end': target, 'graph': graph.name})
    request.respondJson(payload, CREATED)
def stories(): user = ns.models.User.query.filter_by(id=g.uid).first() r = None if user is None: r = make_response({ 'status': 'FAIL', 'data': [], }) r.mimetype = 'application/json' else: scraper = Scraper() if 'newscrape_sprefs' in request.cookies: search_prefs = url_unquote( request.cookies['newscrape_sprefs']).split(',') for engine in search_prefs: if 'google' in engine: scraper.add_scrapee(GoogleSearch(user.get_keywords())) elif 'bing' in engine: scraper.add_scrapee(BingSearch(user.get_keywords())) elif 'yahoo' in engine: scraper.add_scrapee(YahooSearch(user.get_keywords())) scraper.scrape() stories = scraper.fetch_results() r = make_response({ 'status': 'OK', 'data': stories, }) r.mimetype = 'application/json' return r
def post():
    """
    get vulnerable file content
    :return:
    """
    data = request.json
    if not data or data == "":
        return {
            'code': 1003,
            'msg': 'Only support json, please post json data.'
        }
    sid = data.get('sid')
    # file_path arrives URL-encoded from the client.
    file_path = url_unquote(data.get('file_path'))
    if not sid or sid == '':
        return {"code": 1002, "msg": "sid is required."}
    if not file_path or file_path == '':
        return {'code': 1002, 'msg': 'file_path is required.'}
    # Each scan target has a ``<sid>_data`` JSON file recording its result.
    s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=sid))
    if not os.path.exists(s_sid_file):
        return {'code': 1002, 'msg': 'No such target.'}
    with open(s_sid_file, 'r') as f:
        target_directory = json.load(f).get('result').get(
            'target_directory')
    if not target_directory or target_directory == '':
        return {'code': 1002, 'msg': 'No such directory'}
    # secure_filename() each path component to prevent directory traversal.
    if PY2:
        file_path = map(
            secure_filename,
            [path.decode('utf-8') for path in file_path.split('/')])
    else:
        file_path = map(secure_filename,
                        [path for path in file_path.split('/')])
    filename = target_directory
    for _dir in file_path:
        filename = os.path.join(filename, _dir)
    if os.path.exists(filename):
        extension = guess_type(filename)
        if is_text(filename):
            with open(filename, 'r') as f:
                file_content = f.read()
        else:
            file_content = 'This is a binary file.'
    else:
        return {'code': 1002, 'msg': 'No such file.'}
    return {
        'code': 1001,
        'result': {
            'file_content': file_content,
            'extension': extension
        }
    }
def test_quoting(self):
    """Exercise url_quote/url_unquote (and _plus variants), url_encode, url_fix."""
    self.assert_strict_equal(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
    self.assert_strict_equal(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
    self.assert_strict_equal(urls.url_quote_plus('foo bar'), 'foo+bar')
    self.assert_strict_equal(urls.url_unquote_plus('foo+bar'), u'foo bar')
    self.assert_strict_equal(urls.url_quote_plus('foo+bar'), 'foo%2Bbar')
    self.assert_strict_equal(urls.url_unquote_plus('foo%2Bbar'), u'foo+bar')
    # None values are dropped from url_encode output.
    self.assert_strict_equal(
        urls.url_encode({
            b'a': None,
            b'b': b'foo bar'
        }), 'b=foo+bar')
    self.assert_strict_equal(
        urls.url_encode({
            u'a': None,
            u'b': u'foo bar'
        }), 'b=foo+bar')
    self.assert_strict_equal(
        urls.url_fix(
            u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
        'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
    # Non-string input is coerced to text before quoting.
    self.assert_strict_equal(urls.url_quote_plus(42), '42')
    self.assert_strict_equal(urls.url_quote(b'\xff'), '%FF')
def showNode(request, graph, nodeName):
    """Respond with the representation of one node, 404 when it is absent."""
    nodeName = url_unquote(nodeName)
    try:
        data = _reprNode(graph, nodeName)
    except KeyError as err:  # 'as' syntax is valid on py2.6+ and py3
        return request.respondJson({'message': err.message}, NOT_FOUND)
    # Bug fix: the success path previously computed ``data`` but never sent
    # a response; respond like the sibling node/edge handlers do.
    request.respondJson(data)
def _get_link_filename(self, _key, **values):
    """Map a generated site link onto the destination file it is written to."""
    relative = url_unquote(
        self.link_to(_key, **values)[len(self.prefix_path):])
    relative = relative.lstrip('/')
    # Bare directories (or the site root) resolve to their index page.
    if not relative or relative.endswith('/'):
        relative += 'index.html'
    return os.path.join(self.dest_folder, relative)
def handle_payload_v2(app, event, context):
    """Translate an API Gateway HTTP API (payload v2) event into a WSGI call.

    Returns the response dict expected by API Gateway.
    """
    headers = Headers(event[u"headers"])
    script_name = get_script_name(headers, event.get("requestContext", {}))
    path_info = event[u"rawPath"]
    body = event.get("body", "")
    body = get_body_bytes(event, body)
    # Payload v2 delivers cookies separately from the other headers.
    headers["Cookie"] = "; ".join(event.get("cookies", []))
    environ = {
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": headers.get(u"Content-Type", ""),
        "PATH_INFO": url_unquote(path_info),
        "QUERY_STRING": url_encode(event.get(u"queryStringParameters", {})),
        "REMOTE_ADDR": event.get("requestContext", {})
        .get(u"http", {})
        .get(u"sourceIp", ""),
        "REMOTE_USER": event.get("requestContext", {})
        .get(u"authorizer", {})
        .get(u"principalId", ""),
        "REQUEST_METHOD": event.get("requestContext", {})
        .get("http", {})
        .get("method", ""),
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": headers.get(u"Host", "lambda"),
        "SERVER_PORT": headers.get(u"X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": "HTTP/1.1",
        "wsgi.errors": sys.stderr,
        "wsgi.input": BytesIO(body),
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
        "wsgi.url_scheme": headers.get(u"X-Forwarded-Proto", "http"),
        "wsgi.version": (1, 0),
        "serverless.authorizer": event.get("requestContext", {}).get(u"authorizer"),
        "serverless.event": event,
        "serverless.context": context,
        # TODO: Deprecate the following entries, as they do not comply with
        # the WSGI spec: custom environ variables should be lower-case and
        # prefixed with a name unique to this gateway.
        "API_GATEWAY_AUTHORIZER": event.get("requestContext", {}).get(u"authorizer"),
        "event": event,
        "context": context,
    }
    environ = setup_environ_items(environ, headers)
    response = Response.from_app(app, environ)
    returndict = generate_response(response, event)
    returndict["multiValueHeaders"]["Access-Control-Allow-Headers"] = ["Content-Type"]
    # Bug fix: the response dict was built and mutated but never returned
    # (the sibling lambda-integration handler does return it).
    return returndict
def handle_lambda_integration(app, event, context):
    """Translate an API Gateway Lambda-integration event into a WSGI call."""
    headers = Headers(event[u"headers"])
    script_name = get_script_name(headers, event)
    path_info = event[u"requestPath"]
    # Substitute path parameters ({key} and greedy {key+}) into the template path.
    for key, value in event.get(u"path", {}).items():
        path_info = path_info.replace("{%s}" % key, value)
        path_info = path_info.replace("{%s+}" % key, value)
    body = event.get("body", {})
    body = json.dumps(body) if body else ""
    body = get_body_bytes(event, body)
    environ = {
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": headers.get(u"Content-Type", ""),
        "PATH_INFO": url_unquote(path_info),
        "QUERY_STRING": url_encode(event.get(u"query", {})),
        "REMOTE_ADDR": event.get("identity", {}).get(u"sourceIp", ""),
        "REMOTE_USER": event.get("principalId", ""),
        "REQUEST_METHOD": event.get("method", ""),
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": headers.get(u"Host", "lambda"),
        "SERVER_PORT": headers.get(u"X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": "HTTP/1.1",
        "wsgi.errors": sys.stderr,
        "wsgi.input": BytesIO(body),
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
        "wsgi.url_scheme": headers.get(u"X-Forwarded-Proto", "http"),
        "wsgi.version": (1, 0),
        "serverless.authorizer": event.get("enhancedAuthContext"),
        "serverless.event": event,
        "serverless.context": context,
        # TODO: Deprecate the following entries, as they do not comply with the WSGI
        # spec. For custom variables, the spec says:
        #
        # Finally, the environ dictionary may also contain server-defined variables.
        # These variables should be named using only lower-case letters, numbers, dots,
        # and underscores, and should be prefixed with a name that is unique to the
        # defining server or gateway.
        "API_GATEWAY_AUTHORIZER": event.get("enhancedAuthContext"),
        "event": event,
        "context": context,
    }
    environ = setup_environ_items(environ, headers)
    response = Response.from_app(app, environ)
    returndict = generate_response(response, event)
    # Lambda integrations signal HTTP errors by raising; the serialized
    # response dict travels back through the raised message.
    if response.status_code >= 300:
        raise RuntimeError(json.dumps(returndict))
    return returndict
def report_print(self, data, context=None):
    """Render a report and send it to the configured DirectPrint printer.

    Returns a JSON string describing success/failure for the client UI,
    or the raw report response when no filename could be determined.
    """
    print_data = self._check_direct_print(data)
    if not print_data['can_print']:
        return json.dumps({
            'title': _('Printing not allowed!'),
            'message': _('Please check your DirectPrint settings or close and open app again'),
            'success': False,
            'notify': True,
        })
    rp = print_data['report_policy']
    printer_bin = print_data['printer_bin']
    printer_id = print_data['printer_id']
    # Finally if we reached this place - we can send report to printer.
    standard_response = self.report_download(data, 'fake-token', context)
    # If we do not have Content-Disposition headed, than no file-name
    # was generated (maybe error)
    content_disposition = standard_response.headers.get(
        'Content-Disposition')
    if not content_disposition:
        return standard_response
    # Extract the RFC 5987 (filename*=UTF-8'') encoded name and decode it.
    report_name = content_disposition.split(
        "attachment; filename*=UTF-8''")[1]
    report_name = url_unquote(report_name)
    # The print service expects the document base64-encoded.
    ascii_data = base64.b64encode(standard_response.data).decode('ascii')
    try:
        params = {
            'title': report_name,
            'type': print_data['report_type'],
            'size': rp and rp.report_paper_id,
            'options': {
                'bin': printer_bin.name
            } if printer_bin else {},
        }
        printer_id.printnode_print_b64(ascii_data, params)
    except Exception as e:
        _logger.exception(e)
        se = _serialize_exception(e)
        error = {'code': 200, 'message': "Odoo Server Error", 'data': se}
        return request.make_response(html_escape(json.dumps(error)))
    title = _('Report was sent to printer')
    message = _('Document "%s" was sent to printer %s') % (report_name,
                                                           printer_id.name)
    return json.dumps({
        'title': title,
        'message': message,
        'success': True,
        'notify': request.env.company.im_a_teapot
    })
def test_quoting(self):
    """Smoke-test quoting round-trips, url_encode and url_fix."""
    assert urls.url_quote(u'\xf6\xe4\xfc') == '%C3%B6%C3%A4%C3%BC'
    assert urls.url_unquote(urls.url_quote(u'#%="\xf6')) == u'#%="\xf6'
    assert urls.url_quote_plus('foo bar') == 'foo+bar'
    assert urls.url_unquote_plus('foo+bar') == 'foo bar'
    # None values are dropped from url_encode output.
    assert urls.url_encode({'a': None, 'b': 'foo bar'}) == 'b=foo+bar'
    assert urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)') == \
        'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
def make_environ(self):
    """Build the WSGI environ dict (PEP 3333) for the current request."""
    request_url = url_parse(self.path)

    def shutdown_server():
        # Flag checked by the serving loop to stop gracefully.
        self.server.shutdown_signal = True

    url_scheme = 'http' if self.server.ssl_context is None else 'https'

    # Normalise client_address into an (address, port) tuple; unix sockets
    # can yield an empty value or a bare string.
    if not self.client_address:
        self.client_address = '<local>'
    if isinstance(self.client_address, str):
        self.client_address = (self.client_address, 0)
    else:
        pass

    path_info = url_unquote(request_url.path)

    environ = {
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': url_scheme,
        'wsgi.input': self.rfile,
        'wsgi.errors': sys.stderr,
        'wsgi.multithread': self.server.multithread,
        'wsgi.multiprocess': self.server.multiprocess,
        'wsgi.run_once': False,
        'werkzeug.server.shutdown': shutdown_server,
        'SERVER_SOFTWARE': self.server_version,
        'REQUEST_METHOD': self.command,
        'SCRIPT_NAME': '',
        'PATH_INFO': wsgi_encoding_dance(path_info),
        'QUERY_STRING': wsgi_encoding_dance(request_url.query),
        # Non-standard, added by mod_wsgi, uWSGI
        "REQUEST_URI": wsgi_encoding_dance(self.path),
        # Non-standard, added by gunicorn
        "RAW_URI": wsgi_encoding_dance(self.path),
        'REMOTE_ADDR': self.address_string(),
        'REMOTE_PORT': self.port_integer(),
        'SERVER_NAME': self.server.server_address[0],
        'SERVER_PORT': str(self.server.server_address[1]),
        'SERVER_PROTOCOL': self.request_version
    }

    # Copy HTTP headers into HTTP_* keys, folding duplicates with commas.
    for key, value in self.get_header_items():
        key = key.upper().replace('-', '_')
        if key not in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            key = 'HTTP_' + key
            if key in environ:
                value = "{},{}".format(environ[key], value)
        environ[key] = value

    # Chunked bodies have no Content-Length; mark the stream as terminated
    # and wrap it so reads stop at the final chunk.
    if environ.get('HTTP_TRANSFER_ENCODING', '').strip().lower() == 'chunked':
        environ['wsgi.input_terminated'] = True
        environ['wsgi.input'] = DechunkedInput(environ['wsgi.input'])

    # Absolute-form request targets (proxy style) carry the host in the URL.
    if request_url.scheme and request_url.netloc:
        environ['HTTP_HOST'] = request_url.netloc

    return environ
def test_quoting(self):
    """Exercise quoting round-trips, url_encode (bytes and text keys), url_fix."""
    self.assert_strict_equal(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
    self.assert_strict_equal(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
    self.assert_strict_equal(urls.url_quote_plus('foo bar'), 'foo+bar')
    self.assert_strict_equal(urls.url_unquote_plus('foo+bar'), u'foo bar')
    # None values are dropped from url_encode output.
    self.assert_strict_equal(urls.url_encode({b'a': None, b'b': b'foo bar'}), 'b=foo+bar')
    self.assert_strict_equal(urls.url_encode({u'a': None, u'b': u'foo bar'}), 'b=foo+bar')
    self.assert_strict_equal(urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
                             'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
def _file_to_title(self, filepath):
    """Convert a repository file path back into its page title."""
    name = filepath[len(self.repo_prefix):].strip(os.path.sep)
    # Un-escape special windows filenames and dot files (leading '_').
    if len(name) > 1 and name.startswith('_'):
        name = name[1:]
    if self.extension and name.endswith(self.extension):
        name = name[:-len(self.extension)]
    return url_unquote(name)
def test_quoting(self):
    """Smoke-test quoting round-trips, url_encode and url_fix."""
    assert urls.url_quote(u"\xf6\xe4\xfc") == "%C3%B6%C3%A4%C3%BC"
    assert urls.url_unquote(urls.url_quote(u'#%="\xf6')) == u'#%="\xf6'
    assert urls.url_quote_plus("foo bar") == "foo+bar"
    assert urls.url_unquote_plus("foo+bar") == "foo bar"
    # None values are dropped from url_encode output.
    assert urls.url_encode({"a": None, "b": "foo bar"}) == "b=foo+bar"
    assert (
        urls.url_fix(u"http://de.wikipedia.org/wiki/Elf (Begriffsklärung)")
        == "http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29"
    )
def delete(self):
    """Remove the named playlist from the registry and broadcast the change."""
    from txplaya.playlistregistry import playlistRegistry
    name = url_unquote(self.playlistNameArg)
    playlistRegistry.deletePlaylist(name)
    # Tell every connected client the registry contents changed.
    self.mainController.announce({
        'event': 'PlaylistRegistryUpdated',
        'data': {'list': playlistRegistry.list_()},
    })
    return {'msg': 'Playlist deleted'}
def worker(domain, save_rules):
    """Worker process, fetches url from the front, crawl, push new urls to front
    and pushes content to sink if matched on a rule"""
    ctx = zmq.Context()
    # REQ socket: ask the frontier for the next URL to crawl.
    worker_ = ctx.socket(zmq.REQ)
    worker_.connect('tcp://localhost:5050')
    # PUSH socket: deliver matched page payloads to the saver.
    saver = ctx.socket(zmq.PUSH)
    saver.connect('tcp://localhost:5051')
    # PUSH socket: feed newly discovered URLs back to the frontier.
    urlsink = ctx.socket(zmq.PUSH)
    urlsink.connect('tcp://localhost:5052')
    # Werkzeug routing map decides which URLs' content should be saved.
    matcher = Map(map(Rule, save_rules)).bind('', '/').match
    while True:
        worker_.send('')
        url = worker_.recv().decode('utf-8')
        try:
            q = rq.get(u'http://%s%s' % (domain, url_unquote(url)), allow_redirects = False)
        except ConnectionError:
            continue
        # Follow same-domain redirects by re-queueing the target path.
        if q.status_code == 301 or q.status_code == 302:
            redirect = q.headers['location']
            if domain in redirect:
                # only sent to front
                urlsink.send(redirect.split(domain)[1].encode('utf-8'))
            continue
        html = q.content
        try:
            _, data = matcher(url)
        except NotFound:
            pass
        else:
            # needs to be saved, sends html, url, data to saver
            data = zlib.compress(json.dumps([html, url, data]))
            saver.send(data)
            del data
        # Harvest outbound links; keep only same-domain or relative ones.
        fetched = set()
        for link in fromstring(html).cssselect("a[href]"):
            link = link.attrib['href'].split('#')[0]
            if link.startswith('file://') or link.startswith('javascript:'):
                continue
            if not link.startswith('http'):
                fetched.add(link)
            elif domain in link:
                fetched.add(link.split(domain)[1])
        for l in fetched:
            urlsink.send(l.encode('utf-8'))
def _file_to_title(self, filepath):
    """Convert a repository file path back into its page title.

    Raises ForbiddenErr for paths outside the pages repository.
    """
    _ = self._
    if not filepath.startswith(self.repo_prefix):
        raise error.ForbiddenErr(
            _("Can't read or write outside of the pages repository"))
    name = filepath[len(self.repo_prefix):].strip(os.path.sep)
    # Un-escape special windows filenames and dot files (leading '_').
    if len(name) > 1 and name.startswith('_'):
        name = name[1:]
    if self.extension and name.endswith(self.extension):
        name = name[:-len(self.extension)]
    return url_unquote(name)
def test_quoting():
    """Exercise url_quote/url_unquote (and _plus variants), url_encode, url_fix."""
    strict_eq(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
    strict_eq(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
    strict_eq(urls.url_quote_plus('foo bar'), 'foo+bar')
    strict_eq(urls.url_unquote_plus('foo+bar'), u'foo bar')
    strict_eq(urls.url_quote_plus('foo+bar'), 'foo%2Bbar')
    strict_eq(urls.url_unquote_plus('foo%2Bbar'), u'foo+bar')
    # None values are dropped from url_encode output.
    strict_eq(urls.url_encode({b'a': None, b'b': b'foo bar'}), 'b=foo+bar')
    strict_eq(urls.url_encode({u'a': None, u'b': u'foo bar'}), 'b=foo+bar')
    strict_eq(urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
              'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
    # Non-string input is coerced to text before quoting.
    strict_eq(urls.url_quote_plus(42), '42')
    strict_eq(urls.url_quote(b'\xff'), '%FF')
def wrapped(request, **kwargs):
    """Resolve the graph named in the URL and pass it through to the handler."""
    graphName = url_unquote(kwargs.pop(argName))
    try:
        graph = request.app.graphs[graphName]
    except KeyError:
        message = 'graph {0} not found'.format(graphName)
        return request.respondJson({'message': message}, NOT_FOUND)
    # Handlers expect the resolved graph (tagged with its name) in place of
    # the raw name argument.
    graph.name = graphName
    kwargs[argName] = graph
    return fc(request, **kwargs)
def component_search(search_str):
    """Render components whose description matches the search string."""
    search_str = url_unquote(search_str)
    pattern = "%" + search_str + "%"
    cpts = db.session.query(Component).filter(
        Component.description.like(pattern)).all()
    if not cpts:
        notfound_msg = Markup("Could not find software matching the search terms: <b>%s</b>") % (search_str)
        return render_template('notfound.html', message=notfound_msg)
    items = [component_to_item(cpt) for cpt in cpts]
    return render_template('results.html', title="Search results", items=items)
def handle_lambda_integration(app, event, context):
    """Translate an API Gateway Lambda-integration event into a WSGI call."""
    headers = Headers(event["headers"])
    script_name = get_script_name(headers, event)
    path_info = event["requestPath"]
    # Substitute path parameters ({key} and greedy {key+}) into the template path.
    for key, value in event.get("path", {}).items():
        path_info = path_info.replace("{%s}" % key, value)
        path_info = path_info.replace("{%s+}" % key, value)
    body = event.get("body", {})
    body = json.dumps(body) if body else ""
    body = get_body_bytes(event, body)
    environ = {
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": headers.get("Content-Type", ""),
        "PATH_INFO": url_unquote(path_info),
        "QUERY_STRING": url_encode(event.get("query", {})),
        "REMOTE_ADDR": event.get("identity", {}).get("sourceIp", ""),
        "REMOTE_USER": event.get("principalId", ""),
        "REQUEST_METHOD": event.get("method", ""),
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": headers.get("Host", "lambda"),
        "SERVER_PORT": headers.get("X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": "HTTP/1.1",
        "wsgi.errors": sys.stderr,
        "wsgi.input": io.BytesIO(body),
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
        "wsgi.url_scheme": headers.get("X-Forwarded-Proto", "http"),
        "wsgi.version": (1, 0),
        "serverless.authorizer": event.get("enhancedAuthContext"),
        "serverless.event": event,
        "serverless.context": context,
    }
    environ = setup_environ_items(environ, headers)
    response = Response.from_app(app, environ)
    returndict = generate_response(response, event)
    # Lambda integrations signal HTTP errors by raising; the serialized
    # response dict travels back through the raised message.
    if response.status_code >= 300:
        raise RuntimeError(json.dumps(returndict))
    return returndict
def login(req):
    """Handle the login form: authenticate on POST, render the form on GET."""
    if req.method == 'POST':
        if "username" not in req.form or "password" not in req.form:
            return status(req, 400)
        username = req.form["username"]
        password = req.form["password"]
        auth_result = auth_mgr.try_log_in(username, password)
        if auth_result == AuthManager.USER_NOT_FOUND:
            return HTMLResponse(
                render_path(
                    "tmpl/login.htmo", {
                        "base": MOUNT_POINT,
                        "bad_username": True,
                        "bad_password": False,
                    }),
                status=403,  # This one is iffy.
            )
        elif auth_result == AuthManager.PW_WRONG:
            return HTMLResponse(
                render_path(
                    "tmpl/login.htmo", {
                        "base": MOUNT_POINT,
                        "bad_username": False,
                        "bad_password": True,
                    }),
                status=403,  # This one is iffy.
            )
        else:
            # Success: auth_result is a (session id, expiration) pair.
            id_, expiration = auth_result
            # NOTE(review): "from" is attacker-controlled; the MOUNT_POINT
            # prefix constrains it, but confirm values like "//evil.example"
            # cannot escape the mount point (open-redirect risk).
            from_ = url_unquote(req.args.get("from", ""))
            resp = redirect(MOUNT_POINT + from_, code=303)
            resp.set_cookie(COOKIE_NAME, id_, expires=expiration, secure=True)
            return resp
    if auth_mgr.cookie_to_username(req.cookies.get(COOKIE_NAME)):
        # Already logged in.
        return redirect(MOUNT_POINT, code=303)
    else:
        resp = HTMLResponse(
            render_path(
                "tmpl/login.htmo", {
                    "base": MOUNT_POINT,
                    "bad_username": False,
                    "bad_password": False,
                }),
        )
        # Drop any stale session cookie when showing a fresh form.
        resp.delete_cookie(COOKIE_NAME)
        return resp
def component_search(search_str):
    """Render components whose description matches the search string."""
    search_str = url_unquote(search_str)
    pattern = "%" + search_str + "%"
    cpts = db.session.query(Component).filter(
        Component.description.like(pattern)).all()
    if not cpts:
        notfound_msg = Markup(
            "Could not find software matching the search terms: <b>%s</b>") % (
                search_str)
        return render_template('notfound.html', message=notfound_msg)
    items = [component_to_item(cpt) for cpt in cpts]
    return render_template('results.html', title="Search results", items=items)
def test_quoting():
    """Exercise url_quote/url_unquote (and _plus variants), url_encode, url_fix."""
    assert urls.url_quote("\xf6\xe4\xfc") == "%C3%B6%C3%A4%C3%BC"
    assert urls.url_unquote(urls.url_quote('#%="\xf6')) == '#%="\xf6'
    assert urls.url_quote_plus("foo bar") == "foo+bar"
    assert urls.url_unquote_plus("foo+bar") == "foo bar"
    assert urls.url_quote_plus("foo+bar") == "foo%2Bbar"
    assert urls.url_unquote_plus("foo%2Bbar") == "foo+bar"
    # None values are dropped from url_encode output.
    assert urls.url_encode({b"a": None, b"b": b"foo bar"}) == "b=foo+bar"
    assert urls.url_encode({"a": None, "b": "foo bar"}) == "b=foo+bar"
    assert (
        urls.url_fix("http://de.wikipedia.org/wiki/Elf (Begriffsklärung)")
        == "http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)"
    )
    # Non-string input is coerced to text before quoting.
    assert urls.url_quote_plus(42) == "42"
    assert urls.url_quote(b"\xff") == "%FF"
def listEdges(request, graph, startNode):
    """Respond with the (filtered, paged) edges from a node to its neighbours."""
    start = url_unquote(startNode)
    # Bug fix: the existence check previously ran on the still URL-encoded
    # name (unlike the sibling handlers, which unquote first), so any node
    # whose name needed percent-encoding produced a false 404.
    if not graph.has_node(start):
        return request.respondJson({'message': 'node not in graph'}, NOT_FOUND)
    iNeighborsEdges = _iterNeigborsEdges(graph, start)
    # Posted JSON attributes act as an edge filter.
    attrib = request.json()
    iNeighborsEdges = _iterFilterEdges(iNeighborsEdges, attrib)
    offset, limit = _getPaging(request)
    iNeighborsEdges = islice(iNeighborsEdges, offset, offset + limit)
    request.respondJson({'neighbors': dict(iNeighborsEdges)})
def test_quoting():
    """Exercise url_quote/url_unquote (and _plus variants), url_encode, url_fix."""
    strict_eq(urls.url_quote(u"\xf6\xe4\xfc"), "%C3%B6%C3%A4%C3%BC")
    strict_eq(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
    strict_eq(urls.url_quote_plus("foo bar"), "foo+bar")
    strict_eq(urls.url_unquote_plus("foo+bar"), u"foo bar")
    strict_eq(urls.url_quote_plus("foo+bar"), "foo%2Bbar")
    strict_eq(urls.url_unquote_plus("foo%2Bbar"), u"foo+bar")
    # None values are dropped from url_encode output.
    strict_eq(urls.url_encode({b"a": None, b"b": b"foo bar"}), "b=foo+bar")
    strict_eq(urls.url_encode({u"a": None, u"b": u"foo bar"}), "b=foo+bar")
    strict_eq(
        urls.url_fix(u"http://de.wikipedia.org/wiki/Elf (Begriffsklärung)"),
        "http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)",
    )
    # Non-string input is coerced to text before quoting.
    strict_eq(urls.url_quote_plus(42), "42")
    strict_eq(urls.url_quote(b"\xff"), "%FF")
def post():
    """
    get vulnerable file content
    :return:
    """
    data = request.json
    if not data or data == "":
        return {'code': 1003, 'msg': 'Only support json, please post json data.'}
    sid = data.get('sid')
    # file_path arrives URL-encoded from the client.
    file_path = url_unquote(data.get('file_path'))
    if not sid or sid == '':
        return {"code": 1002, "msg": "sid is required."}
    if not file_path or file_path == '':
        return {'code': 1002, 'msg': 'file_path is required.'}
    # Each scan target has a ``<sid>_data`` JSON file recording its result.
    s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=sid))
    if not os.path.exists(s_sid_file):
        return {'code': 1002, 'msg': 'No such target.'}
    with open(s_sid_file, 'r') as f:
        target_directory = json.load(f).get('result').get('target_directory')
    if not target_directory or target_directory == '':
        return {'code': 1002, 'msg': 'No such directory'}
    # secure_filename() each path component to prevent directory traversal.
    if PY2:
        file_path = map(secure_filename, [path.decode('utf-8') for path in file_path.split('/')])
    else:
        file_path = map(secure_filename, [path for path in file_path.split('/')])
    filename = target_directory
    for _dir in file_path:
        filename = os.path.join(filename, _dir)
    if os.path.exists(filename):
        extension = guess_type(filename)
        if is_text(filename):
            with open(filename, 'r') as f:
                file_content = f.read()
        else:
            file_content = 'This is a binary file.'
    else:
        return {'code': 1002, 'msg': 'No such file.'}
    return {'code': 1001, 'result': {'file_content': file_content, 'extension': extension}}
def deleteNode(request, graph, nodeName):
    """Delete a node, refusing with 409 while it still has any edge."""
    name = url_unquote(nodeName)
    if not graph.has_node(name):
        return request.respondJson({'message': 'node not in graph'}, NOT_FOUND)
    for _neighbor in graph.neighbors_iter(name):
        # A single neighbour is enough: the node is still wired up.
        return request.respondJson({'message': 'node has edges'}, CONFLICT)
    graph.remove_node(name)
    request.respondJson({'message': 'node deleted'})
def make_environ(self):
    """Build the WSGI environ dict (PEP 3333) for the current request."""
    request_url = url_parse(self.path)

    def shutdown_server():
        # Flag checked by the serving loop to stop gracefully.
        self.server.shutdown_signal = True

    url_scheme = self.server.ssl_context is None and 'http' or 'https'
    path_info = url_unquote(request_url.path)

    environ = {
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': url_scheme,
        'wsgi.input': self.rfile,
        'wsgi.errors': sys.stderr,
        'wsgi.multithread': self.server.multithread,
        'wsgi.multiprocess': self.server.multiprocess,
        'wsgi.run_once': False,
        'werkzeug.server.shutdown': shutdown_server,
        'SERVER_SOFTWARE': self.server_version,
        'REQUEST_METHOD': self.command,
        'SCRIPT_NAME': '',
        'PATH_INFO': wsgi_encoding_dance(path_info),
        'QUERY_STRING': wsgi_encoding_dance(request_url.query),
        'CONTENT_TYPE': self.headers.get('Content-Type', ''),
        'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
        'REMOTE_ADDR': self.client_address[0],
        'REMOTE_PORT': self.client_address[1],
        'SERVER_NAME': self.server.server_address[0],
        'SERVER_PORT': str(self.server.server_address[1]),
        'SERVER_PROTOCOL': self.request_version
    }

    # Copy raw header lines into HTTP_* keys, folding duplicates with commas;
    # Content-Type/Length were already set above from the parsed headers.
    for h in self.headers.headers:
        key, value = h.split(':', 1)
        key = 'HTTP_' + key.upper().replace('-', '_')
        value = value.strip()
        if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
            if key in environ:
                environ[key] += ',' + value
            else:
                environ[key] = value

    # Absolute-form request targets (proxy style) carry the host in the URL.
    if request_url.netloc:
        environ['HTTP_HOST'] = request_url.netloc

    return environ
def get(self, name_or_id):
    """Return one album — or the synthetic ``_all`` album — with its stats."""
    if name_or_id == "_all":
        # Pseudo-album aggregating statistics over every photo.
        album = Album(name="_all")
        query = self.stats_query
    else:
        album = get_album(g.session, urls.url_unquote(name_or_id))
        if album is None:
            abort(404, "Album named %s not found" % name_or_id)
        query = self.stats_query.where(Photo.albums.contains(album))
    stats = g.session.execute(query).first()
    # Attach the computed statistics directly onto the album object.
    for key, value in stats.items():
        setattr(album, key, value)
    return album
def make_environ(self):
    """Build the WSGI environ dict (PEP 3333) for the current request."""
    request_url = url_parse(self.path)

    def shutdown_server():
        # Flag checked by the serving loop to stop gracefully.
        self.server.shutdown_signal = True

    url_scheme = self.server.ssl_context is None and 'http' or 'https'
    path_info = url_unquote(request_url.path)

    environ = {
        # Non-standard: per-request id for correlating log entries.
        'REQUEST_UUID': uuid.uuid4().hex,
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': url_scheme,
        'wsgi.input': self.rfile,
        'wsgi.errors': sys.stderr,
        'wsgi.multithread': self.server.multithread,
        'wsgi.multiprocess': self.server.multiprocess,
        'wsgi.run_once': False,
        'werkzeug.server.shutdown': shutdown_server,
        'SERVER_SOFTWARE': self.server_version,
        'REQUEST_METHOD': self.command,
        'SCRIPT_NAME': '',
        'PATH_INFO': wsgi_encoding_dance(path_info),
        'QUERY_STRING': wsgi_encoding_dance(request_url.query),
        'REMOTE_ADDR': self.address_string(),
        'REMOTE_PORT': self.port_integer(),
        'SERVER_NAME': self.server.server_address[0],
        'SERVER_PORT': str(self.server.server_address[1]),
        'SERVER_PROTOCOL': self.request_version
    }

    # Copy HTTP headers into HTTP_* keys (last duplicate wins here).
    for key, value in self.headers.items():
        key = key.upper().replace('-', '_')
        if key not in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            key = 'HTTP_' + key
        environ[key] = value

    # Chunked bodies have no Content-Length; mark the stream as terminated
    # and wrap it so reads stop at the final chunk.
    if environ.get('HTTP_TRANSFER_ENCODING', '').strip().lower() == 'chunked':
        environ['wsgi.input_terminated'] = True
        environ['wsgi.input'] = DechunkedInput(environ['wsgi.input'])

    # Absolute-form request targets (proxy style) carry the host in the URL.
    if request_url.scheme and request_url.netloc:
        environ['HTTP_HOST'] = request_url.netloc

    return environ
def find_feature(kind, value):
    """Render components that provide the feature ``[kind] value``."""
    value = url_unquote(value)
    feature_items = db.session.query(ProvidedItem).filter_by(kind=kind, value=value).all()
    # Fix: removed leftover debug statement ``print value`` (Python-2-only
    # syntax, and wrote the raw value to stdout on every request).
    if not feature_items:
        notfound_msg = Markup("Could not find software providing \"<b>[<i>%s</i>] %s</b>\"") % (kind, value)
        return render_template('notfound.html', message = notfound_msg)
    items = list()
    for feature_item in feature_items:
        cpt = feature_item.version.component
        item = component_to_item(cpt)
        # Tag each result with the providing version.
        item['name'] = "%s (%s)" % (item['name'], feature_item.version.version)
        items.append(item)
    return render_template('results.html', title = "Search results", items = items)
def make_environ(self): """ Create an environment that can be used with werkzeug """ # Derived from werkzeug's WSGIRequestHandler from werkzeug.urls import url_parse, url_unquote from werkzeug._compat import wsgi_encoding_dance request_url = url_parse(self.path) url_scheme = "http" path_info = url_unquote(request_url.path) environ = { 'wsgi.version': (1, 0), 'wsgi.url_scheme': url_scheme, 'wsgi.input': self.rfile, 'wsgi.errors': sys.stderr, 'wsgi.multithread': False, 'wsgi.multiprocess': True, 'wsgi.run_once': False, 'SERVER_SOFTWARE': self.server_version, 'REQUEST_METHOD': self.command, 'SCRIPT_NAME': '', 'PATH_INFO': wsgi_encoding_dance(path_info), 'QUERY_STRING': wsgi_encoding_dance(request_url.query), 'CONTENT_TYPE': self.headers.get('Content-Type', ''), 'CONTENT_LENGTH': self.headers.get('Content-Length', ''), 'REMOTE_ADDR': self.client_address[0], 'REMOTE_PORT': self.client_address[1], 'SERVER_NAME': self.server.server_address[0], 'SERVER_PORT': str(self.server.server_address[1]), 'SERVER_PROTOCOL': self.request_version } for key, value in self.headers.items(): key = 'HTTP_' + key.upper().replace('-', '_') if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): environ[key] = value if request_url.netloc: environ['HTTP_HOST'] = request_url.netloc return environ
def history(request, title):
    """Display history of changes of a page."""
    # Fallback value for parent_rev when the page has no usable revisions.
    max_rev = '0' * 40
    history = []
    title = urls.url_unquote(title)
    page = hatta.page.get_page(request, title)
    # only text pages should show a link for diffs
    can_diff = getattr(page, 'diff_content', False)
    if title not in request.wiki.storage:
        _ = request.wiki.gettext
        raise hatta.error.NotFoundErr(_("Page not found."))
    for item in request.wiki.storage.page_history(title):
        parent = item['parent']
        if can_diff:
            if parent:
                # Link the entry's date to a diff against its parent revision.
                date_url = request.adapter.build('diff', {
                    'title': title,
                    'from_rev': parent,
                    'to_rev': item['rev'],
                })
            else:
                # First revision: nothing to diff against, link the revision view.
                date_url = request.adapter.build('revision', {
                    'title': title,
                    'rev': item['rev'],
                })
        else:
            # Non-text page: offer a download of that revision instead.
            date_url = request.adapter.build('download_rev', {
                'title': title,
                'rev': item['rev']
            })
        item['date_url'] = date_url
        history.append(item)
        if item['rev']:
            # Ends up holding the rev of the LAST item page_history yields;
            # NOTE(review): presumably that is the revision wanted as
            # parent_rev for the template — confirm iteration order.
            max_rev = item['rev']
    phtml = page.template('history.html', history=history,
                          date_html=hatta.page.date_html, parent_rev=max_rev)
    resp = response(request, title, phtml, '/history')
    return resp
def make_environ(self):
    """Build the WSGI environ mapping (PEP 3333) for the current request.

    Also installs the non-standard ``werkzeug.server.shutdown`` callback
    and transparently de-chunks chunked request bodies.
    """
    request_url = url_parse(self.path)

    def shutdown_server():
        # Flag polled by the serving loop; lets the WSGI app request shutdown.
        self.server.shutdown_signal = True

    # Conditional expression instead of the fragile `X and a or b` idiom
    # (which silently breaks if `a` is ever falsy).
    url_scheme = "http" if self.server.ssl_context is None else "https"
    path_info = url_unquote(request_url.path)
    environ = {
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": url_scheme,
        "wsgi.input": self.rfile,
        "wsgi.errors": sys.stderr,
        "wsgi.multithread": self.server.multithread,
        "wsgi.multiprocess": self.server.multiprocess,
        "wsgi.run_once": False,
        "werkzeug.server.shutdown": shutdown_server,
        "SERVER_SOFTWARE": self.server_version,
        "REQUEST_METHOD": self.command,
        "SCRIPT_NAME": "",
        "PATH_INFO": wsgi_encoding_dance(path_info),
        "QUERY_STRING": wsgi_encoding_dance(request_url.query),
        "REMOTE_ADDR": self.address_string(),
        "REMOTE_PORT": self.port_integer(),
        "SERVER_NAME": self.server.server_address[0],
        "SERVER_PORT": str(self.server.server_address[1]),
        "SERVER_PROTOCOL": self.request_version,
    }
    # Copy request headers as HTTP_*; CONTENT_TYPE/CONTENT_LENGTH keep
    # their CGI names without the HTTP_ prefix.
    for key, value in self.headers.items():
        key = key.upper().replace("-", "_")
        if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
            key = "HTTP_" + key
        environ[key] = value
    # Chunked bodies are de-chunked transparently for the application.
    if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked":
        environ["wsgi.input_terminated"] = True
        environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"])
    # Absolute-form request line (proxy style) carries the Host in the URL.
    if request_url.scheme and request_url.netloc:
        environ["HTTP_HOST"] = request_url.netloc
    return environ
def find_feature(kind, value):
    """Render search results for software providing the feature (kind, value).

    The value arrives URL-encoded; decode it before querying. Returns the
    'notfound' template when nothing provides the feature, otherwise a
    results page listing each providing component with its version.
    """
    value = url_unquote(value)
    feature_items = db.session.query(ProvidedItem).filter_by(
        kind=kind, value=value).all()
    # NOTE: removed a leftover debug `print value` statement that wrote the
    # raw search value to stdout on every request.
    if not feature_items:
        notfound_msg = Markup(
            "Could not find software providing \"<b>[<i>%s</i>] %s</b>\"") % (
            kind, value)
        return render_template('notfound.html', message=notfound_msg)
    items = list()
    for feature_item in feature_items:
        cpt = feature_item.version.component
        item = component_to_item(cpt)
        # Disambiguate identical names by appending the providing version.
        item['name'] = "%s (%s)" % (item['name'], feature_item.version.version)
        items.append(item)
    return render_template('results.html', title="Search results", items=items)
def make_environ(self):
    """Build the WSGI environ mapping (PEP 3333) for the current request.

    Installs the non-standard ``werkzeug.server.shutdown`` callback the
    application may invoke to stop the server.
    """
    request_url = url_parse(self.path)

    def shutdown_server():
        # Flag polled by the serving loop; lets the WSGI app request shutdown.
        self.server.shutdown_signal = True

    # Conditional expression instead of the fragile `X and a or b` idiom
    # (which silently breaks if `a` is ever falsy).
    url_scheme = 'http' if self.server.ssl_context is None else 'https'
    path_info = url_unquote(request_url.path)
    environ = {
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': url_scheme,
        'wsgi.input': self.rfile,
        'wsgi.errors': sys.stderr,
        'wsgi.multithread': self.server.multithread,
        'wsgi.multiprocess': self.server.multiprocess,
        'wsgi.run_once': False,
        'werkzeug.server.shutdown': shutdown_server,
        'SERVER_SOFTWARE': self.server_version,
        'REQUEST_METHOD': self.command,
        'SCRIPT_NAME': '',
        'PATH_INFO': wsgi_encoding_dance(path_info),
        'QUERY_STRING': wsgi_encoding_dance(request_url.query),
        'CONTENT_TYPE': self.headers.get('Content-Type', ''),
        'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
        'REMOTE_ADDR': self.client_address[0],
        'REMOTE_PORT': self.client_address[1],
        'SERVER_NAME': self.server.server_address[0],
        'SERVER_PORT': str(self.server.server_address[1]),
        'SERVER_PROTOCOL': self.request_version
    }
    # Copy remaining headers as HTTP_*; Content-Type/Length are already in
    # the dict under their unprefixed CGI names.
    for key, value in self.headers.items():
        key = 'HTTP_' + key.upper().replace('-', '_')
        if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
            environ[key] = value
    # Absolute-form request line (proxy style) carries the Host in the URL.
    if request_url.netloc:
        environ['HTTP_HOST'] = request_url.netloc
    return environ
def crawl_page(url):
    """Yield (section title, document title, page url) for each .xlsx link.

    Scrapes the given page, extracts the section heading from the
    '#ArticleTitle' element (falling back to a generic label), and yields
    one tuple per spreadsheet link found on the page.
    """
    # Randomized delay so we do not hammer the server.
    sleep(random() + 0.5)
    response = rq.get(url)
    tree = fromstring(response.content)
    tree.make_links_absolute(url)
    title_nodes = tree.cssselect('#ArticleTitle')
    if title_nodes:
        section_title = title_nodes[0].text_content().strip()
    else:
        section_title = u'Народна банка - останато'
    for anchor in tree.cssselect("a[href]"):
        href = anchor.attrib['href']
        if href.endswith('.xlsx'):
            # Title = file name without the '.xlsx' suffix, de-quoted and
            # stripped of underscore separators / CMS boilerplate.
            raw_title = href.split('/')[-1][:-5]
            cleaned = url_unquote(raw_title).replace('_', ' ').replace('WebBuilder', '').strip()
            yield section_title, cleaned, url
def make_environ(self):
    """Build the WSGI environ mapping (PEP 3333) for the current request.

    Installs the non-standard ``werkzeug.server.shutdown`` callback the
    application may invoke to stop the server.
    """
    request_url = url_parse(self.path)

    def shutdown_server():
        # Flag polled by the serving loop; lets the WSGI app request shutdown.
        self.server.shutdown_signal = True

    # Conditional expression instead of the fragile `X and a or b` idiom
    # (which silently breaks if `a` is ever falsy).
    url_scheme = "http" if self.server.ssl_context is None else "https"
    path_info = url_unquote(request_url.path)
    environ = {
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": url_scheme,
        "wsgi.input": self.rfile,
        "wsgi.errors": sys.stderr,
        "wsgi.multithread": self.server.multithread,
        "wsgi.multiprocess": self.server.multiprocess,
        "wsgi.run_once": False,
        "werkzeug.server.shutdown": shutdown_server,
        "SERVER_SOFTWARE": self.server_version,
        "REQUEST_METHOD": self.command,
        "SCRIPT_NAME": "",
        "PATH_INFO": wsgi_encoding_dance(path_info),
        "QUERY_STRING": wsgi_encoding_dance(request_url.query),
        "CONTENT_TYPE": self.headers.get("Content-Type", ""),
        "CONTENT_LENGTH": self.headers.get("Content-Length", ""),
        "REMOTE_ADDR": self.client_address[0],
        "REMOTE_PORT": self.client_address[1],
        "SERVER_NAME": self.server.server_address[0],
        "SERVER_PORT": str(self.server.server_address[1]),
        "SERVER_PROTOCOL": self.request_version,
    }
    # Copy remaining headers as HTTP_*; Content-Type/Length are already in
    # the dict under their unprefixed CGI names.
    for key, value in self.headers.items():
        key = "HTTP_" + key.upper().replace("-", "_")
        if key not in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
            environ[key] = value
    # Absolute-form request line (proxy style) carries the Host in the URL.
    if request_url.netloc:
        environ["HTTP_HOST"] = request_url.netloc
    return environ
def make_environ(self): request_url = url_parse(self.path) # url_scheme = self.server.ssl_context is None and 'http' or 'https' path_info = url_unquote(request_url.path) environ = { 'awsgi.protocol': self, 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': self.buffer, 'wsgi.errors': sys.stderr, 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': True, 'REQUEST_METHOD': self.parser.get_method().decode('utf8'), 'SCRIPT_NAME': '', 'PATH_INFO': path_info, 'QUERY_STRING': request_url.query.decode('utf8'), 'CONTENT_TYPE': self.headers.get('Content-Type', ''), 'CONTENT_LENGTH': self.headers.get('Content-Length', ''), 'REMOTE_ADDR': self.transport.get_extra_info('socket').getpeername()[0], 'REMOTE_PORT': self.transport.get_extra_info('socket').getpeername()[1], 'SERVER_NAME': self.transport.get_extra_info('socket').getsockname()[0], 'SERVER_PORT': self.transport.get_extra_info('socket').getsockname()[1], 'SERVER_PROTOCOL': '' } for key, value in self.headers.items(): key = 'HTTP_' + key.upper().replace('-', '_') if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): environ[key] = value if request_url.scheme and request_url.netloc: environ['HTTP_HOST'] = request_url.netloc return environ
def customer_edit(customer_id=None):
    """Create (no id) or edit (id given) a customer via the customer form.

    On successful submit, persists the customer and redirects to its detail
    page; otherwise re-renders the form. Always clears the 'customer_name'
    pre-fill cookie on the outgoing response.
    """
    customer = Customer()
    msg = "El nuevo cliente se creó satisfactoriamente"
    if customer_id:
        customer = Customer.query.get_or_404(customer_id)
        msg = "El cliente se editó satisfactoriamente"
    # Pre-fill the name from the URL-quoted cookie when the record has none.
    if 'customer_name' in request.cookies and not customer.name:
        customer.name = url_unquote(request.cookies.get('customer_name'))
    form = CustomerForm(obj=customer)
    if form.validate_on_submit():
        form.populate_obj(customer)
        # Normalize a falsy id (e.g. empty string from the form) to None so
        # the database assigns one on insert.
        if not customer.id:
            customer.id = None
        db.session.add(customer)
        db.session.commit()
        flash(msg)
        resp = make_response(
            redirect(url_for('customer_detail', customer_id=customer.id)))
    else:
        if not form.id.data:
            form.id.data = None
        resp = make_response(render_template("customer_edit.html", form=form))
    # Clear the pre-fill cookie regardless of outcome.
    resp.set_cookie("customer_name", '')
    return resp
def edit(supplier_id=None):
    """Create (no id) or edit (id given) a supplier via the supplier form.

    On successful submit, persists the supplier and redirects to its detail
    page; otherwise re-renders the form. Always clears the 'supplier_name'
    pre-fill cookie on the outgoing response.
    """
    supplier = Supplier()
    msg = u'El nuevo proveedor se creó satisfactoriamente.'
    if supplier_id:
        supplier = Supplier.query.get_or_404(supplier_id)
        msg = u'El proveedor se modificó satisfactoriamente.'
    # Pre-fill the name from the URL-quoted cookie when the record has none.
    if 'supplier_name' in request.cookies and not supplier.name:
        supplier.name = url_unquote(request.cookies.get('supplier_name'))
    form = SupplierForm(obj=supplier)
    if form.validate_on_submit():
        form.populate_obj(supplier)
        # Normalize a falsy id to None so the database assigns one on insert.
        if not supplier.id:
            supplier.id = None  # ????
        db.session.add(supplier)
        db.session.commit()
        flash(msg)
        resp = make_response(
            redirect(url_for('.detail', supplier_id=supplier.id)))
    else:
        if not form.id.data:
            form.id.data = None
        resp = make_response(render_template('supplier/edit.html', form=form))
    # Clear the pre-fill cookie regardless of outcome.
    resp.set_cookie('supplier_name', '')
    return resp
def _path_encode(x):
    # Percent-decode with the request charset, then re-encode the result
    # into the WSGI-string representation expected by the environ.
    decoded = url_unquote(x, self.charset)
    return wsgi_encoding_dance(decoded, self.charset)
def create_wsgi_request(event_info,
                        server_name='zappa',
                        script_name=None,
                        trailing_slash=True,
                        binary_support=False,
                        context_header_mappings={}
                        ):
    """
    Given some event_info via API Gateway, create and return a valid WSGI
    request environ.
    """
    method = event_info['httpMethod']
    params = event_info['pathParameters']  # currently unused, kept for parity
    query = event_info['queryStringParameters']  # APIGW won't allow multiple entries, ex ?id=a&id=b
    headers = event_info['headers'] or {}  # Allow for the AGW console 'Test' button to work (Pull #735)

    # Map dotted paths out of the request context into headers, if configured.
    if context_header_mappings:
        for key, value in context_header_mappings.items():
            parts = value.split('.')
            header_val = event_info['requestContext']
            for part in parts:
                if part not in header_val:
                    header_val = None
                    break
                else:
                    header_val = header_val[part]
            if header_val is not None:
                headers[key] = header_val

    # Extract remote user from context if Authorizer is enabled
    remote_user = None
    if event_info['requestContext'].get('authorizer'):
        remote_user = event_info['requestContext']['authorizer'].get('principalId')
    elif event_info['requestContext'].get('identity'):
        remote_user = event_info['requestContext']['identity'].get('userArn')

    # Related: https://github.com/Miserlou/Zappa/issues/677
    #   https://github.com/Miserlou/Zappa/issues/683
    #   https://github.com/Miserlou/Zappa/issues/696
    #   https://github.com/Miserlou/Zappa/issues/836
    # https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Summary_table
    if binary_support and (method in BINARY_METHODS):
        if event_info.get('isBase64Encoded', False):
            encoded_body = event_info['body']
            body = base64.b64decode(encoded_body)
        else:
            body = event_info['body']
            if isinstance(body, six.string_types):
                body = body.encode("utf-8")
    else:
        body = event_info['body']
        if isinstance(body, six.string_types):
            body = body.encode("utf-8")

    # Make header names canonical, e.g. content-type => Content-Type.
    # BUGFIX: iterate a snapshot of the keys — mutating a dict while
    # iterating its live key view raises RuntimeError on Python 3.
    for header in list(headers.keys()):
        canonical = header.title()
        if canonical != header:
            headers[canonical] = headers.pop(header)

    path = urls.url_unquote(event_info['path'])
    if query:
        query_string = urlencode(query)
    else:
        query_string = ""

    x_forwarded_for = headers.get('X-Forwarded-For', '')
    if ',' in x_forwarded_for:
        # The last one is the cloudfront proxy ip. The second to last is the real client ip.
        # Everything else is user supplied and untrustworthy.
        remote_addr = x_forwarded_for.split(', ')[-2]
    else:
        remote_addr = '127.0.0.1'

    environ = {
        'PATH_INFO': get_wsgi_string(path),
        'QUERY_STRING': get_wsgi_string(query_string),
        'REMOTE_ADDR': remote_addr,
        'REQUEST_METHOD': method,
        'SCRIPT_NAME': get_wsgi_string(str(script_name)) if script_name else '',
        'SERVER_NAME': str(server_name),
        'SERVER_PORT': headers.get('X-Forwarded-Port', '80'),
        'SERVER_PROTOCOL': str('HTTP/1.1'),
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': headers.get('X-Forwarded-Proto', 'http'),
        'wsgi.input': body,
        'wsgi.errors': stderr,
        'wsgi.multiprocess': False,
        'wsgi.multithread': False,
        'wsgi.run_once': False,
    }

    # Input processing
    if method in ["POST", "PUT", "PATCH", "DELETE"]:
        if 'Content-Type' in headers:
            environ['CONTENT_TYPE'] = headers['Content-Type']
        # This must be Bytes or None
        environ['wsgi.input'] = six.BytesIO(body)
        if body:
            environ['CONTENT_LENGTH'] = str(len(body))
        else:
            environ['CONTENT_LENGTH'] = '0'

    for header in headers:
        wsgi_name = "HTTP_" + header.upper().replace('-', '_')
        environ[wsgi_name] = str(headers[header])

    if script_name:
        environ['SCRIPT_NAME'] = script_name
        path_info = environ['PATH_INFO']
        if script_name in path_info:
            # BUGFIX: the result of str.replace was previously discarded,
            # leaving PATH_INFO unchanged. Strip the first occurrence of the
            # script name so PATH_INFO is relative to SCRIPT_NAME.
            environ['PATH_INFO'] = path_info.replace(script_name, '', 1)

    if remote_user:
        environ['REMOTE_USER'] = remote_user

    if event_info['requestContext'].get('authorizer'):
        environ['API_GATEWAY_AUTHORIZER'] = event_info['requestContext']['authorizer']

    return environ
def to_python(self, value):
    """Decode a value that may have been percent-encoded up to three times."""
    for _ in range(3):
        value = url_unquote(value)
    return value
def test_quote_unquote_text(t):
    # Quoting followed by unquoting must round-trip any text value.
    round_tripped = urls.url_unquote(urls.url_quote(t))
    assert round_tripped == t
def test_bytes_unquoting():
    # latin1-quoted text, unquoted with charset=None, must yield raw bytes.
    quoted = urls.url_quote(u'#%="\xf6', charset='latin1')
    raw = urls.url_unquote(quoted, charset=None)
    strict_eq(raw, b'#%="\xf6')
def handle_request(application, event, context):
    """Adapt a serverless (API Gateway / SCF) event to WSGI, invoke the
    application, and translate its response back into the event-result dict.
    """
    # Prefer multi-value headers when the gateway supplies them.
    if u"multiValueHeaders" in event:
        headers = Headers(event["multiValueHeaders"])
    else:
        headers = Headers(event["headers"])

    strip_stage_path = os.environ.get("STRIP_STAGE_PATH", "").lower().strip() in [
        "yes",
        "y",
        "true",
        "t",
        "1",
    ]

    # Tencent API Gateway prefixes paths with the stage name unless the
    # STRIP_STAGE_PATH env var opts out.
    if u"apigw.tencentcs.com" in headers.get(u"Host", u"") and not strip_stage_path:
        script_name = "/{}".format(event["requestContext"].get(u"stage", ""))
    else:
        script_name = ""

    path_info = event["path"]
    # An explicit base path overrides the stage-derived script name and is
    # stripped from PATH_INFO.
    base_path = os.environ.get("API_GATEWAY_BASE_PATH")
    if base_path:
        script_name = "/" + base_path
        if path_info.startswith(script_name):
            path_info = path_info[len(script_name):] or "/"

    if u"body" in event:
        body = event[u"body"] or ""
    else:
        body = ""
    if event.get("isBase64Encoded", False):
        body = base64.b64decode(body)
    if isinstance(body, string_types):
        body = to_bytes(body, charset="utf-8")

    environ = {
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": headers.get(u"Content-Type", ""),
        "PATH_INFO": url_unquote(path_info),
        "QUERY_STRING": encode_query_string(event),
        "REMOTE_ADDR": event["requestContext"].get(u"identity", {}).get(u"sourceIp", ""),
        "REMOTE_USER": event["requestContext"].get(u"authorizer", {}).get(u"principalId", ""),
        "REQUEST_METHOD": event["httpMethod"],
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": headers.get(u"Host", "lambda"),
        "SERVER_PORT": headers.get(u"X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": "HTTP/1.1",
        "wsgi.errors": sys.stderr,
        "wsgi.input": BytesIO(body),
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
        "wsgi.url_scheme": headers.get(u"X-Forwarded-Proto", "http"),
        "wsgi.version": (1, 0),
        "serverless.authorizer": event["requestContext"].get(u"authorizer"),
        "serverless.event": event,
        "serverless.context": context,
        # TODO: Deprecate the following entries, as they do not comply with the WSGI
        # spec. For custom variables, the spec says:
        #
        # Finally, the environ dictionary may also contain server-defined variables.
        # These variables should be named using only lower-case letters, numbers, dots,
        # and underscores, and should be prefixed with a name that is unique to the
        # defining server or gateway.
        "API_GATEWAY_AUTHORIZER": event["requestContext"].get(u"authorizer"),
        "event": event,
        "context": context,
    }

    # Convert text values to the WSGI-string representation in place.
    for key, value in environ.items():
        if isinstance(value, string_types):
            environ[key] = wsgi_encoding_dance(value)

    # Copy remaining headers as HTTP_*; Content-Type/Length already handled.
    for key, value in headers.items():
        key = "HTTP_" + key.upper().replace("-", "_")
        if key not in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
            environ[key] = value

    response = Response.from_app(application, environ)

    returndict = {u"statusCode": response.status_code}

    # Mirror the header shape (single vs multi-value) of the incoming event.
    if u"multiValueHeaders" in event:
        returndict["multiValueHeaders"] = group_headers(response.headers)
    else:
        returndict["headers"] = split_headers(response.headers)

    if event.get("requestContext").get("elb"):
        # If the request comes from ALB we need to add a status description
        returndict["statusDescription"] = u"%d %s" % (
            response.status_code,
            HTTP_STATUS_CODES[response.status_code],
        )

    if response.data:
        mimetype = response.mimetype or "text/plain"
        # Text payloads without a content-encoding go out verbatim; anything
        # else is base64-encoded and flagged for the gateway.
        if (mimetype.startswith("text/") or mimetype in TEXT_MIME_TYPES) and not response.headers.get(
                "Content-Encoding", ""):
            returndict["body"] = response.get_data(as_text=True)
            returndict["isBase64Encoded"] = False
        else:
            returndict["body"] = base64.b64encode(
                response.data).decode("utf-8")
            returndict["isBase64Encoded"] = True

    return returndict
def test_bytes_unquoting(self):
    # latin1-quoted text, unquoted with charset=None, must yield raw bytes.
    quoted = urls.url_quote(u'#%="\xf6', charset='latin1')
    raw = urls.url_unquote(quoted, charset=None)
    self.assert_strict_equal(raw, b'#%="\xf6')