def decorated_function(*args, **kwargs):
    """Render the endpoint's template in the client's preferred language.

    Falls back to the English template when no localized one exists and
    reports the chosen language via the ``Content-Language`` header.
    """
    template_name = template
    if template_name is None:
        # Derive a "blueprint/endpoint" template name from the route.
        template_name = request.endpoint.replace('.', '/')
    template_path = '%s/%s.html' % (request.accept_languages.best, template_name)
    ctx = original_function(*args, **kwargs)
    if ctx is None:
        ctx = {}
    try:
        try:
            rv = render_template(template_path, **ctx)
            cl = request.accept_languages.best
        except TemplateNotFound:
            # No template for the preferred language: fall back to English.
            rv = render_template('en/%s.html' % template_name, **ctx)
            cl = 'en'
        # BUGFIX: this return previously lived in a ``finally`` block, which
        # swallowed DeadlineExceededError (and any other exception raised
        # above) and made the handler below unreachable.
        return current_app.response_class(rv, headers={'Content-Language': cl})
    except DeadlineExceededError:
        return current_app.response_class(
            'Sorry, This operation could not be completed in time...',
            status=500, mimetype='text/plain')
def send_from_memory(filename):
    """Serve *filename*, preferring a gzipped in-memory cached copy.

    :param filename: Name of the file to be loaded.
    :raises NotFound: if the file does not exist on disk.
    """
    if not os.path.isfile(filename):
        raise NotFound()
    guessed = mimetypes.guess_type(filename)[0]
    mimetype = guessed if guessed is not None else 'application/octet-stream'
    cached = None
    if current_app.config['cache_enabled']:
        cached = jsOptimizer().get_file(os.path.abspath(filename),
                                        current_app.storekv)
    if cached:
        # Cached copy is pre-compressed; serve it directly.
        response_headers = Headers()
        response_headers['Content-Encoding'] = 'gzip'
        response_headers['Content-Length'] = len(cached)
        response_headers['Cache-Control'] = "max-age=172800, public, must-revalidate"
        return current_app.response_class(cached, mimetype=mimetype,
                                          headers=response_headers,
                                          direct_passthrough=True)
    # No cache hit: stream the file from disk via the WSGI file wrapper.
    stream = wrap_file(request.environ, open(filename, 'rb'))
    return current_app.response_class(stream, mimetype=mimetype,
                                      headers=Headers(),
                                      direct_passthrough=False)
def jsonify(*args, **kwargs):
    """Drop-in replacement for :func:`flask.jsonify`.

    Unlike Flask's version this also serializes bare lists and custom
    objects (Decimal, datetime, ...) via ``default_json_encoder``. Lists
    are normally considered a security risk, but they are needed here.
    """
    indent = 2 if (current_app.config["JSONIFY_PRETTYPRINT_REGULAR"]
                   and not request.is_xhr) else None
    if len(args) == 1 and not isinstance(args[0], (dict, UserDict)):
        # A single non-dict argument (e.g. a list) is serialized as-is.
        payload = args[0]
    else:
        payload = dict(*args, **kwargs)
    body = json.dumps(payload, indent=indent, default=default_json_encoder)
    return current_app.response_class(body, mimetype="application/json")
def resp(data, schema=None, status_code=200):
    """Build a JSON response, optionally serializing *data* through *schema*."""
    if schema:
        payload = schema.dump(data).data
    else:
        payload = data
    response = current_app.response_class(json.dumps(payload),
                                          mimetype='application/json')
    response.status_code = status_code
    return response
def key_list():
    """Enumerate API keys.

    POST creates a new key (the JSON payload must contain ``key`` and
    ``capabilities``); any other method returns the whole collection as JSON.
    """
    if request.method == 'POST':
        # Create a new key
        data = json.loads(request.data)
        if data.get('key') and data.get('capabilities'):
            key = data.get('key')
            el = db['apikeys'].find_one({ 'key': key })
            if el is not None:
                # Key already existing. Should use update.
                abort(409)
            caps = data.get('capabilities')
            # Let's handle both restAdmin serialization and raw edition
            if isinstance(caps, basestring):  # NOTE: Python 2 only
                caps = caps.split(",")
            data = { 'key': key, 'capabilities': caps }
            validate_schema(data, 'key')
            db['apikeys'].insert(data)
            # Reload the in-memory key cache so the new key works immediately.
            load_keys()
            return current_app.response_class(
                json.dumps(data, indent=None if request.is_xhr else 2,
                           cls=MongoEncoder),
                mimetype='application/json')
        else:
            # Missing mandatory fields.
            abort(401)
    else:
        return current_app.response_class(
            json.dumps(list(db['apikeys'].find()),
                       indent=None if request.is_xhr else 2,
                       cls=MongoEncoder),
            mimetype='application/json')
def hellojsonp():
    """Return a hello-world JSON payload, JSONP-padded when requested."""
    result = '{"message":"hello world"}'
    mimetype = 'application/javascript'
    callback = request.args.get('callback', False)
    # '?' is jQuery's placeholder and not a usable callback name.
    if not callback or callback == '?':
        return current_app.response_class(result, mimetype=mimetype)
    padded = '%s(%s);' % (str(callback), result)
    return current_app.response_class(padded, mimetype=mimetype)
def get_url(): url = request.form['url'] try: id, content = feed(url) response = current_app.response_class(content) response.headers['X-Feed-Id'] = format_rid(id) except UrlError, ue: response = current_app.response_class(str(ue)) response.headers['X-Feed-Error'] = str(ue)
def element_list(collection):
    """Generic element listing method. It handles GET and POST requests on
    element collections, plus HEAD/OPTIONS for CORS preflight.
    """
    # TODO: find a way to pass collection parameter to check_access
    if request.method == 'HEAD' or request.method == 'OPTIONS':
        # CORS preflight: advertise allowed methods/headers when enabled.
        if CONFIG['enable_cross_site_requests']:
            return Response('', 200, {
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
                'Access-Control-Allow-Headers': 'Content-Type',
            })
        else:
            return Response('', 200);
    if request.method == 'POST':
        # FIXME: do some sanity checks here (valid properties, existing ids...)
        # Insert a new element
        data = request.json
        if collection == 'annotations':
            normalize_annotation(data)
        db[collection].save(clean_json(data))
        response = current_app.response_class(
            json.dumps(restore_json(data),
                       indent=None if request.is_xhr else 2,
                       cls=MongoEncoder),
            mimetype='application/json')
        if CONFIG['enable_cross_site_requests']:
            response.headers['Access-Control-Allow-Origin'] = '*'
            response.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
            response.headers['Access-Control-Allow-Headers'] = 'Content-Type'
        return response
    else:
        # GET: build a Mongo query from "name:value" filter parameters,
        # translating public names through the query map.
        querymap = { 'user': '******', 'creator': 'meta.dc:creator' }
        querymap.update(SPECIFIC_QUERYMAPS[collection])
        # Unfiltered listings require an explicit capability.
        if (not request.values.getlist('filter')
                and not check_capability(get_api_key(),
                                         [ "GETunfiltered%s" % el for el in ('elements', collection) ])):
            raise InvalidAccess("Query too generic.")
        query = dict( (querymap.get(name, name), value)
                      for (name, value) in ( f.split(':') for f in request.values.getlist('filter') )
                      if name in querymap )
        cursor = db[collection].find(query)
        response = current_app.response_class(
            json.dumps(list(restore_json(a) for a in cursor),
                       indent=None if request.is_xhr else 2,
                       cls=MongoEncoder),
            mimetype='application/json')
        if CONFIG['enable_cross_site_requests']:
            response.headers['Access-Control-Allow-Origin'] = '*'
            response.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
            response.headers['Access-Control-Allow-Headers'] = 'Content-Type'
        return response
def get(self, id):
    """Return the atom with the given handle as a JSON (or JSONP) response.

    :param id: Atom handle
    :return: JSON representation of a one-element atom list under the
        'atoms' key; 404 if the handle does not exist.
    """
    try:
        atom = self.atomspace[Handle(id)]
    except IndexError:
        abort(404, 'Handle not found')
    payload = {'atoms': marshal(atom, atom_fields)}
    # Honour an optional JSONP ``callback`` query parameter.
    callback = self.reqparse.parse_args().get('callback')
    if callback is None:
        return current_app.response_class(json.dumps(payload),
                                          mimetype='application/json')
    padded = str(callback) + '(' + json.dumps(payload) + ');'
    return current_app.response_class(padded,
                                      mimetype='application/javascript')
def jsonp(func):
    """Serialize *func* to JSON, wrapped in a JSONP callback when the
    ``callback`` query parameter is present."""
    payload = json.dumps(func)
    callback = request.args.get('callback', False)
    if callback:
        body = '%s(%s)' % (str(callback), payload)
        return current_app.response_class(body,
                                          mimetype='application/javascript')
    return current_app.response_class(payload, mimetype='application/json')
def decorated_function(*args, **kwargs):
    """Render the wrapped view's IP address as JSONP, plain text or the
    original response, depending on the ``format`` query parameter."""
    format_type = request.args.get('format')
    callback = request.args.get('callback', 'callback')
    data = str(func(*args, **kwargs)['data']['ip'])
    if format_type == 'jsonp':
        content = str(callback) + '({"ip":"' + str(data) + '"});'
        return current_app.response_class(content,
                                          mimetype='application/javascript')
    elif format_type == 'text':
        # BUGFIX: mimetype used to be the typo 'text/plan'.
        return current_app.response_class(str(data), mimetype='text/plain')
    else:
        return func(*args, **kwargs)
def late():
    """Sleep three seconds, then produce a 201 JSON response."""
    yield from asyncio.sleep(3)
    body = json.dumps({'data': 'done'})
    # BUGFIX: the response object was previously constructed and then
    # discarded while the bare string 'done' was returned; return the
    # actual response instead.
    return current_app.response_class(body, headers={
        'Content-Type': 'application/json',
    }, status=201)
def etag_enabled(*args, **kwargs):
    """Answer 304 when the client's ``If-None-Match`` matches the server
    ETag; otherwise run the wrapped view and attach the ETag header.

    The tag hashes the view-specific content plus the app version so a
    deploy invalidates all client caches.
    """
    etag_inner_content = "%s:%s" % (func_tag_content(*args, **kwargs),
                                    App.version)
    etag_server = "\"%s\"" % hashlib.md5(etag_inner_content).hexdigest()
    etag_client = request.headers.get("If-None-Match")
    if etag_client and etag_client == etag_server:
        # Client copy is current: no body needed.
        return current_app.response_class(status=304)
    result = func(*args, **kwargs)
    if isinstance(result, current_app.response_class):
        result.headers["ETag"] = etag_server
        return result
    # CONSISTENCY FIX: use the canonical "ETag" spelling on both branches
    # (this branch previously wrote "Etag").
    return current_app.response_class(result, headers={"ETag": etag_server})
def get_news(oid):
    """Return the news item with the given oid as JSON, or 404."""
    record = query_news_by_oid(oid)
    if not record:
        abort(404)
    indent = None if request.is_xhr else 2
    return current_app.response_class(json.dumps(record, indent=indent),
                                      mimetype='application/json')
def handle_socketio_request(remaining):
    """Delegate the request to the gevent-socketio machinery.

    Exceptions are logged with a traceback and re-raised so the WSGI
    layer can report them; on success an empty response is returned.
    """
    namespaces = {'': BaseNamespace}
    try:
        socketio_manage(request.environ, namespaces, request)
    except Exception:
        current_app.logger.exception(
            'Exception while handling socketio connection')
        raise
    return current_app.response_class()
def serve_image(key):
    """Serve the stored image for *key*.

    NOTE(review): the ``try`` body below is a constant assignment that can
    never raise, so the ``except`` branch is dead code and the mimetype is
    always 'image/png'. Presumably the intent was to inspect ``pic`` to
    detect PDFs -- confirm and fix the detection logic.
    """
    pic = db.get(key)
    try:
        mimetype = 'image/png'
    except:  # unreachable -- see note above
        mimetype = 'application/pdf'
    return current_app.response_class(pic.image, mimetype=mimetype,
                                      direct_passthrough=False)
def jsonify(*args, **kwargs):
    """Serialize the given dict arguments with the module's ``dumps``
    (forces the API encoder until Flask 0.10 is released)."""
    pretty = not request.is_xhr
    body = dumps(dict(*args, **kwargs), indent=2 if pretty else None)
    return current_app.response_class(body, mimetype='application/json')
def create_json_error_answer(exception, status=200):
    """Build the standard Indico JSON error envelope for *exception*.

    :param exception: the exception being reported
    :param status: HTTP status code of the response (defaults to 200)
    """
    from indico.core.config import Config
    from indico.core.errors import IndicoError, get_error_description
    if isinstance(exception, IndicoError):
        details = exception.toDict()
    else:
        exception_data = exception.__dict__
        try:
            # Only ship the exception's attributes if they are serializable.
            _json.dumps(exception_data)
        except Exception:
            exception_data = {}
        details = {
            'code': type(exception).__name__,
            # 'noReport' suppresses client-side error reporting, e.g. for
            # Forbidden errors hit by anonymous users.
            'type': 'noReport' if ((not session.user and isinstance(exception, Forbidden)) or
                                   _is_no_report_error(exception)) else 'unknown',
            'message': unicode(get_error_description(exception)),
            'data': exception_data,
            'requestInfo': get_request_info(),
            'inner': traceback.format_exc()
        }
    return current_app.response_class(dumps({
        'version': Config.getInstance().getVersion(),
        'result': None,
        'error': details
    }), mimetype='application/json', status=status)
def stage4_link(): debug(OKGREEN+UNDERLINE+BOLD + "****** Stage 4: LINK BUTTON ***** " + ENDC) #for k, v in request.args.iteritems(): # print "var: %s = %s" % (k, v) #name = request.get_json().get('jpsurvData', '') #print name jpsurvDataString = request.args.get('jpsurvData', True); info(BOLD+"**** jpsurvDataString ****"+ENDC) info(jpsurvDataString) jpsurvData = json.loads(jpsurvDataString) info(BOLD+"**** jpsurvData ****"+ENDC) for key, value in jpsurvData.iteritems(): info("var: %s = %s" % (key, value)) #Init the R Source rSource = robjects.r('source') rSource('./JPSurvWrapper.R') info(BOLD+"**** Calling getDownloadOutputWrapper ****"+ENDC) getDownloadOutputWrapper = robjects.globalenv['getDownloadOutputWrapper'] rStrVector = getDownloadOutputWrapper(UPLOAD_DIR, jpsurvDataString) downloadLinkFileName = "".join(tuple(rStrVector)) info("Download File Name: %s" % downloadLinkFileName) link = "{\"link\":\"%s\"}" % (downloadLinkFileName) #return json.dumps("{\"start.year\":[1975,2001],\"end.year\":[2001,2011],\"estimcate\":[-0.0167891169889347,-0.0032678676219079]}") #print json.dumps(link) mimetype = 'application/json' content = json.dumps(link); return current_app.response_class(content, mimetype=mimetype)
def process():
    """Handle a JSON-RPC request: decode it, dispatch to the service
    runner and encode the result-or-error envelope as JSON."""
    responseBody = {
        'version': '1.1',
        'error': None,
        'result': None
    }
    requestBody = None
    try:
        # init/clear fossil cache
        clearCache()
        # read request
        try:
            requestBody = request.get_json()
            Logger.get('rpc').info('json rpc request. request: {0}'.format(requestBody))
        except BadRequest:
            raise RequestError('ERR-R1', 'Invalid mime-type.')
        if not requestBody:
            raise RequestError('ERR-R2', 'Empty request.')
        if 'id' in requestBody:
            # Echo the JSON-RPC request id back in the response.
            responseBody['id'] = requestBody['id']
        # run request
        responseBody['result'] = ServiceRunner().invokeMethod(
            str(requestBody['method']), requestBody.get('params', []))
    except CausedError as e:
        try:
            errorInfo = fossilize(e)
        except NonFossilizableException as e2:
            # catch Exceptions that are not registered as Fossils
            # and log them
            errorInfo = {'code': '', 'message': str(e2)}
            Logger.get('dev').exception('Exception not registered as fossil')
        # NoReport errors (i.e. not logged in) shouldn't be logged
        if not isinstance(e, NoReportError):
            Logger.get('rpc').exception('Service request failed. '
                                        'Request text:\r\n{0}\r\n\r\n'.format(requestBody))
        if requestBody:
            params = requestBody.get('params', [])
            Sanitization._escapeHTML(params)
            errorInfo["requestInfo"] = {
                'method': str(requestBody['method']),
                'params': params,
                'origin': str(requestBody.get('origin', 'unknown'))
            }
            Logger.get('rpc').debug('Arguments: {0}'.format(errorInfo['requestInfo']))
        responseBody['error'] = errorInfo
    try:
        jsonResponse = dumps(responseBody, ensure_ascii=True)
    except UnicodeError:
        Logger.get('rpc').exception('Problem encoding JSON response')
        # This is to avoid exceptions due to old data encodings (based on iso-8859-1)
        responseBody['result'] = fix_broken_obj(responseBody['result'])
        jsonResponse = encode(responseBody)
    return app.response_class(jsonResponse, mimetype='application/json')
def jsonify(data, status=200, headers=None):
    """Build a JSON (or JSONP) response.

    :param data: dict or list payload (anything else raises TypeError)
    :param status: HTTP status; JSONP responses always travel as 200
    :param headers: optional extra response headers
    :return: ``(response, status)`` tuple
    """
    if not isinstance(data, (dict, list)):
        raise TypeError
    pretty = (current_app.config['JSONIFY_PRETTYPRINT_REGULAR']
              and not request.is_xhr)
    indent = 2 if pretty else None
    callback = request.args.get('callback', False)
    if callback:
        # JSONP: wrap payload and status in an envelope; the real status
        # travels inside it so the HTTP status can stay 200.
        envelope = {
            'meta': {
                'status': status,
            },
            'data': data,
        }
        content = '%s(%s)' % (str(callback),
                              json.dumps(envelope, indent=indent))
        mimetype = 'application/javascript'
        status = 200
    else:
        content = json.dumps(data, indent=indent)
        mimetype = 'application/json'
    response = current_app.response_class(content, mimetype=mimetype,
                                          headers=headers)
    return response, status
def posts_feed():
    """Render the ten most recent published posts as an RSS 2.0 feed."""
    base_url = url_for('general.index', _external=True)
    posts = Post.get_published(num=10).all()
    items = []
    for post in posts:
        post_url = urljoin(base_url, post.url)
        # TODO: Add a real description
        items.append(RSSItem(
            title=post.title,
            link=post_url,
            description=post.body.split('\r\n', 1)[0],
            author='{} ({})'.format(post.author.email, post.author.full_name),
            categories=[tag.name for tag in post.tags],
            guid=Guid(post_url),
            pubDate=post.pub_date,
        ))
    feed_config = current_app.config['BLOG_POSTS_FEED']
    feed = RSS2(
        title=feed_config['title'],
        link=base_url,
        description=feed_config['description'],
        language='en-us',
        webMaster=feed_config['webmaster'],
        lastBuildDate=posts[0].pub_date if posts else None,
        ttl=1440,
        items=items,
    )
    return current_app.response_class(feed.to_xml(encoding='utf-8'),
                                      mimetype='application/rss+xml')
def decorated_function(*args, **kwargs):
    """Wrap the view's response data in a JSONP callback when requested."""
    callback = request.args.get("callback", False)
    if callback:
        # BUGFIX: the wrapped view used to be invoked as f() here, silently
        # dropping the *args/**kwargs that the non-callback branch forwards.
        content = str(callback) + "(" + str(f(*args, **kwargs).data) + ")"
        return current_app.response_class(content, mimetype="application/json")
    else:
        return f(*args, **kwargs)
def render_pdf(html, stylesheets=None, download_filename=None):
    """Render a PDF response with the correct ``Content-Type`` header.

    :param html: a :class:`weasyprint.HTML` object, or a URL passed to
        :func:`flask_weasyprint.HTML` (the latter needs a request context).
    :param stylesheets: user stylesheets forwarded to
        :meth:`~weasyprint.HTML.write_pdf`.
    :param download_filename: when given, sets ``Content-Disposition`` so
        browsers offer a "Save as..." dialog with this default name.
    :returns: a :class:`flask.Response` object.
    """
    document = html if hasattr(html, 'write_pdf') else HTML(html)
    pdf_bytes = document.write_pdf(stylesheets=stylesheets)
    response = current_app.response_class(pdf_bytes,
                                          mimetype='application/pdf')
    if download_filename:
        response.headers.add('Content-Disposition', 'attachment',
                             filename=download_filename)
    return response
def json_response(data, code):
    """Serialize *data* to compact JSON with the given HTTP status code."""
    response = current_app.response_class(
        json.dumps(data, indent=None),
        mimetype='application/json; charset=UTF-8')
    response.status_code = code
    return response
def jsonify(payload):
    """Like flask.jsonify() but serializes *payload* directly, without
    requiring a wrapping dict."""
    pretty = (current_app.config['JSONIFY_PRETTYPRINT_REGULAR']
              and not request.is_xhr)
    body = dumps(payload, indent=2 if pretty else None)
    return current_app.response_class(body, mimetype='application/json')
def decorated_function(*args, **kwargs):
    """JSONP-wrap the decorated view's data when ``callback`` is given."""
    callback = request.args.get('callback', False)
    if not callback:
        return f(*args, **kwargs)
    body = '%s(%s)' % (str(callback), str(f(*args, **kwargs).data))
    return current_app.response_class(body,
                                      mimetype='application/javascript')
def jsonify_resource(resource):
    """Serialize *resource* as a JSON response, pretty-printing for
    non-XHR requests when the app is configured to."""
    pretty = (current_app.config['JSONIFY_PRETTYPRINT_REGULAR']
              and not request.is_xhr)
    body = json.dumps(resource, indent=2 if pretty else None)
    return current_app.response_class(body, mimetype='application/json')
def match_download(id):
    """Stream the raw replay data of a match as a cacheable attachment.

    :param id: match identifier shared by Match and MatchData
    """
    from werkzeug import Headers
    import time
    match = model.Match.get_by_id(id)
    match_data = model.MatchData.get_by_id(id)
    abort_if(not match)
    abort_if(not match_data)
    data = match_data.raw_data
    abort_if(not data)
    # BUGFIX: removed a stray debugging ``print len(data)`` statement that
    # was left in the production code path.
    headers = Headers()
    headers.add('Content-Disposition', 'attachment', filename=match.filename)
    rv = current_app.response_class(
        data,
        mimetype='application/octet-stream',
        headers=headers,
        direct_passthrough=True,
    )
    # Publicly cacheable for one day.
    rv.cache_control.public = True
    rv.cache_control.max_age = 86400
    rv.expires = int(time.time() + 86400)
    return rv
def _process(self):
    """Render a Jupyter notebook attachment as sandboxed HTML."""
    exporter_config = Config()
    exporter_config.HTMLExporter.preprocessors = [CppHighlighter]
    exporter_config.HTMLExporter.template_file = 'basic'
    with self.attachment.file.open() as f:
        notebook = nbformat.read(f, as_version=4)
    exporter = HTMLExporter(config=exporter_config)
    body, resources = exporter.from_notebook_node(notebook)
    css_code = '\n'.join(resources['inlining'].get('css', []))
    nonce = str(uuid4())
    html = render_template('previewer_jupyter:ipynb_preview.html',
                           attachment=self.attachment,
                           html_code=body,
                           css_code=css_code,
                           nonce=nonce)
    response = current_app.response_class(html)
    # Use CSP to restrict access to possibly malicious scripts or inline JS
    csp_header = "script-src cdn.mathjax.org 'nonce-{}';".format(nonce)
    response.headers['Content-Security-Policy'] = csp_header
    response.headers['X-Webkit-CSP'] = csp_header
    # IE10 doesn't have proper CSP support, so we need to be more strict
    response.headers['X-Content-Security-Policy'] = "sandbox allow-same-origin;"
    return response
def get_image_list():
    """Return the cached URL list as pretty-printed JSON."""
    body = json.dumps(url_list_success, indent=2)
    return current_app.response_class(body, mimetype="application/json")
def json_response(response):
    """Wrap an already-built payload in an application/json response."""
    body = json_dumps(response)
    return current_app.response_class(body, mimetype='application/json')
def _get(self, id=""):
    """ Returns a list of atoms matching the specified criteria

    Request arguments (all optional): type, name, callback (JSONP),
    filterby ('stirange' or 'attentionalfocus'), stimin/stimax,
    tvStrengthMin/tvConfidenceMin/tvCountMin, includeIncoming,
    includeOutgoing, dot (DOT graph output instead of JSON), limit.
    """
    args = self.reqparse.parse_args()
    type = args.get('type')
    name = args.get('name')
    callback = args.get('callback')
    filter_by = args.get('filterby')
    sti_min = args.get('stimin')
    sti_max = args.get('stimax')
    tv_strength_min = args.get('tvStrengthMin')
    tv_confidence_min = args.get('tvConfidenceMin')
    tv_count_min = args.get('tvCountMin')
    include_incoming = args.get('includeIncoming')
    include_outgoing = args.get('includeOutgoing')
    dot_format = args.get('dot')
    limit = args.get('limit')
    if id != "":
        # Single-atom lookup by handle; unknown handles yield an empty list.
        try:
            atom = self.atomspace[Handle(id)]
            atoms = [atom]
        except IndexError:
            atoms = []
            # abort(404, 'Handle not found')
    else:
        # First, check if there is a valid filter type, and give it
        # precedence if it exists
        valid_filter = False
        if filter_by is not None:
            if filter_by == 'stirange':
                if sti_min is not None:
                    valid_filter = True
                    atoms = self.atomspace.get_atoms_by_av(sti_min, sti_max)
                else:
                    abort(400, 'Invalid request: stirange filter requires '
                               'stimin parameter')
            elif filter_by == 'attentionalfocus':
                valid_filter = True
                atoms = self.atomspace.get_atoms_in_attentional_focus()
        # If there is not a valid filter type, proceed to select by type
        # or name
        if not valid_filter:
            if type is None and name is None:
                atoms = self.atomspace.get_atoms_by_type(types.Atom)
            elif name is None:
                atoms = self.atomspace.get_atoms_by_type(
                    types.__dict__.get(type))
            else:
                # Name lookups default to the generic Node type.
                if type is None:
                    type = 'Node'
                atoms = self.atomspace.get_atoms_by_name(
                    t=types.__dict__.get(type), name=name)
        # Optionally, filter by TruthValue
        if tv_strength_min is not None:
            atoms = [atom for atom in atoms
                     if atom.tv.mean >= tv_strength_min]
        if tv_confidence_min is not None:
            atoms = [atom for atom in atoms
                     if atom.tv.confidence >= tv_confidence_min]
        if tv_count_min is not None:
            atoms = [atom for atom in atoms
                     if atom.tv.count >= tv_count_min]
    # Optionally, include the incoming set
    if include_incoming in ['True', 'true', '1']:
        atoms = self.atomspace.include_incoming(atoms)
    # Optionally, include the outgoing set
    if include_outgoing in ['True', 'true', '1']:
        atoms = self.atomspace.include_outgoing(atoms)
    # Optionally, limit number of atoms returned
    if limit is not None:
        if len(atoms) > limit:
            atoms = atoms[0:limit]
    # The default is to return the atom set as JSON atoms. Optionally, a
    # DOT return format is also supported
    if dot_format not in ['True', 'true', '1']:
        atom_list = AtomListResponse(atoms)
        json_data = {'result': atom_list.format()}
        # if callback function supplied, pad the JSON data (i.e. JSONP):
        if callback is not None:
            response = str(callback) + '(' + json.dumps(json_data) + ');'
            return current_app.response_class(
                response, mimetype='application/javascript')
        else:
            return current_app.response_class(json.dumps(json_data),
                                              mimetype='application/json')
    else:
        dot_output = dot.get_dot_representation(atoms)
        return jsonify({'result': dot_output})
def jsonify_status_code(data=None, status=200, mimetype='application/json'):
    """JSON-encode *data* (defaulting to an empty dict) with the given
    HTTP status and mimetype."""
    payload = data if data else {}
    return current_app.response_class(json_dumps(payload), status=status,
                                      mimetype=mimetype)
def do(self, action, options=None):
    """
    This method executes the defined action in the given event.

    Only ACTION_TYPE.FORWARD is handled: the original request is replayed
    against the configured privacyIDEA server and its answer is stored as
    the new response.

    :param action: the action to perform
    :param options: Contains the flask parameters g, request, response and the handler_def configuration
    :type options: dict
    :return: True
    """
    g = options.get("g")
    request = options.get("request")
    handler_def = options.get("handler_def")
    handler_options = handler_def.get("options", {})
    if action == ACTION_TYPE.FORWARD:
        server_def = handler_options.get("privacyIDEA")
        pi_server = get_privacyideaserver(server_def)
        # the new url is the configured server url and the original path
        url = pi_server.config.url + request.path
        # We use the original method
        method = request.method
        tls = pi_server.config.tls
        # We also transfer the original payload
        data = request.all_data
        if is_true(handler_options.get("forward_client_ip", False)):
            data["client"] = g.client_ip
        if handler_options.get("realm"):
            data["realm"] = handler_options.get("realm")
        if handler_options.get("resolver"):
            data["resolver"] = handler_options.get("resolver")
        log.info(u"Sending {0} request to {1!r}".format(method, url))
        requestor = None
        params = None
        headers = {}
        # We need to pass an authorization header if we forward administrative requests
        if is_true(handler_options.get("forward_authorization_token", False)):
            auth_token = request.headers.get('PI-Authorization')
            if not auth_token:
                auth_token = request.headers.get('Authorization')
            headers["PI-Authorization"] = auth_token
        if method.upper() == "GET":
            # GET payloads travel as query parameters, not a body.
            params = data
            data = None
            requestor = requests.get
        elif method.upper() == "POST":
            requestor = requests.post
        elif method.upper() == "DELETE":
            requestor = requests.delete
        if requestor:
            r = requestor(url, params=params, data=data,
                          headers=headers, verify=tls)
            # convert requests Response to werkzeug Response
            response_dict = json.loads(r.text)
            if "detail" in response_dict:
                detail = response_dict.setdefault("detail", {})
                # In case of exceptions we may not have a detail
                detail["origin"] = url
            # We will modify the response!
            # We can not use flask.jsonify(response_dict) here, since we
            # would work outside of application context!
            options["response"] = current_app.response_class(
                json.dumps(response_dict), mimetype="application/json")
            options["response"].status_code = r.status_code
        else:
            log.warning(u"Unsupported method: {0!r}".format(method))
    return True
def json_search(pid_fetcher, search_result, **kwargs):
    """Test serializer.

    BUGFIX: the hit total used to be passed as a stray second positional
    argument to ``json.dumps`` (which binds to ``skipkeys``); drop it.
    """
    return current_app.response_class(
        json.dumps([{'test': 'test'}]),
        content_type='application/json')
def find_kata_tokens(kata: str):
    """Look up *kata* in the Mongo ``data`` collection and return the
    matching document as a JSON response."""
    document = mongo_db.data.find_one({'word': kata})
    return current_app.response_class(
        response=json.dumps(document, cls=Encoder),
        status=200,
        mimetype='application/json')
def json_record(*args, **kwargs):
    """Test serializer returning a fixed JSON record."""
    payload = json.dumps({'json_record': 'json_record'})
    return current_app.response_class(payload,
                                      content_type='application/json')
def xml_record(*args, **kwargs):
    """Test serializer returning a fixed XML record."""
    body = "<record>TEST</record>"
    return current_app.response_class(body, content_type='application/xml')
def salesforce_data():
    """Expose the Salesforce export as a JSON response."""
    payload = get_salesforce_data()
    return current_app.response_class(payload, mimetype="application/json")
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
              attachment_filename=None, add_etags=True,
              cache_timeout=None, conditional=False, headers={}):
    """Sends the contents of a file to the client.

    This will use the most efficient method available and configured. By
    default it will try to use the WSGI server's file_wrapper support.
    Alternatively you can set the application's
    :attr:`~Flask.use_x_sendfile` attribute to ``True`` to directly emit
    an `X-Sendfile` header. This however requires support of the
    underlying webserver for `X-Sendfile`.

    By default it will try to guess the mimetype for you, but you can also
    explicitly provide one. For extra security you probably want to send
    certain files as attachment (HTML for instance). The mimetype guessing
    requires a `filename` or an `attachment_filename` to be provided.

    Please never pass filenames to this function from user sources without
    checking them first. Something like this is usually sufficient to
    avoid security problems::

        if '..' in filename or filename.startswith('/'):
            abort(404)

    .. versionadded:: 0.2
    .. versionadded:: 0.5
       The `add_etags`, `cache_timeout` and `conditional` parameters were
       added. The default behavior is now to attach etags.
    .. versionchanged:: 0.7
       mimetype guessing and etag support for file objects was deprecated
       because it was unreliable. Pass a filename if you are able to,
       otherwise attach an etag yourself. This functionality will be
       removed in Flask 1.0
    .. versionchanged:: 0.9
       cache_timeout pulls its default from application config, when None.

    :param filename_or_fp: the filename of the file to send. This is
        relative to the :attr:`~Flask.root_path` if a relative path is
        specified. Alternatively a file object might be provided in which
        case `X-Sendfile` might not work and fall back to the traditional
        method. Make sure that the file pointer is positioned at the start
        of data to send before calling :func:`send_file`.
    :param mimetype: the mimetype of the file if provided, otherwise auto
        detection happens.
    :param as_attachment: set to `True` if you want to send this file with
        a ``Content-Disposition: attachment`` header.
    :param attachment_filename: the filename for the attachment if it
        differs from the file's filename.
    :param add_etags: set to `False` to disable attaching of etags.
    :param conditional: set to `True` to enable conditional responses.
    :param cache_timeout: the timeout in seconds for the headers. When
        `None` (default), this value is set by
        :meth:`~Flask.get_send_file_max_age` of :data:`~flask.current_app`.
    :param headers: extra headers merged over the computed defaults.
        NOTE(review): a mutable default argument -- harmless while it is
        only iterated (never mutated) below, but prefer ``headers=None``.
    """
    # sleep(5)
    mtime = None
    if isinstance(filename_or_fp, string_types):
        filename = filename_or_fp
        file = None
    else:
        from warnings import warn
        file = filename_or_fp
        filename = getattr(file, 'name', None)
        # XXX: this behavior is now deprecated because it was unreliable.
        # removed in Flask 1.0
        if not attachment_filename and not mimetype \
           and isinstance(filename, string_types):
            warn(DeprecationWarning('The filename support for file objects '
                'passed to send_file is now deprecated. Pass an '
                'attach_filename if you want mimetypes to be guessed.'),
                stacklevel=2)
        if add_etags:
            warn(DeprecationWarning('In future flask releases etags will no '
                'longer be generated for file objects passed to the send_file '
                'function because this behavior was unreliable. Pass '
                'filenames instead if possible, otherwise attach an etag '
                'yourself based on another value'), stacklevel=2)
    if filename is not None:
        # Relative paths are resolved against the application root.
        if not os.path.isabs(filename):
            filename = os.path.join(current_app.root_path, filename)
    if mimetype is None and (filename or attachment_filename):
        mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'
    default_headers = Headers()
    if as_attachment:
        if attachment_filename is None:
            if filename is None:
                raise TypeError('filename unavailable, required for '
                                'sending as attachment')
            attachment_filename = os.path.basename(filename)
        default_headers.add('Content-Disposition', 'attachment',
                            filename=attachment_filename)
    if current_app.use_x_sendfile and filename:
        # Delegate the actual file transfer to the web server.
        if file is not None:
            file.close()
        default_headers['X-Sendfile'] = filename
        default_headers['Content-Length'] = os.path.getsize(filename)
        data = None
    else:
        if file is None:
            file = open(filename, 'rb')
            mtime = os.path.getmtime(filename)
            default_headers['Content-Length'] = os.path.getsize(filename)
        data = wrap_file(request.environ, file)
    # Caller-supplied headers override the computed defaults.
    for headername in headers:
        default_headers[headername] = headers[headername]
    rv = current_app.response_class(data, mimetype=mimetype,
                                    headers=default_headers,
                                    direct_passthrough=True)
    # if we know the file modification date, we can store it as the
    # the time of the last modification.
    if mtime is not None:
        rv.last_modified = int(mtime)
    rv.cache_control.public = True
    if cache_timeout is None:
        cache_timeout = current_app.get_send_file_max_age(filename)
    if cache_timeout is not None:
        rv.cache_control.max_age = cache_timeout
        rv.expires = int(time() + cache_timeout)
    if add_etags and filename is not None:
        rv.set_etag('flask-%s-%s-%s' % (
            os.path.getmtime(filename),
            os.path.getsize(filename),
            adler32(
                filename.encode('utf-8') if isinstance(filename, text_type)
                else filename
            ) & 0xffffffff
        ))
    if conditional:
        rv = rv.make_conditional(request)
        # make sure we don't send x-sendfile for servers that
        # ignore the 304 status code for x-sendfile.
        if rv.status_code == 304:
            rv.headers.pop('x-sendfile', None)
    return rv
def _openapi_json(self):
    """Serve the OpenAPI spec as JSON.

    ``flask.jsonify`` is deliberately avoided because it sorts keys
    alphabetically, and the spec's key order must be preserved.
    """
    spec_json = json.dumps(self.to_dict(), indent=2)
    return current_app.response_class(spec_json,
                                      mimetype='application/json')
def jsonify(obj, status=200):
    """Camel-case and JSON-encode *obj* into a response with *status*."""
    body = pjson.dumps(format_json(obj, camel_case=True))
    return current_app.response_class(body,
                                      mimetype='application/json',
                                      status=status)
def api_query(table, id = None):
    """Raw-database API endpoint: query *table* (or fetch one row by *id*)
    and render the result as HTML, JSON or YAML.

    Meta query-string parameters:
      ``_format``: "html" (default), "json" or "yaml"
      ``_offset``: integer offset into the result set (capped at 10000)
      ``_delim``:  delimiter for list-valued parameter values (default ",")
      ``_fields``: delimited list of columns to return (default: all)
      ``_sort``:   delimited sort keys; prefix "-" for descending order

    Value encodings for search parameters: ``s``tring, ``i``nt, ``f``loat,
    ``ls``/``li``/``lf`` (delimited lists), ``py`` (literal_eval),
    ``cs``/``ci``/``cf``/``cpy`` ($contains queries).
    """
    from ast import literal_eval  # hoisted: was re-imported on every loop pass

    #if censored_table(table):
    #    return abort(404)

    # parsing the meta parameters _format and _offset
    format = request.args.get("_format", "html")
    offset = int(request.args.get("_offset", 0))
    DELIM = request.args.get("_delim", ",")
    fields = request.args.get("_fields", None)
    sortby = request.args.get("_sort", None)

    def apierror(msg, flash_extras=(), code=404, table=True):
        # Helper: flash+redirect for HTML clients, abort for API clients.
        # (default changed from a mutable [] to an immutable tuple)
        if format == "html":
            flash_error(msg, *flash_extras)
            if table:
                return redirect(url_for(".api_query", table=table))
            else:
                return redirect(url_for(".index"))
        else:
            return abort(code, msg % tuple(flash_extras))

    if fields:
        fields = ['id'] + fields.split(DELIM)
    else:
        fields = 3  # projection level 3: all columns, including id

    if sortby:
        sortby = sortby.split(DELIM)

    if offset > 10000:
        return apierror("offset %s too large, please refine your query.",
                        [offset])

    # preparing the actual database query q
    try:
        coll = getattr(db, table)
    except AttributeError:
        return apierror("table %s does not exist", [table], table=False)

    q = {}

    # if id is set, just go and get it, ignore query parameters
    if id is not None:
        if offset:
            return apierror("Cannot include offset with id")
        single_object = True
        api_logger.info("API query: id = '%s', fields = '%s'" % (id, fields))
        if re.match(r'^\d+$', id):
            id = int(id)
        else:
            return apierror("id '%s' must be an integer", [id])
        data = coll.lucky({'id': id}, projection=fields)
        data = [data] if data else []
    else:
        single_object = False
        for qkey, qval in request.args.items():
            try:
                if qkey.startswith("_"):
                    continue
                elif qval.startswith("s"):
                    qval = qval[1:]
                elif qval.startswith("i"):
                    qval = int(qval[1:])
                elif qval.startswith("f"):
                    qval = float(qval[1:])
                elif qval.startswith("ls"):
                    # list of strings
                    # BUGFIX: was qval[2].split(DELIM), which split a single
                    # character instead of the remainder of the value
                    qval = qval[2:].split(DELIM)
                elif qval.startswith("li"):
                    qval = [int(_) for _ in qval[2:].split(DELIM)]
                elif qval.startswith("lf"):
                    qval = [float(_) for _ in qval[2:].split(DELIM)]
                elif qval.startswith("py"):  # literal evaluation
                    qval = literal_eval(qval[2:])
                elif qval.startswith("cs"):  # containing string in list
                    qval = {"$contains": [qval[2:]]}
                elif qval.startswith("ci"):
                    qval = {"$contains": [int(qval[2:])]}
                elif qval.startswith("cf"):
                    # BUGFIX: operator key was "contains", missing the "$"
                    # prefix used by every other containment branch
                    qval = {"$contains": [float(qval[2:])]}
                elif qval.startswith("cpy"):
                    qval = {"$contains": [literal_eval(qval[3:])]}
            except Exception:
                # no suitable conversion for the value, keep it as string
                pass
            # update the query
            q[qkey] = qval

    # assure that one of the keys of the query is indexed
    # however, this doesn't assure that the query will be fast...
    #if q != {} and len(set(q.keys()).intersection(collection_indexed_keys(coll))) == 0:
    #    flash_error("no key in the query %s is indexed.", q)
    #    return redirect(url_for(".api_query", table=table))

    # sort = [('fieldname1', 1 (ascending) or -1 (descending)), ...]
    if sortby is not None:
        sort = []
        for key in sortby:
            if key.startswith("-"):
                sort.append((key[1:], -1))
            else:
                sort.append((key, 1))
    else:
        sort = None

    # executing the query "q" and replacing the _id in the result list
    # So as not to break backwards compatibility (see test_api_usage() test)
    if table == 'ec_curvedata':
        for oldkey, newkey in zip(['label', 'iso', 'number'],
                                  ['Clabel', 'Ciso', 'Cnumber']):
            if oldkey in q:
                q[newkey] = q[oldkey]
                q.pop(oldkey)
    try:
        data = list(coll.search(q, projection=fields, sort=sort,
                                limit=100, offset=offset))
    except QueryCanceledError:
        return apierror("Query %s exceeded time limit.", [q], code=500)
    except KeyError as err:
        return apierror("No key %s in table %s", [err, table])
    except Exception as err:
        return apierror(str(err))

    if single_object and not data:
        return apierror("no document with id %s found in table %s.",
                        [id, table])

    # fixup data for display and json/yaml encoding
    if 'bytea' in coll.col_type.values():
        for row in data:
            for key, val in row.items():
                if type(val) == buffer:
                    row[key] = "[binary data]"
        #data = [ dict([ (key, val if coll.col_type[key] != 'bytea' else "binary data") for key, val in row.items() ]) for row in data]

    data = Json.prep(data)

    # preparing the datastructure
    start = offset
    next_req = dict(request.args)
    next_req["_offset"] = offset
    url_args = next_req.copy()
    query = url_for(".api_query", table=table, **next_req)
    offset += len(data)
    next_req["_offset"] = offset
    nxt = url_for(".api_query", table=table, **next_req)

    # the collected result
    data = {
        "table": table,
        "timestamp": datetime.utcnow().isoformat(),
        "data": data,
        "start": start,
        "offset": offset,
        "query": query,
        "next": nxt,
        "rec_id": 'id' if coll._label_col is None else coll._label_col,
    }

    if format.lower() == "json":
        #return flask.jsonify(**data) # can't handle binary data
        return current_app.response_class(json.dumps(data, indent=2),
                                          mimetype='application/json')
    elif format.lower() == "yaml":
        y = yaml.dump(data,
                      default_flow_style=False,
                      canonical=False,
                      allow_unicode=True)
        return Response(y, mimetype='text/plain')
    else:
        # sort displayed records by key (as jsonify and yaml_dump do)
        data["pretty"] = pretty_document
        location = table
        title = "Database - " + location
        bc = [("Database", url_for(".index")), (table,)]
        query_unquote = unquote(data["query"])
        description = coll.description()
        if description:
            title += " (%s)" % description
        search_schema = [(col, coll.col_type[col])
                         for col in sorted(coll.search_cols)]
        extra_schema = [(col, coll.col_type[col])
                        for col in sorted(coll.extra_cols)]
        return render_template("collection.html",
                               title=title,
                               search_schema={table: search_schema},
                               extra_schema={table: extra_schema},
                               single_object=single_object,
                               query_unquote=query_unquote,
                               url_args=url_args,
                               bread=bc,
                               **data)
def result_response(serializer, data):
    """Construct a flask response wrapping *data* in a ``{"result": ...}``
    envelope serialized by *serializer*."""
    payload = serializer.dumps({"result": data})
    return current_app.response_class(response=payload,
                                      mimetype="application/json")
def invoke(self, request=None, **kwargs):
    """Handle the request: fetch all users through the query bus and
    return them as a JSON response."""
    result = self.query_bus.execute(GetUsersQuery())
    body = json.dumps(result)
    return current_app.response_class(status=200, response=body)
def generic_error_handler(error: int, msg: str):
    """Build a generic error response: *msg* as a JSON body, *error* as
    the HTTP status code."""
    body = json.dumps(msg)
    return current_app.response_class(response=body,
                                      status=error,
                                      mimetype='application/json')
def resp(res):
    """Serialize *res* to JSON and return it as a 200 response."""
    body = json.dumps(res)
    return current_app.response_class(response=body,
                                      status=200,
                                      mimetype='application/json')
def make_json_response(data):
    """Return *data* serialized as JSON in an HTTP 200 response."""
    body = json.dumps(data)
    rv = current_app.response_class(body, mimetype='application/json')
    rv.status_code = 200
    return rv
def stream_pods():
    """Stream pod-watch events to the client as server-sent events."""
    event_source = client.generator_my_pods_watch()
    return current_app.response_class(event_source,
                                      200,
                                      mimetype='text/event-stream',
                                      headers=add_custom_headers())
def jsonify(*args, **kwargs):
    """flask.jsonify lookalike: pretty-print (indent 2) unless the request
    is an XHR, using the project's ``json_dumps`` as fallback encoder."""
    indent = None if request.is_xhr else 2
    body = json.dumps(dict(*args, **kwargs), default=json_dumps, indent=indent)
    return current_app.response_class(body, mimetype='application/json')
def members_excel_export():
    """Export the current community's members as an XLSX attachment.

    Builds a write-only openpyxl workbook with one styled header row and
    one row per membership, auto-sizing column widths, and returns it as
    a download response.
    """
    community = g.community
    attributes = [attrgetter(a) for a in MEMBERS_EXPORT_ATTRS]
    BaseModel = db.Model
    wb = openpyxl.Workbook()
    if wb.worksheets:
        wb.remove_sheet(wb.active)

    ws_title = _("%(community)s members", community=community.name)
    ws_title = ws_title.strip()
    if len(ws_title) > 31:
        # sheet title cannot exceed 31 char. max length
        ws_title = ws_title[:30] + "…"
    ws = wb.create_sheet(title=ws_title)

    row = 0
    cells = []
    cols_width = []
    for _col, label in enumerate(MEMBERS_EXPORT_HEADERS, 1):
        value = text_type(label)
        cell = WriteOnlyCell(ws, value=value)
        cell.font = HEADER_FONT
        cell.alignment = HEADER_ALIGN
        cells.append(cell)
        cols_width.append(len(value) + 1)
    ws.append(cells)

    for membership_info in _members_query().all():
        row += 1
        cells = []
        for col, getter in enumerate(attributes):
            value = None
            try:
                value = getter(membership_info)
            except AttributeError:
                pass
            if isinstance(value, (BaseModel, Role)):
                value = text_type(value)
            cell = WriteOnlyCell(ws, value=value)
            # BUGFIX: was cells.append(value), which discarded the
            # WriteOnlyCell just built (header row appends the cell).
            cells.append(cell)
            # estimate width
            value = text_type(cell.value)
            width = max(len(l) for l in value.split("\n")) + 1
            cols_width[col] = max(width, cols_width[col])
        ws.append(cells)

    # adjust columns width
    MIN_WIDTH = 3
    MAX_WIDTH = openpyxl.utils.units.BASE_COL_WIDTH * 4
    for idx, width in enumerate(cols_width, 1):
        letter = openpyxl.utils.get_column_letter(idx)
        width = min(max(width, MIN_WIDTH), MAX_WIDTH)
        ws.column_dimensions[letter].width = width

    fd = BytesIO()
    wb.save(fd)
    fd.seek(0)

    response = current_app.response_class(fd, mimetype=XLSX_MIME)
    filename = "{}-members-{}.xlsx".format(
        community.slug, strftime("%d:%m:%Y-%H:%M:%S", gmtime())
    )
    response.headers["content-disposition"] = 'attachment;filename="{}"'.format(
        filename
    )
    return response
def chain_raw(chain_id):
    """Return the serialized chain identified by *chain_id* as JSON."""
    _, chain = load_chain(chain_id)
    body = serialize_chain(chain)
    return app.response_class(response=body,
                              status=200,
                              mimetype='application/json')
def send_stream(stream, filename, size, mtime, mimetype=None, restricted=True,
                as_attachment=False, etag=None, content_md5=None,
                chunk_size=None, conditional=True, trusted=False):
    """Send the contents of a file to the client.

    .. warning::

        It is very easy to be exposed to Cross-Site Scripting (XSS) attacks
        if you serve user uploaded files. Here are some recommendations:

        1. Serve user uploaded files from a separate domain
           (not a subdomain). This way a malicious file can only attack
           other user uploaded files.
        2. Prevent the browser from rendering and executing HTML files (by
           setting ``trusted=False``).
        3. Force the browser to download the file as an attachment
           (``as_attachment=True``).

    :param stream: The file stream to send.
    :param filename: The file name.
    :param size: The file size.
    :param mtime: A Unix timestamp that represents last modified time (UTC).
    :param mimetype: The file mimetype. If ``None``, the module will try to
        guess. (Default: ``None``)
    :param restricted: If the file is not restricted, the module will set the
        cache-control. (Default: ``True``)
    :param as_attachment: If the file is an attachment. (Default: ``False``)
    :param etag: If defined, it will be set as HTTP E-Tag.
    :param content_md5: If defined, a HTTP Content-MD5 header will be set.
    :param chunk_size: The chunk size.
    :param conditional: Make the response conditional to the request.
        (Default: ``True``)
    :param trusted: Do not enable this option unless you know what you are
        doing. By default this function will send HTTP headers and MIME types
        that prevents your browser from rendering e.g. a HTML file which could
        contain a malicious script tag. (Default: ``False``)
    :returns: A Flask response instance.
    """
    chunk_size = chunk_size_or_default(chunk_size)

    # Guess mimetype from filename if not provided.
    if mimetype is None and filename:
        mimetype = mimetypes.guess_type(filename)[0]
    if mimetype is None:
        # Unknown type: fall back to a generic binary type so browsers
        # download rather than render.
        mimetype = 'application/octet-stream'

    # Construct headers
    headers = Headers()
    headers['Content-Length'] = size
    if content_md5:
        headers['Content-MD5'] = content_md5

    if not trusted:
        # Sanitize MIME type
        mimetype = sanitize_mimetype(mimetype, filename=filename)
        # See https://www.owasp.org/index.php/OWASP_Secure_Headers_Project
        # Prevent JavaScript execution
        headers['Content-Security-Policy'] = "default-src 'none';"
        # Prevent MIME type sniffing for browser.
        headers['X-Content-Type-Options'] = 'nosniff'
        # Prevent opening of downloaded file by IE
        headers['X-Download-Options'] = 'noopen'
        # Prevent cross domain requests from Flash/Acrobat.
        headers['X-Permitted-Cross-Domain-Policies'] = 'none'
        # Prevent files from being embedded in frame, iframe and object tags.
        headers['X-Frame-Options'] = 'deny'
        # Enable XSS protection (IE, Chrome, Safari)
        headers['X-XSS-Protection'] = '1; mode=block'

    # Force Content-Disposition for application/octet-stream to prevent
    # Content-Type sniffing.
    if as_attachment or mimetype == 'application/octet-stream':
        # See https://github.com/pallets/flask/commit/0049922f2e690a6d
        try:
            # Latin-1-safe filename goes straight into the header.
            filenames = {'filename': filename.encode('latin-1')}
        except UnicodeEncodeError:
            # RFC 5987 encoding for non-Latin-1 names, with an ASCII
            # approximation as a fallback for older clients.
            filenames = {'filename*': "UTF-8''%s" % url_quote(filename)}
            encoded_filename = (unicodedata.normalize('NFKD', filename).encode(
                'latin-1', 'ignore'))
            if encoded_filename:
                filenames['filename'] = encoded_filename
        headers.add('Content-Disposition', 'attachment', **filenames)
    else:
        headers.add('Content-Disposition', 'inline')

    # Construct response object.
    rv = current_app.response_class(
        FileWrapper(stream, buffer_size=chunk_size),
        mimetype=mimetype,
        headers=headers,
        direct_passthrough=True,
    )

    # Set etag if defined
    if etag:
        rv.set_etag(etag)

    # Set last modified time
    if mtime is not None:
        rv.last_modified = int(mtime)

    # Set cache-control
    if not restricted:
        rv.cache_control.public = True
        cache_timeout = current_app.get_send_file_max_age(filename)
        if cache_timeout is not None:
            rv.cache_control.max_age = cache_timeout
            rv.expires = int(time() + cache_timeout)

    if conditional:
        rv = rv.make_conditional(request)
    return rv
def xml_search(*args, **kwargs):
    """Test serializer: return a fixed two-record XML collection."""
    xml_body = ("<collection><record>T1</record>"
                "<record>T2</record></collection>")
    return current_app.response_class(xml_body,
                                      content_type='application/xml')
def json_return(dict_variable):
    """Render *dict_variable* as pretty-printed UTF-8 JSON."""
    body = json.dumps(dict_variable, indent=2, ensure_ascii=False)
    return current_app.response_class(
        body, content_type='application/json; charset=utf-8')
def jsonify(*args, **kwargs):
    """Minimal flask.jsonify replacement built on the project's json_dumps."""
    payload = json_dumps(dict(*args, **kwargs))
    return current_app.response_class(payload, mimetype='application/json')
def mng(method, ext):
    """Dispatch an OLAP request.

    *method* selects the operation: ``info`` (list spaces/measures/dimensions),
    ``search`` (dimension value search), ``drill`` (expand a dimension
    coordinate) or ``dice`` (aggregate measures). *ext* is the requested
    output extension (``json``, ``txt`` or, for dice, ``xlsx``).
    Drill/dice results are cached gzip-compressed, keyed by query + filters.
    """
    # Optional permission hook: restricts drillable coordinates and
    # visible measures per space.
    get_permission = current_app.config.get('MENGER_FILTER')
    filters = []
    msr_perm = {}
    if get_permission:
        perms = get_permission()
        filters = perms.get('drill', [])
        msr_perm = perms.get('measure', {})

    if method == 'info':
        spaces = []
        for space in iter_spaces():
            if space._name in msr_perm:
                # Only expose the measures this user is allowed to see.
                mp = msr_perm[space._name]
                msr_info = [{
                    'name': m.name,
                    'label': m.label,
                } for m in space._measures if m.name in mp]
            else:
                msr_info = [{
                    'name': m.name,
                    'label': m.label,
                } for m in space._measures]
            space_info = {
                'name': space._name,
                'measures': msr_info,
                'dimensions': [{
                    'name': d.name,
                    'label': d.label,
                    'levels': [l.label for l in d.levels.values()],
                } for d in space._dimensions],
                'label': space._label,
            }
            spaces.append(space_info)
        return json.jsonify(spaces=spaces)

    query = expand_query(method)
    spc_name = query.get('space')

    # Not cached to avoid trashing other queries
    if method == 'search':
        with connect(current_app.config['MENGER_DATABASE']):
            spc = get_space(spc_name)
            if not spc:
                return ('space "%s" not found' % spc_name, 404)
            name = query.get('dimension')
            if not hasattr(spc, name):
                return ('space "%s" has not dimension %s' % (spc_name, name),
                        404)
            dim = getattr(spc, name)
            res = list(dim.search(query['value'], int(query['max_depth'])))
            return json.jsonify(data=res)

    # Build unique id for query
    # Cache key covers the query, the extension and the permission filters.
    query_string = json.dumps(sorted(query.items()))
    h = md5(json.dumps(query_string).encode() + ext.encode())
    if filters:
        filters_str = str(sorted(filters)).encode()
        h.update(filters_str)
    qid = h.hexdigest()

    # Return cached value if any
    cached_value = fs_cache.get(qid)
    if cached_value is not None:
        resp = current_app.response_class(mimetype='application/json', )
        accept_encoding = request.headers.get('Accept-Encoding', '')
        if 'gzip' not in accept_encoding.lower():
            # Client can't handle gzip: decompress the cached payload.
            resp.set_data(gzip.decompress(cached_value))
        else:
            # Serve the cached gzip bytes as-is.
            resp.headers['Content-Encoding'] = 'gzip'
            resp.set_data(cached_value)
        return resp

    res = {}
    if method == 'drill':
        with connect(current_app.config['MENGER_DATABASE']):
            spc = get_space(spc_name)
            if not spc:
                return ('space "%s" not found' % spc_name, 404)
            name = query.get('dimension')
            if not hasattr(spc, name):
                return ('space "%s" has not dimension %s' % (spc_name, name),
                        404)
            dim = getattr(spc, name)
            value = tuple(query.get('value', []))
            data = list(dim.drill(value))
            data.extend(dim.aliases(value))
            offset = len(value)
            # Human-readable label for each child coordinate.
            mk_label = lambda x: dim.format(
                value + (x, ), fmt_type='txt', offset=offset)
            res['data'] = [(d, mk_label(d)) for d in data]
    elif method == 'dice':
        with connect(current_app.config['MENGER_DATABASE']):
            # Add user filters to the permission one
            query_filters = query.get('filters', [])
            measures = query['measures']
            spc_name = measures[0].split('.')[0]
            spc = get_space(spc_name)
            for dim_name, filter_val, depth in query_filters:
                dim = getattr(spc, dim_name)
                # Pad with None up to the filter's depth, then glob-expand.
                coord = (None, ) * (depth - 1) + (filter_val, )
                filters.append((dim_name, list(dim.glob(coord))))
            try:
                res = do_dice(query, filters, ext)
            except LimitException as e:
                return json.jsonify(error='Request too big (%s)' % str(e))
        if ext == 'xlsx':
            # Spreadsheet export bypasses the JSON cache entirely.
            output_file = build_xlsx(res)
            attachment_filename = compute_filename(
                current_app.config.get('MENGER_EXPORT_PATTERN', '%Y-%m%d'))
            return send_file(output_file, as_attachment=True,
                             attachment_filename=attachment_filename)
    else:
        return ('Unknown method "%s"' % method, 404)

    if ext not in ('json', 'txt'):
        return 'Unknown extension "%s"' % ext, 404

    # Cache result
    json_res = json.dumps(res)
    zipped_res = gzip.compress(json_res.encode())
    fs_cache.set(qid, zipped_res)

    # Return response
    resp = current_app.response_class(mimetype='application/json', )
    accept_encoding = request.headers.get('Accept-Encoding', '')
    if 'gzip' not in accept_encoding.lower():
        resp.set_data(json_res)
    else:
        resp.headers['Content-Encoding'] = 'gzip'
        resp.set_data(zipped_res)
    return resp
def hello():
    """List the available API endpoints as a small HTML page."""
    body = "<h1>api endpoints</h1><ul><li>api/orders</li><li>api/balance</li></ul>"
    return current_app.response_class(response=body)
def toastr_messages_js():
    """Serve the rendered toastr messages template as JavaScript."""
    script = render_template('toastr_messages.js')
    return current_app.response_class(script,
                                      mimetype='application/javascript')