def check_session_cookie(response: Response) -> Response:
    u'''
    The cookies for auth (auth_tkt) and session (ckan) are separate. This
    checks whether a user is logged in, and determines the validity of the
    session cookie, removing it if necessary.
    '''
    for cookie in request.cookies:
        # Remove the ckan session cookie if logged out.
        if cookie == u'ckan' and not getattr(g, u'user', None):
            # Check session for valid data (including flash messages)
            is_valid_cookie_data = False
            for key, value in session.items():
                if not key.startswith(u'_') and value:
                    is_valid_cookie_data = True
                    break
            if not is_valid_cookie_data:
                if session.id:
                    log.debug(u'No valid session data - deleting session')
                    log.debug(u'Session: %r', session.items())
                    session.delete()
                else:
                    log.debug(u'No session id - deleting session cookie')
                    response.delete_cookie(cookie)
        # Remove auth_tkt repoze.who cookie if user not logged in.
        elif cookie == u'auth_tkt' and not session.id:
            response.delete_cookie(cookie)

    return response

def apply_headers(response: wrappers.Response) -> wrappers.Response:
    """Apply standard and security-related HTTP headers to the response."""
    response.headers["Connection"] = "keep-alive"
    response.headers["Date"] = datetime.now(
        tz=timezone.utc).strftime("%a, %d %b %Y %H:%M:%S GMT")
    response.headers["X-Frame-Options"] = "DENY"
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["X-XSS-Protection"] = "1; mode=block"
    response.headers[
        "Content-Security-Policy"] = "script-src 'self' ; frame-ancestors 'none'"
    response.headers["Strict-Transport-Security"] = "max-age=31536000"
    response.headers["Referrer-Policy"] = "same-origin"
    response.headers["Permissions-Policy"] = ("accelerometer=(), "
                                              "camera=(), "
                                              "geolocation=(), "
                                              "gyroscope=(), "
                                              "magnetometer=(), "
                                              "microphone=(), "
                                              "payment=(), "
                                              "usb=()")
    response.headers["Expect-CT"] = (
        "max-age=0, "
        "report-uri=\"https://wdkro.de/expect-ct/csp-report/create\"")
    return response

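# Usage sketch (assumption, not part of the snippet above): a hook with this
# signature is typically registered as a Flask after-request callback so every
# outgoing response carries these headers. The `app` object and route below
# are illustrative only.
from flask import Flask

app = Flask(__name__)
app.after_request(apply_headers)


@app.route("/")
def index():
    return "ok"
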
def __call__(self, req):
    try:
        if req.path in ('/', '/RPC2', '/server'):
            if req.method == 'POST':
                # XML-RPC
                if req.content_type != 'text/xml':
                    return BadRequest('XML-RPC requests must be text/xml')
                result = self.xmlrpc_dispatcher._marshaled_dispatch(req.data)
                return Response(response=result, content_type='text/xml')
            elif req.method in ('GET', 'HEAD'):
                # XML-RPC docs
                return Response(
                    response=self.xmlrpc_dispatcher.generate_html_documentation(),
                    content_type='text/html')
            else:
                return MethodNotAllowed()
        else:
            (obj, attr), args = self.url_map.bind_to_environ(
                req.environ).match()
            if obj is self.proxy:
                # pseudo-XML-RPC
                result = getattr(obj, attr)(**args)
                return Response(response=repr(result),
                                content_type='text/plain')
            else:
                return getattr(obj, attr)(req, **args)
    except HTTPException as e:
        return e

def list(folderPath, thumbPath=None):
    """
    List videos from disk.

    Parameters:
      folderPath: string
      thumbPath: string
    Returns:
      list: a list of video dicts, e.g.
      [{"url": "url", "thumb": "thumb", "name": "name"}, ...]
    """
    if thumbPath is None:
        thumbPath = folderPath

    # List of video objects to be returned.
    response = []

    absolutoFolderPath = Utils.getServerPath() + folderPath

    # Allowed video MIME types.
    videoTypes = Video.defaultUploadOptions["validetion"]["allowedMimeTypes"]

    # File names in the uploads folder.
    fnames = [f for f in listdir(absolutoFolderPath)
              if isfile(join(absolutoFolderPath, f))]

    for fname in fnames:
        mime = MimeTypes()
        mimeType = mime.guess_type(absolutoFolderPath + fname)[0]
        if mimeType in videoTypes:
            response.append({
                "url": folderPath + fname,
                "thumb": thumbPath + fname,
                "name": fname
            })

    return response

def send_error_response(error: Exception = None, status=None):
    status = status or HTTPStatus.INTERNAL_SERVER_ERROR
    app.logger.exception(error)
    resp = Response(message=str(error), code=status)
    response = jsonify(resp.to_dict())
    response.status_code = status
    return response

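# Note: the `Response` constructed above (message=/code= keyword arguments plus
# a to_dict() method) is evidently a project-specific envelope object, not
# flask.Response. A minimal sketch of such an envelope, purely as an assumption
# about its shape:
from dataclasses import dataclass
from typing import Any, Dict, Optional


@dataclass
class ResponseEnvelope:  # illustrative name; the snippets call it Response
    message: Optional[str] = None
    data: Optional[Dict[str, Any]] = None
    code: int = 200

    def to_dict(self) -> Dict[str, Any]:
        return {"message": self.message, "data": self.data, "code": self.code}
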
def login():
    form = LoginForm()
    if flask.request.method == 'POST':
        with connection.cursor(buffered=True) as cursor:
            try:
                # Use a parameterized query so the email value is escaped by
                # the driver instead of being interpolated into the SQL string.
                results = cursor.execute(
                    "SELECT * FROM account WHERE email = %s",
                    (flask.request.values['email'],),
                    multi=True)
                response = []
                for cur in results:
                    if cur.with_rows:
                        response.append(str(cur.fetchall()))
                return Response(status=200, response=response)
            except Exception as e:
                return Response(status=500, response=e.msg)
    if flask.request.method == 'GET':
        return render_template('login.jinja2',
                               form=form,
                               title='Log in.',
                               template='login-page',
                               body="Log in with your User account.")

def add_product():
    """
    Add new product
    """
    if request.method == 'POST' and request.is_json:
        try:
            req = request.get_json()
            product = product_catalog.Product(name=req['name'],
                                              description=req['description'],
                                              image=req['image'],
                                              labels=req['labels'],
                                              price=req['price'],
                                              created_at=int(time.time()))
            product_id = product_catalog.add_product(product)
            product_img = req['image']
            publish_image(product_id, product_img)
            msg = {
                'result': 'Product added successfully!',
                'product_id': product_id
            }
            response = Response(response=json.dumps(msg),
                                status=201,
                                mimetype='application/json')
            return response
        except Exception:
            msg = {'error': 'Sorry! We were not able to add your product'}
            response = Response(response=json.dumps(msg),
                                status=400,
                                mimetype='application/json')
            return response

def set_cors_headers_for_response(response: Response) -> Response:
    u'''
    Set up Access Control Allow headers if either origin_allow_all is True,
    or the request Origin is in the origin_whitelist.
    '''
    if request.headers.get(u'Origin'):
        cors_origin_allowed: Optional[str] = None
        allow_all = config.get_value(u'ckan.cors.origin_allow_all')
        whitelisted = request.headers.get(u'Origin') in config.get_value(
            u'ckan.cors.origin_whitelist')
        if allow_all:
            cors_origin_allowed = '*'
        elif whitelisted:
            # set var to the origin to allow it.
            cors_origin_allowed = request.headers.get(u'Origin')
        if cors_origin_allowed is not None:
            response.headers['Access-Control-Allow-Origin'] = \
                cors_origin_allowed
            response.headers['Access-Control-Allow-Methods'] = \
                'POST, PUT, GET, DELETE, OPTIONS'
            response.headers['Access-Control-Allow-Headers'] = \
                'X-CKAN-API-KEY, Authorization, Content-Type'

    return response

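# Smoke-test sketch (assumption): with `ckan.cors.origin_allow_all` enabled in
# the CKAN configuration, any request carrying an Origin header should come
# back with the wildcard CORS header. The helper below is illustrative and
# assumes the hook above is already wired into the Flask app.
def _cors_smoke_test(app):
    with app.test_client() as client:
        resp = client.get(u'/', headers={u'Origin': u'https://example.org'})
        assert resp.headers.get(u'Access-Control-Allow-Origin') == u'*'
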
def robots():
    r = Response(
        response=f"User-Agent: *\nDisallow: /\n\nSitemap: https://{domain}/sitemap.xml",
        status=200,
        mimetype="text/plain")
    r.headers["Content-Type"] = "text/plain; charset=utf-8"
    return r

def api_report_raw(idC):
    data = Reporter().reportRawData(idC)
    if data == 404:
        return Response(status=404)
    elif data == 412:
        return Response(status=412)
    return jsonify(data)

def api_report_json(idC):
    summary = Reporter().reportSummary(idC)
    if summary == 404:
        return Response(status=404)
    elif summary == 412:
        return Response(status=412)
    return jsonify(summary)

def frames(img_key):
    frame = redis.get(request.path)
    if frame is None:
        response = Response(f'{img_key} not found', status=404)
    else:
        response = Response(frame, mimetype="image/jpeg")
    return response

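# Producer-side sketch (assumption): the handler above serves whatever JPEG
# bytes are stored in Redis under the request path. A helper like the one
# below could populate those keys; the "/frames/<img_key>" layout and the TTL
# are illustrative guesses, not taken from the original code.
def store_frame(redis_client, img_key, jpeg_bytes, ttl_seconds=60):
    redis_client.set(f"/frames/{img_key}", jpeg_bytes, ex=ttl_seconds)
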
def keywordExtractor(): # { # "complaint":"First, document embeddings are extracted with BERT to get a document-level representation. # Then, word embeddings are extracted for N-gram words/phrases. Finally, we use cosine similarity to find # the words/phrases that are the most similar to the document" # ... any other sent data will be returned back in response as it is # } if request.method == 'POST': data = request.json complaint = data['complaint'] filename =str(uuid.uuid1())+".txt" f = open(filename,"w") f.write(complaint) f.close() # 1. create a MultipartiteRank extractor. extractor = pke.unsupervised.MultipartiteRank() # 2. load the content of the document. extractor.load_document(input=filename) # 3. select the longest sequences of nouns and adjectives, that do # not contain punctuation marks or stopwords as candidates. pos = {'NOUN', 'PROPN', 'ADJ'} stoplist = list(string.punctuation) stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-','sir','please'] stoplist += stopwords.words('english') additional_stoplist = ['a', 'able', 'about', 'above', 'abst', 'accordance', 'according', 'accordingly', 'across', 'act', 'actually', 'added', 'adj', 'affected', 'affecting', 'affects', 'after', 'afterwards', 'again', 'against', 'ah', 'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always', 'am', 'among', 'amongst', 'an', 'and', 'announce', 'another', 'any', 'anybody', 'anyhow', 'anymore', 'anyone', 'anything', 'anyway', 'anyways', 'anywhere', 'apparently', 'approximately', 'are', 'aren', 'arent', 'arise', 'around', 'as', 'aside', 'ask', 'asking', 'at', 'auth', 'available', 'away', 'awfully', 'b', 'back', 'be', 'became', 'because', 'become', 'becomes', 'becoming', 'been', 'before', 'beforehand', 'begin', 'beginning', 'beginnings', 'begins', 'behind', 'being', 'believe', 'below', 'beside', 'besides', 'between', 'beyond', 'biol', 'both', 'brief', 'briefly', 'but', 'by', 'c', 'ca', 'came', 'can', 'cannot', "can't", 'cause', 'causes', 'certain', 'certainly', 'co', 'com', 'come', 'comes', 'contain', 'containing', 'contains', 'could', 'couldnt', 'd', 'date', 'did', "didn't", 'different', 'do', 'does', "doesn't", 'doing', 'done', "don't", 'down', 'downwards', 'due', 'during', 'e', 'each', 'ed', 'edu', 'effect', 'eg', 'eight', 'eighty', 'either', 'else', 'elsewhere', 'end', 'ending', 'enough', 'especially', 'et', 'et-al', 'etc', 'even', 'ever', 'every', 'everybody', 'everyone', 'everything', 'everywhere', 'ex', 'except', 'f', 'far', 'few', 'ff', 'fifth', 'first', 'five', 'fix', 'followed', 'following', 'follows', 'for', 'former', 'formerly', 'forth', 'found', 'four', 'from', 'further', 'furthermore', 'g', 'gave', 'get', 'gets', 'getting', 'give', 'given', 'gives', 'giving', 'go', 'goes', 'gone', 'got', 'gotten', 'h', 'had', 'happens', 'hardly', 'has', "hasn't", 'have', "haven't", 'having', 'he', 'hed', 'hence', 'her', 'here', 'hereafter', 'hereby', 'herein', 'heres', 'hereupon', 'hers', 'herself', 'hes', 'hi', 'hid', 'him', 'himself', 'his', 'hither', 'home', 'how', 'howbeit', 'however', 'hundred', 'i', 'id', 'ie', 'if', "i'll", 'im', 'immediate', 'immediately', 'importance', 'important', 'in', 'inc', 'indeed', 'index', 'information', 'instead', 'into', 'invention', 'inward', 'is', "isn't", 'it', 'itd', "it'll", 'its', 'itself', "i've", 'j', 'just', 'k', 'keep\tkeeps', 'kept', 'kg', 'km', 'know', 'known', 'knows', 'l', 'largely', 'last', 'lately', 'later', 'latter', 'latterly', 'least', 'less', 'lest', 'let', 'lets', 'like', 'liked', 'likely', 'line', 'little', "'ll", 
'look', 'looking', 'looks', 'ltd', 'm', 'made', 'mainly', 'make', 'makes', 'many', 'may', 'maybe', 'me', 'mean', 'means', 'meantime', 'meanwhile', 'merely', 'mg', 'might', 'million', 'miss', 'ml', 'more', 'moreover', 'most', 'mostly', 'mr', 'mrs', 'much', 'mug', 'must', 'my', 'myself', 'n', 'na', 'name', 'namely', 'nay', 'nd', 'near', 'nearly', 'necessarily', 'necessary', 'need', 'needs', 'neither', 'never', 'nevertheless', 'new', 'next', 'nine', 'ninety', 'no', 'nobody', 'non', 'none', 'nonetheless', 'noone', 'nor', 'normally', 'nos', 'not', 'noted', 'nothing', 'now', 'nowhere', 'o', 'obtain', 'obtained', 'obviously', 'of', 'off', 'often', 'oh', 'ok', 'okay', 'old', 'omitted', 'on', 'once', 'one', 'ones', 'only', 'onto', 'or', 'ord', 'other', 'others', 'otherwise', 'ought', 'our', 'ours', 'ourselves', 'out', 'outside', 'over', 'overall', 'owing', 'own', 'p', 'page', 'pages', 'part', 'particular', 'particularly', 'past', 'per', 'perhaps', 'placed', 'please', 'plus', 'poorly', 'possible', 'possibly', 'potentially', 'pp', 'predominantly', 'present', 'previously', 'primarily', 'probably', 'promptly', 'proud', 'provides', 'put', 'q', 'que', 'quickly', 'quite', 'qv', 'r', 'ran', 'rather', 'rd', 're', 'readily', 'really', 'recent', 'recently', 'ref', 'refs', 'regarding', 'regardless', 'regards', 'related', 'relatively', 'research', 'respectively', 'resulted', 'resulting', 'results', 'right', 'run', 's', 'said', 'same', 'saw', 'say', 'saying', 'says', 'sec', 'section', 'see', 'seeing', 'seem', 'seemed', 'seeming', 'seems', 'seen', 'self', 'selves', 'sent', 'seven', 'several', 'shall', 'she', 'shed', "she'll", 'shes', 'should', "shouldn't", 'show', 'showed', 'shown', 'showns', 'shows', 'significant', 'significantly', 'similar', 'similarly', 'since', 'six', 'slightly', 'so', 'some', 'somebody', 'somehow', 'someone', 'somethan', 'something', 'sometime', 'sometimes', 'somewhat', 'somewhere', 'soon', 'sorry', 'specifically', 'specified', 'specify', 'specifying', 'still', 'stop', 'strongly', 'sub', 'substantially', 'successfully', 'such', 'sufficiently', 'suggest', 'sup', 'sure\tt', 'take', 'taken', 'taking', 'tell', 'tends', 'th', 'than', 'thank', 'thanks', 'thanx', 'that', "that'll", 'thats', "that've", 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'thered', 'therefore', 'therein', "there'll", 'thereof', 'therere', 'theres', 'thereto', 'thereupon', "there've", 'these', 'they', 'theyd', "they'll", 'theyre', "they've", 'think', 'this', 'those', 'thou', 'though', 'thoughh', 'thousand', 'throug', 'through', 'throughout', 'thru', 'thus', 'til', 'tip', 'to', 'together', 'too', 'took', 'toward', 'towards', 'tried', 'tries', 'truly', 'try', 'trying', 'ts', 'twice', 'two', 'u', 'un', 'under', 'unfortunately', 'unless', 'unlike', 'unlikely', 'until', 'unto', 'up', 'upon', 'ups', 'us', 'use', 'used', 'useful', 'usefully', 'usefulness', 'uses', 'using', 'usually', 'v', 'value', 'various', "'ve", 'very', 'via', 'viz', 'vol', 'vols', 'vs', 'w', 'want', 'wants', 'was', 'wasnt', 'way', 'we', 'wed', 'welcome', "we'll", 'went', 'were', 'werent', "we've", 'what', 'whatever', "what'll", 'whats', 'when', 'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein', 'wheres', 'whereupon', 'wherever', 'whether', 'which', 'while', 'whim', 'whither', 'who', 'whod', 'whoever', 'whole', "who'll", 'whom', 'whomever', 'whos', 'whose', 'why', 'widely', 'willing', 'wish', 'with', 'within', 'without', 'wont', 'words', 'world', 'would', 'wouldnt', 'www', 'x', 'y', 
'yes', 'yet', 'you', 'youd', "you'll", 'your', 'youre', 'yours', 'yourself', 'yourselves', "you've", 'z', 'zero']
        stoplist += additional_stoplist
        extractor.candidate_selection(pos=pos, stoplist=stoplist)
        # 4. build the Multipartite graph and rank candidates using random walk,
        #    alpha controls the weight adjustment mechanism, see TopicRank for
        #    threshold/method parameters.
        extractor.candidate_weighting(alpha=1.1,
                                      threshold=0.74,
                                      method='average')
        # 5. get the 5 highest-scored candidates as keyphrases
        keyphrases = extractor.get_n_best(n=5)
        keywords = keyBERT_model.extract_keywords(complaint, stop_words=stoplist)
        list_keywords = []
        for i in range(len(keywords)):
            list_keywords.append(keywords[i][0])
        for i in range(len(keyphrases)):
            list_keywords.append(keyphrases[i][0])
        print(list_keywords)
        result = list(set(list_keywords))
        data['keywords'] = result
        response = Response()
        response.content_type = "application/json"
        response.data = json.dumps(data)
        os.remove(filename)
        return response
    return "ONLY POST METHOD IS ALLOWED"

def send_ok_response(msg=None, data: dict = None, code: int = None) -> Response:
    code = code or HTTPStatus.OK
    message = msg or "Ok"
    resp = Response(msg=message, data=data, code=code)
    response = jsonify(resp.to_dict())
    response.status_code = code
    return response

def after_request(response: Response):
    response_data = json.loads(response.get_data())
    if 'ok' not in response_data or response_data['ok']:
        data = dict()
        data['ok'] = True
        data['response'] = response_data
        response.set_data(json.dumps(data, sort_keys=False))
    return response

def annoyTrain():
    if request.method == 'POST':
        # Expected request body, e.g.:
        # [
        #     {
        #         "keyword": "tax",
        #         "departmentName": "Central_Board_of_Direct_Taxes_(Income_Tax)",
        #         "index": 10
        #     },
        #     {
        #         "keyword": "refunds",
        #         "departmentName": "Central_Board_of_Direct_Taxes_(Income_Tax)",
        #         "index": 11
        #     }
        # ]
        data = json.dumps(request.json)
        keyword_set_array = json.loads(data)
        keyword_list = getKeywordList(keyword_set_array)
        embedding_bert = BERT_model.encode(keyword_list)
        treeUnbuild()
        for i in range(len(keyword_set_array)):
            department = keyword_set_array[i]["departmentName"]
            index = int(keyword_set_array[i]["index"])
            if department == "Department_of_Telecommunications":
                tree_Telecom.add_item(index, embedding_bert[i])
            elif department == "Central_Board_of_Direct_Taxes_(Income_Tax)":
                tree_IncomeTax.add_item(index, embedding_bert[i])
            elif department == "Ministry_of_labour_and_Employment":
                tree_Labour.add_item(index, embedding_bert[i])
            elif department == "Department_of_Financial_Services_(Banking_Division)":
                tree_Finance.add_item(index, embedding_bert[i])
            elif department == "Department_of_Ex_Servicemen_Welfare":
                tree_Welfare.add_item(index, embedding_bert[i])
            elif department == "Central_Board_of_Indirect_Taxes_and_Customs":
                tree_IndirectTax.add_item(index, embedding_bert[i])
        treeBuild(20)
        saveAnnoyTrees()
        success = {"status": "success"}
        response = Response()
        response.content_type = "application/json"
        response.data = json.dumps(success)
        response.status_code = 200
        return response

def apply_headers(response: wrappers.Response) -> wrappers.Response:
    """Apply a minimal set of security-related HTTP headers to the response."""
    response.headers["X-Frame-Options"] = "DENY"
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["X-XSS-Protection"] = "1; mode=block"
    response.headers["Content-Security-Policy"] = "script-src 'self' ; frame-ancestors 'none'"
    return response

def _process_error(exception):
    """Decomposition of the webservice fn handler.

    Returns 400 if the exception occurred in the input validation.

    :param exception: The Exception which occurred as a part of processing the request
    :return: a flask Response that more specifically identifies the cause of the problem.
    """
    if "input" in exception.args[0]:
        return Response(response=str(exception), status=400)
    return Response(status=500, response=str(exception))

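# Usage sketch (assumption): a handler like this is normally registered as a
# Flask error handler so uncaught exceptions are turned into HTTP responses.
# The app below is illustrative only.
from flask import Flask

error_demo_app = Flask(__name__)
error_demo_app.register_error_handler(Exception, _process_error)
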
def delete(self):
    """
    Logout user
    """
    resp = Response()
    session_id = request.cookies.get(SESSION_NAME)
    if session_id:
        Db.sessions.remove({'_id': session_id})
        resp.set_cookie(SESSION_NAME, '', expires=0)
    return resp

def send_ok_response(msg=None, data: dict = None, status: int = None) -> Response:
    status = status or HTTPStatus.OK
    msg = msg or "Ok"
    resp = Response(message=msg, data=data, code=status)
    response = jsonify(resp.to_dict())
    response.status_code = status
    return response

def canary_on():
    body = "turn on canary success"
    resp = Response(body)
    resp.set_cookie("canary", "on",
                    max_age=1800,
                    path="/",
                    secure=False,
                    httponly=True)
    return resp

def delete_user(user_name):
    if g.user.access_id == 1:
        user = User.query.filter_by(user_name=user_name).first()
        if user is None:
            return Response('404: user not found', status=404)
        else:
            db.session.delete(user)
            db.session.commit()
            return Response('200: user deleted', status=200)
    else:
        return Response('401: unauthorized user', status=401)

def api_logger():
    if 'Content-Type' in request.headers.keys():
        if request.headers['Content-Type'] == 'application/x-www-form-urlencoded':
            co.saveLog(request.form, "TEST")
            return Response(status=200)
        else:
            return Response(status=400)
    else:
        return Response(status=400)

def work_cancel(self, action_instance_id):
    pid_file = StoreService.check_for_pidfile(action_instance_id)
    if pid_file is not None:
        try:
            base_name = basename(pid_file)
            psutil.Process(int(base_name.split('-')[-1])).kill()
            return Response(json.dumps({"message": "Killed process."}), 200)
        except Exception:
            pass
    return Response(json.dumps({"message": "Failed to kill process"}), 501)

def send_message():
    data = request.json
    if not isinstance(data, dict):
        # Serialize the error body explicitly; a bare dict is not a valid
        # Response body.
        return Response(json.dumps({'error': 'not JSON'}), 400,
                        mimetype='application/json')
    text = data.get('text')
    author = data.get('author')
    if isinstance(text, str) and isinstance(author, str):
        db.append({'text': text, 'author': author, 'time': time.time()})
        return Response('ok')
    else:
        return Response('wrong format', 400)

def set_response_headers(resp: Response,
                         ct: str = "application/ld+json",
                         headers: List[Dict[str, Any]] = [],
                         status_code: int = 200) -> Response:
    """Set the response headers."""
    resp.status_code = status_code
    for header in headers:
        key = list(header.keys())[0]
        resp.headers[key] = header[key]
    resp.headers['Content-type'] = ct
    resp.headers['Link'] = '<' + get_hydrus_server_url() + \
        get_api_name() + '/vocab>; rel="http://www.w3.org/ns/hydra/core#apiDocumentation"'
    return resp

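# Usage sketch (assumption): callers pass any extra headers as a list of
# single-key dicts. The header values below are illustrative; the snippet
# itself relies on the hydrus helpers get_hydrus_server_url() and
# get_api_name() being importable in the same module.
from flask import Response

demo_resp = set_response_headers(
    Response(),
    headers=[{"Location": "/api/Item/1"}, {"Cache-Control": "no-store"}],
    status_code=201,
)
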
def sales_route():
    if request.method == "GET":
        return jsonify(sales.get_list_sales())
    elif request.method == "POST":
        data = json.loads(request.data)
        vals = (data["saleID"], data["custID"], data["emplID"],
                data["prodID"], data["num"], data["date"], 0)
        success = sales.into_sales(vals)
        if success:
            return Response(status=204)
        else:
            return Response(status=500)

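# Payload sketch (assumption): a POST body needs the keys unpacked above; the
# values are illustrative only, and the trailing 0 is supplied by the handler.
example_sale = {
    "saleID": 1001,
    "custID": 7,
    "emplID": 3,
    "prodID": 42,
    "num": 2,
    "date": "2021-06-01",
}
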
def post(self):
    # Utils.getLogger(__name__).debug("Request received at /csvServices/storePassagem. Header: " + request.headers['Content-Type'])
    if request.headers['Content-Type'] != "application/json":
        return Response(status=415)  # Unsupported Media Type

    jsonContent = request.json
    if (jsonContent):
        passagensStorage = PassagensStorage()
        passagensStorage.validateAndInsert(jsonContent)
        resp = Response(status=201)  # Created
        resp.headers['Content-Type'] = 'application/json'
        return resp

    return Response(status=422)  # Unprocessable Entity

def suggest_items():
    """
    Get spelling suggestions for a given search term.
    """
    if request.method == "POST":
        # Post request
        payload = request.get_json(silent=True)
        if not payload:
            # Bad payload, return with user error
            return Response(
                "Missing or ill-formatted query. For POST requests, payload of "
                "format {'query': '<word>'} and correct headers "
                "('Content-Type': 'application/json') are required.", 400)
    else:
        # Get request
        payload = request.args

    # Get parameters of request
    query = payload.get("query") or payload.get("q")
    mixin = payload.get("mixin")
    max_hits = payload.get("max")
    include_products = payload.get(
        "products") == 'true' if "products" in payload else False

    if not query:
        return Response("Empty query", 204)

    # Get search results
    max_h = max_hits or 10
    search_response = search_term(query, max_hits=max_h, mixin=mixin)

    # Do spelling check
    # suggestions = []
    suggestions = spelling_suggestions(query, search_response, best_guess=False)
    spelling_correct = len(suggestions) == 0

    # Format response
    if include_products:
        response = {
            'results': search_response,
            'spelling_suggestions': suggestions,
            'spelling_correct': spelling_correct
        }
    else:
        response = {
            'spelling_suggestions': suggestions,
            'spelling_correct': spelling_correct
        }
    response['metadata'] = {'request': request.url, 'version': api_version}

    return Response(json.dumps(response), 200, headers=json_headers)

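# Request sketch (assumption): the endpoint accepts either GET query strings or
# a JSON POST body of the form {"query": "<word>"}. The "/suggest" route and
# the test-client wrapper below are illustrative, not taken from the original.
def _suggest_examples(app):
    with app.test_client() as client:
        get_resp = client.get("/suggest?query=trouserz&max=5&products=true")
        post_resp = client.post("/suggest", json={"query": "trouserz"})
        return get_resp, post_resp
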
def post(self):
    jsonContent = request.json
    # Check whether the JSON contains the file name and data.
    if not ((jsonContent) and jsonContent.get("extArquivo")
            and jsonContent.get("dadosArquivo")):
        return Response(status=400)
    try:
        submissionCode = self.vparManager.submitImage(
            jsonContent.get("extArquivo"), jsonContent.get("dadosArquivo"))
        return jsonify(codigoLeituraOCR=submissionCode)
    except Exception:
        OCRUtils.getLogger(__name__).debug(
            '*** Exception in the POST method of ABOCRServices ***')
        return Response(status=500)

def search():
    """
    receive as GET parameters:
    eg: /search?count=10&radius=500&lat=59.33258&lng=18.0649&tags=trousers%2Cshirts
    count: limit the search results
    radius: radius of the search in meters
    lat: global latitude
    lng: global longitude
    tags: tags separated by comma
    """
    if 'tags' in request.args and request.args['tags']:
        tags = from_csv('tags', tag_exists, request.args['tags'])
        taggings = from_csv('taggings', taggings_exists, tags)
        shops = from_csv('shops', shop_in_radius_with_taggings, {
            "geo_args": request.args,
            "taggings": taggings
        })
    else:
        shops = from_csv('shops', shop_in_radius, request.args)

    products = from_csv('products', products_in_shops, shops)

    # Sort by popularity.
    products = sorted(products, key=lambda k: k['popularity'], reverse=True)

    # Limit the results.
    products = products[:int(request.args['count'])]

    # The client turned out to need the shop's information, so expose it under an object key.
    products = [transform_id_to_object_key(p, 'shop') for p in products]

    response = json.dumps({'products': products}, ensure_ascii=False)
    response = Response(response=response)

    # Allow access from any other host for now - later we discuss security for this.
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Content-Type'] = 'application/json; charset=utf-8'
    response.headers['mimetype'] = 'application/json'
    return response

def prepare_response(data=None, code=200) -> Response:
    """
    Sets the necessary headers and status code on the response

    :param data: The data to be returned to the client
    :param code: the status code. 200 by default
    :return: The Response object with the headers set according to the input data
    """
    response = Response()
    headers = response.headers
    if _is_allowed_origin():
        headers['Access-Control-Allow-Origin'] = request.headers['Origin']
        headers['Access-Control-Allow-Methods'] = "GET,POST,PUT,DELETE,OPTIONS"
        headers['Access-Control-Allow-Headers'] = "Content-Type, Authorization, X-Requested-With"
        headers['Access-Control-Allow-Credentials'] = "true"
        headers['Access-Control-Max-Age'] = 1000
    if data is not None:
        if isinstance(data, (dict, list)):
            response.set_data(json.dumps(data))
            headers['Content-Type'] = 'application/json'
        else:
            response.set_data(data)
            headers['Content-Type'] = 'text/plain; charset=utf-8'
    response.status_code = code
    return response

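# Usage sketch (assumption): views return prepare_response() so that CORS and
# content-type handling stays in one place. The app and route are illustrative.
from flask import Flask

cors_demo_app = Flask(__name__)


@cors_demo_app.route("/api/widgets")
def list_widgets():
    return prepare_response({"widgets": []})
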
def api_gists():
    """Gists Endpoint"""
    if request.method == 'GET':
        gists = GistModel.query().fetch(20)
        resp = Response(json.dumps(gists, cls=NDBModelEncoder),
                        mimetype='application/json',
                        status=200)
        return resp
    elif request.method == 'POST':
        # data = request.get_json(force=True)
        gist = GistModel()
        gist.added_by = request.form['nick']
        gist.gist = request.form['gist']
        # Use .get() so a missing file field does not raise a KeyError.
        if request.files.get('img') is not None:
            gcs_filename = BUCKET + '/blobstore_demo'
            blob_key = CreateFile(gcs_filename)
            gist.image = blob_key
        key = gist.put()
        resp = jsonify(dict(gist.to_dict(), **{'id': key.urlsafe()}))
        resp.status_code = 201
        return resp
    else:
        raise RuntimeError("Unimplemented HTTP method")

def checkin():
    if request.method == 'POST':
        if is_legacy():
            client_data = dict(request.form.items())
            if 'affectedStalls' in request.form:
                client_data['affectedStalls'] = request.form.getlist('affectedStalls')
            else:
                client_data['affectedStalls'] = []
            # should be the same as the hard coded list of select stalls in form.html
            client_data['stalls'] = legacy_nof_stalls
        else:
            client_data = request.get_json(force=True)

        if not all(k in client_data for k in ('time', 'locationId', 'stalls', 'problem', 'affectedStalls',
                                              #'charging', 'blocked', 'waiting',
                                              'tffUserId', 'notes')):
            raise InvalidAPIUsage("Invalid data received", status_code=400)

        validated_data = {}
        for k in ['stalls']:
            validated_data[k] = validate_int(client_data[k])
        validated_data['problem'] = validate_str(
            client_data['problem'],
            valid_values=['none', 'limitedPower', 'partialFailure',
                          'completeFailure', 'trafficDisruption'])
        validated_data['affectedStalls'] = validate_list(
            client_data['affectedStalls'],
            generate_stall_names(validated_data['stalls']))

        # optional values
        for k in ['charging', 'blocked', 'waiting']:
            if k in client_data and client_data[k]:
                validated_data[k] = validate_int(client_data[k])
            else:
                validated_data[k] = None
        validated_data['notes'] = validate_str(client_data['notes'])
        validated_data['tffUserId'] = validate_str(client_data['tffUserId'])

        location = validate_location(client_data['locationId'])
        for k in ['charging', 'blocked']:
            if validated_data[k] and validated_data[k] > location['stalls']:
                raise InvalidAPIUsage("Charging/blocked cannot be larger than stalls",
                                      status_code=400)

        submission = {
            'suc': {
                'locationId': location['locationId'],
                'title': location['title'],
                'country': location['country'],
                'stalls': location['stalls'],
                'loc': location['loc'],
            },
            'submitter': {
                'userAgent': request.headers.get('User-Agent'),
                'ip': request.remote_addr,
                'time': tz_utc.localize(datetime.datetime.utcnow()),
                'tffUserId': validated_data['tffUserId'],
            },
            'checkin': {
                'time': validate_date(client_data['time']),
                'charging': validated_data['charging'],
                'blocked': validated_data['blocked'],
                'waiting': validated_data['waiting'],
                'problem': validated_data['problem'],
                'affectedStalls': validated_data['affectedStalls'],
                'notes': validated_data['notes'],
            },
        }
        checkin_collection.insert(submission)

        if is_legacy():
            s = "Checkin Nr. %d added, thank you." % checkin_collection.count()
            return redirect('/?legacy=true&msg=' + urllib.parse.quote_plus(s))
        else:
            return jsonify({'error': None})
    else:
        query_param = request.args.get('filter', None)
        format = request.args.get('format', None)
        limit = request.args.get('limit', None)
        if query_param and len(query_param) > 0:
            query = {'suc.title': {'$regex': re.escape(query_param), '$options': '-i'}}
        else:
            query = {}
        res = checkin_collection.find(query, {'_id': False}).sort('checkin.time', -1)
        if limit:
            res = res.limit(validate_int(limit))
        results = [c for c in res]
        if format == 'csv':
            response = Response(
                convert_to_csv([{'locationId': r['suc']['locationId'],
                                 'stalls': r['suc']['stalls'],
                                 'time': r['checkin']['time'],
                                 'charging': r['checkin']['charging'],
                                 'blocked': r['checkin']['blocked'],
                                 'waiting': r['checkin']['waiting']}
                                for r in results]),
                mimetype='application/csv')
            response.headers["Content-Disposition"] = "attachment; filename=checkins.csv"
            return response
        else:
            return jsonify(results)

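# Payload sketch (assumption): a non-legacy JSON check-in must contain every
# key checked by the `all(...)` guard above; charging/blocked/waiting are
# optional. All values below are illustrative, including the timestamp format,
# which depends on what validate_date() accepts.
example_checkin = {
    "time": "2021-06-01T12:34:56Z",
    "locationId": "exampletown",
    "stalls": 12,
    "problem": "none",
    "affectedStalls": [],
    "charging": 3,
    "blocked": 0,
    "waiting": 0,
    "tffUserId": "user-123",
    "notes": "",
}
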