def thumbnail(request):
    matchdict = request.matchdict
    photo_album_id = matchdict['photo_album_id']
    photo_id = matchdict['photo_id']
    photo_album = models.PhotoAlbum.objects.with_id(photo_album_id)
    photo = photo_album.get_photo(photo_id)
    extension = photo.image.filename[photo.image.filename.rfind('.') + 1:]
    # fall back to the full-size image if no thumbnail was generated
    image = photo.image.thumbnail or photo.image
    response = Response()
    if extension.lower() in ['jpg', 'jpeg']:
        response.content_type = 'image/jpeg'
    elif extension.lower() in ['png']:
        response.content_type = 'image/png'
    img = Image.open(image)
    img_format = img.format
    if photo.orientation == 'vertical':
        img = img.transpose(Image.ROTATE_90)
    tmp_img = tempfile.TemporaryFile()
    img.save(tmp_img, format=img_format)
    tmp_img.seek(0)
    response.body_file = tmp_img
    return response
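# A minimal sketch (not from the original view) of the same PIL-to-Response
# step using an in-memory buffer instead of tempfile.TemporaryFile();
# `img` and `img_format` are assumed to come from the code above.
import io

def image_body_file(img, img_format):
    buf = io.BytesIO()
    img.save(buf, format=img_format)  # PIL writes the encoded image into the buffer
    buf.seek(0)
    return buf  # suitable for response.body_file, as in thumbnail()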
def test_json_xsrf(self):
    def json_response(string_value):
        resp = Response(string_value)
        resp.status = 200
        resp.content_type = 'application/json'
        filter_json_xsrf(resp)

    # a view returning a vulnerable json response should issue a warning
    for value in [
        '["value1", "value2"]',       # json array
        ' \n ["value1", "value2"] ',  # may include whitespace
        '"value"',                    # strings may contain nasty characters in UTF-7
    ]:
        json_response(value)
        assert len(self.get_logs()) == 1, "Expected warning: %s" % value

    # a view returning a safe json response should not issue a warning
    for value in [
        '{"value1": "value2"}',       # json object
        ' \n {"value1": "value2"} ',  # may include whitespace
        'true', 'false', 'null',      # primitives
        '123', '-123', '0.123',       # numbers
    ]:
        json_response(value)
        assert len(self.get_logs()) == 0, "Unexpected warning: %s" % value
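# A minimal sketch of the kind of check these tests exercise; this is an
# illustration, not the actual filter_json_xsrf implementation.
def looks_xsrf_vulnerable(body):
    stripped = body.strip()
    # Top-level arrays and bare strings can be hijacked via cross-site
    # <script> inclusion in old browsers; objects and primitives cannot.
    return stripped.startswith('[') or stripped.startswith('"')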
def download_view(context, request):
    response = Response(context.data)
    filename = context.frs.basename(context.vpath)
    mt, encoding = mimetypes.guess_type(filename)
    if isinstance(context, Page):
        response.content_type = 'text/html'  # mt or 'text/plain'
    else:
        response.content_type = mt or 'text/plain'
    return response
def getResponse(self, reqPath):
    with open(reqPath, "rb") as fp:
        ret = Response(body=fp.read())
    if reqPath.endswith(".js"):
        ret.content_type = "text/javascript"
    if reqPath.endswith(".css"):
        ret.content_type = "text/css"
    if reqPath.endswith(".ico"):
        ret.content_type = "image/x-icon"
    self.log.info("Request for URL %s, inferred MIME type %s", reqPath, ret.content_type)
    return ret
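# A hedged alternative to the chained endswith() checks above: the stdlib
# mimetypes module can infer the Content-Type from the path (the fallback
# default here is an assumption, not from the original).
import mimetypes

def inferred_content_type(reqPath):
    mt, _encoding = mimetypes.guess_type(reqPath)
    return mt or 'application/octet-stream'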
def test_json_xsrf(self):
    # a view returning a json list should issue a warning
    resp = Response(json.dumps(('value1', 'value2')))
    resp.status = 200
    resp.content_type = 'application/json'
    filter_json_xsrf(resp)
    self.assertEquals(len(self.get_logs()), 1)

    # json lists can also start and end with spaces
    resp = Response(" ('value1', 'value2') ")
    resp.status = 200
    resp.content_type = 'application/json'
    filter_json_xsrf(resp)
    self.assertEquals(len(self.get_logs()), 1)
def export(self, extm, params, req):
    csv_dialect = params.pop('csv_dialect', 'excel')
    csv_encoding = params.pop('csv_encoding', 'utf_8')
    fields = []
    for field in extm.export_view:
        if isinstance(field, PseudoColumn):
            continue
        fields.append(field)
    if csv_encoding not in _encodings:
        raise ValueError('Unknown encoding specified')
    res = Response()
    loc = get_localizer(req)
    now = datetime.datetime.now()
    res.last_modified = now
    if csv_dialect in ('excel', 'excel-tab'):
        res.content_type = 'application/vnd.ms-excel'
    else:
        res.content_type = 'text/csv'
    res.charset = _encodings[csv_encoding][0]
    res.cache_control.no_cache = True
    res.cache_control.no_store = True
    res.cache_control.private = True
    res.cache_control.must_revalidate = True
    res.headerlist.append(('X-Frame-Options', 'SAMEORIGIN'))
    if PY3:
        res.content_disposition = \
            'attachment; filename*=UTF-8\'\'%s-%s.csv' % (
                urllib.parse.quote(loc.translate(extm.menu_name), ''),
                now.date().isoformat()
            )
    else:
        res.content_disposition = \
            'attachment; filename*=UTF-8\'\'%s-%s.csv' % (
                urllib.quote(loc.translate(extm.menu_name).encode(), ''),
                now.date().isoformat()
            )
    for prop in ('__page', '__start', '__limit'):
        if prop in params:
            del params[prop]
    data = extm.read(params, req)['records']
    res.app_iter = csv_generator(
        data, fields, csv_dialect,
        encoding=csv_encoding, localizer=loc, model=extm
    )
    return res
def show_img(request):
    '''Display an image.'''
    file_name = request.matchdict['file_name']
    md = model(request.db)
    file = md.get_file(file_name)
    response = Response()
    if file is not None:
        response.content_type = file.content_type
        response.app_iter = file
    else:
        response.content_type = 'image/jpeg'
        response.app_iter = open('nopict.jpg', 'rb')
    return response
def api(request):
    try:
        # Parse the request
        params = _parse_request(request)
        # Remove auth stuff
        del params["sid"]
        del params["token"]
        # Perform the lookup
        lookdown_result = _lookdown(VENDORS, **params)
        if lookdown_result is None:
            response = "[]"
        else:
            response = {"data": lookdown_result}
        result = Response(json.dumps(response))
    except HTTPException as exc:
        result = exc
        result.text = exc.detail
    except Exception:
        log.error("Server Error", exc_info=True)
        result = Response("Server error", status=500)
    result.content_type = "application/json"
    return result
def getContextAvatar(context, request):
    """ """
    urlHash = request.matchdict['urlHash']
    AVATAR_FOLDER = request.registry.settings.get('avatar_folder')
    context_image_filename = '%s/%s.jpg' % (AVATAR_FOLDER, urlHash)

    if not os.path.exists(context_image_filename):
        mmdb = MADMaxDB(context.db)
        found_context = mmdb.contexts.getItemsByurlHash(urlHash)
        if len(found_context) > 0:
            twitter_username = found_context[0]['twitterUsername']
            downloadTwitterUserImage(twitter_username, context_image_filename)

    if os.path.exists(context_image_filename):
        filename = urlHash
        # Calculate time since the last download and decide whether to re-download
        modification_time = os.path.getmtime(context_image_filename)
        hours_since_last_modification = (time.time() - modification_time) / 60 / 60
        if hours_since_last_modification > 3:
            mmdb = MADMaxDB(context.db)
            found_context = mmdb.contexts.getItemsByurlHash(urlHash)
            twitter_username = found_context[0]['twitterUsername']
            downloadTwitterUserImage(twitter_username, context_image_filename)
    else:
        context_image_filename = '%s/missing.jpg' % (AVATAR_FOLDER)

    data = open(context_image_filename).read()
    image = Response(data, status_int=200)
    image.content_type = 'image/jpeg'
    return image
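# The staleness test above, factored into a helper for clarity; this is a
# sketch, not part of the original module.
import os
import time

def hours_since_modified(path):
    return (time.time() - os.path.getmtime(path)) / 3600.0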
def state_post(context, request):
    id = request.matchdict["id"]
    object_type = request.POST.get('type', 'string')
    operation_type = request.POST.get("op", "set")
    parameters = {}
    for k, v in request.POST.iteritems():
        if k not in ["op", "type"]:
            parameters[k] = v

    # Create redis client
    client = redisconnect.connect()

    # Create top level state object
    st = state.State("user", client)
    obj = st.get_or_create(id, object_type)
    obj.op(operation_type, **parameters)

    resp = Response()
    resp.content_type = "text/plain"
    resp.text = u"OK"  # return the old value of the key
    return resp
def render_to_response(self, ctx, req):
    res = Response(self.render(ctx, req))
    res.vary = 'Accept'
    res.content_type = self.send_mimetype or self.mimetype
    if self.charset:
        res.content_type += '; charset=%s' % self.charset
    return res
def translation_template(request):
    resp = Response()
    resp.content_disposition = 'attachment; filename=oscad.pot'
    resp.app_iter = resource_stream('oscad', 'locale/oscad.pot')
    # otherwise Firefox thinks it's a PowerPoint file
    resp.content_type = 'text/plain'
    return resp
def structured_text_view(context, request):
    """ Filesystem-based STX view """
    result = stx2html(context.source)
    response = Response(result)
    response.content_type = 'text/html'
    return response
def download_peopledirectory_xml(context, request):
    response = Response(dump_peopledir(context))
    response.content_type = 'application/xml'
    # suggest a filename based on the report name
    response.headers.add('Content-Disposition',
                         'attachment;filename=%s.xml' % str(context.__name__))
    return response
def gpxprocess(request):
    class trkpt:
        def __init__(self, latitude, longitude):
            self.latitude = latitude
            self.longitude = longitude

    trkptlist = list()
    gpx_ns = "http://www.topografix.com/GPX/1/1"
    filename = request.POST['gpx'].filename
    input_file = request.POST['gpx'].file
    root = etree.parse(input_file).getroot()
    trackSegments = root.getiterator("{%s}trkseg" % gpx_ns)
    for trackSegment in trackSegments:
        for trackPoint in trackSegment:
            lat = trackPoint.attrib['lat']
            lon = trackPoint.attrib['lon']
            new_trkpt = trkpt(lat, lon)
            trkptlist.append(new_trkpt)
    reduced_trkpts = reduce_trackpoints(trkptlist)
    json_string = create_json_for_db(reduced_trkpts)
    track = Track(timetools.now(), len(trkptlist), 0, '00:00:00',
                  None, None, None, None, None, None, None, json_string)
    DBSession.add(track)
    DBSession.flush()
    #raise HTTPFound(request.route_url('track', 'fromgpx', track.id))
    route = request.route_url('track', 'fromgpx', track.id)
    response = Response('<a href="%s">%s</a>' % (route, route))
    response.content_type = 'text/html'
    return response
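# For reference, the GPX fragment this view expects looks roughly like this
# (namespace per gpx_ns above; the coordinates are made up):
SAMPLE_GPX = '''<gpx xmlns="http://www.topografix.com/GPX/1/1">
  <trk><trkseg>
    <trkpt lat="47.07" lon="15.43"/>
    <trkpt lat="47.08" lon="15.44"/>
  </trkseg></trk>
</gpx>'''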
def plot(request):
    """
    http://stackoverflow.com/a/5515994/185820
    """
    import cStringIO
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    x, y = 4, 4
    qs = parse_qs(request.query_string)
    if 'x' in qs:
        x = int(qs['x'][0])
    if 'y' in qs:
        y = int(qs['y'][0])

    fig = Figure(figsize=[x, y])
    ax = fig.add_axes([.1, .1, .8, .8])
    ax.scatter([1, 2], [3, 4])
    canvas = FigureCanvasAgg(fig)

    # write image data to a string buffer and get the PNG image bytes
    buf = cStringIO.StringIO()
    canvas.print_png(buf)
    data = buf.getvalue()

    # write image bytes back to the browser
    response = Response(data)
    response.content_type = 'image/png'
    response.content_length = len(data)
    return response
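# cStringIO is Python 2 only; a minimal Python 3 sketch of the same buffer
# step, assuming the same FigureCanvasAgg setup as above.
import io

def png_bytes(canvas):
    buf = io.BytesIO()
    canvas.print_png(buf)  # FigureCanvasAgg writes the PNG into the buffer
    return buf.getvalue()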
def export(request):
    dbsession = DBSession()
    root = dbsession.query(MyModel).filter(MyModel.name == u'root').first()
    auth = _get_basicauth_credentials(request)
    data = parseData(folderbase=REDIRECTIONS_PATH)
    entorns = [dict(id=int(a), entorn=data['entorns'][a]) for a in data['entorns'].keys()]
    instances = [data['instancies'][a] for a in data['instancies'].keys()]
    json_data_list = []
    for ins in instances:
        if len(ins['urls']) > 0:
            url = ins['urls'][0]['gwurl'].replace('https', 'http')
            json_data = dict(url=url,
                             zeoport=ins['zeoport'],
                             debugport=ins['debugport'],
                             mountpoint=ins['mountpoint'],
                             plonesite=ins['plonesite'],
                             title=ins['title'],
                             entorn=ins['entorn'],
                             )
            json_data_list.append(json_data)
    response = Response(json.dumps(json_data_list))
    response.content_type = 'application/json'
    return response
def __call__(self):
    # find the filename, css files, and format they wanted:
    #   filename = params['filename'] || 'RegionReport'
    #   format = params['format'] || 'html'
    #   css_to_include = (params['css'] && params['css'].split(',')) || []

    # grab some params
    filename = self.request.params.get('filename', 'RegionReport')
    css_inclusions = self.request.params.get('css', '')
    css_inclusions = css_inclusions.split(',')

    # start our response
    response = Response()
    # use this to write body content to (better than a giant memory-hogging string)
    body = response.body_file

    # tell the client this is a downloadable html file
    response.content_type = 'application/octet-stream'
    response.content_disposition = 'attachment; filename="' + filename + '.html"'
    response.headers['Content-Description'] = 'File Transfer'  # unnecessary?
    # don't cache this file
    response.cache_expires(0)  # sets various cache headers

    # now start filling out the body
    body.write("<html><head>\n")

    # add in the css they wanted
    for css in css_inclusions:
        # skip a blank css file (from splitting a blank string, for example)
        if len(css) == 0:
            continue
        # throw away the path in case we're being hacked
        css_file = os.path.join(
            os.path.dirname(__file__), '..', 'static', 'css',
            # also replace the extension with .css coz SECURITAY
            os.path.splitext(os.path.basename(css))[0] + '.css'
        )
        css_content = ''
        try:
            with open(css_file) as f:
                css_content = f.read()
        except IOError:
            css_content = '/* could not load "' + cgi.escape(css, True) + '" */'
        body.write("<style>" + css_content + "</style>\n")

    content = self.request.params.get('content', '(no content was supplied)')
    content = content.replace('<img src="/', '<img src="' + self.request.route_url('home'))

    body.write("</head><body><div id='report'>\n")
    body.write(content)
    body.write("\n</div></body></html>\n")
    return response
def process_upload(self):
    """
    Process a single upload.

    Also see: https://github.com/valums/file-uploader/blob/master/server/readme.md

    :result: Status object with URL of the created item (on success)
             or error message on failure.
    :rtype: dict
    """
    fs = self.request.POST["qqfile"]
    # We can fail hard, as somebody is trying to cheat on us if that fails.
    assert isinstance(fs, FieldStorage)
    try:
        factory = self.factory_by_name(self.request.POST["content_type"])
    except KeyError as e:
        result = {"success": False, "error": e.message}
    else:
        name = title_to_name(fs.filename, blacklist=self.context.keys())
        self.context[name] = node = factory.from_field_storage(fs)
        node.title = fs.filename
        result = {"success": True, "url": self.request.resource_url(node)}

    # FineUploader expects JSON with Content-Type 'text/plain'
    response = Response(json.dumps(result))
    response.content_type = "text/plain"
    return response
def response(self, request, error):
    """
    Render an API Response

    Create a Response object, similar to the JSONP renderer
    [TODO: re-factor in to the JSONP renderer]

    Return the Response object with the appropriate error code
    """
    jsonp_render = request.registry._jsonp_render
    default = jsonp_render._make_default(request)
    val = self.serializer(self.envelope(success=False, error=error.error),
                          default=default, **jsonp_render.kw)
    callback = request.GET.get(jsonp_render.param_name)

    response = Response("", status=200)  # API Error code is always 200
    if callback is None:
        ct = 'application/json'
        response.status = error.code
        response.body = val
    else:
        ct = 'application/javascript'
        response.text = '%s(%s)' % (callback, val)

    if response.content_type == response.default_content_type:
        response.content_type = ct
    return response
def buildResponse(self, payload=None):
    """ """
    data = self.data if payload is None else payload
    response = Response(data, status_int=self.status_code)
    response.content_type = self.response_content_type
    return response
def getBadges(req):
    '''Retrieve a user's badge information'''
    key = db.get(req.matchdict['user'])
    res = Response(status=404)
    if key.exists:
        # generate proper response body
        body = None
        if 'type' not in req.matchdict:
            body = {'badges': key.data['badges']}
        elif req.matchdict['type'] in ['achieved', 'inprogress', 'desired']:
            badgeType = 'inProgress' if req.matchdict['type'] == 'inprogress' else req.matchdict['type']
            body = {badgeType: key.data['badges'][badgeType]}
        if body is not None:
            hash = util.genETag(body)
            if_none_match = req.headers.get('If-None-Match')
            if if_none_match not in ['*', hash]:
                res.status = 200
                res.content_type = 'application/json'
                res.headers['ETag'] = hash
                res.json = body
            else:
                res.status = 304
    return res
def getContextAvatar(context, request):
    """
    Get context avatar

    To date, this is only implemented to work integrated with Twitter.
    """
    chash = context['hash']
    twitter_username = context['twitterUsername']
    base_folder = request.registry.settings.get('avatar_folder')
    avatar_folder = get_avatar_folder(base_folder, 'contexts', chash)
    context_image_filename = '%s/%s' % (avatar_folder, chash)
    api = get_twitter_api(request.registry)

    if not os.path.exists(context_image_filename):
        download_twitter_user_image(api, twitter_username, context_image_filename)

    if os.path.exists(context_image_filename):
        # Calculate time since the last download and decide whether to re-download
        modification_time = os.path.getmtime(context_image_filename)
        hours_since_last_modification = (time.time() - modification_time) / 60 / 60
        if hours_since_last_modification > 3:
            download_twitter_user_image(api, twitter_username, context_image_filename)
    else:
        context_image_filename = '{}/missing-context.png'.format(base_folder)

    data = open(context_image_filename).read()
    image = Response(data, status_int=200)
    image.content_type = 'image/png'
    return image
def linkchecks_view(request):
    try:
        results = DBSession.query(LinkCheck).all()
    except DBAPIError:
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    if results is None:
        res = {'urls': []}
    else:
        urls = []
        for linkcheck in results:
            if linkcheck.active:
                start_url = linkcheck.url
            else:
                start_url = 'inactive'
            urls.append({
                'name': linkcheck.root_url,
                'start': start_url,
                'options': ' '.join(linkchecker_options(linkcheck)),
                'url': request.route_url('editlinkcheck', check_id=linkcheck.check_id),
            })
        res = {'urls': urls}
    if request.params.get('format') == 'json':
        response = Response(json.dumps(res))
        response.content_type = 'application/json'
        return response
    else:
        return res
def upload_dnd(request):
    check_login(request)
    save_dir = os.path.join(request.registry.settings['transform_dir'],
                            request.session['upload_dir'])
    # userfn: if the browser does not support naming of blobs, this might be
    # 'blob', so we need to further uniquefy it.
    userfn = request.POST['upload'].filename or ''
    ext = ''
    mtype = request.POST['upload'].headers.get('content-type')
    if mtype is not None:
        ext = mimetypes.guess_extension(mtype) or ''
    # If it has an extension (a dot and three or four characters at the end),
    # strip it
    userfn = re.compile(r'\.\w{3,4}$').sub('', userfn)
    fn = userfn + '_' + datetime.datetime.now().strftime('%s') + ext
    # No point in using an iterator, we need the entire content for the zip
    # file anyway
    fob = request.POST['upload'].file
    blk = fob.read()
    with open(os.path.join(save_dir, fn), 'w') as fp:
        fp.write(blk)
    # Update upload.zip
    append_zip(os.path.join(save_dir, 'upload.zip'), fn, blk)
    response = Response(json.dumps({'url': fn}))
    response.content_type = 'application/json'
    return response
def find_entities(self):
    #page_limit, current_page, start, end = self._get_pagin_data()
    user = get_current()
    dace_catalog = find_catalog('dace')
    system_catalog = find_catalog('system')
    novaideo_catalog = find_catalog('novaideo')
    filter_schema = FindEntitiesJson(self.context, self.request)
    try:
        appstruct = filter_schema.calculate_posted_filter()
    except Exception as e:
        return {'items': [], 'total_count': 0,
                'error': True,
                'message': '{}: {}'.format(e.__class__.__name__, e.args[0])}

    if appstruct is None:
        return {'items': [], 'total_count': 0,
                'error': True,
                'message': 'appstruct is None'}

    content_types_tree = appstruct['metadata_filter'].get('content_types', {}).copy()
    content_types = list(content_types_tree.keys())
    appstruct['metadata_filter'] = appstruct.get('metadata_filter', {})
    appstruct['metadata_filter']['content_types'] = content_types
    appstruct['dace'] = dace_catalog
    appstruct['system'] = system_catalog
    appstruct['novaideo'] = novaideo_catalog
    entities = find_entities(
        user=user,
        sort_on='release_date',
        include_site=True,
        **appstruct)

    def dumps(obj):
        """Return values of the attributes described in the
        colander schema node 'node'."""
        registry = get_current_registry()
        content_type = registry.content.typeof(obj)
        fields = content_types_tree.get(content_type, {})
        result, to_add = get_obj_value(obj, fields)
        if result is None:
            return {}, to_add
        return result, to_add

    def merge_items(old_items, new_items):
        for item in new_items:
            oid = item.get('@id', None)
            if oid and oid not in old_items:
                old_items[oid] = item
            elif oid:
                old_item = old_items[oid]
                old_items[oid] = merge_dicts([item, old_item])

    items = {}
    for entity in entities:
        values, to_add = dumps(entity)
        to_add.append(values)
        merge_items(items, to_add)

    result = {'items': list(items.values()), 'total_count': len(items)}
    response = Response()
    response.content_type = "application/json"
    response.text = json.dumps(result, indent=2)
    return response
def getUserAvatar(context, request):
    """
    Get user avatar
    """
    base_folder = request.registry.settings.get('avatar_folder')
    username = request.matchdict['username']
    named_size = request.matchdict.get('size', '')
    filename = ''

    # First attempt to find an existing named-size avatar.
    # If the image is not sized, this will fall back to the regular avatar.
    avatar_folder = get_avatar_folder(base_folder, 'people', username, size=named_size)
    if os.path.exists(os.path.join(avatar_folder, username)):
        filename = username

    # If we were looking for a named-size avatar, reaching here
    # means we did not find it, so fall back to the base avatar
    elif named_size:
        avatar_folder = get_avatar_folder(base_folder, 'people', username)
        if os.path.exists(os.path.join(avatar_folder, username)):
            filename = username

    # At this point we should have a filename set; if not, it means that we
    # couldn't locate any size of the requested avatar. In this case, use the
    # missing-avatar filename, located in the root avatars folder.
    avatar_folder = avatar_folder if filename else get_avatar_folder(base_folder)
    filename = filename if filename else 'missing-people.png'

    data = open(os.path.join(avatar_folder, filename)).read()
    image = Response(data, status_int=200)
    image.content_type = 'image/png'
    return image
def test_process_response_nonhtml(self):
    response = Response()
    response.content_type = 'text/plain'
    request = Request.blank('/')
    toolbar = self._makeOne(request, [DummyPanel])
    toolbar.process_response(response)
    self.assertTrue(response.processed)
def __call__(self):
    response = Response()
    response.content_disposition = 'attachment; filename="{}"'.format(self.context.filename)
    response.charset = 'utf-8'
    response.content_type = self.context.content_type
    response.body_file = self.context.content.open()
    response.content_length = self.context.size
    return response
def stream(request):
    response = Response()
    response.headers.update({'Access-Control-Allow-Origin': '*'})
    response.content_type = 'text/event-stream'
    response.cache_expires(0)
    response.app_iter = produce()
    return response
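# produce() is defined elsewhere; a minimal sketch of what a
# text/event-stream iterator could look like (the payload and interval are
# assumptions, field syntax per the SSE spec):
import time

def produce():
    for i in range(3):
        yield ('data: tick %d\n\n' % i).encode('utf-8')
        time.sleep(1)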
        except TypeError:
            raise ValueError("Invalid hex encoding of password")
        if len(password) < 1 or len(password) > 256:
            msg = 'Password "%s" must be between 1 and 256 bytes'
            raise ValueError(msg % password)
        del body['input']
        parameters = validate_parameters(body)
    except ValueError as e:
        response = Response(str(e))
        response.status = 400
        return response
    else:
        key = scrypt.hash(password, **parameters)
        output = binascii.hexlify(key)
        response = Response(json.dumps({'output': output}))
        response.content_type = "application/json"
        return response


def do_healthcheck(request):
    """A simple healthcheck route. Just returns 'OK'."""
    return Response("OK")


def make_wsgi_app():
    config = Configurator()
    config.add_route('do_scrypt', '/', request_method='POST')
    config.add_route('do_healthcheck', '/', request_method='GET')
    config.add_view(do_scrypt, route_name='do_scrypt')
    config.add_view(do_healthcheck, route_name='do_healthcheck')
    return config.make_wsgi_app()
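# A hypothetical way to serve the app above during development using the
# stdlib WSGI server (host and port are made up):
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8080, make_wsgi_app()).serve_forever()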
def _print_metrics_http(self, context, request):
    res = Response()
    res.content_type = 'text/plain; version=0.0.4'
    res.text = self.metric_print()
    return res
def get_file_from_attempt(request):
    """
    Get a portion of a package bound to an Attempt.

    Get a specific member, by name (raw):
    `/api/:api_id/files/:attempt_id/:target?file=:member&raw=true`

    Get a specific member, by name:
    `/api/:api_id/files/:attempt_id/:target.zip?file=:member`

    Get more than one specific member, by name:
    `/api/:api_id/files/:attempt_id/:target.zip?file=:member&file=:member2`

    Get the full package:
    `/api/:api_id/files/:attempt_id/:target.zip?full=true`
    """
    has_body = False
    attempt_id = request.matchdict.get('attempt_id', None)
    target = request.matchdict.get('target', None)

    try:
        attempt = request.db.query(models.Attempt).get(attempt_id)
    except DataError:
        return HTTPNotFound()
    if attempt is None:
        return HTTPNotFound()

    is_full = asbool(request.GET.get('full', False))
    is_raw = asbool(request.GET.get('raw', False))
    if is_full and is_raw:
        return HTTPBadRequest()

    response = Response(status_code=200)

    if is_full:
        # Get the full package.
        response.content_type = 'application/zip'
        response.app_iter = open(attempt.filepath, 'rb')
        has_body = True
    elif is_raw:
        member_name = request.GET.get('file')
        response.content_type = 'text/xml'
        response.app_iter = attempt.analyzer.get_fp(member_name)
        has_body = True
    else:
        # Get partial portions of the package.
        response.content_type = 'application/zip'
        files = [member for attr, member in request.GET.items() if attr == 'file']
        try:
            if files:
                response.app_iter = attempt.analyzer.subzip(*files)
                has_body = True
        except ValueError:
            return HTTPBadRequest()

    return response if has_body else HTTPBadRequest()
def json_response(string_value):
    resp = Response(string_value)
    resp.status = 200
    resp.content_type = 'application/json'
    filter_json_xsrf(resp)
def __call__(self, request=None):
    request = from_webob_request(request)
    response = Response()
    response.body = self.app(**request)
    response.content_type = 'application/json'
    return response
def process_handler(request):
    _ctypes = {'xml': 'application/xml',
               'json': 'application/json'}

    def _d(x, do_split=True):
        if x is not None:
            x = x.strip()
        if x is None or len(x) == 0:
            return None, None
        if '.' in x:
            (pth, dot, extn) = x.rpartition('.')
            assert (dot == '.')
            if extn in _ctypes:
                return pth, extn
        return x, None

    log.debug(request)

    if request.matchdict is None:
        raise exc.exception_response(404)

    if request.body:
        try:
            request.matchdict.update(request.json_body)
        except ValueError as ex:
            pass

    entry = request.matchdict.get('entry', 'request')
    path = list(request.matchdict.get('path', []))
    match = request.params.get('q', request.params.get('query', None))

    # Enable matching on scope.
    match = (match.split('@').pop() if match and not match.endswith('@') else match)
    log.debug("match={}".format(match))

    if 0 == len(path):
        path = ['entities']
    alias = path.pop(0)
    path = '/'.join(path)

    # Ugly workaround bc WSGI drops double-slashes.
    path = path.replace(':/', '://')

    msg = "handling entry={}, alias={}, path={}"
    log.debug(msg.format(entry, alias, path))

    pfx = None
    if 'entities' not in alias:
        pfx = request.registry.aliases.get(alias, None)
        if pfx is None:
            raise exc.exception_response(404)

    path, ext = _d(path, True)
    if pfx and path:
        q = "{%s}%s" % (pfx, path)
        path = "/%s/%s" % (alias, path)
    else:
        q = path

    # TODO - sometimes the client sends > 1 accept header value with ','.
    accept = str(request.accept).split(',')[0]
    if (not accept or '*/*' in accept) and ext:
        accept = _ctypes[ext]

    try:
        accepter = MediaAccept(accept)
        for p in request.registry.plumbings:
            state = {
                entry: True,
                'headers': {'Content-Type': None},
                'accept': accepter,
                'url': request.current_route_url(),
                'select': q,
                'match': match.lower() if match else match,
                'path': path,
                'stats': {},
            }
            r = p.process(request.registry.md,
                          state=state,
                          raise_exceptions=True,
                          scheduler=request.registry.scheduler)
            if r is not None:
                response = Response()
                response.headers.update(state.get('headers', {}))
                ctype = state.get('headers').get('Content-Type', None)
                if not ctype:
                    r, t = _fmt(r, accepter)
                    ctype = t
                response.text = b2u(r)
                response.size = len(r)
                response.content_type = ctype
                cache_ttl = int(state.get('cache', 0))
                response.expires = datetime.now() + timedelta(seconds=cache_ttl)
                return response
    except ResourceException as ex:
        import traceback
        log.debug(traceback.format_exc())
        log.warn(ex)
        raise exc.exception_response(409)
    except BaseException as ex:
        import traceback
        log.debug(traceback.format_exc())
        log.error(ex)
        raise exc.exception_response(500)

    raise exc.exception_response(404)
    except KeyError as e:
        result = {
            'success': False,
            'error': e.message,
        }
    else:
        name = title_to_name(fs.filename, blacklist=self.context.keys())
        self.context[name] = node = factory.from_field_storage(fs)
        node.title = fs.filename
        result = {
            "success": True,
            "url": self.request.resource_url(node),
        }

    # FineUploader expects JSON with Content-Type 'text/plain'
    response = Response(json.dumps(result))
    response.content_type = 'text/plain'
    return response


def includeme(config):
    """
    Pyramid includeme hook.

    :param config: app config
    :type config: :class:`pyramid.config.Configurator`
    """
    config.scan(__name__)
if ext == "xls": data = tools.xls_to_list(tmp) elif ext == "csv": data = tmp.read_data() data = tools.Text.as_latin_1(data) data = tools.Text.to_ascii(data) data = tools.csv_to_list(data) else: data = tmp.read_text() tmp.delete() if isinstance(data, basestring) is False: data = tools.json(data) r = Response(data) r.content_type = "application/javascript" return r r = Response(data) r.content_type = "text/html" return r def build_beacon_query(beacon): if isinstance(beacon, basestring) is True: beacon = tools.parse_beacon(beacon) hash = None try: hash = beacon["_hash_"] beacon.pop("_hash_")
def generate_response(data):
    resp = Response()
    resp.body = json.dumps(data)
    resp.headerlist.append(('Access-Control-Allow-Origin', '*'))
    resp.content_type = 'application/javascript; charset=utf-8'
    return resp
def rrid(request):
    """ Receive an article, parse RRIDs, resolve them, create annotations,
        log results """
    if request.method == 'OPTIONS':
        response = Response()
        request_headers = request.headers['Access-Control-Request-Headers'].lower()
        request_headers = re.findall(r'\w(?:[-\w]*\w)', request_headers)
        response_headers = ['access-control-allow-origin']
        for req_acoa_header in request_headers:
            if req_acoa_header not in response_headers:
                response_headers.append(req_acoa_header)
        response_headers = ','.join(response_headers)
        response.headers.update({
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Headers': '%s' % response_headers
        })
        response.status_int = 204
        return response

    h = HypothesisUtils(username=username, token=api_token, group=group)
    target_uri = urlparse.parse_qs(request.text)['uri'][0]
    params = {'limit': 200, 'uri': target_uri}
    query_url = h.query_url_template.format(query=urlencode(params, True))
    obj = h.authenticated_api_query(query_url)
    rows = obj['rows']
    tags = set()
    for row in rows:
        if row['group'] != h.group:  # api query returns unwanted groups
            continue
        elif row['user'] != 'acct:' + h.username + '@hypothes.is':
            continue
        for tag in row['tags']:
            if tag.startswith('RRID'):
                tags.add(tag)

    html = urlparse.parse_qs(request.text)['data'][0]
    print(target_uri)

    found_rrids = {}
    try:
        matches = re.findall(r'(.{0,10})(RRID:\s*)([_\w\-:]+)([^\w].{0,10})',
                             html.replace('–', '-'))
        existing = []
        for match in matches:
            print(match)
            prefix = match[0]
            exact = match[2]
            if 'RRID:' + exact in tags:
                print('skipping %s, already annotated' % exact)
                continue
            new_tags = []
            if exact in existing:
                new_tags.append('RRIDCUR:Duplicate')
            else:
                existing.append(exact)
            found_rrids[exact] = None
            suffix = match[3]
            print('\t' + exact)
            resolver_uri = 'https://scicrunch.org/resolver/%s.xml' % exact
            r = requests.get(resolver_uri)
            print(r.status_code)
            xml = r.content
            found_rrids[exact] = r.status_code
            if r.status_code < 300:
                root = etree.fromstring(xml)
                if root.findall('error'):
                    s = 'Resolver lookup failed.'
                    s += '<hr><p><a href="%s">resolver lookup</a></p>' % resolver_uri
                    r = h.create_annotation_with_target_using_only_text_quote(
                        url=target_uri, prefix=prefix, exact=exact,
                        suffix=suffix, text=s,
                        tags=new_tags + ['RRIDCUR:Unresolved'])
                    print('ERROR')
                else:
                    data_elements = root.findall('data')[0]
                    s = ''
                    data_elements = [(e.find('name').text, e.find('value').text)
                                     for e in data_elements]  # these shouldn't duplicate
                    citation = [(n, v) for n, v in data_elements
                                if n == 'Proper Citation']
                    name = [(n, v) for n, v in data_elements if n == 'Name']
                    data_elements = citation + name + sorted(
                        [(n, v) for n, v in data_elements
                         if (n != 'Proper Citation' or n != 'Name') and v is not None])
                    for name, value in data_elements:
                        if (name == 'Reference' or name == 'Mentioned In Literature') \
                                and value is not None and value.startswith('<a class'):
                            if len(value) > 500:
                                continue  # nif-0000-30467 fix keep those pubmed links short!
                        s += '<p>%s: %s</p>' % (name, value)
                    s += '<hr><p><a href="%s">resolver lookup</a></p>' % resolver_uri
                    r = h.create_annotation_with_target_using_only_text_quote(
                        url=target_uri, prefix=prefix, exact=exact,
                        suffix=suffix, text=s,
                        tags=new_tags + ['RRID:' + exact])
            else:
                s = 'Resolver lookup failed.'
                r = h.create_annotation_with_target_using_only_text_quote(
                    url=target_uri, prefix=prefix, exact=exact,
                    suffix=suffix, text=s,
                    tags=new_tags + ['RRIDCUR:Unresolved'])
    except Exception:
        traceback.print_exc()

    results = ', '.join(found_rrids.keys())
    r = Response(results)
    r.content_type = 'text/plain'
    r.headers.update({'Access-Control-Allow-Origin': '*'})

    try:
        now = datetime.now().isoformat()[0:19].replace(':', '').replace('-', '')
        fname = 'rrid-%s.log' % now
        s = 'URL: %s\n\nResults: %s\n\nCount: %s\n\nText:\n\n%s' % (
            target_uri, results, len(found_rrids), html)
        with open(fname, 'wb') as f:
            f.write(s.encode('utf-8'))
    except Exception:
        traceback.print_exc()

    return r
def generate_text_response(data):
    resp = Response()
    resp.body = str(data)
    resp.headerlist.append(('Access-Control-Allow-Origin', '*'))
    resp.content_type = 'text/plain; charset=utf-8'
    return resp
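# A hypothetical usage sketch for the two helpers above; the payloads are
# invented for illustration.
def example_view(request):
    if request.params.get('plain'):
        return generate_text_response('ok')
    return generate_response({'status': 'ok'})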
def process_handler(request: Request) -> Response:
    """
    The main request handler for pyFF. Implements API call hooks and content negotiation.

    :param request: the HTTP request object
    :return: the data to send to the client
    """
    _ctypes = {
        'xml': 'application/samlmetadata+xml;application/xml;text/xml',
        'json': 'application/json',
    }

    def _d(x: Optional[str], do_split: bool = True) -> Tuple[Optional[str], Optional[str]]:
        """ Split a path into a base component and an extension. """
        if x is not None:
            x = x.strip()

        if x is None or len(x) == 0:
            return None, None

        if '.' in x:
            (pth, dot, extn) = x.rpartition('.')
            assert dot == '.'
            if extn in _ctypes:
                return pth, extn

        return x, None

    log.debug(f'Processing request: {request}')

    if request.matchdict is None:
        raise exc.exception_response(400)

    if request.body:
        try:
            request.matchdict.update(request.json_body)
        except ValueError as ex:
            pass

    entry = request.matchdict.get('entry', 'request')
    path_elem = list(request.matchdict.get('path', []))
    match = request.params.get('q', request.params.get('query', None))

    # Enable matching on scope.
    match = match.split('@').pop() if match and not match.endswith('@') else match
    log.debug("match={}".format(match))

    if not path_elem:
        path_elem = ['entities']

    alias = path_elem.pop(0)
    path = '/'.join(path_elem)

    # Ugly workaround bc WSGI drops double-slashes.
    path = path.replace(':/', '://')

    msg = "handling entry={}, alias={}, path={}"
    log.debug(msg.format(entry, alias, path))

    pfx = None
    if 'entities' not in alias:
        pfx = request.registry.aliases.get(alias, None)
        if pfx is None:
            raise exc.exception_response(404)

    # content_negotiation_policy is one of three values:
    # 1. extension - current default, inspect the path and if it ends in
    #    an extension, e.g. .xml or .json, always strip off the extension to
    #    get the entityID and if no accept header or a wildcard header, then
    #    use the extension to determine the return Content-Type.
    #
    # 2. adaptive - only if no accept header or if a wildcard, then inspect
    #    the path and if it ends in an extension strip off the extension to
    #    get the entityID and use the extension to determine the return
    #    Content-Type.
    #
    # 3. header - future default, do not inspect the path for an extension and
    #    use only the Accept header to determine the return Content-Type.
    policy = config.content_negotiation_policy

    # TODO - sometimes the client sends > 1 accept header value with ','.
    accept = str(request.accept).split(',')[0]
    valid_accept = accept and not ('application/*' in accept
                                   or 'text/*' in accept
                                   or '*/*' in accept)

    new_path: Optional[str] = path
    path_no_extension, extension = _d(new_path, True)
    accept_from_extension = accept
    if extension:
        accept_from_extension = _ctypes.get(extension, accept)

    if policy == 'extension':
        new_path = path_no_extension
        if not valid_accept:
            accept = accept_from_extension
    elif policy == 'adaptive':
        if not valid_accept:
            new_path = path_no_extension
            accept = accept_from_extension

    if not accept:
        log.warning('Could not determine accepted response type')
        raise exc.exception_response(400)

    q: Optional[str]
    if pfx and new_path:
        q = f'{{{pfx}}}{new_path}'
        new_path = f'/{alias}/{new_path}'
    else:
        q = new_path

    try:
        accepter = MediaAccept(accept)
        for p in request.registry.plumbings:
            state = {
                entry: True,
                'headers': {'Content-Type': None},
                'accept': accepter,
                'url': request.current_route_url(),
                'select': q,
                'match': match.lower() if match else match,
                'path': new_path,
                'stats': {},
            }

            r = p.process(request.registry.md,
                          state=state,
                          raise_exceptions=True,
                          scheduler=request.registry.scheduler)
            log.debug(f'Plumbing process result: {r}')
            if r is None:
                r = []

            response = Response()
            _headers = state.get('headers', {})
            response.headers.update(_headers)
            ctype = _headers.get('Content-Type', None)
            if not ctype:
                r, t = _fmt(r, accepter)
                ctype = t

            response.text = b2u(r)
            response.size = len(r)
            response.content_type = ctype
            cache_ttl = int(state.get('cache', 0))
            response.expires = datetime.now() + timedelta(seconds=cache_ttl)
            return response
    except ResourceException as ex:
        import traceback
        log.debug(traceback.format_exc())
        log.warning(f'Exception from processing pipeline: {ex}')
        raise exc.exception_response(409)
    except BaseException as ex:
        import traceback
        log.debug(traceback.format_exc())
        log.error(f'Exception from processing pipeline: {ex}')
        raise exc.exception_response(500)

    if request.method == 'GET':
        raise exc.exception_response(404)
def login_user(request):
    try:
        email = request.json_body.get('email')
        firebase_token = request.json_body.get('firebaseToken')
        is_anonymous = request.json_body.get('isAnonymous')
        firebase_user_id = request.json_body.get('firebaseUserId')
        google_token = request.json_body.get('googleToken')
        branch_data = request.json_body.get('branchData')
        prev_firebase_user_id = request.json_body.get('prevFirebaseUserId')
    except ValueError:
        raise ValidationError('ERR_INVALID_AUTH_PARAM')

    if get_is_production() or email != 'oice-dev':
        try:
            auth.verify_id_token(firebase_token)
        except ValueError:
            raise ValidationError('ERR_FIREBASE_AUTH_ERROR')
        except AppIdentityError:
            raise ValidationError('ERR_INVALID_FIREBASE_TOKEN')

    old_auth_id = authenticated_userid(request)

    fetch_username = email
    if is_anonymous and firebase_user_id:
        fetch_username = firebase_user_id

    # Init these bools here to avoid scope issues
    is_first_login = False
    is_trial_ended = False

    log_dict = {
        'topic': 'actionUser',
        'isAnonymous': 'true' if is_anonymous else 'false',
        'isDeeplink': 'false',
    }
    if branch_data:
        log_dict.update({
            'channel': dict_get_value(branch_data, ['~channel'], 'direct'),
            'isDeeplink': 'true',
        })
        log_dict = set_basic_info_referrer_log(
            dict_get_value(branch_data, ['+referrer'], 'none'),
            dict_get_value(branch_data, ['referrer2'], 'none'),
            log_dict)
        oice_source = OiceQuery(DBSession).get_by_uuid(dict_get_value(branch_data, ['uuid']))
        if oice_source:
            log_dict = set_basic_info_oice_source_log(oice_source.story.users[0],
                                                      oice_source, log_dict)

    try:
        user = UserQuery(DBSession).fetch_user_by_email(email=fetch_username).one()
    except NoResultFound:
        user = User(email=fetch_username, is_anonymous=is_anonymous)
        if firebase_user_id:
            user.display_name = firebase_user_id
        DBSession.add(user)
        DBSession.flush()
        is_first_login = True
        is_trial_ended = False
        # log
        log_dict.update({'action': 'createUser'})
        log_dict = set_basic_info_user_log(user, log_dict)
        log_dict = set_basic_info_log(request, log_dict)
        log_message(KAFKA_TOPIC_USER, log_dict)
    else:
        user.last_login_at = datetime.datetime.utcnow()
        if not user.is_anonymous:
            sample_story = StoryQuery(DBSession).get_sample_story(user.language)
            story = next((user_story for user_story in user.stories
                          if sample_story.id == user_story.fork_of), None)
            if not story:
                story = fork_story(DBSession, sample_story)
                sample_oice = OiceQuery(DBSession).get_sample_oice(language=user.language)
                oice = fork_oice(DBSession, story, sample_oice)
                user.stories.append(story)
            if user.is_trial:
                if user.is_paid() and user.expire_date < datetime.datetime.utcnow():
                    user.role = 'user'
                    update_user_mailchimp_stage(user=user, stage=5)
                if user.is_free():
                    user.is_trial = False
                    is_trial_ended = True
            else:
                # Disabled trial due to business request:
                # if user.is_free() and not user.expire_date:
                #     UserOperations.start_trial(user)
                is_trial_ended = False
            is_first_login = False
        if not old_auth_id or request.headers.get('x-oice-app-version'):
            # log
            is_redeem_account = prev_firebase_user_id and firebase_user_id != prev_firebase_user_id
            log_dict.update({
                'action': 'redeemAccount' if is_redeem_account else 'login',
            })
            log_dict = set_basic_info_user_log(user, log_dict)
            log_dict = set_basic_info_log(request, log_dict)
            log_message(KAFKA_TOPIC_USER, log_dict)
            if is_redeem_account:
                handle_anonymous_user_app_story_progress(
                    is_existing_user=True,
                    prev_user_email=prev_firebase_user_id,
                    new_user=user)

    photo_url = request.json_body.get('photoURL', None)
    if photo_url and user.avatar_storage is None:
        r = requests.get(photo_url)
        avatar = BytesIO(r.content)
        factory = pyramid_safile.get_factory()
        handle = factory.create_handle('avatar.png', avatar)
        user.import_handle(handle)

    language = request.json_body.get('language', None)
    normalized_language = None
    if language and user.language is None:
        normalized_language = normalize_language(language)
    if normalized_language:
        user.language = normalized_language
        # derive ui_language when creating user
        user.ui_language = normalize_ui_language(normalized_language)

    if (is_first_login or user.is_anonymous) and google_token:
        display_name = request.json_body.get('displayName', None)
        if email:
            user.email = email
            if not display_name:
                display_name = email.split('@')[0]
        if display_name:
            user.display_name = display_name

        sample_story = StoryQuery(DBSession).get_sample_story(normalized_language)
        story = fork_story(DBSession, sample_story)
        sample_oice = OiceQuery(DBSession).get_sample_oice(language=normalized_language)
        oice = fork_oice(DBSession, story, sample_oice)

        # open a public library for the new user
        library = create_user_public_library(DBSession, user.display_name)

        user.stories.append(story)
        user.libraries.append(library)
        user.libraries_selected.append(library)

        # pre-select default libraries for the new user
        default_libs = LibraryQuery(DBSession).fetch_default_libs()
        user.libraries_purchased.extend(default_libs)
        user.libraries_selected.extend(default_libs)

        # Disabled trial due to business request:
        # UserOperations.start_trial(user)

        user.last_login_at = datetime.datetime.utcnow()

        subscribe_mailchimp(google_token, user, language=language)

        # update elastic search when creating user
        update_elastic_search_user(user.display_name, email)

        if is_first_login and request.headers.get('x-oice-app-version'):
            # log
            log_dict.update({'action': 'bindAccount'})
            log_dict = set_basic_info_user_log(user, log_dict)
            log_dict = set_basic_info_log(request, log_dict)
            log_message(KAFKA_TOPIC_USER, log_dict)
            handle_anonymous_user_app_story_progress(
                is_existing_user=False,
                prev_user_email=prev_firebase_user_id,
                new_user=user)
        user.is_anonymous = False

    serialize_user = user.serialize()
    serialize_user['isFirstLogin'] = is_first_login
    serialize_user['isTrialEnded'] = is_trial_ended
    serialize_user['intercomUserHash'] = hmac.new(
        bytes(get_intercom_secret_key().encode('utf-8')),
        bytes(str(user.id).encode('utf-8')),
        digestmod=hashlib.sha256).hexdigest()

    response = Response()
    response.status_code = 200
    response.headers = remember(request, user.email)
    response.content_type = 'application/json'
    response.charset = 'UTF-8'
    response.text = json.dumps({'code': 200, 'user': serialize_user})
    return response