def login_cheat(request):  # TODO: test
    """Log a user in from a JSON body carrying ``login`` and ``passwordhash``.

    On success a new ``Client`` is created for the user, auth headers from
    ``remember()`` are merged into the response, and ``locale_id`` /
    ``client_id`` cookies are set.  On failure returns ``HTTPUnauthorized``
    pointing at the login page.
    """
    login = request.json_body.get('login', '')
    passwordhash = request.json_body.get('passwordhash', '')
    # Lazy %-style logging instead of eager string concatenation.
    log.debug("Logging in with cheat method: %s", login)
    user = DBSession.query(User).filter_by(login=login).first()
    if user and user.password.hash == passwordhash:
        log.debug("Login successful")
        client = Client(user_id=user.id)
        user.clients.append(client)
        DBSession.add(client)
        DBSession.flush()
        # BUG FIX: the original called remember() twice and assigned the
        # returned header *list* to response.headers wholesale, clobbering
        # the response's default headers (Content-Type etc.).  Merge the
        # auth headers into the existing header list instead.
        headers = remember(request, principal=client.id)
        response = Response()
        response.headerlist.extend(headers)
        locale_id = user.default_locale_id or 1  # fall back to locale 1
        response.set_cookie(key='locale_id', value=str(locale_id))
        response.set_cookie(key='client_id', value=str(client.id))
        return response
    log.debug("Login unsuccessful for %s", login)
    return HTTPUnauthorized(location=request.route_url('login'))
def download_peopledirectory_xml(context, request):
    """Serve the people-directory dump for *context* as an XML download."""
    body = dump_peopledir(context)
    response = Response(body)
    response.content_type = 'application/xml'
    # Suggest a filename based on the report name.
    response.headers.add(
        'Content-Disposition',
        'attachment;filename=%s.xml' % str(context.__name__))
    return response
def modify_event(request):
    """Apply partial updates to an event from POST parameters.

    Requires an ``event_uid`` POST parameter and privilege level 1 for the
    current user; otherwise responds with HTTP 500 (original contract kept).
    Only fields actually present in the POST are modified.
    """
    dbase = Dbase()
    if 'event_uid' in request.POST and dbase.get_user_privelege(get_username()) == 1:
        post = request.POST
        event_uid = post["event_uid"]
        # Plain string fields, copied through unchanged.
        if 'title' in post:
            dbase.modifyEvent(event_uid, name=post['title'])
        if 'location' in post:
            dbase.modifyEvent(event_uid, location=post['location'])
        if 'description' in post:
            dbase.modifyEvent(event_uid, description=post['description'])
        # Date fields, run through dateutil's parse() before storing.
        # BUG FIX: these previously read from an undefined name ``params``
        # (a guaranteed NameError); they now read request.POST like the
        # string fields above.
        for field in ('event_start', 'event_end',
                      'event_registration_start', 'event_registration_end',
                      'event_approval_start', 'event_approval_end'):
            if field in post:
                dbase.modifyEvent(event_uid, **{field: parse(post[field])})
        return Response()
    response = Response()
    response.status = 500
    return response
def download_backup(request):
    """Stream a backup file identified by a base64-encoded filename.

    The ``backup_id`` match parameter is the base64 of the backup's file
    name.  Any decode failure, unknown name, or path that is not a regular
    file inside the backups directory yields a 404.
    """
    encoded_filename = request.matchdict['backup_id']
    try:
        filename = base64.b64decode(encoded_filename).decode('utf-8')
    except (TypeError, ValueError):
        # BUG FIX: b64decode raises TypeError on Python 2 but
        # binascii.Error (a ValueError subclass) on Python 3; catch both.
        return HTTPNotFound()
    backups_dir = get_backups_dir()
    # Only serve names that exist as plain files in the backups directory;
    # this membership test also blocks path-traversal filenames.
    all_backups = [x for x in os.listdir(backups_dir)
                   if os.path.isfile(os.path.join(backups_dir, x))]
    if filename not in all_backups:
        return HTTPNotFound()
    full_path = os.path.join(backups_dir, filename)
    if not os.path.isfile(full_path):
        return HTTPNotFound()
    # (The original initialized ``headers = []`` twice; once is enough.)
    headers = [
        ('Content-Length', str(os.path.getsize(full_path))),
        ('Content-Disposition',
         str('attachment; filename={0}'.format(filename))),
    ]
    response = Response(content_type='application/octet-stream')
    try:
        response.app_iter = open(full_path, 'rb')
    except IOError:
        return HTTPNotFound()
    response.headerlist += headers
    return response
def process_upload(self):
    """ Process a single upload.

    Also see: https://github.com/valums/file-uploader/blob/master/server/readme.md

    :result: Status object with URL of the created item (on success) or
             error message on failure.
    :rtype: dict
    """
    storage = self.request.POST["qqfile"]
    # Anybody sending something that is not a FieldStorage is cheating,
    # so failing hard here is fine.
    assert isinstance(storage, FieldStorage)
    try:
        factory = self.factory_by_name(self.request.POST["content_type"])
    except KeyError as exc:
        result = {"success": False, "error": exc.message}
    else:
        name = title_to_name(storage.filename, blacklist=self.context.keys())
        self.context[name] = node = factory.from_field_storage(storage)
        node.title = storage.filename
        result = {"success": True, "url": self.request.resource_url(node)}
    # FineUploader expects JSON with Content-Type 'text/plain'
    response = Response(json.dumps(result))
    response.content_type = "text/plain"
    return response
def _response(self, data):
    """Wrap *data* in an ``image/png`` response; 404 when *data* is None."""
    if data is None:
        raise HTTPNotFound()
    response = Response(data)
    response.headers['Content-Type'] = 'image/png'
    return response
def httpexception_view(exc, request):
    """Render a themed page for an HTTPException, falling back to the
    exception itself when no matching template exists."""
    # This special case exists for the easter egg that appears on the 404
    # response page. We don't generally allow youtube embeds, but we make an
    # except for this one.
    if isinstance(exc, HTTPNotFound):
        request.find_service(name="csp").merge(
            {
                "frame-src": ["https://www.youtube-nocookie.com"],
                "script-src": ["https://www.youtube.com", "https://s.ytimg.com"],
            }
        )
    try:
        if isinstance(exc, HTTPNotFound) and request.path.startswith("/simple/"):
            # Lightweight version of 404 page for `/simple/`
            response = Response(body="404 Not Found", content_type="text/plain")
        else:
            response = render_to_response(
                "{}.html".format(exc.status_code), {}, request=request
            )
    except LookupError:
        # We don't have a customized template for this error, so we'll just
        # let the default happen instead.
        return exc
    # Copy over the important values from our HTTPException to our new
    # response object: status, plus any headers the response lacks.
    response.status = exc.status
    response.headers.extend(
        (k, v) for k, v in exc.headers.items() if k not in response.headers
    )
    return response
def get_dues18_invoice(invoice, request):
    """
    Gets the invoice and returns a PDF response.

    Args:
        invoice: The invoice for which the PDF is requested.
        request: The pyramid.request.Request object.

    Returns:
        A PDF response in case the invoice exists. Otherwise a redirect to
        the error page.
    """
    if invoice is None:
        request.session.flash(
            u'No invoice found!',
            'danger'  # message queue for user
        )
        return HTTPFound(request.route_url('error'))
    if invoice.is_reversal:
        pdf_file = make_reversal_pdf_pdflatex(invoice)
    else:
        pdf_file = make_invoice_pdf_pdflatex(invoice)
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)
    # BUG FIX: open the PDF in binary mode -- text mode ("r") corrupts the
    # byte stream via newline translation / decoding.
    response.app_iter = open(pdf_file.name, "rb")
    return response
def test_process_response_nonhtml(self):
    """A non-HTML response passes through and is marked processed."""
    response = Response()
    response.content_type = 'text/plain'
    request = Request.blank('/')
    debug_toolbar = self._makeOne(request, [DummyPanel])
    debug_toolbar.process_response(response)
    self.assertTrue(response.processed)
def upload_files(request):
    """uploads a list of files to the server, creates Link instances in
    server and returns the created link ids with a response to let the
    front end request a linkage between the entity and the uploaded files
    """
    # Works for single and multiple files alike.
    file_params = request.POST.getall('file')
    logger.debug('file_params: %s ' % file_params)
    try:
        new_links = upload_files_to_server(request, file_params)
    except IOError as e:
        converter = StdErrToHTMLConverter(e)
        response = Response(converter.html())
        response.status_int = 500
        transaction.abort()
        return response
    # Persist the Link objects and hand their ids back to the client.
    DBSession.add_all(new_links)
    logger.debug('created links for uploaded files: %s' % new_links)
    return {'link_ids': [link.id for link in new_links]}
def upload(request):
    """Accept a photo upload (max 20 MB), store it under data/uploads and
    record a Photo row; rejects anything that is not jpg/gif/png/tiff."""
    if request.content_length / 1000000 > 20:
        return error_response(400, 'Sorry, but the file must be under 20MB.')
    # Create photo object in database
    photo = Photo(datetime.today(), request.POST['file'].filename,
                  request.client_addr, request.content_type,
                  request.content_length)
    DBSession.add(photo)
    DBSession.flush()
    # Save uploaded file
    input_file = request.POST['file'].file
    input_file.seek(0)
    if not os.path.exists('data'):
        os.makedirs('data')
    if not os.path.exists('data/uploads'):
        os.makedirs('data/uploads')
    upload_path = os.path.join('data', 'uploads', str(photo.id))
    # BUG FIX: write in binary mode -- text mode ('w') mangles image bytes
    # on platforms with newline translation.
    with open(upload_path, 'wb') as f:
        shutil.copyfileobj(input_file, f)
    # Check the content type (via libmagic, not the client-supplied header)
    # and rename with the matching extension.
    mime = magic.from_file(upload_path, mime=True)
    extension_by_mime = {'image/jpeg': '.jpg', 'image/pjpeg': '.jpg',
                         'image/gif': '.gif', 'image/png': '.png',
                         'image/tiff': '.tiff', 'image/x-tiff': '.tiff'}
    if mime not in extension_by_mime:
        resp = Response('Sorry, but we can only accept jpg, gif, or png files.')
        resp.status_code = 400
        resp.status_string = '400 Bad Request'
        return resp
    os.rename(upload_path, upload_path + extension_by_mime[mime])
    photo.content_type = mime
    return Response('OK')
def pdf_file(request):
    # Serve the PDF attachment of a monograph (whole book) or of one of its
    # parts, depending on the shape of the 'part' match parameter.
    sbid = request.matchdict['sbid']
    req_part = request.matchdict['part'].split('-')
    monograph = Monograph.get(request.db, sbid)
    if len(req_part) == 2 and req_part[1] == monograph.isbn:
        # '<x>-<isbn>' addresses the monograph's own PDF attachment.
        try:
            pdf_file = request.db.fetch_attachment(
                monograph._id, monograph.pdf_file['filename'], stream=True)
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()
    else:
        parts = get_book_parts(monograph._id, request)
        try:
            # NOTE(review): indexes req_part[2] although the branch above only
            # checked for two components -- when fewer than three components
            # are present the IndexError is swallowed into a 404 below.
            # Confirm the expected URL shape with callers.
            selected_part = parts[int(req_part[2])]
        except (IndexError, ValueError):
            raise exceptions.NotFound()
        part = Part.get(request.db, selected_part['part_sbid'])
        try:
            pdf_file = request.db.fetch_attachment(
                part._id, part.pdf_file['filename'], stream=True)
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()
    # One-year expiry; ETag is best-effort (skipped for unhashable streams).
    response = Response(content_type='application/pdf',
                        expires=datetime_rfc822(365))
    response.app_iter = pdf_file
    try:
        response.etag = str(hash(pdf_file))
    except TypeError:
        # cannot generate a hash for the object, return it without the ETag
        pass
    return response
def swf_file(request):
    """Return a monograph (or part) PDF converted to SWF, with a one-year
    expiry and a best-effort ETag."""
    def fetch(doc):
        # Pull the PDF attachment off a couch document, 404 on any miss.
        try:
            return request.db.fetch_attachment(doc._id,
                                               doc.pdf_file['filename'])
        except (couchdbkit.ResourceNotFound, AttributeError):
            raise exceptions.NotFound()

    sbid = request.matchdict['sbid']
    req_part = request.matchdict['part']
    monograph = Monograph.get(request.db, sbid)
    if req_part == monograph.isbn:
        pdf_data = fetch(monograph)
    else:
        parts = get_book_parts(monograph._id, request)
        try:
            selected = parts[int(req_part)]
        except (IndexError, ValueError):
            raise exceptions.NotFound()
        pdf_data = fetch(Part.get(request.db, selected['part_sbid']))
    swf_data = functions.convert_pdf2swf(pdf_data)
    response = Response(content_type='application/x-shockwave-flash',
                        expires=datetime_rfc822(365))
    response.app_iter = swf_data
    try:
        response.etag = str(hash(swf_data))
    except TypeError:
        # cannot generate a hash for the object, return it without the ETag
        pass
    return response
def __call__(self):
    """Validate AIRS export options and stream the export as a zip download.

    Requires an authenticated user with the 'airsexport' external API
    permission; validation failures are logged and reported as a 500.
    """
    request = self.request
    user = request.user
    if not user:
        return make_401_error(u'Access Denied')
    if 'airsexport' not in user.cic.ExternalAPIs:
        return make_401_error(u'Insufficient Permissions')

    state = modelstate.ModelState(request)
    state.schema = AIRSExportOptionsSchema()
    state.form.method = None

    if not state.validate():
        # Map the first recognized field error to a user-facing message.
        if state.is_error('DST'):
            message = u"Invalid Distribution"
        elif state.is_error("version"):
            message = u"Invalid Version"
        else:
            message = u"An unknown error occurred."
        log.error('AIRS Export Errors: %s: %s', message, state.form.errors)
        return make_internal_server_error(message)

    response = Response(content_type='application/zip', charset=None)
    response.app_iter, response.length = _zip_stream(request, state)
    response.headers['Content-Disposition'] = 'attachment;filename=Export.zip'
    return response
def pvc1_show_imageh5(request):
    # Loads JPEG images from hdf5 file
    """Serve one JPEG frame of a pvc1 movie segment from an HDF5 archive.

    When the frame is absent from the archive, a generated placeholder
    image saying '<name> missing' is returned instead.
    """
    h5_image_file = 'pvc1/pvc1_movie_frames.h5'
    movie_id = int(request.matchdict['movie_id'])
    segment_id = int(request.matchdict['segment_id'])
    frame = int(request.matchdict['frame'])
    image_dir = 'movie%03u_%03u.images' % (movie_id, segment_id)
    image_name = 'movie%03u_%03u_%03u.jpeg' % (movie_id, segment_id, frame)
    path = image_dir + '/' + image_name
    response = Response(content_type='image/jpeg')
    # BUG FIX: close the HDF5 file on every path -- previously the handle
    # leaked when the dataset lookup raised KeyError.
    h5f = h5py.File(h5_image_file, 'r')
    try:
        try:
            dsv = h5f[path].value  # read the bytes before closing the file
        except KeyError:
            dsv = None
    finally:
        h5f.close()
    if dsv is None:
        # missing file, generate an image to return
        img = Image.new("RGB", (320, 220,), "#cccccc")
        draw = ImageDraw.Draw(img)
        draw.text((15, 60), image_name + ' missing', fill='#000')
        f = cStringIO.StringIO()
        img.save(f, "jpeg")
        f.seek(0)
        response.app_iter = f
    else:
        response.app_iter = dsv
    return response
def upload_dnd(request):
    """Save a drag-and-drop upload into the session's transform dir, add it
    to that directory's upload.zip, and return the stored name as JSON."""
    check_login(request)
    save_dir = os.path.join(request.registry.settings['transform_dir'],
                            request.session['upload_dir'])
    # userfn, if browser does not support naming of blobs, this might be
    # 'blob', so we need to further uniquefy it.
    userfn = request.POST['upload'].filename or ''
    ext = ''
    mtype = request.POST['upload'].headers.get('content-type')
    if mtype is not None:
        ext = mimetypes.guess_extension(mtype) or ''
    # If it has an extension (a dot and three of four characters at the
    # end), strip it.  (Raw string keeps the regex escape explicit.)
    userfn = re.compile(r'\.\w{3,4}$').sub('', userfn)
    fn = userfn + '_' + datetime.datetime.now().strftime('%s') + ext
    # No point in using an iterator, we need the entire content for the zip
    # file anyway
    fob = request.POST['upload'].file
    blk = fob.read()
    # BUG FIX: write in binary mode -- 'w' corrupts binary uploads where
    # newline translation applies.
    with open(os.path.join(save_dir, fn), 'wb') as fp:
        fp.write(blk)
    # Update upload.zip
    append_zip(os.path.join(save_dir, 'upload.zip'), fn, blk)
    response = Response(json.dumps({'url': fn}))
    response.content_type = 'application/json'
    return response
def MergeFile(req):
    # Merge file entry ``n`` into entry ``o``: entry n is removed, every
    # dependency on n in the link list is re-pointed at o, indexes above n
    # are shifted down, and the numbered on-disk files above n are renamed
    # down by one.  Responds with a JSON summary of the resulting file list.
    def DecreaseLarger(arr, n):
        # Shift indexes greater than n down by one (entry n was removed).
        for i in xrange(len(arr)):
            if arr[i] > n:
                arr[i] -= 1

    fname = GetQueryFileName(req.GET)
    links = Reference.FileLinks(fname)
    try:
        n = int(req.GET["n"])
    except:
        return HTTPBadRequest_Param("n")
    try:
        o = int(req.GET["o"])
    except:
        return HTTPBadRequest_Param("o")
    for j in links:
        # Re-point dependencies on n to o, then renumber the remainder.
        if n in j.Depends:
            j.Depends.remove(n)
            if not o in j.Depends:
                j.Depends = sorted(j.Depends + [o])
        DecreaseLarger(j.Depends, n)
    # Shift the numbered backing files above n down by one slot.
    for i in xrange(n, len(links) - 1):
        f = fname + "_" + str(i + 1)
        if os.path.exists(f):
            os.rename(f, fname + "_" + str(i))
    # o itself moves down if it sat above the removed entry.
    if o > n:
        o -= 1
    del links.Links[n]
    links.Write(fname)
    # Hand-built JSON: removed index, newly selected index, and the full
    # file list with dependency indexes (65535+ means "none" -> -1).
    resp = Response('{"removed":[' + str(n) + '],"select":' + str(o) +
                    ',"files":[' +
                    ",".join(['{"name":"' + os.path.split(l.Name)[1] +
                              '","type":' + str(l.Type) + ',"deps":[' +
                              ",".join([test(d < 65535, str(d), "-1")
                                        for d in l.Depends]) +
                              ']}' for l in links]) +
                    ']}\r\n', request=req)
    resp.cache_expires(0)
    return resp
def forbidden_view(context, request):
    """ View to trap all Forbidden errors and redirect any not logged in
    users to the login page. For logged in users, a template is rendered -
    this template probably won't be seen by the user though since there is
    Javascript handling 401 errors from form posts showing a small pop-up
    error message instead.

    :param context: Some object like HTTPForbidden()
    :param request: Request() object
    :return:
    """
    user = authenticated_userid(request)
    if user:
        # Logged-in user: render a plain forbidden page with status 401.
        reason = getattr(context, 'explanation', 'unknown')
        log.debug("User {!r} tripped Forbidden view, request {!r}, "
                  "reason {!r}".format(user, request, reason))
        response = Response(render('templates/forbidden.jinja2', {}))
        response.status_int = 401
        return response
    # Anonymous user: send them to the SAML2 login flow, preserving the
    # originally requested path.
    loginurl = request.route_url('saml2-login',
                                 _query=(('next', request.path),))
    if request.is_xhr:
        return HTTPXRelocate(loginurl)
    return HTTPFound(location=loginurl)
def get_ticket(request):
    """
    this view gives a user access to her ticket via URL with code
    the response is a PDF download
    """
    _code = request.matchdict['code']
    _email = request.matchdict['email']
    _ticket = PartyTicket.get_by_code(_code)
    # BUG FIX: identity check instead of isinstance(..., NoneType).
    if _ticket is None:
        return HTTPFound(location=request.route_url('party'))
    if not (_ticket.email == _email):
        # email does not match the ticket: back to the party page
        return HTTPFound(location=request.route_url('party'))
    # prepare ticket URL with email & code
    # 'https://events.c3s.cc/ci/p1402/' + _ticket.email + _ticket.email_confirm_code
    _url = request.registry.settings[
        'c3spartyticketing.url'] + '/ci/p1402/' + _ticket.email_confirm_code
    # return a pdf file
    pdf_file = make_qr_code_pdf(_ticket, _url)
    response = Response(content_type='application/pdf')
    pdf_file.seek(0)  # rewind to beginning
    # BUG FIX: open in binary mode -- text mode can corrupt the PDF bytes.
    response.app_iter = open(pdf_file.name, "rb")
    return response
def dynamic_item_img(request):
    """Serve an item's stored JPEG bytes, adapting the in-memory string to
    the file-like interface FileIter expects."""
    item = Item.from_id(request.matchdict['item_id'])
    response = Response(content_type='image/jpeg')

    class _BytesReader(object):
        """Minimal read()/close() adapter over an in-memory byte string.

        (Replaces the original debug ``Hack`` class; the printing
        ``__getattr__`` overrides added nothing beyond default behavior.)
        """

        def __init__(self, img):
            self.img = img
            self.idx = 0

        def read(self, block_size=None):
            if self.idx >= len(self.img):
                return ''
            if block_size is None:
                # Read everything in one go.
                self.idx = len(self.img)
                return self.img
            # BUG FIX: slice relative to the current position.  The original
            # used img[idx:block_size], which returns wrong (eventually
            # empty) chunks as soon as idx exceeds block_size, truncating
            # the served image.
            chunk = self.img[self.idx:self.idx + block_size]
            self.idx += len(chunk)
            return chunk

        def close(self):
            pass

    response.app_iter = FileIter(_BytesReader(item.img.img))
    return response
def xhr_filemgr(self):
    """Dispatch an elFinder-style file-manager command taken from the
    request params; return either a streamed file response or the finder's
    JSON payload."""
    cmd = ''
    cmd_args = dict()
    for k in self.request.params:
        if k == '_':
            # Cache-buster parameter; ignore.
            continue
        if k == 'cmd':
            cmd = self.request.params[k]
        elif k.endswith("[]"):
            # Array-style parameter: collect every value under the bare key.
            # BUG FIX: slice off the literal "[]" suffix.  rstrip("[]")
            # strips a *character set* from the end and mangles keys such
            # as "a[0][]" (-> "a[0").
            cmd_args[k[:-2]] = self.request.params.getall(k)
        else:
            cmd_args[k] = self.request.params[k]
    finder = create_finder(self.context, self.request)
    try:
        finder.run(cmd, cmd_args)
    except FinderError as e:
        L.exception(e)
        if e.status:
            self.request.response.status = e.status
    if 'file' in finder.response:
        # File download: stream it with whatever headers the finder set.
        resp = Response()
        resp.app_iter = finder.response['file']
        if finder.headers:
            for k, v in finder.headers.items():
                resp.headers[k] = v
        return resp
    if finder.headers:
        for k, v in finder.headers.items():
            self.request.response.headers[k] = v
    return finder.response
def response_wrapper(status_code, message, result=None):
    """Build a JSON Response carrying status_code, message and an optional
    result payload."""
    payload = {'status_code': status_code, 'message': message}
    if result is not None:
        payload['result'] = result
    resp = Response(status_code=status_code, content_type='application/json')
    resp.body = json.dumps(payload)
    return resp
def serve(spec):
    """Resolve the asset ``spec`` to a file path and return a static file
    response that serves it. If the file isn't found, return a 404.
    """
    # Resolve the spec to an absolute https url.
    url = request.static_url(spec)
    if url.startswith('//'):
        url = 'https:' + url
    # Fetch the asset.
    r = requests.get(url)
    if r.status_code != requests.codes.ok:
        explanation = not_found_msg if r.status_code == 404 else err_message
        return not_found(explanation=explanation)
    # Build the download response.
    filename = spec.split('/')[-1]
    mime_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
    response = Response(content_type=mime_type)
    response.headers['Content-Disposition'] = (
        'attachment; filename="{0}"'.format(filename))
    response.body = r.content
    return response
def export(request):
    """Export the configured Plone instances as a JSON list of dicts with
    url / zeoport / debugport / mountpoint / plonesite / title / entorn.

    Only instances with at least one gateway URL are exported; the URL is
    forced to plain http.
    """
    # NOTE(review): the original also queried MyModel for 'root', read
    # basic-auth credentials and built an 'entorns' list without using any
    # of them; those dead computations were dropped here.
    data = parseData(folderbase=REDIRECTIONS_PATH)
    instances = [data['instancies'][a] for a in data['instancies'].keys()]
    json_data_list = []
    for ins in instances:
        if len(ins['urls']) > 0:
            url = ins['urls'][0]['gwurl'].replace('https', 'http')
            json_data_list.append(dict(url=url,
                                       zeoport=ins['zeoport'],
                                       debugport=ins['debugport'],
                                       mountpoint=ins['mountpoint'],
                                       plonesite=ins['plonesite'],
                                       title=ins['title'],
                                       entorn=ins['entorn']))
    response = Response(json.dumps(json_data_list))
    response.content_type = 'application/json'
    return response
def response(self, request, error):
    """
    Render an API Response

    Create a Response object, similar to the JSONP renderer
    [TODO: re-factor in to the JSONP renderer]
    Return the Response object with the appropriate error code
    """
    jsonp_render = request.registry._jsonp_render
    default = jsonp_render._make_default(request)
    # Serialize the error envelope the same way the JSONP renderer would.
    val = self.serializer(self.envelope(success=False, error=error.error),
                          default=default, **jsonp_render.kw)
    callback = request.GET.get(jsonp_render.param_name)
    response = Response("", status=200)  # API Error code is always 200
    if callback is None:
        # Plain JSON: carry the real error code on the HTTP status.
        ct = 'application/json'
        response.status = error.code
        response.body = val
    else:
        # JSONP: wrap in the callback; HTTP status stays 200 so the client
        # script actually executes.
        # NOTE(review): the callback name comes from the query string
        # unsanitized -- confirm upstream validation (XSS vector otherwise).
        ct = 'application/javascript'
        response.text = '%s(%s)' % (callback, val)
    # Only override the content type if the app hasn't set a custom one.
    if response.content_type == response.default_content_type:
        response.content_type = ct
    return response
def plot(request):
    """
    http://stackoverflow.com/a/5515994/185820
    """
    import cStringIO
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    # Figure size in inches; overridable via ?x=..&y=.. query parameters.
    qs = parse_qs(request.query_string)
    x = int(qs['x'][0]) if 'x' in qs else 4
    y = int(qs['y'][0]) if 'y' in qs else 4

    fig = Figure(figsize=[x, y])
    ax = fig.add_axes([.1, .1, .8, .8])
    ax.scatter([1, 2], [3, 4])
    canvas = FigureCanvasAgg(fig)

    # write image data to a string buffer and get the PNG image bytes
    buf = cStringIO.StringIO()
    canvas.print_png(buf)
    data = buf.getvalue()

    # write image bytes back to the browser
    response = Response(data)
    response.content_type = 'image/png'
    response.content_length = len(data)
    return response
def dump_entries_to_excel(entries, group_by, bigger_than):
    """Render time entries into an .xls workbook and return
    ``(file, response)`` where the response streams the workbook as an
    attachment (caller owns closing the file)."""
    import tempfile  # local import: only needed for this export path

    wbk = xlwt.Workbook()
    sheet = wbk.add_sheet("Hours")
    heading_xf = xlwt.easyxf(
        "font: bold on; align: wrap on, vert centre, horiz center")
    headings = ("Client", "Project", "Ticket id", "Employee", "Description",
                "Date", "Time")
    headings_width = (x * 256 for x in (20, 30, 10, 40, 100, 12, 10))
    for colx, value in enumerate(headings):
        sheet.write(0, colx, value, heading_xf)
    for i, width in enumerate(headings_width):
        sheet.col(i).width = width
    # Freeze the heading row.
    sheet.set_panes_frozen(True)
    sheet.set_horz_split_pos(1)
    sheet.set_remove_splits(True)
    rows, asum = ExcelRow.from_ordered_data(entries, group_by, bigger_than)
    for j, row in enumerate(rows):
        for i, cell in enumerate(row.pprint_row()):
            sheet.write(j + 1, i, *cell)
    # BUG FIX: use a unique temp file instead of the fixed "/tmp/tmp.xls";
    # the fixed path let concurrent requests clobber each other and is a
    # classic symlink-attack target.
    fd, file_path = tempfile.mkstemp(suffix=".xls")
    os.close(fd)
    wbk.save(file_path)
    file = open(file_path, "rb")
    response = Response(content_type="application/vnd.ms-excel", app_iter=file)
    response.headers["Cache-Control"] = "no-cache"
    response.content_disposition = (
        'attachment; filename="report-%s.xls"'
        % datetime.datetime.now().strftime("%d-%m-%Y--%H-%M-%S"))
    return file, response
def post(self):
    # Aggregate logged (non-deleted) time per client / user / month via raw
    # SQL, then return the summary as an Excel attachment.
    rows = self.session.query(
        'cid', 'cname', 'uid', 'uname', 'date', 'time').from_statement("""
        SELECT c.id as cid, c.name as cname, u.id as uid, u.name as uname,
               date_trunc('month', t.date) as date, SUM(t.time) as time
        FROM time_entry t, project p, client c, "user" u
        WHERE t.project_id = p.id
          AND p.client_id = c.id
          AND t.user_id = u.id
          AND t.deleted = false
        GROUP BY c.id, c.name, u.id, u.name, date_trunc('month', t.date)
        ORDER BY date_trunc('month', t.date)
    """).all()
    # monthly maps (uid, month) -> the summed times of every (client, user,
    # month) row for that user/month pair.
    monthly = h.groupby(rows,
                        lambda row: (row[2], row[-2]),
                        lambda row: row[5])
    # Sheet rows: client name, user name, time, month, user's monthly total
    # across all clients.
    rows = [(
        row[1],
        row[3],
        row[5],
        row[4].strftime('%Y-%m-%d'),
        sum(monthly[row[2], row[-2]]),
    ) for row in rows]
    stream = self._to_excel(rows)
    response = Response(
        content_type='application/vnd.ms-excel',
        app_iter=stream,
    )
    response.headers['Cache-Control'] = 'no-cache'
    response.content_disposition = 'attachment; filename="report-%s.xls"' % datetime.datetime.now().strftime('%d-%m-%Y--%H-%M-%S')
    return response
def failed_conversion(exc, request):
    """Error view for conversion failures; responds with HTTP 500."""
    # If the view has two formal arguments, the first is the context.
    # The context is always available as ``request.context`` too.
    filetype = exc.args[0] if exc.args else ""
    body = ('Failed conversion: file of type %s could not be converted. '
            'A common cause is a table of contents or other automated index. '
            'Remove this from your file, save, and try again.' % filetype)
    response = Response(body)
    response.status_int = 500
    return response
def buildResponse(self, payload=None):
    """Build a Response from *payload*, falling back to ``self.data`` when
    no payload is given, using the instance's status code and content type.
    """
    # BUG FIX: the original ``payload == None and self.data or payload``
    # returned None (the payload) whenever payload was None *and*
    # self.data was falsy (e.g. "" or {}), instead of self.data.  An
    # explicit conditional has no such trap.
    data = self.data if payload is None else payload
    response = Response(data, status_int=self.status_code)
    response.content_type = self.response_content_type
    return response
def not_found(request):
    """Error view: respond with a JSON body and HTTP 404."""
    response = Response(json='Not found', status=404)
    return response
def wrapper(request):
    """Call the wrapped view ``f`` and serialize its return value as JSON."""
    payload = json.dumps(f(request))
    return Response(body=payload, content_type='application/json')
def thisview(request):
    """Trivial stub view returning a fixed body."""
    response = Response('this view')
    return response
def stub_view_groups(request):
    """Stub view that returns groups if logged in, fails if not."""
    groups = effective_principals(request)
    # BUG FIX (py3 compat): json.dumps cannot serialize a map object;
    # materialize the stringified principals as a list.  Output is
    # identical on py2.
    return Response(json.dumps([str(g) for g in groups]))
def stub_view_auth(request):
    """Stub view that returns userid if logged in, fails if not."""
    userid = authenticated_userid(request)
    if userid is not None:
        return Response(userid)
    raise HTTPForbidden
def stub_view_public(request):
    """Stub view that returns userid if logged in, None otherwise."""
    return Response(str(unauthenticated_userid(request)))
def forbidden(request):
    """Error view: respond with a JSON body and HTTP 403."""
    response = Response(json='Forbidden', status=403)
    return response
def f0(self, context, request):
    # Wrapper around a view callable ``f`` (closure variable, defined in the
    # enclosing scope together with ``redir`` -- neither is visible here):
    # enforces url_base_internal, rejects double-slash paths, normalizes
    # URLs via redirect, logs out stale authenticated users, runs the view,
    # and post-processes dict results.
    url_base_internal = self.options.url_base_internal
    if url_base_internal is not None:
        if not request.url.startswith(url_base_internal):
            msg = (
                'Given that url_base_internal is set, I was expecting that all urls'
                ' would start with it.')
            raise_desc(Exception, msg, request_url=request.url,
                       url_base_internal=url_base_internal)
    if '//' in urlparse.urlparse(request.url).path:
        msg = 'This is an invalid URL with 2 slashes: %s' % request.url
        response = Response(msg)
        response.status_int = 500
        return response
    if redir:
        url = request.url
        p = urlparse.urlparse(url)
        url2 = url
        # only do redirection if we have url_base_internal
        # The redirection is needed because of https; however
        # for casual use it is likely https is not set up.
        if self.options.url_base_internal:
            if '127.0.0.1' in p.netloc:
                url2 = url2.replace('127.0.0.1', 'localhost')
            # Append a trailing slash to non-.html paths.
            if not p.path.endswith('.html'):
                if not p.path.endswith('/'):
                    url2 = url2.replace(p.path, p.path + '/')
        if url2 != url:
            logger.info('Context: %s' % context)
            logger.info('Redirection:\n from: %s\n to: %s' % (url, url2))
            raise HTTPFound(url2)
    if request.authenticated_userid:
        # Authenticated id with no matching DB user: forget and redirect.
        uid = request.authenticated_userid
        from mcdp_web.main import WebApp
        app = WebApp.singleton
        user_db = app.hi.db_view.user_db
        if not uid in user_db:
            msg = 'The user is authenticated as "%s" but no such user in DB.' % uid
            msg += 'We are logging out the user.'
            logger.warn(msg)
            headers = forget(request)
            raise HTTPFound(location=request.url, headers=headers)
    try:
        res = f(self, context, request)
    except HTTPException:
        # HTTP exceptions (redirects etc.) are control flow; pass through.
        raise
    except Exception as e:
        msg = 'While running %s:' % (f.__name__)
        msg += '\n' + indent(traceback.format_exc(e), ' >')
        logger.error(msg)
        raise
    if isinstance(res, Response):
        return res
    # Dict results get the common template fields added before rendering.
    check_isinstance(res, dict)
    try:
        add_other_fields(self, res, request, context=context)
    except:
        logger.error('Error after executing view %s' % f)
        if isinstance(context, Resource):
            logger.debug(context_display_in_detail(context))
        raise
    return res
def file_upload(request):
    """Handle a distutils-style package file upload.

    Validates metadata, creates the project/release rows as needed, verifies
    and stores the uploaded file (and optional GPG signature), and records
    journal entries along the way.
    """
    # Before we do anything, if there isn't an authenticated user with this
    # request, then we'll go ahead and bomb out.
    if request.authenticated_userid is None:
        raise _exc_with_message(
            HTTPForbidden,
            "Invalid or non-existent authentication information.",
        )
    # distutils "helpfully" substitutes unknown, but "required" values with the
    # string "UNKNOWN". This is basically never what anyone actually wants so
    # we'll just go ahead and delete anything whose value is UNKNOWN.
    for key in list(request.POST):
        if request.POST.get(key) == "UNKNOWN":
            del request.POST[key]
    # We require protocol_version 1, it's the only supported version however
    # passing a different version should raise an error.
    if request.POST.get("protocol_version", "1") != "1":
        raise _exc_with_message(HTTPBadRequest, "Unknown protocol version.")
    # Look up all of the valid classifiers
    all_classifiers = request.db.query(Classifier).all()
    # Validate and process the incoming metadata.
    form = MetadataForm(request.POST)
    form.classifiers.choices = [
        (c.classifier, c.classifier) for c in all_classifiers
    ]
    if not form.validate():
        # Report the highest-priority field error; fall back to the
        # alphabetically first erroring field.
        for field_name in _error_message_order:
            if field_name in form.errors:
                break
        else:
            field_name = sorted(form.errors.keys())[0]
        raise _exc_with_message(
            HTTPBadRequest,
            "{field}: {msgs[0]}".format(
                field=field_name,
                msgs=form.errors[field_name],
            ),
        )
    # TODO: We need a better method of blocking names rather than just
    #       hardcoding some names into source control.
    if form.name.data.lower() in {"requirements.txt", "rrequirements.txt"}:
        raise _exc_with_message(
            HTTPBadRequest,
            "The name {!r} is not allowed.".format(form.name.data),
        )
    # Ensure that we have file data in the request.
    if "content" not in request.POST:
        raise _exc_with_message(
            HTTPBadRequest,
            "Upload payload does not have a file.",
        )
    # Look up the project first before doing anything else, this is so we can
    # automatically register it if we need to and can check permissions before
    # going any further.
    try:
        project = (
            request.db.query(Project)
            .filter(
                Project.normalized_name ==
                func.normalize_pep426_name(form.name.data)).one()
        )
    except NoResultFound:
        # The project doesn't exist in our database, so we'll add it along with
        # a role setting the current user as the "Owner" of the project.
        project = Project(name=form.name.data)
        request.db.add(project)
        request.db.add(
            Role(user=request.user, project=project, role_name="Owner")
        )
        # TODO: This should be handled by some sort of database trigger or a
        #       SQLAlchemy hook or the like instead of doing it inline in this
        #       view.
        request.db.add(
            JournalEntry(
                name=project.name,
                action="create",
                submitted_by=request.user,
                submitted_from=request.client_addr,
            ),
        )
        request.db.add(
            JournalEntry(
                name=project.name,
                action="add Owner {}".format(request.user.username),
                submitted_by=request.user,
                submitted_from=request.client_addr,
            ),
        )
    # Check that the user has permission to do things to this project, if this
    # is a new project this will act as a sanity check for the role we just
    # added above.
    if not request.has_permission("upload", project):
        raise _exc_with_message(
            HTTPForbidden,
            "You are not allowed to upload to {!r}.".format(project.name)
        )
    # Fetch (or create) the release for the submitted version.
    try:
        release = (
            request.db.query(Release)
            .filter(
                (Release.project == project) &
                (Release.version == form.version.data)).one()
        )
    except NoResultFound:
        release = Release(
            project=project,
            _classifiers=[
                c for c in all_classifiers
                if c.classifier in form.classifiers.data
            ],
            dependencies=list(_construct_dependencies(
                form,
                {
                    "requires": DependencyKind.requires,
                    "provides": DependencyKind.provides,
                    "obsoletes": DependencyKind.obsoletes,
                    "requires_dist": DependencyKind.requires_dist,
                    "provides_dist": DependencyKind.provides_dist,
                    "obsoletes_dist": DependencyKind.obsoletes_dist,
                    "requires_external": DependencyKind.requires_external,
                    "project_urls": DependencyKind.project_url,
                }
            )),
            **{
                k: getattr(form, k).data
                for k in {
                    # This is a list of all the fields in the form that we
                    # should pull off and insert into our new release.
                    "version", "summary", "description", "license",
                    "author", "author_email", "maintainer",
                    "maintainer_email", "keywords", "platform",
                    "home_page", "download_url", "requires_python",
                }
            }
        )
        request.db.add(release)
        # TODO: This should be handled by some sort of database trigger or a
        #       SQLAlchemy hook or the like instead of doing it inline in this
        #       view.
        request.db.add(
            JournalEntry(
                name=release.project.name,
                version=release.version,
                action="new release",
                submitted_by=request.user,
                submitted_from=request.client_addr,
            ),
        )
    # TODO: We need a better solution to this than to just do it inline inside
    #       this method. Ideally the version field would just be sortable, but
    #       at least this should be some sort of hook or trigger.
    releases = (
        request.db.query(Release)
        .filter(Release.project == project)
        .all()
    )
    for i, r in enumerate(sorted(
            releases, key=lambda x: packaging.version.parse(x.version))):
        r._pypi_ordering = i
    # Pull the filename out of our POST data.
    filename = request.POST["content"].filename
    # Make sure that the filename does not contain any path separators.
    if "/" in filename or "\\" in filename:
        raise _exc_with_message(
            HTTPBadRequest,
            "Cannot upload a file with '/' or '\\' in the name.",
        )
    # Make sure the filename ends with an allowed extension.
    if _dist_file_re.search(filename) is None:
        raise _exc_with_message(HTTPBadRequest, "Invalid file extension.")
    # Make sure that our filename matches the project that it is being uploaded
    # to.
    prefix = pkg_resources.safe_name(project.name).lower()
    if not pkg_resources.safe_name(filename).lower().startswith(prefix):
        raise _exc_with_message(
            HTTPBadRequest,
            "The filename for {!r} must start with {!r}.".format(
                project.name,
                prefix,
            )
        )
    # Check the content type of what is being uploaded
    if (not request.POST["content"].type or
            request.POST["content"].type.startswith("image/")):
        raise _exc_with_message(HTTPBadRequest, "Invalid distribution file.")
    # Check to see if the file that was uploaded exists already or not.
    if request.db.query(
            request.db.query(File)
            .filter(File.filename == filename)
            .exists()).scalar():
        raise _exc_with_message(HTTPBadRequest, "File already exists.")
    # Check to see if the file that was uploaded exists in our filename log.
    if (request.db.query(
            request.db.query(Filename)
            .filter(Filename.filename == filename)
            .exists()).scalar()):
        raise _exc_with_message(
            HTTPBadRequest,
            "This filename has previously been used, you should use a "
            "different version.",
        )
    # The project may or may not have a file size specified on the project, if
    # it does then it may or may not be smaller or larger than our global file
    # size limits.
    file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))
    with tempfile.TemporaryDirectory() as tmpdir:
        temporary_filename = os.path.join(tmpdir, filename)
        # Buffer the entire file onto disk, checking the hash of the file as we
        # go along.
        with open(temporary_filename, "wb") as fp:
            file_size = 0
            file_hash = hashlib.md5()
            for chunk in iter(
                    lambda: request.POST["content"].file.read(8096), b""):
                file_size += len(chunk)
                if file_size > file_size_limit:
                    raise _exc_with_message(HTTPBadRequest, "File too large.")
                fp.write(chunk)
                file_hash.update(chunk)
        # Actually verify that the md5 hash of the file matches the expected
        # md5 hash. We probably don't actually need to use hmac.compare_digest
        # here since both the md5_digest and the file whose file_hash we've
        # computed comes from the remote user, however better safe than sorry.
        if not hmac.compare_digest(
                form.md5_digest.data, file_hash.hexdigest()):
            raise _exc_with_message(
                HTTPBadRequest,
                "The MD5 digest supplied does not match a digest calculated "
                "from the uploaded file."
            )
        # Check the file to make sure it is a valid distribution file.
        if not _is_valid_dist_file(temporary_filename, form.filetype.data):
            raise _exc_with_message(
                HTTPBadRequest,
                "Invalid distribution file.",
            )
        # Check that if it's a binary wheel, it's on a supported platform
        if filename.endswith(".whl"):
            wheel_info = _wheel_file_re.match(filename)
            plats = wheel_info.group("plat").split(".")
            if set(plats) - ALLOWED_PLATFORMS:
                raise _exc_with_message(
                    HTTPBadRequest,
                    "Binary wheel for an unsupported platform.",
                )
        # Also buffer the entire signature file to disk.
        if "gpg_signature" in request.POST:
            has_signature = True
            with open(os.path.join(tmpdir, filename + ".asc"), "wb") as fp:
                signature_size = 0
                for chunk in iter(
                        lambda: request.POST["gpg_signature"].file.read(8096),
                        b""):
                    signature_size += len(chunk)
                    if signature_size > MAX_SIGSIZE:
                        raise _exc_with_message(
                            HTTPBadRequest,
                            "Signature too large.",
                        )
                    fp.write(chunk)
            # Check whether signature is ASCII armored
            with open(os.path.join(tmpdir, filename + ".asc"), "rb") as fp:
                if not fp.read().startswith(b"-----BEGIN PGP SIGNATURE-----"):
                    raise _exc_with_message(
                        HTTPBadRequest,
                        "PGP signature is not ASCII armored.",
                    )
        else:
            has_signature = False
        # TODO: This should be handled by some sort of database trigger or a
        #       SQLAlchemy hook or the like instead of doing it inline in this
        #       view.
        request.db.add(Filename(filename=filename))
        # Store the information about the file in the database.
        file_ = File(
            release=release,
            filename=filename,
            python_version=form.pyversion.data,
            packagetype=form.filetype.data,
            comment_text=form.comment.data,
            size=file_size,
            has_signature=bool(has_signature),
            md5_digest=form.md5_digest.data,
        )
        request.db.add(file_)
        # TODO: This should be handled by some sort of database trigger or a
        #       SQLAlchemy hook or the like instead of doing it inline in this
        #       view.
        # NOTE(review): the format string hard-codes "(unknown)" and never
        # uses the ``filename`` kwarg passed to .format() -- possibly meant
        # "add {python_version} file {filename}".  Confirm before changing:
        # journal text may be user-visible history.
        request.db.add(
            JournalEntry(
                name=release.project.name,
                version=release.version,
                action="add {python_version} file (unknown)".format(
                    python_version=file_.python_version,
                    filename=file_.filename,
                ),
                submitted_by=request.user,
                submitted_from=request.client_addr,
            ),
        )
        # TODO: We need a better answer about how to make this transactional so
        #       this won't take affect until after a commit has happened, for
        #       now we'll just ignore it and save it before the transaction is
        #       committed.
        storage = request.find_service(IFileStorage)
        storage.store(file_.path, os.path.join(tmpdir, filename))
        if has_signature:
            storage.store(
                file_.pgp_path,
                os.path.join(tmpdir, filename + ".asc"),
            )
    return Response()
def put_port(request):
    """Apply configuration settings to a single named port.

    The port name is taken from the matched route; the request
    parameters are validated against ``put_port_schema`` before the
    port service applies them.
    """
    name = request.matchdict['port_name']
    configs = get_params_from_request(request, put_port_schema)
    service = get_port_service_from_request(request)
    service.configure_port(name, configs)
    return Response(status=200)
def home_page(request):
    """View for the home page.

    Reads ``templates/index.html`` relative to this module and returns
    its contents as the response body.
    """
    file_path = os.path.join(THIS_DIR, 'templates', 'index.html')
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with io.open(file_path) as template_file:
        file_data = template_file.read()
    return Response(file_data)
def my_view(request):
    """Look up the ``MyModel`` row named ``'one'``.

    Returns a template context dict on success; if the database layer
    raises ``DBAPIError``, returns a plain-text 500 response instead.
    """
    try:
        row = (
            DBSession.query(MyModel)
            .filter(MyModel.name == 'one')
            .first()
        )
    except DBAPIError:
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)
    return {'one': row, 'project': 'klassenbuch'}
def detail_page(request):
    """View for the detail page.

    Reads ``templates/single.html`` relative to this module and returns
    its contents as the response body.
    """
    file_path = os.path.join(THIS_DIR, 'templates', 'single.html')
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with io.open(file_path) as template_file:
        file_data = template_file.read()
    return Response(file_data)
def preflight_view(context, request):
    """Return an empty 200 response (e.g. for preflight requests).

    NOTE(review): the name suggests CORS preflight handling, but no
    CORS headers are set here -- confirm against route configuration.
    """
    empty = Response()
    return empty
def index(self): return Response(render_home_view(base_url='/'))
def edit_page(request):
    """View for the edit page.

    Reads ``templates/edit_entry.html`` relative to this module and
    returns its contents as the response body.
    """
    file_path = os.path.join(THIS_DIR, 'templates', 'edit_entry.html')
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with io.open(file_path) as template_file:
        file_data = template_file.read()
    return Response(file_data)
def app_(request):
    """Redirect to the compare UI once the app is configured.

    If the ``base`` setting is missing or blank, return a plain response
    telling the operator how to configure it; otherwise redirect to the
    compare page.
    """
    base = request.registry.settings.get("base", "")
    if base.strip():
        return exc.HTTPFound(location="/_/compare/")
    return Response("Edit pyramid.ini and set `base` to the appropriate path.")
def create_response(headers, body, status):
    """Build a Response from raw header/body/status data.

    Header names and values are coerced to native strings before being
    handed to the Response constructor.
    """
    native_headers = []
    for key, value in headers.items():
        native_headers.append((native_(key), native_(value)))
    return Response(body=body, status=status,
                    headerlist=native_headers, charset='utf8')
def logout(self): headers = forget(self.request) return Response('Logged out', headers=headers)
def ex(exc, request):
    """Exception view: log the traceback and return a generic 500.

    Requests under ``/api`` get a JSON error body; everything else gets
    a plain-text body.
    """
    # Print the traceback of the exception currently being handled.
    # The original used a bare ``raise`` inside ``try`` plus a bare
    # ``except:`` just to reach the active exception; printing it
    # directly is equivalent and avoids swallowing SystemExit /
    # KeyboardInterrupt with the bare except clause.
    traceback.print_exc()
    if request.path.startswith("/api"):
        return Response(json={"err": True}, status_code=500)
    return Response(body="err", status_code=500)
def delete(self): return Response('Deleted')
fs = self.request.POST['qqfile'] # We can fail hard, as somebody is trying to cheat on us if that fails. assert isinstance(fs, FieldStorage) try: factory = self.factory_by_name(self.request.POST['content_type']) except KeyError, e: result = { 'success': False, 'error': e.message, } else: name = title_to_name(fs.filename, blacklist=self.context.keys()) self.context[name] = node = factory.from_field_storage(fs) node.title = fs.filename result = { "success": True, "url": self.request.resource_url(node), } # FineUploader expects JSON with Content-Type 'text/plain' response = Response(json.dumps(result)) response.content_type = 'text/plain' return response def includeme(config): config.scan(__name__)
def hello_world(request):
    """Serve a minimal static HTML greeting."""
    html = '<body><h1>Hello World!</h1></body>'
    return Response(html)
def login(self): userid = self.request.params.get('userid') headers = remember(self.request, userid) return Response('Logged in as %s' % userid, headers=headers)
redis_client = self.request.registry.redis_client cache_key = CACHE_KEYS['stats_countries'] cached = redis_client.get(cache_key) if cached: data = loads(cached) else: session = self.request.db_ro_session data = countries(session) redis_client.set(cache_key, dumps(data), ex=3600) return {'page_title': 'Cell Statistics', 'metrics': data} def favicon_view(request): return FileResponse(FAVICON_PATH, request=request) def touchicon_view(request): return FileResponse(TOUCHICON_PATH, request=request) _robots_response = Response(content_type='text/plain', body="User-agent: *\n" "Disallow: /leaders\n" "Disallow: /static/\n" "Disallow: /v1/\n") def robotstxt_view(context, request): return _robots_response
def show(self): return Response('Shown')
from mist.core.helpers import view_config except ImportError: from mist.io import config from mist.io.helpers import user_from_request from pyramid.view import view_config from mist.io import methods from mist.io.model import Keypair from mist.io.shell import Shell import mist.io.exceptions as exceptions from mist.io.exceptions import * from mist.io.helpers import get_auth_header log = logging.getLogger(__name__) OK = Response("OK", 200) @view_config(context=Exception) def exception_handler_mist(exc, request): """Here we catch exceptions and transform them to proper http responses This is a special pyramid view that gets triggered whenever an exception is raised from any other view. It catches all exceptions exc where isinstance(exc, context) is True. """ # non-mist exceptions. that shouldn't happen! never! if not isinstance(exc, exceptions.MistError): trace = traceback.format_exc()
def reservation_entry(request):
    """Dispatch reservation requests by HTTP method.

    OPTIONS gets an empty 200 (preflight); GET is delegated to
    ``get_reservation``. NOTE(review): any other method falls through
    and returns None -- presumably the route restricts the allowed
    methods; confirm.
    """
    method = request.method
    if method == 'OPTIONS':
        return Response(status=200)
    if method == 'GET':
        return get_reservation(request)
def guardar_obtener_atributo(request):
    """Save (POST) or fetch (GET) attribute assignments for an item.

    POST: restores an item-attribute payload from the JSON body, creates
    the ``ItemAtributo`` assignment, and returns it serialized as a DTO.
    GET: returns the item's attribute assignments, first creating
    default assignments for any mandatory type attributes when the item
    has none yet.

    NOTE(review): the response key ``'sucess'`` (sic) is part of the
    wire format consumed by the client -- do not "fix" the spelling.
    """
    if (request.method == 'POST'):
        # Restore the posted payload into a plain dict-like entity.
        u = Unpickler()
        entidad = u.restore(request.json_body);
        dao = ItemAtributoDAO(request)
        item_dao = ItemDAO(request)
        item = item_dao.get_by_id(entidad["_item_id"])
        a_tipo_dao = AtributoTipoItemDAO(request)
        # Build and persist the new item<->attribute assignment.
        asignacion = ItemAtributo(entidad["_item_id"],
                                  entidad["_atributo_id"],
                                  entidad["_valor"] )
        dao.crear(asignacion);
        # Attach the related entities so the DTO can serialize them.
        asignacion._item = item
        atributo = a_tipo_dao.get_by_id(entidad["_atributo_id"])
        asignacion._atributo = atributo
        asignacion = ItemAtributoDTO(asignacion)
        p = Pickler(True, None)
        aRet = p.flatten(asignacion)
        # Bump references so dependent items see the new version.
        item_dao.actualizarReferenciasItemNuevaVersion(item._id)
        return Response(json.dumps({'sucess': 'true', 'lista': aRet}))
    elif (request.method == 'GET'):
        item_id = request.GET.get('_item_id');
        p = Pickler(True, None)
        # No (or placeholder) item id: return an empty list payload.
        if (item_id == 0 or item_id == "0" or item_id == None):
            j_string = p.flatten([])
            a_ret = json.dumps({'sucess': True, 'lista': j_string})
            return Response(a_ret)
        itemDAO = ItemDAO(request)
        item = itemDAO.get_by_id(item_id);
        atributoTipoItemDAO = AtributoTipoItemDAO(request)
        atributosTipoItem = atributoTipoItemDAO.get_atributos_by_tipo_id(
            item._tipo_item_id)
        # -- commented-out earlier approach kept for reference ---------
        # lista=[]
        # for atributo in atributosTipoItem:
        #     dao = ItemAtributoDAO(request)
        #     actual = dao.get_query().filter(ItemAtributo._item_id == item.id , ItemAtributo._atributo_id == atributo._id).order_by(ItemAtributo._version.desc()).first();
        #     if actual != None:
        #         lista.append(actual)
        # aRet = []
        # if (len(lista)==0):
        # --------------------------------------------------------------
        dao = ItemAtributoDAO(request)
        # All existing assignments for this item.
        entidades = dao.get_query().filter(
            ItemAtributo._item_id == item._id).all()
        aRet = entidades
        # For each mandatory attribute of the item's type, create a
        # default-valued assignment -- but only when the item has no
        # assignments at all yet (first access).
        for atributov in atributosTipoItem:
            if atributov._opcional == False:
                itemAtributo = ItemAtributo(item.id, atributov._id,
                                            atributov._defecto)
                # itemAtributo._item = item
                # itemAtributo._atributo = atributo
                # itemAtributoDTO = ItemAtributoDTO(itemAtributo)
                if len(entidades) == 0:
                    dao.crear(itemAtributo);
                    aRet.append(itemAtributo)
                # else:
                #     for entidad in entidades:
                #         if (itemAtributo._atributo_id != entidad._atributo_id):
                #             aRet.append(itemAtributo)
        # Wrap every assignment in a DTO with its related entities.
        entidadesDTO = [];
        for entidad in aRet:
            dao = AtributoTipoItemDAO(request)
            atributo = dao.get_by_id(entidad._atributo_id)
            entidad._item = item
            entidad._atributo = atributo
            itemAtributoDTO = ItemAtributoDTO(entidad);
            entidadesDTO.append(itemAtributoDTO)
        j_string = p.flatten(entidadesDTO)
        a_ret = json.dumps({'sucess': True, 'lista': j_string})
        return Response(a_ret)
def hello_world(request):
    """Greet the ``name`` captured by the route's matchdict."""
    greeting = 'Hello %(name)s!' % request.matchdict
    return Response(greeting)