def image(doc_id, fname):
    bin_file = models.BinaryDocumentContent.query.filter_by(
        document_id=doc_id, name=fname).first()
    if bin_file is None:
        return "404 Error", 404
    # SQLite-backed storage keeps the blob base64-encoded; decode before streaming.
    if app.config.get('IS_SQLITE', False):
        return Response(stream_with_context(standard_b64decode(bin_file.data)),
                        mimetype=bin_file.mimetype)
    return Response(stream_with_context(bin_file.data), mimetype=bin_file.mimetype)
def coursesRoute(param):
    data = {}
    # If the data is in string format, convert to JSON
    if type(request.data) == str:
        jsonData = json.loads(request.data)
    else:
        jsonData = request.form
    # A GET request is incoming
    if request.method == 'GET':
        req = requests.get("http://127.0.0.1:9001/courses" + param, stream=True)
        return Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
    # A POST request is incoming
    elif request.method == 'POST':
        for k, v in jsonData.iteritems():
            data.update({k: v})
        req = requests.post("http://127.0.0.1:9001/courses" + param, data=data)
        return Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
    # A PUT request is incoming
    elif request.method == 'PUT':
        for k, v in jsonData.iteritems():
            data.update({k: v})
        req = requests.put("http://127.0.0.1:9001/courses" + param, data=data)
        return Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
    # A DELETE request is incoming
    elif request.method == 'DELETE':
        for k, v in jsonData.iteritems():
            data.update({k: v})
        req = requests.delete("http://127.0.0.1:9001/courses" + param, data=data)
        return Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
def render_file(wiki_path):
    fs_path = os.path.join(wiki.base_path, wiki_path)
    template_name = 'page.html'
    # The file-type dispatch (rst/txt/html/raw send_file) that used to live
    # here, marked ##MOVED, now lives in _get_page().
    page, content = _get_page(wiki_path)
    sidebar_fs_path = acquire_file('sidebar', wiki_path)
    if page:
        # we don't want to be rude and disable ALL autoescaping
        ## app.jinja_env.autoescape = False
        if sidebar_fs_path:
            return Response(stream_with_context(
                page.render(template_name, page_title=wiki_path,
                            sidebar=file_iterator(sidebar_fs_path))),
                mimetype='text/html')
        else:
            return Response(stream_with_context(
                page.render(template_name, page_title=wiki_path)),
                mimetype='text/html')
        ## app.jinja_env.autoescape = True
    elif content:
        return content
def coursesRoute(param):
    data = {}
    # A GET request is incoming
    if request.method == 'GET':
        req = requests.get("http://127.0.0.1:9001/courses" + param, stream=True)
        resp = Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
        return resp, req.status_code
    # A POST request is incoming
    elif request.method == 'POST':
        for k, v in request.form.iteritems():
            data.update({k: v})
        req = requests.post("http://127.0.0.1:9001/courses" + param, data=data)
        resp = Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
        return resp, req.status_code
    # A PUT request is incoming
    elif request.method == 'PUT':
        for k, v in request.form.iteritems():
            data.update({k: v})
        req = requests.put("http://127.0.0.1:9001/courses" + param, data=data)
        resp = Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
        return resp, req.status_code
    # A DELETE request is incoming
    elif request.method == 'DELETE':
        req = requests.delete("http://127.0.0.1:9001/courses" + param, stream=True)
        resp = Response(stream_with_context(req.iter_content()),
                        content_type=req.headers['content-type'])
        return resp, req.status_code
def content_item_resource(id, resource):
    if request.method == 'GET':
        wrap = request.args.get('wrap')
        status_code, data, contentType = model.getContentResource(id, resource)
        if status_code == 200:
            if contentType.startswith("text/html") and wrap is not None:
                blob = io.BytesIO()
                for chunk in data:
                    blob.write(chunk)
                content = blob.getvalue().decode("utf-8").strip()
                if not content.startswith('<!DOCTYPE'):
                    editorConfig = app.config.get('EDITOR_CONFIG')
                    header = ''
                    bodyStart = ''
                    bodyEnd = ''
                    if editorConfig is not None and wrap == 'preview':
                        wheader = editorConfig.get('wrap-header')
                        pheader = editorConfig.get('preview-wrap-header')
                        if pheader is not None:
                            header = pheader
                        elif wheader is not None:
                            header = wheader
                        wbody = editorConfig.get('wrap-body')
                        pbody = editorConfig.get('preview-body-main')
                        if pbody is not None:
                            bodyStart = pbody[0]
                            bodyEnd = pbody[1]
                        elif wbody is not None:
                            bodyStart = wbody[0]
                            bodyEnd = wbody[1]
                    elif editorConfig is not None and wrap == 'formatted':
                        wheader = editorConfig.get('wrap-header')
                        if wheader is not None:
                            header = wheader
                        wbody = editorConfig.get('wrap-body')
                        if wbody is not None:
                            bodyStart = wbody[0]
                            bodyEnd = wbody[1]
                    content = ('<!DOCTYPE html>\n<html>\n<head><title>' + resource
                               + '</title>' + header + '</head>\n<body>\n'
                               + bodyStart + content + bodyEnd + '</body></html>')
                return Response(stream_with_context(content),
                                content_type=contentType)
            else:
                return Response(stream_with_context(data),
                                content_type=contentType)
        else:
            abort(status_code)
    if request.method == 'PUT':
        status_code, data, contentType = model.updateContentResource(
            id, resource, request.headers['Content-Type'], request.stream)
        if status_code == 200 or status_code == 201:
            return Response(stream_with_context(data), status=status_code,
                            content_type=contentType)
        else:
            return Response(status=status_code)  # was `status`, an undefined name
    if request.method == 'DELETE':
        status = model.deleteContentResource(id, resource)
        return Response(status=status)
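# A minimal sketch (an assumption, not from the source) of the EDITOR_CONFIG
# shape that content_item_resource() above reads: the '*-header' keys are HTML
# <head> fragments, and 'wrap-body' / 'preview-body-main' are (prefix, suffix)
# pairs wrapped around the body content.
EDITOR_CONFIG_EXAMPLE = {
    'wrap-header': '<link rel="stylesheet" href="/static/editor.css">',
    'preview-wrap-header': '<link rel="stylesheet" href="/static/preview.css">',
    'wrap-body': ('<div class="formatted">', '</div>'),
    'preview-body-main': ('<div class="preview">', '</div>'),
}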
def get_object(start, span, video):
    # Reference:
    # https://github.com/mikeboers/PyAV/blob/master/tests/test_seek.py
    video_path = str(_get_obj_absolute_path(video))
    proc = _create_ffmpeg_segment_proc(video_path, start_sec=start,
                                       duration_sec=span)

    def generate():
        while True:
            data = proc.stdout.read(4096)
            if not data:
                break
            yield data

    headers = Headers([('Content-Type', 'video/mp4')])
    response = Response(stream_with_context(generate()), status="200 OK",
                        headers=headers)
    # Cache control
    stat = os.stat(video_path)
    last_modified = stat.st_mtime
    size = stat.st_size
    etag = "{}_{}_{}_{}".format(last_modified, size, start, span)
    response.last_modified = last_modified
    response.set_etag(etag=etag)
    response.cache_control.public = True
    response.cache_control.max_age = \
        datetime.timedelta(days=365).total_seconds()
    response.make_conditional(request)
    return response
def format_blob(sha, blob, repository):
    resp = Response(stream_with_context(blob.data))
    resp.headers['X-Accel-Buffering'] = 'no'
    resp.headers['Cache-Control'] = 'no-cache'
    resp.headers['Content-Length'] = len(blob.data)
    resp.headers['Content-Type'] = magic.from_buffer(blob.data, mime=True)
    return resp
def query():
    def get_results():
        yield '['
        first_row = True
        for row in result_gen:
            if row is None:
                yield ''
            else:
                row_str = ',\n {}'.format(json.dumps(row[1]))
                if first_row:
                    row_str = row_str.lstrip(',')
                    first_row = False
                yield row_str
        yield '\n]'

    gp_str = request.args.get('gp', '{}')
    import re
    try:
        gp_match = re.search(r'\{(.*)\}', gp_str).groups(0)
        if len(gp_match) != 1:
            raise APIError('Invalid graph pattern')
        tps = re.split(r'\. ', gp_match[0])
        prefixes, result_gen = get_query_generator(*tps, monitoring=10, **STOA)
        return Response(stream_with_context(get_results()),
                        mimetype='application/json')
    except Exception, e:  # Python 2 syntax
        raise APIError('There was a problem with the request: {}'.format(e.message),
                       status_code=500)
def stream_playlist_from(start):
    def gen():
        cur = g.db.execute(
            'SELECT track_id, filename, filetype FROM playlist '
            'INNER JOIN tracks ON tracks.id = track_id '
            'WHERE position >= ? ORDER BY position ASC', [start])
        playlist = [dict(track_id=row[0], filename=row[1], filetype=row[2])
                    for row in cur.fetchall()]
        fmt = 'wav'
        for tr in playlist:
            if tr['filetype'] == 'wav':
                # stream directly
                with app.open_resource(tr['filename'], 'rb') as f:
                    yield from f
            else:
                # use ffmpeg to transcode on the fly
                with subprocess.Popen(
                        ['ffmpeg', '-i', tr['filename'], '-ar', '44100',
                         '-f', fmt, '-'],
                        stdout=subprocess.PIPE) as p:
                    yield from p.stdout
        # (dead `f = 's16le'` assignment and commented-out experiments removed)

    return Response(stream_with_context(gen()), mimetype='audio/wav')
def index():
    def generate():
        yield 'Hello '
        yield flask.request.args['name']
        yield '!'
    return flask.Response(flask.stream_with_context(Wrapper(generate())))
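# `Wrapper` is not defined in the snippet above. A plausible minimal sketch
# (an assumption, not the original class): an iterable adapter that forwards
# iteration and close(), the two hooks stream_with_context relies on to tear
# the wrapped generator down when the response ends.
class Wrapper(object):
    def __init__(self, gen):
        self._gen = gen

    def __iter__(self):
        return iter(self._gen)

    def close(self):
        # Propagate close() so the inner generator's cleanup runs.
        if hasattr(self._gen, 'close'):
            self._gen.close()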
def download_tsv_flow(flow_id):
    '''Download entry/exit stats for a given flow

    :status 200: Download a tab-separated file with entry/exit information
        for each contract with a given :py:class:`~purchasing.data.flows.Flow`
    :status 404: Could not find given :py:class:`~purchasing.data.flows.Flow`
    '''
    flow = Flow.query.get(flow_id)
    if flow:
        tsv, headers = flow.reshape_metrics_granular()

        def stream():
            yield '\t'.join(headers) + '\n'
            for contract_id, values in tsv.iteritems():
                yield '\t'.join([str(i) for i in values]) + '\n'

        resp = Response(
            stream_with_context(stream()),
            headers={
                "Content-Disposition":
                    "attachment; filename=conductor-{}-metrics.tsv".format(
                        flow.flow_name)
            },
            mimetype='text/tsv'
        )
        return resp
    abort(404)
def export_get(search_type, return_type):
    '''Export the search results as a file'''
    search_string = request.args.get('search', '')
    if search_string == '':
        abort(400)
    if return_type not in ('json', 'csv', 'fasta', 'fastaa'):
        abort(400)
    query = Query.from_string(search_string, search_type=search_type,
                              return_type=return_type)
    g.verbose = False
    search_results = core_search(query)
    if len(search_results) > 100 and search_type == 'cluster' \
            and return_type == 'fasta':
        raise TooManyResults('More than 100 search results for FASTA cluster '
                             'download, please specify a smaller query.')
    found_bgcs = format_results(query, search_results)
    if query.return_type == 'json':
        found_bgcs = [json.dumps(found_bgcs)]

    def generate():
        for line in found_bgcs:
            yield line + '\n'

    mime_type = MIME_TYPE_MAP.get(query.return_type, None)
    return Response(stream_with_context(generate()), mimetype=mime_type)
def get_file(url):
    url = apply_rewrite_rules(url)
    range_header = request.headers.get('Range', None)
    return_headers = Headers()
    S3Key = get_S3Key(url)
    try:
        size = S3Key.size
    except:
        return Response(None, 404)
    if range_header:
        print "%s: %s (size=%d)" % (url, range_header, size)
        start_range, end_range = [int(x) for x in
                                  range_header.split("=")[1].split("-")]
        get_headers = {'Range': "bytes=%d-%d" % (start_range, end_range)}
        return_headers.add('Accept-Ranges', 'bytes')
        return_headers.add('Content-Range',
                           'bytes {0}-{1}/{2}'.format(start_range, end_range, size))
        return_headers.add('Content-Length', end_range - start_range + 1)
        return_code = 206
    else:
        print "%s: all data (size=%d)" % (url, size)
        get_headers = {}
        return_code = 200
    S3Key.open_read(headers=get_headers)

    def stream(S3Key):
        while True:
            data = S3Key.resp.read(S3Key.BufferSize)
            if data:
                yield data
            else:
                # plain return ends the generator; the original's
                # `raise StopIteration` is an error under PEP 479
                return

    return Response(stream_with_context(stream(S3Key)), return_code,
                    headers=return_headers, direct_passthrough=True)
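# The Range parsing above assumes a fully-specified "bytes=start-end" header
# and raises on the common open-ended form "bytes=start-". A slightly more
# tolerant sketch (an assumption, not from the source):
def parse_range(range_header, size):
    spec = range_header.split('=', 1)[1]
    start_s, end_s = spec.split('-', 1)
    start = int(start_s)
    end = int(end_s) if end_s else size - 1  # open-ended range goes to EOF
    return start, end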
def stream():
    """SSE stream handler"""
    stream_id = uuid.uuid4().get_hex()
    rtc.users[stream_id] = WebRTCUser(stream_id)
    rtc.users_by_stream[stream_id] = rtc.users[stream_id]
    return Response(stream_with_context(event_stream(stream_id)),
                    mimetype="text/event-stream")
def crowdflower_export(model):
    def generate():
        yield "url\n"
        for row in recordings_model.get_random_recordings(model):
            yield "%s\n" % row.url

    return Response(stream_with_context(generate()), mimetype='text/csv')
def nodes(env):
    """Fetch all (active) nodes from PuppetDB and stream a table displaying
    those nodes.

    Downside of the streaming approach is that since we've already sent our
    headers we can't abort the request if we detect an error. Because of
    this we'll end up with an empty table instead because of how
    yield_or_stop works. Once pagination is in place we can change this but
    we'll need to provide a search feature instead.

    :param env: Search for nodes in this (Catalog and Fact) environment
    :type env: :obj:`string`
    """
    check_env(env)
    status_arg = request.args.get('status', '')
    nodelist = puppetdb.nodes(
        query='["and", {0}]'.format(
            ", ".join('["=", "{0}", "{1}"]'.format(field, env)
                      for field in ['catalog_environment', 'facts_environment'])),
        unreported=app.config['UNRESPONSIVE_HOURS'],
        with_status=True)
    nodes = []
    for node in yield_or_stop(nodelist):
        if status_arg:
            if node.status == status_arg:
                nodes.append(node)
        else:
            nodes.append(node)
    return Response(stream_with_context(
        stream_template('nodes.html', nodes=nodes, envs=envs, current_env=env)))
def map_stream():
    def event_stream():
        pubsub = redis.pubsub()
        pubsub.subscribe(session['random_userid'])
        for message in pubsub.listen():
            yield 'data: %s\n\n' % message['data']

    return Response(stream_with_context(event_stream()),
                    mimetype="text/event-stream")
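# A variant sketch (an assumption, not from the source) that also cleans up
# the Redis subscription: when Flask tears down a streamed response it calls
# close() on the generator, which raises GeneratorExit inside it, so the
# finally block runs and the pubsub connection is not leaked.
def event_stream_with_cleanup(pubsub, channel):
    pubsub.subscribe(channel)
    try:
        for message in pubsub.listen():
            yield 'data: %s\n\n' % message['data']
    finally:
        pubsub.unsubscribe(channel)
        pubsub.close()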
def fact(fact):
    """Fetches the specific fact from PuppetDB and displays its value per
    node for which this fact is known."""
    # we can only consume the generator once, lists can be doubly consumed
    # om nom nom
    localfacts = [f for f in yield_or_stop(puppetdb.facts(name=fact))]
    return Response(stream_with_context(
        stream_template("fact.html", name=fact, facts=localfacts)))
def get_status_events():
    swc_response = stream_with_context(
        _PubSubWrapper('mm-engine-status.*', pattern=True)
    )
    r = Response(swc_response, mimetype='text/event-stream')
    return r
def index():
    def generate():
        yield "Hello "
        yield flask.request.args["name"]
        yield "!"
    return flask.Response(flask.stream_with_context(Wrapper(generate())))
def get_scope(stride, span, gididx):
    index = 'GIDIDX' + gididx.upper()

    def generate():
        yield '<?xml version="1.0" encoding="UTF-8" ?>\n'
        if STYLE:
            yield '<?xml-stylesheet type="text/xsl" href="/scopelist.xsl" ?>\n'
        yield '<objectlist>\n'
        with open(_get_index_absolute_path(index), 'r') as f:
            for video in f:
                video = video.strip()
                video_path = str(_get_obj_absolute_path(video))
                video_meta = _maybe_parse_video_and_get_attrs(
                    video_path=video_path)
                num_clips = int(ceil(float(video_meta['duration_sec']) / stride))
                yield '<count adjust="{}"/>\n'.format(num_clips)
                for clip in range(num_clips):
                    yield _get_object_element(start=clip * stride, span=span,
                                              video=video) + '\n'
        yield '</objectlist>\n'

    headers = Headers([('Content-Type', 'text/xml')])
    return Response(stream_with_context(generate()), status="200 OK",
                    headers=headers)
def nodes(env):
    """Fetch all (active) nodes from PuppetDB and stream a table displaying
    those nodes.

    Downside of the streaming approach is that since we've already sent our
    headers we can't abort the request if we detect an error. Because of
    this we'll end up with an empty table instead because of how
    yield_or_stop works. Once pagination is in place we can change this but
    we'll need to provide a search feature instead.

    :param env: Search for nodes in this (Catalog and Fact) environment
    :type env: :obj:`string`
    """
    envs = environments()
    check_env(env, envs)

    if env == "*":
        query = None
    else:
        query = AndOperator()
        query.add(EqualsOperator("catalog_environment", env))
        query.add(EqualsOperator("facts_environment", env))

    status_arg = request.args.get("status", "")
    nodelist = puppetdb.nodes(query=query,
                              unreported=app.config["UNRESPONSIVE_HOURS"],
                              with_status=True)
    nodes = []
    for node in yield_or_stop(nodelist):
        if status_arg:
            if node.status == status_arg:
                nodes.append(node)
        else:
            nodes.append(node)
    return Response(stream_with_context(
        stream_template("nodes.html", nodes=nodes, envs=envs, current_env=env)))
def api_export(node_id):
    logger.debug('GET api_export')
    date = request.args.get('date')
    logger.info("date: %s", str(date))
    if not date:
        raise InvalidUsage("date is empty", status_code=STATUS_Not_Found)
    r = re.compile(r'\d{4}-\d{1,2}-\d{1,2}')
    if not r.match(date):
        raise InvalidUsage("date format not correct",
                           status_code=STATUS_Not_Found)
    logger.info("accepted date: %s" % date)

    def generate():
        # Note: raising inside the generator happens after headers have been
        # sent, so it aborts the stream rather than producing a clean error
        # response.
        num_lines = 0
        for row in export_generator(node_id, date, False, ';'):
            yield row + "\n"
            num_lines += 1
        if num_lines == 0:
            raise InvalidUsage("num_lines == 0",
                               status_code=STATUS_Server_Error)
        else:
            yield "# %d results\n" % num_lines

    return Response(stream_with_context(generate()), mimetype='text/csv')
def grade():
    assignment = request.args.get("assign", "NoneSuch")
    repo = request.args.get("repo", "NoneSuch")
    logging.debug("Grading " + assignment + ": " + repo)
    response = Response(stream_with_context(grade_stream(assignment, repo)),
                        mimetype="text/event-stream")
    logging.debug("Finished grading " + repo + ": " + str(response))
    return response
def signups():
    '''Basic dashboard view for category-level signups
    '''
    def stream():
        # yield the title columns
        yield 'first_name\tlast_name\tbusiness_name\temail\tphone_number\t' +\
            'minority_owned\twoman_owned\tveteran_owned\t' +\
            'disadvantaged_owned\tcategories\topportunities\n'
        vendors = Vendor.query.all()
        for vendor in vendors:
            row = vendor.build_downloadable_row()
            yield '\t'.join([str(i) for i in row]) + '\n'

    current_app.logger.info('BEACON VENDOR CSV DOWNLOAD')
    resp = Response(
        stream_with_context(stream()),
        headers={
            "Content-Disposition": "attachment; filename=vendors-{}.tsv".format(
                datetime.date.today())
        },
        mimetype='text/tsv'
    )
    return resp
def get_scope():
    """
    query string:
        slice=1:2000:3   start, stop, step; get a slice of all data. default=::
        distribute=2of8  distribute to the 2nd server out of 8 (1-indexed).
                         default=1of1
    """
    meta_file = os.path.join(DATAROOT, 'yfcc100m', 'yfcc100m.csv')
    try:
        # process query string args
        slice_str = request.args.get('slice', '::')
        start, stop, step = map(lambda x: int(x) if x else None,
                                slice_str.split(':')[:3])
        start = start or 0
        step = step or 1
        distribute_str = request.args.get('distribute', '1of1')
        n, m = map(int, distribute_str.split('of')[:2])
        assert step > 0
        assert 1 <= n <= m
        # manipulate start, step to incorporate distribute params
        start += (n - 1) * step
        step *= m
    except:
        abort(400)
    _log.debug("slice=%s; distribute=%s", slice_str, distribute_str)
    _log.info("Adjusted slice: start=%s stop=%s step=%s", start, stop, step)

    def generate():
        with open(meta_file, 'r') as f:
            yield '<?xml version="1.0" encoding="UTF-8" ?>\n'
            yield '<objectlist>\n'
            count = 0
            for line in itertools.islice(f, start, stop, step):
                tokens = line.strip().split('\t')
                # beware empty fields (two consecutive \t)
                media_hash, ext, is_video = (tokens[2], tokens[23],
                                             bool(int(tokens[24])))
                if is_video:
                    continue
                suffix = "{part1}/{part2}/{media_hash}.{ext}".format(
                    part1=media_hash[:3], part2=media_hash[3:6],
                    media_hash=media_hash,
                    ext='jpg')  # png in the file is bogus.
                yield '<count adjust="1"/>\n'
                yield _get_object_element(suffix) + '\n'
                count += 1
            yield '</objectlist>\n'

    headers = Headers([('Content-Type', 'text/xml')])
    return Response(stream_with_context(generate()), status="200 OK",
                    headers=headers)
def contribute():
    if request.method == 'GET':
        return render_template('contribute.html', session=session)
    elif request.method == 'POST':
        for k in request.form:
            v = session['contribute'].get(k)
            if not v or (request.form[k] and request.form[k] != v):
                session['contribute'][k] = request.form[k]
        for k, v in session['contribute'].iteritems():
            if not v:
                return render_template('contribute.html', session=session,
                                       missing='{} not set!'.format(k))
        mpfile = read_mpfile_to_view()
        if mpfile is None:
            return render_template('home.html', alert='Choose an MPFile!',
                                   session=session)
        fmt = session['options'][0]
        try:
            return Response(stream_with_context(stream_template(
                'contribute.html', session=session,
                content=submit_mpfile(
                    StringIO(mpfile),
                    api_key=session['contribute']['apikey'],
                    site=session['contribute']['site'],
                    dbtype=session['contribute']['dbtype'],
                    fmt=fmt
                ))))
        except:
            pass
def streamPlaylistDownload():
    print request.data
    playlist = request.args.get('playlist')
    print playlist
    # NOTE: the original had a bare `return` here (a debugging leftover)
    # that made everything below unreachable; it has been removed.
    tracks = playlist['tracks']
    playlist_name = playlist['name']
    message = playlist['message']
    owner = playlist['user']
    playlist_id = int(time.time())
    # create playlist
    createNewPlaylist(playlist_id, playlist_name, message, owner)

    def generate():
        for song in tracks:
            result_dict = download(song['url'], song['artist'], song['track'])
            print 'successfully downloaded '
            print result_dict
            saveTrack(playlist_id, song['track'], song['artist'], owner)
            yield 'data:' + song['artist'] + ' - ' + song['track'] + '\n\n'
        print 'here handlePlaylistDownload'
        playlistURL = generatePlaylistURL(owner, playlist_id)
        yield 'data:' + playlistURL

    return Response(stream_with_context(generate()))
def get_fragment():
    def get_quads():
        for prefix in prefixes:
            yield '@prefix {}: <{}> .\n'.format(prefix, prefixes[prefix])
        yield '\n'
        for chunk in fragment_gen:
            if chunk is None:
                yield ''
            else:
                headers, (c, s, p, o) = chunk
                yield u'{} {} {} .\n'.format(s.n3(graph.namespace_manager),
                                             p.n3(graph.namespace_manager),
                                             o.n3(graph.namespace_manager))

    gp_str = request.args.get('gp', '{}')
    import re
    try:
        gp_match = re.search(r'\{(.*)\}', gp_str).groups(0)
        if len(gp_match) != 1:
            raise APIError('Invalid graph pattern')
        tps = re.split(r'\. ', gp_match[0])
        prefixes, fragment_gen = get_fragment_generator(*tps, monitoring=30,
                                                        **STOA)
        graph = Graph()
        for prefix in prefixes:
            graph.bind(prefix, prefixes[prefix])
        return Response(stream_with_context(get_quads()), mimetype='text/n3')
    except Exception, e:  # Python 2 syntax
        raise APIError('There was a problem with the request: {}'.format(e.message),
                       status_code=500)
def fact(env, fact):
    """Fetches the specific fact from PuppetDB and displays its value per
    node for which this fact is known.

    :param env: Searches for facts in this environment
    :type env: :obj:`string`
    :param fact: Find all facts with this name
    :type fact: :obj:`string`
    """
    envs = environments()
    check_env(env, envs)

    # we can only consume the generator once, lists can be doubly consumed
    # om nom nom
    render_graph = False
    if fact in graph_facts:
        render_graph = True

    if env == "*":
        query = None
    else:
        query = EqualsOperator("environment", env)

    localfacts = [f for f in yield_or_stop(puppetdb.facts(name=fact,
                                                          query=query))]
    return Response(
        stream_with_context(
            stream_template(
                "fact.html",
                name=fact,
                render_graph=render_graph,
                facts=localfacts,
                envs=envs,
                current_env=env
            )
        )
    )
def table_data(connection, table, fields, opts):
    """
    Return plain/text CSV for table data, with specified field list and
    options list
    """
    db = utils.get_db(connection, table)
    try:
        results = db.get_table_data(table, fields, opts)
    except DBConError as db_error:
        abort(400, db_error.message)

    # create csv generator for results
    streamer = utils.csv_generator(results)

    # stream records as csv
    res = Response(stream_with_context(streamer()))
    res.headers['Content-type'] = 'text/plain'
    return res
def _export_csv(self, return_url):
    """
    Export a CSV of records as a stream.

    Adapted from someone else's code.
    """
    count, data = self._export_data()

    # https://docs.djangoproject.com/en/1.8/howto/outputting-csv/
    class Echo(object):
        """
        An object that implements just the write method of the file-like
        interface.
        """
        def write(self, value):
            """
            Write the value by returning it, instead of storing in a buffer.
            """
            return value

    writer = csv.writer(Echo())  # was commented out, leaving `writer` undefined

    def generate():
        # Append the column titles at the beginning, preceded by a UTF-8 BOM
        # (the original prepended the BOM twice)
        titles = [csv_encode(c[1]) for c in self._export_columns]
        titles[0] = codecs.BOM_UTF8.decode("utf8") + titles[0]
        yield writer.writerow(titles)
        for row in data:
            vals = [csv_encode(self.get_export_value(row, c[0]))
                    for c in self._export_columns]
            yield writer.writerow(vals)

    filename = self.get_export_name(export_type='csv')
    disposition = 'attachment;filename=%s' % (secure_filename(filename),)
    return Response(
        stream_with_context(generate()),
        headers={'Content-Disposition': disposition},
        mimetype='text/csv'
    )
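# The Echo trick above in isolation -- a minimal self-contained sketch of
# streaming CSV without buffering the whole file (route and data are
# illustrative, not from the source): csv.writer.writerow() returns whatever
# Echo.write() returns, so each formatted row can be yielded straight to the
# client.
import csv
from flask import Flask, Response, stream_with_context

app = Flask(__name__)

class Echo:
    def write(self, value):
        return value

@app.route('/rows.csv')
def rows_csv():
    writer = csv.writer(Echo())

    def generate():
        yield writer.writerow(['id', 'name'])
        for i in range(3):  # stand-in for a real query
            yield writer.writerow([i, 'row-%d' % i])

    return Response(stream_with_context(generate()), mimetype='text/csv')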
def labeled_images():
    global annotation, labels, visible_labels, images
    args = request.args
    if 'labels' in args:
        selected_labels = set(args['labels'].split(','))
        labeling = {
            path: labeled
            for path, labeled in annotation['labeling'].items()
            if set(labeled).intersection(selected_labels)
        }
    else:
        labeling = annotation['labeling']
        selected_labels = []
    if images is None:
        load_images_paths()
    perpage = int(args['perpage']) if 'perpage' in args else 25
    perpage = min(100, max(1, perpage))
    page = int(args['page']) if 'page' in args else 1
    max_page = (count_labeled() + perpage - 1) // perpage
    page = min(max_page, max(1, page))
    # Select for page
    labels_count = count_labeled_for_labels()
    labeling = dict(sorted(labeling.items())[(page - 1) * perpage:page * perpage])
    return Response(
        stream_with_context(
            stream_template('preview_images.html',
                            page_type='labeled',
                            labeling=labeling,
                            path_to_base64=path_to_base64,
                            n_images=len(images),
                            n_unlabeled=count_unlabeled(),
                            n_labeled=count_labeled(),
                            visible_labels=visible_labels,
                            labels=list(map(lambda x: x[0],
                                            labels_count.most_common())),
                            selected_labels=selected_labels,
                            labels_count=labels_count,
                            page=page,
                            max_page=max_page,
                            ending=ending,
                            n_found=count_labeled(),
                            perpage=perpage)))
def getSong(sid):
    fmt = request.args.get("format", None)
    song = Songs.query.filter_by(id=sid).first()
    if song is not None:
        try:
            songLocation = song.location
            for extension in self.supportedExtensions:
                if songLocation.lower().endswith(extension):
                    songMime = self.supportedMimes[extension]
                    if fmt is not None:
                        if extension.lower() != fmt.lower():
                            songLocation = self.conv.conv(songLocation, fmt,
                                                          song.id)
                            songMime = self.supportedMimes[fmt]
                    break
            else:
                songMime = "audio/*"

            headers = Headers()
            headers.add("Content-Transfer-Encoding", "binary")
            headers.add("Content-Disposition", "inline",
                        filename=song.name.encode('ascii', 'ignore'))
            headers.add("Content-length", os.path.getsize(songLocation))
            headers.add("Accept-Ranges", "bytes")

            def generate():
                with open(songLocation, "rb") as audio:
                    data = audio.read(1024)
                    while data:
                        yield data
                        data = audio.read(1024)

            return Response(stream_with_context(generate()),
                            mimetype=songMime, headers=headers)
        except FileNotFoundError as error:
            self.logger.warning("File Not Found: {}".format(song.location))
            return jsonify(status="error", error=str(error))
        except Exception as error:
            return jsonify(status="error", error=str(error))
    else:
        return jsonify(status="error", error="Song doesn't exist")
def startAggregator(projectName):
    if request.method == 'GET':
        main_dir = os.path.dirname(os.path.abspath(__file__))
        if not os.path.exists(os.path.join(main_dir, 'projects')):
            return returnResponse(409, "You have no projects")
        project_path = os.path.join(main_dir, "projects")
        if not os.path.exists(os.path.join(project_path, projectName)):
            return returnResponse(409, (projectName + " does not exist"))
        config_path = os.path.join(project_path, projectName, "configs",
                                   "server.yaml")

        def load_server_configs():
            with open(config_path, 'r') as stream:
                return yaml.safe_load(stream)

        # Get server configs
        server_configs = load_server_configs()

        # Create strategy
        strategy_configs = server_configs['strategy']
        strategy = fl.server.strategy.FedAvg(
            min_fit_clients=strategy_configs.get('min_fit_clients', 2),
            min_eval_clients=strategy_configs.get('min_eval_clients', 2),
            min_available_clients=strategy_configs.get('min_available_clients', 2),
        )

        # Start Flower server for federated learning
        server_ip = server_configs.get('ip', '[::]:8080')
        config = server_configs['config']  # currently unused; rounds hardcoded below

        #! Make this return a "server started" response and an "ended" response
        def start():
            yield "aggregator starting"
            fl.server.start_server(server_ip, config={"num_rounds": 5})
            yield "aggregator closed"

        return Response(stream_with_context(start()))
def download_csv(csvfile=None):
    """
    Download the audit entry as CSV file.
    Params can be passed as key-value-pairs.

    **Example request**:

    .. sourcecode:: http

       GET /audit/audit.csv?realm=realm1 HTTP/1.1
       Host: example.com
       Accept: text/csv

    **Example response**:

    .. sourcecode:: http

       HTTP/1.1 200 OK
       Content-Type: text/csv

       {
         "id": 1,
         "jsonrpc": "2.0",
         "result": {
           "status": true,
           "value": [
             {
                "serial": "....",
                "missing_line": "..."
             }
           ]
         },
         "version": "privacyIDEA unknown"
       }
    """
    audit = getAudit(current_app.config)
    g.audit_object.log({'success': True})
    return Response(stream_with_context(audit.csv_generator(request.all_data)),
                    mimetype='text/csv',
                    headers={"Content-Disposition": ("attachment; "
                                                     "filename=%s" % csvfile)})
def exec_query(user):
    """
    Only the super user can access the SQL API. This is primarily intended
    for internal recipes which may operate on machines without access to
    the database.
    """
    if not user.super_user:
        raise ForbiddenError("Only the super user can access the SQL API.")

    if request.method == "POST":
        q = request_data().get('query', None)
    if request.method == "GET":
        q = arg_str('query', default=None)
    if not q:
        raise RequestError('A query - "q" is required.')

    stream = arg_bool('stream', default=True)
    try:
        results = db.session.execute(q)
    except Exception as e:
        raise RequestError("There was an error executing this query: "
                           "{}".format(e.message))

    def generate():
        try:
            for row in ResultIter(results):
                if stream:
                    yield obj_to_json(row) + "\n"
                else:
                    yield row
        except ResourceClosedError:
            resp = {'success': True}
            if stream:
                yield obj_to_json(resp) + "\n"
            else:
                yield resp

    if stream:
        return Response(stream_with_context(generate()))

    data = list(generate())
    if len(data) == 1:
        if data[0]['success']:
            data = data[0]
    return jsonify(data)
def export_scores(cid, aid):
    courses, current_course = get_courses(cid)
    assign = Assignment.query.filter_by(id=aid, course_id=cid).one_or_none()
    if not Assignment.can(assign, current_user, 'export'):
        flash('Insufficient permissions', 'error')
        return abort(401)
    query = (Score.query.options(db.joinedload('backup'))
             .filter_by(assignment=assign, archived=False))
    custom_items = ('time', 'is_late', 'email', 'group')
    items = custom_items + Enrollment.export_items + Score.export_items

    def generate_csv():
        """ Generate csv export of scores for assignment.
            Num Queries: ~2N queries for N scores.
        """
        # Yield column info as first row
        yield ','.join(items) + '\n'
        for score in query:
            csv_file = StringIO()
            csv_writer = csv.DictWriter(csv_file, fieldnames=items)
            submitters = score.backup.enrollment_info()
            group = [s.user.email for s in submitters]
            time_str = utils.local_time(score.backup.created, current_course)
            for submitter in submitters:
                data = {'email': submitter.user.email,
                        'time': time_str,
                        'is_late': score.backup.is_late,
                        'group': group}
                data.update(submitter.export)
                data.update(score.export)
                csv_writer.writerow(data)
            yield csv_file.getvalue()

    file_name = "{0}.csv".format(assign.name.replace('/', '-'))
    disposition = 'attachment; filename={0}'.format(file_name)
    # TODO: Remove. For local performance testing.
    # return render_template('staff/index.html', data=list(generate_csv()))
    return Response(stream_with_context(generate_csv()), mimetype='text/csv',
                    headers={'Content-Disposition': disposition})
def accuracy_test():
    """Normalizing the test files on the fly.

    The trick is to have an inner function that uses a generator to generate
    data and to invoke that function and pass it to a Response object.
    """
    enc = request.form['enc-data']
    dec = request.form['dec-data']

    def stream_template(template_name, **context):
        """Returns a stream object from stream() instead of a string from
        render().

        Args:
            template_name (str): filename of the template
            **context (tuple): keyword arguments

        Returns:
            StreamObject: stream object of the template
        """
        APP.update_template_context(context)
        template = APP.jinja_env.get_template(template_name)
        stream = template.stream(context)
        stream.enable_buffering(5)
        return stream

    def generate():
        """Returns a generator for lazy loading of table rows.

        Yields:
            dict: The input, expected, and system output
        """
        enc_content = enc.splitlines()
        dec_content = dec.splitlines()
        for i, e in enumerate(enc_content[:100]):
            if e:
                result = {'enc': e.strip().strip('\n').lower(),
                          'dec': dec_content[i].strip().strip('\n').lower(),
                          'res': NORMALIZER.model_api(
                              e.strip().strip('\n').lower())}
                yield result

    return Response(stream_with_context(
        stream_template('accuracy_testing.html', rows=generate(),
                        tagged_words=tagged_words)))
def lint_xml_json():
    data = {}
    errors = lint_the_xml()
    if errors:
        data['valid'] = False
        data['errors'] = []
        for error in errors:
            err = {}
            err['line'] = error['line']
            err['column'] = error['column']
            err['desc'] = error['desc']
            data['errors'].append(err)
    else:
        data['valid'] = True
    # return pretty printed json
    return Response(stream_with_context(
        json.dumps(data, indent=4, sort_keys=True)),
        mimetype='application/json')
def stream_connect():
    emotion_model_path = ('./Engine/trained_models/emotion_models/'
                          'fer2013_mini_XCEPTION.102-0.66.hdf5')
    emotion_classifier = load_model(emotion_model_path, compile=False)
    emotion_classifier._make_predict_function()
    graph = tf.get_default_graph()
    links = request.args.get('links', '')
    resolution = request.args.get('resolution', '')
    link_list = links.split(',')
    print("links: " + str(link_list))
    video_streamer_list = []
    for link in link_list:
        vs = VideoStreamer(link, queueSize=128, resolution=resolution,
                           n_frame=15)
        video_streamer_list.append([link, vs])
    r_engine = RecognitionEngine(video_streamer_list, emotion_classifier,
                                 graph, queueSize=128)

    def generate():
        # loop over frames from the video file stream
        while True:
            if r_engine.more():
                element = r_engine.read()
                text = "[" + str(element[0]) + "," + str(element[1]) + "]"
                print(text)
                yield text + ","
            else:
                continue

    return Response(stream_with_context(generate()))
def exporter():
    '''This is the method used to access the exporter metrics'''
    result_set = 60
    if application.config['CB_RESULTSET']:
        result_set = application.config['CB_RESULTSET']

    _value = cb_exporter.run(
        application.config['CB_DATABASE'],
        application.config['CB_USERNAME'],
        application.config['CB_PASSWORD'],
        result_set)

    if application.config['CB_STREAMING']:
        def generate():
            for row in _value:
                yield row + '\n'
        return Response(stream_with_context(generate()), mimetype='text/plain')
    else:
        metrics_str = '\n'.join(_value)
        return Response(metrics_str, mimetype='text/plain')
def teamserver(path):
    if headerkey in flask.request.headers.get(header):
        url = teamserver + path
        requests_function = method_requests_mapping[flask.request.method]
        ip = flask.request.remote_addr
        ua = flask.request.headers.get('User-Agent')
        with open("access.log", "a") as log:
            log.write("Secret Cookie Requested from: " + ip)
            log.write('\n')
        # (the original also assigned `request = flask.request.base_url`,
        # which was immediately shadowed by the line below)
        request = requests_function(url, stream=True, params=flask.request.args)
        response = flask.Response(
            flask.stream_with_context(request.iter_content()),
            content_type=request.headers['content-type'],
            status=request.status_code)
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
    else:
        return redirect(redirect_url, code=302)
def dl_stream(action, video_id, res_type):
    video_info = get_video_info(video_id)
    if res_type == 'video':
        req = requests.get(video_info.url, stream=True, verify=False)
        extension = video_info.extension
    else:
        req = requests.get(video_info.audio_url, stream=True, verify=False)
        extension = 'm4a'
    # file_name = 'filename=' + video_info.id + '.' + extension
    file_name = ('filename=' + urllib.quote(video_info.title.encode('utf-8'))
                 + '.' + extension)
    file_name = 'attachment; ' + file_name
    headers = {
        'Content-Disposition': file_name,
        'Content-Type': 'video/' + extension
    }
    return Response(stream_with_context(
        req.iter_content(chunk_size=buffer_size)), headers=headers)
def upload_file(specialty):
    if request.method == 'POST':
        def generate():
            # NOTE: with the pandas block below commented out, this function
            # no longer contains an active yield, so it is not a generator.
            spec = Specialty.query.filter_by(name=specialty).first_or_404()
            f = request.files['file']
            wb = openpyxl.load_workbook(f)
            for ws in wb:
                if ws.title == "Surgery":
                    for col in ws.iter_cols(min_row=3, min_col=7, max_col=7):
                        for cell in col:
                            if cell.value:
                                name = cell.value
                                invited = ast.literal_eval(
                                    ws.cell(cell.row, cell.column - 4).value)
                                if invited:
                                    invited = dt.datetime.strptime(
                                        invited[0], '%m/%d/%Y')
            # Earlier pandas-based version, kept for reference:
            # f = pd.read_excel(f, engine='openpyxl', sheet_name=specialty,
            #                   header=0, usecols=[0, 1, 2, 3])
            # f = f.replace({np.nan: None})
            # for index, row in f.iterrows():
            #     state = row[0]
            #     name = row[1]
            #     invited = ast.literal_eval(row[3])
            #     if invited:
            #         invited = dt.datetime.strptime(
            #             ast.literal_eval(row[3])[0], '%m/%d/%Y')
            #     else:
            #         invited = None
            #     dates = ast.literal_eval(row[2])
            #     d = []
            #     for date in dates:
            #         d.append(dt.datetime.strptime(date, '%m/%d/%Y'))
            #     program = Program(name=name, state=state, specialty=spec)
            #     interview = Interview(date=invited, interviewer=program,
            #                           interviewee=current_user)
            #     dates = list(map(lambda x: Interview_Date(
            #         date=x, interviewer=program, interviewee=current_user,
            #         invite=interview, full=False), d))
            #     interview.dates = dates
            #     db.session.add(interview)
            #     db.session.commit()
            #     yield str(index)

        return Response(stream_with_context(generate()))
    return render_template('upload.html')
def task_status(task_name, task_id):
    task_in_exec = tasks_directory.get(task_name)
    if not task_in_exec:
        return jsonify(make_message(
            not task_in_exec,
            message='No {} task available with id: {}'.format(task_name,
                                                              task_id)))

    def task_status():
        while True:
            task = task_in_exec.AsyncResult(task_id)
            response = make_message(False, state=task.state)
            if task.state == 'WORKING':
                message = random_message()
                response['status'] = message
                yield 'event: message\ndata: {}\n\n'.format(json.dumps(response))
                time.sleep(1.2)
            elif task.state != 'FAILURE':
                if 'status' in task.info:
                    response['status'] = task.info.get('status')
                if 'results' in task.info:
                    response['results'] = task.info.get('results')
                yield 'event: message\ndata: {0}\n\n'.format(json.dumps(response))
                time.sleep(1.2)
            else:
                response['error'] = True
                response['status'] = str(task.info)
                yield 'event: message\ndata: {0}\n\n'.format(json.dumps(response))
                time.sleep(1.2)

    headers = Headers()
    headers.add('Content-Type', 'text/event-stream')
    headers.add('Cache-Control', 'no-cache')
    headers.add('Connection', 'keep-alive')
    return Response(stream_with_context(task_status()), status=200,
                    headers=headers)
def synthesize():
    voice = request.args.get('voice', 'en-US_MichaelVoice')
    accept = request.args.get('accept', 'audio/ogg; codecs=opus')
    text = request.args.get('text', '')
    download = request.args.get('download', '')
    headers = {}
    if download:
        headers['content-disposition'] = 'attachment; filename=transcript.ogg'
    try:
        req = textToSpeech.synthesize(text, voice, accept)
        return Response(stream_with_context(req.iter_content()),
                        headers=headers,
                        content_type=req.headers['content-type'])
    except Exception, e:  # Python 2 syntax
        abort(500)
def phonetize_wordlist_file():
    script = None
    if 'ipa' in request.args:
        script = 'ipa'
    if 'sampa' in request.args:
        script = 'sampa'
    if 'file' not in request.files:
        return abort(404)

    def generate():
        file = request.files['file']
        for line in file:
            word = line.strip().decode('utf-8')
            ret = tools.phonetize.phonetize_word(word, script)
            for trans in ret:
                yield f'{word} {trans}\n'

    return Response(stream_with_context(generate()), mimetype='text/plain')
def backup(index):
    def generate():
        q = request.args.get('q')
        params = {'size': 10, 'from': 0, 'sort': '@timestamp:desc'}
        if q:
            params['q'] = q
        resp = es.search(index='nexpose-{}-*'.format(index), params=params)
        while len(resp['hits']['hits']) > 0:
            for hit in resp['hits']['hits']:
                yield hit['_source']['message']
                yield '\n'
            params['from'] += 10
            resp = es.search(index='nexpose-{}-*'.format(index), params=params)

    response = Response(stream_with_context(generate()))
    response.headers['Content-Type'] = 'text/plain'
    response.headers['Content-Disposition'] = \
        'attachment; filename={}.log'.format(index)
    return response
def strain_data_tsv():
    """
    Dumps the strain dataset; normalizes lat/lon on the way out.
    """
    def generate():
        col_list = list(Strain.__mapper__.columns)
        col_order = [1, 0, 3, 4, 5, 7, 8, 9, 10, 28, 11, 12, 13, 14, 15, 16,
                     17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 2, 6]
        col_list[:] = [col_list[i] for i in col_order]
        header = [x.name for x in col_list]
        yield '\t'.join(header) + "\n"
        for row in query_strains(issues=False):
            row = [getattr(row, column.name) for column in col_list]
            yield '\t'.join(map(str, row)) + "\n"

    return Response(stream_with_context(generate()),
                    mimetype="text/tab-separated-values")
def do_gdrive_download(df, headers):
    total_size = int(df.metadata.get('fileSize'))
    download_url = df.metadata.get('downloadUrl')
    # NOTE: `partial(total_size, 1024 * 1024)` cannot work as written --
    # functools.partial needs a callable first argument. A range-splitting
    # helper was presumably meant here (see the sketch below); the original
    # comment also says "100M chunk size" while 1024 * 1024 is 1 MiB.
    s = partial(total_size, 1024 * 1024)

    def stream():
        for byte in s:
            headers = {"Range": 'bytes=%s-%s' % (byte[0], byte[1])}
            resp, content = df.auth.Get_Http_Object().request(download_url,
                                                              headers=headers)
            if resp.status == 206:
                yield content
            else:
                web.app.logger.info('An error occurred: %s' % resp)
                return

    return Response(stream_with_context(stream()), headers=headers)
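# A hypothetical range-splitting helper matching how `s` is consumed above
# (an assumption -- the original helper is not shown): yields inclusive
# (start, end) byte offsets covering total_size in chunk_size steps.
def split_ranges(total_size, chunk_size):
    for start in range(0, total_size, chunk_size):
        yield (start, min(start + chunk_size, total_size) - 1)

# e.g. list(split_ranges(10, 4)) == [(0, 3), (4, 7), (8, 9)]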
def generate_data_systemlogs_txt():
    if isAdministrator() or isSysMaintainer():
        def generate():
            result = db.engine.execute(logstmt, 30)
            first = True
            yield '{ "data": [\n'
            for row in result:
                if not first:
                    yield ',\n'
                else:
                    first = False
                yield '["' + '","'.join(map(str, row)) + '"]'
            yield ']}'
        return Response(stream_with_context(generate()), mimetype='text/utf-8')
    else:
        return render_template('notavailable.html',
                               message="You are not authorized.")
def main():
    if not request.form:
        return render_template('form.html', results="")
    else:
        invocation = request.form['invocation']
        if os.environ['LOCK'] == "True":  # was `is "True"`, an identity check
            return Response(render_template(
                'lock.html',
                results="<p class=\"red\">There's another test running right "
                        "now, please try again later.</p><p>If you can't wait "
                        "though, go ahead and <a href=\"/reset\">reset</a> it."
                        "</p>"
            ), mimetype='text/html'), 403
        else:
            results = invocation_test(invocation)  # run test
            os.environ['LOCK'] = "False"  # reset job lock
            return Response(stream_with_context(
                stream_template('results.html', results=(''.join(results)))),
                mimetype='text/html'), 200
def stream_package(db, storage_obj, package: APKType,
                   architecture: APKArch) -> Response:
    """Stream the package to the user

    Args:
        storage_obj (AbstractAPKStorage): Storage interface for grabbing the package
        package (APKType): Package to lookup
        architecture (APKArch): Architecture of the package to lookup
    """
    package_info: MADPackage = lookup_package_info(storage_obj, package,
                                                   architecture)[0]
    gen_func: Generator = file_generator(db, storage_obj, package, architecture)
    if isinstance(gen_func, Response):
        return gen_func
    return Response(
        stream_with_context(gen_func),
        content_type=package_info.mimetype,
        headers={
            'Content-Disposition': 'attachment; filename={}'.format(
                package_info.filename)
        }
    )
def execute_and_return_feature_collection(sql):
    """
    Execute a JSON-returning SQL statement and return an HTTP response.

    :type sql: SQL statement that returns a GeoJSON Feature
    """
    cur = get_cursor()
    cur.execute(sql)

    def generate():
        yield '{ "result": { "type": "FeatureCollection", "features": ['
        for idx, row in enumerate(cur):
            if idx > 0:
                yield ','
            yield json.dumps(row[0])
        yield ']}}'
        cur.close()

    return Response(stream_with_context(generate()), mimetype='application/json')
def post(self, path):
    tohtml = request.form.get('toHTML', False)
    if tohtml:
        final_convert = self._to_html
    else:
        final_convert = self._identity
    conll_comments = self._get_checked_bool('conll_comments',
                                            self._conll_comments)
    output_header = self._get_checked_bool('output_header',
                                           self._output_header)
    input_text = request.form.get('text')
    if 'file' in request.files and input_text is None:
        inp_data = codecs.getreader('UTF-8')(request.files['file'])
    elif 'file' not in request.files and input_text is not None:
        inp_data = input_text
    else:
        abort(400, 'ERROR: input text or file (mutually exclusive) not found '
                   'in request!')
        inp_data = None  # Silence, dummy IDE
    required_tools = path.split('/')
    try:
        last_prog = build_pipeline(inp_data, required_tools,
                                   self._internal_apps, self._presets,
                                   conll_comments, self._singleton_store,
                                   output_header)
    except (HeaderError, ModuleError) as e:
        abort(400, e)
        last_prog = ()  # Silence, dummy IDE

    response = Response(stream_with_context(
        final_convert((line.encode('UTF-8') for line in last_prog))),
        direct_passthrough=True,
        content_type='text/plain; charset=utf-8')
    if not tohtml:
        response.headers.set('Content-Disposition', 'attachment',
                             filename='output.txt')
    return response
def ping(site):
    def run(s):
        process = subprocess.Popen(['ping', s],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   universal_newlines=True)
        while True:
            output = process.stdout.readline()
            yield output.strip() + '<br/>'
            # Do something else
            return_code = process.poll()
            if return_code is not None:
                yield f'RETURN CODE: {return_code}'
                # Process has finished, read rest of the output
                for output in process.stdout.readlines():
                    yield output.strip() + '<br/>'
                break

    return Response(stream_with_context(run(site)))
def remove_one_run(file_id):
    def remove(run_id, remove_from_raw_file_db=False, remove_fits=True):
        yield f'Removing one {run_id}'
        run_id = int(run_id)
        if STIX_MDB.col_packets:
            yield 'removing packets...'
            STIX_MDB.col_packets.delete_many({'run_id': run_id})
        if STIX_MDB.col_raw_files:
            if remove_from_raw_file_db:
                yield 'removing from raw_file db...'
                STIX_MDB.col_raw_files.delete_many({'_id': run_id})
            else:
                yield 'hide the entry in raw_file db...'
                STIX_MDB.col_raw_files.update({'_id': run_id},
                                              {'$set': {'hidden': True}})
        if STIX_MDB.col_calibration:
            yield 'removing from calibration db...'
            STIX_MDB.col_calibration.delete_many({'run_id': run_id})
        if STIX_MDB.col_ql:
            yield 'removing from QL...'
            STIX_MDB.col_ql.delete_many({'run_id': run_id})
        if STIX_MDB.col_bsd:
            yield 'removing from bulk science data...'
            STIX_MDB.col_bsd.delete_many({'run_id': run_id})
        if STIX_MDB.col_flares:
            yield 'removing from auto flare db...'
            STIX_MDB.col_flares.delete_many({'run_id': run_id})
        if STIX_MDB.col_fits:
            yield 'removing fits file...'
            if remove_fits:
                cursor = STIX_MDB.col_fits.find({'file_id': run_id})
                for cur in list(cursor):
                    fits_filename = os.path.join(cur['path'], cur['filename'])
                    try:
                        yield f'Removing file: {fits_filename}'
                        os.unlink(fits_filename)
                    except Exception as e:
                        # the original string was missing the f prefix
                        yield (f'Failed to remove fits file: {fits_filename} '
                               f'due to: {str(e)}')
            STIX_MDB.col_fits.delete_many({'file_id': int(run_id)})

    return Response(stream_with_context(remove(file_id)))
def download_proxy(url, cookie=""):
    headers = build_headers
    headers['Cookie'] = cookie
    file_name = url.split("/")[-1:][0].split("?")[0]
    response_headers = {
        'Content-Disposition': "attachment; filename={}".format(file_name),
        'Content-Type': 'application/octet-stream'
    }

    # @stream_with_context
    def g():
        res = requests.get(url, headers=headers, stream=True)
        for chunk in res.iter_content(chunk_size=512):
            if chunk:
                yield chunk

    # Pass the headers to the constructor; the original assigned a plain dict
    # to `response.headers`, which replaces the Werkzeug Headers object.
    response = Response(stream_with_context(g()), headers=response_headers)
    return response
def index():
    def generator():
        import os
        with open(os.path.join(current_app.config['STATIC_PATH'], 'test.csv'),
                  encoding='utf-8') as f:
            f_csv = csv.DictReader(f)
            for row in f_csv:
                yield ' ' + row['updated_at']

    return Response(stream_with_context(generator()), mimetype='text/html')

    # An earlier experiment, kept for reference:
    # def generator():
    #     i = 0
    #     while i < 5:
    #         yield str(i)
    #         import time
    #         time.sleep(1)
    #         i += 1
    # return Response(generator())
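# A minimal self-contained sketch of the pattern every snippet above uses
# (illustrative, not from any one source): stream_with_context keeps the
# request context alive while the generator runs, so `request` and `session`
# remain usable after the view function has already returned.
from flask import Flask, Response, request, stream_with_context

app = Flask(__name__)

@app.route('/stream')
def stream():
    def generate():
        # Without stream_with_context, touching `request` here would raise
        # RuntimeError once the view has returned.
        yield 'Hello, '
        yield request.args.get('name', 'world')
        yield '!\n'
    return Response(stream_with_context(generate()), mimetype='text/plain')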