def json_nodes(nodes):
    """Serialize *nodes* to force-graph JSON (format: {BASE}/force-graph).

    This first builds the RDF graph and then converts it to JSON. The code
    duplication could be fixed with refactoring; more important is whether
    going through RDF makes sense at all. Because RDF does some cleanup to
    get to "well formed ids" there might be enough benefit from reusing that.
    """
    # BUG FIX: Flask config is a dict — subscript it, don't call it.
    base = current_app.config['URL_BASE']
    g = Graph()
    agora = Namespace(f"{base}/")
    g.namespace_manager.bind('agora', agora)
    print("jsoing agora using forward links only")
    node_count = len(nodes)
    print(f"node count: {node_count}")
    for node in nodes:
        add_node(node, g, only_forward=True)

    d = {}
    d["nodes"] = []
    d["links"] = []
    # A set is needed to deduplicate: each RDF term may appear in many
    # triples but must yield exactly one JSON node.
    unique_nodes = set()
    for n0, _, n1 in g.triples((None, None, None)):
        unique_nodes.add(n0)
        unique_nodes.add(n1)
    for node in unique_nodes:
        d["nodes"].append({'id': node, 'name': node, 'val': 1})
    for n0, link, n1 in g.triples((None, None, None)):
        d["links"].append({'source': n0, 'target': n1})
    return dumps(d)
def get_db():
    """Return the per-request SQLite connection, creating it on first use.

    The connection is cached on Flask's ``g`` so repeated calls within one
    request share a single handle.
    """
    import sqlite3  # BUG FIX: module is spelled sqlite3, not "sqllite3"

    if 'db' not in g:
        # BUG FIX: config is a dict (subscript), not a callable.
        g.db = sqlite3.connect(
            current_app.config['DATABASE'],
            detect_types=sqlite3.PARSE_DECLTYPES,
        )
        # Row gives name-based column access on query results.
        g.db.row_factory = sqlite3.Row
    return g.db
def station_detail(station_id):
    """Render the detail page for one station, or 404 if it doesn't exist.

    Optionally attaches an Amazon ML prediction (USE_PREDICTION) and a
    short history of statuses for charting (USE_CHART).
    """
    station = get_station(station_id)
    if not station:
        abort(404)

    stations = get_stations()
    latest_status = session.query(StationStatus) \
        .order_by(StationStatus.timestamp.desc()) \
        .first()  # FIXME

    if current_app.config.get('USE_PREDICTION', False) and latest_status:
        import boto3
        client = boto3.client('machinelearning')
        prediction = client.predict(
            # BUG FIX (was the FIXME): config is a dict — subscript it,
            # matching the AMAZON_ML_ENDPOINT access below.
            MLModelId=current_app.config['AMAZON_ML_MODEL_ID'],
            Record=build_record_for_prediction(latest_status, stations),
            PredictEndpoint=current_app.config['AMAZON_ML_ENDPOINT']
        )
    else:
        prediction = None

    if latest_status:
        latest_station_status = get_status_for_station(station, latest_status)
    else:
        latest_station_status = None

    station_statuses = []
    if current_app.config.get('USE_CHART', False):
        for status in get_statuses(10):
            s = get_status_for_station(station, status)
            if s:
                s['timestamp'] = status.timestamp
                station_statuses.append(s)

    return render_template('station_detail.html',
                           stations=stations,
                           station=station,
                           statuses=station_statuses,
                           latest_station_status=latest_station_status,
                           prediction=prediction)
def delete_dir():
    """Empty the picture save directory once it grows past 50 MB.

    Best-effort cleanup: entries that cannot be removed are skipped.
    """
    import os
    import shutil

    # BUG FIX: config is a dict (subscript), not a callable.
    dirname = current_app.config['SAVEPIC']
    maxsize = 50  # delete the contents once the directory exceeds 50 MB
    filesize = getdirsize(dirname)
    if filesize / 1024 / 1024 > maxsize:
        # SECURITY FIX: the original ran `rm -rf <dirname>/*` through a
        # shell built by string concatenation, which breaks on spaces and
        # is injection-prone if SAVEPIC contains shell metacharacters.
        # Remove the entries directly instead (same best-effort semantics).
        for entry in os.listdir(dirname):
            path = os.path.join(dirname, entry)
            try:
                if os.path.isdir(path) and not os.path.islink(path):
                    shutil.rmtree(path, ignore_errors=True)
                else:
                    os.remove(path)
            except OSError:
                pass  # mirror `rm -rf`'s silent tolerance of failures
def jobs():
    """Render the paginated admin job list."""
    # BUG FIXES: `request.arg` -> `request.args`; a missing comma after the
    # per_page argument; `Flase` -> `False`; and config is a dict
    # (subscript), not a callable.
    page = request.args.get('page', default=1, type=int)
    pagination = Job.query.paginate(
        page=page,
        per_page=current_app.config['ADMIN_PER_PAGE'],
        error_out=False)
    return render_template('admin/jobs.html', pagination=pagination)
def register(email):
    """Register *email* with the ACME directory and return the new private key."""
    private_key = generate_rsa_private_key()
    # BUG FIX: config is a dict (subscript), not a callable.
    acme_client = acme_client_for_private_key(
        current_app.config['ACME_DIRECTORY_URL'], private_key)
    registration = acme_client.register(
        messages.NewRegistration.from_data(email=email)
    )
    acme_client.agree_to_tos(registration)
    return private_key
def register(email):
    """Register *email* with the ACME directory and return the new private key."""
    private_key = generate_rsa_private_key()
    # BUG FIX: config is a dict (subscript), not a callable.
    acme_client = acme_client_for_private_key(
        current_app.config['ACME_DIRECTORY_URL'], private_key)
    registration = acme_client.register(
        messages.NewRegistration.from_data(email=email))
    acme_client.agree_to_tos(registration)
    return private_key
def verify_auth_token(token):
    """Return the User encoded in *token*, or None if it is expired/invalid."""
    # BUG FIX: the original *called* config with a list
    # (`config(['SECRET_KEY'])`); config is a dict — subscript it.
    s = Serializer(current_app.config['SECRET_KEY'])
    try:
        data = s.loads(token)
    except (SignatureExpired, BadSignature):
        # Expired or tampered token — no authenticated user.
        return None
    user = User.query.get(data['id'])
    return user
def followers(username):
    """Render a paginated list of the followers of *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    # BUG FIX: config is a dict (subscript), not a callable.
    pagination = user.followers.paginate(
        page,
        per_page=current_app.config['HelloFlask_FOLLOWERS_PER_PAGE'],
        error_out=False)
    follows = [{'user': item.follower, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=user, title='Followers of',
                           endpoint='.followers', pagination=pagination,
                           follows=follows)
def send_mail(to, subject, template, **kwargs):
    """Render *template* with **kwargs and email the result to *to*."""
    msg = Message(
        subject,
        # BUG FIX: the original *called* config with a list
        # (`config(['MAIL_USERNAME'])`); config is a dict — subscript it.
        sender=current_app.config['MAIL_USERNAME'],
        recipients=[to],
    )
    msg.html = render_template(template, **kwargs)
    mail.send(msg)
    # BUG FIX: removed a stray trailing `mail.send()` with no message
    # (Flask-Mail's send() requires one) left over from commented-out
    # test code, which has also been deleted.
def generate_token(self, user_id):
    """Generate a JWT for *user_id*, valid for 5 minutes.

    Returns the encoded token, or the error message as a string if
    encoding fails (preserving the original error-as-string contract).
    """
    try:
        # Set up the payload with an expiration date.
        payload = {
            'exp': datetime.utcnow() + timedelta(minutes=5),
            'iat': datetime.utcnow(),
            'sub': user_id
        }
        # BUG FIX: `current_app.config` is a dict attribute, not a method —
        # the original `current_app.config()` raises TypeError.
        jwt_string = jwt.encode(
            payload,
            current_app.config.get('SECRET'),
            algorithm='HS256')
        return jwt_string
    except Exception as e:
        # Return the error in string format if an exception occurs.
        return str(e)
def get_post_comments(id):
    """Return the comments of one blog post as paginated JSON."""
    post = Post.query.get_or_404(id)
    page = request.args.get("page", 1, type=int)
    # BUG FIX: config is a dict (subscript), not a callable.
    pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
        page,
        per_page=current_app.config["FLASKY_COMMENTS_PER_PAGE"],
        error_out=False)
    comments = pagination.items
    prev_page = None
    if pagination.has_prev:
        prev_page = url_for("api.get_post_comments", id=id, page=page - 1)
    next_page = None
    # BUG FIX: `hax_next` -> `has_next` (typo would raise AttributeError).
    if pagination.has_next:
        next_page = url_for("api.get_post_comments", id=id, page=page + 1)
    return jsonify({
        # BUG FIX: `to_sjon` -> `to_json` — assumed to be the model's
        # serializer method; TODO confirm against the Comment model.
        "comments": [comment.to_json() for comment in comments],
        "prev_url": prev_page,
        "next_url": next_page,
        "count": pagination.total,
    })
def default_error_handler(e):
    """Log any unhandled exception; return a generic 500 unless debugging.

    In debug mode nothing is returned, letting the framework surface the
    original exception instead of masking it.
    """
    message = 'An unhandled exception occurred.'
    log.exception(message)
    # BUG FIX: config is a dict, not a callable; .get() treats a missing
    # FLASK_DEBUG key as "not debugging".
    if not current_app.config.get('FLASK_DEBUG'):
        return {'message': message}, 500
def generate_token(self, expiration=600):
    """Return a signed token carrying this user's id.

    :param expiration: token lifetime in seconds (default 600).
    """
    # BUG FIX: config is a dict (subscript), not a callable.
    s = Serializer(current_app.config['SECRET_KEY'], expiration)  # serializer
    return s.dumps({'id': self.id}).decode('utf-8')