def test_db_events(self):
    # Seeds ~10k uniquely named events, hits the sadmin events index view,
    # then records every query slower than the production timeout both to
    # "output_events.txt" and to the application log.
    # NOTE(review): original formatting was collapsed onto one line; the
    # statement nesting (e.g. commit-per-iteration) was reconstructed —
    # confirm against upstream history.
    with app.test_request_context():
        for i in range(1, 10000):
            event = ObjectMother.get_event()
            event.name = 'Event' + str(i)
            db.session.add(event)
            db.session.commit()
        url = url_for('sadmin_events.index_view')
        self.app.get(url, follow_redirects=True)
        # Persist slow-query details for offline inspection.
        with open("output_events.txt", "w") as text_file:
            for query in get_debug_queries():
                if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                    text_file.write(
                        "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
                        % (query.statement, query.parameters, query.duration,
                           query.context))
                    text_file.write("\n")
        # Mirror the same slow queries into the application log.
        for query in get_debug_queries():
            if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                app.logger.warning(
                    "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
                    % (query.statement, query.parameters, query.duration,
                       query.context))
def delete(self, id):
    """Delete the user with the given primary key.

    On failure the session is rolled back so it stays usable for later
    requests, and the recorded debug queries are logged for diagnosis.
    """
    user = User.query.get(id)
    try:
        db.session.delete(user)
        db.session.commit()
    # Fixed: a bare "except:" also swallowed SystemExit/KeyboardInterrupt,
    # and the failed transaction was never rolled back.
    except Exception:
        db.session.rollback()
        app.logger.debug(get_debug_queries())
def debug_info(self):
    """Collect the recorded SQLAlchemy queries as display-ready dicts."""
    if not json_available or not sqlalchemy_available:
        return {}
    entries = []
    for recorded in get_debug_queries():
        select_query = recorded.statement.strip().lower().startswith('select')
        serialized_params = ''
        try:
            serialized_params = json.dumps(recorded.parameters)
        except TypeError:
            # Parameters containing non-JSON-serializable objects are left
            # as an empty string.
            pass
        # Signed digest so the panel can identify this exact query later.
        digest = hashlib.sha1(
            current_app.config['SECRET_KEY'] + recorded.statement +
            serialized_params).hexdigest()
        entries.append({
            'duration': recorded.duration,
            'sql': format_sql(recorded.statement, recorded.parameters),
            'raw_sql': recorded.statement,
            'hash': digest,
            'params': serialized_params,
            'is_select': select_query,
            'context_long': recorded.context,
            'context': format_fname(recorded.context)
        })
    return entries
def after_request(response):
    """Log any query that exceeded FLASKY_DB_QUERY_TIMEOUT, then pass through."""
    threshold = current_app.config['FLASKY_DB_QUERY_TIMEOUT']
    for q in get_debug_queries():
        if q.duration > threshold:
            current_app.logger.warning(
                'Slow query: {}\nParameters: {}\nDuration: {}s\nContext: {}\n'.format(
                    q.statement, q.parameters, q.duration, q.context))
    return response
def after_request(resp):
    """Emit a warning for every DB statement slower than DB_QUERY_TIMEOUT."""
    limit = app.config.get('DB_QUERY_TIMEOUT')
    for query in get_debug_queries():
        if query.duration > limit:
            app.logger.warning(
                'SLOW DB STATEMENT: {0}\n\tParameters: {1}\n\tDuration: {2}\n\tContext: {3}'.format(
                    query.statement, query.parameters, query.duration,
                    query.context))
    return resp
def after_request(response):
    """Warn about queries at or above FLASKY_SLOW_DB_QUERY_TIME."""
    slow_time = current_app.config['FLASKY_SLOW_DB_QUERY_TIME']
    for entry in get_debug_queries():
        if entry.duration >= slow_time:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (entry.statement, entry.parameters, entry.duration,
                   entry.context))
    return response
def wrapped(*args, **kwargs):
    """Call *func* and log any query it issued that exceeded *min_time*.

    Sets the 'query_logged' flag in the request environ so the global
    after_request hook can skip queries already logged here.
    """
    # Snapshot the query count so only queries issued by this call are
    # inspected below.
    number_of_queries_before = len(get_debug_queries())
    result = func(*args, **kwargs)
    query_list = (get_debug_queries())[number_of_queries_before:]
    request.environ['query_logged'] = True
    for query in query_list:
        # NOTE(review): duration is in seconds; "* 100" implies min_time is
        # in centiseconds (or this was meant to be * 1000 for ms) — confirm.
        if query.duration * 100 >= min_time:
            current_app.logger.warning(
                config.Config.QUERY_LOGGING_FORMAT.format(
                    method=request.method,
                    path=request.path,
                    # Python 2 attribute; on Python 3 this would be __name__.
                    func_name=func.func_name,
                    statement=query.statement,
                    params=query.parameters,
                    duration=query.duration,
                    context=query.context
                )
            )
    return result
def display_queries(response):
    """Display database queries

    Prints out SQL queries, EXPLAINs for queries above slow_threshold, and
    a final count of queries after every HTTP request
    """
    if report_type not in ('slow', 'all'):
        return response
    slow_threshold = 0.5  # EXPLAIN queries that ran for more than 0.5s
    queries = get_debug_queries()
    # We have to copy the queries list below otherwise queries executed
    # in the for loop will be appended causing an endless loop
    for query in queries[:]:
        if report_type == 'slow' and query.duration < slow_threshold:
            continue
        logger.info("%.8f %s\n%s\n%s", query.duration, query.context,
                    query.statement, query.parameters)
        is_select = bool(re.match('SELECT', query.statement, re.I))
        if query.duration > slow_threshold and is_select:
            try:
                # Re-run the slow SELECT under EXPLAIN to capture its plan.
                statement = "EXPLAIN " + query.statement
                engine = SQLAlchemy().get_engine(app)
                result = engine.execute(statement, query.parameters)
                logger.info(
                    tabulate(result.fetchall(), headers=result.keys()))
            # Fixed: bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; only catch ordinary exceptions here.
            except Exception:  # pylint: disable=broad-except
                logger.warning("Statement failed: %s", statement,
                               exc_info=True)
    return response
def nav_subtitle(self):
    """Return a short 'N queries' summary for the toolbar navigation."""
    if not json_available or not sqlalchemy_available:
        return 'Unavailable'
    # Guard before calling: get_debug_queries is presumably a falsy
    # placeholder when Flask-SQLAlchemy could not be imported — confirm.
    if get_debug_queries:
        total = len(get_debug_queries())
        noun = "query" if total == 1 else "queries"
        return "%d %s" % (total, noun)
def after_request(response):
    """Log every recorded DB query (statement, params, context) at WARNING."""
    for entry in get_debug_queries():
        message = "DB QUERY: %s\nParams: %s\n Context: %s" % (
            entry.statement, entry.parameters, entry.context)
        app.logger.warning(message)
    return response
def after_request(response):
    """Warn about queries exceeding FLASKY_SLOW_DB_QUERY_TIME."""
    limit = current_app.config['FLASKY_SLOW_DB_QUERY_TIME']
    for q in get_debug_queries():
        if q.duration >= limit:
            current_app.logger.warning(
                'Slow query %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (q.statement, q.parameters, q.duration, q.context))
    return response
def display_queries(response):
    """Display database queries

    Prints out SQL queries, EXPLAINs for queries above slow_threshold, and
    a final count of queries after every HTTP request
    """
    slow_threshold = 0.5  # EXPLAIN queries that ran for more than 0.5s
    queries = get_debug_queries()
    app.logger.info("Total queries: {}".format(len(queries)))
    if report_type == 'count':
        return response
    # We have to copy the queries list below otherwise queries executed
    # in the for loop will be appended causing an endless loop
    for query in queries[:]:
        # In 'slow' mode, skip queries under the threshold.
        if report_type == 'slow' and query.duration < slow_threshold:
            continue
        app.logger.info("{:.8f} {}\n{}\n{}".format(
            query.duration, query.context, query.statement, query.parameters))
        is_select = bool(re.match('SELECT', query.statement, re.I))
        if query.duration > slow_threshold and is_select:
            try:
                # Re-run the slow SELECT under EXPLAIN to capture its plan.
                statement = "EXPLAIN " + query.statement
                engine = SQLAlchemy().get_engine(app)
                result = engine.execute(statement, query.parameters)
                app.logger.info(tabulate(result.fetchall(),
                                         headers=result.keys()))
            except Exception as err:  # pylint: disable=broad-except
                app.logger.warning("Statement failed: {}".format(statement))
                app.logger.exception(err)
    return response
def display_queries(response):
    """Display database queries

    Prints out SQL queries, EXPLAINs for queries above slow_threshold, and
    a final count of queries after every HTTP request
    """
    slow_threshold = 0.5  # EXPLAIN queries that ran for more than 0.5s
    queries = get_debug_queries()
    logger.info("Total queries: %s", len(queries))
    if report_type == 'count':
        return response
    # We have to copy the queries list below otherwise queries executed
    # in the for loop will be appended causing an endless loop
    for query in queries[:]:
        if report_type == 'slow' and query.duration < slow_threshold:
            continue
        logger.info(
            "%.8f %s\n%s\n%s", query.duration, query.context,
            query.statement, query.parameters)
        is_select = bool(re.match('SELECT', query.statement, re.I))
        if query.duration > slow_threshold and is_select:
            try:
                # Re-run the slow SELECT under EXPLAIN to capture its plan.
                statement = "EXPLAIN " + query.statement
                engine = SQLAlchemy().get_engine(app)
                result = engine.execute(statement, query.parameters)
                logger.info(tabulate(result.fetchall(), headers=result.keys()))
            # Fixed: bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception only.
            except Exception:  # pylint: disable=broad-except
                logger.warning("Statement failed: %s", statement,
                               exc_info=True)
    return response
def content(self):
    """Render the SQLAlchemy panel, or an error listing missing libraries."""
    if not json_available or not sqlalchemy_available:
        missing = ['Missing required libraries:', '<ul>']
        if not json_available:
            missing.append('<li>simplejson</li>')
        if not sqlalchemy_available:
            missing.append('<li>Flask-SQLAlchemy</li>')
        missing.append('</ul>')
        return '\n'.join(missing)
    rows = [{
        'duration': q.duration,
        'sql': format_sql(q.statement, q.parameters),
        'signed_query': dump_query(q.statement, q.parameters),
        'context_long': q.context,
        'context': format_fname(q.context)
    } for q in get_debug_queries()]
    return self.render('panels/sqlalchemy.html', {'queries': rows})
def content(self):
    """Render the SQLAlchemy debug panel with one row per recorded query."""
    if not json_available or not sqlalchemy_available:
        missing = ['Missing required libraries:', '<ul>']
        if not json_available:
            missing.append('<li>simplejson</li>')
        if not sqlalchemy_available:
            missing.append('<li>Flask-SQLAlchemy</li>')
        missing.append('</ul>')
        return '\n'.join(missing)
    rows = []
    for recorded in get_debug_queries():
        select_query = recorded.statement.strip().lower().startswith('select')
        serialized = ''
        try:
            serialized = json.dumps(recorded.parameters)
        except TypeError:
            # Non-JSON-serializable parameters are shown as an empty string.
            pass
        digest = hashlib.sha1(current_app.config['SECRET_KEY'] +
                              recorded.statement + serialized).hexdigest()
        rows.append({
            'duration': recorded.duration,
            'sql': format_sql(recorded.statement, recorded.parameters),
            'raw_sql': recorded.statement,
            'hash': digest,
            'params': serialized,
            'is_select': select_query,
            'context_long': recorded.context,
            'context': format_fname(recorded.context)
        })
    return self.render('panels/sqlalchemy.html', {'queries': rows})
def print_db_queries(response):
    """Log every query recorded during the request, then return the response."""
    from flask.ext.sqlalchemy import get_debug_queries
    for q in get_debug_queries():
        app.logger.warning(
            "QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
            % (q.statement, q.parameters, q.duration, q.context))
    return response
def after_request(response):
    """Log queries slower than DATABASE_QUERY_TIMEOUT after each request.

    Returns the response unchanged so the after_request chain continues.
    """
    for query in get_debug_queries():
        if query.duration >= DATABASE_QUERY_TIMEOUT:
            # Fixed: the "%" interpolation was applied to the *return value*
            # of logger.warning() (None % tuple -> TypeError on every slow
            # query); the format string was also missing a newline before
            # "Duration".
            app.logger.warning(
                "SLOW QUERY: %s \nParameters: %s\nDuration: %fs\nContext: %s\n"
                % (query.statement, query.parameters, query.duration,
                   query.context))
    return response
def respond(self, context, status_code=200, serialize=True, serializers=None,
            links=None):
    """Build a JSON Response for *context* with perf headers and stats.

    Serializes context (unless serialize=False), attaches optional Link
    headers, then reports total request time and total DB time both to
    statsreporter and back to the client via custom response headers.
    """
    if serialize:
        data = self.serialize(context, serializers)
    else:
        data = context
    response = Response(
        _as_json(data),
        mimetype='application/json',
        status=status_code,
    )
    if links:
        response.headers['Link'] = ', '.join(links)
    response.headers['changes-api-class'] = self.__class__.__name__
    # do some performance logging / send perf data back to the client
    timer_name = "changes_api_server_perf_method_{}_class_{}".format(
        request.method, self.__class__.__name__)
    time_taken = time() - self.start_time
    statsreporter.stats().log_timing(timer_name, time_taken * 1000)
    response.headers['changes-server-time'] = time_taken
    # how much time did we spend waiting on the db
    db_time_in_sec = sum([q.duration for q in get_debug_queries()])
    db_timer_name = "changes_api_total_db_time_method_{}_class_{}".format(
        request.method, self.__class__.__name__)
    statsreporter.stats().log_timing(db_timer_name, db_time_in_sec * 1000)
    response.headers['changes-server-db-time'] = db_time_in_sec
    return response
def after_request(response):
    """Record slow queries (above FLASKY_DB_QUERY_TIMEOUT) in the app log."""
    cutoff = current_app.config['FLASKY_DB_QUERY_TIMEOUT']
    for item in get_debug_queries():
        if item.duration > cutoff:
            current_app.logger.warning(
                'Slow query: {}\nParameters: {}\nDuration: {}s\nContext: {}\n'.format(
                    item.statement, item.parameters, item.duration,
                    item.context))
    return response
def after_request(response):
    """Warn on any query whose duration reached DATABASE_QUERY_TIMEOUT."""
    for recorded in get_debug_queries():
        if recorded.duration < DATABASE_QUERY_TIMEOUT:
            continue
        app.logger.warning(
            "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
            % (recorded.statement, recorded.parameters, recorded.duration,
               recorded.context))
    return response
def after_request(response):
    """Log queries slower than ZBLOG_SLOW_DB_QUERY_TIME after each request."""
    for query in get_debug_queries():
        if query.duration >= current_app.config['ZBLOG_SLOW_DB_QUERY_TIME']:
            # Fixed: the original called .format(*query.statement), which
            # unpacks the statement *string* (formatting its first four
            # characters), and passed parameters/duration/context as stray
            # positional arguments to logger.warning().
            current_app.logger.warning(
                'Slow query: {0!s}\nParameters: {1!s}\nDuration: {2!s}\nContext: {3!s}'
                .format(query.statement, query.parameters, query.duration,
                        query.context))
    return response
def after_request(response):
    """Log slow queries (>= SONGXUE_SLOW_DB_QUERY_TIME) at WARNING level."""
    threshold = current_app.config['SONGXUE_SLOW_DB_QUERY_TIME']
    for entry in get_debug_queries():
        if entry.duration >= threshold:
            current_app.logger.warning(
                u'慢查询: %s\n参数: %s\n执行时间: %fs\n内容: %s\n'
                % (entry.statement, entry.parameters, entry.duration,
                   entry.context))
    return response
def after_request(response):
    """Warn about extremely long-running queries after each request."""
    # NOTE(review): durations are in seconds, so 100000 (~28 hours) looks
    # like a placeholder that effectively disables this check — confirm.
    for entry in get_debug_queries():
        if entry.duration >= 100000:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (entry.statement, entry.parameters, entry.duration,
                   entry.context))
    return response
def after_request(response):
    """Log queries at or above FLASKY_SLOW_DB_QUERY_TIME at WARNING level."""
    slow_time = current_app.config['FLASKY_SLOW_DB_QUERY_TIME']
    for q in get_debug_queries():
        if q.duration >= slow_time:
            current_app.logger.warning(
                u'慢查询: %s\n参数: %s\n执行时间: %fs\n内容: %s\n'
                % (q.statement, q.parameters, q.duration, q.context))
    return response
def after_request(response):
    """Log slow queries using attribute-style access in the format string."""
    template = ('SLOW QUERY: {0.statement}\n'
                'Parameters: {0.parameters}\n'
                'Duration: {0.duration:f}s\n'
                'Context: {0.context}\n')
    for q in get_debug_queries():
        if q.duration >= DATABASE_QUERY_TIMEOUT:
            app.logger.warning(template.format(q))
    return response
def after_request(response):
    """When query recording is on, warn about queries over the timeout."""
    if app.config['SQLALCHEMY_RECORD_QUERIES']:
        timeout = app.config['DATABASE_QUERY_TIMEOUT']
        for q in get_debug_queries():
            if q.duration >= timeout:
                app.logger.warning(
                    "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
                    % (q.statement, q.parameters, q.duration, q.context))
    return response
def after_request(response):
    """Assemble and log a two-part message for each slow query."""
    limit = current_app.config['SLOW_DB_QUERY_TIME']
    for q in get_debug_queries():
        if q.duration >= limit:
            message = 'Slow query: %s\nParameters: %s\n' % (q.statement,
                                                            q.parameters)
            message += 'Duration: %f sec\nContext: %s\n' % (q.duration,
                                                            q.context)
            current_app.logger.warning(message)
    return response
def after_request(response):
    """Log slow queries, but only when SQLALCHEMY_RECORD_QUERIES is enabled."""
    if not app.config['SQLALCHEMY_RECORD_QUERIES']:
        return response
    for entry in get_debug_queries():
        if entry.duration >= app.config['DATABASE_QUERY_TIMEOUT']:
            app.logger.warning(
                "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
                % (entry.statement, entry.parameters, entry.duration,
                   entry.context))
    return response
def releaseDB(response):
    """Attach total DB query duration and count headers to the response."""
    from flask.ext.sqlalchemy import get_debug_queries
    recorded = get_debug_queries()
    total_db_duration = sum(q.duration for q in recorded)
    total_db_count = len(recorded)
    response.headers['X-DB-QUERY-DURATION'] = '%.3f' % total_db_duration
    response.headers['X-DB-QUERY-COUNT'] = total_db_count
    return response
def after_request(response):
    """Slow-query watchdog: log queries over FLASK_SLOW_DB_QUERY_TIME."""
    for q in get_debug_queries():
        if q.duration >= app.config['FLASK_SLOW_DB_QUERY_TIME']:
            app.logger.warning(
                'Slow query: {0}\nParameters: {1}\nDuration: {2}\nContext: {3}\n'.format(
                    q.statement, q.parameters, q.duration, q.context))
    return response
def after_request(response):
    # Log queries at or above DATABASE_QUERY_TIMEOUT, then pass through.
    for query in get_debug_queries():
        if query.duration >= app.config['DATABASE_QUERY_TIMEOUT']:
            # NOTE(review): the whitespace inside this triple-quoted log
            # message was lost when the source was collapsed onto one line;
            # it has been reconstructed — confirm against history.
            app.logger.warning('''
                SLOW QUERY: {}
                Parameters: {}
                Duration: {}
                Context: {}
                '''.format(query.statement, query.parameters, query.duration,
                           query.context))
    return response
def after_request(response):
    # Log queries at or above DATABASE_QUERY_TIMEOUT at WARNING level.
    for query in get_debug_queries():
        if query.duration >= DATABASE_QUERY_TIMEOUT:
            # NOTE(review): the whitespace inside this triple-quoted log
            # message was lost when the source was collapsed onto one line;
            # it has been reconstructed — confirm against history.
            app.logger.warning(
                """SLOW QUERY: {}
                Parameters: {}
                Duration: {: f}s
                Context: {}
                """.format(query.statement, query.parameters, query.duration,
                           query.context))
    return response
def delete(self, id):
    """Delete the user with the given id, recording an audit-log entry first.

    On failure the session is rolled back (so it stays usable) and the
    recorded debug queries are logged for diagnosis.
    """
    user = User.query.get(id)
    auditlog(current_user, 'delete', user)
    try:
        db.session.delete(user)
        db.session.commit()
    # Fixed: a bare "except:" also swallowed SystemExit/KeyboardInterrupt,
    # and the failed transaction was never rolled back.
    except Exception:
        db.session.rollback()
        app.logger.debug(get_debug_queries())
def after_request(response):
    """This function logs queries that take longer than
    DATABASE_QUERY_TIMEOUT for profiling. Called after every request.

    Returns:
        HTTP request response.
    """
    slow = [q for q in get_debug_queries()
            if q.duration >= DATABASE_QUERY_TIMEOUT]
    for query in slow:
        app.logger.warning(
            "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\n Context: %s\n"
            % (query.statement, query.parameters, query.duration,
               query.context))
    return response
def after_request(response):
    """When profiling is enabled, warn about queries over the timeout."""
    if not PROFILE:
        return response
    for q in get_debug_queries():
        if q.duration > DATABASE_QUERY_TIMEOUT:
            application.logger.warning((
                'SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (q.statement, q.parameters, q.duration, q.context)))
    return response
def test_db_events(self):
    # Seeds ~10k uniquely named events, hits the sadmin events index view,
    # then records every query slower than the production timeout both to
    # "output_events.txt" and to the application log.
    # NOTE(review): original formatting was collapsed onto one line; the
    # statement nesting (e.g. commit-per-iteration) was reconstructed —
    # confirm against upstream history.
    with app.test_request_context():
        for i in range(1, 10000):
            event = ObjectMother.get_event()
            event.name = 'Event' + str(i)
            db.session.add(event)
            db.session.commit()
        url = url_for('sadmin_events.index_view')
        self.app.get(url, follow_redirects=True)
        with open("output_events.txt", "w") as text_file:
            for query in get_debug_queries():
                if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                    text_file.write("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
                        query.statement, query.parameters, query.duration, query.context))
                    text_file.write("\n")
        # Mirror the same slow queries into the application log.
        for query in get_debug_queries():
            if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                app.logger.warning("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
                    query.statement, query.parameters, query.duration, query.context))
def after_request(response):
    """Log slow queries after each request.

    get_debug_queries() returns the queries executed during this request;
    any one whose duration reaches FLASKY_SLOW_DB_QUERY_TIME is written to
    the log at WARNING level (raising it to ERROR would additionally
    trigger email notification for slow queries).
    """
    for record in get_debug_queries():
        if record.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (record.statement, record.parameters, record.duration,
                   record.context))
    return response
def after_request(response, *args, **kwargs):
    """Log slow queries unless a per-view wrapper already logged them.

    The 'query_logged' environ flag is set by the view decorator; when it
    is present this global hook stays quiet to avoid duplicate log lines.
    """
    if request.environ.get('query_logged'):
        return response
    for q in get_debug_queries():
        if q.duration >= Config.DATABASE_QUERY_TIMEOUT:
            current_app.logger.warning(
                Config.QUERY_LOGGING_FORMAT.format(
                    method=request.method,
                    path=request.path,
                    func_name='',
                    statement=q.statement,
                    params=q.parameters,
                    duration=q.duration,
                    context=q.context))
    return response
def test_query_recording(self):
    """A single INSERT inside a request context is recorded with context."""
    with self.app.test_request_context():
        item = self.Todo('Test 1', 'test')
        self.db.session.add(item)
        self.db.session.commit()
        recorded = sqlalchemy.get_debug_queries()
        self.assertEqual(len(recorded), 1)
        first = recorded[0]
        self.assertTrue('insert into' in first.statement.lower())
        self.assertEqual(first.parameters[0], 'Test 1')
        self.assertEqual(first.parameters[1], 'test')
        self.assertTrue('test_sqlalchemy.py' in first.context)
        self.assertTrue('test_query_recording' in first.context)
def after_request(response):
    """Log slow queries lazily (the logger does the %-interpolation)."""
    for q in get_debug_queries():
        if q.duration < app.config['DATABASE_QUERY_TIMEOUT']:
            continue
        app.logger.warning(
            'SLOW QUERY: %s\n'
            'Parameters: %s\n'
            'Duration: %fs\n'
            'Context: %s\n',
            q.statement, q.parameters, q.duration, q.context,
        )
    return response
def test_db_users(self):
    # Seeds ~10k users, times the sadmin users index view request, and logs
    # any query slower than the production timeout.
    # NOTE(review): Python 2 code (print statement, time.clock()); original
    # formatting was collapsed, nesting reconstructed — confirm commit
    # placement against history.
    with app.test_request_context():
        start = time.clock()
        for i in range(1, 10000):
            user = ObjectMother.get_user()
            user.email = 'User' + str(i)
            db.session.add(user)
            db.session.commit()
        url = url_for('sadmin_users.index_view')
        self.app.get(url, follow_redirects=True)
        print time.clock() - start
        for query in get_debug_queries():
            if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                app.logger.warning("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
                    query.statement, query.parameters, query.duration, query.context))
def challenge_tasks(challenge_slug):
    "Returns a task for specified challenge"
    # NOTE(review): original formatting was collapsed onto one line; the
    # statement nesting was reconstructed — confirm against history.
    challenge = get_challenge_or_404(challenge_slug, True)
    # Parse query-string arguments: how many tasks, an optional location,
    # and whether the returned tasks should be assigned to the caller.
    parser = reqparse.RequestParser()
    parser.add_argument('num', type=int, default=1,
                        help='Number of return results cannot be parsed')
    parser.add_argument('near', type=GeoPoint,
                        help='Near argument could not be parsed')
    parser.add_argument('assign', type=int, default=1,
                        help='Assign could not be parsed')
    args = parser.parse_args()
    osmid = session.get('osm_id')
    # By default, we return a single task, but no more than 10
    num = min(args['num'], 10)
    assign = args['assign']
    near = args['near']
    logging.info("{user} requesting {num} tasks from {challenge} near {near} assiging: {assign}".format(user=osmid, num=num, challenge=challenge_slug, near=near, assign=assign))
    task_list = []
    if near:
        # Spatial lookup: tasks intersecting a buffer around the point.
        coordWKT = 'POINT(%s %s)' % (near.lat, near.lon)
        task_query = Task.query.filter(Task.location.ST_Intersects(
            ST_Buffer(coordWKT, app.config["NEARBUFFER"]))).limit(num)
        task_list = [task for task in task_query
                     if challenge.task_available(task, osmid)]
    if not near or not task_list:
        # If no location is specified, or no tasks were found, gather
        # random tasks
        task_list = [get_random_task(challenge) for _ in range(num)]
        # Python 2: filter() returns a list here; drop the None entries.
        task_list = filter(None, task_list)
    # If no tasks are found with this method, then this challenge
    # is complete
    if not task_list:
        # Is this the right error?
        osmerror("ChallengeComplete",
                 "Challenge {} is complete".format(challenge_slug))
    if assign:
        # Record an 'assigned' action for each task handed out.
        for task in task_list:
            action = Action(task.id, "assigned", osmid)
            task.current_state = action
            db.session.add(action)
            db.session.add(task)
        db.session.commit()
    logging.info(
        "{num} tasks found matching criteria".format(num=len(task_list)))
    tasks = [marshal(task, task_fields) for task in task_list]
    # Dump the recorded SQL queries for debugging before returning.
    for query in get_debug_queries():
        app.logger.debug(query)
    return jsonify(tasks=tasks)
def after_request(response):
    """Report slow database queries after each request.

    For each recorded query: statement is the SQL text, parameters its
    bind parameters, duration the execution time in seconds, and context
    the code location that issued it.
    """
    threshold = current_app.config['FLASKY_SLOW_DB_QUERY_TIME']
    for item in get_debug_queries():
        if item.duration >= threshold:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (item.statement, item.parameters, item.duration,
                   item.context))
    return response
def sql_debug(response):
    # Print a banner-framed summary of every SQL query executed during the
    # request, with per-query and total durations (Python 2 print
    # statements).
    # NOTE(review): collapsed source; the replacement string in .replace()
    # and print spacing were reconstructed — confirm against history.
    queries = list(get_debug_queries())
    query_str = ''
    total_duration = 0.0
    for q in queries:
        total_duration += q.duration
        # NOTE(review): "%"-interpolating parameters into the statement can
        # raise if the SQL contains literal % signs — debug-only code.
        stmt = str(q.statement % q.parameters).replace('\n', '\n       ')
        query_str += 'Query: {0}\nDuration: {1}ms\n\n'.format(
            stmt, round(q.duration * 1000, 2))
    print '=' * 80
    print ' SQL Queries - {0} Queries Executed in {1}ms'.format(
        len(queries), round(total_duration * 1000, 2))
    print '=' * 80
    print query_str.rstrip('\n')
    print '=' * 80 + '\n'
    return response
def after_request(response):
    # Log slow queries, then record per-request analytics (blueprint, path,
    # client IP, status code, user) in a Record row.
    # NOTE(review): collapsed source; db.session.add(record) is assumed to
    # run for every request, not only when current_user.id > 0 — confirm.
    for query in get_debug_queries():
        if query.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (query.statement, query.parameters, query.duration,
                   query.context))
    # Statistics
    record = Record()
    record.blueprint = 'main'
    record.path = request.path
    record.ip = request.remote_addr
    record.code = response.status_code
    if current_user.id > 0:
        record.user = current_user.id
    db.session.add(record)
    return response
def test_db_users(self):
    # Seeds ~10k users, times the sadmin users index view request, and logs
    # any query slower than the production timeout.
    # NOTE(review): Python 2 code (print statement, time.clock()); original
    # formatting was collapsed, nesting reconstructed — confirm commit
    # placement against history.
    with app.test_request_context():
        start = time.clock()
        for i in range(1, 10000):
            user = ObjectMother.get_user()
            user.email = 'User' + str(i)
            db.session.add(user)
            db.session.commit()
        url = url_for('sadmin_users.index_view')
        self.app.get(url, follow_redirects=True)
        print time.clock() - start
        for query in get_debug_queries():
            if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                app.logger.warning(
                    "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
                    % (query.statement, query.parameters, query.duration,
                       query.context))
def get(self): """returns a list of challenges. Optional URL parameters are: difficulty: the desired difficulty to filter on (1=easy, 2=medium, 3=hard) lon/lat: the coordinate to filter on (returns only challenges whose bounding polygons contain this point) example: /api/c/challenges?lon=-100.22&lat=40.45&difficulty=2 all: if true, return all challenges regardless of OSM user home location """ # initialize the parser parser = reqparse.RequestParser() parser.add_argument('difficulty', type=int, choices=["1", "2", "3"], help='difficulty cannot be parsed') parser.add_argument('lon', type=float, help="lon cannot be parsed") parser.add_argument('lat', type=float, help="lat cannot be parsed") parser.add_argument('all', type=bool, help="all cannot be parsed") args = parser.parse_args() difficulty = None contains = None # Try to get difficulty from argument, or users preference difficulty = args['difficulty'] or session.get('difficulty') # for local challenges, first look at lon / lat passed in if args.lon and args.lat: contains = 'POINT(%s %s)' % (args.lon, args.lat) # if there is none, look at the user's home location from OSM elif 'home_location' in session: contains = 'POINT(%s %s)' % tuple(session['home_location']) # get the list of challenges meeting the criteria query = db.session.query(Challenge).filter(Challenge.active == True) if difficulty: query = query.filter(Challenge.difficulty == difficulty) if contains and not args.all: query = query.filter(Challenge.polygon.ST_Contains(contains)) challenges = query.all() app.logger.debug(get_debug_queries()) return challenges
def test_db_sessions(self):
    # Seeds ~10k sessions, requests the "my sessions" view, and writes any
    # slow queries to output_session.txt.
    # NOTE(review): collapsed source; nesting reconstructed. The bare
    # time.clock() call discards its return value — looks like a leftover
    # timing call (compare the sibling test that assigns it to `start`).
    with app.test_request_context():
        for i in range(1, 10000):
            session = ObjectMother.get_session()
            session.name = 'Session' + str(i)
            db.session.add(session)
            db.session.commit()
        url = url_for('sadmin_sessions.display_my_sessions_view')
        time.clock()
        self.app.get(url, follow_redirects=True)
        with open("output_session.txt", "w") as text_file:
            for query in get_debug_queries():
                if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                    text_file.write("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
                        query.statement, query.parameters, query.duration, query.context))
                    text_file.write("\n")
def process_response(self, request, response):
    """Capture the recorded queries for this panel; persist to cache if set."""
    self.data = []
    for recorded in get_debug_queries():
        self.data.append({
            'duration': recorded.duration,
            'sql': format_sql(recorded.statement, recorded.parameters),
            'signed_query': dump_query(recorded.statement,
                                       recorded.parameters),
            'context_long': recorded.context,
            'context': format_fname(recorded.context)
        })
    if self.cache:
        self.cache.set("DEBUGTOOLBAR:%s" % self.name, self.render_cache())
def test_db_sessions(self):
    # Seeds ~10k sessions, requests the "my sessions" view, and writes any
    # slow queries to output_session.txt.
    # NOTE(review): collapsed source; nesting reconstructed. `start` is
    # assigned from time.clock() but never used — leftover timing code.
    with app.test_request_context():
        for i in range(1, 10000):
            session = ObjectMother.get_session()
            session.name = 'Session' + str(i)
            db.session.add(session)
            db.session.commit()
        url = url_for('sadmin_sessions.display_my_sessions_view')
        start = time.clock()
        self.app.get(url, follow_redirects=True)
        with open("output_session.txt", "w") as text_file:
            for query in get_debug_queries():
                if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
                    text_file.write(
                        "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
                        % (query.statement, query.parameters, query.duration,
                           query.context))
                    text_file.write("\n")
def after_request(response):
    """Print out request time"""
    # Summarize wall time, CPU time, and total DB time for the request.
    queries = get_debug_queries()
    query_time = sum(query.duration for query in queries)
    start_time, start_clock = flask.g.request_start
    total = time.time() - start_time
    # time.clock() is Python-2-era CPU time (removed in Python 3.8).
    total_cpu = time.clock() - start_clock
    str_ = u"%.2fs (%.2fs CPU and %.2fs for %-2s db queries) '%s %s' %s"
    payload = (total, total_cpu, query_time, len(queries), request.method,
               request.path, response.status)
    performance_logger = getLogger("ggrc.performance")
    if getattr(settings, "PRODUCTION", True):
        # Always use INFO level in production
        performance_logger.info(str_, *payload)
        return response
    # Outside production, escalate requests slower than 1s to WARNING.
    if total > 1:
        performance_logger.warning(str_, *payload)
    else:
        performance_logger.info(str_, *payload)
    return response
# Demo script: exercise Flask-SQLAlchemy-Cache and report how many SQL
# queries were actually executed (Python 2 print statement).
from flask.ext.sqlalchemy import get_debug_queries
from flask.ext.sqlalchemy_cache import FromCache
from _base import app, cache, Country

with app.app_context():
    q = Country.query.order_by(Country.id.asc())
    # Wrap the query so its result is served from (and stored in) the cache.
    caching_q = q.options(FromCache(cache))
    country = caching_q.first()
    assert 'Brazil' == country.name
    #assert 'Germany' == countries[1].name
    print '%d queries executed.' % len(get_debug_queries())
def print_db_stats(response):
    # Print how many SQL queries the request issued (Python 2 print with a
    # unicode literal), then pass the response through unchanged.
    queries = get_debug_queries()
    print u'↱ number of queries: {}'.format(len(queries))
    return response
def after_request(response):
    """Log every query in development, or only slow queries elsewhere.

    Uses os.environ.get() so a missing BOOK_CONFIG variable degrades to the
    slow-query-only behaviour instead of raising KeyError on every request.
    """
    # Fixed: os.environ['BOOK_CONFIG'] raised KeyError when the variable
    # was unset, breaking every request.
    is_dev = os.environ.get('BOOK_CONFIG') == 'development'
    threshold = current_app.config['BOOK_SLOW_DB_QUERY_TIME']
    for query in get_debug_queries():
        if is_dev or query.duration >= threshold:
            current_app.logger.warning(
                'Slow query: %s\n Parameters: %s\n Duration: %f\n Context: %s\n'
                % (query.statement, query.parameters, query.duration,
                   query.context))
    return response
def get_queries():
    """Return the recorded queries, or an empty list when unavailable."""
    # Guard before calling: get_debug_queries is presumably set to a falsy
    # placeholder when Flask-SQLAlchemy could not be imported — confirm.
    return get_debug_queries() if get_debug_queries else []
def has_content(self):
    """Report whether this panel should be rendered.

    True when the required libraries are missing (so the error message is
    shown) or when at least one query was recorded.
    """
    libs_ok = json_available and sqlalchemy_available
    if not libs_ok:
        # will display an error message
        return True
    return bool(get_debug_queries())