def start():
    """Search endpoint: find pages containing every word of the query.

    Renders the index template with a best-first list of (score, url)
    tuples, or with no results when the query is empty or contains a
    word not present in the index.
    """
    phrase = request.args.get('query', '').split()
    COUNTER_PAGES_SERVED.inc()
    if not phrase:
        return render_template('index.html', gen_time=g.request_time())
    word_ids = []
    for word in phrase:
        word_id = get_word_id(word)
        # An unknown word means no page can match the whole phrase.
        # (Removed a leftover `print(word_id)` debug statement here.)
        if not word_id:
            return render_template('index.html', gen_time=g.request_time())
        word_ids.append(word_id)
    # Pages containing *all* words: intersect the per-word page-id sets.
    pages_ids = {word_id: get_pages_id(word_id) for word_id in word_ids}
    pages = reduce(intersect, list(pages_ids.values()))
    res = []
    for page_id in pages:
        url = get_page_by_id(page_id)['url']
        score = get_page_score(page_id)
        res.append((score, url))
    # Highest score first.
    res.sort(reverse=True)
    return render_template('index.html', gen_time=g.request_time(), result=res)
def _primes_between(start, end):
    """Return all primes p with start <= p <= end (sieve of Eratosthenes)."""
    sieve = [True] * (end + 1)
    # 0 and 1 are not prime.
    sieve[0] = False
    sieve[1] = False
    # Cross out multiples of each prime up to sqrt(end); multiples below
    # i*i were already crossed out by smaller factors.
    for i in range(2, int(math.sqrt(end)) + 1):
        if sieve[i]:
            for j in range(i * i, end + 1, i):
                sieve[j] = False
    return [i for i in range(start, end + 1) if sieve[i]]


def primeNo(start, end):
    """Return a JSON report of the primes between *start* and *end*.

    Tracks per-session usage in a TaskUser row (request count, elapsed
    times, timestamps) and includes that history in the response.
    """
    if session.get('id'):
        # Existing user: append this request's timing to their history.
        user = TaskUser.query.filter_by(id=session.get('id')).first()
        user.elapsed_time = user.elapsed_time + " " + g.request_time()
        user.timestamp = user.timestamp + "," + str(datetime.datetime.now())
        user.cntFreq += 1
        db.session.commit()
    else:
        # First request in this session: create and persist a new user row.
        user = TaskUser(timestamp=str(datetime.datetime.now()),
                        elapsed_time=g.request_time(),
                        cntFreq=1)
        db.session.add(user)
        db.session.commit()
        session['id'] = user.id
    start = int(start)
    end = int(end)
    # NOTE(review): end < 2 raises IndexError in the sieve (same as the
    # original code) — confirm the route guarantees end >= 2.
    ans = _primes_between(start, end)
    dic = {
        f"Prime numbers between {start} and {end} is ": ans,
        "No. of times this user has requested": user.cntFreq,
        "elapsed_time": user.elapsed_time.split(),
        "LengthOfPrimeNumberList": len(ans),
        'time_stamp': user.timestamp.split(","),
    }
    # Finally return the json object.
    return jsonify(dic)
def registry_response(data, status=200, mimetype='application/json', headers=None):
    """Build a registry API response with standard headers attached.

    Dict payloads are JSON-serialized; extra (name, value) header pairs
    may be supplied via *headers*.
    """
    if mimetype == 'application/json':
        response = make_response(json.dumps(data, indent=4), status)
    else:
        payload = json.dumps(data, indent=4) if isinstance(data, dict) else data
        response = make_response(payload, status)
    # Replace the default Content-Type with the requested one.
    response.headers.remove('Content-Type')
    response.headers.add('Content-Type', mimetype)
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Server', 'Nessemble')
    # Timing may be unavailable if the before-request hook never ran.
    try:
        elapsed = g.request_time()
    except AttributeError:
        elapsed = 0
    response.headers.add('X-Response-Time', elapsed)
    if headers:
        for hdr in headers:
            response.headers.add(hdr[0], hdr[1])
    return response
def get_activity():
    """Get uid and date args and return json object with visit count for
    requested uid and date."""
    required_args = ['uid', 'date']
    if not all(arg in request.args for arg in required_args):
        abort(400)
    answer = {}
    try:
        uid = int(request.args['uid'])
        parsed = dateutil.parser.parse(request.args['date'])
        # Truncate to midnight so a full datetime still selects the whole day.
        day_start = parsed.replace(hour=0, minute=0, second=0, microsecond=0)
        day_end = day_start + dateutil.relativedelta.relativedelta(days=+1)
        collection = mongo.db.activities
        count = collection.count(filter={'uid': uid,
                                         'date': {"$gte": day_start,
                                                  "$lt": day_end}})
        # old pymongo style:
        # count = collection.find({'uid': uid, 'date': {"$gte": day_start,
        #     "$lt": day_end}}, fields={'uid': 1, '_id': 0}).count()
    except (KeyError, ValueError, OperationFailure) as e:
        answer['status'] = "FAIL"
        answer['error'] = str(e)
        return jsonify(answer)
    answer['status'] = "OK"
    answer['uid'] = uid
    answer['count'] = count
    answer['execution_time'] = g.request_time()
    return jsonify(answer)
def page_two():
    """Demo endpoint: fan out f() over the CPU count and report timing."""
    app.logger.info('This is page two start')
    processes = cpu_count()
    # Context manager terminates and joins the workers — the original
    # leaked a 1000-worker pool on every request.
    with Pool(1000) as pool:
        x = pool.map(f, range(processes))
    app.logger.info('This is page two end')
    return jsonify({'request_time': g.request_time(), 'x': x})
def after_request(response):
    """Close the per-request DB connection; attach timing in debug mode."""
    ec_model.close_connection()
    debug_enabled = flask_app.config['DEBUG']
    if debug_enabled:
        # Header name spelling ('elpased') kept as-is: clients may depend on it.
        response.headers.add('x-time-elpased', g.request_time())
    return response
def Main():
    """Demo endpoint: query sample data, burn CPU, and render timing stats."""
    db = sqlite3.connect(DB_Name)
    try:
        cursor = db.cursor()
        cursor.execute('SELECT * FROM CustomGraphData')
        resultset = cursor.fetchall()
    finally:
        # Always release the connection, even if the query raises —
        # the original leaked it on any exception.
        db.close()
    # CPU-burn workload for the latency demo.
    Fibonacci(28)
    Regression()
    r, gr, b = random.randint(0, 9), random.randint(0, 9), random.randint(0, 9)
    rgb = str(r) + str(gr) + str(b)
    # g.request_time() ends with a unit suffix; strip the last character.
    request_latency = float(g.request_time()[:-1])
    cpu_usage = psutil.cpu_percent(interval=request_latency, percpu=False)
    data.append({"request_latency": request_latency, "cpu_usage": cpu_usage})
    # Build the page in one pass instead of repeated string +=.
    parts = [
        '<h1>Main</h1>',
        '<p>Executed main body in ' + str(request_latency) + '</p>',
        '<p>Executed main body in ' + str(g.request_time_2) + '</p>',
        '<p>Executed main body in ' + str(g.request_end_time) + '</p>',
        '<p>CPU usage: ' + str(cpu_usage) + '%</p>',
        '<p>FMD CPU usage: ' + str(last_cpu_read) + '%</p>',
        '<div style="background-color: #' + rgb + ';padding: 4px"></div>',
        '<code style="background-color: #ddd;padding: 5px 20px;display: block;border-radius: 0 0 10px 10px;">',
        '<p>Number of records: ' + str(len(resultset)) + '</p>',
        '<p>Regression Level: ' + str(Regression_Level) + '</p>',
        '<p>Regression Magnitude: ' + str(Regression_Magnitude) + '</p>',
        '</code>',
    ]
    return ''.join(parts)
def post_activity():
    """Writes json object to storage if its md5sum is correct.

    Accepts single json object or array.
    """
    import hashlib  # replaces the removed Python-2-only `md5` module

    if not request.json:
        abort(400)
    activities = request.json if isinstance(request.json, list) else [request.json]
    result_answer = {}
    activities_collection = mongo.db.activities
    for pos, activity in enumerate(activities):
        try:
            received_md5 = activity.pop('md5checksum')
            # Checksum is computed over the object *without* its checksum
            # field. hashlib requires bytes; utf-8 matches json.dumps output.
            calculated_md5 = hashlib.md5(
                json.dumps(activity).encode('utf-8')).hexdigest()
            if received_md5 == calculated_md5:
                activity['uid'] = int(activity['uid'])
                activity['date'] = dateutil.parser.parse(activity['date'])
                activities_collection.insert_one(activity)
                result_answer[pos] = "OK"
            else:
                result_answer[pos] = "FAIL"
        except (KeyError, ValueError, OperationFailure):
            result_answer[pos] = "FAIL"
    result_answer['execution_time'] = g.request_time()
    return jsonify(result_answer), 201
def after_request(resp):
    """Inject the elapsed request time into JSON responses."""
    if not resp.is_json:
        return resp
    payload = resp.get_json()
    payload["responseTime"] = g.request_time()
    return make_response(jsonify(payload), resp.status_code)
def get(self):
    """Run Predit.from_to over the caller's dataset and return timed results."""
    # Count time request.
    g.request_start_time = time.time()
    response = Auth.get_logged_in_user(new_request=request)
    user_profile = response[0].get('data')
    id_user = user_profile.get('user_id')
    data_file_name = str(get_a_data(id_user))
    # Column names and the uid/iid ranges to predict over.
    col_uid, col_iid, col_rati = (
        request.args.get(name, type=str) for name in ('uid', 'iid', 'rati'))
    from_uid, to_uid, from_iid, to_iid = (
        request.args.get(name, type=int)
        for name in ('from_uid', 'to_uid', 'from_iid', 'to_iid'))
    algorithm = Predit()
    result = {
        'result': algorithm.from_to(data_file_name, col_uid, col_iid, col_rati,
                                    from_uid, to_uid, from_iid, to_iid)
    }
    g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)
    result['time'] = g.request_time()
    return result
def debug(*args, **kwargs):
    """Append a timestamped dump of *args* to g.debugtext as an HTML line.

    Each argument is rendered with repr() unless with_repr=False, in which
    case it is concatenated as-is (and must already be a string).
    """
    use_repr = kwargs.get('with_repr', True)
    render = repr if use_repr else (lambda item: item)
    pieces = [g.request_time() + ': [']
    pieces.extend(render(arg) + ', ' for arg in args)
    pieces.append(']<br/>\n')
    g.debugtext += ''.join(pieces)
def after_each_request(response):
    """Commit on 2xx responses, roll back otherwise, and log request timing."""
    # The original test `not response.status_code % 200 < 100` also treated
    # 4xx as success (400 % 200 == 0), committing failed transactions.
    # Check the 2xx range explicitly.
    if 200 <= response.status_code < 300:
        mysql.commit()
    else:
        mysql.rollback()
    app.logger.info('"{} {}" request time: {}'.format(
        request.method, request.path, g.request_time()))
    return response
def detect_faces_in_image(file_stream):
    """Encode any faces found in an uploaded image and return them as JSON.

    The encodings are stringified wholesale; "r.time" carries the elapsed
    request time.
    """
    # Load the uploaded image file.
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image.
    face_encodings = face_recognition.face_encodings(img)
    # (Removed the unused `face_found` flag left over from a prior version.)
    result = {"encoding": str(face_encodings), "r.time": g.request_time()}
    return jsonify(result)
def add_logger(response):
    """After-request hook: emit a Stackdriver-style structured access log.

    Extracts the trace/span ids from the configured trace header, builds an
    HttpRequest payload from the request/response pair, derives the entry's
    severity from the response status and any sub-message levels recorded
    during the request, then sends it via the parent transport. Returns the
    response unchanged.
    """
    TRACE = None
    SPAN = None
    if (self.traceHeaderName in request.headers.keys()):
        # trace can be formatted as "X-Cloud-Trace-Context: TRACE_ID/SPAN_ID;o=TRACE_TRUE"
        rawTrace = request.headers.get(self.traceHeaderName).split('/')
        TRACE = rawTrace[0]
        if ( len(rawTrace) > 1) :
            # Drop the ";o=..." options suffix from the span id.
            SPAN = rawTrace[1].split(';')[0]
    # Field names follow the proto at:
    # https://github.com/googleapis/googleapis/blob/master/google/logging/type/http_request.proto
    REQUEST = {
        'requestMethod': request.method,
        'requestUrl': request.url,
        'status': response.status_code,
        'responseSize': response.content_length,
        'latency': g.request_time(),
        'remoteIp': request.remote_addr,
        'requestSize': request.content_length
    }
    if 'user-agent' in request.headers:
        REQUEST['userAgent'] = request.headers.get('user-agent')
    if request.referrer:
        REQUEST['referer'] = request.referrer
    # Add the response status_code based log level.
    response_severity = logging.getLevelName(logging.INFO)
    if 400 <= response.status_code < 500:
        response_severity = logging.getLevelName(logging.WARNING)
    elif response.status_code >= 500:
        response_severity = logging.getLevelName(logging.ERROR)
    if response_severity not in self.mLogLevels:
        self.mLogLevels[response_severity] = getattr(logging, response_severity)
    # Find the log level priority of sub-messages; apply the max level to the
    # root log message, then reset the per-request level accumulator.
    severity = max(self.mLogLevels, key=self.mLogLevels.get)
    self.mLogLevels = {}
    self.transport_parent.send(
        None,
        timestamp= datetime.datetime.utcnow(),
        severity = severity,
        resource=self.resource,
        labels=self.labels,
        trace=TRACE,
        span_id = SPAN,
        http_request=REQUEST)
    #response.headers['x-upstream-service-time'] = g.request_time()
    return response
def long_request():
    """Simulate a slow endpoint by sleeping in growing increments."""
    max_value = request.args.get('max_value', 10, int)
    timer_value = request.args.get('timer_value', 1, int)
    for step in range(1, max_value + 1):
        # Each iteration sleeps longer than the last.
        time.sleep(step * timer_value)
        app.logger.info('Loop - {}'.format(step))
    return jsonify({
        'max_value': max_value,
        'timer_value': timer_value,
        'request_time': g.request_time(),
    })
def after_request(response):
    """Record page-generation time and emit a structured access log entry."""
    HISTOGRAM_PAGE_GEN_TIME.observe(g.request_time())
    # Honor an upstream request id; otherwise mint a fresh one.
    request_id = request.headers.get('Request-Id', uuid.uuid4())
    log.info('request',
             service='web',
             request_id=request_id,
             addr=request.remote_addr,
             path=request.path,
             args=request.args,
             method=request.method,
             response_status=response.status_code)
    return response
def log_response(res):
    """Print a colorized request summary: method, path, client, timing, payload."""
    message = '[%s] -> [%s] from:%s costs:%.3f ms' % (
        request.method, request.path, request.remote_addr,
        float(g.request_time()) * 1000)
    # Parse the body once instead of calling get_json() twice.
    req_body = request.get_json() or {}
    message += "\n\tdata: {\n\t\tpath: %s, \n\t\tquery: %s, \n\t\tbody: %s\n\t} " % (
        json.dumps(_request_ctx_stack.top.request.view_args, ensure_ascii=False),
        json.dumps(request.args, ensure_ascii=False), req_body)
    # ANSI color start (up to 3 params, ends with 'm'): \033[mode;fg;bg m
    print('\033[0;34m')
    print(message)
    print('\033[0m')  # reset terminal color
    return res
def log_response(res):
    """Print a colorized request summary with path/query/body as JSON."""
    message = '[%s] -> [%s] from:%s costs:%.3f ms' % (
        request.method, request.path, request.remote_addr,
        float(g.request_time()) * 1000)
    # Parse the body once instead of calling get_json() twice.
    req_body = request.get_json() or {}
    data = {
        'path': _request_ctx_stack.top.request.view_args,
        'query': request.args,
        'body': req_body
    }
    message += '\n\"data\": ' + json.dumps(
        data, indent=4, ensure_ascii=False)
    # ANSI color start (up to 3 params, ends with 'm'): \033[mode;fg;bg m
    print('\033[0;34m')
    if request.method in ('GET', 'POST', 'PUT', 'DELETE'):
        print(message)
    print('\033[0m')  # reset terminal color
    return res
def log_response(resp):
    """Log a request summary at the level configured under app.config['LOG']."""
    log_config = app.config.get('LOG')
    if not log_config['REQUEST_LOG']:
        return resp
    message = '[%s] -> [%s] from:%s costs:%.3f ms' % (
        request.method, request.path, request.remote_addr,
        float(g.request_time()) * 1000)
    if log_config['LEVEL'] == 'INFO':
        app.logger.info(message)
    elif log_config['LEVEL'] == 'DEBUG':
        req_body = '{}'
        try:
            # Parse once (the original called get_json() twice); keep the
            # '{}' fallback when the body is absent or not valid JSON.
            req_body = request.get_json() or {}
        except Exception:  # narrowed from a bare except
            pass
        message += " data:{\n\tparam: %s, \n\tbody: %s\n} " % (json.dumps(
            request.args, ensure_ascii=False), req_body)
        app.logger.debug(message)
    return resp
def get(self):
    """Mine association rules with Apriori over the user's CSV data."""
    g.request_start_time = time.time()
    response = Auth.get_logged_in_user(new_request=request)
    user_profile = response[0].get('data')
    id_user = user_profile.get('user_id')
    # Algorithm thresholds and the column selection come from the query string.
    minsup = request.args.get('minsup', type=float)
    minconf = request.args.get('minconf', type=float)
    select_col = request.args.get('sel_col', type=str)
    store_data = read_all_data_csv(id_user, select_col)
    result = Apiori(store_data, minsup, minconf).write_json()
    g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)
    result['time'] = g.request_time()
    return result
def get(self):
    """Mine association rules with FP-Growth and return them with timing."""
    g.request_start_time = time.time()
    response = Auth.get_logged_in_user(new_request=request)
    user_profile = response[0].get('data')
    id_user = user_profile.get('user_id')
    # Algorithm parameters and the column selection come from the query string.
    minlen = request.args.get('minlen', type=int)
    minconf = request.args.get('minconf', type=float)
    select_col = request.args.get('sel_col', type=str)
    store_data = read_all_data_csv(id_user, select_col)
    rules = Fpgrowth(store_data, minlen, minconf).generate_rule()
    result = {'rules': rules, 'len': len(rules)}
    g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)
    result['time'] = g.request_time()
    return result
def process_response(response):
    """Optionally deflate-compress the response and log its size and timing."""
    # Skip user-content paths; also bail out when the request never set a
    # start time (it failed before the before-request hook) — consistent
    # with the newer process_response variant elsewhere in this file.
    if '/u/' in request.path or not hasattr(g, 'request_start_time'):
        return response
    # .get() both tests presence and compares — the original checked twice.
    if request.headers.get('Accept-Encoding') == 'deflate':
        logger.debug('compressing resp: %d' % len(response.data))
        response.data = zlib.compress(response.data)
        response.headers['Content-Encoding'] = 'deflate'
    size = len(response.data)
    response.headers['Content-Length'] = size
    if size > 1024:
        # Integer division: under Python 3 the original `/` produced sizes
        # like "1.5341796875KB".
        if size < (1024 * 1024):
            size = str(size // 1024) + 'KB'
        else:
            size = str(size // (1024 * 1024)) + 'MB'
    logger.debug('content-length %s' % size)
    logger.debug(request.url)
    logger.debug('request: %s' % g.request_time())
    return response
def json_response(data, filename=None, status=200):
    """Wrap *data* in a JSON response with API metadata and cache headers.

    When *filename* is given, the response is served as a JSON attachment.
    """
    data["meta"] = {
        "api": config.API_FULL_NAME,
        "render_time": g.request_time(),
        # Implicit concatenation: the original backslash line-continuation
        # inside the literal embedded source indentation in the text.
        "license": ("Please see https://github.com/ciex/metawahl/master/LICENSE "
                    "for licensing information"),
    }
    rv = jsonify(data)
    rv.cache_control.max_age = 300
    rv.status_code = status
    if filename is not None:
        rv.headers["Content-Type"] = "text/json"
        rv.headers["Content-Disposition"] = "attachment; filename={}".format(
            filename)
    return rv
def get(self):
    """Predict a rating with SlopeOne for the given user/item values."""
    g.request_start_time = time.time()
    response = Auth.get_logged_in_user(new_request=request)
    user_profile = response[0].get('data')
    id_user = user_profile.get('user_id')
    data_file_name = str(get_a_data(id_user))
    # Column names and the target user/item values come from the query string.
    col_uid = request.args.get('uid', type=str)
    col_iid = request.args.get('iid', type=str)
    col_rati = request.args.get('rati', type=str)
    value_uid = request.args.get('value_uid', type=float)
    value_iid = request.args.get('value_iid', type=float)
    prediction = Predit().SlopeOne(data_file_name, col_uid, col_iid,
                                   col_rati, value_uid, value_iid)
    result = {'result': prediction}
    g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)
    result['time'] = g.request_time()
    return result
def after_request(response):
    """Execute this code after the request execution: emit a structured
    access-log line (skipped for /ping)."""
    millis = int(round(time.time() * 1000))
    service = os.environ.get('APPNAME')
    code = response.status_code
    user_ip = request.remote_addr
    method = request.method.upper()
    path = request.path
    req_time = g.request_time()
    if path == '/ping':
        return response
    body = json.dumps(request.get_json()) if request.data else ''
    # Truthiness covers both '' and None (the original compared each,
    # including a `!= None` identity-comparison smell).
    service = service.upper() if service else 'SERVICE'
    headers = {name: value for name, value in request.headers}
    data = {'headers': headers, 'body': body}
    request_log = '%d %s REQUEST %d %s %s %s %d %s %s' % (
        millis, service, code, user_ip, method, path, len(body), req_time,
        json.dumps(data))
    logger.get_handler().debug(request_log)
    return response
def process_response(response):
    """Deflate-compress eligible responses and log size and timing."""
    # Return early if we don't have the start time (which means the
    # request failed before the before-request hook ran).
    if '/u/' in request.path or not hasattr(g, 'request_start_time'):
        return response
    wants_deflate = request.headers.get('Accept-Encoding') == 'deflate'
    if wants_deflate:
        logger.debug('compressing resp: %d' % len(response.data))
        response.data = zlib.compress(response.data)
        response.headers['Content-Encoding'] = 'deflate'
    size = len(response.data)
    response.headers['Content-Length'] = size
    if size > 1024:
        # Render a human-readable size for the log line.
        if size < 1024 * 1024:
            divisor, unit = 1024, 'KB'
        else:
            divisor, unit = 1024 * 1024, 'MB'
        size = str(size / divisor) + unit
    logger.debug('content-length %s' % size)
    logger.debug(request.url)
    logger.debug('request: %s' % g.request_time())
    return response
def log_response(resp):
    """Log a request/response summary at the level configured under 'log'."""
    log_config = app.property('log') if app.property('log') else {
        "level": 'INFO'
    }
    message = '[%s] - [%s] -> [%s] costs:%.3f ms' % (
        request.remote_addr, request.method, request.path,
        float(g.request_time()) * 1000)
    if log_config['level'] == 'INFO':
        app.logger.info(message)
    elif log_config['level'] == 'DEBUG':
        req_body = '{}'
        try:
            # Parse once (the original called get_json() twice); fall back
            # to the query args when the body is empty.
            req_body = request.get_json() or request.args
        except Exception:  # narrowed from a bare except
            pass
        message += " - p: %s, r: %s " % (
            json.dumps(req_body, ensure_ascii=False),
            json.dumps(resp.json, ensure_ascii=False)
            if resp.is_json else resp.data)
        app.logger.debug(message)
    return resp
def log_response(resp):
    """Log request summaries according to the configured LOG settings."""
    log_config = self._app.config.get("LOG")
    if not log_config["REQUEST_LOG"]:
        return resp
    message = "[%s] -> [%s] from:%s costs:%.3f ms" % (
        request.method,
        request.path,
        request.remote_addr,
        float(g.request_time()) * 1000,
    )
    if log_config["LEVEL"] == "INFO":
        self._app.logger.info(message)
    elif log_config["LEVEL"] == "DEBUG":
        req_body = "{}"
        try:
            # Parse once instead of calling get_json() twice; keep the
            # "{}" fallback for absent/invalid JSON bodies.
            req_body = request.get_json() or {}
        except Exception:
            pass
        message += " data:{\n\tparam: %s, \n\tbody: %s\n} " % (
            json.dumps(request.args, ensure_ascii=False),
            req_body,
        )
        self._app.logger.debug(message)
    return resp
def wrd_get():
    """Return a JSON page of clothes items per the 'items'/'page' args."""
    items = int(request.args.get("items"))
    page = int(request.args.get("page"))
    payload = clothes.get(items, page)
    rsp = json.dumps(payload, indent=JSON_DENT)
    logger.debug("Execution %s", g.request_time())
    return rsp
def after_request(response):
    """Log how long the request took to render, then pass it through."""
    elapsed = g.request_time()
    log.debug('Rendered in %ss', elapsed)
    return response
def ping_service():
    """Ping the remote pong endpoint and report the elapsed request time."""
    url = 'https://authorization-testing.herokuapp.com/pong'
    # The response body is unused (the original bound it to an unused `r`);
    # the call exists only so the round-trip is included in the timing.
    requests.get(url, auth=new_auth('vcu', 'rams'))
    pingpong_t = g.request_time()
    return jsonify({'request time elapsed': pingpong_t}), 201
def index():
    """Landing endpoint reporting the total entry count and render time."""
    activities_collection = mongo.db.activities
    count = activities_collection.count()
    # The original format string used {0} for both placeholders, so the
    # count was printed twice and the render time never appeared.
    return ('Flask-api-mongo is running! Total entries is {0}. '
            'Rendered in {1}'.format(count, g.request_time()))