async def view_payment(request, user, saasu_user):
    """Show a Saasu payment restricted to invoices billed to *saasu_user*.

    Fetches the payment named by the ``transaction_id`` match-info slot,
    keeps only payment items whose invoice is billed to the current user,
    and subtracts every other item's amount from the payment total.

    Returns a dict with ``invoices`` (transaction id -> invoice) and the
    filtered ``payment``.

    Raises
    ------
    web.HTTPNotFound when the payment or any invoice cannot be fetched.
    web.HTTPTooManyRequests when the payment has more than 10 items.
    """
    try:
        payment = await saasu.get_payment(request.match_info['transaction_id'])
    except Exception:  # was a bare except:, which also swallowed SystemExit etc.
        raise web.HTTPNotFound(body='Requested payment not found')
    invoice_ids = []
    invoices = {}
    items = []
    if len(payment.get('PaymentItems', [])) > 10:
        raise web.HTTPTooManyRequests(body='Too many invoices in this payment, sorry')
    for item in payment.get('PaymentItems', []):
        if item['InvoiceTransactionId'] not in invoice_ids:
            try:
                invoice = await saasu.get_invoice(item['InvoiceTransactionId'])
            except Exception:  # message intentionally mirrors the payment 404
                raise web.HTTPNotFound(body='Requested payment not found')
            if invoice['BillingContactId'] == saasu_user['Id']:
                invoice_ids.append(item['InvoiceTransactionId'])
                invoices[item['InvoiceTransactionId']] = invoice
                items.append(item)
            else:
                # Not this user's invoice: hide the item and its amount.
                payment['TotalAmount'] -= item['AmountPaid']
    payment['PaymentItems'] = items
    return {
        'invoices': invoices,
        'payment': payment,
    }
def abort(code):
    """Return the aiohttp error response instance for HTTP status *code*.

    Unknown codes fall back to 400 Bad Request, matching the final ``else``
    of the original if/elif chain.
    """
    # Dispatch table replaces a 27-branch if/elif chain.
    exception_classes = {
        400: web.HTTPBadRequest,
        401: web.HTTPUnauthorized,
        402: web.HTTPPaymentRequired,
        403: web.HTTPForbidden,
        404: web.HTTPNotFound,
        405: web.HTTPMethodNotAllowed,
        406: web.HTTPNotAcceptable,
        407: web.HTTPProxyAuthenticationRequired,
        408: web.HTTPRequestTimeout,
        409: web.HTTPConflict,
        410: web.HTTPGone,
        411: web.HTTPLengthRequired,
        412: web.HTTPPreconditionFailed,
        413: web.HTTPRequestEntityTooLarge,
        414: web.HTTPRequestURITooLong,
        415: web.HTTPUnsupportedMediaType,
        416: web.HTTPRequestRangeNotSatisfiable,
        417: web.HTTPExpectationFailed,
        421: web.HTTPMisdirectedRequest,
        422: web.HTTPUnprocessableEntity,
        424: web.HTTPFailedDependency,
        426: web.HTTPUpgradeRequired,
        428: web.HTTPPreconditionRequired,
        429: web.HTTPTooManyRequests,
        431: web.HTTPRequestHeaderFieldsTooLarge,
        451: web.HTTPUnavailableForLegalReasons,
    }
    return exception_classes.get(code, web.HTTPBadRequest)()
async def ip_checkpoint(request, handler):
    """Middleware-style handler: rate-limit requests per masked client IP.

    Reads the client IP from the ``X-Forwarded-For`` header, masks it with
    the app-configured bitmask, and tracks a per-IP request budget in the
    app's ``session_db``/``timeout_db`` stores.  Raises 400 when no valid IP
    is provided, 429 (with ``Retry-After``) when the IP is timed out or has
    exhausted its budget; otherwise delegates to *handler*.
    """
    # processing cases with only 1 ip address provided
    try:
        _ip = request.headers["X-Forwarded-For"]
        request_ip = await validate_ip(_ip)
        if not request_ip:
            # Header present but value did not validate as an IP.
            ans = f"Bad ip provided in header: {_ip}"
            raise web.HTTPBadRequest(text=ans)
    except KeyError:
        # Header missing entirely.
        print("Got header without forwarded ip")
        ans = "Can't find any IP provided in X-Forwarded-For header"
        raise web.HTTPBadRequest(text=ans)
    masked_ip = await bitmask_ip(request_ip, request.app["mask"])
    print(f"Got request from {masked_ip}")
    REDIS_SESSION = request.app["session_db"]  # per-IP remaining-request counters
    REDIS_TIMEOUT = request.app["timeout_db"]  # per-IP penalty timeouts
    # NOTE(review): "> -1" assumes Redis ttl() semantics (-2 missing key,
    # -1 no expiry, >= 0 seconds left), so any existing timeout key means
    # the IP is currently penalized — confirm against the client used.
    current_timeout = REDIS_TIMEOUT.ttl(masked_ip)
    _body = request.app["html_body_429"]
    _header = request.app["http_header"]
    if current_timeout > -1:
        print(f"{masked_ip} is still timed out for {current_timeout}")
        _header["Retry-After"] = str(current_timeout)
        raise web.HTTPTooManyRequests(body=str(_body), headers=_header)
    elif masked_ip in REDIS_SESSION:
        # Existing session window: spend one request from the budget.
        if REDIS_SESSION.decrby(masked_ip, 1) < 0:
            # Budget exhausted: start a penalty timeout.
            print(f"{masked_ip} exceeded rate limit and got timeout...")
            REDIS_TIMEOUT.setex(masked_ip, request.app["timeout"], 1)
            # clear user session for case timeout ttl < session ttl
            # NOTE(review): assumes session_db supports `in`/`del`
            # (dict-like wrapper around Redis) — verify.
            del REDIS_SESSION[masked_ip]
            _header["Retry-After"] = str(request.app["timeout"])
            raise web.HTTPTooManyRequests(body=str(_body), headers=_header)
    else:
        # First request (or previous 60s window expired): open a new window;
        # this request counts as the first, hence max_requests - 1.
        print(f"New IP or last session expired")
        REDIS_SESSION.setex(masked_ip, 60, request.app["max_requests"] - 1)
    return await handler(request)
async def executeHandler(request):
    """Attach a websocket to a ``docker exec`` bash session in *container_id*.

    Bridges a PTY running ``docker exec -it <container_id> bash`` with the
    client websocket: PTY output is pushed to the socket via an event-loop
    reader callback, and text frames from the socket are written to the PTY.
    Inbound connections are capped per peer IP.

    Raises
    ------
    web.HTTPTooManyRequests when the peer IP already holds max_sockets sockets.
    """
    # allow inbound connections from swarm nodes only
    # use that also to ensure 1 node = 1 socket
    ip = request.transport.get_extra_info('peername')[0]
    max_sockets = 4
    # Fix: use >= rather than == so the cap still holds if the count ever
    # overshoots the limit (== could be skipped past under races).
    if len(request.app.websockets[ip]) >= max_sockets:
        raise web.HTTPTooManyRequests()
    container_id = request.match_info['container_id']
    cmd = request.match_info['cmd']  # NOTE(review): unused below; kept for route compatibility
    master, slave = pty.openpty()

    def callback(master, ws):
        # Forward PTY output to the websocket; stop reading once the PTY dies.
        try:
            msg = os.read(master, 1024)
        except OSError:
            request.app.loop.remove_reader(master)
            msg = b"Session terminated"
        ws.send_bytes(msg)

    command = ["docker", "exec", "-it", container_id, "bash"]
    proc = subprocess.Popen(command, start_new_session=True,
                            stdin=slave, stdout=slave, stderr=slave)
    os.close(slave)  # parent keeps only the master end of the PTY
    ws = web.WebSocketResponse(heartbeat=10)
    await ws.prepare(request)
    try:
        request.app.loop.add_reader(master, partial(callback, master, ws))
        request.app.websockets[ip].append(ws)
        async for msg in ws:
            if msg.tp == MsgType.text:
                if msg.data == 'close':
                    await ws.close()
                # NOTE(review): the frame (including 'close') is still written
                # to the PTY — confirm whether an `else` was intended here.
                os.write(master, msg.data.encode())
    except Exception as exc:
        print(exc, flush=1)
    finally:
        # Best-effort teardown: the reader may already have been removed
        # by callback() when the PTY closed.
        try:
            request.app.loop.remove_reader(master)
        except Exception:  # was a bare except:
            pass
        proc.terminate()
        os.close(master)
        request.app.websockets[ip].remove(ws)
    return ws
async def add_job(self, spec: BaseJobSpec) -> str:
    """Create and register a Job for *spec*, returning its job id.

    Raises
    ------
    web.HTTPTooManyRequests when the active-job limit is reached.
    web.HTTPBadRequest when spec.job_type has no registered handler.
    """
    if len(self.jobs_active) >= self.app_config.server.jobs_max:
        raise web.HTTPTooManyRequests(
            text="Maximum parallel job limit ({}) reached: Try again later"
            .format(self.app_config.server.jobs_max))
    job_type = spec.job_type
    handler = self.handlers.get(job_type)
    if handler is None:
        raise web.HTTPBadRequest(
            text="Job.job_type '{}' is not recognised".format(job_type))
    job_id = str(generate_guid())
    job = Job(job_id, spec, self, handler)
    # Wire every job event to the matching on_job_<event> method.  This
    # replaces seven copy-pasted listener definitions; the default argument
    # binds each callback at definition time (avoids late-binding closures).
    for event in ("complete", "critical", "debug", "error", "info",
                  "progress", "warning"):
        callback = getattr(self, "on_job_" + event)

        async def listener(payload, _callback=callback):
            return await _callback(job_id, job, job_type, payload)

        job.on(event, listener)
    self.jobs_active.append(job)
    self.jobs_cache[job_id] = job
    return job_id
async def real_translate(text):
    """Translate *text* from English to Russian via the Google translate API.

    Returns the first translated segment from the API response.

    Raises
    ------
    web.HTTPTooManyRequests on a 429 from the API.
    web.HTTPServiceUnavailable on any other non-200 status.
    """
    # Fix: the original interpolated the raw text into the URL with
    # str.format, so spaces, '&', '#' etc. were never percent-encoded and
    # corrupted the query.  Passing params= lets aiohttp encode them.
    url = "https://translate.google.com:443/translate_a/single"
    params = {
        "client": "a",
        "ie": "utf-8",
        "oe": "utf-8",
        "dt": "t",
        "sl": "en",
        "tl": "ru",
        "q": text,
    }
    async with create_session() as sess:
        async with sess.get(url, params=params) as resp:
            if resp.status != 200:
                if resp.status == 429:
                    raise web.HTTPTooManyRequests(
                        text="Too many requests for Google API.")
                else:
                    raise web.HTTPServiceUnavailable(
                        text=f"Google API returned code: {resp.status}")
            payload = await resp.json()
    return payload[0][0][0]
async def degenerate(request):
    """Validate an uploaded invite QR code plus e-mail for the given token.

    Form fields: ``email`` and ``invite`` (an image containing a QR code
    whose payload is base64-encoded ASCII).  Renders ``index.html`` with an
    error message on any failure; on an exact invite match stores the invite
    in the session and redirects to ``/<token>``.
    """
    data = await request.post()
    email = data.get('email')
    session = await get_session(request)
    token = request.match_info['token']
    # NOTE(review): this lock is created fresh per request, so it never
    # provides mutual exclusion between concurrent requests — the BRUTES
    # update below is effectively unguarded.  A module-level lock is
    # probably what was intended.
    lock = asyncio.Lock()
    async with lock:
        # Naive brute-force throttle: BRUTES maps token -> (tries, timestamp).
        if not BRUTES.get(token):
            BRUTES[token] = 1, time.time()
        else:
            tries, last = BRUTES[token]
            if tries > 60:
                if time.time() - last > 60:
                    # Window elapsed: reset the counter and let this one pass.
                    BRUTES[token] = 0, time.time()
                else:
                    raise web.HTTPTooManyRequests()
            else:
                BRUTES[token] = tries + 1, last
    if not email or '@' not in email:
        return jinja2.render_template(f'index.html', request, {
            'error': "E-mail такого нету или неправильный."
        })
    tokens = get_free_invites(token)
    print(tokens)
    try:
        # Decode the QR code from the uploaded image; its payload is
        # base64 that must decode to ASCII.  Any failure (bad image, no QR,
        # bad base64) falls through to the error page below.
        invite = data['invite'].file
        invite = decode(Image.open(invite))[0].data
        invite = base64.b64decode(invite).decode('ascii')
    except:
        return jinja2.render_template(f'index.html', request, {
            'error': "Вы вообще ЧТО загрузили? xD"
        })
    # NOTE(review): reverse_bin_search is opaque here — presumably returns a
    # match status ('eq' on exact hit) and a distance; verify at its source.
    status, diff = reverse_bin_search(invite, tokens)
    if status == 'eq':
        session["invite"] = invite
        return web.HTTPFound(f'/{token}')
    else:
        return jinja2.render_template(f'index.html', request, {
            'error': messages[status]
        })
async def bulk_create(request):
    """Test double for the hosts bulk-create endpoint.

    Simulates auth failures, forced 500/429 responses (selected via the URL
    path), workspace checks, and payload validation, and asserts that the
    submitted host matches the expected fixture before answering 201.
    """
    auth_error = verify_token(test_config, request)
    if auth_error:
        return auth_error
    path = request.url.path
    # Forced-error paths for exercising client retry/error handling.
    for marker, response_cls in (
        ("error500", web.HTTPInternalServerError),
        ("error429", web.HTTPTooManyRequests),
    ):
        if marker in path:
            return response_cls()
    if test_config.workspace not in path:
        return web.HTTPNotFound()
    expected_host = host_data.copy()
    expected_host["vulnerabilities"] = [vuln_data.copy()]
    raw_body = await request.read()
    payload = json.loads(raw_body.decode())
    first_host = payload["hosts"][0]
    if "ip" not in first_host:
        return web.HTTPBadRequest()
    assert expected_host == first_host
    return web.HTTPCreated()
async def handle_subscribed(self, request: Request) -> StreamResponse:
    """Open the event stream for a subscriber identified in the URL.

    Rejects with 429 when a listener for the subscriber already exists,
    404 when the subscriber is unknown or has no subscriptions; otherwise
    starts listening on all of the subscriber's channels with any pending
    actions replayed.
    """
    subscriber_id = request.match_info["subscriber_id"]
    subscriber = await self.subscription_handler.get_subscriber(subscriber_id)
    # Only a single concurrent listener per subscriber is permitted.
    if subscriber_id in self.message_bus.active_listener:
        log.info(
            f"There is already a listener for subscriber: {subscriber_id}. Reject."
        )
        return web.HTTPTooManyRequests(
            text="Only one connection per subscriber is allowed!")
    if subscriber and subscriber.subscriptions:
        pending = await self.workflow_handler.list_all_pending_actions_for(
            subscriber)
        channels = list(subscriber.subscriptions.keys())
        return await self.listen_to_events(
            request, subscriber_id, channels, pending)
    return web.HTTPNotFound(
        text=
        f"No subscriber with this id: {subscriber_id} or no subscriptions"
    )
def apply_rate_limit(self, request):
    """Throttle by client address after failed attempts.

    No-op when retry delay is disabled.  Each call pushes the caller's
    "due" time forward by the retry delay; a call arriving before its due
    time is rejected with 429.  The due-time table is purged of expired
    entries periodically once it grows large enough.
    """
    if self._fail_retry_delay == 0:
        return
    now = asyncio.get_event_loop().time()
    table = self._rate_limit_ip_to_due
    # Opportunistic cleanup so the table cannot grow without bound.
    table_is_large = len(table) >= _CLEAN_RATE_LIMIT_THRESHOLD
    cleanup_is_stale = (now - self._rate_limit_last_cleaned) > _CLEAN_RATE_LIMIT_DELAY
    if table_is_large and cleanup_is_stale:
        expired = [ip for ip, due in table.items() if now >= due]
        for ip in expired:
            del table[ip]
        self._rate_limit_last_cleaned = now
    # we use the ip address; this can easily be faked with proxies but it's already putting
    # enough hassle on potential attackers
    due = table.get(request.remote, 0)
    if now < due:
        raise web.HTTPTooManyRequests()
    table[request.remote] = now + self._fail_retry_delay
async def mixed_response_handle(request):
    """After a random delay, answer 200 (60%) or 429 (40%) at random."""
    await sleep_rand()
    candidates = [web.Response(text='Ok response'), web.HTTPTooManyRequests()]
    picked = random.choices(candidates, weights=[0.6, 0.4])
    return picked[0]
def tape_library_handler_wrapper(
    request,
    action_name,
    required_params=None,
    optional_params=None,
    skip_lock_check=False,
):
    """Run a tape-library action for an API request, with uniform error handling.

    Parameters
    ----------
    request : aiohttp request carrying the query string and the app's library.
    action_name : name of the ``action_<action_name>`` method to invoke.
    required_params : iterable of query parameters that must be present and
        non-empty, or None.
    optional_params : unused; kept for interface compatibility.
    skip_lock_check : when True, run even while the library is locked.

    Raises
    ------
    Multiple exceptions see:
    https://docs.aiohttp.org/en/latest/web_exceptions.html
    """

    def _error_body(description, reason, err_type, **extra):
        # All API errors share this JSON envelope; extra fields (e.g. the
        # offending parameter name) are merged into it.
        payload = {
            "description": description,
            "reason": reason,
            "type": err_type,
        }
        payload.update(extra)
        return json.dumps({"error": payload})

    # Check parameters: each required one must be present and non-empty.
    for param in required_params or ():
        if param not in request.query:
            raise web.HTTPUnprocessableEntity(
                text=_error_body("missing parameter", "undefined",
                                 "parameter", parameter=param))
        if not request.query[param]:
            raise web.HTTPUnprocessableEntity(
                text=_error_body("empty parameter", "empty",
                                 "parameter", parameter=param))

    library = request.app["tape_library"]

    # Check that library is not locked
    if not library.running and not skip_lock_check:
        raise web.HTTPForbidden(
            text=_error_body("Library is locked", "locked", "lock"))

    # Back-pressure: refuse when the library task queue is full.
    # (Message typo "to many" fixed to "too many".)
    if library.check_queue_max_depth_reached():
        raise web.HTTPTooManyRequests(
            text=_error_body("too many requests in progress", "full",
                             "taskqueue"))

    # Check if action is available, run it, catch errors if any
    action = getattr(library, "action_" + action_name, None)
    if action is None:
        raise web.HTTPNotImplemented(
            text=_error_body("no such method", "nosuch", "method"))
    try:
        data = action(**request.query)
    except web.HTTPException:
        # aiohttp errors raised by the action pass through untouched.
        raise
    except Exception as excpt:
        logging.exception(action_name)
        raise web.HTTPInternalServerError(
            text=_error_body(str(excpt), "internal", "server"))
    return web.json_response(data)