def rank_barriers(request: Request, extractor: BarriersRecordExtractor = Depends()):
    """Rank a subset of small barriers data.

    Path parameters:
    <layer> : one of LAYERS

    Query parameters:
    * id: list of ids
    * filters are defined using a lowercased version of column name and a
      comma-delimited list of values
    """
    log_request(request)

    df = extractor.extract(ranked_barriers).copy()
    log.info(f"selected {len(df)} barriers for ranking")

    # just return tiers and lat/lon
    cols = ["lat", "lon"] + TIER_FIELDS + CUSTOM_TIER_FIELDS
    df = df.join(calculate_tiers(df))[cols]

    # extract extent
    bounds = df[["lon", "lat"]].agg(["min", "max"]).values.flatten().round(3)

    return csv_response(df, bounds=bounds)
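# A minimal sketch of calling this endpoint over HTTP. The mount path
# "/api/v1/internal/small_barriers/rank/State" and the id values are assumptions
# for illustration; the route decorator binding this handler is not shown in the
# snippet, and filters would be passed as extra lowercased-column query params.
import httpx

resp = httpx.get(
    "https://example.org/api/v1/internal/small_barriers/rank/State",
    params={"id": "AL,GA"},  # hypothetical summary unit ids
)
resp.raise_for_status()
print(resp.text.splitlines()[0])  # CSV header: lat, lon, and tier columns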
def post(self, request, *args, **kwargs):
    serializer = MedicoSerializer(data=request.data)
    if serializer.is_valid():
        # notify the first admin user (superuser + staff) about the new pre-registration
        datoUser = User.objects.filter(is_superuser=True, is_staff=True).values_list('id', flat=True)
        Notificacion.objects.create(
            titulo='Preregistro',
            mensaje='Se creó un preregistro',
            destinatario=datoUser[0],
            remitente=0,
        )
        return self.create(request, *args, **kwargs)
    log.info(f'campos incorrectos: {serializer.errors}')
    raise CamposIncorrectos(serializer.errors)
def query_barriers(request: Request, extractor: StateRecordExtractor = Depends()):
    """Return subset of barriers based on state abbreviations.

    Query parameters:
    id: list of state abbreviations
    """
    log_request(request)

    df = extractor.extract(barriers)[SB_PUBLIC_EXPORT_FIELDS].copy()
    df = df.sort_values(by="HasNetwork", ascending=False)
    df = unpack_domains(df)

    log.info(f"public query selected {len(df.index)} barriers")

    return csv_response(df)
def post(self, request, *args, **kwargs):
    serializer = MensajeSerializer(data=request.data)
    if serializer.is_valid():
        destinatario = self.request.data.get('destinatario')
        log.info(f'destinatario: {destinatario}')

        # replace any existing conversation for this recipient, then notify them
        Conversacion.objects.filter(destinatario=destinatario).delete()
        nombre = getNombreSesion(request, destinatario)
        Conversacion.objects.create(destinatario=destinatario, nombre=nombre)
        Notificacion.objects.create(
            titulo='Chat',
            mensaje='Tiene un nuevo mensaje',
            destinatario=destinatario,
            remitente=0,
        )
        return self.create(request, *args, **kwargs)
    log.info(f'campos incorrectos: {serializer.errors}')
    raise CamposIncorrectos(serializer.errors)
def paginar(self):
    # page size: fall back to 10 when missing, non-numeric, or < 1
    try:
        size = int(self.size)
        if size < 1:
            size = 10
    except (TypeError, ValueError):
        size = 10

    # direction: 'asc' -> no prefix; anything else sorts descending ('-' prefix)
    direc = '' if str(self.direc).lower() == 'asc' else '-'

    # order by the requested field only if it is a serializer field; default to 'id'
    orderBy = self.orderby
    if orderBy is None or orderBy == '' or orderBy not in self.serializer.Meta.fields:
        orderBy = 'id'
    paginator = Paginator(self.queryset.order_by(direc + orderBy), size)

    cuenta = self.queryset.count()
    Paginacion.totalElements = cuenta
    Paginacion.totalPages = paginator.num_pages

    direc = 'desc' if direc == '-' else 'asc'
    log.info(
        f'se obtienen: {cuenta} registros, registros por pagina: {size}, '
        f'direccion: {direc}, ordenados por: {orderBy}'
    )

    # clamp the requested page into the valid range
    try:
        queryset = paginator.page(self.page)
    except PageNotAnInteger:
        queryset = paginator.page(1)
    except EmptyPage:
        queryset = paginator.page(paginator.num_pages)

    return self.serializer(queryset, many=True)
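# A minimal usage sketch for paginar(), assuming it lives on a Paginacion helper
# constructed with the queryset, serializer, and the raw paging parameters; this
# constructor signature and the view wiring are assumptions for illustration.
from rest_framework.response import Response

def listar_medicos(request):
    paginacion = Paginacion(
        queryset=Medico.objects.all(),          # hypothetical model
        serializer=MedicoSerializer,
        page=request.GET.get('page', 1),
        size=request.GET.get('size', 10),
        direc=request.GET.get('direc', 'asc'),
        orderby=request.GET.get('orderby', 'id'),
    )
    serializado = paginacion.paginar()
    return Response({
        'content': serializado.data,
        'totalElements': Paginacion.totalElements,
        'totalPages': Paginacion.totalPages,
    })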
def query_dams(request: Request, extractor: DamsRecordExtractor = Depends()):
    """Return subset of dams based on summary unit ids within layer.

    Path parameters:
    layer : one of LAYERS

    Query parameters:
    id: list of ids
    """
    log_request(request)

    df = extractor.extract(ranked_dams)

    # extract extent
    bounds = df[["lon", "lat"]].agg(["min", "max"]).values.flatten().round(3)

    df = df[DAM_FILTER_FIELDS].copy()
    log.info(f"query selected {len(df)} dams")

    return csv_response(df, bounds)
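# Worked example of the bounds computation used above: .agg(["min", "max"])
# yields a 2x2 frame (rows: min/max; columns: lon/lat), so the row-major
# .flatten() produces [lon_min, lat_min, lon_max, lat_max], i.e. the standard
# [xmin, ymin, xmax, ymax] bounds order.
import pandas as pd

df = pd.DataFrame({"lon": [-88.2, -85.7], "lat": [30.1, 34.9]})
bounds = df[["lon", "lat"]].agg(["min", "max"]).values.flatten().round(3)
print(bounds)  # [-88.2  30.1 -85.7  34.9]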
import logging

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import Response
import sentry_sdk
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware

from api.logger import log
from api.settings import ALLOWED_ORIGINS, LOGGING_LEVEL, SENTRY_DSN, API_ROOT_PATH
from api.internal import router as internal_router
from api.public import router as public_router

### Setup Sentry
if SENTRY_DSN:
    log.info("setting up sentry")
    sentry_sdk.init(dsn=SENTRY_DSN)

### Create the main API app
app = FastAPI(version="1.0", root_path=API_ROOT_PATH)

path_prefix = "/api/v1" if API_ROOT_PATH is None else ""

### Add logger
@app.on_event("startup")
async def startup_event():
    logger = log
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s:\t%(message)s"))
    logger.addHandler(handler)
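# The module imports CORSMiddleware, ALLOWED_ORIGINS, SentryAsgiMiddleware, and the
# two routers but the snippet does not show where they are wired up. A minimal
# sketch of how these pieces are typically attached in FastAPI follows; the
# router prefixes and middleware options here are assumptions, not confirmed by
# the source.
app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,  # assumed: a list of origin strings from settings
    allow_methods=["*"],
    allow_headers=["*"],
)
if SENTRY_DSN:
    app.add_middleware(SentryAsgiMiddleware)

# assumed prefixes; path_prefix above suggests routes are mounted under /api/v1
app.include_router(public_router, prefix=f"{path_prefix}/public")
app.include_router(internal_router, prefix=f"{path_prefix}/internal")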
def download_dams(
    request: Request,
    id: str,
    layer: Layers = "State",
    extractor: DamsRecordExtractor = Depends(),
    custom: bool = False,
    unranked: bool = False,
    sort: Scenarios = "NCWC",
    format: Formats = "csv",
):
    """Download subset of dams or small barriers data.

    If `unranked` is `True`, all barriers in the summary units are downloaded.

    Path parameters:
    <layer> : one of LAYERS
    <format> : "csv"

    Query parameters:
    * id: list of ids
    * custom: bool (default: False); set to true to perform custom ranking of subset defined here
    * unranked: bool (default: False); set to true to include unranked barriers in output
    * sort: str, one of 'NC', 'WC', 'NCWC'
    * filters are defined using a lowercased version of column name and a
      comma-delimited list of values
    """
    log_request(request)

    filename = f"aquatic_barrier_ranks_{date.today().isoformat()}.{format}"

    # See if we already cached the response;
    # we only do this for unfiltered states and the entire region (ranked or unranked)
    cache_filename = None
    has_filters = any(q for q in request.query_params if q in DAM_FILTER_FIELD_MAP)
    if layer == "State" and format == "csv" and id and not (has_filters or custom):
        state_hash = sha1(id.encode("UTF8")).hexdigest()
        suffix = "_ranked" if not unranked else ""
        cache_filename = CACHE_DIRECTORY / f"{state_hash}{suffix}_dams.zip"

    if cache_filename and cache_filename.exists():
        return zip_file_response(cache_filename, filename.replace(".csv", ".zip"))

    df = extractor.extract(dams).copy()

    # include unranked dams - these are joined back later
    if unranked:
        full_df = df.copy()

    # can only calculate ranks for those that have networks and are not excluded from ranking
    df = df.loc[df.Ranked]

    # calculate custom ranks
    if custom:
        df = df.join(calculate_tiers(df))

    if unranked:
        # join back to full dataset
        tier_cols = df.columns.difference(full_df.columns)
        df = full_df.join(df[tier_cols], how="left")
        df[tier_cols] = df[tier_cols].fillna(-1).astype("int8")

    log.info(f"selected {len(df):,} dams for download")

    cols = [c for c in DAM_EXPORT_FIELDS if c in set(df.columns)]
    df = df[cols]

    # Sort by tier: custom ranks use "<sort>_tier"; otherwise fall back to the
    # precalculated "SE_<sort>_tier" column
    if f"{sort}_tier" in df.columns:
        sort_field = f"{sort}_tier"
    else:
        sort_field = f"SE_{sort}_tier"

    df = df.sort_values(by=["HasNetwork", sort_field], ascending=[False, True])

    df = unpack_domains(df)

    ### Get metadata
    readme = get_readme(
        filename=filename,
        barrier_type="dams",
        fields=df.columns,
        url=request.base_url,
        layer=extractor.layer,
        ids=extractor.ids,
    )
    terms = get_terms(url=request.base_url)

    if format == "csv":
        return zip_csv_response(
            df,
            filename=filename,
            extra_str={"README.txt": readme, "TERMS_OF_USE.txt": terms},
            extra_path={"SARP_logo.png": LOGO_PATH},
            cache_filename=cache_filename,
        )

    raise NotImplementedError("Other formats not yet supported")
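# Worked example of the cache-key scheme above: the cache file name is the SHA-1
# of the raw id string, plus a "_ranked" suffix when unranked barriers are
# excluded. Only unfiltered, non-custom State/CSV requests are cached, so the
# same id string always maps to the same zip on disk. The id value is
# hypothetical.
from hashlib import sha1

id = "AL,GA"  # hypothetical state list for illustration
state_hash = sha1(id.encode("UTF8")).hexdigest()
print(f"{state_hash}_ranked_dams.zip")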