async def news(company:str, start_date: Optional[date] = Query(None), end_date: Optional[date] = Query(None)):
    query = get_news_data(company, start_date, end_date)
    return await connection.get_db().fetch_all(query)
Example #2
async def read_items3(q: List[str] = Query(["foo", "bar"])):
    query_items = {"q": q}
    return query_items
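A list-typed query parameter accepts the same key repeated in the URL. A minimal self-contained sketch with FastAPI's TestClient (the app object and the /items3 path are assumptions, not part of the original snippet):

from typing import List
from fastapi import FastAPI, Query
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/items3")
async def read_items3(q: List[str] = Query(["foo", "bar"])):
    return {"q": q}

client = TestClient(app)
assert client.get("/items3").json() == {"q": ["foo", "bar"]}      # default list applies
assert client.get("/items3?q=a&q=b").json() == {"q": ["a", "b"]}  # repeating the key builds the list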
Example #3
 def __init__(
     self,
     order: Optional[str] = Query(None, description='Format: "id,-updated_at"'),
 ):
     self.order = order.split(",") if order else []
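The __init__ above comes from a small dependency class. A hedged sketch of how such a class is typically wired into a route with Depends (the class name OrderParams and the /items route are assumptions):

from typing import List, Optional
from fastapi import Depends, FastAPI, Query

app = FastAPI()

class OrderParams:
    def __init__(
        self,
        order: Optional[str] = Query(None, description='Format: "id,-updated_at"'),
    ):
        self.order: List[str] = order.split(",") if order else []

@app.get("/items")
async def list_items(params: OrderParams = Depends()):
    # /items?order=id,-updated_at  ->  params.order == ["id", "-updated_at"]
    return {"order": params.order}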
Example #4
class TagUpdate(CustomBaseModel):
    id: int
    name: str = Query(default="", min_length=1, max_length=32)

    class Config:
        schema_extra = {"example": {"id": 1, "name": "changed_name"}}
Example #5
async def like_me(token: str = Query(None), db: Session = Depends(get_db)):
    liked = get_likes_by_sub(token, db)

    json_compatible_item_data = jsonable_encoder(MyLikesModel(likes=liked))
    return JSONResponse(content=json_compatible_item_data)
Example #6
async def network_solution_sequence(
        graph: network_models.Graph = Body(
            ...,
            example={
                "directed": True,
                "edges": [
                    {"source": "3", "target": "1"},
                    {"source": "5", "target": "3"},
                    {"source": "7", "target": "1"},
                    {"source": "9", "target": "1"},
                    {"source": "11", "target": "1"},
                    {"source": "13", "target": "3"},
                    {"source": "15", "target": "9"},
                    {"source": "17", "target": "7"},
                    {"source": "19", "target": "17"},
                    {"source": "21", "target": "15"},
                    {"source": "23", "target": "1"},
                    {"source": "25", "target": "5"},
                    {"source": "27", "target": "11"},
                    {"source": "29", "target": "7"},
                    {"source": "31", "target": "11"},
                    {"source": "33", "target": "25"},
                    {"source": "35", "target": "23"},
                    {"source": "4", "target": "2"},
                    {"source": "6", "target": "2"},
                    {"source": "8", "target": "6"},
                    {"source": "10", "target": "2"},
                    {"source": "12", "target": "2"},
                    {"source": "14", "target": "2"},
                    {"source": "16", "target": "12"},
                    {"source": "18", "target": "12"},
                    {"source": "20", "target": "8"},
                    {"source": "22", "target": "6"},
                    {"source": "24", "target": "12"},
                ],
            },
        ),
        min_branch_size: int = Query(4),
) -> Dict[str, Any]:

    task = bg.background_solution_sequence.s(graph=graph.dict(by_alias=True),
                                             min_branch_size=min_branch_size)

    return run_task(task=task,
                    router=router,
                    get_route="get_network_solution_sequence")
Example #7
async def amadeus_upload(
        bgTasks: BackgroundTasks,

        file: UploadFile = File(...),
        name: str = Query(..., min_length=4, max_length=60, regex=res.ResourceName),

        Model: enums.MCTextureModel = Query("auto", alias="model"),
        Type: enums.MCTextureType = Query(..., alias="type"),
        
        Private: bool = False,
        Protect: bool = False,

        uploader: Account = Depends(depends.AccountFromRequestForm(alias="auth"))
    ):
    """参数name为要创建的资源名称.\n 
    通过表单API上传图片, 名称"file"\n
    通过表单发送认证, 名称"auth".\n

    可用get query传参: \n
    
    ``` url
    http://127.0.0.1:8000/natrium/amadeus/upload/aResourceName?type=skin&strict=true&model=alex
    ```
    """
    # 常量分配
    Original: bool = True # 在库中是否是第一个被创建的
    OriginalResource: Optional[Resource] = None
    OriginalUploader: Optional[Account] = None

    if Private and Protect:
        raise exceptions.DuplicateRegulations()

    try:
        image: Image.Image = Image.open(BytesIO(await file.read()))
    except PIL.UnidentifiedImageError:
        raise exceptions.NonCompliantMsg({
            "filename": file.filename
        })
    finally:
        await file.close()

    width, height = image.size

    if image.format != "PNG":
        raise exceptions.NonCompliantMsg({
            "image.format": {
                "value": image.format,
                "assert": "PNG"
            }
        })

    if height > config['natrium']['upload']['picture-size']['height'] or\
        width > config['natrium']['upload']['picture-size']['width']:
        raise exceptions.NonCompliantMsg()

    # Image.resize() returns a new image, so the result must be reassigned
    image = image.resize((
        int(width / 22) * 32 if width % 22 == 0 else width,
        int(height / 17) * 32 if height % 17 == 0 else height
    ))

    pictureContentHash = hashing.PicHash(image)

    attempt_select = orm.select(i for i in Resource if i.PicHash == pictureContentHash)
    if attempt_select.exists():
        # an identical image has already been uploaded
        Original = False
        for i in attempt_select[:]:
            # walk the matches to find the original author
            # (a lookup by uploader could be added here as well)
            if not i.Origin:
                OriginalResource = i
                OriginalUploader = i.Owner
                break

        if not OriginalResource or\
            not OriginalUploader:  # nothing was found, which means the database records are corrupted
            raise exceptions.BrokenData({
                "PictureHash": pictureContentHash,
                "ExceptionRaisedTime": maya.now()
            })

        # if attempt_select matched, there must be an origin entry;
        # check whether the original author simply re-uploaded the same resource.
        if OriginalUploader.Id == uploader.Id:
            raise exceptions.OccupyExistedAddress({
                "originalResource": {
                    "id": OriginalResource.Id,
                    "owner": OriginalUploader.Id
                },
                "uploader": {
                    "id": uploader.Id
                }
            })
        else:  # ...or it is someone else, which needs special handling
            # Protect is the less restrictive flag, so check it first.
            if OriginalResource.Protect:
                if Protect or Private:
                    raise exceptions.PermissionDenied({
                        "originalResource": {
                            "id": OriginalResource.Id,
                            "owner": OriginalUploader.Id,
                            "protect": OriginalResource.Protect,
                            "private": OriginalResource.Private
                        },
                        "uploader": {
                            "id": uploader.Id
                        }
                    })
                else:  # the protected original is already usable as-is, so there is no point in uploading an own copy
                    raise exceptions.OccupyExistedAddress({
                        "originalResource": {
                            "id": OriginalResource.Id,
                            "owner": OriginalUploader.Id,
                            "protect": OriginalResource.Protect,
                        },
                        "uploader": {
                            "id": uploader.Id
                        }
                    })
            elif OriginalResource.IsPrivate:
                # if the original is private, nobody else may upload, use it, or set protect/private on it
                raise exceptions.OccupyExistedAddress({
                    "originalResource": {
                        "id": OriginalResource.Id,
                        "owner": OriginalUploader.Id,
                        "protect": OriginalResource.Protect,
                    },
                    "uploader": {
                        "id": uploader.Id
                    }
                })
            else:
                # the original has neither private nor protect set; that is fine as long as this upload does not set them either
                if Protect or Private:
                    raise exceptions.PermissionDenied({
                        "originalResource": {
                            "id": OriginalResource.Id,
                            "owner": OriginalUploader.Id
                        },
                        "uploader": {
                            "id": uploader.Id
                        },
                        "options": {
                            'protect': Protect,
                            "private": Private
                        }
                    })
                else:
                    # check whether this uploader has already uploaded the same texture
                    assert_the_same = orm.select(i for i in Resource\
                        if i.PicHash == pictureContentHash and \
                        i.Owner.Id == uploader.Id)
                    if assert_the_same.exists():
                        ats_first: Resource = assert_the_same.first()
                        raise exceptions.OccupyExistedAddress({
                            "ownedResource": {
                                "id": ats_first.Id,
                                "name": ats_first.Name
                            },
                            "uploader": {
                                "id": uploader.Id
                            }
                        })

        if Model == "auto":
            Model = ['steve', 'alex'][skin.isSilmSkin(image)]

        account = Account.get(Id=uploader.Id)
        resource = Resource(
            PicHash = pictureContentHash,
            Name = name,
            PicHeight = height, PicWidth = width,
            Model = Model, Type = Type,
            Owner = account,
            IsPrivate = Private, Protect = Protect,
            Origin = OriginalResource
        )
        if Original:
            bgTasks.add_task(Save, image, pictureContentHash)
        orm.commit()
        return {
            "operator": "success",
            "metadata": resource.format_self(requestHash=True)
        }
Example #8
    def query(
        self,
        nelements_max: Optional[float] = Query(
            None,
            description="Maximum value for the number of elements.",
        ),
        nelements_min: Optional[float] = Query(
            None,
            description="Minimum value for the number of elements.",
        ),
        EA_max: Optional[float] = Query(
            None,
            description="Maximum value for the electron affinity in eV.",
        ),
        EA_min: Optional[float] = Query(
            None,
            description="Minimum value for the electron affinity in eV.",
        ),
        IE_max: Optional[float] = Query(
            None,
            description="Maximum value for the ionization energy in eV.",
        ),
        IE_min: Optional[float] = Query(
            None,
            description="Minimum value for the ionization energy in eV.",
        ),
        charge_max: Optional[int] = Query(
            None,
            description="Maximum value for the charge in +e.",
        ),
        charge_min: Optional[int] = Query(
            None,
            description="Minimum value for the charge in +e.",
        ),
        pointgroup: Optional[str] = Query(
            None,
            description="Point of the molecule in Schoenflies notation.",
        ),
        smiles: Optional[str] = Query(
            None,
            description="The simplified molecular input line-entry system "
            "(SMILES) representation of the molecule.",
        ),
    ) -> STORE_PARAMS:

        crit = defaultdict(dict)  # type: dict

        d = {
            "nelements": [nelements_min, nelements_max],
            "EA": [EA_min, EA_max],
            "IE": [IE_min, IE_max],
            "charge": [charge_min, charge_max],
        }  # type: dict

        for entry in d:
            if d[entry][0]:
                crit[entry]["$gte"] = d[entry][0]

            if d[entry][1]:
                crit[entry]["$lte"] = d[entry][1]

        if pointgroup:
            crit["pointgroup"] = pointgroup

        if smiles:
            crit["smiles"] = smiles

        return {"criteria": crit}
Example #9
def set_urls_for_fetch_pages(urls: Optional[List[str]] = Query(None)):
    if urls:  # guard against None (the default) as well as an empty list
        postgres_repository.set_current_urls_for_fetch_news(urls)
    return {"msg": "آدرس با موفقیت ثبت شدند."}  # "The URLs were registered successfully."
Example #10
def read_items(u: str = Query("default"), q: List[str] = Query(None)):
    query_items = {"q": q, "u": u}
    return query_items
Example #11
def get_email_subscription(email: str = Query(...)):
    """ Get the subscription record of a email address."""
    return DB.Subscription.get_subscriptions_by_email(email)
Example #12
def get_single_book(pk: int = Path(..., gt=1),
                    pages: int = Query(None, gt=10, le=500)):
    return {"pk": pk, "pages": pages}
Example #13
def get_book(q: List[str] = Query(["test", "test2"],
                                  min_length=2,
                                  max_length=5,
                                  description="Search book")):
    return q
Example #14
def get_available_datasets(category: str = Query('*', regex='^[*a-z0-9]*$'),
        date: str = Query('*', max_length=8, regex='^[*0-9]*$')):
    return [key.replace(':', '_') for key in redis_connection.scan_iter(
        f'{category}:*:{date}')]
Example #15
def station(
    session: Session = Depends(get_database_session),
    engine=Depends(get_database_engine),
    station_code: str = None,
    only_generators: bool = Query(True, description="Show only generators"),
    power_include: Optional[bool] = Query(
        False, description="Include last week of power output"
    ),
    revisions_include: Optional[bool] = Query(
        False, description="Include revisions in records"
    ),
    history_include: Optional[bool] = Query(
        False, description="Include history in records"
    ),
) -> StationSchema:

    station = (
        session.query(Station)
        .filter(Station.code == station_code)
        .filter(Facility.station_id == Station.id)
        .filter(~Facility.code.endswith("NL1"))
    )

    if only_generators:
        station = station.filter(
            Facility.dispatch_type == DispatchType.GENERATOR
        )

    station = station.one_or_none()

    if not station:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Station not found"
        )

    if not station.facilities:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Station has no facilities",
        )

    station.network = station.facilities[0].network

    if revisions_include:
        revisions = session.query(Revision).all()

        station.revisions = list(
            filter(
                lambda rev: rev.schema == "station"
                and rev.code == station.code,
                revisions,
            )
        )

        for facility in station.facilities:
            facility.revisions = list(
                filter(
                    lambda rev: rev.schema == "facility"
                    and rev.code == facility.code,
                    revisions,
                )
            )

    if power_include:
        pass

    if station.location and station.location.geom:
        __query = """
            select
                code,
                ST_Distance(l.geom, bs.geom, false) / 1000.0 as dist
            from bom_station bs, location l
            where
                l.id = {id}
                and bs.priority < 2
            order by dist
            limit 1
        """.format(
            id=station.location.id
        )

        result = []

        with engine.connect() as c:
            result = list(c.execute(__query))

            if len(result):
                station.location.weather_nearest = {
                    "code": result[0][0],
                    "distance": round(result[0][1], 2),
                }

    return station
Example #16
def get_indexed_news(_from: Optional[int] = Query(None, alias="from"),
                     _limit: Optional[int] = Query(None, alias="limit"),
                     from_time: Optional[int] = Query(0),
                     to_time: Optional[int] = Query(10)):
    return postgres_repository.get_indexed_news(_from or 0, _limit or 10, from_time, to_time)
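The aliases let the public query string use the names from and limit while the Python parameters keep valid identifier names. A request would look roughly like this (the path is an assumption):

# GET /news/indexed?from=20&limit=50&from_time=0&to_time=10
#   _from  -> 20   (read via the "from" alias)
#   _limit -> 50   (read via the "limit" alias)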
Example #17
async def analyze_vibration_signal(
        mp_id: int,
        data_id: int,
        method: AnalyzeRule = Query(None),
        conn: Database = Depends(get_db),
):
    res = await get_by_id(
        conn=conn,
        mp_id=mp_id,
        orm_model=VibData,
        require_mp_type=0,
        data_id=data_id,
    )
    # analysis method dispatch
    if method == AnalyzeRule.hilbert:
        processed_res = hilbert(res["ima"])
        return VibrationEnvelopeSchema(**{
            **processed_res,
            **{
                "id": res["id"],
                "time": res["time"]
            }
        })

    if method == AnalyzeRule.stft:
        processed_res = short_time_fournier_transform(res["ima"])
        return VibrationSTFTSchema(**{
            **processed_res,
            **{
                "id": res["id"],
                "time": res["time"]
            }
        })

    if method == AnalyzeRule.musens:
        processed_res = multi_scale_envelope_spectrum(res["ima"])
        x = json.dumps({
            **processed_res,
            **{
                "id": res["id"],
                "time": str(res["time"])
            }
        })
        return JSONResponse(content=x)

    if method == AnalyzeRule.welch:
        processed_res = welch_spectrum_estimation(res["ima"])
        return VibrationWelchSchema(**{
            **processed_res,
            **{
                "id": res["id"],
                "time": res["time"]
            }
        })

    if method == AnalyzeRule.cumtrapz:
        processed_res = acceleration_to_velocity(res["ima"])
        return VibrationCumtrapzSchema(**{
            **processed_res,
            **{
                "id": res["id"],
                "time": res["time"]
            }
        })

    if method == AnalyzeRule.emd:
        processed_res = empirical_mode_decomposition(res["ima"])
        return VibrationEMDSchema(**{
            **processed_res,
            **{
                "id": res["id"],
                "time": res["time"]
            }
        })
Example #18
async def pagination(
    skip: int = Query(0, ge=0),
    limit: int = Query(10, ge=0),
) -> Tuple[int, int]:
    capped_limit = min(100, limit)
    return (skip, capped_limit)
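Because pagination only declares query parameters, it plugs into any route through Depends. A minimal self-contained sketch (the /posts route and its handler are assumptions):

from typing import Tuple
from fastapi import Depends, FastAPI, Query

app = FastAPI()

async def pagination(
    skip: int = Query(0, ge=0),
    limit: int = Query(10, ge=0),
) -> Tuple[int, int]:
    return (skip, min(100, limit))

@app.get("/posts")
async def list_posts(p: Tuple[int, int] = Depends(pagination)):
    skip, limit = p
    # /posts?skip=0&limit=500  ->  limit is capped at 100
    return {"skip": skip, "limit": limit}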
Example #19
def get_filtered_samples(response: Response,
                         dataset_id: int,
                         page: Optional[int] = None,
                         limit: Optional[int] = None,
                         labels: Optional[List[int]] = Query(None),
                         users: Optional[List[int]] = Query(None),
                         labeled: Optional[bool] = None,
                         free_text: Optional[Union[str, bytes]] = None,
                         divided_labels: Optional[bool] = None,
                         user: User = Depends(get_current_active_user)):
    """
    NOT for usage in connection with Active Learning!

    :param response:            gets the Response header object from FastAPI, don't fill\\
    :param dataset_id:          dataset_id for dataset\\

    :param limit:               number of samples per page\\
    :param page:                number of page that should be fetched (beginning with 0) \\

    both limit and page need to be set for paging; the total number of elements is returned in the X-Total response header \\

    :param labeled:             return only labeled samples (true) / unlabeled samples (false)\\
    :param labels:              list of label_ids to filter for; pass each label as labels=<label_id>\\
    :param divided_labels:      search only for samples that different users labeled differently\\

    :param users:               list of user_ids to filter for; pass each user as users=<user_id>\\

    :param free_text:           freetext search (only one word)\\

    :param user:                the currently active user -> needed for authentication-check\\
    :return:                    list of samples
    """

    # return only current associations, if changed code needs to be adapted
    only_current_associations = True

    dataset = db.query(Dataset).filter(Dataset.id == dataset_id)

    if not dataset:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Dataset not found for id: {}.".format(dataset_id))

    query = db.query(Sample).filter(Sample.dataset_id == dataset_id)

    # JOIN table association for later use
    if labels or users:
        query = query.join(Association, Sample.id == Association.sample_id)

    # filter for labels
    if labels:
        for label_id in labels:
            label = db.query(Label).get(label_id)
            if not label:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Label not found for id: {}.".format(label_id),
                )
        query = query.filter(Association.label_id.in_(labels))

    # filter for users who labeled the sample
    if users:
        for user_id in users:
            user = db.query(User).get(user_id)
            if not user:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="User not found for id: {}.".format(user_id),
                )
        query = query.filter(Association.user_id.in_(users))

    # filter for only labeled or unlabeled datasets
    if labeled is not None:
        if labeled:
            if not (labels or users):
                query = query.join(Association,
                                   Sample.id == Association.sample_id)
        else:
            if users or labels or divided_labels:
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=
                    "Cannot process unlabeled Samples if filters for Labels or Users are set.",
                )
            query = query.filter(Sample.dataset_id == dataset_id,
                                 ~Sample.associations.any())

    # text search
    if free_text:
        # prepare text
        free_text = free_text.replace(" ", " & ")

        sample = db.query(Sample).filter(
            Sample.dataset_id == dataset_id).first()
        content_type = sample.type

        # text search only for content type 'text' and 'table'
        if content_type == "text":
            matched_tables = select([Text.id]).where(
                Text.content.match('{}'.format(free_text)))
            query = query.join(Text).filter(Text.id.in_(matched_tables))
        elif content_type == "table":
            matched_tables = select([Table.id]).where(
                Table.content.match('{}'.format(free_text)))
            query = query.join(Table).filter(Table.id.in_(matched_tables))
        else:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=
                "The Dataset with id {} does not have text to search as content."
                .format(dataset_id),
            )

    # filter for divided labels (sample has more than 1 label)
    if divided_labels:
        # rebuild base query, join association 2x with alias
        association1 = aliased(Association)
        association2 = aliased(Association)

        base_query = db.query(Sample) \
            .filter(Sample.dataset_id == dataset_id) \
            .join(association1, Sample.id == association1.sample_id) \
            .join(association2, Sample.id == association2.sample_id) \
            .filter(association1.is_current == only_current_associations) \
            .filter(association2.is_current == only_current_associations)

        # use query as subquery to apply other filters (eg. for labels or users)
        sub_query = query.with_entities(Sample.id).subquery()

        # build new query
        query = base_query \
            .filter(not_(association1.label_id == association2.label_id)) \
            .filter(Sample.id.in_(sub_query)) \
            .group_by(Sample.id).having(func.count(association1.label_id) > 1) \
            .order_by(func.count(association1.label_id).desc())

    # only return samples with no label or a current label
    # All Samples with a current label
    with_current_association = db.query(Sample.id)\
        .join(Association, Sample.id == Association.sample_id)\
        .filter(Association.is_current == only_current_associations)
    # All Samples with a label
    with_association = db.query(Sample.id)\
        .join(Association, Sample.id == Association.sample_id)\
        .subquery()
    # All Samples without any labels
    without_association = db.query(Sample.id)\
        .filter(Sample.id.notin_(with_association))

    valid_samples = with_current_association.union(without_association)

    query = query.filter(Sample.id.in_(valid_samples))

    # limit number of returned elements and paging, return total_elements in header
    if page is not None and limit:
        if page < 0:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=
                "Page number needs to be 0 or greater. Page number was: {}.".
                format(page),
            )

        total_elements = query.count()
        response.headers["X-Total"] = "{}".format(total_elements)
        lower_limit = page * limit
        upper_limit = page * limit + limit
        query = query.order_by(Sample.id).slice(lower_limit, upper_limit)

    samples = query.all()
    return samples
Example #20
class resetPassword(BaseModel):
    mobileNumber: str = Query(...)
    password: str = Query(...)
Example #21
def calculate(item_id: int, q: Optional[List[str]] = Query(None)):
    return {"item_id": item_id, "q": q}
Example #22
class BloodReceive(BaseModel):
    mobileNumber: str = Query(...)
    bloodMessage: str = Query(...)
    latitude: float = Query(...)
    longitude: float = Query(...)
Example #23
async def like_me(token: str = Query(None),
                  menu_id: str = Query(None),
                  db: Session = Depends(get_db)):
    success = dislike_this(menu_id, token, db)
    if not success:
        raise HTTPException(409)
Example #24
class OxygenReceive(BaseModel):
    mobileNumber: str = Query(...)
    oxygenMessage: str = Query(...)
    latitude: float = Query(...)
    longitude: float = Query(...)
Example #25
async def read_items(q: str = Query(..., min_length=3, max_length=50)):
    # a default of None makes the parameter optional; a default of ... makes it required
    results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
    if q:
        results.update({"q": q})
    return results
Example #26
def divition(a: float, b: float = Query(..., gt=0)):
    divition = a / b
    return {"Divition": divition}
Example #27
async def read_items4(q: str = Query(None, alias="item-query")):
    results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
    if q:
        results.update({"q": q})
    return results
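The alias lets the client send item-query, a name that could not be a valid Python identifier, while the handler still sees it as q (the /items4 path below is an assumption):

# GET /items4?item-query=books  ->  {"items": [...], "q": "books"}
# a plain "?q=books" is ignored, because only the declared alias is read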
Example #28
def get_query(query=Query(...)):
    query = json.loads(query)
    return QueryItem.parse_obj(query)
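Here an entire filter object is packed into a single query parameter as JSON and re-validated through Pydantic. A hedged sketch of the client side (the QueryItem fields and the /search path are assumptions):

import json
import urllib.parse

payload = json.dumps({"name": "widget", "limit": 5})
url = "/search?query=" + urllib.parse.quote(payload)
# the endpoint json.loads() the raw string and rebuilds it with QueryItem.parse_obj(...)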
Example #29
 async def tags_list(tags: List[str] = Query(None)) -> ListTagsResponse:
     """
     Check client with list of items in the query. Client should send <url>?tags=1&tags=2&tags=3
     Responds with the sent tags list
     """
     return ListTagsResponse(tags=tags)
Example #30
async def historical(company:str, start_date: Optional[date] = Query(None), end_date: Optional[date] = Query(None)):
    print(company, start_date, end_date)
    query = get_historical_data(company, start_date, end_date)
    print(query)
    return await connection.get_db().fetch_all(query)