async def register_sample(conn: asyncpg.connection,
                          data: schemas.MsCreateSample) -> int:
    """
    Register the main collected sample in the samplingfeature table.

    If a sampling feature with ``data.samplingfeaturecode`` already exists its
    id is returned unchanged; otherwise a new "Specimen" sampling feature is
    created through a "Specimen collection" action and the fresh id returned.
    """
    async with conn.transaction():
        existing_id = await get_samplingfeatureid_from_samplingfeaturecode(
            conn, data.samplingfeaturecode)
        if existing_id is not None:
            return existing_id

        # Annotations attached to the new sampling feature: the generic
        # mass-spec marker plus the submitted sample type.
        ms_annotation = await find_row(conn, "annotations", "annotationtext",
                                       "Non-target mass spectrometry",
                                       schemas.Annotations)
        sampletype_annotation = await find_row(conn, "annotations",
                                               "annotationtext",
                                               data.sampletype,
                                               schemas.Annotations)
        new_feature = schemas.SamplingFeaturesCreate(
            samplingfeatureuuid=uuid.uuid4(),
            samplingfeaturecode=data.samplingfeaturecode,
            samplingfeaturetypecv="Specimen",
            annotations=[
                ms_annotation.annotationid,
                sampletype_annotation.annotationid,
            ],
        )
        logging.info(f"directivename {data.directivename}")
        directive = await find_row(conn,
                                   "directives",
                                   "directivename",
                                   data.directivename,
                                   schemas.Directive,
                                   raise_if_none=True)
        collect_action = schemas.ActionsCreate(
            actiondescription="Registered water sample",
            begindatetime=datetime.utcnow(),
            actiontypecv="Specimen collection",
            methodcode="mass_spec:collect_sample",
            isactionlead=True,
            sampling_features=[new_feature],
            directiveids=[directive.directiveid],
            affiliationid=1,
            begindatetimeutcoffset=0,
        )
        completed = await core_queries.do_action(conn, collect_action)
        return completed.sampling_features[0].samplingfeatureid
async def register_output(conn: asyncpg.connection,
                          data: schemas.MsCreateOutput):
    """
    Register mass_spec processed data.

    Records a "Derivation" action for the given method code and attaches one
    annotated result to it; the annotation link points at the processed file
    location.  Returns a ``schemas.MsOutput`` holding the completed action and
    the stored result.
    """
    async with conn.transaction():
        now = datetime.now()

        # Action describing the derivation step that produced the output.
        derivation = schemas.ActionsCreate(
            actiontypecv="Derivation",
            methodcode=data.methodcode,
            begindatetime=now,
            sampling_features=[],
            affiliationid=1,
            isactionlead=True,
            begindatetimeutcoffset=0,
        )
        completed_derivation = await core_queries.do_action(conn, derivation)

        # Reference rows required by the result record.
        unit = await core_queries.find_unit(
            conn,
            schemas.UnitsCreate(unitstypecv="Dimensionless",
                                unitsabbreviation="-",
                                unitsname=""),
            raise_if_none=True)
        variable = await core_queries.find_row(
            conn,
            "variables",
            "variablecode",
            data.variablecode,
            schemas.Variables,
            raise_if_none=True,
        )
        processing_level = await core_queries.find_row(
            conn,
            "processinglevels",
            "processinglevelcode",
            "0",
            schemas.ProcessingLevels,
            raise_if_none=True,
        )

        # Annotation whose link points at the stored output file.
        file_annotation = schemas.AnnotationsCreate(
            annotationlink=data.resultannotationlink,
            annotationtypecv="Result annotation",
            annotationcode=data.resultannotationcode,
            annotationtext="Check link for file location",
            annotationjson=data.resultannotationjson,
            annotationdatetime=now,
            annotationutcoffset=0,
        )
        new_result = schemas.ResultsCreate(
            samplingfeaturecode=data.samplingfeaturecode,
            statuscv="Complete",
            variableid=variable.variableid,
            unitsid=unit.unitsid,  # type: ignore
            processinglevelid=processing_level.processinglevelid,
            valuecount=0,
            resulttypecv="Measurement",
            sampledmediumcv="Liquid aqueous",
            annotations=[file_annotation],
            resultdatetime=now,
            validdatetime=now,
            resultdatetimeutcoffset=0,
            validdatetimeutcoffset=0,
            actionid=completed_derivation.actionid,
            resultuuid=uuid.uuid4(),
        )
        stored_result = await core_queries.create_result(conn, new_result)

    return schemas.MsOutput(action=completed_derivation, result=stored_result)
# Example #3
# 0
async def post_begroing_result(
        begroing_result: schemas.BegroingResultCreate,
        connection=Depends(api_pool_manager.get_conn),
        niva_user: str = Header(None),
):
    """
    Store a set of begroing observations.

    For every method used, one action is created; each species observed with
    that method gets a result row plus either a categorical result (method
    "Microscopic abundance") or a measurement result (other methods, where a
    leading "<" marks the value as censored "Less than").  When the
    WRITE_TO_AQUAMONITOR environment variable is truthy the observations are
    also forwarded to Aquamonitor.  Returns the submitted data echoed back
    with the submitting person's id.

    Raises ValueError when a species row does not use exactly one method.
    """
    user = await create_or_get_user(connection, niva_user)

    # Map method index -> list of species/observation indices using it.
    observations_per_method = defaultdict(list)
    for index, _species in enumerate(begroing_result.taxons):
        used_method_indices = [
            i for i, e in enumerate(begroing_result.observations[index]) if e
        ]
        if len(used_method_indices) != 1:
            raise ValueError("Must have one and only one method per species")
        observations_per_method[used_method_indices[0]].append(index)

    unit_micr_abundance = await find_unit(
        connection,
        UnitsCreate(
            unitstypecv="Dimensionless",
            unitsabbreviation="-",
            unitsname="Presence or Absence",
        ),
        raise_if_none=True,
    )
    unit_macro_coverage = await find_unit(
        connection,
        UnitsCreate(unitstypecv="Dimensionless",
                    unitsabbreviation="%",
                    unitsname="Percent"),
        raise_if_none=True,
    )

    seconds_unit = await find_unit(
        connection,
        UnitsCreate(unitstypecv="Time",
                    unitsabbreviation="s",
                    unitsname="second"),
        raise_if_none=True,
    )

    # method name -> (result type CV, units id, sampled medium CV)
    result_type_and_unit_dict = {
        "Microscopic abundance": (
            "Category observation",
            unit_micr_abundance.unitsid,
            "Liquid aqueous",
        ),
        "Macroscopic coverage": (
            "Measurement",
            unit_macro_coverage.unitsid,
            "Vegetation",
        ),
    }

    async with connection.transaction():
        # Hoisted out of the per-method loop: these lookups do not depend on
        # any loop variable, so run them once instead of once per method.
        processing_level = await find_row(
            connection,
            "processinglevels",
            "processinglevelcode",
            "0",
            ProcessingLevels,
        )
        abundance_variable = await find_row(connection, "variables",
                                            "variablenamecv", "Abundance",
                                            Variables)

        for method_index, method_observations in observations_per_method.items(
        ):
            method = begroing_result.methods[method_index]

            data_action = schemas.ActionsCreate(
                affiliationid=user.affiliationid,
                isactionlead=True,
                methodcode=method.methodcode,
                actiontypecv=method.
                methodtypecv,  # This only works when the type is both an action and a method
                begindatetime=begroing_result.date,
                begindatetimeutcoffset=0,
                equipmentids=[],
                directiveids=[e.directiveid for e in begroing_result.projects],
            )

            completed_action = await post_actions(data_action, connection)
            for result_index in method_observations:

                data_result = schemas.ResultsCreate(
                    samplingfeatureuuid=begroing_result.station.
                    samplingfeatureuuid,
                    actionid=completed_action.actionid,
                    resultuuid=str(uuid.uuid4()),
                    resulttypecv=result_type_and_unit_dict[
                        method.methodname][0],
                    variableid=abundance_variable.variableid,
                    unitsid=result_type_and_unit_dict[method.methodname][1],
                    taxonomicclassifierid=begroing_result.taxons[result_index]
                    ["taxonomicclassifierid"],
                    processinglevelid=processing_level.processinglevelid,
                    valuecount=0,
                    statuscv="Complete",
                    sampledmediumcv=result_type_and_unit_dict[
                        method.methodname][2],
                    dataqualitycodes=[],
                )
                completed_result = await post_results(data_result, connection)
                if method.methodname == "Microscopic abundance":
                    data_categorical_result = schemas.CategoricalResultsCreate(
                        resultid=completed_result.resultid,
                        qualitycodecv="None",
                        datavalue=begroing_result.observations[result_index]
                        [method_index],
                        valuedatetime=begroing_result.date,
                        valuedatetimeutcoffset=0,
                    )
                    await post_categorical_results(data_categorical_result,
                                                   connection)
                else:
                    # A leading "<" marks a below-detection value: strip it
                    # and record the measurement as censored.
                    if begroing_result.observations[result_index][
                            method_index][0] == "<":
                        data_value = begroing_result.observations[
                            result_index][method_index][1:]
                        censor_code = "Less than"
                    else:
                        data_value = begroing_result.observations[
                            result_index][method_index]
                        censor_code = "Not censored"
                    data_measurement_result = schemas.MeasurementResultsCreate(
                        resultid=completed_result.resultid,
                        censorcodecv=censor_code,
                        qualitycodecv="None",
                        aggregationstatisticcv="Sporadic",
                        timeaggregationinterval=0,
                        timeaggregationintervalunitsid=seconds_unit.unitsid,
                        datavalue=data_value,
                        valuedatetime=begroing_result.date,
                        valuedatetimeutcoffset=0,
                    )
                    await post_measurement_results(data_measurement_result,
                                                   connection)
        # TODO: assuming that we have only one project. T*his should also be changed in API endpoint
        observations: List[BegroingObservationValues] = []
        for method_index, method_observations in observations_per_method.items(
        ):
            for result_index in method_observations:
                taxon = begroing_result.taxons[result_index]
                value = begroing_result.observations[result_index][
                    method_index]
                values = BegroingObservationValues(
                    taxon=TaxonomicClassifier(**taxon),
                    method=begroing_result.methods[method_index],
                    value=value,
                )
                observations.append(values)

        mapped = BegroingObservations(
            project=begroing_result.projects[0],
            date=begroing_result.date,
            station=begroing_result.station,
            observations=observations,
        )

        if strtobool(os.environ.get("WRITE_TO_AQUAMONITOR", "false")):
            await store_begroing_results(mapped)

    # TODO: Send email about new bucket_files

    return schemas.BegroingResult(personid=user.personid,
                                  **begroing_result.dict())
async def register_replicas(
        conn: asyncpg.connection,
        data: schemas.MsCreateReplicas) -> schemas.MsReplicas:
    """
    Register mass_spec raw data.

    First registers a water replica (a child of the main water sample) in the
    samplingfeature table through a "Specimen fractionation" action, then
    records the mass spectrometer run as a "Specimen analysis" action with a
    single annotated result whose link points at the raw data file.
    """
    async with conn.transaction():
        now = datetime.now()

        # Annotation carrying the replica's processing parameters.
        parameters_annotation = schemas.AnnotationsCreate(
            annotationjson=data.samplingfeatureannotationjson,
            annotationdatetime=now,
            annotationtypecv="Specimen annotation",
            annotationtext="Processing parameters",
            annotationutcoffset=0,
        )
        ms_annotation = await find_row(conn, "annotations", "annotationtext",
                                       "Non-target mass spectrometry",
                                       schemas.Annotations)
        replica = schemas.SamplingFeaturesCreate(
            samplingfeatureuuid=uuid.uuid4(),
            samplingfeaturecode=data.samplingfeaturecode,
            samplingfeaturetypecv="Specimen",
            relatedsamplingfeatures=[(data.parent_samplingfeatureid,
                                      "Is child of")],
            annotations=[parameters_annotation, ms_annotation.annotationid],
        )

        fractionation = schemas.ActionsCreate(
            actiondescription="Fractionate water sample",
            actiontypecv="Specimen fractionation",
            methodcode="mass_spec:fractionate_sample",
            begindatetime=now,
            sampling_features=[replica],
            affiliationid=1,
            isactionlead=True,
            begindatetimeutcoffset=0,
        )
        completed_fractionation = await core_queries.do_action(
            conn, fractionation)

        analysis = schemas.ActionsCreate(
            actiondescription="Operated mass spectrometer",
            actiontypecv="Specimen analysis",
            methodcode="mass_spec:create_data",
            begindatetime=now,
            sampling_features=[],
            affiliationid=1,
            isactionlead=True,
            begindatetimeutcoffset=0,
        )
        completed_analysis = await core_queries.do_action(conn, analysis)

        # Reference rows required by the result record.
        unit = await core_queries.find_unit(
            conn,
            schemas.UnitsCreate(unitstypecv="Dimensionless",
                                unitsabbreviation="-",
                                unitsname=""),
            raise_if_none=True)
        variable = await core_queries.find_row(
            conn,
            "variables",
            "variablecode",
            "mass_spec_00",
            schemas.Variables,
            raise_if_none=True,
        )
        processing_level = await core_queries.find_row(
            conn,
            "processinglevels",
            "processinglevelcode",
            "0",
            schemas.ProcessingLevels,
            raise_if_none=True,
        )
        # NOTE(review): this annotation carries no annotationdatetime or
        # annotationutcoffset — confirm that is intentional.
        raw_annotation = schemas.AnnotationsCreate(
            annotationlink=data.resultannotationlink,
            annotationtypecv="Result annotation",
            annotationcode="raw",
            annotationtext="Check link for file location",
        )
        raw_result = schemas.ResultsCreate(
            samplingfeaturecode=replica.samplingfeaturecode,
            statuscv="Complete",
            variableid=variable.variableid,
            unitsid=unit.unitsid,  # type: ignore
            processinglevelid=processing_level.processinglevelid,
            valuecount=0,
            resulttypecv="Measurement",
            sampledmediumcv="Liquid aqueous",
            annotations=[raw_annotation],
            resultdatetime=now,
            validdatetime=now,
            resultdatetimeutcoffset=0,
            validdatetimeutcoffset=0,
            actionid=completed_analysis.actionid,
            resultuuid=uuid.uuid4(),
        )
        completed_result = await core_queries.create_result(conn, raw_result)

    return schemas.MsReplicas(
        fractionate_sample=completed_fractionation,
        ran_mass_spec=completed_analysis,
        results=completed_result,
    )
# Example #5
# 0
async def post_indices(
        data: schemas.BegroingIndicesCreate,
        connection=Depends(api_pool_manager.get_conn),
        niva_user: str = Header(None),
) -> BegroingIndices:
    """
    Store calculated begroing indices.

    Registers one "Derivation" action for the whole submission, then writes a
    measurement result for every non-None index value in ``data.indices``.
    Returns the submitted data echoed back with the submitting person's id.

    Raises ValueError when an index variable is not found.
    """
    user = await create_or_get_user(connection, niva_user)

    data_action = schemas.ActionsCreate(
        affiliationid=user.affiliationid,
        isactionlead=True,
        methodcode="begroing_6",  # code begroing_6 = Begroing Index Calculation
        actiontypecv="Derivation",
        begindatetime=data.date,
        begindatetimeutcoffset=0,
        equipmentids=[],
        directiveids=data.project_ids,
    )
    async with connection.transaction():
        completed_action = await post_actions(data_action, connection)

        # Reference rows shared by every stored result.
        processing_level = await find_row(connection, "processinglevels",
                                          "processinglevelcode", "0",
                                          ProcessingLevels)
        dimensionless_unit = await find_unit(
            connection,
            UnitsCreate(
                unitstypecv="Dimensionless",
                unitsabbreviation="PrsAbs",
                unitsname="Presence or Absence",
            ),
            raise_if_none=True,
        )
        seconds_unit = await find_unit(
            connection,
            UnitsCreate(unitstypecv="Time",
                        unitsabbreviation="s",
                        unitsname="second"),
            raise_if_none=True,
        )

        for prefix, submitted_values in data.indices.items():
            for suffix, value in submitted_values.items():
                if value is None:
                    continue

                # The bare "INDEX" suffix maps to the prefix itself; any
                # other suffix is appended to form the variable name.
                if suffix == "INDEX":
                    indexing_type = prefix
                else:
                    indexing_type = prefix + " " + suffix

                variable = await find_row(
                    connection,
                    "variables",
                    "variablenamecv",
                    indexing_type,
                    Variables,
                )
                if variable is None:
                    raise ValueError(f"variable {indexing_type} not found")

                data_result = schemas.ResultsCreate(
                    samplingfeatureuuid=data.station_uuid,
                    actionid=completed_action.actionid,
                    resultuuid=uuid.uuid4(),
                    resulttypecv="Measurement",
                    variableid=variable.variableid,
                    unitsid=dimensionless_unit.unitsid,
                    processinglevelid=processing_level.processinglevelid,
                    valuecount=1,
                    statuscv="Complete",
                    sampledmediumcv="Organism",
                    dataqualitycodes=[],
                )
                completed_result = await post_results(data_result, connection)

                data_measurement_result = schemas.MeasurementResultsCreate(
                    resultid=completed_result.resultid,
                    censorcodecv="Not censored",
                    qualitycodecv="None",
                    aggregationstatisticcv="Unknown",
                    timeaggregationinterval=0,
                    timeaggregationintervalunitsid=seconds_unit.unitsid,
                    datavalue=value,
                    valuedatetime=data.date,
                    valuedatetimeutcoffset=0,
                )
                await post_measurement_results(data_measurement_result,
                                               connection)

    return schemas.BegroingIndices(personid=user.personid, **data.dict())