Example #1
    async def add_article(cls, conn: connection, article: ArticleCreation,
                          user: UserInfo) -> int:
        async with conn.transaction():
            try:
                s = await conn.fetchrow(
                    'insert into public."Article" (title, content, user_id) values ($1, $2, $3) returning id',
                    article.title, article.content, user.id)

                if not s['id']:
                    raise Exception()
            except Exception:
                raise HTTPException(400, 'Bad info')

            try:
                tags = await conn.fetch(
                    'select id, name, description from public."Tag" where name = ANY($1::text[])',
                    article.tags)
                print(tags)
                tags = list(dict(i) for i in tags)
                print(tags)

                for tag in tags:
                    t = await conn.fetchrow(
                        'insert into public."Article_Tag" (article_id, tag_id) values ($1, $2) returning article_id',
                        s['id'], tag.get('id'))

                    if not t['article_id']:
                        raise Exception()
            except Exception:
                raise HTTPException(400, 'Tags are incorrect')

        return s['id']
Example #2
    async def del_comment(cls, conn: connection, comment_id: int) -> int:
        async with conn.transaction():
            try:
                return await conn.fetchval(
                    '''delete from public."Comment" where id = $1 returning id''',
                    comment_id)
            except Exception:
                raise HTTPException(400, "Can't delete comment")
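
For context, here is a minimal, self-contained sketch of the same transaction-plus-RETURNING pattern outside the surrounding class and FastAPI error handling. The DSN and the presence of the "Comment" table are assumptions, not part of the example above:

import asyncio

import asyncpg


async def delete_comment(conn: asyncpg.Connection, comment_id: int) -> int:
    # Same pattern as above: one transaction wrapping a single fetchval call;
    # fetchval returns the value of the RETURNING clause, or None if no row matched.
    async with conn.transaction():
        return await conn.fetchval(
            'delete from public."Comment" where id = $1 returning id',
            comment_id)


async def main() -> None:
    # Placeholder DSN; point this at a database that actually has the "Comment" table.
    conn = await asyncpg.connect("postgresql://user:password@localhost/blog")
    try:
        print(await delete_comment(conn, 42))
    finally:
        await conn.close()


# asyncio.run(main())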
Example #3
async def upsert_track_result(conn: asyncpg.connection,
                              track_result: schemas.TrackResultsCreate):
    await shapely_postgres_adapter.set_shapely_adapter(conn)
    async with conn.transaction():
        row = await conn.fetchrow(
            "SELECT samplingfeatureid FROM featureactions WHERE featureactionid = "
            "(SELECT featureactionid FROM results WHERE resultid = $1)",
            track_result.resultid,
        )

        if row["samplingfeatureid"] != track_result.samplingfeatureid:
            raise ValueError("THIS IS ALLL WROOONGNGNGNGNGNNG")
        row = await conn.fetchrow(
            "INSERT INTO trackresults (resultid, intendedtimespacing, intendedtimespacingunitsid, "
            "aggregationstatisticcv) VALUES ($1, $2, $3, $4) "
            "ON CONFLICT (resultid) DO UPDATE SET intendedtimespacing = EXCLUDED.intendedtimespacing returning *",
            track_result.resultid,
            track_result.intendedtimespacing,
            track_result.intendedtimespacingunitsid,
            track_result.aggregationstatisticcv,
        )

        if track_result.track_result_locations:
            location_records = (
                (rec[0], shapely.wkt.loads(f"POINT({rec[1]} {rec[2]})"),
                 rec[3], track_result.samplingfeatureid)
                for rec in track_result.track_result_locations)
            await conn.executemany(
                "INSERT INTO trackresultlocations (valuedatetime, trackpoint, qualitycodecv, "
                "samplingfeatureid) VALUES ($1, ST_SetSRID($2::geometry, 4326), $3, $4) "
                "ON CONFLICT (valuedatetime, samplingfeatureid) DO UPDATE SET "
                "trackpoint = excluded.trackpoint, qualitycodecv = excluded.qualitycodecv",
                location_records,
            )
            # result = await conn.copy_records_to_table(table_name='trackresultlocations',
            #                                           records=location_records, schema_name='odm2')
            # logging.info(result)

        if track_result.track_result_values:
            value_records = ((rec[0], rec[1], rec[2], track_result.resultid)
                             for rec in track_result.track_result_values)
            await conn.executemany(
                "INSERT INTO trackresultvalues (valuedatetime, datavalue, qualitycodecv, resultid) "
                "VALUES ($1, $2, $3, $4) ON CONFLICT (valuedatetime, resultid) DO UPDATE SET "
                "datavalue = excluded.datavalue, qualitycodecv = excluded.qualitycodecv",
                value_records,
            )
            # result = await conn.copy_records_to_table(table_name='trackresultvalues',
            #                                           records=records, schema_name='odm2')
            # logging.info(result)

    return schemas.TrackResultsReport(
        samplingfeatureid=track_result.samplingfeatureid,
        inserted_track_result_values=len(track_result.track_result_values),
        inserted_track_result_locations=len(
            track_result.track_result_locations),
        **row,
    )
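
Example #3 above relies on conn.executemany, which runs one prepared statement for every element of an iterable of argument tuples; a generator works fine. A small standalone sketch of that pattern (the DSN and the temporary table are assumptions):

import asyncio
from datetime import datetime, timedelta

import asyncpg


async def main() -> None:
    # Placeholder DSN.
    conn = await asyncpg.connect("postgresql://user:password@localhost/mydb")
    try:
        await conn.execute(
            "CREATE TEMP TABLE samples (valuedatetime timestamp, datavalue float8)")
        start = datetime(2024, 1, 1)
        # executemany accepts any iterable of argument tuples, including a generator,
        # and in recent asyncpg versions runs all of the inserts atomically.
        records = ((start + timedelta(minutes=i), float(i)) for i in range(10))
        await conn.executemany(
            "INSERT INTO samples (valuedatetime, datavalue) VALUES ($1, $2)", records)
        print(await conn.fetchval("SELECT count(*) FROM samples"))
    finally:
        await conn.close()


# asyncio.run(main())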
Example #4
async def create_result(conn: asyncpg.connection,
                        result: schemas.ResultsCreate):
    async with conn.transaction():
        feature_action_create = schemas.FeatureActionsCreate(
            samplingfeatureuuid=result.samplingfeatureuuid,
            samplingfeaturecode=result.samplingfeaturecode,
            actionid=result.actionid,
        )
        feature_action_row = await create_feature_action(
            conn, feature_action_create)
        result_row = await conn.fetchrow(
            "INSERT INTO results (resultuuid, featureactionid, resulttypecv, variableid, unitsid,"
            "taxonomicclassifierid, processinglevelid, resultdatetime, resultdatetimeutcoffset, validdatetime,"
            "validdatetimeutcoffset, statuscv, sampledmediumcv, valuecount) "
            "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) returning *",
            result.resultuuid,
            feature_action_row.featureactionid,
            result.resulttypecv,
            result.variableid,
            result.unitsid,
            result.taxonomicclassifierid,
            result.processinglevelid,
            result.resultdatetime,
            result.resultdatetimeutcoffset,
            result.validdatetime,
            result.validdatetimeutcoffset,
            result.statuscv,
            result.sampledmediumcv,
            result.valuecount,
        )
        for data_quality_code in result.dataqualitycodes:
            await create_result_data_quality(
                conn,
                schemas.ResultsDataQualityCreate(
                    resultid=result_row["resultid"],
                    dataqualitycode=data_quality_code),
            )
        for annotation_id in await create_or_parse_annotations(
                conn, result.annotations):
            await conn.fetchrow(
                "INSERT INTO resultannotations (resultid, annotationid, begindatetime, enddatetime) "
                "Values ($1, $2, $3, $4) returning *",
                result_row["resultid"],
                annotation_id,
                result.resultdatetime,
                result.validdatetime,
            )
    # Dict allows overwriting of key while pydantic schema does not, featureactionid exists in both return rows
    return schemas.Results(dataqualitycodes=result.dataqualitycodes,
                           **{
                               **result_row,
                               **dict(feature_action_row)
                           })
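
The comment above points at a small Python detail: unpacking two mappings that share a key directly into a call raises TypeError ("got multiple values for keyword argument ..."), while merging them into one dict first simply lets the right-hand mapping win. A tiny sketch with illustrative field names (not the real schema):

result_row = {"resultid": 10, "featureactionid": 1}
feature_action_row = {"featureactionid": 1, "samplingfeatureid": 7}

# dict(**result_row, **feature_action_row) would raise:
#   TypeError: ... got multiple values for keyword argument 'featureactionid'

# Merging first makes the duplicate key harmless; this is what the example does
# before unpacking the combined row into the pydantic schema.
merged = {**result_row, **feature_action_row}
assert merged == {"resultid": 10, "featureactionid": 1, "samplingfeatureid": 7}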
Example #5
    async def add_comment(cls, conn: connection, comment: CommentCreation,
                          user: UserInfo) -> CommentInfo:
        async with conn.transaction():
            try:
                comment_id = await conn.fetchval(
                    '''
                insert into public."Comment" (content, article_id, user_id) values ($1, $2, $3) returning id
                ''', comment.content, comment.article_id, user.id)

                return await cls.get_comment_by_id(conn, comment_id)
            except Exception:
                raise HTTPException(400, "Can't add comment")
Example #6
    async def upd_comment_of_article(cls, conn: connection,
                                     comment: CommentUpdate) -> CommentInfo:
        async with conn.transaction():
            try:
                comment_id = await conn.fetchval(
                    '''update public."Comment"
                    set content = $1, added = NOW()
                    where id = $2 returning id''', comment.content, comment.id)

                return await cls.get_comment_by_id(conn, comment_id)

            except Exception:
                raise HTTPException(400, "Can't update comment")
Example #7
async def insert_method(conn: asyncpg.connection,
                        method: schemas.MethodsCreate):
    method_data = {k: v for k, v in method if k != "annotations"}
    async with conn.transaction():
        method_row = await conn.fetchrow(
            make_sql_query("methods", method_data), *method_data.values())
        for annotation_id in await create_or_parse_annotations(
                conn, method.annotations):
            await conn.fetchrow(
                "INSERT INTO methodannotations (methodid, annotationid) Values ($1, $2) returning *",
                method_row["methodid"],
                annotation_id,
            )
    return schemas.Methods(annotations=method.annotations, **method_row)
Example #8
async def register_sample(conn: asyncpg.connection,
                          data: schemas.MsCreateSample) -> int:
    """
    This function registers the main sample collected in samplingfeature table.
    """
    async with conn.transaction():
        samplingfeatureid = await get_samplingfeatureid_from_samplingfeaturecode(
            conn, data.samplingfeaturecode)
        if samplingfeatureid is None:

            annotation = await find_row(conn, "annotations", "annotationtext",
                                        "Non-target mass spectrometry",
                                        schemas.Annotations)
            annotation2 = await find_row(conn, "annotations", "annotationtext",
                                         data.sampletype, schemas.Annotations)
            sampling_feature = schemas.SamplingFeaturesCreate(
                samplingfeatureuuid=uuid.uuid4(),
                samplingfeaturecode=data.samplingfeaturecode,
                samplingfeaturetypecv="Specimen",
                annotations=[
                    annotation.annotationid, annotation2.annotationid
                ],
            )
            logging.info(f"directivename {data.directivename}")
            directive = await find_row(conn,
                                       "directives",
                                       "directivename",
                                       data.directivename,
                                       schemas.Directive,
                                       raise_if_none=True)
            ms_sample_data = schemas.ActionsCreate(
                actiondescription="Registered water sample",
                begindatetime=datetime.utcnow(),
                actiontypecv="Specimen collection",
                methodcode="mass_spec:collect_sample",
                isactionlead=True,
                sampling_features=[sampling_feature],
                directiveids=[directive.directiveid],
                affiliationid=1,
                begindatetimeutcoffset=0,
            )

            completed_sample = await core_queries.do_action(
                conn, ms_sample_data)
            samplingfeatureid = completed_sample.sampling_features[
                0].samplingfeatureid

    return samplingfeatureid
Example #9
async def create_sampling_feature(
        conn: asyncpg.connection,
        sampling_feature: schemas.SamplingFeaturesCreate):
    # Todo: Find a better way to deal with 'Null' geometry
    if sampling_feature.featuregeometrywkt:
        await shapely_postgres_adapter.set_shapely_adapter(conn)
        featuregeometry = shapely.wkt.loads(
            sampling_feature.featuregeometrywkt)
    else:
        featuregeometry = None

    async with conn.transaction():
        sampling_row = await conn.fetchrow(
            "INSERT INTO samplingfeatures (samplingfeatureuuid, samplingfeaturetypecv, samplingfeaturecode, "
            "samplingfeaturename, samplingfeaturedescription, samplingfeaturegeotypecv, featuregeometry, "
            "featuregeometrywkt, elevation_m, elevationdatumcv) "
            "VALUES ($1, $2, $3, $4, $5, $6, "
            "ST_SetSRID($7::geometry, 4326), $8, $9, $10) returning *",
            sampling_feature.samplingfeatureuuid,
            sampling_feature.samplingfeaturetypecv,
            sampling_feature.samplingfeaturecode,
            sampling_feature.samplingfeaturename,
            sampling_feature.samplingfeaturedescription,
            sampling_feature.samplingfeaturegeotypecv,
            featuregeometry,
            sampling_feature.featuregeometrywkt,
            sampling_feature.elevation_m,
            sampling_feature.elevationdatumcv,
        )
        for sampling_feature_id, relation_ship_type in sampling_feature.relatedsamplingfeatures:
            related_sampling_feature_create = schemas.RelatedSamplingFeatureCreate(
                samplingfeatureid=sampling_row["samplingfeatureid"],
                relationshiptypecv=relation_ship_type,
                relatedfeatureid=sampling_feature_id,
            )
            await insert_pydantic_object(conn, "relatedfeatures",
                                         related_sampling_feature_create,
                                         schemas.RelatedSamplingFeature)
        for annotation_id in await create_or_parse_annotations(
                conn, sampling_feature.annotations):
            await conn.fetchrow(
                "INSERT INTO samplingfeatureannotations (samplingfeatureid, annotationid) "
                "Values ($1, $2) returning *",
                sampling_row["samplingfeatureid"],
                annotation_id,
            )
    return schemas.SamplingFeatures(**sampling_row)
Example #10
    async def upd_article(cls, conn: connection,
                          article_upd: ArticleUpdate) -> ArticleInfo:

        article = await cls.get_article_by_id(conn, article_upd.id)

        async with conn.transaction():
            res = await conn.fetchval(
                'update public."Article" set'
                '  title = $1, content = $2, is_private = $3'
                '  where id = $4 returning id', article_upd.title,
                article_upd.content, article_upd.is_private, article_upd.id)

            if not res:
                raise HTTPException(400, 'Not updated')

            to_add_tags = list(i for i in article_upd.tags
                               if i not in article.tags)

            to_add_tags = await conn.fetch(
                'select id from public."Tag" where name = ANY($1::text[])',
                to_add_tags)

            to_add_tags = list(i['id'] for i in to_add_tags)

            for i in to_add_tags:
                if not await conn.fetchval(
                        'insert into public."Article_Tag" (article_id, tag_id) values ($1, $2) returning article_id',
                        article_upd.id, i):
                    raise HTTPException(400, "Error adding tags")

            to_del_tags = list(i for i in article.tags
                               if i not in article_upd.tags)

            to_del_tags = await conn.fetch(
                'select id from public."Tag" where name = ANY($1::text[])',
                to_del_tags)

            to_del_tags = list(i['id'] for i in to_del_tags)

            for i in to_del_tags:
                if not await conn.fetchval(
                        'delete from public."Article_Tag" where article_id = $1 and tag_id = $2 returning article_id',
                        article_upd.id, i):
                    raise HTTPException(400, 'Error deleting tags')

        return await cls.get_article_by_id(conn, article_upd.id)
Example #11
async def create_or_get_user(conn: asyncpg.connection,
                             user_id_header: str) -> StoredPerson:
    """
    Retrieves stored users based on their email as a unique identifier.

    If the user is not found in the people/affiliations tables, new rows are inserted

    TODO: handle name updates, see issue https://github.com/NIVANorge/odm2-postgres-api/issues/44
    """
    user = get_nivaport_user(user_id_header=user_id_header)
    people_row = await conn.fetchrow(
        "SELECT p.*, a.affiliationid, a.primaryemail from odm2.people p "
        "inner join odm2.affiliations a "
        "on p.personid=a.personid WHERE a.primaryemail = $1",
        user.email,
    )

    if not people_row:
        async with conn.transaction():
            name = full_name_to_split_tuple(user.displayName)
            person = await conn.fetchrow(
                "INSERT INTO odm2.people (personfirstname, personmiddlename, personlastname) "
                "VALUES ($1, $2, $3) "
                "RETURNING *",
                *name,
            )

            # TODO: affiliationstartdate should really be fetched from loginprovider,
            #  see https://github.com/NIVANorge/niva-port/issues/188
            # TODO: add in additional fields, like organization++
            affiliation = await conn.fetchrow(
                "INSERT INTO odm2.affiliations (personid, affiliationstartdate, primaryemail) "
                "VALUES ($1, $2, $3) "
                "RETURNING *",
                person["personid"],
                datetime.utcnow(),
                user.email,
            )
            return StoredPerson(**{**person, **affiliation})
    return StoredPerson(**people_row)
Example #12
async def upsert_categorical_result(
        conn: asyncpg.connection,
        categorical_result: schemas.CategoricalResultsCreate):
    async with conn.transaction():
        value_keys = ["datavalue", "valuedatetime", "valuedatetimeutcoffset"]
        categoricalresults_data = {
            k: v
            for k, v in categorical_result if k not in value_keys
        }
        categoricalresultvalues_data = {
            k: v
            for k, v in categorical_result
            if k in value_keys or k == "resultid"
        }

        await conn.fetchrow(
            make_sql_query("categoricalresults", categoricalresults_data),
            *categoricalresults_data.values())
        await conn.fetchrow(
            make_sql_query("categoricalresultvalues",
                           categoricalresultvalues_data),
            *categoricalresultvalues_data.values(),
        )
Example #13
async def upsert_measurement_result(
        conn: asyncpg.connection,
        measurement_result: schemas.MeasurementResultsCreate):
    async with conn.transaction():
        value_keys = ["datavalue", "valuedatetime", "valuedatetimeutcoffset"]
        measurementresults_data = {
            k: v
            for k, v in measurement_result if k not in value_keys
        }
        measurementresultvalues_data = {
            k: v
            for k, v in measurement_result
            if k in value_keys or k == "resultid"
        }

        await conn.fetchrow(
            make_sql_query("measurementresults", measurementresults_data),
            *measurementresults_data.values())
        await conn.fetchrow(
            make_sql_query("measurementresultvalues",
                           measurementresultvalues_data),
            *measurementresultvalues_data.values(),
        )
Example #14
async def insert_taxonomic_classifier(
        conn: asyncpg.connection, taxon: schemas.TaxonomicClassifierCreate):
    taxon_data = {
        k: v
        for k, v in taxon
        if k not in ["annotations", "relatedtaxonomicclassifiers"]
    }
    async with conn.transaction():
        taxon_row = await conn.fetchrow(
            make_sql_query("taxonomicclassifiers", taxon_data),
            *taxon_data.values())
        logging.info(taxon_row)

        for related_taxonomicclassifier_id, relation_ship_type in taxon.relatedtaxonomicclassifiers:
            related_taxonomicclassifier_create = schemas.RelatedTaxonomicClassifierCreate(
                taxonomicclassifierid=taxon_row["taxonomicclassifierid"],
                relationshiptypecv=relation_ship_type,
                relatedtaxonomicclassifierid=related_taxonomicclassifier_id,
            )
            await insert_pydantic_object(
                conn,
                "relatedtaxonomicclassifiers",
                related_taxonomicclassifier_create,
                schemas.RelatedTaxonomicClassifier,
            )

        for annotation_id in await create_or_parse_annotations(
                conn, taxon.annotations):
            await conn.fetchrow(
                "INSERT INTO TaxonomicClassifiersAnnotations (taxonomicclassifierid, annotationid) "
                "Values ($1, $2) returning *",
                taxon_row["taxonomicclassifierid"],
                annotation_id,
            )
    return schemas.TaxonomicClassifier(annotations=taxon.annotations,
                                       **taxon_row)
Example #15
async def register_output(conn: asyncpg.connection,
                          data: schemas.MsCreateOutput):
    """
    This function registers mass_spec processed data.
    It registers the results of a derivation with the given methodcode in the result table
    and the corresponding action of derivation in the action table.
    """
    async with conn.transaction():
        action_time = datetime.now()
        ran_ms_convert = schemas.ActionsCreate(
            actiontypecv="Derivation",
            methodcode=data.methodcode,
            begindatetime=action_time,
            sampling_features=[],
            affiliationid=1,
            isactionlead=True,
            begindatetimeutcoffset=0,
        )

        completed_ran_ms_convert = await core_queries.do_action(
            conn, ran_ms_convert)
        units_create = schemas.UnitsCreate(unitstypecv="Dimensionless",
                                           unitsabbreviation="-",
                                           unitsname="")
        units = await core_queries.find_unit(conn,
                                             units_create,
                                             raise_if_none=True)
        variables = await core_queries.find_row(
            conn,
            "variables",
            "variablecode",
            data.variablecode,
            schemas.Variables,
            raise_if_none=True,
        )
        processinglevels = await core_queries.find_row(
            conn,
            "processinglevels",
            "processinglevelcode",
            "0",
            schemas.ProcessingLevels,
            raise_if_none=True,
        )
        resultannotation = schemas.AnnotationsCreate(
            annotationlink=data.resultannotationlink,
            annotationtypecv="Result annotation",
            annotationcode=data.resultannotationcode,
            annotationtext="Check link for file location",
            annotationjson=data.resultannotationjson,
            annotationdatetime=action_time,
            annotationutcoffset=0,
        )

        result = schemas.ResultsCreate(
            samplingfeaturecode=data.samplingfeaturecode,
            statuscv="Complete",
            variableid=variables.variableid,
            unitsid=units.unitsid,  # type: ignore
            processinglevelid=processinglevels.processinglevelid,
            valuecount=0,
            resulttypecv="Measurement",
            sampledmediumcv="Liquid aqueous",
            annotations=[resultannotation],
            resultdatetime=action_time,
            validdatetime=action_time,
            resultdatetimeutcoffset=0,
            validdatetimeutcoffset=0,
            actionid=completed_ran_ms_convert.actionid,
            resultuuid=uuid.uuid4(),
        )

        completed_result = await core_queries.create_result(conn, result)

    return schemas.MsOutput(action=completed_ran_ms_convert,
                            result=completed_result)
Example #16
async def update(connection: asyncpg.connection) -> None:
    schema = """
CREATE TYPE versionstate AS ENUM('success', 'failed', 'deploying', 'pending');
CREATE TYPE resourcestate AS ENUM('unavailable', 'skipped', 'dry', 'deployed', 'failed', 'deploying', 'available',
                                  'cancelled', 'undefined', 'skipped_for_undefined', 'processing_events');
CREATE TYPE resourceaction_type AS ENUM('store', 'push', 'pull', 'deploy', 'dryrun', 'getfact', 'other');
CREATE type change AS ENUM('nochange', 'created', 'purged', 'updated');


-- Table: public.projects
CREATE TABLE IF NOT EXISTS public.project (
    id uuid PRIMARY KEY,
    name varchar NOT NULL UNIQUE
);

-- Table: public.environments
CREATE TABLE IF NOT EXISTS public.environment (
    id uuid PRIMARY KEY,
    name varchar NOT NULL,
    project uuid NOT NULL REFERENCES project(id) ON DELETE CASCADE,
    repo_url varchar DEFAULT '',
    repo_branch varchar DEFAULT '',
    settings JSONB DEFAULT '{}'
);

CREATE UNIQUE INDEX environment_name_project_index ON environment (project, name);

-- Table: public.configurationmodels
CREATE TABLE IF NOT EXISTS public.configurationmodel (
    version integer NOT NULL,
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    date timestamp,
    released boolean DEFAULT false,
    deployed boolean DEFAULT false,
    result versionstate DEFAULT 'pending',
    version_info JSONB,
    total integer DEFAULT 0,
    undeployable varchar[],
    skipped_for_undeployable varchar[],
    PRIMARY KEY(environment, version)
);

-- Used in:
--      * data.mark_done_if_done()
-- => This query is executed frequently
CREATE UNIQUE INDEX configurationmodel_env_version_total_index ON configurationmodel (environment, version DESC, total);
-- Used in:
--      * data.get_latest_version()
--      * data.get_version_nr_latest_version()
--      * data.get_increment()
-- => Prevent sort operation on column version
CREATE UNIQUE INDEX configurationmodel_env_released_version_index ON configurationmodel (environment, released, version DESC);

-- Table: public.resources
CREATE TABLE IF NOT EXISTS public.resource (
    environment uuid NOT NULL,
    model integer NOT NULL,
    resource_id varchar NOT NULL,
    resource_version_id varchar NOT NULL,
    agent varchar NOT NULL,
    last_deploy timestamp,
    attributes JSONB,
    attribute_hash varchar,
    status resourcestate DEFAULT 'available',
    provides varchar[] DEFAULT array[]::varchar[],
    PRIMARY KEY(environment, resource_version_id),
    FOREIGN KEY (environment, model) REFERENCES configurationmodel (environment, version) ON DELETE CASCADE
);

-- Used in:
--      * data.get_resources_for_attribute_hash()
CREATE INDEX resource_env_attr_hash_index ON resource (environment, attribute_hash);
-- Used in:
--      * data.get_resources_for_version()
--      * data.get_deleted_resources()
-- => Prevent sequential scan through all resources
CREATE INDEX resource_env_model_agent_index ON resource (environment, model, agent);
-- Used in:
--      * data.get_resources_report()
--      * data.get_latest_version()
--      * data.get_deleted_resources()
-- => Prevent sequential scan through all resources
-- => Prevent sort operation on column model
CREATE INDEX resource_env_resourceid_index ON resource (environment, resource_id, model DESC);
-- Used in:
--      * data.get_deleted_resources()
-- => Prevent costly search through jsonb structures
CREATE INDEX resource_attributes_index ON resource USING gin (attributes jsonb_path_ops);

-- Table: public.resourceaction
CREATE TABLE IF NOT EXISTS public.resourceaction (
    action_id uuid PRIMARY KEY NOT NULL,
    action resourceaction_type NOT NULL,
    started timestamp NOT NULL,
    finished timestamp,
    messages JSONB[],
    status resourcestate,
    changes JSONB DEFAULT '{}'::jsonb,
    change change,
    send_event boolean
);

-- Used in:
--      * data.get_log()
-- => Prevent sort operation on column started
CREATE UNIQUE INDEX resourceaction_action_id_started_index ON resourceaction (action_id, started DESC);
-- Used in:
--      * data.purge_logs()
-- => Prevent sequential scan through all resource actions
CREATE INDEX resourceaction_started_index ON resourceaction (started);

-- Table: public.resourceversionid
-- TODO: FK CONSTRAINT???
CREATE TABLE IF NOT EXISTS public.resourceversionid (
    environment uuid NOT NULL,
    action_id uuid NOT NULL REFERENCES resourceaction (action_id) ON DELETE CASCADE,
    resource_version_id varchar NOT NULL,
    PRIMARY KEY(environment, action_id, resource_version_id)
);

-- Used in:
--      * data.ResourceAction.get_log()
-- => Prevent sequential scan through all resourceversionids in a certain environment
CREATE INDEX resourceversionid_environment_resource_version_id_index ON resourceversionid (environment, resource_version_id);
-- Used in:
--      * data.ResourceAction.get_by_id()
--      * data.ResourceAction.get_list
--      * data.ResourceAction.get_log()
-- => Prevent sequential scan through all resourceversionids
CREATE INDEX resourceversionid_action_id_index ON resourceversionid (action_id);

-- Table: public.code
-- There is no foreign key constraint from code to configurationmodel, since the code is uploaded
-- to the server before the configuration model is created. Working the other way around results
-- in a configuration model which doesn't have the code required to deploy the model.
CREATE TABLE IF NOT EXISTS public.code (
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    resource varchar NOT NULL,
    version integer NOT NULL,
    source_refs JSONB,
    PRIMARY KEY(environment, version, resource)
);

-- Table: public.unknownparameter
CREATE TABLE IF NOT EXISTS public.unknownparameter (
    id uuid PRIMARY KEY,
    name varchar NOT NULL,
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    source varchar NOT NULL,
    resource_id varchar DEFAULT '',
    version integer NOT NULL,
    metadata JSONB,
    resolved boolean DEFAULT false,
    FOREIGN KEY (environment, version) REFERENCES configurationmodel (environment, version) ON DELETE CASCADE
);

-- Used in:
--      * server.get_version()
CREATE INDEX unknownparameter_env_version_index ON unknownparameter (environment, version);
-- Used in:
--      * server.renew_expired_facts()
CREATE INDEX unknownparameter_resolved_index ON unknownparameter (resolved);

-- Table: public.agentprocess
CREATE TABLE IF NOT EXISTS public.agentprocess (
    hostname varchar NOT NULL,
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    first_seen timestamp,
    last_seen timestamp,
    expired timestamp,
    sid uuid NOT NULL PRIMARY KEY
);

-- Used in:
--      * data.get_live()
--      * data.get_live_by_env()
--      * data.get_by_env()
-- => Speed up search for records which have expired set to NULL
CREATE UNIQUE INDEX agentprocess_sid_expired_index ON agentprocess (sid, expired);
-- Used in:
--      * data.get_by_sid()
-- => Prevent sequential scan through all agentprocesses
CREATE INDEX agentprocess_env_expired_index ON agentprocess (environment, expired);

-- Table: public.agentinstance
CREATE TABLE IF NOT EXISTS public.agentinstance (
    id uuid PRIMARY KEY,
    process uuid NOT NULL REFERENCES agentprocess (sid) ON DELETE CASCADE,
    name varchar NOT NULL,
    expired timestamp,
    -- tid is an environment id
    tid uuid NOT NULL
);

-- Used in:
--      * data.active_for()
-- => Prevent sequential scan through all agentinstances
CREATE INDEX agentinstance_expired_tid_endpoint_index ON agentinstance (tid, name, expired);
-- Used in:
--      * expire_session()
-- => Prevent sequential scan through all agentinstances
CREATE INDEX agentinstance_process_index ON agentinstance (process);

-- Table: public.agent
CREATE TABLE IF NOT EXISTS public.agent (
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    name varchar NOT NULL,
    last_failover timestamp,
    paused boolean DEFAULT false,
-- primary is a reserved keyword in postgresql ==> change to id_primary
    id_primary uuid REFERENCES agentinstance(id) ON DELETE CASCADE,
    PRIMARY KEY(environment, name)
);

-- Table: public.parameter
CREATE TABLE IF NOT EXISTS public.parameter (
    id uuid PRIMARY KEY,
    name varchar NOT NULL,
    value varchar NOT NULL DEFAULT '',
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    resource_id varchar DEFAULT '',
    source varchar NOT NULL,
    updated timestamp,
    metadata JSONB
);

-- Used in:
--      * data.get_updated_before()
-- => Prevent sequential scan through all parameters
CREATE INDEX parameter_updated_index ON parameter (updated);
-- Used in:
--      * data.list_parameters()
--      * server.get_param()
--      * server.resource_action_update()
--      * server.set_param()
-- => Prevent sequential scan through all parameters
CREATE INDEX parameter_env_name_resource_id_index ON parameter (environment, name, resource_id);
-- Used in:
--      * data.list_parameters()
-- => Prevent costly search through jsonb structures
CREATE INDEX parameter_metadata_index ON parameter USING gin (metadata jsonb_path_ops);

-- Table: public.form
CREATE TABLE IF NOT EXISTS public.form (
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    form_type varchar NOT NULL UNIQUE,
    options JSONB,
    fields JSONB,
    defaults JSONB,
    field_options JSONB,
    PRIMARY KEY(environment, form_type)
);

-- Table: public.formrecord
CREATE TABLE IF NOT EXISTS public.formrecord(
    id uuid PRIMARY KEY,
    form varchar NOT NULL,
    environment uuid NOT NULL,
    fields JSONB,
    changed timestamp,
    FOREIGN KEY (form) REFERENCES form(form_type) ON DELETE CASCADE
);

-- Used in:
--      * server.list_records()
-- => Prevent sequential scan through all formrecords
CREATE INDEX formrecord_form_index ON formrecord (form);

-- Table: public.compile
CREATE TABLE IF NOT EXISTS public.compile(
    id uuid PRIMARY KEY,
    environment uuid NOT NULL REFERENCES environment(id) ON DELETE CASCADE,
    started timestamp,
    completed timestamp
);

-- Used in:
--      * data.get_reports()
-- => Prevent sort operation on started
CREATE INDEX compile_env_started_index ON compile (environment, started DESC);

-- Table: public.report
CREATE TABLE IF NOT EXISTS public.report(
    id uuid PRIMARY KEY,
    started timestamp NOT NULL,
    completed timestamp NOT NULL,
    command varchar NOT NULL,
    name varchar NOT NULL,
    errstream varchar DEFAULT '',
    outstream varchar DEFAULT '',
    returncode integer,
    compile uuid NOT NULL REFERENCES compile(id) ON DELETE CASCADE
);

-- Used in:
--      * data.get_report()
-- => Prevent sequential scan through all reports
CREATE INDEX report_compile_index ON report (compile);

-- Table: public.dryrun
CREATE TABLE IF NOT EXISTS public.dryrun(
    id uuid PRIMARY KEY,
    environment uuid NOT NULL,
    model integer NOT NULL,
    date timestamp,
    total integer DEFAULT 0,
    todo integer DEFAULT 0,
    resources JSONB DEFAULT '{}'::jsonb,
    FOREIGN KEY (environment, model) REFERENCES configurationmodel (environment, version) ON DELETE CASCADE
);

-- Used in:
--      * server.dryrun_list()
-- => Prevent sequential scan through all dryruns
CREATE INDEX dryrun_env_model_index ON dryrun (environment, model);
"""
    async with connection.transaction():
        await connection.execute(schema)
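
A minimal sketch of how a migration function like update above might be invoked; the DSN is a placeholder. connection.execute with no extra arguments can run a script of multiple statements in one call, and the surrounding transaction makes it all-or-nothing:

import asyncio

import asyncpg


async def migrate() -> None:
    # Placeholder DSN; point this at the target database.
    conn = await asyncpg.connect("postgresql://user:password@localhost/mydb")
    try:
        # update() (defined above) wraps the whole schema script in one transaction,
        # so a failure in any CREATE statement rolls back all of them.
        await update(conn)
    finally:
        await conn.close()


# asyncio.run(migrate())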
Example #17
async def register_replicas(
        conn: asyncpg.connection,
        data: schemas.MsCreateReplicas) -> schemas.MsReplicas:
    """
    This function registers mass_spec raw data.
    It first registers a water replica (a child of the main water sample) in the samplingfeature table
    and the corresponding action of specimen fractionation in the action table.
    Next it registers the results of operating the mass spectrometer in the result table
    and the corresponding action of specimen analysis in the action table.
    """
    async with conn.transaction():
        action_time = datetime.now()
        parameters_annotation = schemas.AnnotationsCreate(
            annotationjson=data.samplingfeatureannotationjson,
            annotationdatetime=action_time,
            annotationtypecv="Specimen annotation",
            annotationtext="Processing parameters",
            annotationutcoffset=0,
        )

        ms_annotation = await find_row(conn, "annotations", "annotationtext",
                                       "Non-target mass spectrometry",
                                       schemas.Annotations)
        sampling_feature = schemas.SamplingFeaturesCreate(
            samplingfeatureuuid=uuid.uuid4(),
            samplingfeaturecode=data.samplingfeaturecode,
            samplingfeaturetypecv="Specimen",
            relatedsamplingfeatures=[(data.parent_samplingfeatureid,
                                      "Is child of")],
            annotations=[parameters_annotation, ms_annotation.annotationid],
        )

        fractionate_sample_data = schemas.ActionsCreate(
            actiondescription="Fractionate water sample",
            actiontypecv="Specimen fractionation",
            methodcode="mass_spec:fractionate_sample",
            begindatetime=action_time,
            sampling_features=[sampling_feature],
            affiliationid=1,
            isactionlead=True,
            begindatetimeutcoffset=0,
        )
        completed_fractionate_sample = await core_queries.do_action(
            conn, fractionate_sample_data)
        ran_mass_spec_data = schemas.ActionsCreate(
            actiondescription="Operated mass spectrometer",
            actiontypecv="Specimen analysis",
            methodcode="mass_spec:create_data",
            begindatetime=action_time,
            sampling_features=[],
            affiliationid=1,
            isactionlead=True,
            begindatetimeutcoffset=0,
        )
        completed_ran_mass_spec = await core_queries.do_action(
            conn, ran_mass_spec_data)
        units_create = schemas.UnitsCreate(unitstypecv="Dimensionless",
                                           unitsabbreviation="-",
                                           unitsname="")
        units = await core_queries.find_unit(conn,
                                             units_create,
                                             raise_if_none=True)
        variables = await core_queries.find_row(
            conn,
            "variables",
            "variablecode",
            "mass_spec_00",
            schemas.Variables,
            raise_if_none=True,
        )
        processinglevels = await core_queries.find_row(
            conn,
            "processinglevels",
            "processinglevelcode",
            "0",
            schemas.ProcessingLevels,
            raise_if_none=True,
        )
        resultannotation = schemas.AnnotationsCreate(
            annotationlink=data.resultannotationlink,
            annotationtypecv="Result annotation",
            annotationcode="raw",
            annotationtext="Check link for file location",
        )
        result = schemas.ResultsCreate(
            samplingfeaturecode=sampling_feature.samplingfeaturecode,
            statuscv="Complete",
            variableid=variables.variableid,
            unitsid=units.unitsid,  # type: ignore
            processinglevelid=processinglevels.processinglevelid,
            valuecount=0,
            resulttypecv="Measurement",
            sampledmediumcv="Liquid aqueous",
            annotations=[resultannotation],
            resultdatetime=action_time,
            validdatetime=action_time,
            resultdatetimeutcoffset=0,
            validdatetimeutcoffset=0,
            actionid=completed_ran_mass_spec.actionid,
            resultuuid=uuid.uuid4(),
        )
        completed_result = await core_queries.create_result(conn, result)

    return schemas.MsReplicas(
        fractionate_sample=completed_fractionate_sample,
        ran_mass_spec=completed_ran_mass_spec,
        results=completed_result,
    )
Example #18
async def do_action(conn: asyncpg.connection,
                    action: schemas.ActionsCreate) -> schemas.Action:
    async with conn.transaction():
        method_row = await conn.fetchrow(
            "SELECT methodid FROM methods WHERE methodcode = $1",
            action.methodcode)
        if method_row is None:
            raise HTTPException(status_code=422,
                                detail="Please specify valid methodcode.")
        action_row = await conn.fetchrow(
            "INSERT INTO actions (actiontypecv, methodid, begindatetime, begindatetimeutcoffset,  enddatetime, "
            "enddatetimeutcoffset, actiondescription, actionfilelink) "
            "VALUES ($1, $2, $3, $4, $5, $6, $7, $8) returning *",
            action.actiontypecv,
            method_row["methodid"],
            action.begindatetime,
            action.begindatetimeutcoffset,
            action.enddatetime,
            action.enddatetimeutcoffset,
            action.actiondescription,
            action.actionfilelink,
        )

        action_by = schemas.ActionsByCreate(
            actionid=action_row["actionid"],
            affiliationid=action.affiliationid,
            isactionlead=action.isactionlead,
            roledescription=action.roledescription,
        )
        action_by_row = await insert_pydantic_object(conn, "actionby",
                                                     action_by,
                                                     schemas.ActionsBy)
        for equipmentid in action.equipmentids:
            equipment_used_create = schemas.EquipmentUsedCreate(
                actionid=action_row["actionid"], equipmentid=equipmentid)
            await insert_pydantic_object(conn, "equipmentused",
                                         equipment_used_create,
                                         schemas.EquipmentUsed)

        for directiveid in action.directiveids:
            action_directive_create = schemas.ActionDirectivesCreate(
                actionid=action_row["actionid"], directiveid=directiveid)
            await insert_pydantic_object(conn, "actiondirectives",
                                         action_directive_create,
                                         schemas.ActionDirective)

        for action_id, relation_ship_type in action.relatedactions:
            related_action_create = schemas.RelatedActionCreate(
                actionid=action_row["actionid"],
                relationshiptypecv=relation_ship_type,
                relatedactionid=action_id)
            await insert_pydantic_object(conn, "relatedactions",
                                         related_action_create,
                                         schemas.RelatedAction)

        new_sampling_features = []
        for sampling_feature in action.sampling_features:
            new_sampling_features.append(await create_sampling_feature(
                conn, sampling_feature))

            feature_action = schemas.FeatureActionsCreate(
                samplingfeatureuuid=new_sampling_features[-1].
                samplingfeatureuuid,
                actionid=action_row["actionid"])
            await create_feature_action(conn, feature_action)
    # Dict allows overwriting of key while pydantic schema does not, identical action_id exists in both return rows
    return schemas.Action(
        equipmentids=action.equipmentids,
        methodcode=action.methodcode,
        sampling_features=new_sampling_features,
        **{
            **action_row,
            **dict(action_by_row)
        },
    )