Example #1
0
def generic_report(datastack_name, id):
    """Render a generic report page for one analysis table.

    Parameters
    ----------
    datastack_name : str
        Datastack used to resolve the aligned-volume database.
    id : int
        Primary key of the ``AnalysisTable`` row to report on.

    Returns
    -------
    str
        Rendered ``generic.html`` template.
    """
    # BUG FIX: the original signature was ``generic_report(id)`` but the body
    # referenced ``datastack_name`` (NameError at call time).  The sibling
    # report views (synapse_report, cell_type_local_report) take
    # (datastack_name, id), so the same signature is used here.
    aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
        datastack_name)
    session = sqlalchemy_cache.get(aligned_volume_name)
    table = session.query(AnalysisTable).filter(AnalysisTable.id == id).first()

    make_dataset_models(table.analysisversion.dataset, [],
                        version=table.analysisversion.version)

    Model = make_annotation_model(
        table.analysisversion.dataset,
        table.schema,
        table.tablename,
        version=table.analysisversion.version,
    )

    n_annotations = Model.query.count()

    return render_template(
        "generic.html",
        n_annotations=n_annotations,
        dataset=table.analysisversion.dataset,
        analysisversion=table.analysisversion.version,
        version=__version__,
        table_name=table.tablename,
        schema_name=table.schema,
    )
    def test_chunk_ids(self, mat_metadata):
        """chunk_ids should split the table's id range into [start, stop) pairs."""
        anno_table = mat_metadata["annotation_table_name"]
        schema_type = mat_metadata["schema_type"]
        flat_model = make_annotation_model(
            anno_table, schema_type, with_crud_columns=False)

        chunks = chunk_ids(mat_metadata, flat_model.id, 2)
        # Last chunk is open-ended (None upper bound).
        assert [chunk for chunk in chunks] == [[1, 3], [3, None]]
def add_table_indices(self, bulk_upload_info: dict):
    """Queue index-creation tasks for an annotation table and its matching
    segmentation table, then replace this task with the resulting chain.

    Parameters
    ----------
    bulk_upload_info : dict
        Expects keys ``aligned_volume``, ``annotation_table_name``,
        ``seg_table_name``, ``pcg_table_name`` and ``schema``.
    """
    aligned_volume = bulk_upload_info["aligned_volume"]
    annotation_table_name = bulk_upload_info["annotation_table_name"]
    seg_table_name = bulk_upload_info["seg_table_name"]
    segmentation_source = bulk_upload_info["pcg_table_name"]
    schema = bulk_upload_info["schema"]

    engine = sqlalchemy_cache.get_engine(aligned_volume)

    # Rebuild both models so the index helper can inspect their columns.
    anno_model = em_models.make_annotation_model(annotation_table_name, schema)
    seg_model = em_models.make_segmentation_model(annotation_table_name,
                                                  schema, segmentation_source)

    # SQL commands to recreate the annotation-table indexes.
    anno_indices = index_cache.add_indices_sql_commands(
        table_name=annotation_table_name, model=anno_model, engine=engine)

    # SQL commands to recreate the segmentation-table indexes.
    seg_indices = index_cache.add_indices_sql_commands(
        table_name=seg_table_name, model=seg_model, engine=engine)

    # One immutable task signature per SQL command, grouped per table,
    # matching the original [anno_tasks, seg_tasks] structure.
    add_index_tasks = [
        [add_index.si(aligned_volume, command) for command in anno_indices],
        [add_index.si(aligned_volume, command) for command in seg_indices],
    ]

    return self.replace(chain(add_index_tasks))
def test_wrong_reference_schmea():
    """Check that non-reference schema skips
    reference schema columns during model creation when
    optional metadata dict is passed with a reference"""
    # NOTE(review): "schmea" in the function name is a typo for "schema";
    # kept as-is so any test selection by name keeps working.
    metadata_with_reference = {"reference_table": "anno_table"}
    model = make_annotation_model("bad_reference_table", "synapse",
                                  metadata_with_reference)
    # A synapse schema is not a reference schema, so no target_id column.
    assert not hasattr(model, "target_id")
Example #5
0
def create_annotation_model(mat_metadata, with_crud_columns: bool = True):
    """Build a SQLAlchemy annotation model from materialization metadata.

    Parameters
    ----------
    mat_metadata : dict
        Expects keys ``annotation_table_name``, ``schema`` and optionally
        ``reference_table``.
    with_crud_columns : bool, optional
        Whether to include CRUD tracking columns on the model.

    Returns
    -------
    the model class produced by ``em_models.make_annotation_model``
    """
    return em_models.make_annotation_model(
        table_name=mat_metadata.get("annotation_table_name"),
        schema_type=mat_metadata.get("schema"),
        # reference_table may be absent; the value is then None.
        table_metadata={"reference_table": mat_metadata.get("reference_table")},
        with_crud_columns=with_crud_columns,
    )
    def test_add_index(self, mat_metadata, db_client):
        """Drop a table's indices, re-add them via the celery task, and
        assert every applied command reports an Index or Alter statement."""
        database_name = mat_metadata["aligned_volume"]
        table_name = mat_metadata["annotation_table_name"]
        schema = mat_metadata["schema_type"]

        __, engine = db_client

        is_dropped = index_client.drop_table_indices(table_name, engine)
        assert is_dropped is True

        model = make_annotation_model(table_name, schema, with_crud_columns=False)

        indexes = index_client.add_indices_sql_commands(table_name, model, engine)
        for index in indexes:
            # Apply each task synchronously; keep the result in its own name
            # instead of shadowing the loop variable.
            result = add_index.s(database_name, index).apply()
            # BUG FIX: the original `assert "Index" or "Alter" in ...` was
            # always true because the non-empty string "Index" is truthy on
            # its own; the membership test now applies to both substrings.
            assert "Index" in result.get() or "Alter" in result.get()
    def _add_annotation_model(self, table_name):
        """ Loads database model for an annotation schema

        Looks up the table's schema for the current materialization version,
        builds (and caches) the SQLAlchemy model for it, and registers a
        synapse-compartment model when the schema is 'synapse'.

        Args:
            table_name: Table name for the database

        Returns:
            bool: True if the model is (or already was) cached,
                False if model creation raised.
        """
        # Already built and cached — nothing to do.
        if table_name in self._models:
            return True
        # AnalysisVersion row for the version this object materializes.
        av = self.this_sqlalchemy_base_session.query(em_models.AnalysisVersion)\
            .filter(em_models.AnalysisVersion.version == self._materialization_version).first()

        base_query = self.this_sqlalchemy_base_session.query(
            em_models.AnalysisTable)
        base_query = base_query.filter(
            em_models.AnalysisTable.analysisversion == av)
        base_query = base_query.filter(
            em_models.AnalysisTable.tablename == table_name)

        # First matching AnalysisTable row (None if the table is unknown).
        schema = base_query.first()
        schema_name = schema.schema
        if schema_name is None:
            # Fall back to the annotation service when the row has no schema.
            schema_name = get_annotation_info(self.dataset_name, table_name,
                                              self._annotation_endpoint)

        try:
            # NOTE(review): the query above uses self._materialization_version
            # but this call uses self.materialization_version — confirm both
            # attributes exist and agree.
            self._models[table_name] = em_models.make_annotation_model(
                dataset=self.dataset_name,
                annotation_type=schema_name,
                table_name=table_name,
                version=self.materialization_version)

            if schema_name == 'synapse':
                self._add_synapse_compartment_model(
                    synapse_table_name=table_name)

            return True
        except Exception as e:
            # Best-effort: report the failure and signal it via the return
            # value rather than propagating.
            print(e)
            return False
Example #8
0
def synapse_report(datastack_name, id):
    """Render a report page summarising a synapse table.

    Counts total synapses, autapses (pre and post root ids equal and both
    nonzero) and synapses missing a root id on either side.

    Parameters
    ----------
    datastack_name : str
        Datastack used to resolve the aligned-volume database.
    id : int
        Primary key of the ``AnalysisTable`` row to report on.

    Returns
    -------
    str
        Rendered ``synapses.html`` template.
    """
    aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
        datastack_name)
    session = sqlalchemy_cache.get(aligned_volume_name)
    table = session.query(AnalysisTable).filter(AnalysisTable.id == id).first()
    if table.schema != "synapse":
        # NOTE(review): 504 (Gateway Timeout) is an odd status for a bad
        # request; a 4xx would fit better, but the code is kept since
        # callers may depend on it.
        abort(504, "this table is not a synapse table")

    # BUG FIX: the original passed table.analysisversion.datastack here,
    # while every other use in this view (and the sibling report views)
    # reads table.analysisversion.dataset — made consistent.
    make_dataset_models(table.analysisversion.dataset, [],
                        version=table.analysisversion.version)

    SynapseModel = make_annotation_model(
        table.analysisversion.dataset,
        "synapse",
        table.tablename,
        version=table.analysisversion.version,
    )
    synapses = SynapseModel.query.count()
    # Autapse: a cell synapsing onto itself, excluding unrooted (0) ids.
    n_autapses = (SynapseModel.query.filter(
        SynapseModel.pre_pt_root_id == SynapseModel.post_pt_root_id).filter(
            and_(SynapseModel.pre_pt_root_id != 0,
                 SynapseModel.post_pt_root_id != 0)).count())
    # Synapses where either side failed to resolve to a root id.
    n_no_root = SynapseModel.query.filter(
        or_(SynapseModel.pre_pt_root_id == 0,
            SynapseModel.post_pt_root_id == 0)).count()

    return render_template(
        "synapses.html",
        num_synapses=synapses,
        num_autapses=n_autapses,
        num_no_root=n_no_root,
        dataset=table.analysisversion.dataset,
        analysisversion=table.analysisversion.version,
        version=__version__,
        table_name=table.tablename,
        schema_name="synapses",
    )
Example #9
0
def cell_type_local_report(datastack_name, id):
    """Render a report page summarising a cell_type_local table.

    Shows the 100 most frequent (root id, cell type) pairs as an HTML table.
    """
    aligned_volume_name, pcg_table_name = get_relevant_datastack_info(
        datastack_name)
    session = sqlalchemy_cache.get(aligned_volume_name)
    table = AnalysisTable.query.filter(AnalysisTable.id == id).first_or_404()
    if table.schema != "cell_type_local":
        abort(504, "this table is not a cell_type_local table")

    make_dataset_models(table.analysisversion.dataset, [],
                        version=table.analysisversion.version)
    CellTypeModel = make_annotation_model(
        table.analysisversion.dataset,
        table.schema,
        table.tablename,
        version=table.analysisversion.version,
    )

    n_annotations = CellTypeModel.query.count()

    # Top 100 (root id, cell type) pairs by cell count, built stepwise.
    num_cells = func.count(CellTypeModel.pt_root_id).label("num_cells")
    merge_query = db.session.query(
        CellTypeModel.pt_root_id, CellTypeModel.cell_type, num_cells)
    merge_query = merge_query.group_by(
        CellTypeModel.pt_root_id, CellTypeModel.cell_type)
    merge_query = merge_query.order_by("num_cells DESC").limit(100)

    cell_type_df = pd.read_sql(merge_query.statement,
                               db.get_engine(),
                               coerce_float=False)
    return render_template(
        "cell_type_local.html",
        version=__version__,
        schema_name=table.schema,
        table_name=table.tablename,
        dataset=table.analysisversion.dataset,
        table=cell_type_df.to_html(),
    )
from emannotationschemas.models import make_annotation_model, Base
from emannotationschemas.flatten import flatten_dict
from emannotationschemas import get_schema
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# example of initializing mapping of database
# NOTE(review): SQLAlchemy 1.4+ rejects the legacy "postgres://" scheme in
# favour of "postgresql://" — confirm the installed version accepts this URI.
DATABASE_URI = "postgres://*****:*****@localhost:5432/minnie"
engine = create_engine(DATABASE_URI, echo=True)
# build the model for a 'synapse'-schema table named 'test'
model_dict = make_annotation_model('test', 'synapse')
# assures that all the tables are created
# would be done as a db management task in general
Base.metadata.create_all(engine)

# create a session class
# this will produce session objects to manage a single transaction
Session = sessionmaker(bind=engine)

# some example test data as a raw json compatible blob
synapse_d = {
    "type": "synapse",
    "pre_pt": {
        "position": [5, 5, 10],
        "root_id": 9223372036854775899,
        "supervoxel_id": 89851029364932800
    },
    "post_pt": {
        "position": [10, 5, 10],
        "root_id": 9223372036854775898,
        "supervoxel_id": 106205165316472881
    },
Example #11
0
    def create_annotation_table(
        self,
        table_name: str,
        schema_type: str,
        description: str,
        user_id: str,
        voxel_resolution_x: float,
        voxel_resolution_y: float,
        voxel_resolution_z: float,
        table_metadata: dict = None,
        flat_segmentation_source: str = None,
        with_crud_columns: bool = True,
    ):
        r"""Create new annotation table unless already exists

        Builds the SQLAlchemy model, creates the table in the database,
        optionally wires up a reference-table update trigger, and records a
        row of table metadata.

        Parameters
        ----------
        table_name : str
            name of table
        schema_type : str
            Type of schema to use, must be a valid type from EMAnnotationSchemas

        description: str
            a string with a human readable explanation of
            what is in the table. Including who made it
            and any information that helps interpret the fields
            of the annotations.

        user_id: str
            user id for this table

        voxel_resolution_x: float
            voxel_resolution of this annotation table's point in x (typically nm)

        voxel_resolution_y: float
            voxel_resolution of this annotation table's point in y (typically nm)

        voxel_resolution_z: float
            voxel_resolution of this annotation table's point in z (typically nm)

        table_metadata: dict
            optional metadata; when present it is parsed for a
            ``reference_table`` name and an update-tracking flag
            (see ``_parse_table_metadata_params``)

        flat_segmentation_source: str
            a path to a segmentation source associated with this table
             i.e. 'precomputed:\\gs:\\my_synapse_seg\example1'

        with_crud_columns: bool
            add additional columns to track CRUD operations on rows

        Returns
        -------
        str
            the ``table_name`` that was created
        """
        # NOTE(review): the original docstring documented an 'aligned_volume'
        # parameter that does not exist in the signature; removed above.
        # check_table_is_unique is defined elsewhere — presumably it raises
        # on a duplicate name and returns the existing tables; confirm.
        existing_tables = self.check_table_is_unique(table_name)
        if table_metadata:
            reference_table, track_updates = self._parse_table_metadata_params(
                schema_type, table_name, table_metadata, existing_tables)
        else:
            reference_table = None
            track_updates = None

        model = em_models.make_annotation_model(table_name, schema_type,
                                                table_metadata,
                                                with_crud_columns)

        if reference_table and track_updates:
            reference_table_name = self.get_table_sql_metadata(reference_table)
            logging.info(
                f"{table_name} is targeting reference table: {reference_table_name}"
            )
            # The trigger helper may amend the human-readable description.
            description = self.create_reference_update_trigger(
                table_name, description, reference_table, model)

        # Emit CREATE TABLE for just this model's table (not all metadata).
        self.base.metadata.tables[model.__name__].create(bind=self.engine)
        creation_time = datetime.datetime.now()

        metadata_dict = {
            "description": description,
            "user_id": user_id,
            "reference_table": reference_table,
            "schema_type": schema_type,
            "table_name": table_name,
            "valid": True,
            "created": creation_time,
            "flat_segmentation_source": flat_segmentation_source,
            "voxel_resolution_x": voxel_resolution_x,
            "voxel_resolution_y": voxel_resolution_y,
            "voxel_resolution_z": voxel_resolution_z,
        }

        logging.info(f"Metadata for table: {table_name} is {metadata_dict}")
        anno_metadata = AnnoMetadata(**metadata_dict)
        self.cached_session.add(anno_metadata)
        self.commit_session()
        logging.info(
            f"Table: {table_name} created using {model} model at {creation_time}"
        )
        return table_name