    def insert_linked_segmentation(self, table_name: str, pcg_table_name: str,
                                   segmentations: List[dict]):
        """Insert segmentations by linking to annotation ids. Limited to 10,000 segmentations.
        If more consider using a bulk insert script.

        Parameters
        ----------
        table_name : str
            name of annotation table
        pcg_table_name : str
            name of chunked graph reference table
        segmentations : List[dict]
            list of dictionaries, each containing the segmentation data for one annotation
        """
        insertion_limit = 10_000

        if len(segmentations) > insertion_limit:
            raise AnnotationInsertLimitExceeded(len(segmentations),
                                                insertion_limit)

        schema_type = self.get_table_schema(table_name)
        seg_table_name = build_segmentation_table_name(table_name,
                                                       pcg_table_name)

        SegmentationModel = self._cached_table(seg_table_name)
        formatted_seg_data = []

        _, segmentation_schema = self._get_flattened_schema(schema_type)

        for segmentation in segmentations:
            segmentation_data = flatten_dict(segmentation)
            flat_data = self._map_values_to_schema(segmentation_data,
                                                   segmentation_schema)
            flat_data["id"] = segmentation["id"]

            formatted_seg_data.append(flat_data)

        segs = [
            SegmentationModel(**segmentation_data)
            for segmentation_data in formatted_seg_data
        ]

        ids = [data["id"] for data in formatted_seg_data]
        q = self.cached_session.query(SegmentationModel).filter(
            SegmentationModel.id.in_([id for id in ids]))
        ids_exist = self.cached_session.query(q.exists()).scalar()

        if (
                not ids_exist
        ):  # TODO replace this with a filter for ids that are missing from this list
            self.cached_session.add_all(segs)
            seg_ids = [seg.id for seg in segs]
            self.commit_session()
            return seg_ids
        else:
            raise IdsAlreadyExists(
                f"Annotation IDs {ids} already linked in database ")
def test_synapse_flatten():
    schema = SynapseSchema()
    result = schema.load(good_synapse)
    d = flatten_dict(result)

    assert d["pre_pt_position"] == [31, 31, 0]

    result = schema.load(supervoxel_synapse)
    d2 = flatten_dict(result)
    assert d2["pre_pt_position"] == [31, 31, 0]

    result = schema.load(supervoxel_rootId_synapse)
    d3 = flatten_dict(result)
    assert d3["pre_pt_position"] == [31, 31, 0]

    FlatSynapseSchema = get_flat_schema("synapse")
    schema = FlatSynapseSchema()
    result = schema.load(d)

    assert len(result) == 8
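
# flatten_dict is assumed to join nested keys with underscores, e.g.
# {"pre_pt": {"position": [31, 31, 0]}} -> {"pre_pt_position": [31, 31, 0]}.
# A minimal sketch of that behavior (flatten_dict_sketch is a hypothetical
# stand-in, not the library's implementation):
def flatten_dict_sketch(d: dict, parent_key: str = "") -> dict:
    flat = {}
    for key, value in d.items():
        new_key = f"{parent_key}_{key}" if parent_key else key
        if isinstance(value, dict):
            # recurse into nested dicts, accumulating the joined key
            flat.update(flatten_dict_sketch(value, new_key))
        else:
            flat[new_key] = value
    return flat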
    def _get_flattened_schema_data(self, schema_type: str, data: dict) -> tuple:
        # deserialize the raw data with the named schema, dropping unknown keys
        schema_type = get_schema(schema_type)
        schema = schema_type(context={"postgis": True})
        data = schema.load(data, unknown=EXCLUDE)

        # flatten nested point dicts (e.g. {"pre_pt": {"position": ...}})
        # into single-level keys such as "pre_pt_position"
        check_is_nested = any(isinstance(i, dict) for i in data.values())
        if check_is_nested:
            data = flatten_dict(data)

        # split the schema into its annotation and segmentation halves
        (
            flat_annotation_schema,
            flat_segmentation_schema,
        ) = em_models.split_annotation_schema(schema_type)

        # map the flattened values onto each half and return both dicts
        return (
            self._map_values_to_schema(data, flat_annotation_schema),
            self._map_values_to_schema(data, flat_segmentation_schema),
        )
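
# Sketch of calling the helper above (the `db` instance and the good_synapse
# fixture are assumed): the annotation half keeps the geometry and metadata
# fields, while the segmentation half keeps the chunkedgraph-linked ids.
anno_data, seg_data = db._get_flattened_schema_data("synapse", good_synapse)
# anno_data -> {"pre_pt_position": ..., "ctr_pt_position": ..., "size": ...}
# seg_data  -> {"pre_pt_supervoxel_id": ..., "pre_pt_root_id": ..., ...}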
        "position": [10, 5, 10],
        "root_id": 9223372036854775898,
        "supervoxel_id": 106205165316472881
    },
    "ctr_pt": {
        "position": [7, 5, 10]
    },
    "size": 40.5
}
# get the schema to deserialize the test data
SynapseSchema = get_schema('synapse')
schema = SynapseSchema(context={'postgis': True})

# use the schema to deserialize the test data
d = schema.load(synapse_d)
d = flatten_dict(d)

# get the appropriate sqlalchemy model
# for the annotation type and dataset
SynapseModel = model_dict['test']['synapse']

# create a new model instance with the data
synapse = SynapseModel(**d)

# create a new db session
session = Session()
# add this synapse to database
session.add(synapse)
# commit this transaction to database
session.commit()
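
# reading the committed row back through the same session is a quick sanity
# check (a sketch, not part of the original example):
stored = session.query(SynapseModel).first()
print(stored.size)  # expected: 40.5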
def test_synapse_postgis():
    schema = SynapseSchema(context={"postgis": True})
    result = schema.load(good_synapse)
    d = flatten_dict(result)
    assert d["pre_pt_position"] == "POINTZ(31 31 0)"