Code example #1
def _parallel_download_wsi(identifiers, path, argv):
    with Cytomine.connect_from_cli(argv):
        instances = list()
        for _id in identifiers:
            instance = ImageInstance().fetch(_id)
            filepath = os.path.join(path, instance.originalFilename)
            print("Download", filepath)
            instance.download(dest_pattern=filepath, override=False)
            instance.download_path = filepath
            instances.append(instance)
        return instances
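Despite its name, `_parallel_download_wsi` downloads the images one by one. A minimal sketch of a truly parallel variant, assuming the same imports as above and that the Cytomine connection can safely be shared across worker threads (worth verifying for your client version):

from concurrent.futures import ThreadPoolExecutor

def _download_one(_id, path):
    # Hypothetical helper: fetch a single ImageInstance and download it.
    instance = ImageInstance().fetch(_id)
    filepath = os.path.join(path, instance.originalFilename)
    instance.download(dest_pattern=filepath, override=False)
    instance.download_path = filepath
    return instance

def _threaded_download_wsi(identifiers, path, argv, n_workers=4):
    with Cytomine.connect_from_cli(argv):
        with ThreadPoolExecutor(max_workers=n_workers) as pool:
            return list(pool.map(lambda _id: _download_one(_id, path), identifiers))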
Code example #2
def extract_images_or_rois(parameters):
    # work at image level or ROIs by term
    images = ImageInstanceCollection()
    if parameters.cytomine_id_images is not None:
        id_images = parse_domain_list(parameters.cytomine_id_images)
        images.extend([ImageInstance().fetch(_id) for _id in id_images])
    else:
        images = images.fetch_with_filter("project",
                                          parameters.cytomine_id_project)

    slides = [
        CytomineSlide(img, parameters.cytomine_zoom_level) for img in images
    ]
    if parameters.cytomine_id_roi_term is None:
        return slides

    # fetch ROI annotations, all users
    collection = AnnotationCollection(
        terms=[parameters.cytomine_id_roi_term],
        reviewed=parameters.cytomine_reviewed_roi,
        project=parameters.cytomine_id_project,
        showWKT=True,
        includeAlgo=True).fetch()

    slides_map = {slide.image_instance.id: slide for slide in slides}
    regions = list()
    for annotation in collection:
        if annotation.image not in slides_map:
            continue
        slide = slides_map[annotation.image]
        regions.append(
            get_iip_window_from_annotation(slide, annotation,
                                           parameters.cytomine_zoom_level))

    return regions
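`extract_images_or_rois` only reads a handful of attributes from `parameters`, so it can be exercised with a plain namespace. A hedged usage sketch (the ids are placeholders borrowed from other examples on this page, not guaranteed to exist):

from argparse import Namespace

parameters = Namespace(
    cytomine_id_images=None,        # or a list string such as "77150767,77150761"
    cytomine_id_project=77150529,   # placeholder project id
    cytomine_zoom_level=0,
    cytomine_id_roi_term=None,      # set a term id to get ROI windows instead of whole slides
    cytomine_reviewed_roi=False,
)
slides_or_regions = extract_images_or_rois(parameters)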
Code example #3
    def __init__(self, id_img_instance):
        """Construct CytomineSlide objects

        Parameters
        ----------
        id_img_instance: int
            The id of the image instance
        """
        self._img_instance = ImageInstance().fetch(id_img_instance)
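A one-line usage sketch, assuming an open Cytomine connection and that the wrapped instance is exposed as `image_instance`, as in code example #2:

slide = CytomineSlide(77150955)   # image id borrowed from code example #11
print(slide.image_instance.width, slide.image_instance.height)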
Code example #4
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size,
                              job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(img_instance=ImageInstance().fetch(
            job.parameters.cytomine_id_image),
                              zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon,
                             slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}
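The job above is meant to be launched from the command line: `CytomineJob.from_cli` consumes the standard connection arguments, while the remaining flags populate `job.parameters`. A hedged invocation sketch (host, keys and ids are placeholders, and the job-specific flag names must match the parameters declared for the software on the Cytomine side):

argv = [
    "--cytomine_host", "demo.cytomine.be",
    "--cytomine_public_key", "<public-key>",
    "--cytomine_private_key", "<private-key>",
    "--cytomine_id_project", "<project-id>",
    "--cytomine_id_software", "<software-id>",
    "--cytomine_id_image", "<image-id>",
    "--cytomine_zoom_level", "0",
    "--tile_size", "512", "--tile_overlap", "128",
    "--batch_size", "4", "--threshold", "0.5",
    "--init_fmaps", "16", "--device", "cuda:0", "--n_jobs", "1",
]
main(argv)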
Code example #5
def extract_images_or_rois(parameters):
    id_annotations = parse_domain_list(parameters.cytomine_roi_annotations)
    # if ROI annotations are provided
    if len(id_annotations) > 0:
        image_cache = dict()  # maps ImageInstance id with CytomineSlide object
        zones = list()
        for id_annot in id_annotations:
            annotation = Annotation().fetch(id_annot)
            if annotation.image not in image_cache:
                image_cache[annotation.image] = CytomineSlide(
                    annotation.image, parameters.cytomine_zoom_level)
            window = get_iip_window_from_annotation(
                image_cache[annotation.image], annotation,
                parameters.cytomine_zoom_level)
            zones.append(window)
        return zones

    # work at image level or ROIs by term
    images = ImageInstanceCollection()
    if parameters.cytomine_id_images is not None:
        id_images = parse_domain_list(parameters.cytomine_id_images)
        images.extend([ImageInstance().fetch(_id) for _id in id_images])
    else:
        images = images.fetch_with_filter("project",
                                          parameters.cytomine_id_project)

    slides = [
        CytomineSlide(img, parameters.cytomine_zoom_level) for img in images
    ]
    if parameters.cytomine_id_roi_term is None:
        return slides

    # fetch ROI annotations
    collection = AnnotationCollection(
        terms=[parameters.cytomine_id_roi_term],
        reviewed=parameters.cytomine_reviewed_roi,
        showWKT=True)
    collection.fetch_with_filter(project=parameters.cytomine_id_project)
    slides_map = {slide.image_instance.id: slide for slide in slides}
    regions = list()
    for annotation in collection:
        if annotation.image not in slides_map:
            continue
        slide = slides_map[annotation.image]
        regions.append(
            get_iip_window_from_annotation(slide, annotation,
                                           parameters.cytomine_zoom_level))

    return regions
Code example #6
    @classmethod
    def from_id(cls, id_img_instance, zoom_level=0):
        return cls(ImageInstance().fetch(id_img_instance), zoom_level=zoom_level)
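Usage then reduces to a one-liner, assuming an open connection:

slide = CytomineSlide.from_id(77150955, zoom_level=2)   # image id borrowed from code example #11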
Code example #7
    def end_successful_import(self, path: Path, image: Image, *args, **kwargs):
        uf = self.get_uf(path)

        ai = AbstractImage()
        ai.uploadedFile = uf.id
        ai.originalFilename = uf.originalFilename
        ai.width = image.width
        ai.height = image.height
        ai.depth = image.depth
        ai.duration = image.duration
        ai.channels = image.n_intrinsic_channels
        ai.extrinsicChannels = image.n_channels
        if image.physical_size_x:
            ai.physicalSizeX = round(
                convert_quantity(image.physical_size_x, "micrometers"), 6)
        if image.physical_size_y:
            ai.physicalSizeY = round(
                convert_quantity(image.physical_size_y, "micrometers"), 6)
        if image.physical_size_z:
            ai.physicalSizeZ = round(
                convert_quantity(image.physical_size_z, "micrometers"), 6)
        if image.frame_rate:
            ai.fps = round(convert_quantity(image.frame_rate, "Hz"), 6)
        ai.magnification = parse_int(image.objective.nominal_magnification)
        ai.bitPerSample = dtype_to_bits(image.pixel_type)
        ai.samplePerPixel = image.n_channels / image.n_intrinsic_channels
        ai.save()
        self.abstract_images.append(ai)

        asc = AbstractSliceCollection()
        set_channel_names = image.n_intrinsic_channels == image.n_channels
        for c in range(image.n_intrinsic_channels):
            name = None
            color = None
            if set_channel_names:
                name = image.channels[c].suggested_name
                color = image.channels[c].hex_color
            for z in range(image.depth):
                for t in range(image.duration):
                    mime = "image/pyrtiff"  # TODO: remove
                    asc.append(
                        AbstractSlice(ai.id,
                                      uf.id,
                                      mime,
                                      c,
                                      z,
                                      t,
                                      channelName=name,
                                      channelColor=color))
        asc.save()

        properties = PropertyCollection(ai)
        for metadata in image.raw_metadata.values():
            if metadata.value is not None and str(metadata.value) != '':
                properties.append(
                    Property(ai, metadata.namespaced_key, str(metadata.value)))
        try:
            properties.save()
        except CollectionPartialUploadException:
            pass  # TODO: improve handling of this exception, but do not fail the whole import

        uf.status = UploadedFile.DEPLOYED
        uf.update()

        properties = PropertyCollection(ai)
        for k, v in self.user_properties:
            if v is not None and str(v) != '':
                properties.append(Property(ai, k, v))
        try:
            properties.save()
        except CollectionPartialUploadException:
            pass  # TODO: improve handling of this exception, but do not fail the whole import

        instances = []
        for p in self.projects:
            instances.append(ImageInstance(ai.id, p.id).save())
        self.images.append((ai, instances))

        # TODO: temporary add annotations for backwards compatibility.
        #  BUT it should be done by core when an image instance is created.
        if image.n_planes == 1 and len(instances) > 0:
            # TODO: currently only supports metadata annots on 2D images

            metadata_annots = image.annotations
            if len(metadata_annots) > 0:
                metadata_terms = [
                    ma.terms for ma in metadata_annots if len(ma.terms) > 0
                ]
                metadata_terms = set(flatten(metadata_terms))

                for instance in instances:
                    project_id = instance.project
                    project = self.projects.find_by_attribute('id', project_id)
                    ontology_id = project.ontology  # noqa
                    ontology_terms = TermCollection().fetch_with_filter(
                        "project", project_id)
                    terms_id_mapping = {t.name: t.id for t in ontology_terms}

                    for metadata_term in metadata_terms:
                        if metadata_term not in terms_id_mapping:
                            # TODO: user must have ontology rights !
                            term = Term(name=metadata_term,
                                        id_ontology=ontology_id,
                                        color="#AAAAAA").save()
                            terms_id_mapping[term.name] = term.id

                    annots = AnnotationCollection()
                    for metadata_annot in metadata_annots:
                        term_ids = [
                            terms_id_mapping[t] for t in metadata_annot.terms
                        ]
                        properties = [
                            dict(key=k, value=v)
                            for k, v in metadata_annot.properties.items()
                        ]
                        annots.append(
                            Annotation(location=metadata_annot.wkt,
                                       id_image=instance.id,
                                       id_terms=term_ids
                                       if len(term_ids) > 0 else None,
                                       properties=properties
                                       if len(properties) > 0 else None,
                                       user=uf.user))

                    try:
                        annots.save()
                    except CollectionPartialUploadException:
                        pass
Code example #8
    def run(self):
        self.super_admin = Cytomine.get_instance().current_user
        connect_as(self.super_admin, True)

        users = UserCollection().fetch()
        users_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("user-collection")
        ][0]
        remote_users = UserCollection()
        for u in json.load(open(os.path.join(self.working_path, users_json))):
            remote_users.append(User().populate(u))

        roles = ["project_manager", "project_contributor", "ontology_creator"]
        if self.with_images:
            roles += ["image_creator", "image_reviewer"]

        if self.with_userannotations:
            roles += ["userannotation_creator", "userannotationterm_creator"]

        roles = set(roles)
        remote_users = [
            u for u in remote_users
            if len(roles.intersection(set(u.roles))) > 0
        ]

        for remote_user in remote_users:
            user = find_first(
                [u for u in users if u.username == remote_user.username])
            if not user:
                user = copy.copy(remote_user)
                if not user.password:
                    user.password = random_string(8)
                if not self.with_original_date:
                    user.created = None
                    user.updated = None
                user.save()
            self.id_mapping[remote_user.id] = user.id

        # --------------------------------------------------------------------------------------------------------------
        logging.info("1/ Import ontology and terms")
        """
        Import the ontology with terms and relation terms that are stored in pickled files in working_path.
        If the ontology exists (same name and same terms), the existing one is used.
        Otherwise, an ontology with an available name is created with new terms and corresponding relationships.
        """
        ontologies = OntologyCollection().fetch()
        ontology_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("ontology")
        ][0]
        remote_ontology = Ontology().populate(
            json.load(open(os.path.join(self.working_path, ontology_json))))
        remote_ontology.name = remote_ontology.name.strip()

        terms = TermCollection().fetch()
        terms_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("term-collection")
        ]
        remote_terms = TermCollection()
        if len(terms_json) > 0:
            for t in json.load(
                    open(os.path.join(self.working_path, terms_json[0]))):
                remote_terms.append(Term().populate(t))

        def ontology_exists():
            compatible_ontology = find_first([
                o for o in ontologies
                if o.name == remote_ontology.name.strip()
            ])
            if compatible_ontology:
                set1 = set((t.name, t.color) for t in terms
                           if t.ontology == compatible_ontology.id)
                difference = [
                    term for term in remote_terms
                    if (term.name, term.color) not in set1
                ]
                if len(difference) == 0:
                    return True, compatible_ontology
                return False, None
            else:
                return True, None

        i = 1
        remote_name = remote_ontology.name
        found, existing_ontology = ontology_exists()
        while not found:
            remote_ontology.name = "{} ({})".format(remote_name, i)
            found, existing_ontology = ontology_exists()
            i += 1

        # SWITCH to ontology creator user
        connect_as(User().fetch(self.id_mapping[remote_ontology.user]))
        if not existing_ontology:
            ontology = copy.copy(remote_ontology)
            ontology.user = self.id_mapping[remote_ontology.user]
            if not self.with_original_date:
                ontology.created = None
                ontology.updated = None
            ontology.save()
            self.id_mapping[remote_ontology.id] = ontology.id
            logging.info("Ontology imported: {}".format(ontology))

            for remote_term in remote_terms:
                logging.info("Importing term: {}".format(remote_term))
                term = copy.copy(remote_term)
                term.ontology = self.id_mapping[term.ontology]
                term.parent = None
                if not self.with_original_date:
                    term.created = None
                    term.updated = None
                term.save()
                self.id_mapping[remote_term.id] = term.id
                logging.info("Term imported: {}".format(term))

            remote_relation_terms = [(term.parent, term.id)
                                     for term in remote_terms]
            for relation in remote_relation_terms:
                parent, child = relation
                if parent:
                    rt = RelationTerm(self.id_mapping[parent],
                                      self.id_mapping[child]).save()
                    logging.info("Relation term imported: {}".format(rt))
        else:
            self.id_mapping[remote_ontology.id] = existing_ontology.id

            ontology_terms = [
                t for t in terms if t.ontology == existing_ontology.id
            ]
            for remote_term in remote_terms:
                self.id_mapping[remote_term.id] = find_first([
                    t for t in ontology_terms if t.name == remote_term.name
                ]).id

            logging.info(
                "Ontology already encoded: {}".format(existing_ontology))

        # SWITCH USER
        connect_as(self.super_admin, True)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("2/ Import project")
        """
        Import the project (i.e. the Cytomine Project domain) stored in pickled file in working_path.
        If a project with the same name already exists, append a (x) suffix where x is an increasing number.
        """
        projects = ProjectCollection().fetch()
        project_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("project")
        ][0]
        remote_project = Project().populate(
            json.load(open(os.path.join(self.working_path, project_json))))
        remote_project.name = remote_project.name.strip()

        def available_name():
            i = 1
            existing_names = [o.name for o in projects]
            new_name = project.name
            while new_name in existing_names:
                new_name = "{} ({})".format(project.name, i)
                i += 1
            return new_name

        project = copy.copy(remote_project)
        project.name = available_name()
        project.discipline = None
        project.ontology = self.id_mapping[project.ontology]
        project_contributors = [
            u for u in remote_users if "project_contributor" in u.roles
        ]
        project.users = [self.id_mapping[u.id] for u in project_contributors]
        project_managers = [
            u for u in remote_users if "project_manager" in u.roles
        ]
        project.admins = [self.id_mapping[u.id] for u in project_managers]
        if not self.with_original_date:
            project.created = None
            project.updated = None
        project.save()
        self.id_mapping[remote_project.id] = project.id
        logging.info("Project imported: {}".format(project))

        # --------------------------------------------------------------------------------------------------------------
        logging.info("3/ Import images")
        storages = StorageCollection().fetch()
        abstract_images = AbstractImageCollection().fetch()
        images_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("imageinstance-collection")
        ]
        remote_images = ImageInstanceCollection()
        if len(images_json) > 0:
            for i in json.load(
                    open(os.path.join(self.working_path, images_json[0]))):
                remote_images.append(ImageInstance().populate(i))

        remote_images_dict = {}

        for remote_image in remote_images:
            image = copy.copy(remote_image)

            # Fix old image name due to urllib3 limitation
            remote_image.originalFilename = bytes(
                remote_image.originalFilename,
                'utf-8').decode('ascii', 'ignore')
            if remote_image.originalFilename not in remote_images_dict.keys():
                remote_images_dict[remote_image.originalFilename] = [
                    remote_image
                ]
            else:
                remote_images_dict[remote_image.originalFilename].append(
                    remote_image)
            logging.info("Importing image: {}".format(remote_image))

            # SWITCH user to image creator user
            connect_as(User().fetch(self.id_mapping[remote_image.user]))
            # Get its storage
            storage = find_first([
                s for s in storages
                if s.user == Cytomine.get_instance().current_user.id
            ])
            if not storage:
                storage = storages[0]

            # Check if image is already in its storage
            abstract_image = find_first([
                ai for ai in abstract_images
                if ai.originalFilename == remote_image.originalFilename and
                ai.width == remote_image.width and ai.height == remote_image.
                height and ai.resolution == remote_image.resolution
            ])
            if abstract_image:
                logging.info(
                    "== Found corresponding abstract image. Linking to project."
                )
                ImageInstance(abstract_image.id,
                              self.id_mapping[remote_project.id]).save()
            else:
                logging.info("== New image starting to upload & deploy")
                filename = os.path.join(
                    self.working_path, "images",
                    image.originalFilename.replace("/", "-"))
                Cytomine.get_instance().upload_image(
                    self.host_upload, filename, storage.id,
                    self.id_mapping[remote_project.id])
                time.sleep(0.8)

            # SWITCH USER
            connect_as(self.super_admin, True)

        # Waiting for all images...
        n_new_images = -1
        new_images = None
        count = 0
        while n_new_images != len(
                remote_images) and count < len(remote_images) * 5:
            new_images = ImageInstanceCollection().fetch_with_filter(
                "project", self.id_mapping[remote_project.id])
            n_new_images = len(new_images)
            if count > 0:
                time.sleep(5)
            count = count + 1
        print("All images have been deployed. Fixing image-instances...")

        # Fix image instances meta-data:
        for new_image in new_images:
            remote_image = remote_images_dict[new_image.originalFilename].pop()
            if self.with_original_date:
                new_image.created = remote_image.created
                new_image.updated = remote_image.updated
            new_image.reviewStart = remote_image.reviewStart if hasattr(
                remote_image, 'reviewStart') else None
            new_image.reviewStop = remote_image.reviewStop if hasattr(
                remote_image, 'reviewStop') else None
            new_image.reviewUser = self.id_mapping[
                remote_image.reviewUser] if hasattr(
                    remote_image,
                    'reviewUser') and remote_image.reviewUser else None
            new_image.instanceFilename = remote_image.instanceFilename
            new_image.update()
            self.id_mapping[remote_image.id] = new_image.id
            self.id_mapping[remote_image.baseImage] = new_image.baseImage

            new_abstract = AbstractImage().fetch(new_image.baseImage)
            if self.with_original_date:
                new_abstract.created = remote_image.created
                new_abstract.updated = remote_image.updated
            if new_abstract.resolution is None:
                new_abstract.resolution = remote_image.resolution
            if new_abstract.magnification is None:
                new_abstract.magnification = remote_image.magnification
            new_abstract.update()

        print("All image-instances have been fixed.")

        # --------------------------------------------------------------------------------------------------------------
        logging.info("4/ Import user annotations")
        annots_json = [
            f for f in os.listdir(self.working_path) if f.endswith(".json")
            and f.startswith("user-annotation-collection")
        ]
        remote_annots = AnnotationCollection()
        if len(annots_json) > 0:
            for a in json.load(
                    open(os.path.join(self.working_path, annots_json[0]))):
                remote_annots.append(Annotation().populate(a))

        def _add_annotation(remote_annotation, id_mapping, with_original_date):
            if remote_annotation.project not in id_mapping.keys() \
                    or remote_annotation.image not in id_mapping.keys():
                return

            annotation = copy.copy(remote_annotation)
            annotation.project = id_mapping[remote_annotation.project]
            annotation.image = id_mapping[remote_annotation.image]
            annotation.user = id_mapping[remote_annotation.user]
            annotation.term = [id_mapping[t] for t in remote_annotation.term]
            if not with_original_date:
                annotation.created = None
                annotation.updated = None
            annotation.save()

        for user in [
                u for u in remote_users if "userannotation_creator" in u.roles
        ]:
            remote_annots_for_user = [
                a for a in remote_annots if a.user == user.id
            ]
            # SWITCH to annotation creator user
            connect_as(User().fetch(self.id_mapping[user.id]))
            Parallel(n_jobs=-1, backend="threading")(
                delayed(_add_annotation)(remote_annotation, self.id_mapping,
                                         self.with_original_date)
                for remote_annotation in remote_annots_for_user)

            # SWITCH back to admin
            connect_as(self.super_admin, True)

        # --------------------------------------------------------------------------------------------------------------
        logging.info(
            "5/ Import metadata (properties, attached files, description)")
        obj = Model()
        obj.id = -1
        obj.class_ = ""

        properties_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("properties")
        ]
        for property_json in properties_json:
            for remote_prop in json.load(
                    open(os.path.join(self.working_path, property_json))):
                prop = Property(obj).populate(remote_prop)
                prop.domainIdent = self.id_mapping[prop.domainIdent]
                prop.save()

        attached_files_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("attached-files")
        ]
        for attached_file_json in attached_files_json:
            for remote_af in json.load(
                    open(os.path.join(self.working_path, attached_file_json))):
                af = AttachedFile(obj).populate(remote_af)
                af.domainIdent = self.id_mapping[af.domainIdent]
                af.filename = os.path.join(self.working_path, "attached_files",
                                           remote_af.filename)
                af.save()

        descriptions_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("description")
        ]
        for description_json in descriptions_json:
            desc = Description(obj).populate(
                json.load(
                    open(os.path.join(self.working_path, description_json))))
            desc.domainIdent = self.id_mapping[desc.domainIdent]
            desc._object.class_ = desc.domainClassName
            desc._object.id = desc.domainIdent
            desc.save()
Code example #9
    parser.add_argument('--key', help="the property key")
    parser.add_argument('--value', help="the property value")

    parser.add_argument('--cytomine_id_project', dest='id_project', required=False,
                        help="The project to which the property will be added (optional)")
    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance', required=False,
                        help="The image to which the property will be added (optional)")
    parser.add_argument('--cytomine_id_annotation', dest='id_annotation', required=False,
                        help="The annotation to which the property will be added (optional)")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:

        if params.id_project:
            prop = Property(Project().fetch(params.id_project), key=params.key, value=params.value).save()
            print(prop)

        if params.id_image_instance:
            prop = Property(ImageInstance().fetch(params.id_image_instance), key=params.key, value=params.value).save()
            print(prop)

        if params.id_annotation:
            prop = Property(Annotation().fetch(params.id_annotation), key=params.key, value=params.value).save()
            print(prop)

        """
        You can add a property to any Cytomine domain.
        You can also attach a file (see AttachedFile) or add a description (see Description) to any Cytomine domain.
        """
Code example #10
logger.setLevel(logging.INFO)

if __name__ == '__main__':
    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine
    parser.add_argument('--cytomine_host', dest='host',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")

    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
                        help="The image to which the tag will be added")
    parser.add_argument('--cytomine_id_tag', dest='id_tag',
                        help="The tag that will be added to the image")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key) as cytomine:
        image = ImageInstance().fetch(params.id_image_instance)
        tag = Tag().fetch(params.id_tag)

        tda = TagDomainAssociation(object=image, tag=tag.id).save()

        # Get the list of tags for the image:
        print("Image {} has tags:".format(image.instanceFilename))
        tdac = TagDomainAssociationCollection(object=image).fetch()
        for tda in tdac:
            print("- {}".format(tda.tagName))
Code example #11
File: test.py  Project: waliens/thyroid-seg
def main(argv):
    with Cytomine.connect_from_cli(argv):
        instance = ImageInstance().fetch(77150955)
Code example #12
def main(argv):
    """

    IMAGES VALID:
    * 005-TS_13C08351_2-2014-02-12 12.22.44.ndpi | id : 77150767
    * 024-12C07162_2A-2012-08-14-17.21.05.jp2 | id : 77150761
    * 019-CP_12C04234_2-2012-08-10-12.49.26.jp2 | id : 77150809

    IMAGES TEST:
    * 004-PF_08C11886_1-2012-08-09-19.05.53.jp2 | id : 77150623
    * 011-TS_13C10153_3-2014-02-13 15.22.21.ndpi | id : 77150611
    * 018-PF_07C18435_1-2012-08-17-00.55.09.jp2 | id : 77150755

    """
    with Cytomine.connect_from_cli(argv):
        parser = ArgumentParser()
        parser.add_argument("-b",
                            "--batch_size",
                            dest="batch_size",
                            default=4,
                            type=int)
        parser.add_argument("-j",
                            "--n_jobs",
                            dest="n_jobs",
                            default=1,
                            type=int)
        parser.add_argument("-e",
                            "--epochs",
                            dest="epochs",
                            default=1,
                            type=int)
        parser.add_argument("-d", "--device", dest="device", default="cpu")
        parser.add_argument("-o",
                            "--overlap",
                            dest="overlap",
                            default=0,
                            type=int)
        parser.add_argument("-t",
                            "--tile_size",
                            dest="tile_size",
                            default=256,
                            type=int)
        parser.add_argument("-z",
                            "--zoom_level",
                            dest="zoom_level",
                            default=0,
                            type=int)
        parser.add_argument("--lr", dest="lr", default=0.01, type=float)
        parser.add_argument("--init_fmaps",
                            dest="init_fmaps",
                            default=16,
                            type=int)
        parser.add_argument("--data_path",
                            "--dpath",
                            dest="data_path",
                            default=os.path.join(str(Path.home()), "tmp"))
        parser.add_argument("-w",
                            "--working_path",
                            "--wpath",
                            dest="working_path",
                            default=os.path.join(str(Path.home()), "tmp"))
        parser.add_argument("-s",
                            "--save_path",
                            dest="save_path",
                            default=os.path.join(str(Path.home()), "tmp"))
        args, _ = parser.parse_known_args(argv)

        os.makedirs(args.save_path, exist_ok=True)
        os.makedirs(args.data_path, exist_ok=True)
        os.makedirs(args.working_path, exist_ok=True)

        # fetch annotations (filter val/test sets + other annotations)
        all_annotations = AnnotationCollection(project=77150529,
                                               showWKT=True,
                                               showMeta=True,
                                               showTerm=True).fetch()
        val_ids = {77150767, 77150761, 77150809}
        test_ids = {77150623, 77150611, 77150755}
        val_test_ids = val_ids.union(test_ids)
        train_collection = all_annotations.filter(lambda a: (
            a.user in {55502856} and len(a.term) > 0 and a.term[0] in
            {35777351, 35777321, 35777459} and a.image not in val_test_ids))
        val_rois = all_annotations.filter(
            lambda a: (a.user in {142954314} and a.image in val_ids and len(
                a.term) > 0 and a.term[0] in {154890363}))
        val_foreground = all_annotations.filter(
            lambda a: (a.user in {142954314} and a.image in val_ids and len(
                a.term) > 0 and a.term[0] in {154005477}))

        train_wsi_ids = list({an.image
                              for an in all_annotations
                              }.difference(val_test_ids))
        val_wsi_ids = list(val_ids)

        download_path = os.path.join(args.data_path,
                                     "crops-{}".format(args.tile_size))
        images = {
            _id: ImageInstance().fetch(_id)
            for _id in (train_wsi_ids + val_wsi_ids)
        }

        train_crops = [
            AnnotationCrop(images[annot.image],
                           annot,
                           download_path,
                           args.tile_size,
                           zoom_level=args.zoom_level)
            for annot in train_collection
        ]
        val_crops = [
            AnnotationCrop(images[annot.image],
                           annot,
                           download_path,
                           args.tile_size,
                           zoom_level=args.zoom_level) for annot in val_rois
        ]

        for crop in train_crops + val_crops:
            crop.download()

        np.random.seed(42)
        dataset = RemoteAnnotationTrainDataset(
            train_crops, seg_trans=segmentation_transform)
        loader = DataLoader(dataset,
                            shuffle=True,
                            batch_size=args.batch_size,
                            num_workers=args.n_jobs,
                            worker_init_fn=worker_init)

        # network
        device = torch.device(args.device)
        unet = Unet(args.init_fmaps, n_classes=1)
        unet.train()
        unet.to(device)

        optimizer = Adam(unet.parameters(), lr=args.lr)
        loss_fn = BCEWithLogitsLoss(reduction="mean")

        results = {
            "train_losses": [],
            "val_losses": [],
            "val_metrics": [],
            "save_path": []
        }

        for e in range(args.epochs):
            print("########################")
            print("        Epoch {}".format(e))
            print("########################")

            epoch_losses = list()
            unet.train()
            for i, (x, y) in enumerate(loader):
                x, y = (t.to(device) for t in [x, y])
                y_pred = unet.forward(x)
                loss = loss_fn(y_pred, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                epoch_losses = [loss.detach().cpu().item()] + epoch_losses[:5]
                print("{} - {:1.5f}".format(i, np.mean(epoch_losses)))
                results["train_losses"].append(epoch_losses[0])

            unet.eval()
            # validation
            val_losses = np.zeros(len(val_rois), dtype=float)
            val_roc_auc = np.zeros(len(val_rois), dtype=float)
            val_cm = np.zeros([len(val_rois), 2, 2], dtype=int)

            for i, roi in enumerate(val_crops):
                foregrounds = find_intersecting_annotations(
                    roi.annotation, val_foreground)
                with torch.no_grad():
                    y_pred, y_true = predict_roi(
                        roi,
                        foregrounds,
                        unet,
                        device,
                        in_trans=transforms.ToTensor(),
                        batch_size=args.batch_size,
                        tile_size=args.tile_size,
                        overlap=args.overlap,
                        n_jobs=args.n_jobs,
                        zoom_level=args.zoom_level)

                val_losses[i] = metrics.log_loss(y_true.flatten(),
                                                 y_pred.flatten())
                val_roc_auc[i] = metrics.roc_auc_score(y_true.flatten(),
                                                       y_pred.flatten())
                val_cm[i] = metrics.confusion_matrix(
                    y_true.flatten().astype(np.uint8),
                    (y_pred.flatten() > 0.5).astype(np.uint8))

            print("------------------------------")
            print("Epoch {}:".format(e))
            val_loss = np.mean(val_losses)
            roc_auc = np.mean(val_roc_auc)
            print("> val_loss: {:1.5f}".format(val_loss))
            print("> roc_auc : {:1.5f}".format(roc_auc))
            cm = np.sum(val_cm, axis=0)
            cnt = np.sum(val_cm)
            print("CM at 0.5 threshold")
            print("> {:3.2f}%  {:3.2f}%".format(100 * cm[0, 0] / cnt,
                                                100 * cm[0, 1] / cnt))
            print("> {:3.2f}%  {:3.2f}%".format(100 * cm[1, 0] / cnt,
                                                100 * cm[1, 1] / cnt))
            print("------------------------------")

            filename = "{}_e_{}_val_{:0.4f}_roc_{:0.4f}_z{}_s{}.pth".format(
                datetime.now().timestamp(), e, val_loss, roc_auc,
                args.zoom_level, args.tile_size)
            torch.save(unet.state_dict(), os.path.join(args.save_path,
                                                       filename))

            results["val_losses"].append(val_loss)
            results["val_metrics"].append(roc_auc)
            results["save_path"].append(filename)

        return results
Code example #13
def run(debug=False):
    """
    Gets a project image from Cytomine

    Args:
        debug (bool): If True, save annotations individually and plot any error

    Example:
      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'dadb7d7a-5822-48f7-ab42-59bce27750ae' --cytomine_private_key 'd73f4602-51d2-4d15-91e4-d4cc175d65fd' --cytomine_id_project 187 --cytomine_id_image_instance 375 --cytomine_id_software 228848

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_project 154 --cytomine_id_image_instance 3643

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 2140 --cytomine_id_software 2633

      docker run --gpus all -it --rm --mount type=bind,source=/home/giussepi/Public/environments/Cytomine/cyto_CRLM/,target=/CRLM,bind-propagation=private --network=host ttt --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 31296 --cytomine_id_software 79732
    """

    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine connection parameters
    parser.add_argument('--cytomine_host',
                        dest='host',
                        default='demo.cytomine.be',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key',
                        dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key',
                        dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project',
                        dest='id_project',
                        help="The project from which we want the images")
    parser.add_argument('--cytomine_id_software',
                        dest='id_software',
                        help="The software to be used to process the image")
    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image to which the annotation will be added")

    params, _ = parser.parse_known_args(sys.argv[1:])

    with CytomineJob.from_cli(sys.argv[1:]) as cytomine:
        # TODO: To be tested on TITANx
        img = ImageInstance().fetch(params.id_image_instance)
        download_image(img)
        process_wsi_and_save(get_container_image_path(img))
        new_annotations = generate_polygons(get_container_image_path(img),
                                            adapt_to_cytomine=True)
        annotation_collection = None

        for label_key in new_annotations:
            # Sending annotation batches to the server
            for sub_list in chunks(new_annotations[label_key],
                                   ANNOTATION_BATCH):
                if not debug:
                    annotation_collection = AnnotationCollection()

                for exterior_points in sub_list:
                    if debug:
                        annotation_collection = AnnotationCollection()

                    annotation_collection.append(
                        Annotation(location=Polygon(
                            exterior_points.astype(int).reshape(
                                exterior_points.shape[0],
                                exterior_points.shape[2]).tolist()).wkt,
                                   id_image=params.id_image_instance,
                                   id_project=params.id_project,
                                   id_terms=[CYTOMINE_LABELS[label_key]]))

                    if debug:
                        try:
                            annotation_collection.save()
                        except Exception as e:
                            print(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0],
                                    exterior_points.shape[2]).tolist())
                            plt.plot(*Polygon(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0], exterior_points.
                                    shape[2])).exterior.coords.xy)
                            plt.show()
                            # raise(e)
                            print(e)
                        finally:
                            time.sleep(1)

                if not debug:
                    annotation_collection.save()
                    time.sleep(ANNOTATION_SLEEP_TIME)

        # Adding pie chart labels data as image property
        # TODO: Change delete_results_file to True for final test on titanX
        num_pixels_per_label = get_pie_chart_data(
            get_container_image_path(img), delete_results_file=False)

        for percentage, label_ in zip(num_pixels_per_label, Label.names):
            Property(img, key=label_, value='{}%'.format(percentage)).save()

        remove_image_local_copy(img)

        cytomine.job.update(statusComment="Finished.")
Code example #14
        dest='id_annotation',
        required=False,
        help="The annotation to which the property will be added (optional)")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key) as cytomine:

        if params.id_project:
            prop = Property(Project().fetch(params.id_project),
                            key=params.key,
                            value=params.value).save()
            print(prop)

        if params.id_image_instance:
            prop = Property(ImageInstance().fetch(params.id_image_instance),
                            key=params.key,
                            value=params.value).save()
            print(prop)

        if params.id_annotation:
            prop = Property(Annotation().fetch(params.id_annotation),
                            key=params.key,
                            value=params.value).save()
            print(prop)
        """
        You can add a property to any Cytomine domain.
        You can also attach a file (see AttachedFile) or add a description (see Description) to any Cytomine domain.
        """