Code Example #1
def create_annotation_from_slice(_slice,
                                 id_image,
                                 image_height,
                                 id_project,
                                 label=None,
                                 upload_group_id=False):
    """
    Parameters
    ----------
    _slice: AnnotationSlice
    id_image: int
    image_height: int
    id_project: int
    label: int
    upload_group_id: bool

    Returns
    -------
    annotation: Annotation
        An annotation which is NOT saved
    """
    parameters = {
        "location": change_referential(_slice.polygon, image_height).wkt,
        "id_image": id_image,
        "id_project": id_project,
    }
    if upload_group_id:
        parameters["property"] = [{
            "key":
            "label",
            "value":
            _slice.label if label is None else label
        }]
    return Annotation(**parameters)
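
The returned Annotation is deliberately left unsaved; a typical caller collects the results in an AnnotationCollection and persists them in a single request. A minimal usage sketch, assuming an open Cytomine connection and an iterable of AnnotationSlice objects named slices (the identifiers below are placeholders, not values from the code above):

from cytomine.models import AnnotationCollection

collection = AnnotationCollection()
for _slice in slices:
    collection.append(
        create_annotation_from_slice(_slice,
                                     id_image=42,         # hypothetical image id
                                     image_height=2048,   # hypothetical image height
                                     id_project=7))       # hypothetical project id
collection.save()  # the annotations are only persisted here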
Code Example #2
def _load_rectangles(job: Job, image_id: str, term: int,
                     detections: dict) -> None:

    progress = 10
    job.update(
        progress=progress,
        status=Job.RUNNING,
        statusComment=
        f"Uploading detections of type rectangles to image {image_id} with terms {term}"
    )

    rectangles = _generate_rectangles(detections)

    # Upload annotations to server
    delta = 85 / len(rectangles)
    annotations = AnnotationCollection()
    for rectangle in rectangles:
        annotations.append(
            Annotation(location=rectangle.wkt,
                       id_image=image_id,
                       id_terms=[term]))
        progress += delta
        job.update(progress=int(progress), status=Job.RUNNING)

    annotations.save()
    progress = 100
    job.update(progress=progress,
               status=Job.TERMINATED,
               statusComment="All detections have been uploaded")
Code Example #3
def create_track_from_slices(image,
                             slices,
                             depth2slice,
                             id_project,
                             track_prefix="object",
                             label=None,
                             upload_group_id=False,
                             depth="time"):
    """Create an annotation track from a list of AnnotationSlice
    Parameters
    ----------
    image: ImageInstance
        The image instance in which the track is added
    slices: iterable (of AnnotationSlice)
        The polygon slices of the objects to draw
    depth2slice: dict
        A dictionary mapping the depths of the image instance to their respective SliceInstance
    id_project: int
        Project identifier
    track_prefix: str (default: "object")
        A prefix for the track name
    label: int|str (default: None)
        A label for the track
    upload_group_id: bool
        True to upload the group identifier
    depth: str
        Which depth field to read in the AnnotationSlice if both are present. One of {'time', 'depth'}.

    Returns
    -------
    track: Track
        The saved track object
    annotations: AnnotationCollection
        The annotations associated with the track. The collection is NOT saved.
    """
    if label is None and len(slices) > 0:
        label = slices[0].label
    track = Track(name="{}-{}".format(track_prefix, label),
                  id_image=image.id,
                  color=None if upload_group_id else DEFAULT_COLOR).save()

    if upload_group_id:
        Property(track, key="label", value=label).save()

    collection = AnnotationCollection()
    for _slice in slices:
        collection.append(
            Annotation(
                location=change_referential(p=_slice.polygon,
                                            height=image.height).wkt,
                id_image=image.id,
                id_project=id_project,
                id_tracks=[track.id],
                slice=depth2slice[_slice.depth if _slice.time is None
                                  or depth == "depth" else _slice.time].id))
    return track, collection
Code Example #4
def create_annotation_from_location(location, id_image, image_height,
                                    id_project):
    def change_referential(p, height):
        return affine_transform(p, [1, 0, 0, -1, 0, height])

    parameters = {
        "location": change_referential(location, image_height).wkt,
        "id_image": id_image,
        "id_project": id_project,
    }

    return Annotation(**parameters)
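
The affine matrix [1, 0, 0, -1, 0, height] keeps x unchanged and maps y to height - y, i.e. it converts between the image's top-left origin and Cytomine's bottom-left referential. A small sketch of the flip, with made-up coordinates:

from shapely.geometry import Point
from shapely.affinity import affine_transform

image_height = 1000
p = Point(120, 30)  # (x, y) in a top-left referential
flipped = affine_transform(p, [1, 0, 0, -1, 0, image_height])
print(flipped.y)  # 970.0 -> same point measured from the bottom edge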
Code Example #5
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size,
                              job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(img_instance=ImageInstance().fetch(
            job.parameters.cytomine_id_image),
                              zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon,
                             slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}
Code Example #6
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        if not os.path.exists(job.parameters.working_path):
            os.makedirs(job.parameters.working_path)

        # create workflow component
        logger = StandardOutputLogger(Logger.INFO)
        random_state = check_random_state(int(job.parameters.rseed))
        tile_builder = CytomineTileBuilder(
            working_path=job.parameters.working_path)
        segmenter = DemoSegmenter(job.parameters.threshold)
        area_rule = ValidAreaRule(job.parameters.min_area)
        classifier = PyxitClassifierAdapter.build_from_pickle(
            job.parameters.pyxit_model_path,
            tile_builder,
            logger,
            random_state=random_state,
            n_jobs=job.parameters.n_jobs,
            working_path=job.parameters.working_path)

        builder = SLDCWorkflowBuilder()
        builder.set_n_jobs(job.parameters.n_jobs)
        builder.set_logger(logger)
        builder.set_overlap(job.parameters.sldc_tile_overlap)
        builder.set_tile_size(job.parameters.sldc_tile_width,
                              job.parameters.sldc_tile_height)
        builder.set_tile_builder(tile_builder)
        builder.set_segmenter(segmenter)
        builder.add_classifier(area_rule,
                               classifier,
                               dispatching_label="valid")
        workflow = builder.get()

        slide = CytomineSlide(job.parameters.cytomine_image_id)
        results = workflow.process(slide)

        # Upload results
        for polygon, label, proba, dispatch in results:
            if label is not None:
                # if image is a window, the polygon must be translated
                if isinstance(slide, ImageWindow):
                    polygon = translate(polygon, slide.abs_offset_x,
                                        slide.abs_offset_y)
                # upload the annotation
                polygon = affine_transform(
                    polygon, [1, 0, 0, -1, 0, slide.image_instance.height])
                annotation = Annotation(
                    location=polygon.wkt,
                    id_image=slide.image_instance.id).save()
                AlgoAnnotationTerm(id_annotation=annotation.id,
                                   id_term=label,
                                   rate=float(proba)).save()
Code Example #7
def extract_images_or_rois(parameters):
    id_annotations = parse_domain_list(parameters.cytomine_roi_annotations)
    # if ROI annotations are provided
    if len(id_annotations) > 0:
        image_cache = dict()  # maps ImageInstance id with CytomineSlide object
        zones = list()
        for id_annot in id_annotations:
            annotation = Annotation().fetch(id_annot)
            if annotation.image not in image_cache:
                image_cache[annotation.image] = CytomineSlide(
                    annotation.image, parameters.cytomine_zoom_level)
            window = get_iip_window_from_annotation(
                image_cache[annotation.image], annotation,
                parameters.cytomine_zoom_level)
            zones.append(window)
        return zones

    # work at image level or ROIs by term
    images = ImageInstanceCollection()
    if parameters.cytomine_id_images is not None:
        id_images = parse_domain_list(parameters.cytomine_id_images)
        images.extend([ImageInstance().fetch(_id) for _id in id_images])
    else:
        images = images.fetch_with_filter("project",
                                          parameters.cytomine_id_project)

    slides = [
        CytomineSlide(img, parameters.cytomine_zoom_level) for img in images
    ]
    if parameters.cytomine_id_roi_term is None:
        return slides

    # fetch ROI annotations
    collection = AnnotationCollection(
        terms=[parameters.cytomine_id_roi_term],
        reviewed=parameters.cytomine_reviewed_roi,
        showWKT=True)
    collection.fetch_with_filter(project=parameters.cytomine_id_project)
    slides_map = {slide.image_instance.id: slide for slide in slides}
    regions = list()
    for annotation in collection:
        if annotation.image not in slides_map:
            continue
        slide = slides_map[annotation.image]
        regions.append(
            get_iip_window_from_annotation(slide, annotation,
                                           parameters.cytomine_zoom_level))

    return regions
Code Example #8
def _load_polygons(job: Job, image_id: str, term: int,
                   detections: dict) -> None:

    progress = 10
    job.update(
        progress=progress,
        status=Job.RUNNING,
        statusComment=
        f"Uploading detections of type polygons to image {image_id} with terms {term}"
    )

    polygons = _generate_polygons(detections)

    annotation = Annotation(location=polygons.wkt,
                            id_image=image_id,
                            id_terms=[term]).save()

    progress = 85
    job.update(progress=int(progress), status=Job.RUNNING)

    progress = 100
    job.update(progress=progress,
               status=Job.TERMINATED,
               statusComment="All detections have been uploaded")
Code Example #9
def run(cyto_job, parameters):

    job = cyto_job.job
    project_id = cyto_job.project
    term_id = parameters.terms_list

    logging.info(f"########### Parameters = {str(parameters)}")
    logging.info(f"########### Term {str(term_id)}")
    logging.info(f"########### Project {str(project_id)}")

    annotations = AnnotationCollection()
    annotations.project = project_id
    annotations.terms = [term_id]
    annotations.fetch()

    progress = 0
    progress_delta = 1.0 / (1.50 * len(annotations))

    job.update(
        progress=progress,
        statusComment=f"Converting annotations from project {project_id}")

    new_annotations = AnnotationCollection()
    for a in annotations:
        if a.location is None:
            a.fetch()
        new_annotations.append(
            Annotation(a.location, a.image, a.term, a.project))
    new_annotations.save(chunk=None)

    job.update(progress=0.25, statusComment=f"Deleting old annotations...")

    for a in annotations:
        a.delete()
        progress += progress_delta
        job.update(progress=progress)
Code Example #10
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization...")
        # base_path = "{}".format(os.getenv("HOME")) # Mandatory for Singularity
        base_path = "/home/mmu/Desktop"
        working_path = os.path.join(base_path, str(conn.job.id))

        #Loading pre-trained Stardist model
        np.random.seed(17)
        lbl_cmap = random_label_cmap()
        #Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        #Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        model = StarDist2D(
            None, name='2D_versatile_HE', basedir='/models/'
        )  #use local model file in ~/models/2D_versatile_HE/

        #Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [
                int(id_img)
                for id_img in conn.parameters.cytomine_id_images.split(',')
            ]

        #Go over images
        for id_image in conn.monitor(list_imgs,
                                     prefix="Running detection on image",
                                     period=0.1):
            #Dump ROI annotations in img from Cytomine server to local images
            #conn.job.update(status=Job.RUNNING, progress=0, statusComment="Fetching ROI annotations...")
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_roi_term
            roi_annotations.image = id_image  #conn.parameters.cytomine_id_image
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)
            #Go over ROI in this image
            #for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                #Get Cytomine ROI coordinates for remapping to whole-slide
                #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print(
                    "----------------------------ROI------------------------------"
                )
                roi_geometry = wkt.loads(roi.location)
                print("ROI Geometry from Shapely: {}".format(roi_geometry))
                print("ROI Bounds")
                print(roi_geometry.bounds)
                minx = roi_geometry.bounds[0]
                miny = roi_geometry.bounds[3]
                #Dump ROI image into local PNG file
                roi_path = os.path.join(
                    working_path,
                    str(roi_annotations.project) + '/' +
                    str(roi_annotations.image) + '/' + str(roi.id))
                roi_png_filename = os.path.join(roi_path + '/' + str(roi.id) +
                                                '.png')
                print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, mask=True, alpha=True)
                #roi.dump(dest_pattern=os.path.join(roi_path,"{id}.png"), mask=True, alpha=True)

                #Stardist works with TIFF images without alpha channel, flattening PNG alpha mask to TIFF RGB
                im = Image.open(roi_png_filename)
                bg = Image.new("RGB", im.size, (255, 255, 255))
                bg.paste(im, mask=im.split()[3])
                roi_tif_filename = os.path.join(roi_path + '/' + str(roi.id) +
                                                '.tif')
                bg.save(roi_tif_filename, quality=100)
                X_files = sorted(glob(roi_path + '/' + str(roi.id) + '*.tif'))
                X = list(map(imread, X_files))
                n_channel = 3 if X[0].ndim == 3 else X[0].shape[-1]
                axis_norm = (0, 1)  # normalize channels independently; use (0, 1, 2) to normalize jointly
                if n_channel > 1:
                    print("Normalizing image channels %s." %
                          ('jointly' if axis_norm is None or 2 in axis_norm
                           else 'independently'))

                #Going over ROI images in ROI directory (in our case: one ROI per directory)
                for x in range(0, len(X)):
                    print("------------------- Processing ROI file %d: %s" %
                          (x, roi_tif_filename))
                    img = normalize(X[x],
                                    conn.parameters.stardist_norm_perc_low,
                                    conn.parameters.stardist_norm_perc_high,
                                    axis=axis_norm)
                    #Stardist model prediction with thresholds
                    labels, details = model.predict_instances(
                        img,
                        prob_thresh=conn.parameters.stardist_prob_t,
                        nms_thresh=conn.parameters.stardist_nms_t)
                    print("Number of detected polygons: %d" %
                          len(details['coord']))
                    cytomine_annotations = AnnotationCollection()
                    #Go over detections in this ROI, convert and upload to Cytomine
                    for pos, polygroup in enumerate(details['coord'], start=1):
                        #Converting to Shapely annotation
                        points = list()
                        for i in range(len(polygroup[0])):
                            #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                            #Mapping Stardist polygon detection coordinates to Cytomine ROI in whole slide image
                            p = Point(minx + polygroup[1][i],
                                      miny - polygroup[0][i])
                            points.append(p)

                        annotation = Polygon(points)
                        #Append to Annotation collection
                        cytomine_annotations.append(
                            Annotation(
                                location=annotation.wkt,
                                id_image=
                                id_image,  #conn.parameters.cytomine_id_image,
                                id_project=conn.parameters.cytomine_id_project,
                                id_terms=[
                                    conn.parameters.cytomine_id_cell_term
                                ]))
                        print(".", end='', flush=True)

                    #Send Annotation Collection (for this ROI) to Cytomine server in one http request
                    ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        progress=100,
                        statusComment="Finished.")
Code Example #11
def main(argv):
    base_path = str(Path.home())

    #Available filters
    filters = {
        'binary': BinaryFilter(),
        'adaptive': AdaptiveThresholdFilter(),
        'otsu': OtsuFilter()
    }

    #Connect to Cytomine
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        working_path = os.path.join(base_path, "data", str(cj.job.id))
        if not os.path.exists(working_path):
            os.makedirs(working_path)

        filter = filters.get(cj.parameters.cytomine_filter)

        #Initialize the reader to browse the whole image
        whole_slide = WholeSlide(
            cj.get_image_instance(cj.parameters.cytomine_id_image, True))
        reader = CytomineReader(whole_slide,
                                window_position=Bounds(
                                    0, 0, cj.parameters.cytomine_tile_size,
                                    cj.parameters.cytomine_tile_size),
                                zoom=cj.parameters.cytomine_zoom_level,
                                overlap=cj.parameters.cytomine_tile_overlap)
        reader.window_position = Bounds(0, 0, reader.window_position.width,
                                        reader.window_position.height)

        #Browse the slide using reader
        i = 0
        geometries = []
        cj.job.update(progress=1, status_comment="Browsing big image...")

        while True:
            #Read next tile
            reader.read()
            image = reader.data
            #Saving tile image locally
            tile_filename = "%s/image-%d-zoom-%d-tile-%d-x-%d-y-%d.png" % (
                working_path, cj.parameters.cytomine_id_image,
                cj.parameters.cytomine_zoom_level, i, reader.window_position.x,
                reader.window_position.y)
            image.save(tile_filename, "PNG")
            #Apply filtering
            cv_image = np.array(reader.result())
            filtered_cv_image = filter.process(cv_image)
            i += 1
            #Detect connected components
            components = ObjectFinder(filtered_cv_image).find_components()
            #Convert local coordinates (from the tile image) to global coordinates (the whole slide)
            components = whole_slide.convert_to_real_coordinates(
                components, reader.window_position, reader.zoom)
            geometries.extend(
                get_geometries(components, cj.parameters.cytomine_min_area,
                               cj.parameters.cytomine_max_area))

            #Upload annotations (geometries corresponding to connected components) to Cytomine core
            #Upload each geometry and predicted term
            annotations = AnnotationCollection()
            for geometry in geometries:
                pol = shapely.wkt.loads(geometry)
                if pol.is_valid:
                    annotations.append(
                        Annotation(
                            location=geometry,
                            id_image=cj.parameters.cytomine_id_image,
                            id_project=cj.parameters.cytomine_id_project,
                            id_terms=[
                                cj.parameters.cytomine_id_predicted_term
                            ]))
                #Batches of 100 annotations
                if len(annotations) % 100 == 0:
                    annotations.save()
                    annotations = AnnotationCollection()

            annotations.save()
            geometries = []
            if not reader.next(): break

        cj.job.update(progress=50,
                      status_comment=
                      "Detection done, starting Union over whole big image...")

        #Perform Union of geometries (because geometries are computed locally in each tile but objects (e.g. cell clusters) might overlap several tiles)
        host = cj.parameters.cytomine_host.replace("http://", "")
        unioncommand = "groovy -cp \"/lib/jars/*\" /app/union4.groovy http://%s %s %s %d %d %d %d %d %d %d %d %d %d" % (
            host,
            cj._public_key,
            cj._private_key,
            cj.parameters.cytomine_id_image,
            cj.job.userJob,
            cj.parameters.cytomine_id_predicted_term,  #union_term
            cj.parameters.cytomine_union_min_length,  #union_minlength,
            cj.parameters.cytomine_union_bufferoverlap,  #union_bufferoverlap,
            cj.parameters.
            cytomine_union_min_point_for_simplify,  #union_minPointForSimplify,
            cj.parameters.cytomine_union_min_point,  #union_minPoint,
            cj.parameters.cytomine_union_max_point,  #union_maxPoint,
            cj.parameters.cytomine_union_nb_zones_width,  #union_nbzonesWidth,
            cj.parameters.cytomine_union_nb_zones_height
        )  #union_nbzonesHeight)

        os.chdir(base_path)
        print(unioncommand)
        os.system(unioncommand)

    cj.job.update(status=Job.TERMINATED,
                  progress=100,
                  statusComment="Finished.")
Code Example #12
    cj.job.update(progress=25, statusComment="Launching workflow...")

    command = "/icy/run.sh {} {}".format(in_path, params.scale3sens)
    call(command, shell=True)

    cj.job.update(progress=60, statusComment="Extracting polygons...")

    for image in images:
        file = str(image.id) + "_results.txt"
        path = in_path + "/" + file
        if (os.path.isfile(path)):
            (X, Y) = readcoords(path)
            for i in range(len(X)):
                circle = Point(X[i], image.height - Y[i])
                new_annotation = Annotation(location=circle.wkt,
                                            id_image=image.id).save()
        else:
            print(path + " does not exist")

    cj.job.update(progress=99, statusComment="Cleaning...")
    for image in images:
        file = str(image.id) + ".tif"
        #path = outDir + "/" + file
        #os.remove(path);
        path = in_path + "/" + file
        os.remove(path)
        path = in_path + "/" + str(image.id) + "_results.txt"
        os.remove(path)

    cj.job.update(status=Job.TERMINATED,
                  progress=100)
Code Example #13
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(progress=1, statusComment="Initialisation")
        cj.log(str(cj.parameters))

        term_ids = [cj.parameters.cytomine_id_predicted_term] \
            if hasattr(cj.parameters, "cytomine_id_predicted_term") else None

        image_ids = [
            int(image_id)
            for image_id in cj.parameters.cytomine_id_images.split(",")
        ]
        images = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        images = [image for image in images if image.id in image_ids]

        tile_size = cj.parameters.tile_size
        tile_overlap = cj.parameters.tile_overlap
        filter_func = _get_filter(cj.parameters.filter)
        projection = cj.parameters.projection
        if projection not in ('min', 'max', 'average'):
            raise ValueError("Projection {} is not found".format(projection))

        cj.log("Filter: {}".format(cj.parameters.filter))
        cj.log("Projection: {}".format(projection))
        for image in cj.monitor(images,
                                prefix="Running detection on image",
                                start=5,
                                end=99):

            def worker_tile_func(tile):
                window = tile.np_image
                threshold = filter_func(window)
                return window, threshold

            cj.log("Get tiles for image {}".format(image.instanceFilename))
            sldc_image = CytomineProjectionSlide(image, projection)
            tile_builder = CytomineProjectionTileBuilder("/tmp")
            topology = sldc_image.tile_topology(tile_builder, tile_size,
                                                tile_size, tile_overlap)

            results = generic_parallel(topology, worker_tile_func)
            thresholds = list()
            for result in results:
                tile, output = result
                window, threshold = output
                thresholds.append(threshold)

            global_threshold = int(np.mean(thresholds))
            cj.log("Mean threshold is {}".format(global_threshold))

            def worker_annotations_func(tile):
                filtered = img_as_uint(tile.np_image > global_threshold)
                return mask_to_objects_2d(filtered, offset=tile.abs_offset)

            cj.log(
                "Extract annotations from filtered tiles for image {}".format(
                    image.instanceFilename))
            results = generic_parallel(topology, worker_annotations_func)
            ids, geometries = list(), list()
            for result in results:
                tile, tile_geometries = result
                # Workaround for slow SemanticMerger but geometries shouldn't be filtered at this stage.
                tile_geometries = [
                    g for g in tile_geometries
                    if g.area > cj.parameters.min_area
                ]
                ids.append(tile.identifier)
                geometries.append(tile_geometries)

            cj.log("Merge annotations from filtered tiles for image {}".format(
                image.instanceFilename))
            merged_geometries = SemanticMerger(tolerance=1).merge(
                ids, geometries, topology)
            cj.log("{} merged geometries".format(len(merged_geometries)))

            if cj.parameters.annotation_slices == 'median':
                # By default, if no slice is given, an annotation is added to the median slice
                slice_ids = [None]
            else:
                slices = SliceInstanceCollection().fetch_with_filter(
                    "imageinstance", image.id)
                if cj.parameters.annotation_slices == 'first':
                    slice_ids = [slices[0].id]
                else:
                    slice_ids = [sl.id for sl in slices]

            ac = AnnotationCollection()
            for geometry in merged_geometries:
                if geometry.area > cj.parameters.min_area:
                    for slice_id in slice_ids:
                        ac.append(
                            Annotation(location=change_referential(
                                geometry, image.height).wkt,
                                       id_image=image.id,
                                       id_terms=term_ids,
                                       id_slice=slice_id))
            ac.save()

        cj.job.update(statusComment="Finished.", progress=100)
Code Example #14
                        help="The image to which the annotation will be added")
    parser.add_argument(
        '--cytomine_id_term',
        dest='id_term',
        required=False,
        help="The term to associate to the annotations (optional)")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:

        # We first add a point in (10,10) where (0,0) is bottom-left corner
        point = Point(10, 10)
        annotation_point = Annotation(
            location=point.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_point.id, params.id_term).save()

        # Then, we add a rectangle as annotation
        rectangle = box(20, 20, 100, 100)
        annotation_rectangle = Annotation(
            location=rectangle.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_rectangle.id, params.id_term).save()

        # We can also add a property (key-value pair) to an annotation
        Property(annotation_rectangle, key="my_property", value=10).save()

        # Print the list of annotations in the given image:
        annotations = AnnotationCollection()
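
The listing itself is not part of this excerpt; elsewhere in this collection (Code Examples #9 and #10) it is done by setting filters on the AnnotationCollection and calling fetch(). A minimal sketch of that pattern, reusing the placeholder parameters above:

annotations = AnnotationCollection()
annotations.image = params.id_image_instance
annotations.showWKT = True
annotations.fetch()
for annotation in annotations:
    print(annotation.id, annotation.location)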
Code Example #15
        dest='id_annotation',
        required=False,
        help="The annotation to which the property will be added (optional)")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key) as cytomine:

        if params.id_project:
            prop = Property(Project().fetch(params.id_project),
                            key=params.key,
                            value=params.value).save()
            print(prop)

        if params.id_image_instance:
            prop = Property(ImageInstance().fetch(params.id_image_instance),
                            key=params.key,
                            value=params.value).save()
            print(prop)

        if params.id_annotation:
            prop = Property(Annotation().fetch(params.id_annotation),
                            key=params.key,
                            value=params.value).save()
            print(prop)
        """
        You can add a property to any Cytomine domain.
        You can also attach a file (see AttachedFile) or add a description (see Description) to any Cytomine domain.
        """
Code Example #16
def run(debug=False):
    """
    Gets the project image from Cytomine

    Args:
        debug (bool): If True, annotations are saved individually and any error is plotted

    Example:
      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'dadb7d7a-5822-48f7-ab42-59bce27750ae' --cytomine_private_key 'd73f4602-51d2-4d15-91e4-d4cc175d65fd' --cytomine_id_project 187 --cytomine_id_image_instance 375 --cytomine_id_software 228848

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_project 154 --cytomine_id_image_instance 3643

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 2140 --cytomine_id_software 2633

      docker run --gpus all -it --rm --mount type=bind,source=/home/giussepi/Public/environments/Cytomine/cyto_CRLM/,target=/CRLM,bind-propagation=private --network=host ttt --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 31296 --cytomine_id_software 79732
    """

    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine connection parameters
    parser.add_argument('--cytomine_host',
                        dest='host',
                        default='demo.cytomine.be',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key',
                        dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key',
                        dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project',
                        dest='id_project',
                        help="The project from which we want the images")
    parser.add_argument('--cytomine_id_software',
                        dest='id_software',
                        help="The software to be used to process the image")
    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image to which the annotation will be added")

    params, _ = parser.parse_known_args(sys.argv[1:])

    with CytomineJob.from_cli(sys.argv[1:]) as cytomine:
        # TODO: To be tested on TITANx
        img = ImageInstance().fetch(params.id_image_instance)
        download_image(img)
        process_wsi_and_save(get_container_image_path(img))
        new_annotations = generate_polygons(get_container_image_path(img),
                                            adapt_to_cytomine=True)
        annotation_collection = None

        for label_key in new_annotations:
            # Sending annotation batches to the server
            for sub_list in chunks(new_annotations[label_key],
                                   ANNOTATION_BATCH):
                if not debug:
                    annotation_collection = AnnotationCollection()

                for exterior_points in sub_list:
                    if debug:
                        annotation_collection = AnnotationCollection()

                    annotation_collection.append(
                        Annotation(location=Polygon(
                            exterior_points.astype(int).reshape(
                                exterior_points.shape[0],
                                exterior_points.shape[2]).tolist()).wkt,
                                   id_image=params.id_image_instance,
                                   id_project=params.id_project,
                                   id_terms=[CYTOMINE_LABELS[label_key]]))

                    if debug:
                        try:
                            annotation_collection.save()
                        except Exception as e:
                            print(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0],
                                    exterior_points.shape[2]).tolist())
                            plt.plot(*Polygon(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0], exterior_points.
                                    shape[2])).exterior.coords.xy)
                            plt.show()
                            # raise(e)
                            print(e)
                        finally:
                            time.sleep(1)

                if not debug:
                    annotation_collection.save()
                    time.sleep(ANNOTATION_SLEEP_TIME)

        # Adding pie chart labels data as image property
        # TODO: Change delete_results_file to True for final test on titanX
        num_pixels_per_label = get_pie_chart_data(
            get_container_image_path(img), delete_results_file=False)

        for percentage, label_ in zip(num_pixels_per_label, Label.names):
            Property(img, key=label_, value='{}%'.format(percentage)).save()

        remove_image_local_copy(img)

        cytomine.job.update(statusComment="Finished.")
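
chunks and the other project-specific helpers (download_image, generate_polygons, get_container_image_path, ...) are not part of the excerpt. For reference, a batching helper compatible with the call above could look like the sketch below; this is an assumption about the helper, not its actual implementation:

def chunks(items, size):
    """Yield successive batches of at most `size` elements from a list."""
    for start in range(0, len(items), size):
        yield items[start:start + size]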
Code Example #17
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(progress=1, statusComment="Preparing execution")

        # extract images to process
        if cj.parameters.cytomine_zoom_level > 0 and (
                cj.parameters.cytomine_tile_size != 256
                or cj.parameters.cytomine_tile_overlap != 0):
            raise ValueError(
                "when using zoom_level > 0, tile size should be 256 "
                "(given {}) and overlap should be 0 (given {})".format(
                    cj.parameters.cytomine_tile_size,
                    cj.parameters.cytomine_tile_overlap))

        cj.job.update(
            progress=1,
            statusComment="Preparing execution (creating folders,...).")
        # working path
        root_path = str(Path.home())
        working_path = os.path.join(root_path, "images")
        os.makedirs(working_path, exist_ok=True)

        # load training information
        cj.job.update(progress=5,
                      statusComment="Extract properties from training job.")
        train_job = Job().fetch(cj.parameters.cytomine_id_job)
        properties = PropertyCollection(train_job).fetch().as_dict()
        binary = str2bool(properties["binary"].value)
        classes = parse_domain_list(properties["classes"].value)

        cj.job.update(progress=10, statusComment="Download the model file.")
        attached_files = AttachedFileCollection(train_job).fetch()
        model_file = attached_files.find_by_attribute("filename",
                                                      "model.joblib")
        model_filepath = os.path.join(root_path, "model.joblib")
        model_file.download(model_filepath, override=True)
        pyxit = joblib.load(model_filepath)

        # set n_jobs
        pyxit.base_estimator.n_jobs = cj.parameters.n_jobs
        pyxit.n_jobs = cj.parameters.n_jobs

        cj.job.update(progress=45, statusComment="Build workflow.")
        builder = SSLWorkflowBuilder()
        builder.set_tile_size(cj.parameters.cytomine_tile_size,
                              cj.parameters.cytomine_tile_size)
        builder.set_overlap(cj.parameters.cytomine_tile_overlap)
        builder.set_tile_builder(
            CytomineTileBuilder(working_path, n_jobs=cj.parameters.n_jobs))
        builder.set_logger(StandardOutputLogger(level=Logger.INFO))
        builder.set_n_jobs(1)
        builder.set_background_class(0)
        # a value of 0 prevents merging but still requires running the merging check
        # procedure (inefficient)
        builder.set_distance_tolerance(2 if cj.parameters.union_enabled else 0)
        builder.set_segmenter(
            ExtraTreesSegmenter(
                pyxit=pyxit,
                classes=classes,
                prediction_step=cj.parameters.pyxit_prediction_step,
                background=0,
                min_std=cj.parameters.tile_filter_min_stddev,
                max_mean=cj.parameters.tile_filter_max_mean))
        workflow = builder.get()

        area_checker = AnnotationAreaChecker(
            min_area=cj.parameters.min_annotation_area,
            max_area=cj.parameters.max_annotation_area)

        def get_term(label):
            if binary:
                if "cytomine_id_predict_term" not in cj.parameters:
                    return []
                else:
                    return [int(cj.parameters.cytomine_id_predict_term)]
            # multi-class
            return [label]

        zones = extract_images_or_rois(cj.parameters)
        for zone in cj.monitor(zones,
                               start=50,
                               end=90,
                               period=0.05,
                               prefix="Segmenting images/ROIs"):
            results = workflow.process(zone)

            annotations = AnnotationCollection()
            for obj in results:
                if not area_checker.check(obj.polygon):
                    continue
                polygon = obj.polygon
                if isinstance(zone, ImageWindow):
                    polygon = affine_transform(
                        polygon,
                        [1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y])
                polygon = change_referential(polygon, zone.base_image.height)
                if cj.parameters.cytomine_zoom_level > 0:
                    zoom_mult = (2**cj.parameters.cytomine_zoom_level)
                    polygon = affine_transform(
                        polygon, [zoom_mult, 0, 0, zoom_mult, 0, 0])
                annotations.append(
                    Annotation(location=polygon.wkt,
                               id_terms=get_term(obj.label),
                               id_project=cj.project.id,
                               id_image=zone.base_image.image_instance.id))
            annotations.save()

        cj.job.update(status=Job.TERMINATED,
                      status_comment="Finish",
                      progress=100)
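
Detections computed at a reduced zoom level are rescaled to full-resolution pixel coordinates before upload; the multiplier is 2**zoom_level because each Cytomine zoom level halves the resolution. An illustrative sketch with made-up numbers:

from shapely.geometry import box
from shapely.affinity import affine_transform

zoom_level = 2
zoom_mult = 2 ** zoom_level          # 4: two halvings of the resolution
detected = box(10, 10, 20, 30)       # polygon in zoom-level-2 coordinates
full_res = affine_transform(detected, [zoom_mult, 0, 0, zoom_mult, 0, 0])
print(full_res.bounds)               # (40.0, 40.0, 80.0, 120.0)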
Code Example #18
    parser.add_argument('--key', help="the property key")
    parser.add_argument('--value', help="the property value")

    parser.add_argument('--cytomine_id_project', dest='id_project', required=False,
                        help="The project to which the property will be added (optional)")
    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance', required=False,
                        help="The image to which the property will be added (optional)")
    parser.add_argument('--cytomine_id_annotation', dest='id_annotation', required=False,
                        help="The annotation to which the property will be added (optional)")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:

        if params.id_project:
            prop = Property(Project().fetch(params.id_project), key=params.key, value=params.value).save()
            print(prop)

        if params.id_image_instance:
            prop = Property(ImageInstance().fetch(params.id_image_instance), key=params.key, value=params.value).save()
            print(prop)

        if params.id_annotation:
            prop = Property(Annotation().fetch(params.id_annotation), key=params.key, value=params.value).save()
            print(prop)

        """
        You can add a property to any Cytomine domain.
        You can also attach a file (see AttachedFile) or add a description (see Description) to any Cytomine domain.
        """
Code Example #19
    def end_successful_import(self, path: Path, image: Image, *args, **kwargs):
        uf = self.get_uf(path)

        ai = AbstractImage()
        ai.uploadedFile = uf.id
        ai.originalFilename = uf.originalFilename
        ai.width = image.width
        ai.height = image.height
        ai.depth = image.depth
        ai.duration = image.duration
        ai.channels = image.n_intrinsic_channels
        ai.extrinsicChannels = image.n_channels
        if image.physical_size_x:
            ai.physicalSizeX = round(
                convert_quantity(image.physical_size_x, "micrometers"), 6)
        if image.physical_size_y:
            ai.physicalSizeY = round(
                convert_quantity(image.physical_size_y, "micrometers"), 6)
        if image.physical_size_z:
            ai.physicalSizeZ = round(
                convert_quantity(image.physical_size_z, "micrometers"), 6)
        if image.frame_rate:
            ai.fps = round(convert_quantity(image.frame_rate, "Hz"), 6)
        ai.magnification = parse_int(image.objective.nominal_magnification)
        ai.bitPerSample = dtype_to_bits(image.pixel_type)
        ai.samplePerPixel = image.n_channels / image.n_intrinsic_channels
        ai.save()
        self.abstract_images.append(ai)

        asc = AbstractSliceCollection()
        set_channel_names = image.n_intrinsic_channels == image.n_channels
        for c in range(image.n_intrinsic_channels):
            name = None
            color = None
            if set_channel_names:
                name = image.channels[c].suggested_name
                color = image.channels[c].hex_color
            for z in range(image.depth):
                for t in range(image.duration):
                    mime = "image/pyrtiff"  # TODO: remove
                    asc.append(
                        AbstractSlice(ai.id,
                                      uf.id,
                                      mime,
                                      c,
                                      z,
                                      t,
                                      channelName=name,
                                      channelColor=color))
        asc.save()

        properties = PropertyCollection(ai)
        for metadata in image.raw_metadata.values():
            if metadata.value is not None and str(metadata.value) != '':
                properties.append(
                    Property(ai, metadata.namespaced_key, str(metadata.value)))
        try:
            properties.save()
        except CollectionPartialUploadException:
            pass  # TODO: improve handling of this exception, but prevent to fail the import

        uf.status = UploadedFile.DEPLOYED
        uf.update()

        properties = PropertyCollection(ai)
        for k, v in self.user_properties:
            if v is not None and str(v) != '':
                properties.append(Property(ai, k, v))
        try:
            properties.save()
        except CollectionPartialUploadException:
            pass  # TODO: improve handling of this exception, but prevent to fail the import

        instances = []
        for p in self.projects:
            instances.append(ImageInstance(ai.id, p.id).save())
        self.images.append((ai, instances))

        # TODO: temporary add annotations for backwards compatibility.
        #  BUT it should be done by core when an image instance is created.
        if image.n_planes == 1 and len(instances) > 0:
            # TODO: currently only supports metadata annots on 2D images

            metadata_annots = image.annotations
            if len(metadata_annots) > 0:
                metadata_terms = [
                    ma.terms for ma in metadata_annots if len(ma.terms) > 0
                ]
                metadata_terms = set(flatten(metadata_terms))

                for instance in instances:
                    project_id = instance.project
                    project = self.projects.find_by_attribute('id', project_id)
                    ontology_id = project.ontology  # noqa
                    ontology_terms = TermCollection().fetch_with_filter(
                        "project", project_id)
                    terms_id_mapping = {t.name: t.id for t in ontology_terms}

                    for metadata_term in metadata_terms:
                        if metadata_term not in terms_id_mapping:
                            # TODO: user must have ontology rights !
                            term = Term(name=metadata_term,
                                        id_ontology=ontology_id,
                                        color="#AAAAAA").save()
                            terms_id_mapping[term.name] = term.id

                    annots = AnnotationCollection()
                    for metadata_annot in metadata_annots:
                        term_ids = [
                            terms_id_mapping[t] for t in metadata_annot.terms
                        ]
                        properties = [
                            dict(key=k, value=v)
                            for k, v in metadata_annot.properties.items()
                        ]
                        annots.append(
                            Annotation(location=metadata_annot.wkt,
                                       id_image=instance.id,
                                       id_terms=term_ids
                                       if len(term_ids) > 0 else None,
                                       properties=properties
                                       if len(properties) > 0 else None,
                                       user=uf.user))

                    try:
                        annots.save()
                    except CollectionPartialUploadException:
                        pass
Code Example #20
    def run(self):
        self.super_admin = Cytomine.get_instance().current_user
        connect_as(self.super_admin, True)

        users = UserCollection().fetch()
        users_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("user-collection")
        ][0]
        remote_users = UserCollection()
        for u in json.load(open(os.path.join(self.working_path, users_json))):
            remote_users.append(User().populate(u))

        roles = ["project_manager", "project_contributor", "ontology_creator"]
        if self.with_images:
            roles += ["image_creator", "image_reviewer"]

        if self.with_userannotations:
            roles += ["userannotation_creator", "userannotationterm_creator"]

        roles = set(roles)
        remote_users = [
            u for u in remote_users
            if len(roles.intersection(set(u.roles))) > 0
        ]

        for remote_user in remote_users:
            user = find_first(
                [u for u in users if u.username == remote_user.username])
            if not user:
                user = copy.copy(remote_user)
                if not user.password:
                    user.password = random_string(8)
                if not self.with_original_date:
                    user.created = None
                    user.updated = None
                user.save()
            self.id_mapping[remote_user.id] = user.id

        # --------------------------------------------------------------------------------------------------------------
        logging.info("1/ Import ontology and terms")
        """
        Import the ontology with terms and relation terms that are stored in pickled files in working_path.
        If the ontology exists (same name and same terms), the existing one is used.
        Otherwise, an ontology with an available name is created with new terms and corresponding relationships.
        """
        ontologies = OntologyCollection().fetch()
        ontology_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("ontology")
        ][0]
        remote_ontology = Ontology().populate(
            json.load(open(os.path.join(self.working_path, ontology_json))))
        remote_ontology.name = remote_ontology.name.strip()

        terms = TermCollection().fetch()
        terms_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("term-collection")
        ]
        remote_terms = TermCollection()
        if len(terms_json) > 0:
            for t in json.load(
                    open(os.path.join(self.working_path, terms_json[0]))):
                remote_terms.append(Term().populate(t))

        def ontology_exists():
            compatible_ontology = find_first([
                o for o in ontologies
                if o.name == remote_ontology.name.strip()
            ])
            if compatible_ontology:
                set1 = set((t.name, t.color) for t in terms
                           if t.ontology == compatible_ontology.id)
                difference = [
                    term for term in remote_terms
                    if (term.name, term.color) not in set1
                ]
                if len(difference) == 0:
                    return True, compatible_ontology
                return False, None
            else:
                return True, None

        i = 1
        remote_name = remote_ontology.name
        found, existing_ontology = ontology_exists()
        while not found:
            remote_ontology.name = "{} ({})".format(remote_name, i)
            found, existing_ontology = ontology_exists()
            i += 1

        # SWITCH to ontology creator user
        connect_as(User().fetch(self.id_mapping[remote_ontology.user]))
        if not existing_ontology:
            ontology = copy.copy(remote_ontology)
            ontology.user = self.id_mapping[remote_ontology.user]
            if not self.with_original_date:
                ontology.created = None
                ontology.updated = None
            ontology.save()
            self.id_mapping[remote_ontology.id] = ontology.id
            logging.info("Ontology imported: {}".format(ontology))

            for remote_term in remote_terms:
                logging.info("Importing term: {}".format(remote_term))
                term = copy.copy(remote_term)
                term.ontology = self.id_mapping[term.ontology]
                term.parent = None
                if not self.with_original_date:
                    term.created = None
                    term.updated = None
                term.save()
                self.id_mapping[remote_term.id] = term.id
                logging.info("Term imported: {}".format(term))

            remote_relation_terms = [(term.parent, term.id)
                                     for term in remote_terms]
            for relation in remote_relation_terms:
                parent, child = relation
                if parent:
                    rt = RelationTerm(self.id_mapping[parent],
                                      self.id_mapping[child]).save()
                    logging.info("Relation term imported: {}".format(rt))
        else:
            self.id_mapping[remote_ontology.id] = existing_ontology.id

            ontology_terms = [
                t for t in terms if t.ontology == existing_ontology.id
            ]
            for remote_term in remote_terms:
                self.id_mapping[remote_term.id] = find_first([
                    t for t in ontology_terms if t.name == remote_term.name
                ]).id

            logging.info(
                "Ontology already encoded: {}".format(existing_ontology))

        # SWITCH USER
        connect_as(self.super_admin, True)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("2/ Import project")
        """
        Import the project (i.e. the Cytomine Project domain) stored in pickled file in working_path.
        If a project with the same name already exists, append a (x) suffix where x is an increasing number.
        """
        projects = ProjectCollection().fetch()
        project_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("project")
        ][0]
        remote_project = Project().populate(
            json.load(open(os.path.join(self.working_path, project_json))))
        remote_project.name = remote_project.name.strip()

        def available_name():
            i = 1
            existing_names = [o.name for o in projects]
            new_name = project.name
            while new_name in existing_names:
                new_name = "{} ({})".format(project.name, i)
                i += 1
            return new_name

        project = copy.copy(remote_project)
        project.name = available_name()
        project.discipline = None
        project.ontology = self.id_mapping[project.ontology]
        project_contributors = [
            u for u in remote_users if "project_contributor" in u.roles
        ]
        project.users = [self.id_mapping[u.id] for u in project_contributors]
        project_managers = [
            u for u in remote_users if "project_manager" in u.roles
        ]
        project.admins = [self.id_mapping[u.id] for u in project_managers]
        if not self.with_original_date:
            project.created = None
            project.updated = None
        project.save()
        self.id_mapping[remote_project.id] = project.id
        logging.info("Project imported: {}".format(project))

        # --------------------------------------------------------------------------------------------------------------
        logging.info("3/ Import images")
        storages = StorageCollection().fetch()
        abstract_images = AbstractImageCollection().fetch()
        images_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("imageinstance-collection")
        ]
        remote_images = ImageInstanceCollection()
        if len(images_json) > 0:
            for i in json.load(
                    open(os.path.join(self.working_path, images_json[0]))):
                remote_images.append(ImageInstance().populate(i))

        remote_images_dict = {}

        for remote_image in remote_images:
            image = copy.copy(remote_image)

            # Fix old image names: strip non-ASCII characters (workaround for an old urllib3 limitation)
            remote_image.originalFilename = bytes(
                remote_image.originalFilename,
                'utf-8').decode('ascii', 'ignore')
            if remote_image.originalFilename not in remote_images_dict.keys():
                remote_images_dict[remote_image.originalFilename] = [
                    remote_image
                ]
            else:
                remote_images_dict[remote_image.originalFilename].append(
                    remote_image)
            logging.info("Importing image: {}".format(remote_image))

            # SWITCH user to image creator user
            connect_as(User().fetch(self.id_mapping[remote_image.user]))
            # Get its storage
            storage = find_first([
                s for s in storages
                if s.user == Cytomine.get_instance().current_user.id
            ])
            if not storage:
                storage = storages[0]

            # Check if image is already in its storage
            abstract_image = find_first([
                ai for ai in abstract_images
                if ai.originalFilename == remote_image.originalFilename
                and ai.width == remote_image.width
                and ai.height == remote_image.height
                and ai.resolution == remote_image.resolution
            ])
            if abstract_image:
                logging.info(
                    "== Found corresponding abstract image. Linking to project."
                )
                ImageInstance(abstract_image.id,
                              self.id_mapping[remote_project.id]).save()
            else:
                logging.info("== New image starting to upload & deploy")
                filename = os.path.join(
                    self.working_path, "images",
                    image.originalFilename.replace("/", "-"))
                Cytomine.get_instance().upload_image(
                    self.host_upload, filename, storage.id,
                    self.id_mapping[remote_project.id])
                time.sleep(0.8)

            # SWITCH USER
            connect_as(self.super_admin, True)

        # Waiting for all images...
        n_new_images = -1
        new_images = None
        count = 0
        while n_new_images != len(
                remote_images) and count < len(remote_images) * 5:
            new_images = ImageInstanceCollection().fetch_with_filter(
                "project", self.id_mapping[remote_project.id])
            n_new_images = len(new_images)
            if count > 0:
                time.sleep(5)
            count = count + 1
        print("All images have been deployed. Fixing image-instances...")

        # Fix image instances meta-data:
        for new_image in new_images:
            remote_image = remote_images_dict[new_image.originalFilename].pop()
            if self.with_original_date:
                new_image.created = remote_image.created
                new_image.updated = remote_image.updated
            new_image.reviewStart = getattr(remote_image, 'reviewStart', None)
            new_image.reviewStop = getattr(remote_image, 'reviewStop', None)
            remote_review_user = getattr(remote_image, 'reviewUser', None)
            new_image.reviewUser = self.id_mapping[remote_review_user] if remote_review_user else None
            new_image.instanceFilename = remote_image.instanceFilename
            new_image.update()
            self.id_mapping[remote_image.id] = new_image.id
            self.id_mapping[remote_image.baseImage] = new_image.baseImage

            new_abstract = AbstractImage().fetch(new_image.baseImage)
            if self.with_original_date:
                new_abstract.created = remote_image.created
                new_abstract.updated = remote_image.updated
            if new_abstract.resolution is None:
                new_abstract.resolution = remote_image.resolution
            if new_abstract.magnification is None:
                new_abstract.magnification = remote_image.magnification
            new_abstract.update()

        print("All image-instances have been fixed.")

        # --------------------------------------------------------------------------------------------------------------
        logging.info("4/ Import user annotations")
        annots_json = [
            f for f in os.listdir(self.working_path) if f.endswith(".json")
            and f.startswith("user-annotation-collection")
        ]
        remote_annots = AnnotationCollection()
        if len(annots_json) > 0:
            for a in json.load(
                    open(os.path.join(self.working_path, annots_json[0]))):
                remote_annots.append(Annotation().populate(a))

        def _add_annotation(remote_annotation, id_mapping, with_original_date):
            if remote_annotation.project not in id_mapping.keys() \
                    or remote_annotation.image not in id_mapping.keys():
                return

            annotation = copy.copy(remote_annotation)
            annotation.project = id_mapping[remote_annotation.project]
            annotation.image = id_mapping[remote_annotation.image]
            annotation.user = id_mapping[remote_annotation.user]
            annotation.term = [id_mapping[t] for t in remote_annotation.term]
            if not with_original_date:
                annotation.created = None
                annotation.updated = None
            annotation.save()

        for user in [
                u for u in remote_users if "userannotation_creator" in u.roles
        ]:
            remote_annots_for_user = [
                a for a in remote_annots if a.user == user.id
            ]
            # SWITCH to annotation creator user
            connect_as(User().fetch(self.id_mapping[user.id]))
            Parallel(n_jobs=-1, backend="threading")(
                delayed(_add_annotation)(remote_annotation, self.id_mapping,
                                         self.with_original_date)
                for remote_annotation in remote_annots_for_user)

            # SWITCH back to admin
            connect_as(self.super_admin, True)

        # --------------------------------------------------------------------------------------------------------------
        logging.info(
            "5/ Import metadata (properties, attached files, description)")
        # Placeholder domain object used to instantiate the metadata models; real domain references are set below
        obj = Model()
        obj.id = -1
        obj.class_ = ""

        properties_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("properties")
        ]
        for property_json in properties_json:
            for remote_prop in json.load(
                    open(os.path.join(self.working_path, property_json))):
                prop = Property(obj).populate(remote_prop)
                prop.domainIdent = self.id_mapping[prop.domainIdent]
                prop.save()

        attached_files_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("attached-files")
        ]
        for attached_file_json in attached_files_json:
            for remote_af in json.load(
                    open(os.path.join(self.working_path, attached_file_json))):
                af = AttachedFile(obj).populate(remote_af)
                af.domainIdent = self.id_mapping[af.domainIdent]
                af.filename = os.path.join(self.working_path, "attached_files",
                                           remote_af.filename)
                af.save()

        descriptions_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("description")
        ]
        for description_json in descriptions_json:
            desc = Description(obj).populate(
                json.load(
                    open(os.path.join(self.working_path, description_json))))
            desc.domainIdent = self.id_mapping[desc.domainIdent]
            desc._object.class_ = desc.domainClassName
            desc._object.id = desc.domainIdent
            desc.save()


def run(cyto_job, parameters):
    logging.info("----- segmentation_prediction v%s -----", __version__)
    logging.info("Entering run(cyto_job=%s, parameters=%s)", cyto_job,
                 parameters)

    job = cyto_job.job
    project = cyto_job.project
    current_tile_annotation = None

    working_path = os.path.join("tmp", str(job.id))
    if not os.path.exists(working_path):
        logging.info("Creating annotation directory: %s", working_path)
        os.makedirs(working_path)

    try:
        # Initialization
        pyxit_target_width = parameters.pyxit_target_width
        pyxit_target_height = parameters.pyxit_target_height
        tile_size = parameters.cytomine_tile_size
        zoom = parameters.cytomine_zoom_level
        predictionstep = int(parameters.cytomine_predict_step)
        mindev = parameters.cytomine_tile_min_stddev
        maxmean = parameters.cytomine_tile_max_mean

        logging.info("Loading prediction model (local)")
        fp = open(parameters.pyxit_load_from, "rb")
        logging.debug(fp)
        pickle.load(fp)  # classes => not needed
        pyxit = pickle.load(fp)
        pyxit.n_jobs = parameters.pyxit_nb_jobs  # multithread subwindows extraction in pyxit
        pyxit.base_estimator.n_jobs = parameters.pyxit_nb_jobs  # multithread tree propagation

        # loop for images in the project id TODO let user specify the images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", project.id)
        nb_images = len(images)
        logging.info("# images in project: %d", nb_images)
        progress = 0
        progress_delta = 100 / nb_images

        # Go through all images
        for (i, image) in enumerate(images):
            image_str = "{} ({}/{})".format(image.instanceFilename, i + 1,
                                            nb_images)
            job.update(progress=progress,
                       statusComment="Analyzing image {}...".format(image_str))
            logging.debug(
                "Image id: %d width: %d height: %d resolution: %f magnification: %d filename: %s",
                image.id, image.width, image.height, image.resolution,
                image.magnification, image.filename)

            image.colorspace = "RGB"  # required for correct handling in CytomineReader

            # Create local object to access the remote whole slide
            logging.debug(
                "Creating connector to Slide Image from Cytomine server")
            whole_slide = WholeSlide(image)
            logging.debug("Wholeslide: %d x %d pixels", whole_slide.width,
                          whole_slide.height)

            # endx and endy allow to stop image analysis at a given x, y position  (for debugging)
            endx = parameters.cytomine_endx if parameters.cytomine_endx else whole_slide.width
            endy = parameters.cytomine_endy if parameters.cytomine_endy else whole_slide.height

            # initialize variables and tools for ROI
            nx = tile_size
            ny = tile_size

            local_tile_component = ([(0, 0), (0, ny), (nx, ny), (nx, 0),
                                     (0, 0)], [])

            # We can apply the segmentation model either in the whole slide (including background area), or only within
            # multiple ROIs (of a given term)
            # For example ROI could be generated first using a thresholding step to detect the tissue
            # Here we build a polygon union containing all roi_annotations locations (user or reviewed annotations) to
            # later match tile with roi masks
            if parameters.cytomine_roi_term:
                logging.debug("Retrieving ROI annotations")
                roi_annotations = AnnotationCollection(
                    image=image.id,
                    term=parameters.cytomine_roi_term,
                    showWKT=True,
                    showTerm=True,
                    reviewed=parameters.cytomine_reviewed_roi).fetch()

                roi_annotations_locations = []
                for roi_annotation in roi_annotations:
                    roi_annotations_locations.append(
                        shapely.wkt.loads(roi_annotation.location))
                roi_annotations_union = shapely.ops.unary_union(
                    roi_annotations_locations)

            else:  # no ROI used
                # We build a rectangular roi_mask corresponding to the whole image filled with ones
                logging.debug("Processing all tiles")
                roi_mask = np.ones((ny, nx), dtype=bool)

            # Initiate the reader object which browse the whole slide image with tiles of size tile_size
            logging.info("Initiating the Slide reader")
            reader = CytomineReader(
                whole_slide,
                window_position=Bounds(parameters.cytomine_startx,
                                       parameters.cytomine_starty, tile_size,
                                       tile_size),
                zoom=zoom,
                # overlap needed because the predictions at the borders of the tile are removed
                overlap=pyxit_target_width + 1)

            wsi = 0  # tile number

            logging.info("Starting browsing the image using tiles")
            while True:
                tile_component = reader.convert_to_real_coordinates(
                    [local_tile_component])[0]
                tile_polygon = shapely.geometry.Polygon(
                    tile_component[0], tile_component[1])

                # Get rasterized roi mask to match with this tile (if no ROI used, the roi_mask was built before and
                # corresponds to the whole image).
                if parameters.cytomine_roi_term:
                    roi_mask = rasterize_tile_roi_union(
                        nx, ny, tile_polygon, roi_annotations_union, reader)

                if np.count_nonzero(roi_mask) == 0:
                    logging.info(
                        "Tile %d is not included in any ROI, skipping processing",
                        wsi)

                else:
                    # Browse the whole slide image with catch exception
                    while True:
                        try:
                            reader.read()
                            break

                        except socket.timeout:
                            logging.error("Socket timeout for tile %d: %s",
                                          wsi, socket.timeout)
                            time.sleep(1)

                        except socket.error:
                            logging.error("Socket error for tile %d: %s", wsi,
                                          socket.error)
                            time.sleep(1)

                    tile = reader.data

                    # Get statistics about the current tile
                    logging.info("Computing tile %d statistics", wsi)
                    pos = reader.window_position
                    logging.debug(
                        "Tile zoom: %d, posx: %d, posy: %d, poswidth: %d, posheight: %d",
                        zoom, pos.x, pos.y, pos.width, pos.height)
                    tilemean = ImageStat.Stat(tile).mean
                    logging.debug("Tile mean pixel values: %d %d %d",
                                  tilemean[0], tilemean[1], tilemean[2])
                    tilestddev = ImageStat.Stat(tile).stddev
                    logging.debug("Tile stddev pixel values: %d %d %d",
                                  tilestddev[0], tilestddev[1], tilestddev[2])

                    # Criteria to determine if tile is empty, specific to this application
                    if ((tilestddev[0] < mindev and tilestddev[1] < mindev
                         and tilestddev[2] < mindev)
                            or (tilemean[0] > maxmean and tilemean[1] > maxmean
                                and tilemean[2] > maxmean)):
                        logging.info(
                            "Tile %d empty (filtered by min stddev or max mean)",
                            wsi)

                    else:
                        # This tile is not empty, we process it

                        # Add current tile annotation on server just for progress visualization purpose
                        current_tile_annotation = Annotation(
                            tile_polygon.wkt, image.id).save()

                        # Save the tile image locally
                        image_filename = "%s/%d-zoom_%d-tile_%d_x%d_y%d_w%d_h%d.png" \
                                         % (working_path, image.id, zoom, wsi, pos.x, pos.y, pos.width, pos.height)
                        tile.save(image_filename, "PNG")

                        logging.debug("Tile file: %s", image_filename)
                        logging.info("Extraction of subwindows in tile %d",
                                     wsi)
                        width, height = tile.size

                        half_subwindow_width = int(pyxit_target_width / 2)
                        half_subwindow_height = int(pyxit_target_height / 2)

                        # Coordinates of centers of extracted subwindows
                        y_roi = range(half_subwindow_height,
                                      height - half_subwindow_height,
                                      predictionstep)
                        x_roi = range(half_subwindow_width,
                                      width - half_subwindow_width,
                                      predictionstep)
                        logging.info("%d subwindows to extract",
                                     len(x_roi) * len(y_roi))

                        n_jobs = parameters.cytomine_nb_jobs
                        n_jobs, _, starts = _partition_images(
                            n_jobs, len(y_roi))

                        # Parallel extraction of subwindows in the current tile
                        all_data = Parallel(n_jobs=n_jobs)(
                            delayed(_parallel_crop_boxes)
                            (y_roi[starts[k]:starts[k +
                                                    1]], x_roi, image_filename,
                             half_subwindow_width, half_subwindow_height,
                             parameters.pyxit_colorspace)
                            for k in range(n_jobs))

                        # Reduce
                        boxes = np.vstack([box for box, _ in all_data])
                        _X = np.vstack([X for _, X in all_data])

                        logging.info("Prediction of subwindows for tile %d",
                                     wsi)
                        # Propagate subwindow feature vectors (X) into trees and get probabilities
                        _Y = pyxit.base_estimator.predict_proba(_X)

                        # Warning: we get output vectors for all classes for pixel (0,0) for all subwindows, then pixel
                        # predictions for pixel (0,1) for all subwindows, ... We do not get predictions window after
                        # window, but output after output
                        # => Y is a list of length m, where m = nb of pixels by subwindow ;
                        #    each element of the list is itself a list of size n, where n = nb of subwindows
                        #    for each subwindow, the probabilities for each class are given

                        # <optimized code
                        logging.info(
                            "Parallel construction of confidence map in current tile"
                        )
                        pixels = range(pyxit_target_width *
                                       pyxit_target_height)
                        n_jobs, _, starts = _partition_images(
                            n_jobs, len(pixels))

                        all_votes_class = Parallel(n_jobs=n_jobs)(
                            delayed(_parallel_confidence_map)
                            (pixels[starts[k]:starts[k + 1]],
                             _Y[starts[k]:starts[k + 1]], boxes, width, height,
                             pyxit.base_estimator.n_classes_[0],
                             pyxit_target_width, pyxit_target_height)
                            for k in range(n_jobs))

                        votes_class = all_votes_class[0]
                        for v in all_votes_class[1:]:
                            votes_class += v
                        # optimized code>

                        logging.info("Delete borders")
                        # Delete predictions at borders
                        for k in range(0, width):
                            for j in range(0, half_subwindow_height):
                                votes_class[j, k, :] = [1, 0]
                            for j in range(height - half_subwindow_height,
                                           height):
                                votes_class[j, k, :] = [1, 0]

                        for j in range(0, height):
                            for k in range(0, half_subwindow_width):
                                votes_class[j, k, :] = [1, 0]
                            for k in range(width - half_subwindow_width,
                                           width):
                                votes_class[j, k, :] = [1, 0]

                        votes = np.argmax(votes_class, axis=2) * 255

                        # only predict in roi region based on roi mask
                        votes[np.logical_not(roi_mask)] = 0

                        # process mask
                        votes = process_mask(votes)
                        votes = votes.astype(np.uint8)

                        # Save of confidence map locally
                        logging.info("Creating output tile file locally")
                        output = Image.fromarray(votes)
                        outputfilename = "%s/%d-zoom_%d-tile_%d_xxOUTPUT-%dx%d.png" \
                                         % (working_path, image.id, zoom, wsi, pyxit_target_width, pyxit_target_height)
                        output.save(outputfilename, "PNG")
                        logging.debug("Tile OUTPUT file: %s", outputfilename)

                        # Convert and transfer annotations of current tile
                        logging.info("Find components")
                        components = ObjectFinder(votes).find_components()
                        components = reader.convert_to_real_coordinates(
                            components)
                        polygons = [
                            Polygon(component[0], component[1])
                            for component in components
                        ]

                        logging.info("Uploading annotations...")
                        logging.debug("Number of polygons: %d" % len(polygons))
                        start = time.time()

                        for poly in polygons:
                            geometry = poly.wkt

                            if not poly.is_valid:
                                logging.warning(
                                    "Invalid geometry, try to correct it with buffer"
                                )
                                logging.debug(
                                    "Geometry prior to modification: %s",
                                    geometry)
                                new_poly = poly.buffer(0)
                                if not new_poly.is_valid:
                                    logging.error(
                                        "Failed to make valid geometry, skipping this polygon"
                                    )
                                    continue
                                geometry = new_poly.wkt

                            logging.debug("Uploading geometry %s", geometry)

                            startsingle = time.time()
                            while True:
                                try:
                                    # TODO: save collection of annotations
                                    annot = Annotation(
                                        geometry, image.id,
                                        [parameters.cytomine_predict_term
                                         ]).save()
                                    if not annot:
                                        logging.error(
                                            "Annotation could not be saved ; location = %s",
                                            geometry)
                                    break
                                except (socket.timeout, socket.error):
                                    logging.error(
                                        "socket timeout/error add_annotation")
                                    time.sleep(1)

                            endsingle = time.time()
                            logging.debug(
                                "Elapsed time for adding single annotation: %d",
                                endsingle - startsingle)

                        # current time
                        end = time.time()
                        logging.debug(
                            "Elapsed time for adding all annotations: %d",
                            end - start)

                        # Delete current tile annotation (progress visualization)
                        current_tile_annotation.delete()

                wsi += 1

                if not reader.next() or (reader.window_position.x > endx
                                         and reader.window_position.y > endy):
                    break  # end of browsing the whole slide

            # Postprocessing to remove small/large annotations according to min/max area
            if parameters.cytomine_postproc:
                logging.info("Post-processing before union...")
                job.update(progress=progress + progress_delta / 4,
                           statusComment="Post-processing image {}...".format(
                               image_str))
                while True:
                    try:
                        annotations = AnnotationCollection(id_user=job.userJob,
                                                           id_image=image.id,
                                                           showGIS=True)
                        break
                    except (socket.timeout, socket.error):
                        logging.error(
                            "Socket timeout/error when fetching annotations")
                        time.sleep(1)

                # remove/edit useless annotations
                start = time.time()
                for annotation in annotations:
                    if (annotation.area == 0
                            or annotation.area < parameters.cytomine_min_size
                            or annotation.area > parameters.cytomine_max_size):
                        annotation.delete()
                    else:
                        logging.debug("Keeping annotation %d", annotation.id)

                end = time.time()
                logging.debug(
                    "Elapsed time for post-processing all annotations: %d" %
                    (end - start))

            # Segmentation model was applied on individual tiles. We need to merge geometries generated from each tile.
            # We use a groovy/JTS script that downloads annotation geometries and perform union locally to relieve the
            # Cytomine server
            if parameters.cytomine_union:
                logging.info("Union of polygons for image %s",
                             image.instanceFilename)
                job.update(
                    progress=progress + progress_delta / 3,
                    statusComment="Union of polygons in image {}...".format(
                        image_str))
                start = time.time()
                union_command = (
                    "groovy -cp \"lib/jars/*\" lib/union4.groovy " +
                    "%s %s %s %d %d %d %d %d %d %d %d %d %d" %
                    (cyto_job._base_url(False), parameters.publicKey,
                     parameters.privateKey, image.id, job.userJob,
                     parameters.cytomine_predict_term,
                     parameters.cytomine_union_min_length,
                     parameters.cytomine_union_bufferoverlap,
                     parameters.cytomine_union_min_point_for_simplify,
                     parameters.cytomine_union_min_point,
                     parameters.cytomine_union_max_point,
                     parameters.cytomine_union_nb_zones_width,
                     parameters.cytomine_union_nb_zones_height))
                logging.info("Union command: %s", union_command)
                os.system(union_command)
                end = time.time()
                logging.info("Elapsed time union: %d s", end - start)

            # Perform classification of detected geometries using a classification model (pkl)
            if parameters.pyxit_post_classification:
                logging.info("Post classification of all candidates")
                job.update(
                    progress=progress + progress_delta * 2 / 3,
                    statusComment="Post-classification in image {}...".format(
                        image_str))

                # Retrieve locally annotations from Cytomine core produced by the segmentation job as candidates
                candidate_annotations = AnnotationCollection(
                    user=job.userJob,
                    image=image.id,
                    showWKT=True,
                    showMeta=True).fetch()

                folder_name = "%s/crops-candidates-%d/zoom-%d/" % (
                    working_path, image.id, zoom)
                if not os.path.exists(folder_name):
                    os.makedirs(folder_name)

                dest_pattern = os.path.join(folder_name, "{id}.png")
                for annotation in candidate_annotations:
                    annotation.dump(dest_pattern, mask=True, alpha=True)
                    # np_image = cv2.imread(annotation.filename, -1)
                    # if np_image is not None:
                    #     alpha = np.array(np_image[:, :, 3])
                    #     image = np.array(np_image[:, :, 0:3])
                    # image[alpha == 0] = (255,255,255)  # to replace surrounding by white
                    # cv2.imwrite(annotation.filename, image)

                logging.debug("Building attributes from %s", folder_name)
                # Extract subwindows from all candidates
                x, y = build_from_dir(folder_name)
                post_fp = open(parameters.pyxit_post_classification_save_to,
                               "rb")
                classes = pickle.load(post_fp)
                pyxit = pickle.load(post_fp)
                logging.debug(pyxit)

                # pyxit parameters are in the model file
                y_proba = pyxit.predict_proba(x)
                y_predict = classes.take(np.argmax(y_proba, axis=1), axis=0)
                y_rate = np.max(y_proba, axis=1)

                # We classify each candidate annotation and keep only those predicted as cytomine_predict_term
                for annotation in candidate_annotations:
                    j = np.where(x == annotation.filename)[0][0]
                    new_term = int(y_predict[j])
                    accepted = (new_term == parameters.cytomine_predict_term)
                    logging.debug(
                        "Annotation %d %s during post-classification (class: %d proba: %f)",
                        annotation.id, "accepted" if accepted else "rejected",
                        int(y_predict[j]), y_rate[j])

                    if not accepted:
                        AlgoAnnotationTerm(
                            annotation.id,
                            parameters.cytomine_predict_term).delete()
                        AlgoAnnotationTerm(annotation.id, new_term).save()

                logging.info("End of post-classification")
                # ...

            # Perform stats (counting) in roi area
            if parameters.cytomine_count and parameters.cytomine_roi_term:
                logging.info("Compute statistics")
                # Count number of annotations in roi area
                # Get Rois
                roi_annotations = AnnotationCollection(
                    image=image.id,
                    term=parameters.cytomine_roi_term,
                    showGIS=True).fetch()

                # Count included annotations (term = predict_term) in each ROI
                for roi_annotation in roi_annotations:
                    included_annotations = AnnotationCollection(
                        image=image.id,
                        user=job.userJob,
                        bboxAnnotation=roi_annotation.id).fetch()
                    logging.info(
                        "Stats of image %s: %d annotations included in ROI %d (%d %s)",
                        image.instanceFilename, len(included_annotations),
                        roi_annotation.id, roi_annotation.area,
                        roi_annotation.areaUnit)

            logging.info("Finished processing image %s",
                         image.instanceFilename)
            progress += progress_delta
Code example #22
0
def extract_annotations_objtrk(out_path, in_image, project_id, track_prefix,
                               **kwargs):
    """
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """
    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim < 3:
        raise ValueError(
            "Object tracking should be at least 3D (only {} spatial dimension(s) found)"
            .format(ndim))

    tracks = TrackCollection()
    annotations = AnnotationCollection()

    if ndim == 3:
        slices = mask_to_objects_3d(data, time=True, assume_unique_labels=True)
        time_to_image = get_depth_to_slice(image)

        for slice_group in slices:
            curr_tracks, curr_annots = create_tracking_from_slice_group(
                image,
                slice_group,
                slice2point=lambda _slice: _slice.polygon.centroid,
                depth2slice=time_to_image,
                id_project=project_id,
                upload_object=True,
                upload_group_id=True,
                track_prefix=track_prefix + "-object")
            tracks.extend(curr_tracks)
            annotations.extend(curr_annots)
    elif ndim == 4:
        objects = mask_to_objects_3dt(mask=data)
        depths_to_image = get_depth_to_slice(image, depth=("time", "depth"))
        # TODO add tracking lines one way or another
        for time_steps in objects:
            label = time_steps[0][0].label
            track = Track(name="{}-{}".format(track_prefix, label),
                          id_image=image.id,
                          color=DEFAULT_COLOR).save()
            Property(track, key="label", value=label).save()
            annotations.extend([
                Annotation(location=change_referential(
                    p=_slice.polygon, height=image.height).wkt,
                           id_image=image.id,
                           id_project=project_id,
                           id_tracks=[track.id],
                           slice=depths_to_image[(_slice.time, _slice.depth)].id)
                for step_slices in time_steps for _slice in step_slices
            ])

            tracks.append(track)

    else:
        raise ValueError(
            "Annotation extraction for object tracking does not support masks with more than 4 dims..."
        )

    return tracks, annotations
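
A minimal usage sketch (hypothetical caller; `out_path`, `in_image` and `project_id` follow the parameter descriptions above). The tracks are already saved when they are created, but the returned annotation collection still has to be saved explicitly:

tracks, annotations = extract_annotations_objtrk(out_path, in_image, project_id, "pred")
annotations.save()  # the Track objects were saved inside the function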
Code example #23
0
                path_to_landmarks = os.path.join(params.landmarks, tissue,
                                                 scale, f"{original_name}.csv")

                with open(path_to_landmarks, 'r') as csvfile:

                    f_csv = csv.reader(csvfile,
                                       delimiter=str(','),
                                       quotechar=str('|'))
                    headers = next(f_csv)
                    annotations = AnnotationCollection()

                    for row_landmarks in f_csv:

                        id_landmark = int(row_landmarks[0])

                        # Cytomine uses a bottom-left origin, so the y coordinate is flipped
                        point = Point(float(row_landmarks[1]),
                                      height - float(row_landmarks[2]))

                        a = Annotation(location=point.wkt,
                                       id_image=image_id,
                                       id_project=params.id_project)
                        a.property = [{
                            "key": "ANNOTATION_GROUP_ID",
                            "value": id_landmark
                        }]
                        annotations.append(a)

                    annotations.save()
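
The ANNOTATION_GROUP_ID property uploaded above can be read back from a saved annotation later on; code example #29 further below does exactly that, along these lines (annotation_id is assumed to be known):

annot = Annotation().fetch(annotation_id)
prop = Property(annot).fetch(key="ANNOTATION_GROUP_ID")
print(prop.value)  # the landmark identifier written at upload time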
Code example #24
0
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING, progress=0, statusComment='Initialization...')
        base_path = "{}".format(os.getenv('HOME'))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Loading models from models directory
        with tf.device('/cpu:0'):
            h_model = load_model('/models/head_dice_sm_9976.hdf5', compile=False)  # head model
            h_model.compile(optimizer='adam', loss=dice_coef_loss,
                            metrics=['accuracy'])
            op_model = load_model('/models/op_ce_sm_9991.hdf5', compile=True)  # operculum model
            #op_model.compile(optimizer='adam', loss=dice_coef_loss,
                            #metrics=['accuracy'])

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter('project', conn.parameters.cytomine_id_project)
        if conn.parameters.cytomine_id_images != 'all':  # select only the given image instances
            images = [_ for _ in images if _.id
                      in map(lambda x: int(x.strip()),
                             conn.parameters.cytomine_id_images.split(','))]
        images_id = [image.id for image in images]

        # Download selected images into 'working_directory'
        img_path = os.path.join(working_path, 'images')
        os.makedirs(img_path, exist_ok=True)

        for image in conn.monitor(
                images, start=2, end=50, period=0.1,
                prefix='Downloading images into working directory...'):
            fname, fext = os.path.splitext(image.filename)
            if image.download(dest_pattern=os.path.join(
                    img_path,
                    "{}{}".format(image.id, fext))) is not True:  # images are downloaded with image_ids as names
                print('Failed to download image {}'.format(image.filename))

        # Prepare image file paths from image directory for execution
        conn.job.update(progress=50,
                        statusComment="Preparing data for execution..")
        image_paths = glob.glob(os.path.join(img_path, '*'))
        std_size = (1032, 1376)  # maximum size that the model can handle
        model_size = 256
        for i in range(len(image_paths)):

            org_img = Image.open(image_paths[i])

            filename = os.path.basename(image_paths[i])
            fname, fext = os.path.splitext(filename)
            fname = int(fname)
            org_img = img_to_array(org_img)
            img = org_img.copy()
            org_size = org_img.shape[:2]
            asp_ratio = org_size[0] / org_size[1]  # for cropping and upscaling to original size
            if org_size[1] > std_size[1]:
                img = tf.image.resize(img, (675,900), method='nearest')
                img = tf.image.resize_with_crop_or_pad(img, std_size[0],std_size[1])
                h_mask = predict_mask(img, h_model,model_size)
                h_mask = crop_to_aspect(h_mask, asp_ratio)
                h_mask = tf.image.resize(h_mask, std_size, method='nearest')
                h_up_mask = tf.image.resize_with_crop_or_pad(h_mask, 675,900)
                h_up_mask = tf.image.resize(h_up_mask, org_size, method='nearest')
                h_up_mask = np.asarray(h_up_mask).astype(np.uint8)
                _, h_up_mask = cv.threshold(h_up_mask, 0.001, 255, 0)
                kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (17, 17))
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_OPEN, kernel, iterations=5)
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_CLOSE, kernel, iterations=1)
                #h_up_mask = cv.erode(h_up_mask ,kernel,iterations = 3)
                #h_up_mask = cv.dilate(h_up_mask ,kernel,iterations = 3)
                h_up_mask = np.expand_dims(h_up_mask, axis=-1)
                
            else:
                h_mask = predict_mask(img, h_model, model_size)
                h_mask = crop_to_aspect(h_mask, asp_ratio)
                h_up_mask = tf.image.resize(h_mask, org_size, method='nearest')
                h_up_mask = np.asarray(h_up_mask).astype(np.uint8)
                _, h_up_mask = cv.threshold(h_up_mask, 0.001, 255, 0)
                kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
                #kernel = np.ones((9,9),np.uint8)
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_CLOSE, kernel, iterations=3)
                h_up_mask = np.expand_dims(h_up_mask, axis=-1)
        
            box = bb_pts(h_up_mask)  # bounding box points for operculum (x_min, y_min, x_max, y_max)
            w = box[0]
            h = box[1]
            tr_h = box[3] - box[1]  # target height
            tr_w = box[2] - box[0]  # target width
            crop_op_img = tf.image.crop_to_bounding_box(org_img, h, w, tr_h, tr_w)

            op_asp_ratio = crop_op_img.shape[0] / crop_op_img.shape[1]
            op_mask = predict_mask(crop_op_img, op_model, model_size)
            op_mask = crop_to_aspect(op_mask, op_asp_ratio)
            op_mask = tf.image.resize(op_mask, (crop_op_img.shape[0], crop_op_img.shape[1]), method='nearest')
            op_up_mask = np.zeros((org_img.shape[0],org_img.shape[1],1)).astype(np.uint8) # array of zeros to be filled with op mask
            op_up_mask[box[1]:box[3], box[0]:box[2]] = op_mask # paste op_mask in org_img (reversing the crop operation)
            #op_up_mask = tf.image.resize_with_crop_or_pad(op_mask, org_size[0], org_size[1])
        

            h_polygon = h_make_polygon(h_up_mask)
            op_polygon = o_make_polygon(op_up_mask)

            conn.job.update(
                status=Job.RUNNING, progress=95,
                statusComment="Uploading new annotations to Cytomine server..")

            annotations = AnnotationCollection()
            annotations.append(Annotation(location=h_polygon[0].wkt, id_image=fname, id_terms=[143971108],
                                          id_project=conn.parameters.cytomine_id_project))
            annotations.append(Annotation(location=op_polygon[0].wkt, id_image=fname, id_terms=[143971084],
                                          id_project=conn.parameters.cytomine_id_project))
            annotations.save()

        conn.job.update(status=Job.TERMINATED, statusComment="Finished", progress=100)  # 524787186
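
The helper `bb_pts` is not included in this snippet; a plausible sketch, assuming the mask is an H x W x 1 uint8 array with a non-zero foreground, is:

import numpy as np

def bb_pts(mask):
    # Hypothetical reconstruction: bounding box (x_min, y_min, x_max, y_max) of the non-zero region
    ys, xs = np.nonzero(mask[..., 0])
    return int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())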
Code example #25
0
File: run.py  Project: zhang-free/S_Test
def main(argv):
    print(argv)
    with CytomineJob.from_cli(argv) as cj:

        images = ImageInstanceCollection().fetch_with_filter("project", cj.parameters.cytomine_id_project)
        for image in cj.monitor(images, prefix="Running detection on image", period=0.1):
            # Resize image if needed
            resize_ratio = max(image.width, image.height) / cj.parameters.max_image_size
            if resize_ratio < 1:
                resize_ratio = 1

            resized_width = int(image.width / resize_ratio)
            resized_height = int(image.height / resize_ratio)

            image.dump(dest_pattern="/tmp/{id}.jpg", max_size=max(resized_width, resized_height), bits=image.bitDepth)
            img = cv2.imread(image.filename, cv2.IMREAD_GRAYSCALE)

            thresholded_img = cv2.adaptiveThreshold(img, 2**image.bitDepth, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                    cv2.THRESH_BINARY, cj.parameters.threshold_blocksize,
                                                    cj.parameters.threshold_constant)

            kernel = np.ones((5, 5), np.uint8)
            eroded_img = cv2.erode(thresholded_img, kernel, iterations=cj.parameters.erode_iterations)
            dilated_img = cv2.dilate(eroded_img, kernel, iterations=cj.parameters.dilate_iterations)

            extension = 10
            extended_img = cv2.copyMakeBorder(dilated_img, extension, extension, extension, extension,
                                              cv2.BORDER_CONSTANT, value=2**image.bitDepth)

            components = find_components(extended_img)
            zoom_factor = image.width / float(resized_width)
            for i, component in enumerate(components):
                converted = []
                for point in component[0]:
                    x = int((point[0] - extension) * zoom_factor)
                    y = int(image.height - ((point[1] - extension) * zoom_factor))
                    converted.append((x, y))

                components[i] = Polygon(converted)

            # Find largest component (whole image)
            largest = max(components, key=attrgetter('area'))
            components.remove(largest)

            # Only keep components greater than 5% of whole image
            min_area = int(0.05 * image.width * image.height)

            annotations = AnnotationCollection()
            for component in components:
                if component.area > min_area:
                    annotations.append(Annotation(location=component.wkt, id_image=image.id,
                                                  id_terms=[cj.parameters.cytomine_id_predicted_term],
                                                  id_project=cj.parameters.cytomine_id_project))

                    if len(annotations) % 100 == 0:
                        annotations.save()
                        annotations = AnnotationCollection()

            annotations.save()

        cj.job.update(statusComment="Finished.")
Code example #26
0
            filename = os.path.basename(image_paths[i])
            fname, fext = os.path.splitext(filename)
            fname = int(fname)
            org_size = img.shape[:2]

            h_mask = predict_mask(img, h_model)
            size = h_mask.shape[:2]
            cropped_image = cropped(h_mask, img)

            op_mask = predict_mask(cropped_image, op_model)
            op_upsize = cropped_image.shape[:2]

            op_mask = tf.image.resize(op_mask, op_upsize, method='bilinear')
            op_mask = op_pad_up(h_mask, op_mask, size, org_size)
            h_mask = tf.image.resize(h_mask, org_size, method='bilinear')

            h_polygon = make_polygon(h_mask)
            op_polygon = make_polygon(op_mask)

         #   image_id = next((x.id for x in images if x.id == fname), None)
            annotations = AnnotationCollection()
            annotations.append(
                Annotation(location=h_polygon[0].wkt, id_image=fname, id_terms=[143971108], id_project=args.p))
            annotations.append(
                Annotation(location=op_polygon[0].wkt, id_image=fname, id_terms=[143971084], id_project=args.p))
            annotations.save()

        # project 142037659

    # =============================================================================
Code example #27
0
File: run_as_job.py  Project: urubens/Cytomine-YOLO
def run(argv):
    # CytomineJob.from_cli() uses the descriptor.json to automatically create the ArgumentParser
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(statusComment="Initialization...")
        id_project = cj.parameters.cytomine_id_project
        id_terms = cj.parameters.cytomine_id_terms
        id_tags_for_images = cj.parameters.cytomine_id_tags_for_images
        working_path = cj.parameters.working_path

        terms = TermCollection().fetch_with_filter("project", id_project)
        if id_terms:
            filtered_term_ids = [
                int(id_term) for id_term in id_terms.split(',')
            ]
            filtered_terms = TermCollection()
            for term in terms:
                if term.id in filtered_term_ids:
                    filtered_terms.append(term)
        else:
            filtered_terms = terms

        # Associate YOLO class index to Cytomine term
        classes_filename = os.path.join(working_path, CLASSES_FILENAME)
        with open(classes_filename, 'r') as f:
            classes = f.readlines()
            indexes_terms = {}
            for i, _class in enumerate(classes):
                _class = _class.strip()
                indexes_terms[i] = filtered_terms.find_by_attribute(
                    "name", _class)

        cj.job.update(statusComment="Open model...", progress=1)
        # TODO...

        cj.job.update(statusComment="Predictions...", progress=5)
        images = ImageInstanceCollection(
            tags=id_tags_for_images).fetch_with_filter("project", id_project)
        for image in images:
            print("Prediction for image {}".format(image.instanceFilename))
            # TODO: get predictions from YOLO
            # TODO: I suppose here for the sake of the demo that the output format is the same as input, which is not sure
            # <class> <x_center> <y_center> <width> <height> <proba>
            sample_predictions = [(0, 0.604000000000, 0.493846153846,
                                   0.105600000000, 0.461538461538, 0.9),
                                  (0, 0.409200000000, 0.606153846154,
                                   0.050400000000, 0.095384615385, 0.5)]

            ac = AnnotationCollection()
            for pred in sample_predictions:
                _class, xcenter, ycenter, width, height, proba = pred
                term = indexes_terms.get(_class)
                term_ids = [term.id] if term is not None else None
                if term_ids is None:
                    print("No term found for class {}".format(_class))
                geometry = yolo_to_geometry((xcenter, ycenter, width, height),
                                            image.width, image.height)
                properties = [{"key": "probability", "value": proba}]
                ac.append(
                    Annotation(id_image=image.id,
                               id_terms=term_ids,
                               location=geometry.wkt,
                               properties=properties))

            ac.save()

        cj.job.update(statusComment="Finished", progress=100)
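
The helper `yolo_to_geometry` is imported from elsewhere in that project; a sketch of what such a conversion typically does (an assumption, not the project's actual code) is shown below. The y axis is flipped because Cytomine annotations use a bottom-left origin:

from shapely.geometry import box

def yolo_to_geometry(yolo_bbox, image_width, image_height):
    # Hypothetical helper: normalized YOLO (x_center, y_center, w, h) -> polygon in image coordinates
    x_center, y_center, w, h = yolo_bbox
    x_min = (x_center - w / 2) * image_width
    x_max = (x_center + w / 2) * image_width
    y_min = image_height - (y_center + h / 2) * image_height
    y_max = image_height - (y_center - h / 2) * image_height
    return box(x_min, y_min, x_max, y_max)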
Code example #28
0
def create_tracking_from_slice_group(image,
                                     slices,
                                     slice2point,
                                     depth2slice,
                                     id_project,
                                     upload_object=False,
                                     track_prefix="object",
                                     label=None,
                                     upload_group_id=False):
    """Create a set of tracks and annotations to represent a tracked element. A trackline is created to reflect the
    movement of the object in the image. Optionally the object's polygon can also be uploaded.

    Parameters
    ----------
    image: ImageInstance
        An ImageInstance
    slices: list of AnnotationSlice
        A list of AnnotationSlice of one object
    slice2point: callable
        A function that transform a slice into its representative point to be used for generating the tracking line
    depth2slice: dict
        Maps time step with corresponding SliceInstance
    id_project: int
        Project identifier
    upload_object: bool
        True if the object should be uploaded as well (the trackline is uploaded in any case)
    track_prefix: str
        A prefix for the track name
    label: int (default: None)
        The label of the tracked object
    upload_group_id: bool
        True for uploading the object label with the track

    Returns
    -------
    saved_tracks: TrackCollection
        The saved track objects
    annotations: AnnotationCollection
        The annotations associated with the tracked object. The collection is NOT saved.
    """
    if label is None and len(slices) > 0:
        label = slices[0].label

    # create tracks
    tracks = TrackCollection()
    object_track = Track(
        "{}-{}".format(track_prefix, label),
        image.id,
        color=None if upload_group_id else DEFAULT_COLOR).save()
    trackline_track = Track(
        "{}-{}-trackline".format(track_prefix, label),
        image.id,
        color=None if upload_group_id else DEFAULT_COLOR).save()
    tracks.extend([object_track, trackline_track])

    if upload_group_id:
        Property(object_track, key="label", value=int(label)).save()
        Property(trackline_track, key="label", value=int(label)).save()

    # create actual annotations
    annotations = AnnotationCollection()
    sorted_group = sorted(slices, key=lambda s: s.time)
    prev_line = []
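    # accumulate the representative points in time order; at each step the trackline
    # annotation is rebuilt as a LineString over all distinct points seen so far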
    for _slice in sorted_group:
        point = slice2point(_slice)
        if point.is_empty:  # skip empty points
            continue
        if len(prev_line) == 0 or not prev_line[-1].equals(point):
            prev_line.append(point)

        if len(prev_line) == 1:
            # a single point cannot form a LineString, so the first annotation is the point itself
            polygon = point
        else:
            polygon = LineString(prev_line)

        depth = _slice.time if _slice.depth is None else _slice.depth
        annotations.append(
            Annotation(location=change_referential(polygon, image.height).wkt,
                       id_image=image.id,
                       slice=depth2slice[depth].id,
                       id_project=id_project,
                       id_tracks=[trackline_track.id]))

        if upload_object:
            annotations.append(
                Annotation(location=change_referential(_slice.polygon,
                                                       image.height).wkt,
                           id_image=image.id,
                           slice=depth2slice[depth].id,
                           id_project=id_project,
                           id_tracks=[object_track.id]))

    return tracks, annotations
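
# A minimal usage sketch (not part of the original code), assuming an ImageInstance
# `image`, a list of AnnotationSlice objects `slices` with .polygon/.time/.label as in
# the helpers above, and a {depth: SliceInstance} mapping `depth2slice`; the polygon
# centroid serves as the representative point for the trackline, and the returned
# annotation collection still has to be saved explicitly:
#
#     tracks, annotations = create_tracking_from_slice_group(
#         image, slices,
#         slice2point=lambda s: s.polygon.centroid,
#         depth2slice=depth2slice,
#         id_project=id_project,
#         upload_object=True,
#         track_prefix="cell")
#     annotations.save()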
コード例 #29
0
        annotations = AnnotationCollection()
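        # fetch every annotation of the project; the showWKT/showMeta/showGIS flags control
        # which fields the server includes in the response (e.g. the WKT location and the
        # area/perimeter measures printed below)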
        annotations.project = params.id_project
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print(
                "ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
                .format(annotation.id, annotation.image, annotation.project,
                        annotation.term, annotation.user, annotation.area,
                        annotation.perimeter, annotation.location))

            annot = Annotation().fetch(annotation.id)
            # All properties (collection) of the annotation
            properties = PropertyCollection(annot).fetch()
            # A property of the annotation with a specific key
            propert = Property(annot).fetch(key="ANNOTATION_GROUP_ID")

            image_id = str(annotation.image)

            if image_id in id2info:

                tissue, dye = id2info[image_id]

                path_patch = os.path.join(params.download_path,
                                          str(params.size), tissue, dye,
                                          str(propert.value) + ".jpg")
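                # A possible continuation (not in the original excerpt): create the
                # target directory and download the annotation crop to path_patch,
                # assuming the client's Annotation.dump(dest_pattern=...) helper:
                #
                #     os.makedirs(os.path.dirname(path_patch), exist_ok=True)
                #     annot.dump(dest_pattern=path_patch)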
コード例 #30
0
File: postprocessing.py    Project: delimz/unet
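# Excerpt starting mid-loop: for each term index i, the per-shape IoU (intersection
# area over union area) between the predicted and ground-truth geometries is collected
# in tmpres, and the prediction set with the best mean IoU is kept.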
                    tmpres.append(
                        predshape[i].intersection(trueshape[i]).area /
                        predshape[i].union(trueshape[i]).area)
                else:
                    tmpres.append(0.0)
            if np.mean(tmpres) > np.mean(maxres):
                maxres = tmpres
                maxshape = predshape
                maxtrue = trueshape
            else:
                print('no improvement for this prediction set')
        results.append(maxres)
        shapes.append(maxshape)
        trues.append(maxtrue)

if upload:
    with Cytomine(host=host, public_key=public_key,
                  private_key=private_key) as cytomine:
        num = 0
        for test_img in test_imgs:
            for index in range(len(res[slice_term, test_img])):
                for i in range(len(terms)):
                    new_annotation = Annotation(location=shapes[num][i].wkt,
                                                id_image=test_img,
                                                id_terms=[terms[i]],
                                                id_project=id_project)
                    new_annotation.save()
                num += 1

print(results)
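
# A standalone sketch (illustrative only, not from the original file) of the per-shape
# IoU metric computed in the loop above, assuming shapely geometries as inputs:
def shape_iou(pred_shape, true_shape):
    """Intersection-over-union of two shapely geometries; 0.0 when the union is empty."""
    union_area = pred_shape.union(true_shape).area
    if union_area == 0:
        return 0.0
    return pred_shape.intersection(true_shape).area / union_area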