def create_track_from_slices(image,
                             slices,
                             depth2slice,
                             id_project,
                             track_prefix="object",
                             label=None,
                             upload_group_id=False,
                             depth="time"):
    """Create an annotation track from a list of AnnotationSlice
    Parameters
    ----------
    image: ImageInstance
        The image instance in which the track is added
    slices: iterable (of AnnotationSlice)
        The polygon slices of the objects to draw
    depth2slice: dict
        A dictionary mapping each depth of the image instance to its corresponding SliceInstance
    id_project: int
        Project identifier
    track_prefix: str (default: "object")
        A prefix for the track name
    label: int|str (default: None)
        A label for the track
    upload_group_id: bool
        True to upload the group identifier
    depth: str
        Which depth field to read in the AnnotationSlice if both are present. One of {'time', 'depth'}.

    Returns
    -------
    track: Track
        The saved track object
    annotations: AnnotationCollection
        The annotations associated with the track. The collection is NOT saved.
    """
    if label is None and len(slices) > 0:
        label = slices[0].label
    track = Track(name="{}-{}".format(track_prefix, label),
                  id_image=image.id,
                  color=None if upload_group_id else DEFAULT_COLOR).save()

    if upload_group_id:
        Property(track, key="label", value=label).save()

    collection = AnnotationCollection()
    for _slice in slices:
        collection.append(
            Annotation(
                location=change_referential(p=_slice.polygon,
                                            height=image.height).wkt,
                id_image=image.id,
                id_project=id_project,
                id_tracks=[track.id],
                slice=depth2slice[
                    _slice.depth if (_slice.time is None or depth == "depth")
                    else _slice.time
                ].id))
    return track, collection
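create_track_from_slices relies on change_referential to flip polygon coordinates, since Cytomine stores annotations with a bottom-left origin while image masks use a top-left one. A minimal sketch of such a helper (an assumption about its behavior, not the module's actual implementation) based on shapely:

from shapely.affinity import affine_transform

def change_referential(p, height):
    # Hypothetical stand-in: flip the y axis so that top-left image
    # coordinates become bottom-left Cytomine coordinates
    return affine_transform(p, [1, 0, 0, -1, 0, height])

Note also that the returned AnnotationCollection is not persisted; the caller is expected to invoke collection.save() afterwards.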
def extract_tiled_annotations(in_tiles, out_path, nj, label_merging=False):
    """
    in_images: iterable
        List of BiaflowsTile
    out_path: str
        Path of output tiles
    nj: BiaflowsJob
        A BIAflows job object
    label_merging: bool
        True for merging only polygons having the same label. False for merging based on geometry only
    """
    # regroup tiles by original images
    grouped_tiles = defaultdict(list)
    for in_tile in in_tiles:
        grouped_tiles[in_tile.in_image.original_filename].append(in_tile)

    default_tile_builder = DefaultTileBuilder()
    annotations = AnnotationCollection()
    for tiles in grouped_tiles.values():
        # recreate the topology
        in_image = tiles[0].in_image
        topology = BiaflowsSldcImage(in_image, is_2d=True).tile_topology(
            default_tile_builder,
            max_width=nj.flags["tile_width"],
            max_height=nj.flags["tile_height"],
            overlap=nj.flags["tile_overlap"])

        # extract polygons for each tile
        ids, polygons, labels = list(), list(), list()
        label = -1
        for tile in tiles:
            out_tile_path = os.path.join(out_path, tile.filename)
            slices = mask_to_objects_2d(imread(out_tile_path),
                                        offset=tile.tile.abs_offset[::-1])
            ids.append(tile.tile.identifier)
            polygons.append([s.polygon for s in slices])
            labels.append([s.label for s in slices])
            # save label for use after merging
            if len(slices) > 0:
                label = slices[0].label

        # merge
        merged = SemanticMerger(tolerance=1).merge(
            ids, polygons, topology, labels=labels if label_merging else None)
        if label_merging:
            merged = merged[0]
        annotations.extend([
            create_annotation_from_slice(
                _slice=AnnotationSlice(p, label),
                id_image=in_image.object.id,
                image_height=in_image.object.height,
                id_project=nj.project.id,
            ) for p in merged
        ])
    return annotations
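The collection returned by extract_tiled_annotations is not saved inside the function; a typical call site (a sketch, assuming an open Cytomine connection and the tiles already processed) would be:

annotations = extract_tiled_annotations(in_tiles, out_path, nj, label_merging=True)
annotations.save()  # persist the merged annotations on the Cytomine server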
Example #3
def extract_images_or_rois(parameters):
    id_annotations = parse_domain_list(parameters.cytomine_roi_annotations)
    # if ROI annotations are provided
    if len(id_annotations) > 0:
        image_cache = dict()  # maps ImageInstance id to CytomineSlide object
        zones = list()
        for id_annot in id_annotations:
            annotation = Annotation().fetch(id_annot)
            if annotation.image not in image_cache:
                image_cache[annotation.image] = CytomineSlide(
                    annotation.image, parameters.cytomine_zoom_level)
            window = get_iip_window_from_annotation(
                image_cache[annotation.image], annotation,
                parameters.cytomine_zoom_level)
            zones.append(window)
        return zones

    # work at image level or ROIs by term
    images = ImageInstanceCollection()
    if parameters.cytomine_id_images is not None:
        id_images = parse_domain_list(parameters.cytomine_id_images)
        images.extend([ImageInstance().fetch(_id) for _id in id_images])
    else:
        images = images.fetch_with_filter("project",
                                          parameters.cytomine_id_project)

    slides = [
        CytomineSlide(img, parameters.cytomine_zoom_level) for img in images
    ]
    if parameters.cytomine_id_roi_term is None:
        return slides

    # fetch ROI annotations
    collection = AnnotationCollection(
        terms=[parameters.cytomine_id_roi_term],
        reviewed=parameters.cytomine_reviewed_roi,
        showWKT=True)
    collection.fetch_with_filter(project=parameters.cytomine_id_project)
    slides_map = {slide.image_instance.id: slide for slide in slides}
    regions = list()
    for annotation in collection:
        if annotation.image not in slides_map:
            continue
        slide = slides_map[annotation.image]
        regions.append(
            get_iip_window_from_annotation(slide, annotation,
                                           parameters.cytomine_zoom_level))

    return regions
Example #4
def extract_images_or_rois(parameters):
    # work at image level or ROIs by term
    images = ImageInstanceCollection()
    if parameters.cytomine_id_images is not None:
        id_images = parse_domain_list(parameters.cytomine_id_images)
        images.extend([ImageInstance().fetch(_id) for _id in id_images])
    else:
        images = images.fetch_with_filter("project",
                                          parameters.cytomine_id_project)

    slides = [
        CytomineSlide(img, parameters.cytomine_zoom_level) for img in images
    ]
    if parameters.cytomine_id_roi_term is None:
        return slides

    # fetch ROI annotations, all users
    collection = AnnotationCollection(
        terms=[parameters.cytomine_id_roi_term],
        reviewed=parameters.cytomine_reviewed_roi,
        project=parameters.cytomine_id_project,
        showWKT=True,
        includeAlgo=True).fetch()

    slides_map = {slide.image_instance.id: slide for slide in slides}
    regions = list()
    for annotation in collection:
        if annotation.image not in slides_map:
            continue
        slide = slides_map[annotation.image]
        regions.append(
            get_iip_window_from_annotation(slide, annotation,
                                           parameters.cytomine_zoom_level))

    return regions
def extract_annotations_prttrk(out_path, in_image, project_id, track_prefix,
                               **kwargs):
    """
    Parameters
    ----------
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """

    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim != 3:
        raise ValueError(
            "Annotation extraction for object tracking only supports "
            "3-dimensional masks (got {})".format(ndim))

    slices = mask_to_points_3d(data, time=True, assume_unique_labels=True)
    time_to_image = get_depth_to_slice(image)

    tracks = TrackCollection()
    annotations = AnnotationCollection()
    for slice_group in slices:
        curr_tracks, curr_annots = create_tracking_from_slice_group(
            image,
            slice_group,
            slice2point=lambda _slice: _slice.polygon,
            depth2slice=time_to_image,
            id_project=project_id,
            upload_object=False,
            track_prefix=track_prefix + "-particle",
            upload_group_id=True)
        tracks.extend(curr_tracks)
        annotations.extend(curr_annots)

    return tracks, annotations
Example #6
def _load_rectangles(job: Job, image_id: str, term: int,
                     detections: dict) -> None:

    progress = 10
    job.update(
        progress=progress,
        status=Job.RUNNING,
        statusComment=
        f"Uploading detections of type rectangles to image {image_id} with terms {term}"
    )

    rectangles = _generate_rectangles(detections)

    # Upload annotations to server
    delta = 85 / len(rectangles)
    annotations = AnnotationCollection()
    for rectangle in rectangles:
        annotations.append(
            Annotation(location=rectangle.wkt,
                       id_image=image_id,
                       id_terms=[term]))
        progress += delta
        job.update(progress=int(progress), status=Job.RUNNING)

    annotations.save()
    progress = 100
    job.update(progress=progress,
               status=Job.TERMINATED,
               statusComment="All detections have been uploaded")
Example #7
    def upload_annotation(self, predicted_data, project_id):
        self.cj.job.update(progress=95, statusComment="Uploading annotations")

        annotations = AnnotationCollection()
        components = ObjectFinder(predicted_data).find_components()
        locations = []
        for component in components:
            location = Polygon(component[0], component[1])

            if location.is_valid:
                locations.append(location)
            else:
                fixed = fix_geometry(location)

                if fixed.is_valid and not fixed.is_empty:
                    locations.append(fixed)

        for idx, loc in enumerate(locations):
            if not loc.is_valid:
                fixed = fix_geometry(loc)
                if fixed.is_valid and not fixed.is_empty:
                    locations[idx] = fixed

        annotations.extend([
            create_annotation_from_location(loc, self.image_instance.id,
                                            self.image_instance.height,
                                            project_id) for loc in locations
        ])

        annotations.save(chunk=20)
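fix_geometry is not shown in this example; a common way to implement such a repair step (an assumption, not necessarily what this codebase does) is shapely's zero-distance buffer:

def fix_geometry(geometry):
    # buffer(0) is a standard shapely idiom that often repairs
    # self-intersecting polygons by rebuilding their topology
    return geometry.buffer(0)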
Example #8
def run():
    """
    Deletes all the annotations from an image, except those created by a software (job)

    Example:
      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_image_instance 347 --cytomine_id_user 61 --cytomine_id_project 154

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_image_instance 3643 --cytomine_id_user 61 --cytomine_id_project 154

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_image_instance 2140 --cytomine_id_user 58 --cytomine_id_project 197
    """
    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine
    parser.add_argument('--cytomine_host',
                        dest='host',
                        default='demo.cytomine.be',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key',
                        dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key',
                        dest='private_key',
                        help="The Cytomine private key")

    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image with annotations to delete")
    parser.add_argument('--cytomine_id_user',
                        dest='id_user',
                        help="The user with annotations to delete")
    parser.add_argument('--cytomine_id_project',
                        dest='id_project',
                        help="The project with annotations to delete")
    params, _ = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        # Get the list of annotations
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        # NOTE: a user-job id can be used here to retrieve the annotations
        # created by that job; those annotations, however, cannot be deleted.
        annotations.user = params.id_user
        annotations.project = params.id_project
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            annotation.delete()
def mask_convert(mask,
                 image,
                 project_id,
                 mask_2d_fn,
                 mask_3d_fn,
                 track_prefix,
                 upload_group_id=False):
    """Generic function to convert a mask into an annotation collection

    Parameters
    ----------
    mask: ndarray
    image: ImageInstance
    project_id: int
    mask_2d_fn: callable
    mask_3d_fn: callable
    track_prefix: str
    upload_group_id: bool

    Returns
    -------
    tracks: TrackCollection
        The track objects, which have been saved
    annotations: AnnotationCollection
        The annotations, which have NOT been saved
    """
    tracks = TrackCollection()
    annotations = AnnotationCollection()
    if mask.ndim == 2:
        slices = mask_2d_fn(mask)
        annotations.extend([
            create_annotation_from_slice(s,
                                         image.id,
                                         image.height,
                                         project_id,
                                         upload_group_id=upload_group_id)
            for s in slices
        ])
    elif mask.ndim == 3:
        slices = mask_3d_fn(mask)
        depth_to_slice = get_depth_to_slice(image)
        for obj_id, obj in enumerate(slices):
            track, curr_annotations = create_track_from_slices(
                image,
                obj,
                label=obj_id,
                depth2slice=depth_to_slice,
                track_prefix=track_prefix,
                id_project=project_id,
                upload_group_id=upload_group_id)
            tracks.append(track)
            annotations.extend(curr_annotations)
    else:
        raise ValueError("Only supports 2D or 3D output images...")
    return tracks, annotations
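A usage sketch for mask_convert (assuming the mask_to_objects_2d/mask_to_objects_3d helpers seen elsewhere in this listing are importable, image is a fetched ImageInstance, and a Cytomine connection is open):

import numpy as np

# toy 2D label mask with a single rectangular object, for illustration only
mask = np.zeros((256, 256), dtype=np.uint8)
mask[50:100, 60:120] = 1

# image: an ImageInstance fetched beforehand; project_id: its project id
tracks, annotations = mask_convert(
    mask, image, project_id,
    mask_2d_fn=mask_to_objects_2d,
    mask_3d_fn=mask_to_objects_3d,
    track_prefix="object")
annotations.save()  # tracks are saved inside mask_convert, annotations are not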
Example #10
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size,
                              job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(img_instance=ImageInstance().fetch(
            job.parameters.cytomine_id_image),
                              zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon,
                             slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}
def predict(argv):
    parser = ArgumentParser(prog="Extra-Trees Object Counter Predictor")

    # Cytomine
    parser.add_argument('--cytomine_host', dest='cytomine_host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='cytomine_public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='cytomine_private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_base_path', dest='cytomine_base_path',
                        default='/api/', help="The Cytomine base path")
    parser.add_argument('--cytomine_working_path', dest='cytomine_working_path',
                        default=None, help="The working directory (eg: /tmp)")
    parser.add_argument('--cytomine_id_software', dest='cytomine_software', type=int,
                        help="The Cytomine software identifier")
    parser.add_argument('--cytomine_id_project', dest='cytomine_project', type=int,
                        help="The Cytomine project identifier")

    # Objects
    parser.add_argument('--cytomine_object_term', dest='cytomine_object_term', type=int,
                        help="The Cytomine identifier of object term")

    # Post-processing
    parser.add_argument('--post_threshold', dest='post_threshold', type=float,
                        help="Post-processing discarding threshold")
    parser.add_argument('--post_sigma', dest='post_sigma', type=float,
                        help="Std-dev of Gauss filter applied to smooth prediction")
    parser.add_argument('--post_min_dist', dest='post_min_dist', type=int,
                        help="Minimum distance between two peaks")

    # ROI
    parser.add_argument('--annotation', dest='annotation', type=str, action='append', default=[])
    parser.add_argument('--image', dest='image', type=str, action='append', default=[])

    # Execution
    parser.add_argument('--n_jobs', dest='n_jobs', type=int, default=1, help="Number of jobs")
    parser.add_argument('--verbose', '-v', dest='verbose', type=int, default=0, help="Level of verbosity")
    parser.add_argument('--model_id_job', dest='model_id_job', type=str, default=None, help="Model job ID")
    parser.add_argument('--model_file', dest="model_file", type=str, default=None, help="Model file")

    params, other = parser.parse_known_args(argv)
    if params.cytomine_working_path is None:
        params.cytomine_working_path = os.path.join(tempfile.gettempdir(), "cytomine")
    make_dirs(params.cytomine_working_path)

    params.model_id_job = str2int(params.model_id_job)
    params.image = [str2int(i) for i in params.image]
    params.annotation = [str2int(i) for i in params.annotation]

    # Initialize logger
    logger = StandardOutputLogger(params.verbose)
    for key, val in sorted(vars(params).items()):
        logger.info("[PARAMETER] {}: {}".format(key, val))

    # Start job
    with CytomineJob(params.cytomine_host,
                     params.cytomine_public_key,
                     params.cytomine_private_key,
                     params.cytomine_software,
                     params.cytomine_project,
                     parameters=vars(params),
                     working_path=params.cytomine_working_path,
                     base_path=params.cytomine_base_path,
                     verbose=(params.verbose >= Logger.DEBUG)) as job:
        cytomine = job
        cytomine.update_job_status(job.job, status_comment="Starting...", progress=0)

        cytomine.update_job_status(job.job, status_comment="Loading model...", progress=1)
        logger.i("Loading model...")
        if params.model_file:
            model_file = params.model_file
        else:
            model_job = cytomine.get_job(params.model_id_job)
            model_file = os.path.join(params.cytomine_working_path, "models", str(model_job.software),
                                      "{}.pkl".format(model_job.id))
        with open(model_file, 'rb') as f:
            estimator = pickle.load(f)
            predict_params = vars(params).copy()
            predict_params.pop("image", None)
            predict_params.pop("annotation", None)
            estimator.set_params(**predict_params)

        cytomine.update_job_status(job.job, status_comment="Dumping annotations/images to predict...", progress=3)
        logger.i("Dumping annotations/images to predict...")
        if len(params.annotation) > 0 and params.annotation[0] is not None:
            annots = [cytomine.get_annotation(id) for id in params.annotation]
            annots_collection = AnnotationCollection()
            annots_collection._data = annots
            crops = cytomine.dump_annotations(annotations=annots_collection,
                                              dest_path=os.path.join(params.cytomine_working_path, "crops",
                                                                     str(params.cytomine_project)),
                                              desired_zoom=0,
                                              get_image_url_func=Annotation.get_annotation_alpha_crop_url)
            X = crops.data()
        elif len(params.image) > 0 and params.image[0] is not None:
            image_instances = [cytomine.get_image_instance(id) for id in params.image]
            image_instances = cytomine.dump_project_images(id_project=params.cytomine_project,
                                                           dest_path="/imageinstances/",
                                                           image_instances=image_instances,
                                                           max_size=True)
            X = image_instances
        else:
            X = []

        logger.d("X size: {} samples".format(len(X)))

        for i, x in enumerate(X):
            logger.i("Predicting ID {}...".format(x.id))
            cytomine.update_job_status(job.job, status_comment="Predicting ID {}...".format(x.id),
                                       progress=5 + np.ceil(i / len(X)) * 95)
            y = estimator.predict([x.filename])
            y = estimator.postprocessing([y], **estimator.filter_sk_params(estimator.postprocessing))

            logger.i("Uploading annotations...")
            cytomine.update_job_status(job.job, status_comment="Uploading annotations...")
            upload_annotations(cytomine, x, y, term=params.cytomine_object_term)

        logger.i("Finished.")
        cytomine.update_job_status(job.job, status_comment="Finished.", progress=100)
Example #12
                image_id = row[0]
                tissue = row[1]
                height = float(row[3])
                original_name = row[6]
                scale = row[7]

                path_to_landmarks = os.path.join(params.landmarks, tissue,
                                                 scale, f"{original_name}.csv")

                with open(path_to_landmarks, 'r') as csvfile:

                    f_csv = csv.reader(csvfile,
                                       delimiter=str(','),
                                       quotechar=str('|'))
                    headers = next(f_csv)
                    annotations = AnnotationCollection()

                    for row_landmarks in f_csv:

                        id_landmark = int(row_landmarks[0])

                        # Cytomine uses a bottom-left origin, so flip the y coordinate
                        point = Point(float(row_landmarks[1]),
                                      height - float(row_landmarks[2]))

                        a = Annotation(location=point.wkt,
                                       id_image=image_id,
                                       id_project=params.id_project)
                        a.property = [{
                            "key": "ANNOTATION_GROUP_ID",
                            "value": id_landmark
                        }]
                        annotations.append(a)
Example #13
def main(argv):
    parser = ArgumentParser()
    parser.add_argument(*_cytomine_parameter_name_synonyms("project_id"),
                        dest="project_id",
                        type=int,
                        help="The Cytomine project id.",
                        required=True)
    parser.add_argument(
        "-i",
        "--ignore-existing",
        action="store_true",
        dest="ignore_existing",
        help=
        "Ignore existing ground truth annotations associated with the project. If not specified,"
        " current annotations will be deleted before uploading the new ones.")
    parser.set_defaults(ignore_existing=False)
    options, _ = parser.parse_known_args(argv)

    with Cytomine.connect_from_cli(argv) as cytomine:
        project = Project().fetch(options.project_id)
        print("Project '{}' (#{}): discipline '{}'".format(
            project.name, project.id, project.disciplineShortName))

        if not options.ignore_existing:
            annotations = AnnotationCollection()
            annotations.project = project.id
            annotations.user = cytomine.current_user.id
            annotations.fetch()
            delete_collection(annotations, "annotation")

            tracks = TrackCollection()
            tracks.project = project.id
            tracks.user = cytomine.current_user.id
            tracks.fetch_with_filter("project", project.id)
            tracks._data = [
                t for t in tracks.data() if t.name.startswith("gt-")
            ]
            delete_collection(tracks, "track")

        fake_job = FakeJob(project)
        home = Path.home()
        in_path = os.path.join(home, "data", "in")
        gt_path = os.path.join(home, "data", "gt")
        os.makedirs(in_path)
        os.makedirs(gt_path)
        in_images, gt_images = download_images(fake_job,
                                               in_path,
                                               gt_path,
                                               gt_suffix="_lbl")

        if project.disciplineShortName == "TreTrc":
            # ground truth is contained in swc files so need to
            # convert them into masks beforehand
            print("TreTrc problem: start converting SWC to masks")
            download_attached(in_images, gt_path, do_download=True)
            alternate_gt_path = os.path.join(home, "data", "altgt")
            os.makedirs(alternate_gt_path)
            for in_image in in_images:
                swc_filepath = in_image.attached[0].filepath
                im_size = imageio.volread(in_image.filepath).shape  # (depth, height, width)
                im_size = im_size[::-1]  # reorder to (width, height, depth)
                swc_to_tiff_stack(input_path=swc_filepath,
                                  output_path=os.path.join(
                                      alternate_gt_path, in_image.filename),
                                  im_size=im_size)
            gt_path = alternate_gt_path

        is_2d = guess_dims(gt_path)
        print("Image detected as {}".format("2d" if is_2d else ">2d"))
        upload_data(problemclass=project.disciplineShortName,
                    nj=fake_job,
                    inputs=in_images,
                    out_path=gt_path,
                    is_2d=is_2d,
                    projection=-1)
    parser.add_argument('--cytomine_private_key',
                        dest='private_key',
                        help="The Cytomine private key")

    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image with annotations to delete")
    parser.add_argument('--cytomine_id_user',
                        dest='id_user',
                        help="The user with annotations to delete")
    parser.add_argument('--cytomine_id_project',
                        dest='id_project',
                        help="The project with annotations to delete")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:

        # Get the list of annotations
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        annotations.user = params.id_user
        annotations.project = params.id_project
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            annotation.delete()
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image in which we work")
    parser.add_argument('--cytomine_id_roi_term',
                        dest='id_roi_term',
                        help="The term that represents regions of interest")
    parser.add_argument('--cytomine_id_object_term',
                        dest='id_object_term',
                        help="The term that represents objects")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key) as cytomine:
        roi_annotations = AnnotationCollection()
        roi_annotations.image = params.id_image_instance
        roi_annotations.term = params.id_roi_term
        roi_annotations.fetch()
        print(roi_annotations)

        for roi_annotation in roi_annotations:
            included_annotations = AnnotationCollection()
            included_annotations.image = params.id_image_instance
            included_annotations.term = params.id_object_term
            included_annotations.annotation = roi_annotation.id
            included_annotations.fetch()
            print("Number of annotations of term {} included in ROI {}: {}".
                  format(params.id_object_term, roi_annotation.id,
                         len(included_annotations)))
    # Cytomine
    parser.add_argument('--cytomine_host', dest='host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")

    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
                        help="The image with annotations to delete")
    parser.add_argument('--cytomine_id_user', dest='id_user',
                        help="The user with annotations to delete")
    parser.add_argument('--cytomine_id_project', dest='id_project',
                        help="The project with annotations to delete")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:

        # Get the list of annotations
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        annotations.user = params.id_user
        annotations.project = params.id_project
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            annotation.delete()
Example #17
def run(argv):
    # CytomineJob.from_cli() uses the descriptor.json to automatically create the ArgumentParser
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(statusComment="Initialization...")
        id_project = cj.parameters.cytomine_id_project
        id_terms = cj.parameters.cytomine_id_terms
        id_tags_for_images = cj.parameters.cytomine_id_tags_for_images
        working_path = cj.parameters.working_path

        terms = TermCollection().fetch_with_filter("project", id_project)
        if id_terms:
            filtered_term_ids = [
                int(id_term) for id_term in id_terms.split(',')
            ]
            filtered_terms = TermCollection()
            for term in terms:
                if term.id in filtered_term_ids:
                    filtered_terms.append(term)
        else:
            filtered_terms = terms

        # Associate YOLO class index to Cytomine term
        classes_filename = os.path.join(working_path, CLASSES_FILENAME)
        with open(classes_filename, 'r') as f:
            classes = f.readlines()
            indexes_terms = {}
            for i, _class in enumerate(classes):
                _class = _class.strip()
                indexes_terms[i] = filtered_terms.find_by_attribute(
                    "name", _class)

        cj.job.update(statusComment="Open model...", progress=1)
        # TODO...

        cj.job.update(statusComment="Predictions...", progress=5)
        images = ImageInstanceCollection(
            tags=id_tags_for_images).fetch_with_filter("project", id_project)
        for image in images:
            print("Prediction for image {}".format(image.instanceFilename))
            # TODO: get predictions from YOLO
            # TODO: I suppose here for the sake of the demo that the output format is the same as input, which is not sure
            # <class> <x_center> <y_center> <width> <height> <proba>
            sample_predictions = [
                (0, 0.604, 0.493846153846, 0.1056, 0.461538461538, 0.9),
                (0, 0.4092, 0.606153846154, 0.0504, 0.095384615385, 0.5)
            ]

            ac = AnnotationCollection()
            for pred in sample_predictions:
                _class, xcenter, ycenter, width, height, proba = pred
                term = indexes_terms.get(_class)
                term_ids = [term.id] if term is not None else None
                if term_ids is None:
                    print("No term found for class {}".format(_class))
                geometry = yolo_to_geometry((xcenter, ycenter, width, height),
                                            image.width, image.height)
                properties = [{"key": "probability", "value": proba}]
                ac.append(
                    Annotation(id_image=image.id,
                               id_terms=term_ids,
                               location=geometry.wkt,
                               properties=properties))

            ac.save()

        cj.job.update(statusComment="Finished", progress=100)
    # Cytomine
    parser.add_argument('--cytomine_host', dest='host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project', dest='id_project',
                        help="The project from which we want the crop")
    parser.add_argument('--download_path', required=False,
                        help="Where to store images")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        annotations = AnnotationCollection()
        annotations.project = params.id_project
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print("ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}".format(
                annotation.id,
                annotation.image,
                annotation.project,
                annotation.term,
                annotation.user,
                annotation.area,
                annotation.perimeter,
                annotation.location))
Example #19
        "If unset, all images in the project are used.",
        default=None)
    parser.add_argument('--cytomine_id_job')
    params, _ = parser.parse_known_args(sys.argv[1:])

    with Cytomine(params.cytomine_host, params.cytomine_public_key,
                  params.cytomine_private_key) as c:
        id_tags_for_images = params.cytomine_id_tags_for_images
        id_project = params.cytomine_id_project

        image_tags = id_tags_for_images if id_tags_for_images else None
        images = ImageInstanceCollection(tags=image_tags).fetch_with_filter(
            "project", id_project)
        image_ids = [image.id for image in images]

        groundtruths = AnnotationCollection()
        groundtruths.showTerm = True
        groundtruths.showWKT = True
        groundtruths.images = image_ids
        groundtruths.fetch()

        predictions = AnnotationCollection()
        predictions.showTerm = True
        predictions.showWKT = True
        predictions.images = image_ids
        predictions.job = params.cytomine_id_job
        predictions.fetch()

        print("There are  {} groundtruths and {} predictions".format(
            len(groundtruths), len(predictions)))
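From there the two collections can be compared; a minimal sketch (assuming shapely is installed and showWKT=True populated each annotation's location) that groups ground truths per image and scores each prediction by its best IoU:

from collections import defaultdict
import shapely.wkt

def iou(a, b):
    # intersection-over-union of two shapely geometries
    union = a.union(b).area
    return a.intersection(b).area / union if union > 0 else 0.0

gt_by_image = defaultdict(list)
for gt in groundtruths:
    gt_by_image[gt.image].append(shapely.wkt.loads(gt.location))

for pred in predictions:
    geom = shapely.wkt.loads(pred.location)
    best = max((iou(geom, gt) for gt in gt_by_image[pred.image]), default=0.0)
    print("Prediction {}: best IoU = {:.3f}".format(pred.id, best))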
Example #20
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(progress=1, statusComment="Initialisation")
        cj.log(str(cj.parameters))

        term_ids = [cj.parameters.cytomine_id_predicted_term] \
            if hasattr(cj.parameters, "cytomine_id_predicted_term") else None

        image_ids = [
            int(image_id)
            for image_id in cj.parameters.cytomine_id_images.split(",")
        ]
        images = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        images = [image for image in images if image.id in image_ids]

        tile_size = cj.parameters.tile_size
        tile_overlap = cj.parameters.tile_overlap
        filter_func = _get_filter(cj.parameters.filter)
        projection = cj.parameters.projection
        if projection not in ('min', 'max', 'average'):
            raise ValueError("Projection {} is not found".format(projection))

        cj.log("Filter: {}".format(cj.parameters.filter))
        cj.log("Projection: {}".format(projection))
        for image in cj.monitor(images,
                                prefix="Running detection on image",
                                start=5,
                                end=99):

            def worker_tile_func(tile):
                window = tile.np_image
                threshold = filter_func(window)
                return window, threshold

            cj.log("Get tiles for image {}".format(image.instanceFilename))
            sldc_image = CytomineProjectionSlide(image, projection)
            tile_builder = CytomineProjectionTileBuilder("/tmp")
            topology = sldc_image.tile_topology(tile_builder, tile_size,
                                                tile_size, tile_overlap)

            results = generic_parallel(topology, worker_tile_func)
            thresholds = list()
            for result in results:
                tile, output = result
                window, threshold = output
                thresholds.append(threshold)

            global_threshold = int(np.mean(thresholds))
            cj.log("Mean threshold is {}".format(global_threshold))

            def worker_annotations_func(tile):
                filtered = img_as_uint(tile.np_image > global_threshold)
                return mask_to_objects_2d(filtered, offset=tile.abs_offset)

            cj.log(
                "Extract annotations from filtered tiles for image {}".format(
                    image.instanceFilename))
            results = generic_parallel(topology, worker_annotations_func)
            ids, geometries = list(), list()
            for result in results:
                tile, tile_geometries = result
                # Workaround for slow SemanticMerger; ideally geometries should not be filtered at this stage.
                tile_geometries = [
                    g for g in tile_geometries
                    if g.area > cj.parameters.min_area
                ]
                ids.append(tile.identifier)
                geometries.append(tile_geometries)

            cj.log("Merge annotations from filtered tiles for image {}".format(
                image.instanceFilename))
            merged_geometries = SemanticMerger(tolerance=1).merge(
                ids, geometries, topology)
            cj.log("{} merged geometries".format(len(merged_geometries)))

            if cj.parameters.annotation_slices == 'median':
                # By default, if no slice is given, an annotation is added to the median slice
                slice_ids = [None]
            else:
                slices = SliceInstanceCollection().fetch_with_filter(
                    "imageinstance", image.id)
                if cj.parameters.annotation_slices == 'first':
                    slice_ids = [slices[0].id]
                else:
                    slice_ids = [sl.id for sl in slices]

            ac = AnnotationCollection()
            for geometry in merged_geometries:
                if geometry.area > cj.parameters.min_area:
                    for slice_id in slice_ids:
                        ac.append(
                            Annotation(location=change_referential(
                                geometry, image.height).wkt,
                                       id_image=image.id,
                                       id_terms=term_ids,
                                       id_slice=slice_id))
            ac.save()

        cj.job.update(statusComment="Finished.", progress=100)
Example #21
    working_path = os.path.join(base_path, str(cj.job.id))
    in_path = os.path.join(working_path, "in")
    makedirs(in_path)
    out_path = os.path.join(working_path, "out")
    makedirs(out_path)

    cj.job.update(progress=1, statusComment="Downloading images...")
    images = ImageInstanceCollection().fetch_with_filter(
        "project", params.cytomine_id_project)

    for image in images:
        image.download(os.path.join(in_path, "{id}.tif"))

    for image in images:
        annotations = AnnotationCollection()
        annotations.image = image.id
        annotations.fetch()
        for annotation in annotations:
            annotation.delete()

    cj.job.update(progress=25, statusComment="Launching workflow...")

    command = "/icy/run.sh {} {}".format(in_path, params.scale3sens)
    call(command, shell=True)

    cj.job.update(progress=60, statusComment="Extracting polygons...")

    for image in images:
        file = str(image.id) + "_results.txt"
        path = in_path + "/" + file
Example #22
def main():
    with CytomineJob.from_cli(sys.argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        status_comment="Initialization of the training phase")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path

        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        in_txt = os.path.join(in_path, 'txt/')
        out_path = os.path.join(working_path, "out/")
        gt_path = os.path.join(working_path, "ground_truth/")
        tmp_path = os.path.join(working_path, "tmp/")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)
            os.makedirs(in_txt)
        # 2. Download the images (first input, then ground truth image)
        conn.job.update(
            progress=10,
            statusComment="Downloading images (to {})...".format(in_path))
        print(conn.parameters)
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        xpos = {}
        ypos = {}
        terms = {}

        for image in images:
            image.dump(dest_pattern=in_path.rstrip('/') + '/%d.%s' %
                       (image.id, 'jpg'))

            annotations = AnnotationCollection()
            annotations.project = conn.parameters.cytomine_id_project
            annotations.showWKT = True
            annotations.showMeta = True
            annotations.showGIS = True
            annotations.showTerm = True
            annotations.image = image.id
            annotations.fetch()

            for ann in annotations:
                l = ann.location
                if l.rfind('POINT') == -1:
                    pol = shapely.wkt.loads(l)
                    poi = pol.centroid
                else:
                    poi = shapely.wkt.loads(l)
                (cx, cy) = poi.xy
                xpos[(ann.term[0], image.id)] = int(cx[0])
                ypos[(ann.term[0], image.id)] = image.height - int(cy[0])
                terms[ann.term[0]] = 1

        for image in images:
            F = open(in_txt + '%d.txt' % image.id, 'w')
            for t in terms.keys():
                if (t, image.id) in xpos:
                    F.write('%d %d %d %f %f\n' %
                            (t, xpos[(t, image.id)], ypos[(t, image.id)],
                             xpos[(t, image.id)] / float(image.width),
                             ypos[(t, image.id)] / float(image.height)))
            F.close()

        depths = 1. / (2.**np.arange(conn.parameters.model_depth))

        (xc, yc, xr, yr, ims, t_to_i, i_to_t) = getallcoords(in_txt)

        if conn.parameters.cytomine_id_terms == 'all':
            term_list = t_to_i.keys()
        else:
            term_list = [
                int(term)
                for term in conn.parameters.cytomine_id_terms.split(',')
            ]

        if conn.parameters.cytomine_training_images == 'all':
            tr_im = ims
        else:
            tr_im = [
                int(id_im) for id_im in
                conn.parameters.cytomine_training_images.split(',')
            ]

        DATA = None
        REP = None
        be = 0

        #leprogres = 10
        #pr_spacing = 90/len(term_list)
        #print(term_list)
        sfinal = ""
        for id_term in conn.monitor(term_list,
                                    start=10,
                                    end=90,
                                    period=0.05,
                                    prefix="Model building for terms..."):
            sfinal += "%d " % id_term

            (xc, yc, xr, yr) = getcoordsim(in_txt, id_term, tr_im)
            nimages = np.max(xc.shape)
            mx = np.mean(xr)
            my = np.mean(yr)
            P = np.zeros((2, nimages))
            P[0, :] = xr
            P[1, :] = yr
            cm = np.cov(P)
            passe = False
            # additional parameters
            feature_parameters = None
            if conn.parameters.model_feature_type.lower() == 'gaussian':
                std_matrix = np.eye(2) * (
                    conn.parameters.model_feature_gaussian_std**2)
                feature_parameters = np.round(
                    np.random.multivariate_normal(
                        [0, 0], std_matrix,
                        conn.parameters.model_feature_gaussian_n)).astype(int)
            elif conn.parameters.model_feature_type.lower() == 'haar':
                W = conn.parameters.model_wsize
                n = conn.parameters.model_feature_haar_n / (
                    5 * conn.parameters.model_depth)
                h2 = generate_2_horizontal(W, n)
                v2 = generate_2_vertical(W, n)
                h3 = generate_3_horizontal(W, n)
                v3 = generate_3_vertical(W, n)
                sq = generate_square(W, n)
                feature_parameters = (h2, v2, h3, v3, sq)

            for times in range(conn.parameters.model_ntimes):
                if times == 0:
                    rangrange = 0
                else:
                    rangrange = conn.parameters.model_angle

                T = build_datasets_rot_mp(
                    in_path, tr_im, xc, yc, conn.parameters.model_R,
                    conn.parameters.model_RMAX, conn.parameters.model_P,
                    conn.parameters.model_step, rangrange,
                    conn.parameters.model_wsize,
                    conn.parameters.model_feature_type, feature_parameters,
                    depths, nimages, 'jpg', conn.parameters.model_njobs)
                for i in range(len(T)):
                    (data, rep, img) = T[i]
                    (height, width) = data.shape
                    if not passe:
                        passe = True
                        DATA = np.zeros((height * (len(T) + 100) *
                                         conn.parameters.model_ntimes, width))
                        REP = np.zeros(height * (len(T) + 100) *
                                       conn.parameters.model_ntimes)
                        b = 0
                        be = height
                    DATA[b:be, :] = data
                    REP[b:be] = rep
                    b = be
                    be = be + height

            REP = REP[0:b]
            DATA = DATA[0:b, :]

            clf = ExtraTreesClassifier(
                n_jobs=conn.parameters.model_njobs,
                n_estimators=conn.parameters.model_ntrees)
            clf = clf.fit(DATA, REP)

            parameters_hash = {}

            parameters_hash[
                'cytomine_id_terms'] = conn.parameters.cytomine_id_terms
            parameters_hash['model_R'] = conn.parameters.model_R
            parameters_hash['model_RMAX'] = conn.parameters.model_RMAX
            parameters_hash['model_P'] = conn.parameters.model_P
            parameters_hash['model_npred'] = conn.parameters.model_npred
            parameters_hash['model_ntrees'] = conn.parameters.model_ntrees
            parameters_hash['model_ntimes'] = conn.parameters.model_ntimes
            parameters_hash['model_angle'] = conn.parameters.model_angle
            parameters_hash['model_depth'] = conn.parameters.model_depth
            parameters_hash['model_step'] = conn.parameters.model_step
            parameters_hash['window_size'] = conn.parameters.model_wsize
            parameters_hash[
                'feature_type'] = conn.parameters.model_feature_type
            parameters_hash[
                'feature_haar_n'] = conn.parameters.model_feature_haar_n
            parameters_hash[
                'feature_gaussian_n'] = conn.parameters.model_feature_gaussian_n
            parameters_hash[
                'feature_gaussian_std'] = conn.parameters.model_feature_gaussian_std

            model_filename = joblib.dump(clf,
                                         os.path.join(
                                             out_path,
                                             '%d_model.joblib' % (id_term)),
                                         compress=3)[0]
            cov_filename = joblib.dump([mx, my, cm],
                                       os.path.join(
                                           out_path,
                                           '%d_cov.joblib' % (id_term)),
                                       compress=3)[0]
            parameter_filename = joblib.dump(
                parameters_hash,
                os.path.join(out_path, '%d_parameters.joblib' % id_term),
                compress=3)[0]
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=model_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=cov_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=parameter_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            if conn.parameters.model_feature_type == 'haar' or conn.parameters.model_feature_type == 'gaussian':
                add_filename = joblib.dump(
                    feature_parameters,
                    out_path.rstrip('/') + '/' + '%d_fparameters.joblib' %
                    (id_term))[0]
                AttachedFile(
                    conn.job,
                    domainIdent=conn.job.id,
                    filename=add_filename,
                    domainClassName="be.cytomine.processing.Job").upload()

        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100,
                        status=Job.TERMINATED,
                        statusComment="Job terminated.")
Example #23
def main(argv):
    base_path = str(Path.home())

    #Available filters
    filters = {
        'binary': BinaryFilter(),
        'adaptive': AdaptiveThresholdFilter(),
        'otsu': OtsuFilter()
    }

    #Connect to Cytomine
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        working_path = os.path.join(base_path, "data", str(cj.job.id))
        if not os.path.exists(working_path):
            os.makedirs(working_path)

        filter = filters.get(cj.parameters.cytomine_filter)

        #Initialize the reader to browse the whole image
        whole_slide = WholeSlide(
            cj.get_image_instance(cj.parameters.cytomine_id_image, True))
        reader = CytomineReader(whole_slide,
                                window_position=Bounds(
                                    0, 0, cj.parameters.cytomine_tile_size,
                                    cj.parameters.cytomine_tile_size),
                                zoom=cj.parameters.cytomine_zoom_level,
                                overlap=cj.parameters.cytomine_tile_overlap)
        reader.window_position = Bounds(0, 0, reader.window_position.width,
                                        reader.window_position.height)

        #Browse the slide using reader
        i = 0
        geometries = []
        cj.job.update(progress=1, status_comment="Browsing big image...")

        while True:
            #Read next tile
            reader.read()
            image = reader.data
            #Saving tile image locally
            tile_filename = "%s/image-%d-zoom-%d-tile-%d-x-%d-y-%d.png" % (
                working_path, cj.parameters.cytomine_id_image,
                cj.parameters.cytomine_zoom_level, i, reader.window_position.x,
                reader.window_position.y)
            image.save(tile_filename, "PNG")
            #Apply filtering
            cv_image = np.array(reader.result())
            filtered_cv_image = filter.process(cv_image)
            i += 1
            #Detect connected components
            components = ObjectFinder(filtered_cv_image).find_components()
            #Convert local coordinates (from the tile image) to global coordinates (the whole slide)
            components = whole_slide.convert_to_real_coordinates(
                components, reader.window_position, reader.zoom)
            geometries.extend(
                get_geometries(components, cj.parameters.cytomine_min_area,
                               cj.parameters.cytomine_max_area))

            #Upload annotations (geometries corresponding to connected components) to Cytomine core
            #Upload each geometry and predicted term
            annotations = AnnotationCollection()
            for geometry in geometries:
                pol = shapely.wkt.loads(geometry)
                if pol.is_valid:
                    annotations.append(
                        Annotation(
                            location=geometry,
                            id_image=cj.parameters.cytomine_id_image,
                            id_project=cj.parameters.cytomine_id_project,
                            id_terms=[
                                cj.parameters.cytomine_id_predicted_term
                            ]))
                #Batches of 100 annotations
                if len(annotations) >= 100:
                    annotations.save()
                    annotations = AnnotationCollection()

            annotations.save()
            geometries = []
            if not reader.next(): break

        cj.job.update(
            progress=50,
            statusComment="Detection done, starting Union over whole big image...")

        #Perform Union of geometries (because geometries are computed locally in each tile but objects (e.g. cell clusters) might overlap several tiles)
        host = cj.parameters.cytomine_host.replace("http://", "")
        unioncommand = "groovy -cp \"/lib/jars/*\" /app/union4.groovy http://%s %s %s %d %d %d %d %d %d %d %d %d %d" % (
            host,
            cj._public_key,
            cj._private_key,
            cj.parameters.cytomine_id_image,
            cj.job.userJob,
            cj.parameters.cytomine_id_predicted_term,  #union_term
            cj.parameters.cytomine_union_min_length,  #union_minlength,
            cj.parameters.cytomine_union_bufferoverlap,  #union_bufferoverlap,
            cj.parameters.
            cytomine_union_min_point_for_simplify,  #union_minPointForSimplify,
            cj.parameters.cytomine_union_min_point,  #union_minPoint,
            cj.parameters.cytomine_union_max_point,  #union_maxPoint,
            cj.parameters.cytomine_union_nb_zones_width,  #union_nbzonesWidth,
            cj.parameters.cytomine_union_nb_zones_height
        )  #union_nbzonesHeight)

        os.chdir(base_path)
        print(unioncommand)
        os.system(unioncommand)

    cj.job.update(status=Job.TERMINATED,
                  progress=100,
                  statusComment="Finished.")
    def run(self):
        logging.info("Export will be done in directory {}".format(self.project_path))
        os.makedirs(self.project_path)

        if self.with_metadata or self.with_annotation_metadata:
            self.attached_file_path = os.path.join(self.project_path, "attached_files")
            os.makedirs(self.attached_file_path)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("1/ Export project {}".format(self.project.id))
        self.save_object(self.project)

        logging.info("1.1/ Export project managers")
        admins = UserCollection(admin=True).fetch_with_filter("project", self.project.id)
        for admin in admins:
            self.save_user(admin, "project_manager")

        logging.info("1.2/ Export project contributors")
        users = UserCollection().fetch_with_filter("project", self.project.id)
        for user in users:
            self.save_user(user, "project_contributor")

        if self.with_metadata:
            logging.info("1.3/ Export project metadata")
            self.export_metadata([self.project])

        # --------------------------------------------------------------------------------------------------------------
        logging.info("2/ Export ontology {}".format(self.project.ontology))
        ontology = Ontology().fetch(self.project.ontology)
        self.save_object(ontology)

        logging.info("2.1/ Export ontology creator")
        user = User().fetch(ontology.user)
        self.save_user(user, "ontology_creator")

        if self.with_metadata:
            logging.info("2.2/ Export ontology metadata")
            self.export_metadata([ontology])

        # --------------------------------------------------------------------------------------------------------------
        logging.info("3/ Export terms")
        terms = TermCollection().fetch_with_filter("project", self.project.id)
        self.save_object(terms)

        if self.with_metadata:
            logging.info("3.1/ Export term metadata")
            self.export_metadata(terms)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("4/ Export images")
        images = ImageInstanceCollection().fetch_with_filter("project", self.project.id)
        self.save_object(images)

        if self.with_image_download:
            image_path = os.path.join(self.project_path, "images")
            os.makedirs(image_path)

            def _download_image(image, path):
                logging.info("Download file for image {}".format(image))
                image.download(os.path.join(path, image.originalFilename), override=False, parent=True)

            # Temporarily use threading as backend, as we would need to reconnect to Cytomine in every other process.
            Parallel(n_jobs=-1, backend="threading")(delayed(_download_image)(image, image_path) for image in images)

        logging.info("4.1/ Export image creator users")
        image_users = set([image.user for image in images])
        for image_user in image_users:
            user = User().fetch(image_user)
            self.save_user(user, "image_creator")

        logging.info("4.2/ Export image reviewer users")
        image_users = set([image.reviewUser for image in images if image.reviewUser])
        for image_user in image_users:
            user = User().fetch(image_user)
            self.save_user(user, "image_reviewer")

        if self.with_metadata:
            logging.info("4.3/ Export image metadata")
            self.export_metadata(images)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("4/ Export user annotations")
        user_annotations = AnnotationCollection(showWKT=True, showTerm=True, project=self.project.id).fetch()
        self.save_object(user_annotations, filename="user-annotation-collection")

        logging.info("4.1/ Export user annotation creator users")
        annotation_users = set([annotation.user for annotation in user_annotations])
        for annotation_user in annotation_users:
            user = User().fetch(annotation_user)
            self.save_user(user, "userannotation_creator")

        logging.info("4.2/ Export user annotation term creator users")
        annotation_users = set()
        for annotation in user_annotations:
            if annotation.userByTerm:
                for item in annotation.userByTerm:
                    annotation_users = annotation_users.union(set(item['user']))

        for annotation_user in annotation_users:
            user = User().fetch(annotation_user)
            self.save_user(user, "userannotationterm_creator")

        if self.with_annotation_metadata:
            logging.info("4.3/ Export user annotation metadata")
            self.export_metadata(user_annotations)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("5/ Export users")
        if self.anonymize:
            for i, user in enumerate(self.users):
                user.username = "******".format(i + 1)
                user.firstname = "Anonymized"
                user.lastname = "User {}".format(i + 1)
                user.email = "anonymous{}@unknown.com".format(i + 1)

        self.save_object(self.users)

        # Disabled due to core issue.
        # if self.with_metadata:
        #     logging.info("5.1/ Export user metadata")
        #     self.export_metadata(self.users)

        # --------------------------------------------------------------------------------------------------------------
        if self.with_image_groups:
            logging.info("6/ Export image groups")
            image_groups = ImageGroupCollection().fetch_with_filter("project", self.project.id)
            self.save_object(image_groups)

            if self.with_metadata:
                logging.info("6.1/ Export image group metadata")
                self.export_metadata(image_groups)

            if self.with_image_download:
                image_group_path = os.path.join(self.project_path, "imagegroups")
                os.makedirs(image_group_path)
                for image_group in image_groups:
                    image_group.download(os.path.join(image_group_path, image_group.name), override=False, parent=True)

            image_sequences = ImageGroupImageInstanceCollection()
            for image_group in image_groups:
                image_sequences += ImageGroupImageInstanceCollection().fetch_with_filter("imagegroup", image_group.id)
            self.save_object(image_sequences)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("Finished.")
# Example #25
import logging
import os

from cytomine import Cytomine
from cytomine.models import AnnotationCollection
from patches import Patch
from utils.path import *

host = "http://cytomine.icube.unistra.fr"
public_key = "8da00e26-3bcb-4229-b31d-a2b5937c4e5e"  # check your own keys from your account page in the web interface
private_key = "c0018f6a-8aa1-4791-957b-ab72dce4238d"

term = {''}

if __name__ == '__main__':
    with Cytomine(host=CYTO_HOST,
                  public_key=CYTO_PUB_KEY,
                  private_key=CYTO_PRV_KEY,
                  verbose=logging.INFO) as cytomine:
        annotations = AnnotationCollection()
        annotations.project = "1345"
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showTerm = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        f = open("./anno.csv", "w+")
        f.write("ID;Image;Project;Term;User;Area;Perimeter;WKT;TRACK \n")
        for annotation in annotations:
            f.write("{};{};{};{};{};{};{};{}\n".format(
                annotation.id, annotation.image, annotation.project,
                annotation.term, annotation.user, annotation.area,
                annotation.perimeter, annotation.location))
        f.close()
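        # The same dump could be written with the csv module (a sketch, assumed
        # equivalent), which handles quoting of WKT fields automatically:
        import csv
        with open("./anno.csv", "w", newline="") as csvfile:
            writer = csv.writer(csvfile, delimiter=";")
            writer.writerow(["ID", "Image", "Project", "Term", "User",
                             "Area", "Perimeter", "WKT"])
            for annotation in annotations:
                writer.writerow([annotation.id, annotation.image,
                                 annotation.project, annotation.term,
                                 annotation.user, annotation.area,
                                 annotation.perimeter, annotation.location])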
            filename = os.path.basename(image_paths[i])
            fname, fext = os.path.splitext(filename)
            fname = int(fname)
            org_size = img.shape[:2]

            h_mask = predict_mask(img, h_model)
            size = h_mask.shape[:2]
            cropped_image = cropped(h_mask, img)

            op_mask = predict_mask(cropped_image, op_model)
            op_upsize = cropped_image.shape[:2]

            op_mask = tf.image.resize(op_mask, op_upsize, method='bilinear')
            op_mask = op_pad_up(h_mask, op_mask, size, org_size)
            h_mask = tf.image.resize(h_mask, org_size, method='bilinear')

            h_polygon = make_polygon(h_mask)
            op_polygon = make_polygon(op_mask)

         #   image_id = next((x.id for x in images if x.id == fname), None)
            annotations = AnnotationCollection()
            annotations.append(
                Annotation(location=h_polygon[0].wkt, id_image=fname,
                           id_terms=[143971108], id_project=args.p))
            annotations.append(
                Annotation(location=op_polygon[0].wkt, id_image=fname,
                           id_terms=[143971084], id_project=args.p))
            annotations.save()

        # project 142037659

    # =============================================================================
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization...")
        # base_path = "{}".format(os.getenv("HOME")) # Mandatory for Singularity
        base_path = "/home/mmu/Desktop"
        working_path = os.path.join(base_path, str(conn.job.id))

        #Loading pre-trained Stardist model
        np.random.seed(17)
        lbl_cmap = random_label_cmap()
        #Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        #Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        model = StarDist2D(
            None, name='2D_versatile_HE', basedir='/models/'
        )  #use local model file in ~/models/2D_versatile_HE/

        #Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [
                int(id_img)
                for id_img in conn.parameters.cytomine_id_images.split(',')
            ]

        #Go over images
        for id_image in conn.monitor(list_imgs,
                                     prefix="Running detection on image",
                                     period=0.1):
            #Dump ROI annotations in img from Cytomine server to local images
            #conn.job.update(status=Job.RUNNING, progress=0, statusComment="Fetching ROI annotations...")
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_roi_term
            roi_annotations.image = id_image  #conn.parameters.cytomine_id_image
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)
            #Go over ROI in this image
            #for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                #Get Cytomine ROI coordinates for remapping to whole-slide
                #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print(
                    "----------------------------ROI------------------------------"
                )
                roi_geometry = wkt.loads(roi.location)
                print("ROI Geometry from Shapely: {}".format(roi_geometry))
                print("ROI Bounds")
                print(roi_geometry.bounds)
                minx = roi_geometry.bounds[0]
                miny = roi_geometry.bounds[3]  # actually maxy: Cytomine's (0, 0) is the bottom-left corner
                #Dump ROI image into local PNG file
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image),
                                        str(roi.id))
                roi_png_filename = os.path.join(roi_path,
                                                str(roi.id) + '.png')
                print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, mask=True, alpha=True)
                #roi.dump(dest_pattern=os.path.join(roi_path,"{id}.png"), mask=True, alpha=True)

                #Stardist works with TIFF images without alpha channel, flattening PNG alpha mask to TIFF RGB
                im = Image.open(roi_png_filename)
                bg = Image.new("RGB", im.size, (255, 255, 255))
                bg.paste(im, mask=im.split()[3])
                roi_tif_filename = os.path.join(roi_path,
                                                str(roi.id) + '.tif')
                bg.save(roi_tif_filename, quality=100)
                X_files = sorted(glob(roi_path + '/' + str(roi.id) + '*.tif'))
                X = list(map(imread, X_files))
                n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
                axis_norm = (0, 1)  # normalize channels independently; use (0, 1, 2) to normalize jointly
                if n_channel > 1:
                    print("Normalizing image channels %s." %
                          ('jointly' if axis_norm is None or 2 in axis_norm
                           else 'independently'))

                #Going over ROI images in ROI directory (in our case: one ROI per directory)
                for x in range(0, len(X)):
                    print("------------------- Processing ROI file %d: %s" %
                          (x, roi_tif_filename))
                    img = normalize(X[x],
                                    conn.parameters.stardist_norm_perc_low,
                                    conn.parameters.stardist_norm_perc_high,
                                    axis=axis_norm)
                    #Stardist model prediction with thresholds
                    labels, details = model.predict_instances(
                        img,
                        prob_thresh=conn.parameters.stardist_prob_t,
                        nms_thresh=conn.parameters.stardist_nms_t)
                    print("Number of detected polygons: %d" %
                          len(details['coord']))
                    cytomine_annotations = AnnotationCollection()
                    #Go over detections in this ROI, convert and upload to Cytomine
                    for pos, polygroup in enumerate(details['coord'], start=1):
                        #Converting to Shapely annotation
                        points = list()
                        for i in range(len(polygroup[0])):
                            #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                            #Mapping Stardist polygon detection coordinates to Cytomine ROI in whole slide image
                            p = Point(minx + polygroup[1][i],
                                      miny - polygroup[0][i])
                            points.append(p)

                        annotation = Polygon(points)
                        #Append to Annotation collection
                        cytomine_annotations.append(
                            Annotation(
                                location=annotation.wkt,
                                id_image=
                                id_image,  #conn.parameters.cytomine_id_image,
                                id_project=conn.parameters.cytomine_id_project,
                                id_terms=[
                                    conn.parameters.cytomine_id_cell_term
                                ]))
                        print(".", end='', flush=True)

                    #Send Annotation Collection (for this ROI) to Cytomine server in one http request
                    ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        progress=100,
                        statusComment="Finished.")
# Example #28
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(progress=1, statusComment="Preparing execution")

        # extract images to process
        if cj.parameters.cytomine_zoom_level > 0 and (
                cj.parameters.cytomine_tile_size != 256
                or cj.parameters.cytomine_tile_overlap != 0):
            raise ValueError(
                "when using zoom_level > 0, tile size should be 256 "
                "(given {}) and overlap should be 0 (given {})".format(
                    cj.parameters.cytomine_tile_size,
                    cj.parameters.cytomine_tile_overlap))

        cj.job.update(
            progress=1,
            statusComment="Preparing execution (creating folders,...).")
        # working path
        root_path = str(Path.home())
        working_path = os.path.join(root_path, "images")
        os.makedirs(working_path, exist_ok=True)

        # load training information
        cj.job.update(progress=5,
                      statusComment="Extract properties from training job.")
        train_job = Job().fetch(cj.parameters.cytomine_id_job)
        properties = PropertyCollection(train_job).fetch().as_dict()
        binary = str2bool(properties["binary"].value)
        classes = parse_domain_list(properties["classes"].value)

        cj.job.update(progress=10, statusComment="Download the model file.")
        attached_files = AttachedFileCollection(train_job).fetch()
        model_file = attached_files.find_by_attribute("filename",
                                                      "model.joblib")
        model_filepath = os.path.join(root_path, "model.joblib")
        model_file.download(model_filepath, override=True)
        pyxit = joblib.load(model_filepath)

        # set n_jobs
        pyxit.base_estimator.n_jobs = cj.parameters.n_jobs
        pyxit.n_jobs = cj.parameters.n_jobs

        cj.job.update(progress=45, statusComment="Build workflow.")
        builder = SSLWorkflowBuilder()
        builder.set_tile_size(cj.parameters.cytomine_tile_size,
                              cj.parameters.cytomine_tile_size)
        builder.set_overlap(cj.parameters.cytomine_tile_overlap)
        builder.set_tile_builder(
            CytomineTileBuilder(working_path, n_jobs=cj.parameters.n_jobs))
        builder.set_logger(StandardOutputLogger(level=Logger.INFO))
        builder.set_n_jobs(1)
        builder.set_background_class(0)
        # value 0 will prevent merging but still requires to run the merging check
        # procedure (inefficient)
        builder.set_distance_tolerance(2 if cj.parameters.union_enabled else 0)
        builder.set_segmenter(
            ExtraTreesSegmenter(
                pyxit=pyxit,
                classes=classes,
                prediction_step=cj.parameters.pyxit_prediction_step,
                background=0,
                min_std=cj.parameters.tile_filter_min_stddev,
                max_mean=cj.parameters.tile_filter_max_mean))
        workflow = builder.get()

        area_checker = AnnotationAreaChecker(
            min_area=cj.parameters.min_annotation_area,
            max_area=cj.parameters.max_annotation_area)

        def get_term(label):
            if binary:
                if "cytomine_id_predict_term" not in cj.parameters:
                    return []
                else:
                    return [int(cj.parameters.cytomine_id_predict_term)]
            # multi-class
            return [label]

        zones = extract_images_or_rois(cj.parameters)
        for zone in cj.monitor(zones,
                               start=50,
                               end=90,
                               period=0.05,
                               prefix="Segmenting images/ROIs"):
            results = workflow.process(zone)

            annotations = AnnotationCollection()
            for obj in results:
                if not area_checker.check(obj.polygon):
                    continue
                polygon = obj.polygon
                if isinstance(zone, ImageWindow):
                    polygon = affine_transform(
                        polygon,
                        [1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y])
                polygon = change_referential(polygon, zone.base_image.height)
                if cj.parameters.cytomine_zoom_level > 0:
                    zoom_mult = (2**cj.parameters.cytomine_zoom_level)
                    polygon = affine_transform(
                        polygon, [zoom_mult, 0, 0, zoom_mult, 0, 0])
                annotations.append(
                    Annotation(location=polygon.wkt,
                               id_terms=get_term(obj.label),
                               id_project=cj.project.id,
                               id_image=zone.base_image.image_instance.id))
            annotations.save()

        cj.job.update(status=Job.TERMINATED,
                      statusComment="Finished.",
                      progress=100)
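# AnnotationAreaChecker is not defined in this snippet; a plausible sketch
# consistent with its usage above (check(polygon) -> bool), assuming that a
# max_area of 0 or None disables the upper bound:
class AnnotationAreaChecker(object):
    def __init__(self, min_area, max_area):
        self._min_area = min_area
        self._max_area = max_area

    def check(self, polygon):
        if polygon.area < self._min_area:
            return False
        return not self._max_area or polygon.area <= self._max_area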
# Example #29 (File: run.py, Project: zhang-free/S_Test)
def main(argv):
    print(argv)
    with CytomineJob.from_cli(argv) as cj:

        images = ImageInstanceCollection().fetch_with_filter("project", cj.parameters.cytomine_id_project)
        for image in cj.monitor(images, prefix="Running detection on image", period=0.1):
            # Resize image if needed
            resize_ratio = max(image.width, image.height) / cj.parameters.max_image_size
            if resize_ratio < 1:
                resize_ratio = 1

            resized_width = int(image.width / resize_ratio)
            resized_height = int(image.height / resize_ratio)

            image.dump(dest_pattern="/tmp/{id}.jpg", max_size=max(resized_width, resized_height), bits=image.bitDepth)
            img = cv2.imread(image.filename, cv2.IMREAD_GRAYSCALE)

            thresholded_img = cv2.adaptiveThreshold(img, 2**image.bitDepth, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                    cv2.THRESH_BINARY, cj.parameters.threshold_blocksize,
                                                    cj.parameters.threshold_constant)

            kernel = np.ones((5, 5), np.uint8)
            eroded_img = cv2.erode(thresholded_img, kernel, iterations=cj.parameters.erode_iterations)
            dilated_img = cv2.dilate(eroded_img, kernel, iterations=cj.parameters.dilate_iterations)

            extension = 10
            extended_img = cv2.copyMakeBorder(dilated_img, extension, extension, extension, extension,
                                              cv2.BORDER_CONSTANT, value=2**image.bitDepth)

            components = find_components(extended_img)
            zoom_factor = image.width / float(resized_width)
            for i, component in enumerate(components):
                converted = []
                for point in component[0]:
                    x = int((point[0] - extension) * zoom_factor)
                    y = int(image.height - ((point[1] - extension) * zoom_factor))
                    converted.append((x, y))

                components[i] = Polygon(converted)

            # Find largest component (whole image)
            largest = max(components, key=attrgetter('area'))
            components.remove(largest)

            # Only keep components greater than 5% of whole image
            min_area = int(0.05 * image.width * image.height)

            annotations = AnnotationCollection()
            for component in components:
                if component.area > min_area:
                    annotations.append(Annotation(location=component.wkt, id_image=image.id,
                                                  id_terms=[cj.parameters.cytomine_id_predicted_term],
                                                  id_project=cj.parameters.cytomine_id_project))

                    if len(annotations) % 100 == 0:
                        annotations.save()
                        annotations = AnnotationCollection()

            annotations.save()

        cj.job.update(statusComment="Finished.")
    parser.add_argument('--cytomine_host', dest='host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
                        help="The image in which we work")
    parser.add_argument('--cytomine_id_roi_term', dest='id_roi_term',
                        help="The term that represents regions of interest")
    parser.add_argument('--cytomine_id_object_term', dest='id_object_term',
                        help="The term that represents objects")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        roi_annotations = AnnotationCollection()
        roi_annotations.image = params.id_image_instance
        roi_annotations.term = params.id_roi_term
        roi_annotations.fetch()
        print(roi_annotations)

        for roi_annotation in roi_annotations:
            included_annotations = AnnotationCollection()
            included_annotations.image = params.id_image_instance
            included_annotations.term = params.id_object_term
            included_annotations.annotation = roi_annotation.id
            included_annotations.fetch()
            print("Number of annotations of term {} included in ROI {}: {}".format(
                params.id_object_term, roi_annotation.id, len(included_annotations)))
# Example #31
        # First, we add a point as annotation
        point = Point(10, 10)
        annotation_point = Annotation(
            location=point.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_point.id, params.id_term).save()

        # Then, we add a rectangle as annotation
        rectangle = box(20, 20, 100, 100)
        annotation_rectangle = Annotation(
            location=rectangle.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_rectangle.id, params.id_term).save()

        # We can also add a property (key-value pair) to an annotation
        Property(annotation_rectangle, key="my_property", value=10).save()

        # Print the list of annotations in the given image:
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        annotations.fetch()
        print(annotations)

        # We can also add multiple annotations in one request:
        annotations = AnnotationCollection()
        annotations.append(
            Annotation(location=point.wkt,
                       id_image=params.id_image_instance,
                       id_project=params.id_project))
        annotations.append(
            Annotation(location=rectangle.wkt,
                       id_image=params.id_image_instance,
                       id_project=params.id_project))
        annotations.save()
# Example #32 (File: get_data.py, Project: delimz/unet)
private_key = params.private_key
id_project = params.id_project
img_path = params.download_path
slice_term_id = params.slice_term
imgs = get_image_map(params)
print(imgs)
overlap = params.overlap
datadir = params.download_path

imgs_l = []
dest_base = params.download_path

with Cytomine(host=host, public_key=public_key,
              private_key=private_key) as cytomine:
    res = {}
    annotations = AnnotationCollection()
    annotations.project = id_project
    annotations.showWKT = True
    annotations.showMeta = True
    annotations.showGIS = True
    annotations.showTerm = True
    annotations.showImage = True
    annotations.fetch()
    print(annotations)
    for annotation in annotations:
        print("ID: {} | Img: {} | Pjct: {} | Term: {} ".format(
            annotation.id, annotation.image, annotation.project,
            annotation.term))
        if len(annotation.term) == 1:
            if (annotation.term[0], annotation.image) not in res.keys():
                res[(annotation.term[0], annotation.image)] = []
        f_csv = csv.reader(csvfile, delimiter=',', quotechar='|')
        headers = next(f_csv)

        for row in f_csv:
            image_id = row[0]
            tissue = row[1]
            dye = row[2]

            id2info[image_id] = (tissue, dye)

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        annotations = AnnotationCollection()
        annotations.project = params.id_project
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print(
                "ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
                .format(annotation.id, annotation.image, annotation.project,
                        annotation.term, annotation.user, annotation.area,
                        annotation.perimeter, annotation.location))

            annot = Annotation().fetch(annotation.id)
        point = Point(10, 10)
        annotation_point = Annotation(location=point.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_point.id, params.id_term).save()

        # Then, we add a rectangle as annotation
        rectangle = box(20, 20, 100, 100)
        annotation_rectangle = Annotation(location=rectangle.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_rectangle.id, params.id_term).save()

        # We can also add a property (key-value pair) to an annotation
        Property(annotation_rectangle, key="my_property", value=10).save()

        # Print the list of annotations in the given image:
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        annotations.fetch()
        print(annotations)

        # We can also add multiple annotations in one request:
        annotations = AnnotationCollection()
        annotations.append(Annotation(location=point.wkt, id_image=params.id_image_instance, id_project=params.id_project))
        annotations.append(Annotation(location=rectangle.wkt, id_image=params.id_image_instance, id_project=params.id_project))
        annotations.save()

        # Print the list of annotations in the given image:
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        annotations.fetch()
        print(annotations)