Example #1
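# Imports assumed by this snippet (not shown in the original):
import logging
import sys
from argparse import ArgumentParser

from cytomine import Cytomine
from cytomine.models import AnnotationCollection
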
def run():
    """
    Deletes all annotations from an image, except those created by software.

    Example:
      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_image_instance 347 --cytomine_id_user 61 --cytomine_id_project 154

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_image_instance 3643 --cytomine_id_user 61 --cytomine_id_project 154

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_image_instance 2140 --cytomine_id_user 58 --cytomine_id_project 197
    """
    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine
    parser.add_argument('--cytomine_host',
                        dest='host',
                        default='demo.cytomine.be',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key',
                        dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key',
                        dest='private_key',
                        help="The Cytomine private key")

    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image with annotations to delete")
    parser.add_argument('--cytomine_id_user',
                        dest='id_user',
                        help="The user with annotations to delete")
    parser.add_argument('--cytomine_id_project',
                        dest='id_project',
                        help="The project with annotations to delete")
    params, _ = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        # Get the list of annotations
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        # NOTE: a user job id can be used here to retrieve the annotations
        # created by that job; such annotations cannot be deleted, however.
        annotations.user = params.id_user
        annotations.project = params.id_project
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            annotation.delete()
Example #2
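# Imports assumed by this snippet (not shown in the original); `cyto_job` is
# the job connection provided by the surrounding Cytomine app.
import logging

from cytomine.models import Annotation, AnnotationCollection
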
def run(cyto_job, parameters):

    job = cyto_job.job
    project_id = cyto_job.project
    term_id = parameters.terms_list

    logging.info(f"########### Parameters = {str(parameters)}")
    logging.info(f"########### Term {str(term_id)}")
    logging.info(f"########### Project {str(project_id)}")

    annotations = AnnotationCollection()
    annotations.project = project_id
    annotations.terms = [term_id]
    annotations.fetch()

    progress = 0
    # Guard against an empty collection to avoid a division by zero.
    progress_delta = 1.0 / (1.5 * len(annotations)) if len(annotations) else 0.0

    job.update(
        progress=progress,
        statusComment=f"Converting annotations from project {project_id}")

    new_annotations = AnnotationCollection()
    for a in annotations:
        if a.location is None:
            a.fetch()
        new_annotations.append(
            Annotation(a.location, a.image, a.term, a.project))
    new_annotations.save(chunk=None)

    job.update(progress=0.25, statusComment="Deleting old annotations...")

    for a in annotations:
        a.delete()
        progress += progress_delta
        job.update(progress=progress)
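Example #3

# Imports assumed by this snippet (not shown in the original); `params` (host,
# API keys, project id) is parsed from the command line elsewhere in the
# original script.
import csv
import logging

from cytomine import Cytomine
from cytomine.models import Annotation, AnnotationCollection
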
# NOTE: the opening of this snippet was lost; `csvfile` is assumed to be an
# open handle on a CSV file whose rows map an image id to a (tissue, dye) pair.
id2info = {}
f_csv = csv.reader(csvfile, delimiter=',', quotechar='|')
headers = next(f_csv)

for row in f_csv:
    image_id = row[0]
    tissue = row[1]
    dye = row[2]
    id2info[image_id] = (tissue, dye)

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        annotations = AnnotationCollection()
        annotations.project = params.id_project
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print(
                "ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
                .format(annotation.id, annotation.image, annotation.project,
                        annotation.term, annotation.user, annotation.area,
                        annotation.perimeter, annotation.location))

            annot = Annotation().fetch(annotation.id)
            # All the properties (collection) of the annotation
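Example #4

# Imports assumed by this snippet (not shown in the original):
import os
from glob import glob

import numpy as np
from PIL import Image
from shapely import wkt
from shapely.geometry import Point, Polygon
from tifffile import imread
from csbdeep.utils import normalize
from stardist import random_label_cmap
from stardist.models import StarDist2D

from cytomine import CytomineJob
from cytomine.models import (Annotation, AnnotationCollection,
                             ImageInstanceCollection, Job)
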
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization...")
        # base_path = "{}".format(os.getenv("HOME")) # Mandatory for Singularity
        base_path = "/home/mmu/Desktop"
        working_path = os.path.join(base_path, str(conn.job.id))

        #Loading pre-trained Stardist model
        np.random.seed(17)
        lbl_cmap = random_label_cmap()
        #Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        #Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        model = StarDist2D(
            None, name='2D_versatile_HE', basedir='/models/'
        )  #use local model file in ~/models/2D_versatile_HE/

        #Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [
                int(id_img)
                for id_img in conn.parameters.cytomine_id_images.split(',')
            ]

        #Go over images
        for id_image in conn.monitor(list_imgs,
                                     prefix="Running detection on image",
                                     period=0.1):
            #Dump ROI annotations in img from Cytomine server to local images
            #conn.job.update(status=Job.RUNNING, progress=0, statusComment="Fetching ROI annotations...")
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_roi_term
            roi_annotations.image = id_image  #conn.parameters.cytomine_id_image
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)
            #Go over ROI in this image
            #for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                #Get Cytomine ROI coordinates for remapping to whole-slide
                #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print(
                    "----------------------------ROI------------------------------"
                )
                roi_geometry = wkt.loads(roi.location)
                print("ROI Geometry from Shapely: {}".format(roi_geometry))
                print("ROI Bounds")
                print(roi_geometry.bounds)
                minx = roi_geometry.bounds[0]
                # bounds[3] is the ROI's max y; it is used below to flip
                # Stardist's top-left origin into Cytomine's bottom-left system.
                miny = roi_geometry.bounds[3]
                #Dump ROI image into local PNG file
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image),
                                        str(roi.id))
                roi_png_filename = os.path.join(roi_path,
                                                '{}.png'.format(roi.id))
                print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, mask=True, alpha=True)
                #roi.dump(dest_pattern=os.path.join(roi_path,"{id}.png"), mask=True, alpha=True)

                #Stardist works with TIFF images without alpha channel, flattening PNG alpha mask to TIFF RGB
                im = Image.open(roi_png_filename)
                bg = Image.new("RGB", im.size, (255, 255, 255))
                bg.paste(im, mask=im.split()[3])
                roi_tif_filename = os.path.join(roi_path,
                                                '{}.tif'.format(roi.id))
                bg.save(roi_tif_filename, quality=100)
                X_files = sorted(glob(os.path.join(roi_path,
                                                   '{}*.tif'.format(roi.id))))
                X = list(map(imread, X_files))
                # 1 channel for 2-D grayscale images, otherwise the last axis
                # holds the channels.
                n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
                # Normalize channels independently; use (0, 1, 2) to normalize
                # channels jointly.
                axis_norm = (0, 1)
                if n_channel > 1:
                    print("Normalizing image channels %s." %
                          ('jointly' if axis_norm is None or 2 in axis_norm
                           else 'independently'))

                #Going over ROI images in ROI directory (in our case: one ROI per directory)
                for x in range(0, len(X)):
                    print("------------------- Processing ROI file %d: %s" %
                          (x, roi_tif_filename))
                    img = normalize(X[x],
                                    conn.parameters.stardist_norm_perc_low,
                                    conn.parameters.stardist_norm_perc_high,
                                    axis=axis_norm)
                    #Stardist model prediction with thresholds
                    labels, details = model.predict_instances(
                        img,
                        prob_thresh=conn.parameters.stardist_prob_t,
                        nms_thresh=conn.parameters.stardist_nms_t)
                    print("Number of detected polygons: %d" %
                          len(details['coord']))
                    cytomine_annotations = AnnotationCollection()
                    #Go over detections in this ROI, convert and upload to Cytomine
                    for pos, polygroup in enumerate(details['coord'], start=1):
                        #Converting to Shapely annotation
                        points = list()
                        for i in range(len(polygroup[0])):
                            #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                            #Mapping Stardist polygon detection coordinates to Cytomine ROI in whole slide image
                            p = Point(minx + polygroup[1][i],
                                      miny - polygroup[0][i])
                            points.append(p)

                        annotation = Polygon(points)
                        #Append to Annotation collection
                        cytomine_annotations.append(
                            Annotation(
                                location=annotation.wkt,
                                id_image=id_image,  # conn.parameters.cytomine_id_image
                                id_project=conn.parameters.cytomine_id_project,
                                id_terms=[conn.parameters.cytomine_id_cell_term]))
                        print(".", end='', flush=True)

                    #Send Annotation Collection (for this ROI) to Cytomine server in one http request
                    ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        progress=100,
                        statusComment="Finished.")
Example #5
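# Imports assumed by this snippet (not shown in the original); helpers such as
# _cytomine_parameter_name_synonyms, delete_collection, FakeJob,
# download_images, download_attached, swc_to_tiff_stack, guess_dims and
# upload_data come from the surrounding project.
import os
from argparse import ArgumentParser
from pathlib import Path

import imageio
from cytomine import Cytomine
from cytomine.models import AnnotationCollection, Project, TrackCollection
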
def main(argv):
    parser = ArgumentParser()
    parser.add_argument(*_cytomine_parameter_name_synonyms("project_id"),
                        dest="project_id",
                        type=int,
                        help="The Cytomine project id.",
                        required=True)
    parser.add_argument(
        "-i",
        "--ignore-existing",
        action="store_true",
        dest="ignore_existing",
        help="Ignore existing ground truth annotations associated with the "
             "project. If not specified, current annotations will be deleted "
             "before uploading the new ones.")
    parser.set_defaults(ignore_existing=False)
    options, _ = parser.parse_known_args(argv)

    with Cytomine.connect_from_cli(argv) as cytomine:
        project = Project().fetch(options.project_id)
        print("Project '{}' (#{}): discipline '{}'".format(
            project.name, project.id, project.disciplineShortName))

        if not options.ignore_existing:
            annotations = AnnotationCollection()
            annotations.project = project.id
            annotations.user = cytomine.current_user.id
            annotations.fetch()
            delete_collection(annotations, "annotation")

            tracks = TrackCollection()
            tracks.project = project.id
            tracks.user = cytomine.current_user.id
            tracks.fetch_with_filter("project", project.id)
            tracks._data = [
                t for t in tracks.data() if t.name.startswith("gt-")
            ]
            delete_collection(tracks, "track")

        fake_job = FakeJob(project)
        home = Path.home()
        in_path = os.path.join(home, "data", "in")
        gt_path = os.path.join(home, "data", "gt")
        os.makedirs(in_path)
        os.makedirs(gt_path)
        in_images, gt_images = download_images(fake_job,
                                               in_path,
                                               gt_path,
                                               gt_suffix="_lbl")

        if project.disciplineShortName == "TreTrc":
            # The ground truth is contained in SWC files, so they must be
            # converted into masks beforehand.
            print("TreTrc problem: start converting SWC to masks")
            download_attached(in_images, gt_path, do_download=True)
            alternate_gt_path = os.path.join(home, "data", "altgt")
            os.makedirs(alternate_gt_path)
            for in_image in in_images:
                swc_filepath = in_image.attached[0].filepath
                im_size = imageio.volread(
                    in_image.filepath).shape  # size is (depth, height, width)
                im_size = im_size[::-1]  # invert to (width, height, depth)
                swc_to_tiff_stack(input_path=swc_filepath,
                                  output_path=os.path.join(
                                      alternate_gt_path, in_image.filename),
                                  im_size=im_size)
            gt_path = alternate_gt_path

        is_2d = guess_dims(gt_path)
        print("Image detected as {}".format("2d" if is_2d else ">2d"))
        upload_data(problemclass=project.disciplineShortName,
                    nj=fake_job,
                    inputs=in_images,
                    out_path=gt_path,
                    is_2d=is_2d,
                    projection=-1)
Example #6
from patches import Patch
from utils.path import *
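
# Additional imports assumed by this snippet:
import logging

from cytomine import Cytomine
from cytomine.models import AnnotationCollection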

host = "http://cytomine.icube.unistra.fr"
public_key = "8da00e26-3bcb-4229-b31d-a2b5937c4e5e"  # check your own keys from your account page in the web interface
private_key = "c0018f6a-8aa1-4791-957b-ab72dce4238d"

term = {''}

if __name__ == '__main__':
    with Cytomine(host=host,
                  public_key=public_key,
                  private_key=private_key,
                  verbose=logging.INFO) as cytomine:
        annotations = AnnotationCollection()
        annotations.project = "1345"
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showTerm = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        # NOTE: the header names nine columns but only eight values are
        # written per row (TRACK is never populated).
        with open("./anno.csv", "w+") as f:
            f.write("ID;Image;Project;Term;User;Area;Perimeter;WKT;TRACK\n")
            for annotation in annotations:
                f.write("{};{};{};{};{};{};{};{}\n".format(
                    annotation.id, annotation.image, annotation.project,
                    annotation.term, annotation.user, annotation.area,
                    annotation.perimeter, annotation.location))
Example #7
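# Imports assumed by this snippet (not shown in the original); `params`, `host`
# and the API keys are defined earlier in the original script, as is
# get_image_map().
from cytomine import Cytomine
from cytomine.models import AnnotationCollection
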
id_project = params.id_project
img_path = params.download_path
slice_term_id = params.slice_term
imgs = get_image_map(params)
print(imgs)
overlap = params.overlap
datadir = params.download_path

imgs_l = []
dest_base = params.download_path

with Cytomine(host=host, public_key=public_key,
              private_key=private_key) as cytomine:
    res = {}
    annotations = AnnotationCollection()
    annotations.project = id_project
    annotations.showWKT = True
    annotations.showMeta = True
    annotations.showGIS = True
    annotations.showTerm = True
    annotations.showImage = True
    annotations.fetch()
    print(annotations)
    for annotation in annotations:
        print("ID: {} | Img: {} | Pjct: {} | Term: {} ".format(
            annotation.id, annotation.image, annotation.project,
            annotation.term))
        if len(annotation.term) == 1:
            if (annotation.term[0], annotation.image) not in res.keys():
                res[(annotation.term[0], annotation.image)] = []
            # NOTE: truncated in the original; presumably the annotation is
            # appended to its (term, image) bucket:
            res[(annotation.term[0], annotation.image)].append(annotation)
Example #8
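# Imports assumed by this snippet (not shown in the original); getallcoords,
# getcoordsim, build_datasets_rot_mp and the generate_* Haar feature helpers
# come from the surrounding project.
import os
import sys

import joblib
import numpy as np
import shapely.wkt
from sklearn.ensemble import ExtraTreesClassifier

from cytomine import CytomineJob
from cytomine.models import (AnnotationCollection, AttachedFile,
                             ImageInstanceCollection, Job, Property)
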
def main():
    with CytomineJob.from_cli(sys.argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization of the training phase")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path

        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        in_txt = os.path.join(in_path, 'txt/')
        out_path = os.path.join(working_path, "out/")
        gt_path = os.path.join(working_path, "ground_truth/")
        tmp_path = os.path.join(working_path, "tmp/")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)
            os.makedirs(in_txt)
        # 2. Download the images (first input, then ground truth image)
        conn.job.update(
            progress=10,
            statusComment="Downloading images (to {})...".format(in_path))
        print(conn.parameters)
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        xpos = {}
        ypos = {}
        terms = {}

        for image in images:
            image.dump(dest_pattern=in_path.rstrip('/') + '/%d.%s' %
                       (image.id, 'jpg'))

            annotations = AnnotationCollection()
            annotations.project = conn.parameters.cytomine_id_project
            annotations.showWKT = True
            annotations.showMeta = True
            annotations.showGIS = True
            annotations.showTerm = True
            annotations.image = image.id
            annotations.fetch()

            for ann in annotations:
                l = ann.location
                if l.rfind('POINT') == -1:
                    pol = shapely.wkt.loads(l)
                    poi = pol.centroid
                else:
                    poi = shapely.wkt.loads(l)
                (cx, cy) = poi.xy
                xpos[(ann.term[0], image.id)] = int(cx[0])
                ypos[(ann.term[0], image.id)] = image.height - int(cy[0])
                terms[ann.term[0]] = 1

        for image in images:
            with open(os.path.join(in_txt, '%d.txt' % image.id), 'w') as F:
                for t in terms.keys():
                    if (t, image.id) in xpos:
                        F.write('%d %d %d %f %f\n' %
                                (t, xpos[(t, image.id)], ypos[(t, image.id)],
                                 xpos[(t, image.id)] / float(image.width),
                                 ypos[(t, image.id)] / float(image.height)))

        depths = 1. / (2.**np.arange(conn.parameters.model_depth))

        (xc, yc, xr, yr, ims, t_to_i, i_to_t) = getallcoords(in_txt)

        if conn.parameters.cytomine_id_terms == 'all':
            term_list = t_to_i.keys()
        else:
            term_list = [
                int(term)
                for term in conn.parameters.cytomine_id_terms.split(',')
            ]

        if conn.parameters.cytomine_training_images == 'all':
            tr_im = ims
        else:
            tr_im = [
                int(id_im) for id_im in
                conn.parameters.cytomine_training_images.split(',')
            ]

        DATA = None
        REP = None
        be = 0

        #leprogres = 10
        #pr_spacing = 90/len(term_list)
        #print(term_list)
        sfinal = ""
        for id_term in conn.monitor(term_list,
                                    start=10,
                                    end=90,
                                    period=0.05,
                                    prefix="Model building for terms..."):
            sfinal += "%d " % id_term

            (xc, yc, xr, yr) = getcoordsim(in_txt, id_term, tr_im)
            nimages = np.max(xc.shape)
            mx = np.mean(xr)
            my = np.mean(yr)
            P = np.zeros((2, nimages))
            P[0, :] = xr
            P[1, :] = yr
            cm = np.cov(P)
            passe = False
            # additional parameters
            feature_parameters = None
            if conn.parameters.model_feature_type.lower() == 'gaussian':
                std_matrix = np.eye(2) * (
                    conn.parameters.model_feature_gaussian_std**2)
                feature_parameters = np.round(
                    np.random.multivariate_normal(
                        [0, 0], std_matrix,
                        conn.parameters.model_feature_gaussian_n)).astype(int)
            elif conn.parameters.model_feature_type.lower() == 'haar':
                W = conn.parameters.model_wsize
                # Use integer division: the feature generators expect an
                # integer count (true division yields a float in Python 3).
                n = conn.parameters.model_feature_haar_n // (
                    5 * conn.parameters.model_depth)
                h2 = generate_2_horizontal(W, n)
                v2 = generate_2_vertical(W, n)
                h3 = generate_3_horizontal(W, n)
                v3 = generate_3_vertical(W, n)
                sq = generate_square(W, n)
                feature_parameters = (h2, v2, h3, v3, sq)

            for times in range(conn.parameters.model_ntimes):
                if times == 0:
                    rangrange = 0
                else:
                    rangrange = conn.parameters.model_angle

                T = build_datasets_rot_mp(
                    in_path, tr_im, xc, yc, conn.parameters.model_R,
                    conn.parameters.model_RMAX, conn.parameters.model_P,
                    conn.parameters.model_step, rangrange,
                    conn.parameters.model_wsize,
                    conn.parameters.model_feature_type, feature_parameters,
                    depths, nimages, 'jpg', conn.parameters.model_njobs)
                for i in range(len(T)):
                    (data, rep, img) = T[i]
                    (height, width) = data.shape
                    if not passe:
                        passe = True
                        DATA = np.zeros((height * (len(T) + 100) *
                                         conn.parameters.model_ntimes, width))
                        REP = np.zeros(height * (len(T) + 100) *
                                       conn.parameters.model_ntimes)
                        b = 0
                        be = height
                    DATA[b:be, :] = data
                    REP[b:be] = rep
                    b = be
                    be = be + height

            REP = REP[0:b]
            DATA = DATA[0:b, :]

            clf = ExtraTreesClassifier(
                n_jobs=conn.parameters.model_njobs,
                n_estimators=conn.parameters.model_ntrees)
            clf = clf.fit(DATA, REP)

            parameters_hash = {
                'cytomine_id_terms': conn.parameters.cytomine_id_terms,
                'model_R': conn.parameters.model_R,
                'model_RMAX': conn.parameters.model_RMAX,
                'model_P': conn.parameters.model_P,
                'model_npred': conn.parameters.model_npred,
                'model_ntrees': conn.parameters.model_ntrees,
                'model_ntimes': conn.parameters.model_ntimes,
                'model_angle': conn.parameters.model_angle,
                'model_depth': conn.parameters.model_depth,
                'model_step': conn.parameters.model_step,
                'window_size': conn.parameters.model_wsize,
                'feature_type': conn.parameters.model_feature_type,
                'feature_haar_n': conn.parameters.model_feature_haar_n,
                'feature_gaussian_n': conn.parameters.model_feature_gaussian_n,
                'feature_gaussian_std': conn.parameters.model_feature_gaussian_std,
            }

            model_filename = joblib.dump(clf,
                                         os.path.join(
                                             out_path,
                                             '%d_model.joblib' % (id_term)),
                                         compress=3)[0]
            cov_filename = joblib.dump([mx, my, cm],
                                       os.path.join(
                                           out_path,
                                           '%d_cov.joblib' % (id_term)),
                                       compress=3)[0]
            parameter_filename = joblib.dump(
                parameters_hash,
                os.path.join(out_path, '%d_parameters.joblib' % id_term),
                compress=3)[0]
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=model_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=cov_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=parameter_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            if conn.parameters.model_feature_type in ('haar', 'gaussian'):
                add_filename = joblib.dump(
                    feature_parameters,
                    os.path.join(out_path,
                                 '%d_fparameters.joblib' % id_term))[0]
                AttachedFile(
                    conn.job,
                    domainIdent=conn.job.id,
                    filename=add_filename,
                    domainClassName="be.cytomine.processing.Job").upload()

        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100,
                        status=Job.TERMINATED,
                        statusComment="Job terminated.")
    # Cytomine
    parser.add_argument('--cytomine_host', dest='host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")

    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
                        help="The image with annotations to delete")
    parser.add_argument('--cytomine_id_user', dest='id_user',
                        help="The user with annotations to delete")
    parser.add_argument('--cytomine_id_project', dest='id_project',
                        help="The project with annotations to delete")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:

        # Get the list of annotations
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        annotations.user = params.id_user
        annotations.project = params.id_project
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            annotation.delete()
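Example #10

# Imports assumed by this snippet (not shown in the original):
import os
import string
import warnings

from PIL import Image, ImageDraw
from cytomine.models import AnnotationCollection, ImageInstanceCollection
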
def get_images_mask_per_annotation_per_user(proj_id, image_id, user_id,
                                            scale_factor, dest):
    im = ImageInstanceCollection()
    im.project = proj_id
    im.image = image_id
    im.fetch_with_filter("project", proj_id)
    image_width = int(im[0].width)
    image_height = int(im[0].height)
    print(image_height, image_width)

    annotations = AnnotationCollection()
    annotations.project = proj_id
    annotations.image = image_id

    annotations.user = user_id
    annotations.showWKT = True
    annotations.showMeta = True
    annotations.showTerm = True
    annotations.showGIS = True
    annotations.showImage = True
    annotations.showUser = True
    annotations.fetch()

    dct_annotations = {}
    for a in annotations:
        print(a.user)
        if len(a.term) == 1:
            term = a.term[0]
            if term not in dct_annotations:
                dct_annotations[term] = []
            dct_annotations[term].append(a.location)
        else:
            warnings.warn("Not suited for annotations with zero or multiple terms")
    for t, lanno in dct_annotations.items():
        result_image = Image.new(mode='1',
                                 size=(int(image_width * scale_factor),
                                       int(image_height * scale_factor)),
                                 color=0)
        for pwkt in lanno:
            if pwkt.startswith("POLYGON"):
                label = "POLYGON"
            elif pwkt.startswith("MULTIPOLYGON"):
                label = "MULTIPOLYGON"

            coordinatesStringList = pwkt.replace(label, '')

            if label == "POLYGON":
                coordinates_string_lists = [coordinatesStringList]
            elif label == "MULTIPOLYGON":
                coordinates_string_lists = coordinatesStringList.split(
                    ')), ((')

                coordinates_string_lists = [
                    coordinatesStringList.replace('(', '').replace(')', '')
                    for coordinatesStringList in coordinates_string_lists
                ]

            for coordinatesStringList in coordinates_string_lists:
                #  create lists of x and y coordinates
                x_coords = []
                y_coords = []
                for point in coordinatesStringList.split(','):
                    # Remove leading/trailing whitespace and stray punctuation
                    # (some strings end with a ')').
                    point = point.strip(string.whitespace)
                    point = point.strip(string.punctuation)
                    x_coords.append(round(float(point.split(' ')[0])))
                    y_coords.append(round(float(point.split(' ')[1])))

                x_coords_correct_lod = [
                    int(x * scale_factor) for x in x_coords
                ]
                y_coords_correct_lod = [
                    image_height * scale_factor - int(x * scale_factor)
                    for x in y_coords
                ]
                coords = [
                    (i, j)
                    for i, j in zip(x_coords_correct_lod, y_coords_correct_lod)
                ]

                #  draw the polygon in an image and fill it
                ImageDraw.Draw(result_image).polygon(coords, outline=1, fill=1)

        result_image.save(os.path.join(dest, str(t) + '.png'))
Example #11
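# Imports assumed by this snippet (not shown in the original); the enclosing
# connection block, `params`, `id_image_group` and the two point annotations
# (annotation_point1/2) are created earlier in the original example.
from cytomine.models import (AnnotationCollection, AnnotationGroup,
                             AnnotationLink)
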
        annotation_group = AnnotationGroup(
            id_project=params.id_project,
            id_image_group=id_image_group).save()
        print(annotation_group)

        # 2) Add the two annotations to the group to create the links
        al1 = AnnotationLink(id_annotation=annotation_point1.id,
                             id_annotation_group=annotation_group.id).save()
        print(al1)
        al2 = AnnotationLink(id_annotation=annotation_point2.id,
                             id_annotation_group=annotation_group.id).save()
        print(al2)

        # List all annotations in that annotation group:
        annots = AnnotationCollection()
        annots.project = 682669
        annots.showLink = True
        annots.group = annotation_group.id
        annots.fetch()
        print(annots)
        for annot in annots:
            n_links = len(annot.annotationLink)
            # n_links will be 2 as it contains links al1->annotation_group and al2->annotation_group
            linked_annot_ids = [
                al['annotation'] for al in annot.annotationLink
            ]

            print("Annotation {} in image {} has {} links (annotations: {})".
                  format(annot.id, annot.image, n_links, linked_annot_ids))

        # ---------------
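Example #12

# Imports assumed by this snippet (not shown in the original):
import sys
from argparse import ArgumentParser

from cytomine import Cytomine
from cytomine.models import AnnotationCollection, ImageInstanceCollection
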
        "Print the annotation geometry using OpenCV coordinate system for points."
    )

    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key) as cytomine:

        if params.opencv:
            image_instances = ImageInstanceCollection().fetch_with_filter(
                "project", params.id_project)

        # We want all annotations in a given project.
        annotations = AnnotationCollection()
        annotations.project = params.id_project  # Add a filter: only annotations from this project
        # You could add other filters:
        # annotations.image = id_image => Add a filter: only annotations from this image
        # annotations.images = [id1, id2] => Add a filter: only annotations from these images
        # annotations.user = id_user => Add a filter: only annotations from this user
        # ...
        annotations.showWKT = True  # Ask to return WKT location (geometry) in the response
        annotations.showMeta = True  # Ask to return meta information (id, ...) in the response
        annotations.showGIS = True  # Ask to return GIS information (perimeter, area, ...) in the response
        # ...
        # => Fetch annotations from the server with the given filters.
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print(annotation)  # NOTE: the original print statement is truncated here
Example #13
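# Imports assumed by this snippet (not shown in the original):
import os

import cv2
import numpy as np
from shapely import wkt

from cytomine import CytomineJob
from cytomine.models import (Annotation, AnnotationCollection,
                             ImageInstanceCollection, Job, TermCollection)
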
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        # with Cytomine(argv) as conn:
        print(conn.parameters)

        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization...")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
        #           verbose=logging.INFO) as cytomine:

        # ontology = Ontology("classPNcells"+str(conn.parameters.cytomine_id_project)).save()
        # ontology_collection=OntologyCollection().fetch()
        # print(ontology_collection)
        # ontology = Ontology("CLASSPNCELLS").save()
        # terms = TermCollection().fetch_with_filter("ontology", ontology.id)
        terms = TermCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING,
                        progress=1,
                        statusComment="Terms collected...")
        print(terms)

        # term_P = Term("PositiveCell", ontology.id, "#FF0000").save()
        # term_N = Term("NegativeCell", ontology.id, "#00FF00").save()
        # term_P = Term("PositiveCell", ontology, "#FF0000").save()
        # term_N = Term("NegativeCell", ontology, "#00FF00").save()

        # Get all the terms of our ontology
        # terms = TermCollection().fetch_with_filter("ontology", ontology.id)
        # terms = TermCollection().fetch_with_filter("ontology", ontology)
        # print(terms)

        # #Loading pre-trained Stardist model
        # np.random.seed(17)
        # lbl_cmap = random_label_cmap()
        # #Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        # #Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        # model = StarDist2D(None, name='2D_versatile_HE', basedir='/models/')   #use local model file in ~/models/2D_versatile_HE/

        #Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING,
                        progress=2,
                        statusComment="Images gathered...")

        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [
                int(id_img)
                for id_img in conn.parameters.cytomine_id_images.split(',')
            ]
            print(list_imgs)

        #Go over images
        conn.job.update(status=Job.RUNNING,
                        progress=10,
                        statusComment="Running PN classification on image...")
        #for id_image in conn.monitor(list_imgs, prefix="Running PN classification on image", period=0.1):
        for id_image in list_imgs:

            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_cell_term
            roi_annotations.image = id_image  #conn.parameters.cytomine_id_image
            roi_annotations.job = conn.parameters.cytomine_id_annotation_job
            roi_annotations.user = conn.parameters.cytomine_id_user_job
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            #Go over ROI in this image
            #for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                #Get Cytomine ROI coordinates for remapping to whole-slide
                #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print(
                    "----------------------------Cells------------------------------"
                )
                roi_geometry = wkt.loads(roi.location)
                # print("ROI Geometry from Shapely: {}".format(roi_geometry))
                #                 print("ROI Bounds")
                #                 print(roi_geometry.bounds)
                minx = roi_geometry.bounds[0]
                miny = roi_geometry.bounds[3]
                #Dump ROI image into local PNG file
                # roi_path=os.path.join(working_path,str(roi_annotations.project)+'/'+str(roi_annotations.image)+'/'+str(roi.id))
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image))
                #                 print(roi_path)
                roi_png_filename = os.path.join(roi_path,
                                                str(roi.id) + '.png')
                conn.job.update(status=Job.RUNNING,
                                progress=20,
                                statusComment=roi_png_filename)
                #                 print("roi_png_filename: %s" %roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, alpha=True)
                #roi.dump(dest_pattern=os.path.join(roi_path,"{id}.png"), mask=True, alpha=True)

                # im=Image.open(roi_png_filename)

                J = cv2.imread(roi_png_filename, cv2.IMREAD_UNCHANGED)
                J = cv2.cvtColor(J, cv2.COLOR_BGRA2RGBA)
                [r, c, h] = J.shape
                # print("J: ",J)

                blocksize = min(r, c)
                # print("blocksize:",blocksize)
                rr = np.zeros((blocksize, blocksize))
                cc = np.zeros((blocksize, blocksize))

                # Build 1-based row/column index grids (a meshgrid equivalent).
                zz = list(range(1, blocksize + 1))
                # print("zz:", zz)
                for i in zz:
                    rr[i - 1, :] = zz
                # print("rr shape:",rr.shape)

                for i in zz:
                    cc[:, i - 1] = zz
                # print("cc shape:",cc.shape)

                # Distance of each pixel from the fixed reference point
                # (16.5, 16.5); its inverse is used as a weight below.
                cc1 = cc - 16.5
                rr1 = rr - 16.5
                rrcc = cc1**2 + rr1**2

                weight = np.sqrt(rrcc)
                # print("weight: ",weight)
                weight2 = 1. / weight
                # print("weight2: ",weight2)
                #                 print("weight2 shape:",weight2.shape)
                coord = [c / 2, r / 2]
                halfblocksize = blocksize / 2

                y = round(coord[1])
                x = round(coord[0])

                # Convert the RGB image to HSV
                Jalpha = J[:, :, 3]
                Jalphaloc = Jalpha / 255
                Jrgb = cv2.cvtColor(J, cv2.COLOR_RGBA2RGB)
                Jhsv = cv2.cvtColor(Jrgb, cv2.COLOR_RGB2HSV_FULL)
                Jhsv = Jhsv / 255
                Jhsv[:, :, 0] = Jhsv[:, :, 0] * Jalphaloc
                Jhsv[:, :, 1] = Jhsv[:, :, 1] * Jalphaloc
                Jhsv[:, :, 2] = Jhsv[:, :, 2] * Jalphaloc
                # print("Jhsv: ",Jhsv)

                # print("Jhsv size:",Jhsv.shape)
                # print("Jhsv class:",Jhsv.dtype)

                currentblock = Jhsv[0:blocksize, 0:blocksize, :]
                # print("currentblock: ",currentblock)
                #                 print(currentblock.dtype)
                currentblockH = currentblock[:, :, 0]
                currentblockV = 1 - currentblock[:, :, 2]
                hue = sum(sum(currentblockH * weight2))
                val = sum(sum(currentblockV * weight2))
                #                 print("hue:", hue)
                #                 print("val:", val)

                if hue < 2:
                    cellclass = 1
                elif val < 15:
                    cellclass = 2
                else:
                    if hue < 30 or val > 40:
                        cellclass = 1
                    else:
                        cellclass = 2

                # tags = TagCollection().fetch()
                # tags = TagCollection()
                # print(tags)

                if cellclass == 1:
                    #                     print("Positive (H: ", str(hue), ", V: ", str(val), ")")
                    id_terms = conn.parameters.cytomine_id_positive_term
                    # tag = Tag("Positive (H: ", str(hue), ", V: ", str(val), ")").save()
                    # print(tag)
                    # id_terms=Term("PositiveCell", ontology.id, "#FF0000").save()
                elif cellclass == 2:
                    #                     print("Negative (H: ", str(hue), ", V: ", str(val), ")")
                    id_terms = conn.parameters.cytomine_id_negative_term
                    # for t in tags:
                    # tag = Tag("Negative (H: ", str(hue), ", V: ", str(val), ")").save()
                    # print(tag)
                    # id_terms=Term("NegativeCell", ontology.id, "#00FF00").save()

                    # First we create the required resources

                cytomine_annotations = AnnotationCollection()
                # property_collection = PropertyCollection(uri()).fetch("annotation",id_image)
                # property_collection = PropertyCollection().uri()
                # print(property_collection)
                # print(cytomine_annotations)

                # property_collection.append(Property(Annotation().fetch(id_image), key="Hue", value=str(hue)))
                # property_collection.append(Property(Annotation().fetch(id_image), key="Val", value=str(val)))
                # property_collection.save()

                # prop1 = Property(Annotation().fetch(id_image), key="Hue", value=str(hue)).save()
                # prop2 = Property(Annotation().fetch(id_image), key="Val", value=str(val)).save()

                # prop1.Property(Annotation().fetch(id_image), key="Hue", value=str(hue)).save()
                # prop2.Property(Annotation().fetch(id_image), key="Val", value=str(val)).save()

                # for pos, polygroup in enumerate(roi_geometry,start=1):
                #     points=list()
                #     for i in range(len(polygroup[0])):
                #         p=Point(minx+polygroup[1][i],miny-polygroup[0][i])
                #         points.append(p)

                annotation = roi_geometry

                # tags.append(TagDomainAssociation(Annotation().fetch(id_image, tag.id))).save()

                # association = append(TagDomainAssociation(Annotation().fetch(id_image, tag.id))).save()
                # print(association)

                cytomine_annotations.append(
                    Annotation(
                        location=annotation.wkt,  #location=roi_geometry,
                        id_image=id_image,  #conn.parameters.cytomine_id_image,
                        id_project=conn.parameters.cytomine_id_project,
                        id_terms=[id_terms]))
                print(".", end='', flush=True)

                #Send Annotation Collection (for this ROI) to Cytomine server in one http request
                ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        progress=100,
                        statusComment="Finished.")
Example #14
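# Imports assumed by this snippet (not shown in the original); FPN,
# InferencePatchBasedDataset, compute_mean_and_std, inference_on_segmentation
# and _DEVICE come from the surrounding project.
import json
import os

import torch
from torch.utils.data import DataLoader
from shapely.geometry import Point, Polygon

from cytomine import CytomineJob
from cytomine.models import (Annotation, AnnotationCollection,
                             ImageInstanceCollection, Job)
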
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(progress=0, statusComment="Initialization..")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Load pretrained model (assume the best of all)
        conn.job.update(progress=0,
                        statusComment="Loading segmentation model..")

        with open("/models/resnet50b_fpn256/config.json") as f:
            config = json.load(f)
        model = FPN.build_resnet_fpn(
            name=config['name'],
            input_size=conn.parameters.dataset_patch_size,  # must be divisible by 16
            input_channels=1 if config['input']['mode'] == 'grayscale' else 3,
            output_channels=config['fpn']['out_channels'],
            num_classes=2,  # legacy
            in_features=config['fpn']['in_features'],
            out_features=config['fpn']['out_features'])
        model.to(_DEVICE)
        model_dict = torch.load(config['weights'],
                                map_location=torch.device(_DEVICE))
        model.load_state_dict(model_dict['model'])

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)

        if conn.parameters.cytomine_id_images != 'all':
            wanted_ids = {int(x.strip())
                          for x in conn.parameters.cytomine_id_images.split(',')}
            images = [image for image in images if image.id in wanted_ids]
        images_id = [image.id for image in images]

        # Download selected images into "working_directory"
        img_path = os.path.join(working_path, "images")
        os.makedirs(img_path)
        for image in conn.monitor(
                images,
                start=2,
                end=50,
                period=0.1,
                prefix="Downloading images into working directory.."):
            fname, fext = os.path.splitext(image.filename)
            if not image.download(dest_pattern=os.path.join(
                    img_path, "{}{}".format(image.id, fext))):
                print("Failed to download image {}".format(image.filename))

        # create a file that lists all images (used by PatchBasedDataset)
        conn.job.update(progress=50,
                        statusComment="Preparing data for execution..")
        images = os.listdir(img_path)
        images = list(map(lambda x: x + '\n', images))
        with open(os.path.join(working_path, 'images.txt'), 'w') as f:
            f.writelines(images)

        # Prepare dataset and dataloader objects
        # NOTE: assumes every downloaded image shares the extension of the
        # last image processed above.
        ImgTypeBits = {'.dcm': 16}
        channel_bits = ImgTypeBits.get(fext.lower(), 8)
        mean, std = compute_mean_and_std(img_path, bits=channel_bits)

        dataset = InferencePatchBasedDataset(
            path=working_path,
            subset='images',
            patch_size=conn.parameters.dataset_patch_size,
            mode=config['input']['mode'],
            bits=channel_bits,
            mean=mean,
            std=std)

        dataloader = DataLoader(
            dataset=dataset,
            batch_size=conn.parameters.model_batch_size,
            drop_last=False,
            shuffle=False,
            num_workers=0,
            collate_fn=InferencePatchBasedDataset.collate_fn)

        # Go over images
        conn.job.update(status=Job.RUNNING,
                        progress=55,
                        statusComment="Running inference on images..")
        results = inference_on_segmentation(
            model, dataloader, conn.parameters.postprocess_p_threshold)

        for id_image in conn.monitor(
                images_id,
                start=90,
                end=95,
                prefix="Deleting old annotations on images..",
                period=0.1):
            # Delete old annotations
            del_annotations = AnnotationCollection()
            del_annotations.image = id_image
            del_annotations.user = conn.job.id
            del_annotations.project = conn.parameters.cytomine_id_project
            del_annotations.term = conn.parameters.cytomine_id_predict_term
            del_annotations.fetch()
            for annotation in del_annotations:
                annotation.delete()

        conn.job.update(
            status=Job.RUNNING,
            progress=95,
            statusComment="Uploading new annotations to Cytomine server..")
        annotations = AnnotationCollection()
        for instance in results:
            idx, _ = os.path.splitext(instance['filename'])
            width, height = instance['size']

            for box in instance['bbox']:
                points = [
                    Point(box[0], height - 1 - box[1]),
                    Point(box[0], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[1])
                ]
                annotation = Polygon(points)

                annotations.append(
                    Annotation(
                        location=annotation.wkt,
                        id_image=int(idx),
                        id_terms=[conn.parameters.cytomine_id_predict_term],
                        id_project=conn.parameters.cytomine_id_project))
        annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        statusComment="Finished.",
                        progress=100)