import os
import sys
from glob import glob

import numpy as np
from PIL import Image
from shapely import wkt
from shapely.geometry import Point, Polygon
from tifffile import imread

from csbdeep.utils import normalize
from stardist import random_label_cmap
from stardist.models import StarDist2D

from cytomine import CytomineJob
from cytomine.models import (Annotation, AnnotationCollection,
                             ImageInstanceCollection, Job)


def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING, progress=0,
                        statusComment="Initialization...")
        # base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        base_path = "/home/mmu/Desktop"
        working_path = os.path.join(base_path, str(conn.job.id))

        # Load the pre-trained Stardist H&E model
        # (downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        # and https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI)
        np.random.seed(17)
        lbl_cmap = random_label_cmap()
        model = StarDist2D(None, name='2D_versatile_HE',
                           basedir='/models/')  # local model files in /models/2D_versatile_HE/

        # Select the images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        if conn.parameters.cytomine_id_images == 'all':
            list_imgs = [int(image.id) for image in images]
        else:
            list_imgs = [int(id_img) for id_img
                         in conn.parameters.cytomine_id_images.split(',')]

        # Go over the images
        for id_image in conn.monitor(list_imgs,
                                     prefix="Running detection on image",
                                     period=0.1):
            # Fetch the ROI annotations of this image from the Cytomine server
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_roi_term
            roi_annotations.image = id_image
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            # Go over the ROIs in this image
            for roi in roi_annotations:
                # Get the ROI bounds for remapping detections to the whole slide.
                # Cytomine uses a cartesian coordinate system where (0,0) is the
                # bottom-left corner, so bounds[3] is the *top* of the ROI.
                print("----------------------------ROI------------------------------")
                roi_geometry = wkt.loads(roi.location)
                print("ROI Geometry from Shapely: {}".format(roi_geometry))
                print("ROI Bounds")
                print(roi_geometry.bounds)
                min_x = roi_geometry.bounds[0]
                max_y = roi_geometry.bounds[3]

                # Dump the ROI into a local PNG file
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image),
                                        str(roi.id))
                roi_png_filename = os.path.join(roi_path, str(roi.id) + '.png')
                print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, mask=True, alpha=True)

                # Stardist expects TIFF images without an alpha channel:
                # flatten the PNG alpha mask onto a white RGB background
                im = Image.open(roi_png_filename)
                bg = Image.new("RGB", im.size, (255, 255, 255))
                bg.paste(im, mask=im.split()[3])
                roi_tif_filename = os.path.join(roi_path, str(roi.id) + '.tif')
                bg.save(roi_tif_filename, quality=100)

                X_files = sorted(glob(os.path.join(roi_path,
                                                   str(roi.id) + '*.tif')))
                X = list(map(imread, X_files))
                n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
                axis_norm = (0, 1)  # normalize channels independently; (0, 1, 2) normalizes channels jointly
                if n_channel > 1:
                    print("Normalizing image channels %s."
                          % ('jointly' if axis_norm is None or 2 in axis_norm
                             else 'independently'))

                # Go over the ROI images in this ROI directory
                # (in our case: one ROI per directory)
                for x in range(len(X)):
                    print("------------------- Processing ROI file %d: %s"
                          % (x, roi_tif_filename))
                    img = normalize(X[x],
                                    conn.parameters.stardist_norm_perc_low,
                                    conn.parameters.stardist_norm_perc_high,
                                    axis=axis_norm)

                    # Stardist model prediction with user-supplied thresholds
                    labels, details = model.predict_instances(
                        img,
                        prob_thresh=conn.parameters.stardist_prob_t,
                        nms_thresh=conn.parameters.stardist_nms_t)
                    print("Number of detected polygons: %d"
                          % len(details['coord']))

                    # Convert each detection to a Shapely polygon and remap it
                    # from ROI-local pixel coordinates (rows grow downwards) to
                    # whole-slide Cytomine coordinates (y grows upwards)
                    cytomine_annotations = AnnotationCollection()
                    for pos, polygroup in enumerate(details['coord'], start=1):
                        points = list()
                        for i in range(len(polygroup[0])):
                            # polygroup[0] holds rows (y), polygroup[1] holds columns (x)
                            p = Point(min_x + polygroup[1][i],
                                      max_y - polygroup[0][i])
                            points.append(p)
                        annotation = Polygon(points)
                        cytomine_annotations.append(
                            Annotation(
                                location=annotation.wkt,
                                id_image=id_image,
                                id_project=conn.parameters.cytomine_id_project,
                                id_terms=[conn.parameters.cytomine_id_cell_term]))
                        print(".", end='', flush=True)

                    # Send the annotation collection for this ROI to the
                    # Cytomine server in a single HTTP request
                    ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED, progress=100,
                        statusComment="Finished.")
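# The listing defines main() but never calls it. A minimal entry point,
# assuming the usual Cytomine app convention of forwarding the raw CLI
# arguments to CytomineJob.from_cli(); the same pattern would apply to the
# other job scripts below.
if __name__ == "__main__":
    main(sys.argv[1:])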
help="The Cytomine private key") parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance', help="The image in which we work") parser.add_argument('--cytomine_id_roi_term', dest='id_roi_term', help="The term that represents regions of interest") parser.add_argument('--cytomine_id_object_term', dest='id_object_term', help="The term that represents objects") params, other = parser.parse_known_args(sys.argv[1:]) with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key) as cytomine: roi_annotations = AnnotationCollection() roi_annotations.image = params.id_image_instance roi_annotations.term = params.id_roi_term roi_annotations.fetch() print(roi_annotations) for roi_annotation in roi_annotations: included_annotations = AnnotationCollection() included_annotations.image = params.id_image_instance included_annotations.term = params.id_object_term included_annotations.annotation = roi_annotation.id included_annotations.fetch() print("Number of annotations of term {} included in ROI {}: {}". format(params.id_object_term, roi_annotation.id, len(included_annotations)))
import json
import os

import torch
from shapely.geometry import Point, Polygon
from torch.utils.data import DataLoader

from cytomine import CytomineJob
from cytomine.models import (Annotation, AnnotationCollection,
                             ImageInstanceCollection, Job)

# FPN, _DEVICE, compute_mean_and_std, InferencePatchBasedDataset and
# inference_on_segmentation come from this app's own modules; their import
# paths are not shown in the original listing.


def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(progress=0, statusComment="Initialization...")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Load the pretrained model (assumed to be the best available)
        conn.job.update(progress=0,
                        statusComment="Loading segmentation model...")
        with open("/models/resnet50b_fpn256/config.json") as f:
            config = json.load(f)
        model = FPN.build_resnet_fpn(
            name=config['name'],
            input_size=conn.parameters.dataset_patch_size,  # must be divisible by 16
            input_channels=1 if config['input']['mode'] == 'grayscale' else 3,
            output_channels=config['fpn']['out_channels'],
            num_classes=2,  # legacy
            in_features=config['fpn']['in_features'],
            out_features=config['fpn']['out_features'])
        model.to(_DEVICE)
        model_dict = torch.load(config['weights'],
                                map_location=torch.device(_DEVICE))
        model.load_state_dict(model_dict['model'])

        # Select the images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        if conn.parameters.cytomine_id_images != 'all':
            images = [_ for _ in images
                      if _.id in map(lambda x: int(x.strip()),
                                     conn.parameters.cytomine_id_images.split(','))]
        images_id = [image.id for image in images]

        # Download the selected images into the working directory
        img_path = os.path.join(working_path, "images")
        os.makedirs(img_path)
        for image in conn.monitor(
                images, start=2, end=50, period=0.1,
                prefix="Downloading images into working directory.."):
            fname, fext = os.path.splitext(image.filename)
            if image.download(dest_pattern=os.path.join(
                    img_path, "{}{}".format(image.id, fext))) is not True:
                print("Failed to download image {}".format(image.filename))

        # Create a file that lists all images (used by PatchBasedDataset)
        conn.job.update(progress=50,
                        statusComment="Preparing data for execution..")
        images = os.listdir(img_path)
        images = list(map(lambda x: x + '\n', images))
        with open(os.path.join(working_path, 'images.txt'), 'w') as f:
            f.writelines(images)

        # Prepare the dataset and dataloader objects.
        # Note: fext is the extension of the last downloaded image, so all
        # images are assumed to share the same file type.
        ImgTypeBits = {'.dcm': 16}
        channel_bits = ImgTypeBits.get(fext.lower(), 8)
        mean, std = compute_mean_and_std(img_path, bits=channel_bits)
        dataset = InferencePatchBasedDataset(
            path=working_path,
            subset='images',
            patch_size=conn.parameters.dataset_patch_size,
            mode=config['input']['mode'],
            bits=channel_bits,
            mean=mean,
            std=std)
        dataloader = DataLoader(
            dataset=dataset,
            batch_size=conn.parameters.model_batch_size,
            drop_last=False,
            shuffle=False,
            num_workers=0,
            collate_fn=InferencePatchBasedDataset.collate_fn)

        # Go over the images
        conn.job.update(status=Job.RUNNING, progress=55,
                        statusComment="Running inference on images..")
        results = inference_on_segmentation(
            model, dataloader, conn.parameters.postprocess_p_threshold)

        # Delete the old annotations of this job on the processed images
        for id_image in conn.monitor(
                images_id, start=90, end=95, period=0.1,
                prefix="Deleting old annotations on images.."):
            del_annotations = AnnotationCollection()
            del_annotations.image = id_image
            del_annotations.user = conn.job.id
            del_annotations.project = conn.parameters.cytomine_id_project
            del_annotations.term = conn.parameters.cytomine_id_predict_term
            del_annotations.fetch()
            for annotation in del_annotations:
                annotation.delete()

        conn.job.update(
            status=Job.RUNNING, progress=95,
            statusComment="Uploading new annotations to Cytomine server..")
        annotations = AnnotationCollection()
        for instance in results:
            idx, _ = os.path.splitext(instance['filename'])
            width, height = instance['size']
            for box in instance['bbox']:
                # Flip the y axis: the boxes are in image coordinates (origin
                # top-left) while Cytomine expects the origin bottom-left
                points = [
                    Point(box[0], height - 1 - box[1]),
                    Point(box[0], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[1])
                ]
                annotation = Polygon(points)
                annotations.append(
                    Annotation(
                        location=annotation.wkt,
                        id_image=int(idx),
                        id_terms=[conn.parameters.cytomine_id_predict_term],
                        id_project=conn.parameters.cytomine_id_project))
        annotations.save()

        conn.job.update(status=Job.TERMINATED, progress=100,
                        statusComment="Finished.")
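# A small sanity check of the y-axis flip used when building the box polygons
# above (hypothetical helper and values, not part of the app): for an image of
# height H, pixel row y_img with origin at the top-left maps to Cytomine
# ordinate H - 1 - y_img with origin at the bottom-left.
def to_cytomine_y(y_img, height):
    return height - 1 - y_img

assert to_cytomine_y(0, 100) == 99   # top pixel row -> top of the slide
assert to_cytomine_y(99, 100) == 0   # bottom pixel row -> bottom of the slide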
import os

import cv2
import numpy as np
from shapely import wkt

from cytomine import CytomineJob
from cytomine.models import (Annotation, AnnotationCollection,
                             ImageInstanceCollection, Job, TermCollection)


def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        print(conn.parameters)
        conn.job.update(status=Job.RUNNING, progress=0,
                        statusComment="Initialization...")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Get all the terms of the project ontology
        terms = TermCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING, progress=1,
                        statusComment="Terms collected...")
        print(terms)

        # Select the images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING, progress=2,
                        statusComment="Images gathered...")
        if conn.parameters.cytomine_id_images == 'all':
            list_imgs = [int(image.id) for image in images]
        else:
            list_imgs = [int(id_img) for id_img
                         in conn.parameters.cytomine_id_images.split(',')]
        print(list_imgs)

        # Go over the images
        conn.job.update(status=Job.RUNNING, progress=10,
                        statusComment="Running PN classification on image...")
        for id_image in list_imgs:
            # Fetch the cell annotations produced by the detection job
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_cell_term
            roi_annotations.image = id_image
            roi_annotations.job = conn.parameters.cytomine_id_annotation_job
            roi_annotations.user = conn.parameters.cytomine_id_user_job
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            # Go over the cell annotations in this image
            for roi in roi_annotations:
                # Cytomine uses a cartesian coordinate system where (0,0) is
                # the bottom-left corner; the bounds are unused in this
                # classification step
                print("----------------------------Cells------------------------------")
                roi_geometry = wkt.loads(roi.location)
                min_x = roi_geometry.bounds[0]
                max_y = roi_geometry.bounds[3]

                # Dump the cell image into a local PNG file
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image))
                roi_png_filename = os.path.join(roi_path, str(roi.id) + '.png')
                conn.job.update(status=Job.RUNNING, progress=20,
                                statusComment=roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, alpha=True)

                J = cv2.imread(roi_png_filename, cv2.IMREAD_UNCHANGED)
                J = cv2.cvtColor(J, cv2.COLOR_BGRA2RGBA)
                [r, c, h] = J.shape

                # Build an inverse-distance weight map over a square block:
                # pixels close to the block centre get a large weight,
                # distant ones a small one
                if r < c:
                    blocksize = r
                else:
                    blocksize = c
                rr = np.zeros((blocksize, blocksize))
                cc = np.zeros((blocksize, blocksize))
                zz = [*range(1, blocksize + 1)]
                for i in zz:
                    rr[i - 1, :] = zz
                for i in zz:
                    cc[:, i - 1] = zz
                cc1 = np.asarray(cc) - 16.5  # 16.5 = centre offset for the expected 33x33 block
                rr1 = np.asarray(rr) - 16.5
                cc2 = np.asarray(cc1) ** 2
                rr2 = np.asarray(rr1) ** 2
                rrcc = np.asarray(cc2) + np.asarray(rr2)
                weight = np.sqrt(rrcc)   # distance from the block centre
                weight2 = 1. / weight    # inverse-distance weight

                coord = [c / 2, r / 2]
                halfblocksize = blocksize / 2
                y = round(coord[1])
                x = round(coord[0])

                # Convert the RGBA image to HSV, masking with the alpha channel
                Jalpha = J[:, :, 3]
                Jalphaloc = Jalpha / 255
                Jrgb = cv2.cvtColor(J, cv2.COLOR_RGBA2RGB)
                Jhsv = cv2.cvtColor(Jrgb, cv2.COLOR_RGB2HSV_FULL)
                Jhsv = Jhsv / 255
                Jhsv[:, :, 0] = Jhsv[:, :, 0] * Jalphaloc
                Jhsv[:, :, 1] = Jhsv[:, :, 1] * Jalphaloc
                Jhsv[:, :, 2] = Jhsv[:, :, 2] * Jalphaloc

                # Weighted hue and darkness (1 - value) scores over the block
                currentblock = Jhsv[0:blocksize, 0:blocksize, :]
                currentblockH = currentblock[:, :, 0]
                currentblockV = 1 - currentblock[:, :, 2]
                hue = sum(sum(currentblockH * weight2))
                val = sum(sum(currentblockV * weight2))

                # Classify the cell as positive (1) or negative (2) from the
                # weighted hue/darkness scores
                if hue < 2:
                    cellclass = 1
                elif val < 15:
                    cellclass = 2
                else:
                    if hue < 30 or val > 40:
                        cellclass = 1
                    else:
                        cellclass = 2

                if cellclass == 1:
                    id_terms = conn.parameters.cytomine_id_positive_term
                elif cellclass == 2:
                    id_terms = conn.parameters.cytomine_id_negative_term

                # Re-upload the same geometry with the predicted term
                cytomine_annotations = AnnotationCollection()
                annotation = roi_geometry
                cytomine_annotations.append(
                    Annotation(location=annotation.wkt,
                               id_image=id_image,
                               id_project=conn.parameters.cytomine_id_project,
                               id_terms=[id_terms]))
                print(".", end='', flush=True)

                # Send the annotation collection (for this cell) to the
                # Cytomine server in one HTTP request
                ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED, progress=100,
                        statusComment="Finished.")
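# The hue/value decision rule above, pulled out as a pure function so the
# thresholds can be tested in isolation (a sketch; classify_pn is a
# hypothetical name, but the thresholds and the 1=positive / 2=negative
# encoding are the ones hard-coded in the script).
def classify_pn(hue, val):
    """Return 1 (positive cell) or 2 (negative cell) from weighted H/V scores."""
    if hue < 2:
        return 1   # very low weighted hue: positive
    if val < 15:
        return 2   # low weighted darkness: negative
    return 1 if (hue < 30 or val > 40) else 2

assert classify_pn(1, 50) == 1
assert classify_pn(10, 10) == 2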
import logging
import sys
from argparse import ArgumentParser

from cytomine import Cytomine
from cytomine.models import AnnotationCollection

parser = ArgumentParser()
parser.add_argument('--cytomine_host', dest='host',
                    default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
                    help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
                    help="The Cytomine private key")
parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
                    help="The image in which we work")
parser.add_argument('--cytomine_id_roi_term', dest='id_roi_term',
                    help="The term that represents regions of interest")
parser.add_argument('--cytomine_id_object_term', dest='id_object_term',
                    help="The term that represents objects")
params, other = parser.parse_known_args(sys.argv[1:])

with Cytomine(host=params.host, public_key=params.public_key,
              private_key=params.private_key,
              verbose=logging.INFO) as cytomine:
    # Fetch the ROI annotations of the image
    roi_annotations = AnnotationCollection()
    roi_annotations.image = params.id_image_instance
    roi_annotations.term = params.id_roi_term
    roi_annotations.fetch()
    print(roi_annotations)

    # For each ROI, count the object annotations it contains
    for roi_annotation in roi_annotations:
        included_annotations = AnnotationCollection()
        included_annotations.image = params.id_image_instance
        included_annotations.term = params.id_object_term
        included_annotations.annotation = roi_annotation.id
        included_annotations.fetch()
        print("Number of annotations of term {} included in ROI {}: {}".format(
            params.id_object_term, roi_annotation.id,
            len(included_annotations)))
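# Example invocation (hypothetical script name and IDs):
#   python count_included_annotations.py --cytomine_host demo.cytomine.be \
#       --cytomine_public_key <public_key> --cytomine_private_key <private_key> \
#       --cytomine_id_image_instance 1234 --cytomine_id_roi_term 42 \
#       --cytomine_id_object_term 43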