def _load_rectangles(job: Job, image_id: str, term: int, detections: dict) -> None:
    progress = 10
    job.update(progress=progress, status=Job.RUNNING,
               statusComment=f"Uploading detections of type rectangles to image {image_id} with terms {term}")

    rectangles = _generate_rectangles(detections)

    # Upload annotations to server
    # guard against a ZeroDivisionError when no rectangle was detected
    delta = 85 / len(rectangles) if rectangles else 0
    annotations = AnnotationCollection()
    for rectangle in rectangles:
        annotations.append(Annotation(location=rectangle.wkt,
                                      id_image=image_id,
                                      id_terms=[term]))
        progress += delta
        job.update(progress=int(progress), status=Job.RUNNING)

    annotations.save()
    progress = 100
    job.update(progress=progress, status=Job.TERMINATED,
               statusComment="All detections have been uploaded")
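# `_generate_rectangles` is not shown above. A minimal sketch, assuming
# `detections` carries a "rectangles" list of {"x", "y", "w", "h"} dicts —
# the key and field names here are assumptions, not the job's actual format:
from shapely.geometry import box


def _generate_rectangles(detections: dict) -> list:
    """Convert raw detection dicts into shapely rectangles (hypothetical format)."""
    rectangles = []
    for det in detections.get("rectangles", []):
        # box(minx, miny, maxx, maxy) builds an axis-aligned rectangle
        rectangles.append(box(det["x"], det["y"],
                              det["x"] + det["w"], det["y"] + det["h"]))
    return rectangles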
def upload_annotation(self, predicted_data, project_id):
    self.cj.job.update(progress=95, statusComment="Uploading annotations")

    annotations = AnnotationCollection()
    components = ObjectFinder(predicted_data).find_components()
    locations = []
    for component in components:
        location = Polygon(component[0], component[1])
        if location.is_valid:
            locations.append(location)
        else:
            fixed = fix_geometry(location)
            if fixed.is_valid and not fixed.is_empty:
                locations.append(fixed)

    # Second pass: repair any geometry that is still invalid
    for idx, loc in enumerate(locations):
        if not loc.is_valid:
            fixed = fix_geometry(loc)
            if fixed.is_valid and not fixed.is_empty:
                locations[idx] = fixed  # was `loc[idx] = fixed`, which would raise a TypeError

    annotations.extend([
        create_annotation_from_location(loc, self.image_instance.id,
                                        self.image_instance.height, project_id)
        for loc in locations
    ])
    annotations.save(chunk=20)
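# `fix_geometry` is not defined in this snippet. A minimal sketch, assuming the
# usual shapely repair idiom (buffer(0)); this is an assumption, not necessarily
# the project's actual implementation:
from shapely.geometry.base import BaseGeometry


def fix_geometry(geometry: BaseGeometry) -> BaseGeometry:
    """Try to repair an invalid geometry; buffer(0) resolves most self-intersections."""
    return geometry.buffer(0)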
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device, unet=unet,
                                  classes=[0, 1], threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size, job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(
            img_instance=ImageInstance().fetch(job.parameters.cytomine_id_image),
            zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon, slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(Annotation(location=wkt,
                                         id_image=job.parameters.cytomine_id_image,
                                         id_terms=[154005477],
                                         id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)
        return {}
def run(cyto_job, parameters):
    job = cyto_job.job
    project_id = cyto_job.project
    term_id = parameters.terms_list

    logging.info(f"########### Parameters = {str(parameters)}")
    logging.info(f"########### Term {str(term_id)}")
    logging.info(f"########### Project {str(project_id)}")

    annotations = AnnotationCollection()
    annotations.project = project_id
    annotations.terms = [term_id]
    annotations.fetch()

    progress = 0
    progress_delta = 1.0 / (1.50 * len(annotations))
    job.update(progress=progress,
               statusComment=f"Converting annotations from project {project_id}")

    new_annotations = AnnotationCollection()
    for a in annotations:
        if a.location is None:
            a.fetch()
        new_annotations.append(Annotation(a.location, a.image, a.term, a.project))
    new_annotations.save(chunk=None)

    job.update(progress=0.25, statusComment="Deleting old annotations...")
    for a in annotations:
        a.delete()
        progress += progress_delta
        job.update(progress=progress)
def main(argv):
    print(argv)
    with CytomineJob.from_cli(argv) as cj:
        images = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)

        for image in cj.monitor(images, prefix="Running detection on image", period=0.1):
            # Resize image if needed
            resize_ratio = max(image.width, image.height) / cj.parameters.max_image_size
            if resize_ratio < 1:
                resize_ratio = 1

            resized_width = int(image.width / resize_ratio)
            resized_height = int(image.height / resize_ratio)

            image.dump(dest_pattern="/tmp/{id}.jpg",
                       max_size=max(resized_width, resized_height),
                       bits=image.bitDepth)
            img = cv2.imread(image.filename, cv2.IMREAD_GRAYSCALE)

            thresholded_img = cv2.adaptiveThreshold(
                img, 2**image.bitDepth, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                cv2.THRESH_BINARY, cj.parameters.threshold_blocksize,
                cj.parameters.threshold_constant)

            kernel = np.ones((5, 5), np.uint8)
            eroded_img = cv2.erode(thresholded_img, kernel,
                                   iterations=cj.parameters.erode_iterations)
            dilated_img = cv2.dilate(eroded_img, kernel,
                                     iterations=cj.parameters.dilate_iterations)

            extension = 10
            extended_img = cv2.copyMakeBorder(dilated_img, extension, extension,
                                              extension, extension,
                                              cv2.BORDER_CONSTANT,
                                              value=2**image.bitDepth)

            components = find_components(extended_img)
            zoom_factor = image.width / float(resized_width)
            for i, component in enumerate(components):
                converted = []
                for point in component[0]:
                    x = int((point[0] - extension) * zoom_factor)
                    y = int(image.height - ((point[1] - extension) * zoom_factor))
                    converted.append((x, y))
                components[i] = Polygon(converted)

            # Find largest component (whole image)
            largest = max(components, key=attrgetter('area'))
            components.remove(largest)

            # Only keep components greater than 5% of whole image
            min_area = int(0.05 * image.width * image.height)

            annotations = AnnotationCollection()
            for component in components:
                if component.area > min_area:
                    annotations.append(
                        Annotation(location=component.wkt, id_image=image.id,
                                   id_terms=[cj.parameters.cytomine_id_predicted_term],
                                   id_project=cj.parameters.cytomine_id_project))

                    if len(annotations) % 100 == 0:
                        annotations.save()
                        annotations = AnnotationCollection()
            annotations.save()

        cj.job.update(statusComment="Finished.")
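# `find_components` is not shown above. A minimal sketch, assuming it wraps
# cv2.findContours and returns (exterior_points, interior_rings) tuples
# compatible with the loop above — an assumption about its contract, not the
# real implementation:
import cv2


def find_components(mask):
    """Return a list of (exterior_points, interior_rings) per connected component."""
    contours, _ = cv2.findContours(mask.astype("uint8"), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    components = []
    for contour in contours:
        if len(contour) >= 3:  # at least 3 points are needed to build a polygon
            components.append((contour.squeeze(axis=1).tolist(), []))
    return components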
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING, progress=0, statusComment="Initialization...")
        # base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        base_path = "/home/mmu/Desktop"
        working_path = os.path.join(base_path, str(conn.job.id))

        # Loading pre-trained Stardist model
        np.random.seed(17)
        lbl_cmap = random_label_cmap()
        # Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        # Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        model = StarDist2D(None, name='2D_versatile_HE', basedir='/models/')  # use local model file in ~/models/2D_versatile_HE/

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [int(id_img)
                         for id_img in conn.parameters.cytomine_id_images.split(',')]

        # Go over images
        for id_image in conn.monitor(list_imgs, prefix="Running detection on image", period=0.1):
            # Dump ROI annotations in img from Cytomine server to local images
            # conn.job.update(status=Job.RUNNING, progress=0, statusComment="Fetching ROI annotations...")
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_roi_term
            roi_annotations.image = id_image  # conn.parameters.cytomine_id_image
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            # Go over ROI in this image
            # for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                # Get Cytomine ROI coordinates for remapping to whole-slide
                # Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print("----------------------------ROI------------------------------")
                roi_geometry = wkt.loads(roi.location)
                print("ROI Geometry from Shapely: {}".format(roi_geometry))
                print("ROI Bounds")
                print(roi_geometry.bounds)
                minx = roi_geometry.bounds[0]
                miny = roi_geometry.bounds[3]

                # Dump ROI image into local PNG file
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project) + '/' +
                                        str(roi_annotations.image) + '/' + str(roi.id))
                roi_png_filename = os.path.join(roi_path + '/' + str(roi.id) + '.png')
                print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, mask=True, alpha=True)
                # roi.dump(dest_pattern=os.path.join(roi_path, "{id}.png"), mask=True, alpha=True)

                # Stardist works with TIFF images without alpha channel:
                # flatten the PNG alpha mask onto a white RGB background
                im = Image.open(roi_png_filename)
                bg = Image.new("RGB", im.size, (255, 255, 255))
                bg.paste(im, mask=im.split()[3])
                roi_tif_filename = os.path.join(roi_path + '/' + str(roi.id) + '.tif')
                bg.save(roi_tif_filename, quality=100)

                X_files = sorted(glob(roi_path + '/' + str(roi.id) + '*.tif'))
                X = list(map(imread, X_files))
                n_channel = 3 if X[0].ndim == 3 else X[0].shape[-1]
                axis_norm = (0, 1)  # normalize channels independently; (0, 1, 2) normalizes channels jointly
                if n_channel > 1:
                    print("Normalizing image channels %s."
                          % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))

                # Going over ROI images in ROI directory (in our case: one ROI per directory)
                for x in range(0, len(X)):
                    print("------------------- Processing ROI file %d: %s" % (x, roi_tif_filename))
                    img = normalize(X[x], conn.parameters.stardist_norm_perc_low,
                                    conn.parameters.stardist_norm_perc_high, axis=axis_norm)

                    # Stardist model prediction with thresholds
                    labels, details = model.predict_instances(
                        img,
                        prob_thresh=conn.parameters.stardist_prob_t,
                        nms_thresh=conn.parameters.stardist_nms_t)
                    print("Number of detected polygons: %d" % len(details['coord']))

                    cytomine_annotations = AnnotationCollection()
                    # Go over detections in this ROI, convert and upload to Cytomine
                    for pos, polygroup in enumerate(details['coord'], start=1):
                        # Converting to Shapely annotation
                        points = list()
                        for i in range(len(polygroup[0])):
                            # Cytomine cartesian coordinate system, (0,0) is bottom left corner
                            # Mapping Stardist polygon detection coordinates to Cytomine ROI
                            # in whole slide image
                            p = Point(minx + polygroup[1][i], miny - polygroup[0][i])
                            points.append(p)
                        annotation = Polygon(points)

                        # Append to Annotation collection
                        cytomine_annotations.append(
                            Annotation(location=annotation.wkt,
                                       id_image=id_image,  # conn.parameters.cytomine_id_image
                                       id_project=conn.parameters.cytomine_id_project,
                                       id_terms=[conn.parameters.cytomine_id_cell_term]))
                        print(".", end='', flush=True)

                    # Send Annotation Collection (for this ROI) to Cytomine server in one HTTP request
                    ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
path_to_landmarks = os.path.join(params.landmarks, tissue, scale, f"{original_name}.csv")
with open(path_to_landmarks, 'r') as csvfile:
    f_csv = csv.reader(csvfile, delimiter=str(','), quotechar=str('|'))
    headers = next(f_csv)
    annotations = AnnotationCollection()
    for row_landmarks in f_csv:
        id_landmark = int(row_landmarks[0])
        # due to Cytomine's bottom-left origin, the y coordinate is flipped
        point = Point(float(row_landmarks[1]), height - float(row_landmarks[2]))
        a = Annotation(location=point.wkt, id_image=image_id,
                       id_project=params.id_project)
        a.property = [{"key": "ANNOTATION_GROUP_ID", "value": id_landmark}]
        annotations.append(a)
    annotations.save()
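# The loop above assumes a CSV layout like the following (header row first,
# then one landmark per row: id, x, y). This sample is illustrative only, not
# taken from the actual dataset:
#
#   id,x,y
#   0,1520.5,980.0
#   1,1733.2,1102.4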
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(progress=1, statusComment="Initialisation")
        cj.log(str(cj.parameters))

        term_ids = [cj.parameters.cytomine_id_predicted_term] \
            if hasattr(cj.parameters, "cytomine_id_predicted_term") else None

        image_ids = [int(image_id)
                     for image_id in cj.parameters.cytomine_id_images.split(",")]
        images = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        images = [image for image in images if image.id in image_ids]

        tile_size = cj.parameters.tile_size
        tile_overlap = cj.parameters.tile_overlap
        filter_func = _get_filter(cj.parameters.filter)
        projection = cj.parameters.projection
        if projection not in ('min', 'max', 'average'):
            raise ValueError("Projection {} is not found".format(projection))

        cj.log("Filter: {}".format(cj.parameters.filter))
        cj.log("Projection: {}".format(projection))

        for image in cj.monitor(images, prefix="Running detection on image",
                                start=5, end=99):

            def worker_tile_func(tile):
                window = tile.np_image
                threshold = filter_func(window)
                return window, threshold

            cj.log("Get tiles for image {}".format(image.instanceFilename))
            sldc_image = CytomineProjectionSlide(image, projection)
            tile_builder = CytomineProjectionTileBuilder("/tmp")
            topology = sldc_image.tile_topology(tile_builder, tile_size,
                                                tile_size, tile_overlap)
            results = generic_parallel(topology, worker_tile_func)

            thresholds = list()
            for result in results:
                tile, output = result
                window, threshold = output
                thresholds.append(threshold)

            global_threshold = int(np.mean(thresholds))
            cj.log("Mean threshold is {}".format(global_threshold))

            def worker_annotations_func(tile):
                filtered = img_as_uint(tile.np_image > global_threshold)
                return mask_to_objects_2d(filtered, offset=tile.abs_offset)

            cj.log("Extract annotations from filtered tiles for image {}".format(
                image.instanceFilename))
            results = generic_parallel(topology, worker_annotations_func)
            ids, geometries = list(), list()
            for result in results:
                tile, tile_geometries = result
                # Workaround for slow SemanticMerger, but geometries shouldn't be
                # filtered at this stage.
                tile_geometries = [g for g in tile_geometries
                                   if g.area > cj.parameters.min_area]
                ids.append(tile.identifier)
                geometries.append(tile_geometries)

            cj.log("Merge annotations from filtered tiles for image {}".format(
                image.instanceFilename))
            merged_geometries = SemanticMerger(tolerance=1).merge(ids, geometries,
                                                                  topology)
            cj.log("{} merged geometries".format(len(merged_geometries)))

            if cj.parameters.annotation_slices == 'median':
                # By default, if no slice is given, an annotation is added to the median slice
                slice_ids = [None]
            else:
                slices = SliceInstanceCollection().fetch_with_filter(
                    "imageinstance", image.id)
                if cj.parameters.annotation_slices == 'first':
                    slice_ids = [slices[0].id]
                else:
                    slice_ids = [sl.id for sl in slices]

            ac = AnnotationCollection()
            for geometry in merged_geometries:
                if geometry.area > cj.parameters.min_area:
                    for slice_id in slice_ids:
                        ac.append(Annotation(
                            location=change_referential(geometry, image.height).wkt,
                            id_image=image.id,
                            id_terms=term_ids,
                            id_slice=slice_id))
            ac.save()

        cj.job.update(statusComment="Finished.", progress=100)
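# `change_referential` is not defined in these snippets. A minimal sketch,
# assuming it only flips the y axis (Cytomine's origin is the bottom-left
# corner while most image libraries use the top-left):
from shapely.affinity import affine_transform


def change_referential(geometry, height):
    """Map image (top-left origin) coordinates to Cytomine (bottom-left origin)."""
    # affine matrix [a, b, d, e, xoff, yoff]: x' = x, y' = height - y
    return affine_transform(geometry, [1, 0, 0, -1, 0, height])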
def main(argv):
    base_path = str(Path.home())

    # Available filters
    filters = {
        'binary': BinaryFilter(),
        'adaptive': AdaptiveThresholdFilter(),
        'otsu': OtsuFilter()
    }

    # Connect to Cytomine
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING, progress=0, statusComment="Initialisation...")

        working_path = os.path.join(base_path, "data", str(cj.job.id))
        if not os.path.exists(working_path):
            os.makedirs(working_path)

        filter = filters.get(cj.parameters.cytomine_filter)

        # Initialize the reader to browse the whole image
        whole_slide = WholeSlide(cj.get_image_instance(cj.parameters.cytomine_id_image, True))
        reader = CytomineReader(whole_slide,
                                window_position=Bounds(0, 0,
                                                       cj.parameters.cytomine_tile_size,
                                                       cj.parameters.cytomine_tile_size),
                                zoom=cj.parameters.cytomine_zoom_level,
                                overlap=cj.parameters.cytomine_tile_overlap)
        reader.window_position = Bounds(0, 0, reader.window_position.width,
                                        reader.window_position.height)

        # Browse the slide using reader
        i = 0
        geometries = []
        cj.job.update(progress=1, status_comment="Browsing big image...")

        while True:
            # Read next tile
            reader.read()
            image = reader.data

            # Saving tile image locally
            tile_filename = "%s/image-%d-zoom-%d-tile-%d-x-%d-y-%d.png" % (
                working_path,
                cj.parameters.cytomine_id_image,
                cj.parameters.cytomine_zoom_level,
                i,
                reader.window_position.x,
                reader.window_position.y)
            image.save(tile_filename, "PNG")

            # Apply filtering
            cv_image = np.array(reader.result())
            filtered_cv_image = filter.process(cv_image)
            i += 1

            # Detect connected components
            components = ObjectFinder(filtered_cv_image).find_components()

            # Convert local coordinates (from the tile image) to global coordinates (the whole slide)
            components = whole_slide.convert_to_real_coordinates(components,
                                                                 reader.window_position,
                                                                 reader.zoom)
            geometries.extend(get_geometries(components,
                                             cj.parameters.cytomine_min_area,
                                             cj.parameters.cytomine_max_area))

            # Upload annotations (geometries corresponding to connected components)
            # to Cytomine core: upload each geometry and predicted term
            annotations = AnnotationCollection()
            for geometry in geometries:
                pol = shapely.wkt.loads(geometry)
                if pol.is_valid:
                    annotations.append(
                        Annotation(location=geometry,
                                   id_image=cj.parameters.cytomine_id_image,
                                   id_project=cj.parameters.cytomine_id_project,
                                   id_terms=[cj.parameters.cytomine_id_predicted_term]))
                # Batches of 100 annotations
                if len(annotations) % 100 == 0:
                    annotations.save()
                    annotations = AnnotationCollection()
            annotations.save()
            geometries = []

            if not reader.next():
                break

        cj.job.update(progress=50,
                      status_comment="Detection done, starting Union over whole big image...")

        # Perform Union of geometries (because geometries are computed locally in each
        # tile but objects (e.g. cell clusters) might overlap several tiles)
        host = cj.parameters.cytomine_host.replace("http://", "")
        unioncommand = ("groovy -cp \"/lib/jars/*\" /app/union4.groovy "
                        "http://%s %s %s %d %d %d %d %d %d %d %d %d %d" % (
                            host, cj._public_key, cj._private_key,
                            cj.parameters.cytomine_id_image,
                            cj.job.userJob,
                            cj.parameters.cytomine_id_predicted_term,              # union_term
                            cj.parameters.cytomine_union_min_length,               # union_minlength
                            cj.parameters.cytomine_union_bufferoverlap,            # union_bufferoverlap
                            cj.parameters.cytomine_union_min_point_for_simplify,   # union_minPointForSimplify
                            cj.parameters.cytomine_union_min_point,                # union_minPoint
                            cj.parameters.cytomine_union_max_point,                # union_maxPoint
                            cj.parameters.cytomine_union_nb_zones_width,           # union_nbzonesWidth
                            cj.parameters.cytomine_union_nb_zones_height))         # union_nbzonesHeight
        os.chdir(base_path)
        print(unioncommand)
        os.system(unioncommand)

        cj.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
def main():
    with CytomineJob.from_cli(sys.argv) as conn:
        base_path = "{}".format(os.getenv("HOME"))
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        out_path = os.path.join(working_path, "out/")
        tr_working_path = os.path.join(base_path, str(conn.parameters.model_to_use))
        tr_out_path = os.path.join(tr_working_path, "out/")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)

        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        list_imgs = []
        if conn.parameters.images_to_predict == 'all':
            for image in images:
                list_imgs.append(int(image.id))
                image.dump(os.path.join(in_path, '%d.jpg' % image.id))
        else:
            list_imgs = [int(id_img)
                         for id_img in conn.parameters.images_to_predict.split(',')]
            for image in images:
                if image.id in list_imgs:
                    image.dump(os.path.join(in_path, '%d.jpg' % image.id))

        annotation_collection = AnnotationCollection()
        train_job = Job().fetch(conn.parameters.model_to_use)
        properties = PropertyCollection(train_job).fetch()
        str_terms = ""
        for prop in properties:
            if prop.fetch(key='id_terms') is not None:
                str_terms = prop.fetch(key='id_terms').value
        term_list = [int(x) for x in str_terms.split(' ')]
        attached_files = AttachedFileCollection(train_job).fetch()

        for id_term in conn.monitor(term_list, start=10, end=90, period=0.05,
                                    prefix="Finding landmarks for terms..."):
            model_file = find_by_attribute(attached_files, "filename",
                                           "%d_model.joblib" % id_term)
            model_filepath = os.path.join(in_path, "%d_model.joblib" % id_term)
            model_file.download(model_filepath, override=True)

            cov_file = find_by_attribute(attached_files, 'filename',
                                         '%d_cov.joblib' % id_term)
            cov_filepath = os.path.join(in_path, "%d_cov.joblib" % id_term)
            cov_file.download(cov_filepath, override=True)

            parameters_file = find_by_attribute(attached_files, 'filename',
                                                '%d_parameters.joblib' % id_term)
            parameters_filepath = os.path.join(in_path, '%d_parameters.joblib' % id_term)
            parameters_file.download(parameters_filepath, override=True)

            model = joblib.load(model_filepath)
            [mx, my, cm] = joblib.load(cov_filepath)
            parameters_hash = joblib.load(parameters_filepath)

            feature_parameters = None
            if parameters_hash['feature_type'] in ['haar', 'gaussian']:
                fparameters_file = find_by_attribute(attached_files, 'filename',
                                                     "%d_fparameters.joblib" % id_term)
                fparameters_filepath = os.path.join(in_path,
                                                    "%d_fparameters.joblib" % id_term)
                fparameters_file.download(fparameters_filepath, override=True)
                feature_parameters = joblib.load(fparameters_filepath)

            for id_img in list_imgs:
                (x, y) = searchpoint_cytomine(
                    in_path, id_img, model, mx, my, cm,
                    1. / (2. ** np.arange(parameters_hash['model_depth'])),
                    parameters_hash['window_size'],
                    parameters_hash['feature_type'],
                    feature_parameters, 'jpg',
                    parameters_hash['model_npred'])
                circle = Point(x, y)
                annotation_collection.append(
                    Annotation(location=circle.wkt, id_image=id_img,
                               id_terms=[id_term],
                               id_project=conn.parameters.cytomine_id_project))

        annotation_collection.save()
def run(argv):
    # CytomineJob.from_cli() uses the descriptor.json to automatically create the ArgumentParser
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(statusComment="Initialization...")
        id_project = cj.parameters.cytomine_id_project
        id_terms = cj.parameters.cytomine_id_terms
        id_tags_for_images = cj.parameters.cytomine_id_tags_for_images
        working_path = cj.parameters.working_path

        terms = TermCollection().fetch_with_filter("project", id_project)
        if id_terms:
            filtered_term_ids = [int(id_term) for id_term in id_terms.split(',')]
            filtered_terms = TermCollection()
            for term in terms:
                if term.id in filtered_term_ids:
                    filtered_terms.append(term)
        else:
            filtered_terms = terms

        # Associate YOLO class index to Cytomine term
        classes_filename = os.path.join(working_path, CLASSES_FILENAME)
        with open(classes_filename, 'r') as f:
            classes = f.readlines()

        indexes_terms = {}
        for i, _class in enumerate(classes):
            _class = _class.strip()
            indexes_terms[i] = filtered_terms.find_by_attribute("name", _class)

        cj.job.update(statusComment="Open model...", progress=1)
        # TODO...

        cj.job.update(statusComment="Predictions...", progress=5)
        images = ImageInstanceCollection(tags=id_tags_for_images).fetch_with_filter(
            "project", id_project)
        for image in images:
            print("Prediction for image {}".format(image.instanceFilename))
            # TODO: get predictions from YOLO
            # TODO: I suppose here for the sake of the demo that the output format
            # is the same as input, which is not sure
            # <class> <x_center> <y_center> <width> <height> <proba>
            sample_predictions = [
                (0, 0.604000000000, 0.493846153846, 0.105600000000, 0.461538461538, 0.9),
                (0, 0.409200000000, 0.606153846154, 0.050400000000, 0.095384615385, 0.5)
            ]

            ac = AnnotationCollection()
            for pred in sample_predictions:
                _class, xcenter, ycenter, width, height, proba = pred
                term_ids = [indexes_terms[_class].id] \
                    if _class in indexes_terms.keys() else None
                if term_ids is None:
                    print("No term found for class {}".format(_class))
                geometry = yolo_to_geometry((xcenter, ycenter, width, height),
                                            image.width, image.height)
                properties = [{"key": "probability", "value": proba}]
                ac.append(Annotation(id_image=image.id, id_terms=term_ids,
                                     location=geometry.wkt, properties=properties))
            ac.save()

        cj.job.update(statusComment="Finished", progress=100)
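# `yolo_to_geometry` is not shown above. A minimal sketch, assuming it converts
# a normalized YOLO box (x_center, y_center, width, height in [0, 1]) into a
# shapely rectangle in Cytomine's bottom-left referential:
from shapely.geometry import box


def yolo_to_geometry(yolo_box, image_width, image_height):
    """Map a normalized YOLO box to pixel coordinates with a bottom-left origin."""
    xcenter, ycenter, width, height = yolo_box
    x_min = (xcenter - width / 2) * image_width
    x_max = (xcenter + width / 2) * image_width
    # flip y: YOLO measures from the top of the image, Cytomine from the bottom
    y_min = (1 - (ycenter + height / 2)) * image_height
    y_max = (1 - (ycenter - height / 2)) * image_height
    return box(x_min, y_min, x_max, y_max)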
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(progress=0, statusComment="Initialization..")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Load pretrained model (assume the best of all)
        conn.job.update(progress=0, statusComment="Loading segmentation model..")
        with open("/models/resnet50b_fpn256/config.json") as f:
            config = json.load(f)
        model = FPN.build_resnet_fpn(
            name=config['name'],
            input_size=conn.parameters.dataset_patch_size,  # must be divisible by 16
            input_channels=1 if config['input']['mode'] == 'grayscale' else 3,
            output_channels=config['fpn']['out_channels'],
            num_classes=2,  # legacy
            in_features=config['fpn']['in_features'],
            out_features=config['fpn']['out_features'])
        model.to(_DEVICE)
        model_dict = torch.load(config['weights'], map_location=torch.device(_DEVICE))
        model.load_state_dict(model_dict['model'])

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        if conn.parameters.cytomine_id_images != 'all':
            images = [_ for _ in images
                      if _.id in map(lambda x: int(x.strip()),
                                     conn.parameters.cytomine_id_images.split(','))]
        images_id = [image.id for image in images]

        # Download selected images into "working_directory"
        img_path = os.path.join(working_path, "images")
        os.makedirs(img_path)
        for image in conn.monitor(images, start=2, end=50, period=0.1,
                                  prefix="Downloading images into working directory.."):
            fname, fext = os.path.splitext(image.filename)
            if image.download(dest_pattern=os.path.join(
                    img_path, "{}{}".format(image.id, fext))) is not True:
                print("Failed to download image {}".format(image.filename))

        # Create a file that lists all images (used by PatchBasedDataset)
        conn.job.update(progress=50, statusComment="Preparing data for execution..")
        images = os.listdir(img_path)
        images = list(map(lambda x: x + '\n', images))
        with open(os.path.join(working_path, 'images.txt'), 'w') as f:
            f.writelines(images)

        # Prepare dataset and dataloader objects
        ImgTypeBits = {'.dcm': 16}
        channel_bits = ImgTypeBits.get(fext.lower(), 8)
        mean, std = compute_mean_and_std(img_path, bits=channel_bits)

        dataset = InferencePatchBasedDataset(
            path=working_path,
            subset='images',
            patch_size=conn.parameters.dataset_patch_size,
            mode=config['input']['mode'],
            bits=channel_bits,
            mean=mean,
            std=std)
        dataloader = DataLoader(
            dataset=dataset,
            batch_size=conn.parameters.model_batch_size,
            drop_last=False,
            shuffle=False,
            num_workers=0,
            collate_fn=InferencePatchBasedDataset.collate_fn)

        # Go over images
        conn.job.update(status=Job.RUNNING, progress=55,
                        statusComment="Running inference on images..")
        results = inference_on_segmentation(
            model, dataloader, conn.parameters.postprocess_p_threshold)

        for id_image in conn.monitor(images_id, start=90, end=95, period=0.1,
                                     prefix="Deleting old annotations on images.."):
            # Delete old annotations
            del_annotations = AnnotationCollection()
            del_annotations.image = id_image
            del_annotations.user = conn.job.id
            del_annotations.project = conn.parameters.cytomine_id_project
            # trailing comma removed: it silently turned the term filter into a tuple
            del_annotations.term = conn.parameters.cytomine_id_predict_term
            del_annotations.fetch()
            for annotation in del_annotations:
                annotation.delete()

        conn.job.update(status=Job.RUNNING, progress=95,
                        statusComment="Uploading new annotations to Cytomine server..")
        annotations = AnnotationCollection()
        for instance in results:
            idx, _ = os.path.splitext(instance['filename'])
            width, height = instance['size']
            for box in instance['bbox']:
                points = [
                    Point(box[0], height - 1 - box[1]),
                    Point(box[0], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[1])
                ]
                annotation = Polygon(points)
                annotations.append(Annotation(
                    location=annotation.wkt,
                    id_image=int(idx),
                    id_terms=[conn.parameters.cytomine_id_predict_term],
                    id_project=conn.parameters.cytomine_id_project))
        annotations.save()

        conn.job.update(status=Job.TERMINATED, statusComment="Finish", progress=100)
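# `compute_mean_and_std` is not shown. A minimal sketch, assuming it returns
# per-channel mean and std over all images of a folder, normalized by bit
# depth — the real implementation may stream images (and decode DICOM with a
# dedicated reader) instead of loading everything with PIL:
import os

import numpy as np
from PIL import Image


def compute_mean_and_std(folder, bits=8):
    """Per-channel mean/std over the images of `folder`, scaled to [0, 1]."""
    scale = float(2 ** bits - 1)
    pixels = []
    for filename in os.listdir(folder):
        arr = np.asarray(Image.open(os.path.join(folder, filename)),
                         dtype=np.float64) / scale
        # flatten to (n_pixels, n_channels), treating grayscale as one channel
        pixels.append(arr.reshape(-1, arr.shape[-1]) if arr.ndim == 3
                      else arr.reshape(-1, 1))
    stacked = np.concatenate(pixels, axis=0)
    return stacked.mean(axis=0), stacked.std(axis=0)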
point = Point(10, 10)
annotation_point = Annotation(location=point.wkt,
                              id_image=params.id_image_instance).save()
if params.id_term:
    AnnotationTerm(annotation_point.id, params.id_term).save()

# Then, we add a rectangle as annotation
rectangle = box(20, 20, 100, 100)
annotation_rectangle = Annotation(location=rectangle.wkt,
                                  id_image=params.id_image_instance).save()
if params.id_term:
    AnnotationTerm(annotation_rectangle.id, params.id_term).save()

# We can also add a property (key-value pair) to an annotation
Property(annotation_rectangle, key="my_property", value=10).save()

# Print the list of annotations in the given image:
annotations = AnnotationCollection()
annotations.image = params.id_image_instance
annotations.fetch()
print(annotations)

# We can also add multiple annotations in one request:
annotations = AnnotationCollection()
annotations.append(Annotation(location=point.wkt,
                              id_image=params.id_image_instance,
                              id_project=params.id_project))
annotations.append(Annotation(location=rectangle.wkt,
                              id_image=params.id_image_instance,
                              id_project=params.id_project))
annotations.save()

# Print the list of annotations in the given image:
annotations = AnnotationCollection()
annotations.image = params.id_image_instance
annotations.fetch()
print(annotations)
def end_successful_import(self, path: Path, image: Image, *args, **kwargs):
    uf = self.get_uf(path)

    ai = AbstractImage()
    ai.uploadedFile = uf.id
    ai.originalFilename = uf.originalFilename
    ai.width = image.width
    ai.height = image.height
    ai.depth = image.depth
    ai.duration = image.duration
    ai.channels = image.n_intrinsic_channels
    ai.extrinsicChannels = image.n_channels
    if image.physical_size_x:
        ai.physicalSizeX = round(convert_quantity(image.physical_size_x, "micrometers"), 6)
    if image.physical_size_y:
        ai.physicalSizeY = round(convert_quantity(image.physical_size_y, "micrometers"), 6)
    if image.physical_size_z:
        ai.physicalSizeZ = round(convert_quantity(image.physical_size_z, "micrometers"), 6)
    if image.frame_rate:
        ai.fps = round(convert_quantity(image.frame_rate, "Hz"), 6)
    ai.magnification = parse_int(image.objective.nominal_magnification)
    ai.bitPerSample = dtype_to_bits(image.pixel_type)
    ai.samplePerPixel = image.n_channels / image.n_intrinsic_channels
    ai.save()
    self.abstract_images.append(ai)

    asc = AbstractSliceCollection()
    set_channel_names = image.n_intrinsic_channels == image.n_channels
    for c in range(image.n_intrinsic_channels):
        name = None
        color = None
        if set_channel_names:
            name = image.channels[c].suggested_name
            color = image.channels[c].hex_color
        for z in range(image.depth):
            for t in range(image.duration):
                mime = "image/pyrtiff"  # TODO: remove
                asc.append(AbstractSlice(ai.id, uf.id, mime, c, z, t,
                                         channelName=name, channelColor=color))
    asc.save()

    properties = PropertyCollection(ai)
    for metadata in image.raw_metadata.values():
        if metadata.value is not None and str(metadata.value) != '':
            properties.append(Property(ai, metadata.namespaced_key, str(metadata.value)))
    try:
        properties.save()
    except CollectionPartialUploadException:
        pass  # TODO: improve handling of this exception, but prevent to fail the import

    uf.status = UploadedFile.DEPLOYED
    uf.update()

    properties = PropertyCollection(ai)
    for k, v in self.user_properties:
        if v is not None and str(v) != '':
            properties.append(Property(ai, k, v))
    try:
        properties.save()
    except CollectionPartialUploadException:
        pass  # TODO: improve handling of this exception, but prevent to fail the import

    instances = []
    for p in self.projects:
        instances.append(ImageInstance(ai.id, p.id).save())
    self.images.append((ai, instances))

    # TODO: temporary add annotations for backwards compatibility.
    # BUT it should be done by core when an image instance is created.
    if image.n_planes == 1 and len(instances) > 0:
        # TODO: currently only supports metadata annots on 2D images
        metadata_annots = image.annotations
        if len(metadata_annots) > 0:
            metadata_terms = [ma.terms for ma in metadata_annots if len(ma.terms) > 0]
            metadata_terms = set(flatten(metadata_terms))

            for instance in instances:
                project_id = instance.project
                project = self.projects.find_by_attribute('id', project_id)
                ontology_id = project.ontology  # noqa
                ontology_terms = TermCollection().fetch_with_filter("project", project_id)
                terms_id_mapping = {t.name: t.id for t in ontology_terms}

                for metadata_term in metadata_terms:
                    if metadata_term not in terms_id_mapping:
                        # TODO: user must have ontology rights !
                        term = Term(name=metadata_term, id_ontology=ontology_id,
                                    color="#AAAAAA").save()
                        terms_id_mapping[term.name] = term.id

                annots = AnnotationCollection()
                for metadata_annot in metadata_annots:
                    term_ids = [terms_id_mapping[t] for t in metadata_annot.terms]
                    properties = [dict(key=k, value=v)
                                  for k, v in metadata_annot.properties.items()]
                    annots.append(Annotation(
                        location=metadata_annot.wkt,
                        id_image=instance.id,
                        id_terms=term_ids if len(term_ids) > 0 else None,
                        properties=properties if len(properties) > 0 else None,
                        user=uf.user))

                try:
                    annots.save()
                except CollectionPartialUploadException:
                    pass
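# `flatten` is not defined in this snippet. A minimal sketch, assuming it
# flattens one level of nesting (a list of term lists into a flat iterable):
from itertools import chain


def flatten(list_of_lists):
    """Yield the items of each sub-list in order (one level of flattening)."""
    return chain.from_iterable(list_of_lists)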
def upload_data(problemclass, nj, inputs, out_path, monitor_params=None, **kwargs):
    """Upload annotations or any other related results to the server.

    Parameters
    ----------
    problemclass: str
        The problem class
    nj: CytomineJob|BiaflowsJob
        The CytomineJob or BiaflowsJob object. Ignored if do_export is True.
    inputs: list
        Input data as returned by prepare_data
    out_path: str
        Output path
    monitor_params: dict|None
        A dictionary of parameters to be passed to the data upload loop monitor.
    kwargs: dict
        Additional parameters for:
        * ObjDet/SptCnt: see function 'extract_annotations_objdet'
        * ObjSeg: see function 'extract_annotations_objseg'
    """
    if "is_2d" in kwargs:
        warnings.warn(
            "As of version 0.9.3, the 'is_2d' parameter is not needed anymore in "
            "function 'upload_data' and is now ignored.", DeprecationWarning)

    if not nj.flags["do_upload_annotations"]:
        return
    if nj.flags["tiling"] and problemclass not in (CLASS_OBJSEG, CLASS_PIXCLA):
        print("Annot. upload is only supported for one of {ObjSeg, PixCla} in 2D "
              "when tiling is enabled.. skipping !")
        return
    if monitor_params is None:
        monitor_params = dict()

    annotations = AnnotationCollection()

    if nj.flags["tiling"]:
        annotations.extend(extract_tiled_annotations(
            inputs, out_path, nj,
            label_merging=problemclass == CLASS_PIXCLA))
    else:
        if problemclass == CLASS_OBJSEG:
            extract_fn = extract_annotations_objseg
        elif problemclass == CLASS_PIXCLA:
            extract_fn = extract_annotations_pixcla
        elif problemclass in (CLASS_OBJDET, CLASS_SPTCNT, CLASS_LNDDET):
            extract_fn = extract_annotations_objdet
        elif problemclass in (CLASS_LOOTRC, CLASS_TRETRC):
            extract_fn = extract_annotations_lootrc
        elif problemclass == CLASS_PRTTRK:
            extract_fn = extract_annotations_prttrk
        elif problemclass == CLASS_OBJTRK:
            extract_fn = extract_annotations_objtrk
        else:
            raise NotImplementedError(
                "Upload data does not support problem class '{}' yet.".format(problemclass))

        tracks = TrackCollection()
        monitor_params["prefix"] = "Extract masks/points/... from output data"
        for in_image in nj.monitor(inputs, **monitor_params):
            curr_tracks, curr_annots = extract_fn(out_path, in_image, nj.project.id,
                                                  track_prefix=str(nj.job.id), **kwargs)
            tracks.extend(curr_tracks)
            annotations.extend(curr_annots)

    nj.job.update(statusComment="Upload extracted annotations (total: {})".format(
        len(annotations)))
    annotations.save(chunk=20, n_workers=min(4, cpu_count() * 2))
def run(debug=False):
    """
    Gets project image from cytomine

    Args:
        debug (bool): If true will save annotations individually and plot any error

    Example:
        python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'dadb7d7a-5822-48f7-ab42-59bce27750ae' --cytomine_private_key 'd73f4602-51d2-4d15-91e4-d4cc175d65fd' --cytomine_id_project 187 --cytomine_id_image_instance 375 --cytomine_id_software 228848

        python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_project 154 --cytomine_id_image_instance 3643

        python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 2140 --cytomine_id_software 2633

        docker run --gpus all -it --rm --mount type=bind,source=/home/giussepi/Public/environments/Cytomine/cyto_CRLM/,target=/CRLM,bind-propagation=private --network=host ttt --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 31296 --cytomine_id_software 79732
    """
    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine connection parameters
    parser.add_argument('--cytomine_host', dest='host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project', dest='id_project',
                        help="The project from which we want the images")
    parser.add_argument('--cytomine_id_software', dest='id_software',
                        help="The software to be used to process the image")
    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
                        help="The image to which the annotation will be added")

    params, _ = parser.parse_known_args(sys.argv[1:])

    with CytomineJob.from_cli(sys.argv[1:]) as cytomine:
        # TODO: To be tested on TITANx
        img = ImageInstance().fetch(params.id_image_instance)
        download_image(img)
        process_wsi_and_save(get_container_image_path(img))
        new_annotations = generate_polygons(get_container_image_path(img),
                                            adapt_to_cytomine=True)
        annotation_collection = None

        for label_key in new_annotations:
            # Sending annotation batches to the server
            for sub_list in chunks(new_annotations[label_key], ANNOTATION_BATCH):
                if not debug:
                    annotation_collection = AnnotationCollection()

                for exterior_points in sub_list:
                    if debug:
                        annotation_collection = AnnotationCollection()

                    annotation_collection.append(Annotation(
                        location=Polygon(
                            exterior_points.astype(int).reshape(
                                exterior_points.shape[0],
                                exterior_points.shape[2]).tolist()).wkt,
                        id_image=params.id_image_instance,
                        id_project=params.id_project,
                        id_terms=[CYTOMINE_LABELS[label_key]]))

                    if debug:
                        try:
                            annotation_collection.save()
                        except Exception as e:
                            print(exterior_points.astype(int).reshape(
                                exterior_points.shape[0],
                                exterior_points.shape[2]).tolist())
                            plt.plot(*Polygon(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0],
                                    exterior_points.shape[2])).exterior.coords.xy)
                            plt.show()
                            # raise(e)
                            print(e)
                        finally:
                            time.sleep(1)

                if not debug:
                    annotation_collection.save()
                    time.sleep(ANNOTATION_SLEEP_TIME)

        # Adding pie chart labels data as image property
        # TODO: Change delete_results_file to True for final test on titanX
        num_pixels_per_label = get_pie_chart_data(get_container_image_path(img),
                                                  delete_results_file=False)
        for percentage, label_ in zip(num_pixels_per_label, Label.names):
            Property(img, key=label_, value='{}%'.format(percentage)).save()

        remove_image_local_copy(img)

        cytomine.job.update(statusComment="Finished.")
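# `chunks` is not defined in this snippet. A minimal sketch, assuming the usual
# batching helper that yields consecutive slices of a list:
def chunks(items, size):
    """Yield successive `size`-sized slices from `items`."""
    for start in range(0, len(items), size):
        yield items[start:start + size]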
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING, progress=0, statusComment='Initialization...')
        base_path = "{}".format(os.getenv('HOME'))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Loading models from models directory
        with tf.device('/cpu:0'):
            h_model = load_model('/models/head_dice_sm_9976.hdf5', compile=False)  # head model
            h_model.compile(optimizer='adam', loss=dice_coef_loss, metrics=['accuracy'])
            op_model = load_model('/models/op_ce_sm_9991.hdf5', compile=True)  # operculum model
            # op_model.compile(optimizer='adam', loss=dice_coef_loss, metrics=['accuracy'])

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            'project', conn.parameters.cytomine_id_project)
        if conn.parameters.cytomine_id_images != 'all':
            # select only the given image instances
            images = [_ for _ in images
                      if _.id in map(lambda x: int(x.strip()),
                                     conn.parameters.cytomine_id_images.split(','))]
        images_id = [image.id for image in images]

        # Download selected images into 'working_directory'
        img_path = os.path.join(working_path, 'images')
        # if not os.path.exists(img_path):
        os.makedirs(img_path)

        for image in conn.monitor(images, start=2, end=50, period=0.1,
                                  prefix='Downloading images into working directory...'):
            fname, fext = os.path.splitext(image.filename)
            if image.download(dest_pattern=os.path.join(
                    img_path, "{}{}".format(image.id, fext))) is not True:
                # images are downloaded with image ids as names
                print('Failed to download image {}'.format(image.filename))

        # Prepare image file paths from image directory for execution
        conn.job.update(progress=50, statusComment="Preparing data for execution..")
        image_paths = glob.glob(os.path.join(img_path, '*'))

        std_size = (1032, 1376)  # maximum size that the model can handle
        model_size = 256

        for i in range(len(image_paths)):
            org_img = Image.open(image_paths[i])
            filename = os.path.basename(image_paths[i])
            fname, fext = os.path.splitext(filename)
            fname = int(fname)

            org_img = img_to_array(org_img)
            img = org_img.copy()
            org_size = org_img.shape[:2]
            asp_ratio = org_size[0] / org_size[1]  # for cropping and upscaling to original size

            if org_size[1] > std_size[1]:
                img = tf.image.resize(img, (675, 900), method='nearest')
                img = tf.image.resize_with_crop_or_pad(img, std_size[0], std_size[1])
                h_mask = predict_mask(img, h_model, model_size)
                h_mask = crop_to_aspect(h_mask, asp_ratio)
                h_mask = tf.image.resize(h_mask, std_size, method='nearest')
                h_up_mask = tf.image.resize_with_crop_or_pad(h_mask, 675, 900)
                h_up_mask = tf.image.resize(h_up_mask, org_size, method='nearest')
                h_up_mask = np.asarray(h_up_mask).astype(np.uint8)
                _, h_up_mask = cv.threshold(h_up_mask, 0.001, 255, 0)
                kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (17, 17))
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_OPEN, kernel, iterations=5)
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_CLOSE, kernel, iterations=1)
                # h_up_mask = cv.erode(h_up_mask, kernel, iterations=3)
                # h_up_mask = cv.dilate(h_up_mask, kernel, iterations=3)
                h_up_mask = np.expand_dims(h_up_mask, axis=-1)
            else:
                h_mask = predict_mask(img, h_model, model_size)
                h_mask = crop_to_aspect(h_mask, asp_ratio)
                h_up_mask = tf.image.resize(h_mask, org_size, method='nearest')
                h_up_mask = np.asarray(h_up_mask).astype(np.uint8)
                _, h_up_mask = cv.threshold(h_up_mask, 0.001, 255, 0)
                kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
                # kernel = np.ones((9, 9), np.uint8)
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_CLOSE, kernel, iterations=3)
                h_up_mask = np.expand_dims(h_up_mask, axis=-1)

            box = bb_pts(h_up_mask)  # bounding box points for operculum (x_min, y_min, x_max, y_max)
            w = box[0]
            h = box[1]
            tr_h = box[3] - box[1]  # target height
            tr_w = box[2] - box[0]  # target width
            crop_op_img = tf.image.crop_to_bounding_box(org_img, h, w, tr_h, tr_w)

            op_asp_ratio = crop_op_img.shape[0] / crop_op_img.shape[1]
            op_mask = predict_mask(crop_op_img, op_model, model_size)
            op_mask = crop_to_aspect(op_mask, op_asp_ratio)
            op_mask = tf.image.resize(op_mask,
                                      (crop_op_img.shape[0], crop_op_img.shape[1]),
                                      method='nearest')
            # array of zeros to be filled with op mask
            op_up_mask = np.zeros((org_img.shape[0], org_img.shape[1], 1)).astype(np.uint8)
            # paste op_mask into org_img (reversing the crop operation)
            op_up_mask[box[1]:box[3], box[0]:box[2]] = op_mask
            # op_up_mask = tf.image.resize_with_crop_or_pad(op_mask, org_size[0], org_size[1])

            h_polygon = h_make_polygon(h_up_mask)
            op_polygon = o_make_polygon(op_up_mask)

            conn.job.update(status=Job.RUNNING, progress=95,
                            statusComment="Uploading new annotations to Cytomine server..")
            annotations = AnnotationCollection()
            # both calls use id_terms (one originally read `id_term`, which the
            # Annotation constructor would have ignored)
            annotations.append(Annotation(location=h_polygon[0].wkt, id_image=fname,
                                          id_terms=[143971108],
                                          id_project=conn.parameters.cytomine_id_project))
            annotations.append(Annotation(location=op_polygon[0].wkt, id_image=fname,
                                          id_terms=[143971084],
                                          id_project=conn.parameters.cytomine_id_project))
            annotations.save()

        conn.job.update(status=Job.TERMINATED, statusComment="Finish", progress=100)  # 524787186
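# `bb_pts` is not shown. A minimal sketch, assuming it returns the bounding box
# of the non-zero mask pixels as (x_min, y_min, x_max, y_max):
import numpy as np


def bb_pts(mask):
    """Bounding box (x_min, y_min, x_max, y_max) of the non-zero region of `mask`."""
    ys, xs = np.nonzero(mask[:, :, 0])
    return int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())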
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        # with Cytomine(argv) as conn:
        print(conn.parameters)
        conn.job.update(status=Job.RUNNING, progress=0, statusComment="Initialization...")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
        #               verbose=logging.INFO) as cytomine:
        # ontology = Ontology("classPNcells" + str(conn.parameters.cytomine_id_project)).save()
        # ontology_collection = OntologyCollection().fetch()
        # print(ontology_collection)
        # ontology = Ontology("CLASSPNCELLS").save()
        # terms = TermCollection().fetch_with_filter("ontology", ontology.id)
        terms = TermCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING, progress=1, statusComment="Terms collected...")
        print(terms)

        # term_P = Term("PositiveCell", ontology.id, "#FF0000").save()
        # term_N = Term("NegativeCell", ontology.id, "#00FF00").save()
        # term_P = Term("PositiveCell", ontology, "#FF0000").save()
        # term_N = Term("NegativeCell", ontology, "#00FF00").save()

        # Get all the terms of our ontology
        # terms = TermCollection().fetch_with_filter("ontology", ontology.id)
        # terms = TermCollection().fetch_with_filter("ontology", ontology)
        # print(terms)

        # # Loading pre-trained Stardist model
        # np.random.seed(17)
        # lbl_cmap = random_label_cmap()
        # # Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        # # Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        # model = StarDist2D(None, name='2D_versatile_HE', basedir='/models/')  # use local model file in ~/models/2D_versatile_HE/

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING, progress=2, statusComment="Images gathered...")

        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [int(id_img)
                         for id_img in conn.parameters.cytomine_id_images.split(',')]
        print(list_imgs)

        # Go over images
        conn.job.update(status=Job.RUNNING, progress=10,
                        statusComment="Running PN classification on image...")
        # for id_image in conn.monitor(list_imgs, prefix="Running PN classification on image", period=0.1):
        for id_image in list_imgs:
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_cell_term
            roi_annotations.image = id_image  # conn.parameters.cytomine_id_image
            roi_annotations.job = conn.parameters.cytomine_id_annotation_job
            roi_annotations.user = conn.parameters.cytomine_id_user_job
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            # Go over ROI in this image
            # for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                # Get Cytomine ROI coordinates for remapping to whole-slide
                # Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print("----------------------------Cells------------------------------")
                roi_geometry = wkt.loads(roi.location)
                # print("ROI Geometry from Shapely: {}".format(roi_geometry))
                # print("ROI Bounds")
                # print(roi_geometry.bounds)
                minx = roi_geometry.bounds[0]
                miny = roi_geometry.bounds[3]

                # Dump ROI image into local PNG file
                # roi_path = os.path.join(working_path, str(roi_annotations.project) + '/' + str(roi_annotations.image) + '/' + str(roi.id))
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project) + '/' +
                                        str(roi_annotations.image) + '/')
                # print(roi_path)
                roi_png_filename = os.path.join(roi_path + str(roi.id) + '.png')
                conn.job.update(status=Job.RUNNING, progress=20, statusComment=roi_png_filename)
                # print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, alpha=True)
                # roi.dump(dest_pattern=os.path.join(roi_path, "{id}.png"), mask=True, alpha=True)

                # im = Image.open(roi_png_filename)
                J = cv2.imread(roi_png_filename, cv2.IMREAD_UNCHANGED)
                J = cv2.cvtColor(J, cv2.COLOR_BGRA2RGBA)
                [r, c, h] = J.shape
                # print("J: ", J)

                if r < c:
                    blocksize = r
                else:
                    blocksize = c
                # print("blocksize:", blocksize)

                rr = np.zeros((blocksize, blocksize))
                cc = np.zeros((blocksize, blocksize))
                zz = [*range(1, blocksize + 1)]
                # print("zz:", zz)
                for i in zz:
                    rr[i - 1, :] = zz
                # print("rr shape:", rr.shape)
                zz = [*range(1, blocksize + 1)]
                for i in zz:
                    cc[:, i - 1] = zz
                # print("cc shape:", cc.shape)

                cc1 = np.asarray(cc) - 16.5
                rr1 = np.asarray(rr) - 16.5
                cc2 = np.asarray(cc1) ** 2
                rr2 = np.asarray(rr1) ** 2
                rrcc = np.asarray(cc2) + np.asarray(rr2)

                weight = np.sqrt(rrcc)
                # print("weight: ", weight)
                weight2 = 1. / weight
                # print("weight2: ", weight2)
                # print("weight2 shape:", weight2.shape)

                coord = [c / 2, r / 2]
                halfblocksize = blocksize / 2
                y = round(coord[1])
                x = round(coord[0])

                # Convert the RGB image to HSV
                Jalpha = J[:, :, 3]
                Jalphaloc = Jalpha / 255
                Jrgb = cv2.cvtColor(J, cv2.COLOR_RGBA2RGB)
                Jhsv = cv2.cvtColor(Jrgb, cv2.COLOR_RGB2HSV_FULL)
                Jhsv = Jhsv / 255
                Jhsv[:, :, 0] = Jhsv[:, :, 0] * Jalphaloc
                Jhsv[:, :, 1] = Jhsv[:, :, 1] * Jalphaloc
                Jhsv[:, :, 2] = Jhsv[:, :, 2] * Jalphaloc
                # print("Jhsv: ", Jhsv)
                # print("Jhsv size:", Jhsv.shape)
                # print("Jhsv class:", Jhsv.dtype)

                currentblock = Jhsv[0:blocksize, 0:blocksize, :]
                # print("currentblock: ", currentblock)
                # print(currentblock.dtype)
                currentblockH = currentblock[:, :, 0]
                currentblockV = 1 - currentblock[:, :, 2]
                hue = sum(sum(currentblockH * weight2))
                val = sum(sum(currentblockV * weight2))
                # print("hue:", hue)
                # print("val:", val)

                if hue < 2:
                    cellclass = 1
                elif val < 15:
                    cellclass = 2
                else:
                    if hue < 30 or val > 40:
                        cellclass = 1
                    else:
                        cellclass = 2

                # tags = TagCollection().fetch()
                # tags = TagCollection()
                # print(tags)

                if cellclass == 1:
                    # print("Positive (H: ", str(hue), ", V: ", str(val), ")")
                    id_terms = conn.parameters.cytomine_id_positive_term
                    # tag = Tag("Positive (H: ", str(hue), ", V: ", str(val), ")").save()
                    # print(tag)
                    # id_terms = Term("PositiveCell", ontology.id, "#FF0000").save()
                elif cellclass == 2:
                    # print("Negative (H: ", str(hue), ", V: ", str(val), ")")
                    id_terms = conn.parameters.cytomine_id_negative_term
                    # for t in tags:
                    #     tag = Tag("Negative (H: ", str(hue), ", V: ", str(val), ")").save()
                    #     print(tag)
                    # id_terms = Term("NegativeCell", ontology.id, "#00FF00").save()

                # First we create the required resources
                cytomine_annotations = AnnotationCollection()
                # property_collection = PropertyCollection(uri()).fetch("annotation", id_image)
                # property_collection = PropertyCollection().uri()
                # print(property_collection)
                # print(cytomine_annotations)
                # property_collection.append(Property(Annotation().fetch(id_image), key="Hue", value=str(hue)))
                # property_collection.append(Property(Annotation().fetch(id_image), key="Val", value=str(val)))
                # property_collection.save()
                # prop1 = Property(Annotation().fetch(id_image), key="Hue", value=str(hue)).save()
                # prop2 = Property(Annotation().fetch(id_image), key="Val", value=str(val)).save()
                # prop1.Property(Annotation().fetch(id_image), key="Hue", value=str(hue)).save()
                # prop2.Property(Annotation().fetch(id_image), key="Val", value=str(val)).save()

                # for pos, polygroup in enumerate(roi_geometry, start=1):
                #     points = list()
                #     for i in range(len(polygroup[0])):
                #         p = Point(minx + polygroup[1][i], miny - polygroup[0][i])
                #         points.append(p)
                annotation = roi_geometry

                # tags.append(TagDomainAssociation(Annotation().fetch(id_image, tag.id))).save()
                # association = append(TagDomainAssociation(Annotation().fetch(id_image, tag.id))).save()
                # print(association)
                cytomine_annotations.append(
                    Annotation(location=annotation.wkt,
                               # location=roi_geometry,
                               id_image=id_image,  # conn.parameters.cytomine_id_image
                               id_project=conn.parameters.cytomine_id_project,
                               id_terms=[id_terms]))
                print(".", end='', flush=True)

                # Send Annotation Collection (for this ROI) to Cytomine server in one HTTP request
                ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(progress=1, statusComment="Preparing execution")

        # extract images to process
        if cj.parameters.cytomine_zoom_level > 0 and (
                cj.parameters.cytomine_tile_size != 256
                or cj.parameters.cytomine_tile_overlap != 0):
            raise ValueError(
                "when using zoom_level > 0, tile size should be 256 "
                "(given {}) and overlap should be 0 (given {})".format(
                    cj.parameters.cytomine_tile_size,
                    cj.parameters.cytomine_tile_overlap))

        cj.job.update(progress=1,
                      statusComment="Preparing execution (creating folders,...).")
        # working path
        root_path = str(Path.home())
        working_path = os.path.join(root_path, "images")
        os.makedirs(working_path, exist_ok=True)

        # load training information
        cj.job.update(progress=5, statusComment="Extract properties from training job.")
        train_job = Job().fetch(cj.parameters.cytomine_id_job)
        properties = PropertyCollection(train_job).fetch().as_dict()
        binary = str2bool(properties["binary"].value)
        classes = parse_domain_list(properties["classes"].value)

        cj.job.update(progress=10, statusComment="Download the model file.")
        attached_files = AttachedFileCollection(train_job).fetch()
        model_file = attached_files.find_by_attribute("filename", "model.joblib")
        model_filepath = os.path.join(root_path, "model.joblib")
        model_file.download(model_filepath, override=True)
        pyxit = joblib.load(model_filepath)

        # set n_jobs
        pyxit.base_estimator.n_jobs = cj.parameters.n_jobs
        pyxit.n_jobs = cj.parameters.n_jobs

        cj.job.update(progress=45, statusComment="Build workflow.")
        builder = SSLWorkflowBuilder()
        builder.set_tile_size(cj.parameters.cytomine_tile_size,
                              cj.parameters.cytomine_tile_size)
        builder.set_overlap(cj.parameters.cytomine_tile_overlap)
        builder.set_tile_builder(
            CytomineTileBuilder(working_path, n_jobs=cj.parameters.n_jobs))
        builder.set_logger(StandardOutputLogger(level=Logger.INFO))
        builder.set_n_jobs(1)
        builder.set_background_class(0)
        # value 0 will prevent merging but still requires to run the merging check
        # procedure (inefficient)
        builder.set_distance_tolerance(2 if cj.parameters.union_enabled else 0)
        builder.set_segmenter(ExtraTreesSegmenter(
            pyxit=pyxit,
            classes=classes,
            prediction_step=cj.parameters.pyxit_prediction_step,
            background=0,
            min_std=cj.parameters.tile_filter_min_stddev,
            max_mean=cj.parameters.tile_filter_max_mean))
        workflow = builder.get()

        area_checker = AnnotationAreaChecker(
            min_area=cj.parameters.min_annotation_area,
            max_area=cj.parameters.max_annotation_area)

        def get_term(label):
            if binary:
                if "cytomine_id_predict_term" not in cj.parameters:
                    return []
                else:
                    return [int(cj.parameters.cytomine_id_predict_term)]
            # multi-class
            return [label]

        zones = extract_images_or_rois(cj.parameters)
        for zone in cj.monitor(zones, start=50, end=90, period=0.05,
                               prefix="Segmenting images/ROIs"):
            results = workflow.process(zone)

            annotations = AnnotationCollection()
            for obj in results:
                if not area_checker.check(obj.polygon):
                    continue
                polygon = obj.polygon
                if isinstance(zone, ImageWindow):
                    polygon = affine_transform(
                        polygon, [1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y])
                polygon = change_referential(polygon, zone.base_image.height)
                if cj.parameters.cytomine_zoom_level > 0:
                    zoom_mult = 2 ** cj.parameters.cytomine_zoom_level
                    polygon = affine_transform(polygon,
                                               [zoom_mult, 0, 0, zoom_mult, 0, 0])
                annotations.append(Annotation(
                    location=polygon.wkt,
                    id_terms=get_term(obj.label),
                    id_project=cj.project.id,
                    id_image=zone.base_image.image_instance.id))
            annotations.save()

        cj.job.update(status=Job.TERMINATED, statusComment="Finish", progress=100)
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # prepare paths
        working_path = str(Path.home())
        data_path = os.path.join(working_path, "pred_data")
        if not os.path.exists(data_path):
            os.makedirs(data_path)
        model_filename = "model.pkl"

        cj.job.update(progress=5, statusComment="Download model ...")
        model_job = Job().fetch(cj.parameters.cytomine_model_job_id)
        attached_files = AttachedFileCollection(model_job).fetch_with_filter(
            "project", cj.project.id)
        if not (0 < len(attached_files) < 2):
            raise ValueError(
                "More or less than 1 file attached to the Job (found {} file(s)).".format(
                    len(attached_files)))
        attached_file = attached_files[0]
        if attached_file.filename != model_filename:
            raise ValueError("Expected model file name is '{}' (found: '{}').".format(
                model_filename, attached_file.filename))
        model_path = os.path.join(working_path, model_filename)
        attached_file.download(model_path)

        # load model
        with open(model_path, "rb") as file:
            data = pickle.load(file)
            model = data["model"]
            classifier = data["classifier"]
            network = data["network"]
            reduction = data["reduction"]

        # load and dump annotations
        cj.job.update(progress=10, statusComment="Download annotations.")
        annotations = get_annotations(
            project_id=cj.parameters.cytomine_project_id,
            images=parse_list_or_none(cj.parameters.cytomine_images_ids),
            users=parse_list_or_none(cj.parameters.cytomine_users_ids),
            showWKT=True)

        cj.job.update(statusComment="Fetch crops.", progress=15)
        n_samples = len(annotations)
        x = np.zeros([n_samples], dtype=object)  # np.object is deprecated
        for i, annotation in cj.monitor(enumerate(annotations), start=15, end=40,
                                        prefix="Fetch crops", period=0.1):
            file_format = os.path.join(data_path, "{id}.png")
            if not annotation.dump(dest_pattern=file_format):
                raise ValueError("Download error for annotation '{}'.".format(annotation.id))
            x[i] = file_format.format(id=annotation.id)

        available_nets = {
            MODEL_RESNET50, MODEL_VGG19, MODEL_VGG16, MODEL_INCEPTION_V3,
            MODEL_INCEPTION_RESNET_V2, MODEL_MOBILE, MODEL_DENSE_NET_201,
            MODEL_NASNET_LARGE, MODEL_NASNET_MOBILE
        }
        if network not in available_nets:
            raise ValueError("Invalid value (='{}') for parameter 'network'.".format(network))
        if reduction not in {"average_pooling"}:
            raise ValueError("Invalid value (='{}') for parameter 'reduction'.".format(reduction))
        if classifier not in {"svm"}:
            raise ValueError("Invalid value (='{}') for parameter 'classifier'.".format(classifier))

        # prepare network
        cj.job.update(statusComment="Load neural network '{}'".format(network), progress=40)
        features = PretrainedModelFeatures(model=network, layer="last",
                                           reduction=reduction, weights="imagenet")
        height, width, _ = features._get_input_shape(network)
        loader = ImageLoader(load_size_range=(height, height), crop_size=height,
                             random_crop=False)

        cj.job.update(statusComment="Transform features.", progress=50)
        x_feat = batch_transform(loader, features, x,
                                 logger=cj.logger(start=50, end=70, period=0.1),
                                 batch_size=128)

        cj.job.update(statusComment="Prediction with '{}'.".format(classifier), progress=70)
        if hasattr(model, "n_jobs"):
            model.n_jobs = cj.parameters.n_jobs

        probas = None
        if hasattr(model, "predict_proba"):
            probas = model.predict_proba(x_feat)
            y_pred = model.classes_.take(np.argmax(probas, axis=1), axis=0)
        else:
            y_pred = model.predict(x_feat)

        cj.job.update(statusComment="Upload annotations.", progress=80)
        annotation_collection = AnnotationCollection()
        for i, annotation in cj.monitor(enumerate(annotations), start=80, end=100,
                                        period=0.1, prefix="Upload annotations"):
            # rate is the probability of the winning class; `float(probas[i])`
            # would raise on the per-class probability row
            annotation_collection.append(Annotation(
                location=annotation.location,
                id_image=annotation.image,
                id_project=annotation.project,
                term=[int(y_pred[i])],
                rate=float(np.max(probas[i])) if probas is not None else 1.0).save())
        annotation_collection.save()

        cj.job.update(statusComment="Finished.", progress=100)