def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING, progress=0,
                        statusComment="Initialization...")
        # base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        base_path = "/home/mmu/Desktop"
        working_path = os.path.join(base_path, str(conn.job.id))

        # Load the pre-trained Stardist model
        np.random.seed(17)
        lbl_cmap = random_label_cmap()
        # Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        # Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        model = StarDist2D(None, name='2D_versatile_HE', basedir='/models/')  # use local model file in ~/models/2D_versatile_HE/

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        if conn.parameters.cytomine_id_images == 'all':
            list_imgs = [int(image.id) for image in images]
        else:
            list_imgs = [int(id_img)
                         for id_img in conn.parameters.cytomine_id_images.split(',')]

        # Go over images
        for id_image in conn.monitor(list_imgs, prefix="Running detection on image", period=0.1):
            # Dump ROI annotations in this image from the Cytomine server to local images
            # conn.job.update(status=Job.RUNNING, progress=0, statusComment="Fetching ROI annotations...")
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_roi_term
            roi_annotations.image = id_image  # conn.parameters.cytomine_id_image
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            # Go over the ROIs in this image
            # for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                # Get Cytomine ROI coordinates for remapping to the whole slide.
                # Cytomine uses a cartesian coordinate system: (0,0) is the bottom-left corner.
                print("----------------------------ROI------------------------------")
                roi_geometry = wkt.loads(roi.location)
                print("ROI Geometry from Shapely: {}".format(roi_geometry))
                print("ROI Bounds")
                print(roi_geometry.bounds)
                min_x = roi_geometry.bounds[0]
                max_y = roi_geometry.bounds[3]  # top edge of the ROI in Cytomine coordinates

                # Dump the ROI image into a local PNG file
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image),
                                        str(roi.id))
                roi_png_filename = os.path.join(roi_path, str(roi.id) + '.png')
                print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, mask=True, alpha=True)
                # roi.dump(dest_pattern=os.path.join(roi_path, "{id}.png"), mask=True, alpha=True)

                # Stardist works with TIFF images without an alpha channel:
                # flatten the PNG alpha mask onto a white RGB background and save as TIFF.
                im = Image.open(roi_png_filename)
                bg = Image.new("RGB", im.size, (255, 255, 255))
                bg.paste(im, mask=im.split()[3])
                roi_tif_filename = os.path.join(roi_path, str(roi.id) + '.tif')
                bg.save(roi_tif_filename, quality=100)

                X_files = sorted(glob(os.path.join(roi_path, str(roi.id) + '*.tif')))
                X = list(map(imread, X_files))
                n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
                axis_norm = (0, 1)  # normalize channels independently; use (0,1,2) to normalize channels jointly
                if n_channel > 1:
                    print("Normalizing image channels %s."
                          % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))

                # Go over the ROI images in the ROI directory (in our case: one ROI per directory)
                for x in range(len(X)):
                    print("------------------- Processing ROI file %d: %s" % (x, roi_tif_filename))
                    img = normalize(X[x],
                                    conn.parameters.stardist_norm_perc_low,
                                    conn.parameters.stardist_norm_perc_high,
                                    axis=axis_norm)
                    # Stardist model prediction with user-defined thresholds
                    labels, details = model.predict_instances(
                        img,
                        prob_thresh=conn.parameters.stardist_prob_t,
                        nms_thresh=conn.parameters.stardist_nms_t)
                    print("Number of detected polygons: %d" % len(details['coord']))

                    cytomine_annotations = AnnotationCollection()
                    # Go over the detections in this ROI, convert them and upload to Cytomine
                    for pos, polygroup in enumerate(details['coord'], start=1):
                        # Convert to a Shapely polygon, mapping Stardist detection coordinates
                        # (raster rows/cols, top-left origin) to the Cytomine whole-slide
                        # system (bottom-left origin).
                        points = [Point(min_x + polygroup[1][i], max_y - polygroup[0][i])
                                  for i in range(len(polygroup[0]))]
                        annotation = Polygon(points)
                        # Append to the annotation collection
                        cytomine_annotations.append(
                            Annotation(location=annotation.wkt,
                                       id_image=id_image,  # conn.parameters.cytomine_id_image
                                       id_project=conn.parameters.cytomine_id_project,
                                       id_terms=[conn.parameters.cytomine_id_cell_term]))
                        print(".", end='', flush=True)

                    # Send the annotation collection (for this ROI) to the Cytomine server in one HTTP request
                    ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
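# A minimal, self-contained sketch (not part of the original script, function name is
# hypothetical) of the coordinate remapping used above: Stardist returns (row, col) raster
# coordinates with a top-left origin, while Cytomine expects (x, y) with a bottom-left
# origin. min_x / max_y are assumed to come from roi_geometry.bounds as in the script.
from shapely.geometry import Polygon

def stardist_coords_to_cytomine_polygon(polygroup, min_x, max_y):
    """polygroup is details['coord'][k]: the [rows, cols] arrays of one detection."""
    rows, cols = polygroup[0], polygroup[1]
    return Polygon([(min_x + cols[i], max_y - rows[i]) for i in range(len(rows))])

# Example: a detection vertex at raster (row=10, col=5) inside an ROI whose top-left corner
# maps to (min_x=1000, max_y=2000) lands at (1005, 1990) in Cytomine coordinates.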
headers = next(f_csv)
for row in f_csv:
    image_id = row[0]
    tissue = row[1]
    dye = row[2]
    id2info[image_id] = (tissue, dye)

with Cytomine(host=params.host, public_key=params.public_key,
              private_key=params.private_key, verbose=logging.INFO) as cytomine:
    annotations = AnnotationCollection()
    annotations.project = params.id_project
    annotations.showWKT = True
    annotations.showMeta = True
    annotations.showGIS = True
    annotations.fetch()
    print(annotations)

    for annotation in annotations:
        print("ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
              .format(annotation.id, annotation.image, annotation.project,
                      annotation.term, annotation.user, annotation.area,
                      annotation.perimeter, annotation.location))
        annot = Annotation().fetch(annotation.id)
        # All the properties (collection) of the annotation
        properties = PropertyCollection(annot).fetch()
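# Hedged usage sketch for the fetched PropertyCollection above: each Property exposes
# .key and .value. The "tissue" key is only an example; any key stored on the annotation
# works the same way.
for prop in properties:
    print("Property {}: {} = {}".format(prop.id, prop.key, prop.value))
# The collection is not indexed by key, so a plain dict makes a convenient lookup:
prop_by_key = {prop.key: prop.value for prop in properties}
tissue = prop_by_key.get("tissue")  # None if the annotation has no such property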
parser.add_argument('--cytomine_host', dest='host',
                    default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
                    help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
                    help="The Cytomine private key")
parser.add_argument('--cytomine_id_project', dest='id_project',
                    help="The project from which we want the crop")
parser.add_argument('--download_path', required=False,
                    help="Where to store images")
params, other = parser.parse_known_args(sys.argv[1:])

with Cytomine(host=params.host, public_key=params.public_key,
              private_key=params.private_key, verbose=logging.INFO) as cytomine:
    annotations = AnnotationCollection()
    annotations.project = params.id_project
    annotations.showWKT = True
    annotations.showMeta = True
    annotations.showGIS = True
    annotations.fetch()
    print(annotations)

    for annotation in annotations:
        print("ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}".format(
            annotation.id, annotation.image, annotation.project, annotation.term,
            annotation.user, annotation.area, annotation.perimeter, annotation.location))
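# Hedged usage sketch: invoking the script above from a shell. The script name and the key
# and ID values are placeholders; --download_path is optional, as declared in the parser.
#
#   python get_annotations.py --cytomine_host demo.cytomine.be \
#       --cytomine_public_key <YOUR_PUBLIC_KEY> \
#       --cytomine_private_key <YOUR_PRIVATE_KEY> \
#       --cytomine_id_project 12345 \
#       --download_path /tmp/crops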
def main():
    with CytomineJob.from_cli(sys.argv) as conn:
        conn.job.update(status=Job.RUNNING, progress=0,
                        statusComment="Initialization of the training phase")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path
        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        in_txt = os.path.join(in_path, 'txt/')
        out_path = os.path.join(working_path, "out/")
        gt_path = os.path.join(working_path, "ground_truth/")
        tmp_path = os.path.join(working_path, "tmp/")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)
            os.makedirs(in_txt)

        # 2. Download the images (first input, then ground-truth images)
        conn.job.update(progress=10,
                        statusComment="Downloading images (to {})...".format(in_path))
        print(conn.parameters)
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        xpos = {}
        ypos = {}
        terms = {}
        for image in images:
            image.dump(dest_pattern=in_path.rstrip('/') + '/%d.%s' % (image.id, 'jpg'))
            annotations = AnnotationCollection()
            annotations.project = conn.parameters.cytomine_id_project
            annotations.showWKT = True
            annotations.showMeta = True
            annotations.showGIS = True
            annotations.showTerm = True
            annotations.image = image.id
            annotations.fetch()
            for ann in annotations:
                l = ann.location
                if l.rfind('POINT') == -1:
                    # Polygon annotation: use its centroid as the landmark position
                    pol = shapely.wkt.loads(l)
                    poi = pol.centroid
                else:
                    poi = shapely.wkt.loads(l)
                (cx, cy) = poi.xy
                xpos[(ann.term[0], image.id)] = int(cx[0])
                ypos[(ann.term[0], image.id)] = image.height - int(cy[0])
                terms[ann.term[0]] = 1

        # Write one text file per image: term id, pixel coordinates, relative coordinates
        for image in images:
            F = open(in_txt + '%d.txt' % image.id, 'w')
            for t in terms.keys():
                if (t, image.id) in xpos:
                    F.write('%d %d %d %f %f\n' % (
                        t, xpos[(t, image.id)], ypos[(t, image.id)],
                        xpos[(t, image.id)] / float(image.width),
                        ypos[(t, image.id)] / float(image.height)))
            F.close()

        depths = 1. / (2. ** np.arange(conn.parameters.model_depth))

        (xc, yc, xr, yr, ims, t_to_i, i_to_t) = getallcoords(in_txt)

        if conn.parameters.cytomine_id_terms == 'all':
            term_list = t_to_i.keys()
        else:
            term_list = [int(term)
                         for term in conn.parameters.cytomine_id_terms.split(',')]

        if conn.parameters.cytomine_training_images == 'all':
            tr_im = ims
        else:
            tr_im = [int(id_im)
                     for id_im in conn.parameters.cytomine_training_images.split(',')]

        DATA = None
        REP = None
        be = 0
        # leprogres = 10
        # pr_spacing = 90 / len(term_list)
        # print(term_list)
        sfinal = ""
        for id_term in conn.monitor(term_list, start=10, end=90, period=0.05,
                                    prefix="Model building for terms..."):
            sfinal += "%d " % id_term
            (xc, yc, xr, yr) = getcoordsim(in_txt, id_term, tr_im)
            nimages = np.max(xc.shape)
            mx = np.mean(xr)
            my = np.mean(yr)
            P = np.zeros((2, nimages))
            P[0, :] = xr
            P[1, :] = yr
            cm = np.cov(P)
            passe = False

            # Additional feature parameters
            feature_parameters = None
            if conn.parameters.model_feature_type.lower() == 'gaussian':
                std_matrix = np.eye(2) * (conn.parameters.model_feature_gaussian_std ** 2)
                feature_parameters = np.round(
                    np.random.multivariate_normal(
                        [0, 0], std_matrix,
                        conn.parameters.model_feature_gaussian_n)).astype(int)
            elif conn.parameters.model_feature_type.lower() == 'haar':
                W = conn.parameters.model_wsize
                # Integer number of features per Haar-like type
                n = int(conn.parameters.model_feature_haar_n / (5 * conn.parameters.model_depth))
                h2 = generate_2_horizontal(W, n)
                v2 = generate_2_vertical(W, n)
                h3 = generate_3_horizontal(W, n)
                v3 = generate_3_vertical(W, n)
                sq = generate_square(W, n)
                feature_parameters = (h2, v2, h3, v3, sq)

            for times in range(conn.parameters.model_ntimes):
                if times == 0:
                    rangrange = 0
                else:
                    rangrange = conn.parameters.model_angle
                T = build_datasets_rot_mp(
                    in_path, tr_im, xc, yc, conn.parameters.model_R,
                    conn.parameters.model_RMAX, conn.parameters.model_P,
                    conn.parameters.model_step, rangrange,
                    conn.parameters.model_wsize,
                    conn.parameters.model_feature_type, feature_parameters,
                    depths, nimages, 'jpg', conn.parameters.model_njobs)
                for i in range(len(T)):
                    (data, rep, img) = T[i]
                    (height, width) = data.shape
                    if not passe:
                        passe = True
                        DATA = np.zeros((height * (len(T) + 100) * conn.parameters.model_ntimes,
                                         width))
                        REP = np.zeros(height * (len(T) + 100) * conn.parameters.model_ntimes)
                        b = 0
                        be = height
                    DATA[b:be, :] = data
                    REP[b:be] = rep
                    b = be
                    be = be + height
            REP = REP[0:b]
            DATA = DATA[0:b, :]

            clf = ExtraTreesClassifier(n_jobs=conn.parameters.model_njobs,
                                       n_estimators=conn.parameters.model_ntrees)
            clf = clf.fit(DATA, REP)

            parameters_hash = {}
            parameters_hash['cytomine_id_terms'] = conn.parameters.cytomine_id_terms
            parameters_hash['model_R'] = conn.parameters.model_R
            parameters_hash['model_RMAX'] = conn.parameters.model_RMAX
            parameters_hash['model_P'] = conn.parameters.model_P
            parameters_hash['model_npred'] = conn.parameters.model_npred
            parameters_hash['model_ntrees'] = conn.parameters.model_ntrees
            parameters_hash['model_ntimes'] = conn.parameters.model_ntimes
            parameters_hash['model_angle'] = conn.parameters.model_angle
            parameters_hash['model_depth'] = conn.parameters.model_depth
            parameters_hash['model_step'] = conn.parameters.model_step
            parameters_hash['window_size'] = conn.parameters.model_wsize
            parameters_hash['feature_type'] = conn.parameters.model_feature_type
            parameters_hash['feature_haar_n'] = conn.parameters.model_feature_haar_n
            parameters_hash['feature_gaussian_n'] = conn.parameters.model_feature_gaussian_n
            parameters_hash['feature_gaussian_std'] = conn.parameters.model_feature_gaussian_std

            model_filename = joblib.dump(
                clf, os.path.join(out_path, '%d_model.joblib' % id_term),
                compress=3)[0]
            cov_filename = joblib.dump(
                [mx, my, cm], os.path.join(out_path, '%d_cov.joblib' % id_term),
                compress=3)[0]
            parameter_filename = joblib.dump(
                parameters_hash,
                os.path.join(out_path, '%d_parameters.joblib' % id_term),
                compress=3)[0]
            AttachedFile(conn.job, domainIdent=conn.job.id,
                         filename=model_filename,
                         domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(conn.job, domainIdent=conn.job.id,
                         filename=cov_filename,
                         domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(conn.job, domainIdent=conn.job.id,
                         filename=parameter_filename,
                         domainClassName="be.cytomine.processing.Job").upload()
            if conn.parameters.model_feature_type in ('haar', 'gaussian'):
                add_filename = joblib.dump(
                    feature_parameters,
                    out_path.rstrip('/') + '/' + '%d_fparameters.joblib' % id_term)[0]
                AttachedFile(conn.job, domainIdent=conn.job.id,
                             filename=add_filename,
                             domainClassName="be.cytomine.processing.Job").upload()

        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100, status=Job.TERMINATED,
                        statusComment="Job terminated.")
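# Hedged sketch (not in the original script): reloading the artifacts dumped above for a
# later prediction phase. The file names follow the '%d_model.joblib' / '%d_cov.joblib' /
# '%d_parameters.joblib' pattern used by the training job; out_path and id_term are
# assumed to be known to the caller.
import os
import joblib

def load_term_model(out_path, id_term):
    clf = joblib.load(os.path.join(out_path, '%d_model.joblib' % id_term))
    mx, my, cm = joblib.load(os.path.join(out_path, '%d_cov.joblib' % id_term))
    parameters = joblib.load(os.path.join(out_path, '%d_parameters.joblib' % id_term))
    return clf, (mx, my, cm), parameters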
"If unset, all images in the project are used.", default=None) parser.add_argument('--cytomine_id_job') params, _ = parser.parse_known_args(sys.argv[1:]) with Cytomine(params.cytomine_host, params.cytomine_public_key, params.cytomine_private_key) as c: id_tags_for_images = params.cytomine_id_tags_for_images id_project = params.cytomine_id_project image_tags = id_tags_for_images if id_tags_for_images else None images = ImageInstanceCollection(tags=image_tags).fetch_with_filter( "project", id_project) image_ids = [image.id for image in images] groundtruths = AnnotationCollection() groundtruths.showTerm = True groundtruths.showWKT = True groundtruths.images = image_ids groundtruths.fetch() predictions = AnnotationCollection() predictions.showTerm = True predictions.showWKT = True predictions.images = image_ids predictions.job = params.cytomine_id_job predictions.fetch() print("There are {} groundtruths and {} predictions".format( len(groundtruths), len(predictions)))
def preprocess(cytomine, working_path, id_project, id_terms=None, id_tags_for_images=None):
    """
    Get data from Cytomine in order to train YOLO.
    :param cytomine: The Cytomine client
    :param working_path: The path where files will be stored
    :param id_project: The Cytomine project ID used to get data
    :param id_terms: The Cytomine term IDs used to get data
    :param id_tags_for_images: The Cytomine tag IDs associated to images used to get data
    :return: classes_filename: The name of the file with classes
             image_filenames: A list of image filenames
             annotation_filenames: A list of filenames with annotations in YOLO format
    """
    if not os.path.exists(working_path):
        os.makedirs(working_path)
    images_path = os.path.join(working_path, IMG_DIRECTORY)
    if not os.path.exists(images_path):
        os.makedirs(images_path)
    annotations_path = os.path.join(working_path, ANNOTATION_DIRECTORY)
    if not os.path.exists(annotations_path):
        os.makedirs(annotations_path)

    terms = TermCollection().fetch_with_filter("project", id_project)
    if id_terms:
        filtered_term_ids = [int(id_term) for id_term in id_terms.split(',')]
        filtered_terms = [term for term in terms if term.id in filtered_term_ids]
    else:
        filtered_terms = terms
    terms_indexes = {term.id: i for i, term in enumerate(filtered_terms)}

    # https://github.com/eriklindernoren/PyTorch-YOLOv3#train-on-custom-dataset
    # Write obj.names
    classes_filename = os.path.join(working_path, CLASSES_FILENAME)
    with open(classes_filename, 'w') as f:
        for term in filtered_terms:
            f.write(term.name + os.linesep)

    # Download images
    image_filenames = []
    image_tags = id_tags_for_images if id_tags_for_images else None
    images = ImageInstanceCollection(tags=image_tags).fetch_with_filter("project", id_project)
    for image in images:
        image.dump(os.path.join(working_path, IMG_DIRECTORY, "{id}.png"), override=False)
        image_filenames.append(image.filename)

    # Create annotation files
    annotation_filenames = []
    for image in images:
        annotations = AnnotationCollection()
        annotations.image = image.id
        annotations.terms = [t.id for t in filtered_terms] if id_terms else None
        annotations.showWKT = True
        annotations.showTerm = True
        annotations.fetch()

        filename = os.path.join(working_path, ANNOTATION_DIRECTORY, "{}.txt".format(image.id))
        with open(filename, 'w') as f:
            for annotation in annotations:
                geometry = wkt.loads(annotation.location)
                x, y, w, h = geometry_to_yolo(geometry, image.width, image.height)
                for term_id in annotation.term:
                    # <object-class> <x_center> <y_center> <width> <height>
                    f.write("{} {:.12f} {:.12f} {:.12f} {:.12f}".format(
                        terms_indexes[term_id], x, y, w, h) + os.linesep)
        annotation_filenames.append(filename)

    return classes_filename, image_filenames, annotation_filenames
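# Hedged sketch of what the geometry_to_yolo helper referenced above could look like (its
# real implementation is not shown here, so this is an assumption labeled as such). YOLO
# expects the box center and size normalized to [0, 1]; Cytomine geometries use a
# bottom-left origin, so the y axis must be flipped to the raster (top-left) convention.
def geometry_to_yolo_sketch(geometry, image_width, image_height):
    minx, miny, maxx, maxy = geometry.bounds
    x_center = (minx + maxx) / 2.0 / image_width
    y_center = (image_height - (miny + maxy) / 2.0) / image_height  # flip y axis
    w = (maxx - minx) / image_width
    h = (maxy - miny) / image_height
    return x_center, y_center, w, h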
def get_images_mask_per_annotation_per_user(proj_id, image_id, user_id, scale_factor, dest):
    # Fetch the image instance to get its dimensions
    image = ImageInstance().fetch(image_id)
    image_width = int(image.width)
    image_height = int(image.height)
    print(image_height, image_width)

    annotations = AnnotationCollection()
    annotations.project = proj_id
    annotations.image = image_id
    annotations.user = user_id
    annotations.showWKT = True
    annotations.showMeta = True
    annotations.showTerm = True
    annotations.showGIS = True
    annotations.showImage = True
    annotations.showUser = True
    annotations.fetch()

    # Group annotation geometries (WKT strings) by term
    dct_annotations = {}
    for a in annotations:
        print(a.user)
        if len(a.term) == 1:
            term = a.term[0]
            if term not in dct_annotations:
                dct_annotations[term] = []
            dct_annotations[term].append(a.location)
        else:
            warnings.warn("Not suited for multiple or no annotation term")

    for t, lanno in dct_annotations.items():
        result_image = Image.new(mode='1',
                                 size=(int(image_width * scale_factor),
                                       int(image_height * scale_factor)),
                                 color=0)
        for pwkt in lanno:
            if pwkt.startswith("POLYGON"):
                label = "POLYGON"
            elif pwkt.startswith("MULTIPOLYGON"):
                label = "MULTIPOLYGON"

            coordinates_string = pwkt.replace(label, '')
            if label == "POLYGON":
                coordinates_string_lists = [coordinates_string]
            elif label == "MULTIPOLYGON":
                coordinates_string_lists = coordinates_string.split(')), ((')
            coordinates_string_lists = [
                coord_list.replace('(', '').replace(')', '')
                for coord_list in coordinates_string_lists
            ]

            for coordinates_string_list in coordinates_string_lists:
                # Create lists of x and y coordinates
                x_coords = []
                y_coords = []
                for point in coordinates_string_list.split(','):
                    point = point.strip(string.whitespace)   # remove leading and trailing spaces
                    point = point.strip(string.punctuation)  # some strings have a ')' at the end, so remove it
                    x_coords.append(round(float(point.split(' ')[0])))
                    y_coords.append(round(float(point.split(' ')[1])))

                x_coords_correct_lod = [int(x * scale_factor) for x in x_coords]
                # Cytomine uses a bottom-left origin, so flip the y axis for the raster mask
                y_coords_correct_lod = [image_height * scale_factor - int(y * scale_factor)
                                        for y in y_coords]
                coords = list(zip(x_coords_correct_lod, y_coords_correct_lod))
                # Draw the polygon in the image and fill it
                ImageDraw.Draw(result_image).polygon(coords, outline=1, fill=1)

        result_image.save(dest + '/' + str(t) + '.png')
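# Hedged usage sketch for the function above: the host, keys, and IDs are placeholders.
# A scale_factor of 0.25 renders the binary masks at a quarter of the whole-slide
# resolution, producing one PNG per term in the destination directory.
#
# with Cytomine(host="<HOST>", public_key="<PUBLIC_KEY>", private_key="<PRIVATE_KEY>"):
#     get_images_mask_per_annotation_per_user(proj_id=12345, image_id=67890,
#                                             user_id=42, scale_factor=0.25,
#                                             dest='/tmp/masks')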
with Cytomine(host=params.host,
              public_key=params.public_key,
              private_key=params.private_key) as cytomine:
    if params.opencv:
        image_instances = ImageInstanceCollection().fetch_with_filter(
            "project", params.id_project)

    # We want all annotations in a given project.
    annotations = AnnotationCollection()
    annotations.project = params.id_project  # Add a filter: only annotations from this project
    # You could add other filters:
    # annotations.image = id_image     => only annotations from this image
    # annotations.images = [id1, id2]  => only annotations from these images
    # annotations.user = id_user       => only annotations from this user
    # ...
    annotations.showWKT = True   # Ask to return WKT location (geometry) in the response
    annotations.showMeta = True  # Ask to return meta information (id, ...) in the response
    annotations.showGIS = True   # Ask to return GIS information (perimeter, area, ...) in the response
    # ...
    # => Fetch annotations from the server with the given filters.
    annotations.fetch()
    print(annotations)

    for annotation in annotations:
        print("ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
              .format(annotation.id, annotation.image, annotation.project,
                      annotation.term, annotation.user, annotation.area,
                      annotation.perimeter, annotation.location))
        # The annotation location is the annotation geometry in WKT format.
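# Hedged sketch: dumping a crop of each fetched annotation to disk with Annotation.dump()
# from the Cytomine Python client. The {project}/{image}/{id} placeholders in dest_pattern
# are assumed to be resolved by the client, and download_path is assumed to come from an
# argument parser as in the earlier snippets.
#
# for annotation in annotations:
#     annotation.dump(dest_pattern=os.path.join(params.download_path,
#                                               "{project}", "{image}", "{id}.png"))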
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        print(conn.parameters)
        conn.job.update(status=Job.RUNNING, progress=0,
                        statusComment="Initialization...")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Earlier experiments created a dedicated ontology and terms, e.g.:
        # ontology = Ontology("CLASSPNCELLS").save()
        # term_P = Term("PositiveCell", ontology.id, "#FF0000").save()
        # term_N = Term("NegativeCell", ontology.id, "#00FF00").save()
        # terms = TermCollection().fetch_with_filter("ontology", ontology.id)
        terms = TermCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING, progress=1,
                        statusComment="Terms collected...")
        print(terms)

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING, progress=2,
                        statusComment="Images gathered...")
        if conn.parameters.cytomine_id_images == 'all':
            list_imgs = [int(image.id) for image in images]
        else:
            list_imgs = [int(id_img)
                         for id_img in conn.parameters.cytomine_id_images.split(',')]
        print(list_imgs)

        # Go over images
        conn.job.update(status=Job.RUNNING, progress=10,
                        statusComment="Running PN classification on image...")
        # for id_image in conn.monitor(list_imgs, prefix="Running PN classification on image", period=0.1):
        for id_image in list_imgs:
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_cell_term
            roi_annotations.image = id_image  # conn.parameters.cytomine_id_image
            roi_annotations.job = conn.parameters.cytomine_id_annotation_job
            roi_annotations.user = conn.parameters.cytomine_id_user_job
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            # Go over the cell annotations in this image
            for roi in roi_annotations:
                # Get Cytomine ROI coordinates for remapping to the whole slide.
                # Cytomine uses a cartesian coordinate system: (0,0) is the bottom-left corner.
                print("----------------------------Cells------------------------------")
                roi_geometry = wkt.loads(roi.location)
                minx = roi_geometry.bounds[0]
                miny = roi_geometry.bounds[3]

                # Dump the ROI image into a local PNG file
                # roi_path = os.path.join(working_path, str(roi_annotations.project) + '/' + str(roi_annotations.image) + '/' + str(roi.id))
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image))
                roi_png_filename = os.path.join(roi_path, str(roi.id) + '.png')
                conn.job.update(status=Job.RUNNING, progress=20,
                                statusComment=roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, alpha=True)

                # Read the RGBA crop and build an inverse-distance weight map over a
                # square block centred on the cell
                J = cv2.imread(roi_png_filename, cv2.IMREAD_UNCHANGED)
                J = cv2.cvtColor(J, cv2.COLOR_BGRA2RGBA)
                [r, c, h] = J.shape
                blocksize = r if r < c else c
                rr = np.zeros((blocksize, blocksize))
                cc = np.zeros((blocksize, blocksize))
                zz = [*range(1, blocksize + 1)]
                for i in zz:
                    rr[i - 1, :] = zz
                for i in zz:
                    cc[:, i - 1] = zz
                # 16.5 is the centre of a 32x32 block (coordinates 1..32)
                cc1 = np.asarray(cc) - 16.5
                rr1 = np.asarray(rr) - 16.5
                cc2 = np.asarray(cc1) ** 2
                rr2 = np.asarray(rr1) ** 2
                rrcc = np.asarray(cc2) + np.asarray(rr2)
                weight = np.sqrt(rrcc)  # distance from the block centre
                weight2 = 1. / weight   # inverse-distance weights
                coord = [c / 2, r / 2]
                halfblocksize = blocksize / 2
                y = round(coord[1])
                x = round(coord[0])

                # Convert the RGBA image to HSV, masking out transparent pixels
                # via the alpha channel
                Jalpha = J[:, :, 3]
                Jalphaloc = Jalpha / 255
                Jrgb = cv2.cvtColor(J, cv2.COLOR_RGBA2RGB)
                Jhsv = cv2.cvtColor(Jrgb, cv2.COLOR_RGB2HSV_FULL)
                Jhsv = Jhsv / 255
                Jhsv[:, :, 0] = Jhsv[:, :, 0] * Jalphaloc
                Jhsv[:, :, 1] = Jhsv[:, :, 1] * Jalphaloc
                Jhsv[:, :, 2] = Jhsv[:, :, 2] * Jalphaloc

                currentblock = Jhsv[0:blocksize, 0:blocksize, :]
                currentblockH = currentblock[:, :, 0]
                currentblockV = 1 - currentblock[:, :, 2]
                hue = sum(sum(currentblockH * weight2))
                val = sum(sum(currentblockV * weight2))

                # Classify the cell from its weighted hue and value
                if hue < 2:
                    cellclass = 1
                elif val < 15:
                    cellclass = 2
                else:
                    if hue < 30 or val > 40:
                        cellclass = 1
                    else:
                        cellclass = 2

                if cellclass == 1:
                    # Positive cell
                    id_terms = conn.parameters.cytomine_id_positive_term
                elif cellclass == 2:
                    # Negative cell
                    id_terms = conn.parameters.cytomine_id_negative_term
                # The hue/value scores could also be stored on the annotation, e.g.:
                # Property(annot, key="Hue", value=str(hue)).save()
                # Property(annot, key="Val", value=str(val)).save()

                # Re-upload the cell geometry with the predicted term
                cytomine_annotations = AnnotationCollection()
                annotation = roi_geometry
                cytomine_annotations.append(
                    Annotation(location=annotation.wkt,
                               id_image=id_image,  # conn.parameters.cytomine_id_image
                               id_project=conn.parameters.cytomine_id_project,
                               id_terms=[id_terms]))
                print(".", end='', flush=True)

                # Send the annotation collection (for this ROI) to the Cytomine server
                # in one HTTP request
                ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED, progress=100,
                        statusComment="Finished.")
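# Hedged sketch (a refactoring suggestion, not part of the original script): the hue/value
# decision rule above extracted into a standalone function, so the thresholds (2, 15, 30,
# 40) can be unit-tested without a Cytomine connection or image files.
def classify_cell(hue, val):
    """Return 1 for a positive cell, 2 for a negative cell."""
    if hue < 2:
        return 1
    if val < 15:
        return 2
    return 1 if (hue < 30 or val > 40) else 2

assert classify_cell(1.0, 50.0) == 1  # very low hue  => positive
assert classify_cell(10.0, 5.0) == 2  # low value     => negative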