def __call__(self, data):
    import pandas as pd
    from pathlib import Path
    from deformationcytometer.detection.includes.regionprops import mask_to_cells_edge, mask_to_cells_edge2
    from deformationcytometer.evaluation.helper_functions import filterCells

    output_path = Path(data["filename"][:-4] + "_result_new.csv")

    if data["type"] != "image":
        if data["type"] == "start":
            # add ellipse marker type
            if write_clickpoints_file and write_clickpoints_markers:
                import clickpoints
                with clickpoints.DataFile(data["filename"][:-4] + ".cdb") as cdb:
                    cdb.setMarkerType("cell", "#FF0000", mode=cdb.TYPE_Ellipse)
            # delete an existing output file
            if output_path.exists():
                output_path.unlink()
        return data

    log("3find_cells", "detect", 1, data["index"])
    new_cells = mask_to_cells_edge2(data["mask"], data["im"], data["config"], r_min,
                                    frame_data={"frames": data["index"], "timestamp": data["timestamp"]})
    new_cells = pd.DataFrame(new_cells,
                             columns=["frames", "timestamp", "x", "y", "rp", "long_axis", "short_axis",
                                      "angle", "irregularity", "solidity", "sharpness", "velocity",
                                      "cell_id", "tt", "tt_r2"])

    # write (or append to) the result csv, header only on the first write
    if not output_path.exists():
        with output_path.open("w") as fp:
            new_cells.to_csv(fp, index=False, header=True)
    else:
        with output_path.open("a") as fp:
            new_cells.to_csv(fp, index=False, header=False)

    # filter cells according to solidity and irregularity
    new_cells = filterCells(new_cells, solidity_threshold=self.solidity_threshold,
                            irregularity_threshold=self.irregularity_threshold)

    if write_clickpoints_file and write_clickpoints_markers:
        import clickpoints
        with clickpoints.DataFile(data["filename"][:-4] + ".cdb") as cdb:
            for i, d in new_cells.iterrows():
                cdb.setEllipse(frame=int(d.frames), x=d.x, y=d.y,
                               width=d.long_axis / data["config"]["pixel_size"],
                               height=d.short_axis / data["config"]["pixel_size"],
                               angle=d.angle, type="cell")

    data["config"]["solidity"] = self.solidity_threshold
    data["config"]["irregularity"] = self.irregularity_threshold
    data["cells"] = new_cells
    del data["mask"]
    log("3find_cells", "detect", 0, data["index"])
    return data
def __call__(self, data):
    import numpy as np
    import skimage.draw
    import clickpoints
    from pathlib import Path

    if data["type"] == "start" or data["type"] == "end":
        yield data
        return

    data_storage_mask_numpy = self.data_storage.get_stored(data["mask_info"])

    #with clickpoints.DataFile(r"E:\FlowProject\2021.4.14\0.1 atm\2021_04_14_11_37_36_ellipse.cdb") as cdb:  # + 10000
    with clickpoints.DataFile(data["filename"][:-4] + "_ellipse.cdb") as cdb:  # + 30000
    #with clickpoints.DataFile(r"E:\FlowProject\2021.4.14\0.2 atm\2021_04_14_13_44_55_Fl_ellipse.cdb") as cdb:  # + 40000
    #with clickpoints.DataFile(r"E:\FlowProject\2021.4.14\0.5 atm\2021_04_14_13_04_12_Fl.cdb") as cdb:  # + 0
        path_entry = cdb.getPath(".")  # Path(data["filename"]).parent)
        for i, index in enumerate(range(data["index"], data["end_index"])):
            img = cdb.table_image.get(cdb.table_image.filename == str(Path(data["filename"]).name),
                                      cdb.table_image.frame == index)  # , path=path_entry)
            for ellipse in img.ellipses:
                # draw a hollow ellipse outline: fill the full ellipse with 1,
                # then clear an ellipse shrunk by 3 px back to 0
                data_storage_mask_numpy[i][skimage.draw.ellipse(
                    ellipse.y, ellipse.x, ellipse.width / 2, ellipse.height / 2,
                    data_storage_mask_numpy[i].shape, np.pi / 2 - np.deg2rad(ellipse.angle))] = 1
                data_storage_mask_numpy[i][skimage.draw.ellipse(
                    ellipse.y, ellipse.x, ellipse.width / 2 - 3, ellipse.height / 2 - 3,
                    data_storage_mask_numpy[i].shape, np.pi / 2 - np.deg2rad(ellipse.angle))] = 0
    yield data
def setup_database_for_tfm(folder, name):
    '''
    Sorting images into a clickpoints database. Frames are identified by leading numbers,
    layers by the file name. Images are classified by the regular expressions in search_keys:
    "after" matches the image after bead removal, "before" the image before bead removal and
    "cells" the image of the cells. Typical image endings (.png, .tif ...) are added
    automatically. The "frames" regex must mark the group containing the frame number with
    parentheses "()".

    :param folder: Folder where images are searched.
    :param name: Name of the database. Needs to end with .cdb.
    :return:
    '''
    # creating a new cdb database; this will overwrite an existing one
    db = clickpoints.DataFile(os.path.join(folder, name), "w")
    folders = {"folder1_txt": os.getcwd(),
               "folder2_txt": os.getcwd(),
               "folder3_txt": os.getcwd(),
               "folder_out_txt": os.getcwd()}
    search_keys = {"after": r"\d{1,4}after",
                   "before": r"\d{1,4}before",
                   "cells": r"\d{1,4}bf_before",
                   "frames": r"(\d{1,4})"}
    setup_database_internal(db, search_keys, folders)
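# Hedged usage sketch, not part of the source: how setup_database_for_tfm might be
# called. The folder name and database name are made-up illustration values;
# setup_database_internal must be importable from the surrounding module.
import os

image_folder = os.path.join(os.getcwd(), "tfm_experiment")  # hypothetical folder with numbered images
setup_database_for_tfm(image_folder, "database.cdb")        # creates tfm_experiment/database.cdb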
def load_tracks_from_clickpoints(self, path, type):
    db = clickpoints.DataFile(path)
    type = db.getMarkerType(name=type)
    array = np.asarray(db.db.execute_sql(
        "select sort_index, x, y, id from marker join image on marker.image_id = image.id "
        "where type_id = ? order by sort_index", [type.id]).fetchall(), dtype=float)
    # map the running marker index to the marker id stored in the last column
    dictionary = dict(zip(range(len(array)), array[:, 3]))
    print("Marker loaded!")
    return db, array[:, :-1], type, dictionary
def __init__(self, db, method="r", raise_Error=True):
    self.raise_Error = raise_Error
    if isinstance(db, clickpoints.DataFile):
        self.file = db
        self.db_obj = True
    else:
        self.file = clickpoints.DataFile(db, method)
        self.db_obj = False
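# Hedged usage sketch: the enclosing class of this __init__ is not shown in the
# source; "DBWrapper" is a hypothetical stand-in for it. It illustrates the two
# accepted argument types: a path string or an already opened DataFile.
import clickpoints

wrapper_from_path = DBWrapper("experiment.cdb", method="r")   # the wrapper opens the file itself
existing_db = clickpoints.DataFile("experiment.cdb", "r")
wrapper_from_object = DBWrapper(existing_db)                  # the wrapper reuses the open object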
def __call__(self, data):
    import time
    predict_start_first = time.time()
    from deformationcytometer.detection.includes.UNETmodel import UNet
    import numpy as np
    from deformationcytometer.detection.includes.regionprops import preprocess, getTimestamp

    if data["type"] == "start" or data["type"] == "end":
        yield data
        return

    log("2detect", "prepare", 1, data["index"])

    def preprocess(img):
        # normalize each image in the batch to zero mean and unit variance
        img = img - np.mean(img, axis=(1, 2))[:, None, None]
        img = img / np.std(img, axis=(1, 2))[:, None, None]
        return img.astype(np.float32)

    data_storage_numpy = self.data_storage.get_stored(data["data_info"])
    data_storage_mask_numpy = self.data_storage.get_stored(data["mask_info"])

    # initialize the unet if necessary
    im = data_storage_numpy[0]  # batch[0]["im"]
    if self.unet is None or self.unet.shape[:2] != im.shape:
        if self.network_weights is not None and self.network_weights != "":
            self.unet = UNet((im.shape[0], im.shape[1], 1), 1, d=8, weights=self.network_weights)
        else:
            self.unet = UNet((im.shape[0], im.shape[1], 1), 1, d=8)

    # predict cell masks from the image batch
    im_batch = preprocess(data_storage_numpy)
    predict_start = time.time()
    import tensorflow as tf
    with tf.device('/GPU:0'):
        prediction_mask_batch = self.unet.predict(im_batch[:, :, :, None])[:, :, :, 0] > 0.5
    dt = time.time() - predict_start

    data_storage_mask_numpy[:] = prediction_mask_batch

    if self.write_clickpoints_masks:
        import clickpoints
        with clickpoints.DataFile(data["filename"][:-4] + ".cdb") as cdb:
            # iterate over all images and store their masks
            for mask, index in zip(data_storage_mask_numpy, range(data["index"], data["end_index"])):
                cdb.setMask(frame=index, data=mask.astype(np.uint8))

    data["config"].update({"network": self.network_weights})
    log("2detect", "prepare", 0, data["index"])
    yield data
def __init__(self, db_path=None, **kwargs):
    if db_path is not None:
        self.db = clickpoints.DataFile(db_path)
        im_shape = self.db.getImages()[0].getShape()
        self.nx, self.ny = im_shape[1], im_shape[0]
        x, y = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
        x, y = x.flatten(), y.flatten()
        self.points = np.vstack((x, y)).T
def load_tracks_from_clickpoints(self, path, type):
    db = clickpoints.DataFile(path)
    tracks = db.getTracks(type=type)
    track_dict = dict(enumerate([t.id for t in tracks]))
    if self.Frames is None:
        array = np.asarray([db.db.execute_sql(
            "select (SELECT x from marker as m WHERE m.image_id = i.id AND m.track_id=?) as x, "
            "(SELECT y from marker as m WHERE m.image_id = i.id AND m.track_id=?) as y "
            "from image as i order by sort_index",
            [track_dict[k], track_dict[k]]).fetchall()
            for k in sorted(track_dict.keys())], dtype=float)
        self.Frames = range(array.shape[1])
    else:
        # one placeholder per requested frame, since sqlite cannot bind a list to a single "?"
        placeholders = ",".join("?" * len(self.Frames))
        array = np.asarray([db.db.execute_sql(
            "select (SELECT x from marker as m WHERE m.image_id = i.id AND m.track_id=?) as x, "
            "(SELECT y from marker as m WHERE m.image_id = i.id AND m.track_id=?) as y "
            "from image as i WHERE i.sort_index in (%s) order by sort_index" % placeholders,
            [track_dict[k], track_dict[k]] + list(self.Frames)).fetchall()
            for k in sorted(track_dict.keys())], dtype=float)
    print("Tracks loaded!")
    return db, array, track_dict
def __call__(self, data):
    import time
    predict_start_first = time.time()
    import numpy as np
    import cv2
    from skimage.filters import gaussian
    from skimage.morphology import area_opening
    from skimage import feature
    from scipy.ndimage import generate_binary_structure, binary_fill_holes
    from deformationcytometer.detection.includes.regionprops import preprocess, getTimestamp

    if data["type"] == "start" or data["type"] == "end":
        yield data
        return

    log("2detect", "prepare", 1, data["index"])
    data_storage_numpy = self.data_storage.get_stored(data["data_info"])
    data_storage_mask_numpy = self.data_storage.get_stored(data["mask_info"])

    for i, im in enumerate(data_storage_numpy):
        # band-pass filter: difference of gaussians, rescaled to uint8
        fban = gaussian(im, sigma=1) - gaussian(im, sigma=6)
        fban = fban - fban.min()
        fban = np.uint8(fban / fban.max() * 255)
        # morphological gradient emphasizes the cell edges
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
        gradient = cv2.morphologyEx(fban, cv2.MORPH_GRADIENT, kernel)
        fban = np.uint8(gradient / gradient.max() * 255)
        # canny edge detection, then fill the closed contours and drop small objects
        edges = feature.canny(fban, sigma=2, low_threshold=0.99, high_threshold=0.99, use_quantiles=True)
        struct = generate_binary_structure(2, 1)
        ffil = binary_fill_holes(edges, structure=struct).astype(int)
        ffil = np.uint8(ffil * 255)
        mask = area_opening(ffil, area_threshold=600, connectivity=1)
        data_storage_mask_numpy[i] = mask

    if self.write_clickpoints_masks:
        import clickpoints
        with clickpoints.DataFile(data["filename"][:-4] + ".cdb") as cdb:
            # iterate over all images and store their masks
            for mask, index in zip(data_storage_mask_numpy, range(data["index"], data["end_index"])):
                cdb.setMask(frame=index, data=mask.astype(np.uint8))

    data["config"].update({"network": self.network_weights})
    log("2detect", "prepare", 0, data["index"])
    yield data
def set_up_additional_databases(ev_addon, db_name, illustration=False):
    folder = os.path.split(db_name)[0]
    os.makedirs(folder, exist_ok=True)
    export_db_path = os.path.join(folder, db_name)
    notes_txt = open(export_db_path[:-4] + "_notes.txt", "a+")
    if os.path.exists(export_db_path):
        exp_db = clickpoints.DataFile(export_db_path, "r")
    else:
        exp_db = clickpoints.DataFile(export_db_path, "w")
    exp_db.deletePaths()
    exp_db.setPath(folder)
    if illustration:
        mt1 = exp_db.setMaskType(name=ev_addon.net1_db_name, color=ev_addon.net1_db_color, index=1)
        mt2 = exp_db.setMaskType(name=ev_addon.net2_db_name, color=ev_addon.net2_db_color, index=2)
        mt_ov = exp_db.setMaskType(name=ev_addon.overlap_mask, color=ev_addon.overlap_mask_color, index=3)
    elt1 = exp_db.setMarkerType(name=ev_addon.net1_db_name, color=ev_addon.net1_db_color,
                                mode=clickpoints.DataFile.TYPE_Ellipse)
    elt2 = exp_db.setMarkerType(name=ev_addon.net2_db_name, color=ev_addon.net2_db_color,
                                mode=clickpoints.DataFile.TYPE_Ellipse)
    return exp_db, notes_txt
def process_load_images(filename):
    """ Load a .tif file stack and yield all the images. """
    import imageio
    from deformationcytometer.detection import pipey
    from deformationcytometer.detection.includes.regionprops import preprocess, getTimestamp
    from deformationcytometer.includes.includes import getConfig
    import clickpoints

    print("start load images", filename)
    log("1load_images", "prepare", 1)

    # open the image reader
    reader = imageio.get_reader(filename)
    # get the config file
    config = getConfig(filename)
    # get the total image count
    image_count = len(reader)

    print("create cdb", filename[:-4] + ".cdb")
    if write_clickpoints_file:
        cdb = clickpoints.DataFile(filename[:-4] + ".cdb", "w")
        cdb.setMaskType("prediction", color="#FF00FF", index=1)

    yield dict(filename=filename, index=-1, type="start")
    log("1load_images", "prepare", 0)

    log("1load_images", "read", 1)
    # iterate over all images in the file
    for image_index, im in enumerate(reader):
        if image_index == image_count:
            break
        # ensure the image has only one channel
        if len(im.shape) == 3:
            im = im[:, :, 0]
        # get the timestamp from the file
        timestamp = float(getTimestamp(reader, image_index))
        if write_clickpoints_file:
            cdb.setImage(filename, frame=image_index)  # , timestamp=timestamp)
        log("1load_images", "read", 0, image_index)
        # return everything in a nicely packed dictionary
        yield dict(filename=filename, index=image_index, type="image", timestamp=timestamp,
                   im=im, config=config, image_count=image_count)
        if image_index < image_count - 1:
            log("1load_images", "read", 1, image_index + 1)

    yield dict(filename=filename, index=image_count, type="end")
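# Hedged usage sketch: process_load_images is a generator, so a consumer iterates
# over the dictionaries it yields; "example_video.tif" is a made-up file name and
# the module-level write_clickpoints_file flag must be defined.
for item in process_load_images("example_video.tif"):
    if item["type"] == "image":
        print(item["index"], item["im"].shape, item["timestamp"])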
def load_from_clickpoints(self, file, marker_type, label=0, tag=None, n=None):
    import clickpoints
    import warnings
    db = clickpoints.DataFile(file)
    if tag is None:
        tag = file.split("/")[-1].split("\\")[-1]
    marker_type = db.getMarkerType(marker_type)
    if n is None:
        self.extend([dotdict({"image": self.__sample__(m.image.data, m.x, m.y),
                              "label": label,
                              "meta": self.__meta__(m.image, tag, m.x, m.y)})
                     for m in db.getMarkers(type=marker_type)])
    else:
        # take every i-th marker to draw n roughly evenly spaced samples
        i = int(db.getMarkers(type=marker_type).count() / n)
        try:
            self.extend([dotdict({"image": self.__sample__(m.image.data, m.x, m.y),
                                  "label": label,
                                  "meta": self.__meta__(m.image, tag, m.x, m.y)})
                         for m in db.getMarkers(type=marker_type)[:n * i:i]])
        except ValueError:
            # fewer markers than requested: fall back to using all of them
            self.extend([dotdict({"image": self.__sample__(m.image.data, m.x, m.y),
                                  "label": label,
                                  "meta": self.__meta__(m.image, tag, m.x, m.y)})
                         for m in db.getMarkers(type=marker_type)])
            warnings.warn("Not enough data found in database. n=%s, found %s"
                          % (n, db.getMarkers(type=marker_type).count()))
def Create_DB(Name, pic_path, db_path, pic_pos="pos0"):
    """
    Creates a ClickPoints database.

    Parameters
    --------------
    Name: String
        Path, including the file name, for the new database
    pic_path: String
        Glob pattern that matches all the pictures
    db_path: String
        Path to the folder in which the pictures can be found
    pic_pos: String
        Position tag; only files whose name contains this string are added
    """
    db = clickpoints.DataFile(Name, 'w')
    images = glob.glob(pic_path)
    print(len(images))
    layer_dict = {"MinP": 0, "MinIndices": 1, "MaxP": 2, "MaxIndices": 3}
    db.setPath(db_path, 1)
    for image in images:
        path = os.path.sep.join(image.split(os.path.sep)[:-1])
        file = image.split("/")[-1]
        idx = file.split("_")[2][3:]
        layer_str = file.split("_")[-1][1:-4]
        if not file.count(pic_pos):
            continue
        if layer_str.count("MinProj"):
            layer = 0
        elif layer_str.count("MinIndices"):
            layer = 1
        elif layer_str.count("MaxProj"):
            layer = 2
        elif layer_str.count("MaxIndices"):
            layer = 3
        else:
            raise ValueError("No known layer!")
        print(idx, layer)
        image = db.setImage(filename=file, path=1, layer=layer)  # , frame=int(idx))
        image.sort_index = int(idx)
        image.save()
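# Hedged usage sketch; the glob pattern and folder are made-up illustration values.
# Only files containing "pos0" in their name are added, sorted into the four
# projection layers by the suffix of their file name.
Create_DB("projections.cdb", "/data/projections/*.tif", "/data/projections", pic_pos="pos0")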
def setup_masks_and_layers(db_name, input_folder, output_folder, markers=None, masks=None, layers=None, tracks=None):
    cdb_filepath = output_folder / db_name
    print(cdb_filepath)
    db = clickpoints.DataFile(cdb_filepath, 'w')  # creates and opens the cdb file
    db.setPath(input_folder, 1)  # sets path entry of input images in cdb file
    #db.setPath(output_folder, 2)  # sets path entry of output folder images in cdb file

    # setting up marker types
    if markers is not None:
        for name, color in markers.items():
            db.setMarkerType(name=name, color=color)
    # setting up track types
    if tracks is not None:
        for name, color in tracks.items():
            db.setMarkerType(name=name, color=color, mode=db.TYPE_Track)
    # setting up mask types
    if masks is not None:
        for name, (color, index) in masks.items():
            db.setMaskType(name=name, color=color, index=index)
    # setting up layers
    if layers is not None:
        base_layer = db.getLayer(layers[0], create=True, id=0)
        for name in layers[1:]:
            db.getLayer(name, base_layer=base_layer, create=True)
    return db
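# Hedged usage sketch with made-up marker, mask, and layer definitions; the dict
# shapes follow the loops above (masks map a name to a (color, index) tuple).
from pathlib import Path

db = setup_masks_and_layers("analysis.cdb", Path("raw_images"), Path("output"),
                            markers={"cell": "#FF0000"},
                            tracks={"cell_track": "#00FF00"},
                            masks={"segmentation": ("#0000FF", 1)},
                            layers=["images", "membrane", "nuclei"])
db.db.close()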
def __call__(self, data):
    import time
    predict_start_first = time.time()
    import pandas as pd
    from pathlib import Path
    from deformationcytometer.detection.includes.regionprops import mask_to_cells_edge, mask_to_cells_edge2
    from deformationcytometer.evaluation.helper_functions import filterCells
    import numpy as np

    output_path = Path(data["filename"][:-4] + "_result_new.csv")

    if data["type"] != "image":
        if data["type"] == "start":
            # add ellipse marker type
            if self.write_clickpoints_markers:
                import clickpoints
                with clickpoints.DataFile(data["filename"][:-4] + ".cdb") as cdb:
                    cdb.setMarkerType("cell", "#FF0000", mode=cdb.TYPE_Ellipse)
            # delete an existing output file
            if output_path.exists():
                output_path.unlink()
        return data

    data_storage_mask_numpy = self.data_storage.get_stored(data["mask_info"])

    log("3find_cells", "detect", 1, data["index"])
    new_cells = []
    row_indices = [0]
    for mask, timestamp, index in zip(data_storage_mask_numpy, data["timestamps"],
                                      range(data["index"], data["index"] + data_storage_mask_numpy.shape[0])):
        cells = mask_to_cells_edge2(mask, None, data["config"], self.r_min,
                                    frame_data={"frames": index, "timestamp": timestamp},
                                    hollow_masks=self.hollow_masks)
        row_indices.append(row_indices[-1] + len(cells))
        new_cells.extend(cells)
    new_cells = pd.DataFrame(new_cells,
                             columns=["frames", "timestamp", "x", "y", "rp", "long_axis", "short_axis",
                                      "angle", "irregularity", "solidity", "sharpness", "velocity",
                                      "cell_id", "tt", "tt_r2", "omega"])

    if not output_path.exists():
        with output_path.open("w") as fp:
            new_cells.to_csv(fp, index=False, header=True)
    else:
        with output_path.open("a") as fp:
            new_cells.to_csv(fp, index=False, header=False)

    # filter cells according to solidity and irregularity
    #new_cells = filterCells(new_cells, solidity_threshold=self.solidity_threshold,
    #                        irregularity_threshold=self.irregularity_threshold)

    if self.write_clickpoints_markers:
        import clickpoints
        with clickpoints.DataFile(data["filename"][:-4] + ".cdb") as cdb:
            for i, d in new_cells.iterrows():
                cdb.setEllipse(frame=int(d.frames), x=d.x, y=d.y,
                               width=d.long_axis / data["config"]["pixel_size"],
                               height=d.short_axis / data["config"]["pixel_size"],
                               angle=d.angle, type="cell")

    data["config"]["solidity"] = self.solidity_threshold
    data["config"]["irregularity"] = self.irregularity_threshold
    #new_cells.set_index("frames", inplace=True)
    data["cells"] = new_cells
    data["row_indices"] = row_indices
    #del data["mask"]
    log("3find_cells", "detect", 0, data["index"])
    return data
def spheroid_analysis_with_bf_core(inputfolder_path, save_images, use_existing_mean_images, analyze,
                                   pixelsizes_dict, outputfolder_mode, cdb):
    # finding all fl-images
    fl_images = collect_files(inputfolder_path, selector_path=["SphInv", "Fluo1"],
                              selectors_file=["rep", "z0"], negative_selectors_file=[])
    # finding all bf images at t=0
    bf_images = collect_files(inputfolder_path, selector_path=["SphForce"],
                              selectors_file=["rep0000", "modeBF"],
                              negative_selectors_file=["above", "below"])

    if outputfolder_mode == "mode1":
        # creates the output folder by replacing a folder "Raw Data" with "Analyzed_Data",
        # copying the deeper folder structure
        outputfolder_path = re.sub("Raw Data|Raw_data", "Analyzed_Data", inputfolder_path, flags=re.I)
        outputfolder_path_profile = os.path.join(outputfolder_path, "Invasion_Profiles")
        outputfolder_path_mean = os.path.join(outputfolder_path, "Mean_images")
    if outputfolder_mode == "mode2":
        outputfolder_path = os.path.join(inputfolder_path, "Analyzed_Data")
        outputfolder_path_profile = os.path.join(outputfolder_path, "Invasion_Profiles")
        outputfolder_path_mean = os.path.join(outputfolder_path, "Mean_images")

    if save_images:  # creating output folders if they don't exist already
        createFolder(outputfolder_path_profile)
    if analyze:
        createFolder(outputfolder_path_mean)
        # setting up tab delimited file
        with open(os.path.join(outputfolder_path, 'invasion_analysis.txt'), 'w') as f:
            f.write("input_path\twell\tmode\tpos\tradius\td/2\tlambda\t"
                    "radius_of_constant_cell_density\tpixelsize_fl_image" + "\n")

    for key1, value1 in fl_images.items():
        for key2, value2 in fl_images[key1].items():
            for key3, value3 in fl_images[key1][key2].items():
                files_fl = value3
                try:
                    file_bf = bf_images[key1][key2][key3][0]
                except KeyError:
                    print("\n--------------------------------------->no complementary file to:")
                    print(files_fl[0])
                    print("!!! Position skipped !!!")
                    continue
                print("using", files_fl[0], "\n", file_bf)

                # Insert here single positions to analyse
                #if not ("pos014" in files_fl[0]):
                #    continue
                # Insert here single positions to skip
                #if ("pos014" in files_fl[0]):
                #    continue

                meta_info_dict = get_meta_info2(files_fl, well=key2, pos=key3)
                # default output file for mean blended images
                output_filename_path = os.path.join(outputfolder_path_mean,
                                                    'Mean_Blend_' + meta_info_dict["date"] + "_" +
                                                    meta_info_dict["well"] + "_" +
                                                    meta_info_dict["pos"] + ".tif")

                # reading fl images
                if os.path.exists(output_filename_path) and use_existing_mean_images:
                    # use existing mean blended images
                    img_16bit = np.array(Image.open(output_filename_path))
                else:
                    # generate new mean blended images
                    im_list = [np.array(Image.open(file)) for file in files_fl]
                    # stacking all images; dimensions fixed to 3, otherwise problems with single images
                    stack = np.array(im_list, ndmin=3)
                    mean_img = np.mean(stack, axis=0)  # mean blending of images
                    img_16bit = mean_img.astype("uint16")
                    if save_images:  # saving mean blended image
                        im = Image.fromarray(img_16bit)
                        im.save(output_filename_path)

                # read bright field image
                img_bf = plt.imread(file_bf)

                if analyze:  # performing analysis on mean blended images
                    res = analyze_profiles(img_fl=img_16bit, img_bf=img_bf,
                                           pixelsizes_dict=pixelsizes_dict,
                                           Mic=meta_info_dict["Mic"], nuc_size=nuc_size)
                    p, rad, inv_front, blob, mask, dens, dt, img, img1, px_um = res
                    plotting_invasion_profiles1(res, meta_info_dict, outputfolder_path_profile,
                                                output_filename_path)
                    #if "well1" in files_fl[0] or "well2" in files_fl[0] or "015" in files_fl[0] or "016" in files_fl[0] or "017" in files_fl[0] or "018" in files_fl[0]:
                    #    pass
                    #plotting_segementation(res, meta_info_dict, outputfolder_path_profile, output_filename_path)

                    # writing information to text file
                    output_text = [meta_info_dict["file_path"], meta_info_dict["well"],
                                   meta_info_dict["mode"], meta_info_dict["pos"],
                                   str(rad.round(2)), str(inv_front.round(2)),
                                   str(p[1].round(2)), str(p[0].round(2)), str(px_um)]
                    with open(os.path.join(outputfolder_path, 'invasion_analysis.txt'), 'a') as f:
                        f.write("\t".join(output_text) + "\n")

                if cdb and analyze and save_images:
                    # write the segmentation masks to a clickpoints database next to the profile plots
                    path_file = os.path.split(output_filename_path)
                    db = clickpoints.DataFile(os.path.join(outputfolder_path_profile,
                                                           path_file[1][:-4] + ".cdb"), "w")
                    db.setImage(output_filename_path, frame=0)
                    db.setMaskType("mask_cells", color="#1fff00", index=1)
                    db.setMaskType("mask_blob", color="#ff0f1b", index=2)
                    cdb_mask = copy.deepcopy(mask) * 1
                    cdb_mask[blob] = 2
                    db.setMask(image=db.getImages()[0], data=np.array(cdb_mask, dtype="uint8"))
                    db.db.close()
import numpy as np
import matplotlib.pyplot as plt
import clickpoints
from PIL import Image
from scipy.ndimage import median_filter
from skimage.filters import threshold_otsu
from skimage.morphology import skeletonize, remove_small_objects
import cv2
from skimage.measure import regionprops


def normalizing(img, lq=0, uq=100):
    # stretch the image between the lower and upper intensity percentiles and clip to [0, 1]
    img = img - np.percentile(img, lq)
    img = img / np.percentile(img, uq)
    img[img < 0] = 0.0
    img[img > 1] = 1.0
    return img


im = np.asarray(Image.open("/home/user/Desktop/biophysDS/dboehringer/Platte_3/Twitching-Experiments/Confocal-Experiments/2020-02-12-LuB1-Timelapse/stack 1/pos002/Pos002_S001_t314_z6_ch00.tif").convert("L"))
db = clickpoints.DataFile("/home/user/Desktop/mask_spheroid_david.cdb")
mask = db.getMask(frame=0).data.astype(bool)
db.db.close()

#im = ndi.gaussian_filter(im, 4)
# Compute the Canny filter for two values of sigma
im = normalizing(im, lq=10, uq=90)
plt.figure()
plt.imshow(im)

# subtract a heavily median-filtered background
med_filter = median_filter(im, size=30)
im_f = im - med_filter
plt.figure(); plt.imshow(im_f)

#edges = feature.canny(im, sigma=i, mask=~mask.astype(bool))
#plt.figure()
from cell_moement_analysis.cell_movement_orientation import *
from cell_moement_analysis.angel_calculations import FilterAndWeighting
import clickpoints

db_path = "/home/user/Desktop/biophysDS/abauer/ants_out/not_stitcheddatabase.cdb"
db = clickpoints.DataFile(db_path, "r")

'''
for i in range(0, 18):
    output_folder = "/home/user/Desktop/biophysDS/abauer/ants_out/analysis_frame_" + str(i)
    createFolder(output_folder)
    # frame window
    min_frame = i * 10000
    max_frame = (i + 1) * 10000
    angle_to_center_analysis(db, output_folder, output_file="nw_mean_angles.txt", min_frame=min_frame,
                             max_frame=max_frame, ws_angles=1, ws_mean=30, bs_mean=2, weighting="nw",
                             mark_center=True)
    angle_distance_distribution(db, output_folder, min_frame=min_frame, max_frame=max_frame, ws_angles=1,
                                window_length=int(300 / (4.095 / 10) + 1), ymin=0, ymax=90,
                                px_scale=4.0954 / 10)
'''

filter_list = [(FilterAndWeighting.length_threshold, {"threshold": 7}),
               (FilterAndWeighting.spatial_filter_radius, {"center": (332, 736), "radius": 70})]
weighting_list = [(FilterAndWeighting.linear_weigthing, {})]

output_folder = "/home/user/Desktop/biophysDS/abauer/ants_out/analysis_filters"
createFolder(output_folder)
    #slice2 = polar_array[r2[0]:r2[1], :]
    fig = plt.figure()
    plt.imshow(polar_array)
    pa1 = patches.Rectangle(xy=[0, r1[1]], width=2000, height=window_size, fill=False,
                            edgecolor="red", linewidth=2)
    plt.gca().add_patch(pa1)
    pa2 = patches.Rectangle(xy=[0, r2[1]], width=2000, height=window_size, fill=False,
                            edgecolor="yellow", linewidth=2)
    plt.gca().add_patch(pa2)
    return fig


if __name__ == '__main__':
    # fibres with spheroid
    im = np.asarray(Image.open(r"\\131.188.117.96\biophysDS\dboehringer\Platte_3\Twitching-Experiments\Confocal-Experiments\2020-02-12-LuB1-Timelapse\stack 1\\pos002\\Pos002_S001_t314_z6_ch00.tif").convert("L"))
    db = clickpoints.DataFile(r"\\131.188.117.96\biophysDS\dboehringer\Platte_3\Migration-and-fiberorientation\Evaluation-Andi-David\Fiber orientation\2 - polar trafo correlation\testing_orientation/mask_spheroid_david.cdb")
    mask = db.getMask(frame=0).data.astype(bool)
    db.db.close()

    center = regionprops(mask.astype(int))[0].centroid
    polar_array, max_radius, center = polar_coordinate_transform(im, center, radius_res=2000, angle_res=2000)
    #ax_factor = r_factor * pixel_size  # y_axis[pixel]*ax_factor --> y_axis[µm]
    #plt.figure(); plt.imshow(im)
    #plt.figure(); plt.imshow(polar_array)

    # correlation coefficient
    window_size = 30
        try:
            analysis_function(frame, parameter_dict, res_dict, db=db, db_info=db_info, masks=masks, **kwargs)
        except Exception as e:
            if type(e) in (Mask_Error, FileNotFoundError, FindingBorderError, ShapeMismatchError):
                print(e)
            else:
                raise e
    return db_info, masks, res_dict


### code to work on clickpoints outside of the addon
if __name__ == "__main__":
    ## setting up necessary parameters
    # db = clickpoints.DataFile("/home/user/Desktop/Monolayers_new_images/monolayers_new_images/KO_DC1_tomatoshift/database.cdb", "r")
    db = clickpoints.DataFile("/home/andy/test_data_pyTFM/KOshift/database.cdb", "r")
    parameter_dict = default_parameters
    res_dict = defaultdict(lambda: defaultdict(list))
    db_info, all_frames = get_db_info_for_analysis(db)

    # db_info, masks, res_dict = apply_to_frames(db, parameter_dict, deformation, res_dict, frames="12",
    #                                            db_info=db_info, masks=None)
    #db_info, masks, res_dict = apply_to_frames(db, parameter_dict, general_properties, res_dict=res_dict,
    #                                           frames=all_frames, db_info=db_info, masks=None)
    db_info, masks, res_dict = apply_to_frames(db, parameter_dict, FEM_full_analysis, res_dict=res_dict,
                                               frames="01", db_info=db_info, masks=None)

    # parameter_dict["filter_type"] = None
    # default_fig_parameters["file_names"]["traction"] = "t_none.png"
    # db_info, masks, res_dict = apply_to_frames(db, parameter_dict, traction_force, res_dict, frames="12",
import clickpoints

db = clickpoints.DataFile("./Developement/Databases/click0.cdb")

Y, X = db.getImage(frame=0).getShape()

for marker in db.getMarkers():
    marker.x = X - marker.x
    marker.y = Y - marker.y
    # db.setMarker(id=marker.id, x=x, y=y)
    print(marker)
    marker.save()
from PenguTrack.Filters import KalmanFilter
from PenguTrack.Filters import MultiFilter
from PenguTrack.Models import VariableSpeed
# from PenguTrack.Detectors import ViBeSegmentation
from PenguTrack.Detectors import SiAdViBeSegmentation
# from PenguTrack.Detectors import BlobDetector
from PenguTrack.Detectors import AreaDetector
from PenguTrack.Detectors import BlobSegmentation
# from PenguTrack.Detectors import Measurement as Pengu_Meas
import scipy.stats as ss

#resource.setrlimit(resource.RLIMIT_AS, (12000 * 1048576L, -1L))

# Connect to database
db = clickpoints.DataFile("C:\\Users\\User\\Desktop\\241.cdb")
start_frame = 0

# Initialise PenguTrack
object_size = 2  # Object diameter (smallest)
penguin_height = 0.462  # 0.575
penguin_width = 0.21
object_number = 100  # Number of Objects in First Track

# Initialize physical model as 2d variable speed model with 0.5 Hz frame-rate
model = VariableSpeed(1, 1, dim=2, timeconst=0.5)

uncertainty = 8 * object_size
X = np.zeros(4).T  # Initial Value for Position
Q = np.diag([uncertainty, uncertainty])  # Prediction uncertainty
R = np.diag([uncertainty * 2, uncertainty * 2])  # Measurement uncertainty
Tracker = HungarianTracker(KalmanFilter, model, np.diag(Q), np.diag(R),
                           meas_dist=Meas_Dist, state_dist=State_Dist)
Tracker.LogProbabilityThreshold = log_prob_threshold

i_x = Tracker.Model.Measured_Variables.index("PositionX")
i_y = Tracker.Model.Measured_Variables.index("PositionY")

# Clickpoints DataBase
import clickpoints

# Open ClickPoints Database
db = clickpoints.DataFile("./ExampleData/cell_data.cdb")

# Define ClickPoints Marker
track_marker_type = db.setMarkerType(name="Track_Marker", color="#00FF00", mode=db.TYPE_Track)

# Delete Old Tracks
db.deleteMarkers(type=track_marker_type)
db.deleteTracks(type=track_marker_type)

# Start Iteration over Images
print('Starting Iteration')
images = db.getImageIterator()
for image in images:
    i = image.sort_index
    # Prediction step, without applied control (vector of zeros)
# You should have received a copy of the GNU General Public License
# along with ClickPoints. If not, see <http://www.gnu.org/licenses/>

from __future__ import print_function
import clickpoints
import os


# to get query results as dictionaries
def dict_factory(cursor, row):
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d


# create a temporary ClickPoints table
db = clickpoints.DataFile("tmp.cdb", "w")
# enable dictionaries as query results
db.db.connection().row_factory = dict_factory

# open schema.sql
with open("schema.sql", "w") as fp:
    # get all entries in the database table schema
    for row in db.db.execute_sql("SELECT * FROM sqlite_master").fetchall():
        # write the sql commands to the file
        fp.write(row["sql"] + ";\n")

    # query and write the version of the database
    row = db.db.execute_sql("SELECT * FROM meta WHERE key='version'").fetchone()
    fp.write("INSERT OR REPLACE INTO meta (id,key,value) VALUES "
             "((SELECT id FROM meta WHERE key='version'),'version',%s);\n" % row["value"])

# close the database
db.db.close()
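# Hedged usage sketch of the dict_factory above: once installed as the row factory,
# queries return dictionaries keyed by column name instead of plain tuples
# (run this before the database is closed).
row = db.db.execute_sql("SELECT key, value FROM meta WHERE key='version'").fetchone()
print(row["key"], row["value"])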
        frame=frame_number, x=line[1], y=line[2],
        width=line[4] / (config["pixel_size"] * 1e6),
        height=line[5] / (config["pixel_size"] * 1e6),
        angle=line[6],
        text=f"{line[3]}\n{line[7]}\n{line[8]}\n{line[9]}",
        type=ellipse_type)


video_file = getInputFile()
data_file = video_file.replace(".avi", "_result.txt").replace(".tif", "_result.txt")
cdb_file = video_file.replace(".avi", ".cdb").replace(".tif", ".cdb")

name_ex = os.path.basename(video_file)
filename_base, file_extension = os.path.splitext(name_ex)
output_path = os.path.dirname(video_file)
configfile = output_path + r'/' + filename_base + '_config.txt'

config = getConfig(configfile)

# create a new clickpoints database
db = clickpoints.DataFile(cdb_file, "w")
# add the video to clickpoints
addVideoToClickpoints(video_file, db)
# add the ellipses from the results data
addEllipses(data_file, db, video_file)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import my_plot

sn.set_color_codes()
my_plot.set_style("white_clickpoints")

import clickpoints
from PenguTrack.Detectors import SiAdViBeSegmentation

db_start = clickpoints.DataFile("/home/alex/Masterarbeit/Data/Adelies/DataBases/252_GT_Detections.cdb")
images = db_start.getImageIterator()

# collect the first valid frames to initialize the segmentation with their median
init_buffer = []
for i in range(2):
    while True:
        img = next(images).data
        if img is not None:
            # print("Got img from cam")
            init_buffer.append(img)
            # print(init_buffer[-1].shape)
            # print(init_buffer[-1].dtype)
            break
init = np.array(np.median(init_buffer, axis=0))

# Load horizon-markers
horizont_type = db_start.getMarkerType(name="Horizon")
def spheroid_analysis_with_fl_core(inputfolder_path, save_images, folder_selector, analyze,
                                   magnification, pixel_size, outputfolder_mode, cdb):
    # finding all fl-images
    fl_images = collect_files(inputfolder_path, selector_path="SphInv", selectors_file=["rep", "Fluo"])
    # finding all bf images at t=0
    bf_images = collect_files(inputfolder_path, selector_path="SphForce", selectors_file=["rep0000", "modeBF"])

    if outputfolder_mode == "mode1":
        # creates the output folder by replacing a folder "Raw Data" with "Analyzed_Data",
        # copying the deeper folder structure
        outputfolder_path = re.sub("Raw Data|Raw_data", "Analyzed_Data", inputfolder_path, flags=re.I)
        outputfolder_path_profile = os.path.join(outputfolder_path, "Invasion_Profiles")
        outputfolder_path_mean = os.path.join(outputfolder_path, "Mean_images")
    if outputfolder_mode == "mode2":
        outputfolder_path = os.path.join(inputfolder_path, "Analyzed_Data")
        outputfolder_path_profile = os.path.join(outputfolder_path, "Invasion_Profiles")
        outputfolder_path_mean = os.path.join(outputfolder_path, "Mean_images")

    if save_images:  # creating output folders if they don't exist already
        createFolder(outputfolder_path_profile)
    if analyze:
        createFolder(outputfolder_path_mean)
        # setting up tab delimited file
        with open(os.path.join(outputfolder_path, 'invasion_analysis.txt'), 'w') as f:
            f.write("input_path\twell\tmode\tpos\tradius\td/2\tlambda\tradius of constant cell density" + "\n")

    # assumption (px_um was undefined in the source): camera pixel size divided by
    # the magnification gives µm per pixel in the sample plane
    px_um = pixel_size / magnification

    for key1, value1 in fl_images.items():
        for key2, value2 in fl_images[key1].items():
            for key3, value3 in fl_images[key1][key2].items():
                files_fl = value3
                print("using", files_fl[0])
                im_list = [plt.imread(file) for file in files_fl]
                # stacking all images; dimensions fixed to 3, otherwise problems with single images
                stack = np.array(im_list, ndmin=3)
                mean_img = np.mean(stack, axis=0)  # mean blending of images
                img_16bit = mean_img.astype("uint16")
                meta_info_dict = get_meta_info2(files_fl, well=key2, pos=key3)
                output_filename_path = os.path.join(outputfolder_path_mean,
                                                    'Mean_Blend_' + meta_info_dict["date"] + "_" +
                                                    meta_info_dict["well"] + "_" +
                                                    meta_info_dict["pos"] + ".tif")
                if save_images:  # saving image
                    im = Image.fromarray(img_16bit)
                    im.save(output_filename_path)

                if analyze:  # performing analysis on mean blended images
                    res = analyze_profiles(img_fl=img_16bit, px_um=px_um,
                                           Mic=meta_info_dict["Mic"], nuc_size=nuc_size)
                    p, rad, inv_front, blob, mask, dens, dt, img, img1 = res
                    plotting_invasion_profiles1(res, meta_info_dict, outputfolder_path_profile,
                                                output_filename_path, magnification, pixel_size)
                    #plotting_segementation(res, meta_info_dict, outputfolder_path_profile, output_filename_path)

                    # writing information to text file
                    output_text = [meta_info_dict["file_path"], meta_info_dict["well"],
                                   meta_info_dict["mode"], meta_info_dict["pos"],
                                   str(rad.round(2)), str(p[1].round(2)),
                                   str(inv_front.round(2)), str(p[0].round(2))]
                    with open(os.path.join(outputfolder_path, 'invasion_analysis.txt'), 'a') as f:
                        f.write("\t".join(output_text) + "\n")

                if cdb and analyze and save_images:
                    path_file = os.path.split(output_filename_path)
                    db = clickpoints.DataFile(os.path.join(outputfolder_path_profile,
                                                           path_file[1][:-4] + ".cdb"), "w")
                    db.setImage(output_filename_path, frame=0)
                    db.setMaskType("mask_cells", color="#1fff00", index=1)
                    db.setMaskType("mask_blob", color="#ff0f1b", index=2)
                    cdb_mask = copy.deepcopy(mask) * 1
                    cdb_mask[blob] = 2
                    db.setMask(image=db.getImages()[0], data=np.array(cdb_mask, dtype="uint8"))
                    db.db.close()
def __init__(self, database, command=None, name="", database_class=None, icon=None):
    # initialize the Widget base class
    QtWidgets.QWidget.__init__(self)

    # initialize the command class to communicate with ClickPoints
    self.cp = Command(command, self)

    # get the database instance, either it is already a database object or a filename
    if isinstance(database, str):
        # if we have a filename, open the file with the provided database class type or the default type
        if database_class:
            self.db = database_class(database)
        else:
            self.db = clickpoints.DataFile(database)
    else:
        # store the database object
        self.db = database
        # if the object should have a different class, convert it
        if database_class is not None:
            # store some pointers to the options
            _options = self.db._options
            _options_by_key = self.db._options_by_key
            # initiate a new database class instance with the new class type
            self.db = database_class(self.db.db.database)
            # and put the options pointers back in place
            self.db._options = _options
            self.db._options_by_key = _options_by_key

    # remember the add-on name
    self.addon_name = name
    # create an option category for the add-on
    self._options_category = "Addon - " + name
    self._option_widgets = {}
    self.db._last_category = self._options_category

    # set the icon for the add-on, if provided
    if icon is not None:
        self.setWindowIcon(icon)

    # wrap the run function, so that it automatically updates the current state of the add-on
    # (for the button state in ClickPoints)
    function = self.run
    if not asyncio.iscoroutinefunction(self.run):
        # overload two matplotlib functions to help use them from the run function from a different thread
        plt.show = show
        plt.figure = figure

        def run_wrapper(*args, **kwargs):
            self.run_started()
            try:
                return function(*args, **kwargs)
            finally:
                self.run_stopped()
        self.run = run_wrapper
    else:
        async def run_wrapper(*args, **kwargs):
            self.run_started()
            try:
                return await function(*args, **kwargs)
            finally:
                self.run_stopped()
        self.run = run_wrapper
        self.run_threaded = self.run_async

    self._input_widgets = []

    # connect the status changed signal (to be able to change the status from another thread)
    self._change_status.connect(self.cp.setStatus)
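# Hedged sketch, not from the source: how an add-on built on this base class is
# typically structured. The class name, marker name, and print body are made-up
# illustration values; self.db and self.cp are provided by the __init__ above.
import clickpoints


class MyAddon(clickpoints.Addon):
    def __init__(self, *args, **kwargs):
        clickpoints.Addon.__init__(self, *args, **kwargs)
        self.marker_type = self.db.setMarkerType("my_marker", "#00FF00")

    def run(self, start_frame=0):
        # executed in a separate thread through the run_wrapper defined above
        for image in self.db.getImageIterator(start_frame=start_frame):
            print(image.sort_index)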
from qtpy import QtGui, QtCore, QtWidgets
from qimage2ndarray import array2qimage
import qtawesome as qta

sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "includes", "qextendedgraphicsview"))
from QExtendedGraphicsView import QExtendedGraphicsView

__icon__ = "fa.tag"

import clickpoints

# Connect to database
start_frame, database, port = clickpoints.GetCommandLineArgs()
db = clickpoints.DataFile(database)
com = clickpoints.Commands(port)

# parameter
marker_type_name = "marker"
marker_type_class0 = "no-bead"
marker_type_class1 = "bead"

view_size = 30
view_o1 = int(view_size / 2)
view_o2 = int(view_size / 2 + 0.5)

# Check if the marker type is present
for marker_type in [marker_type_name, marker_type_class0, marker_type_class1]:
    if not db.getMarkerType(marker_type):
        print("ERROR: Marker type %s does not exist" % marker_type)
def exp_border_real_data():
    out_folder = "/home/user/Desktop/backup_from_harddrive/data_traction_force_microscopy/ev_paper_rd_expansion_fs3"
    createFolder(out_folder)
    border_ex_test = list(range(0, 100, 2))
    f_type = "non-circular"
    young = 1
    h = 100
    pixelsize = 1
    filter = "gaussian"

    # retrieving clickpoints mask and traction forces
    folder = "/home/user/Desktop/backup_from_harddrive/data_traction_force_microscopy/WT_vs_KO_images/KOshift/"
    db = clickpoints.DataFile(os.path.join(folder, "database.cdb"), "r")
    mask = db.getMask(frame=2).data == 3
    db.db.close()

    u, v = np.load(os.path.join(out_folder, "u.npy")), np.load(os.path.join(out_folder, "v.npy"))
    fx_f, fy_f = traction_wrapper(u, v, pixelsize, h, young, mask=mask,
                                  filter="gaussian", fs=3)  # this filtersize is equal to 3*0.85 ~3.5 µm for real data

    mask = interpolation(mask, dims=fx_f.shape, min_cell_size=100)
    mask = binary_fill_holes(mask)
    np.save(os.path.join(out_folder, "mask.npy"), mask)

    stress_tensors, mean_normal_list, mask_exp_list = exp_border(exp_range=border_ex_test, fx_f=fx_f, fy_f=fy_f,
                                                                 mask=mask, out_folder=out_folder,
                                                                 method="binary_dilation")
    stress_tensor_b = stress_tensors[0]
    max_dict = get_max_values(fx_f=fx_f, fy_f=fy_f, stress_tensor_b=stress_tensor_b,
                              exp_test=len(border_ex_test) > 0, mean_normal_list=mean_normal_list)

    # getting comparison scalar fields
    mask_fm = standard_measures(mask=mask, mean_normal_list=mean_normal_list, stress_tensor_b=stress_tensor_b)
    save_arr = np.array([np.round(np.array(avg_normal_stress_be), 5), np.array(border_ex_test)]).T
    np.savetxt(os.path.join(out_folder, "avg_norm_stress.txt"), save_arr, fmt="%.5f", delimiter=",")

    mask_exp = binary_dilation(mask, iterations=15)
    scalar_comaprisons = full_field_comparision()  # r gives mostly the spatial distribution
    with suppress(KeyError):
        del scalar_comaprisons["forces"]

    plot_types = ["test for border expansion"]
    plot_types.extend(["forces_forward", "correlation", "test for border expansion"])
    # plot_types = ["forces_backward", "full_stress_tensor_backward"]
    general_display(plot_types=plot_types, mask=mask, pixelsize=pixelsize, max_dict=max_dict, f_type=f_type,
                    mean_normal_list=mean_normal_list, mask_exp_list=mask_exp_list, out_folder=out_folder,
                    fx_f=fx_f, fy_f=fy_f, mask_exp=mask_exp, scalar_comaprisons=scalar_comaprisons,
                    border_ex_test=border_ex_test, plot_gt_exp=False)
    plt.close("all")