def heuristic_biofilm_seg_unscaled_lsm700_63x(imgpath):
    matpath = os.path.splitext(imgpath.replace("_cr", ""))[0] + ".mat"
    segtpath = file_finder.get_labeled_path(matpath, "segmented")
    maskpath = file_finder.get_labeled_path(matpath, "biofilmmask")
    image = skimage.io.imread(imgpath)
    mask = slice63x.get_biofilm_mask(image)
    #scipy.io.savemat(distpath, {"image": distmask})
    scipy.io.savemat(segtpath, {"image": mask})
    scipy.io.savemat(maskpath, {"image": mask})
    botmask = slice63x.fill_biofilm_mask(mask, "bottom")
    edgepath = file_finder.get_labeled_path(matpath, "edgemask")
    scipy.io.savemat(edgepath, {"image": botmask})
def heuristic_bottommask(imgpath):
    matpath = os.path.splitext(imgpath)[0] + ".mat"
    maskpath = file_finder.get_labeled_path(matpath, "biofilmmask")
    distpath = file_finder.get_labeled_path(matpath, "bottommask")
    try:
        mask = scipy.io.loadmat(maskpath)["image"]
    except OSError as e:
        print("you probably need to generate a mask file first")
        raise e
    # Flip vertically so the bottom edge faces up, reuse the top-mask heuristic,
    # then flip the result back.
    flipped = np.flipud(mask)
    distmask = slice10x.distance_top_mask_flat.get_top_mask(flipped)
    unflipped = np.flipud(distmask)
    scipy.io.savemat(distpath, {"image": unflipped})
def heuristic_distmask(imgpath):
    imgpath = imgpath.replace("_cr.tiff", ".tiff")
    matpath = os.path.splitext(imgpath)[0] + ".mat"
    maskpath = file_finder.get_labeled_path(matpath, "biofilmmask")
    distpath = file_finder.get_labeled_path(matpath, "edgemask")
    try:
        mask = scipy.io.loadmat(maskpath)["image"]
    except OSError as e:
        print("you probably need to generate a mask file first")
        raise e
    distmask = slice10x.distance_top_mask_flat.get_top_mask(mask)
    scipy.io.savemat(distpath, {"image": distmask})
    # Also write a viewable 8-bit tiff next to the .mat file.
    skimage.io.imsave(distpath.replace(".mat", ".tiff"),
                      distmask.astype(np.uint8) * 255)
def heuristic_biofilmmask(imgpath):
    matpath = os.path.splitext(imgpath)[0] + ".mat"
    segtpath = file_finder.get_labeled_path(matpath, "segmented")
    maskpath = file_finder.get_labeled_path(matpath, "biofilmmask")
    try:
        mask = scipy.io.loadmat(segtpath)["image"].astype(bool)
    except (OSError, TypeError) as e:
        print("you probably need to generate a segmentation file first")
        raise e
    # Remove speckle: drop connected components smaller than 64 pixels.
    mask = skimage.morphology.remove_small_objects(mask, min_size=64, connectivity=1)
    scipy.io.savemat(maskpath, {"image": mask})
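# The heuristics above chain through the .mat files they write: the 63x segmenter
# saves "segmented"/"biofilmmask", heuristic_biofilmmask cleans the segmentation
# into a biofilm mask, and the edge/bottom masks are derived from that mask.
# A hypothetical call order (the path is a placeholder, not from this repo):
#
#     heuristic_biofilm_seg_unscaled_lsm700_63x("data/run1/img01_cr.tiff")
#     heuristic_biofilmmask("data/run1/img01.tiff")
#     heuristic_distmask("data/run1/img01_cr.tiff")
#     heuristic_bottommask("data/run1/img01.tiff")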
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--make_backup', action="store_true")
    parser.add_argument('--tiff_file', "-f", type=str)
    parser.add_argument('--make_new_bfmask', action="store_true")
    parser.add_argument('--smooth_a_mask', type=str, default=None)
    parser.add_argument('--mask_name', default="biofilmmask")
    #parser.add_argument("--use_old_edgemask", action="store_true")
    #parser.add_argument('--segmentation_dir', type=str, default="segmented_laphat1")
    inputargs = parser.parse_args()

    image_name = os.path.splitext(os.path.basename(inputargs.tiff_file))[0]
    base_dir = os.path.dirname(inputargs.tiff_file)
    maskname = "_" + inputargs.mask_name

    if inputargs.make_backup:
        # Back up the current distance map and corrected biofilm mask before
        # anything regenerates them.
        renames = [
            #("_edgemask.mat", "_expandededgemask.mat"),
            #("_edgemask.tiff", "_expandededgemask.tiff"),
            ("_cr_distmap.mat", "_cr_olddistmap.mat"),
            ("_cr_biofilmmask.mat", "_cr_bfmaskcorrected.mat"),
            #("_biofilmmask.tiff", "_expandedbfmask.tiff"),
        ]
        for orig, newn in renames:
            shutil.move(os.path.join(base_dir, image_name, image_name + orig),
                        os.path.join(base_dir, image_name, image_name + newn))

    if inputargs.make_new_bfmask:
        image_path = os.path.join(base_dir, image_name, image_name + "_cr.tiff")
        outname = os.path.join(base_dir, image_name, image_name + maskname)
        im = skimage.io.imread(image_path)
        mask = slice10x.basic_segment(im)
        scipy.io.savemat(outname + ".mat", {"image": mask})
        skimage.io.imsave(outname + ".tiff", mask.astype(np.uint8) * 255)

    if inputargs.smooth_a_mask is not None:
        image_path = os.path.join(base_dir, image_name, image_name + "_cr.tiff")
        maskname = os.path.join(base_dir, image_name, image_name + ".mat")
        inmaskname = file_finder.get_labeled_path(maskname, inputargs.smooth_a_mask)
        outpath = file_finder.get_labeled_path(maskname, inputargs.mask_name)
        # load the old mask, smooth it, and save it under the output label
        mask = scipy.io.loadmat(inmaskname)["image"]
        smask = slice10x.segment.smooth_segmentation(mask)
        scipy.io.savemat(outpath, {"image": smask})
def load_img_mask_pair(file, maskname, remove_cr=False):
    img = skimage.io.imread(file)
    matpath = os.path.splitext(file)[0] + ".mat"
    if remove_cr:
        matpath = matpath.replace("_cr.", ".")
    maskpath = file_finder.get_labeled_path(matpath, maskname)
    try:
        mask = scipy.io.loadmat(maskpath)["image"]
    except TypeError as e:
        # Convert the low-level error into a clearer FileNotFoundError.
        raise FileNotFoundError("could not find " + maskpath) from e
    return img, mask
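# Minimal usage sketch for load_img_mask_pair (the file name is a placeholder):
#
#     img, mask = load_img_mask_pair("data/run1/img01_cr.tiff", "biofilmmask",
#                                    remove_cr=True)
#
# img is the raw tiff array; mask is whatever file_finder resolves as the
# "biofilmmask"-labeled .mat for that image, loaded from its "image" key.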
def downscale(imgpath, reductions=None):
    print("Downscaling {0}".format(imgpath))
    im = skimage.io.imread(imgpath)
    typ = np.iinfo(im.dtype)
    orig_size = im.shape
    print("Original size", orig_size)
    if reductions is None:
        # Pick the smallest integer factor that brings the longest side under 2048 px.
        target_size = 2048
        reductions = int(np.ceil(max(orig_size) / target_size))
    print("Reducing by {0}".format(reductions))
    imf = im.astype(np.float64) / typ.max
    small = skimage.transform.pyramid_reduce(imf, downscale=reductions)
    print("New size {0}".format(small.shape))
    #print("expected size {0}, {1}", orig_size[0]/reductions, orig_size[1]/reductions)
    small = (small * typ.max).astype(im.dtype) * 2
    skimage.io.imsave(
        file_finder.get_labeled_path(imgpath, "reduced{0}".format(reductions)),
        small)
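# Rough example of how downscale picks its factor (values are illustrative):
# for a 6000x4000 image, reductions defaults to ceil(6000 / 2048) = 3, the image
# is pyramid-reduced by 3x, converted back to its original integer dtype, and
# written to the "reduced3"-labeled path from file_finder.get_labeled_path.
#
#     downscale("data/run1/img01_cr.tiff")      # factor chosen automatically
#     downscale("data/run1/img01_cr.tiff", 4)   # or force a factor of 4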
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--files", nargs="+")
    parser.add_argument("--model", type=str)
    parser.add_argument("--mask_name", type=str)
    parser.add_argument("--remove_cr_from_mat_path", action="store_true", default=False)
    pa = parser.parse_args()

    model = joblib.load(pa.model)
    for f in pa.files:
        im = skimage.io.imread(f)
        improc = segment_rf.get_features(im)
        segment = model.predict(improc).reshape(im.shape)
        matpath = os.path.splitext(f)[0] + ".mat"
        if pa.remove_cr_from_mat_path:
            matpath = matpath.replace("_cr.", ".")
        maskpath = file_finder.get_labeled_path(matpath, pa.mask_name)
        scipy.io.savemat(maskpath, {"image": segment})
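# This main() applies a pre-trained pixel classifier (loaded with joblib, features
# from segment_rf.get_features) to each image and saves the prediction as a
# labeled .mat mask. A hypothetical invocation, with placeholder file names:
#
#     python predict_masks.py -f data/run1/img01_cr.tiff \
#         --model rf_model.joblib --mask_name rfmask --remove_cr_from_mat_path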
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--files', nargs='+')
    parser.add_argument('-c', '--cell_width_pixels', type=int, default=10)
    parser.add_argument('--subtract_values_file', type=str)
    parser.add_argument('--subtract_red', nargs='+', type=str)
    parser.add_argument('--subtract_green', nargs='+', type=str)
    parser.add_argument('--subtract_blue', nargs='+', type=str)
    pa = parser.parse_args()

    if pa.files is None:
        parser.print_usage()
        print("no file specified")
        sys.exit(2)

    bg_autofluor = get_subtraction_numbers(pa)
    bg_color_subtract_seqs = get_color_subtraction_sequences(pa)

    for lsm_file in pa.files:
        base_dir = os.path.dirname(lsm_file)
        base_file = os.path.basename(lsm_file)
        base_no_ext = os.path.splitext(base_file)[0]

        # Collect the cropped red-channel tiles (..._cr_i1jN.tif); fall back to
        # the single uncropped image if the file was never split into tiles.
        red_files = glob.glob(os.path.join(base_dir, base_no_ext,
                                           base_no_ext + "_cr_i*.tif"))
        files_nums = [(int(re.match(r".*cr_i1j(\d+).tif", f).groups(0)[0]), f)
                      for f in red_files]
        if len(red_files) == 0:
            files_nums = [(0, os.path.join(base_dir, base_no_ext,
                                           base_no_ext + "_cr.tiff"))]
        files_nums = sorted(files_nums)
        #print(red_files)

        if DEBUG:
            try:
                os.mkdir(os.path.join(base_dir, base_no_ext, debugdir))
            except FileExistsError:
                pass

        base_fn = os.path.join(base_dir, base_no_ext, base_no_ext + ".mat")
        distmap_path = file_finder.get_labeled_path(base_fn, "distmap")
        distmap = scipy.io.loadmat(distmap_path)["distmap_masked"]
        bigmask = scipy.io.loadmat(
            file_finder.get_labeled_path(base_fn, "biofilmmask"))["image"].astype(bool)

        def laphat_segmentation(img):  # , cell_width_pixels):
            return laphat_segment_v1(img,
                                     cell_width_pixels=pa.cell_width_pixels,
                                     small_cells=(pa.cell_width_pixels < 6))

        def get_segmentation(x):
            return get_cached_segmentation(laphat_segmentation, x)

        try:
            with open(os.path.join(base_dir, base_no_ext, base_no_ext + ".json")) as jsfp:
                file_info = json.load(jsfp)
        except FileNotFoundError:
            print("didn't find a split json file, pretending it's a single image")
            file_info = {}

        big_image_col_shift = 0
        for filenum, red_f in files_nums:
            print("processing {0}".format(red_f))
            big_image_col_shift = process_file(red_f, distmap, bigmask,
                                               get_segmentation, filenum, file_info,
                                               big_image_col_shift, bg_autofluor,
                                               bg_color_subtract_seqs)
print("no file or directory specified") sys.exit(2) # for red_f in pa.files: for lsm_file in pa.files: base_dir = os.path.dirname(lsm_file) base_file = os.path.basename(lsm_file) base_no_ext = os.path.splitext(base_file)[0] red_files = glob.glob( os.path.join(base_dir, base_no_ext, base_no_ext + "_cr_i*.tif")) files_nums = [(int(re.match(r".*cr_i1j(\d+).tif", f).groups(0)[0]), f) for f in red_files] files_nums = sorted(files_nums) base_fn = os.path.join(base_dir, base_no_ext, base_no_ext + ".mat") distmap_path = file_finder.get_labeled_path(base_fn, "distmap") distmap = scipy.io.loadmat(distmap_path)["distmap_masked"] bigmask = scipy.io.loadmat( file_finder.get_labeled_path( base_fn, "biofilmmask"))["image"].astype(np.bool) with open(os.path.join(base_dir, base_no_ext, base_no_ext + ".json")) as jsfp: file_info = json.load(jsfp) big_image_col_shift = 0 for filenum, red_f in files_nums: start = time.time() print("Processing {0}".format(red_f))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--files', nargs='+')
    parser.add_argument('--sample_freq', type=float, default=0.25)
    parser.add_argument('--slice_width', type=float, default=0.5)
    parser.add_argument('--bg_subtract')
    parser.add_argument('--subtractions', default="all")
    pa = parser.parse_args()

    if pa.subtractions == "bg_only":
        subtractions = subtractions_bg_only
    elif pa.subtractions == "all":
        subtractions = subtractions_all
    else:  # pa.subtractions == "none"
        subtractions = subtractions_none

    color_chans = [ch + "_" + des
                   for des, dt in subtractions.items()
                   for ch in dt.keys()]
    color_chans_stats = [cc + "_" + s for cc in color_chans for s, _ in stats]
    names = fixed_heads + color_chans_stats

    try:
        with open(pa.bg_subtract) as bgjs:
            bg_subvals = json.load(bgjs)
        bg_subvals["red_raw"] = 0
        bg_subvals["green_raw"] = 0
    except TypeError:
        # --bg_subtract was not given: open(None) raises TypeError, not FileNotFoundError.
        bg_subvals = {"red_raw": 0, "green_raw": 0}

    datarow = init_dict(names)

    for num, f in enumerate(pa.files):
        print("{0} of {1}: {2}".format(num, len(pa.files), f))
        ext = os.path.splitext(f)[1]
        imgname = os.path.splitext(os.path.basename(f))[0]
        dirname = os.path.dirname(f)
        if ext == ".lsm":
            # Stack the pre-split red and green channel tiffs back into one image.
            tiffpath = os.path.join(dirname, imgname, imgname)
            img = np.dstack([skimage.io.imread(tiffpath + chan + ".tiff")
                             for chan in ["_cr", "_cg"]])
            sptsv = f.replace(".lsm", ".tsv")
        else:
            sptsv = f.replace(".tiff", ".tsv")
            img = skimage.io.imread(f)

        with open(sptsv, "w") as csvf:
            writer = csv.DictWriter(csvf, fieldnames=names, delimiter="\t")
            writer.writeheader()
            #mask = images_to_data.get_20_mask(f)
            #th_files = insert_dir_in_path(f, spore_segdir).replace(".tiff", "_T{*).tiff")
            #sptsv = f.replace(".tiff", "_T{0}.tsv").format(pa.threshold)
            channel_imgs = do_subtractions(img, subtractions, bg_subvals)

            base_dir = os.path.dirname(f)
            base_file = os.path.basename(f)
            base_no_ext = os.path.splitext(base_file)[0]
            base_fn = os.path.join(base_dir, base_no_ext, base_no_ext + ".mat")
            distmap_path = file_finder.get_labeled_path(base_fn, "distmap")
            distmap = scipy.io.loadmat(distmap_path)["distmap_masked"]
            mask = scipy.io.loadmat(
                file_finder.get_labeled_path(base_fn, "biofilmmask"))["image"].astype(bool)

            # Sample overlapping depth slices: centers every sample_freq units,
            # each slice slice_width wide, writing per-channel statistics per slice.
            dmax = distmap.max()
            hw = pa.slice_width / 2
            centers = np.arange(hw, dmax - hw, pa.sample_freq)
            for cdist in centers:
                dsr, dsc = np.where((distmap > (cdist - hw)) &
                                    (distmap <= (cdist + hw)) & mask)
                datarow["cdist"] = cdist
                datarow["pixels"] = len(dsr)
                for chan in channel_imgs.keys():
                    for stat, stat_func in stats:
                        datarow[chan + "_" + stat] = stat_func(
                            np.ravel(channel_imgs[chan][dsr, dsc]))
                writer.writerow(datarow)
        gc.collect()
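# Worked example of the depth sampling in the loop above, using the defaults
# (sample_freq=0.25, slice_width=0.5, so hw=0.25): for a distance map whose
# maximum depth is 3.0, centers = np.arange(0.25, 2.75, 0.25) gives slice centers
# 0.25, 0.50, ..., 2.50; each slice keeps pixels with distmap in
# (cdist - 0.25, cdist + 0.25], so neighbouring slices overlap by half a slice.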