import os

import dask.array as da
from dask_image.imread import imread


def imread_tiff(directory: str, num_folder: int) -> da.Array:
    """Navigate to a directory of .tif files and return the image at index ``num_folder`` as a dask array."""
    dir_files = [directory + '\\' + file for file in os.listdir(directory)]
    tif_files = dir_files[num_folder:]
    sample = imread(tif_files[0])
    return sample
def imread_tiff(directory, num_folder):
    if not isinstance(directory, str) or not isinstance(num_folder, int):
        raise ValueError('directory must be a str and num_folder an int')
    tiff_files = [directory + '\\' + file for file in os.listdir(directory)]
    tiff_files_trimmed = tiff_files[num_folder:]
    sample = imread(tiff_files_trimmed[0])
    print(sample.shape)
    return sample
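# A minimal usage sketch for imread_tiff; the Windows folder path below is a
# hypothetical placeholder. dask_image.imread also accepts a glob pattern
# directly, which lazily stacks every matching .tif into one dask array
# instead of reading a single sample file.
import os

from dask_image.imread import imread

sample = imread_tiff(r'C:\data\experiment1', 0)  # lazily reads the first .tif in the folder
full_stack = imread(os.path.join(r'C:\data\experiment1', '*.tif'))  # lazy (n, y, x) stack
print(sample.shape, full_stack.shape)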
def extract_polygonal_arena_coordinates(video_path: str):
    """Reads a random frame from the selected video, and opens an interactive
    GUI to let the user delineate the arena manually.

    Args:
        video_path: Path to the video file.

    Returns:
        np.ndarray: nx2 array containing the x-y coordinates of all n corners
            of the polygonal arena.
        int: Height of the video.
        int: Width of the video.
    """
    current_video = imread(video_path)
    current_frame = np.random.choice(current_video.shape[0])

    # Get and return the corners of the arena
    arena_corners = retrieve_corners_from_image(
        current_video[current_frame].compute())

    return arena_corners, current_video.shape[2], current_video.shape[1]
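# A minimal usage sketch, assuming retrieve_corners_from_image is available in
# the same module and that "arena.avi" is a placeholder video path.
arena_corners, video_h, video_w = extract_polygonal_arena_coordinates('arena.avi')
print(arena_corners.shape)  # (n_corners, 2) x-y coordinates of the clicked corners
print(video_h, video_w)     # frame dimensions reported by the function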
""" Dynamically load irregularly shapes images of ants and bees """ import numpy as np from dask_image.imread import imread from dask.cache import Cache from napari import Viewer, gui_qt cache = Cache(2e9) # Leverage two gigabytes of memory cache.register() base_name = 'data/kaggle-nuclei/fixes/stage1_train/*' images = imread(base_name + '/images/image_gray.tif') labels = imread(base_name + '/labels/label.tif') print(images.shape) with gui_qt(): # create an empty viewer viewer = Viewer() # add the images image_layer = viewer.add_image(images, name='nuceli', colormap='gray') labels_layer = viewer.add_labels(labels, name='labels', opacity=0.5)
import napari
from dask_image.imread import imread

stack = imread("./haveLabel_test/images_old/*.jpg")
stack2 = imread("./haveLabel_test/masks_old/*.jpg")
stack3 = imread(
    "./resultsThreshold/HarDMSEG/reconstructed_haveLabel_test/*.jpg")
stack4 = imread("./results/HarDMSEG/haveLabel_test/*.jpg")

with napari.gui_qt():
    viewer = napari.view_image(stack, name='Images')
    label_layer = viewer.add_image(stack2,
                                   name='True Labels',
                                   opacity=0.5,
                                   visible=False,
                                   gamma=100000)
    label_layer2 = viewer.add_image(stack3,
                                    name='Predicted Patch Labels',
                                    opacity=0.5,
                                    visible=False,
                                    gamma=100000)
    label_layer3 = viewer.add_image(stack4,
                                    name='Predicted Full Labels',
                                    opacity=0.5,
                                    visible=False,
                                    gamma=100000)
def simple_moviemaker(path):
    # pick the file-name pattern for the selected microscope
    if microscope == 'Jungfrau':
        pattern = re.compile(
            '^(?P<Timepoint>t[0-9]+)_.*_(?P<Movie_ID>XY[0-9]+)_.*.tif')
    if microscope == 'Eiger':
        #pattern = re.compile('^(?P<Classifier>.*)(?P<FOV>_[0-9])_.*(?P<Site>_s[0-9]+)_(?P<Timepoint>t[0-9]+).TIF')
        pattern = re.compile('^(?P<Movie_ID>.*)_(?P<Timepoint>t[0-9]+).TIF')
    if microscope == 'NIS':
        pattern = re.compile(
            '.*(?P<Timepoint>T[0-9]+)_(?P<Movie_ID>XY[0-9]+).*.tif')
    if microscope == 'micromanager':
        pattern = re.compile(
            '^(?P<Movie_ID>[0-9]{2})_(?P<Timepoint>[0-9]{5}).tiff')
        #pattern = re.compile('(?P<Movie_ID>.*)(?P<Timepoint>_t[0-9]+)')

    processed = []

    # generate the file list
    files = os.listdir(path)

    # check individual files
    for item in files:
        current_ID = None
        # select only tifs
        if '.tif' in item or '.TIF' in item or '.tiff' in item:
            # in case a channel was specified, select only files with that channel
            if channel is not None:
                if debugging == 'True':
                    print(channel)
                if channel in item:
                    # create an empty list to append to later
                    current_Movie = []
                    # extract ID and timepoint
                    try:
                        if microscope in ('Jungfrau', 'NIS', 'micromanager'):
                            Movie_ID, Timepoint = re.search(
                                pattern, item).group('Movie_ID', 'Timepoint')
                            current_ID = Movie_ID
                        if microscope == 'Eiger':
                            #Classifier, FOV, Site, Timepoint = re.search(pattern, item).group('Classifier', 'FOV', 'Site', 'Timepoint')
                            Movie_ID, Timepoint = re.search(
                                pattern, item).group('Movie_ID', 'Timepoint')
                            current_ID = Movie_ID
                            #current_ID = Classifier + FOV + Site
                            #Movie_ID = current_ID
                        # go to the next iteration of the loop if this ID was already processed
                        if current_ID in processed:
                            continue
                        print(current_ID)
                    # exception in case an item was found that can't be matched
                    except Exception:
                        print('{} does not match pattern'.format(item))
                        if debugging == 'True':
                            print(sys.exc_info())
                            print(pattern)
                        continue
                    # check if the movie has been processed already
                    if current_ID is not None:
                        # collect the files that belong to the current movie
                        for candidate in files:
                            try:
                                sanity_id, Timepoint = re.search(
                                    pattern, candidate).group(
                                        'Movie_ID', 'Timepoint')
                            except Exception:
                                continue
                            if current_ID == sanity_id and channel in candidate:
                                current_Movie.append(candidate)
                        # append the current ID to the list of processed movies
                        processed.append(current_ID)
                        # sort the current list
                        current_Movie = natsorted(current_Movie)
                        tifseries = []
                        if microscope != 'micromanager':
                            for i in current_Movie:
                                if Movie_ID + 'movie' not in i:
                                    img = Image.open(os.path.join(path, i))
                                    tifseries.append(img)
                                    if debugging == 'True':
                                        print(i)
                                        print(len(tifseries), ' open files')
                        if microscope == 'micromanager':
                            tifseries = [
                                imread(os.path.join(path, i))
                                for i in current_Movie
                            ]
                            stack = da.stack(tifseries)
                            stack = np.squeeze(stack)
                        createFolder(os.path.join(path, 'movies'))
                        Movie_ID = Movie_ID.replace(' ', '')
                        tifseriespath = os.path.join(
                            path, 'movies', Movie_ID + 'movie.tiff')
                        if microscope == 'micromanager':
                            pathsplit = path.split(sep='/')
                            tifseriespath1 = os.path.join(
                                path, 'movies',
                                pathsplit[len(pathsplit) - 2] + '_' +
                                Movie_ID + 'C1_movie.tiff')
                            tifseriespath2 = os.path.join(
                                path, 'movies',
                                pathsplit[len(pathsplit) - 2] + '_' +
                                Movie_ID + 'C2_movie.tiff')
                            try:
                                # save the two channels as separate movies
                                skimage.io.imsave(tifseriespath1,
                                                  stack[:, 0, :, :])
                                skimage.io.imsave(tifseriespath2,
                                                  stack[:, 1, :, :])
                                print('Movie saved as', tifseriespath)
                            except RuntimeError as e:
                                print(e)
                                continue
                        else:
                            # for the other microscopes, save the PIL series as one multipage tiff
                            tifseries[0].save(tifseriespath,
                                              save_all=True,
                                              append_images=tifseries[1:])
                            print('Movie saved as', tifseriespath)
                else:
                    print('Channel cannot be found')
    return processed
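# A minimal driver sketch for simple_moviemaker. The function reads the
# module-level settings `microscope`, `channel`, and `debugging`, so they must
# be defined before calling it; the values and the folder below are
# hypothetical placeholders, not part of the original script.
microscope = 'Eiger'   # one of 'Jungfrau', 'Eiger', 'NIS', 'micromanager'
channel = 'GFP'        # substring identifying the channel in the file names
debugging = 'False'    # the flag is compared against the string 'True'

processed_movies = simple_moviemaker('/data/experiment_01/')
print('assembled movies for IDs:', processed_movies)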
import napari
import numpy as np
import dask.array as da
import imageio
from dask import delayed
#from dask.cache import Cache
from dask_image.imread import imread
import time
import pims
import av

# cache = Cache(2e9)  # Leverage two gigabytes of memory
# cache.register()

folder = 'data-njs/anipose/hand-demo/2019-08-02/'
file = folder + 'videos-raw/2019-08-02-vid01-camA.MOV'
file = 'data-njs/whiskers/IMG_7988.mov'

movie = imread(file)
print(movie.shape)

with napari.gui_qt():
    viewer = napari.Viewer()
    viewer.add_image(movie)  # Attribute Error
from dask.cache import Cache
from dask_image.imread import imread
from napari import Viewer, gui_qt
from pandas import read_csv
from glob import glob

cache = Cache(2e9)  # Leverage two gigabytes of memory
cache.register()

base_dir = 'data-njs/ndcn/keiser/'
slide_name = 'NA4009-02_AB'
file_name = base_dir + 'annotations-train.csv'

image_paths = glob(base_dir + 'tiles/' + slide_name + '/*.jpg')
#image_paths.sort()
image_names = [p[len(base_dir) + 6:] for p in image_paths]

tiles = imread(base_dir + 'tiles/' + slide_name + '/*.jpg')
print(tiles.shape)

annotations = read_csv(file_name).set_index('imagename').loc[image_names]
annot_types = ['cored', 'diffuse', 'CAA', 'negative']
id = annotations[annot_types].values.argmax(axis=1)

border = 5
shapes = [[[i, -border, -border],
           [i, border + tiles.shape[1], border + tiles.shape[2]]]
          for i in range(len(tiles))]
base_cols = ['red', 'green', 'blue', 'white']
colors = [base_cols[i] for i in id]

with gui_qt():
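    # A hypothetical continuation of the truncated viewer block above: one way
    # to show the tiles with a colored bounding box per annotation class
    # (layer names and shape parameters are assumptions, not from the original
    # script).
    viewer = Viewer()
    viewer.add_image(tiles, name=slide_name)
    viewer.add_shapes(shapes,
                      shape_type='rectangle',
                      edge_color=colors,
                      face_color='transparent',
                      edge_width=2,
                      name='annotations')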
def point_annotator(
        im_path: str,
        labels: List[str],
):
    """Create a GUI for annotating points in a series of images.

    Parameters
    ----------
    im_path : str
        glob-like string for the images to be labeled.
    labels : List[str]
        list of the labels for each keypoint to be annotated (e.g., the body
        parts to be labeled).
    """
    stack = imread(im_path)

    with napari.gui_qt():
        viewer = napari.view_image(stack)
        points_layer = viewer.add_points(
            data=np.empty((0, 3)),
            properties={'label': labels},
            edge_color='label',
            edge_color_cycle=COLOR_CYCLE,
            symbol='o',
            face_color='transparent',
            edge_width=8,
            size=12,
        )
        points_layer.edge_color_mode = 'cycle'

        # add the label menu widget to the viewer
        label_widget = create_label_menu(points_layer, labels)
        viewer.window.add_dock_widget(label_widget)

        @viewer.bind_key('.')
        def next_label(event=None):
            """Keybinding to advance to the next label with wraparound"""
            current_properties = points_layer.current_properties
            current_label = current_properties['label'][0]
            ind = list(labels).index(current_label)
            new_ind = (ind + 1) % len(labels)
            new_label = labels[new_ind]
            current_properties['label'] = np.array([new_label])
            points_layer.current_properties = current_properties

        def next_on_click(layer, event):
            """Mouse click binding to advance the label when a point is added"""
            if layer.mode == 'add':
                next_label()
                # by default, napari selects the point that was just added
                # disable that behavior, as the highlight gets in the way
                layer.selected_data = {}

        points_layer.mode = 'add'
        points_layer.mouse_drag_callbacks.append(next_on_click)

        @viewer.bind_key(',')
        def prev_label(event):
            """Keybinding to decrement to the previous label with wraparound"""
            current_properties = points_layer.current_properties
            current_label = current_properties['label'][0]
            ind = list(labels).index(current_label)
            n_labels = len(labels)
            new_ind = ((ind - 1) + n_labels) % n_labels
            new_label = labels[new_ind]
            current_properties['label'] = np.array([new_label])
            points_layer.current_properties = current_properties
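# A minimal usage sketch for point_annotator. The glob pattern and the
# body-part labels below are placeholders; COLOR_CYCLE and create_label_menu
# are assumed to be defined elsewhere in this module.
point_annotator(
    im_path='./frames/*.png',
    labels=['snout', 'left_ear', 'right_ear', 'tail_base'],
)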
""" Dynamically load irregularly shapes images of bees from s3 """ import numpy as np from dask_image.imread import imread from dask.cache import Cache from napari import Viewer, gui_qt cache = Cache(2e9) # Leverage two gigabytes of memory cache.register() dir_bees = 's3://sofroniewn/image-data/bees/' bees = imread(dir_bees + '*.jpg') print(bees.shape) with gui_qt(): # create an empty viewer viewer = Viewer() # add the images viewer.add_image(ants, name='ants', contrast_limits=[0, 255])
def np_from_mov_pyav(path):
    container = av.open(path)
    return np.array([f.to_ndarray() for f in container.decode(video=0)])


def dask_from_mov(path):
    vid = imageio.get_reader(path, 'ffmpeg')
    shape = vid.get_meta_data()['size'][::-1] + (3, )
    lazy_imread = delayed(vid.get_data)
    return da.stack([
        da.from_delayed(lazy_imread(i), shape=shape, dtype=np.uint8)
        for i in range(vid.count_frames())
    ])


def np_from_mov(path):
    vid = imageio.get_reader(path, 'ffmpeg')
    return np.array([im for im in vid.iter_data()], dtype=np.uint8)


mov4 = imread(file)
print('mov', mov4.shape)
f = np.asarray(mov4[0])
print('f', f.shape)

# t = time.time()
# mov1 = np_from_mov_pyav(folder + 'videos-raw/2019-08-02-vid01-camA.MOV')
# #mov2 = imread(folder + 'videos-raw/2019-08-02-vid01-camA.MOV')
# #mov3 = np_from_mov(folder + 'videos-raw/2019-08-02-vid01-camA.MOV')
# print(time.time() - t)
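# A small sketch of the lazy alternative: dask_from_mov builds the stack from
# delayed imageio reads, so frames are only decoded when napari requests them.
# It reuses the `file` variable defined earlier; viewing the movie this way is
# one possible workaround if adding the imread() result to napari fails.
lazy_movie = dask_from_mov(file)
print('lazy', lazy_movie.shape)  # (n_frames, height, width, 3), nothing decoded yet

with napari.gui_qt():
    viewer = napari.Viewer()
    viewer.add_image(lazy_movie, contrast_limits=[0, 255])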
def crop(array):
    # simple cropping function
    return array[:, 2:, 10:-20, :500]


if __name__ == "__main__":
    import sys
    from os import sep

    stackfolder = sys.argv[1]
    psffile = sys.argv[2]
    print(f"Stackfolder: {stackfolder}")
    print(f"PSF file: {psffile}")

    # load stacks with dask_image, and psf with skimage
    stack = imread(stackfolder + sep + "*.tif")
    psf = io.imread(psffile)

    # https://docs.python.org/3.8/library/functools.html#functools.partial
    deskew = last3dims(partial(pycudadecon.deskewGPU, angle=31.5))
    deconv = last3dims(partial(pycudadecon.decon, psf=psf, background=10))
    # note: this is done in two steps just as an example...
    # in reality pycudadecon.decon also has a deskew argument

    # map and chain those functions across all dask blocks
    deskewed = stack.map_blocks(deskew, dtype="uint16")
    deconvolved = deskewed.map_blocks(deconv, dtype="float32")
    #cropped = deconvolved.map_blocks(crop, dtype="float32")

    # put the resulting dask array into napari.
    # (don't forget the contrast limits and is_pyramid==False !)
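    # A hypothetical ending for the script above, following the comment: view
    # the lazily deconvolved stack in napari. The contrast limits are
    # placeholder values; is_pyramid was the pre-0.4 napari name for multiscale.
    import napari

    with napari.gui_qt():
        napari.view_image(deconvolved,
                          contrast_limits=[0, 5000],
                          is_pyramid=False)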
import napari
from dask_image.imread import imread

stack = imread("./newdata/images_old/*.jpg")
stack2 = imread("./resultsThreshold/HarDMSEG/reconstructed_newdata/*.jpg")
stack3 = imread("./results/HarDMSEG/newdata/*.jpg")

with napari.gui_qt():
    viewer = napari.view_image(stack, name='Images')
    label_layer = viewer.add_image(stack3,
                                   name='Predicted Full Labels',
                                   opacity=0.5,
                                   visible=False,
                                   gamma=100000)
    label_layer2 = viewer.add_image(stack2,
                                    name='Predicted Patch Labels',
                                    opacity=0.5,
                                    visible=False,
                                    gamma=100000)
""" Dynamically load irregularly shapes images of ants and bees """ import numpy as np from dask_image.imread import imread from dask.cache import Cache from napari import Viewer, gui_qt cache = Cache(2e9) # Leverage two gigabytes of memory cache.register() dir_ants = 'data/hymenoptera/train/ants/' dir_bees = 'data/hymenoptera/train/bees/' ants = imread(dir_ants + '*.jpg') bees = imread(dir_bees + '*.jpg') print(ants.shape) print(bees.shape) offset = max(ants.shape[2], bees.shape[2]) + 20 with gui_qt(): # create an empty viewer viewer = Viewer() # add the images ant_layer = viewer.add_image(ants, name='ants', contrast_limits=[0, 255]) #bee_layer = viewer.add_image(bees, name='bees', contrast_limits=[0, 255])