def read(self):
    from skimage import io
    img = io.MultiImage(os.path.expanduser(self.filepath))
    # a single multi-channel page comes back as one array rather than a collection
    if len(img) == 1 and 2 < len(img[0].shape):
        img = img[0]
    return img
def gen_movie(delta=100, scaley=0.5, spacebetween=5, source_fold=None,
              source_file=None, frame_file=None, offset=1000, frame_no=200,
              target_fold='/home/mossing/Documents/notebooks/temp/',
              frame_rg=(1, 0), chunk_offset=None):
    if chunk_offset is None:
        chunk_offset = offset
    # source_fold and source_file are format strings, like 'whatever{0}whatever',
    # where {0} specifies where a digit should go
    if source_fold is None or source_file is None:
        date, animalid, exptno = parse_sbx_filename(frame_file)
        source_fold = make_suite2p_fold(date, animalid, exptno)
        source_file = make_suite2p_file(date, animalid, exptno)
    filenames = [(source_fold + '/' + source_file).format(n) for n in range(1, 5)]
    print('loading images')
    print(filenames)
    imgs = [skio.MultiImage(filename) for filename in filenames]
    print('done loading images')
    (Ny, Nx) = imgs[0][0].shape
    tr = gen_transform(delta, scaley, (Ny, Nx))
    frm_on = gen_frm_on(frame_file, rg=frame_rg)
    warped = [skt.warp(imgs[n][chunk_offset], tr) for n in range(4)]
    green_frame = np.vstack(
        [x[:int(Ny / 2 + spacebetween)] for x in warped[::-1]])
    stim_on = gen_stim_on(green_frame.shape)
    if not os.path.exists(target_fold):
        os.makedirs(target_fold)
    print('got here')
    for t in range(frame_no):
        print(t)
        warped = [skt.warp(imgs[n][chunk_offset + t], tr) for n in range(4)]
        green_frame = np.vstack(
            [x[:int(Ny * scaley + spacebetween)] for x in warped[::-1]])
        green_frame = green_frame * (green_frame > 0)
        red_frame = np.in1d(offset + t, frm_on) * stim_on
        rgb = np.dstack((red_frame, green_frame, red_frame))
        plt.imsave(target_fold + '/{0:04d}.tif'.format(t), rgb)
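# A minimal follow-up sketch (not part of gen_movie itself): assemble the
# numbered frames it wrote into an mp4 with imageio. The frame rate is an
# assumption, imageio needs its ffmpeg backend for mp4 output, and the saved
# .tif frames are assumed to be readable back as RGB.
import glob
import imageio

def frames_to_movie(target_fold, fps=30):
    frames = sorted(glob.glob(target_fold + '/*.tif'))
    with imageio.get_writer(target_fold + '/movie.mp4', fps=fps) as writer:
        for f in frames:
            writer.append_data(imageio.imread(f))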
def parse_dir(self, glob_pattern):
    from skimage import io
    import glob
    import os
    import numpy as np
    files = glob.glob(os.path.join(self.directory, glob_pattern))
    array_list = []
    for file in files:
        multi = io.MultiImage(file, as_gray=True)
        im_array = np.stack(multi)
        array_list.append(im_array)
    im_stack = np.stack(array_list)
    return im_stack
def get_slide(slide_name):
    """Open a whole-slide image (*.tif).

    Args:
        slide_name: Name of the slide.

    Returns:
        An skimage object representing a whole-slide image.
    """
    # try:
    slide = sk.MultiImage(
        f'{SRC_TRAIN_DIR}/{slide_name}.{SLIDE_EXT}')[BASE_PAGE]
    # except Exception as e:
    #     print(e)
    return slide
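# A hedged usage sketch for get_slide, with illustrative values for the
# module-level constants it relies on. BASE_PAGE selects one resolution level
# of the pyramidal TIFF; page 0 is typically the full-resolution image.
SRC_TRAIN_DIR = '/data/train_images'  # assumed slide folder
SLIDE_EXT = 'tiff'                    # assumed file extension
BASE_PAGE = 1                         # assumed mid-resolution pyramid level

slide = get_slide('example_slide_id')  # hypothetical slide name
print(slide.shape, slide.dtype)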
def test_codecs(self):
    img = io.MultiImage('/input/tests/data/test.tif')
    self.assertEqual((10, 10, 4), img[0].shape)
def ProcessGenerateRecordTask(task):
    ident = task['ident']
    tiffPath = task['tiffPath']
    tfrecordsPath = task['tfrecordsPath']
    tileSize = task['tileSize']
    outImageSize = task['outImageSize']
    rotationStepsCount = task['rotationStepsCount']

    #print("processing {0}".format(ident))
    #print("reading image from disk {0}".format(ident))
    #im = 255 - io.imread(filename, plugin="tifffile")
    multiimage = io.MultiImage(tiffPath)
    #print("multiimage of {0} elements".format(len(multiimage)))
    #for i in range(0, len(multiimage)):
    #    print("level {0}, shape {1}".format(i, multiimage[i].shape))
    im = 255 - multiimage[1]
    M = rotationStepsCount
    rotStep = 360.0 / M
    for i in range(0, M):
        tiles = []
        tfrecordsPathIdx = "{0}-{1}.tfrecords".format(tfrecordsPath[0:-10], i)
        effectiveDegree = rotStep * i
        rotated = npImTrans.RotateWithoutCrop(im, effectiveDegree)
        #print("getting tiles")
        _, tiles = getNotEmptyTiles(rotated, tileSize, emptyCuttOffQuantile=None,
                                    emptyCutOffMaxThreshold=10)
        if len(tiles) == 0:
            print("WARN: Image {0} resulted in 0 tiles. Producing blank (black) single-tile TFRecords file".format(ident))
            tiles = [np.zeros((outImageSize, outImageSize, 3), dtype=np.uint8)]
        else:
            # filtering out non-green tiles
            nonRedTiles = []
            for tile in tiles:
                red = np.mean(tile[:, :, 0])  # supposing RGB, not BGR
                green = np.mean(tile[:, :, 1])
                #print("[R:{0}\tG:{1}; ratio {2}".format(red, green, green / red))
                if green / red >= 1.2:
                    # green must be at least 1.2 times red (to remove white marker tiles)
                    nonRedTiles.append(tile)
            if len(nonRedTiles) > 0:
                tiles = nonRedTiles
            else:
                print("WARN: skipped non-green tile filtering, as it would leave an empty tile set")

            # normalizing with contrasts
            contrasts = []
            means = []
            #print("normalizing {0} tiles".format(len(tiles)))
            for tile in tiles:
                mu = npImNorm.getImageMean_withoutBlack(tile, 40.0)
                if not np.isnan(mu):
                    contrast = npImNorm.getImageContrast_withoutPureBlack(tile, precomputedMu=mu)
                    means.append(mu)
                    contrasts.append(contrast)
            if len(means) > 0:  # whole image is not entirely black
                meanContrast = np.mean(contrasts)
                meanMean = np.mean(means)
                if meanMean > 0.0:
                    for j in range(0, len(tiles)):
                        tiles[j] = npImNorm.GCNtoRGB_uint8(
                            npImNorm.GCN(tiles[j], lambdaTerm=0.0,
                                         precomputedContrast=meanContrast,
                                         precomputedMean=meanMean),
                            cutoffSigmasRange=2.0)
            if outImageSize != tileSize:
                resizedTiles = []
                for tile in tiles:
                    resizedTiles.append(cv2.resize(tile, dsize=(outImageSize, outImageSize),
                                                   interpolation=cv2.INTER_AREA))
                tiles = resizedTiles
        #print("saving {0}".format(len(tiles)))
        savePackAsTFRecord(tiles, tfrecordsPathIdx)
        sys.stdout.write("({0}:{1})".format(i, len(tiles)))
        sys.stdout.flush()
    #print("done")

    # debug preview
    # N = len(gatheredTiles)
    # cols = round(math.sqrt(N))
    # rows = math.ceil(N / cols)
    # fig, ax = plt.subplots(rows, cols)
    # fig.set_facecolor((0.3, 0.3, 0.3))
    # idx = 1
    # for row in range(0, rows):
    #     for col in range(0, cols):
    #         row = (idx - 1) // cols
    #         col = (idx - 1) % cols
    #         ax[row, col].axis('off')
    #         if idx - 1 < N:
    #             ax[row, col].imshow(gatheredTiles[idx - 1])
    #         idx = idx + 1
    # plt.show()  # display it

    return len(tiles)
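# A hedged usage sketch for ProcessGenerateRecordTask; every value below is an
# illustrative assumption. Note that the trailing ".tfrecords" (10 characters)
# of tfrecordsPath is stripped and a rotation index appended per rotation step.
task = {
    'ident': 'slide_0001',
    'tiffPath': '/data/slides/slide_0001.tiff',
    'tfrecordsPath': '/data/records/slide_0001.tfrecords',
    'tileSize': 1024,
    'outImageSize': 224,
    'rotationStepsCount': 4,
}
tileCount = ProcessGenerateRecordTask(task)
print('last rotation produced {0} tiles'.format(tileCount))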
# user input for the file
root = tk.Tk()
root.withdraw()
filename = filedialog.askopenfilename()
filext = os.path.splitext(filename)[1]

# check the file type
if filename.lower().endswith('.avi') or filename.lower().endswith('.mp4'):
    filetype = 0
    global vid
    vid = imageio.get_reader(filename)
    nframes = vid.get_meta_data()['nframes']
elif filename.lower().endswith(('.tiff', '.tif')):
    tiffinfo = io.MultiImage(filename)
    if len(tiffinfo) > 1:
        filetype = 1
        nframes = len(tiffinfo)
    else:
        import glob
        filetype = 2
        filename = glob.glob(
            os.path.split(filename)[0] + os.sep + '*' + filext)
        filename.sort()
        nframes = len(filename)
elif filename.lower().endswith(('.png', '.jpg', '.jpeg')):
    import glob
    filetype = 2
    filename = glob.glob(os.path.split(filename)[0] + os.sep + '*' + filext)
    filename.sort()
def decodeTiff(path):
    path = path.decode("utf-8")
    image = io.MultiImage(path)[1]
    #print("decoded image type {0}. shape {1}. dtype {2}".format(type(image), image.shape, image.dtype))
    return image
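# A hedged sketch of how decodeTiff might be wired into a tf.data pipeline
# (an assumption suggested by the TFRecord-oriented code nearby): the path
# arrives as a bytes tensor, which is why decodeTiff calls .decode("utf-8").
import tensorflow as tf

def loadTiff(path):
    image = tf.numpy_function(decodeTiff, [path], tf.uint8)
    image.set_shape([None, None, 3])  # assuming RGB pages of unknown size
    return image

paths = ['a.tiff', 'b.tiff']  # hypothetical file list
dataset = tf.data.Dataset.from_tensor_slices(paths).map(loadTiff)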
# Generate an icosphere
# -----------------------------------------------------------------------------
print('Generating icosphere')
icosphere = generate_icosphere(order)

# -----------------------------------------------------------------------------
# Load and process the cubemap image
# -----------------------------------------------------------------------------
print('Loading the cube map data')

# Load the multi-page TIFF image
# Channel 0: RGB
# Channel 1: Depth
# Channel 2: Semantic labels
# Channel 3: Instance labels
tiff = io.MultiImage('inputs/cubemap.tiff')

# Convert the RGB image to a torch tensor with dimensions (1, 3, H, W)
cube_rgb = torch.from_numpy(tiff[0]).permute(2, 0, 1).float().unsqueeze(0)
if cuda:
    cube_rgb = cube_rgb.cuda()

# Convert the depth image to a torch tensor with dimensions (1, 1, H, W)
cube_inv_depth = torch.from_numpy(tiff[1].astype(
    np.int32)).float().unsqueeze(0).unsqueeze(0)
if cuda:
    cube_inv_depth = cube_inv_depth.cuda()

# Convert inverse depth to regular depth; zero-valued (invalid) pixels are set
# to -1 first so they map to a depth of -1 instead of dividing by zero
cube_inv_depth[cube_inv_depth == 0] = -1
cube_depth = 1 / cube_inv_depth
#     frameId = video.get(1)
#     ret, frame = video.read()
#     if ret == False:
#         break
#     if frameId % math.floor(frameRate) == 0:
#         cv2.imwrite('images/img' + str(i) + '.jpg', frame)
#         i += 1
# video.release()
# cv2.destroyAllWindows()

# ============================
# COLOR QUANTIZATION
# ============================

# === LOAD IMAGE ===
imageCollection = io.MultiImage('./images/*.jpg')
tmp_array = []

# === CONVERT 8 BIT TO FLOAT ===
for image in imageCollection:
    converted_image = img_as_float64(image)

    # === CONVERT IMAGE INTO 2D MATRIX FOR MANIPULATION ===
    image_array = convert_3d_matrix_to_2d(converted_image)

    # === TRAIN MODEL TO AGGREGATE COLORS IN ORDER TO HAVE 64 DISTINCT COLORS IN IMAGE ===
    image_sample = shuffle(image_array, random_state=0)[:1000]
    n_colors = 64
    kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_sample)
    labels = kmeans.predict(image_array)
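    # The snippet stops after predicting per-pixel cluster labels. A minimal
    # sketch of rebuilding the quantized image from them, intended for the body
    # of the loop above; it assumes the hypothetical helper
    # convert_3d_matrix_to_2d flattens an (H, W, 3) image to (H*W, 3).
    h, w, d = converted_image.shape
    # replace every pixel with the centroid of its cluster, leaving n_colors
    # distinct colors in the result
    quantized_image = kmeans.cluster_centers_[labels].reshape(h, w, d)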
def analysis(faster_fit, k, II):
    import matplotlib.pyplot as plt
    import numpy as np
    import imageio
    import os
    if faster_fit:
        from edge_detection import linear_subpixel_detection as edge
    else:
        from edge_detection import errorfunction_subpixel_detection as edge
    from skimage.viewer.canvastools import RectangleTool
    from skimage.viewer.canvastools import LineTool
    from skimage.viewer import ImageViewer
    from skimage import io
    from shapely.geometry import LineString
    import tkinter as tk
    from tkinter import filedialog

    PO = 3       # polyfit order for edge fitting to measure the contact angle
    thresh = 70  # replace with automatic threshold detection at some point

    # user input for the file
    root = tk.Tk()
    root.withdraw()
    filename = filedialog.askopenfilename()
    filext = os.path.splitext(filename)[1]

    # check the file type
    if filename.lower().endswith('.avi') or filename.lower().endswith('.mp4'):
        filetype = 0
        global vid
        vid = imageio.get_reader(filename)
        nframes = vid.get_meta_data()['nframes']
    elif filename.lower().endswith(('.tiff', '.tif')):
        tiffinfo = io.MultiImage(filename)
        if len(tiffinfo) > 1:
            filetype = 1
            nframes = len(tiffinfo)
        else:
            import glob
            filetype = 2
            filename = glob.glob(os.path.split(filename)[0] + os.sep + '*' + filext)
            filename.sort()
            nframes = len(filename)
    elif filename.lower().endswith(('.png', '.jpg', '.jpeg')):
        import glob
        filetype = 2
        filename = glob.glob(os.path.split(filename)[0] + os.sep + '*' + filext)
        filename.sort()
        nframes = len(filename)
    else:
        print('unknown filetype')

    # read a specific frame from the movie, stack, or image sequence
    def getcurrframe(filename, framenr, filetype):
        def movie():
            return vid.get_data(framenr)[:, :, 0]

        def tifstack():
            stack = io.imread(filename)
            return stack[framenr, :, :]

        def images():
            return io.imread(filename[framenr])

        filetypes = {0: movie, 1: tifstack, 2: images}
        return filetypes[filetype]()

    image = getcurrframe(filename, 0, filetype)

    # preallocate crop coordinates; maybe unnecessary?
    coords = [0, 0, 0, 0]
    # show the image and ask the user for a crop, using skimage.viewer canvastools
    viewer = ImageViewer(image)
    rect_tool = RectangleTool(viewer, on_enter=viewer.closeEvent)
    viewer.show()  # don't forget to show it
    coords = np.array(rect_tool.extents)
    coords = np.array(np.around(coords), dtype=int)
    # crop the image
    cropped = image[coords[2]:coords[3], coords[0]:coords[1]]
    framesize = cropped.shape

    baseinput = np.array([0, 0, 0, 0])
    # user input for the baseline, on which we'll find the contact angles
    viewer = ImageViewer(cropped)
    line_tool = LineTool(viewer, on_enter=viewer.closeEvent)
    viewer.show()
    baseinput = line_tool.end_points
    # extend the baseline to the edge of the frame (in case the drop grows)
    rightbasepoint = np.argmax([baseinput[0, 0], baseinput[1, 0]])
    baseslope = float(baseinput[rightbasepoint, 1] - baseinput[1 - rightbasepoint, 1]) / \
                (baseinput[rightbasepoint, 0] - baseinput[1 - rightbasepoint, 0])
    base = np.array([[0, baseinput[0, 1] - baseslope * baseinput[0, 0]],
                     [framesize[1], baseslope * framesize[1] + baseinput[0, 1] - baseslope * baseinput[0, 0]]])

    # preallocation of edges, angles, and contact points
    edgeleft = np.zeros(framesize[0])
    edgeright = np.zeros(framesize[0])
    thetal = np.zeros(nframes)
    thetar = np.zeros(nframes)
    contactpointright = np.zeros(nframes)
    contactpointleft = np.zeros(nframes)
    dropvolume = np.zeros(nframes)
    plt.ion()

    # loop over frames
    for framenr in range(nframes):
        image = getcurrframe(filename, framenr, filetype)  # get the current frame
        cropped = np.array(image[round(coords[2]):round(coords[3]),
                                 round(coords[0]):round(coords[1])])  # crop it
        # find the edges with the edge function from edge_detection.py
        edgeleft, edgeright = edge(cropped, thresh)
        baseline = LineString(base)  # using shapely we construct the baseline
        rightline = LineString(np.column_stack((edgeright, range(0, framesize[0]))))  # and the edge lines
        leftline = LineString(np.column_stack((edgeleft, range(0, framesize[0]))))
        # find the intersection points of the baseline with the edges
        leftcontact = baseline.intersection(leftline)
        rightcontact = baseline.intersection(rightline)

        # Detect small drops that are lower than 'k' pixels.
        # This may break if the drop grows outside the frame on one side. Maybe fix later?
        fitpointsleft = edgeleft[range(int(np.floor(leftcontact.y)),
                                       int(np.floor(leftcontact.y) - k), -1)]
        if any(fitpointsleft == 0):
            fitpointsleft = np.delete(fitpointsleft, range(np.argmax(fitpointsleft == 0), k))
        # polyfit the edges around the baseline, but flipped, because polyfitting
        # a vertical line is bad
        leftfit = np.polyfit(range(0, fitpointsleft.shape[0]), fitpointsleft, PO)
        leftvec = np.array([1, leftfit[PO - 1]])  # vector for the angle calculation

        fitpointsright = edgeright[range(int(np.floor(rightcontact.y)),
                                         int(np.floor(rightcontact.y) - k), -1)]
        if any(fitpointsright == 0):
            fitpointsright = np.delete(fitpointsright, range(np.argmax(fitpointsright == 0), k))
        rightfit = np.polyfit(range(0, fitpointsright.shape[0]), fitpointsright, PO)
        rightvec = np.array([1, rightfit[PO - 1]])  # vector for the angle calculation

        # base vector for the angle calculation (allows for a sloped baseline
        # if the camera was tilted)
        basevec = np.array([-baseslope, 1])
        # calculate the angles using the dot product
        thetal[framenr] = np.arccos(np.dot(basevec, leftvec) /
                                    (np.sqrt(np.dot(basevec, basevec)) * np.sqrt(np.dot(leftvec, leftvec)))) * 180 / np.pi
        thetar[framenr] = 180 - np.arccos(np.dot(basevec, rightvec) /
                                          (np.sqrt(np.dot(basevec, basevec)) * np.sqrt(np.dot(rightvec, rightvec)))) * 180 / np.pi

        if framenr % II == 0:
            # plot every II frames, to get a visual indication of how far along the script is
            plt.clf()
            plt.imshow(image[round(coords[2]):round(coords[3]), round(coords[0]):round(coords[1])],
                       cmap='gray', interpolation="nearest")
            plt.plot(edgeleft, range(0, framesize[0]))
            plt.plot(edgeright, range(0, framesize[0]))
            plt.plot([base[0, 0], base[1, 0]], [base[0, 1], base[1, 1]])
            plt.title('frame %d of %d' % (framenr, nframes))
            plt.pause(0.001)

        # save the contact points (where the polyfits intersect the baseline)
        contactpointright[framenr] = rightfit[PO]
        contactpointleft[framenr] = leftfit[PO]

        # volume of each cylindrical slice in pixels^3, without taking a possibly
        # slanted baseline into account
        for height in range(0, min(int(np.floor(leftcontact.y)), int(np.floor(rightcontact.y)))):
            dropvolume[framenr] = dropvolume[framenr] + np.pi * np.square((edgeright[height] - edgeleft[height]) / 2)
        # the remaining volume under a slanted baseline: we assume the radius is
        # constant over that range, which is probably accurate for small angles
        # but wrong for larger ones
        slantedbasediff = max(np.floor(leftcontact.y), np.floor(rightcontact.y)) - \
                          min(np.floor(leftcontact.y), np.floor(rightcontact.y))
        baseradius = (edgeright[int(min(np.floor(leftcontact.y), np.floor(rightcontact.y)))] -
                      edgeleft[int(min(np.floor(leftcontact.y), np.floor(rightcontact.y)))]) / 2
        dropvolume[framenr] = dropvolume[framenr] + .5 * np.pi * np.square(baseradius) * slantedbasediff

    #%%
    fitsamplesize = 3
    if nframes > 2 * fitsamplesize + 1:
        leftspeed = np.zeros(nframes)
        rightspeed = np.zeros(nframes)
        for framenr in range(fitsamplesize, nframes - fitsamplesize - 1):
            rightposfit = np.polyfit(range(-fitsamplesize, fitsamplesize),
                                     contactpointright[range(framenr - fitsamplesize, framenr + fitsamplesize)], 1)
            leftposfit = np.polyfit(range(-fitsamplesize, fitsamplesize),
                                    contactpointleft[range(framenr - fitsamplesize, framenr + fitsamplesize)], 1)
            leftspeed[framenr] = leftposfit[0]
            rightspeed[framenr] = rightposfit[0]
        for fillinrest in range(0, fitsamplesize):
            leftspeed[fillinrest] = leftspeed[fitsamplesize]
            rightspeed[fillinrest] = rightspeed[fitsamplesize]
        for fillinrest in range(nframes - fitsamplesize - 1, nframes - 1):
            leftspeed[fillinrest] = leftspeed[nframes - fitsamplesize - 1]
            rightspeed[fillinrest] = rightspeed[nframes - fitsamplesize - 1]
        plt.close()  # close the plot after we're done
    elif nframes > 1:
        leftspeed = np.zeros(nframes)  # preallocate here too, so the assignments below work
        rightspeed = np.zeros(nframes)
        for framenr in range(0, nframes - 2):
            leftspeed[framenr] = contactpointleft[framenr + 1] - contactpointleft[framenr]
            rightspeed[framenr] = contactpointright[framenr + 1] - contactpointright[framenr]
        rightspeed[framenr - 1] = rightspeed[framenr - 2]
        leftspeed[framenr - 1] = leftspeed[framenr - 2]
    else:
        leftspeed = 0
        rightspeed = 0
    return thetal, thetar, leftspeed, rightspeed, contactpointleft, contactpointright, dropvolume
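# Hedged usage sketch: run the analysis with the faster linear edge detector,
# fitting k=10 edge pixels above the baseline and redrawing the progress plot
# every 5 frames (all values illustrative).
thetal, thetar, leftspeed, rightspeed, cpleft, cpright, dropvolume = analysis(
    faster_fit=True, k=10, II=5)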
# Generate an icosphere
# -----------------------------------------------------------------------------
print('Generating icosphere')
icosphere = generate_icosphere(order)

# -----------------------------------------------------------------------------
# Load and process the cubemap image
# -----------------------------------------------------------------------------
print('Loading the cube map data')

# Load the multi-page TIFF image
# Channel 0: RGB
# Channel 1: Depth
# Channel 2: Semantic labels
# Channel 3: Instance labels
tiff = io.MultiImage('examples/inputs/cubemap.tiff')

# Convert the RGB image to a torch tensor with dimensions (1, 3, H, W)
cube_rgb = torch.from_numpy(tiff[0]).permute(2, 0, 1).float().unsqueeze(0)
if cuda:
    cube_rgb = cube_rgb.cuda()

# Convert the depth image to a torch tensor with dimensions (1, 1, H, W)
cube_inv_depth = torch.from_numpy(tiff[1].astype(
    np.int32)).float().unsqueeze(0).unsqueeze(0)
if cuda:
    cube_inv_depth = cube_inv_depth.cuda()

# Convert inverse depth to regular depth; zero-valued (invalid) pixels are set
# to -1 first so they map to a depth of -1 instead of dividing by zero
cube_inv_depth[cube_inv_depth == 0] = -1
cube_depth = 1 / cube_inv_depth