def removeScaleBarPIL( directory, filename, targetDirectory, infoBarHeight=False, verbose=False, return_image=False, scaling=None ):
    """Crop the microscope info bar off the bottom of an image and save it as TIFF.

    Parameters
    ----------
    directory : str
        Folder containing the source image.
    filename : str
        Image file name within ``directory``.
    targetDirectory : str
        Output folder; NOTE the result is saved as ``targetDirectory + filename``,
        so callers must pass a path with a trailing separator (as L7's caller does).
    infoBarHeight : int or False
        Height in pixels of the info bar to crop. ``False`` (or 0, since
        ``0 == False``) triggers auto-detection from the image metadata.
    verbose : bool
        Forwarded to the metadata reader.
    return_image : bool
        If True, return the cropped PIL image (caller must close it);
        otherwise close it and return True.
    scaling : dict or None
        Pre-detected scaling (keys ``x``/``y``/``unit``/``editor``). When None,
        it is auto-detected. Added as a backward-compatible parameter because
        existing callers pass ``scaling=...`` as a keyword (see sliceImage),
        which the old signature rejected with a TypeError.
    """
    if infoBarHeight == False:
        # auto-detect the bar height from the image's embedded metadata
        infoBarHeight = getInfoBarHeightFromMetaData( directory, filename, verbose=verbose )
    # create output directory if it does not exist
    if not os.path.exists( targetDirectory ):
        os.makedirs( targetDirectory )
    im = Image.open( directory + os.sep + filename )
    width, height = im.size
    # crop box: full width, full height minus the info bar at the bottom
    left = 0
    top = 0
    right = width
    bottom = height - infoBarHeight
    if scaling is None:
        scaling = es.autodetectScaling( filename, directory )
    cropped = im.crop((left, top, right, bottom))
    # only embed ImageJ scaling metadata when an editor was detected
    if scaling['editor'] is None:
        cropped.convert('L').save( targetDirectory + filename , "TIFF")
    else:
        cropped.convert('L').save( targetDirectory + filename , "TIFF", tiffinfo = es.setImageJScaling( scaling ))
    im.close()
    if return_image:
        return cropped
    else:
        cropped.close()
        return True
def getFolderScaling(directory):
    """Detect the image scaling for a folder from its first ``.tif`` file.

    Returns the auto-detected scaling dict of the first ``.tif`` found in
    ``directory``; falls back to an empty scaling when none exists.
    """
    for entry in os.listdir(directory):
        if entry.endswith(".tif"):
            return es.autodetectScaling(entry, directory)
    return image_slicer.getEmptyScaling()
def load_binary_image(self, color=None, round_scaling=4):
    """Load ``self.filename`` from ``self.folder`` and binarize it.

    Detects the image scaling (optionally rounded to ``round_scaling``
    decimals), reads the image unchanged via OpenCV, then dispatches to the
    color or grayscale binarization routine depending on the channel count.
    Exits the process when the image cannot be loaded.
    """
    self.scaling = es.autodetectScaling(self.filename, self.folder)
    if round_scaling > 0:
        for axis in ('x', 'y'):
            self.scaling[axis] = round(self.scaling[axis], round_scaling)
    image_path = self.folder + os.sep + self.filename
    self.img = cv2.imread(image_path, -1)  # -1: load as-is, keep depth/channels
    if self.img is None:
        print('Error loading {}'.format(self.filename))
        exit()
    if self.get_image_channel_count() == 3:
        print(' detected color image')
        self.load_binary_image_from_color(color=self.analyze_color_BGR, color_id=self.color_id)
    else:
        print(' detected grayscale image')
        self.load_binary_image_from_binary()
# Only TIFF images are processed by this script.
allowed_file_extensions = [ '.tif' ]

if os.path.isdir( settings["workingDirectory"] ) :
    # make sure the output folder exists
    settings["targetDirectory"] = getTargetFolder(settings)
    if not os.path.isdir( settings["targetDirectory"] ):
        os.mkdir(settings["targetDirectory"])

    ## count valid files and get image scaling
    image_list = []
    for file in os.listdir(settings["workingDirectory"]):
        file_name, file_extension = os.path.splitext( file )
        if ( file_extension.lower() in allowed_file_extensions ):
            print('-'*40)
            print(file_name + file_extension)
            # process units
            scaling = es.autodetectScaling( file_name + file_extension, settings["workingDirectory"], verbose=False )
            print(scaling)
            # the downstream pore analysis expects nm; warn but continue otherwise
            if scaling['unit'] != 'nm':
                #print(scaling)
                print('{}: UNIT {} IS WRONG!!! Expected nm. processing will fail or give wrong results.'.format(file_name, scaling['unit']))
            # append to image list
            image_list.append((file_name, file_extension, scaling))

    print( "{} images found!".format(len(image_list)) )
    print()
    print( 'Processing image' )

    # create result dictionary containing dataframes of pore data
    position = 0
    result_columns = ['diameter', 'area', 'surface', 'volume', 'pixel size']
def stitchImages(settings, fileNameList, resultFile='', result_file_name=''):
    """Stitch a grid of tile images into one canvas and save it as TIFF.

    Parameters
    ----------
    settings : dict
        Expects tile_count, col_count, row_count, scaleFactor, imageDirection,
        workingDirectory, outputDirectory, fileType, cropX, cropY,
        scaleX/scaleY/scaleUnit, createThumbnail and openResultFile keys.
    fileNameList : list[str]
        Tile file paths in grid order; the literal entry 'EMPTY' marks a
        missing tile that is replaced by a black placeholder.
    resultFile : str
        Output path; derived from settings when empty.
    result_file_name : str
        Output base name; derived from the working directory when empty.

    Side effects: writes the stitched image (plus an optional thumbnail) and
    may open the result in the platform's default viewer.
    """
    # check, if the image count will fit in new canvas
    if (settings["tile_count"] != settings["col_count"] * settings["row_count"]):
        print(" Error: Expected " + str(settings["col_count"] * settings["row_count"]) + " images, but found " + str(settings["tile_count"]))
    else:
        # first non-empty tile provides scaling and the placeholder geometry
        scaling_filename = None
        for x in fileNameList:
            if x != 'EMPTY':  # fails if the first image is empty, which should never happen
                scaling_filename = x
                break
        if scaling_filename is not None:
            scaling = es.autodetectScaling(
                os.path.basename(scaling_filename),
                os.path.dirname(os.path.abspath(scaling_filename)))
            scaling_file = Image.open(scaling_filename)
            if result_file_name == '':
                result_file_name = os.path.basename(settings["workingDirectory"])
            print(" stitching " + str(settings["tile_count"]) + " images.")
            images = []
            for x in fileNameList:
                if x == 'EMPTY':  # fails if the first image is empty, which should never happen
                    images.append(Image.new(scaling_file.mode, (scaling_file.size[0], scaling_file.size[1]), color='black'))
                else:
                    images.append(Image.open(x))
            # scaling_file was only needed for mode/size of placeholders; close it (was leaked before)
            scaling_file.close()
            h_sizes, v_sizes = [0] * settings["col_count"], [0] * settings["row_count"]
            # get grid size and create empty canvas
            for i, im in enumerate(images):
                print(" resizing image #" + str(i + 1), end=" \r")
                if (settings["scaleFactor"] < 1):
                    newsize = (int(im.size[0] * settings["scaleFactor"]), int(im.size[1] * settings["scaleFactor"]))
                    # Image.LANCZOS: ANTIALIAS was an alias of LANCZOS and was removed in Pillow 10
                    images[i] = images[i].resize(newsize, Image.LANCZOS)
                h, v = i % settings["col_count"], i // settings["col_count"]
                h_sizes[h] = max(h_sizes[h], images[i].size[0])
                v_sizes[v] = max(v_sizes[v], images[i].size[1])
            # cumulative offsets of each column / row on the canvas
            h_sizes, v_sizes = np.cumsum([0] + h_sizes), np.cumsum([0] + v_sizes)
            #print(h_sizes[-1], v_sizes[-1])
            im_grid = Image.new(images[0].mode, (h_sizes[-1], v_sizes[-1]), color='white')
            # insert tiles to canvas
            for i, im in enumerate(images):
                print(" pasting image #" + str(i + 1), end=" \r")
                if (settings["imageDirection"] == 'v'):  # vertical tile sequence
                    im_grid.paste(im, (h_sizes[i // settings["row_count"]], v_sizes[i % settings["row_count"]]))
                else:  # horizontal tile sequence
                    im_grid.paste(im, (h_sizes[i % settings["col_count"]], v_sizes[i // settings["col_count"]]))
            if resultFile == '':
                resultFile = settings["outputDirectory"] + os.sep + result_file_name + settings["fileType"]
            print(" saving result to " + resultFile)
            # optional crop of the stitched canvas
            if settings["cropX"] > 0 and (settings["cropX"] < h_sizes[-1] or settings["cropY"] < v_sizes[-1]):
                crop_x = settings["cropX"] if settings["cropX"] < h_sizes[-1] else h_sizes[-1]
                crop_y = settings["cropY"] if settings["cropY"] < v_sizes[-1] else v_sizes[-1]
                im_grid = im_grid.crop((0, 0, crop_x, crop_y))
            # set scaling for ImageJ; fall back to the manual settings values
            if scaling == False or scaling == es.getEmptyScaling():
                scaling = {
                    'x': settings["scaleX"],
                    'y': settings["scaleY"],
                    'unit': settings["scaleUnit"],
                    'editor': 'FEI-MAPS'
                }
            im_grid.save(resultFile, tiffinfo=es.setImageJScaling(scaling))
            thumbXSize = 2000
            thumbDirectory = settings["outputDirectory"] + os.sep + 'thumbnails'
            if (settings["createThumbnail"] and im_grid.size[0] > thumbXSize):
                if (not os.path.isdir(thumbDirectory)):
                    os.mkdir(thumbDirectory)
                thumbFile = thumbDirectory + os.sep + result_file_name + settings["fileType"]
                print(" saving thumbnail to " + thumbFile)
                scaleFactor = thumbXSize / im_grid.size[0]
                newsize = (thumbXSize, int(scaleFactor * im_grid.size[1]))
                im_grid = im_grid.resize(newsize)
                # pixel size grows when the image shrinks
                scaling = {
                    'x': scaling['x'] / scaleFactor,
                    'y': scaling['y'] / scaleFactor,
                    'unit': scaling['unit'],
                    'editor': scaling['editor']
                }
                im_grid.save(thumbFile, tiffinfo=es.setImageJScaling(scaling))
            im_grid.close()
            if settings["openResultFile"]:
                if platform.system() == 'Darwin':  # macOS
                    subprocess.call(('open', resultFile))
                elif platform.system() == 'Windows':  # Windows
                    os.startfile(resultFile)
                else:  # linux variants
                    subprocess.call(('xdg-open', resultFile))
        else:
            print('Error, none of the files found!')
def sliceImage(settings, file_name, file_extension=False, verbose=False):
    """Slice one image into a row_count x col_count grid of tiles.

    Parameters
    ----------
    settings : dict
        Expects workingDirectory, row_count, col_count and overwrite_existing.
    file_name : str
        Base name of the image; when ``file_extension`` is False it is treated
        as the full file name and split here.
    file_extension : str or False
        Extension including the dot, or False to derive it from ``file_name``.
    verbose : bool
        Print progress details.

    Returns the scaling dict used for the slices. Slices are written to the
    target folder as ``<name>_<row>_<col><ext>`` with ImageJ scaling metadata.
    """
    # process file name
    if not file_extension == False:
        filename = file_name + file_extension
    else:
        filename = file_name
        file_name, file_extension = os.path.splitext(filename)
    if verbose: print(" read scaling")
    # get scaling; prefer FEI metadata and strip the scale bar when present
    scaling = es.getFEIScaling(filename, settings["workingDirectory"], verbose=verbose)
    if not scaling == False:
        noScaleBarDirectory = settings["workingDirectory"] + os.sep + 'no_scale_bar' + os.sep
        rsb.removeScaleBarPIL(settings["workingDirectory"], filename, noScaleBarDirectory, scaling=scaling)
        src_file = noScaleBarDirectory + filename
    else:
        scaling = es.autodetectScaling(filename, settings["workingDirectory"])
        src_file = settings["workingDirectory"] + os.sep + filename
    # open image and get width/height
    if verbose: print(" slicing file")
    img = Image.open(src_file)
    width, height = img.size
    # cropping width / height of each tile
    crop_height = int(height / settings["row_count"])
    crop_width = int(width / settings["col_count"])
    slice_name = file_name + "_{}_{}" + file_extension
    targetDirectory = getTargetFolder(settings, file_name)
    # check whether every expected tile is already on disk
    slices_already_exist = True
    if not settings["overwrite_existing"]:
        for i in range(settings["row_count"]):  # start at i = 0 to row_count-1
            for j in range(settings["col_count"]):  # start at j = 0 to col_count-1
                if not os.path.isfile(targetDirectory + slice_name.format(i, j)):
                    slices_already_exist = False
                    break
            if not slices_already_exist:
                break
    else:
        slices_already_exist = False
    if slices_already_exist:
        print(" The expected sliced images already exists! Doing nothing...")
    else:
        # remove stale tiles of the same type before writing new ones
        for file_old in os.listdir(targetDirectory):
            if (file_old.endswith(file_extension)):
                os.remove(targetDirectory + file_old)
        for i in range(settings["row_count"]):  # start at i = 0 to row_count-1
            for j in range(settings["col_count"]):  # start at j = 0 to col_count-1
                fileij = slice_name.format(i, j)
                if verbose: print(" - " + fileij + ":")
                cropped_filename = targetDirectory + fileij
                img.crop(
                    ((j * crop_width), (i * crop_height),
                     ((j + 1) * crop_width), ((i + 1) * crop_height))).save(
                        cropped_filename, tiffinfo=es.setImageJScaling(scaling))
    # release the file handle explicitly instead of just dropping the reference
    img.close()
    return scaling
def processCLD(directory, filename):
    """Run a chord-length-distribution analysis on one (multi-page) image.

    Uses the page selected by ``globMaskPagePos`` as mask for the directional
    CLD (results appended to ``sumResultCSV``), then computes porespy CLDs
    along x and y and saves a bar-plot SVG next to the source image.
    """
    global globMaskPagePos
    global outputDirName
    global sumResultCSV
    scaling = es.autodetectScaling(filename, directory)
    im = Image.open(directory + os.sep + filename)
    # check page count in image
    pageCnt = sum(1 for _ in ImageSequence.Iterator(im))
    if (pageCnt - 1 < globMaskPagePos):
        print(
            ' WARNING: The image has only {} page(s)! Trying to use page 1 as mask.'
            .format(pageCnt))
        maskPagePos = 0
    else:
        maskPagePos = globMaskPagePos
    # run analysis; iterating seeks im to each page in turn
    for i, page in enumerate(ImageSequence.Iterator(im)):
        if (i == maskPagePos):
            # NOTE(review): passes im (seeked to this page), not page — presumably
            # equivalent while iterating; confirm against processDirectionalCLD
            sumResultCSV += processDirectionalCLD(im, scaling, directory, 'horizontal')
            sumResultCSV += processDirectionalCLD(im, scaling, directory, 'vertical')
    img = imageio.imread(directory + os.sep + filename)
    chords_x = ps.filters.apply_chords(img, axis=0, spacing=1, trim_edges=True)
    chords_y = ps.filters.apply_chords(img, axis=1, spacing=1, trim_edges=True)
    cld_x = ps.metrics.chord_length_distribution(chords_x, bins=100, log=True)
    cld_y = ps.metrics.chord_length_distribution(chords_y, bins=100, log=True)
    fig, (ax0, ax1) = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))
    ax0.bar(cld_x.bin_centers, cld_x.relfreq, width=cld_x.bin_widths, edgecolor='k')
    ax1.bar(cld_y.bin_centers, cld_y.relfreq, width=cld_y.bin_widths, edgecolor='k')
    plt.savefig(directory + os.sep + filename + 'line_plot.svg')
    # close the figure; otherwise repeated calls leak matplotlib figures
    plt.close(fig)
    im.close()
    print()
def process_translation_of_folder(folder=None, multicore=True, do_nlm=False, mask_size=0.9, eq_hist=True, crop_thresh=0, limit=False):
    """Register an image series, align it into a 3D stack and save it as TIFF.

    Parameters
    ----------
    folder : str or None
        Image/working directory; asked interactively when None.
    multicore : bool
        Use the multi-process translation routine.
    do_nlm : bool
        Apply non-local-means denoising while stacking.
    mask_size, eq_hist
        Forwarded to the translation routines.
    crop_thresh : number
        When > 0, auto-crop the aligned stack with this threshold.
    limit : bool or int
        Forwarded to load_image_set to cap the number of images.

    Returns (translation, error_list, aligned_images, loaded_images, scaling).
    Writes translations.csv, an optional error_list.csv, and the (cropped)
    aligned stack into ``<folder>/aligned``.
    """
    global translation
    global error_list
    global aligned_images
    if folder is None:
        folder = filedialog.askdirectory(
            title='Please select the image / working directory')
    # load images
    images, loaded_images = load_image_set(folder, limit)
    im_cnt = len(loaded_images)
    # load scaling
    scaling = es.autodetectScaling(images[0], folder, verbose=True)
    # fix distortion due to FIB-geometry: 38 is the tilt angle in DEGREES;
    # the original called math.cos(38) with radians, which is a bug
    scaling['y'] = scaling['y'] / math.cos(math.radians(38))
    # load translation table
    translation_csv = folder + os.sep + 'translations.csv'
    translation = load_translation_csv(translation_csv, im_cnt)
    # process translation table; rebuild it when the CSV does not match the set
    if len(translation) != im_cnt - 1:
        translation = []
        for f in images:
            translation.append([f, 0.0, 0.0])
    print("processing {} images...".format(im_cnt))
    loaded_images_resized = []
    # NOTE(review): indexes loaded_images[1] — crashes for a single image; verify intent
    f = 1000 / loaded_images[1].shape[0]
    if f < .95:
        print(' reducing image size for translation calculations')
        for i, image in enumerate(loaded_images):
            loaded_images_resized.append(
                cv2.resize(image, None, fx=f, fy=f))
        if multicore:
            process_translation_of_folder_multicore(
                images, loaded_images_resized, mask_size, eq_hist)
        else:
            process_translation_of_folder_singlecore(
                images, loaded_images_resized, mask_size, eq_hist)
        # translations were measured on downscaled images: rescale to full size
        for i, translation_line in enumerate(translation):
            translation[i][1] = translation_line[1] / f
            translation[i][2] = translation_line[2] / f
    else:
        if multicore:
            process_translation_of_folder_multicore(
                images, loaded_images, mask_size, eq_hist)
        else:
            process_translation_of_folder_singlecore(
                images, loaded_images, mask_size, eq_hist)
    print("processing basic data...")
    # save results
    save_translation_csv(translation, translation_csv)
    if len(error_list) > 0:
        write_list_to_csv(sorted(error_list),
                          folder + os.sep + 'error_list.csv',
                          ['file_b', 'serverity'])
    # align images
    aligned_images = create_3D_stack(translation, loaded_images, do_nlm)
    print('saving images..')
    # original concatenated folder + 'aligned' without a separator, creating a
    # sibling directory named "<folder>aligned"; insert os.sep
    save_path = folder + os.sep + 'aligned' + os.sep
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    stack_fn = save_path + "aligned_stack_({}).tif".format(im_cnt)
    tif.imsave(stack_fn, aligned_images, bigtiff=True)
    print('saved "{}"'.format(stack_fn))
    if crop_thresh > 0:
        aligned_images = auto_crop_stack(aligned_images, threshold=crop_thresh)
        cropped_fn = save_path + 'cropped.tif'
        tif.imsave(cropped_fn, aligned_images, bigtiff=True)
        print('saved "{}"'.format(cropped_fn))
    print('sucessfull')
    return translation, error_list, aligned_images, loaded_images, scaling