def bw_process(file, bw_path, param=.85):
    if '.json' in file:
        image_digitalizer = ImageSignalDigitalizer(config_file=file)
        if bw_path:
            if bw_path[-1] != '/':
                bw_path += '/'
        else:
            bw_path = './bn_images/'
        if not isdir(bw_path):
            mkdir(bw_path)
        temp_name = bw_path + 'bn_{0}.png'
        for x in image_digitalizer.process_images():
            image = img_as_uint(x[0])
            imsave(temp_name.format(x[1][:-4]), image)
            yield (image, 'bn_' + x[1][:-4] + '.png')
    else:
        last_name_file = file.split('/')[-1]
        image = imread(file)
        image = ImageSignalDigitalizer.process_image(image, param)
        image = img_as_uint(image)
        if bw_path:
            if bw_path[-1] != '/':
                bw_path += '/'
            temp_name = bw_path + 'bn_{0}.png'
            imsave(temp_name.format(last_name_file[:-4]), image)
        yield image, 'bn_' + file.split('/')[-1][:-4] + '.png'
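
# A minimal usage sketch for the generator above, assuming
# ImageSignalDigitalizer and the imread/imsave/isdir/mkdir names are
# importable as in the original module; the file names here are hypothetical.
for bw_image, bw_name in bw_process('photos/leaf.png', 'bn_out/'):
    print(bw_name, bw_image.dtype)  # e.g. bn_leaf.png uint16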
def pred_model(test_x, test_y):
    print("start load model")
    trained_model = joblib.load('model.pkl')
    print("model loaded")
    predictions = trained_model.predict(test_x)
    print(predictions)
    print(test_y)
    pred = []
    for p in predictions:
        pred.append(np.array(p, dtype=int))
    miou = 0
    for p, y in zip(pred, test_y):
        miou = miou + cal_miou(p, y)  # jaccard_similarity_score(p, y, normalize=True)
    miou = miou / len(pred)
    # print("Test mean squared error :: " + str(mean_squared_error(test_y, predictions)))
    print("mIOU :: " + str(miou))
    for i in range(len(predictions)):
        io.imsave(str(i) + "_out_pred.tif",
                  img_as_uint(predictions[i].reshape(256, 256, 1)))
        io.imsave(str(i) + "_out_img.tif",
                  img_as_uint(test_x[i].reshape(256, 256, 3)))
def RQA_eval(x_in, TR, RPpow, img_process):
    RQA_mat_L = zeros((TR, TR))  # set up L, R matrices to compute the recurrence matrix
    RQA_mat_L[0:TR, :] = x_in
    RQA_mat_R = RQA_mat_L.transpose()
    # Subtract the two matrices; each mode below transforms this difference
    # (computing it up front also keeps RQA_value defined for modes 1 and 2).
    RQA_value = RQA_mat_L - RQA_mat_R
    if img_process == 0:
        # RQA_value = RQA_value / amax(abs(RQA_value))
        RQA_value = abs(RQA_value)
        RQA_value = RQA_value ** RPpow
    if img_process == 1:
        # RQA_value = RQA_value / amax(abs(RQA_value))
        RQA_value = img_as_uint(RQA_value)  # expects floats in [-1, 1]; negatives clip to 0
        RQA_value = skimage.exposure.equalize_hist(RQA_value)
    if img_process == 2:
        # RQA_value = RQA_value / amax(abs(RQA_value))
        RQA_value = img_as_uint(RQA_value)
        RQA_value = skimage.exposure.equalize_adapthist(RQA_value, clip_limit=0.01)
    RQA_value -= mean(RQA_value)
    return RQA_value
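
# A toy driver for RQA_eval, assuming the star-imported NumPy names (zeros,
# mean) of the original module are in scope: a 200-sample sine wave yields a
# 200x200 recurrence-style matrix.
import numpy as np

t = np.linspace(0, 8 * np.pi, 200)
rp = RQA_eval(np.sin(t), TR=200, RPpow=2, img_process=0)
print(rp.shape)  # (200, 200), zero-mean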
def time_get_files(file_path, period, binning, color_channel, connect_kafka):
    result = []
    files = os.listdir(file_path)
    if period == 0:
        for file in files:
            start = time.time()
            if os.path.isfile(file_path + file):
                if file[-5] in color_channel:
                    # 5th letter from the end of the file name gives the color channel
                    img = cv2.imread(file_path + file, -1)
                    binned_img = block_reduce(img, block_size=(binning, binning), func=np.sum)
                    if connect_kafka == "yes":
                        ret, jpeg = cv2.imencode('.tif', img_as_uint(binned_img))
                        kafka_stream_target.connect(jpeg.tobytes())
            stop = time.time()
            result.append(stop - start)
    else:
        print("period!=0")
        for file in files:
            start = time.time()
            if os.path.isfile(file_path + file):
                if file[-5] in color_channel:
                    # 5th letter from the end of the file name gives the color channel
                    img = cv2.imread(file_path + file, -1)
                    binned_img = block_reduce(img, block_size=(binning, binning), func=np.sum)
                    if connect_kafka == "yes":
                        ret, jpeg = cv2.imencode('.tif', img_as_uint(binned_img))
                        kafka_stream_target.connect(jpeg.tobytes())
                    time.sleep(period)
            stop = time.time()
            result.append(stop - start)
    return result
def write_two_planes(filepaths: List[str], outpath: str, z_level: float,
                     x_min: int, x_max: int, y_min: int, y_max: int,
                     scale_x: float, scale_y: float,
                     flip_x: bool, flip_y: bool):
    """Writes out a cropped, scaled, and flipped TIFF file interpolated
    between two TIFFs.

    Order of operations: cropping, scaling, flipping.

    Parameters
    ----------
    filepaths : List[str]
        List of file paths of the original two TIFFs
    outpath : str
        File path to output TIFF
    z_level : float
        Float between 0 and 1 describing the interpolation position between
        the two planes, 0 being close to the bottom
    x_min : int
        Minimum x-coordinate to keep in the output TIFF file
    x_max : int
        Maximum x-coordinate to keep in the output TIFF file
    y_min : int
        Minimum y-coordinate to keep in the output TIFF file
    y_max : int
        Maximum y-coordinate to keep in the output TIFF file
    scale_x : float
        Scaling factor in the x-dimension
    scale_y : float
        Scaling factor in the y-dimension
    flip_x : bool
        True if the output image should be reflected about the y axis
        (reversing the image along the x-dimension)
    flip_y : bool
        True if the output image should be reflected about the x axis
        (reversing the image along the y-dimension)

    Returns
    -------
    None
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        p1 = img_as_uint(
            zoom(tifffile.imread(filepaths[0])[y_min - 1:y_max, x_min - 1:x_max],
                 (scale_y, scale_x)))
        p2 = img_as_uint(
            zoom(tifffile.imread(filepaths[1])[y_min - 1:y_max, x_min - 1:x_max],
                 (scale_y, scale_x)))
    # Uses a linear spline to interpolate between the two planes
    p_comb = ((1 - z_level) * p1) + (z_level * p2)
    p_comb = img_as_uint(np.around(p_comb).astype(np.uint16))
    if flip_x:
        p_comb = np.flip(p_comb, 1)
    if flip_y:
        p_comb = np.flip(p_comb, 0)
    with tifffile.TiffWriter(outpath) as file:
        file.save(p_comb)
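
# A hypothetical invocation of write_two_planes: interpolate 25% of the way
# from z010.tif toward z011.tif, crop to a 512x512 window, downscale by half,
# and mirror vertically (all file names are placeholders).
write_two_planes(['z010.tif', 'z011.tif'], 'z010p25.tif', z_level=0.25,
                 x_min=1, x_max=512, y_min=1, y_max=512,
                 scale_x=0.5, scale_y=0.5, flip_x=False, flip_y=True)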
def stack_label_images_to_tidy_df_ratio(
    label_img_stack,
    num_intensity_img_stack,
    denom_intensity_img_stack,
    properties=["label", "mean_intensity", "area", "centroid"],
):
    features = pd.DataFrame()
    for i, lab_img in enumerate(label_img_stack):
        df = pd.DataFrame(
            measure.regionprops_table(
                lab_img,
                intensity_image=img_as_uint(num_intensity_img_stack[i, :, :]),
                properties=properties,
            ))
        df["frame"] = i
        df.rename(
            columns={
                "mean_intensity": "mean_intensity_num",
                "centroid-0": "y",
                "centroid-1": "x",
            },
            inplace=True,
        )
        df["mean_intensity_denom"] = pd.DataFrame(
            measure.regionprops_table(
                lab_img,
                intensity_image=img_as_uint(denom_intensity_img_stack[i, :, :]),
                properties=["mean_intensity"],
            )["mean_intensity"])
        df["mean_intensity_num_denom"] = (df["mean_intensity_num"] /
                                          df["mean_intensity_denom"])
        features = features.append(df)
    return features
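
# Design note: DataFrame.append was deprecated in pandas 1.4 and removed in
# 2.0, so on current pandas the accumulation loop above is usually written by
# collecting the per-frame tables and concatenating once. A minimal sketch of
# the pattern:
import pandas as pd

frames = [pd.DataFrame({"frame": [i], "mean_intensity_num": [float(i)]})
          for i in range(3)]
features = pd.concat(frames, ignore_index=True)
print(features)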
def saveResult(fileName, npyfile, flag_multi_class=False, num_class=2):
    save_path = fileName + "/../predict/"
    result_path = fileName + "/../results/"
    test_path = fileName
    # rename?
    name = []
    for pic in os.listdir(test_path):
        name.append(pic[:-4])
    # print(name)
    # os.removedirs(save_path)  # recursively delete everything under the predict directory
    try:
        del_file(save_path)
        os.rmdir(save_path)
    except Exception as e:
        os.mkdir(save_path)
    else:
        os.mkdir(save_path)
    # finally:  # create the folder whether or not an exception occurred
    #     os.mkdir(save_path)
    for i, item in enumerate(npyfile):
        img = labelVisualize(num_class, COLOR_DICT, item) if flag_multi_class else item[:, :, 0]
        # print(img)
        # print(img_as_uint(img))
        io.imsave(os.path.join(save_path, "%s_predict.png" % name[i]), img_as_uint(img))
        io.imsave(os.path.join(result_path, "%s_predict.png" % name[i]), img_as_uint(img))
def get_hypo_rprops(hypo, filter=True, already_skeletonized=False, return_skeleton=False):
    """
    Args:
        hypo: segmented hypocotyl image
        filter: boolean or list of [min_length, max_length]
    """
    hypo_thresh = (hypo > 0.5)
    if not already_skeletonized:
        hypo_skeleton = label(img_as_uint(skeletonize_3d(hypo_thresh)))
    else:
        hypo_skeleton = label(img_as_uint(hypo_thresh))
    hypo_rprops = regionprops(hypo_skeleton)

    # filter out small regions
    if filter:
        if isinstance(filter, Container):
            min_length, max_length = filter
            hypo_rprops = [r for r in hypo_rprops
                           if min_length <= r.area <= max_length]
        else:
            otsu_thresh = threshold_otsu(np.array([r.area for r in hypo_rprops]))
            hypo_rprops = [r for r in hypo_rprops if r.area > otsu_thresh]

    if return_skeleton:
        return HypoResult(hypo_rprops), hypo_skeleton > 0

    return HypoResult(hypo_rprops)
def get_hypo_rprops(hypo, filter=True, already_skeletonized=False,
                    skeleton_method=skeletonize_3d, return_skeleton=False,
                    dpm=False):
    """
    Args:
        hypo: segmented hypocotyl image
        filter: boolean or list of [min_length, max_length]
    """
    hypo_thresh = (hypo > 0.5)
    if not already_skeletonized:
        hypo_skeleton = label(img_as_uint(skeleton_method(hypo_thresh)))
    else:
        hypo_skeleton = label(img_as_uint(hypo_thresh))
    hypo_rprops = regionprops(hypo_skeleton)

    # filter out small regions
    hypo_result = HypoResult(hypo_rprops, dpm)
    hypo_result.filter(flt=filter)

    if return_skeleton:
        return hypo_result, hypo_skeleton > 0

    return hypo_result
def mono_check(plugin, fmt='png'):
    """Check the roundtrip behavior for images that support most types.

    All major input types should be handled.
    """
    img = img_as_ubyte(data.moon())
    r1 = roundtrip(img, plugin, fmt)
    testing.assert_allclose(img, r1)

    img2 = img > 128
    r2 = roundtrip(img2, plugin, fmt)
    testing.assert_allclose(img2.astype(np.uint8), r2)

    img3 = img_as_float(img)
    r3 = roundtrip(img3, plugin, fmt)
    if r3.dtype.kind == 'f':
        testing.assert_allclose(img3, r3)
    else:
        testing.assert_allclose(r3, img_as_uint(img))

    img4 = img_as_int(img)
    if fmt.lower() in (('tif', 'tiff')):
        img4 -= 100
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img4)
    else:
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img_as_uint(img4))

    img5 = img_as_uint(img)
    r5 = roundtrip(img5, plugin, fmt)
    testing.assert_allclose(r5, img5)
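
# The roundtrip helper used by mono_check is not shown in this collection; a
# minimal sketch of what it presumably does (save with the given plugin and
# format, read back, clean up):
import os
import tempfile
from skimage import io


def roundtrip(img, plugin, fmt):
    fd, path = tempfile.mkstemp(suffix='.' + fmt)
    os.close(fd)
    try:
        io.imsave(path, img, plugin=plugin)
        return io.imread(path, plugin=plugin)
    finally:
        os.remove(path)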
def time_kafka_producer(file_path, period, binning, color_channel, connect_kafka):
    result = []
    files = os.listdir(file_path)
    if period == 0:
        # Stream as fast as possible.
        for file in files:
            if os.path.isfile(file_path + file):
                if file[-5] in color_channel:
                    # 5th letter from the end of the file name gives the color channel
                    img = cv2.imread(file_path + file, -1)
                    # print(type(img))
                    binned_img = block_reduce(img, block_size=(binning, binning), func=np.sum)
                    if connect_kafka == "yes":
                        ret, jpeg = cv2.imencode('.tif', img_as_uint(binned_img))
                        as_bytes = jpeg.tobytes()
                        # time.clock() was removed in Python 3.8;
                        # time.perf_counter() is the modern equivalent.
                        start = time.clock()
                        kafka_stream_target.old_connect(as_bytes)
                        stop = time.clock()
                        result.append(stop - start)
    else:
        # Stream with the given time period.
        for file in files:
            if os.path.isfile(file_path + file):
                if file[-5] in color_channel:
                    # 5th letter from the end of the file name gives the color channel
                    img = cv2.imread(file_path + file, -1)
                    binned_img = block_reduce(img, block_size=(binning, binning), func=np.sum)
                    if connect_kafka == "yes":
                        ret, jpeg = cv2.imencode('.tif', img_as_uint(binned_img))
                        as_bytes = jpeg.tobytes()
                        start = time.time()
                        kafka_stream_target.old_connect(as_bytes)
                        time.sleep(period)
                        stop = time.time()
                        result.append(stop - start)
    return result
def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01, nbins=256):
    """Contrast Limited Adaptive Histogram Equalization.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction. Ranges between 2 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction. Ranges between 2 and 16.
    clip_limit : float, optional
        Clipping limit, normalized between 0 and 1 (higher values give more
        contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    Notes
    -----
    * The algorithm relies on an image whose rows and columns are even
      multiples of the number of tiles, so the extra rows and columns are left
      at their original values, thus preserving the input image shape.
    * For color images, the following steps are performed:
       - The image is converted to LAB color space
       - The CLAHE algorithm is run on the L channel
       - The image is converted back to RGB space and returned
    * For RGBA images, the original alpha channel is removed.

    References
    ----------
    .. [1] http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
    .. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
    """
    args = [None, ntiles_x, ntiles_y, clip_limit * nbins, nbins]
    if image.ndim > 2:
        lab_img = color.rgb2lab(skimage.img_as_float(image))
        l_chan = lab_img[:, :, 0]
        l_chan /= np.max(np.abs(l_chan))
        l_chan = skimage.img_as_uint(l_chan)
        args[0] = rescale_intensity(l_chan, out_range=(0, NR_OF_GREY - 1))
        new_l = _clahe(*args).astype(float)
        new_l = rescale_intensity(new_l, out_range=(0, 100))
        lab_img[:new_l.shape[0], :new_l.shape[1], 0] = new_l
        image = color.lab2rgb(lab_img)
        image = rescale_intensity(image, out_range=(0, 1))
    else:
        image = skimage.img_as_uint(image)
        args[0] = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
        out = _clahe(*args)
        image[:out.shape[0], :out.shape[1]] = out
        image = rescale_intensity(image)
    return image
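
# For reference, a typical call against the current public scikit-image API,
# which exposes this routine as skimage.exposure.equalize_adapthist, returns
# floats scaled to [0, 1], and has since replaced the tile counts with a
# kernel_size argument:
from skimage import data, exposure

moon = data.moon()
clahe = exposure.equalize_adapthist(moon, clip_limit=0.02)
print(clahe.dtype, clahe.min(), clahe.max())  # float64 0.0 1.0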
def save_images(self, images, fname='/Users/robertf/Downloads/tmp_stk.tif'):
    with TiffWriter(fname, bigtiff=False, imagej=True) as t:
        if len(images.shape) > 2:
            for i in range(images.shape[2]):
                t.save(img_as_uint(images[:, :, i]))
        else:
            t.save(img_as_uint(images))
    return fname
def write_one_plane(filepath: str, outpath: str, x_min: int, x_max: int,
                    y_min: int, y_max: int, scale_x: float, scale_y: float,
                    flip_x: bool, flip_y: bool):
    """Writes out a cropped, scaled, and flipped single TIFF file from a
    single TIFF.

    Order of operations: cropping, scaling, flipping.

    Parameters
    ----------
    filepath : str
        File path of the original TIFF
    outpath : str
        File path to output TIFF
    x_min : int
        Minimum x-coordinate to keep in the output TIFF file
    x_max : int
        Maximum x-coordinate to keep in the output TIFF file
    y_min : int
        Minimum y-coordinate to keep in the output TIFF file
    y_max : int
        Maximum y-coordinate to keep in the output TIFF file
    scale_x : float
        Scaling factor in the x-dimension
    scale_y : float
        Scaling factor in the y-dimension
    flip_x : bool
        True if the output image should be reflected about the y axis
        (reversing the image along the x-dimension)
    flip_y : bool
        True if the output image should be reflected about the x axis
        (reversing the image along the y-dimension)

    Returns
    -------
    None
    """
    plane_ndarray = img_as_uint(tifffile.imread(filepath)[y_min - 1:y_max,
                                                          x_min - 1:x_max])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        plane = img_as_uint(zoom(plane_ndarray, (scale_y, scale_x)))
    if flip_x:
        plane = np.flip(plane, 1)
    if flip_y:
        plane = np.flip(plane, 0)
    with tifffile.TiffWriter(outpath) as file:
        file.save(plane)
def render_model_exemplars(pbar, pair: ExemplarShapePair,
                           render_shape=config.SHAPE_REND_SHAPE):
    warnings.simplefilter('ignore')

    camera = cameras.spherical_coord_to_cam(pair.fov, pair.azimuth, pair.elevation)

    pbar.set_description(f'[{pair.id}] Loading shape')
    mesh, materials = pair.shape.load()
    camera.near, camera.far = compute_tight_clipping_planes(mesh, camera.view_mat())

    pbar.set_description(f'[{pair.id}] Rendering segments')
    if not pair.data_exists(config.PAIR_FG_BBOX_NAME):
        segment_im = render_segments(mesh, camera)
        fg_mask = segment_im > -1
        fg_bbox = mask_bbox(fg_mask)
        segment_im = crop_tight_fg(segment_im, render_shape,
                                   bbox=fg_bbox, fill=-1, order=0)
        pair.save_data(config.SHAPE_REND_SEGMENT_VIS_NAME,
                       skimage.img_as_uint(visualize_map(segment_im)))
        pair.save_data(config.SHAPE_REND_SEGMENT_MAP_NAME,
                       (segment_im + 1).astype(np.uint8))
        tqdm.write(f" * Saving {pair.get_data_path(config.PAIR_FG_BBOX_NAME)}")
        pair.save_data(config.PAIR_RAW_SEGMENT_MAP_NAME,
                       (segment_im + 1).astype(np.uint8))
        pair.save_data(config.PAIR_FG_BBOX_NAME, fg_mask.astype(np.uint8) * 255)
    else:
        fg_mask = pair.load_data(config.PAIR_FG_BBOX_NAME)
        fg_bbox = mask_bbox(fg_mask)

    if not pair.data_exists(config.SHAPE_REND_PHONG_NAME):
        pbar.set_description('Rendering phong')
        phong_im = np.clip(
            render_wavefront_mtl(mesh, camera, materials,
                                 config.SHAPE_REND_RADMAP_PATH,
                                 gamma=2.2, ssaa=3, tonemap='reinhard'), 0, 1)
        phong_im = crop_tight_fg(phong_im, render_shape, bbox=fg_bbox)

        pbar.set_description(f'[{pair.id}] Saving data')
        pair.save_data(config.SHAPE_REND_PHONG_NAME, skimage.img_as_uint(phong_im))
def cast_img_uint16(tensor, catch_warning=False):
    """Cast the data in np.uint16.

    Negative values are not allowed as the skimage method 'img_as_uint' would
    clip them to 0. Positive values are scaled between 0 and 65535, except if
    they fit directly in 16 bit (in this case values are not modified).

    Parameters
    ----------
    tensor : np.ndarray
        Image to cast.
    catch_warning : bool
        Catch and ignore UserWarning about possible precision or sign loss.

    Returns
    -------
    tensor : np.ndarray, np.uint16
        Image cast.
    """
    # check tensor dtype
    check_array(tensor,
                ndim=[2, 3, 4, 5],
                dtype=[np.uint8, np.uint16, np.uint32, np.uint64,
                       np.int8, np.int16, np.int32, np.int64,
                       np.float16, np.float32, np.float64])
    if tensor.dtype in [np.float16, np.float32, np.float64]:
        check_range_value(tensor, min_=0, max_=1)
    elif tensor.dtype in [np.int8, np.int16, np.int32, np.int64]:
        check_range_value(tensor, min_=0)

    if tensor.dtype == np.uint16:
        return tensor

    if (tensor.dtype in [np.uint32, np.uint64, np.int32, np.int64]
            and tensor.max() <= 65535):
        raise ValueError("Tensor values are between {0} and {1}. It fits in "
                         "16 bits and won't be scaled between 0 and 65535. "
                         "Use 'tensor.astype(np.uint16)' instead.".format(
                             tensor.min(), tensor.max()))

    # cast tensor
    if catch_warning:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            tensor = img_as_uint(tensor)
    else:
        tensor = img_as_uint(tensor)

    return tensor
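
# Quick illustration of the scaling behavior cast_img_uint16 relies on:
# skimage's img_as_uint stretches floats in [0, 1] over the full uint16 range.
import numpy as np
from skimage import img_as_uint

tensor = np.array([[0.0, 0.25], [0.5, 1.0]])
out = img_as_uint(tensor)
print(out.dtype)             # uint16
print(out.min(), out.max())  # 0 65535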
def img_as_pil(arr, format_str=None):
    """Convert a scikit-image image (ndarray) to a PIL object.

    Derived from code in the scikit-image PIL IO plugin.

    :param numpy.ndarray img: The image to convert.
    :return: PIL image.
    :rtype: Image
    """
    if arr.ndim == 3:
        arr = img_as_ubyte(arr)
        mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
    elif format_str in ['png', 'PNG']:
        mode = 'I;16'
        mode_base = 'I'
        if arr.dtype.kind == 'f':
            arr = img_as_uint(arr)
        elif arr.max() < 256 and arr.min() >= 0:
            arr = arr.astype(np.uint8)
            mode = mode_base = 'L'
        else:
            arr = img_as_uint(arr)
    else:
        arr = img_as_ubyte(arr)
        mode = 'L'
        mode_base = 'L'

    try:
        array_buffer = arr.tobytes()
    except AttributeError:
        array_buffer = arr.tostring()  # Numpy < 1.9

    if arr.ndim == 2:
        im = Image.new(mode_base, arr.T.shape)
        try:
            im.frombytes(array_buffer, 'raw', mode)
        except AttributeError:
            im.fromstring(array_buffer, 'raw', mode)  # PIL 1.1.7
    else:
        image_shape = (arr.shape[1], arr.shape[0])
        try:
            im = Image.frombytes(mode, image_shape, array_buffer)
        except AttributeError:
            im = Image.fromstring(mode, image_shape, array_buffer)  # PIL 1.1.7
    return im
def run(self, workspace):
    x_name = self.x_name.value
    y_name = self.y_name.value

    images = workspace.image_set
    x = images.get_image(x_name)
    x_data = x.pixel_data
    x_data = skimage.img_as_uint(x_data)

    if x.dimensions == 3 or x.multichannel:
        y_data = numpy.zeros_like(x_data)
        for z, image in enumerate(x_data):
            y_data[z] = skimage.filters.rank.gradient(image, self.__structuring_element())
    else:
        y_data = skimage.filters.rank.gradient(x_data, self.structuring_element.value)

    y = cellprofiler.image.Image(
        image=y_data,
        dimensions=x.dimensions,
        parent_image=x,
    )

    images.add(y_name, y)

    if self.show_window:
        workspace.display_data.x_data = x_data
        workspace.display_data.y_data = y_data
        workspace.display_data.dimensions = x.dimensions
def test_tv_denoise_2d(self):
    """
    Apply the TV denoising algorithm on the lena image provided by scipy
    """
    # lena image
    lena = color.rgb2gray(data.lena())[:256, :256]
    # add noise to lena
    lena += 0.5 * lena.std() * np.random.randn(*lena.shape)
    # clip noise so that it does not exceed the allowed range for float images
    lena = np.clip(lena, 0, 1)
    # denoise
    denoised_lena = filter.tv_denoise(lena, weight=60.0)
    # which dtype?
    assert denoised_lena.dtype in [np.float, np.float32, np.float64]
    from scipy import ndimage
    grad = ndimage.morphological_gradient(lena, size=((3, 3)))
    grad_denoised = ndimage.morphological_gradient(denoised_lena, size=((3, 3)))
    # test if the total variation has decreased
    assert np.sqrt((grad_denoised ** 2).sum()) < np.sqrt((grad ** 2).sum()) / 2
    denoised_lena_int = filter.tv_denoise(img_as_uint(lena),
                                          weight=60.0, keep_type=True)
    assert denoised_lena_int.dtype is np.dtype('uint16')
def __convert_type(array):
    if im_type in (np.int16, np.uint16):
        return img_as_uint(array)
    elif im_type in (np.int8, np.uint8):
        return img_as_ubyte(array)
    else:
        return array
def test_save_im_colour_16bit():
    im = np.random.uniform(size=(256, 256, 3))
    im = img_as_uint(im)
    fname = os.path.join('tests', 'test_data', 'tmp.png')
    pu.image.save_im(fname, im, bitdepth=16)
    im2 = io.imread(fname)
    # Note: comparing .all() results only checks truthiness; a stricter test
    # would use np.testing.assert_array_equal(im, im2).
    assert im.all() == im2.all()
def imSave(img, filename, range='float'):
    f = open(filename, 'wb')
    w = png.Writer(313, 443, greyscale=True, bitdepth=8)
    img_w = exposure.rescale_intensity(img, out_range=range)
    # Note: img_as_uint yields 16-bit values while the Writer above is 8-bit;
    # bitdepth=16 would match the data.
    img_w = img_as_uint(img_w)
    w.write(f, img_w)
    f.close()
def save_predictions(predictions):
    # zero out low-confidence voxels
    predictions[np.where(predictions < 0.1)] = 0
    # remove the first dimension from the shape
    predictions = predictions[0]
    nec = predictions[0, ]
    ede = predictions[1, ]
    enh = predictions[2, ]
    arr_dict = {'necrotic': nec, 'edema': ede, 'enhancing': enh}
    # now we have 3 3D volumes; save each slice as a PNG file
    for key, npa in arr_dict.items():
        for i in range(0, predictions.shape[-1]):
            im = npa[:, :, i]
            im = exposure.rescale_intensity(im, out_range='float')
            im = img_as_uint(im)
            if key == 'necrotic':
                filepath = os.path.join(app.config['PREDICTION_PATH'], 'necrotic',
                                        "nec_{}.png".format(i))
            elif key == 'edema':
                filepath = os.path.join(app.config['PREDICTION_PATH'], 'edema',
                                        "ede_{}.png".format(i))
            elif key == 'enhancing':
                filepath = os.path.join(app.config['PREDICTION_PATH'], 'enhancing',
                                        "enh_{}.png".format(i))
            io.imsave(filepath, im)
def test_save_buttons():
    viewer = get_image_viewer()
    sv = SaveButtons()
    viewer.plugins[0] += sv

    import tempfile
    fid, filename = tempfile.mkstemp(suffix='.png')
    os.close(fid)

    timer = QtCore.QTimer()
    timer.singleShot(100, QtWidgets.QApplication.quit)

    # exercise the button clicks
    sv.save_stack.click()
    sv.save_file.click()

    # call the save functions directly
    sv.save_to_stack()
    with expected_warnings(['precision loss']):
        sv.save_to_file(filename)

    img = data.imread(filename)
    with expected_warnings(['precision loss']):
        assert_almost_equal(img, img_as_uint(viewer.image))

    img = io.pop()
    assert_almost_equal(img, viewer.image)

    os.remove(filename)
def saveRegion(self):
    imageBaseName = os.path.basename(self.regionImage.fileName).split('.')[0]
    pathToSave = os.path.join(workDir, imageBaseName)
    for c, arr in self.imagesArrays.iteritems():
        io.imsave(os.path.join(pathToSave,
                               '_'.join((self._type, str(self.timeStamp), c,
                                         imageBaseName + '.tif'))),
                  img_as_uint(arr))
    if self.contour is not None:
        np.savetxt(os.path.join(pathToSave,
                                '_'.join((self._type, str(self.timeStamp),
                                          'contour.dat'))),
                   self.contour)
        np.savetxt(os.path.join(pathToSave,
                                '_'.join((self._type, str(self.timeStamp),
                                          'mask.dat'))),
                   self.mask)
    # pickle.dump(self.metaData,
    #             open(os.path.join(pathToSave, 'metaData.dat'), 'wb'))
    writer = csv.writer(open(os.path.join(pathToSave,
                                          '_'.join((self._type,
                                                    str(self.timeStamp),
                                                    'metaData.dat'))), 'wb'),
                        delimiter=':')
    self.sortedmetaData = collections.OrderedDict(sorted(self.metaData.items()))
    for k, v in self.sortedmetaData.iteritems():
        writer.writerow([k, v])
def guess_corners(bw):
    """
    Infer the corners of an image using a Sobel filter to find the edges and
    a Harris filter to find the corners. Takes only a single color channel.

    Parameters
    ----------
    bw : (m x n) ndarray of ints

    Returns
    -------
    corners : pixel coordinates of plot corners, unsorted
    outline : (m x n) ndarray of bools
        True -> plot area
    """
    assert len(bw.shape) == 2
    bw = img_as_uint(bw)
    e_map = ndimage.sobel(bw)

    markers = np.zeros(bw.shape, dtype=int)
    markers[bw < 30] = 1
    markers[bw > 150] = 2
    seg = ndimage.watershed_ift(e_map, np.asarray(markers, dtype=int))
    outline = ndimage.binary_fill_holes(1 - seg)
    corners = harris(np.asarray(outline, dtype=int))
    corners = approximate_polygon(corners, 1)
    return corners, outline
def render_model_exemplars(pbar, pair: ExemplarShapePair,
                           render_shape=config.SHAPE_REND_SHAPE):
    camera = cameras.spherical_coord_to_cam(pair.fov, pair.azimuth, pair.elevation)

    pbar.set_description(f'[{pair.id}] Loading shape')
    mesh, materials = pair.shape.load()
    camera.near, camera.far = compute_tight_clipping_planes(mesh, camera.view_mat())

    segment_im = render_segments(mesh, camera)
    fg_bbox = mask_bbox(segment_im > -1)

    pbar.set_description('Rendering preview')
    phong_im = np.clip(
        render_preview(mesh, camera, config.SHAPE_REND_RADMAP_PATH,
                       gamma=2.2, ssaa=2), 0, 1)
    phong_im = crop_tight_fg(phong_im, render_shape, bbox=fg_bbox)
    vis.image(phong_im.transpose((2, 0, 1)), win='shape-preview')

    pbar.set_description(f'[{pair.id}] Saving data')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        pair.save_data(config.SHAPE_REND_PREVIEW_NAME, skimage.img_as_uint(phong_im))
def _write_movie_frame(frame, base_path, image_filename):
    """
    Takes a movie frame, adds the frame number to the bottom right of the
    image, and saves it to disk.
    """
    # We have to set up a Matplotlib plot so we can add text.
    # scikit-image unfortunately has no text annotation methods.
    # fig, ax = plt.subplots()
    # ax.imshow(frame, cmap=cm.gray)
    #
    # # Remove whitespace and ticks from the image, since we're making a movie, not a plot.
    # # Matplotlib is expecting us to make a plot though, so removing all evidence of this is tricky.
    # plt.gca().set_axis_off()
    # plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    # plt.margins(0, 0)
    # plt.gca().xaxis.set_major_locator(plt.NullLocator())
    # plt.gca().yaxis.set_major_locator(plt.NullLocator())

    # Add the frame number to the bottom right of the image in red text
    # ax.annotate(str(frame_number + 1), xy=(1, 1), xytext=(frame.shape[1] - 9, frame.shape[0] - 5), color='r')

    log.info("Creating %s" % base_path + "/" + image_filename)

    # Write the frame to disk
    # fig.savefig(base_path + "/" + image_filename, bbox_inches='tight', pad_inches=0)
    # Closing the plot is required or memory goes nuts
    # plt.close()
    img = img_as_uint(frame)
    skimage.io.imsave(base_path + "/" + image_filename, img, plugin='freeimage')
def main():
    args = vars(parser.parse_args())
    filename = os.path.join(os.getcwd(), args["image"][0])
    image = skimage.img_as_uint(color.rgb2gray(io.imread(filename)))

    subsample = 1
    if not args["subsample"] == 1:
        subsample = args["subsample"][0]
        image = transform.downscale_local_mean(image, (subsample, subsample))
        image = transform.pyramid_expand(image, subsample, 0, 0)

    image = exposure.rescale_intensity(image, out_range=(0, args["depth"][0]))

    if args["visualize"]:
        io.imshow(image)
        io.show()

    source = generate_face(image, subsample, args["depth"][0], FLICKER_SPEED)

    if source:
        with open(args["output"][0], 'w') as file_:
            file_.write(source)
    else:
        print("Attempted to generate source code, failed.")
def average(files, self):
    start = timer()
    dir = os.listdir(files)
    first = None
    stacked = None
    i = 0
    total_files = len(dir)
    for file in dir:
        i += 1
        file = files + "/" + file
        image = cv2.imread(file, -1)
        image = img_as_float(image)
        print("(" + str(i) + "/" + str(total_files) + ") Stacking " + file + "...")
        if first is None:
            first = image
            stacked = image
        else:
            stacked += image
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        stacked = img_as_uint(stacked / len(dir))
    stop = timer()
    total = str(float("%0.3f" % float(stop - start)))
    print("Stacking took " + total + "s")
    return stacked
def save(self, fname, shape=None, fps=None):
    """Save the percept as an MP4 or GIF

    Parameters
    ----------
    fname : str
        The filename to be created, with the file extension indicating the
        file type. Percepts with time=None can be saved as images (e.g.,
        '.jpg', '.png', '.gif'). Multi-frame percepts can be saved as movies
        (e.g., '.mp4', '.avi', '.mov') or '.gif'.
    shape : (height, width) or None, optional
        The desired width x height of the resulting image/video. Use
        (h, None) to use a specified height and automatically infer the width
        from the percept's aspect ratio. Analogously, use (None, w) to use a
        specified width. If shape is None, width will be set to 320px and
        height will be inferred accordingly.
    fps : float or None
        If None, uses the percept's time axis. Not supported for
        non-homogeneous time axis.

    Notes
    -----
    * ``shape`` will be adjusted so that width and height are multiples of 16
      to ensure compatibility with most codecs and players.
    """
    data = self.data - self.data.min()
    if not isclose(np.max(data), 0):
        data = data / np.max(data)
    data = img_as_uint(data)

    if shape is None:
        # Use 320px width and infer height from aspect ratio:
        shape = (None, 320)
    height, width = shape
    if height is None and width is None:
        raise ValueError('If shape is a tuple, must specify either height '
                         'or width or both.')
    # Infer height or width if necessary:
    if height is None and width is not None:
        height = width / self.data.shape[1] * self.data.shape[0]
    elif height is not None and width is None:
        width = height / self.data.shape[0] * self.data.shape[1]
    # Rescale percept to desired shape:
    data = resize(data, (np.int32(height), np.int32(width)))

    if self.time is None:
        # No time component, store as an image. imwrite will automatically
        # scale the gray levels:
        imageio.imwrite(fname, data)
    else:
        # With time component, store as a movie:
        if fps is None:
            interval = unique(np.diff(self.time))
            if len(interval) > 1:
                raise NotImplementedError
            fps = 1000.0 / interval[0]
        imageio.mimwrite(fname, data.transpose((2, 0, 1)), fps=fps)
    logging.getLogger(__name__).info('Created %s.' % fname)
def _apply(func8, func16, image, selem, out, mask, shift_x, shift_y):
    selem = img_as_ubyte(selem > 0)
    image = np.ascontiguousarray(image)

    if mask is None:
        mask = np.ones(image.shape, dtype=np.uint8)
    else:
        mask = np.ascontiguousarray(mask)
        mask = img_as_ubyte(mask)

    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")

    is_8bit = image.dtype in (np.uint8, np.int8)

    if func8 is not None and (is_8bit or func16 is None):
        out = _apply8(func8, image, selem, out, mask, shift_x, shift_y)
    else:
        image = img_as_uint(image)
        if out is None:
            out = np.zeros(image.shape, dtype=np.uint16)
        bitdepth = find_bitdepth(image)
        if bitdepth > 11:
            image = image >> 4
            bitdepth = find_bitdepth(image)
        func16(image, selem, shift_x=shift_x, shift_y=shift_y, mask=mask,
               bitdepth=bitdepth + 1, out=out)

    return out
def color_check(plugin, fmt='png'):
    """Check roundtrip behavior for color images.

    All major input types should be handled as ubytes and read
    back correctly.
    """
    img = img_as_ubyte(data.chelsea())
    r1 = roundtrip(img, plugin, fmt)
    testing.assert_allclose(img, r1)

    img2 = img > 128
    r2 = roundtrip(img2, plugin, fmt)
    testing.assert_allclose(img2.astype(np.uint8), r2)

    img3 = img_as_float(img)
    r3 = roundtrip(img3, plugin, fmt)
    testing.assert_allclose(r3, img)

    img4 = img_as_int(img)
    if fmt.lower() in (('tif', 'tiff')):
        img4 -= 100
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img4)
    else:
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img_as_ubyte(img4))

    img5 = img_as_uint(img)
    r5 = roundtrip(img5, plugin, fmt)
    testing.assert_allclose(r5, img)
def unmix_original_images(rois, images, alphas, names):
    for roi, image, alpha, name in zip(rois, images, alphas, names):
        # clean artifacts in the image
        max_s = np.max(roi)
        image[image > max_s] = 0

        # unmix the whole-brain image
        if np.sum(alpha) > 0.0:
            corrected_img = image - np.sum(
                [a * img for a, img in zip(alpha, images) if a != 0.0], axis=0)
            corrected_img[corrected_img < 0] = 0
        else:
            corrected_img = image

        # stretch the shrunken histogram
        corrected_img = rescale_histogram(corrected_img)

        # convert to uint16
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            corrected_img = img_as_uint(corrected_img)

        # save image
        save_name = os.path.join(args.save_dir, name)
        tifffile.imsave(save_name, corrected_img, bigtiff=True)
def _read(path):
    img = io.imread(path)
    if img.dtype == ">u2":
        # big endian -> little endian
        img = img.astype(np.uint16)
    if img.ndim == 3 and (img.dtype == np.float32 or img.dtype == np.float64):
        img = color.rgb2gray(img)
    elif img.ndim == 3:
        img = img_as_uint(color.rgb2gray(img))
    if force_dtype == DType.FLOAT:
        img = img.astype(DTYPE_FLOAT)
    elif force_dtype == DType.UNSIGNED_INT:
        img = img.astype(DTYPE_UNSIGNED_INT)
    elif force_dtype == DType.INT:
        img = img.astype(DTYPE_INT)
    if (img.dtype == np.float32 or img.dtype == np.float64) and \
            (img.min() < 0.0 or img.max() > 1.0):
        raise RuntimeWarning(
            'Image "{}" is of type float but not scaled between 0 and 1. '
            'This might cause trouble later. You might want to force '
            'conversion to another datatype using '
            'force_dtype=DType.UNSIGNED_INT (for example).'.format(path))
    return img
def borders(img, filename):
    """
    create borders on an image

    :param img: image
    :param filename: filepath
    :return: image with borders
    """
    new_color = 0      # color of border
    image_border = 10  # size of border
    xrange, yrange = img.shape  # get range of image
    for x in range(0, xrange):  # iterate through image at the border
        for y in range(0, image_border):
            img[x, y] = new_color  # set color in y-direction on top
        for y in range(yrange - image_border, yrange):
            img[x, y] = new_color  # set color in y-direction at bottom
    for y in range(0, yrange):  # iterate through image
        for x in range(0, image_border):
            img[x, y] = new_color  # set color in x-direction on top
        for x in range(xrange - image_border, xrange):
            img[x, y] = new_color  # set color in x-direction at bottom
    # save image
    imsave(filename + "borders.png", img_as_uint(img))
    return img
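
# The nested Python loops above scale poorly with image size; a minimal
# vectorized sketch of the same 10-pixel border fill using NumPy slicing
# (equivalent behavior, not the original author's code):
def borders_vectorized(img, border=10, color=0):
    img[:border, :] = color    # top rows
    img[-border:, :] = color   # bottom rows
    img[:, :border] = color    # left columns
    img[:, -border:] = color   # right columns
    return img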
def main(): """ main method that is called to extract crops """ # set paths & create the file lists file_list_main, labels_list_main = get_labels_and_data() # go through all files if len(file_list_main) == len(labels_list_main): # if the lists have the same length for index in range(len(labels_list_main)): # go through list label = labels_list_main[index] # get label binary_file = to_binary(file_list_main[index]) # get binary coloured image file border_file = borders(binary_file, file_list_main[index]) # apply borders to image contours = get_contours(border_file, file_list_main[index]) # get contours of letters rgb_border_file = img_as_float(gray2rgb(border_file)) # get grey file # index for counting the crops (naming the files) index_crops = 0 # array that contains cropped images cropped_images = [] for cnt in contours: # for every contours x, y, w, h = cv2.boundingRect(cnt) # get rectangle draw_red_rectangle(x, y, w, h, rgb_border_file, 1) # draw rectangle crop = get_cropped_image(cnt, Image.open(file_list_main[index]), label, index_crops, index) # get crop cropped_images.append(crop) # append to list index_crops += 1 imsave(file_list_main[index] + "contours.png", img_as_uint(rgb_border_file))
def high_pass(image: np.ndarray, sigma: Union[Number, Tuple[Number]]) -> np.ndarray:
    """
    Applies a gaussian high pass filter to an image

    Parameters
    ----------
    image : numpy.ndarray[np.uint32]
        2-d or 3-d image data
    sigma : Union[Number, Tuple[Number]]
        Standard deviation of the Gaussian kernel that will be applied. If a
        float, an isotropic kernel will be assumed, otherwise the dimensions
        of the kernel give (z, y, x)

    Returns
    -------
    np.ndarray :
        High-pass filtered image
    """
    if image.dtype != np.uint16:
        # The flattened original constructed the warning without emitting it;
        # warnings.warn makes the conversion notice actually fire.
        warnings.warn(DataFormatWarning(
            'gaussian filters currently only support uint16 images. '
            'Image data will be converted.'))
        image = img_as_uint(image)

    blurred: np.ndarray = GaussianLowPass.low_pass(image, sigma)

    over_flow_ind: np.ndarray = image < blurred
    filtered: np.ndarray = image - blurred
    filtered[over_flow_ind] = 0

    return filtered
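
# A sketch of driving high_pass on synthetic data, assuming
# GaussianLowPass.low_pass from the surrounding module is importable; only
# the call pattern is illustrated.
import numpy as np

rng = np.random.default_rng(0)
image = (np.linspace(0, 30000, 64 * 64).reshape(64, 64)
         + rng.integers(0, 5000, (64, 64))).astype(np.uint16)
filtered = high_pass(image, sigma=3)
print(filtered.dtype, filtered.shape)  # uint16 (64, 64)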
def test_save_buttons():
    viewer = get_image_viewer()
    sv = SaveButtons()
    viewer.plugins[0] += sv

    import tempfile
    fid, filename = tempfile.mkstemp(suffix='.png')
    os.close(fid)

    timer = QtCore.QTimer()
    timer.singleShot(100, QtGui.QApplication.quit)

    # exercise the button clicks
    sv.save_stack.click()
    sv.save_file.click()

    # call the save functions directly
    sv.save_to_stack()
    with expected_warnings(['precision loss']):
        sv.save_to_file(filename)

    img = data.imread(filename)
    with expected_warnings(['precision loss']):
        assert_almost_equal(img, img_as_uint(viewer.image))

    img = io.pop()
    assert_almost_equal(img, viewer.image)

    os.remove(filename)
def showFile(file):
    print("Show file function!")
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        out = img_as_uint(file)
    if args.file:
        outname = outfileName(args.file)
    else:
        outname = "hello.png"
    # You need to save the image you want to see
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        io.imsave(str(outname), out)
    name = "showFile"  # + str(number)
    img = cv2.imread(str(outname), 0)
    imS = cv2.resize(img, (700, 760))
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.moveWindow(name, 5, 20)
    cv2.imshow(name, imS)
    cv2.waitKey(0)  # closes after a key press
    cv2.destroyAllWindows()
def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01, nbins=256):
    args = [None, ntiles_x, ntiles_y, clip_limit * nbins, nbins]
    image = skimage.img_as_uint(image)
    args[0] = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
    out = _clahe(*args)
    image[:out.shape[0], :out.shape[1]] = out
    image = rescale_intensity(image)
    return image
def getImageContours(self, gray):
    ret, thresh = cv2.threshold(gray, 220, 255, cv2.THRESH_TOZERO)
    fill = ndimage.binary_fill_holes(thresh)
    clean = morphology.remove_small_objects(fill, 12)
    clean = img_as_uint(clean).astype(np.dtype('uint8'))
    contour_image = clean.copy()
    # Note: this two-value unpacking matches OpenCV 2.x/4.x;
    # OpenCV 3.x returns (image, contours, hierarchy).
    contours, hierarchy = cv2.findContours(contour_image, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_TC89_KCOS)
    return contours
def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01, nbins=256):
    """Contrast Limited Adaptive Histogram Equalization (CLAHE).

    An algorithm for local contrast enhancement, that uses histograms
    computed over different tile regions of the image. Local details can
    therefore be enhanced even in regions that are darker or lighter than
    most of the image.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction. Ranges between 1 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction. Ranges between 1 and 16.
    clip_limit : float, optional
        Clipping limit, normalized between 0 and 1 (higher values give more
        contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    See Also
    --------
    equalize_hist, rescale_intensity

    Notes
    -----
    * For color images, the following steps are performed:
       - The image is converted to HSV color space
       - The CLAHE algorithm is run on the V (Value) channel
       - The image is converted back to RGB space and returned
    * For RGBA images, the original alpha channel is removed.
    * The CLAHE algorithm relies on image blocks of equal size. This may
      result in extra border pixels that would not be handled. In that case,
      we pad the image with a repeat of the border pixels, apply the
      algorithm, and then trim the image to original size.

    References
    ----------
    .. [1] http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
    .. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
    """
    image = skimage.img_as_uint(image)
    image = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
    out = _clahe(image, ntiles_x, ntiles_y, clip_limit * nbins, nbins)
    image[:out.shape[0], :out.shape[1]] = out
    image = skimage.img_as_float(image)
    return rescale_intensity(image)
def temp_filter_method_adaptive_thresholding(imageFile):
    img = data.imread(imageFile, as_grey=True)
    global_thresh = threshold_yen(img)
    # True/False binary matrix representing the color value of the img using
    # global thresholding
    binary_global = img > global_thresh
    block_size = 40
    # True/False binary matrix representing the color value of the img using
    # adaptive thresholding
    binary_adaptive = threshold_adaptive(img, block_size, offset=0)
    # 0/1 binary matrix
    img_bin_global = clear_border(img_as_uint(binary_global))
    # 0/1 binary matrix
    img_bin_adaptive = clear_border(img_as_uint(binary_adaptive))
    bin_pos_mat = ocr.binary_matrix_to_position(binary_adaptive)
    np.savetxt("test.txt", bin_pos_mat)  # an fmt such as "%.5f" would round to 5 decimals
def test_adapthist_color():
    '''Test an RGB color uint16 image
    '''
    img = skimage.img_as_uint(data.lena())
    adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
    assert_almost_equal = np.testing.assert_almost_equal
    assert adapted.min() == 0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape
    full_scale = skimage.exposure.rescale_intensity(img)
    assert peak_snr(img, adapted) > 95.0
    assert norm_brightness_err(img, adapted) < 0.05
    return data, adapted
def test_adapthist_scalar():
    '''Test a scalar uint8 image
    '''
    img = skimage.img_as_ubyte(data.moon())
    adapted = exposure.equalize_adapthist(img, clip_limit=0.02)
    assert adapted.min() == 0
    assert adapted.max() == (1 << 16) - 1
    assert img.shape == adapted.shape

    full_scale = skimage.exposure.rescale_intensity(skimage.img_as_uint(img))

    assert_almost_equal = np.testing.assert_almost_equal
    assert_almost_equal(peak_snr(full_scale, adapted), 101.231, 3)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.041, 3)
    return img, adapted
def scale_image_brightness(image, scale):
    ''' scale image brightness by a factor '''
    adjusted_image = scale * img_as_float(image)
    # handle overflow:
    adjusted_image[adjusted_image >= 1.0] = 1.0
    # catch the warning about losing some accuracy when converting back to int types
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=UserWarning)
        if image.dtype == np.dtype('uint8'):
            adjusted_image = img_as_ubyte(adjusted_image)
        elif image.dtype == np.dtype('uint16'):
            adjusted_image = img_as_uint(adjusted_image)
    return adjusted_image
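
# A small worked example of scale_image_brightness; the arithmetic is exact
# for this input: 200/255 * 1.2 maps back to round(0.941 * 255) = 240.
import numpy as np

img = np.full((4, 4), 200, dtype=np.uint8)
print(scale_image_brightness(img, 1.2).max())  # 240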
def createContrastUsingSK(self):
    self._printTitle("createContrast with scikit image")
    # get list of ds images
    import glob
    import os
    dsImageList = glob.glob(self.dirs["regraw"] + "/*-DSx4.jpg")
    dsImageList.sort()

    from scipy import ndimage
    import scipy.misc
    import numpy as np
    import skimage
    # from skimage import color, filter, exposure, transform

    for file_to_use in dsImageList:
        print(file_to_use)
        outputname = os.path.join(self.dirs["regcontrast"],
                                  os.path.basename(file_to_use)).replace(".jpg", "-csk.jpg")
        if not os.path.exists(outputname):
            image = ndimage.imread(file_to_use)
            image_gray = skimage.img_as_uint(skimage.color.rgb2gray(image))
            img_eq = skimage.exposure.equalize_hist(image_gray)
            elevation = skimage.filter.sobel(img_eq)
            elevation = ndimage.gaussian_filter(elevation, 5)
            img_to_write = np.zeros((3000, 3000))
            y_offset = round((img_to_write.shape[0] - elevation.shape[0]) / 2)
            x_offset = round((img_to_write.shape[1] - elevation.shape[1]) / 2)
            img_to_write[y_offset:elevation.shape[0] + y_offset,
                         x_offset:elevation.shape[1] + x_offset] = elevation
            img_to_write = skimage.transform.pyramids.pyramid_reduce(img_to_write)
            scipy.misc.imsave(outputname, img_to_write)
def test_compare_uint_vs_float():
    # Filters applied on an 8-bit image or a 16-bit image (with only a real
    # 8 bits of dynamic range) should be identical.

    # Create a uint16 image and a float image covering the same dynamic range.
    image_uint = img_as_uint(data.camera())
    image_float = img_as_float(image_uint)

    methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'threshold',
               'meansubtraction', 'morph_contr_enh', 'pop', 'tophat']

    for method in methods:
        func = getattr(rank, method)
        out_u = func(image_uint, disk(3))
        out_f = func(image_float, disk(3))
        assert_array_equal(out_u, out_f)
def getdata(self, dtype=np.uint8, copy=True):
    a = self.array
    if a.dtype == dtype:
        if copy:  # copy so behavior is consistent with the converting branches below
            a = np.copy(self.array)
    elif dtype == np.float:
        a = skimage.img_as_float(a, copy)
    elif dtype == np.int16:
        a = skimage.img_as_int(a, copy)
    elif dtype == np.uint16:
        a = skimage.img_as_uint(a, copy)
    elif dtype == np.uint8:
        a = skimage.img_as_ubyte(a, copy)
    else:
        pass  # keep the wrong type for now and see what happens
    return a
def test_adapthist_color():
    """Test an RGB color uint16 image
    """
    img = skimage.img_as_uint(data.lena())
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        hist, bin_centers = exposure.histogram(img)
        assert len(w) > 0
    adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
    assert_almost_equal = np.testing.assert_almost_equal
    assert adapted.min() == 0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape
    full_scale = skimage.exposure.rescale_intensity(img)
    assert_almost_equal(peak_snr(full_scale, adapted), 102.940, 3)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0110, 3)
    return data, adapted
def test_adapthist_color():
    """Test an RGB color uint16 image
    """
    img = skimage.img_as_uint(data.astronaut())
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        hist, bin_centers = exposure.histogram(img)
        assert len(w) > 0
    with expected_warnings(['precision loss']):
        adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
    assert adapted.min() == 0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape
    full_scale = skimage.exposure.rescale_intensity(img)
    assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 1)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.02, 2)
    return data, adapted
def calc_regions(input_image, split_num, max_eigen_diff, max_merged_num, path):
    # note: the original referenced an undefined name `image` here
    regions = init_regions(img_as_uint(rgb2hsv(input_image)), split_num)
    new_set_len = 0
    old_set_len = len(regions)
    while new_set_len < old_set_len:
        old_set_len = len(regions)
        regions = iterate_regions(regions, max_eigen_diff, max_merged_num)
        new_set_len = len(regions)
    _logging_regions(regions)
    output_image = input_image
    for index, region in enumerate(regions):
        edge = make_region_edge(region)
        output_image = edge.draw(output_image, [255, 255, 255])
    io.imsave(os.path.join(path,
                           str(split_num) + '-' +
                           "{:10.2f}".format(max_eigen_diff) + '-' +
                           str(max_merged_num) + '-.jpg'),
              output_image)
def rescale16bit(imgIn, verbose=False):
    # if imgIn.max() < 2**16:
    #     return imgIn.astype(np.uint16)
    # print("rescale16bit: WARNING imgIn has values greater than 2^16, rescaling (max: ", imgIn.max(), ")")
    # return exposure.rescale_intensity(imgIn, in_range='uint16')  # .astype(np.uint16)
    # imgIn /= imgIn.max()
    if imgIn.min() < 0:
        imgIn += abs(imgIn.min())
    imgOut = exposure.rescale_intensity(imgIn, in_range='uint16', out_range='uint16')
    if imgOut.min() < 0:
        print("rescale16bit: WARNING imgOut has negative value")
    # imgOut *= 2**16
    imgOut = imgOut.astype(np.uint16)
    out = img_as_uint(imgOut)
    if verbose:
        print("rescale16bit")
        print("type(image) ", type(out))
        print("type(image[0][0]) ", type(out[0][0]))
    return out
def test_save_buttons():
    viewer = get_image_viewer()
    sv = SaveButtons()
    viewer.plugins[0] += sv

    import tempfile
    fid, filename = tempfile.mkstemp(suffix='.png')
    os.close(fid)

    timer = QtCore.QTimer()
    timer.singleShot(100, QtGui.QApplication.quit)

    sv.save_to_stack()
    sv.save_to_file(filename)

    img = data.imread(filename)
    assert_almost_equal(img, img_as_uint(viewer.image))

    img = io.pop()
    assert_almost_equal(img, viewer.image)

    os.remove(filename)