def test_adapthist_grayscale():
    """Test a grayscale float image
    """
    img = skimage.img_as_float(data.astronaut())
    img = rgb2gray(img)
    img = np.dstack((img, img, img))
    with expected_warnings(['precision loss|non-contiguous input',
                            'deprecated']):
        adapted_old = exposure.equalize_adapthist(img, 10, 9, clip_limit=0.01,
                                                  nbins=128)
        adapted = exposure.equalize_adapthist(img, kernel_size=(57, 51),
                                              clip_limit=0.01, nbins=128)
    assert img.shape == adapted.shape
    assert_almost_equal(peak_snr(img, adapted), 102.078, 3)
    assert_almost_equal(norm_brightness_err(img, adapted), 0.0529, 3)
    return data, adapted

def extract_patches(path, numPatchesPerImage, patchSize):
    """
    :param path: path to an RGB fundus image
    :param numPatchesPerImage: number of patches to extract per image
    :param patchSize: patch is n x n in size
    :return: patches: matrix with an image patch in each row
    """
    img = load(path)
    img = img[:, :, 1]

    # contrast enhancement
    img = equalize_adapthist(img)
    windows = view_as_windows(img, (patchSize, patchSize))

    j = 0
    patches = np.zeros((numPatchesPerImage, patchSize * patchSize))
    while j < numPatchesPerImage:
        sx = np.random.randint(0, windows.shape[0] - 1)
        sy = np.random.randint(0, windows.shape[1] - 1)  # second axis (was shape[0], a likely bug)
        x = (patchSize // 2 - 1) + sx
        y = (patchSize // 2 - 1) + sy
        r = (img.shape[0] // 2) - 1
        # keep only patches whose centre falls inside the circular fundus region;
        # otherwise retry with a new random position
        if np.sqrt((x - r) ** 2 + (y - r) ** 2) < r:
            patch = windows[sx, sy, :].flatten()
            patches[j, :] = patch
            j += 1

    return patches

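# Usage sketch for extract_patches above (assumptions, not from the original
# source: the helper and its imports are in scope, and 'fundus_001.png' is a
# hypothetical RGB fundus image on disk):
if __name__ == '__main__':
    patches = extract_patches('fundus_001.png', numPatchesPerImage=50,
                              patchSize=32)
    print(patches.shape)  # (50, 1024)
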
def get_data(mypath):
    t0 = time.time()
    print(mypath)
    n = 100000
    data = []
    paths = []
    for i in range(n):
        if i % 100 == 0 and i > 0:
            elapsed = time.time() - t0
            left = n - i
            rate = i / elapsed
            ETA = left / rate
            print("ETA: {0}min".format(int(ETA / 60)))
        path = join(mypath, str(i)) + '.fits'
        if not os.path.exists(path):
            continue
        # read the data
        img = pyfits.getdata(path, 0, memmap=False)
        # take the absolute value (negative values are noise)
        img_adapteq = np.abs(img)
        # preprocessing; comment this line out to skip preprocessing entirely
        img_adapteq = exposure.equalize_adapthist(np.log(img_adapteq + 1.0),
                                                  clip_limit=0.5,
                                                  kernel_size=(4, 4))
        # saving the paths makes it possible to map each array back to its
        # image on disk
        paths.append(path)
        # add data to the list
        data.append(img_adapteq)

def set_roi_images(self):
    for i, rois_ in enumerate(self.roi_sets):
        temp_im = rois_[0]['patches'][self.roi_idx][self._show_im[i]]
        temp_im /= np.max(temp_im)
        im_to_set = exposure.equalize_adapthist(temp_im, clip_limit=.005)
        #im_to_set *= (im_to_set+sobel(im_to_set))
        #im_to_set = temp_im
        self.imgs[i].setImage(im_to_set, autolevels=1)
        #if 'centroid_patches' in rois_[0].keys():
        #    self.centroid_patches[i].setImage(rois_[0]['centroid_patches'][self.roi_idx], autolevels=1)

        m_ = rois_[0]['masks'][self.roi_idx]
        #print(np.mean(np.array(np.where(m_)), axis=1))
        #print(m_.shape == (100, 100))
        #print(np.all([iii.shape == (100, 100) for iii in rois_[0]['masks']]), i)
        m2_ = np.dstack([m_, np.zeros(m_.shape), np.zeros(m_.shape), m_])
        self.masks[i].setImage(m2_)
        self.masks[i].setOpacity(.2)

        if rois_[0]['isPresent'][self.roi_idx] == 0:
            fr = self.redframe
        else:
            fr = self.greenframe
        self.frames[i].setImage(fr)

        if rois_[0]['drawn_onday'][self.roi_idx]:
            self.drawnTexts[i].setText('Drawn On Day', color=[0, 0, 250])
        else:
            self.drawnTexts[i].setText('Copied', color=[250, 0, 0])

        self.confTxts[i].setText(self.confidence_labels[(rois_[0]['confidence'][self.roi_idx])])

def pre_process(y_dict, train_directories, images, output_shape,
                adaptive_histogram, jobid, arraysize, clip_limit=0.03):
    # Store preprocessed images
    X = []
    y = []
    for train_directory in train_directories:
        # Get valid training images
        filenames = []
        for filename in os.listdir(train_directory):
            if filename.endswith(".jpeg") and filename.split('.')[0] in images:
                filenames.append(filename)

        # slice the file list for this array job (integer division so the
        # indices stay ints under Python 3)
        start = len(filenames) // arraysize * jobid
        end = len(filenames) // arraysize * (jobid + 1)
        if jobid + 1 == arraysize:
            end = len(filenames)

        # preprocess each image
        for filename in filenames[start:end]:
            im = io.imread(train_directory + "/" + filename)
            im = rgb2gray(im)
            im = resize(im, output_shape)
            if adaptive_histogram:
                im = exposure.equalize_adapthist(im, clip_limit=clip_limit)
            X.append(im.flatten())
            y.append(y_dict[filename.split(".jpeg")[0]])
    return X, y

def find_blobs(filename):
    feature = ""
    raw_image = io.imread(filename)
    for channel in range(0, 4):
        if channel < 3:
            image = raw_image[:, :, channel]
            image_gray = rgb2gray(image)
            # Smoothing
            image_gray = img_as_ubyte(image_gray)
            image_gray = mean_bilateral(image_gray.astype(numpy.uint16),
                                        disk(20), s0=10, s1=10)
            # Increase contrast
            image_gray = exposure.equalize_adapthist(image_gray,
                                                     clip_limit=0.03)
        # Find blobs (for channel 3 this reuses the last computed image_gray)
        blobs_doh = blob_doh(image_gray, min_sigma=1, max_sigma=20,
                             threshold=.005)
        count = 0
        for blob in blobs_doh:
            y, x, r = blob
            # `distance` is a module-level constant defined elsewhere:
            # the squared radius of the region of interest around (400, 400)
            if (x - 400) ** 2 + (y - 400) ** 2 > distance:
                continue
            count = count + 1
        feature = feature + " " + str(channel + 1) + ":" + str(count)
    return feature

def imagefile2dat(imageFilename, rotate=False, overwrite=False):
    """Load an image file and save in a format to be read by C code"""
    global m
    global n
    global fringeDatFilename
    global wrappedDatFilename

    # read image file
    orig = io.imread(imageFilename, as_grey=True)
    img = exposure.equalize_adapthist(orig)
    img = exposure.rescale_intensity(img, out_range=(0, 255))
    if rotate:
        img = np.transpose(img)
    n = len(img)
    m = len(img[0])

    fileroot, ext = os.path.splitext(imageFilename)
    fringeDatFilename = fileroot + '.dat'
    wrappedDatFilename = fileroot + 'W.dat'
    if not os.path.isfile(fringeDatFilename) or overwrite:
        print('Writing ' + fringeDatFilename)
        # write in the proper binary format
        data = np.reshape(np.transpose(img), (n * m, 1))
        newFile = open(fringeDatFilename, "wb")
        newFile.write(pack(str(n * m) + 'B', *data))
        newFile.close()
    else:
        print('Skipped overwriting ' + fringeDatFilename)
    return img

def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from skimage import exposure, img_as_int

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_enh.nii.gz' % fname)

    im = nb.load(in_file)
    imdata = im.get_data()
    imshape = im.get_shape()

    if in_mask is not None:
        msk = nb.load(in_mask).get_data()
        msk[msk > 0] = 1
        msk[msk < 1] = 0
        imdata = imdata * msk

    immin = imdata.min()
    imdata = (imdata - immin).astype(np.uint16)

    adapted = exposure.equalize_adapthist(imdata.reshape(imshape[0], -1),
                                          clip_limit=clip_limit)

    nb.Nifti1Image(adapted.reshape(imshape), im.get_affine(),
                   im.get_header()).to_filename(out_file)

    return out_file

def Image_ws_tranche(image):
    laser = Detect_laser(image)
    laser_tranche = tranche_image(laser, 60)

    image_g = skimage.color.rgb2gray(image)
    image_g = image_g * laser_tranche

    image_med = rank2.median((image_g * 255).astype('uint8'), disk(8))
    image_clahe = exposure.equalize_adapthist(image_med, clip_limit=0.03)
    image_clahe_stretch = exposure.rescale_intensity(image_clahe,
                                                     out_range=(0, 256))

    image_grad = rank2.gradient(image_clahe_stretch, disk(3))
    image_grad_mark = image_grad < 20
    image_grad_forws = rank2.gradient(image_clahe_stretch, disk(1))
    image_grad_mark_closed = closing(image_grad_mark, disk(1))

    Labelised = (skimage.measure.label(image_grad_mark_closed, 8, 0)) + 1
    Watersheded = watershed(image_grad_forws, Labelised)

    cooc = coocurence_liste(Watersheded, laser, 3)
    x, y = compte_occurences(cooc)
    return x, y

def equalize_hist_adapt(img=None, window_shape=(10, 10), nbins=256):
    '''
    Contrast Limited Adaptive Histogram Equalization (CLAHE).
    Increases local contrast.

    Parameters
    ----------
    img : array_like
        Single image as numpy array or multiple images as array-like object
    window_shape : tuple of integers
        Specifies the shape of the window as follows (dx, dy)
    nbins : integer
        Number of bins to calculate histogram

    References
    ----------
    .. [1] http://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_local_equalize.html  # noqa
    .. [2] https://en.wikipedia.org/wiki/Histogram_equalization
    '''
    minimum = img.min()
    maximum = img.max()

    # rescale_intensity takes ranges as keyword tuples, not bare positionals
    img = rescale_intensity(img, out_range=(0, 1))
    img = exposure.equalize_adapthist(img, kernel_size=window_shape,
                                      nbins=nbins)
    img_out = rescale_intensity(img, out_range=(minimum, maximum))
    return img_out

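# Minimal usage sketch for equalize_hist_adapt above (assumptions, not from
# the original source: the function and its imports are in scope; a bundled
# scikit-image sample stands in for real input):
if __name__ == '__main__':
    from skimage import data
    cam = data.camera().astype(float)
    out = equalize_hist_adapt(cam, window_shape=(32, 32), nbins=256)
    print(out.min(), out.max())  # restored to the input range by construction
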
def returnProcessedImage(que, folder, img_flist):
    X = []
    for fname in img_flist:
        cur_img = imread(folder + '/' + fname, as_grey=True)
        cur_img = 1 - cur_img

        ######## randomly augment samples
        # random contrast change
        r_for_eq = random()
        cur_img = equalize_adapthist(cur_img, ntiles_x=8, ntiles_y=8,
                                     clip_limit=(r_for_eq + 0.5) / 3)

        # random morphological operation
        r_for_mf_1 = random()
        if 0.05 < r_for_mf_1 < 0.25:  # small vessel
            selem1 = disk(0.5 + r_for_mf_1)
            cur_img = dilation(cur_img, selem1)
            cur_img = erosion(cur_img, selem1)
        elif 0.25 < r_for_mf_1 < 0.5:  # large vessel
            selem2 = disk(2.5 + r_for_mf_1 * 3)
            cur_img = dilation(cur_img, selem2)
            cur_img = erosion(cur_img, selem2)
        elif 0.5 < r_for_mf_1 < 0.75:  # exudate
            selem1 = disk(9.21)
            selem2 = disk(7.21)
            dilated1 = dilation(cur_img, selem1)
            dilated2 = dilation(cur_img, selem2)
            cur_img = np.subtract(dilated1, dilated2)

        cur_img = img_as_float(cur_img)
        X.append([cur_img.tolist()])

    # X = np.array(X, dtype=theano.config.floatX)
    que.put(X)
    return X

def main(image):
    matplotlib.rcParams["font.size"] = 10

    def show_img(img, axes):
        """Plot the image as float"""
        # img = img_as_float(img)
        ax_img = axes
        ax_img.imshow(img, cmap=plt.cm.gray)
        ax_img.set_axis_off()
        return ax_img

    # Open and read in the FITS image
    try:
        fits = pyfits.open(image)
        # fits = Image.open(image)
    except IOError:
        print("Can not read the fits image: " + image + " !!")

    # Check the input image
    img = fits[0].data
    # img = np.array(fits)
    if img.ndim != 2:
        raise NameError("Data need to be a 2-D image !")

    # Logarithmic scaling of the image
    img_log = np.log10(img)
    img_log = img_as_float(img_log)

    # Contrast stretching (2nd and 98th percentiles)
    p2, p98 = np.percentile(img, (2, 98))
    img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))

    # Adaptive equalization
    img_new = bytescale(img_rescale)
    img_ahe = exposure.equalize_adapthist(img_new, ntiles_x=16, ntiles_y=16,
                                          clip_limit=0.05, nbins=256)
    img_ahe = img_as_float(img_ahe)

    # Display results
    fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 5))

    # Original image
    ax_img = show_img(img_log, axes[0])
    ax_img.set_title("Original")

    # Contrast-enhanced image
    ax_img = show_img(img_rescale, axes[1])
    ax_img.set_title("Rescale")

    # AHE-enhanced image
    ax_img = show_img(img_ahe, axes[2])
    ax_img.set_title("AHE")

    # Prevent overlap of y-axis labels
    fig.subplots_adjust(bottom=0.1, right=0.9, top=0.9, left=0.1, wspace=0.05)

    # Save a PNG file
    plt.gcf().savefig("ahe_test.png")

def equalize_adaptive(image, n_tiles=8, clip_limit=0.01):
    eqproj = equalize_adapthist(image,
                                ntiles_x=n_tiles,
                                ntiles_y=n_tiles,
                                clip_limit=clip_limit)
    return eqproj

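# On scikit-image >= 0.12 the ntiles_x/ntiles_y arguments used above were
# replaced by kernel_size. A sketch of the equivalent call on current
# versions (assumption: equalize_adapthist is in scope as above; tile counts
# are converted to an explicit kernel shape):
def equalize_adaptive_kernel(image, n_tiles=8, clip_limit=0.01):
    kernel = (image.shape[0] // n_tiles, image.shape[1] // n_tiles)
    return equalize_adapthist(image, kernel_size=kernel,
                              clip_limit=clip_limit)
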
def analyse(self, **kwargs):
    image_object = kwargs['image']
    if image_object is None:
        raise RuntimeError()

    # Read the image
    image = cv2.imread(self.image_utils.getOutputFilename(image_object.id))
    if image is None:
        print('File not found')
        return

    # Work on the green channel
    gray = image[:, :, 1]

    # Apply Otsu thresholding
    thresh = filters.threshold_otsu(gray)
    gray[gray < thresh] = 0

    # Apply histogram equalization
    gray = exposure.equalize_adapthist(gray) * 255

    # Create elevation map
    elevation_map = filters.sobel(gray)
    gray = gray.astype(int)

    # Create cell markers
    markers = numpy.zeros_like(gray)
    markers[gray < 100] = 2  # seen as white in plot
    markers[gray > 150] = 1  # seen as black in plot

    # Segment with watershed using the elevation map
    segmentation = morphology.watershed(elevation_map, markers)
    segmentation = ndi.binary_fill_holes(segmentation - 1)
    # labeled_image, n = ndi.label(segmentation)

    # Watershed with distance transform
    kernel = numpy.ones((5, 5), numpy.uint8)
    distance = ndi.distance_transform_edt(segmentation)
    distance2 = cv2.erode(distance, kernel)
    distance2 = cv2.dilate(distance2, kernel)
    local_max = peak_local_max(distance2, num_peaks=1, indices=False,
                               labels=segmentation)
    markers2 = ndi.label(local_max)[0]
    labels = morphology.watershed(-distance2, markers2, mask=segmentation)

    # Extract regions (caching signifies more memory use)
    regions = regionprops(labels, cache=True)

    # Filter out big wrong regions
    regions = [region for region in regions if region.area < 2000]

    # Set result
    result = str(len(regions))
    return result

def segment(self, image):
    img = equalize_adapthist(image)
    (x, y, c) = img.shape
    points = img.reshape(x * y, c)
    labels = self.logreg.predict(points)
    labeled_img = labels.reshape(x, y)
    (top, right, bottom, left) = self.get_bounding_rect(labeled_img)
    # return img[left-self.margin:right+self.margin, top-self.margin:bottom+self.margin]
    return img[left:right, top:bottom]

def equalize_stack(stack):
    stack_array = np.dstack([equalize_adapthist(stack.plane(n))
                             for n in range(stack.zdim)])
    s = Stack(stack_array)
    s.history = stack.history + ['equalized_stack']
    return s

def pre_process(y_dict, train_directories, valid_directories,
                test_directories, output_shape, adaptive_histogram,
                clip_limit=0.03):
    X_train = []; y_train = []
    X_test = []; y_test = []
    X_valid = []; y_valid = []

    for train_directory in train_directories:
        for filename in os.listdir(train_directory):
            if filename.endswith(".jpeg"):
                im = io.imread(train_directory + "/" + filename)
                im = rgb2gray(im)
                im = resize(im, output_shape)
                if adaptive_histogram:
                    im = exposure.equalize_adapthist(im, clip_limit=clip_limit)
                X_train.append(im.flatten())
                y_train.append(y_dict[filename.split(".jpeg")[0]])

    for valid_directory in valid_directories:
        for filename in os.listdir(valid_directory):
            if filename.endswith(".jpeg"):
                im = io.imread(valid_directory + "/" + filename)
                im = rgb2gray(im)
                im = resize(im, output_shape)
                if adaptive_histogram:
                    im = exposure.equalize_adapthist(im, clip_limit=clip_limit)
                X_valid.append(im.flatten())
                y_valid.append(y_dict[filename.split(".jpeg")[0]])

    for test_directory in test_directories:
        for filename in os.listdir(test_directory):
            if filename.endswith(".jpeg"):
                im = io.imread(test_directory + "/" + filename)
                im = rgb2gray(im)
                im = resize(im, output_shape)
                if adaptive_histogram:
                    im = exposure.equalize_adapthist(im, clip_limit=clip_limit)
                X_test.append(im.flatten())
                y_test.append(y_dict[filename.split(".jpeg")[0]])

    y_train = label_binarize(y_train, classes=[0, 1, 2, 3, 4])
    y_test = label_binarize(y_test, classes=[0, 1, 2, 3, 4])
    y_valid = label_binarize(y_valid, classes=[0, 1, 2, 3, 4])

    return X_train, y_train, X_valid, y_valid, X_test, y_test

def single_img_resize(img, img_rows, img_cols, equalize=True):
    new_img = np.zeros([img_rows, img_cols])
    if equalize:
        img = equalize_adapthist(img, clip_limit=0.05)
        # img = clahe.apply(cv2.convertScaleAbs(img))
    new_img = cv2.resize(img, (img_rows, img_cols),
                         interpolation=cv2.INTER_NEAREST)
    return new_img

def equalize_adaptive(image, n_tiles=8, clip_limit=0.01,
                      name='equalize_adaptive'):
    eqproj = equalize_adapthist(image.image_array,
                                ntiles_x=n_tiles,
                                ntiles_y=n_tiles,
                                clip_limit=clip_limit)
    ia = ImageArray(eqproj, name)
    ia.history = image.history + [name]
    return ia

def transform(self, Xb, yb):
    Xb, yb = super(EqualizeAdaptHistBatchIteratorMixin, self).transform(Xb, yb)
    # TODO doesn't work for greyscale images
    Xb_transformed = np.asarray([
        equalize_adapthist(img,
                           ntiles_x=self.eqadapthist_ntiles_x,
                           ntiles_y=self.eqadapthist_ntiles_y,
                           clip_limit=self.eqadapthist_clip_limit,
                           nbins=self.eqadapthist_nbins)
        for img in Xb.transpose(0, 2, 3, 1)])
    # Back from b01c to bc01
    Xb_transformed = Xb_transformed.transpose(0, 3, 1, 2).astype(np.float32)
    return Xb_transformed, yb

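# One possible fix for the greyscale TODO above (an assumption, not part of
# the original mixin): skip the channel transpose when the batch has a single
# channel, since equalize_adapthist accepts 2-D arrays directly.
# Assumes Xb has shape (num_images, channels, height, width).
def _equalize_grayscale_batch(Xb, clip_limit=0.01, nbins=256):
    import numpy as np
    from skimage.exposure import equalize_adapthist

    out = np.asarray([equalize_adapthist(img[0], clip_limit=clip_limit,
                                         nbins=nbins)
                      for img in Xb])
    return out[:, np.newaxis, :, :].astype(np.float32)  # back to (N, 1, H, W)
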
def test_adapthist_grayscale():
    """Test a grayscale float image
    """
    img = skimage.img_as_float(data.lena())
    img = rgb2gray(img)
    img = np.dstack((img, img, img))
    adapted = exposure.equalize_adapthist(img, 10, 9, clip_limit=0.01,
                                          nbins=128)
    assert_almost_equal = np.testing.assert_almost_equal
    assert img.shape == adapted.shape
    assert_almost_equal(peak_snr(img, adapted), 97.531, 3)
    assert_almost_equal(norm_brightness_err(img, adapted), 0.0313, 3)
    return data, adapted

def test_adapthist_scalar():
    """Test a scalar uint8 image
    """
    img = skimage.img_as_ubyte(data.moon())
    adapted = exposure.equalize_adapthist(img, kernel_size=64, clip_limit=0.02)
    assert adapted.min() == 0.0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape

    full_scale = skimage.exposure.rescale_intensity(skimage.img_as_float(img))
    assert_almost_equal(peak_snr(full_scale, adapted), 102.066, 3)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.038, 3)

def test_adapthist_grayscale():
    '''Test a grayscale float image
    '''
    img = skimage.img_as_float(data.lena())
    img = rgb2gray(img)
    img = np.dstack((img, img, img))
    adapted = exposure.equalize_adapthist(img, 10, 9, clip_limit=0.01,
                                          nbins=128)
    assert_almost_equal = np.testing.assert_almost_equal
    assert img.shape == adapted.shape
    assert peak_snr(img, adapted) > 95.0
    assert norm_brightness_err(img, adapted) < 0.05
    return data, adapted

def transform(f):
    path = f
    city_dir_name = f.split("/")[-3]
    image = tifffile.imread(path)
    bands = []
    # band-wise CLAHE; equalize_adapthist returns floats in [0, 1], so scale
    # back to the 11-bit range (2047 = 2**11 - 1) before casting to uint16
    for band in range(8):
        bands.append(equalize_adapthist(image[..., band]) * 2047)
    img = np.array(np.stack(bands, axis=-1), dtype="uint16")
    clahe_city_dir = os.path.join(wdata_dir, city_dir_name)
    os.makedirs(clahe_city_dir, exist_ok=True)
    mul_dir = os.path.join(clahe_city_dir, 'CLAHE-MUL-PanSharpen')
    os.makedirs(mul_dir, exist_ok=True)
    tifffile.imsave(os.path.join(mul_dir, f.split("/")[-1]), img,
                    planarconfig='contig')

def test_adapthist_color():
    '''Test an RGB color uint16 image
    '''
    img = skimage.img_as_uint(data.lena())
    adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
    assert_almost_equal = np.testing.assert_almost_equal
    assert adapted.min() == 0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape

    full_scale = skimage.exposure.rescale_intensity(img)
    assert peak_snr(img, adapted) > 95.0
    assert norm_brightness_err(img, adapted) < 0.05
    return data, adapted

def test_adapthist_alpha():
    """Test an RGBA color image
    """
    img = skimage.img_as_float(data.astronaut())
    alpha = np.ones((img.shape[0], img.shape[1]), dtype=float)
    img = np.dstack((img, alpha))
    with expected_warnings(['precision loss']):
        adapted = exposure.equalize_adapthist(img)
    assert adapted.shape != img.shape
    img = img[:, :, :3]
    full_scale = skimage.exposure.rescale_intensity(img)
    assert img.shape == adapted.shape
    assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 2)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0248, 3)

def test_adapthist_alpha():
    """Test an RGBA color image
    """
    img = skimage.img_as_float(data.lena())
    alpha = np.ones((img.shape[0], img.shape[1]), dtype=float)
    img = np.dstack((img, alpha))
    adapted = exposure.equalize_adapthist(img)
    assert adapted.shape != img.shape
    img = img[:, :, :3]
    full_scale = skimage.exposure.rescale_intensity(img)
    assert img.shape == adapted.shape
    assert_almost_equal = np.testing.assert_almost_equal
    assert_almost_equal(peak_snr(full_scale, adapted), 106.86, 2)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0509, 3)

def preprocess3(X, weight=0.1):
    """
    Pre-process images that are fed to the neural network.
    :param X: X
    """
    progbar = Progbar(X.shape[0])  # progress bar for pre-processing status tracking
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            X[i, j] = denoise_tv_chambolle(X[i, j], weight=weight,
                                           multichannel=False)
            X[i, j] = equalize_adapthist(X[i, j])
            # X[i, j] = cut(X[i, j], 0.33, 0.66)
        progbar.add(1)
    return X

def test_adapthist_scalar():
    '''Test a scalar uint8 image
    '''
    img = skimage.img_as_ubyte(data.moon())
    adapted = exposure.equalize_adapthist(img, clip_limit=0.02)
    assert adapted.min() == 0
    assert adapted.max() == (1 << 16) - 1
    assert img.shape == adapted.shape

    full_scale = skimage.exposure.rescale_intensity(skimage.img_as_uint(img))
    assert_almost_equal = np.testing.assert_almost_equal
    assert_almost_equal(peak_snr(full_scale, adapted), 101.231, 3)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.041, 3)
    return img, adapted

def preprocess(img):
    if img.ndim == 3:
        I = clahe_each(img)
    else:
        I = equalize_adapthist(img)
    try:
        mask = img.mask
    except AttributeError:
        pass
    else:
        I = ma.masked_array(I, mask=mask)
    return I

def AHE(img):
    img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
    return img_adapteq

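# Quick comparison sketch (not from the original source): global vs. adaptive
# equalization on a bundled sample image, using the wrapper above.
if __name__ == '__main__':
    from skimage import data, exposure
    moon = data.moon()
    globally_eq = exposure.equalize_hist(moon)
    locally_eq = AHE(moon)
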
ax_img, ax_hist, ax_cdf = plot_img_and_hist(actual, axes[:, 2])
ax_img.set_title('Tensorflow CLAHE')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))

# prevent overlap of y-axis labels
fig.tight_layout()


def compare(expected, actual):
    return (expected == actual).all()


# Load an image to use for testing
test_image = data.moon()
expected_result = exposure.equalize_adapthist(test_image, clip_limit=0.03)
#plot_comparison(test_image, expected_result, expected_result)
print(compare(expected_result, expected_result))
#plt.show()

# local module (shadows the scikit-image function name)
import equalize_adapthist

a, b = equalize_adapthist.histogram(test_image)
equalize_adapthist.tfhist(test_image)
print(a)
print(b)

# get equalization

def generate_image(image, dz=1, dx=1, dynamic_range=(0, 1), hist_eq=False,
                   post_proc=False, z_label='z', x_label='x',
                   filename='./bmode.png', save_flag=True,
                   display_flag=False):
    """
    display/save output image with user-specified dynamic range

    :param image: input image for display
    :param dz: axial sampling interval
    :param dx: lateral sampling interval
    :param dynamic_range: displayed dynamic range
    :param hist_eq: enable to perform histogram equalization
    :param post_proc: apply image post-processing
    :param z_label: label for z (axial) axis
    :param x_label: label for x (lateral) axis
    :param filename: file path and name of saved .png
    :param save_flag: enable to save .png
    :param display_flag: enable to display image
    """
    if not display_flag:
        import matplotlib
        matplotlib.use('Agg')
        msg = 'WARNING [generate_image] Using Agg matplotlib backend.'
        print(msg)
        logging.warning(msg)

    import matplotlib.pyplot as plt
    import numpy as np

    uint16_scale = 65535

    # copy to a list: the function mutates the bounds below, and a mutable
    # default argument would leak those changes across calls
    dynamic_range = list(dynamic_range)

    # generate lat and axi meshes based on pixel spacing (dz and dx)
    axi, lat = calc_ticks(image, dz, dx)

    if dynamic_range[0] > dynamic_range[1]:
        tmp = dynamic_range
        dynamic_range[0] = tmp[1]
        dynamic_range[1] = tmp[0]
        msg = 'WARNING [generate_image] Dynamic range bounds out of order. ' \
              'Reversing bounds for display...'
        print(msg)
        logging.warning(msg)

    # perform post-processing on full image
    if post_proc:
        from skimage import filters
        msg = '[generate_image] Performing image post-processing...'
        logging.debug(msg)
        print(msg)
        raw = image
        image = filters.gaussian(raw, sigma=0.75)

    # clip image bounds based on specified dynamic range
    if dynamic_range[0] < np.min(image):
        dynamic_range[0] = np.min(image)
    image = np.clip(image, dynamic_range[0], dynamic_range[1])

    # perform histogram equalization on clipped and normalized image
    if hist_eq:
        from skimage import exposure
        msg = '[generate_image] Performing histogram equalization...'
        logging.debug(msg)
        print(msg)
        image += np.abs(dynamic_range[0])
        image /= np.abs(dynamic_range[0])
        image *= uint16_scale
        image = exposure.equalize_adapthist(image.astype('uint16'),
                                            clip_limit=0.005)

    # display image with geometry specified by lat and axi meshes
    plt.pcolormesh(lat, axi, image, cmap='gray')
    plt.axis('image')
    plt.xlabel(x_label)
    plt.ylabel(z_label)
    plt.gca().invert_yaxis()

    if save_flag:
        create_dir(filename)
        try:
            plt.savefig(filename)
            msg = '[generate_image] Image saved to ' + filename
            logging.info(msg)
            print(msg)
        except OSError as err:
            msg = 'ERROR [generate_image] Failed to save PNG : {0}'.format(err)
            logging.error(msg)
            print(msg)
            sys.exit()

    if display_flag:
        try:
            msg = '[generate_image] Displaying image in matplotlib figure...'
            print(msg)
            logging.debug(msg)
            plt.show()
        except:
            msg = 'ERROR [generate_image] matplotlib backend failed to ' \
                  'display image. Exiting script...'
            logging.error(msg)
            print(msg)
            sys.exit()

def channelwise_ahe(img):
    img_ahe = img.copy()
    for i in range(img.shape[2]):
        img_ahe[:, :, i] = exposure.equalize_adapthist(img[:, :, i],
                                                       clip_limit=0.03)
    return img_ahe

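# Usage sketch for channelwise_ahe above (assumption, not from the original
# source: a float RGB input; with an integer array the in-place channel
# assignment would truncate the [0, 1] floats that equalize_adapthist
# returns).
if __name__ == '__main__':
    from skimage import data, img_as_float
    rgb = img_as_float(data.astronaut())
    enhanced = channelwise_ahe(rgb)
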
def main():
    """Entry point"""
    parser = ArgumentParser(
        description='Batch export freesurfer results to animated gifs',
        formatter_class=RawTextHelpFormatter)
    g_input = parser.add_argument_group('Inputs')
    g_input.add_argument('-S', '--subjects-dir', action='store',
                         default=os.getcwd())
    g_input.add_argument('-s', '--subject-id', action='store')
    g_input.add_argument('-t', '--temp-dir', action='store')
    g_input.add_argument('--keep-temp', action='store_true', default=False)
    g_input.add_argument('--zoom', action='store_true', default=False)
    g_input.add_argument('--hist-eq', action='store_true', default=False)
    g_outputs = parser.add_argument_group('Outputs')
    g_outputs.add_argument('-o', '--output-dir', action='store',
                           default='fs2gif')

    opts = parser.parse_args()

    if opts.temp_dir is None:
        tmpdir = mkdtemp()
    else:
        tmpdir = op.abspath(opts.temp_dir)
        try:
            os.makedirs(tmpdir)
        except OSError as exc:
            if exc.errno != EEXIST:
                raise exc

    out_dir = op.abspath(opts.output_dir)
    try:
        os.makedirs(out_dir)
    except OSError as exc:
        if exc.errno != EEXIST:
            raise exc

    subjects_dir = op.abspath(opts.subjects_dir)
    subject_list = opts.subject_id
    if subject_list is None:
        subject_list = [
            name for name in os.listdir(subjects_dir)
            if op.isdir(os.path.join(subjects_dir, name))
        ]
    elif isinstance(subject_list, string_types):
        if '*' not in subject_list:
            subject_list = [subject_list]
        else:
            all_dirs = [
                op.join(subjects_dir, name) for name in os.listdir(subjects_dir)
                if op.isdir(os.path.join(subjects_dir, name))
            ]
            pattern = glob.glob(
                op.abspath(op.join(subjects_dir, opts.subject_id)))
            subject_list = list(set(pattern).intersection(set(all_dirs)))

    environ = os.environ.copy()
    environ['SUBJECTS_DIR'] = subjects_dir
    # tcl_file = pkgr.resource_filename('structural_dhcp_mriqc', 'data/fsexport.tcl')
    tcl_contents = """
SetOrientation 0
SetCursor 0 128 128 128
SetDisplayFlag 3 0
SetDisplayFlag 22 1
set i 0
"""

    for sub_path in subject_list:
        subid = op.basename(sub_path)
        tmp_sub = op.join(tmpdir, subid)
        try:
            os.makedirs(tmp_sub)
        except OSError as exc:
            if exc.errno != EEXIST:
                raise exc

        niifile = op.join(tmp_sub, '%s.nii.gz') % subid
        ref_file = op.join(sub_path, 'mri', 'T1.mgz')
        sp.call(['mri_convert', op.join(sub_path, 'mri', 'norm.mgz'), niifile],
                cwd=tmp_sub)
        data = nb.load(niifile).get_data()
        data[data > 0] = 1

        # Compute brain bounding box
        indexes = np.argwhere(data)
        bbox_min = indexes.min(0)
        bbox_max = indexes.max(0) + 1
        center = np.average([bbox_min, bbox_max], axis=0)

        if opts.hist_eq:
            modnii = op.join(tmp_sub, '%s.nii.gz' % subid)
            ref_file = op.join(tmp_sub, '%s.mgz' % subid)
            img = nb.load(niifile)
            data = exposure.equalize_adapthist(img.get_data(), clip_limit=0.03)
            nb.Nifti1Image(data, img.get_affine(),
                           img.get_header()).to_filename(modnii)
            sp.call(['mri_convert', modnii, ref_file], cwd=tmp_sub)

        if not opts.zoom:
            # Export tiffs for both hemispheres
            tcl_file = op.join(tmp_sub, '%s.tcl' % subid)
            with open(tcl_file, 'w') as tclfp:
                tclfp.write(tcl_contents)
                tclfp.write(
                    'for { set slice %d } { $slice < %d } { incr slice } {' %
                    (bbox_min[2], bbox_max[2]))
                tclfp.write('    SetSlice $slice\n')
                tclfp.write('    RedrawScreen\n')
                tclfp.write('    SaveTIFF [format "%s/%s-' % (tmp_sub, subid) +
                            '%03d.tif" $i]\n')
                tclfp.write('    incr i\n')
                tclfp.write('}\n')
                tclfp.write('QuitMedit\n')
            sp.call(['tkmedit', subid, 'T1.mgz', 'lh.pial',
                     '-aux-surface', 'rh.pial', '-tcl', tcl_file], env=environ)
            # Convert to animated gif
            sp.call(['convert', '-delay', '10', '-loop', '0',
                     '%s/%s-*.tif' % (tmp_sub, subid),
                     '%s/%s.gif' % (out_dir, subid)])
        else:
            # Export tiffs for left hemisphere
            tcl_file = op.join(tmp_sub, 'lh-%s.tcl' % subid)
            with open(tcl_file, 'w') as tclfp:
                tclfp.write(tcl_contents)
                tclfp.write('SetZoomLevel 2')
                tclfp.write(
                    'for { set slice %d } { $slice < %d } { incr slice } {' %
                    (bbox_min[2], bbox_max[2]))
                tclfp.write('    SetZoomCenter %d %d $slice\n' %
                            (center[0] + 30, center[1] - 10))
                tclfp.write('    SetSlice $slice\n')
                tclfp.write('    RedrawScreen\n')
                tclfp.write('    SaveTIFF [format "%s/%s-lh-' %
                            (tmp_sub, subid) + '%03d.tif" $i]\n')
                tclfp.write('    incr i\n')
                tclfp.write('}\n')
                tclfp.write('QuitMedit\n')
            sp.call(['tkmedit', subid, 'norm.mgz', 'lh.white',
                     '-tcl', tcl_file], env=environ)

            # Export tiffs for right hemisphere
            tcl_file = op.join(tmp_sub, 'rh-%s.tcl' % subid)
            with open(tcl_file, 'w') as tclfp:
                tclfp.write(tcl_contents)
                tclfp.write('SetZoomLevel 2')
                tclfp.write(
                    'for { set slice %d } { $slice < %d } { incr slice } {' %
                    (bbox_min[2], bbox_max[2]))
                tclfp.write('    SetZoomCenter %d %d $slice\n' %
                            (center[0] - 30, center[1] - 10))
                tclfp.write('    SetSlice $slice\n')
                tclfp.write('    RedrawScreen\n')
                tclfp.write('    SaveTIFF [format "%s/%s-rh-' %
                            (tmp_sub, subid) + '%03d.tif" $slice]\n')
                tclfp.write('    incr i\n')
                tclfp.write('}\n')
                tclfp.write('QuitMedit\n')
            sp.call(['tkmedit', subid, 'norm.mgz', 'rh.white',
                     '-tcl', tcl_file], env=environ)

            # Convert to animated gifs
            sp.call(['convert', '-delay', '10', '-loop', '0',
                     '%s/%s-lh-*.tif' % (tmp_sub, subid),
                     '%s/%s-lh.gif' % (out_dir, subid)])
            sp.call(['convert', '-delay', '10', '-loop', '0',
                     '%s/%s-rh-*.tif' % (tmp_sub, subid),
                     '%s/%s-rh.gif' % (out_dir, subid)])

        if not opts.keep_temp:
            try:
                rmtree(tmp_sub)
            except:
                pass

print("Both clip limits must be provided. Ignoring clipping")

est_ts = np.round(data['timescale'], 8)
obs_ts = np.round(data['t'], 3)

plt.ion()
fig, (ax1, ax2) = plt.subplots(2, 1)
for i in range(est_ts.size):
    t_est = est_ts[i]
    j = np.searchsorted(obs_ts, t_est, side='right') - 1
    t_obs = obs_ts[j]

    fig.suptitle('Filename {}'.format(args.filename))

    im_orig = obs_frames[j, :, :].T
    im_norm = (im_orig - im_orig.min()) / (im_orig.max() - im_orig.min())
    im_eq = equalize_adapthist(im_norm, kernel_size=(24, 120))
    # im_eq = equalize_hist(im_norm)
    ax1.clear()
    if args.equalize:
        ax1.imshow(im_eq, cmap='gray')
    else:
        ax1.imshow(im_orig, cmap='gray')
    ax1.title.set_text('Observed Image for {:02f}'.format(t_obs))

    im_orig = est_frames[i, :, :].T
    im_norm = (im_orig - im_orig.min()) / (im_orig.max() - im_orig.min())
    im_eq = equalize_adapthist(im_norm, kernel_size=(24, 120))
    # im_eq = equalize_hist(im_norm)
    ax2.clear()
    if args.equalize:
        ax2.imshow(im_eq, cmap='gray')

'''
Created on Aug 6, 2019

@author: jsaavedr
Otsu
'''
import matplotlib.pyplot as plt
import basis
import pai_io
import skimage.exposure as exposure
import numpy as np

if __name__ == '__main__':
    #filename = '../images/gray/mri.tif'
    #filename = '../images/gray/im_3.tif'
    filename = '../images/gray/Lowcontr.tif'
    image = pai_io.imread(filename, as_gray=True)
    image_eq = basis.to_uint8(exposure.equalize_hist(image))
    image_eqa = basis.to_uint8(exposure.equalize_adapthist(image,
                                                           clip_limit=0.1))
    fig, xs = plt.subplots(1, 3)
    for i in range(3):
        xs[i].set_axis_off()
    xs[0].imshow(image, cmap='gray', vmin=0, vmax=255)
    xs[0].set_title('Original')
    xs[1].imshow(image_eq, cmap='gray', vmin=0, vmax=255)
    xs[1].set_title('Eq')
    xs[2].imshow(image_eqa, cmap='gray', vmin=0, vmax=255)
    xs[2].set_title('Adaptive Eq')
    plt.show()

print("[INFO] loading model....")
model = load_model(args["model"])

labelnames = open("signnames.csv").read().strip().split("\n")[1:]
labelnames = [l.split(",")[1] for l in labelnames]

print("[INFO] predicting...")
imagepath = list(paths.list_images(args["images"]))
#print(imagepath)
random.shuffle(imagepath)
imagePaths = imagepath[:25]
#print(imagePaths)

for (i, path) in enumerate(imagePaths):
    print(i)
    image = io.imread(path)
    image = transform.resize(image, (32, 32))
    image = exposure.equalize_adapthist(image, clip_limit=0.1)
    # note: equalize_adapthist already returns floats in [0, 1]; the extra
    # /255 presumably mirrors the training-time preprocessing
    image = image.astype("float") / 255.0
    image = np.expand_dims(image, axis=0)

    preds = model.predict(image)
    j = preds.argmax(axis=1)[0]
    label = labelnames[j]

    image = cv2.imread(path)
    image = imutils.resize(image, width=128)
    cv2.putText(image, label, (5, 15), cv2.FONT_HERSHEY_SIMPLEX,
                0.45, (0, 0, 255), 2)

    p = os.path.sep.join([args["examples"], "{}.png".format(i)])
    cv2.imwrite(p, image)

def standardize(self, x):
    """Apply the normalization configuration to a batch of inputs.

    # Arguments
        x: batch of inputs to be normalized.

    # Returns
        The inputs, normalized.
    """
    if self.preprocessing_function:
        x = self.preprocessing_function(x)
    if self.rescale:
        x *= self.rescale
    # x is a single image, so it doesn't have image number at index 0
    img_channel_axis = self.channel_axis - 1
    if self.samplewise_center:
        x -= np.mean(x, axis=img_channel_axis, keepdims=True)
    if self.samplewise_std_normalization:
        x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)

    if self.featurewise_center:
        if self.mean is not None:
            x -= self.mean
        else:
            warnings.warn('This ImageDataGenerator specifies '
                          '`featurewise_center`, but it hasn\'t '
                          'been fit on any training data. Fit it '
                          'first by calling `.fit(numpy_data)`.')
    if self.featurewise_std_normalization:
        if self.std is not None:
            x /= (self.std + 1e-7)
        else:
            warnings.warn('This ImageDataGenerator specifies '
                          '`featurewise_std_normalization`, but it hasn\'t '
                          'been fit on any training data. Fit it '
                          'first by calling `.fit(numpy_data)`.')
    if self.zca_whitening:
        if self.principal_components is not None:
            flatx = np.reshape(x, (x.size))
            whitex = np.dot(flatx, self.principal_components)
            x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
        else:
            warnings.warn('This ImageDataGenerator specifies '
                          '`zca_whitening`, but it hasn\'t '
                          'been fit on any training data. Fit it '
                          'first by calling `.fit(numpy_data)`.')

    # Custom CLAHE step for image preprocessing
    if self.adaptive_equalization:
        if np.shape(x)[2] == 1:
            x = exposure.equalize_adapthist(x, clip_limit=0.03, nbins=48)
        elif np.shape(x)[2] == 3:
            # equalize only the brightness channel of an HSV conversion
            # (elif keeps a 1-channel image from also hitting the else branch)
            x = img_as_float(x)
            img_hsv = color.rgb2hsv(x)
            brightness = img_hsv[:, :, 2]
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                b_adapteq = exposure.equalize_adapthist(brightness,
                                                        clip_limit=0.03,
                                                        nbins=48)
            img_hsv[:, :, 2] = b_adapteq
            x = color.hsv2rgb(img_hsv)
        else:
            warnings.warn('Improper Image Size - Expected Format - ?,?,3')
    return x

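# A standalone sketch of the HSV-value CLAHE idea used above (assumptions,
# not from the original source: a float RGB input in [0, 1]; clip_limit and
# nbins copied from the method).
def clahe_on_value_channel(rgb):
    from skimage import color, exposure
    hsv = color.rgb2hsv(rgb)
    hsv[:, :, 2] = exposure.equalize_adapthist(hsv[:, :, 2],
                                               clip_limit=0.03, nbins=48)
    return color.hsv2rgb(hsv)
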
# Import the necessary modules
from skimage import data, exposure

# Load the image
original_image = data.coffee()

# Apply adaptive equalization to the original image
adapthist_eq_image = exposure.equalize_adapthist(original_image,
                                                 clip_limit=0.03)

# Compare the original image to the equalized one
show_image(original_image)
show_image(adapthist_eq_image, '#ImageProcessingDatacamp')

                     dtype=numpy.uint8)
# vigra.writeHDF5(exposure.equalize_hist(img), "volume_big_stuff_classifier_also_from_C_equalized.h5", "data")

# mask_input = vigra.dropChannelAxis(mask_input)
# img_mask = numpy.logical_not(mask_input[1, :, :])

# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))

# Equalization
img_eq = exposure.equalize_hist(img)

# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)

# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 4), dtype=np.object)
axes[0, 0] = fig.add_subplot(2, 4, 1)
for i in range(1, 4):
    axes[0, i] = fig.add_subplot(2, 4, 1 + i,
                                 sharex=axes[0, 0], sharey=axes[0, 0])
for i in range(0, 4):
    axes[1, i] = fig.add_subplot(2, 4, 5 + i)

ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])

def enhance_contrast(self, img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = equalize_adapthist(img)
    img = rescale_intensity(img, out_range='uint8').astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img

#     return np.stack(conv_bucket, axis=2).astype("uint8")

# kernel_sizes = [9, 15, 30, 60]
# fig, axs = plt.subplots(nrows=1, ncols=len(kernel_sizes), figsize=(15, 15))

# pic = imageio.imread('Image Processing\image\cheon.jpg')
# for k, ax in zip(kernel_sizes, axs):
#     kernel = np.ones((k, k))
#     kernel /= np.sum(kernel)
#     ax.imshow(Convolution(pic, kernel))
#     ax.set_title("Convolved By Kernel: {}".format(k))
#     ax.set_axis_off()
# plt.show()

# edge kernel
img = color.rgb2gray(image)
kernel = np.array([[-1, -1, -1],
                   [-1,  8, -1],
                   [-1, -1, -1]])
edges = convolve2d(img, kernel, mode='valid')

edges_equalized = exposure.equalize_adapthist(edges / np.max(np.abs(edges)),
                                              clip_limit=0.03)
plt.figure(figsize=(5, 5))
plt.imshow(edges_equalized, cmap='gray')
plt.axis('off')
plt.show()

def ada_hist_eq(self, im, cl):
    # adaptive histogram equalisation
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        new_im = exposure.equalize_adapthist(im, clip_limit=cl)
    return new_im

def adaptiveEq(image, limit=0.03):
    # Adaptive Equalization
    img_adapteq = exposure.equalize_adapthist(image, clip_limit=limit)
    return img_adapteq

def draw_boxes(image, boxes=None, refined_boxes=None, masks=None,
               captions=None, visibilities=None, title="", ax=None):
    """Draw bounding boxes and segmentation masks with different customizations.

    boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
    refined_boxes: Like boxes, but draw with solid lines to show
        that they're the result of refining 'boxes'.
    masks: [N, height, width]
    captions: List of N titles to display on each box
    visibilities: (optional) List of values of 0, 1, or 2. Determine how
        prominent each bounding box should be.
    title: An optional title to show over the image
    ax: (optional) Matplotlib axis to draw on.
    """
    # Number of boxes
    assert boxes is not None or refined_boxes is not None
    N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]

    # Matplotlib Axis
    if not ax:
        _, ax = plt.subplots(1, figsize=(12, 12))

    # Generate random colors
    colors = random_colors(N)

    # Show area outside image boundaries.
    margin = image.shape[0] // 10
    ax.set_ylim(image.shape[0] + margin, -margin)
    ax.set_xlim(-margin, image.shape[1] + margin)
    ax.axis('off')
    ax.set_title(title)

    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        # Box visibility
        visibility = visibilities[i] if visibilities is not None else 1
        if visibility == 0:
            color = "gray"
            style = "dotted"
            alpha = 0.5
        elif visibility == 1:
            color = colors[i]
            style = "dotted"
            alpha = 1
        elif visibility == 2:
            color = colors[i]
            style = "solid"
            alpha = 1

        # Boxes
        if boxes is not None:
            if not np.any(boxes[i]):
                # Skip this instance. Has no bbox. Likely lost in cropping.
                continue
            y1, x1, y2, x2 = boxes[i]
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=alpha, linestyle=style,
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)

        # Refined boxes
        if refined_boxes is not None and visibility > 0:
            ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
            p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1,
                                  linewidth=2, edgecolor=color,
                                  facecolor='none')
            ax.add_patch(p)
            # Connect the top-left corners of the anchor and proposal
            if boxes is not None:
                ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))

        # Captions
        if captions is not None:
            caption = captions[i]
            # If there are refined boxes, display captions on them
            if refined_boxes is not None:
                y1, x1, y2, x2 = ry1, rx1, ry2, rx2
            x = random.randint(x1, (x1 + x2) // 2)
            ax.text(x1, y1, caption, size=11, verticalalignment='top',
                    color='w', backgroundcolor="none",
                    bbox={'facecolor': color, 'alpha': 0.5,
                          'pad': 2, 'edgecolor': 'none'})

        # Masks
        if masks is not None:
            mask = masks[:, :, i]
            masked_image = apply_mask(masked_image, mask, color)
            # Mask Polygon
            # Pad to ensure proper polygons for masks that touch image edges.
            padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                                   dtype=np.uint8)
            padded_mask[1:-1, 1:-1] = mask
            contours = find_contours(padded_mask, 0.5)
            for verts in contours:
                # Subtract the padding and flip (y, x) to (x, y)
                verts = np.fliplr(verts) - 1
                p = Polygon(verts, facecolor="none", edgecolor=color)
                ax.add_patch(p)

    if image.shape[-1] == 8:  # added for wv2
        brg = reorder_to_brg(image)
        brg_adap = exposure.equalize_adapthist(brg, clip_limit=0.0055)
        ax.imshow(brg_adap)
    else:
        ax.imshow(masked_image.astype(np.uint8))

def _fix_hist(image):
    """Apply adaptive histogram equalization"""
    image = equalize_adapthist(image) * 255 * 1.14  # empirical magic number
    image[image <= 8.] = 0.
    return image

def draw_rois(image, rois, refined_rois, mask, class_ids, class_names,
              limit=10):
    """
    anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
    proposals: [n, 4] the same anchors but refined to fit objects better.
    """
    masked_image = image.copy()

    # Pick random anchors in case there are too many.
    ids = np.arange(rois.shape[0], dtype=np.int32)
    ids = np.random.choice(ids, limit,
                           replace=False) if ids.shape[0] > limit else ids

    fig, ax = plt.subplots(1, figsize=(12, 12))
    if rois.shape[0] > limit:
        plt.title("Showing {} random ROIs out of {}".format(
            len(ids), rois.shape[0]))
    else:
        plt.title("{} ROIs".format(len(ids)))

    # Show area outside image boundaries.
    ax.set_ylim(image.shape[0] + 20, -20)
    ax.set_xlim(-50, image.shape[1] + 20)
    ax.axis('off')

    for i, id in enumerate(ids):
        color = np.random.rand(3)
        class_id = class_ids[id]
        # ROI
        y1, x1, y2, x2 = rois[id]
        p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                              edgecolor=color if class_id else "gray",
                              facecolor='none', linestyle="dashed")
        ax.add_patch(p)
        # Refined ROI
        if class_id:
            ry1, rx1, ry2, rx2 = refined_rois[id]
            p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1,
                                  linewidth=2, edgecolor=color,
                                  facecolor='none')
            ax.add_patch(p)
            # Connect the top-left corners of the anchor and proposal for easy visualization
            ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))

            # Label
            label = class_names[class_id]
            ax.text(rx1, ry1 + 8, "{}".format(label),
                    color='w', size=11, backgroundcolor="none")

            # Mask
            m = utils.unmold_mask(mask[id], rois[id][:4].astype(np.int32),
                                  image.shape)
            masked_image = apply_mask(masked_image, m, color)

    if image.shape[-1] == 8:  # added for wv2
        brg = reorder_to_brg(image)
        brg_adap = exposure.equalize_adapthist(brg, clip_limit=0.0055)
        ax.imshow(brg_adap)
    else:
        ax.imshow(masked_image)

    # Print stats
    print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
    print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
    print("Positive Ratio: {:.2f}".format(
        class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))

def random_transform(self, x, seed=None):
    """Randomly augment a single image tensor.

    # Arguments
        x: 3D tensor, single image.
        seed: random seed.

    # Returns
        A randomly transformed version of the input (same shape).
    """
    # x is a single image, so it doesn't have image number at index 0
    img_row_axis = self.row_axis - 1
    img_col_axis = self.col_axis - 1
    img_channel_axis = self.channel_axis - 1

    if seed is not None:
        np.random.seed(seed)

    # use composition of homographies
    # to generate final transform that needs to be applied
    if self.rotation_range:
        theta = np.pi / 180 * np.random.uniform(-self.rotation_range,
                                                self.rotation_range)
    else:
        theta = 0

    if self.height_shift_range:
        tx = np.random.uniform(-self.height_shift_range,
                               self.height_shift_range) * x.shape[img_row_axis]
    else:
        tx = 0

    if self.width_shift_range:
        ty = np.random.uniform(-self.width_shift_range,
                               self.width_shift_range) * x.shape[img_col_axis]
    else:
        ty = 0

    if self.shear_range:
        shear = np.random.uniform(-self.shear_range, self.shear_range)
    else:
        shear = 0

    if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
        zx, zy = 1, 1
    else:
        zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)

    transform_matrix = None
    if theta != 0:
        rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                    [np.sin(theta), np.cos(theta), 0],
                                    [0, 0, 1]])
        transform_matrix = rotation_matrix

    if tx != 0 or ty != 0:
        shift_matrix = np.array([[1, 0, tx],
                                 [0, 1, ty],
                                 [0, 0, 1]])
        transform_matrix = shift_matrix if transform_matrix is None else np.dot(
            transform_matrix, shift_matrix)

    if shear != 0:
        shear_matrix = np.array([[1, -np.sin(shear), 0],
                                 [0, np.cos(shear), 0],
                                 [0, 0, 1]])
        transform_matrix = shear_matrix if transform_matrix is None else np.dot(
            transform_matrix, shear_matrix)

    if zx != 1 or zy != 1:
        zoom_matrix = np.array([[zx, 0, 0],
                                [0, zy, 0],
                                [0, 0, 1]])
        transform_matrix = zoom_matrix if transform_matrix is None else np.dot(
            transform_matrix, zoom_matrix)

    if transform_matrix is not None:
        h, w = x.shape[img_row_axis], x.shape[img_col_axis]
        transform_matrix = transform_matrix_offset_center(transform_matrix,
                                                          h, w)
        x = apply_transform(x, transform_matrix, img_channel_axis,
                            fill_mode=self.fill_mode, cval=self.cval)

    if self.channel_shift_range != 0:
        x = random_channel_shift(x, self.channel_shift_range,
                                 img_channel_axis)

    if self.horizontal_flip:
        if np.random.random() < 0.5:
            x = flip_axis(x, img_col_axis)

    if self.vertical_flip:
        if np.random.random() < 0.5:
            x = flip_axis(x, img_row_axis)

    if self.histogram_equalization:
        #if np.random.random() < 0.5:
        x = exposure.equalize_hist(x)

    if self.contrast_stretching:
        #if np.random.random() < 0.5:
        p2, p98 = np.percentile(x, (2, 98))
        x = exposure.rescale_intensity(x, in_range=(p2, p98))

    if self.adaptive_equalization:
        #if np.random.random() < 0.5:
        x = exposure.equalize_adapthist(x, clip_limit=0.03)

    return x

def data_clean_func(image=None):
    if image is not None:
        #print(len(np.unique(image)))
        #clean_image = image
        '''
        plt.hist(image)
        plt.show()
        '''
        '''
        plt.imshow(image, cmap='gray')
        plt.title('Original Image')
        plt.show()
        '''
        threshold = 0.85
        default_fill = 0.0
        frac_of_high_clip = 1 / 9
        image[image > threshold] = default_fill
        image[image < frac_of_high_clip * (1.0 - threshold)] = default_fill
        '''
        plt.imshow(image, cmap='gray')
        plt.title('After Clipping')
        plt.show()
        '''
        image = scipy.ndimage.median_filter(image, size=(4, 4))
        '''
        plt.imshow(image, cmap='gray')
        plt.title('After Median Filter')
        plt.show()
        '''
        image = skimage.filters.gaussian(image, sigma=0.01, output=None,
                                         mode='reflect', preserve_range=True)

        ####################################################################
        # Added to ensure negligible loss when converting to int16
        # within exposure.equalize_adapthist. Scale by 2**16 - 1, not 2**16:
        # the latter would wrap the brightest pixel to 0 in the uint16 cast.
        image = (image / np.max(image) * (2**16 - 1)).astype(np.uint16)
        # A "monkey patch" could possibly be used as a cleaner solution,
        # but would be more involved than is necessary for my application
        ####################################################################

        image = exposure.equalize_adapthist(image,
                                            kernel_size=image.shape[0] // 8,
                                            clip_limit=0.005,
                                            nbins=2**13)
        image = image.astype(np.float64)
        '''
        plt.imshow(image, cmap='gray')
        plt.title('After Local Adapt Hist')
        plt.show()
        '''
        image = scipy.ndimage.median_filter(image, size=(3, 1))
        image = scipy.ndimage.median_filter(image, size=(1, 3))
        image = skimage.filters.gaussian(image, sigma=0.1, output=None,
                                         mode='reflect', preserve_range=True)
        image = exposure.rescale_intensity(image, in_range='image',
                                           out_range=(0.0, 1.0))
        '''
        plt.imshow(image, cmap='gray')
        plt.title('Final Image')
        plt.show()
        '''
        '''
        plt.hist(image)
        plt.show()
        '''
        clean_image = image.astype(np.float32)
    else:
        clean_image = image
    return clean_image

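# Generic form of the uint16 pre-scaling trick used above (an assumption, not
# from the original source: `img` is a non-negative float array). Converting
# to uint16 up front sidesteps the float-to-integer conversion that
# equalize_adapthist performs internally on some scikit-image versions.
def clahe_uint16(img, **kwargs):
    import numpy as np
    from skimage import exposure

    scaled = (img / np.max(img) * (2 ** 16 - 1)).astype(np.uint16)
    return exposure.equalize_adapthist(scaled, **kwargs)
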
                 im[y, x + Delta] + im[y, x - Delta]) // 8
    return check


def check_edge_overlapp(y, x, r, edge):
    if abs(y - im.shape[0]) <= r or abs(x - im.shape[1]) <= r:
        r = 0
    overlapp = np.zeros(edge.shape, dtype=bool)
    rr, cc = circle(y, x, r)
    overlapp[rr, cc] = edge[rr, cc]
    return np.sum(overlapp)


image = io.imread('dropbox/retinopathy/sample/13_left.jpeg')
img_resc = transform.rescale(rgb2gray(image), 0.25)
img_gray = img_as_ubyte(exposure.equalize_adapthist(img_resc, clip_limit=0.3))
#plt.imshow(img_gray, cmap=plt.get_cmap('gray'))
#io.imsave('dropbox/retinopathy/bw.jpeg', img_gray)

# Invert White <-> Black
image_gray = 255 - img_gray
image_gray_rgb = gray2rgb(img_gray)

# Make blurry image for edge detection
im = ndimage.gaussian_filter(img_gray, 4)

# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im, sigma=0.1)
edges2 = feature.canny(im, sigma=3)

for f in flist:
    im = io.imread(f)[:, :, :, 0].astype(float)
    im = im.transpose()  # Reorder to XYZ
    [X, Y, Z] = im.shape
    # Trim off 0s, which are introduced mostly as part of stitching
    im[im == 0] = np.nan
    im_clip = (im - np.nanmin(im)) / (np.nanmax(im) - np.nanmin(im))
    # Put back the zeros
    im_clip[np.isnan(im_clip)] = 0
    # Initiate kernel
    kernel = np.array([X // 4, Y // 4, Z // 1])
    equalized = equalize_adapthist(im_clip, kernel_size=kernel, clip_limit=1)
    equalized = equalized.transpose()
    equalized = equalized - equalized.min()
    io.imsave(path.splitext(f)[0] + '_R_eq.tif',
              util.img_as_uint(equalized))
    print(f'Saved {f}')

# #%% Normalize (2D)
# for f in flist:
#     im = io.imread(f).astype(float)
#     print(f'Loaded {f}')
#     # Convert all 0 into NaN
#     im[im == 0] = np.nan

def display_instances(image, boxes, masks, class_ids, class_names,
                      scores=None, title="", figsize=(16, 16), ax=None,
                      show_mask=True, show_bbox=True, colors=None,
                      captions=None):
    """
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [height, width, num_instances]
    class_ids: [num_instances]
    class_names: list of class names of the dataset
    scores: (optional) confidence scores for each box
    title: (optional) Figure title
    show_mask, show_bbox: To show masks and bounding boxes or not
    figsize: (optional) the size of the image
    colors: (optional) An array or colors to use with each object
    captions: (optional) A list of strings to use as captions for each object
    """
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]

    # If no axis is passed, create one and automatically call show()
    auto_show = False
    if not ax:
        _, ax = plt.subplots(1, figsize=figsize)
        auto_show = True

    # Generate random colors
    colors = colors or random_colors(N)

    # Show area outside image boundaries.
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)

    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]

        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        if show_bbox:
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=0.7, linestyle="dashed",
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)

        # Label
        if not captions:
            class_id = class_ids[i]
            score = scores[i] if scores is not None else None
            label = class_names[class_id]
            x = random.randint(x1, (x1 + x2) // 2)
            caption = "{} {:.3f}".format(label, score) if score else label
        else:
            caption = captions[i]
        ax.text(x1, y1 + 8, caption, color='w', size=11,
                backgroundcolor="none")

        # Mask
        mask = masks[:, :, i]
        if show_mask:
            masked_image = apply_mask(masked_image, mask, color)

        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                               dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)
        for verts in contours:
            # Subtract the padding and flip (y, x) to (x, y)
            verts = np.fliplr(verts) - 1
            p = Polygon(verts, facecolor="none", edgecolor=color)
            ax.add_patch(p)

    if image.shape[-1] == 8:  # added for wv2 using RGBNRGB for two seasons
        brg = reorder_to_brg(image)
        brg_adap = exposure.equalize_adapthist(brg, clip_limit=0.0055)
        # added band reordering for wv2 and adaptive stretch
        ax.imshow(brg_adap)
    else:
        image[image < 0] = 0
        image = percentile_rescale(image)
        ax.imshow(image, cmap='brg')
    if auto_show:
        plt.show()

test = ndimage.median_filter(Refvolume.getOriginVolume()[:, :, 60], 10)
plt.figure(1)
plt.imshow(test, cmap='Greys_r')
plt.show()

from skimage import exposure
denoise_volume = exposure.rescale_intensity(test, in_range=(50, 255))
plt.figure(2)
plt.imshow(denoise_volume, cmap='Greys_r')
plt.show()

denoise_volume2 = exposure.equalize_adapthist(denoise_volume, clip_limit=0.1)
plt.figure(3)
plt.imshow(ndimage.gaussian_filter(denoise_volume2, 2), cmap='Greys_r')
plt.show()

from skimage.filters import threshold_otsu, threshold_adaptive
denoise_volume3 = threshold_adaptive(ndimage.gaussian_filter(denoise_volume2, 2),
                                     5, offset=0)
plt.figure(4)
plt.imshow(denoise_volume3, cmap='Greys_r')
plt.show()

# plot = Visualization.DataVisulization(ndimage.gaussian_filter(new_volume2, 2), 80)
# plot.contour3d()

def save_stretched_png(self, folder: str) -> str:
    data = self.data.copy()
    path = join(folder, f"{self.key}.png")
    assert data.dtype == np.float32 and data.max() <= 1.0 and data.min() >= 0.0, \
        f"{data.dtype} {data.max()} {data.min()}"
    if data.ndim == 2:
        # mono
        data = crop_center(data, data.shape[1] - 128, data.shape[0] - 128)
        with Timer("stretch"):
            data = Stretch().stretch(data)
        data = transform.downscale_local_mean(data, (4, 4))
        data = np.clip(data, 0.0, 1.0)
        assert data.dtype == np.float32 and data.max() <= 1.0 and data.min() >= 0.0, \
            f"{data.dtype} {data.max()} {data.min()}"
        with Timer(f"saving {self.key}.png"):
            scaled = np.interp(data, (0, 1), (0, 65535)).astype(np.uint16)
            png_image = PILImage.fromarray(scaled)
            png_image.save(path)
    elif data.ndim == 3:
        # osc
        channels = []
        for i in range(3):
            channel_data = data[i]
            channel_data = crop_center(channel_data,
                                       channel_data.shape[1] - 128,
                                       channel_data.shape[0] - 128)
            with Timer("stretch"):
                channel_data = Stretch().stretch(channel_data)
            channel_data = transform.downscale_local_mean(channel_data, (4, 4))
            channel_data = np.clip(channel_data, 0.0, 1.0)
            assert channel_data.dtype == np.float32 and \
                channel_data.max() <= 1.0 and channel_data.min() >= 0.0, \
                f"{channel_data.dtype} {channel_data.max()} {channel_data.min()}"
            # channel_data = np.interp(channel_data, (0.0, 1.0), (0, 255)).astype(np.uint8)
            channels.append(channel_data)
        final = np.dstack(channels)
        final = exposure.equalize_adapthist(final, clip_limit=0.0001,
                                            nbins=1024)
        final = np.interp(final, (0.0, 1.0), (0, 255)).astype(np.uint8)
        with Timer(f"saving {self.key}.png"):
            assert final.dtype == np.uint8
            png_image = PILImage.fromarray(final, mode="RGB")
            converter = ImageEnhance.Color(png_image)
            saturated = converter.enhance(2)  # increase color saturation
            saturated.save(path)
    else:
        raise Exception(f"invalid image dimensions {data.ndim}")
    return path

def __call__(self, m):
    m = (m - m.min()) / (m.max() - m.min())
    # multiplying by t.max() keeps the maximum of each 2-D slice from
    # drifting away from its actual value
    return np.array([
        t.max() * equalize_adapthist(t, kernel_size=self.kernel) for t in m
    ])

# %% --------------------
import matplotlib.pyplot as plt
import numpy as np
from skimage import exposure

from common.utilities import get_image_as_array

# %% --------------------
img = get_image_as_array(
    'D:/GWU/4 Spring 2021/6501 Capstone/VBD CXR/PyCharm '
    'Workspace/vbd_cxr/9_data/512/transformed_data/train/0c4a6bc602d1d207f217212c68a7131b.jpeg'
)
img = np.asarray(img)

plt.figure(figsize=(12, 12))
plt.imshow(img, 'gray')
plt.show()

# %% --------------------
img_hist = exposure.equalize_hist(img)
plt.figure(figsize=(12, 12))
plt.imshow(img_hist, 'gray')
plt.show()

# %% --------------------
img_clahe = exposure.equalize_adapthist(img / np.max(img))
plt.figure(figsize=(12, 12))
plt.imshow(img_clahe, 'gray')
plt.show()

def contraste_adaptativo(img):
    # Adaptive contrast: https://en.wikipedia.org/wiki/Adaptive_histogram_equalization
    img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
    return img_adapteq

def show(img):
    # Display the image.
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 3))

    ax1.imshow(img, cmap=plt.cm.gray)
    ax1.set_axis_off()

    # Display the histogram.
    ax2.hist(img.ravel(), lw=0, bins=256)
    ax2.set_xlim(0, img.max())
    ax2.set_yticks([])

    plt.show()


# In[7]:

show(img)


# In[11]:

show(skie.rescale_intensity(img, in_range=(0.4, .95), out_range=(0, 1)))


# In[9]:

show(skie.equalize_adapthist(img))


# In[ ]:

def contrast(image):
    image_CLAHE = equalize_adapthist(image)
    return image_CLAHE
