import numpy as np
from skimage import exposure


def autolevels(image, minPercent=2, maxPercent=98, funcName='mean', perChannel=False):
    '''Rescale intensity of an image.

    For RGB images, the new limits are calculated per channel and then the
    mean or median of these limits is applied to the whole image (if the
    perChannel option is False).
    '''
    # dictionary of limit-combining functions
    funcs = {'mean': np.mean, 'median': np.median, 'min': np.min, 'max': np.max}
    # calculate percentiles (returns 3 values for RGB pictures or pixel vectors, 1 for grayscale images)
    if image.shape[1] == 3:
        pMin, pMax = np.percentile(image, (minPercent, maxPercent), axis=0)
    else:
        pMin, pMax = np.percentile(image, (minPercent, maxPercent), axis=(0, 1))
    # Apply normalisation
    if not perChannel:
        # find new min and max using the selected function applied to all channels
        newMin = funcs[funcName](pMin)
        newMax = funcs[funcName](pMax)
        auto = exposure.rescale_intensity(image, in_range=(newMin, newMax))
    else:
        # rescale each channel separately
        r_channel = exposure.rescale_intensity(image[:, :, 0], in_range=(pMin[0], pMax[0]))
        g_channel = exposure.rescale_intensity(image[:, :, 1], in_range=(pMin[1], pMax[1]))
        b_channel = exposure.rescale_intensity(image[:, :, 2], in_range=(pMin[2], pMax[2]))
        auto = np.stack((r_channel, g_channel, b_channel), axis=2)
    return auto
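# A minimal usage sketch for autolevels above; the astronaut sample image and
# the percentile bounds are illustrative only.
from skimage import data

rgb = data.astronaut()
# clip the darkest/brightest 2% per channel, combine the limits with the median
leveled = autolevels(rgb, minPercent=2, maxPercent=98, funcName='median')
# or stretch each channel independently
leveled_per_channel = autolevels(rgb, perChannel=True)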
def edge():
    # plt.switch_backend('MacOSX')
    image = io.imread(path + "bibme0.png")
    print(type(image))
    print(image.shape)
    # edge_roberts = roberts(image)
    # edge_sobel = sobel(image)

    fig = plt.figure(figsize=(14, 7))
    ax_each = fig.add_subplot(121, adjustable='box-forced')
    ax_hsv = fig.add_subplot(122, sharex=ax_each, sharey=ax_each, adjustable='box-forced')

    # We use 1 - sobel_gray(image), but this will not work if image is not normalized
    ax_each.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
    # ax_each.imshow(sobel_each(image))
    ax_each.set_xticks([]), ax_each.set_yticks([])
    ax_each.set_title("Sobel filter computed\n on individual RGB channels")

    # We use 1 - sobel_hsv(image), but this will not work if image is not normalized
    ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)), cmap=plt.cm.gray)
    ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
    ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")

    fig.savefig(out_path + 'sobel_gray.png')
    plt.show()
def handle(self, *args, **options):
    # vars
    experiment_name = options['expt']
    series_name = options['series']
    t = options['t']

    if experiment_name != '' and series_name != '':
        experiment = Experiment.objects.get(name=experiment_name)
        series = experiment.series.get(name=series_name)

        # select composite
        composite = series.composites.get()

        zmean = exposure.rescale_intensity(composite.gons.get(channel__name='-zmean', t=t).load() * 1.0)
        zmod = exposure.rescale_intensity(composite.gons.get(channel__name='-zmod', t=t).load() * 1.0)

        zdiff = np.zeros(zmean.shape)

        for unique in np.unique(zmod):
            print(unique, len(np.unique(zmod)))
            zdiff[zmod == unique] = np.mean(zmean[zmod == unique]) / np.sum(zmean)

        plt.imshow(zdiff, cmap='Greys_r')
        plt.show()
        # imsave('zdiff.tiff', zdiff)
    else:
        print('Please enter an experiment')
def mod_zedge(composite, mod_id, algorithm, **kwargs):
    zedge_channel, zedge_channel_created = composite.channels.get_or_create(name="-zedge")

    for t in range(composite.series.ts):
        print("step02 | processing mod_zedge t{}/{}...".format(t + 1, composite.series.ts), end="\r")

        zdiff_mask = composite.masks.get(channel__name__contains=kwargs["channel_unique_override"], t=t).load()
        zbf = exposure.rescale_intensity(composite.gons.get(channel__name="-zbf", t=t).load() * 1.0)
        zedge = zbf.copy()

        binary_mask = zdiff_mask > 0
        outside_edge = distance_transform_edt(dilate(edge_image(binary_mask), iterations=4))
        outside_edge = 1.0 - exposure.rescale_intensity(outside_edge * 1.0)
        zedge *= outside_edge * outside_edge

        zedge_gon, zedge_gon_created = composite.gons.get_or_create(
            experiment=composite.experiment, series=composite.series, channel=zedge_channel, t=t
        )
        zedge_gon.set_origin(0, 0, 0, t)
        zedge_gon.set_extent(composite.series.rs, composite.series.cs, 1)

        zedge_gon.array = zedge.copy()
        zedge_gon.save_array(composite.series.experiment.composite_path, composite.templates.get(name="source"))
        zedge_gon.save()
def _write_image(self, img_data, filename, img_format=None, dtype=None):
    """
    Output image data to a file, in a given image format.
    Assumes that the output directory exists (must be checked before).

    @param img_data :: image data in the usual numpy representation
    @param filename :: file name, including directory and extension
    @param img_format :: image file format
    @param dtype :: can be used to force a pixel type, otherwise the type
    of the input data is used

    Returns :: name of the file saved
    """
    if not img_format:
        img_format = self.default_out_format
    filename = filename + '.' + img_format

    if dtype and img_data.dtype != dtype:
        img_data = np.array(img_data, dtype=dtype)

    if img_format == 'tiff' and _USING_PLUGIN_TIFFFILE:
        img_data = exposure.rescale_intensity(img_data, out_range='uint16')
        skio.imsave(filename, img_data, plugin='tifffile')
    else:
        img_data = exposure.rescale_intensity(img_data, out_range='uint16')
        skio.imsave(filename, img_data)

    return filename
import numpy as np
from skimage.exposure import rescale_intensity


def rgb2he2(img):
    # This implementation follows http://web.hku.hk/~ccsigma/color-deconv/color-deconv.html
    assert (img.ndim == 3)
    assert (img.shape[2] == 3)

    height, width, _ = img.shape

    img = -np.log((img + 1.0) / img.max())

    # The following lines are replaced with the final result,
    # to speed up computations:
    #
    # he = np.array([0.550, 0.758, 0.351]); he /= norm(he)
    # eo = np.array([0.398, 0.634, 0.600]); eo /= norm(eo)
    # bg = np.array([0.754, 0.077, 0.652]); bg /= norm(bg)
    #
    # M = np.hstack((he.reshape(3,1), eo.reshape(3,1), bg.reshape(3,1)))
    # D = alg.inv(M)
    #
    D = np.array([[ 1.92129515,  1.00941672, -2.34107612],
                  [-2.34500192,  0.47155124,  2.65616872],
                  [ 1.21495282, -0.99544467,  0.2459345 ]])

    rgb = img.swapaxes(2, 0).reshape((3, height * width))
    heb = np.dot(D, rgb)
    res_img = heb.reshape((3, width, height)).swapaxes(0, 2)

    return rescale_intensity(res_img[:, :, 0], out_range=(0, 1)), \
           rescale_intensity(res_img[:, :, 1], out_range=(0, 1)), \
           rescale_intensity(res_img[:, :, 2], out_range=(0, 1))
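# Hedged usage sketch for rgb2he2 above; scikit-image's IHC sample stands in
# for an H&E slide here, but any HxWx3 array would do.
from skimage import data

ihc = data.immunohistochemistry().astype(float)
haem, eosin, background = rgb2he2(ihc)  # three float planes rescaled to (0, 1)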
import numpy as np
from skimage import exposure


def juntarcanais(c1, c2):
    """Join two single-channel images ("juntar canais") into one RGB image:
    c2 on green, c1 on blue, with an empty red channel."""
    h = exposure.rescale_intensity(c1, out_range=(0, 1))
    d = exposure.rescale_intensity(c2, out_range=(0, 1))
    zdh = np.dstack((np.zeros_like(h), d, h))
    return zdh
def handle(self, *args, **options):
    # vars
    experiment_name = options['expt']
    series_name = options['series']
    t = options['t']
    R = 1
    delta_z = -8
    # sigma = 5

    if experiment_name != '' and series_name != '':
        experiment = Experiment.objects.get(name=experiment_name)
        series = experiment.series.get(name=series_name)

        # select composite
        composite = series.composites.get()

        # load gfp
        gfp_gon = composite.gons.get(t=t, channel__name='0')
        gfp_start = exposure.rescale_intensity(gfp_gon.load() * 1.0)
        print('loaded gfp...')

        # load bf
        bf_gon = composite.gons.get(t=t, channel__name='1')
        bf = exposure.rescale_intensity(bf_gon.load() * 1.0)
        print('loaded bf...')

        for sigma in [0, 5, 10, 20]:
            gfp = gf(gfp_start, sigma=sigma)  # <<< SMOOTHING

            for level in range(gfp.shape[2]):
                print('level {} {}...'.format(R, level))
                gfp[:, :, level] = convolve(gfp[:, :, level], np.ones((R, R)))

            # initialise images
            Z = np.zeros(composite.series.shape(d=2), dtype=int)
            Zmean = np.zeros(composite.series.shape(d=2))
            Zbf = np.zeros(composite.series.shape(d=2))

            Z = np.argmax(gfp, axis=2) + delta_z

            # outliers
            Z[Z < 0] = 0
            Z[Z > composite.series.zs - 1] = composite.series.zs - 1

            for level in range(bf.shape[2]):
                print('level {}...'.format(level))
                bf_level = bf[:, :, level]
                Zbf[Z == level] = bf_level[Z == level]

            Zmean = 1 - np.mean(gfp, axis=2) / np.max(gfp, axis=2)

            imsave('zbf_R-{}_sigma-{}_delta_z{}.png'.format(R, sigma, delta_z), Zbf)
            # plt.imshow(Zbf, cmap='Greys_r')
            # plt.show()
    else:
        print('Please enter an experiment')
def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01, nbins=256):
    """Contrast Limited Adaptive Histogram Equalization.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction. Ranges between 2 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction. Ranges between 2 and 16.
    clip_limit : float, optional
        Clipping limit, normalized between 0 and 1 (higher values give more
        contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    Notes
    -----
    * The algorithm relies on an image whose rows and columns are even
      multiples of the number of tiles, so the extra rows and columns are left
      at their original values, thus preserving the input image shape.
    * For color images, the following steps are performed:
       - The image is converted to LAB color space
       - The CLAHE algorithm is run on the L channel
       - The image is converted back to RGB space and returned
    * For RGBA images, the original alpha channel is removed.

    References
    ----------
    .. [1] http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
    .. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
    """
    args = [None, ntiles_x, ntiles_y, clip_limit * nbins, nbins]
    if image.ndim > 2:
        lab_img = color.rgb2lab(skimage.img_as_float(image))
        l_chan = lab_img[:, :, 0]
        l_chan /= np.max(np.abs(l_chan))
        l_chan = skimage.img_as_uint(l_chan)
        args[0] = rescale_intensity(l_chan, out_range=(0, NR_OF_GREY - 1))
        new_l = _clahe(*args).astype(float)
        new_l = rescale_intensity(new_l, out_range=(0, 100))
        lab_img[:new_l.shape[0], :new_l.shape[1], 0] = new_l
        image = color.lab2rgb(lab_img)
        image = rescale_intensity(image, out_range=(0, 1))
    else:
        image = skimage.img_as_uint(image)
        args[0] = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
        out = _clahe(*args)
        image[:out.shape[0], :out.shape[1]] = out
        image = rescale_intensity(image)
    return image
def _color_correction(self, band, band_id, low, coverage):
    self.output("Color correcting band %s" % band_id, normal=True, color='green', indent=1)
    p_low, cloud_cut_low = self._percent_cut(band, low, 100 - (coverage * 3 / 4))
    temp = numpy.zeros(numpy.shape(band), dtype=numpy.uint16)
    cloud_divide = 65000 - coverage * 100
    mask = numpy.logical_and(band < cloud_cut_low, band > 0)
    temp[mask] = rescale_intensity(band[mask],
                                   in_range=(p_low, cloud_cut_low),
                                   out_range=(256, cloud_divide))
    temp[band >= cloud_cut_low] = rescale_intensity(band[band >= cloud_cut_low],
                                                    out_range=(cloud_divide, 65535))
    return temp
def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01, nbins=256):
    # grayscale-only variant: run CLAHE on a uint image rescaled to NR_OF_GREY levels
    args = [None, ntiles_x, ntiles_y, clip_limit * nbins, nbins]
    image = skimage.img_as_uint(image)
    args[0] = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
    out = _clahe(*args)
    image[:out.shape[0], :out.shape[1]] = out
    image = rescale_intensity(image)
    return image
def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01, nbins=256):
    """Contrast Limited Adaptive Histogram Equalization (CLAHE).

    An algorithm for local contrast enhancement, that uses histograms
    computed over different tile regions of the image. Local details can
    therefore be enhanced even in regions that are darker or lighter than
    most of the image.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction. Ranges between 1 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction. Ranges between 1 and 16.
    clip_limit : float, optional
        Clipping limit, normalized between 0 and 1 (higher values give more
        contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    See Also
    --------
    equalize_hist, rescale_intensity

    Notes
    -----
    * For color images, the following steps are performed:
       - The image is converted to HSV color space
       - The CLAHE algorithm is run on the V (Value) channel
       - The image is converted back to RGB space and returned
    * For RGBA images, the original alpha channel is removed.
    * The CLAHE algorithm relies on image blocks of equal size. This may
      result in extra border pixels that would not be handled. In that case,
      we pad the image with a repeat of the border pixels, apply the
      algorithm, and then trim the image to original size.

    References
    ----------
    .. [1] http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
    .. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
    """
    image = skimage.img_as_uint(image)
    image = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
    out = _clahe(image, ntiles_x, ntiles_y, clip_limit * nbins, nbins)
    image[:out.shape[0], :out.shape[1]] = out
    image = skimage.img_as_float(image)
    return rescale_intensity(image)
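# Hedged usage sketch: in current scikit-image the public CLAHE entry point
# takes `kernel_size` instead of ntiles_x/ntiles_y, but clip_limit and nbins
# carry over directly.
from skimage import data, exposure

moon_eq = exposure.equalize_adapthist(data.moon(), clip_limit=0.01, nbins=256)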
def _get_scalebar(self):
    """Get the length in pixels of the image scale bar"""
    box = (0, 419, 519, 520)  # row where scalebar exists
    im = self.crop_image(box=box, copy=True)
    im = skimage.img_as_float(im)
    im = exposure.rescale_intensity(im, in_range=(0.49, 0.5))  # saturate black and white pixels
    im = exposure.rescale_intensity(im)  # make sure they're black and white
    im = np.diff(im[0])  # 1d numpy array, differences
    lim = [np.where(im > 0.9)[0][0],
           np.where(im < -0.9)[0][0]]  # first occurrence of both cases
    assert len(lim) == 2, "Couldn't find scalebar"
    return lim[1] - lim[0]
def watershed(image):
    """The watershed algorithm."""
    if len(image.shape) != 2:
        raise TypeError("The input image must be gray-scale")

    h, w = image.shape
    image = cv2.equalizeHist(image)
    image = denoise_bilateral(image, sigma_range=0.1, sigma_spatial=10)
    image = rescale_intensity(image)
    image = img_as_ubyte(image)
    image = rescale_intensity(image)
    # com.debug_im(image)

    _, thres = cv2.threshold(image, 80, 255, cv2.THRESH_BINARY_INV)

    distance = ndi.distance_transform_edt(thres)
    local_maxi = peak_local_max(distance, indices=False, labels=thres, min_distance=5)
    # com.debug_im(thres)
    # implt = plt.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
    # plt.show()

    markers = ndi.label(local_maxi, np.ones((3, 3)))[0]
    labels = ws(-distance, markers, mask=thres)
    labels = np.uint8(labels)
    # result = np.round(255.0 / np.amax(labels) * labels).astype(np.uint8)
    # com.debug_im(result)

    segments = []
    for idx in range(1, np.amax(labels) + 1):
        indices = np.where(labels == idx)
        left = np.amin(indices[1])
        right = np.amax(indices[1])
        top = np.amin(indices[0])
        down = np.amax(indices[0])
        # region = labels[top:down, left:right]
        # m = (region > 0) & (region != idx)
        # region[m] = 0
        # region[region >= 1] = 1
        region = image[top:down, left:right]
        cont = Contour(mask=region)
        cont.lt = [left, top]
        cont.rb = [right, down]
        segments.append(cont)
    return segments
def rescale_img(image, **kwargs):
    """
    Rescale image values between minimum and maximum cut levels to
    values between 0 and 1, inclusive.

    Parameters
    ----------
    image : array_like
        The 2D array of the image.

    {cutlevel_params}

    Returns
    -------
    out : tuple
        Returns a tuple containing (``outimg``, ``min_cut``, ``max_cut``),
        which are the output scaled image and the minimum and maximum cut
        levels.
    """
    from skimage import exposure
    image = image.astype(np.float64)
    min_cut, max_cut = find_imgcuts(image, **kwargs)
    outimg = exposure.rescale_intensity(image, in_range=(min_cut, max_cut),
                                        out_range=(0, 1))
    return outimg, min_cut, max_cut
def saveimage_16bit(image, fname='Test.tif', folder=None, rescale=True,
                    dtype=np.uint16, imager=None):
    '''Saves an image as a 16 bit tiff'''
    # rotate the reverse direction
    image = tf.rotate(image, -1 * _imager_rot[imager])

    # if scaled to (0, 1) then rescale back to 16 bit
    if rescale:
        # print('rescaled')
        # note: 2**16 - 1 (65535) is the uint16 maximum; 2**16 would wrap to 0 on cast
        image = rescale_intensity(image, in_range=(0, 1), out_range=(0, 2 ** 16 - 1))

    # ensure all the values are integers
    image = image.astype(dtype)
    folder = folder or ''
    image = io.imsave(os.path.join(folder, fname), image)
def main():
    args = vars(parser.parse_args())
    filename = os.path.join(os.getcwd(), args["image"][0])
    image = skimage.img_as_uint(color.rgb2gray(io.imread(filename)))

    subsample = 1
    if not args["subsample"] == 1:
        subsample = args["subsample"][0]
        image = transform.downscale_local_mean(image, (subsample, subsample))
        image = transform.pyramid_expand(image, subsample, 0, 0)

    image = exposure.rescale_intensity(image, out_range=(0, args["depth"][0]))

    if args["visualize"]:
        io.imshow(image)
        io.show()

    source = generate_face(image, subsample, args["depth"][0], FLICKER_SPEED)

    if source:
        with open(args["output"][0], 'w') as file_:
            file_.write(source)
    else:
        print("Attempted to generate source code, failed.")
def mpl_image_to_rgba(mpl_image):
    """Return RGBA image from the given matplotlib image object.

    Each image in a matplotlib figure has its own colormap and normalization
    function. Return an RGBA (RGB + alpha channel) image with float dtype.

    Parameters
    ----------
    mpl_image : matplotlib.image.AxesImage object
        The image being converted.

    Returns
    -------
    img : array of float, shape (M, N, 4)
        An image of float values in [0, 1].
    """
    image = mpl_image.get_array()
    if image.ndim == 2:
        input_range = (mpl_image.norm.vmin, mpl_image.norm.vmax)
        image = rescale_intensity(image, in_range=input_range)
        # cmap complains on bool arrays
        image = mpl_image.cmap(img_as_float(image))
    elif image.ndim == 3 and image.shape[2] == 3:
        # add alpha channel if it's missing (a 2-D plane of ones, so the
        # result is (M, N, 4) rather than stacking a full 3-channel array)
        image = np.dstack((image, np.ones(image.shape[:2])))
    return img_as_float(image)
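# Hedged usage sketch for mpl_image_to_rgba above: grab the RGBA data
# matplotlib would render for an AxesImage, honouring its colormap and
# normalization (camera() and 'viridis' are arbitrary stand-ins).
import matplotlib.pyplot as plt
from skimage import data

mpl_img = plt.imshow(data.camera(), cmap='viridis')
rgba = mpl_image_to_rgba(mpl_img)  # float array, shape (M, N, 4)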
def proc_mbi(imgarray):
    # Normalize image:
    img = img_as_float(imgarray, force_copy=True)

    # Image equalization (contrast stretching):
    p2, p98 = np.percentile(img, (2, 98))
    img = exposure.rescale_intensity(img, in_range=(p2, p98), out_range=(0, 1))

    # Gamma correction:
    # img = exposure.adjust_gamma(img, 0.5)
    # Or sigmoid correction:
    img = exposure.adjust_sigmoid(img)

    print("Init Morph Proc...")

    sizes = range(2, 40, 5)
    angles = [0, 18, 36, 54, 72, 90, 108, 126, 144, 162]
    szimg = img.shape
    all_thr = np.zeros((len(sizes), szimg[0], szimg[1])).astype('float64')
    all_dmp = np.zeros((len(sizes) - 1, szimg[0], szimg[1])).astype('float64')

    idx = 0
    for sz in sizes:
        print(sz)
        builds_by_size = np.zeros(szimg).astype('float64')
        for ang in angles:
            print(ang)
            stel = ia870.iaseline(sz, ang)
            oprec = opening_by_reconstruction(img, stel)
            thr = np.absolute(img - oprec)
            builds_by_size += thr
        all_thr[idx, :, :] = (builds_by_size / len(angles))
        if idx > 0:
            all_dmp[idx - 1, :, :] = all_thr[idx, :, :] - all_thr[idx - 1, :, :]
        idx += 1

    mbi = np.mean(all_dmp, axis=0)
    return mbi
def plot_aop_rgb(rgbArray, ext, ls_pct=5, plot_title=''):
    '''Read in and plot 3 bands of a reflectance array as an RGB image.

    Parameters
    ----------
    rgbArray: ndarray (m x n x 3)
        3-band array of reflectance values, created from stack_rgb
    ext: tuple
        Extent of reflectance data to be plotted (xMin, xMax, yMin, yMax)
        Stored in metadata['spatial extent'] from aop_h5refl2array function
    ls_pct: integer or float, optional
        linear stretch percent
    plot_title: string, optional
        image title

    Returns
    -------
    plots RGB image of 3 bands of reflectance data

    Examples
    --------
    >>> plot_aop_rgb(SERCrgb, sercMetadata['spatial extent'], plot_title='SERC RGB')
    '''
    pLow, pHigh = np.percentile(rgbArray[~np.isnan(rgbArray)], (ls_pct, 100 - ls_pct))
    img_rescale = exposure.rescale_intensity(rgbArray, in_range=(pLow, pHigh))
    plt.imshow(img_rescale, extent=ext)
    plt.title(plot_title + '\n Linear ' + str(ls_pct) + '% Contrast Stretch')
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False, style='plain')
    rotatexlabels = plt.setp(ax.get_xticklabels(), rotation=90)
def remove_background(self):
    L_b = 3
    self.i_original = self.i_original[self.sub[1]:self.sub[3], self.sub[0]:self.sub[2], ]
    segments = slic(self.i_original, n_segments=2, compactness=0.1, enforce_connectivity=False)
    # segments += 1
    temp = self.i_original

    if sum(sum(segments[:5, :5])) > 10:
        for ii in range(0, 3):
            temp[:, :, ii] = (np.ones([self.i_original.shape[0], self.i_original.shape[1]]) - segments) * self.i_original[:, :, ii]
            # zero the top, bottom, right and left borders
            temp[:L_b, :, ii] = 0
            temp[self.i_original.shape[0] - L_b - 1:self.i_original.shape[0] - 1, :, ii] = 0
            temp[:, self.i_original.shape[1] - L_b - 1:self.i_original.shape[1] - 1, ii] = 0
            temp[:, :L_b, ii] = 0
    else:
        for ii in range(0, 3):
            temp[:, :, ii] = segments * self.i_original[:, :, ii]
            # print("else")
            # zero the top, bottom, right and left borders
            temp[:L_b, :, ii] = 0
            temp[self.i_original.shape[0] - L_b - 1:self.i_original.shape[0] - 1, :, ii] = 0
            temp[:, self.i_original.shape[1] - L_b - 1:self.i_original.shape[1] - 1, ii] = 0
            temp[:, :L_b, ii] = 0

    # pdb.set_trace()
    fig, ax = plt.subplots(1, 1)
    ax.imshow(mark_boundaries(self.i_original, segments))
    ax.imshow(temp)
    plt.show()

    p2, p98 = np.percentile(temp, (2, 98))
    temp = exposure.rescale_intensity(temp, in_range=(p2, p98))
    return temp
def _preprocess(self, frame, stretch_intensity=True, blur=1, denoise=0):
    """
    1. convert frame to grayscale
    2. remove noise from frame; increase denoise value for more noise filtering
    3. stretch contrast
    """
    if len(frame.shape) != 2:
        frm = grayspace(frame)
    else:
        frm = frame / self.pixel_depth * 255
        frm = frm.astype('uint8')

    # self.preprocessed_frame = frame
    # if denoise:
    #     frm = self._denoise(frm, weight=denoise)
    # print('gray', frm.shape)

    if blur:
        frm = gaussian(frm, blur) * 255
        frm = frm.astype('uint8')
        # frm1 = gaussian(self.preprocessed_frame, blur, multichannel=True) * 255
        # self.preprocessed_frame = frm1.astype('uint8')

    if stretch_intensity:
        frm = rescale_intensity(frm)
        # frm = self._contrast_equalization(frm)
        # self.preprocessed_frame = self._contrast_equalization(self.preprocessed_frame)

    return frm
def Image_ws_tranche(image):
    laser = Detect_laser(image)
    laser_tranche = tranche_image(laser, 60)

    image_g = skimage.color.rgb2gray(image)
    image_g = image_g * laser_tranche

    image_med = rank2.median((image_g * 255).astype('uint8'), disk(8))
    image_clahe = exposure.equalize_adapthist(image_med, clip_limit=0.03)
    image_clahe_stretch = exposure.rescale_intensity(image_clahe, out_range=(0, 256))

    image_grad = rank2.gradient(image_clahe_stretch, disk(3))
    image_grad_mark = image_grad < 20
    image_grad_forws = rank2.gradient(image_clahe_stretch, disk(1))
    image_grad_mark_closed = closing(image_grad_mark, disk(1))

    Labelised = (skimage.measure.label(image_grad_mark_closed, 8, 0)) + 1
    Watersheded = watershed(image_grad_forws, Labelised)

    cooc = coocurence_liste(Watersheded, laser, 3)
    x, y = compte_occurences(cooc)
    return x, y
def segmenter_data_transform(imb, rotate=None, normalize_pctwise=False):
    if isinstance(imb, tuple) and len(imb) == 2:
        imgs, labels = imb
    else:
        imgs = imb

    # rotate image if training
    if rotate is not None:
        for i in xrange(imgs.shape[0]):
            degrees = float(np.random.randint(rotate[0], rotate[1])) if \
                isinstance(rotate, tuple) else rotate
            imgs[i, 0] = scipy.misc.imrotate(imgs[i, 0], degrees, interp='bilinear')
            if isinstance(imb, tuple):
                labels[i, 0] = scipy.misc.imrotate(labels[i, 0], degrees, interp='bilinear')

    # assume they are square
    sz = c.fcn_img_size
    x, y = np.random.randint(0, imgs.shape[2] - sz, 2) if imgs.shape[2] > sz else (0, 0)
    imgs = nn.utils.floatX(imgs[:, :, x:x + sz, y:y + sz]) / 255.

    if not normalize_pctwise:
        pad = imgs.shape[2] // 5
        cut = imgs[:, 0, pad:-pad, pad:-pad]
        mu = cut.mean(axis=(1, 2)).reshape(imgs.shape[0], 1, 1, 1)
        sigma = cut.std(axis=(1, 2)).reshape(imgs.shape[0], 1, 1, 1)
        imgs = (imgs - mu) / sigma
        imgs = np.minimum(3, np.maximum(-3, imgs))
    else:
        pclow, pchigh = normalize_pctwise if isinstance(normalize_pctwise, tuple) else (20, 70)
        for i in xrange(imgs.shape[0]):
            pl, ph = np.percentile(imgs[i], (pclow, pchigh))
            imgs[i] = exposure.rescale_intensity(imgs[i], in_range=(pl, ph))
            imgs[i] = 2 * imgs[i] / imgs[i].max() - 1.
            # or other rescaling here to approximate ~ N(0, 1)

    if isinstance(imb, tuple):
        labels = nn.utils.floatX(labels[:, :, x:x + sz, y:y + sz])
        return imgs, labels
    return imgs
def rescale_nuclei(self):
    '''Rescale nuclei in the set'''
    if self.number_of_cells() == 0:
        return
    new_values = []
    for cur_cell in self.cells:
        nucleus_values = np.extract(cur_cell.nucleus, cur_cell.pic_nucleus)
        mean_value = np.mean(nucleus_values, dtype=float)
        new_values.append(nucleus_values / mean_value)
        cur_cell.nucleus_mean_value = mean_value
    p2, p98 = np.percentile(np.concatenate(new_values), (2, 98))
    for cur_cell in self.cells:
        rescaled_norm_pic = rescale_intensity(cur_cell.pic_nucleus / cur_cell.nucleus_mean_value,
                                              in_range=(p2, p98))
        cur_cell.rescaled_nucleus_pic = np.floor(rescaled_norm_pic * 200).astype(np.uint8)
def get_overlay(fifo):
    # get the whole FIFO
    ir_raw = fifo.read()
    # trim to 128 bytes
    ir_trimmed = ir_raw[0:128]
    # go all numpy on it
    ir = np.frombuffer(ir_trimmed, np.uint16)
    # set the array shape to the sensor shape (16x4)
    ir = ir.reshape((16, 4))[::-1, ::-1]
    ir = img_as_float(ir)

    # stretch contrast on our heat map
    p2, p98 = np.percentile(ir, (2, 98))
    ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
    # increase even further? (optional)
    # ir = exposure.equalize_hist(ir)

    # turn our array into pretty colors
    cmap = plt.get_cmap('spectral')
    rgba_img = cmap(ir)
    rgb_img = np.delete(rgba_img, 3, 2)

    # align the IR array with the camera
    tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
    ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)

    # turn it back into a ubyte so it'll display on the preview overlay
    ir_byte = img_as_ubyte(ir_aligned)

    # return buffer
    return np.getbuffer(ir_byte)
def findSources(image):
    """Return sources sorted by brightness."""
    img1 = image.copy()
    src_mask = makeSourcesMask(img1)
    img1[~src_mask] = img1[src_mask].min()
    img1 = exposure.rescale_intensity(img1)
    img1[~src_mask] = 0.
    img1.set_fill_value(0.)

    def obj_params_with_offset(img, labels, aslice, label_idx):
        y_offset = aslice[0].start
        x_offset = aslice[1].start
        thumb = img[aslice]
        lb = labels[aslice]
        yc, xc = ndimage.center_of_mass(thumb, labels=lb, index=label_idx)
        br = thumb[lb == label_idx].sum()  # the intensity of the source
        return [br, xc + x_offset, yc + y_offset]

    srcs_labels, num_srcs = ndimage.label(img1)

    if num_srcs < 10:
        print("WARNING: Only %d sources found." % (num_srcs))

    # eliminate all 1-pixel sources here
    all_objects = [[ind + 1, aslice] for ind, aslice
                   in enumerate(ndimage.find_objects(srcs_labels))
                   if srcs_labels[aslice].shape != (1, 1)]
    lum = np.array([obj_params_with_offset(img1, srcs_labels, aslice, lab_idx)
                    for lab_idx, aslice in all_objects])

    lum = lum[lum[:, 0].argsort()[::-1]]  # sort by brightness, highest to lowest

    return lum[:, 1:]
def embed(self, img, payload, k=6, tv_denoising_weight=4, rescale=True):
    if len(payload) > self.max_payload:
        raise ValueError("payload too long")
    padded = bytearray(payload) + b"\x00" * (self.max_payload - len(payload))
    encoded = self.rscodec.encode(padded)

    if img.ndim == 2:
        output = self._embed(img, encoded, k)
    elif img.ndim == 3:
        output = numpy.zeros(img.shape, dtype=float)
        for i in range(img.shape[2]):
            output[:, :, i] = self._embed(img[:, :, i], encoded, k)
        # y, cb, cr = rgb_to_ycbcr(img)
        # y2 = self._embed(y, encoded, k)
        # cb = self._embed(cb, encoded, k)
        # cr = self._embed(cr, encoded, k)
        # y2 = rescale_intensity(y2, out_range=(numpy.min(y), numpy.max(y)))
        # Cb2 = rescale_intensity(Cb2, out_range=(numpy.min(Cb), numpy.max(Cb)))
        # Cr2 = rescale_intensity(Cr2, out_range=(numpy.min(Cr), numpy.max(Cr)))
        # output = ycbcr_to_rgb(y2, cb, cr)
    else:
        raise TypeError("img must be a 2d or 3d array")

    # if tv_denoising_weight > 0:
    #     output = tv_denoise(output, tv_denoising_weight)
    if rescale:
        output = rescale_intensity(output, out_range=(numpy.min(img), numpy.max(img)))
    # return toimage(output, cmin=0, cmax=255)
    return output
def print_hog_image(image):
    """image is expected to be in its original format;
    this function plots the HOG image"""
    print(image.shape)
    image = color.rgb2gray(image)
    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True, normalise=True)
    print("finished hog...")

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

    ax1.axis('off')
    ax1.imshow(image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))

    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    ax1.set_adjustable('box-forced')
    plt.show()
def warp_rect(self, u_cont):
    pts = u_cont.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")

    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    rect *= self.ratio

    (tl, tr, br, bl) = rect
    width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))

    max_w = max(int(width_a), int(width_b))
    max_h = max(int(height_a), int(height_b))

    dst = np.array([
        [0, 0],
        [max_w - 1, 0],
        [max_w - 1, max_h - 1],
        [0, max_h - 1]], dtype="float32")

    m = cv2.getPerspectiveTransform(rect, dst)
    warp = cv2.warpPerspective(self.orig, m, (max_w, max_h))
    warp = exposure.rescale_intensity(warp, out_range=(0, 255))

    bop = 15
    light = 15
    return cv2.copyMakeBorder(warp, bop, bop, light, light,
                              cv2.BORDER_CONSTANT, (255, 255, 0))
images = glob.glob('Al_SiC_1D_0_*[0-4].edf')
light = fabio.open('Al_SiC_1D_0_0005.edf')
bg = light.data
dark = fabio.open('Al_SiC_1D_0_0006.edf')
darkfield = dark.data

base = np.empty([5682, 1780])
I = 0
for i in images:
    edf = fabio.open(str(i))
    img = edf.data
    new = np.divide(img, bg, dtype=np.float32)
    normalised = exposure.rescale_intensity(new, (0.05, 1.7))
    final = normalised[365:1795, 390:2170]
    plt.figure(figsize=(8, 4))
    # plt.imshow(final)
    # plt.imsave('Al_SiC_1D_0_000' + str(I) + '.jpg', new)
    x = 0
    while x < final.shape[0]:
        base[x + I, :] = final[x, :]
        x = x + 1
    I = I + 1063
plt.imshow(base)
from skimage import data
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage.exposure import rescale_intensity
from skimage import filters
import matplotlib.pyplot as plt
import cv2


# sobel_each / sobel_hsv were not defined in the original snippet; these are
# the standard adapt_rgb wrappers from the scikit-image documentation.
@adapt_rgb(each_channel)
def sobel_each(image):
    return filters.sobel(image)


@adapt_rgb(hsv_value)
def sobel_hsv(image):
    return filters.sobel(image)


img = cv2.imread(r"E:\Kuliah\semes 7\PCD\TUGAS\default.jpg", 0)
cv2.imshow('image', img)
# hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# cv2.imshow('image', hsv)
cv2.waitKey(0)
cv2.destroyAllWindows()

fig, (ax_each, ax_hsv) = plt.subplots(ncols=2, figsize=(14, 7))

# We use 1 - sobel_each(img), but this won't work if the image is not normalized
ax_each.imshow(rescale_intensity(1 - sobel_each(img)))
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")

# We use 1 - sobel_hsv(img), but this won't work if the image is not normalized
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(img)))
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")

######################################################################
# Notice that the result for the value-filtered image preserves the color of
# the original image, but channel filtered image combines in a more
# surprising way. In other common cases, smoothing for example, the channel
# filtered image will produce a better result than the value-filtered image.
#
# You can also create your own handler functions for ``adapt_rgb``. To do so,
def test_rescale_in_range():
    image = np.array([51., 102., 153.])
    out = exposure.rescale_intensity(image, in_range=(0, 255))
    assert_close(out, [0.2, 0.4, 0.6])
def test_rescale_stretch():
    image = np.array([51, 102, 153], dtype=np.uint8)
    out = exposure.rescale_intensity(image)
    assert out.dtype == np.uint8
    assert_close(out, [0, 127, 255])
    hsi_o = np.stack((h, s, i_s), axis=2)
    result = matplotlib.colors.hsv_to_rgb(hsi_o)
    result = result * 255
    result[result > 255] = 255
    result[result < 0] = 0
    return picture


# Load an example image
# imagename = sys.argv[1]
img = cv.imread('problem2_2.bmp')

# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))

# Equalization
img_eq = dhe(img)

# Specification
img_specification = hist_specification(img)

# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 4), dtype=object)
axes[0, 0] = fig.add_subplot(2, 4, 1)
for i in range(1, 4):
    axes[0, i] = fig.add_subplot(2, 4, 1 + i,
def test_rescale_uint14_limits():
    image = np.array([0, uint16_max], dtype=np.uint16)
    out = exposure.rescale_intensity(image, out_range='uint14')
    assert_close(out, [0, uint14_max])
def test_rescale_named_out_range():
    image = np.array([0, uint16_max], dtype=np.uint16)
    out = exposure.rescale_intensity(image, out_range='uint10')
    assert_close(out, [0, uint10_max])
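# Note on the named ranges used in the two tests above: rescale_intensity
# resolves strings such as 'uint10', 'uint12' and 'uint14' through skimage's
# DTYPE_RANGE table, so out_range='uint14' means (0, 2**14 - 1) without
# changing the array's actual dtype.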
def test_rescale_out_range():
    image = np.array([-10, 0, 10], dtype=np.int8)
    out = exposure.rescale_intensity(image, out_range=(0, 127))
    assert out.dtype == np.int8
    assert_close(out, [0, 63, 127])
def test_rescale_in_range_clip():
    image = np.array([51., 102., 153.])
    out = exposure.rescale_intensity(image, in_range=(0, 102))
    assert_close(out, [0.5, 1, 1])
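# Quick demonstration of the clipping behaviour pinned down by the test above
# (assumes this test module's np/exposure imports): with in_range=(0, 102),
# inputs above 102 saturate at the top of the default float out_range (0, 1).
vals = exposure.rescale_intensity(np.array([51., 102., 153.]), in_range=(0, 102))
print(vals)  # [0.5 1.  1. ]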
def plot_rgb(arr, rgb=(0, 1, 2), ax=None, extent=None, title="",
             figsize=(10, 10), stretch=None, str_clip=2):
    """Plot three bands in a numpy array as a composite RGB image.

    Parameters
    ----------
    arr : numpy array
        An n-dimensional numpy array in rasterio band order (bands, x, y)
    rgb : list
        Indices of the three bands to be plotted (default = 0, 1, 2)
    extent : tuple
        The extent object that matplotlib expects (left, right, bottom, top)
    title : string (optional)
        String representing the title of the plot
    ax : object
        The axes object where the ax element should be plotted. Default = none
    figsize : tuple (optional)
        The x and y integer dimensions of the output plot if preferred to set.
    stretch : Boolean
        If True a linear stretch will be applied
    str_clip : int (optional)
        The % of clip to apply to the stretch. Default = 2 (2 and 98)

    Returns
    -------
    fig, ax : figure object, axes object
        The figure and axes object associated with the 3 band image. If the
        ax keyword is specified, the figure return will be None.
    """
    if len(arr.shape) != 3:
        raise Exception("""Input needs to be 3 dimensions and in rasterio
                        order with bands first""")

    # Index bands for plotting and clean up data for matplotlib
    # (trailing slices keep a tuple `rgb` working as a band index)
    rgb_bands = arr[rgb, :, :]

    if stretch:
        s_min = str_clip
        s_max = 100 - str_clip
        arr_rescaled = np.zeros_like(rgb_bands)
        for ii, band in enumerate(rgb_bands):
            lower, upper = np.percentile(band, (s_min, s_max))
            arr_rescaled[ii] = exposure.rescale_intensity(band, in_range=(lower, upper))
        rgb_bands = arr_rescaled.copy()

    # If type is masked array - add alpha channel for plotting
    if ma.is_masked(rgb_bands):
        # Build alpha channel
        mask = ~(np.ma.getmask(rgb_bands[0])) * 255

        # Add the mask to the array & swap the axes order from (bands,
        # rows, columns) to (rows, columns, bands) for plotting
        rgb_bands = np.vstack((bytescale(rgb_bands),
                               np.expand_dims(mask, axis=0))).transpose([1, 2, 0])
    else:
        # Index bands for plotting and clean up data for matplotlib
        rgb_bands = bytescale(rgb_bands).transpose([1, 2, 0])

    # Then plot. Define ax if it's default to none
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    else:
        fig = None
    ax.imshow(rgb_bands, extent=extent)
    ax.set_title(title)
    ax.set(xticks=[], yticks=[])
    return fig, ax
    im = np.array([-128, -1], dtype=np.int8)
    frequencies, bin_centers = exposure.histogram(im)
    assert_array_equal(bin_centers, np.arange(-128, 0))
    assert frequencies[0] == 1
    assert frequencies[-1] == 1
    assert_array_equal(frequencies[1:-1], 0)


# Test histogram equalization
# ===========================

np.random.seed(0)

# squeeze image intensities to lower image contrast
test_img = skimage.img_as_float(data.camera())
test_img = exposure.rescale_intensity(test_img / 5. + 100)


def test_equalize_ubyte():
    img = skimage.img_as_ubyte(test_img)
    img_eq = exposure.equalize_hist(img)

    cdf, bin_edges = exposure.cumulative_distribution(img_eq)
    check_cdf_slope(cdf)


def test_equalize_float():
    img = skimage.img_as_float(test_img)
    img_eq = exposure.equalize_hist(img)

    cdf, bin_edges = exposure.cumulative_distribution(img_eq)
def test_rescale_shrink():
    image = np.array([51., 102., 153.])
    out = exposure.rescale_intensity(image)
    assert_close(out, [0, 0.5, 1])
    result = rmlp(bl, T=1 / 255., r=4, K=11)

    # calculate measures if ground truth exists
    if gt is not None:
        for i, bb in enumerate(bl):
            v_mse = np.linalg.norm(bb - gt)
            v_ssim = ssim(bb, gt, data_range=(bb.max() - bb.min()))
            logger.info("blurred {}".format(i))
            logger.info(".. MSE = {:.4f}, SSIM = {:.4f}".format(v_mse, v_ssim))
        v_mse = np.linalg.norm(result - gt)
        v_ssim = ssim(result, gt, data_range=(result.max() - result.min()))
        logger.info("result")
        logger.info(".. MSE = {:.4f}, SSIM = {:.4f}".format(v_mse, v_ssim))
    else:
        logger.info("no ground truth for quality evaluation")

    return result


if __name__ == '__main__':
    root = "data/mt"
    try:
        result = demo(root)
        # convert to uint8 for preview
        result = rescale_intensity(result, out_range=(0, 2 ** 8 - 1))
        result = result.astype(np.uint8)
        imageio.imwrite(os.path.join(root, "result.png"), result)
    except Exception as e:
        logger.error(traceback.format_exc())
from skimage import exposure

color_image = data.hubble_deep_field()

# for illustration purposes, we work on a crop of the image.
x_0 = 70
y_0 = 354
width = 100
height = 100

img = color.rgb2grey(color_image)[y_0:(y_0 + height), x_0:(x_0 + width)]

# the rescaling is done only for visualization purposes.
# the algorithms would work identically in an unscaled version of the
# image. However, the parameter h needs to be adapted to the scale.
img = exposure.rescale_intensity(img)

##############################################################
# MAXIMA DETECTION
#
# Maxima in the galaxy image are detected by mathematical morphology.
# There is no a priori constraint on the density.

# We find all local maxima
local_maxima = extrema.local_maxima(img)
label_maxima = label(local_maxima)
overlay = color.label2rgb(label_maxima, img, alpha=0.7, bg_label=0,
                          bg_color=None,
import numpy as np
from skimage.exposure import rescale_intensity


def scale_intensity(data, out_min=0, out_max=255):
    """Scale intensity of data in a range defined by [out_min, out_max],
    based on the 2nd and 98th percentiles."""
    p2, p98 = np.percentile(data, (2, 98))
    return rescale_intensity(data, in_range=(p2, p98), out_range=(out_min, out_max))
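# Hedged usage sketch for scale_intensity above, e.g. preparing a float image
# for 8-bit export (the camera() sample is an arbitrary stand-in):
from skimage import data

img8 = scale_intensity(data.camera().astype(float)).astype(np.uint8)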
import matplotlib.pyplot as plt

from skimage.feature import hog
from skimage import data, color, exposure


image = color.rgb2gray(data.astronaut())

fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                    cells_per_block=(1, 1), visualise=True)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))

print(hog_image)

ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Input image')

# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))

ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
plt.show()
                             plot_matches)
from skimage.transform import warp, AffineTransform
from skimage.exposure import rescale_intensity
from skimage.color import rgb2gray
from skimage.measure import ransac


# generate synthetic checkerboard image and add gradient for the later matching
checkerboard = img_as_float(data.checkerboard())
img_orig = np.zeros(list(checkerboard.shape) + [3])  # depth of 3
img_orig[..., 0] = checkerboard  # first layer is the checkerboard

# create a gradient lighting on the image
gradient_r, gradient_c = (np.mgrid[0:img_orig.shape[0], 0:img_orig.shape[1]]
                          / float(img_orig.shape[0]))
img_orig[..., 1] = gradient_r
img_orig[..., 2] = gradient_c
img_orig = rescale_intensity(img_orig)
img_orig_gray = rgb2gray(img_orig)

# warp synthetic image
tform = AffineTransform(scale=(0.9, 0.9), rotation=0.2, translation=(20, -10))
img_warped = warp(img_orig, tform.inverse, output_shape=(200, 200))
img_warped_gray = rgb2gray(img_warped)

# extract corners using Harris' corner measure
coords_orig = corner_peaks(corner_harris(img_orig_gray), threshold_rel=0.001,
                           min_distance=5)
coords_warped = corner_peaks(corner_harris(img_warped_gray),
                             threshold_rel=0.001, min_distance=5)
        bbox = plate['bbox']
        reg = plate['reg']
        xran = (bbox[0], bbox[0] + bbox[2])
        yran = (bbox[1], bbox[1] + bbox[3])
        print(count, actualFina, xran, yran)

        bbox, bestInd, bestAngle = deskew.Deskew(im, (xran, yran))
        rotIm = deskew.RotateAndCrop(im, (xran, yran), bestAngle)
        # misc.imsave("rotIm{0}.png".format(count), rotIm)

        imScore = RgbToPlateBackgroundScore(rotIm)
        # normContrast = exposure.equalize_hist(imScore)
        normContrast = exposure.rescale_intensity(imScore)
        # normContrast = exposure.equalize_adapthist(imScore)
        thresh = 0.6 * (normContrast.min() + normContrast.max())
        # normContrast = (normContrast > 0.5)
        # print(normContrast.min(), normContrast.max())

        misc.imsave("{0}.png".format(outRootFina), normContrast)
        pickle.dump((bbox, bestAngle), open("{0}.deskew".format(outRootFina), "wb"), protocol=-1)

        if 0:
            import matplotlib.pyplot as plt
            dat = normContrast.reshape((normContrast.size,))
            plt.subplot(3, 1, 1)
            ims = plt.imshow(normContrast)
def plot_visualization(vid, vid_orig, vis_dir, plot_img=False, write_video=True):

    def crop_center(img, cropx, cropy):
        y, x, _ = img.shape
        startx = x // 2 - (cropx // 2)
        starty = y // 2 - (cropy // 2)
        return img[starty:starty + cropy, startx:startx + cropx, :]

    def plot_img_and_hist(image, axes, bins=256):
        # Plot an image along with its histogram and cumulative histogram.
        image = img_as_float(image)
        ax_img, ax_hist = axes
        ax_cdf = ax_hist.twinx()

        # Display image
        ax_img.imshow(image, cmap=plt.cm.gray)
        ax_img.set_axis_off()
        ax_img.set_adjustable('box-forced')

        # Display histogram
        ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')
        ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
        ax_hist.set_xlabel('Pixel intensity')
        ax_hist.set_xlim(0, 1)
        ax_hist.set_yticks([])

        # Display cumulative distribution
        img_cdf, bins = exposure.cumulative_distribution(image, bins)
        ax_cdf.plot(bins, img_cdf, 'r')
        ax_cdf.set_yticks([])

        return ax_img, ax_hist, ax_cdf

    def PCA(data):
        m, n = data.shape[0], data.shape[1]
        # print(m, n)
        mean = np.mean(data, axis=0)
        data -= np.tile(mean, (m, 1))
        # calculate the covariance matrix
        cov = np.matmul(np.transpose(data), data)
        evals, evecs = np.linalg.eigh(cov)
        # sort eigenvalues in decreasing order
        idx = np.argsort(evals)[::-1]
        evecs = evecs[:, idx]
        evals = evals[idx]
        # print(evals)
        evecs = evecs[:, 0]
        return np.matmul(data, evecs), evals[0] / sum(evals)

    width, height = 112, 112
    video_histeq = []
    for i in range(vid.shape[0]):
        frame = crop_center(vid[i], 112, 112)
        frame = np.reshape(frame, (112 * 112, 3))
        frame, K = PCA(frame)
        frame = np.reshape(frame, (112, 112))
        max, min = np.max(frame), np.min(frame)
        frame = ((frame - min) / (max - min) * 255).astype('uint8')

        # Contrast stretching
        p2, p98 = np.percentile(frame, (2, 98))
        img_rescale = exposure.rescale_intensity(frame, in_range=(p2, p98))

        # Equalization
        img_eq = exposure.equalize_hist(frame)
        video_histeq.append(img_eq)

        # # Adaptive Equalization
        # img_adapteq = exposure.equalize_adapthist(frame, clip_limit=0.03)

        if plot_img:
            # Display results
            fig = plt.figure(figsize=(12, 16))
            axes = np.zeros((4, 3), dtype=object)
            axes[0, 0] = fig.add_subplot(4, 3, 1)
            for j in range(1, 3):
                axes[0, j] = fig.add_subplot(4, 3, 1 + j,
                                             sharex=axes[0, 0], sharey=axes[0, 0])
            for j in range(3, 12):
                axes[j // 3, j % 3] = fig.add_subplot(4, 3, 1 + j)

            ax_img, ax_hist, ax_cdf = plot_img_and_hist(frame, axes[0:2, 0])
            ax_img.set_title('PCA on 3 channels ({:.4f})'.format(K))

            y_min, y_max = ax_hist.get_ylim()
            ax_hist.set_ylabel('Number of pixels')
            ax_hist.set_yticks(np.linspace(0, y_max, 5))

            ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[0:2, 1])
            ax_img.set_title('Contrast stretching')

            ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[0:2, 2])
            ax_img.set_title('Histogram equalization')

            # ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[0:1, 3])
            # ax_img.set_title('Adaptive equalization')

            ax_cdf.set_ylabel('Fraction of total intensity')
            ax_cdf.set_yticks(np.linspace(0, 1, 5))

            print(vid_orig[j].shape)
            frame_downsample_crop = crop_center(vid_orig[j], 112, 112)
            frame = crop_center(vid[j], 112, 112)
            axes[2, 0].imshow(frame_downsample_crop.astype('uint8'))
            axes[2, 0].set_title('Downsampled')

            frame_scaled_joint = linear_scaling(
                frame, vid, video_linear_scaling=True,
                joint_channels=True).astype('uint8')
            axes[2, 1].imshow(frame_scaled_joint.astype('uint8'))
            axes[2, 1].set_title('Joint Scaling')

            frame_scaled_separate = linear_scaling(
                frame, vid, video_linear_scaling=True,
                joint_channels=False).astype('uint8')
            axes[2, 2].imshow(frame_scaled_separate.astype('uint8'))
            axes[2, 2].set_title('Separate Scaling')

            for j in range(frame.shape[2]):
                axes[3, j].imshow(frame[:, :, j], cmap=plt.get_cmap('jet'))
                axes[3, j].set_title('Channel{}'.format(j))

            # prevent overlap of y-axis labels
            fig.tight_layout()
            plt.savefig('{}/vis_{}.png'.format(vis_dir, i))
            plt.close()

    if write_video:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')  # be sure to use lower case
        output = "{}/hist_eq.avi".format(vis_dir)
        out = cv2.VideoWriter(output, fourcc, 10.0, (width, height), False)

        vid = np.multiply(np.asarray(video_histeq), 255).astype('uint8')
        print(vid.shape)
        print(output)
        for i in range(vid.shape[0]):
            frame = vid[i]
            # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            # frame = frame.reshape(112, 112, 3)
            # print(frame)
            out.write(frame)
        out.release()
        cv2.destroyAllWindows()
def contrastStretching(image):
    # contrast stretching between the 25th and 90th percentiles
    p_low, p_high = np.percentile(image, (25, 90))
    img_rescale = exposure.rescale_intensity(image, in_range=(p_low, p_high))
    return img_rescale
ax[2].imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
ax[2].set_title('Original >= local Otsu')

ax[3].imshow(glob_otsu, cmap=plt.cm.gray)
ax[3].set_title('Global Otsu ($t=%d$)' % t_glob_otsu)

for a in ax:
    a.axis('off')

plt.tight_layout()

######################################################################
# The example below performs the same comparison, using a 3D image this time.

brain = exposure.rescale_intensity(data.brain().astype(float))

radius = 5
neighborhood = ball(radius)

# t_loc_otsu is an image
t_loc_otsu = rank.otsu(brain, neighborhood)
loc_otsu = brain >= t_loc_otsu

# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(brain)
glob_otsu = brain >= t_glob_otsu

fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12),
    if len(img.shape) == 3:
        plt.imshow(img, cmap='gray')
    else:  # len(img.shape) == 2
        plt.imshow(gray2rgb(img), cmap='gray')
    plt.title(title)

    # Histogram
    plt.subplot(num_plot + 2)
    plt.hist(img_as_float(img).ravel(), bins=bins)
    plt.xlim(0, 1)


def file_read():
    file_path = filedialog.askopenfilename()
    return file_path


try:
    # Ask for an image and get its path
    root = tk.Tk()
    root.withdraw()
    img = imread(file_read())

    out_range = (0, 50)
    img_shrink = rescale_intensity(img, out_range=out_range).astype(np.uint8)

    plot_img_hist(img, 221, 'Original image')
    plot_img_hist(img_shrink, 222, 'Shrunk image', bins=out_range[1] - out_range[0])

    plt.show()
except:
    print('You closed the window!')
    print('👋🏽')
    root.destroy()
######################################################################
# We can use these functions as we would normally use them, but now they work
# with both gray-scale and color images. Let's plot the results with a color
# image:

from skimage import data
from skimage.exposure import rescale_intensity
import matplotlib.pyplot as plt

image = data.astronaut()  # load the astronaut sample image

fig, (ax_each, ax_hsv) = plt.subplots(ncols=2, figsize=(14, 7))  # create the figure

# We use 1 - sobel_each(image) but this won't work if image is not normalized
ax_each.imshow(rescale_intensity(1 - sobel_each(image)))  # Sobel run once per RGB channel, result rescaled to full range
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")

# We use 1 - sobel_hsv(image) but this won't work if image is not normalized
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))  # Sobel on the (V)alue channel only
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")

######################################################################
# Notice that the result for the value-filtered image preserves the color of
# the original image, but channel filtered image combines in a more
# surprising way. In other common cases, smoothing for example, the channel
# filtered image will produce a better result than the value-filtered image.
#
# You can also create your own handler functions for ``adapt_rgb``. To do so,
def pad_img_and_add_interp_down_channel(array, downsample_axis='x',
                                        downsample_ratio=[1, 2], shape=[128, 128]):
    '''
    This function takes in an image and outputs a three channel image, where
    the first channel is the fully-sampled image, the second channel is an
    interpolated version of a downsampled copy of this image, and the third
    channel contains the downsampling binary mask.
    '''
    if len(array.shape) != 0:
        if len(shape) > 2:
            shape = shape[0:2]
        array = exposure.rescale_intensity(array, in_range='image', out_range=(0.0, 1.0))
        mask = np.ones(array.shape)
        # print(full_image.shape)
        if downsample_ratio[0] == 0:
            downsample_ratio[0] = 1
        elif downsample_ratio[1] == 0:
            downsample_ratio[1] = 1
        if downsample_axis == 'x':
            latent_image = np.zeros((array.shape[0], int(np.ceil(array.shape[1] / downsample_ratio[1]))))
            downsample_ratio = downsample_ratio[1]
            j_count = 0
            for j in range(array.shape[1]):
                if j % downsample_ratio == 0:
                    latent_image[:, j_count] = array[:, j]
                    j_count += 1
                else:
                    mask[:, j] = 0
        elif downsample_axis == 'y':
            latent_image = np.zeros((int(np.ceil(array.shape[0] / downsample_ratio[0])), array.shape[1]))
            downsample_ratio = downsample_ratio[0]
            i_count = 0
            for i in range(array.shape[0]):
                if i % downsample_ratio == 0:
                    latent_image[i_count, :] = array[i, :]
                    i_count += 1
                else:
                    mask[i, :] = 0
        elif downsample_axis == 'both':
            latent_image = np.zeros((int(np.ceil(array.shape[0] / downsample_ratio[0])),
                                     int(np.ceil(array.shape[1] / downsample_ratio[1]))))
            mask = np.zeros(array.shape)
            i_count = 0
            for i in range(0, array.shape[0], downsample_ratio[0]):
                j_count = 0
                for j in range(0, array.shape[1], downsample_ratio[1]):
                    latent_image[i_count, j_count] = array[i, j]
                    mask[i, j] = 1
                    if j % downsample_ratio[1] == 0:
                        j_count += 1
                if i % downsample_ratio[0] == 0:
                    i_count += 1
        down_image = skimage.transform.resize(latent_image, output_shape=array.shape, order=3,
                                              mode='reflect', cval=0, clip=True,
                                              preserve_range=True, anti_aliasing=True,
                                              anti_aliasing_sigma=None)
        full_i_shape = array.shape[0]
        full_j_shape = array.shape[1]
        if full_i_shape % shape[0] != 0:
            i_left = full_i_shape % shape[0]
            i_pad = (shape[0] - i_left) // 2
            rest_i = (shape[0] - i_left) % 2
        else:
            i_left = 0
            i_pad = 0
            rest_i = 0
        if full_j_shape % shape[1] != 0:
            j_left = full_j_shape % shape[1]
            j_pad = (shape[1] - j_left) // 2
            rest_j = (shape[1] - j_left) % 2
        else:
            j_left = 0
            j_pad = 0
            rest_j = 0
        # print('i_left = ' + str(i_left))
        # print('j_left = ' + str(j_left))
        # print('i_pad = ' + str(i_pad))
        # print('j_pad = ' + str(j_pad))
        # print('rest_i = ' + str(rest_i))
        # print('rest_j = ' + str(rest_j))
        full_image = np.zeros((full_i_shape, full_j_shape, 3), dtype=np.float32)
        full_image[..., 0] = array       # target array
        full_image[..., 1] = down_image  # downsampled array
        full_image[..., 2] = mask        # mask array - for display
        pad_image = np.pad(full_image, [(i_pad,), (j_pad,), (0,)],
                           mode='constant', constant_values=0)
        padded_multi_chan_image = np.pad(pad_image, [(0, rest_i), (0, rest_j), (0, 0)],
                                         mode='constant', constant_values=0)
    else:
        padded_multi_chan_image = np.array(0)
    return padded_multi_chan_image
import numpy as np
from numpy import ndarray
from skimage import exposure


def intensity(image_array: ndarray):
    """Stretch contrast between the 0.2 and 99.8 percentiles."""
    v_min, v_max = np.percentile(image_array, (0.2, 99.8))
    better_contrast = exposure.rescale_intensity(image_array, in_range=(v_min, v_max))
    return better_contrast
board = np.zeros((9, 9), dtype="int")
stepX = warped.shape[1] // 9
stepY = warped.shape[0] // 9
cellLocs = []

for y in range(0, 9):
    row = []
    for x in range(0, 9):
        startX = x * stepX
        startY = y * stepY
        endX = (x + 1) * stepX
        endY = (y + 1) * stepY
        row.append((startX, startY, endX, endY))

        cell = warped[startY:endY, startX:endX]
        cell = exposure.rescale_intensity(cell, out_range=(0, 255))
        cell = cell.astype("uint8")

        thresh = cv2.threshold(cell, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        thresh = clear_border(thresh)

        digit = extract_digit(thresh, ticked)
        if digit != ".":
            roi = cv2.resize(digit, (28, 28))
            roi = roi.astype("float") / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            pred = model.predict(roi).argmax(axis=1)[0]
            # print(pred)
            board[y, x] = pred
    cellLocs.append(row)
from skimage import exposure


def equalize_image(image):
    # stretch assuming 8-bit input, then apply CLAHE
    image = exposure.rescale_intensity(image, in_range=(0, 255))
    return exposure.equalize_adapthist(image)
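# Hedged usage sketch for equalize_image above; the in_range=(0, 255) stretch
# presumes 8-bit input, and equalize_adapthist returns floats in [0, 1].
from skimage import data

eq = equalize_image(data.camera())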
def plot_visualization_frame(eval_vis_dir, frame, name):

    def crop_center(img, cropx, cropy):
        y, x, _ = img.shape
        startx = x // 2 - (cropx // 2)
        starty = y // 2 - (cropy // 2)
        return img[starty:starty + cropy, startx:startx + cropx, :]

    def plot_img_and_hist(image, axes, bins=256):
        # Plot an image along with its histogram and cumulative histogram.
        image = img_as_float(image)
        ax_img, ax_hist = axes
        ax_cdf = ax_hist.twinx()

        # Display image
        ax_img.imshow(image, cmap=plt.cm.gray)
        ax_img.set_axis_off()
        ax_img.set_adjustable('box-forced')

        # Display histogram
        ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')
        ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
        ax_hist.set_xlabel('Pixel intensity')
        ax_hist.set_xlim(0, 1)
        ax_hist.set_yticks([])

        # Display cumulative distribution
        img_cdf, bins = exposure.cumulative_distribution(image, bins)
        ax_cdf.plot(bins, img_cdf, 'r')
        ax_cdf.set_yticks([])

        return ax_img, ax_hist, ax_cdf

    frame = crop_center(frame, 112, 112)
    frame = frame[:, :, 0]
    max, min = np.max(frame), np.min(frame)
    frame = ((frame - min) / (max - min) * 255).astype('uint8')

    # Contrast stretching
    p2, p98 = np.percentile(frame, (2, 98))
    img_rescale = exposure.rescale_intensity(frame, in_range=(p2, p98))

    # Equalization
    img_eq = exposure.equalize_hist(frame)

    # Display results
    fig = plt.figure(figsize=(12, 8))
    axes = np.zeros((2, 3), dtype=object)
    axes[0, 0] = fig.add_subplot(2, 3, 1)
    for i in range(1, 3):
        axes[0, i] = fig.add_subplot(2, 3, 1 + i, sharex=axes[0, 0], sharey=axes[0, 0])
    for i in range(3, 6):
        axes[i // 3, i % 3] = fig.add_subplot(2, 3, 1 + i)

    ax_img, ax_hist, ax_cdf = plot_img_and_hist(frame, axes[0:2, 0])
    ax_img.set_title('Feature map')

    y_min, y_max = ax_hist.get_ylim()
    ax_hist.set_ylabel('Number of pixels')
    ax_hist.set_yticks(np.linspace(0, y_max, 5))

    ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[0:2, 1])
    ax_img.set_title('Contrast stretching')

    ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[0:2, 2])
    ax_img.set_title('Histogram equalization')

    ax_cdf.set_ylabel('Fraction of total intensity')
    ax_cdf.set_yticks(np.linspace(0, 1, 5))

    fig.tight_layout()
    plt.savefig(os.path.join(eval_vis_dir, '{}.png'.format(name)))
    plt.close()
def pad_img_and_add_down_channel(array, downsample_axis='x', downsample_ratio=[1, 2],
                                 shape=[128, 128], gauss_blur_std=None):
    '''
    This function takes in an image and outputs a three channel image, where
    the first channel is the fully-sampled image, the second channel is a
    downsampled version of this image, and the third channel contains the
    downsampling binary mask
    '''
    if len(array.shape) != 0:
        if len(shape) > 2:
            shape = shape[0:2]
        array = exposure.rescale_intensity(array, in_range='image', out_range=(0.0, 1.0))
        down_image = np.array(array, dtype=np.float32)
        mask = np.ones(array.shape)
        # print(full_image.shape)
        if downsample_ratio[0] == 0:
            downsample_ratio[0] = 1
        if downsample_ratio[1] == 0:
            downsample_ratio[1] = 1
        if downsample_axis == 'x':
            downsample_ratio = downsample_ratio[1]
            for j in range(array.shape[1]):
                if j % downsample_ratio != 0:
                    mask[:, j] = 0
        elif downsample_axis == 'y':
            downsample_ratio = downsample_ratio[0]
            for i in range(array.shape[0]):
                # fixed: original referenced an undefined `downsampling_ratio[1]`
                if i % downsample_ratio != 0:
                    mask[i, :] = 0
        elif downsample_axis == 'both':
            downsample_ratio_j = downsample_ratio[1]
            downsample_ratio_i = downsample_ratio[0]
            if downsample_ratio_j > 0:
                for j in range(array.shape[1]):
                    if j % downsample_ratio[1] != 0:
                        mask[:, j] = 0
            if downsample_ratio_i > 0:
                for i in range(array.shape[0]):
                    if i % downsample_ratio[0] != 0:
                        mask[i, :] = 0
        down_image = np.multiply(mask, down_image)
        full_i_shape = array.shape[0]
        full_j_shape = array.shape[1]
        if full_i_shape % shape[0] != 0:
            i_left = full_i_shape % shape[0]
            i_pad = (shape[0] - i_left) // 2
            rest_i = (shape[0] - i_left) % 2
        else:
            i_left = 0
            i_pad = 0
            rest_i = 0
        if full_j_shape % shape[1] != 0:
            j_left = full_j_shape % shape[1]
            j_pad = (shape[1] - j_left) // 2
            rest_j = (shape[1] - j_left) % 2
        else:
            j_left = 0
            j_pad = 0
            rest_j = 0
        # print('i_left = ' + str(i_left))
        # print('j_left = ' + str(j_left))
        # print('i_pad = ' + str(i_pad))
        # print('j_pad = ' + str(j_pad))
        # print('rest_i = ' + str(rest_i))
        # print('rest_j = ' + str(rest_j))
        if gauss_blur_std is not None:
            down_image = scipy.ndimage.gaussian_filter(down_image, sigma=gauss_blur_std,
                                                       order=0, output=None, mode='reflect',
                                                       cval=0.0, truncate=6.0)
        full_image = np.zeros((full_i_shape, full_j_shape, 3), dtype=np.float32)
        full_image[..., 0] = array       # Target Array
        full_image[..., 1] = down_image  # Downsampled Array
        full_image[..., 2] = mask        # Mask Array - for display
        pad_image = np.pad(full_image, [(i_pad,), (j_pad,), (0,)],
                           mode='constant', constant_values=0)
        padded_multi_chan_image = np.pad(pad_image, [(0, rest_i), (0, rest_j), (0, 0)],
                                         mode='constant', constant_values=0)
    else:
        padded_multi_chan_image = np.array(0)
    return padded_multi_chan_image
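# Hedged usage sketch for pad_img_and_add_down_channel above (assumes the
# module-level numpy/skimage imports this function already relies on): build
# a (target, downsampled, mask) training sample from a synthetic 2-D image.
import numpy as np

demo_img = np.random.rand(200, 180).astype(np.float32)
demo_sample = pad_img_and_add_down_channel(demo_img, downsample_axis='x',
                                           downsample_ratio=[1, 2],
                                           shape=[128, 128])
print(demo_sample.shape)  # (256, 256, 3): padded to multiples of 128, 3 channels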