def fit(self, x, y, sort=True):
    if sort:
        i = np.argsort(x)
        x, y = x[i], y[i]
    self.x = x
    self.y = y
    x, y = dedup_np(x, y)
    self.ll = order_filter(y, self.w, self.l)
    self.uu = order_filter(y, self.w, self.u)
    self.L = beta_spline(x, self.ll, self.n)
    self.U = beta_spline(x, self.uu, self.n)
def mov_order(x, order='med', windsize=3, lag='lagged'):
    # if windsize is even, raise ValueError
    if lag == 'lagged':
        lead = windsize // 2
    elif lag == 'centered':
        lead = 0
    elif lag == 'leading':
        lead = -windsize // 2 + 1
    else:
        raise ValueError("lag must be 'lagged', 'centered' or 'leading'")

    if order == 'med':
        rank = (windsize - 1) // 2
    elif order == 'min':
        rank = 0
    elif order == 'max':
        rank = windsize - 1
    elif np.isfinite(order):
        rank = int(order)
    else:
        raise ValueError("order must be a finite number or 'med', 'min', 'max'")

    #return signal.order_filter(x, np.ones(windsize), rank)[:-lead]
    xext = expandarr(x, windsize)
    #np.r_[np.ones(windsize)*x[0], x, np.ones(windsize)*x[-1]]
    return signal.order_filter(xext, np.ones(windsize), rank)[windsize - lead:-(windsize + lead)]
def load_imgs(file_paths, resize=0.5, normalize=False, filter_rank=0):
    slice_ = (slice(0, 112), slice(0, 92))
    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    n_faces = len(file_paths)
    faces = np.zeros((n_faces, h, w), dtype=np.float32)

    # iterate over the collected file paths to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
        face = np.asarray(img[slice_], dtype=np.float32)

        if normalize:
            face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats

        if resize is not None:
            face = cv2.resize(face, (w, h), interpolation=cv2.INTER_CUBIC)

        if filter_rank is not None:
            from scipy import signal  # order filter
            face = signal.order_filter(face, np.ones((3, 3)), rank=filter_rank)

        faces[i, ...] = face

    return faces
def _normalize(img):
    M = np.max(img)
    # 4-neighborhood
    # kernel = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
    # 8-neighborhood
    kernel = np.ones((3, 3), dtype=int)
    filtered = order_filter(img, kernel, np.sum(kernel) - 1)
    m = np.mean(img[np.equal(np.equal(img, filtered), filtered != M)])
    return img * ((M - m)**2)
def isflipped(img):
    '''check whether a (THRESHOLDED) image is flipped.

    In other words, do the lines run from x=0... or y=0....  Note that an
    image that I define as "not flipped" for these purposes will by default
    appear flipped in matplotlib.  Note also that if the image is full of
    pictures, there is no guarantee that this method will work.  What is for
    sure is that if the image is flipped improperly, later clusterings will
    most likely fail to identify blocks of text.

    input: A thresholded image that will be shrunk and checked for line order.
    output: True if words run in the y-direction.
    '''
    imsml = greater(1 - np.min(img[::5, ::5], 2), .5)
    dims = shape(imsml)
    smx = ss.order_filter(imsml, ones((dims[0] // 72 * 2 + 1, 1)), dims[0] // 36 - 1)
    smy = ss.order_filter(imsml, ones((1, dims[1] // 72 * 2 + 1)), dims[1] // 36 - 1)
    clx = cluster_img(smx)
    cly = cluster_img(smy)
    return len(clx) > len(cly)
def fit(self, x, y, sort=True):
    """Fit the model.

    Args:
        x (np.array): The control variable.
        y (np.array): The response variable.
    """
    if sort:
        i = np.argsort(x)
        x, y = x[i], y[i]
    self.x = x
    self.y = y
    x, y = dedup_np(x, y)
    self.order = order_filter(y, self.w, self.i)
    self.spline = beta_spline(x, self.order, self.n)
def non_max_sup(acc):
    """Perform non-maximum suppression with a 3-by-3 neighborhood."""
    # TODO: 5 marks. As before, if you use for loops you will get 3 marks
    # instead of five. Hint: use the order_filter that I already imported.
    # A local maximum is strictly larger than all of its neighborhood.

    # Implementation based on the hint given in Lecture 31 slides
    domain = np.ones((3, 3))
    nms_2largest = order_filter(acc, domain, 7)  # second-largest value in each 3x3 window
    nms_map = np.subtract(acc, nms_2largest)
    # Binarize
    nms_map = np.where(nms_map > 0, 1, 0)
    return nms_map
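A quick self-contained check of the rank-7 trick above (a sketch; only numpy and scipy are assumed): since the 3-by-3 domain covers nine cells including the centre, rank 7 selects the second-largest value, so a cell survives exactly when it is strictly larger than all eight of its neighbours.

import numpy as np
from scipy.signal import order_filter

acc = np.array([[0., 1., 0.],
                [1., 5., 1.],
                [0., 1., 0.]])
domain = np.ones((3, 3))
second_largest = order_filter(acc, domain, 7)  # rank 7 of 9 values = second largest
print((acc - second_largest > 0).astype(int))  # only the centre cell survives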
def process(my_smooth=True, order_filt=True):
    img_res = 100
    pdf_files = [f for f in os.listdir(att_dir) if '.pdf' in f.lower()]
    for f in pdf_files:
        inp_f = os.path.join(att_dir, f)
        out_f = os.path.join(att_dir, os.path.splitext(f)[0] + '.png')
        cmd = 'convert -density {2} {0} {1}'.format(inp_f, out_f, img_res)
        out = subprocess.Popen(cmd, shell=True)
        out.communicate()
        #os.remove(inp_f)

    pngs = [os.path.join(att_dir, f) for f in os.listdir(att_dir) if '.png' in f]

    for p in pngs:
        img = mpimg.imread(p)
        blue = squeeze(img[:, :, 2])
        others = np.sum(img[:, :, 0:2], 2)
        final = blue - others
        final[less(final, 0)] = 0.

        winsize = 5
        wid = winsize / 2
        funny = False
        circle_only = True

        if order_filt:
            order_mask = less(dist(winsize, c=True), wid)
            n = len(nonzero(order_mask)[0])
            lowest = int(n * .3)
            convo = ss.order_filter(final, order_mask, lowest)
            convo = ss.medfilt(convo, 7)
            return final, convo, order_mask

        if circle_only:
            gaussian = 1. * less(dist(winsize, c=True), wid)
        elif funny:
            gaussian = exp(-1 * (dist(winsize) / wid)**2)
        else:
            gaussian = exp(-1 * (dist(20, c=True) / 20 / wid)**2)

        gaussian /= sum(gaussian)

        if my_smooth:
            convo = ss.convolve2d(final, gaussian)
        else:
            convo = blur_image(final, 20)

        return final, convo, gaussian
def process(my_smooth=True, order_filt=True):
    img_res = 100
    pdf_files = [f for f in os.listdir(att_dir) if '.pdf' in f.lower()]
    for f in pdf_files:
        inp_f = os.path.join(att_dir, f)
        out_f = os.path.join(att_dir, os.path.splitext(f)[0] + '.png')
        cmd = 'convert -density {2} {0} {1}'.format(inp_f, out_f, img_res)
        out = subprocess.Popen(cmd, shell=True)
        out.communicate()
        #os.remove(inp_f)

    pngs = [os.path.join(att_dir, f) for f in os.listdir(att_dir) if '.png' in f]

    for p in pngs:
        img = mpimg.imread(p)
        blue = squeeze(img[:, :, 2])
        others = np.sum(img[:, :, 0:2], 2)
        final = blue - others
        final[less(final, 0)] = 0.

        winsize = 5
        wid = winsize / 2
        funny = False
        circle_only = True

        if order_filt:
            order_mask = less(dist(winsize, c=True), wid)
            n = len(nonzero(order_mask)[0])
            lowest = int(n * .3)
            convo = ss.order_filter(final, order_mask, lowest)
            convo = ss.medfilt(convo, 7)
            return final, convo, order_mask

        if circle_only:
            gaussian = 1. * less(dist(winsize, c=True), wid)
        elif funny:
            gaussian = exp(-1 * (dist(winsize) / wid)**2)
        else:
            gaussian = exp(-1 * (dist(20, c=True) / 20 / wid)**2)

        gaussian /= sum(gaussian)

        if my_smooth:
            convo = ss.convolve2d(final, gaussian)
        else:
            convo = blur_image(final, 20)

        return final, convo, gaussian
def movorder(x, order='med', windsize=3, lag='lagged'):
    '''moving order statistics

    Parameters
    ----------
    x : array
        time series data
    order : float or 'med', 'min', 'max'
        which order statistic to calculate
    windsize : int
        window size
    lag : 'lagged', 'centered', or 'leading'
        location of window relative to current position

    Returns
    -------
    filtered array
    '''
    # if windsize is even, should it raise ValueError?
    if lag == 'lagged':
        lead = windsize // 2
    elif lag == 'centered':
        lead = 0
    elif lag == 'leading':
        lead = -windsize // 2 + 1
    else:
        raise ValueError("lag must be 'lagged', 'centered' or 'leading'")

    if order == 'med':
        rank = (windsize - 1) // 2
    elif order == 'min':
        rank = 0
    elif order == 'max':
        rank = windsize - 1
    elif np.isfinite(order):
        rank = int(order)
    else:
        raise ValueError("order must be a finite number or 'med', 'min', 'max'")

    #return signal.order_filter(x, np.ones(windsize), rank)[:-lead]
    xext = expandarr(x, windsize)
    #np.r_[np.ones(windsize)*x[0], x, np.ones(windsize)*x[-1]]
    return signal.order_filter(xext, np.ones(windsize), rank)[windsize - lead:-(windsize + lead)]
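A minimal usage sketch for movorder (hypothetical input data; it assumes movorder and its expandarr helper are importable from this module, as in the snippet above):

import numpy as np

x = np.array([1., 5., 2., 8., 3., 7., 4.])
print(movorder(x, order='med', windsize=3, lag='centered'))  # centred moving median
print(movorder(x, order='max', windsize=3, lag='lagged'))    # trailing moving maximum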
def fit(self, x, y, sort=True):
    """Fit the model.

    Args:
        x (np.array): The control variable.
        y (np.array): The response variable.
    """
    if sort:
        i = np.argsort(x)
        x, y = x[i], y[i]
    self.order = order_filter(y, self.w, self.i)
    self.interpo = interp1d(x[::self.k], self.order[::self.k],
                            bounds_error=False, fill_value=0)
    self.x = x
    self.y = y
def movorder(x, order='med', windsize=3, lag='lagged'):
    '''moving order statistics

    Parameters
    ----------
    x : array
        time series data
    order : float or 'med', 'min', 'max'
        which order statistic to calculate
    windsize : int
        window size
    lag : 'lagged', 'centered', or 'leading'
        location of window relative to current position

    Returns
    -------
    filtered array
    '''
    # if windsize is even, should it raise ValueError?
    if lag == 'lagged':
        lead = windsize // 2
    elif lag == 'centered':
        lead = 0
    elif lag == 'leading':
        lead = -windsize // 2 + 1
    else:
        raise ValueError("lag must be 'lagged', 'centered' or 'leading'")

    if order == 'med':
        rank = (windsize - 1) // 2
    elif order == 'min':
        rank = 0
    elif order == 'max':
        rank = windsize - 1
    elif np.isfinite(order):
        rank = int(order)
    else:
        raise ValueError("order must be a finite number or 'med', 'min', 'max'")

    #return signal.order_filter(x, np.ones(windsize), rank)[:-lead]
    xext = expandarr(x, windsize)
    #np.r_[np.ones(windsize)*x[0], x, np.ones(windsize)*x[-1]]
    return signal.order_filter(xext, np.ones(windsize), rank)[windsize - lead:-(windsize + lead)]
def Bfx_lbp(I, R=None, options={}):
    """
    X, Xn, options = Bfx_lbp(I, R, options)

    Toolbox: Balu
        Local Binary Patterns features

        X is the features vector, Xn is the list of feature names (see Example
        to see how it works).

        It calculates the LBP over a regular grid of patches. The function
        uses scikit-image's local_binary_pattern implementation (see
        http://scikit-image.org/docs/dev/api/skimage.feature.html#local-binary-pattern).

        It returns a matrix of uniform lbp82 descriptors for I, made by
        concatenating histograms of each grid cell in the image.
        Grid size is options['hdiv'] * options['vdiv'].

        R is a binary image or empty. If R is given, the LBP will be computed
        after the pixels where R==0 in image I are set to 0.

        options['mappingtype'] can have one of these options: {'nri_uniform',
        'uniform', 'ror', 'default'}. If no options are provided,
        'nri_uniform', which produces histograms of 59 bins, will be used.

        Output:
        X is a matrix of size ((hdiv*vdiv) x 59); each row has a histogram
        corresponding to a grid cell. We use 59 bins.
        options['x'] of size hdiv*vdiv is the x coordinate of the center of
        the ith grid cell.
        options['y'] of size hdiv*vdiv is the y coordinate of the center of
        the ith grid cell.
        Both coordinates are calculated as if the image was a square of side
        length 1.

        References:
        Ojala, T.; Pietikainen, M. & Maenpaa, T. Multiresolution gray-scale
        and rotation invariant texture classification with local binary
        patterns. IEEE Transactions on Pattern Analysis and Machine
        Intelligence, 2002, 24, 971-987.

        Mu, Y. et al (2008): Discriminative Local Binary Patterns for Human
        Detection in Personal Album. CVPR-2008.

        Example 1:
            import numpy as np
            from balu.ImagesAndData import balu_imageload
            from balu.FeatureExtraction import Bfx_lbp
            from balu.InputOutput import Bio_printfeatures
            from matplotlib.pyplot import bar, figure, imshow, show

            options = {
                'weight': 0,                  # Weight of the histogram bins
                'vdiv': 3,                    # one vertical division
                'hdiv': 3,                    # one horizontal division
                'samples': 8,                 # number of neighbor samples
                'mappingtype': 'nri_uniform'  # uniform LBP
            }
            I = balu_imageload('testimg1.jpg')       # input image
            J = I[119:219, 119:239, 1]               # region of interest (green)
            figure(1)
            imshow(J, cmap='gray')                   # image to be analyzed
            X, Xn = Bfx_lbp(J, None, options)        # LBP features
            figure(2)
            bar(np.arange(X.shape[1]), X[0, :])
            Bio_printfeatures(X, Xn)
            show()

        Example 2:
            import numpy as np
            from balu.ImagesAndData import balu_imageload
            from balu.FeatureExtraction import Bfx_lbp
            from balu.InputOutput import Bio_printfeatures
            from matplotlib.pyplot import bar, figure, imshow, show

            options = {
                'weight': 0,                  # Weight of the histogram bins
                'vdiv': 3,                    # one vertical division
                'hdiv': 3,                    # one horizontal division
                'samples': 8,                 # number of neighbor samples
                'mappingtype': 'uniform'      # uniform LBP
            }
            I = balu_imageload('testimg1.jpg')       # input image
            J = I[119:219, 119:239, 1]               # region of interest (green)
            figure(1)
            imshow(J, cmap='gray')                   # image to be analyzed
            X, Xn = Bfx_lbp(J, None, options)        # LBP features
            figure(2)
            bar(np.arange(X.shape[1]), X[0, :])
            Bio_printfeatures(X, Xn)
            show()

        See also Bfx_gabor, Bfx_clp, Bfx_fourier, Bfx_dct.

    (c) GRIMA-DCCUC, 2011
    http://grima.ing.puc.cl

    With collaboration from:
    Diego Patiño ([email protected]) -> Translated implementation into python (2017)
    """
    if R is None:
        R = np.ones(I.shape)

    if 'show' not in options:
        options['show'] = False

    if 'normalize' not in options:
        options['normalize'] = False

    if options['show']:
        print('--- extracting local binary patterns features...')

    if 'samples' not in options:
        options['samples'] = 8

    if 'integral' not in options:
        options['integral'] = False

    if 'radius' not in options:
        options['radius'] = np.log(options['samples']) / np.log(2.0) - 1

    if 'weight' not in options:
        options['weight'] = 0

    LBPst = 'LBP'

    if 'mappingtype' not in options:
        options['mappingtype'] = 'nri_uniform'

    if options['mappingtype'] == 'ror':
        num_patterns = 256
    elif options['mappingtype'] == 'uniform':
        num_patterns = 10
    elif options['mappingtype'] == 'nri_uniform':
        num_patterns = 59
    else:
        options['mappingtype'] = 'default'
        num_patterns = 256

    st = '{0},{1}'.format(options['samples'], options['mappingtype'])

    # Get lbp image
    if R is not None:
        I[np.where(R == 0)] = 0

    radius = options['radius']
    P = options['samples']
    LBP = local_binary_pattern(I, P=P, R=radius, method=options['mappingtype'])
    n1, n2 = LBP.shape
    options['Ilbp'] = LBP

    if options['integral']:
        options['Hx'] = Bim_inthist(LBP, num_patterns)

    vdiv = options['vdiv']
    hdiv = options['hdiv']

    modn1 = n1 % vdiv
    if modn1 != 0:
        LBP = np.concatenate((LBP.T, np.zeros((LBP.shape[1], vdiv - modn1))), 1).T
        I = np.concatenate((I.T, np.zeros((I.shape[1], vdiv - modn1))), 1).T

    modn2 = n2 % hdiv
    if modn2 != 0:
        LBP = np.concatenate((LBP, np.zeros((LBP.shape[0], hdiv - modn2))), 1)
        I = np.concatenate((I, np.zeros((I.shape[0], hdiv - modn2))), 1)

    n1, n2 = LBP.shape
    ylen = int(np.round(n1 / vdiv))
    xlen = int(np.round(n2 / hdiv))

    # split image into blocks (saved as columns)
    grid_img = view_as_blocks(LBP, block_shape=(ylen, xlen))

    if options['weight'] > 0:
        LBPst = 'w' + LBPst
        mt = int(2 * radius - 1)
        mt2 = float(mt**2)
        Id = I.astype(int)
        weight = options['weight']

        if weight == 1:
            W = np.abs(convolve2d(Id, np.ones((mt, mt)) / mt2, mode='same') - Id)
        elif weight == 2:
            W = (np.abs(convolve2d(Id, np.ones((mt, mt)) / mt2, mode='same') - Id)) / (Id + 1)
        elif weight == 3:
            W = np.abs(median(Id, square(mt)) - Id)
        elif weight == 4:
            W = np.abs(median(Id, square(mt)) - Id) / (Id + 1)
        elif weight == 5:
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id)
        elif weight == 6:
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id) / (Id + 1)
        elif weight == 7:
            Id = convolve2d(Id, np.ones((mt, mt)) / mt2, mode='same')
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id) / (Id + 1)
        elif weight == 8:
            Id = median(Id, square(mt))
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id) / (Id + 1)
        elif weight == 9:
            Id = median(Id, square(mt))
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 1) - Id) / (Id + 1)
        else:
            print("Bfx_lbp does not recognize options['weight'] = {0}.".format(options['weight']))

        grid_W = view_as_blocks(W, block_shape=(ylen, xlen))
        num_rows_blocks, num_cols_blocks = grid_W.shape[0:2]
        desc = np.zeros((num_patterns, num_cols_blocks * num_rows_blocks))
        p = 0
        for br in range(num_rows_blocks):
            for bc in range(num_cols_blocks):
                x = grid_img[br, bc].astype(int).ravel()
                y = grid_W[br, bc].ravel()
                d = np.zeros(num_patterns)
                for k in range(ylen * xlen):
                    d[x[k]] += y[k]
                desc[:, p] = d
                p += 1
    else:
        desc = (np.histogram(grid_img.ravel(), num_patterns)[0])[None]

    # calculate coordinates of descriptors as if it was square w/ side=1
    dx = 1.0 / float(hdiv)
    dy = 1.0 / float(vdiv)
    x = np.linspace(dx / 2.0, 1 - dx / 2.0, hdiv)
    y = np.linspace(dy / 2.0, 1 - dy / 2.0, vdiv)
    options['x'] = x
    options['y'] = y

    D = desc.T
    M, N = D.shape
    Xn = (N * M) * [None]
    X = np.zeros((1, N * M))
    k = 0
    for i in range(M):
        for j in range(N):
            Xn[k] = '{0}({1},{2})[{3}] '.format(LBPst, i, j, st)
            X[0, k] = D[i, j]
            k += 1

    if options['normalize']:
        X = X / np.sum(X)

    return X, Xn
def maxfilter(x, size=5):
    return signal.order_filter(x, np.ones(size), size - 1)
def minfilter(x, size=5):
    return signal.order_filter(x, np.ones(size), 0)
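The two helpers above are simply running maximum and minimum filters: with a flat window of n ones, rank n-1 keeps the window maximum and rank 0 the window minimum. A small sketch (assuming numpy and scipy.signal are imported as in the snippets above); note that order_filter treats values beyond the array edges as zeros.

import numpy as np
from scipy import signal

x = np.array([3., 1., 4., 1., 5., 9., 2., 6.])
print(signal.order_filter(x, np.ones(5), 4))  # running maximum over a width-5 window
print(signal.order_filter(x, np.ones(5), 0))  # running minimum (zeros appear near the edges)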
def old_research():
    mag = mag_from_file(filename)
    plt.gcf().canvas.set_window_title(filename + " mag")
    plt.plot(mag)
    plt.show()
    plt.clf()

    lowpass_mag = butter_lowpass_filter(mag, cutoff, fs, order)
    plt.gcf().canvas.set_window_title(filename + " lowpass filter mag")
    plt.plot(lowpass_mag)
    plt.show()
    plt.clf()

    sig_order = signal.order_filter(mag, domain, 0)
    moving_mag_data = moving_average(mag, 50)
    savgol_moving = savgol_filter(moving_mag_data, 29, 12)
    savgol = savgol_filter(mag, 5, 2)

    # plt.gcf().canvas.set_window_title(filename + " mag")
    # plt.plot(mag)
    # plt.show()
    # plt.clf()

    plt.gcf().canvas.set_window_title(filename + " sig order")
    plt.plot(sig_order)
    plt.show()
    plt.clf()

    plt.gcf().canvas.set_window_title(filename + " savgol moving")
    plt.plot(savgol_moving)
    # plt.show()
    plt.clf()

    for filename in glob.iglob('*.txt'):
        mag = mag_from_file(filename)
        plt.gcf().canvas.set_window_title(filename + " magnitude")
        plt.plot(mag)
        # plt.show()
        plt.clf()

    filename = '5shots2.txt'
    data = pd.read_csv(filename)
    x_data = get_data(data, 'x')
    y_data = get_data(data, 'y')
    z_data = get_data(data, 'z')
    mag52 = magnitude(x_data, y_data, z_data)
    # plt.plot(mag)
    # plt.gcf().canvas.set_window_title(filename + " magnitude")
    # plt.show()

    x_data = mag52
    # x_data = x_data[0:1000]
    # plt.plot(x_data)
    # plt.gcf().canvas.set_window_title(filename + " raw data 3 shots")
    # plt.show()

    # standardized_data = preprocessing.scale(x_data)
    # plt.plot(standardized_data)
    # plt.gcf().canvas.set_window_title(filename + " standardized data 3 shots")
    # plt.show()

    # # Normalize
    # reshaped_x_data = x_data.reshape(-1, 1)
    # normalize_data = preprocessing.normalize(reshaped_x_data)
    # plt.plot(normalize_data)
    # plt.gcf().canvas.set_window_title(filename + " new normalized 3 shots")
    # plt.show()

    # plt.plot(x_data)
    # plt.gcf().canvas.set_window_title(filename + " raw data 3 shots")
    # plt.show()

    moving_x_data = moving_average(x_data, 50)
    plt.plot(moving_x_data)
    plt.gcf().canvas.set_window_title(filename + " moving average 3 shots")
def run():
    for r, d, fs in os.walk(os.path.join(att_dir, 'tmp')):
        null = [os.remove(os.path.join(r, f)) for f in fs]

    attachments.check_att(att_dir)
    pdfs = dict([(os.path.join(att_dir, 'tmp/tmp_{0:04d}.pdf'.format(idx)),
                  os.path.join(att_dir, f))
                 for idx, f in enumerate(os.listdir(att_dir)) if '.pdf' in f.lower()])
    for k, v in pdfs.items():
        os.rename(v, k)

    # Convert PDFs
    res = 300
    cvsub = ['''convert -density {2} {0} {1}; rm {0}'''.format(p, p.replace('.pdf', '.png'), res)
             for p in pdfs.keys()]
    for c in cvsub:
        print('calling for ' + c)
        subprocess.call(c, shell=True)

    # Find all files produced by convert
    inp_files = [os.path.join(att_dir, 'tmp', e) for e in it.chain(
        *[filter(lambda x: os.path.splitext(os.path.basename(key))[0] in x and True or False,
                 os.listdir(os.path.join(att_dir, 'tmp')))
          for key in pdfs.keys()])]

    bluechannels = []
    x_inches = .25
    y_inches = .15
    skip = 5

    # open them and get the blue channels
    for i in inp_files:
        full = mpimg.imread(i)
        if isflipped(full):
            full = transpose(full, (1, 0, 2))
        blue = squeeze(full[:, :, 2])
        others = np.sum(full[:, :, 0:2], 2)
        blue = blue - others
        blue[less(blue, 0)] = 0.
        blue[greater(blue, 0)] = 1.

        xrad = int(floor(res / skip * x_inches))
        yrad = int(floor(res / skip * y_inches))
        bluesmall = blue[::skip, ::skip]
        bluesmall = ss.order_filter(bluesmall, ones((xrad, 1)), xrad - 1)
        bluesmall = ss.order_filter(bluesmall, ones((1, yrad)), yrad - 1)

        # FLIP THE CLUSTERS JUST TO BE CONFUSING
        cls = cluster_img(bluesmall)
        cls = [cl.T * 5 for cl in cls]

        root = os.path.join(att_dir, 'pages')
        if not os.path.isdir(root):
            os.mkdir(root)
        num_max = max(array([0] + list(it.chain(
            *[re.findall(re.compile(r'[\d]+'), f) for f in os.listdir(root)])), int))
        this_folder = os.path.join(root, 'page_{0:05d}'.format(num_max + 1))
        os.mkdir(this_folder)

        Image.fromarray(transpose(array(blue * 255, dtype=np.uint8), (1, 0))).save(
            open(os.path.join(this_folder, 'blue.png'), 'wb'))
        Image.fromarray(transpose(array(full * 255, dtype=np.uint8), (1, 0, 2))).save(
            open(os.path.join(this_folder, 'full.png'), 'wb'))

        outline_folder = os.path.join(this_folder, 'highlights')
        for idx, c in enumerate(cls):
            bounds = array([np.min(c[0]), np.min(c[1]), np.max(c[0]), np.max(c[1])])
            b0 = array(bounds)
            bounds = bounds + 100 * array([-1, -1, 1, 1])
            clip_bounds(bounds, shape(blue.T))
            subimg = full[bounds[0]:bounds[2], bounds[1]:bounds[3]]
            coords = {'full_bounds': list(bounds), 'cluster_bounds': list(b0)}
            Image.fromarray(transpose(array(subimg * 255, dtype=np.uint8), (1, 0, 2))).save(
                open(os.path.join(this_folder, 'hl_{0:02d}.png').format(idx), 'wb'))
            fopen = open('hl_{0:02d}.txt'.format(idx), 'w')
            fopen.write(simplejson.dumps(coords))
            fopen.close()
def speckle_centroid(frame, image=None, center=None):
    """
    Function speckle_centroid estimates the centroid of a (usually)
    saturated image by finding local maxima in intensity and attempting
    to identify symmetric pairs of maxima.  The function takes one argument:
    1. The original filename.  If no flux array is supplied, it will be
       read from this file.
    Optional arguments:
    2. A 2D flux array to centroid.
    3. The first guess for the centroid.  Default is the center of the image.

    Function speckle_centroid returns the centroid as a list [yc, xc]
    """

    if image is None:
        frame_dw = re.sub(".fits", "_dw.fits", frame)
        image = pyf.open(frame_dw)[0].data
    if center is None:
        center = [image.shape[0] // 2, image.shape[1] // 2]

    yc = center[0] + 0.0
    xc = center[1] + 0.0

    #################################################################
    # Pull out the center of the image.  This requires a good
    # initial guess.
    #################################################################

    x = np.linspace(-60., 60, 601) + xc
    y = np.linspace(-60., 60, 601) + yc
    x, y = np.meshgrid(x, y)
    imcenter = ndimage.map_coordinates(image, [y, x], order=1)

    #################################################################
    # Convolve with a 2-D Gaussian to look for local maxima.
    #################################################################

    sig = 2
    x2 = np.linspace(-2, 2, 21)
    y2 = np.linspace(-2, 2, 21)
    x2, y2 = np.meshgrid(x2, y2)
    r = np.sqrt(x2**2 + y2**2)
    psf = r < 1.8
    speckles = signal.convolve(imcenter, psf, mode='same')

    #################################################################
    # Pull out the coordinates of the local maxima.
    #################################################################

    nmax = 9
    domain = np.ones((nmax, nmax))
    specklemax = signal.order_filter(speckles, domain, nmax**2 - 1)
    maxima = np.where(np.all([specklemax == speckles, specklemax > 0], axis=0))

    x = np.reshape(x[maxima], (-1))
    y = np.reshape(y[maxima], (-1))
    newarr = np.zeros((2, x.shape[0]))
    newarr[0] = y
    newarr[1] = x

    #################################################################
    # Minimize the squared offsets of the best-fit points.
    #################################################################

    p0 = [yc, xc]
    p1, success = optimize.leastsq(chi2speckle, p0[:], args=(x, y))
    return [p1[0], p1[1]]
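The peak-finding step in speckle_centroid relies on a common idiom: an order filter with rank nmax**2 - 1 over an nmax-by-nmax window of ones returns the running maximum, so points equal to that maximum are local peaks. A standalone sketch with synthetic data (the threshold value here is illustrative only):

import numpy as np
from scipy import signal

img = np.random.rand(64, 64)
nmax = 9
domain = np.ones((nmax, nmax))
local_max = signal.order_filter(img, domain, nmax**2 - 1)   # running 9x9 maximum
peaks = np.where(np.all([local_max == img, img > 0.9], axis=0))
print(np.transpose(peaks))                                  # (row, col) of the detected peaks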
# -*- coding: utf-8 -*-
"""
Demonstrate median filtering.
"""
from scipy import signal
import numpy as np
import pylab as pl

t = np.arange(0, 20, 0.1)
x = np.sin(t)
x[np.random.randint(0, len(t), 20)] += np.random.standard_normal(20) * 0.6
x2 = signal.medfilt(x, 5)
pl.plot(t, x)
pl.plot(t, x2 + 0.5)

x3 = signal.order_filter(x, np.ones(5), 2)
print(np.all(x2 == x3))

pl.show()
# -*- coding: utf-8 -*-
"""
Demonstrate order (rank) filtering.
"""
from scipy import signal
import numpy as np
import pylab as pl

t = np.arange(0, 20, 0.01)
x = (np.sin(t) + 2) * np.sin(100 * t)
x2 = signal.order_filter(x, np.ones(11), 10)
pl.plot(t, x)
pl.plot(t, x2)
pl.show()
nbi30L = MDSconn.get(r'_x=\D3D::TOP.NB.NB30L:PINJ_30L').data()
nbi30L_t = MDSconn.get(r'dim_of(_x)').data()

##import IPython
##IPython.embed()

(shot, chord, dark_shot, type, channels, idx, white, t_start, t_integ, tg,
 data_grp, bg_grp, wl, comments, shot_time, timing_def, timing_mode, bg_mode,
 camera_type, gain, bandwidth, binning, raw, data) = data

if wl < 4050 or wl > 4100:
    continue

t_start = t_start[tg <= data_grp.max()]
t_integ = t_integ[tg <= data_grp.max()]
time_vec = t_start + old_div(t_integ, 2.0)

corrupted = data - order_filter(data, np.ones((1, 5)), 1)
corrupted[:, 0] = 0
corrupted[:, -1] = 0
corrupted = corrupted > mquantiles(corrupted, 0.999)

'''
# get background
for it in np.arange(data.shape[0]):
    data[it, corrupted[it]] = np.interp(np.where(corrupted[it])[0],
                                        np.where(~corrupted[it])[0],
                                        data[it, ~corrupted[it]])
    bcg = mquantiles(data[it], .3)
    data[it] -= bcg
'''

#import IPython
#IPython.embed()

# plot data matrix
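The corrupted-pixel test above uses a robust-baseline idiom: the rank-1 value of a 1-by-5 horizontal window ignores a single hot pixel, so the residual of the data minus the filtered data is large only at isolated spikes. A self-contained sketch with synthetic data (the 99.9 % quantile threshold mirrors the snippet; everything else is made up for illustration):

import numpy as np
from scipy.signal import order_filter
from scipy.stats.mstats import mquantiles

data = np.random.normal(size=(50, 200))
data[10, 37] += 50.0                        # inject one isolated spike
residual = data - order_filter(data, np.ones((1, 5)), 1)
residual[:, 0] = 0
residual[:, -1] = 0
corrupted = residual > mquantiles(residual, 0.999)
print(bool(corrupted[10, 37]))              # True: the injected spike is flagged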
# -*- coding: utf-8 -*-
"""
Demonstrate order (rank) filtering.
"""
from scipy import signal
import numpy as np
import pylab as pl

t = np.arange(0, 20, 0.01)
x = (np.sin(t) + 2) * np.sin(100 * t)
x2 = signal.order_filter(x, np.ones(11), 10)
pl.plot(t, x)
pl.plot(t, x2)
pl.show()
# -*- coding: utf-8 -*-
"""
Demonstrate median filtering.
"""
from scipy import signal
import numpy as np
import pylab as pl

t = np.arange(0, 20, 0.1)
x = np.sin(t)
x[np.random.randint(0, len(t), 20)] += np.random.standard_normal(20) * 0.6
x2 = signal.medfilt(x, 5)
pl.plot(t, x)
pl.plot(t, x2 + 0.5)

x3 = signal.order_filter(x, np.ones(5), 2)
print(np.all(x2 == x3))

pl.show()
def test_basic(self):
    assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
                       [2, 3, 2])
def test_basic(self):
    assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
                       [2, 3, 2])
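Why [2, 3, 2]? The domain [1, 0, 1] looks only at the two horizontal neighbours (the centre is excluded), rank 1 of two values is their maximum, and order_filter treats values outside the input as zeros: index 0 sees {0, 2} -> 2, index 1 sees {1, 3} -> 3, index 2 sees {2, 0} -> 2. A one-line check:

from scipy import signal

print(signal.order_filter([1, 2, 3], [1, 0, 1], 1))  # array equal to [2, 3, 2]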
print("-----------------( END (B) )-------------------") A[A < (v[mask1])[0]] = 0 A[A > (v[mask1])[-1]] = 0 print("------------( A )-------------------") print(A) print((v[mask1])[0], (v[mask1])[-1]) print(np.min(A), np.max(A)) print(A) print("-----------------( END (A) )-------------------") C = A + B matrix_3d = C.reshape([421, 512, 512]) domain = np.identity(3) test_2 = matrix_3d[420, :, :] signal.order_filter(test_2, domain, 0) test_3 = matrix_3d[:, 500, :] plt.imshow(test_2, cmap='gray') plt.show() data = np.zeros((421, 512)) for i in range(0, 512): a = matrix_3d[:, :, i] for j in range(0, 421): data[j][i] = np.sum(a[j, :]) data = np.rot90(np.rot90(np.rot90(data))) plt.imshow(data, cmap='gray') plt.show()