def make_disp_map(mode, screen_res, fig_width, fig_height, fig_no):
    if mode == 'diagonal':
        max_x = int(pl.floor((screen_res[0]-fig_width)/20))
        max_y = int(pl.floor((screen_res[1]-fig_height)/20))
    elif mode == 'mosaic':
        max_x = int(pl.floor(screen_res[0]/fig_width))
        max_y = int(pl.floor(screen_res[1]/fig_height))
    disp_map = []  #[(0, 60, fig_width, fig_height)]
    top_offset = 60
    for y in range(0, max_y):
        if y == max_y-1:
            mx = max_x-1
        else:
            mx = max_x
        for x in range(0, mx):
            if mode == 'mosaic':
                disp_map.append((x*fig_width, top_offset+y*fig_height, fig_width, fig_height))
            elif mode == 'diagonal':
                disp_map.append((20*x*y, top_offset+15*y*x, fig_width, fig_height))
    return disp_map[np.mod(fig_no, max_x*max_y-1)]
def jetWoGn(reverse=False):
    """
    jetWoGn(reverse=False)
       - returning a colormap similar to cm.jet, but without green.
         if reverse=True, the map starts with red instead of blue.
    """
    m = 18  # magic number, which works fine
    m0 = pl.floor(m*0.0)
    m1 = pl.floor(m*0.2)
    m2 = pl.floor(m*0.2)
    m3 = pl.floor(m/2)-m2-m1

    b_ = np.hstack((0.4*np.arange(m1)/(m1-1.)+0.6, np.ones((m2+m3,))))
    g_ = np.hstack((np.zeros((m1,)), np.arange(m2)/(m2-1.), np.ones((m3,))))
    r_ = np.hstack((np.zeros((m1,)), np.zeros((m2,)), np.arange(m3)/(m3-1.)))

    r = np.hstack((r_, pl.flipud(b_)))
    g = np.hstack((g_, pl.flipud(g_)))
    b = np.hstack((b_, pl.flipud(r_)))

    if reverse:
        r = pl.flipud(r)
        g = pl.flipud(g)
        b = pl.flipud(b)

    ra = pl.linspace(0.0, 1.0, m)

    cdict = {'red': zip(ra, r, r),
             'green': zip(ra, g, g),
             'blue': zip(ra, b, b)}

    return pl.matplotlib.colors.LinearSegmentedColormap('new_RdBl', cdict, 256)
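# Usage sketch for jetWoGn (illustrative; assumes the same pylab/numpy
# environment, i.e. the `pl` and `np` aliases, that the function above uses;
# the random data is purely for demonstration):
import pylab as pl
pl.imshow(pl.rand(32, 32), cmap=jetWoGn(), interpolation='nearest')
pl.colorbar()
pl.show()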
def _pvoc(self, X_hat, Phi_hat=None, R=None):
    """
    ::
      a phase vocoder - time-stretch
      inputs:
        X_hat - estimate of signal magnitude
        [Phi_hat] - estimate of signal phase
        [R] - resynthesis hop ratio
      output:
        updates self.X_hat with modified complex spectrum
    """
    N = self.nfft
    W = self.wfft
    H = self.nhop
    R = 1.0 if R is None else R
    dphi = (2*P.pi * H * P.arange(N/2+1)) / N
    print "Phase Vocoder Resynthesis...", N, W, H, R
    A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
    phs = A[:, 0]
    self.X_hat = []
    n_cols = X_hat.shape[1]
    t = 0
    while P.floor(t) < n_cols:
        tf = t - P.floor(t)
        idx = P.arange(2) + int(P.floor(t))
        idx[1] = n_cols-1 if t >= n_cols-1 else idx[1]
        Xh = X_hat[:, idx]
        Xh = (1-tf)*Xh[:, 0] + tf*Xh[:, 1]
        self.X_hat.append(Xh*P.exp(1j * phs))
        U = A[:, idx[1]] - A[:, idx[0]] - dphi
        U = U - P.np.round(U/(2*P.pi))*2*P.pi
        phs += (U + dphi)
        t += P.randn()*P.sqrt(PVOC_VAR*R) + R  # 10% variance
    self.X_hat = P.np.array(self.X_hat).T
def __init__(self, scanList, stateArray):
    if len(scanList) != stateArray.shape[0]:
        raise Exception('number of scan and state should be the same')
    times = [scan.timestamp for scan in scanList]
    self.avgTime = times[int(pl.floor(len(times)/2))]
    #self.avgTime = pl.mean([scan.timestamp for scan in scanList])

    #transform the 3d coordinates of each scan
    #and numpy.vstack all the output m*3 array together
    #find average Lidar frame
    avgBodyState = stateArray[int(pl.floor(len(stateArray)/2))]
    #avgBodyState=pl.mean(stateArray, 0)
    w_R_avg_L, w_T_avg_L = self._bodyState2LidarState(avgBodyState)
    self.avgLidarState = self._matrix2State(w_R_avg_L, w_T_avg_L)
    transform = self._transformPointsFromBodyToAvgLidar
    #from data points with transformation to avgState
    self.dataPoints = pl.vstack([transform(scan.dataArray, state, w_R_avg_L, w_T_avg_L)
                                 for scan, state in zip(scanList, stateArray)
                                 if scan.hasValidData()])
    self.intensity = pl.vstack([scan.intensityArray
                                for scan in scanList
                                if scan.hasValidData()]).flatten()
    if self.dataPoints.shape[0] != self.intensity.shape[0]:
        raise Exception('dist and intensity have different size')
def dia1P_generateEnsamble(L, dilution, nSamp, distType, distPar, fNameBase):
    """
    Generates nSamp samples of LxL lattice with given dilution.
    The cluster sizes are generated according to the given distType:
       distType = 'P' : power law with exponent distPar
       distType = 'E' : exponential with exponent distPar
    The nodes are written to the file fNameBase.node
    The cluster size distribution is written to the file fNameBase.clusSize
    The cluster width distribution is written to the file fNameBase.clusWidth
    """
    # Open files for writing
    nodeFile = open(fNameBase+'.node', 'w+')
    clusFile = open(fNameBase+'.clusSize', 'w+')
    widthFile = open(fNameBase+'.clusWidth', 'w+')

    # lists for storing the size and width distributions
    sizes = []
    widths = []
    nClus = 0  # Total number of clusters

    for sNum in range(nSamp):
        # Working on the sNum^th instance
        # Generate cluster sizes
        cSizes = []
        if distType == 'P':
            # Find the number of variables to draw
            nVar = pylab.floor(L*L*dilution*scipy.special.zeta(distPar, 1)/scipy.special.zeta(distPar-1, 1))
            cSizes = list(statUtils.discretePow(int(nVar), t=distPar).astype(int))
        elif distType == 'E':
            nVar = pylab.floor(L*L*dilution*(1 - pylab.exp(-distPar)))
            cSizes = list(statUtils.discreteExpn(int(nVar), l=distPar).astype(int))
        # Generate clusters
        clusters, n1, n2 = dia1P_generateDualClusters(L, cSizes)
        # Write clusters to the file
        dia1P_writeClus(nodeFile, L, clusters)
        nClus += len(clusters)  # Update the total number of clusters (was `=` in the original, contradicting this comment)
        # Update size and width distributions
        for clus in clusters:
            sz = dia1P_dualClusterSize(clus, L, statType='S')  # Calculate the size of the cluster
            wd = dia1P_dualClusterSize(clus, L, statType='W')  # Calculate the width of the cluster
            sizes.append(sz)
            widths.append(wd)

    # Write distributions to files
    for size in sizes:
        clusFile.write(str(size) + '\n')
    for width in widths:
        widthFile.write(str(width) + '\n')

    # Close files
    nodeFile.close()
    clusFile.close()
    widthFile.close()
def printTrack(fid, resized, frame, pos, sz):
    p0 = [pos[0]-pylab.floor(sz[0]/2), pos[1]-pylab.ceil(sz[1]/2)]
    p1 = [pos[0]+pylab.floor(sz[0]/2), pos[1]+pylab.ceil(sz[1]/2)]
    if resized:
        p0 = [x*2 for x in p0]
        p1 = [x*2 for x in p1]
    fid.write(str(frame)+","+str(p0[1])+","+str(p0[0])+","+str(p1[1])+","+str(p1[0])+"\n")
def binvalues(bin_number, base):
    # returns binvalue, bin min, bin max for given bin number and base
    bin_max = float(base)
    bin_min = 1.0
    for i in range(bin_number):
        bin_min *= base
        bin_max *= base
    bin_min = pl.floor(bin_min)
    bin_max = pl.floor(bin_max)
    if (bin_min != bin_max):
        bin_min += 1.0
    return (pl.sqrt(bin_min*bin_max), bin_min, bin_max)
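# Example (illustrative): print the integer edges and geometric centers of
# the first few base-2 bins; assumes the same `pl` (pylab) alias as above.
for b in range(4):
    center, lo, hi = binvalues(b, 2.0)
    print "bin %d: [%g, %g], center %g" % (b, lo, hi, center)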
def wave2sram(waveA, waveB):  #{{{
    r'''Construct sram sequence for waveform. This takes python array and
    converts it to sram data for hand off to the FPGA. Wave cannot be
    nddata - this is stupid...'''
    if not len(waveA) == len(waveB):
        raise Exception('Lengths of DAC A and DAC B waveforms must be equal.')
    dataA = [long(py.floor(0x1FFF*y)) for y in waveA]  # Multiply wave by full scale of DAC
    dataB = [long(py.floor(0x1FFF*y)) for y in waveB]
    truncatedA = [y & 0x3FFF for y in dataA]  # Chop off everything except lowest 14 bits
    truncatedB = [y & 0x3FFF for y in dataB]
    dacAData = truncatedA
    dacBData = [y << 14 for y in truncatedB]  # Shift DAC B data by 14 bits, why is this done??
    sram = [dacAData[i] | dacBData[i] for i in range(len(dacAData))]  # Combine DAC A and DAC B
    return sram  #}}}
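# Usage sketch (hedged: waveforms are assumed to be normalized floats, as the
# 0x1FFF full-scale multiply implies; the ramp below is illustrative only and
# the `long` builtin above ties this to Python 2).
waveA = [i / 255. for i in range(256)]  # ramp on DAC A
waveB = [0.0] * 256                     # DAC B held at zero
sram = wave2sram(waveA, waveB)
print "%d words, first word 0x%08x" % (len(sram), sram[0])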
def elapsed_to_abs_time(seconds, base_time=None):
    """
    Convert relative (elapsed) time in seconds to astronomical datetime.
    In:
        seconds : float, elapsed seconds
        base_time : datetime, recording start time (if available)
    Out:
        datetime
    """
    if not base_time:
        base_time = dt.datetime(1900, 1, 1, 0, 0, 0, tzinfo=None)
    return base_time + dt.timedelta(seconds=pl.floor(seconds),
                                    microseconds=(seconds - pl.floor(seconds))*(10**6))
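# Example: with no base_time the 1900-01-01 default epoch is used
# (the function assumes a module-level "import datetime as dt").
import datetime as dt
print elapsed_to_abs_time(3661.25)
# -> 1900-01-01 01:01:01.250000
print elapsed_to_abs_time(60.0, dt.datetime(2020, 1, 1))
# -> 2020-01-01 00:01:00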
def choose_patches(IMAGES, L, batch_size=1000):
    sz = int(sqrt(L))
    imsz = shape(IMAGES)[0]
    num_images = shape(IMAGES)[2]
    BUFF = 4

    X = matrix(zeros([L, batch_size], 'd'))
    for i in range(batch_size):
        j = int(floor(num_images * rand()))
        r = sz/2+BUFF+int(floor((imsz-sz-2*BUFF)*rand()))
        c = sz/2+BUFF+int(floor((imsz-sz-2*BUFF)*rand()))
        X[:, i] = reshape(IMAGES[r-sz/2:r+sz/2, c-sz/2:c+sz/2, j], [L, 1])
    return X
def get_subwindow(im, pos, sz, cos_window):
    """
    Obtain sub-window from image, with replication-padding.
    Returns sub-window of image IM centered at POS ([y, x] coordinates),
    with size SZ ([height, width]). If any pixels are outside of the image,
    they will replicate the values at the borders.

    The subwindow is also normalized to range -0.5 .. 0.5, and the given
    cosine window COS_WINDOW is applied (though this part could be omitted
    to make the function more general).
    """
    if pylab.isscalar(sz):  # square sub-window
        sz = [sz, sz]

    ys = pylab.floor(pos[0]) \
        + pylab.arange(sz[0], dtype=int) - pylab.floor(sz[0]/2)
    xs = pylab.floor(pos[1]) \
        + pylab.arange(sz[1], dtype=int) - pylab.floor(sz[1]/2)

    ys = ys.astype(int)
    xs = xs.astype(int)

    # check for out-of-bounds coordinates,
    # and set them to the values at the borders
    ys[ys < 0] = 0
    ys[ys >= im.shape[0]] = im.shape[0] - 1

    xs[xs < 0] = 0
    xs[xs >= im.shape[1]] = im.shape[1] - 1
    #zs = range(im.shape[2])

    # extract image
    #out = im[pylab.ix_(ys, xs, zs)]
    out = im[pylab.ix_(ys, xs)]

    if debug:
        print("Out max/min value==", out.max(), "/", out.min())
        pylab.figure()
        pylab.imshow(out, cmap=pylab.cm.gray)
        pylab.title("cropped subwindow")

    # pre-process window --
    # normalize to range -0.5 .. 0.5
    # pixels are already in range 0 to 1
    out = out.astype(pylab.float64) / 255 - 0.5

    # apply cosine window
    out = pylab.multiply(cos_window, out)

    return out
def as_sdms(self, decimals=0):
    min_val_size = len(str(int(floor(abs(self.lower_bound)*180/pi))))
    max_val_size = len(str(int(floor(abs(self.upper_bound)*180/pi))))
    deg_size = max(min_val_size, max_val_size)

    sign_char = '- +'[int(sign(self.value))+1]
    d_float = abs(self.value)*180/pi
    d_int = int(floor(d_float))
    m_float = 60*(d_float - d_int)
    m_int = int(floor(m_float))
    s_float = 60*(m_float - m_int)
    s_int = int(floor(s_float))
    frac_int = int(floor(10**decimals*(s_float - s_int)+0.5))
    if frac_int >= 10**decimals:
        frac_int -= 10**decimals
        s_int += 1
    if s_int >= 60:
        s_int -= 60
        m_int += 1
    if m_int >= 60:
        m_int -= 60
        d_int += 1
    max_d = int(floor(self.upper_bound*180/pi+0.5))
    min_d = int(floor(self.lower_bound*180/pi+0.5))
    if d_int >= max_d and self.cyclical and not self.include_upper_bound:
        d_int -= (max_d-min_d)
    base_str = sign_char+str(d_int).rjust(deg_size, '0')+':'+str(m_int).rjust(2, '0')+':'+str(s_int).rjust(2, '0')
    if decimals == 0:
        return base_str
    else:
        return base_str+'.'+str(frac_int).rjust(decimals, '0')
def check(nelx, nely, rmin, x, dc):
    dcn = py.zeros((nely, nelx))
    for i in range(1, nelx+1):
        for j in range(1, nely+1):
            sumx = 0.0
            # int() casts so range() receives integers (py.floor returns floats)
            for k in range(int(py.maximum(i-py.floor(rmin), 1)), int(py.minimum(i+py.floor(rmin), nelx))+1):
                for l in range(int(py.maximum(j-py.floor(rmin), 1)), int(py.minimum(j+py.floor(rmin), nely))+1):
                    fac = rmin-py.sqrt((i-k)**2+(j-l)**2)
                    sumx = sumx+py.maximum(0, fac)
                    dcn[j-1, i-1] = dcn[j-1, i-1] + py.maximum(0, fac)*x[l-1, k-1]*dc[l-1, k-1]
            dcn[j-1, i-1] = dcn[j-1, i-1]/(x[j-1, i-1]*sumx)
    return dcn
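# Usage sketch (illustrative): apply the sensitivity filter to random data on
# a small 4x3 grid, with rmin=1.5 as in the classic 99-line topology
# optimization code this mirrors. Assumes "import pylab as py" as above.
x = py.ones((3, 4)) * 0.5   # element densities, shape (nely, nelx)
dc = -py.rand(3, 4)         # compliance sensitivities
print check(4, 3, 1.5, x, dc)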
def add_zeroline(current_data):
    from pylab import plot, legend, xticks, floor, xlim, ylim
    t = current_data.t
    #legend(('surface','topography'),loc='lower left')
    plot(t, 0*t, 'k')
    n = int(floor(t.max()/1800.)) + 2
    xticks([1800*i for i in range(n)], [str(0.5*i) for i in range(n)])
def plot_viz_of_stochs(vars, viz_func, figsize=(8, 6)):
    """ Plot autocorrelation for all stochs in a dict or dict of dicts

    :Parameters:
      - `vars` : dictionary
      - `viz_func` : visualization function such as ``acorr``, ``show_trace``, or ``hist``
      - `figsize` : tuple, size of figure
    """
    pl.figure(figsize=figsize)

    cells, stochs = tally_stochs(vars)

    # for each stoch, make an autocorrelation plot for each dimension
    rows = pl.floor(pl.sqrt(cells))
    cols = pl.ceil(cells/rows)

    tile = 1
    for s in sorted(stochs, key=lambda s: s.__name__):
        trace = s.trace()
        if len(trace.shape) == 1:
            trace = trace.reshape((len(trace), 1))
        for d in range(len(pl.atleast_1d(s.value))):
            pl.subplot(rows, cols, tile)
            viz_func(pl.atleast_2d(trace)[:, d])
            pl.title('\n\n%s[%d]' % (s.__name__, d), va='top', ha='center', fontsize=8)
            tile += 1
def render_network(A):
    [L, M] = shape(A)
    sz = int(sqrt(L))
    buf = 1
    A = asarray(A)

    if floor(sqrt(M)) ** 2 != M:
        m = int(sqrt(M / 2))
        n = M / m
    else:
        m = int(sqrt(M))
        n = m

    array = -ones([buf + m * (sz + buf), buf + n * (sz + buf)], "d")

    k = 0
    for i in range(m):
        for j in range(n):
            clim = max(abs(A[:, k]))
            x_offset = buf + i * (sz + buf)
            y_offset = buf + j * (sz + buf)
            array[x_offset:x_offset + sz, y_offset:y_offset + sz] = \
                reshape(A[:, k], [sz, sz]) / clim
            k += 1
    return array
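# Usage sketch (illustrative): tile 64 random 8x8 basis functions into one
# image. render_network itself relies on pylab-star globals (shape, sqrt,
# ones, reshape, ...), so a "from pylab import *" context is assumed.
from pylab import rand, imshow, get_cmap
A = rand(64, 64) - 0.5  # L=64 pixels per patch, M=64 basis functions
imshow(render_network(A), cmap=get_cmap('gray'), interpolation='nearest')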
def _pvoc2(self, X_hat, Phi_hat=None, R=None):
    """
    ::
      alternate (batch) implementation of phase vocoder - time-stretch
      inputs:
        X_hat - estimate of signal magnitude
        [Phi_hat] - estimate of signal phase
        [R] - resynthesis hop ratio
      output:
        updates self.X_hat with modified complex spectrum
    """
    N, W, H = self.nfft, self.wfft, self.nhop
    R = 1.0 if R is None else R
    dphi = P.atleast_2d((2*P.pi * H * P.arange(N/2+1)) / N).T
    print "Phase Vocoder Resynthesis...", N, W, H, R
    A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
    U = P.diff(A, 1) - dphi
    U = U - P.np.round(U/(2*P.pi))*2*P.pi  # principal argument of the phase increment
    # NOTE: the original body referenced undefined names (n_cols, idx, Xh);
    # the reconstruction below is one plausible reading of the intent, and
    # assumes X_hat has the same number of columns as self.STFT.
    n_cols = X_hat.shape[1]
    t = P.arange(0, n_cols-1, R)    # fractional column positions
    tf = t - P.floor(t)
    idx = P.floor(t).astype(int)
    # accumulated synthesis phase: start phase plus cumulative increments
    phs = P.np.cumsum(P.c_[A[:, 0], U + dphi], axis=1)
    # linear interpolation of magnitude columns at the fractional positions
    Xh = (1-tf)*X_hat[:, idx] + tf*X_hat[:, idx+1]
    Xh = Xh * P.exp(1j * phs[:, idx])
    self.X_hat = Xh
def get_filterbanks(nfilt=20, nfft=512, samplerate=16000, lowfreq=0, highfreq=None):
    """Compute a Mel-filterbank. The filters are stored in the rows, the columns
    correspond to fft bins. The filters are returned as an array of size
    nfilt * (nfft/2 + 1)

    :param nfilt: the number of filters in the filterbank, default 20.
    :param nfft: the FFT size. Default is 512.
    :param samplerate: the samplerate of the signal we are working with. Affects mel spacing.
    :param lowfreq: lowest band edge of mel filters, default 0 Hz
    :param highfreq: highest band edge of mel filters, default samplerate/2
    :returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
    """
    highfreq = highfreq or samplerate/2
    assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2"

    # compute points evenly spaced in mels
    lowmel = hz2mel(lowfreq)
    highmel = hz2mel(highfreq)
    melpoints = pylab.linspace(lowmel, highmel, nfilt+2)
    # our points are in Hz, but we use fft bins, so we have to convert
    # from Hz to fft bin number
    bin = pylab.floor((nfft+1)*mel2hz(melpoints)/samplerate)

    fbank = pylab.zeros([nfilt, nfft/2+1])
    for j in xrange(0, nfilt):
        for i in xrange(int(bin[j]), int(bin[j+1])):
            fbank[j, i] = (i - bin[j])/(bin[j+1]-bin[j])
        for i in xrange(int(bin[j+1]), int(bin[j+2])):
            fbank[j, i] = (bin[j+2]-i)/(bin[j+2]-bin[j+1])
    return fbank
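# Usage sketch (hedged: hz2mel/mel2hz must be defined alongside the function,
# as its body requires). Build a standard 26-band bank for 16 kHz audio:
fbank = get_filterbanks(nfilt=26, nfft=512, samplerate=16000)
print fbank.shape  # (26, 257): one triangular filter per row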
def displayData(X):
    print "Visualizing"
    m, n = X.shape
    width = round(sqrt(n))
    height = width
    display_rows = int(floor(sqrt(m)))
    display_cols = int(ceil(m/display_rows))

    print "Cell width:", width
    print "Cell height:", height
    print "Display rows:", display_rows
    print "Display columns:", display_cols

    display = zeros((display_rows*height, display_cols*width))

    # Iterate through the training sets, reshape each one and populate
    # the display matrix with the letter matrixes.
    for xrow in range(0, m):
        rowindex = divide(xrow, display_cols)
        columnindex = remainder(xrow, display_cols)
        rowstart = int(rowindex*height)
        rowend = int((rowindex+1)*height)
        colstart = int(columnindex*width)
        colend = int((columnindex+1)*width)
        display[rowstart:rowend, colstart:colend] = X[xrow, :].reshape(height, width).transpose()

    imshow(display, cmap=get_cmap('binary'), interpolation='none')

    # Show plot without blocking
    draw()
def _make_log_freq_map(self):
    """
    ::
        For the given ncoef (bands-per-octave) and nfft, calculate the center frequencies
        and bandwidths of linear and log-scaled frequency axes for a constant-Q transform.
    """
    fp = self.feature_params
    bpo = float(self.nbpo)  # Bands per octave
    self._fftN = float(self.nfft)
    hi_edge = float(self.hi)
    lo_edge = float(self.lo)
    f_ratio = 2.0**(1.0 / bpo)  # Constant-Q bandwidth
    self._cqtN = float(P.floor(P.log(hi_edge/lo_edge)/P.log(f_ratio)))
    self._dctN = self._cqtN
    self._outN = float(self.nfft/2+1)
    if self._cqtN < 1:
        print "warning: cqtN not positive definite"
    mxnorm = P.empty(self._cqtN)  # Normalization coefficients
    fftfrqs = self._fftfrqs  #P.array([i * self.sample_rate / float(self._fftN) for i in P.arange(self._outN)])
    logfrqs = P.array([lo_edge * P.exp(P.log(2.0)*i/bpo) for i in P.arange(self._cqtN)])
    logfbws = P.array([max(logfrqs[i] * (f_ratio - 1.0), self.sample_rate / float(self._fftN))
                       for i in P.arange(self._cqtN)])
    #self._fftfrqs = fftfrqs
    self._logfrqs = logfrqs
    self._logfbws = logfbws
    self._make_cqt()
def add_zeroline(current_data):
    from pylab import plot, legend, xticks, floor
    t = current_data.t
    #legend(('surface','topography'),loc='lower left')
    plot(t, 0*t, 'k')
    n = int(floor(t.max()/3600.) + 2)
    xticks([3600*i for i in range(n)])
def logBinInt(base, s, ps):
    # Bin the data in bins with edges base**1, base**2 ...
    # all s such that base**i < x <= base**(i+1) goes in the bin i+1.
    # It is assumed that all s are strictly positive integers and
    # that s is sorted.

    # Check for sorted input
    for ind in range(1, len(s)):
        if s[ind] <= s[ind-1]:
            print "Unsorted input or repeated integers input. Returning."
            return [], []

    # Construct a list of all edges of the bins
    maxPow = pylab.ceil(pylab.log(max(s))/pylab.log(base)).astype(int)
    maxEdge = pylab.floor(base**maxPow)

    # Generate edges of the bins
    binEdge = []
    lastEdge = pylab.floor(base)
    exp = 1.0
    while lastEdge < maxEdge:
        if pylab.floor(base**exp) > lastEdge:
            binEdge.append(lastEdge)
            lastEdge = pylab.floor(base**exp)
        exp = exp+1
    binEdge.append(maxEdge)
    binEdge = numpy.array(binEdge)

    # Calculate bin sizes and centers
    binSize = numpy.zeros(len(binEdge))
    binCenter = numpy.zeros(len(binEdge))
    binSize[0] = binEdge[0]
    binCenter[0] = (1.0 + binEdge[0])*0.5
    for j in range(1, len(binEdge)):
        binSize[j] = binEdge[j] - binEdge[j-1]
        binCenter[j] = (binEdge[j] + binEdge[j-1] + 1)*0.5

    # Calculate bin probability
    binProb = numpy.zeros(len(binEdge))
    binNum = 0
    for data, prob in zip(s, ps):
        while data > binEdge[binNum]:
            binNum = binNum+1
        binProb[binNum] = binProb[binNum] + prob
    binProb = binProb/binSize

    return binCenter, binProb
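# Example (illustrative): logarithmically bin a toy integer distribution with
# base 2. Input must be sorted, strictly positive and repeat-free, as the
# function checks; a uniform probability vector is used here.
s = [1, 2, 3, 4, 6, 8, 12, 16]
ps = [1.0 / len(s)] * len(s)
centers, probs = logBinInt(2.0, s, ps)
print centers
print probs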
def fix_gauge(current_data):
    from pylab import plot, legend, xticks, floor, yticks, xlabel, savefig
    t = current_data.t
    gaugeno = current_data.gaugeno
    n = int(floor(t.max() / 1800.) + 2)
    xticks([1800*i for i in range(n)], [str(i/2.) for i in range(n)],
           fontsize=15)
    yticks(fontsize=15)
    xlabel("Hours")
def pV(image, x, y):
    # Get the value of a point (interpolated x, y) in the given image
    if not image.in_bounds(x, y):
        return 0

    x_low, x_high = floor(x), floor(x + 1)
    y_low, y_high = floor(y), floor(y + 1)

    x_y = (x_high - x_low) * (y_high - y_low)

    a = x_high - x
    b = y_high - y
    c = x - x_low
    d = y - y_low

    return image[x_low, y_low] / x_y * a * b \
        + image[x_high, y_low] / x_y * c * b \
        + image[x_low, y_high] / x_y * a * d \
        + image[x_high, y_high] / x_y * c * d
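# Example sketch (hedged: pV expects an image object exposing in_bounds() and
# 2-D indexing; the minimal numpy wrapper below is purely illustrative and
# not part of the original code).
import pylab

class _Img(object):
    def __init__(self, a):
        self.a = a
    def in_bounds(self, x, y):
        return 0 <= x < self.a.shape[0] - 1 and 0 <= y < self.a.shape[1] - 1
    def __getitem__(self, idx):
        return self.a[int(idx[0]), int(idx[1])]

im = _Img(pylab.arange(16.0).reshape(4, 4))
print pV(im, 1.5, 1.5)  # 7.5, the bilinear blend of the four neighbours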
def error_label(norm, maxnorm, xpos, ypos):
    vexp = pylab.floor(pylab.log10(norm))
    if norm == 0:
        vexp = 0
    vmant = norm / 10**vexp
    thistext = r'$\Vert$E$\Vert_1 = \, %4.2f' % vmant
    if vexp != 0:
        thistext += r' \times \, 10^{%2d}' % vexp
    thistext += '$\n'

    vexp = pylab.floor(pylab.log10(maxnorm))
    if maxnorm == 0:
        vexp = 0
    vmant = maxnorm / 10**vexp
    thistext += r'$\Vert$E$\Vert_\infty \! = \, %4.2f' % vmant
    if vexp != 0:
        thistext += r' \times \, 10^{%2d}' % vexp
    thistext += '$'

    pylab.text(xpos, ypos, thistext, va='top')
def add_zeroline(current_data):
    from pylab import plot, legend, xticks, floor, axis, xlabel
    t = current_data.t
    gaugeno = current_data.gaugeno
    plot(t, 0*t, 'k')
    n = int(floor(t.max()/3600.) + 2)
    xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)])
    xlabel('time (hours)')
def add_zeroline(current_data):
    from pylab import plot, legend, xticks, floor, yticks
    t = current_data.t
    #legend(('surface','topography'),loc='lower left')
    #plot([0,10800],[0,0],'k')
    n = int(floor(t.max()/3600.)) + 2  # int() so range() below gets an integer
    xticks([3600*i for i in range(n)], [str(i) for i in range(n)],
           fontsize=15)
    yticks(fontsize=15)
def add_zeroline(current_data):
    from pylab import plot, legend, xticks, floor, xlim, ylim
    t = current_data.t
    #legend(('surface','topography'),loc='lower left')
    plot(t, 0*t, 'k')
    n = int(floor(t.max()/1800.)) + 2
    xticks([1800*i for i in range(n)], [str(0.5*i) for i in range(n)])
    xlim(25000, t.max())
    #ylim(-0.5,0.5)
    print "+++ gaugeno = ", current_data.gaugeno
def _set_range(self, xLims, xStep, yLims=None, yStep=None, zLims=None, zStep=None):
    self.xNbOfSteps = int(abs(pl.floor((float(xLims[1]) - float(xLims[0])) / float(xStep))) + 1)
    self.xPositions = pl.linspace(xLims[0], xLims[1], self.xNbOfSteps)
    self.xStep = xStep
    if yLims is not None or yStep is not None:
        self.yNbOfSteps = int(abs(pl.floor((float(yLims[1]) - float(yLims[0])) / float(yStep))) + 1)
        self.yPositions = pl.linspace(yLims[0], yLims[1], self.yNbOfSteps)
        self.yStep = yStep
    else:
        self.yNbOfSteps = 1
        self.yPositions = pl.array([0])
        self.yStep = 0
    if zLims is not None or zStep is not None:
        self.zNbOfSteps = int(abs(pl.floor((float(zLims[1]) - float(zLims[0])) / float(zStep))) + 1)
        self.zPositions = pl.linspace(zLims[0], zLims[1], self.zNbOfSteps)
        self.zStep = zStep
    else:
        self.zNbOfSteps = 1
        self.zPositions = pl.array([0])
        self.zStep = 0
    self.totalNbOfSteps = self.xNbOfSteps * self.yNbOfSteps
def update_ret_response(self, new_img):
    '''
    :param new_img: new frame should be normalized, for tracker_status
      estimating the rect_snr
    :return:
    '''
    self.canvas = new_img.copy()
    self.trackNo += 1

    # get subwindow at current estimated target position, to train classifier
    x = self.get_subwindow(new_img, self.pos, self.window_sz, self.cos_window)
    # calculate response of the classifier at all locations
    k = self.dense_gauss_kernel(self.sigma, x, self.z)
    kf = pylab.fft2(k)
    alphaf_kf = pylab.multiply(self.alphaf, kf)
    response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

    # target location is at the maximum response
    row, col = pylab.unravel_index(response.argmax(), response.shape)
    # roi rect's topleft point add [row, col]
    self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

    # here the pos is not given to self.pos at once, we need to check the psr first.
    # if it is above the threshold (default is 5), self.pos = pos.
    pos = np.array([self.tly, self.tlx]) + np.array([row, col])

    # Noting, for pos(cy,cx)! for cv2.rect rect(x,y,w,h)!
    rect = pylab.array([pos[1] - self.target_sz[1] / 2,
                        pos[0] - self.target_sz[0] / 2,
                        self.target_sz[1], self.target_sz[0]])
    rect = rect.astype(np.int)
    self.psr, self.trkStatus = self.tracker_status(col, row, response, rect, new_img)
    self.pos = pos

    # only update when tracker_status's psr is high
    if (self.psr > 10):
        # computing new_alphaf and observed x as z
        x = self.get_subwindow(new_img, self.pos, self.window_sz, self.cos_window)
        # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
        k = self.dense_gauss_kernel(self.sigma, x)
        new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        new_z = x
        # subsequent frames, interpolate model
        f = self.interpolation_factor
        self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
        self.z = (1 - f) * self.z + f * new_z

    ok = 1
    return ok, rect, self.psr, response
def add_zeroline(current_data):
    from pylab import plot, legend, xticks, floor, xlim
    t = current_data.t
    dtaxis = 60.  # Set the axis time increment, in seconds
    #t = t/dtaxis
    tmax = t.max()
    #print "tmax = ",tmax
    legend(('surface', 'topography'), loc='lower left')
    plot([0, tmax], [0, 0], 'k')
    n = int(floor(tmax) + 2)
    xticks([dtaxis*i for i in range(n)])
    xlim(0., tmax)
def fix_gauge(current_data):
    from pylab import plot, legend, xticks, floor, yticks
    t = current_data.t
    gaugeno = current_data.gaugeno
    if gaugeno == 19750:
        plot(TG_19750[:, 0], TG_19750[:, 1], 'r')
        legend(('GeoClaw', 'Tide Gauge'), loc='lower left')
    #plot([0,10800],[0,0],'k')
    n = int(floor(t.max()/3600.) + 2)
    xticks([3600*i for i in range(n)], [str(i) for i in range(n)],
           fontsize=15)
    yticks(fontsize=15)
def as_hms(self, decimals=0):
    h_float = abs(self.value)*12.0/pi
    h_int = int(floor(h_float))
    m_float = 60*(h_float - h_int)
    m_int = int(floor(m_float))
    s_float = 60*(m_float - m_int)
    s_int = int(floor(s_float))
    frac_int = int(floor(10**decimals*(s_float - s_int)+0.5))
    if frac_int >= 10**decimals:
        frac_int -= 10**decimals
        s_int += 1
    if s_int >= 60:
        s_int -= 60
        m_int += 1
    if m_int >= 60:
        m_int -= 60
        h_int += 1
    max_h = int(floor(self.upper_bound*12/pi+0.5))
    min_h = int(floor(self.lower_bound*12/pi+0.5))
    if h_int >= max_h and self.cyclical and not self.include_upper_bound:
        h_int -= (max_h-min_h)
    sign_char = ''
    if self.value < 0:
        sign_char = '-'
    base_str = sign_char+str(h_int).rjust(2, '0')+':'+str(m_int).rjust(2, '0')+':'+str(s_int).rjust(2, '0')
    if decimals == 0:
        return base_str
    else:
        return base_str+'.'+str(frac_int).rjust(decimals, '0')
def initialize(self, image, pos, target_sz):
    if len(image.shape) == 3 and image.shape[2] > 1:
        image = rgb2gray(image)
    self.image = image
    if self.should_resize_image:
        self.image = scipy.misc.imresize(self.image, 0.5)
        self.image = self.image / 255.0

    # window size, taking padding into account
    self.sz = pylab.floor(target_sz * (1 + self.padding))
    self.pos = pos

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(self.sz)) * self.output_sigma_factor

    grid_y = pylab.arange(self.sz[0]) - pylab.floor(self.sz[0] / 2)
    grid_x = pylab.arange(self.sz[1]) - pylab.floor(self.sz[1] / 2)
    #[rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    self.y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    self.yf = pylab.fft2(self.y)

    # store pre-computed cosine window
    self.cos_window = pylab.outer(pylab.hanning(self.sz[0]),
                                  pylab.hanning(self.sz[1]))

    # get subwindow at current estimated target position,
    # to train classifier
    x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

    # Kernel Regularized Least-Squares,
    # calculate alphas (in Fourier domain)
    k = dense_gauss_kernel(self.sigma, x)
    self.alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
    self.z = x
    return
def gauge_afteraxes(current_data):
    from pylab import plot, xticks, floor
    t = current_data.t
    #legend(('surface','topography'),loc='lower left')
    plot(t, 0*t, 'k')
    n = int(floor(t.max()/3600.) + 2)
    xticks([3600*i for i in range(n)])
    plt.xlabel("t (s)")
    plt.ylabel("Surface (m)")
    if current_data.gaugeno == 4:
        # Add gauge observations
        plot(gauge_data[:, 0] * 60.0, gauge_data[:, 1], 'x')
def solve(n, x, y):
    small = int(floor((1 + sqrt(1 + 8 * n)) / 4))
    level = (abs(x) + abs(y)) / 2 + 1
    left = n - small * (2 * small - 1)
    height = y + 1
    if level <= small:
        return 1.0
    elif level > small + 1 or left < height or height > small * 2:
        return 0.0
    elif left >= height + 2 * small:
        return 1.0
    else:
        return prob(height, left)
def plotOrderEvolution(self):
    py.figure()
    for order in py.arange(-self.ordreMaxPlot, self.ordreMaxPlot + 1, 1):
        middleK = py.floor(len(self.k) / 2) + 1
        py.subplot(2, 4, order + self.ordreMaxPlot + 1)
        slice = int(middleK + py.fix(order / self.ordreMax * middleK))
        py.plot(self.t.T / 1e-6 * self.Texp, self.momentumDensity[:, slice], 'b')
        py.title('Ordre ' + str(order))
        py.xlabel('time (µs)')
        py.ylabel('density')
    py.tight_layout()
    py.show()
def afterframe(current_data):
    from pylab import plot, legend, xticks, floor, axis, xlabel, title
    t = current_data.t
    gaugeno = current_data.gaugeno
    if gaugeno == 1:
        title('Wilford')
    elif gaugeno == 2:
        title('Teton City')
    # plot(t, 0*t, 'k')
    n = int(floor(t.max() / 3600.) + 2)
    xticks([3600 * i for i in range(n)], ['%i' % i for i in range(n)])
    xlabel('time (hours)')
def crop_image(in_file, out_file, sideX, sideY=0):
    if sideY == 0:
        sideY = sideX
    try:
        img = spimage.sp_image_read(in_file, 0)
    except:
        print "Error: %s is not a readable .h5 file\n" % in_file
        exit(1)
    shifted = 0
    if img.shifted:
        shifted = 1
        img = spimage.sp_image_shift(img)
    print "shifted = ", shifted
    lowX = img.detector.image_center[0]-(sideX/2.0-0.5)
    highX = img.detector.image_center[0]+(sideX/2.0-0.5)
    lowY = img.detector.image_center[1]-(sideY/2.0-0.5)
    highY = img.detector.image_center[1]+(sideY/2.0-0.5)
    print lowX, " ", highX
    print lowY, " ", highY
    if lowX != pylab.floor(lowX):
        lowX = int(pylab.floor(lowX))
        highX = int(pylab.floor(highX))
        img.detector.image_center[0] -= 0.5
    else:
        lowX = int(lowX)
        highX = int(highX)
    if lowY != pylab.floor(lowY):
        lowY = int(pylab.floor(lowY))
        highY = int(pylab.floor(highY))
        img.detector.image_center[1] -= 0.5
    else:
        lowY = int(lowY)
        highY = int(highY)
    cropped = spimage.rectangle_crop(img, lowX, lowY, highX, highY)
    print "did crop"
    if shifted:
        cropped = spimage.sp_image_shift(cropped)
    print "shifted (or not)"
    print "write ", out_file
    #print "orientation = ", cropped.detector.orientation
    #print spimage.sp_3matrix_get(cropped.detector.orientation,0,0,0)
    try:
        spimage.sp_image_write(cropped, out_file, 16)
    except:
        print "Error: can not write to %s\n" % out_file
    print "end"
def val_change(self):
    self.v_i = int(pl.floor(self.start_v.value() * 1000))  # in mV
    self.v_f = int(pl.ceil(self.end_v.value() * 1000))  # in mV
    self.dt = int(self.time.value() * 1000)  # in ms
    if self.no_of_steps == self.steps.value():  # step_size was modified
        self.step_size = self.step_size_field.value()  # in mV
        self.no_of_steps = int(float(self.v_f - self.v_i) // self.step_size) + 1
        self.steps.setValue(self.no_of_steps)
    else:
        self.no_of_steps = self.steps.value()
        self.step_size = int(float(self.v_f - self.v_i) / (self.no_of_steps - 1))
        self.step_size_field.setValue(self.step_size)
    self.repeats = self.reps.value()
    self.counterDev = self.cb.currentIndex()
def jetWoGn(reverse=False):
    """
    jetWoGn(reverse=False)
       - returning a colormap similar to cm.jet, but without green.
         if reverse=True, the map starts with red instead of blue.
    """
    m = 18  # magic number, which works fine
    m0 = pylab.floor(m * 0.0)
    m1 = pylab.floor(m * 0.2)
    m2 = pylab.floor(m * 0.2)
    m3 = pylab.floor(m / 2) - m2 - m1

    b_ = pylab.hstack((0.4 * pylab.arange(m1) / (m1 - 1.) + 0.6,
                       pylab.ones((m2 + m3, ))))
    g_ = pylab.hstack((pylab.zeros((m1, )),
                       pylab.arange(m2) / (m2 - 1.),
                       pylab.ones((m3, ))))
    r_ = pylab.hstack((pylab.zeros((m1, )),
                       pylab.zeros((m2, )),
                       pylab.arange(m3) / (m3 - 1.)))

    r = pylab.hstack((r_, pylab.flipud(b_)))
    g = pylab.hstack((g_, pylab.flipud(g_)))
    b = pylab.hstack((b_, pylab.flipud(r_)))

    if reverse:
        r = pylab.flipud(r)
        g = pylab.flipud(g)
        b = pylab.flipud(b)

    ra = pylab.linspace(0.0, 1.0, m)

    cdict = {'red': zip(ra, r, r),
             'green': zip(ra, g, g),
             'blue': zip(ra, b, b)}

    return LinearSegmentedColormap('new_RdBl', cdict, 256)
def _SxyzPosChange(self, event, axis, ui_element):
    """
    Function called when the user changes the value of the axis position.
    (called exclusively by the functions immediately below)
    Will give the orders and ensure a smooth movement to position.
    """
    currentVoltage = ui_element.value()
    smoothNbOfSteps = int(max((abs(pl.floor((event - currentVoltage) / float(self.SMOOTH_STEP))), 1)) + 1)
    smoothPositions = pl.linspace(currentVoltage, event, smoothNbOfSteps)
    for volts in smoothPositions:
        axis.write(volts * self.CONVERSION_FACTOR)
        time.sleep(self.SMOOTH_DELAY)
def adjust(self, x=None):
    v = self.value
    if x is not None:
        v = x
    if self.cyclical:
        if self.include_upper_bound and v == self.upper_bound:
            return self.value
        range = self.upper_bound - self.lower_bound
        steps = floor((v - self.lower_bound)/range)
        v -= steps*range
    else:
        v = max(self.lower_bound, min(v, self.upper_bound))
    if x is None:
        self.value = v
    return v
def _fillOneSlot(self, point):
    '''
    point, 3-dim array
    '''
    #pdb.set_trace()
    rotDeg = pl.arctan(point[1] / point[0]) * RAD2DEG
    if point[0] < 0 and point[1] > 0:
        rotDeg += 180
    elif point[0] < 0 and point[1] < 0:
        rotDeg -= 180
    col = pl.floor((rotDeg - self.rotDegMin) / self.rotDegRes)
    xyDist = pl.norm(point[:2])
    row = pl.floor((pl.arctan(point[2] / xyDist) * RAD2DEG - self.vertDegMin) / self.vertDegRes)
    try:
        self.valSlots[row, col] += pl.norm(point)
        self.cntSlots[row, col] += 1
    except IndexError:
        exit(0)
    return row, col
def add_zeroline(current_data):
    from pylab import plot, legend, xticks, floor, axis, xlabel
    t = current_data.t
    gaugeno = current_data.gaugeno
    if gaugeno == 32412:
        try:
            plot(TG32412[:, 0], TG32412[:, 1], 'r')
            legend(['GeoClaw', 'Obs'], loc='lower right')
        except:
            pass
        axis((0, t.max(), -0.3, 0.3))
    plot(t, 0*t, 'k')
    n = int(floor(t.max()/3600.) + 2)
    xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)])
    xlabel('time (hours)')
def dense_gauss_kernel(sigma, x, y=None):
    """
    Gaussian Kernel with dense sampling.
    Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
    between input images X and Y, which must both be MxN. They must also
    be periodic (ie., pre-processed with a cosine window). The result is
    an MxN map of responses.

    If X and Y are the same, omit the third parameter to re-use some
    values, which is faster.
    """
    xf = pylab.fft2(x)  # x in Fourier domain
    x_flat = x.flatten()
    xx = pylab.dot(x_flat.transpose(), x_flat)  # squared norm of x

    if y is not None:
        # general case, x and y are different
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation term in Fourier domain
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # to spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    #xy_complex = circshift(xyf_ifft, floor(x.shape/2))
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # calculate gaussian response for all positions
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    k = pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))

    #print("dense_gauss_kernel x.shape ==", x.shape)
    #print("dense_gauss_kernel k.shape ==", k.shape)

    return k
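# Usage sketch (hedged: inputs should be cosine-windowed, zero-centred
# patches as the docstring requires; the random patch is illustrative only).
import pylab
win = pylab.outer(pylab.hanning(64), pylab.hanning(64))
x = (pylab.rand(64, 64) - 0.5) * win
k = dense_gauss_kernel(0.2, x)  # auto-correlation case, y omitted
print k.shape, k.argmax()       # peak at the zero-displacement bin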
def dec2num(dectime):
    """
    Function converts decimal time from year fraction (e.g., 1991.875
    such as in IDL, CCG) to a python decimal numtime
    """
    from pylab import floor, drange, num2date, date2num
    if not isinstance(dectime, list):
        dectime = [dectime]
    newdectime = []
    for dd in dectime:
        yr = floor(dd)
        Days0 = date2num(datetime(int(yr), 1, 1))
        if calendar.isleap(yr):
            DaysPerYear = 366.
        else:
            DaysPerYear = 365.
        DayFrac = (dd - yr) * DaysPerYear
        newdectime.append(Days0 + DayFrac)
    if len(newdectime) == 1:
        return newdectime[0]
    return newdectime
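# Example: 1991.875 is 7/8 through the non-leap year 1991, i.e. day 319.375
# after Jan 1 (dec2num itself assumes module-level datetime/calendar imports).
from pylab import num2date
print num2date(dec2num(1991.875))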
def val_change(self):
    self.stop()
    self.v_i = int(pl.floor(self.start_v.value() * 1000))  # in mV
    self.v_f = int(pl.ceil(self.end_v.value() * 1000))  # in mV
    self.dt = int(self.time.value() * 1000)  # in ms
    if self.no_of_steps == self.steps.value():  # step_size was modified
        self.step_size = self.step_size_field.value()  # in mV
        self.no_of_steps = int(abs(float(self.v_f - self.v_i)) // self.step_size) + 1
        self.steps.setValue(self.no_of_steps)
    else:  # no_of_steps was modified
        self.no_of_steps = self.steps.value()
        self.step_size = int(abs(float(self.v_f - self.v_i)) / (self.no_of_steps - 1))
        self.step_size_field.setValue(self.step_size)
    self.counterDev = self.cb.currentIndex()
    self.nb_of_scans = self.scanNb_field.value()
    self.backAndForthMode = self.backAndForthCheckBox.isChecked()
def dense_gauss_kernel(sigma, x, y=None):
    """
    Compute the response map of a cosine-windowed image patch with a
    Gaussian kernel.

    Evaluates the relation between two MxN image patches X and Y with a
    Gaussian kernel of bandwidth sigma. X and Y are circulant and
    pre-processed with a cosine window; the output is an MxN response map.
    If X = Y, omit y in the call to speed up the computation.

    This corresponds to Eq. (16) in the paper and to
    `function k = dgk(x1, x2, sigma)` in Algorithm 1.

    :param sigma: Gaussian kernel bandwidth
    :param x: cosine-windowed image patch
    :param y: None, or the template patch
    :return: response map
    """
    # Fourier transform of patch x
    xf = pylab.fft2(x)  # x in Fourier domain
    # flatten patch x
    x_flat = x.flatten()
    # squared norm of x
    xx = pylab.dot(x_flat.transpose(), x_flat)

    if y is not None:
        # general case: x and y differ, so also compute the Fourier
        # transform and squared norm of y
        yf = pylab.fft2(y)
        y_flat = y.flatten()
        yy = pylab.dot(y_flat.transpose(), y_flat)
    else:
        # auto-correlation of x, avoid repeating a few operations
        yf = xf
        yy = xx

    # cross-correlation in the Fourier domain, element-wise product
    xyf = pylab.multiply(xf, pylab.conj(yf))

    # back to the spatial domain
    xyf_ifft = pylab.ifft2(xyf)
    # circularly shift the result along the row and column axes
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    xy_complex = pylab.roll(xyf_ifft, row_shift, axis=0)
    xy_complex = pylab.roll(xy_complex, col_shift, axis=1)
    xy = pylab.real(xy_complex)

    # Gaussian kernel response map
    scaling = -1 / (sigma**2)
    xx_yy = xx + yy
    xx_yy_2xy = xx_yy - 2 * xy
    return pylab.exp(scaling * pylab.maximum(0, xx_yy_2xy / x.size))
def fit_latent_simplex(X, iter=10000, burn=5000, thin=5):
    vars = latent_simplex(X)

    m = mc.MAP([vars['alpha'], vars['X_obs']])
    m.fit(method='fmin_powell', verbose=0)
    #print vars['pi'].value

    m = mc.MCMC(vars)
    for alpha_t in m.alpha:
        m.use_step_method(mc.AdaptiveMetropolis, alpha_t)
    m.sample(iter, burn, thin, verbose=0)

    pi = m.pi.trace()
    print 'mean: ', pl.floor(m.pi.stats()['mean'] * 100. + .5) / 100.
    #print 'ui:\n', pl.floor(m.pi.stats()['95% HPD interval']*100.+.5)/100.
    return m, pi.view(pl.recarray)
def ReSample(data, phase, nps):
    """
    re-samples the data with nps frames per stride. phase gives the phase
    information at a given cycle. the output will be truncated towards an
    integer number of strides. The last stride will usually be removed.
    Data must be given in a D x N-format,
    D: number of Dimensions, N: number of samples
    """
    minPhi = ceil(phase[0] / (2. * pi)) * 2. * pi
    maxPhi = floor(phase[-1] / (2. * pi)) * 2. * pi
    nFrames = round((maxPhi - minPhi) / (2. * pi) * nps)
    phi_new = linspace(minPhi, maxPhi, nFrames, endpoint=False)
    return vstack([interp(phi_new, phase, data[row, :])
                   for row in range(data.shape[0])])
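# Usage sketch (illustrative): resample a 2-D signal covering a bit over two
# strides to nps=100 frames per stride; the partial stride is dropped, as the
# docstring says. ReSample itself assumes pylab-star globals (ceil, floor,
# linspace, interp, vstack).
from pylab import pi, linspace, sin, vstack
phase = linspace(0, 4.2 * pi, 500)           # slightly over two strides
data = vstack([sin(phase), sin(2 * phase)])  # D x N input, D=2
print ReSample(data, phase, nps=100).shape   # (2, 200): two full strides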
def CountHunry(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'RecourceUptakenCells.dat'):
            uptake_res_data = p.genfromtxt(filepath)
            uptake_res_data = uptake_res_data[-p.floor(uptake_res_data.shape[0]):]
            for k in xrange(uptake_res_data.shape[0]):
                uptake_res_data[k, :] = uptake_res_data[k, :] \
                    / uptake_res_data[k, :].sum()
            means_uptake = uptake_res_data[:, 0].mean()
            STD_uptake = uptake_res_data[:, 0].std()
            filepath_turb = os.path.join(dirname, 'ModelParams.dat')
            l = re.split(" ", ln.getline(filepath_turb, 6))
            turb_param = float(l[6])
            l = re.split(" ", ln.getline(filepath_turb, important_line))
            interesting_param = float(l[6])
            arg.append((means_uptake, STD_uptake, turb_param, interesting_param))
def CountTrueDeathRate(arg, dirname, files):
    for file in files:
        Grand_mean = p.nan
        Grand_STD = p.nan
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:
                half_of_data = p.floor(data.shape[0] / 2)
                data_chopped = data[half_of_data:-1, :]
                Grand_mean = (data_chopped[:, 11] / data_chopped[:, 4]).mean()
                Grand_STD = (data_chopped[:, 11] / data_chopped[:, 4]).std()
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                l = re.split(" ", ln.getline(filepath_turb, important_line))
                interesting_param = float(l[6])
                arg.append((Grand_mean, Grand_STD, turb_param, interesting_param))
            else:
                break
def __init__(self, mua_data, dt, lfp_data=None, z_start=0., z_space=0.1,
             casename='', rneg_factor=1E6, tstim=0, sub_at='base',
             verbose=False):
    '''
    This function ...

    Arguments
    ---------

    Keyword arguments
    -----------------
    '''
    if verbose:
        msg = 'This is class *LPA_Signal* in *pyLPA* module'
        print msg
    if not oopt_import:
        print oopt_import_msg

    self.nstim, self.ntime, self.nchan = pl.asarray(mua_data).shape
    self.dt = dt
    self.z_start = z_start
    self.z_space = z_space
    self.el_coords = pl.arange(z_start, z_start+self.nchan*z_space, z_space)
    self.rneg_factor = rneg_factor
    self.tstim = tstim
    self.sub_at = sub_at
    self.verbose = verbose

    self.tstim_idx = pl.floor(self.tstim/self.dt)

    # create matrices and calculate variances
    self.importDataset(mua_data, 'MUA')
    # `is not None` instead of `not lfp_data==None`: comparing an array to
    # None element-wise would raise here
    if lfp_data is not None:
        self.importDataset(lfp_data, 'LFP')